├── swarmsible ├── .gitignore ├── roles │ ├── docker-post-setup │ │ ├── files │ │ │ ├── 2019_03_15 │ │ │ │ └── daemon.json │ │ │ └── 2020_07_17 │ │ │ │ └── daemon.json │ │ ├── templates │ │ │ ├── 2021_03_04 │ │ │ │ └── daemon.json │ │ │ ├── 2022_06_11 │ │ │ │ └── daemon.json.j2 │ │ │ └── 2024_04_16 │ │ │ │ └── daemon.json.j2 │ │ └── tasks │ │ │ ├── main.yml │ │ │ └── subtasks │ │ │ ├── 2024_04_16.yml │ │ │ ├── 2019_03_15.yml │ │ │ ├── 2020_07_17.yml │ │ │ ├── 2021_03_04.yml │ │ │ └── 2022_06_11.yml │ ├── user-setup │ │ ├── tasks │ │ │ ├── subtasks │ │ │ │ ├── vars │ │ │ │ │ ├── Debian.yml │ │ │ │ │ └── Ubuntu.yml │ │ │ │ ├── root-setup.yml │ │ │ │ ├── sshd-config.yml │ │ │ │ ├── setup-single-user.yml │ │ │ │ └── templates │ │ │ │ │ ├── Debian.sshd_config.j2 │ │ │ │ │ └── Ubuntu.sshd_config.j2 │ │ │ └── main.yml │ │ ├── templates │ │ │ └── sudoers.j2 │ │ └── files │ │ │ └── .bashrc │ ├── copy-ssl-certs │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── docker-swarm-leave │ │ └── tasks │ │ │ └── main.yml │ ├── docker-pre-setup │ │ ├── templates │ │ │ ├── apt-preference-docker.j2 │ │ │ ├── apt-preference-docker-cli.j2 │ │ │ ├── apt-preference-containerd.j2 │ │ │ └── apt-preference-docker-ce-rootless-extras.j2 │ │ └── tasks │ │ │ └── main.yml │ ├── docker-swarm-hosts │ │ └── tasks │ │ │ └── main.yml │ ├── docker-sysctl-tune │ │ └── tasks │ │ │ └── main.yml │ ├── developer-accounts │ │ ├── subtasks │ │ │ ├── add-multiple-keys.yml │ │ │ ├── add-additional-keys.yml │ │ │ └── add-single-key.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── files │ │ │ └── .bashrc │ ├── docker-swarm-labels │ │ └── tasks │ │ │ └── main.yml │ ├── essential-software-setup │ │ ├── subtasks │ │ │ └── molly-guard.yml │ │ └── tasks │ │ │ └── main.yml │ ├── docker-login │ │ └── tasks │ │ │ └── main.yml │ ├── notnagel-user │ │ └── tasks │ │ │ └── main.yml │ ├── docker-swarm-add-manager │ │ └── tasks │ │ │ └── main.yml │ ├── docker-swarm-add-worker │ │ └── tasks │ │ │ └── main.yml 
│ ├── docker-setup │ │ ├── tasks │ │ │ ├── main.yml │ │ │ └── setup-Debian.yml │ │ └── defaults │ │ │ └── main.yml │ ├── ufw-docker-install │ │ ├── tasks │ │ │ └── main.yml │ │ └── files │ │ │ └── ufw-docker │ ├── docker-node-upgrade │ │ └── tasks │ │ │ ├── main.yml │ │ │ └── upgrade-node.yml │ ├── docker-swarm-hetzner-init-variables │ │ └── tasks │ │ │ └── main.yml │ ├── full-apt-upgrade │ │ └── tasks │ │ │ └── main.yml │ ├── docker-swarm-init │ │ └── tasks │ │ │ └── main.yml │ └── docker-swarm-firewall │ │ └── tasks │ │ └── main.yml ├── docker_swarm_firewall.yml ├── developer_accounts.yml ├── docker_swarm_relabel.yml ├── upgrade.yml ├── docker_swarm_volumes.yml ├── docker_swarm.yml └── ansible_setup.yml ├── README.md └── LICENSE /swarmsible/.gitignore: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /swarmsible/roles/docker-post-setup/files/2019_03_15/daemon.json: -------------------------------------------------------------------------------- 1 | { 2 | } -------------------------------------------------------------------------------- /swarmsible/roles/user-setup/tasks/subtasks/vars/Debian.yml: -------------------------------------------------------------------------------- 1 | SSH_CONFIG: /etc/ssh/sshd_config -------------------------------------------------------------------------------- /swarmsible/roles/user-setup/tasks/subtasks/vars/Ubuntu.yml: -------------------------------------------------------------------------------- 1 | SSH_CONFIG: /etc/ssh/sshd_config -------------------------------------------------------------------------------- /swarmsible/roles/copy-ssl-certs/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | ssl_certs_base_dir: "{{playbook_dir}}/files/certs/{{inventory_hostname}}" 4 | ssl_cert_dirs: [] 
-------------------------------------------------------------------------------- /swarmsible/roles/docker-swarm-leave/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Nodes Leaving the Swarm 3 | shell: docker swarm leave -f 4 | ignore_errors: true 5 | -------------------------------------------------------------------------------- /swarmsible/roles/docker-post-setup/files/2020_07_17/daemon.json: -------------------------------------------------------------------------------- 1 | { 2 | "default-address-pools": [ 3 | {"base":"172.18.0.0/16","size":24} 4 | ] 5 | } -------------------------------------------------------------------------------- /swarmsible/roles/docker-pre-setup/templates/apt-preference-docker.j2: -------------------------------------------------------------------------------- 1 | Package: docker-ce 2 | Pin: version {{ docker_pinned_package }} 3 | Pin-Priority: 1000 4 | -------------------------------------------------------------------------------- /swarmsible/roles/docker-pre-setup/templates/apt-preference-docker-cli.j2: -------------------------------------------------------------------------------- 1 | Package: docker-ce-cli 2 | Pin: version {{ docker_pinned_package }} 3 | Pin-Priority: 1000 4 | -------------------------------------------------------------------------------- /swarmsible/roles/docker-pre-setup/templates/apt-preference-containerd.j2: -------------------------------------------------------------------------------- 1 | Package: containerd.io 2 | Pin: version {{ containerd_pinned_package }} 3 | Pin-Priority: 1000 4 | -------------------------------------------------------------------------------- /swarmsible/roles/docker-pre-setup/templates/apt-preference-docker-ce-rootless-extras.j2: -------------------------------------------------------------------------------- 1 | Package: docker-ce-rootless-extras 2 | Pin: version {{ docker_pinned_package }} 3 | Pin-Priority: 1000 
4 | -------------------------------------------------------------------------------- /swarmsible/roles/user-setup/tasks/subtasks/root-setup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set up bashrc for root 3 | copy: 4 | src: .bashrc 5 | dest: /root/.bashrc 6 | owner: root 7 | group: root -------------------------------------------------------------------------------- /swarmsible/roles/user-setup/templates/sudoers.j2: -------------------------------------------------------------------------------- 1 | {% for group_info in all_group_infos %} 2 | {% if group_info.requires_root_password == False %} 3 | {{ group_info.name }} ALL=(ALL:ALL) NOPASSWD:ALL 4 | {% endif %} 5 | {% endfor %} -------------------------------------------------------------------------------- /swarmsible/roles/docker-swarm-hosts/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure Hosts File 3 | lineinfile: path=/etc/hosts regexp='.*{{ item }}$' line="{{ hostvars[item].host_ip }} {{item}}" state=present 4 | when: hostvars[item].host_ip is defined 5 | with_items: "{{ groups['docker_swarm'] }}" -------------------------------------------------------------------------------- /swarmsible/roles/docker-sysctl-tune/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "set {{ item.key }}={{ item.value }}" 4 | sysctl: 5 | name: "{{ item.key }}" 6 | value: "{{ item.value }}" 7 | sysctl_file: /etc/sysctl.conf 8 | reload: yes 9 | with_items: "{{ docker_sysctl_settings | default([]) }}" -------------------------------------------------------------------------------- /swarmsible/roles/developer-accounts/subtasks/add-multiple-keys.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Add public keys for {{ user }}" 3 | authorized_key: 4 | user: "{{ user }}" 5 | 
state: present 6 | key: "{{ lookup('file', key) }}" 7 | with_fileglob: 8 | - "{{ playbook_dir }}/files/all/ssh_files/developer_ssh_keys/{{ user }}/*.pub" 9 | loop_control: 10 | loop_var: key -------------------------------------------------------------------------------- /swarmsible/roles/docker-post-setup/templates/2021_03_04/daemon.json: -------------------------------------------------------------------------------- 1 | { 2 | "default-address-pools": [ 3 | {"base":"172.18.0.0/16","size":24} 4 | ], 5 | "log-driver": "syslog", 6 | "log-opts": { 7 | "syslog-facility": "daemon", 8 | "tag": "{{ inventory_hostname }}|{{ '{{' }}.Name{{ '}}' }}" 9 | }, 10 | "metrics-addr" : "127.0.0.1:9323" 11 | } -------------------------------------------------------------------------------- /swarmsible/roles/docker-swarm-labels/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: set docker swarm labels 4 | docker_node: 5 | labels: "{{ hostvars[item]['docker_swarm_labels'] }}" 6 | labels_state: replace 7 | hostname: "{{ hostvars[item]['inventory_hostname'] }}" 8 | when: inventory_hostname == docker_swarm_main_manager 9 | with_items: "{{ groups['docker_swarm'] | default([]) }}" 10 | -------------------------------------------------------------------------------- /swarmsible/docker_swarm_firewall.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: docker_swarm 3 | become: true 4 | vars: 5 | ansible_ssh_private_key_file: "{{ global_ansible_ssh_private_key_file | default((project_base_dir | default(playbook_dir)) + '/ssh_keys/ansible_rsa') }}" 6 | ansible_user: "{{ global_ansible_user | default('ansible') }}" 7 | roles: 8 | - docker-swarm-hetzner-init-variables 9 | - docker-swarm-firewall -------------------------------------------------------------------------------- /swarmsible/developer_accounts.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - hosts: docker_swarm_manager ansiblemanager 3 | become: true 4 | vars: 5 | ansible_ssh_private_key_file: "{{ global_ansible_ssh_private_key_file | default((project_base_dir | default(playbook_dir)) + '/ssh_keys/ansible_rsa') }}" 6 | ansible_user: "{{ global_ansible_user | default('ansible') }}" 7 | apt_update_cache: True 8 | 9 | roles: 10 | - role: developer-accounts -------------------------------------------------------------------------------- /swarmsible/roles/developer-accounts/subtasks/add-additional-keys.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add additional keys for {{ user }} 3 | authorized_key: 4 | user: "{{ user }}" 5 | state: present 6 | key: "{{ lookup('file', '{{ playbook_dir }}/files/all/ssh_files/developer_ssh_keys/{{ key }}.pub') }}" 7 | loop_control: 8 | loop_var: key 9 | with_items: 10 | - "{{ additional_keys }}" 11 | when: 12 | - key is defined -------------------------------------------------------------------------------- /swarmsible/docker_swarm_relabel.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: docker_swarm 4 | become: true 5 | vars: 6 | ansible_ssh_private_key_file: "{{ global_ansible_ssh_private_key_file | default((project_base_dir | default(playbook_dir)) + '/ssh_keys/ansible_rsa') }}" 7 | ansible_user: "{{ global_ansible_user | default('ansible') }}" 8 | roles: 9 | - docker-swarm-hetzner-init-variables 10 | - docker-swarm-labels 11 | 12 | 13 | -------------------------------------------------------------------------------- /swarmsible/roles/essential-software-setup/subtasks/molly-guard.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install molly-guard 3 | apt: 4 | name: molly-guard 5 | state: present 6 | update_cache: "{{ apt_update_cache | default('True') 
}}" 7 | become: yes 8 | 9 | - name: Enable molly-guard for screen/ssh-sessions 10 | lineinfile: 11 | dest: /etc/molly-guard/rc 12 | regexp: '^#ALWAYS_QUERY_HOSTNAME=true' 13 | line: 'ALWAYS_QUERY_HOSTNAME=true' 14 | become: yes -------------------------------------------------------------------------------- /swarmsible/roles/docker-login/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: python - Install docker package 3 | pip: 4 | name: docker 5 | 6 | - name: "Login to private registry at {{ item.registry }} and force re-authorization" 7 | docker_login: 8 | registry: "{{ item.registry }}" 9 | username: "{{ item.user }}" 10 | password: "{{ item.passwd }}" 11 | reauthorize: yes 12 | no_log: True 13 | with_items: "{{ docker_registry_client_credentials | default([]) }}" -------------------------------------------------------------------------------- /swarmsible/roles/notnagel-user/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Create the notnagel user 4 | user: 5 | name: notnagel 6 | password: "{{ notnagel_password | password_hash('sha512', notnagel_salt) }}" 7 | groups: sudo # Empty by default. 
8 | state: present 9 | shell: /bin/bash # Defaults to /bin/bash 10 | system: no # Defaults to no 11 | createhome: yes # Defaults to yes 12 | home: /home/notnagel # Defaults to /home/ -------------------------------------------------------------------------------- /swarmsible/roles/developer-accounts/subtasks/add-single-key.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if {{ user }} has only one public key 3 | become: false 4 | local_action: 5 | module: stat 6 | path: "{{ project_base_dir | default(playbook_dir) }}/files/all/ssh_files/developer_ssh_keys/{{ user }}.pub" 7 | register: single_key 8 | 9 | - name: Add single key for {{ user }} 10 | authorized_key: 11 | user: "{{ user }}" 12 | state: present 13 | key: "{{ lookup('file', '{{ project_base_dir | default(playbook_dir) }}/files/all/ssh_files/developer_ssh_keys/{{ user }}.pub') }}" 14 | when: single_key.stat.exists -------------------------------------------------------------------------------- /swarmsible/roles/docker-swarm-add-manager/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if Swarm is Already Initialized 3 | shell: docker node ls 4 | register: swarm_status 5 | ignore_errors: true 6 | 7 | - name: Add Managers to the Swarm 8 | shell: | 9 | docker swarm join \ 10 | --token {{ hostvars[docker_swarm_main_manager]['manager_token']['stdout'] }} \ 11 | --advertise-addr={{ hostvars[inventory_hostname]['docker_swarm_advertise_addr'] | default(hostvars[inventory_hostname]['host_ip']) }} \ 12 | {{ hostvars[docker_swarm_main_manager]['docker_swarm_advertise_addr'] | default(hostvars[docker_swarm_main_manager]['host_ip']) }} 13 | when: swarm_status.rc != 0 -------------------------------------------------------------------------------- /swarmsible/roles/docker-post-setup/templates/2022_06_11/daemon.json.j2: 
-------------------------------------------------------------------------------- 1 | { 2 | "default-address-pools": [ 3 | {"base":"172.18.0.0/16","size":24} 4 | ], 5 | "log-driver": "syslog", 6 | "log-opts": { 7 | "syslog-facility": "daemon", 8 | "tag": "{{ inventory_hostname }}|{{ '{{' }}.Name{{ '}}' }}" 9 | }, 10 | "metrics-addr" : "0.0.0.0:9323", 11 | "node-generic-resources": [ 12 | {% for docker_swarm_node_generic_resource in docker_swarm_node_generic_resources | default([]) %} 13 | {% if not loop.last %} 14 | "{{ docker_swarm_node_generic_resource }}", 15 | {% else %} 16 | "{{ docker_swarm_node_generic_resource }}" 17 | {% endif%} 18 | {% endfor %} 19 | ] 20 | } -------------------------------------------------------------------------------- /swarmsible/roles/docker-swarm-add-worker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if Swarm is Already Initialized 3 | shell: docker info --format '{{ '{{' }}.Swarm.LocalNodeState{{ '}}' }}' 4 | register: swarm_status 5 | ignore_errors: true 6 | 7 | - name: Add Workers to the Swarm 8 | shell: | 9 | docker swarm join \ 10 | --token {{ hostvars[docker_swarm_main_manager]['worker_token']['stdout'] }} \ 11 | --advertise-addr={{ hostvars[inventory_hostname]['docker_swarm_advertise_addr'] | default(hostvars[inventory_hostname]['host_ip']) }} \ 12 | {{ hostvars[docker_swarm_main_manager]['docker_swarm_advertise_addr'] | default(hostvars[docker_swarm_main_manager]['host_ip']) }} 13 | when: swarm_status.rc != 0 or (swarm_status.stdout | default('not_found')) != 'active' 14 | -------------------------------------------------------------------------------- /swarmsible/roles/docker-setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - include_tasks: setup-Debian.yml 4 | when: ansible_os_family == 'Debian' 5 | 6 | - name: check if package is installed 7 | package: 8 | name: "{{ 
docker_packages[0] }}" 9 | state: present 10 | # important: essentially a 'dry-run' 11 | check_mode: true 12 | register: docker_installed 13 | 14 | - name: Install Docker packages (with downgrade option). 15 | package: 16 | name: "{{ docker_packages }}" 17 | state: "present" 18 | when: docker_installed.changed 19 | 20 | - name: Ensure /etc/docker/ directory exists. 21 | file: 22 | path: /etc/docker 23 | state: directory 24 | mode: 0755 25 | 26 | - name: Ensure Docker is started and enabled at boot. 27 | service: 28 | name: docker 29 | state: "started" 30 | enabled: true 31 | -------------------------------------------------------------------------------- /swarmsible/roles/user-setup/tasks/subtasks/sshd-config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Note: with_first_found searches in the current dir! 3 | # TODO: should we change the path for these then? 4 | 5 | - debug: 6 | var: ansible_distribution 7 | 8 | - name: gather os specific variables 9 | include_vars: "{{ item }}" 10 | with_first_found: 11 | - "{{ ansible_distribution }}-{{ ansible_distribution_major_version}}.yml" 12 | - "{{ ansible_distribution }}.yml" 13 | - "defaults.yml" 14 | 15 | - name: configure ssh 16 | template: 17 | src: "{{ item }}" 18 | dest: "{{ SSH_CONFIG }}" 19 | backup: yes 20 | with_first_found: 21 | - "{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.sshd_config.j2" 22 | - "{{ ansible_distribution }}.sshd_config.j2" 23 | 24 | - name: reload sshd 25 | systemd: 26 | name: sshd 27 | state: reloaded 28 | -------------------------------------------------------------------------------- /swarmsible/roles/docker-post-setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "ensure /data/ansible exists" 4 | file: 5 | path: /data/ansible 6 | state: directory 7 | 8 | - name: "ensure /data/ansible/state exists" 9 | file: 10 | path: /data/ansible/state 
11 | state: directory 12 | 13 | - name: "ensure /data/ansible/state/docker exists" 14 | file: 15 | path: /data/ansible/state/docker 16 | state: directory 17 | 18 | - name: "ensure /data/ansible/state/docker/changes exists" 19 | file: 20 | path: /data/ansible/state/docker/changes 21 | state: directory 22 | 23 | - name: Install pip docker package 24 | pip: 25 | name: docker 26 | 27 | - include_tasks: subtasks/2019_03_15.yml 28 | - include_tasks: subtasks/2020_07_17.yml 29 | - include_tasks: subtasks/2021_03_04.yml 30 | - include_tasks: subtasks/2022_06_11.yml 31 | - include_tasks: subtasks/2024_04_16.yml -------------------------------------------------------------------------------- /swarmsible/roles/docker-post-setup/templates/2024_04_16/daemon.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "default-address-pools": [ 3 | {"base":"172.18.0.0/16","size":24} 4 | ], 5 | "log-driver": "syslog", 6 | "log-opts": { 7 | "syslog-facility": "daemon", 8 | "tag": "{{ inventory_hostname }}|{{ '{{' }}.Name{{ '}}' }}" 9 | }, 10 | "metrics-addr" : "0.0.0.0:9323", 11 | "node-generic-resources": [ 12 | {% for docker_swarm_node_generic_resource in docker_swarm_node_generic_resources | default([]) %} 13 | {% if not loop.last %} 14 | "{{ docker_swarm_node_generic_resource }}", 15 | {% else %} 16 | "{{ docker_swarm_node_generic_resource }}" 17 | {% endif %} 18 | {% endfor %} 19 | ], 20 | {% if docker_swarm_diagnostic_port is defined | default(false) %} 21 | "network-diagnostic-port": {{ docker_swarm_diagnostic_port }}, 22 | {% endif %} 23 | "network-control-plane-mtu": {{ docker_swarm_control_plane_mtu | default(1350) }} 24 | } -------------------------------------------------------------------------------- /swarmsible/roles/ufw-docker-install/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Copy ufw-docker from neuroforgede repo 2 | copy: 3 | src: ufw-docker 4 | dest: 
/usr/local/bin/ufw-docker 5 | 6 | - name: ensure permissions on ufw-docker to 751 7 | file: 8 | path: /usr/local/bin/ufw-docker 9 | owner: root 10 | group: root 11 | mode: '0751' 12 | 13 | - name: run ufw-docker install 14 | args: 15 | executable: /bin/bash 16 | shell: | 17 | check_result () { 18 | ___RESULT=$? 19 | if [ $___RESULT -ne 0 ]; then 20 | echo $1 21 | exit 1 22 | fi 23 | } 24 | ufw-docker install 25 | check_result "failed to run 'ufw-docker install'" 26 | register: firewall 27 | until: firewall is succeeded 28 | retries: 10 29 | delay: 3 30 | 31 | - name: restart ufw 32 | service: 33 | name: ufw 34 | state: restarted 35 | register: firewall 36 | until: firewall is succeeded 37 | retries: 10 38 | delay: 3 -------------------------------------------------------------------------------- /swarmsible/roles/docker-setup/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Edition can be one of: 'ce' (Community Edition) or 'ee' (Enterprise Edition). 3 | docker_edition: 'ce' 4 | docker_packages: 5 | - "docker-{{ docker_edition }}" 6 | - "docker-{{ docker_edition }}-cli" 7 | - "docker-{{ docker_edition }}-rootless-extras" 8 | - "containerd.io" 9 | - "apparmor-utils" 10 | docker_packages_state: present 11 | 12 | # Docker repo URL. 13 | docker_repo_url: https://download.docker.com/linux 14 | 15 | # Used only for Debian/Ubuntu. Switch 'stable' to 'nightly' if needed. 
16 | docker_apt_release_channel: stable 17 | docker_apt_arch: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}" 18 | docker_apt_repository: "deb [arch={{ docker_apt_arch }}] {{ docker_repo_url }}/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} {{ docker_apt_release_channel }}" 19 | docker_apt_ignore_key_error: true 20 | docker_apt_gpg_key: "{{ docker_repo_url }}/{{ ansible_distribution | lower }}/gpg" 21 | -------------------------------------------------------------------------------- /swarmsible/upgrade.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | vars: 4 | ansible_ssh_private_key_file: "{{ global_ansible_ssh_private_key_file | default((project_base_dir | default(playbook_dir)) + '/ssh_keys/ansible_rsa') }}" 5 | ansible_user: "{{ global_ansible_user | default('ansible') }}" 6 | become: True 7 | gather_facts: true 8 | tasks: 9 | - name: "Pin {{ item }} version" 10 | dpkg_selections: 11 | name: "{{ item }}" 12 | selection: hold 13 | with_items: 14 | - containerd.io 15 | - docker-ce-cli 16 | - docker-ce 17 | - docker-ce-rootless-extras 18 | - docker-scan-plugin 19 | 20 | - hosts: all 21 | vars: 22 | ansible_ssh_private_key_file: "{{ global_ansible_ssh_private_key_file | default((project_base_dir | default(playbook_dir)) + '/ssh_keys/ansible_rsa') }}" 23 | ansible_user: "{{ global_ansible_user | default('ansible') }}" 24 | become: True 25 | gather_facts: true 26 | roles: 27 | - docker-swarm-hetzner-init-variables 28 | - docker-pre-setup 29 | - docker-node-upgrade 30 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Proudly made by [NeuroForge](https://neuroforge.de/) in Bayreuth, Germany. 2 | 3 | # swarmsible 4 | 5 | Simple Ansible based Tooling for setting up and managing a production grade Docker Swarm. 
6 | 7 | Currently tested and validated on Ubuntu 18.04/20.04. 8 | 9 | # Companion Repos 10 | 11 | - [swarmsible-hetzner](https://github.com/neuroforgede/swarmsible-hetzner) (Automatic provisioning of VMs at Hetzner for use with this repo) 12 | - [swarmsible-stacks](https://github.com/neuroforgede/swarmsible-stacks) (Production grade Docker Stacks) 13 | - [swarmsible-example](https://github.com/neuroforgede/swarmsible-example) (Example Environment) 14 | 15 | # Used software 16 | 17 | 1. NeuroForge [ufw-docker fork](https://github.com/neuroforgede/ufw-docker) to configure firewall to disable access to published ports on public IPs by default 18 | 2. Docker CE with Docker Swarm Mode 19 | 3. Ansible >= 2.8 20 | 4. docker-setup role adapted from geerlingguy.docker 21 | 5. docker-stack-deploy for secret rotation (https://github.com/neuroforgede/docker-stack-deploy) 22 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | swarmsible 2 | 3 | Copyright 2022 NeuroForge GmbH & Co. KG 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | 7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 10 | -------------------------------------------------------------------------------- /swarmsible/roles/docker-post-setup/tasks/subtasks/2024_04_16.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check if docker_changes_2024_04_16 were already applied 3 | stat: 4 | path: "/data/ansible/state/docker/changes/2024_04_16" 5 | register: "docker_changes_2024_04_16" 6 | 7 | - name: set docker_changes_2024_04_16_val 8 | set_fact: 9 | docker_changes_2024_04_16_val: "{{ docker_changes_2024_04_16.stat.exists }}" 10 | 11 | - name: set up docker changes for 2024_04_16 12 | when: not docker_changes_2024_04_16_val|bool 13 | block: 14 | 15 | - name: "copy daemon.json to /etc/docker/daemon.json" 16 | template: 17 | src: "2024_04_16/daemon.json.j2" 18 | dest: "/etc/docker/daemon.json" 19 | mode: 0600 20 | owner: root 21 | group: root 22 | 23 | - name: "restart docker" 24 | service: 25 | name: docker 26 | state: restarted 27 | enabled: yes 28 | retries: 10 29 | delay: 10 30 | register: result 31 | until: result is succeeded 32 | 33 | - name: "touch /data/ansible/state/docker/changes/2024_04_16" 34 | file: 35 | path: "/data/ansible/state/docker/changes/2024_04_16" 36 | state: touch 37 | mode: "u=rw,g=r,o=r" -------------------------------------------------------------------------------- /swarmsible/roles/user-setup/tasks/subtasks/setup-single-user.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # inspired by https://medium.com/sallesslice-com/visudo-with-ansible-746f83547bb3 3 | - name: user setup 4 | become: yes 5 | remote_user: root 6 | block: 7 | - name: "create user {{ user.name }}" 8 | user: 9 | name: "{{ user.name }}" 
10 | append: yes 11 | shell: /bin/bash 12 | system: "{{ user.system | default('False') | bool }}" 13 | groups: 14 | - "{{ user.group }}" 15 | create_home: yes 16 | 17 | - name: "add user {{ user.name }} to sudoers" 18 | user: 19 | name: "{{ user.name }}" 20 | append: yes 21 | groups: 22 | - "sudo" 23 | when: user.is_sudo == True 24 | 25 | - name: "add public key for user {{ user.name }}" 26 | when: user.ssh_key is defined 27 | authorized_key: 28 | user: "{{ user.group }}" 29 | state: present 30 | key: "{{ lookup('file', '{{ user.ssh_key }}.pub') }}" 31 | 32 | - name: "set up bashrc for user {{ user.name }}" 33 | copy: 34 | src: .bashrc 35 | dest: "/home/{{ user.name }}/.bashrc" 36 | owner: "{{ user.name }}" 37 | group: "{{ user.group }}" -------------------------------------------------------------------------------- /swarmsible/docker_swarm_volumes.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: docker_swarm 3 | become: true 4 | vars: 5 | ansible_ssh_private_key_file: "{{ global_ansible_ssh_private_key_file | default((project_base_dir | default(playbook_dir)) + '/ssh_keys/ansible_rsa') }}" 6 | ansible_user: "{{ global_ansible_user | default('ansible') }}" 7 | tasks: 8 | - name: "ensure /mnt-alias exists" 9 | file: 10 | path: "/mnt-alias" 11 | state: "directory" 12 | owner: root 13 | group: root 14 | mode: '700' 15 | - name: "create symlink for cloud volume" 16 | file: 17 | src: "/mnt/{{ item.value }}/" 18 | dest: "/mnt-alias/{{ item.key }}" 19 | owner: root 20 | group: root 21 | state: link 22 | with_dict: "{{ docker_cloud_volume_aliases | default({}) }}" 23 | - name: "ensure {{ item.path }} exists" 24 | file: 25 | path: "{{ item.path | default(omit) }}" 26 | state: "{{ item.state | default('directory') }}" 27 | owner: "{{ item.owner | default('root') }}" 28 | group: "{{ item.group | default('root') }}" 29 | mode: "{{ item.mode | default('700') }}" 30 | with_items: "{{ docker_volumes | default([]) }}" 31 | 
32 | 33 | -------------------------------------------------------------------------------- /swarmsible/roles/docker-post-setup/tasks/subtasks/2019_03_15.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check if docker_changes_2019_03_15 were already applied 3 | stat: 4 | path: "/data/ansible/state/docker/changes/2019_03_15" 5 | register: "docker_changes_2019_03_15" 6 | 7 | - name: set docker_changes_2019_03_15_val 8 | set_fact: 9 | docker_changes_2019_03_15_val: "{{ docker_changes_2019_03_15.stat.exists }}" 10 | 11 | - name: set up docker changes for 2019_03_15 12 | when: not docker_changes_2019_03_15_val|bool 13 | block: 14 | 15 | - name: "copy daemon.json to /etc/docker/daemon.json" 16 | copy: 17 | src: "2019_03_15/daemon.json" 18 | dest: "/etc/docker/daemon.json" 19 | mode: 0600 20 | owner: root 21 | group: root 22 | 23 | - name: "change docker config file to /etc/docker/daemon.json" 24 | shell: echo 'DOCKER_OPTS="--config-file=/etc/docker/daemon.json"' > /etc/default/docker 25 | 26 | - name: "restart docker" 27 | service: 28 | name: docker 29 | state: restarted 30 | enabled: yes 31 | 32 | - name: "touch /data/ansible/state/docker/changes/2019_03_15" 33 | file: 34 | path: "/data/ansible/state/docker/changes/2019_03_15" 35 | state: touch 36 | mode: "u=rw,g=r,o=r" -------------------------------------------------------------------------------- /swarmsible/roles/docker-post-setup/tasks/subtasks/2020_07_17.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check if docker_changes_2020_07_17 were already applied 3 | stat: 4 | path: "/data/ansible/state/docker/changes/2020_07_17" 5 | register: "docker_changes_2020_07_17" 6 | 7 | - name: set docker_changes_2020_07_17_val 8 | set_fact: 9 | docker_changes_2020_07_17_val: "{{ docker_changes_2020_07_17.stat.exists }}" 10 | 11 | - name: set up docker changes for 2020_07_17 12 | when: not 
docker_changes_2020_07_17_val|bool 13 | block: 14 | 15 | - name: "copy daemon.json to /etc/docker/daemon.json" 16 | copy: 17 | src: "2020_07_17/daemon.json" 18 | dest: "/etc/docker/daemon.json" 19 | mode: 0600 20 | owner: root 21 | group: root 22 | 23 | - name: "change docker config file to /etc/docker/daemon.json" 24 | command: echo 'DOCKER_OPTS="--config-file=/etc/docker/daemon.json"' > /etc/default/docker 25 | 26 | - name: "restart docker" 27 | service: 28 | name: docker 29 | state: restarted 30 | enabled: yes 31 | 32 | - name: "touch /data/ansible/state/docker/changes/2020_07_17" 33 | file: 34 | path: "/data/ansible/state/docker/changes/2020_07_17" 35 | state: touch 36 | mode: "u=rw,g=r,o=r" -------------------------------------------------------------------------------- /swarmsible/roles/docker-node-upgrade/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # This role is definitely not written optimally and does stuff in a convoluted way 2 | # but encodes the proper way to do swarm upgrades rather nicely 3 | 4 | # Upgrade all managers first 5 | - name: Update Manager Nodes in Swarm 6 | vars: 7 | current_docker_node: "{{ __item }}" 8 | include_tasks: upgrade-node.yml 9 | loop_control: 10 | loop_var: "__item" 11 | when: __item != docker_swarm_main_manager 12 | with_items: "{{ groups['docker_swarm_manager'] | default([]) }}" 13 | 14 | # once every manager is upgraded, upgrade the ansible main node 15 | - name: Update main Manager Node 16 | vars: 17 | current_docker_node: "{{ __item }}" 18 | include_tasks: upgrade-node.yml 19 | loop_control: 20 | loop_var: "__item" 21 | when: __item == docker_swarm_main_manager 22 | with_items: "{{ groups['docker_swarm_manager'] | default([]) }}" 23 | 24 | # first upgrade all "non main nodes" - our ansible playbooks need a main node 25 | - name: Update Worker Nodes in Swarm 26 | vars: 27 | current_docker_node: "{{ __item }}" 28 | include_tasks: upgrade-node.yml 29 | loop_control: 
30 | loop_var: "__item" 31 | when: __item != docker_swarm_main_manager 32 | with_items: "{{ groups['docker_swarm_worker'] | default([]) }}" 33 | -------------------------------------------------------------------------------- /swarmsible/roles/docker-post-setup/tasks/subtasks/2021_03_04.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check if docker_changes_2021_03_04 were already applied 3 | stat: 4 | path: "/data/ansible/state/docker/changes/2021_03_04" 5 | register: "docker_changes_2021_03_04" 6 | 7 | - name: set docker_changes_2021_03_04_val 8 | set_fact: 9 | docker_changes_2021_03_04_val: "{{ docker_changes_2021_03_04.stat.exists }}" 10 | 11 | - name: set up docker changes for 2021_03_04 12 | when: not docker_changes_2021_03_04_val|bool 13 | block: 14 | 15 | - name: "copy daemon.json to /etc/docker/daemon.json" 16 | template: 17 | src: "2021_03_04/daemon.json" 18 | dest: "/etc/docker/daemon.json" 19 | mode: 0600 20 | owner: root 21 | group: root 22 | 23 | - name: "change docker config file to /etc/docker/daemon.json" 24 | command: echo 'DOCKER_OPTS="--config-file=/etc/docker/daemon.json"' > /etc/default/docker 25 | 26 | - name: "restart docker" 27 | service: 28 | name: docker 29 | state: restarted 30 | enabled: yes 31 | retries: 10 32 | delay: 10 33 | register: result 34 | until: result is succeeded 35 | 36 | - name: "touch /data/ansible/state/docker/changes/2021_03_04" 37 | file: 38 | path: "/data/ansible/state/docker/changes/2021_03_04" 39 | state: touch 40 | mode: "u=rw,g=r,o=r" -------------------------------------------------------------------------------- /swarmsible/roles/docker-pre-setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - fail: 3 | msg: "Either docker_pinned_package or containerd_pinned_package are not properly pinned. 
Improper pinning no longer accepted" 4 | when: (docker_pinned_package is not defined) or (docker_pinned_package == '*') or (containerd_pinned_package is not defined) or (containerd_pinned_package == '*') 5 | 6 | - name: "pin docker version to {{ docker_pinned_package }}" 7 | template: 8 | src: "apt-preference-docker.j2" 9 | dest: "/etc/apt/preferences.d/docker" 10 | mode: 0644 11 | owner: root 12 | group: root 13 | 14 | - name: "pin docker cli version to {{ docker_pinned_package }}" 15 | template: 16 | src: "apt-preference-docker-cli.j2" 17 | dest: "/etc/apt/preferences.d/docker-cli" 18 | mode: 0644 19 | owner: root 20 | group: root 21 | 22 | - name: "pin docker ce rootless extras version to {{ docker_pinned_package }}" 23 | template: 24 | src: "apt-preference-docker-ce-rootless-extras.j2" 25 | dest: "/etc/apt/preferences.d/docker-ce-rootless-extras" 26 | mode: 0644 27 | owner: root 28 | group: root 29 | 30 | - name: "pin containerd.io version to {{ containerd_pinned_package }}" 31 | template: 32 | src: "apt-preference-containerd.j2" 33 | dest: "/etc/apt/preferences.d/containerd" 34 | mode: 0644 35 | owner: root 36 | group: root 37 | 38 | - name: "Pin {{ item }} version" 39 | dpkg_selections: 40 | name: "{{ item }}" 41 | selection: hold 42 | with_items: 43 | - containerd.io 44 | - docker-ce-cli 45 | - docker-ce 46 | - docker-ce-rootless-extras 47 | - docker-scan-plugin -------------------------------------------------------------------------------- /swarmsible/roles/docker-post-setup/tasks/subtasks/2022_06_11.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check if docker_changes_2022_06_11 were already applied 3 | stat: 4 | path: "/data/ansible/state/docker/changes/2022_06_11" 5 | register: "docker_changes_2022_06_11" 6 | 7 | - name: set docker_changes_2022_06_11_val 8 | set_fact: 9 | docker_changes_2022_06_11_val: "{{ docker_changes_2022_06_11.stat.exists }}" 10 | 11 | - name: set up docker changes for 
2022_06_11 12 | when: not docker_changes_2022_06_11_val|bool 13 | block: 14 | 15 | - name: "copy daemon.json to /etc/docker/daemon.json" 16 | template: 17 | src: "2022_06_11/daemon.json.j2" 18 | dest: "/etc/docker/daemon.json" 19 | mode: 0600 20 | owner: root 21 | group: root 22 | 23 | - name: "change docker config file to /etc/docker/daemon.json" 24 | command: echo 'DOCKER_OPTS="--config-file=/etc/docker/daemon.json"' > /etc/default/docker 25 | 26 | - name: "restart docker" 27 | service: 28 | name: docker 29 | state: restarted 30 | enabled: yes 31 | retries: 10 32 | delay: 10 33 | register: result 34 | until: result is succeeded 35 | 36 | - name: Allow connections to metrics from docker 37 | ufw: 38 | rule: allow 39 | proto: tcp 40 | from: 172.18.0.0/16 41 | to: 0.0.0.0/0 42 | port: "9323" 43 | register: firewall 44 | until: firewall is succeeded 45 | retries: 10 46 | delay: 3 47 | 48 | - name: "touch /data/ansible/state/docker/changes/2022_06_11" 49 | file: 50 | path: "/data/ansible/state/docker/changes/2022_06_11" 51 | state: touch 52 | mode: "u=rw,g=r,o=r" -------------------------------------------------------------------------------- /swarmsible/roles/docker-setup/tasks/setup-Debian.yml: -------------------------------------------------------------------------------- 1 | - name: Ensure old versions of Docker are not installed. 2 | package: 3 | name: 4 | - docker 5 | - docker-engine 6 | state: absent 7 | 8 | - name: Ensure dependencies are installed. 9 | apt: 10 | name: 11 | - apt-transport-https 12 | - ca-certificates 13 | state: present 14 | 15 | - name: Ensure additional dependencies are installed (on Ubuntu < 20.04 and any other systems). 16 | apt: 17 | name: gnupg2 18 | state: present 19 | when: ansible_distribution != 'Ubuntu' or ansible_distribution_version is version('20.04', '<') 20 | 21 | - name: Ensure additional dependencies are installed (on Ubuntu >= 20.04). 
22 | apt: 23 | name: gnupg 24 | state: present 25 | when: ansible_distribution == 'Ubuntu' and ansible_distribution_version is version('20.04', '>=') 26 | 27 | - name: Add Docker apt key. 28 | ansible.builtin.get_url: 29 | url: "{{ docker_apt_gpg_key }}" 30 | dest: /etc/apt/trusted.gpg.d/docker.asc 31 | mode: '0644' 32 | force: true 33 | register: add_repository_key 34 | ignore_errors: "{{ docker_apt_ignore_key_error }}" 35 | 36 | - name: Ensure curl is present (on older systems without SNI). 37 | package: name=curl state=present 38 | when: add_repository_key is failed 39 | 40 | - name: Add Docker apt key (alternative for older systems without SNI). 41 | shell: > 42 | curl -sSL {{ docker_apt_gpg_key }} | apt-key add - 43 | args: 44 | warn: false 45 | when: add_repository_key is failed 46 | 47 | - name: Add Docker repository. 48 | apt_repository: 49 | repo: "{{ docker_apt_repository }}" 50 | state: present 51 | update_cache: true -------------------------------------------------------------------------------- /swarmsible/roles/docker-node-upgrade/tasks/upgrade-node.yml: -------------------------------------------------------------------------------- 1 | - name: "drain node {{ current_docker_node }}" 2 | shell: docker node update --availability drain '{{ hostvars[current_docker_node]['inventory_hostname'] }}' 3 | retries: 10 4 | delay: 10 5 | when: inventory_hostname == docker_swarm_main_manager 6 | 7 | - name: "wait until node {{ current_docker_node }} has drained" 8 | shell: | 9 | if [ -n "$(docker ps -q)" ]; then 10 | exit 1 11 | else 12 | exit 0 13 | fi 14 | retries: 1000 15 | delay: 10 16 | register: drained 17 | until: drained is not failed 18 | when: inventory_hostname == current_docker_node 19 | 20 | - name: "upgrade node {{ current_docker_node }}" 21 | shell: DEBIAN_FRONTEND=noninteractive apt-get update && DEBIAN_FRONTEND=noninteractive apt-get upgrade -y 22 | when: inventory_hostname == current_docker_node 23 | 24 | - name: "upgrade docker tools on {{ 
current_docker_node }}" 25 | shell: DEBIAN_FRONTEND=noninteractive apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install --allow-change-held-packages -y docker-ce docker-ce-rootless-extras docker-ce-cli containerd.io 26 | when: inventory_hostname == current_docker_node 27 | 28 | - name: "Reboot node {{ current_docker_node }}" 29 | ansible.builtin.reboot: 30 | reboot_timeout: 3600 31 | search_paths: ['/lib/molly-guard', '/sbin'] 32 | when: inventory_hostname == current_docker_node 33 | 34 | - name: "Run canary command on node {{ current_docker_node }} to check if it is up before continuing" 35 | shell: "echo 'all fine'" 36 | when: inventory_hostname == current_docker_node 37 | 38 | - name: "set node {{ current_docker_node }} active" 39 | shell: docker node update --availability active '{{ hostvars[current_docker_node]['inventory_hostname'] }}' || exit 1 40 | retries: 10 41 | delay: 10 42 | when: inventory_hostname == docker_swarm_main_manager 43 | -------------------------------------------------------------------------------- /swarmsible/roles/user-setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set up root 3 | include_tasks: subtasks/root-setup.yml 4 | 5 | - name: set group info for ansible group into a dict 6 | set_fact: 7 | ansible_group_info: 8 | name: "{{ global_ansible_group | default('ansible') }}" 9 | requires_root_password: False 10 | 11 | - name: construct list of all group infos 12 | set_fact: 13 | all_group_infos: "{{ (setup_additional_groups | default([])) + [ansible_group_info] }}" 14 | 15 | - name: set up groups 16 | group: 17 | name: "{{ group.name }}" 18 | state: present 19 | vars: 20 | group: "{{ item }}" 21 | with_items: "{{ all_group_infos }}" 22 | 23 | - name: allow relevant groups to become super user without password 24 | template: 25 | src: sudoers.j2 26 | dest: /etc/sudoers.d/sudoers 27 | mode: 0440 28 | validate: '/usr/sbin/visudo -cf %s' 29 | 30 | - 
name: set user information for the ansible user into a dict 31 | set_fact: 32 | ansible_user_info: 33 | name: "{{ global_ansible_user | default('ansible') }}" 34 | group: "{{ global_ansible_group | default('ansible') }}" 35 | ssh_key: "{{ global_ansible_ssh_private_key_file | default((project_base_dir | default(playbook_dir)) + '/ssh_keys/ansible_rsa') }}" 36 | # ansible user has a separate group that allows sudo access 37 | is_sudo: False 38 | 39 | - name: construct list of all user infos 40 | set_fact: 41 | all_user_infos: "{{ (setup_additional_users | default([])) + [ansible_user_info] }}" 42 | 43 | - name: set up users 44 | include_tasks: subtasks/setup-single-user.yml 45 | vars: 46 | user: "{{ item }}" 47 | with_items: "{{ all_user_infos }}" 48 | 49 | # now that we definitely have a way back in (including root), we can disable root 50 | # SSH login for all ips (except the management nodes) 51 | - name: ssh config 52 | include_tasks: subtasks/sshd-config.yml -------------------------------------------------------------------------------- /swarmsible/docker_swarm.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: docker_swarm 3 | become: true 4 | vars: 5 | ansible_ssh_private_key_file: "{{ global_ansible_ssh_private_key_file | default((project_base_dir | default(playbook_dir)) + '/ssh_keys/ansible_rsa') }}" 6 | ansible_user: "{{ global_ansible_user | default('ansible') }}" 7 | roles: 8 | # copy the ssl certs from the beginning so all nodes have the certs on the system 9 | # so all mounts work 10 | - docker-swarm-hetzner-init-variables 11 | - copy-ssl-certs 12 | - docker-sysctl-tune 13 | - docker-pre-setup 14 | - docker-setup 15 | - docker-post-setup 16 | - docker-login 17 | - ufw-docker-install 18 | - docker-swarm-firewall 19 | - docker-swarm-hosts 20 | 21 | - hosts: docker_swarm_manager 22 | become: true 23 | vars: 24 | ansible_ssh_private_key_file: "{{ global_ansible_ssh_private_key_file | 
default((project_base_dir | default(playbook_dir)) + '/ssh_keys/ansible_rsa') }}" 25 | ansible_user: "{{ global_ansible_user | default('ansible') }}" 26 | roles: 27 | - docker-swarm-hetzner-init-variables 28 | - docker-swarm-init 29 | - docker-swarm-add-manager 30 | 31 | - hosts: docker_swarm_worker 32 | become: true 33 | vars: 34 | ansible_ssh_private_key_file: "{{ global_ansible_ssh_private_key_file | default((project_base_dir | default(playbook_dir)) + '/ssh_keys/ansible_rsa') }}" 35 | ansible_user: "{{ global_ansible_user | default('ansible') }}" 36 | roles: 37 | - docker-swarm-hetzner-init-variables 38 | - docker-swarm-add-worker 39 | 40 | - hosts: docker_swarm 41 | become: true 42 | vars: 43 | ansible_ssh_private_key_file: "{{ global_ansible_ssh_private_key_file | default((project_base_dir | default(playbook_dir)) + '/ssh_keys/ansible_rsa') }}" 44 | ansible_user: "{{ global_ansible_user | default('ansible') }}" 45 | roles: 46 | - docker-swarm-hetzner-init-variables 47 | - docker-swarm-labels 48 | -------------------------------------------------------------------------------- /swarmsible/roles/docker-swarm-hetzner-init-variables/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: "initialize inventory variables [default]" 2 | when: (docker_swarm_initialize_variables_from_hetzner | default('False') | bool) and (not docker_swarm_custom_private_network_enabled | default('False') | bool) 3 | block: 4 | - set_fact: 5 | group_index: "{{ inventory_hostname | regex_search('[a-zA-Z]-0*([0-9]*)$', '\\1') | first }}" 6 | 7 | - set_fact: 8 | docker_swarm_advertise_addr: "192.168.1.{{ (group_index | int) + 1 }}" 9 | when: "'docker_swarm_manager' in group_names" 10 | 11 | - set_fact: 12 | docker_swarm_advertise_addr: "192.168.2.{{ (group_index | int) + 1 }}" 13 | when: "'docker_swarm_worker' in group_names" 14 | 15 | - set_fact: 16 | host_ip: "{{ ansible_host }}" 17 | 18 | - set_fact: 19 | docker_swarm_labels: 20 | 
host: "{{ inventory_hostname }}" 21 | hetzner_location: "{{ location }}" 22 | 23 | - name: "initialize inventory variables [custom private network]" 24 | when: (docker_swarm_initialize_variables_from_hetzner | default('False') | bool) and (docker_swarm_custom_private_network_enabled | default('False') | bool) 25 | block: 26 | - set_fact: 27 | group_index: "{{ inventory_hostname | regex_search('[a-zA-Z]-0*([0-9]*)$', '\\1') | first }}" 28 | 29 | - set_fact: 30 | docker_swarm_advertise_addr: "{{ docker_swarm_custom_private_network | ipsubnet(26, 1) | ipaddr((group_index | int) + 1) | ipaddr('address') }}" 31 | when: "'docker_swarm_manager' in group_names" 32 | 33 | - set_fact: 34 | docker_swarm_advertise_addr: "{{ docker_swarm_custom_private_network | ipsubnet(26, 2) | ipaddr((group_index | int) + 1) | ipaddr('address') }}" 35 | when: "'docker_swarm_worker' in group_names" 36 | 37 | - set_fact: 38 | host_ip: "{{ ansible_host }}" 39 | 40 | - set_fact: 41 | docker_swarm_labels: 42 | host: "{{ inventory_hostname }}" 43 | hetzner_location: "{{ location }}" 44 | -------------------------------------------------------------------------------- /swarmsible/roles/copy-ssl-certs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "ensure /data/ansible exists" 3 | file: 4 | path: /data/ansible 5 | state: directory 6 | 7 | - name: "ensure /data/ansible/certs exists" 8 | file: 9 | path: /data/ansible/certs 10 | state: directory 11 | 12 | - name: "copy ssl certs to {{ item.directory }}" 13 | copy: 14 | src: "{{ssl_certs_base_dir}}/{{ item.directory }}" 15 | dest: "/data/ansible/certs" 16 | mode: 0700 17 | owner: "{{ item.owner }}" 18 | group: "{{ item.group }}" 19 | when: not item.use_ids | default('False') | bool 20 | with_items: "{{ ssl_cert_dirs }}" 21 | 22 | - name: "copy ssl certs to {{ item.directory }}" 23 | copy: 24 | src: "{{ssl_certs_base_dir}}/{{ item.directory }}" 25 | dest: "/data/ansible/certs" 26 | 
mode: 0700 27 | owner: "{{ item.uid }}" 28 | group: "{{ item.gid }}" 29 | when: item.use_ids | default('False') | bool 30 | with_items: "{{ ssl_cert_dirs }}" 31 | 32 | - name: "Ensure /data/ansible/certs/{{ item.directory }} is 0700" 33 | command: 34 | cmd: chmod 700 /data/ansible/certs/{{ item.directory }} 35 | warn: False 36 | with_items: "{{ ssl_cert_dirs }}" 37 | 38 | - name: "Ensure files in /data/ansible/certs/{{ item.directory }} 0400" 39 | command: 40 | cmd: find /data/ansible/certs/{{ item.directory }} -type f -exec chmod 0400 {} \; 41 | warn: False 42 | with_items: "{{ ssl_cert_dirs }}" 43 | 44 | - name: "Ensure /data/ansible/certs/{{ item.directory }} belongs to the right user" 45 | command: 46 | cmd: "chown -R {{ item.uid }}:{{ item.gid }} /data/ansible/certs/{{ item.directory }}" 47 | warn: False 48 | when: item.use_ids | default('False') | bool 49 | with_items: "{{ ssl_cert_dirs }}" 50 | 51 | - name: "Ensure /data/ansible/certs/{{ item.directory }} belongs to the right user" 52 | command: 53 | cmd: "chown -R {{ item.owner }}:{{ item.group }} /data/ansible/certs/{{ item.directory }}" 54 | warn: False 55 | when: not item.use_ids | default('False') | bool 56 | with_items: "{{ ssl_cert_dirs }}" -------------------------------------------------------------------------------- /swarmsible/roles/developer-accounts/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "create group developer" 3 | group: 4 | name: "developer" 5 | state: present 6 | 7 | - name: "remove deleted users" 8 | user: 9 | name: "{{ item.name }}" 10 | state: absent 11 | remove: yes 12 | with_items: "{{ ssh_deleted_developer_accounts | default([]) }}" 13 | 14 | - name: "make sure all necessary groups exist" 15 | group: 16 | name: "{{ item }}" 17 | state: present 18 | with_items: "{{ ssh_developer_accounts | default([]) | selectattr('groups', 'defined') | map(attribute='groups') | flatten | unique }}" 19 | 20 | - name: "make 
sure user exists" 21 | user: 22 | name: "{{ item.name }}" 23 | append: yes 24 | groups: "{{ (item.groups | default([])) + ['developer'] }}" 25 | create_home: yes 26 | shell: /bin/bash 27 | with_items: "{{ ssh_developer_accounts | default([]) }}" 28 | 29 | - name: Add key for users with a single key 30 | include_tasks: subtasks/add-single-key.yml 31 | vars: 32 | user: "{{ item.name }}" 33 | with_items: 34 | - "{{ ssh_developer_accounts | default([]) }}" 35 | 36 | - name: Add keys for users with additional keys 37 | include_tasks: subtasks/add-additional-keys.yml 38 | vars: 39 | user: "{{ item.name }}" 40 | additional_keys: "{{ item.additional_keys }}" 41 | with_items: 42 | - "{{ ssh_developer_accounts | default([]) }}" 43 | 44 | - name: Check if user has directory for multiple keys [directory] 45 | become: false 46 | local_action: 47 | module: stat 48 | path: "{{ project_base_dir | default(playbook_dir) }}/files/all/ssh_files/developer_ssh_keys/{{ item.name }}/" 49 | with_items: 50 | - "{{ ssh_developer_accounts | default([]) }}" 51 | register: multiple_keys 52 | 53 | - name: Build a list of users with multiple keys [directory] 54 | set_fact: 55 | users_with_directory: "{{ users_with_directory | default([]) }} + [ '{{ item.item.name }}' ]" 56 | with_items: "{{ multiple_keys.results }}" 57 | when: item.stat.exists 58 | no_log: true 59 | 60 | - name: Add keys for users with multiple keys [directory] 61 | include_tasks: subtasks/add-multiple-keys.yml 62 | vars: 63 | user: "{{ item }}" 64 | with_items: 65 | - "{{ users_with_directory | default([]) }}" -------------------------------------------------------------------------------- /swarmsible/roles/full-apt-upgrade/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "ensure /data/ansible exists" 4 | file: 5 | path: /data/ansible 6 | state: directory 7 | 8 | - name: "ensure /data/ansible/state exists" 9 | file: 10 | path: /data/ansible/state 11 | state: 
directory 12 | 13 | - name: check if full apt upgrade was already run once 14 | stat: 15 | path: "/data/ansible/state/initial_apt_upgrade" 16 | register: "initial_apt_upgrade_already_run" 17 | 18 | - name: "check if a previous docker setup already run" 19 | stat: 20 | path: "/data/ansible/state/docker/changes/2022_06_11" 21 | register: "docker_already_setup" 22 | 23 | - name: set initial_apt_upgrade_already_run_val 24 | set_fact: 25 | initial_apt_upgrade_already_run_val: "{{ initial_apt_upgrade_already_run.stat.exists or docker_already_setup.stat.exists}}" 26 | 27 | - name: run initial upgrade 28 | when: not (initial_apt_upgrade_already_run_val|bool) 29 | block: 30 | #(occured during testing, manual apt update fixed this) 31 | - name: "manual apt update to fix problems with permissions" 32 | raw: apt update -y 33 | 34 | - name: apt update 35 | apt: 36 | update_cache: "{{ apt_update_cache | default('True') }}" 37 | 38 | - name: apt dist-upgrade 39 | apt: 40 | upgrade: dist 41 | register: dist_upgraded 42 | 43 | - name: Remove useless packages from the apt cache 44 | apt: 45 | autoclean: yes 46 | 47 | - name: Remove dependencies that are no longer required 48 | apt: 49 | autoremove: yes 50 | 51 | - name: Switch to the legacy variant of iptables because of a bug in the nft version 52 | alternatives: 53 | name: iptables 54 | path: /usr/sbin/iptables-legacy 55 | link: /etc/alternatives/iptables 56 | register: iptables_legacy 57 | when: ansible_distribution == 'Ubuntu' and ansible_distribution_version is version('20.04', '>=') 58 | 59 | - name: Restart after dist upgrade. 
60 | reboot: 61 | reboot_timeout: 300 62 | post_reboot_delay: 10 63 | search_paths: ['/lib/molly-guard', '/sbin'] 64 | 65 | - name: "touch /data/ansible/state/initial_apt_upgrade" 66 | file: 67 | path: "/data/ansible/state/initial_apt_upgrade" 68 | state: touch 69 | mode: "u=rw,g=r,o=r" 70 | -------------------------------------------------------------------------------- /swarmsible/roles/docker-swarm-init/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "run docker swarm init" 3 | when: inventory_hostname == docker_swarm_main_manager 4 | block: 5 | - name: Check if Swarm has already been Initialized 6 | shell: docker node ls 7 | register: swarm_status 8 | ignore_errors: true 9 | 10 | - set_fact: 11 | __docker_swarm_ingress_network_opt: "--opt com.docker.network.driver.mtu={{ docker_swarm_ingress_network_mtu | default('1350') }}" 12 | when: not (docker_swarm_ingress_network_encrypt | default('True') | bool) 13 | 14 | - set_fact: 15 | __docker_swarm_ingress_network_opt: "--opt com.docker.network.driver.mtu={{ docker_swarm_ingress_network_mtu | default('1350') }} --opt encrypted" 16 | when: docker_swarm_ingress_network_encrypt | default('True') | bool 17 | 18 | - name: Initialize Docker Swarm 19 | shell: | 20 | docker swarm init \ 21 | --advertise-addr={{ hostvars[inventory_hostname]['docker_swarm_advertise_addr'] | default(hostvars[inventory_hostname]['host_ip']) }} \ 22 | --default-addr-pool "{{ docker_swarm_default_ip_addr_pool | default('10.0.0.0/8') }}" \ 23 | --default-addr-pool-mask-length "{{ docker_swarm_default_ip_addr_pool_mask_length | default('24') }}" 24 | when: swarm_status.rc != 0 25 | 26 | - name: remove default ingress network 27 | shell: > 28 | yes | docker network rm ingress 29 | when: swarm_status.rc != 0 30 | 31 | - name: create encrypted ingress network 32 | args: 33 | executable: /bin/bash 34 | shell: | 35 | function retry { 36 | local n=1 37 | local max=10 38 | local delay=5 
39 | while true; do 40 | "$@" && break || { 41 | if [[ $n -lt $max ]]; then 42 | ((n++)) 43 | echo "Command failed. Attempt $n/$max..." 44 | sleep $delay; 45 | else 46 | echo "The command has failed after $n attempts." 47 | return 1 48 | fi 49 | } 50 | done 51 | } 52 | 53 | check_result () { 54 | ___RESULT=$? 55 | if [ $___RESULT -ne 0 ]; then 56 | echo $1 57 | exit 1 58 | fi 59 | } 60 | 61 | retry docker network create --driver overlay --ingress {{ __docker_swarm_ingress_network_opt }} {{ docker_swarm_ingress_network_name }} --subnet={{ docker_swarm_ingress_subnet }} 62 | check_result "failed to create docker ingress network {{ docker_swarm_ingress_network_name }}" 63 | when: swarm_status.rc != 0 64 | 65 | - name: Get the Manager join-token 66 | shell: docker swarm join-token --quiet manager 67 | register: manager_token 68 | 69 | - name: Get the worker join-token 70 | shell: docker swarm join-token --quiet worker 71 | register: worker_token -------------------------------------------------------------------------------- /swarmsible/ansible_setup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | vars: 4 | ansible_ssh_private_key_file: "{{ ansible_initial_ssh_private_key_file | default(playbook_dir + '/ssh_keys/root_rsa') }}" 5 | ansible_user: "{{ ansible_initial_user | default('root') }}" 6 | gather_facts: False 7 | become: True 8 | tasks: 9 | - name: "ansible required: install python" 10 | raw: python3 -c "import simplejson" || (DEBIAN_FRONTEND=noninteractive apt-get update -y && DEBIAN_FRONTEND=noninteractive apt-get install python3-minimal -y && DEBIAN_FRONTEND=noninteractive apt-get install python3-simplejson -y) 11 | 12 | - hosts: all 13 | vars: 14 | ansible_ssh_private_key_file: "{{ ansible_initial_ssh_private_key_file | default(playbook_dir + '/ssh_keys/root_rsa') }}" 15 | ansible_user: "{{ ansible_initial_user | default('root') }}" 16 | become: True 17 | tasks: 18 | - name: Ensure the en_US 
locale exists 19 | locale_gen: 20 | name: en_US.UTF-8 21 | state: present 22 | - name: set en_US as default locale 23 | command: update-locale set-locale LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 24 | 25 | - hosts: all 26 | vars: 27 | ansible_ssh_private_key_file: "{{ ansible_initial_ssh_private_key_file | default(playbook_dir + '/ssh_keys/root_rsa') }}" 28 | ansible_user: "{{ ansible_initial_user | default('root') }}" 29 | 30 | apt_update_cache: True 31 | apt_restart_after_dist_upgrade: True 32 | become: True 33 | roles: 34 | - role: full-apt-upgrade 35 | - role: essential-software-setup 36 | - role: user-setup 37 | 38 | - hosts: all 39 | vars: 40 | ansible_ssh_private_key_file: "{{ global_ansible_ssh_private_key_file | default((project_base_dir | default(playbook_dir)) + '/ssh_keys/ansible_rsa') }}" 41 | ansible_user: "{{ global_ansible_user | default('ansible') }}" 42 | 43 | apt_update_cache: True 44 | apt_restart_after_dist_upgrade: True 45 | become: True 46 | roles: 47 | - role: notnagel-user 48 | 49 | 50 | - hosts: ansiblemanager 51 | vars: 52 | ansible_ssh_private_key_file: "{{ global_ansible_ssh_private_key_file | default((project_base_dir | default(playbook_dir)) + '/ssh_keys/ansible_rsa') }}" 53 | ansible_user: "{{ global_ansible_user | default('ansible') }}" 54 | 55 | apt_update_cache: True 56 | apt_restart_after_dist_upgrade: True 57 | become: True 58 | roles: 59 | - docker-sysctl-tune 60 | - docker-pre-setup 61 | - docker-setup 62 | 63 | - hosts: all 64 | vars: 65 | ansible_ssh_private_key_file: "{{ global_ansible_ssh_private_key_file | default((project_base_dir | default(playbook_dir)) + '/ssh_keys/ansible_rsa') }}" 66 | ansible_user: "{{ global_ansible_user | default('ansible') }}" 67 | 68 | apt_update_cache: True 69 | apt_restart_after_dist_upgrade: True 70 | become: True 71 | roles: 72 | - role: developer-accounts 73 | 74 | -------------------------------------------------------------------------------- 
/swarmsible/roles/essential-software-setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # careful: this is executed on the management node as well, so this should 3 | # really only include _essential_ software 4 | 5 | - name: Install essential packages (ubuntu == 18) 6 | apt: 7 | name: "{{ packages }}" 8 | update_cache: "{{ apt_update_cache | default('True') }}" 9 | vars: 10 | packages: 11 | - python-setuptools 12 | - python-pip 13 | - python-passlib 14 | when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version == '18' 15 | 16 | - name: Install essential packages 17 | apt: 18 | name: "{{ packages }}" 19 | update_cache: "{{ apt_update_cache | default('True') }}" 20 | vars: 21 | packages: 22 | - vim 23 | - ufw 24 | - sudo 25 | - python3-setuptools 26 | - python3-pip 27 | - python3-passlib 28 | - git 29 | - acl 30 | 31 | - name: configure and enable ufw 32 | remote_user: root 33 | become: true 34 | block: 35 | - name: Allow ssh connections via ipv4 36 | ufw: 37 | rule: allow 38 | proto: tcp 39 | to: 0.0.0.0/0 40 | port: "22" 41 | when: not (ssh_only_management_nodes | default('False') | bool) 42 | register: firewall 43 | until: firewall is succeeded 44 | retries: 10 45 | delay: 3 46 | 47 | - name: Allow ssh ipv6 connections via loopback 48 | ufw: 49 | rule: allow 50 | proto: tcp 51 | from: ::1 52 | to: ::1 53 | port: "22" 54 | register: firewall 55 | until: firewall is succeeded 56 | retries: 10 57 | delay: 3 58 | 59 | - name: Allow ssh ipv6 connections to ipv6 floating ip if configured 60 | ufw: 61 | rule: allow 62 | proto: tcp 63 | to: "{{ floating_ipv6 }}" 64 | port: "22" 65 | when: floating_ipv6 is defined 66 | register: firewall 67 | until: firewall is succeeded 68 | retries: 10 69 | delay: 3 70 | 71 | - name: Allow connections to SSH from management node ips 72 | ufw: 73 | rule: allow 74 | from_ip: "{{ item }}" 75 | to_ip: 0.0.0.0/0 76 | to_port: "22" 77 | proto: tcp 78 | 
comment: "allow management node" 79 | with_items: "{{ management_node_ips | default([]) }}" 80 | when: ssh_only_management_nodes | default('False') | bool 81 | register: firewall 82 | until: firewall is succeeded 83 | retries: 10 84 | delay: 3 85 | 86 | - name: "delete port 22 allow all rules for ipv4 if they exist" 87 | ufw: 88 | delete: yes 89 | rule: allow 90 | proto: tcp 91 | to: 0.0.0.0/0 92 | port: 22 93 | when: ssh_only_management_nodes | default('False') | bool 94 | register: firewall 95 | until: firewall is succeeded 96 | retries: 10 97 | delay: 3 98 | 99 | - name: Enable ufw 100 | ufw: 101 | state: enabled 102 | 103 | - name: molly guard 104 | include_tasks: subtasks/molly-guard.yml 105 | 106 | - set_fact: 107 | setup_disable_ipv6_val: "{{ setup_disable_ipv6 | default('False') }}" 108 | 109 | - name: Disable ipv6 110 | raw: echo 'Acquire::ForceIPv4 "true";' | sudo tee /etc/apt/apt.conf.d/99force-ipv4 111 | when: setup_disable_ipv6_val|bool -------------------------------------------------------------------------------- /swarmsible/roles/user-setup/tasks/subtasks/templates/Debian.sshd_config.j2: -------------------------------------------------------------------------------- 1 | # Package generated configuration file 2 | # See the sshd_config(5) manpage for details 3 | 4 | GatewayPorts {{ sshd_gateway_ports | default('no') }} 5 | 6 | # What ports, IPs and protocols we listen for 7 | Port 22 8 | # Use these options to restrict which interfaces/protocols sshd will bind to 9 | #ListenAddress :: 10 | #ListenAddress 0.0.0.0 11 | Protocol 2 12 | # HostKeys for protocol version 2 13 | HostKey /etc/ssh/ssh_host_rsa_key 14 | HostKey /etc/ssh/ssh_host_dsa_key 15 | HostKey /etc/ssh/ssh_host_ecdsa_key 16 | HostKey /etc/ssh/ssh_host_ed25519_key 17 | #Privilege Separation is turned on for security 18 | UsePrivilegeSeparation yes 19 | 20 | # Lifetime and size of ephemeral version 1 server key 21 | KeyRegenerationInterval 3600 22 | ServerKeyBits 1024 23 | 24 | # Logging 
25 | SyslogFacility AUTH 26 | LogLevel INFO 27 | 28 | # Authentication: 29 | LoginGraceTime 120 30 | 31 | StrictModes yes 32 | 33 | RSAAuthentication yes 34 | PubkeyAuthentication yes 35 | #AuthorizedKeysFile %h/.ssh/authorized_keys 36 | 37 | # Don't read the user's ~/.rhosts and ~/.shosts files 38 | IgnoreRhosts yes 39 | # For this to work you will also need host keys in /etc/ssh_known_hosts 40 | RhostsRSAAuthentication no 41 | # similar for protocol version 2 42 | HostbasedAuthentication no 43 | # Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication 44 | #IgnoreUserKnownHosts yes 45 | 46 | # To enable empty passwords, change to yes (NOT RECOMMENDED) 47 | PermitEmptyPasswords no 48 | 49 | # Change to yes to enable challenge-response passwords (beware issues with 50 | # some PAM modules and threads) 51 | ChallengeResponseAuthentication no 52 | 53 | # Change to no to disable tunnelled clear text passwords 54 | # changed: disabled password auth 55 | PasswordAuthentication no 56 | 57 | # Kerberos options 58 | #KerberosAuthentication no 59 | #KerberosGetAFSToken no 60 | #KerberosOrLocalPasswd yes 61 | #KerberosTicketCleanup yes 62 | 63 | # GSSAPI options 64 | #GSSAPIAuthentication no 65 | #GSSAPICleanupCredentials yes 66 | 67 | X11Forwarding yes 68 | X11DisplayOffset 10 69 | PrintMotd no 70 | PrintLastLog yes 71 | TCPKeepAlive yes 72 | #UseLogin no 73 | 74 | #MaxStartups 10:30:60 75 | #Banner /etc/issue.net 76 | 77 | # Allow client to pass locale environment variables 78 | AcceptEnv LANG LC_* 79 | 80 | Subsystem sftp /usr/lib/openssh/sftp-server 81 | 82 | # Set this to 'yes' to enable PAM authentication, account processing, 83 | # and session processing. If this is enabled, PAM authentication will 84 | # be allowed through the ChallengeResponseAuthentication and 85 | # PasswordAuthentication. 
Depending on your PAM configuration, 86 | # PAM authentication via ChallengeResponseAuthentication may bypass 87 | # the setting of "PermitRootLogin yes". 88 | # If you just want the PAM account and session checks to run without 89 | # PAM authentication, then enable this but set PasswordAuthentication 90 | # and ChallengeResponseAuthentication to 'no'. 91 | UsePAM yes 92 | 93 | {% if (management_node_ips | default([]) | select('match', '^.+') | list | length) > 0 %} 94 | # changed: disable root login for all normal IPs 95 | PermitRootLogin no 96 | # changed: allow root for management nodes 97 | {% for management_node_ip in management_node_ips %} 98 | Match Address {{ management_node_ip }} 99 | PermitRootLogin yes 100 | {% endfor %} 101 | 102 | # changed: only allow login for ansible from management nodes 103 | # DenyUsers {{ global_ansible_user | default('ansible') }}@{% for management_node_ip in management_node_ips %}!{{management_node_ip}},{% endfor %}* 104 | {% else %} 105 | # changed: allow root login for all normal IPs because no management node ip was configured 106 | PermitRootLogin yes 107 | {% endif %} -------------------------------------------------------------------------------- /swarmsible/roles/user-setup/tasks/subtasks/templates/Ubuntu.sshd_config.j2: -------------------------------------------------------------------------------- 1 | # Package generated configuration file 2 | # See the sshd_config(5) manpage for details 3 | 4 | GatewayPorts {{ sshd_gateway_ports | default('no') }} 5 | 6 | # What ports, IPs and protocols we listen for 7 | Port 22 8 | # Use these options to restrict which interfaces/protocols sshd will bind to 9 | #ListenAddress :: 10 | #ListenAddress 0.0.0.0 11 | Protocol 2 12 | # HostKeys for protocol version 2 13 | HostKey /etc/ssh/ssh_host_rsa_key 14 | HostKey /etc/ssh/ssh_host_dsa_key 15 | HostKey /etc/ssh/ssh_host_ecdsa_key 16 | HostKey /etc/ssh/ssh_host_ed25519_key 17 | #Privilege Separation is turned on for security 18 | 
UsePrivilegeSeparation yes 19 | 20 | # Lifetime and size of ephemeral version 1 server key 21 | KeyRegenerationInterval 3600 22 | ServerKeyBits 1024 23 | 24 | # Logging 25 | SyslogFacility AUTH 26 | LogLevel INFO 27 | 28 | # Authentication: 29 | LoginGraceTime 120 30 | 31 | StrictModes yes 32 | 33 | RSAAuthentication yes 34 | PubkeyAuthentication yes 35 | #AuthorizedKeysFile %h/.ssh/authorized_keys 36 | 37 | # Don't read the user's ~/.rhosts and ~/.shosts files 38 | IgnoreRhosts yes 39 | # For this to work you will also need host keys in /etc/ssh_known_hosts 40 | RhostsRSAAuthentication no 41 | # similar for protocol version 2 42 | HostbasedAuthentication no 43 | # Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication 44 | #IgnoreUserKnownHosts yes 45 | 46 | # To enable empty passwords, change to yes (NOT RECOMMENDED) 47 | PermitEmptyPasswords no 48 | 49 | # Change to yes to enable challenge-response passwords (beware issues with 50 | # some PAM modules and threads) 51 | ChallengeResponseAuthentication no 52 | 53 | # Change to no to disable tunnelled clear text passwords 54 | # changed: disabled password auth 55 | PasswordAuthentication no 56 | 57 | # Kerberos options 58 | #KerberosAuthentication no 59 | #KerberosGetAFSToken no 60 | #KerberosOrLocalPasswd yes 61 | #KerberosTicketCleanup yes 62 | 63 | # GSSAPI options 64 | #GSSAPIAuthentication no 65 | #GSSAPICleanupCredentials yes 66 | 67 | X11Forwarding yes 68 | X11DisplayOffset 10 69 | PrintMotd no 70 | PrintLastLog yes 71 | TCPKeepAlive yes 72 | #UseLogin no 73 | 74 | #MaxStartups 10:30:60 75 | #Banner /etc/issue.net 76 | 77 | # Allow client to pass locale environment variables 78 | AcceptEnv LANG LC_* 79 | 80 | Subsystem sftp /usr/lib/openssh/sftp-server 81 | 82 | # Set this to 'yes' to enable PAM authentication, account processing, 83 | # and session processing. 
If this is enabled, PAM authentication will 84 | # be allowed through the ChallengeResponseAuthentication and 85 | # PasswordAuthentication. Depending on your PAM configuration, 86 | # PAM authentication via ChallengeResponseAuthentication may bypass 87 | # the setting of "PermitRootLogin yes". 88 | # If you just want the PAM account and session checks to run without 89 | # PAM authentication, then enable this but set PasswordAuthentication 90 | # and ChallengeResponseAuthentication to 'no'. 91 | UsePAM yes 92 | 93 | {% if (management_node_ips | default([]) | select('match', '^.+') | list | length) > 0 %} 94 | # changed: disable root login for all normal IPs 95 | PermitRootLogin no 96 | # changed: allow root for management nodes 97 | {% for management_node_ip in management_node_ips %} 98 | Match Address {{ management_node_ip }} 99 | PermitRootLogin yes 100 | {% endfor %} 101 | 102 | # changed: only allow login for ansible from management nodes 103 | # DenyUsers {{ global_ansible_user | default('ansible') }}@{% for management_node_ip in management_node_ips %}!{{management_node_ip}},{% endfor %}* 104 | {% else %} 105 | # changed: allow root login for all normal IPs because no management node ip was configured 106 | PermitRootLogin yes 107 | {% endif %} -------------------------------------------------------------------------------- /swarmsible/roles/user-setup/files/.bashrc: -------------------------------------------------------------------------------- 1 | # ~/.bashrc: executed by bash(1) for non-login shells. 2 | # see /usr/share/doc/bash/examples/startup-files (in the package bash-doc) 3 | # for examples 4 | 5 | # If not running interactively, don't do anything 6 | case $- in 7 | *i*) ;; 8 | *) return;; 9 | esac 10 | 11 | #HISTCONTROL=ignoreboth <--DISABLE!!!
12 | HISTFILESIZE=999999999 13 | HISTSIZE=999999999 14 | HISTTIMEFORMAT="[%F %T] " 15 | HISTFILE=~/.bash_eternal_history 16 | 17 | # append to the history file, don't overwrite it 18 | shopt -s histappend 19 | 20 | # check the window size after each command and, if necessary, 21 | # update the values of LINES and COLUMNS. 22 | shopt -s checkwinsize 23 | 24 | # If set, the pattern "**" used in a pathname expansion context will 25 | # match all files and zero or more directories and subdirectories. 26 | #shopt -s globstar 27 | 28 | # make less more friendly for non-text input files, see lesspipe(1) 29 | [ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)" 30 | 31 | # set variable identifying the chroot you work in (used in the prompt below) 32 | if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then 33 | debian_chroot=$(cat /etc/debian_chroot) 34 | fi 35 | 36 | # set a fancy prompt (non-color, unless we know we "want" color) 37 | case "$TERM" in 38 | xterm-color|*-256color) color_prompt=yes;; 39 | esac 40 | 41 | # uncomment for a colored prompt, if the terminal has the capability; turned 42 | # off by default to not distract the user: the focus in a terminal window 43 | # should be on the output of commands, not on the prompt 44 | force_color_prompt=yes 45 | 46 | if [ -n "$force_color_prompt" ]; then 47 | if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then 48 | # We have color support; assume it's compliant with Ecma-48 49 | # (ISO/IEC-6429). (Lack of such support is extremely rare, and such 50 | # a case would tend to support setf rather than setaf.) 
51 | color_prompt=yes 52 | else 53 | color_prompt= 54 | fi 55 | fi 56 | 57 | if [ "$color_prompt" = yes ]; then 58 | #PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ ' 59 | PS1='${debian_chroot:+($debian_chroot)}\[\033[01;31m\]\u\[\033[01;33m\]@\[\033[01;36m\]\h \[\033[01;33m\]\w \[\033[01;35m\]\$ \[\033[00m\]' 60 | else 61 | PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ ' 62 | fi 63 | unset color_prompt force_color_prompt 64 | 65 | # If this is an xterm set the title to user@host:dir 66 | case "$TERM" in 67 | xterm*|rxvt*) 68 | PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1" 69 | ;; 70 | *) 71 | ;; 72 | esac 73 | 74 | # enable color support of ls and also add handy aliases 75 | if [ -x /usr/bin/dircolors ]; then 76 | test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)" 77 | alias ls='ls --color=auto' 78 | #alias dir='dir --color=auto' 79 | #alias vdir='vdir --color=auto' 80 | 81 | alias grep='grep --color=auto' 82 | alias fgrep='fgrep --color=auto' 83 | alias egrep='egrep --color=auto' 84 | fi 85 | 86 | # colored GCC warnings and errors 87 | #export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01' 88 | 89 | # some more ls aliases 90 | alias ll='ls -alF' 91 | alias la='ls -A' 92 | alias l='ls -CF' 93 | 94 | # Add an "alert" alias for long running commands. Use like so: 95 | # sleep 10; alert 96 | alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"' 97 | 98 | # Alias definitions. 99 | # You may want to put all your additions into a separate file like 100 | # ~/.bash_aliases, instead of adding them here directly. 101 | # See /usr/share/doc/bash-doc/examples in the bash-doc package. 102 | 103 | if [ -f ~/.bash_aliases ]; then 104 | . 
~/.bash_aliases 105 | fi 106 | 107 | # enable programmable completion features (you don't need to enable 108 | # this, if it's already enabled in /etc/bash.bashrc and /etc/profile 109 | # sources /etc/bash.bashrc). 110 | if ! shopt -oq posix; then 111 | if [ -f /usr/share/bash-completion/bash_completion ]; then 112 | . /usr/share/bash-completion/bash_completion 113 | elif [ -f /etc/bash_completion ]; then 114 | . /etc/bash_completion 115 | fi 116 | fi 117 | 118 | alias rm='rm -i' -------------------------------------------------------------------------------- /swarmsible/roles/developer-accounts/files/.bashrc: -------------------------------------------------------------------------------- 1 | # ~/.bashrc: executed by bash(1) for non-login shells. 2 | # see /usr/share/doc/bash/examples/startup-files (in the package bash-doc) 3 | # for examples 4 | 5 | # If not running interactively, don't do anything 6 | case $- in 7 | *i*) ;; 8 | *) return;; 9 | esac 10 | 11 | #HISTCONTROL=ignoreboth <--DISABLE!!! 12 | HISTFILESIZE=999999999 13 | HISTSIZE=999999999 14 | HISTTIMEFORMAT="[%F %T] " 15 | HISTFILE=~/.bash_eternal_history 16 | 17 | # append to the history file, don't overwrite it 18 | shopt -s histappend 19 | 20 | # check the window size after each command and, if necessary, 21 | # update the values of LINES and COLUMNS. 22 | shopt -s checkwinsize 23 | 24 | # If set, the pattern "**" used in a pathname expansion context will 25 | # match all files and zero or more directories and subdirectories. 
26 | #shopt -s globstar 27 | 28 | # make less more friendly for non-text input files, see lesspipe(1) 29 | [ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)" 30 | 31 | # set variable identifying the chroot you work in (used in the prompt below) 32 | if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then 33 | debian_chroot=$(cat /etc/debian_chroot) 34 | fi 35 | 36 | # set a fancy prompt (non-color, unless we know we "want" color) 37 | case "$TERM" in 38 | xterm-color|*-256color) color_prompt=yes;; 39 | esac 40 | 41 | # uncomment for a colored prompt, if the terminal has the capability; turned 42 | # off by default to not distract the user: the focus in a terminal window 43 | # should be on the output of commands, not on the prompt 44 | force_color_prompt=yes 45 | 46 | if [ -n "$force_color_prompt" ]; then 47 | if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then 48 | # We have color support; assume it's compliant with Ecma-48 49 | # (ISO/IEC-6429). (Lack of such support is extremely rare, and such 50 | # a case would tend to support setf rather than setaf.) 
51 | color_prompt=yes 52 | else 53 | color_prompt= 54 | fi 55 | fi 56 | 57 | if [ "$color_prompt" = yes ]; then 58 | #PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ ' 59 | PS1='${debian_chroot:+($debian_chroot)}\[\033[01;31m\]\u\[\033[01;33m\]@\[\033[01;36m\]\h \[\033[01;33m\]\w \[\033[01;35m\]\$ \[\033[00m\]' 60 | else 61 | PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ ' 62 | fi 63 | unset color_prompt force_color_prompt 64 | 65 | # If this is an xterm set the title to user@host:dir 66 | case "$TERM" in 67 | xterm*|rxvt*) 68 | PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1" 69 | ;; 70 | *) 71 | ;; 72 | esac 73 | 74 | # enable color support of ls and also add handy aliases 75 | if [ -x /usr/bin/dircolors ]; then 76 | test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)" 77 | alias ls='ls --color=auto' 78 | #alias dir='dir --color=auto' 79 | #alias vdir='vdir --color=auto' 80 | 81 | alias grep='grep --color=auto' 82 | alias fgrep='fgrep --color=auto' 83 | alias egrep='egrep --color=auto' 84 | fi 85 | 86 | # colored GCC warnings and errors 87 | #export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01' 88 | 89 | # some more ls aliases 90 | alias ll='ls -alF' 91 | alias la='ls -A' 92 | alias l='ls -CF' 93 | 94 | # Add an "alert" alias for long running commands. Use like so: 95 | # sleep 10; alert 96 | alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"' 97 | 98 | # Alias definitions. 99 | # You may want to put all your additions into a separate file like 100 | # ~/.bash_aliases, instead of adding them here directly. 101 | # See /usr/share/doc/bash-doc/examples in the bash-doc package. 102 | 103 | if [ -f ~/.bash_aliases ]; then 104 | . 
~/.bash_aliases 105 | fi 106 | 107 | # enable programmable completion features (you don't need to enable 108 | # this, if it's already enabled in /etc/bash.bashrc and /etc/profile 109 | # sources /etc/bash.bashrc). 110 | if ! shopt -oq posix; then 111 | if [ -f /usr/share/bash-completion/bash_completion ]; then 112 | . /usr/share/bash-completion/bash_completion 113 | elif [ -f /etc/bash_completion ]; then 114 | . /etc/bash_completion 115 | fi 116 | fi 117 | 118 | alias rm='rm -i' -------------------------------------------------------------------------------- /swarmsible/roles/docker-swarm-firewall/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Allow esp connections via ipv4 2 | ufw: 3 | rule: allow 4 | proto: esp 5 | src: "{{ hostvars[item]['docker_swarm_advertise_addr'] | default(hostvars[item]['host_ip'] | default(hostvars[item]['ansible_host'])) }}/32" 6 | to: "{{ docker_swarm_advertise_addr | default(host_ip | default(ansible_host)) }}" 7 | comment: "docker swarm esp - host: {{ hostvars[item]['inventory_hostname'] }}" 8 | with_items: "{{ groups['docker_swarm'] | default([]) }}" 9 | register: firewall 10 | until: firewall is succeeded 11 | retries: 10 12 | delay: 3 13 | 14 | - name: Allow 2377 tcp connections via ipv4 15 | ufw: 16 | rule: allow 17 | proto: tcp 18 | src: "{{ hostvars[item]['docker_swarm_advertise_addr'] | default(hostvars[item]['host_ip'] | default(hostvars[item]['ansible_host'])) }}/32" 19 | to: "{{ docker_swarm_advertise_addr | default(host_ip | default(ansible_host)) }}" 20 | port: "2377" 21 | comment: "docker swarm 2377 tcp - host: {{ hostvars[item]['inventory_hostname'] }}" 22 | with_items: "{{ groups['docker_swarm'] | default([]) }}" 23 | register: firewall 24 | until: firewall is succeeded 25 | retries: 10 26 | delay: 3 27 | 28 | - name: Allow 7946 udp connections via ipv4 29 | ufw: 30 | rule: allow 31 | proto: udp 32 | src: "{{ hostvars[item]['docker_swarm_advertise_addr'] | 
default(hostvars[item]['host_ip'] | default(hostvars[item]['ansible_host'])) }}/32" 33 | to: "{{ docker_swarm_advertise_addr | default(host_ip | default(ansible_host)) }}" 34 | port: "7946" 35 | comment: "docker swarm 7946 udp - host: {{ hostvars[item]['inventory_hostname'] }}" 36 | with_items: "{{ groups['docker_swarm'] | default([]) }}" 37 | register: firewall 38 | until: firewall is succeeded 39 | retries: 10 40 | delay: 3 41 | 42 | - name: Allow 7946 tcp connections via ipv4 43 | ufw: 44 | rule: allow 45 | proto: tcp 46 | src: "{{ hostvars[item]['docker_swarm_advertise_addr'] | default(hostvars[item]['host_ip'] | default(hostvars[item]['ansible_host'])) }}/32" 47 | to: "{{ docker_swarm_advertise_addr | default(host_ip | default(ansible_host)) }}" 48 | port: "7946" 49 | comment: "docker swarm 7946 tcp - host: {{ hostvars[item]['inventory_hostname'] }}" 50 | with_items: "{{ groups['docker_swarm'] | default([]) }}" 51 | register: firewall 52 | until: firewall is succeeded 53 | retries: 10 54 | delay: 3 55 | 56 | - name: Allow 4789 udp connections via ipv4 57 | ufw: 58 | rule: allow 59 | proto: udp 60 | src: "{{ hostvars[item]['docker_swarm_advertise_addr'] | default(hostvars[item]['host_ip'] | default(hostvars[item]['ansible_host'])) }}/32" 61 | to: "{{ docker_swarm_advertise_addr | default(host_ip | default(ansible_host)) }}" 62 | port: "4789" 63 | comment: "docker swarm 4789 udp - host: {{ hostvars[item]['inventory_hostname'] }}" 64 | with_items: "{{ groups['docker_swarm'] | default([]) }}" 65 | register: firewall 66 | until: firewall is succeeded 67 | retries: 10 68 | delay: 3 69 | 70 | # if we use a subnet, we must change this role accordingly 71 | 72 | 73 | - name: Allow esp connections via ipv4 74 | ufw: 75 | rule: allow 76 | proto: esp 77 | src: "{{ item.src }}" 78 | to: "{{ docker_swarm_advertise_addr | default(host_ip | default(ansible_host)) }}" 79 | comment: "docker swarm esp - source: {{ item.name }}" 80 | with_items: "{{ docker_swarm_trusted_sources | 
default([]) }}" 81 | register: firewall 82 | until: firewall is succeeded 83 | retries: 10 84 | delay: 3 85 | 86 | - name: Allow 2377 tcp connections via ipv4 87 | ufw: 88 | rule: allow 89 | proto: tcp 90 | src: "{{ item.src }}" 91 | to: "{{ docker_swarm_advertise_addr | default(host_ip | default(ansible_host)) }}" 92 | port: "2377" 93 | comment: "docker swarm 2377 tcp - source: {{ item.name }}" 94 | with_items: "{{ docker_swarm_trusted_sources | default([]) }}" 95 | register: firewall 96 | until: firewall is succeeded 97 | retries: 10 98 | delay: 3 99 | 100 | - name: Allow 7946 udp connections via ipv4 101 | ufw: 102 | rule: allow 103 | proto: udp 104 | src: "{{ item.src }}" 105 | to: "{{ docker_swarm_advertise_addr | default(host_ip | default(ansible_host)) }}" 106 | port: "7946" 107 | comment: "docker swarm 7946 udp - source: {{ item.name }}" 108 | with_items: "{{ docker_swarm_trusted_sources | default([]) }}" 109 | register: firewall 110 | until: firewall is succeeded 111 | retries: 10 112 | delay: 3 113 | 114 | - name: Allow 7946 tcp connections via ipv4 115 | ufw: 116 | rule: allow 117 | proto: tcp 118 | src: "{{ item.src }}" 119 | to: "{{ docker_swarm_advertise_addr | default(host_ip | default(ansible_host)) }}" 120 | port: "7946" 121 | comment: "docker swarm 7946 tcp - source: {{ item.name }}" 122 | with_items: "{{ docker_swarm_trusted_sources | default([]) }}" 123 | register: firewall 124 | until: firewall is succeeded 125 | retries: 10 126 | delay: 3 127 | 128 | - name: Allow 4789 udp connections via ipv4 129 | ufw: 130 | rule: allow 131 | proto: udp 132 | src: "{{ item.src }}" 133 | to: "{{ docker_swarm_advertise_addr | default(host_ip | default(ansible_host)) }}" 134 | port: "4789" 135 | comment: "docker swarm 4789 udp - source: {{ item.name }}" 136 | with_items: "{{ docker_swarm_trusted_sources | default([]) }}" 137 | register: firewall 138 | until: firewall is succeeded 139 | retries: 10 140 | delay: 3 141 | 142 | - name: Allow 9323 metrics 
connections via local bridge networks 143 | ufw: 144 | rule: allow 145 | proto: tcp 146 | src: "172.16.0.0/12" 147 | to: "172.16.0.0/12" 148 | port: "9323" 149 | comment: "docker metrics on 9323/tcp" 150 | register: firewall 151 | until: firewall is succeeded 152 | retries: 10 153 | delay: 3 -------------------------------------------------------------------------------- /swarmsible/roles/ufw-docker-install/files/ufw-docker: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | [[ -n "${DEBUG:-}" ]] && set -x 4 | 5 | LANG=en_US.UTF-8 6 | LANGUAGE=en_US: 7 | LC_ALL=en_US.UTF-8 8 | PATH="/bin:/usr/bin:/sbin:/usr/sbin" 9 | 10 | GREP_REGEXP_INSTANCE_NAME="[-_.[:alnum:]]\\+" 11 | DEFAULT_PROTO=tcp 12 | 13 | ufw_docker_agent=ufw-docker-agent 14 | ufw_docker_agent_image="${UFW_DOCKER_AGENT_IMAGE:-chaifeng/${ufw_docker_agent}:221002-nf_tables}" 15 | 16 | if [[ "${ufw_docker_agent_image}" = *-@(legacy|nf_tables) ]]; then 17 | if iptables --version | grep -F '(legacy)' &>/dev/null; then 18 | ufw_docker_agent_image="${ufw_docker_agent_image%-*}-legacy" 19 | else 20 | ufw_docker_agent_image="${ufw_docker_agent_image%-*}-nf_tables" 21 | fi 22 | fi 23 | 24 | test -n "$ufw_docker_agent_image" 25 | 26 | function ufw-docker--status() { 27 | ufw-docker--list "$GREP_REGEXP_INSTANCE_NAME" 28 | } 29 | 30 | function ufw-docker--list() { 31 | local INSTANCE_NAME="$1" 32 | local INSTANCE_PORT="${2:-}" 33 | local PROTO="${3:-${DEFAULT_PROTO}}" 34 | local NETWORK="${4:-}" 35 | 36 | if [[ -z "$INSTANCE_PORT" ]]; then 37 | INSTANCE_PORT="[[:digit:]]\\+" 38 | PROTO="\\(tcp\\|udp\\)" 39 | fi 40 | 41 | if [[ -z "$NETWORK" ]]; then 42 | NETWORK="[[:graph:]]*" 43 | fi 44 | 45 | ufw status numbered | grep "# allow ${INSTANCE_NAME}\\( ${INSTANCE_PORT}\\/${PROTO}\\)\\( ${NETWORK}\\)\$" || \ 46 | ufw status numbered | grep "# allow ${INSTANCE_NAME}\\( ${INSTANCE_PORT}\\/${PROTO}\\)\$" || \ 47 | ufw status numbered | grep "# allow 
${INSTANCE_NAME}\$" 48 | } 49 | 50 | function ufw-docker--list-number() { 51 | ufw-docker--list "$@" | sed -e 's/^\[[[:blank:]]*\([[:digit:]]\+\)\].*/\1/' 52 | } 53 | 54 | function ufw-docker--delete() { 55 | for UFW_NUMBER in $(ufw-docker--list-number "$@" | sort -rn); do 56 | echo "delete \"$UFW_NUMBER\"" 57 | echo y | ufw delete "$UFW_NUMBER" || true 58 | done 59 | } 60 | 61 | function ufw-docker--allow() { 62 | local INSTANCE_NAME="$1" 63 | local INSTANCE_PORT="$2" 64 | local PROTO="$3" 65 | local NETWORK="${4:-}" 66 | 67 | docker inspect "$INSTANCE_NAME" &>/dev/null || 68 | die "Docker instance \"$INSTANCE_NAME\" doesn't exist." 69 | 70 | mapfile -t INSTANCE_IP_ADDRESSES < <(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{"\n"}}{{end}}' "$INSTANCE_NAME" 2>/dev/null | remove_blank_lines) 71 | 72 | [[ -z "${INSTANCE_IP_ADDRESSES:-}" ]] && die "Could not find a running instance \"$INSTANCE_NAME\"." 73 | 74 | mapfile -t INSTANCE_NETWORK_NAMES < <(docker inspect --format='{{range $k, $v := .NetworkSettings.Networks}}{{printf "%s\n" $k}}{{end}}' "$INSTANCE_NAME" 2>/dev/null | remove_blank_lines) 75 | mapfile -t PORT_PROTO_LIST < <(docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}}{{with $conf}}{{$p}}{{"\n"}}{{end}}{{end}}' "$INSTANCE_NAME" | remove_blank_lines) 76 | 77 | if [[ -z "${PORT_PROTO_LIST:-}" ]]; then 78 | err "\"$INSTANCE_NAME\" doesn't have any published ports." 79 | return 1 80 | fi 81 | 82 | RETVAL=1 83 | for PORT_PROTO in "${PORT_PROTO_LIST[@]}"; do 84 | if [[ -z "$INSTANCE_PORT" || "$PORT_PROTO" = "${INSTANCE_PORT}/${PROTO}" ]]; then 85 | ITER=0 86 | for IP in "${INSTANCE_IP_ADDRESSES[@]}"; do 87 | INSTANCE_NETWORK="${INSTANCE_NETWORK_NAMES[$ITER]}" 88 | ITER=$((ITER+1)) 89 | if [[ -n "$NETWORK" ]] && [[ "$NETWORK" != "$INSTANCE_NETWORK" ]]; then 90 | continue 91 | fi 92 | ufw-docker--add-rule "$INSTANCE_NAME" "$IP" "${PORT_PROTO%/*}" "${PORT_PROTO#*/}" "${INSTANCE_NETWORK}" 93 | RETVAL="$?" 
94 | done 95 | fi 96 | done 97 | if [[ "$RETVAL" -ne 0 ]]; then 98 | err "Fail to add rule(s), cannot find the published port ${INSTANCE_PORT}/${PROTO} of instance \"${INSTANCE_NAME}\" or cannot update outdated rule(s)." 99 | fi 100 | return "$RETVAL" 101 | } 102 | 103 | function ufw-docker--add-service-rule() { 104 | declare service_id="$1" 105 | declare port="${2%/*}" 106 | declare proto="${2#*/}" 107 | 108 | declare target_ip_port 109 | target_ip_port="$(iptables -t nat -L DOCKER-INGRESS | grep -E "^DNAT\\s+${proto}\\s+.+\\sto:[.0-9]+:${port}\$" | grep -Eo "[.0-9]+:${port}\$")" 110 | 111 | [[ -z "$target_ip_port" ]] && die "Could not find VIP of service ${service_id}." 112 | 113 | ufw-docker--add-rule "$service_id" "${target_ip_port%:*}" "$port" "$proto" 114 | } 115 | 116 | function ufw-docker--add-rule() { 117 | local INSTANCE_NAME="$1" 118 | local INSTANCE_IP_ADDRESS="$2" 119 | local PORT="$3" 120 | local PROTO="$4" 121 | local NETWORK="${5:-}" 122 | 123 | declare comment 124 | 125 | echo "allow ${INSTANCE_NAME} ${PORT}/${PROTO} ${NETWORK}" 126 | typeset -a UFW_OPTS 127 | UFW_OPTS=(route allow proto "${PROTO}" 128 | from any to "$INSTANCE_IP_ADDRESS") 129 | comment="allow ${INSTANCE_NAME}" 130 | [[ -n "$PORT" ]] && { 131 | UFW_OPTS+=(port "${PORT}") 132 | comment="$comment ${PORT}/${PROTO}" 133 | } 134 | [[ -n "$NETWORK" ]] && { 135 | comment="$comment ${NETWORK}" 136 | } 137 | UFW_OPTS+=(comment "$comment") 138 | 139 | if ufw-docker--list "$INSTANCE_NAME" "$PORT" "$PROTO" "$NETWORK" &>/dev/null; then 140 | ufw --dry-run "${UFW_OPTS[@]}" | grep "^Skipping" && return 0 141 | err "Remove outdated rule." 
142 | ufw-docker--delete "$INSTANCE_NAME" "$PORT" "$PROTO" "$NETWORK" 143 | fi 144 | echo ufw "${UFW_OPTS[@]}" 145 | ufw "${UFW_OPTS[@]}" 146 | } 147 | 148 | function ufw-docker--instance-name() { 149 | local INSTANCE_ID="$1" 150 | { 151 | { 152 | docker inspect --format='{{.Name}}' "$INSTANCE_ID" 2>/dev/null | sed -e 's,^/,,' | 153 | grep "^${GREP_REGEXP_INSTANCE_NAME}\$" 2>/dev/null 154 | } || echo -n "$INSTANCE_ID"; 155 | } | remove_blank_lines 156 | } 157 | 158 | function ufw-docker--service() { 159 | declare service_action="${1:-help}" 160 | case "$service_action" in 161 | delete) 162 | shift || true 163 | if [[ "${1:?Invalid 'delete' command syntax.}" != "allow" ]]; then 164 | die "\"delete\" command only support removing allowed rules" 165 | fi 166 | shift || true 167 | declare service_id_or_name="${1:?Missing swarm service name or service ID}" 168 | 169 | "ufw-docker--service-${service_action}" "${service_id_or_name}" 170 | ;; 171 | allow) 172 | shift || true 173 | declare service_id_or_name="${1:?Missing swarm service name or service ID}" 174 | declare service_port="${2:?Missing the port number, such as '80/tcp'.}" 175 | 176 | "ufw-docker--service-${service_action}" "${service_id_or_name}" "${service_port}" 177 | ;; 178 | *) 179 | ufw-docker--help 180 | ;; 181 | esac 182 | } 183 | 184 | function ufw-docker--get-service-id() { 185 | declare service_name="$1" 186 | docker service inspect "${service_name}" --format "{{.ID}}" 187 | } 188 | 189 | function ufw-docker--get-service-name() { 190 | declare service_name="$1" 191 | docker service inspect "${service_name}" --format "{{.Spec.Name}}" 192 | } 193 | 194 | function ufw-docker--service-allow() { 195 | declare service_name="$1" 196 | declare service_port="$2" 197 | declare service_proto=tcp 198 | 199 | if [[ -n "$service_port" ]] && 200 | ! 
grep -E '^[0-9]+(/(tcp|udp))?$' <<< "$service_port" &>/dev/null; then
    die "Invalid port syntax: $service_port"
    return 1
fi

# Split an explicit "port/proto" argument into its two parts.
if [[ "$service_port" = */* ]]; then
    service_proto="${service_port#*/}"
    service_port="${service_port%/*}"
fi

# Resolve the service ID and canonical name; abort if the service is unknown.
declare service_id
service_id="$(ufw-docker--get-service-id "${service_name}")"
[[ -z "${service_id:-}" ]] && die "Could not find service \"$service_name\""

service_name="$(ufw-docker--get-service-name "${service_name}")"

# Walk the service's published ports and build the agent environment entry
# "ufw_public_<id>=<name>/<published-port>/<proto>" for the requested target port.
exec 9< <(docker service inspect "$service_name" \
    --format '{{range .Endpoint.Spec.Ports}}{{.PublishedPort}} {{.TargetPort}}/{{.Protocol}}{{"\n"}}{{end}}')
while read -u 9 -r port target_port; do
    if [[ "$target_port" = "${service_port}/${service_proto}" ]]; then
        declare service_env="ufw_public_${service_id}=${service_name}/${port}/${service_proto}"
        break;
    fi
done
exec 9<&-

[[ -z "${service_env:-}" ]] && die "Service $service_name does not publish port $service_port."

if ! docker service inspect "$ufw_docker_agent" &>/dev/null; then
    # First use: create the global agent service carrying the rule in its environment.
    err "Not found ufw-docker-agent service, creating ..."
    docker service create --name "$ufw_docker_agent" --mode global \
        --mount type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock \
        --mount type=bind,source=/etc/ufw,target=/etc/ufw,readonly=true \
        --env ufw_docker_agent_image="${ufw_docker_agent_image}" \
        --env DEBUG="${DEBUG:-}" \
        --env "${service_env}" \
        "${ufw_docker_agent_image}"
else
    # Agent already running: add the new rule and drop any stale ufw_public_*
    # entries that reference the same service name under a different ID.
    declare -a service_env_list
    service_env_list+=(--env-add "${service_env}")

    exec 8< <(ufw-docker--get-env-list)
    while read -u 8 -r id value; do
        [[ "$id" = "$service_id" ]] && continue
        [[ "$value" = "${service_name}"/* ]] && service_env_list+=(--env-rm "ufw_public_${id}")
    done
    exec 8<&-

    docker service update --update-parallelism=0 \
        --env-add ufw_docker_agent_image="${ufw_docker_agent_image}" \
        --env-add DEBUG="${DEBUG:-}" \
        "${service_env_list[@]}" \
        --image "${ufw_docker_agent_image}" \
        "${ufw_docker_agent}"
fi
}

# Emit one "<id> <name>/<port>/<proto>" line per ufw_public_* variable found in
# the agent service's environment (the "ufw_public_" prefix is stripped and the
# first "=" is rewritten to a space).
function ufw-docker--get-env-list() {
    docker service inspect "${ufw_docker_agent}" \
        --format '{{range $k,$v := .Spec.TaskTemplate.ContainerSpec.Env}}{{ $v }}{{"\n"}}{{end}}' |
        sed -e '/^ufw_public_/!d' \
            -e 's/^ufw_public_//' \
            -e 's/=/ /'
}

# Rewrite a service's agent environment entry to "<name>/deny", which tells the
# agent to remove that service's UFW rules.  "$1" may be the service ID or name.
function ufw-docker--service-delete() {
    declare service_name="$1"

    exec 8< <(ufw-docker--get-env-list)
    while read -u 8 -r id value; do
        if [[ "$id" = "$service_name" ]] || [[ "$value" = "${service_name}"/* ]]; then
            declare service_id="$id"
            service_name="${value%%/*}"
            declare service_env="ufw_public_${service_id}=${service_name}/deny"
            break;
        fi
    done
    exec 8<&-

    [[ -z "${service_env:-}" ]] && die "Could not find service \"$service_name\""

    docker service update --update-parallelism=0 \
        --env-add ufw_docker_agent_image="${ufw_docker_agent_image}" \
        --env-add "${service_env}" \
        --image "${ufw_docker_agent_image}" \
        "${ufw_docker_agent}"
}

# Pass arbitrary arguments straight through to ufw(8).
function ufw-docker--raw-command() {
    ufw "$@"
}

after_rules="/etc/ufw/after.rules"

# Print the live DOCKER-USER chain and the diff between the installed
# after.rules and the expected ufw-docker section.
function ufw-docker--check() {
    err "\\n########## iptables -n -L DOCKER-USER ##########"
    iptables -n -L DOCKER-USER

    err "\\n\\n########## diff $after_rules ##########"
    ufw-docker--check-install && err "\\nCheck done."
}

# Temporary files registered via rm-on-exit() are removed by on-exit().
declare -a files_to_be_deleted

function rm-on-exit() {
    [[ $# -gt 0 ]] && files_to_be_deleted+=("$@")
}

function on-exit() {
    for file in "${files_to_be_deleted[@]:-}"; do
        [[ -f "$file" ]] && rm -r "$file"
    done
    files_to_be_deleted=()
}

trap on-exit EXIT INT TERM QUIT ABRT ERR

# Build the expected after.rules into a temp file — the installed file minus
# any existing "BEGIN/END UFW AND DOCKER" section, plus a freshly generated
# section — and diff it against the installed file.  Exits non-zero (diff's
# status) when they differ; the temp file path is left in $after_rules_tmp
# for ufw-docker--install to reuse.
function ufw-docker--check-install() {
    after_rules_tmp="${after_rules_tmp:-$(mktemp)}"
    rm-on-exit "$after_rules_tmp"

    sed "/^# BEGIN UFW AND DOCKER/,/^# END UFW AND DOCKER/d" "$after_rules" > "$after_rules_tmp"
    >> "${after_rules_tmp}" cat <<\EOF
# BEGIN UFW AND DOCKER
*filter
:ufw-user-forward - [0:0]
:ufw-docker-logging-deny - [0:0]
:DOCKER-USER - [0:0]
-A DOCKER-USER -j ufw-user-forward

-A DOCKER-USER -j RETURN -s 10.0.0.0/8
-A DOCKER-USER -j RETURN -s 172.16.0.0/12
-A DOCKER-USER -j RETURN -s 192.168.0.0/16

-A DOCKER-USER -p udp -m udp --sport 53 --dport 1024:65535 -j RETURN

-A DOCKER-USER -j ufw-docker-logging-deny -p tcp -m tcp --tcp-flags FIN,SYN,RST,ACK SYN -d 192.168.0.0/16
-A DOCKER-USER -j ufw-docker-logging-deny -p tcp -m tcp --tcp-flags FIN,SYN,RST,ACK SYN -d 10.0.0.0/8
-A DOCKER-USER -j ufw-docker-logging-deny -p tcp -m tcp --tcp-flags FIN,SYN,RST,ACK SYN -d 172.16.0.0/12
-A DOCKER-USER -j ufw-docker-logging-deny -p udp -m udp --dport 0:32767 -d 192.168.0.0/16
-A DOCKER-USER -j ufw-docker-logging-deny -p udp -m udp --dport 0:32767 -d 10.0.0.0/8
-A DOCKER-USER -j ufw-docker-logging-deny -p udp -m udp --dport 0:32767 -d 172.16.0.0/12

-A DOCKER-USER -j RETURN

-A ufw-docker-logging-deny -m limit --limit 3/min --limit-burst 10 -j LOG --log-prefix "[UFW DOCKER BLOCK] "
-A ufw-docker-logging-deny -j DROP

COMMIT
# END UFW AND DOCKER
EOF

    diff -u --color=auto "$after_rules" "$after_rules_tmp"
}

# If the installed after.rules lacks the expected ufw-docker section, back the
# file up and write the regenerated version produced by check-install.
function ufw-docker--install() {
    if ! ufw-docker--check-install; then
        local after_rules_bak
        after_rules_bak="${after_rules}-ufw-docker~$(date '+%Y-%m-%d-%H%M%S')~"
        err "\\nBacking up $after_rules to $after_rules_bak"
        cp "$after_rules" "$after_rules_bak"
        cat "$after_rules_tmp" > "$after_rules"
        err "Please restart UFW service manually by using the following command:"
        if type systemctl &>/dev/null; then
            err "    sudo systemctl restart ufw"
        else
            err "    sudo service ufw restart"
        fi
    fi
}

# NOTE(review): the angle-bracketed placeholders below were stripped from the
# checked-in copy (lines read "ufw-docker service allow >>" etc.); they have
# been reconstructed from the command syntax implemented above — verify
# against the upstream ufw-docker help text.
function ufw-docker--help() {
    cat <<EOF >&2
Usage:
  ufw-docker <list|allow> [docker-instance-id-or-name [port[/tcp|/udp]] [network]]
  ufw-docker delete allow [docker-instance-id-or-name [port[/tcp|/udp]] [network]]

  ufw-docker service allow <docker-service-name> <port>/<tcp|udp>
  ufw-docker service delete allow <docker-service-name>

  ufw-docker <status|install|check|help>

Examples:
  ufw-docker help

  ufw-docker check    # Check the installation of firewall rules
  ufw-docker install  # Install firewall rules

  ufw-docker status

  ufw-docker list httpd


  ufw-docker allow httpd
  ufw-docker allow httpd 80
  ufw-docker allow httpd 80/tcp
  ufw-docker allow httpd 80/tcp default

  ufw-docker delete allow httpd
  ufw-docker delete allow httpd 80/tcp
  ufw-docker delete allow httpd 80/tcp default

  ufw-docker service allow httpd 80/tcp

  ufw-docker service delete allow httpd
EOF
}

# Filter: drop lines that are empty or contain only blanks.
function remove_blank_lines() {
    sed '/^[[:blank:]]*$/d'
}

# Print all arguments to stderr; "-e" enables backslash escapes ("\n" etc.).
function err() {
    echo -e "$@" >&2
}

# Print an error to stderr and terminate the script.
function die() {
    err "ERROR:" "$@"
    exit 1
}

# __main__

if ! ufw status 2>/dev/null | grep -Fq "Status: active" ; then
    die "UFW is disabled or you are not root user, or mismatched iptables legacy/nf_tables, current $(iptables --version)"
fi

ufw_action="${1:-help}"

case "$ufw_action" in
    delete)
        shift || true
        if [[ "${1:?Invalid 'delete' command syntax.}" != "allow" ]]; then
            die "\"delete\" command only support removing allowed rules"
        fi
        ;&  # fall through: "delete allow" takes the same arguments as "allow"
    list|allow)
        shift || true

        INSTANCE_ID="${1:?Docker instance name/ID cannot be empty.}"
        INSTANCE_NAME="$(ufw-docker--instance-name "$INSTANCE_ID")"
        shift || true

        INSTANCE_PORT="${1:-}"
        # Anchored pattern (^...$), matching the service-path validation above;
        # previously unanchored, so input such as "x80" or "80abc" slipped through.
        if [[ -n "$INSTANCE_PORT" && ! "$INSTANCE_PORT" =~ ^[0-9]+(/(tcp|udp))?$ ]]; then
            die "invalid port syntax: \"$INSTANCE_PORT\"."
        fi

        PROTO="$DEFAULT_PROTO"
        if [[ "$INSTANCE_PORT" = */udp ]]; then
            PROTO=udp
        fi
        shift || true

        NETWORK="${1:-}"

        INSTANCE_PORT="${INSTANCE_PORT%/*}"

        "ufw-docker--$ufw_action" "$INSTANCE_NAME" "$INSTANCE_PORT" "$PROTO" "$NETWORK"
        ;;
    service|raw-command|add-service-rule)
        shift || true
        "ufw-docker--$ufw_action" "$@"
        ;;
    status|install|check)
        ufw-docker--"$ufw_action"
        ;;
    *)
        ufw-docker--help
        ;;
esac
--------------------------------------------------------------------------------