├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── ansible.cfg ├── group_vars ├── .gitignore └── k8s │ └── vars.yml.sample ├── host_vars ├── .gitignore ├── desktop │ └── vars.yml.sample ├── gitea │ └── vars.yml.sample ├── harbor │ └── vars.yml.sample ├── nextcloud │ └── vars.yml.sample ├── octopi │ └── vars.yml.sample └── router │ └── vars.yml.sample ├── hosts.yml.sample ├── playbook.yml ├── roles ├── all │ ├── files │ │ └── debian-sources.list │ └── tasks │ │ ├── debian.yml │ │ ├── main.yml │ │ ├── openbsd.yml │ │ └── raspbian.yml ├── auto-upgrades │ ├── files │ │ └── 20auto-upgrades │ └── tasks │ │ └── main.yml ├── desktop │ └── tasks │ │ ├── main.yml │ │ └── ubiquiti.yml ├── docker │ ├── files │ │ └── daemon.json │ ├── handlers │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── gitea │ ├── files │ │ ├── app.ini │ │ └── gitea.service │ ├── handlers │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── harbor │ ├── files │ │ ├── harbor.service │ │ └── harbor.yml │ ├── handlers │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── k8s-all │ └── tasks │ │ └── main.yml ├── k8s-control-plane │ └── tasks │ │ └── main.yml ├── k8s-nodes │ └── tasks │ │ └── main.yml ├── nextcloud │ ├── files │ │ └── nextcloud.conf │ ├── handlers │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── octopi │ ├── handlers │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── router │ ├── files │ │ ├── .gitignore │ │ ├── dhcpd.conf.sample │ │ ├── pf.conf │ │ └── unbound.conf.sample │ ├── handlers │ │ └── main.yml │ └── tasks │ │ └── main.yml └── vms │ └── tasks │ └── main.yml └── scripts ├── cleanup-k8s.bash ├── run-playbook.bash └── upgrade-k8s.bash /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | hosts.yml 3 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2020 Jason Vigil 2 | 3 | Redistribution and use 
in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 4 | 5 | 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 6 | 7 | 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 8 | 9 | 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 10 | 11 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | HOSTS_FILE := hosts.yml 2 | 3 | %: 4 | HOSTS_FILE=$(HOSTS_FILE) ./scripts/run-playbook.bash $@ 5 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Homelab Setup 2 | 3 | This project is the "configuration as code" for my homelab environment. 
The project goal is to simplify management, updates, and configuration changes for every service running on my home network so that I don't get bogged down with day 2 operations (preventing me from focusing on my other projects). This document describes the steps required to (re)deploy each service from scratch. 4 | 5 | 6 | ## Table of Contents 7 | 1. [Router](#router) 8 | 1. [Gitea](#gitea) 9 | 1. [Nextcloud](#nextcloud) 10 | 1. [Harbor](#harbor) 11 | 1. [Kubernetes](#kubernetes) 12 | 1. [OctoPi](#octopi) 13 | 14 | 15 | ## Router 16 | 17 | 1. Install OpenBSD 6.7 18 | - Enable SSH 19 | 1. Setup passwordless SSH 20 | ``` 21 | mkdir ~/.ssh 22 | chmod 700 ~/.ssh 23 | vi ~/.ssh/authorized_keys 24 | chmod 600 ~/.ssh/authorized_keys 25 | ``` 26 | 1. Install Python 27 | ``` 28 | pkg_add python-3.8.2 29 | ``` 30 | 1. Run router Ansible role 31 | ``` 32 | make router 33 | ``` 34 | 35 | 36 | ## Gitea 37 | 38 | 1. Provision a VM 39 | - 1 CPU 40 | - 1 GB memory 41 | - 8 GB disk 42 | 1. Install Debian 10 43 | - Disable GUI 44 | - Disable print server 45 | - Enable SSH 46 | 1. Setup passwordless SSH 47 | ``` 48 | mkdir ~/.ssh 49 | chmod 700 ~/.ssh 50 | nano ~/.ssh/authorized_keys 51 | chmod 600 ~/.ssh/authorized_keys 52 | ``` 53 | 1. Install sudo 54 | ``` 55 | apt install sudo 56 | usermod -aG sudo 57 | ``` 58 | 1. Run gitea Ansible role 59 | ``` 60 | make gitea 61 | ``` 62 | 1. Create gitea user 63 | ``` 64 | gitea --config /etc/gitea/app.ini admin create-user \ 65 | --username \ 66 | --password password \ 67 | --email \ 68 | --must-change-password \ 69 | --admin 70 | ``` 71 | 1. Log in, setup SSH keys, GPG keys, repos, etc. 72 | 73 | 74 | ## Nextcloud 75 | 76 | 1. Provision a VM 77 | - 1 CPU 78 | - 2 GB memory 79 | - 64 GB disk 80 | 1. Install Debian 10 81 | - Disable GUI 82 | - Disable print server 83 | - Enable SSH 84 | 1. 
Setup passwordless SSH 85 | ``` 86 | mkdir ~/.ssh 87 | chmod 700 ~/.ssh 88 | nano ~/.ssh/authorized_keys 89 | chmod 600 ~/.ssh/authorized_keys 90 | ``` 91 | 1. Install sudo 92 | ``` 93 | apt install sudo 94 | usermod -aG sudo 95 | ``` 96 | 1. Run nextcloud Ansible role 97 | ``` 98 | make nextcloud 99 | ``` 100 | 1. Complete installation with web UI 101 | 1. Install "Deck" App 102 | 103 | 104 | ## Harbor 105 | 106 | 1. Provision a VM 107 | - 2 CPU 108 | - 4 GB memory 109 | - 128 GB disk 110 | 1. Install Debian 10 111 | - Disable GUI 112 | - Disable print server 113 | - Enable SSH 114 | 1. Setup passwordless SSH 115 | ``` 116 | mkdir ~/.ssh 117 | chmod 700 ~/.ssh 118 | nano ~/.ssh/authorized_keys 119 | chmod 600 ~/.ssh/authorized_keys 120 | ``` 121 | 1. Install sudo 122 | ``` 123 | apt install sudo 124 | usermod -aG sudo 125 | ``` 126 | 1. Run harbor Ansible role 127 | ``` 128 | make harbor 129 | ``` 130 | 1. Log in to web UI and change admin password 131 | 1. Configure Docker client to trust private CA 132 | ``` 133 | sudo mkdir -p /etc/docker/certs.d/harbor.mydomain.com/ 134 | sudo vim /etc/docker/certs.d/harbor.mydomain.com/ca.crt 135 | ``` 136 | 137 | 138 | ## Kubernetes 139 | 140 | 1. Provision 6 VMs 141 | - 2 CPU 142 | - 7 GB memory 143 | - 32 GB disk 144 | 1. Install Debian 10 145 | - Disable GUI 146 | - Disable print server 147 | - Enable SSH 148 | 1. Setup passwordless SSH 149 | ``` 150 | mkdir ~/.ssh 151 | chmod 700 ~/.ssh 152 | nano ~/.ssh/authorized_keys 153 | chmod 600 ~/.ssh/authorized_keys 154 | ``` 155 | 1. Install sudo 156 | ``` 157 | apt install sudo 158 | usermod -aG sudo 159 | ``` 160 | 1. Run k8s Ansible role 161 | ``` 162 | make k8s 163 | ``` 164 | 1. (optional) Copy ~/.kube/config to local machine 165 | ``` 166 | mkdir ~/.kube 167 | chmod 770 ~/.kube 168 | scp user@k8s-control-plane.mydomain.com:~/.kube/config ~/.kube/config 169 | ``` 170 | 171 | 172 | ## OctoPi 173 | 174 | 1. Install [OctoPi](https://github.com/guysoft/OctoPi) 175 | 1. 
Configure WiFi by editing `octopi-wpa-supplicant.txt` on the root partition 176 | 1. Connect to Raspberry Pi 177 | ``` 178 | ssh pi@octopi.mydomain.com # password: raspberry 179 | ``` 180 | 1. Edit system settings 181 | ``` 182 | sudo raspi-config 183 | # change password 184 | # set locale (under Localisation) 185 | # set timezone (under Localisation) 186 | ``` 187 | 1. Change root user password 188 | ``` 189 | sudo su - 190 | passwd 191 | ``` 192 | 1. Create user account 193 | ``` 194 | adduser 195 | usermod -aG sudo 196 | ``` 197 | 1. Setup passwordless SSH for user 198 | ``` 199 | mkdir .ssh 200 | chmod 700 .ssh 201 | touch .ssh/authorized_keys 202 | chmod 600 .ssh/authorized_keys 203 | vi .ssh/authorized_keys 204 | ``` 205 | 1. Run homelab-setup ansible playbook 206 | ``` 207 | make octopi 208 | ``` 209 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | remote_tmp=/tmp/ 3 | 4 | [ssh_connection] 5 | pipelining=True 6 | -------------------------------------------------------------------------------- /group_vars/.gitignore: -------------------------------------------------------------------------------- 1 | vars.yml 2 | -------------------------------------------------------------------------------- /group_vars/k8s/vars.yml.sample: -------------------------------------------------------------------------------- 1 | --- 2 | ansible_become_pass: k8s_sudo_password 3 | -------------------------------------------------------------------------------- /host_vars/.gitignore: -------------------------------------------------------------------------------- 1 | vars.yml 2 | -------------------------------------------------------------------------------- /host_vars/desktop/vars.yml.sample: -------------------------------------------------------------------------------- 1 | --- 2 | ansible_become_pass: desktop_sudo_password 3 | 
-------------------------------------------------------------------------------- /host_vars/gitea/vars.yml.sample: -------------------------------------------------------------------------------- 1 | --- 2 | ansible_become_pass: gitea_sudo_password 3 | gitea_hostname: gitea.mydomain.com 4 | gitea_user_name: username 5 | gitea_user_email: username@mydomain.com 6 | gitea_postgres_password: postgres_password 7 | gitea_cert: | 8 | tls_certificate 9 | gitea_key: | 10 | private_key 11 | -------------------------------------------------------------------------------- /host_vars/harbor/vars.yml.sample: -------------------------------------------------------------------------------- 1 | --- 2 | ansible_become_pass: harbor_root_password 3 | harbor_hostname: harbor.mydomain.com 4 | harbor_database_password: database_password 5 | harbor_cert: | 6 | tls_certificate 7 | harbor_key: | 8 | private_key 9 | -------------------------------------------------------------------------------- /host_vars/nextcloud/vars.yml.sample: -------------------------------------------------------------------------------- 1 | --- 2 | ansible_become_pass: nextcloud_sudo_password 3 | nextcloud_hostname: nextcloud.mydomain.com 4 | nextcloud_postgres_password: postgres_password 5 | nextcloud_cert: | 6 | tls_certificate 7 | nextcloud_key: | 8 | private_key 9 | -------------------------------------------------------------------------------- /host_vars/octopi/vars.yml.sample: -------------------------------------------------------------------------------- 1 | --- 2 | ansible_become_pass: octopi_sudo_password 3 | octopi_cert_pem: | 4 | octopi_cert 5 | octopi_key 6 | -------------------------------------------------------------------------------- /host_vars/router/vars.yml.sample: -------------------------------------------------------------------------------- 1 | --- 2 | ansible_become_pass: router_root_password 3 | -------------------------------------------------------------------------------- 
/hosts.yml.sample: -------------------------------------------------------------------------------- 1 | all: 2 | hosts: 3 | router: 4 | ansible_host: 192.168.1.1 5 | ansible_python_interpreter: /usr/local/bin/python3.8 6 | ansible_become_method: su 7 | gitea: 8 | ansible_host: gitea.mydomain.com 9 | ansible_python_interpreter: /usr/bin/python2 10 | nextcloud: 11 | ansible_host: nextcloud.mydomain.com 12 | ansible_python_interpreter: /usr/bin/python2 13 | harbor: 14 | ansible_host: harbor.mydomain.com 15 | ansible_python_interpreter: /usr/bin/python2 16 | desktop: 17 | ansible_host: desktop.mydomain.com 18 | ansible_python_interpreter: /usr/bin/python3.8 19 | k8s-control-plane: 20 | ansible_host: k8s-control-plane.mydomain.com 21 | ansible_python_interpreter: /usr/bin/python2 22 | k8s-node-1: 23 | ansible_host: k8s-node-1.mydomain.com 24 | ansible_python_interpreter: /usr/bin/python2 25 | k8s-node-2: 26 | ansible_host: k8s-node-2.mydomain.com 27 | ansible_python_interpreter: /usr/bin/python2 28 | k8s-node-3: 29 | ansible_host: k8s-node-3.mydomain.com 30 | ansible_python_interpreter: /usr/bin/python2 31 | k8s-node-4: 32 | ansible_host: k8s-node-4.mydomain.com 33 | ansible_python_interpreter: /usr/bin/python2 34 | k8s-node-5: 35 | ansible_host: k8s-node-5.mydomain.com 36 | ansible_python_interpreter: /usr/bin/python2 37 | octopi: 38 | ansible_host: octopi.mydomain.com 39 | ansible_python_interpreter: /usr/bin/python3.7 40 | children: 41 | vms: 42 | hosts: 43 | gitea: 44 | nextcloud: 45 | harbor: 46 | desktop: 47 | k8s-control-plane: 48 | k8s-node-1: 49 | k8s-node-2: 50 | k8s-node-3: 51 | k8s-node-4: 52 | k8s-node-5: 53 | k8s: 54 | hosts: 55 | k8s-control-plane: 56 | k8s-node-1: 57 | k8s-node-2: 58 | k8s-node-3: 59 | k8s-node-4: 60 | k8s-node-5: 61 | k8s_nodes: 62 | hosts: 63 | k8s-node-1: 64 | k8s-node-2: 65 | k8s-node-3: 66 | k8s-node-4: 67 | k8s-node-5: 68 | -------------------------------------------------------------------------------- /playbook.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | roles: 4 | - all 5 | 6 | - hosts: vms 7 | roles: 8 | - vms 9 | 10 | - hosts: router 11 | roles: 12 | - router 13 | 14 | - hosts: gitea 15 | roles: 16 | - auto-upgrades 17 | - gitea 18 | 19 | - hosts: nextcloud 20 | roles: 21 | - auto-upgrades 22 | - nextcloud 23 | 24 | - hosts: harbor 25 | roles: 26 | - auto-upgrades 27 | - docker 28 | - harbor 29 | 30 | - hosts: desktop 31 | roles: 32 | - desktop 33 | 34 | - hosts: k8s 35 | roles: 36 | - auto-upgrades 37 | - docker 38 | - k8s-all 39 | 40 | - hosts: k8s-control-plane 41 | roles: 42 | - k8s-control-plane 43 | 44 | - hosts: k8s_nodes 45 | roles: 46 | - k8s-nodes 47 | 48 | - hosts: octopi 49 | roles: 50 | - octopi 51 | -------------------------------------------------------------------------------- /roles/all/files/debian-sources.list: -------------------------------------------------------------------------------- 1 | # security updates 2 | deb http://security.debian.org/debian-security buster/updates main contrib non-free 3 | deb-src http://security.debian.org/debian-security buster/updates main contrib non-free 4 | 5 | # base 6 | deb http://deb.debian.org/debian/ buster main contrib non-free 7 | deb-src http://deb.debian.org/debian/ buster main contrib non-free 8 | 9 | # stable updates 10 | deb http://deb.debian.org/debian/ buster-updates main contrib non-free 11 | deb-src http://deb.debian.org/debian/ buster-updates main contrib non-free 12 | 13 | # backports 14 | deb http://deb.debian.org/debian/ buster-backports main contrib non-free 15 | deb-src http://deb.debian.org/debian/ buster-backports main contrib non-free 16 | 17 | -------------------------------------------------------------------------------- /roles/all/tasks/debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: debian - configure package repositories 3 | become: true 4 | copy: 5 | 
src: roles/all/files/debian-sources.list 6 | dest: /etc/apt/sources.list 7 | owner: root 8 | group: root 9 | mode: 0644 10 | 11 | - name: debian - upgrade installed packages 12 | become: true 13 | apt: 14 | update_cache: true 15 | upgrade: safe 16 | 17 | - name: debian - install vim 18 | become: true 19 | apt: 20 | name: vim 21 | -------------------------------------------------------------------------------- /roles/all/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - when: ansible_facts['distribution'] == "Debian" and ansible_facts['lsb']['id'] != "Raspbian" 3 | import_tasks: debian.yml 4 | 5 | - when: ansible_facts['lsb']['id'] is defined and ansible_facts['lsb']['id'] == "Raspbian" 6 | import_tasks: raspbian.yml 7 | 8 | - when: ansible_facts['distribution'] == "OpenBSD" 9 | import_tasks: openbsd.yml 10 | -------------------------------------------------------------------------------- /roles/all/tasks/openbsd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: openbsd - apply system patches 3 | become: true 4 | syspatch: 5 | apply: true 6 | 7 | - name: openbsd - upgrade installed packages 8 | become: true 9 | openbsd_pkg: 10 | name: '*' 11 | state: latest 12 | 13 | - name: openbsd - install vim 14 | become: true 15 | openbsd_pkg: 16 | name: vim-8.2.534-no_x11 17 | state: latest 18 | -------------------------------------------------------------------------------- /roles/all/tasks/raspbian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: raspbian - upgrade installed packages 3 | become: true 4 | apt: 5 | update_cache: true 6 | upgrade: safe 7 | 8 | - name: raspbian - install vim 9 | become: true 10 | apt: 11 | name: vim 12 | -------------------------------------------------------------------------------- /roles/auto-upgrades/files/20auto-upgrades: 
-------------------------------------------------------------------------------- 1 | APT::Periodic::Update-Package-Lists "1"; 2 | APT::Periodic::Unattended-Upgrade "1"; 3 | -------------------------------------------------------------------------------- /roles/auto-upgrades/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - when: ansible_distribution != 'Debian' or ansible_distribution_version != '10' 3 | name: check platform 4 | fail: 5 | msg: This role requires Debian 10 6 | 7 | - name: install dependencies 8 | become: true 9 | apt: 10 | name: '{{ auto_updates_package_name }}' 11 | loop: 12 | - apt-listchanges 13 | - mailutils 14 | - unattended-upgrades 15 | loop_control: 16 | loop_var: auto_updates_package_name 17 | 18 | - name: enable automatic upgrades 19 | become: true 20 | copy: 21 | src: roles/auto-upgrades/files/20auto-upgrades 22 | dest: /etc/apt/apt.conf.d/20auto-upgrades 23 | owner: root 24 | group: root 25 | mode: 0644 26 | 27 | - name: configure mail recipient 28 | become: true 29 | lineinfile: 30 | path: /etc/apt/apt.conf.d/50unattended-upgrades 31 | insertafter: '//Unattended-Upgrade::Mail "";' 32 | line: 'Unattended-Upgrade::Mail "root";' 33 | 34 | - name: enable auto reboot 35 | become: true 36 | lineinfile: 37 | path: /etc/apt/apt.conf.d/50unattended-upgrades 38 | insertafter: '//Unattended-Upgrade::Automatic-Reboot "false";' 39 | line: 'Unattended-Upgrade::Automatic-Reboot "true";' 40 | 41 | - name: configure auto reboot time 42 | become: true 43 | lineinfile: 44 | path: /etc/apt/apt.conf.d/50unattended-upgrades 45 | insertafter: '//Unattended-Upgrade::Automatic-Reboot-Time "02:00";' 46 | line: 'Unattended-Upgrade::Automatic-Reboot-Time "02:00";' 47 | -------------------------------------------------------------------------------- /roles/desktop/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - when: ansible_distribution != 
'Ubuntu' or ansible_distribution_version != '20.04' 3 | name: check platform 4 | fail: 5 | msg: This playbook requires Ubuntu 20.04 6 | 7 | - name: create ~/workspace directory 8 | file: 9 | path: /home/{{ ansible_user_id }}/workspace 10 | state: directory 11 | mode: 0755 12 | 13 | - name: create ~/.cache/ansible-workdir directory 14 | file: 15 | path: /home/{{ ansible_user_id }}/.cache/ansible-workdir 16 | state: directory 17 | mode: 0755 18 | 19 | - include_tasks: '{{ task_name }}' 20 | loop: 21 | - ubiquiti.yml 22 | loop_control: 23 | loop_var: task_name 24 | -------------------------------------------------------------------------------- /roles/desktop/tasks/ubiquiti.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - when: ansible_distribution != 'Ubuntu' or ansible_distribution_version != '20.04' 3 | name: check platform 4 | fail: 5 | msg: This playbook requires Ubuntu 20.04 6 | 7 | - name: install dependencies 8 | become: true 9 | apt: 10 | name: '{{ ubiquiti_package_name }}' 11 | loop: 12 | - apt-transport-https 13 | - ca-certificates 14 | - openjdk-8-jre 15 | loop_control: 16 | loop_var: ubiquiti_package_name 17 | 18 | - name: add ubiquiti gpg key 19 | become: true 20 | apt_key: 21 | url: https://dl.ui.com/unifi/unifi-repo.gpg 22 | 23 | - name: add ubiquiti package repository 24 | become: true 25 | apt_repository: 26 | repo: deb https://www.ui.com/downloads/unifi/debian stable ubiquiti 27 | 28 | - name: install ubiquiti controller 29 | become: true 30 | apt: 31 | name: unifi 32 | 33 | - name: disable and stop ubiquiti 34 | become: true 35 | systemd: 36 | enabled: false 37 | state: stopped 38 | name: unifi 39 | -------------------------------------------------------------------------------- /roles/docker/files/daemon.json: -------------------------------------------------------------------------------- 1 | { 2 | "exec-opts": ["native.cgroupdriver=systemd"], 3 | "log-driver": "json-file", 4 | "log-opts": { 5 | 
"max-size": "100m" 6 | }, 7 | "storage-driver": "overlay2" 8 | } 9 | -------------------------------------------------------------------------------- /roles/docker/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart docker 3 | become: true 4 | systemd: 5 | state: restarted 6 | daemon_reload: true 7 | name: docker 8 | -------------------------------------------------------------------------------- /roles/docker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - when: ansible_distribution != 'Debian' or ansible_distribution_version != '10' 3 | name: check platform 4 | fail: 5 | msg: This role requires Debian 10 6 | 7 | - name: install docker dependencies 8 | become: true 9 | apt: 10 | name: '{{ docker_dependency_package_name }}' 11 | loop: 12 | - apt-transport-https 13 | - ca-certificates 14 | - curl 15 | - gnupg-agent 16 | - software-properties-common 17 | loop_control: 18 | loop_var: docker_dependency_package_name 19 | 20 | - name: add docker gpg key 21 | become: true 22 | apt_key: 23 | url: https://download.docker.com/linux/debian/gpg 24 | 25 | - name: add docker package repository 26 | become: true 27 | apt_repository: 28 | repo: deb [arch=amd64] https://download.docker.com/linux/debian buster stable 29 | 30 | - name: install docker packages 31 | become: true 32 | apt: 33 | update_cache: true 34 | name: '{{ docker_package_name }}' 35 | loop: 36 | - containerd.io 37 | - docker-ce 38 | - docker-ce-cli 39 | loop_control: 40 | loop_var: docker_package_name 41 | 42 | - name: allow user to access docker socket 43 | become: true 44 | user: 45 | name: '{{ ansible_user_id }}' 46 | groups: docker 47 | append: true 48 | 49 | - name: setup docker daemon config 50 | become: true 51 | copy: 52 | src: roles/docker/files/daemon.json 53 | dest: /etc/docker/daemon.json 54 | owner: root 55 | group: root 56 | mode: 0644 57 | notify: restart docker 
58 | 59 | - name: start and enable docker service 60 | become: true 61 | systemd: 62 | enabled: true 63 | state: started 64 | name: docker 65 | -------------------------------------------------------------------------------- /roles/gitea/files/app.ini: -------------------------------------------------------------------------------- 1 | APP_NAME = Homelab: Gitea 2 | RUN_USER = git 3 | RUN_MODE = prod 4 | 5 | [oauth2] 6 | JWT_SECRET = ${JWT_SECRET} 7 | 8 | [security] 9 | INTERNAL_TOKEN = ${INTERNAL_TOKEN} 10 | INSTALL_LOCK = true 11 | SECRET_KEY = ${SECRET_KEY} 12 | PASSWORD_COMPLEXITY = lower,upper,digit 13 | 14 | [database] 15 | DB_TYPE = postgres 16 | HOST = 127.0.0.1:5432 17 | NAME = gitea 18 | USER = gitea 19 | PASSWD = ${POSTGRES_USER_PASSWORD} 20 | SSL_MODE = disable 21 | CHARSET = utf8 22 | PATH = /var/lib/gitea/data/gitea.db 23 | 24 | [repository] 25 | ROOT = /home/git/gitea-repositories 26 | 27 | [server] 28 | PROTOCOL = https 29 | CERT_FILE = cert.pem 30 | KEY_FILE = key.pem 31 | SSH_DOMAIN = ${HOSTNAME} 32 | DOMAIN = ${HOSTNAME} 33 | HTTP_PORT = 443 34 | ROOT_URL = https://${HOSTNAME}:443/ 35 | DISABLE_SSH = false 36 | SSH_PORT = 22 37 | LFS_START_SERVER = true 38 | LFS_CONTENT_PATH = /var/lib/gitea/data/lfs 39 | LFS_JWT_SECRET = ${LFS_JWT_SECRET} 40 | OFFLINE_MODE = false 41 | REDIRECT_OTHER_PORT = true 42 | PORT_TO_REDIRECT = 80 43 | 44 | [mailer] 45 | ENABLED = false 46 | 47 | [service] 48 | REGISTER_EMAIL_CONFIRM = false 49 | ENABLE_NOTIFY_MAIL = false 50 | DISABLE_REGISTRATION = true 51 | ALLOW_ONLY_EXTERNAL_REGISTRATION = false 52 | ENABLE_CAPTCHA = false 53 | REQUIRE_SIGNIN_VIEW = true 54 | DEFAULT_KEEP_EMAIL_PRIVATE = true 55 | DEFAULT_ALLOW_CREATE_ORGANIZATION = true 56 | DEFAULT_ENABLE_TIMETRACKING = true 57 | NO_REPLY_ADDRESS = noreply.localhost 58 | 59 | [picture] 60 | DISABLE_GRAVATAR = true 61 | ENABLE_FEDERATED_AVATAR = false 62 | 63 | [openid] 64 | ENABLE_OPENID_SIGNIN = false 65 | ENABLE_OPENID_SIGNUP = false 66 | 67 | [session] 68 | 
PROVIDER = file 69 | 70 | [log] 71 | MODE = file 72 | LEVEL = info 73 | ROOT_PATH = /var/lib/gitea/log 74 | -------------------------------------------------------------------------------- /roles/gitea/files/gitea.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Homelab: Gitea 3 | After=syslog.target 4 | After=network.target 5 | Requires=postgresql.service 6 | 7 | [Service] 8 | RestartSec=2s 9 | Type=simple 10 | User=git 11 | Group=git 12 | WorkingDirectory=/var/lib/gitea/ 13 | ExecStart=/usr/local/bin/gitea web --config /etc/gitea/app.ini 14 | Restart=always 15 | Environment=USER=git HOME=/home/git GITEA_WORK_DIR=/var/lib/gitea 16 | CapabilityBoundingSet=CAP_NET_BIND_SERVICE 17 | AmbientCapabilities=CAP_NET_BIND_SERVICE 18 | 19 | [Install] 20 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /roles/gitea/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart gitea 3 | become: true 4 | systemd: 5 | state: restarted 6 | name: gitea 7 | -------------------------------------------------------------------------------- /roles/gitea/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - when: ansible_distribution != 'Debian' or ansible_distribution_version != '10' 3 | name: check platform 4 | fail: 5 | msg: This role requires Debian 10 6 | 7 | ########## SETUP 8 | 9 | - name: install dependencies 10 | become: true 11 | apt: 12 | name: '{{ gitea_package_name }}' 13 | loop: 14 | - git # for gitea to manage git repos 15 | - xz-utils # to extract gitea binary (using unxz) 16 | - postgresql-11 # to store gitea application data 17 | - python-psycopg2 # for ansible postgres modules 18 | loop_control: 19 | loop_var: gitea_package_name 20 | 21 | - name: create /root/ansible-workdir/gitea/ directory 22 | become: true 23 | file: 
24 | path: /root/ansible-workdir/gitea/ 25 | state: directory 26 | mode: 0700 27 | owner: root 28 | group: root 29 | 30 | ########## GITEA BINARY 31 | 32 | - name: check if gitea archive exists 33 | register: gitea_archive 34 | become: true 35 | stat: 36 | path: /root/ansible-workdir/gitea/gitea-1.16.3.xz 37 | 38 | - name: download gitea archive 39 | when: gitea_archive.stat.exists == false 40 | become: true 41 | get_url: 42 | url: https://dl.gitea.io/gitea/1.16.3/gitea-1.16.3-linux-amd64.xz 43 | checksum: sha256:d684950b757d90fc8c4d16d530ee30714524b6a24dd7a17558209b2de1d7672e 44 | dest: /root/ansible-workdir/gitea/gitea-1.16.3.xz 45 | mode: 0600 46 | owner: root 47 | group: root 48 | 49 | - name: check if gitea binary exists 50 | register: gitea_binary 51 | become: true 52 | stat: 53 | path: /root/ansible-workdir/gitea/gitea-1.16.3 54 | 55 | - name: extract gitea binary from archive 56 | when: gitea_binary.stat.exists == false 57 | become: true 58 | shell: unxz -k /root/ansible-workdir/gitea/gitea-1.16.3.xz 59 | 60 | - name: install gitea binary to global location 61 | become: true 62 | copy: 63 | src: /root/ansible-workdir/gitea/gitea-1.16.3 64 | remote_src: true 65 | dest: /usr/local/bin/gitea 66 | mode: 0755 67 | owner: root 68 | group: root 69 | notify: restart gitea 70 | 71 | ########## LINUX USER 72 | 73 | - name: create git group 74 | become: true 75 | group: 76 | name: git 77 | state: present 78 | 79 | - name: create git user 80 | become: true 81 | user: 82 | name: git 83 | system: true 84 | shell: /bin/bash 85 | comment: gitea user 86 | group: git 87 | home: /home/git 88 | state: present 89 | 90 | ########## DIRECTORIES 91 | 92 | - name: create gitea data directories 93 | become: true 94 | file: 95 | path: '{{ item.path }}' 96 | state: directory 97 | mode: 0750 98 | owner: git 99 | group: git 100 | loop: 101 | - { path: /var/lib/gitea } 102 | - { path: /var/lib/gitea/custom } 103 | - { path: /var/lib/gitea/data } 104 | - { path: /var/lib/gitea/log } 105 | 
106 | - name: create gitea config directory 107 | become: true 108 | file: 109 | path: /etc/gitea 110 | state: directory 111 | mode: 0750 112 | owner: root 113 | group: git 114 | 115 | ########## POSTGRES 116 | 117 | - name: start and enable postgresql service 118 | become: true 119 | systemd: 120 | name: postgresql 121 | state: started 122 | enabled: true 123 | 124 | - name: create gitea postgres database 125 | become_user: postgres 126 | become: true 127 | postgresql_db: 128 | name: gitea 129 | 130 | - name: create gitea postgres user 131 | become_user: postgres 132 | become: true 133 | postgresql_user: 134 | db: gitea 135 | name: gitea 136 | password: '{{ gitea_postgres_password }}' 137 | 138 | ########## GITEA CONFIG 139 | 140 | - name: create gitea config file 141 | become: true 142 | copy: 143 | src: roles/gitea/files/app.ini 144 | dest: /etc/gitea/app.ini 145 | owner: root 146 | group: git 147 | mode: 0640 148 | force: false 149 | 150 | - name: generate gitea JWT_SECRET 151 | register: gitea_jwt_secret_result 152 | changed_when: false 153 | shell: gitea generate secret JWT_SECRET 154 | 155 | - name: write JWT_SECRET to gitea config file 156 | become: true 157 | replace: 158 | path: /etc/gitea/app.ini 159 | regexp: '\${JWT_SECRET}' 160 | replace: '{{ gitea_jwt_secret_result.stdout }}' 161 | notify: restart gitea 162 | 163 | - name: generate gitea INTERNAL_TOKEN 164 | register: gitea_internal_token_result 165 | changed_when: false 166 | shell: gitea generate secret INTERNAL_TOKEN 167 | 168 | - name: write INTERNAL_TOKEN to gitea config file 169 | become: true 170 | replace: 171 | path: /etc/gitea/app.ini 172 | regexp: '\${INTERNAL_TOKEN}' 173 | replace: '{{ gitea_internal_token_result.stdout }}' 174 | notify: restart gitea 175 | 176 | - name: generate gitea SECRET_KEY 177 | register: gitea_secret_key_result 178 | changed_when: false 179 | shell: gitea generate secret SECRET_KEY 180 | 181 | - name: write SECRET_KEY to gitea config file 182 | become: true 183 | 
replace: 184 | path: /etc/gitea/app.ini 185 | regexp: '\${SECRET_KEY}' 186 | replace: '{{ gitea_secret_key_result.stdout }}' 187 | notify: restart gitea 188 | 189 | - name: generate gitea LFS_JWT_SECRET 190 | register: gitea_lfs_jwt_secret_result 191 | changed_when: false 192 | shell: gitea generate secret LFS_JWT_SECRET 193 | 194 | - name: write LFS_JWT_SECRET to gitea config file 195 | become: true 196 | replace: 197 | path: /etc/gitea/app.ini 198 | regexp: '\${LFS_JWT_SECRET}' 199 | replace: '{{ gitea_lfs_jwt_secret_result.stdout }}' 200 | notify: restart gitea 201 | 202 | - name: write POSTGRES_USER_PASSWORD to gitea config file 203 | become: true 204 | replace: 205 | path: /etc/gitea/app.ini 206 | regexp: '^(PASSWD\s+=).*$' 207 | replace: \1 {{ gitea_postgres_password }} 208 | 209 | - name: write gitea_hostname to gitea config file 210 | become: true 211 | replace: 212 | path: /etc/gitea/app.ini 213 | regexp: '\${HOSTNAME}' 214 | replace: '{{ gitea_hostname }}' 215 | notify: restart gitea 216 | 217 | ########## TLS CERTIFICATES 218 | 219 | - name: copy server private key to gitea config directory 220 | become: true 221 | copy: 222 | content: '{{ gitea_key }}' 223 | dest: /var/lib/gitea/custom/key.pem 224 | owner: git 225 | group: git 226 | mode: '0600' 227 | notify: restart gitea 228 | 229 | - name: copy server cert to gitea config directory 230 | become: true 231 | copy: 232 | content: '{{ gitea_cert }}' 233 | dest: /var/lib/gitea/custom/cert.pem 234 | owner: git 235 | group: git 236 | mode: '0600' 237 | notify: restart gitea 238 | 239 | ########## SYSTEMD SERVICE 240 | 241 | - name: create gitea systemd service config 242 | become: true 243 | copy: 244 | src: roles/gitea/files/gitea.service 245 | dest: /etc/systemd/system/gitea.service 246 | owner: root 247 | group: root 248 | mode: 0644 249 | 250 | - name: start and enable gitea service 251 | become: true 252 | systemd: 253 | enabled: true 254 | state: started 255 | name: gitea 256 | 
-------------------------------------------------------------------------------- /roles/harbor/files/harbor.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=harbor 3 | Requires=docker.service 4 | After=docker.service 5 | 6 | [Service] 7 | Restart=always 8 | RestartSec=30s 9 | 10 | # Remove old containers, images and volumes 11 | ExecStartPre=/usr/local/bin/docker-compose -f /home/harbor/harbor/docker-compose.yml down -v 12 | ExecStartPre=/usr/local/bin/docker-compose -f /home/harbor/harbor/docker-compose.yml rm -v 13 | ExecStartPre=-/bin/bash -c 'docker volume rm $(docker volume ls -q)' 14 | ExecStartPre=-/bin/bash -c 'docker rmi $(docker images -q)' 15 | ExecStartPre=-/bin/bash -c 'docker rm -v $(docker ps -aq)' 16 | 17 | # Compose up 18 | ExecStart=/usr/local/bin/docker-compose -f /home/harbor/harbor/docker-compose.yml up 19 | 20 | # Compose down, remove containers and volumes 21 | ExecStop=/usr/local/bin/docker-compose -f /home/harbor/harbor/docker-compose.yml down -v 22 | 23 | [Install] 24 | WantedBy=multi-user.target 25 | -------------------------------------------------------------------------------- /roles/harbor/files/harbor.yml: -------------------------------------------------------------------------------- 1 | # Configuration file of Harbor 2 | 3 | # The IP address or hostname to access admin UI and registry service. 4 | # DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients. 5 | hostname: ${HOSTNAME} 6 | 7 | # http related config 8 | http: 9 | # port for http, default is 80. 
If https enabled, this port will redirect to https port 10 | port: 80 11 | 12 | # https related config 13 | https: 14 | # https port for harbor, default is 443 15 | port: 443 16 | # The path of cert and key files for nginx 17 | certificate: /home/harbor/harbor/harbor.cert.pem 18 | private_key: /home/harbor/harbor/harbor.key.pem 19 | 20 | # # Uncomment following will enable tls communication between all harbor components 21 | # internal_tls: 22 | # # set enabled to true means internal tls is enabled 23 | # enabled: true 24 | # # put your cert and key files on dir 25 | # dir: /etc/harbor/tls/internal 26 | 27 | # Uncomment external_url if you want to enable external proxy 28 | # And when it enabled the hostname will no longer used 29 | # external_url: https://reg.mydomain.com:8433 30 | 31 | # The initial password of Harbor admin 32 | # It only works in first time to install harbor 33 | # Remember Change the admin password from UI after launching Harbor. 34 | harbor_admin_password: Harbor12345 35 | 36 | # Harbor DB configuration 37 | database: 38 | # The password for the root user of Harbor DB. Change this before any production use. 39 | password: ${DATABASE_PASSWORD} 40 | # The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained. 41 | max_idle_conns: 50 42 | # The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections. 43 | # Note: the default number of connections is 100 for postgres. 44 | max_open_conns: 100 45 | 46 | # The default data volume 47 | data_volume: /data 48 | 49 | # Harbor Storage settings by default is using /data dir on local filesystem 50 | # Uncomment storage_service setting If you want to using external storage 51 | # storage_service: 52 | # # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore 53 | # # of registry's and chart repository's containers. 
This is usually needed when the user hosts a internal storage with self signed certificate. 54 | # ca_bundle: 55 | 56 | # # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss 57 | # # for more info about this configuration please refer https://docs.docker.com/registry/configuration/ 58 | # filesystem: 59 | # maxthreads: 100 60 | # # set disable to true when you want to disable registry redirect 61 | # redirect: 62 | # disabled: false 63 | 64 | # Clair configuration 65 | clair: 66 | # The interval of clair updaters, the unit is hour, set to 0 to disable the updaters. 67 | updaters_interval: 12 68 | 69 | # Trivy configuration 70 | trivy: 71 | # ignoreUnfixed The flag to display only fixed vulnerabilities 72 | ignore_unfixed: false 73 | # skipUpdate The flag to enable or disable Trivy DB downloads from GitHub 74 | # 75 | # You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues. 76 | # If the flag is enabled you have to manually download the `trivy.db` file and mount it in the 77 | # /home/scanner/.cache/trivy/db/trivy.db path. 78 | skip_update: false 79 | # 80 | # insecure The flag to skip verifying registry certificate 81 | insecure: false 82 | # github_token The GitHub access token to download Trivy DB 83 | # 84 | # Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases. 85 | # It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached 86 | # in the local file system (/home/scanner/.cache/trivy/db/trivy.db). In addition, the database contains the update 87 | # timestamp so Trivy can detect whether it should download a newer version from the Internet or use the cached one. 88 | # Currently, the database is updated every 12 hours and published as a new release to GitHub. 89 | # 90 | # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. 
Normally such rate limit is enough 91 | # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000 92 | # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult 93 | # https://developer.github.com/v3/#rate-limiting 94 | # 95 | # You can create a GitHub token by following the instructions in 96 | # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line 97 | # 98 | # github_token: xxx 99 | 100 | jobservice: 101 | # Maximum number of job workers in job service 102 | max_job_workers: 10 103 | 104 | notification: 105 | # Maximum retry count for webhook job 106 | webhook_job_max_retry: 10 107 | 108 | chart: 109 | # Change the value of absolute_url to enabled can enable absolute url in chart 110 | absolute_url: disabled 111 | 112 | # Log configurations 113 | log: 114 | # options are debug, info, warning, error, fatal 115 | level: info 116 | # configs for logs in local storage 117 | local: 118 | # Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated. 119 | rotate_count: 50 120 | # Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes. 121 | # If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G 122 | # are all valid. 123 | rotate_size: 200M 124 | # The directory on your host that store log 125 | location: /var/log/harbor 126 | 127 | # Uncomment following lines to enable external syslog endpoint. 
128 | # external_endpoint: 129 | # # protocol used to transmit log to external endpoint, options is tcp or udp 130 | # protocol: tcp 131 | # # The host of external endpoint 132 | # host: localhost 133 | # # Port of external endpoint 134 | # port: 5140 135 | 136 | #This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY! 137 | _version: 2.0.0 138 | 139 | # Uncomment external_database if using external database. 140 | # external_database: 141 | # harbor: 142 | # host: harbor_db_host 143 | # port: harbor_db_port 144 | # db_name: harbor_db_name 145 | # username: harbor_db_username 146 | # password: harbor_db_password 147 | # ssl_mode: disable 148 | # max_idle_conns: 2 149 | # max_open_conns: 0 150 | # clair: 151 | # host: clair_db_host 152 | # port: clair_db_port 153 | # db_name: clair_db_name 154 | # username: clair_db_username 155 | # password: clair_db_password 156 | # ssl_mode: disable 157 | # notary_signer: 158 | # host: notary_signer_db_host 159 | # port: notary_signer_db_port 160 | # db_name: notary_signer_db_name 161 | # username: notary_signer_db_username 162 | # password: notary_signer_db_password 163 | # ssl_mode: disable 164 | # notary_server: 165 | # host: notary_server_db_host 166 | # port: notary_server_db_port 167 | # db_name: notary_server_db_name 168 | # username: notary_server_db_username 169 | # password: notary_server_db_password 170 | # ssl_mode: disable 171 | 172 | # Uncomment external_redis if using external Redis server 173 | # external_redis: 174 | # host: redis 175 | # port: 6379 176 | # password: 177 | # # db_index 0 is for core, it's unchangeable 178 | # registry_db_index: 1 179 | # jobservice_db_index: 2 180 | # chartmuseum_db_index: 3 181 | # clair_db_index: 4 182 | # trivy_db_index: 5 183 | # idle_timeout_seconds: 30 184 | 185 | # Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert. 
186 | # uaa: 187 | # ca_file: /path/to/ca 188 | 189 | # Global proxy 190 | # Config http proxy for components, e.g. http://my.proxy.com:3128 191 | # Components doesn't need to connect to each others via http proxy. 192 | # Remove component from `components` array if want disable proxy 193 | # for it. If you want use proxy for replication, MUST enable proxy 194 | # for core and jobservice, and set `http_proxy` and `https_proxy`. 195 | # Add domain to the `no_proxy` field, when you want disable proxy 196 | # for some special registry. 197 | proxy: 198 | http_proxy: 199 | https_proxy: 200 | no_proxy: 201 | components: 202 | - core 203 | - jobservice 204 | - clair 205 | - trivy 206 | -------------------------------------------------------------------------------- /roles/harbor/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart harbor 3 | become: true 4 | systemd: 5 | state: restarted 6 | name: harbor 7 | -------------------------------------------------------------------------------- /roles/harbor/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - when: ansible_distribution != 'Debian' or ansible_distribution_version != '10' 3 | name: check platform 4 | fail: 5 | msg: This role requires Debian 10 6 | 7 | ########## SETUP 8 | 9 | - name: create /root/ansible-workdir/harbor/ directory 10 | become: true 11 | file: 12 | path: /root/ansible-workdir/harbor/ 13 | state: directory 14 | mode: 0700 15 | owner: root 16 | group: root 17 | 18 | ########## DOCKER COMPOSE 19 | 20 | - name: check if docker-compose binary exists 21 | register: docker_compose_binary 22 | become: true 23 | stat: 24 | path: /root/ansible-workdir/harbor/docker-compose-1.29.2-Linux-x86_64 25 | 26 | - name: download docker-compose binary 27 | when: docker_compose_binary.stat.exists == false 28 | become: true 29 | get_url: 30 | url: 
https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64 31 | checksum: sha256:f3f10cf3dbb8107e9ba2ea5f23c1d2159ff7321d16f0a23051d68d8e2547b323 32 | dest: /root/ansible-workdir/harbor/docker-compose-1.29.2-Linux-x86_64 33 | mode: 0700 34 | owner: root 35 | group: root 36 | 37 | - name: install docker-compose binary to global location 38 | become: true 39 | copy: 40 | src: /root/ansible-workdir/harbor/docker-compose-1.29.2-Linux-x86_64 41 | remote_src: true 42 | dest: /usr/local/bin/docker-compose 43 | mode: 0755 44 | owner: root 45 | group: root 46 | 47 | ########## LINUX USER 48 | 49 | - name: create harbor group 50 | become: true 51 | group: 52 | name: harbor 53 | state: present 54 | 55 | - name: create harbor user 56 | become: true 57 | user: 58 | name: harbor 59 | system: true 60 | shell: /bin/bash 61 | comment: harbor user 62 | group: harbor 63 | home: /home/harbor 64 | state: present 65 | 66 | - name: allow harbor user to access docker socket 67 | become: true 68 | user: 69 | name: harbor 70 | groups: docker 71 | append: true 72 | 73 | ########## HARBOR 74 | 75 | - name: check if harbor archive exists 76 | register: harbor_archive 77 | become: true 78 | stat: 79 | path: /root/ansible-workdir/harbor/harbor-offline-installer-v2.4.1.tgz 80 | 81 | - name: download harbor archive 82 | when: harbor_archive.stat.exists == false 83 | become: true 84 | get_url: 85 | url: https://github.com/goharbor/harbor/releases/download/v2.4.1/harbor-offline-installer-v2.4.1.tgz 86 | checksum: md5:2d7b4c93f4205b3a13c5aab0ac9ad0e9 87 | dest: /root/ansible-workdir/harbor/harbor-offline-installer-v2.4.1.tgz 88 | mode: 0600 89 | owner: root 90 | group: root 91 | 92 | - name: extract harbor archive 93 | become: true 94 | unarchive: 95 | src: /root/ansible-workdir/harbor/harbor-offline-installer-v2.4.1.tgz 96 | remote_src: true 97 | dest: /home/harbor 98 | creates: /home/harbor/harbor 99 | mode: 0755 100 | owner: harbor 101 | group: harbor 102 | 103 | - 
name: create harbor config file 104 | become: true 105 | copy: 106 | src: roles/harbor/files/harbor.yml 107 | dest: /home/harbor/harbor/harbor.yml 108 | owner: harbor 109 | group: harbor 110 | mode: 0640 111 | force: false 112 | 113 | - name: write HOSTNAME to harbor config file 114 | become: true 115 | replace: 116 | path: /home/harbor/harbor/harbor.yml 117 | regexp: '\${HOSTNAME}' 118 | replace: '{{ harbor_hostname }}' 119 | notify: restart harbor 120 | 121 | - name: write DATABASE_PASSWORD to harbor config file 122 | become: true 123 | replace: 124 | path: /home/harbor/harbor/harbor.yml 125 | regexp: '\${DATABASE_PASSWORD}' 126 | replace: '{{ harbor_database_password }}' 127 | notify: restart harbor 128 | 129 | ########## TLS CERTIFICATES 130 | 131 | - name: copy server private key to harbor config directory 132 | become: true 133 | copy: 134 | content: '{{ harbor_key }}' 135 | dest: /home/harbor/harbor/harbor.key.pem 136 | owner: harbor 137 | group: harbor 138 | mode: '0600' 139 | notify: restart harbor 140 | 141 | - name: copy server cert to harbor config directory 142 | become: true 143 | copy: 144 | content: '{{ harbor_cert }}' 145 | dest: /home/harbor/harbor/harbor.cert.pem 146 | owner: harbor 147 | group: harbor 148 | mode: '0600' 149 | notify: restart harbor 150 | 151 | ########## INSTALL 152 | 153 | - name: run harbor install script 154 | when: harbor_archive.stat.exists == false 155 | become: true 156 | shell: /home/harbor/harbor/install.sh --with-trivy 157 | 158 | ########## SYSTEMD SERVICE 159 | 160 | - name: create harbor systemd service config 161 | become: true 162 | copy: 163 | src: roles/harbor/files/harbor.service 164 | dest: /etc/systemd/system/harbor.service 165 | owner: root 166 | group: root 167 | mode: 0644 168 | 169 | - name: start and enable harbor service 170 | become: true 171 | systemd: 172 | enabled: true 173 | state: started 174 | name: harbor 175 | -------------------------------------------------------------------------------- 
/roles/k8s-all/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - when: ansible_distribution != 'Debian' or ansible_distribution_version != '10' 3 | name: check platform 4 | fail: 5 | msg: This role requires Debian 10 6 | 7 | - name: check if swap is enabled 8 | register: swap_enabled 9 | changed_when: false 10 | become: true 11 | shell: swapon --show 12 | 13 | - name: disable swap for running kernel 14 | when: swap_enabled.stdout_lines|length > 0 15 | become: true 16 | shell: swapoff -a 17 | 18 | - name: disable swap permanently in fstab 19 | become: true 20 | replace: 21 | path: /etc/fstab 22 | regexp: '^([^#].*?\sswap\s+sw\s+.*)$' 23 | replace: '# \1' 24 | 25 | - name: add google cloud gpg key 26 | become: true 27 | apt_key: 28 | url: https://packages.cloud.google.com/apt/doc/apt-key.gpg 29 | 30 | - name: add google cloud apt repository 31 | become: true 32 | apt_repository: 33 | repo: deb https://apt.kubernetes.io/ kubernetes-xenial main 34 | 35 | - name: install kubernetes packages 36 | become: true 37 | apt: 38 | update_cache: true 39 | name: '{{ k8s_package_name }}' 40 | loop: 41 | - kubelet 42 | - kubeadm 43 | - kubectl 44 | loop_control: 45 | loop_var: k8s_package_name 46 | 47 | - name: check held packages 48 | register: held_packages 49 | changed_when: false 50 | shell: apt-mark showhold 51 | 52 | - name: hold kubernetes packages 53 | when: (not "kubelet" in held_packages.stdout) or (not "kubeadm" in held_packages.stdout) or (not "kubectl" in held_packages.stdout) 54 | become: true 55 | shell: apt-mark hold kubelet kubeadm kubectl -------------------------------------------------------------------------------- /roles/k8s-control-plane/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - when: ansible_distribution != 'Debian' or ansible_distribution_version != '10' 3 | name: check platform 4 | fail: 5 | msg: This role requires Debian 10 6 | 
7 | - name: check if kubelet is running 8 | register: kubelet_on_control_plane 9 | changed_when: false 10 | shell: ps aux | grep kubelet | grep -v grep | wc -l 11 | 12 | - name: initialize control plane 13 | when: kubelet_on_control_plane.stdout == "0" 14 | become: true 15 | shell: kubeadm init --pod-network-cidr=10.244.0.0/16 16 | 17 | - name: setup flannel pod network add-on 18 | when: kubelet_on_control_plane.stdout == "0" 19 | become: true 20 | shell: KUBECONFIG=/etc/kubernetes/admin.conf kubectl create -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml 21 | 22 | - name: install jq 23 | become: true 24 | apt: 25 | update_cache: true 26 | name: jq 27 | 28 | - name: save cluster bootstrap token 29 | when: kubelet_on_control_plane.stdout == "0" 30 | register: k8s_bootstrap_token 31 | become: true 32 | shell: | 33 | kubeadm token list -o json | \ 34 | jq -r '.token' | \ 35 | head -n 1 36 | 37 | - name: save cluster discovery token 38 | when: kubelet_on_control_plane.stdout == "0" 39 | register: k8s_discovery_token 40 | become: true 41 | shell: | 42 | openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | \ 43 | openssl rsa -pubin -outform der 2>/dev/null | \ 44 | openssl dgst -sha256 -hex | sed 's/^.* //' 45 | 46 | - name: create ~/.kube directory 47 | file: 48 | path: /home/{{ ansible_user_id }}/.kube 49 | state: directory 50 | owner: '{{ ansible_user_id }}' 51 | group: '{{ ansible_user_id }}' 52 | mode: 0770 53 | 54 | - name: copy kube config to ~/.kube/config 55 | become: true 56 | copy: 57 | src: /etc/kubernetes/admin.conf 58 | remote_src: true 59 | dest: /home/{{ ansible_user_id }}/.kube/config 60 | owner: '{{ ansible_user_id }}' 61 | group: '{{ ansible_user_id }}' 62 | mode: 0660 63 | -------------------------------------------------------------------------------- /roles/k8s-nodes/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - when: ansible_distribution != 
'Debian' or ansible_distribution_version != '10' 3 | name: check platform 4 | fail: 5 | msg: This role requires Debian 10 6 | 7 | - name: check if kubelet is running 8 | register: kubelet_on_node 9 | changed_when: false 10 | shell: ps aux | grep kubelet | grep -v grep | wc -l 11 | 12 | - name: join cluster 13 | when: kubelet_on_node.stdout == "0" 14 | become: true 15 | shell: | 16 | kubeadm join --token {{ hostvars['k8s-control-plane']['k8s_bootstrap_token']['stdout'] }} \ 17 | {{ hostvars['k8s-control-plane']['ansible_default_ipv4']['address'] }}:6443 \ 18 | --discovery-token-ca-cert-hash sha256:{{ hostvars['k8s-control-plane']['k8s_discovery_token']['stdout'] }} 19 | -------------------------------------------------------------------------------- /roles/nextcloud/files/nextcloud.conf: -------------------------------------------------------------------------------- 1 | Alias / "/var/www/nextcloud/" 2 | 3 | 4 | Redirect permanent / https://${HOSTNAME}/ 5 | 6 | 7 | 8 | ServerAdmin root@localhost 9 | DocumentRoot /var/www/nextcloud 10 | ErrorLog ${APACHE_LOG_DIR}/error.log 11 | CustomLog ${APACHE_LOG_DIR}/access.log combined 12 | SSLEngine on 13 | SSLCertificateFile /etc/apache2/ssl/nextcloud.cert.pem 14 | SSLCertificateKeyFile /etc/apache2/ssl/nextcloud.key.pem 15 | 16 | Header always set Strict-Transport-Security "max-age=15552000; includeSubDomains" 17 | 18 | 19 | 20 | 21 | Require all granted 22 | AllowOverride All 23 | Options FollowSymLinks MultiViews 24 | 25 | Dav off 26 | 27 | 28 | -------------------------------------------------------------------------------- /roles/nextcloud/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart apache 3 | become: true 4 | systemd: 5 | state: restarted 6 | name: apache2 7 | -------------------------------------------------------------------------------- /roles/nextcloud/tasks/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - when: ansible_distribution != 'Debian' or ansible_distribution_version != '10' 3 | name: check platform 4 | fail: 5 | msg: This role requires Debian 10 6 | 7 | ########## SETUP 8 | 9 | - name: install dependencies 10 | become: true 11 | apt: 12 | name: '{{ nextcloud_package_name }}' 13 | loop: 14 | - apache2 # required, web server 15 | - libapache2-mod-php7.3 # required, apache php module 16 | - postgresql-11 # required, database 17 | - python-psycopg2 # required, for ansible postgres modules 18 | - php7.3 # required, server-side scripting language 19 | - php7.3-pgsql # required, database connector module 20 | - php7.3-curl # required 21 | - php7.3-gd # required 22 | - php7.3-mbstring # required 23 | - php7.3-xml # required 24 | - php7.3-zip # required 25 | - php7.3-bz2 # recommended, for extraction of apps 26 | - php7.3-intl # recommended, for language translation and handling of non-ASCII characters 27 | - php7.3-bcmath # optional, for passwordless login 28 | - php7.3-gmp # optional, for sftp storage 29 | - php-imagick # optional, for image preview generation 30 | - libmagickcore-6.q16-6-extra # optional, for php-imagick svg support 31 | - rsync # optional, for backup/restore 32 | loop_control: 33 | loop_var: nextcloud_package_name 34 | 35 | - name: create /root/ansible-workdir/nextcloud/ directory 36 | become: true 37 | file: 38 | path: /root/ansible-workdir/nextcloud/ 39 | state: directory 40 | mode: 0700 41 | owner: root 42 | group: root 43 | 44 | ########## NEXTCLOUD 45 | 46 | - name: check if nextcloud archive exists 47 | register: nextcloud_archive 48 | become: true 49 | stat: 50 | path: /root/ansible-workdir/nextcloud/nextcloud-23.0.2.tar.bz2 51 | 52 | - name: download nextcloud archive 53 | when: nextcloud_archive.stat.exists == false 54 | become: true 55 | get_url: 56 | url: https://download.nextcloud.com/server/releases/nextcloud-23.0.2.tar.bz2 57 | checksum: 
sha256:9e02462d38eaab6457fca8077bd46fe78c3aaad442e91a6e12e32fa7d51bc4ee 58 | dest: /root/ansible-workdir/nextcloud/nextcloud-23.0.2.tar.bz2 59 | mode: 0600 60 | owner: root 61 | group: root 62 | 63 | - name: extract nextcloud archive 64 | become: true 65 | unarchive: 66 | src: /root/ansible-workdir/nextcloud/nextcloud-23.0.2.tar.bz2 67 | remote_src: true 68 | dest: /var/www/ 69 | creates: /var/www/nextcloud 70 | owner: www-data 71 | group: www-data 72 | 73 | - name: setup nextcloud cron job 74 | become: true 75 | ansible.builtin.cron: 76 | user: 'www-data' 77 | name: '/var/www/nextcloud/cron.php' 78 | minute: '*/5' 79 | job: 'php -f /var/www/nextcloud/cron.php' 80 | 81 | ########## TLS CERTIFICATES 82 | 83 | - name: create /etc/apache2/ssl/ directory 84 | become: true 85 | file: 86 | path: /etc/apache2/ssl/ 87 | state: directory 88 | mode: 0744 89 | owner: root 90 | group: root 91 | 92 | - name: copy server private key to apache config directory 93 | become: true 94 | copy: 95 | content: '{{ nextcloud_key }}' 96 | dest: /etc/apache2/ssl/nextcloud.key.pem 97 | owner: root 98 | group: root 99 | mode: 0644 100 | notify: restart apache 101 | 102 | - name: copy server cert to apache config directory 103 | become: true 104 | copy: 105 | content: '{{ nextcloud_cert }}' 106 | dest: /etc/apache2/ssl/nextcloud.cert.pem 107 | owner: root 108 | group: root 109 | mode: 0600 110 | notify: restart apache 111 | 112 | ########## APACHE 113 | 114 | - name: enable apache modules 115 | become: true 116 | apache2_module: 117 | name: '{{ apache2_module_name }}' 118 | loop: 119 | - dir 120 | - env 121 | - headers 122 | - mime 123 | - rewrite 124 | - ssl 125 | loop_control: 126 | loop_var: apache2_module_name 127 | notify: restart apache 128 | 129 | - name: create apache site config file 130 | become: true 131 | copy: 132 | src: roles/nextcloud/files/nextcloud.conf 133 | dest: /etc/apache2/sites-available/nextcloud.conf 134 | owner: root 135 | group: root 136 | mode: 0644 137 | force: 
false 138 | notify: restart apache 139 | 140 | - name: write cloud_hostname to site config file 141 | become: true 142 | replace: 143 | path: /etc/apache2/sites-available/nextcloud.conf 144 | regexp: '\${HOSTNAME}' 145 | replace: '{{ nextcloud_hostname }}' 146 | notify: restart apache 147 | 148 | - name: enable nextcloud site 149 | register: enable_nextcloud_result 150 | become: true 151 | shell: a2ensite nextcloud.conf 152 | changed_when: "'already enabled' not in enable_nextcloud_result.stdout" 153 | notify: restart apache 154 | 155 | - name: disable default site 156 | register: disable_default_result 157 | become: true 158 | shell: a2dissite 000-default.conf 159 | changed_when: "'already disabled' not in disable_default_result.stdout" 160 | notify: restart apache 161 | 162 | ########## PHP 163 | - name: set php memory_limit 164 | become: true 165 | replace: 166 | path: /etc/php/7.3/apache2/php.ini 167 | regexp: '^memory_limit = .*$' 168 | replace: 'memory_limit = 512M' 169 | notify: restart apache 170 | 171 | ########## POSTGRES 172 | 173 | - name: start and enable postgresql service 174 | become: true 175 | systemd: 176 | name: postgresql 177 | state: started 178 | enabled: true 179 | 180 | - name: create nextcloud postgres database 181 | become_user: postgres 182 | become: true 183 | postgresql_db: 184 | name: nextcloud 185 | 186 | - name: create nextcloud postgres user 187 | become_user: postgres 188 | become: true 189 | postgresql_user: 190 | db: nextcloud 191 | name: nextcloud 192 | password: '{{ nextcloud_postgres_password }}' 193 | 194 | -------------------------------------------------------------------------------- /roles/octopi/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart haproxy 3 | become: true 4 | systemd: 5 | state: restarted 6 | name: haproxy 7 | -------------------------------------------------------------------------------- /roles/octopi/tasks/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - when: ansible_distribution != 'Debian' or ansible_lsb['id'] != 'Raspbian' 3 | name: check platform 4 | fail: 5 | msg: This role requires Raspbian 6 | 7 | - name: copy ssl cert 8 | become: true 9 | copy: 10 | content: '{{ octopi_pem }}' 11 | dest: /home/pi/octopi.pem 12 | owner: pi 13 | group: pi 14 | mode: '0600' 15 | notify: restart haproxy 16 | 17 | - name: configure haproxy to use custom ssl cert 18 | become: true 19 | replace: 20 | path: /etc/haproxy/haproxy.cfg 21 | regexp: 'bind \:\:\:443 v4v6 ssl crt \/etc\/ssl\/snakeoil\.pem' 22 | replace: 'bind :::443 v4v6 ssl crt /home/pi/octopi.pem' 23 | notify: restart haproxy 24 | 25 | - name: configure haproxy to redirect http to https 26 | become: true 27 | replace: 28 | path: /etc/haproxy/haproxy.cfg 29 | regexp: '(\s+bind \:\:\:80.*\n) (?!redirect scheme https if !{ ssl_fc })' 30 | replace: '\1 redirect scheme https if !{ ssl_fc }\n ' 31 | notify: restart haproxy 32 | -------------------------------------------------------------------------------- /roles/router/files/.gitignore: -------------------------------------------------------------------------------- 1 | dhcpd.conf 2 | unbound.conf -------------------------------------------------------------------------------- /roles/router/files/dhcpd.conf.sample: -------------------------------------------------------------------------------- 1 | # primary network 2 | subnet 192.168.1.0 netmask 255.255.255.0 { 3 | option routers 192.168.1.1; 4 | option domain-name-servers 192.168.1.1; 5 | range 192.168.1.100 192.168.1.149; 6 | host gitea { 7 | fixed-address 192.168.1.2; 8 | hardware ethernet de:ad:be:ef:00:01; 9 | } 10 | host nextcloud { 11 | fixed-address 192.168.1.3; 12 | hardware ethernet de:ad:be:ef:00:02; 13 | } 14 | host harbor { 15 | fixed-address 192.168.1.4; 16 | hardware ethernet de:ad:be:ef:00:03; 17 | } 18 | host octopi { 19 | fixed-address 192.168.1.5; 20 | 
hardware ethernet de:ad:be:ef:00:04; 21 | } 22 | host desktop { 23 | fixed-address 192.168.1.6; 24 | hardware ethernet de:ad:be:ef:00:05; 25 | } 26 | host k8s-control-plane { 27 | fixed-address 192.168.1.10; 28 | hardware ethernet f0:0d:d0:0d:00:00; 29 | } 30 | host k8s-node-1 { 31 | fixed-address 192.168.1.11; 32 | hardware ethernet f0:0d:d0:0d:00:01; 33 | } 34 | host k8s-node-2 { 35 | fixed-address 192.168.1.12; 36 | hardware ethernet f0:0d:d0:0d:00:02; 37 | } 38 | host k8s-node-3 { 39 | fixed-address 192.168.1.13; 40 | hardware ethernet f0:0d:d0:0d:00:03; 41 | } 42 | host k8s-node-4 { 43 | fixed-address 192.168.1.14; 44 | hardware ethernet f0:0d:d0:0d:00:04; 45 | } 46 | host k8s-node-5 { 47 | fixed-address 192.168.1.15; 48 | hardware ethernet f0:0d:d0:0d:00:05; 49 | } 50 | } 51 | 52 | # secondary network 53 | subnet 192.168.2.0 netmask 255.255.255.0 { 54 | option routers 192.168.2.1; 55 | option domain-name-servers 192.168.2.1; 56 | range 192.168.2.100 192.168.2.199; 57 | } 58 | -------------------------------------------------------------------------------- /roles/router/files/pf.conf: -------------------------------------------------------------------------------- 1 | # interfaces 2 | lo_if = "lo0" 3 | wan_if = "em0" 4 | primary_if = "em1" 5 | secondary_if = "em2" 6 | 7 | # cidr ranges 8 | primary_range = "192.168.1.0/24" 9 | secondary_range = "192.168.2.0/24" 10 | 11 | # setup non-routable address list 12 | # note: since this firewall is behind a local network, 13 | # do not include the default gateway in the table 14 | table { 0.0.0.0/8 10.0.0.0/8 127.0.0.0/8 169.254.0.0/16 \ 15 | 172.16.0.0/12 192.0.0.0/24 192.0.2.0/24 224.0.0.0/3 \ 16 | 192.168.0.0/16 198.18.0.0/15 198.51.100.0/24 \ 17 | 203.0.113.0/24 !192.168.0.1 } 18 | 19 | # drop blocked traffic 20 | set block-policy drop 21 | # set interface for logging 22 | set loginterface $wan_if 23 | # ignore loopback traffic 24 | set skip on $lo_if 25 | 26 | # normalize incoming packets 27 | match in all scrub 
(no-df random-id max-mss 1460) 28 | # perform NAT 29 | match out on $wan_if inet from !($wan_if:network) to any nat-to ($wan_if:0) 30 | 31 | # prevent spoofed traffic 32 | antispoof quick for { $wan_if $primary_if $secondary_if } 33 | 34 | # block non-routable traffic 35 | block in quick on $wan_if from to any 36 | block return out quick on $wan_if from any to 37 | 38 | # block all traffic 39 | block all 40 | # allow outgoing traffic 41 | pass out inet 42 | # allow traffic from internal networks 43 | pass in on { $primary_if $secondary_if } inet 44 | # block traffic from primary <--> secondary 45 | block in on $primary_if from $primary_range to $secondary_range 46 | block in on $secondary_if from $secondary_range to $primary_range 47 | # block outgoing unencrypted dns requests 48 | block proto { TCP UDP } from { $primary_range $secondary_range } to any port 53 49 | pass proto { TCP UDP } from { $primary_range $secondary_range} to self port 53 50 | # block ssh access for wan_if and secondary_if 51 | block in on $wan_if proto TCP to self port ssh 52 | block in on $secondary_if proto TCP to self port ssh 53 | -------------------------------------------------------------------------------- /roles/router/files/unbound.conf.sample: -------------------------------------------------------------------------------- 1 | server: 2 | interface: 127.0.0.1 3 | interface: 192.168.1.1 4 | interface: 192.168.2.1 5 | access-control: 127.0.0.0/8 allow 6 | access-control: 192.168.1.0/24 allow 7 | access-control: 192.168.2.0/24 allow 8 | hide-identity: yes 9 | hide-version: yes 10 | do-not-query-localhost: no 11 | tls-cert-bundle: "/etc/ssl/cert.pem" 12 | local-data: "router.mydomain.com A 192.168.1.1" 13 | local-data: "gitea.mydomain.com A 192.168.1.2" 14 | local-data: "nextcloud.mydomain.com A 192.168.1.3" 15 | local-data: "harbor.mydomain.com A 192.168.1.4" 16 | local-data: "octopi.mydomain.com A 192.168.1.5" 17 | local-data: "desktop.mydomain.com A 192.168.1.6" 18 | local-data: 
"k8s-control-plane.mydomain.com A 192.168.1.10"
    local-data: "k8s-node-1.mydomain.com A 192.168.1.11"
    local-data: "k8s-node-2.mydomain.com A 192.168.1.12"
    local-data: "k8s-node-3.mydomain.com A 192.168.1.13"
    local-data: "k8s-node-4.mydomain.com A 192.168.1.14"
    local-data: "k8s-node-5.mydomain.com A 192.168.1.15"

forward-zone:
    name: "."
    forward-tls-upstream: yes
    forward-addr: 8.8.8.8@853
    forward-addr: 8.8.4.4@853
--------------------------------------------------------------------------------
/roles/router/handlers/main.yml:
--------------------------------------------------------------------------------
---
# Reload the pf ruleset in place. A plain argv invocation needs no shell
# features, so use the command module instead of shell (ansible-lint
# command-instead-of-shell).
- name: reload pf config
  become: true
  command: pfctl -f /etc/pf.conf

- name: restart dhcpd
  become: true
  service:
    name: dhcpd
    state: restarted

- name: restart unbound
  become: true
  service:
    name: unbound
    state: restarted
--------------------------------------------------------------------------------
/roles/router/tasks/main.yml:
--------------------------------------------------------------------------------
---
# Guard: this role only supports the OpenBSD release it was written against.
- when: ansible_distribution != 'OpenBSD' or ansible_distribution_version != '6.7'
  name: check platform
  fail:
    msg: This role requires OpenBSD 6.7

- name: setup pf config file
  become: true
  copy:
    src: roles/router/files/pf.conf
    dest: /etc/pf.conf
    owner: root
    group: wheel
    mode: 0600
  notify: reload pf config

# NOTE(review): the repo ships dhcpd.conf.sample (real dhcpd.conf is
# gitignored) — the operator is expected to create it from the sample.
- name: setup dhcpd config file
  become: true
  copy:
    src: roles/router/files/dhcpd.conf
    dest: /etc/dhcpd.conf
    owner: root
    group: wheel
    mode: 0644
  notify: restart dhcpd

- name: start and enable dhcpd
  become: true
  service:
    name: dhcpd
    state: started
    enabled: yes
    args: em1 em2

- name: setup unbound config file
  become: true
  copy:
    src:
roles/router/files/unbound.conf 39 | dest: /var/unbound/etc/unbound.conf 40 | owner: root 41 | group: wheel 42 | mode: 0644 43 | notify: restart unbound 44 | 45 | - name: start and enable unbound 46 | become: true 47 | service: 48 | name: unbound 49 | state: started 50 | enabled: yes 51 | 52 | - name: enable ip forwarding 53 | become: true 54 | sysctl: 55 | name: net.inet.ip.forwarding 56 | value: '1' 57 | state: present 58 | -------------------------------------------------------------------------------- /roles/vms/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - when: | 3 | (ansible_distribution != 'Debian' or ansible_distribution_version != '10') and 4 | (ansible_distribution != 'Ubuntu' or ansible_distribution_version != '20.04') 5 | name: check platform 6 | fail: 7 | msg: This role requires Debian 10 or Ubuntu 20.04 8 | 9 | - name: install open-vm-tools 10 | become: true 11 | apt: 12 | name: open-vm-tools 13 | 14 | - name: enable and start open-vm-tools service 15 | become: true 16 | systemd: 17 | enabled: true 18 | state: started 19 | name: open-vm-tools 20 | -------------------------------------------------------------------------------- /scripts/cleanup-k8s.bash: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | k8s_cleanup_cmd="kubeadm reset --force && " 4 | k8s_cleanup_cmd+="rm -rf /etc/cni/net.d && " 5 | k8s_cleanup_cmd+="iptables -P INPUT ACCEPT && " 6 | k8s_cleanup_cmd+="iptables -P FORWARD ACCEPT && " 7 | k8s_cleanup_cmd+="iptables -P OUTPUT ACCEPT && " 8 | k8s_cleanup_cmd+="iptables -t nat -F && " 9 | k8s_cleanup_cmd+="iptables -t mangle -F && " 10 | k8s_cleanup_cmd+="iptables -F && " 11 | k8s_cleanup_cmd+="iptables -X && " 12 | k8s_cleanup_cmd+="iptables-legacy -P INPUT ACCEPT && " 13 | k8s_cleanup_cmd+="iptables-legacy -P FORWARD ACCEPT && " 14 | k8s_cleanup_cmd+="iptables-legacy -P OUTPUT ACCEPT && " 15 | 
k8s_cleanup_cmd+="iptables-legacy -t nat -F && "
k8s_cleanup_cmd+="iptables-legacy -t mangle -F && "
k8s_cleanup_cmd+="iptables-legacy -F && "
k8s_cleanup_cmd+="iptables-legacy -X && "
k8s_cleanup_cmd+="ip6tables -P INPUT ACCEPT && "
k8s_cleanup_cmd+="ip6tables -P FORWARD ACCEPT && "
k8s_cleanup_cmd+="ip6tables -P OUTPUT ACCEPT && "
k8s_cleanup_cmd+="ip6tables -t nat -F && "
k8s_cleanup_cmd+="ip6tables -t mangle -F && "
k8s_cleanup_cmd+="ip6tables -F && "
k8s_cleanup_cmd+="ip6tables -X && "
k8s_cleanup_cmd+="ip6tables-legacy -P INPUT ACCEPT && "
k8s_cleanup_cmd+="ip6tables-legacy -P FORWARD ACCEPT && "
k8s_cleanup_cmd+="ip6tables-legacy -P OUTPUT ACCEPT && "
k8s_cleanup_cmd+="ip6tables-legacy -t nat -F && "
k8s_cleanup_cmd+="ip6tables-legacy -t mangle -F && "
k8s_cleanup_cmd+="ip6tables-legacy -F && "
k8s_cleanup_cmd+="ip6tables-legacy -X"

ansible \
  k8s \
  -i hosts.yml \
  --ask-vault-pass \
  --become \
  -a "/bin/bash -c '${k8s_cleanup_cmd}'"
--------------------------------------------------------------------------------
/scripts/run-playbook.bash:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Run the playbook limited to one service group: ./scripts/run-playbook.bash <service>
set -euo pipefail

# FIX: HOSTS_FILE was never assigned in this script (sibling scripts hardcode
# hosts.yml), so an unset variable produced an empty -i argument. Default it
# here; an exported HOSTS_FILE still takes precedence.
HOSTS_FILE="${HOSTS_FILE:-hosts.yml}"

# Validate the requested service against the known groups.
case "${1:-}" in
  "router") ;;
  "gitea") ;;
  "nextcloud") ;;
  "harbor") ;;
  "k8s") ;;
  "octopi") ;;
  "desktop") ;;
  *)
    # Diagnostics go to stderr; the example now names the actual script file.
    echo "error: specify service to configure (for example: ./scripts/run-playbook.bash router)" >&2
    exit 1
esac

ansible-playbook \
  -i "${HOSTS_FILE}" \
  -l "${1}" \
  --ask-vault-pass \
  playbook.yml
--------------------------------------------------------------------------------
/scripts/upgrade-k8s.bash:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Upgrade the held kubernetes packages on every k8s host via ansible.

upgrade_cmd="/bin/bash -c 'sudo apt-mark unhold kubelet kubeadm kubectl && "
upgrade_cmd+="sudo \
apt-get update && "
# FIX: apt-get's 'upgrade' command does not take package arguments; the
# documented kubeadm pattern is unhold -> 'apt-get install -y <pkgs>' (which
# pulls the packages to their latest available version) -> hold.
upgrade_cmd+="sudo apt-get install -y kubelet kubeadm kubectl && "
upgrade_cmd+="sudo apt-mark hold kubelet kubeadm kubectl'"

ansible \
  k8s \
  -i hosts.yml \
  --ask-vault-pass \
  --become \
  -a "${upgrade_cmd}"
--------------------------------------------------------------------------------