├── LICENSE ├── README.md ├── aeolyus.oci.yaml ├── aeolyusplex.oci.yaml ├── ansible.cfg ├── bitwarden ├── meta │ └── main.yml ├── tasks │ └── main.yml └── templates │ ├── env.j2 │ └── vault.j2 ├── caddy ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml └── tasks │ └── main.yml ├── docker └── tasks │ └── main.yml ├── firewalld ├── defaults │ └── main.yml └── tasks │ └── main.yml ├── grafana ├── defaults │ └── main.yml ├── files │ ├── dashboard_provider.yml │ └── home.json ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ ├── dashboards.yml │ └── main.yml └── templates │ └── monitor.j2 ├── hosts ├── innernet-server ├── defaults │ └── main.yaml ├── meta │ └── main.yml └── tasks │ ├── generate-invites.yaml │ ├── groups.yaml │ ├── install.yaml │ ├── main.yaml │ └── server.yaml ├── innernet ├── defaults │ └── main.yaml ├── meta │ └── main.yml └── tasks │ ├── get_ip.yaml │ ├── install-invite.yaml │ ├── install.yaml │ └── main.yaml ├── k3s-initial-master ├── defaults │ └── main.yml ├── meta │ └── main.yml └── tasks │ └── main.yml ├── k3s-master ├── defaults │ └── main.yml ├── meta │ └── main.yml └── tasks │ └── main.yml ├── k3s-playbook.yml ├── k3s-worker ├── defaults │ └── main.yml └── tasks │ └── main.yml ├── letsencrypt ├── defaults │ └── main.yml └── tasks │ └── main.yml ├── minecraft ├── meta │ └── main.yml └── tasks │ └── main.yml ├── nextcloud ├── meta │ └── main.yml └── tasks │ └── main.yml ├── nginx ├── defaults │ └── main.yml ├── files │ └── nginx.conf ├── tasks │ └── main.yml └── templates │ └── default.j2 ├── oci ├── Dockerfile ├── README.md ├── docker-compose.yaml └── launch_compute_instance │ ├── main.yaml │ ├── tasks │ ├── find_ad.yaml │ ├── instance.yaml │ ├── main.yaml │ ├── network.yaml │ └── teardown.yaml │ ├── templates │ ├── egress_security_rules.yaml.j2 │ └── ingress_security_rules.yaml.j2 │ └── vars │ ├── aeolyus-master-arm.yaml │ ├── aeolyus-master-x86.yaml │ ├── aeolyus-worker-arm.yaml │ ├── aeolyus-worker-x86.yaml │ ├── aeolyusplex-innernet-server-x86.yaml │ ├── aeolyusplex-master-arm.yaml │ ├── aeolyusplex-master-x86.yaml │ ├── aeolyusplex-worker-arm.yaml │ ├── aeolyusplex-worker-x86.yaml │ ├── innernet-server.yaml │ ├── kubernetes-master.yaml │ ├── kubernetes-worker.yaml │ ├── network.yaml │ ├── starrydough-master-arm.yaml │ ├── starrydough-master-x86.yaml │ ├── starrydough-worker-arm.yaml │ └── starrydough-worker-x86.yaml ├── pihole ├── defaults │ └── main.yml ├── files │ └── resolv.conf ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ ├── dns.yml │ └── main.yml └── templates │ ├── dns-over-tls.j2 │ └── dns.j2 ├── plex ├── meta │ └── main.yml └── tasks │ └── main.yml ├── prometheus ├── defaults │ └── main.yml ├── files │ └── prometheus.yml ├── meta │ └── main.yml └── tasks │ ├── main.yml │ ├── nginx_exporter.yml │ └── node_exporter.yml ├── site.yml ├── starrydough.oci.yaml ├── swap ├── defaults │ └── main.yml └── tasks │ └── main.yml ├── tautulli ├── meta │ └── main.yml └── tasks │ └── main.yml └── watchtower ├── meta └── main.yml └── tasks └── main.yml /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2020 Richard Huang 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, 
sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Homelab 2 | Ansible playbook to terraform and configure some apps on my homelab. 3 | 4 | ## Roles 5 | ``` 6 | bitwarden > password manager 7 | caddy > webserver, reverse proxy, ssl termination, with automatic tls 8 | docker > container management tool 9 | firewalld > firewall management tool 10 | grafana > analytics and graph composer 11 | nextcloud > file hosting service 12 | minecraft > sandbox game server 13 | pihole > DNS sinkhole for adblocking 14 | plex > media server 15 | prometheus > monitoring system 16 | swap > add some swap 17 | tautulli > monitoring and tracking tool for plex 18 | watchtower > automated docker container base image updates 19 | 20 | # Deprecating... 21 | letsencrypt > open certificate authority 22 | nginx > webserver, reverse proxy, ssl termination (with Let's Encrypt) 23 | ``` 24 | 25 | ## Usage 26 | Edit the services you want in `site.yml`. 27 | Modify the default variables as desired. 28 | Change the `hosts` file to include the server. 29 | Run the ansible playbook to terraform and configure the server! 30 | ``` 31 | ansible-playbook site.yml 32 | ``` 33 | 34 | You can also run the playbook on an arbitrary server without specifying it in the `hosts` file. 
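The trailing comma after `user@server` makes Ansible treat the `-i` value as an inline list of hosts rather than a path to an inventory file.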
35 | ``` 36 | ansible-playbook -i user@server, site.yml 37 | ``` 38 | -------------------------------------------------------------------------------- /aeolyus.oci.yaml: -------------------------------------------------------------------------------- 1 | plugin: oracle.oci.oci 2 | config_profile: aeolyus 3 | -------------------------------------------------------------------------------- /aeolyusplex.oci.yaml: -------------------------------------------------------------------------------- 1 | plugin: oracle.oci.oci 2 | config_profile: aeolyusplex 3 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | strategy = free 3 | inventory = hosts 4 | stdout_callback = yaml 5 | display_ok_hosts = no 6 | display_skipped_hosts = false 7 | interpreter_python = /usr/bin/python3 8 | host_key_checking = false 9 | -------------------------------------------------------------------------------- /bitwarden/meta/main.yml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - role: docker 3 | -------------------------------------------------------------------------------- /bitwarden/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Check bitwarden env 2 | stat: 3 | path: "/data/bitwarden/.env" 4 | register: bitwarden_env 5 | 6 | - name: Create bitwarden directory 7 | file: 8 | path: "/data/bitwarden" 9 | state: directory 10 | when: not bitwarden_env.stat.exists 11 | 12 | - name: Generate bitwarden admin token 13 | set_fact: 14 | bitwarden_admin_token: "{{ lookup('password', '/dev/null length=16 chars=ascii_letters') }}" 15 | when: not bitwarden_env.stat.exists 16 | 17 | - debug: 18 | msg: "Bitwarden admin token: {{ bitwarden_admin_token }}" 19 | changed_when: true 20 | when: not bitwarden_env.stat.exists 21 | 22 | - name: Template bitwarden env 23 | template: 24 | src: "env.j2" 25 | dest: "/data/bitwarden/.env" 26 | when: not bitwarden_env.stat.exists 27 | 28 | - name: Bitwarden container present 29 | docker_container: 30 | name: bitwarden 31 | image: bitwardenrs/server:alpine 32 | restart_policy: unless-stopped 33 | ports: 34 | - "3011:80" 35 | - "3012:3012" 36 | volumes: 37 | - "/data/bitwarden/:/data/" 38 | env_file: "/data/bitwarden/.env" 39 | 40 | - name: Setup caddy for bitwarden 41 | include_role: 42 | name: caddy 43 | vars: 44 | caddyfile_marker: "# {mark} ANSIBLE Bitwarden" 45 | caddyfile_block: | 46 | vault.{{ domain }} { 47 | reverse_proxy localhost:3011 48 | reverse_proxy /notifications/hub localhost:3012 49 | reverse_proxy /notifications/hub/negotiate localhost:3011 50 | } 51 | -------------------------------------------------------------------------------- /bitwarden/templates/env.j2: -------------------------------------------------------------------------------- 1 | DOMAIN=https://vault.{{ domain }} 2 | SIGNUPS_ALLOWED=false 3 | ADMIN_TOKEN={{ bitwarden_admin_token }} 4 | -------------------------------------------------------------------------------- /bitwarden/templates/vault.j2: -------------------------------------------------------------------------------- 1 | # [DEPRECATED] moved to caddy 2 | server { 3 | listen 80; 4 | listen [::]:80; 5 | server_name vault.*; 6 | return 301 https://$host$request_uri; 7 | limit_req zone=http burst=100; 8 | } 9 | 10 | server { 11 | listen 443 ssl http2; 12 | server_name vault.*; 13 | 14 | ssl_certificate 
/etc/letsencrypt/live/{{ hostname }}/fullchain.pem; 15 | ssl_certificate_key /etc/letsencrypt/live/{{ hostname }}/privkey.pem; 16 | 17 | limit_req zone=http burst=100; 18 | 19 | # Allow large attachments 20 | client_max_body_size 128M; 21 | 22 | location / { 23 | proxy_pass http://127.0.0.1:3011; 24 | proxy_set_header Host $host; 25 | proxy_set_header X-Real-IP $remote_addr; 26 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 27 | proxy_set_header X-Forwarded-Proto $scheme; 28 | } 29 | 30 | location /notifications/hub { 31 | proxy_pass http://127.0.0.1:3012; 32 | proxy_set_header Upgrade $http_upgrade; 33 | proxy_set_header Connection "upgrade"; 34 | } 35 | 36 | location /notifications/hub/negotiate { 37 | proxy_pass http://127.0.0.1:3011; 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /caddy/defaults/main.yml: -------------------------------------------------------------------------------- 1 | domain: aeoly.us 2 | public: true 3 | caddyfile_marker: "# {mark} ANSIBLE Default" 4 | # TODO: remove staging 5 | caddyfile_block: | 6 | {{ domain }} { 7 | reverse_proxy https://aeolyus.github.io { 8 | header_up Host aeolyus.github.io 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /caddy/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: Reload caddy config 2 | command: docker exec caddy caddy reload --config /etc/caddy/Caddyfile --adapter caddyfile 3 | -------------------------------------------------------------------------------- /caddy/meta/main.yml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - role: docker 3 | -------------------------------------------------------------------------------- /caddy/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Check if Caddyfile exists 2 | stat: 3 | path: "/data/caddy/Caddyfile" 4 | register: caddyfile_check 5 | 6 | - name: Create caddy directory 7 | file: 8 | path: "/data/caddy/" 9 | state: directory 10 | when: not caddyfile_check.stat.exists 11 | 12 | - name: Create Caddyfile 13 | file: 14 | path: "/data/caddy/Caddyfile" 15 | state: touch 16 | when: not caddyfile_check.stat.exists 17 | 18 | - name: Setup firewall 19 | include_role: 20 | name: firewalld 21 | vars: 22 | firewalld_services: ["http", "https"] 23 | when: public | d(False) 24 | 25 | - name: Caddy container present 26 | docker_container: 27 | name: caddy 28 | image: caddy:alpine 29 | restart_policy: unless-stopped 30 | network_mode: host 31 | volumes: 32 | - "/data/caddy/Caddyfile:/etc/caddy/Caddyfile" 33 | - "/data/caddy/caddy_data:/data" 34 | when: public | d(False) 35 | 36 | - name: Setup Caddyfile 37 | blockinfile: 38 | path: "/data/caddy/Caddyfile" 39 | marker: "{{ caddyfile_marker }}" 40 | block: "{{ caddyfile_block }}" 41 | create: true 42 | register: caddy_config 43 | 44 | - name: Check if caddy container is running 45 | docker_container_info: 46 | name: caddy 47 | register: caddy 48 | 49 | - name: Restart caddy to apply config changes 50 | command: docker restart caddy 51 | when: caddy.exists and caddy_config.changed 52 | -------------------------------------------------------------------------------- /docker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Check docker and pip are installed 2 | package: 3 | name: [docker.io, python3-pip] 4 | state: present 5 | update_cache: true 6 | cache_valid_time: 3600 7 | 8 | - name: Install docker module for python 9 | pip: 10 | name: docker 11 | 12 | - name: Ensure docker service is started and enabled 13 | service: 14 | name: docker 15 | state: started 16 | enabled: true 17 | -------------------------------------------------------------------------------- /firewalld/defaults/main.yml: -------------------------------------------------------------------------------- 1 | firewalld_services: [] 2 | firewalld_ports: [] 3 | -------------------------------------------------------------------------------- /firewalld/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Check firewalld is installed 2 | become: yes 3 | package: 4 | name: firewalld 5 | state: present 6 | update_cache: true 7 | cache_valid_time: 3600 8 | register: firewalld_installed 9 | 10 | - name: Open service ports 11 | become: yes 12 | loop: "{{ firewalld_services }}" 13 | ansible.posix.firewalld: 14 | service: "{{ item }}" 15 | permanent: true 16 | immediate: true 17 | state: enabled 18 | register: firewalld_services_changed 19 | 20 | - name: Open ports 21 | become: yes 22 | loop: "{{ firewalld_ports }}" 23 | ansible.posix.firewalld: 24 | port: "{{ item }}" 25 | permanent: true 26 | immediate: true 27 | state: enabled 28 | register: firewalld_ports_changed 29 | 30 | - name: Restart firewalld 31 | become: yes 32 | service: 33 | name: firewalld 34 | state: restarted 35 | enabled: true 36 | when: firewalld_installed.changed or firewalld_services_changed.changed or firewalld_ports_changed.changed 37 | register: firewalld_restarted 38 | 39 | - name: Populate service facts 40 | service_facts: 41 | 42 | - name: Restart docker after restarting firewalld 43 | become: yes 44 | service: 45 | name: docker 46 | state: restarted 47 | enabled: true 48 | when: firewalld_restarted.changed and 'docker.service' in services 49 | -------------------------------------------------------------------------------- /grafana/defaults/main.yml: -------------------------------------------------------------------------------- 1 | gf_auth: 2 | hide_login_form: "true" 3 | anonymous_enabled: "true" 4 | anonymous_org_role: "Viewer" 5 | gf_alerting: 6 | enabled: "false" 7 | -------------------------------------------------------------------------------- /grafana/files/dashboard_provider.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | # a unique provider name 5 | - name: 'a unique provider name' 6 | # org id. will default to orgId 1 if not specified 7 | orgId: 1 8 | # name of the dashboard folder. Required 9 | folder: '' 10 | # folder UID. will be automatically generated if not specified 11 | folderUid: '' 12 | # provider type. Required 13 | type: file 14 | # disable dashboard deletion 15 | disableDeletion: false 16 | # enable dashboard editing 17 | editable: true 18 | # how often Grafana will scan for changed dashboards 19 | updateIntervalSeconds: 10 20 | # allow updating provisioned dashboards from the UI 21 | allowUiUpdates: false 22 | options: 23 | # path to dashboard files on disk. 
Required 24 | path: /var/lib/grafana/dashboards 25 | -------------------------------------------------------------------------------- /grafana/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: Generate grafana password 2 | set_fact: 3 | grafana_password: "{{ lookup('password', '/dev/null length=16 chars=ascii_letters') }}" 4 | 5 | - name: Set grafana password 6 | shell: "docker exec grafana grafana-cli admin reset-admin-password {{ grafana_password }}" 7 | 8 | - name: Print grafana password 9 | debug: 10 | msg: "Grafana password: {{ grafana_password }}" 11 | changed_when: true 12 | 13 | - name: Add prometheus datasource 14 | grafana_datasource: 15 | name: "prometheus" 16 | ds_type: "prometheus" 17 | ds_url: "http://localhost:9090" 18 | grafana_url: "http://localhost:3000" 19 | grafana_user: "admin" 20 | grafana_password: "{{ grafana_password }}" 21 | 22 | - name: Set preferred home dashboard 23 | uri: 24 | url: "http://localhost:3000/api/org/preferences" 25 | user: admin 26 | password: "{{ grafana_password }}" 27 | method: PUT 28 | body: '{"homeDashboardId":1}' 29 | force_basic_auth: true 30 | status_code: 200 31 | body_format: json 32 | -------------------------------------------------------------------------------- /grafana/meta/main.yml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - role: docker 3 | - role: prometheus 4 | -------------------------------------------------------------------------------- /grafana/tasks/dashboards.yml: -------------------------------------------------------------------------------- 1 | - name: Setup dashboard provider directory 2 | file: 3 | path: "/data/grafana/provisioning/dashboards" 4 | state: directory 5 | 6 | - name: Setup dashboards directory 7 | file: 8 | path: "/data/grafana/dashboards" 9 | state: directory 10 | 11 | - name: Setup dashboard provider 12 | copy: 13 | src: "dashboard_provider.yml" 14 | dest: "/data/grafana/provisioning/dashboards/dashboard_provider.yml" 15 | 16 | - name: Setup home dashboard 17 | copy: 18 | src: "home.json" 19 | dest: "/data/grafana/dashboards/home.json" 20 | 21 | - name: Check nginx dashboard exists 22 | stat: 23 | path: "/data/grafana/dashboards/nginx.json" 24 | register: grafana_nginx_dashboard 25 | 26 | - name: Setup nginx dashboard 27 | get_url: 28 | url: "https://git.io/JvSCB" 29 | dest: "/data/grafana/dashboards/nginx.json" 30 | when: not grafana_nginx_dashboard.stat.exists 31 | 32 | - name: Replace ${DS_PROMETHEUS} with prometheus in nginx dashboard 33 | replace: 34 | path: "/data/grafana/dashboards/nginx.json" 35 | regexp: "\\${DS_PROMETHEUS}" 36 | replace: "prometheus" 37 | when: not grafana_nginx_dashboard.stat.exists 38 | -------------------------------------------------------------------------------- /grafana/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Setup grafana dashboard 2 | import_tasks: dashboards.yml 3 | 4 | - name: Grafana container present 5 | docker_container: 6 | name: grafana 7 | image: grafana/grafana 8 | user: root 9 | restart_policy: unless-stopped 10 | network_mode: host 11 | volumes: 12 | - "/data/grafana/:/var/lib/grafana" 13 | env: 14 | GF_AUTH_HIDE_LOGIN_FORM: "{{ gf_auth.hide_login_form }}" 15 | GF_AUTH_ANONYMOUS_ENABLED: "{{ gf_auth.anonymous_enabled }}" 16 | GF_AUTH_ANONYMOUS_ORG_ROLE: "{{ gf_auth.anonymous_org_role }}" 17 | GF_ALERTING_ENABLED: "{{ gf_alerting.enabled }}" 18 | GF_PATHS_PROVISIONING: 
"/var/lib/grafana/provisioning" 19 | register: grafana_container 20 | notify: 21 | - Generate grafana password 22 | - Set grafana password 23 | - Print grafana password 24 | - Add prometheus datasource 25 | - Set preferred home dashboard 26 | 27 | - name: Setup caddy for grafana 28 | include_role: 29 | name: caddy 30 | vars: 31 | caddyfile_marker: "# {mark} ANSIBLE Grafana" 32 | caddyfile_block: | 33 | monitor.{{ domain }} { 34 | reverse_proxy localhost:3000 35 | } 36 | -------------------------------------------------------------------------------- /grafana/templates/monitor.j2: -------------------------------------------------------------------------------- 1 | # [DEPRECATED] moved to caddy 2 | server { 3 | listen 80 ; 4 | listen [::]:80 ; 5 | server_name monitor.*; 6 | return 301 https://$host$request_uri; 7 | limit_req zone=http burst=100; 8 | } 9 | 10 | server { 11 | listen 443 ssl http2; 12 | server_name monitor.*; 13 | 14 | ssl_certificate /etc/letsencrypt/live/{{ hostname }}/fullchain.pem; 15 | ssl_certificate_key /etc/letsencrypt/live/{{ hostname }}/privkey.pem; 16 | 17 | limit_req zone=http burst=100; 18 | 19 | location / { 20 | proxy_pass http://127.0.0.1:3000; 21 | } 22 | } 23 | 24 | -------------------------------------------------------------------------------- /hosts: -------------------------------------------------------------------------------- 1 | [prod] 2 | aeoly.us 3 | 4 | [dev] 5 | ifstatementsarenot.ml domain=ifstatementsarenot.ml 6 | ifstatementsarejust.ml domain=ifstatementsarejust.ml 7 | notmachinelearning.ml domain=notmachinelearning.ml 8 | itsnickyang.ml domain=itsnickyang.ml 9 | itsnotnickyang.ml domain=itsnotnickyang.ml 10 | 11 | [prod:vars] 12 | domain=aeoly.us 13 | ansible_user=ubuntu 14 | 15 | [dev:vars] 16 | ansible_user=ubuntu 17 | -------------------------------------------------------------------------------- /innernet-server/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | innernet_version: "v1.4.1" 2 | 3 | innernet_interface_name: "magic-base" 4 | innernet_interface_cidr: 10.42.0.0/16 5 | innernet_server_port: 51820 6 | innernet_listen_port: 51820 7 | 8 | innernet_server_groups: 9 | - name: "humans" 10 | cidr: "10.42.1.0/24" 11 | -------------------------------------------------------------------------------- /innernet-server/meta/main.yml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - role: firewalld 3 | vars: 4 | firewalld_ports: 5 | - "51820/tcp" # innernet server 6 | - "51820/udp" # wireguard 7 | -------------------------------------------------------------------------------- /innernet-server/tasks/generate-invites.yaml: -------------------------------------------------------------------------------- 1 | - name: Check for existence of innernet peer "{{ peer.hostname }}" 2 | shell: > 3 | sqlite3 /var/lib/innernet-server/{{ innernet_interface_name }}.db 4 | "SELECT * FROM peers WHERE name == '{{ peer.hostname }}';" 5 | register: result 6 | changed_when: false 7 | - set_fact: 8 | peer_exists: "{{ result.stdout | length > 0 }}" 9 | invite_path: "/tmp/{{ peer.hostname }}-innernet-invite.toml" 10 | 11 | - name: Generate innernet invite for "{{ peer.hostname }}" 12 | become: yes 13 | shell: > 14 | innernet-server \ 15 | add-peer {{ innernet_interface_name }} 16 | --auto-ip 17 | --admin true 18 | --cidr {{ innernet_cidr_name }} 19 | --name {{ peer.hostname }} 20 | --save-config {{ invite_path }} 21 | --invite-expires 1d --yes 22 | args: 23 | 
creates: "{{ invite_path }}" 24 | when: not peer_exists 25 | 26 | - name: Copy invite file to local 27 | synchronize: 28 | mode: pull 29 | src: "{{ invite_path }}" 30 | dest: "{{ invite_path }}" 31 | when: not peer_exists 32 | 33 | - name: Delete invite file on server 34 | become: yes 35 | file: 36 | path: "{{ invite_path }}" 37 | state: absent 38 | -------------------------------------------------------------------------------- /innernet-server/tasks/groups.yaml: -------------------------------------------------------------------------------- 1 | - name: Check for existence of innernet group "{{ group.name }}" 2 | shell: > 3 | sqlite3 /var/lib/innernet-server/{{ innernet_interface_name }}.db 4 | "SELECT * FROM cidrs WHERE name == '{{ group.name }}';" 5 | register: result 6 | changed_when: false 7 | - set_fact: 8 | group_exists: "{{ result.stdout | length > 0 }}" 9 | 10 | - name: Add innernet group "{{ group.name }}" 11 | become: yes 12 | shell: > 13 | innernet-server 14 | add-cidr {{ innernet_interface_name }} 15 | --name {{ group.name }} 16 | --cidr {{ group.cidr }} 17 | --parent {{ innernet_interface_name }} 18 | --yes 19 | when: not group_exists 20 | -------------------------------------------------------------------------------- /innernet-server/tasks/install.yaml: -------------------------------------------------------------------------------- 1 | - name: Install dependencies 2 | become: yes 3 | package: 4 | name: [git, wireguard, cargo, libclang-dev, libsqlite3-dev, sqlite3, rsync] 5 | state: present 6 | update_cache: true 7 | cache_valid_time: 3600 8 | 9 | - name: Check if innernet has been installed 10 | stat: 11 | path: /usr/bin/innernet-server 12 | register: result 13 | 14 | - name: 15 | set_fact: 16 | innernet_exists: "{{ result.stat.exists }}" 17 | changed_when: false 18 | 19 | - name: Install innernet 20 | block: 21 | - name: Clone git repo 22 | git: 23 | repo: "https://github.com/tonarino/innernet" 24 | dest: innernet/ 25 | version: "{{ innernet_version }}" 26 | 27 | - name: Install cargo-deb 28 | shell: cargo install cargo-deb 29 | 30 | - name: Build and install server deb package 31 | shell: cargo deb --install -p server 32 | args: 33 | chdir: innernet/ 34 | creates: "usr/bin/innernet-server" 35 | when: not innernet_exists 36 | 37 | - name: Clean up post-installation 38 | file: 39 | state: absent 40 | path: innernet/ 41 | -------------------------------------------------------------------------------- /innernet-server/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Install innernet 2 | import_tasks: install.yaml 3 | 4 | - name: Setup server 5 | import_tasks: server.yaml 6 | -------------------------------------------------------------------------------- /innernet-server/tasks/server.yaml: -------------------------------------------------------------------------------- 1 | - name: Get public IP address 2 | uri: 3 | url: http://ifconfig.me/ip 4 | return_content: true 5 | register: result 6 | - set_fact: 7 | server_public_ip: "{{ result.content }}" 8 | 9 | - name: Create interface 10 | become: yes 11 | shell: > 12 | innernet-server 13 | new 14 | --external-endpoint {{ server_public_ip }}:{{ innernet_server_port }} 15 | --listen-port {{ innernet_listen_port }} 16 | --network-cidr {{ innernet_interface_cidr }} 17 | --network-name {{ innernet_interface_name }} 18 | args: 19 | creates: "/var/lib/innernet-server/{{ innernet_interface_name }}.db" 20 | 21 | - name: Create groups 22 | include_tasks: "groups.yaml" 23 | 
loop: "{{ innernet_server_groups }}" 24 | loop_control: 25 | loop_var: group 26 | 27 | - name: Get all nodes not connected to innernet 28 | set_fact: 29 | nodes_not_connected_to_innernet: "{{ groups['tag_innernet=true'] \ 30 | | map('extract', hostvars) \ 31 | | selectattr('innernet_ip', 'equalto', '') \ 32 | | list }}" 33 | 34 | - name: Generate invites 35 | include_tasks: "generate-invites.yaml" 36 | loop: "{{ nodes_not_connected_to_innernet }}" 37 | loop_control: 38 | loop_var: peer 39 | vars: 40 | innernet_cidr_name: "kube-nodes" 41 | 42 | - name: Enable interface systemctl service 43 | become: yes 44 | systemd: 45 | name: "innernet-server@{{ innernet_interface_name }}" 46 | state: started 47 | enabled: yes 48 | -------------------------------------------------------------------------------- /innernet/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | innernet_version: "v1.4.1" 2 | -------------------------------------------------------------------------------- /innernet/meta/main.yml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - role: firewalld 3 | vars: 4 | firewalld_ports: 5 | - "51820/udp" # wireguard 6 | -------------------------------------------------------------------------------- /innernet/tasks/get_ip.yaml: -------------------------------------------------------------------------------- 1 | - name: Get node innernet ip 2 | become: yes 3 | shell: > 4 | sudo innernet show "{{ innernet_interface_name }}" --tree 5 | | awk '/you,/ {print $3}' 6 | | sed 's/://g' 7 | register: result 8 | changed_when: false 9 | - set_fact: 10 | innernet_ip: "{{ result.stdout }}" 11 | 12 | - name: Get hostname 13 | shell: hostname 14 | register: result 15 | changed_when: false 16 | - set_fact: 17 | hostname: "{{ result.stdout }}" 18 | -------------------------------------------------------------------------------- /innernet/tasks/install-invite.yaml: -------------------------------------------------------------------------------- 1 | - name: Get innernet ip 2 | import_tasks: get_ip.yaml 3 | 4 | - set_fact: 5 | innernet_connected: "{{ innernet_ip | length > 0 }}" 6 | invite_path: "/tmp/{{ hostname }}-innernet-invite.toml" 7 | 8 | - name: Copy invite from local to node 9 | copy: 10 | src: "{{ invite_path }}" 11 | dest: "{{ invite_path }}" 12 | when: not innernet_connected 13 | 14 | - name: Delete invite file on local 15 | delegate_to: localhost 16 | file: 17 | path: "{{ invite_path }}" 18 | state: absent 19 | 20 | - name: Install innernet invites 21 | become: yes 22 | shell: innernet install {{ invite_path }} --default-name -d 23 | args: 24 | removes: "{{ invite_path }}" 25 | 26 | - name: Enable interface systemctl service 27 | become: yes 28 | systemd: 29 | name: "innernet@{{ innernet_interface_name }}" 30 | state: started 31 | enabled: yes 32 | -------------------------------------------------------------------------------- /innernet/tasks/install.yaml: -------------------------------------------------------------------------------- 1 | - name: Install dependencies 2 | become: yes 3 | package: 4 | name: [git, wireguard, cargo, libclang-dev, libsqlite3-dev] 5 | state: present 6 | update_cache: true 7 | cache_valid_time: 3600 8 | 9 | - name: Check if innernet has been installed 10 | stat: 11 | path: /usr/bin/innernet 12 | register: result 13 | 14 | - name: 15 | set_fact: 16 | innernet_exists: "{{ result.stat.exists }}" 17 | changed_when: false 18 | 19 | - name: Install innernet 20 | block: 
21 | - name: Clone git repo 22 | git: 23 | repo: "https://github.com/tonarino/innernet" 24 | dest: innernet/ 25 | version: "{{ innernet_version }}" 26 | 27 | - name: Install cargo-deb 28 | shell: cargo install cargo-deb 29 | 30 | - name: Build and install client deb package 31 | shell: cargo deb --install -p client 32 | args: 33 | chdir: innernet/ 34 | creates: "/usr/bin/innernet" 35 | when: not innernet_exists 36 | 37 | - name: Clean up post-installation 38 | file: 39 | state: absent 40 | path: innernet/ 41 | -------------------------------------------------------------------------------- /innernet/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Install innernet 2 | import_tasks: install.yaml 3 | 4 | - name: Get innernet IP 5 | import_tasks: get_ip.yaml 6 | -------------------------------------------------------------------------------- /k3s-initial-master/defaults/main.yml: -------------------------------------------------------------------------------- 1 | k3s_token_file_location: "/var/lib/rancher/k3s/server/node-token" 2 | -------------------------------------------------------------------------------- /k3s-initial-master/meta/main.yml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - role: firewalld 3 | vars: 4 | firewalld_ports: 5 | - "6443/tcp" # Kube api 6 | - "2379/tcp" # etcd client requests 7 | - "2380/tcp" # etcd peer communication 8 | - "10250/tcp" # kubelet 9 | firewalld_services: ["https"] 10 | -------------------------------------------------------------------------------- /k3s-initial-master/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Check if k3s token exists 2 | become: true 3 | register: k3s_token_file_check 4 | stat: 5 | path: "{{ k3s_token_file_location }}" 6 | 7 | - name: Grab and set the existing k3s token 8 | when: k3s_token_file_check.stat.exists 9 | block: 10 | - name: Get the existing k3s token 11 | become: true 12 | shell: cut {{ k3s_token_file_location }} -d ":" -f4 13 | register: k3s_token_file 14 | changed_when: false 15 | - name: Set k3s token 16 | set_fact: 17 | k3s_token: "{{ k3s_token_file.stdout }}" 18 | 19 | - name: Install initial k3s master 20 | when: not k3s_token_file_check.stat.exists 21 | block: 22 | - name: Generate k3s token 23 | set_fact: 24 | k3s_token: "{{ lookup('password', '/dev/null length=16 chars=ascii_letters') }}" 25 | - name: Download k3s installer 26 | get_url: 27 | url: https://get.k3s.io 28 | dest: /tmp/k3s-installer.sh 29 | - name: Run the k3s installer 30 | become: yes 31 | shell: > 32 | K3S_TOKEN={{ k3s_token }} 33 | bash /tmp/k3s-installer.sh 34 | server 35 | --cluster-init 36 | --node-taint CriticalAddonsOnly=true:NoExecute 37 | --node-ip {{ node_ip | default(ansible_host) }} 38 | --disable-cloud-controller 39 | --disable traefik 40 | --flannel-backend wireguard 41 | 42 | - name: Allow kubectl without sudo 43 | become: true 44 | file: 45 | path: /etc/rancher 46 | owner: "{{ ansible_user }}" 47 | recurse: true 48 | -------------------------------------------------------------------------------- /k3s-master/defaults/main.yml: -------------------------------------------------------------------------------- 1 | k3s_token_file_location: "/var/lib/rancher/k3s/server/node-token" 2 | k3s_token: "{{ hostvars[groups['k3s_initial_master'][0]]['k3s_token'] }}" 3 | k3s_initial_master_ip: "{{ hostvars[groups['k3s_initial_master'][0]]['ansible_host'] }}" 4 | 
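# The two hostvars lookups above assume an inventory that defines the
# k3s_initial_master, k3s_master, and k3s_worker groups targeted by
# k3s-playbook.yml. A minimal sketch, with hypothetical hostnames and
# addresses:
#
#   [k3s_initial_master]
#   master-0 ansible_host=203.0.113.10
#
#   [k3s_master]
#   master-1 ansible_host=203.0.113.11
#
#   [k3s_worker]
#   worker-0 ansible_host=203.0.113.20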
-------------------------------------------------------------------------------- /k3s-master/meta/main.yml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - role: firewalld 3 | vars: 4 | firewalld_ports: 5 | - "6443/tcp" # Kube api 6 | - "2379/tcp" # etcd client requests 7 | - "2380/tcp" # etcd peer communication 8 | - "10250/tcp" # kubelet 9 | firewalld_services: ["https"] 10 | -------------------------------------------------------------------------------- /k3s-master/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Download k3s installer 2 | get_url: 3 | url: https://get.k3s.io 4 | dest: /tmp/k3s-installer.sh 5 | 6 | - name: Change k3s installer to be executable 7 | file: 8 | dest: /tmp/k3s-installer.sh 9 | mode: +x 10 | 11 | - name: Run the k3s installer 12 | become: yes 13 | shell: > 14 | K3S_TOKEN={{ k3s_token }} 15 | /tmp/k3s-installer.sh 16 | server 17 | --server https://{{ k3s_initial_master_ip }}:6443 18 | --node-taint CriticalAddonsOnly=true:NoExecute 19 | --node-ip {{ node_ip | default(ansible_host) }} 20 | --disable-cloud-controller 21 | --disable traefik 22 | --flannel-backend wireguard 23 | args: 24 | creates: "{{ k3s_token_file_location }}" 25 | 26 | - name: Allow kubectl without sudo 27 | become: true 28 | file: 29 | path: /etc/rancher 30 | owner: "{{ ansible_user }}" 31 | recurse: true 32 | -------------------------------------------------------------------------------- /k3s-playbook.yml: -------------------------------------------------------------------------------- 1 | - name: Check prerequisite packages are installed 2 | hosts: k3s_initial_master:k3s_master:k3s_worker:!localhost 3 | become: true 4 | tasks: 5 | - package: 6 | name: [wireguard, nfs-common] 7 | state: present 8 | update_cache: true 9 | cache_valid_time: 3600 10 | 11 | - name: Setup initial k3s master node 12 | hosts: k3s_initial_master:!localhost 13 | become: true 14 | roles: 15 | - swap 16 | - k3s-initial-master 17 | 18 | - name: Setup other k3s master nodes 19 | hosts: k3s_master:!localhost 20 | become: true 21 | roles: 22 | - swap 23 | - k3s-master 24 | 25 | - name: Setup k3s worker nodes 26 | hosts: k3s_worker:!localhost 27 | become: true 28 | roles: 29 | - swap 30 | - k3s-worker 31 | -------------------------------------------------------------------------------- /k3s-worker/defaults/main.yml: -------------------------------------------------------------------------------- 1 | k3s_token: "{{ hostvars[groups['k3s_initial_master'][0]]['k3s_token'] }}" 2 | k3s_initial_master_ip: "{{ hostvars[groups['k3s_initial_master'][0]]['ansible_host'] }}" 3 | -------------------------------------------------------------------------------- /k3s-worker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Populate service facts 2 | service_facts: 3 | 4 | - name: Download k3s installer 5 | get_url: 6 | url: https://get.k3s.io 7 | dest: /tmp/k3s-installer.sh 8 | 9 | - name: Change k3s installer to be executable 10 | file: 11 | dest: /tmp/k3s-installer.sh 12 | mode: +x 13 | 14 | - name: Run the k3s installer 15 | become: yes 16 | shell: > 17 | K3S_URL=https://{{ k3s_initial_master_ip }}:6443 18 | K3S_TOKEN={{ k3s_token }} 19 | /tmp/k3s-installer.sh 20 | when: "'k3s-agent.service' not in services" 21 | -------------------------------------------------------------------------------- /letsencrypt/defaults/main.yml: 
-------------------------------------------------------------------------------- 1 | certbot_admin_email: richardhuang.huang@gmail.com 2 | -------------------------------------------------------------------------------- /letsencrypt/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Add certbot repository 2 | apt_repository: 3 | repo: 'ppa:certbot/certbot' 4 | 5 | - name: Install certbot's nginx package 6 | package: 7 | name: python-certbot-nginx 8 | state: present 9 | update_cache: true 10 | cache_valid_time: 3600 11 | 12 | - name: Check domain certificate 13 | stat: 14 | path: "/etc/letsencrypt/live/{{ hostname }}/cert.pem" 15 | register: cert 16 | 17 | - name: Generate new certificate for domain if one doesn't exist. 18 | shell: "certbot certonly --nginx --noninteractive --agree-tos --email {{ certbot_admin_email }} -d {{ hostname }}" 19 | when: not cert.stat.exists 20 | -------------------------------------------------------------------------------- /minecraft/meta/main.yml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - role: docker 3 | - role: firewalld 4 | vars: 5 | firewalld_ports: ["25565/tcp", "25565/udp", "8123/tcp", "8804/tcp"] 6 | -------------------------------------------------------------------------------- /minecraft/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Minecraft container present 2 | docker_container: 3 | name: mc 4 | image: itzg/minecraft-server 5 | restart_policy: unless-stopped 6 | ports: 7 | - "25565:25565" 8 | - "8123:8123" # Dynmap 9 | - "8804:8804" # Player Analytics 10 | volumes: 11 | - "/data/mc/:/data/" 12 | env: 13 | EULA: "TRUE" 14 | TYPE: "PAPER" 15 | VERSION: "LATEST" 16 | MEMORY: "3G" 17 | command: 18 | # Some older versions (pre-1.14) of Spigot require this when detaching stdin 19 | - "--noconsole" 20 | 21 | - name: Setup caddy for minecraft 22 | include_role: 23 | name: caddy 24 | vars: 25 | caddyfile_marker: "# {mark} ANSIBLE Minecraft" 26 | caddyfile_block: | 27 | mc.{{ domain }} { 28 | reverse_proxy localhost:8123 29 | reverse_proxy /stats localhost:8804 30 | } 31 | -------------------------------------------------------------------------------- /nextcloud/meta/main.yml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - role: docker 3 | - role: firewalld 4 | vars: 5 | firewalld_ports: ["4381/tcp"] 6 | -------------------------------------------------------------------------------- /nextcloud/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Nextcloud container present 2 | docker_container: 3 | name: nextcloud 4 | image: nextcloud 5 | restart_policy: unless-stopped 6 | ports: 7 | - "4381:80" 8 | volumes: 9 | - "/data/nextcloud/:/var/www/html" 10 | env: 11 | SQLITE_DATABASE: "nextcloud-sqlite-db" 12 | NEXTCLOUD_TRUSTED_DOMAINS: "nextcloud.{{ domain }}" 13 | 14 | - name: Setup caddy for nextcloud 15 | include_role: 16 | name: caddy 17 | vars: 18 | caddyfile_marker: "# {mark} ANSIBLE Nextcloud" 19 | caddyfile_block: | 20 | nextcloud.{{ domain }} { 21 | reverse_proxy localhost:4381 22 | } 23 | -------------------------------------------------------------------------------- /nginx/defaults/main.yml: -------------------------------------------------------------------------------- 1 | domain: aeoly.us 2 | hostname: "{{ domain }}" 3 | nginx_conf_file: 
"nginx.conf" 4 | nginx_template_file: "default" 5 | -------------------------------------------------------------------------------- /nginx/files/nginx.conf: -------------------------------------------------------------------------------- 1 | user www-data; 2 | worker_processes auto; 3 | pid /run/nginx.pid; 4 | include /etc/nginx/modules-enabled/*.conf; 5 | 6 | events { 7 | worker_connections 768; 8 | # multi_accept on; 9 | } 10 | 11 | http { 12 | 13 | ## 14 | # Basic Settings 15 | ## 16 | 17 | sendfile on; 18 | tcp_nopush on; 19 | tcp_nodelay on; 20 | keepalive_timeout 65; 21 | types_hash_max_size 2048; 22 | # server_tokens off; 23 | 24 | # server_names_hash_bucket_size 64; 25 | # server_name_in_redirect off; 26 | 27 | include /etc/nginx/mime.types; 28 | default_type application/octet-stream; 29 | 30 | ## 31 | # SSL Settings 32 | ## 33 | 34 | ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE 35 | ssl_prefer_server_ciphers on; 36 | 37 | ## 38 | # Logging Settings 39 | ## 40 | 41 | access_log /var/log/nginx/access.log; 42 | error_log /var/log/nginx/error.log; 43 | 44 | ## 45 | # Gzip Settings 46 | ## 47 | 48 | gzip on; 49 | 50 | # gzip_vary on; 51 | # gzip_proxied any; 52 | # gzip_comp_level 6; 53 | # gzip_buffers 16 8k; 54 | # gzip_http_version 1.1; 55 | # gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript; 56 | 57 | ## 58 | # Virtual Host Configs 59 | ## 60 | 61 | include /etc/nginx/conf.d/*.conf; 62 | include /etc/nginx/sites-enabled/*; 63 | } 64 | 65 | ## 66 | # DNS over TLS 67 | ## 68 | stream { 69 | include /etc/nginx/streams/*; 70 | } 71 | -------------------------------------------------------------------------------- /nginx/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Check nginx is installed 2 | package: 3 | name: nginx 4 | state: present 5 | update_cache: true 6 | cache_valid_time: 3600 7 | 8 | - name: Setup firewall 9 | include_role: 10 | role: firewalld 11 | vars: 12 | firewalld_services: ["http", "https"] 13 | 14 | # [DEPRECATED] moved to caddy 15 | # - name: Get letsencrypt certs 16 | # include_role: 17 | # role: letsencrypt 18 | 19 | - name: Remove default file 20 | file: 21 | path: "/etc/nginx/sites-enabled/default" 22 | state: absent 23 | 24 | - name: Template nginx configuration file 25 | copy: 26 | src: "nginx.conf" 27 | dest: "/etc/nginx/nginx.conf" 28 | register: nginx_conf 29 | 30 | # [DEPRECATED] moved to caddy 31 | # - name: Check nginx site configuration 32 | # template: 33 | # src: "{{ nginx_template_file }}.j2" 34 | # dest: "/etc/nginx/sites-available/{{ hostname }}" 35 | # register: nginx_template 36 | 37 | # - name: Ensure nginx site symlink 38 | # file: 39 | # src: "/etc/nginx/sites-available/{{ hostname }}" 40 | # dest: "/etc/nginx/sites-enabled/{{ hostname }}" 41 | # state: link 42 | # register: nginx_symlink 43 | 44 | - name: Ensure nginx stream configuration 45 | block: 46 | - file: 47 | path: "/etc/nginx/streams/" 48 | state: directory 49 | - template: 50 | src: "{{ nginx_stream_file }}.j2" 51 | dest: "/etc/nginx/streams/{{ nginx_stream_file }}" 52 | register: nginx_stream 53 | when: nginx_stream_file is defined 54 | 55 | - name: Restart nginx 56 | service: 57 | name: nginx 58 | state: restarted 59 | enabled: true 60 | when: > 61 | nginx_stream is defined and nginx_stream.changed or 62 | nginx_conf.changed 63 | -------------------------------------------------------------------------------- 
/nginx/templates/default.j2: -------------------------------------------------------------------------------- 1 | # [DEPRECATED] moved to caddy 2 | limit_req_zone $binary_remote_addr zone=http:10m rate=100r/s; 3 | 4 | server { 5 | listen 80; 6 | listen [::]:80; 7 | limit_req zone=http burst=100; 8 | server_name {{ hostname }}; 9 | 10 | return 301 https://$host$request_uri; 11 | } 12 | 13 | server { 14 | listen 443 ssl; 15 | listen [::]:443 ssl ipv6only=on; 16 | limit_req zone=http burst=100; 17 | server_name {{ hostname }}; 18 | 19 | ssl_certificate /etc/letsencrypt/live/{{ hostname }}/fullchain.pem; 20 | ssl_certificate_key /etc/letsencrypt/live/{{ hostname }}/privkey.pem; 21 | 22 | location / { 23 | proxy_pass https://aeolyus.github.io; 24 | } 25 | } 26 | 27 | server { 28 | listen 8080; 29 | listen [::]:8080; 30 | 31 | location /stub_status { 32 | stub_status; 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /oci/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu 2 | 3 | ENV VIRTUAL_ENV=/opt/env 4 | ENV PATH="$VIRTUAL_ENV/bin:$PATH" 5 | 6 | # Install prerequisites 7 | RUN apt-get update && apt-get install -y \ 8 | --no-install-recommends \ 9 | ansible \ 10 | python3-venv 11 | RUN python3 -m venv $VIRTUAL_ENV 12 | RUN pip3 install wheel 13 | RUN pip3 install ansible 14 | RUN pip3 install oci 15 | RUN ansible-galaxy collection install -f oracle.oci 16 | 17 | # CMD ["/bin/bash"] 18 | CMD ["ansible-playbook", "/launch_compute_instance/main.yaml"] 19 | -------------------------------------------------------------------------------- /oci/README.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | This module automatically creates all the virtual machines available in the free tier of Oracle OCI, along with the basic necessary networking configs. Since I didn't want to deal with installing Oracle's OCI Ansible collection natively on my machines, I also containerized everything to make provisioning more convenient. Do note that some of these configs, such as the configs in `launch_compute_instance/vars`, are specific to my infrastructure and may not work for everyone. 3 | 4 | The default setup will provision 5 | - 2x x86 VM 1 OCPU 1GB RAM 6 | - 4x ARM VM 1 OCPU 6GB RAM 7 | 8 | And some basic networking 9 | - 1x Virtual Cloud Network 10 | - 1x Internet Gateway 11 | - 2x subnet (one for each machine type) 12 | - 1x route table to connect internet gateway to VCN 13 | - 1x ingress rules table 14 | - 1x egress rules table 15 | 16 | # Usage 17 | 1. Create a `config` file to store your Oracle OCI credentials. To create and grab your Oracle OCI credentials, navigate to your Oracle OCI profile -> API Keys. 18 | 2. Make a `keys` directory. Put the ssh keys you want to add to your VMs in `keys/ssh_authorized_keys` and your OCI private API key as `keys/oci.pem`. 19 | 3. Edit the variables in `launch_compute_instance/vars` to match your tenancy, image, instance hostname, compartment, etc. 20 | 4. Spin up the ansible container to provision your machines. 21 | ``` 22 | docker-compose up 23 | ``` 24 | 5. Profit! 25 | -------------------------------------------------------------------------------- /oci/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | oci-manager: 5 | container_name: oci-manager 6 | build: . 
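# The ./config file mounted below must follow the standard OCI SDK/CLI INI
# format, one profile per tenancy; a minimal sketch with placeholder values
# (the real OCIDs, fingerprint, and region come from your OCI API Keys page):
#
#   [aeolyus]
#   user=ocid1.user.oc1..<unique-id>
#   fingerprint=aa:bb:cc:dd:ee:ff:00:11:22:33:44:55:66:77:88:99
#   key_file=/keys/oci.pem
#   tenancy=ocid1.tenancy.oc1..<unique-id>
#   region=us-phoenix-1
#
# Each section name must match a config_profile_name referenced in
# launch_compute_instance/vars.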
7 | volumes: 8 | - "./keys/:/keys/:ro" 9 | - "./config:/root/.oci/config:ro" 10 | - "./launch_compute_instance/:/launch_compute_instance/:ro" 11 | -------------------------------------------------------------------------------- /oci/launch_compute_instance/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Launch compute instances 2 | hosts: localhost 3 | gather_facts: no 4 | collections: 5 | - oracle.oci 6 | vars_files: 7 | - vars/network.yaml 8 | 9 | tasks: 10 | - name: Create innernet server 11 | block: 12 | - name: Include vars for innernet server 13 | include_vars: 14 | file: vars/innernet-server.yaml 15 | 16 | - name: Include vars for tenant aeolyusplex for innernet server x86 machines 17 | include_vars: 18 | file: vars/aeolyusplex-innernet-server-x86.yaml 19 | - name: Create x86 compute instances 20 | include_tasks: tasks/main.yaml 21 | with_sequence: count=1 22 | loop_control: 23 | loop_var: instance_num 24 | 25 | - name: Create kubernetes master nodes 26 | block: 27 | - name: Include vars for kubernetes master 28 | include_vars: 29 | file: vars/kubernetes-master.yaml 30 | 31 | - name: Include vars for tenant aeolyus for master x86 machines 32 | include_vars: 33 | file: vars/aeolyus-master-x86.yaml 34 | - name: Create x86 compute instances 35 | include_tasks: tasks/main.yaml 36 | with_sequence: count=1 37 | loop_control: 38 | loop_var: instance_num 39 | 40 | - name: Include vars for tenant starrydough for master x86 machines 41 | include_vars: 42 | file: vars/starrydough-master-x86.yaml 43 | - name: Create x86 compute instances 44 | include_tasks: tasks/main.yaml 45 | with_sequence: count=1 46 | loop_control: 47 | loop_var: instance_num 48 | 49 | - name: Include vars for tenant aeolyusplex for worker x86 machines 50 | include_vars: 51 | file: vars/aeolyusplex-worker-x86.yaml 52 | - name: Create x86 compute instances 53 | include_tasks: tasks/main.yaml 54 | with_sequence: count=1 55 | loop_control: 56 | loop_var: instance_num 57 | 58 | - name: Create kubernetes worker nodes 59 | block: 60 | - name: Include vars for kubernetes worker 61 | include_vars: 62 | file: vars/kubernetes-worker.yaml 63 | 64 | - name: Include vars for tenant aeolyus for worker arm machines 65 | include_vars: 66 | file: vars/aeolyus-worker-arm.yaml 67 | - name: Create arm compute instances 68 | include_tasks: tasks/main.yaml 69 | with_sequence: count=4 70 | loop_control: 71 | loop_var: instance_num 72 | 73 | - name: Include vars for tenant aeolyusplex for worker arm machines 74 | include_vars: 75 | file: vars/aeolyusplex-worker-arm.yaml 76 | - name: Create arm compute instances 77 | include_tasks: tasks/main.yaml 78 | with_sequence: count=0 # San Jose currently has no more ARM VMs 79 | loop_control: 80 | loop_var: instance_num 81 | 82 | - name: Include vars for tenant starrydough for worker arm machines 83 | include_vars: 84 | file: vars/starrydough-worker-arm.yaml 85 | - name: Create arm compute instances 86 | include_tasks: tasks/main.yaml 87 | with_sequence: count=4 88 | loop_control: 89 | loop_var: instance_num 90 | -------------------------------------------------------------------------------- /oci/launch_compute_instance/tasks/find_ad.yaml: -------------------------------------------------------------------------------- 1 | - name: List availability domains 2 | oci_identity_availability_domain_facts: 3 | config_profile_name: "{{ config_profile_name }}" 4 | compartment_id: "{{ instance_compartment }}" 5 | register: result 6 | 7 | - set_fact: 
availability_domains: "{{ result.availability_domains }}" 9 | 10 | - name: List shapes in first AD 11 | oci_compute_shape_facts: 12 | config_profile_name: "{{ config_profile_name }}" 13 | compartment_id: "{{ instance_compartment }}" 14 | image_id: "{{ instance_image }}" 15 | availability_domain: "{{ availability_domains[0].name }}" 16 | register: result 17 | when: availability_domains | length > 0 18 | 19 | - set_fact: 20 | instance_ad: "{{ availability_domains[0].name }}" 21 | loop: "{{ result.shapes | default([]) }}" 22 | when: item.shape == instance_shape and availability_domains | length > 0 23 | 24 | - name: List shapes in second AD 25 | oci_compute_shape_facts: 26 | config_profile_name: "{{ config_profile_name }}" 27 | compartment_id: "{{ instance_compartment }}" 28 | image_id: "{{ instance_image }}" 29 | availability_domain: "{{ availability_domains[1].name }}" 30 | register: result 31 | when: availability_domains | length > 1 32 | 33 | - set_fact: 34 | instance_ad: "{{ availability_domains[1].name }}" 35 | when: item.shape == instance_shape and availability_domains | length > 1 36 | loop: "{{ result.shapes | default([]) }}" 37 | 38 | - name: List shapes in third AD 39 | oci_compute_shape_facts: 40 | config_profile_name: "{{ config_profile_name }}" 41 | compartment_id: "{{ instance_compartment }}" 42 | image_id: "{{ instance_image }}" 43 | availability_domain: "{{ availability_domains[2].name }}" 44 | register: result 45 | when: availability_domains | length > 2 46 | 47 | - set_fact: 48 | instance_ad: "{{ availability_domains[2].name }}" 49 | loop: "{{ result.shapes | default([]) }}" 50 | when: item.shape == instance_shape and availability_domains | length > 2 51 | -------------------------------------------------------------------------------- /oci/launch_compute_instance/tasks/instance.yaml: -------------------------------------------------------------------------------- 1 | - name: Launch a compute instance 2 | oci_compute_instance: 3 | config_profile_name: "{{ config_profile_name }}" 4 | name: "{{ '-'.join((instance_hostname, instance_num)) }}" 5 | availability_domain: "{{ instance_ad }}" 6 | compartment_id: "{{ instance_compartment }}" 7 | shape: "{{ instance_shape }}" 8 | source_details: 9 | source_type: image 10 | image_id: "{{ instance_image }}" 11 | shape_config: 12 | memory_in_gbs: "{{ memory_in_gbs | default(1) }}" 13 | ocpus: "{{ ocpus | default(1) }}" 14 | create_vnic_details: 15 | assign_public_ip: True 16 | subnet_id: "{{ instance_subnet_id }}" 17 | metadata: 18 | ssh_authorized_keys: "{{ lookup('file', '~/.oci/keys/ssh_authorized_keys') }}" 19 | freeform_tags: "{{ freeform_tags | default({}) }}" 20 | register: result 21 | - set_fact: 22 | instance_id: "{{ result.instance.id }}" 23 | 24 | - name: Get boot volume id from an existing instance 25 | oci_compute_boot_volume_attachment_facts: 26 | config_profile_name: "{{ config_profile_name }}" 27 | availability_domain: "{{ instance_ad }}" 28 | compartment_id: "{{ instance_compartment }}" 29 | instance_id: "{{ instance_id }}" 30 | register: result 31 | - set_fact: 32 | boot_volume_id: "{{ result.boot_volume_attachments[0].boot_volume_id }}" 33 | 34 | - name: Update instance boot volume VPU 35 | oci_blockstorage_boot_volume: 36 | config_profile_name: "{{ config_profile_name }}" 37 | vpus_per_gb: 120 38 | boot_volume_id: "{{ boot_volume_id }}" 39 | -------------------------------------------------------------------------------- /oci/launch_compute_instance/tasks/main.yaml: 
-------------------------------------------------------------------------------- 1 | - name: Launch a compute instance 2 | block: 3 | # Find the availability domain 4 | - import_tasks: find_ad.yaml 5 | # Create network components 6 | - import_tasks: network.yaml 7 | # Create instance 8 | - import_tasks: instance.yaml 9 | 10 | rescue: 11 | - import_tasks: teardown.yaml 12 | ignore_errors: yes 13 | - fail: 14 | msg: "{{ ansible_failed_result }}" 15 | -------------------------------------------------------------------------------- /oci/launch_compute_instance/tasks/network.yaml: -------------------------------------------------------------------------------- 1 | - name: Create a VCN 2 | oci_network_vcn: 3 | config_profile_name: "{{ config_profile_name }}" 4 | compartment_id: "{{ instance_compartment }}" 5 | display_name: "{{ vcn_name }}" 6 | cidr_block: "{{ vcn_cidr_block }}" 7 | register: result 8 | - set_fact: 9 | vcn_id: "{{ result.vcn.id }}" 10 | 11 | - name: Create a new Internet Gateway 12 | oci_network_internet_gateway: 13 | config_profile_name: "{{ config_profile_name }}" 14 | compartment_id: "{{ instance_compartment }}" 15 | vcn_id: "{{ vcn_id }}" 16 | name: "{{ ig_name }}" 17 | is_enabled: 'yes' 18 | state: 'present' 19 | register: result 20 | - set_fact: 21 | ig_id: "{{ result.internet_gateway.id }}" 22 | 23 | - name: Create route table to connect internet gateway to the VCN 24 | oci_network_route_table: 25 | config_profile_name: "{{ config_profile_name }}" 26 | compartment_id: "{{ instance_compartment }}" 27 | vcn_id: "{{ vcn_id }}" 28 | name: "{{ route_table_name }}" 29 | route_rules: "{{ route_table_rules }}" 30 | state: 'present' 31 | register: result 32 | - set_fact: 33 | rt_id: "{{ result.route_table.id }}" 34 | 35 | # Create a security list for allowing access to public instance 36 | # Use a jinja2 template of the ingress and egress security rules to generate 37 | # a templated version of the final rules. 38 | - name: Create ingress rules yaml body 39 | template: src=../templates/ingress_security_rules.yaml.j2 dest=/tmp/instance_ingress_security_rules.yaml 40 | - name: Create egress yaml body 41 | template: src=../templates/egress_security_rules.yaml.j2 dest=/tmp/instance_egress_security_rules.yaml 42 | # Load the variables defined in the generated files 43 | - name: Load the variables defined in the ingress rules yaml body 44 | include_vars: 45 | file: /tmp/instance_ingress_security_rules.yaml 46 | name: loaded_ingress 47 | - name: Load the variables defined in the egress rules yaml body 48 | include_vars: 49 | file: /tmp/instance_egress_security_rules.yaml 50 | name: loaded_egress 51 | - name: Create a security list for allowing access to public instance 52 | oci_network_security_list: 53 | config_profile_name: "{{ config_profile_name }}" 54 | name: "{{ securitylist_name }}" 55 | compartment_id: "{{ instance_compartment }}" 56 | vcn_id: '{{ vcn_id }}' 57 | ingress_security_rules: "{{ loaded_ingress.instance_ingress_security_rules }}" 58 | egress_security_rules: "{{ loaded_egress.instance_egress_security_rules }}" 59 | register: result 60 | - set_fact: 61 | instance_security_list_ocid: "{{ result.security_list.id }}" 62 | 63 | - name: Create a subnet to host the public instance. Link security_list and route_table. 
  oci_network_subnet:
    config_profile_name: "{{ config_profile_name }}"
    availability_domain: "{{ instance_ad }}"
    cidr_block: "{{ subnet_cidr }}"
    compartment_id: "{{ instance_compartment }}"
    display_name: "{{ subnet_name }}"
    prohibit_public_ip_on_vnic: false
    route_table_id: "{{ rt_id }}"
    security_list_ids: [ "{{ instance_security_list_ocid }}" ]
    vcn_id: '{{ vcn_id }}'
  register: result
- set_fact:
    instance_subnet_id: "{{ result.subnet.id }}"
--------------------------------------------------------------------------------
/oci/launch_compute_instance/tasks/teardown.yaml:
--------------------------------------------------------------------------------
- name: Terminate the instance
  oci_compute_instance:
    config_profile_name: "{{ config_profile_name }}"
    id: "{{ instance_id }}"
    state: absent

- name: Delete the subnet
  oci_network_subnet:
    config_profile_name: "{{ config_profile_name }}"
    id: "{{ instance_subnet_id }}"
    state: absent

- name: Delete the security list
  oci_network_security_list:
    config_profile_name: "{{ config_profile_name }}"
    id: "{{ instance_security_list_ocid }}"
    state: absent

- name: Delete the route table
  oci_network_route_table:
    config_profile_name: "{{ config_profile_name }}"
    id: "{{ rt_id }}"
    state: absent

- name: Delete the Internet Gateway
  oci_network_internet_gateway:
    config_profile_name: "{{ config_profile_name }}"
    id: "{{ ig_id }}"
    state: absent

- name: Delete the VCN
  oci_network_vcn:
    config_profile_name: "{{ config_profile_name }}"
    vcn_id: "{{ vcn_id }}"
    state: absent
--------------------------------------------------------------------------------
/oci/launch_compute_instance/templates/egress_security_rules.yaml.j2:
--------------------------------------------------------------------------------
instance_egress_security_rules:
  # Allow all outbound traffic (all protocols, all destinations)
  - destination: "{{ quad_zero_route }}"
    protocol: "all"
--------------------------------------------------------------------------------
/oci/launch_compute_instance/templates/ingress_security_rules.yaml.j2:
--------------------------------------------------------------------------------
instance_ingress_security_rules:
  # Allow all inbound traffic, not just SSH: protocol "all" from 0.0.0.0/0
  # leaves every port open
  - source: "{{ quad_zero_route }}"
    protocol: "all"
--------------------------------------------------------------------------------
/oci/launch_compute_instance/vars/aeolyus-master-arm.yaml:
--------------------------------------------------------------------------------
instance_shape: "VM.Standard.A1.Flex"
instance_hostname: "aeolyus-master-arm-instance-ansible"
# Ubuntu 20.04 aarch64 in us-phoenix-1
# https://docs.oracle.com/en-us/iaas/images/image/e29b572c-5e90-4c00-88c6-11e8cae0a8d4/
instance_image: "ocid1.image.oc1.phx.aaaaaaaa37achzetqynl6qzdgkla44wyiktjqhh6xvgzk2wgeato4rfjjo6q"
# aeolyus
instance_compartment: "ocid1.tenancy.oc1..aaaaaaaatu5lebwfrgjkuh4fkohbelghz6sqtchok6omg2qnkugjybfltdra"
memory_in_gbs: 6
ocpus: 1
config_profile_name: "aeolyus"

# Different subnet than x86 because oci_network_subnet is not idempotent
subnet_cidr: "10.0.1.0/24"
--------------------------------------------------------------------------------
/oci/launch_compute_instance/vars/aeolyus-master-x86.yaml:
--------------------------------------------------------------------------------
instance_shape: "VM.Standard.E2.1.Micro"
instance_hostname: "aeolyus-master-x86-instance-ansible"
# Ubuntu 20.04 Minimal in us-phoenix-1
# https://docs.oracle.com/en-us/iaas/images/image/19124c98-1c3e-42e4-8ea8-fa3977a33342/
instance_image: "ocid1.image.oc1.phx.aaaaaaaasugogq2rv32d3zbpolvbvdgapjdwfgdzll6q3ctgtnqhkcns3b7a"
# aeolyus
instance_compartment: "ocid1.tenancy.oc1..aaaaaaaatu5lebwfrgjkuh4fkohbelghz6sqtchok6omg2qnkugjybfltdra"
memory_in_gbs: 1
ocpus: 1
config_profile_name: "aeolyus"
--------------------------------------------------------------------------------
/oci/launch_compute_instance/vars/aeolyus-worker-arm.yaml:
--------------------------------------------------------------------------------
instance_shape: "VM.Standard.A1.Flex"
instance_hostname: "aeolyus-worker-arm-instance-ansible"
# Ubuntu 20.04 aarch64 in us-phoenix-1
# https://docs.oracle.com/en-us/iaas/images/image/e29b572c-5e90-4c00-88c6-11e8cae0a8d4/
instance_image: "ocid1.image.oc1.phx.aaaaaaaa37achzetqynl6qzdgkla44wyiktjqhh6xvgzk2wgeato4rfjjo6q"
# aeolyus
instance_compartment: "ocid1.tenancy.oc1..aaaaaaaatu5lebwfrgjkuh4fkohbelghz6sqtchok6omg2qnkugjybfltdra"
memory_in_gbs: 6
ocpus: 1
config_profile_name: "aeolyus"

# Different subnet than x86 because oci_network_subnet is not idempotent
subnet_cidr: "10.0.1.0/24"
--------------------------------------------------------------------------------
/oci/launch_compute_instance/vars/aeolyus-worker-x86.yaml:
--------------------------------------------------------------------------------
instance_shape: "VM.Standard.E2.1.Micro"
instance_hostname: "aeolyus-worker-x86-instance-ansible"
# Ubuntu 20.04 Minimal in us-phoenix-1
# https://docs.oracle.com/en-us/iaas/images/image/19124c98-1c3e-42e4-8ea8-fa3977a33342/
instance_image: "ocid1.image.oc1.phx.aaaaaaaasugogq2rv32d3zbpolvbvdgapjdwfgdzll6q3ctgtnqhkcns3b7a"
# aeolyus
instance_compartment: "ocid1.tenancy.oc1..aaaaaaaatu5lebwfrgjkuh4fkohbelghz6sqtchok6omg2qnkugjybfltdra"
memory_in_gbs: 1
ocpus: 1
config_profile_name: "aeolyus"
--------------------------------------------------------------------------------
/oci/launch_compute_instance/vars/aeolyusplex-innernet-server-x86.yaml:
--------------------------------------------------------------------------------
instance_shape: "VM.Standard.E2.1.Micro"
instance_hostname: "aeolyusplex-innernet-server-x86-instance-ansible"
# Ubuntu 20.04 Minimal in us-sanjose-1
# https://docs.oracle.com/en-us/iaas/images/image/19124c98-1c3e-42e4-8ea8-fa3977a33342/
instance_image: "ocid1.image.oc1.us-sanjose-1.aaaaaaaacgbjxf7qypodeq4zexw7ikuga27yl3crbpnncjpfzisfkefr7lea"
# aeolyusplex
instance_compartment: "ocid1.tenancy.oc1..aaaaaaaapvsxhpn42yybnvggy6socle5ojgjk6bkc5ulxilsp7tp6o4y6mjq"
memory_in_gbs: 1
ocpus: 1
config_profile_name: "aeolyusplex"
--------------------------------------------------------------------------------
/oci/launch_compute_instance/vars/aeolyusplex-master-arm.yaml:
--------------------------------------------------------------------------------
instance_shape: "VM.Standard.A1.Flex"
instance_hostname: "aeolyusplex-master-arm-instance-ansible"
# Ubuntu 20.04 aarch64 in us-sanjose-1
# https://docs.oracle.com/en-us/iaas/images/image/e29b572c-5e90-4c00-88c6-11e8cae0a8d4/
instance_image: "ocid1.image.oc1.us-sanjose-1.aaaaaaaatoqrfoytj5y4zsqi7c7i2ngskgl6namke2nc3bujhkxvfma62yva"
"ocid1.image.oc1.us-sanjose-1.aaaaaaaatoqrfoytj5y4zsqi7c7i2ngskgl6namke2nc3bujhkxvfma62yva" 6 | # aeolyusplex 7 | instance_compartment: "ocid1.tenancy.oc1..aaaaaaaapvsxhpn42yybnvggy6socle5ojgjk6bkc5ulxilsp7tp6o4y6mjq" 8 | memory_in_gbs: 6 9 | ocpus: 1 10 | config_profile_name: "aeolyusplex" 11 | 12 | # Different subnet than x86 because oci_network_subnet is not idempotent 13 | subnet_cidr: "10.0.1.0/24" 14 | -------------------------------------------------------------------------------- /oci/launch_compute_instance/vars/aeolyusplex-master-x86.yaml: -------------------------------------------------------------------------------- 1 | instance_shape: "VM.Standard.E2.1.Micro" 2 | instance_hostname: "aeolyusplex-master-x86-instance-ansible" 3 | # Ubuntu 20.04 Minimal in us-sanjose-1 4 | # https://docs.oracle.com/en-us/iaas/images/image/19124c98-1c3e-42e4-8ea8-fa3977a33342/ 5 | instance_image: "ocid1.image.oc1.us-sanjose-1.aaaaaaaacgbjxf7qypodeq4zexw7ikuga27yl3crbpnncjpfzisfkefr7lea" 6 | # aeolyusplex 7 | instance_compartment: "ocid1.tenancy.oc1..aaaaaaaapvsxhpn42yybnvggy6socle5ojgjk6bkc5ulxilsp7tp6o4y6mjq" 8 | memory_in_gbs: 1 9 | ocpus: 1 10 | config_profile_name: "aeolyusplex" 11 | -------------------------------------------------------------------------------- /oci/launch_compute_instance/vars/aeolyusplex-worker-arm.yaml: -------------------------------------------------------------------------------- 1 | instance_shape: "VM.Standard.A1.Flex" 2 | instance_hostname: "aeolyusplex-worker-arm-instance-ansible" 3 | # Ubuntu 20.04 aarch64 in us-sanjose-1 4 | # https://docs.oracle.com/en-us/iaas/images/image/e29b572c-5e90-4c00-88c6-11e8cae0a8d4/ 5 | instance_image: "ocid1.image.oc1.us-sanjose-1.aaaaaaaatoqrfoytj5y4zsqi7c7i2ngskgl6namke2nc3bujhkxvfma62yva" 6 | # aeolyusplex 7 | instance_compartment: "ocid1.tenancy.oc1..aaaaaaaapvsxhpn42yybnvggy6socle5ojgjk6bkc5ulxilsp7tp6o4y6mjq" 8 | memory_in_gbs: 6 9 | ocpus: 1 10 | config_profile_name: "aeolyusplex" 11 | 12 | # Different subnet than x86 because oci_network_subnet is not idempotent 13 | subnet_cidr: "10.0.1.0/24" 14 | -------------------------------------------------------------------------------- /oci/launch_compute_instance/vars/aeolyusplex-worker-x86.yaml: -------------------------------------------------------------------------------- 1 | instance_shape: "VM.Standard.E2.1.Micro" 2 | instance_hostname: "aeolyusplex-worker-x86-instance-ansible" 3 | # Ubuntu 20.04 Minimal in us-sanjose-1 4 | # https://docs.oracle.com/en-us/iaas/images/image/19124c98-1c3e-42e4-8ea8-fa3977a33342/ 5 | instance_image: "ocid1.image.oc1.us-sanjose-1.aaaaaaaacgbjxf7qypodeq4zexw7ikuga27yl3crbpnncjpfzisfkefr7lea" 6 | # aeolyusplex 7 | instance_compartment: "ocid1.tenancy.oc1..aaaaaaaapvsxhpn42yybnvggy6socle5ojgjk6bkc5ulxilsp7tp6o4y6mjq" 8 | memory_in_gbs: 1 9 | ocpus: 1 10 | config_profile_name: "aeolyusplex" 11 | -------------------------------------------------------------------------------- /oci/launch_compute_instance/vars/innernet-server.yaml: -------------------------------------------------------------------------------- 1 | freeform_tags: { 2 | "innernet-server": "true", 3 | "ansible": "true", 4 | } 5 | -------------------------------------------------------------------------------- /oci/launch_compute_instance/vars/kubernetes-master.yaml: -------------------------------------------------------------------------------- 1 | freeform_tags: { 2 | "kubernetes": "true", 3 | "kubernetes-master": "true", 4 | "innernet": "true", 5 | "ansible": "true", 6 | } 7 | 
--------------------------------------------------------------------------------
/oci/launch_compute_instance/vars/kubernetes-worker.yaml:
--------------------------------------------------------------------------------
freeform_tags: {
  "kubernetes": "true",
  "kubernetes-worker": "true",
  "innernet": "true",
  "ansible": "true",
}
--------------------------------------------------------------------------------
/oci/launch_compute_instance/vars/network.yaml:
--------------------------------------------------------------------------------
# Common networking definitions
quad_zero_route: "0.0.0.0/0"
protocol: "all"

# Network
vcn_name: "vcn-ansible"
vcn_cidr_block: "10.0.0.0/16"

ig_name: "internet-gateway-ansible"

route_table_name: "route-table-ansible"
# Route all internet access to our Internet Gateway
route_table_rules:
  - cidr_block: "{{ quad_zero_route }}"
    network_entity_id: "{{ ig_id }}"

subnet_cidr: "10.0.0.0/24"
subnet_name: "subnet-ansible"

securitylist_name: "securitylist-ansible"
--------------------------------------------------------------------------------
/oci/launch_compute_instance/vars/starrydough-master-arm.yaml:
--------------------------------------------------------------------------------
instance_shape: "VM.Standard.A1.Flex"
instance_hostname: "starrydough-master-arm-instance-ansible"
# Ubuntu 20.04 aarch64 in us-phoenix-1
# https://docs.oracle.com/en-us/iaas/images/image/e29b572c-5e90-4c00-88c6-11e8cae0a8d4/
instance_image: "ocid1.image.oc1.phx.aaaaaaaa37achzetqynl6qzdgkla44wyiktjqhh6xvgzk2wgeato4rfjjo6q"
# starrydough
instance_compartment: "ocid1.tenancy.oc1..aaaaaaaaw3vgk7i2fa7afmbkxrn26hlxg4auluwszxj7d3mtinyub6psyo5q"
memory_in_gbs: 6
ocpus: 1
config_profile_name: "starrydough"

# Different subnet than x86 because oci_network_subnet is not idempotent
subnet_cidr: "10.0.1.0/24"
--------------------------------------------------------------------------------
/oci/launch_compute_instance/vars/starrydough-master-x86.yaml:
--------------------------------------------------------------------------------
instance_shape: "VM.Standard.E2.1.Micro"
instance_hostname: "starrydough-master-x86-instance-ansible"
# Ubuntu 20.04 Minimal in us-phoenix-1
# https://docs.oracle.com/en-us/iaas/images/image/19124c98-1c3e-42e4-8ea8-fa3977a33342/
instance_image: "ocid1.image.oc1.phx.aaaaaaaasugogq2rv32d3zbpolvbvdgapjdwfgdzll6q3ctgtnqhkcns3b7a"
# starrydough
instance_compartment: "ocid1.tenancy.oc1..aaaaaaaaw3vgk7i2fa7afmbkxrn26hlxg4auluwszxj7d3mtinyub6psyo5q"
memory_in_gbs: 1
ocpus: 1
config_profile_name: "starrydough"
--------------------------------------------------------------------------------
/oci/launch_compute_instance/vars/starrydough-worker-arm.yaml:
--------------------------------------------------------------------------------
instance_shape: "VM.Standard.A1.Flex"
instance_hostname: "starrydough-worker-arm-instance-ansible"
# Ubuntu 20.04 aarch64 in us-phoenix-1
# https://docs.oracle.com/en-us/iaas/images/image/e29b572c-5e90-4c00-88c6-11e8cae0a8d4/
instance_image: "ocid1.image.oc1.phx.aaaaaaaa37achzetqynl6qzdgkla44wyiktjqhh6xvgzk2wgeato4rfjjo6q"
# starrydough
instance_compartment: "ocid1.tenancy.oc1..aaaaaaaaw3vgk7i2fa7afmbkxrn26hlxg4auluwszxj7d3mtinyub6psyo5q"
memory_in_gbs: 6
ocpus: 1
config_profile_name: "starrydough"
"starrydough" 11 | 12 | # Different subnet than x86 because oci_network_subnet is not idempotent 13 | subnet_cidr: "10.0.1.0/24" 14 | -------------------------------------------------------------------------------- /oci/launch_compute_instance/vars/starrydough-worker-x86.yaml: -------------------------------------------------------------------------------- 1 | instance_shape: "VM.Standard.E2.1.Micro" 2 | instance_hostname: "starrydough-worker-x86-instance-ansible" 3 | # Ubuntu 20.04 Minimal in us-phoenix-1 4 | # https://docs.oracle.com/en-us/iaas/images/image/19124c98-1c3e-42e4-8ea8-fa3977a33342/ 5 | instance_image: "ocid1.image.oc1.phx.aaaaaaaasugogq2rv32d3zbpolvbvdgapjdwfgdzll6q3ctgtnqhkcns3b7a" 6 | # starrydough 7 | instance_compartment: "ocid1.tenancy.oc1..aaaaaaaaw3vgk7i2fa7afmbkxrn26hlxg4auluwszxj7d3mtinyub6psyo5q" 8 | memory_in_gbs: 1 9 | ocpus: 1 10 | config_profile_name: "starrydough" 11 | -------------------------------------------------------------------------------- /pihole/defaults/main.yml: -------------------------------------------------------------------------------- 1 | pihole_privacy_lvl: "2" 2 | pihole_logging: "off" 3 | -------------------------------------------------------------------------------- /pihole/files/resolv.conf: -------------------------------------------------------------------------------- 1 | nameserver 1.1.1.1 2 | nameserver 1.0.0.1 3 | -------------------------------------------------------------------------------- /pihole/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: Set pihole privacy level 2 | lineinfile: 3 | path: "/data/pihole/etc-pihole/pihole-FTL.conf" 4 | regexp: "^PRIVACYLEVEL=" 5 | line: "PRIVACYLEVEL={{ pihole_privacy_lvl }}" 6 | create: true 7 | 8 | - name: Set pihole logging level 9 | command: docker exec -it pihole bash -c "pihole logging {{ pihole_logging }}" 10 | 11 | - name: Generate pihole password 12 | set_fact: 13 | pihole_password: "{{ lookup('password', '/dev/null length=16 chars=ascii_letters') }}" 14 | 15 | - name: Set pihole password 16 | shell: "docker exec pihole pihole -a -p {{ pihole_password }}" 17 | 18 | - name: Print pihole password 19 | debug: 20 | msg: "Pihole password: {{ pihole_password }}" 21 | changed_when: true 22 | -------------------------------------------------------------------------------- /pihole/meta/main.yml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - role: docker 3 | - role: firewalld 4 | vars: 5 | firewalld_services: ["dns"] 6 | firewalld_ports: ["853/tcp"] 7 | -------------------------------------------------------------------------------- /pihole/tasks/dns.yml: -------------------------------------------------------------------------------- 1 | - name: Disable systemd-resolved 2 | systemd: 3 | name: systemd-resolved 4 | state: stopped 5 | enabled: no 6 | 7 | - name: Change internal dns to cloudflare 8 | copy: 9 | src: resolv.conf 10 | dest: /etc/resolv.conf 11 | -------------------------------------------------------------------------------- /pihole/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Setup DNS 2 | import_tasks: dns.yml 3 | 4 | - name: Pihole container present 5 | docker_container: 6 | name: pihole 7 | image: pihole/pihole 8 | restart_policy: unless-stopped 9 | ports: 10 | - "31415:80" 11 | - "53:53/tcp" 12 | - "53:53/udp" 13 | volumes: 14 | - "/data/pihole/etc-pihole/:/etc/pihole/" 15 | - 
"/data/pihole/etc-dnsmasq.d/:/etc/dnsmasq.d/" 16 | dns_servers: [127.0.0.1, 1.1.1.1] 17 | env: 18 | TZ: "America/Los_Angeles" 19 | DNS1: "1.1.1.1" 20 | DNS2: "1.0.0.1" 21 | DNSSEC: "True" 22 | register: pihole_container 23 | notify: 24 | - Set pihole privacy level 25 | - Set pihole logging level 26 | - Generate pihole password 27 | - Set pihole password 28 | - Print pihole password 29 | 30 | - name: Setup caddy for pihole 31 | include_role: 32 | name: caddy 33 | vars: 34 | caddyfile_marker: "# {mark} ANSIBLE Pihole" 35 | caddyfile_block: | 36 | dns.{{ domain }} { 37 | reverse_proxy localhost:31415 38 | } 39 | 40 | - name: Wait for DNS cert before setting DoT 41 | wait_for: 42 | path: "/data/caddy/caddy_data/caddy/certificates/acme-v02.api.letsencrypt.org-directory/dns.{{ domain }}/dns.{{ domain }}.crt" 43 | when: public | d(False) 44 | 45 | - name: Setup pihole DoT nginx stream conf 46 | include_role: 47 | name: nginx 48 | vars: 49 | hostname: "dns.{{ domain }}" 50 | nginx_stream_file: "dns-over-tls" 51 | # [DEPRECATED] moved to caddy 52 | # nginx_template_file: "dns" 53 | when: public | d(False) 54 | -------------------------------------------------------------------------------- /pihole/templates/dns-over-tls.j2: -------------------------------------------------------------------------------- 1 | upstream dns-servers { 2 | server 127.0.0.1:53; 3 | } 4 | 5 | server { 6 | listen 853 ssl; 7 | ssl_certificate /data/caddy/caddy_data/caddy/certificates/acme-v02.api.letsencrypt.org-directory/dns.{{ domain }}/dns.{{ domain }}.crt; 8 | ssl_certificate_key /data/caddy/caddy_data/caddy/certificates/acme-v02.api.letsencrypt.org-directory/dns.{{ domain }}/dns.{{ domain }}.key; 9 | ssl_protocols TLSv1.2 TLSv1.3; 10 | ssl_ciphers HIGH:!aNULL:!MD5; 11 | ssl_handshake_timeout 10s; 12 | ssl_session_cache shared:SSL:20m; 13 | 14 | proxy_pass dns-servers; 15 | } 16 | 17 | -------------------------------------------------------------------------------- /pihole/templates/dns.j2: -------------------------------------------------------------------------------- 1 | # [DEPRECATED] moved to caddy 2 | server { 3 | listen 80; 4 | listen [::]:80; 5 | server_name dns.*; 6 | return 301 https://$host$request_uri; 7 | limit_req zone=http burst=100; 8 | } 9 | 10 | server { 11 | listen 443 ssl http2; 12 | server_name dns.*; 13 | 14 | ssl_certificate /etc/letsencrypt/live/dns.{{ domain }}/fullchain.pem; 15 | ssl_certificate_key /etc/letsencrypt/live/dns.{{ domain }}/privkey.pem; 16 | 17 | limit_req zone=http burst=100; 18 | 19 | location / { 20 | proxy_pass http://127.0.0.1:31415; 21 | proxy_set_header Host $host; 22 | proxy_set_header X-Real-IP $remote_addr; 23 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 24 | proxy_set_header X-Forwarded-Proto $scheme; 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /plex/meta/main.yml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - role: docker 3 | - role: firewalld 4 | vars: 5 | firewalld_ports: ["32400/tcp"] 6 | -------------------------------------------------------------------------------- /plex/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Plex container present 2 | docker_container: 3 | name: plex 4 | image: plexinc/pms-docker 5 | restart_policy: unless-stopped 6 | network_mode: host 7 | volumes: 8 | - "/data/plex/config:/config" 9 | - "/data/plex/data:/data" 10 | - "/dev/shm:/transcode" 11 | 
    env:
      TZ: "America/Los_Angeles"

- name: Setup caddy for plex
  include_role:
    name: caddy
  vars:
    caddyfile_marker: "# {mark} ANSIBLE Plex"
    caddyfile_block: |
      plex.{{ domain }} {
        reverse_proxy localhost:32400
      }
--------------------------------------------------------------------------------
/prometheus/defaults/main.yml:
--------------------------------------------------------------------------------
prometheus_exporters: ["node_exporter", "nginx_exporter"]
--------------------------------------------------------------------------------
/prometheus/files/prometheus.yml:
--------------------------------------------------------------------------------
global:
  scrape_interval: 5s

scrape_configs:
  - job_name: "prometheus"
    static_configs:
      - targets: ["localhost:9090"]

  - job_name: "node"
    static_configs:
      - targets: ["localhost:9100"]

  - job_name: "nginx"
    static_configs:
      - targets: ["localhost:9113"]
--------------------------------------------------------------------------------
/prometheus/meta/main.yml:
--------------------------------------------------------------------------------
dependencies:
  - role: docker
--------------------------------------------------------------------------------
/prometheus/tasks/main.yml:
--------------------------------------------------------------------------------
- name: Create prometheus directory
  file:
    path: /data/prometheus
    state: directory

- name: Copy prometheus config
  copy:
    src: "prometheus.yml"
    dest: "/data/prometheus/prometheus.yml"

- name: Prometheus container present
  docker_container:
    name: prometheus
    image: prom/prometheus
    user: root
    restart_policy: unless-stopped
    network_mode: host
    volumes:
      - "/data/prometheus:/prometheus"
    command:
      - '--config.file=/prometheus/prometheus.yml'
      - '--storage.tsdb.retention.time=100y'
      - '--storage.tsdb.retention.size=20GB'

- name: Setup exporters
  include_tasks: "{{ item }}.yml"
  loop: "{{ prometheus_exporters }}"
--------------------------------------------------------------------------------
/prometheus/tasks/nginx_exporter.yml:
--------------------------------------------------------------------------------
- name: Nginx exporter container present
  docker_container:
    name: nginx-exporter
    image: nginx/nginx-prometheus-exporter
    restart_policy: unless-stopped
    network_mode: host
    command:
      # list-form commands bypass the shell, so embedded quotes would be
      # passed to the exporter literally; pass the URI unquoted
      - '--nginx.scrape-uri=http://localhost:8080/stub_status'
--------------------------------------------------------------------------------
/prometheus/tasks/node_exporter.yml:
--------------------------------------------------------------------------------
- name: Node exporter container present
  docker_container:
    name: node-exporter
    image: prom/node-exporter
    restart_policy: unless-stopped
    ports:
      - "9100:9100"
    volumes:
      - "/proc:/host/proc"
      - "/sys:/host/sys"
      - "/:/rootfs"
    command:
      - "--path.procfs=/host/proc"
      - "--path.sysfs=/host/sys"
      # no shell here either: escaped quotes would end up inside the regex
      - "--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc)($|/)"
--------------------------------------------------------------------------------
/site.yml:
--------------------------------------------------------------------------------
- name: Master playbook
  hosts: all:!localhost
  become: true
  roles:
    - swap
    - caddy
    - watchtower
    - bitwarden
    - pihole
    - grafana
    - nextcloud
    - minecraft
    - plex
    - tautulli
--------------------------------------------------------------------------------
/starrydough.oci.yaml:
--------------------------------------------------------------------------------
plugin: oracle.oci.oci
config_profile: starrydough
--------------------------------------------------------------------------------
/swap/defaults/main.yml:
--------------------------------------------------------------------------------
swap_file: "/.swapfile"
swap_size: "{{ ((ansible_memtotal_mb | int * 2)
             if (ansible_memtotal_mb | int <= 2048)
             else ansible_memtotal_mb | int) }}M"
swappiness: "1"
--------------------------------------------------------------------------------
/swap/tasks/main.yml:
--------------------------------------------------------------------------------
- name: Check if swap file exists
  stat:
    path: "{{swap_file}}"
  register: swap_file_check

- name: Create swap file
  command: fallocate -l {{swap_size}} {{swap_file}}
  when: not swap_file_check.stat.exists

- name: Change swap file permissions
  file: path="{{swap_file}}"
        owner=root
        group=root
        mode=0600

- name: Format swap file
  command: "mkswap {{swap_file}}"
  when: not swap_file_check.stat.exists

- name: Write swap entry in fstab
  mount: name=none
         src={{swap_file}}
         fstype=swap
         opts=sw
         passno=0
         dump=0
         state=present

- name: Turn on swap
  command: swapon -a
  when: not swap_file_check.stat.exists

- name: Set swappiness
  sysctl:
    name: vm.swappiness
    value: "{{swappiness}}"
--------------------------------------------------------------------------------
/tautulli/meta/main.yml:
--------------------------------------------------------------------------------
dependencies:
  - role: docker
  - role: firewalld
    vars:
      firewalld_ports: ["8181/tcp"]
--------------------------------------------------------------------------------
/tautulli/tasks/main.yml:
--------------------------------------------------------------------------------
- name: Tautulli container present
  docker_container:
    name: tautulli
    image: tautulli/tautulli
    restart_policy: unless-stopped
    ports:
      - "8181:8181"
    volumes:
      - "/data/tautulli:/config"
      - "/data/plex/config/Library/Application Support/Plex Media Server/Logs:/plex_logs"
    env:
      TZ: "America/Los_Angeles"

- name: Setup caddy for tautulli
  include_role:
    name: caddy
  vars:
    caddyfile_marker: "# {mark} ANSIBLE Tautulli"
    caddyfile_block: |
      tautulli.{{ domain }} {
        reverse_proxy localhost:8181
      }
--------------------------------------------------------------------------------
/watchtower/meta/main.yml:
--------------------------------------------------------------------------------
dependencies:
  - role: docker
--------------------------------------------------------------------------------
/watchtower/tasks/main.yml:
--------------------------------------------------------------------------------
- name: Watchtower container present
  docker_container:
    name: watchtower
    image: containrrr/watchtower
    restart_policy: unless-stopped
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock"
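As written, watchtower updates every container it can reach through the mounted socket. If that is too aggressive, watchtower's documented --label-enable flag restricts updates to containers that opt in; a variant of the task above (sketch, untested):

```
- name: Watchtower container present
  docker_container:
    name: watchtower
    image: containrrr/watchtower
    restart_policy: unless-stopped
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock"
    # only watch containers labeled com.centurylinklabs.watchtower.enable=true
    command: ["--label-enable"]
```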
--------------------------------------------------------------------------------
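The vars files under oci/launch_compute_instance/vars are meant to be layered onto the launch tasks: network.yaml for the common network definitions, one shape/image file per instance, and a freeform-tags file for the instance's role. One plausible wiring (an assumption for illustration; the repo's actual oci/launch_compute_instance/main.yaml may differ) is:

```
# hypothetical launch playbook, run from oci/launch_compute_instance/
- hosts: localhost
  connection: local
  vars:
    instance_num: "0"  # suffix joined onto instance_hostname in tasks/instance.yaml
  vars_files:
    - vars/network.yaml
    - vars/aeolyus-master-arm.yaml
    - vars/kubernetes-master.yaml
  tasks:
    - import_tasks: tasks/main.yaml
```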