├── .ansible-lint ├── .github └── renovate.json ├── .gitignore ├── .gitmodules ├── .vscode └── settings.json ├── .woodpecker.yml ├── .yamllint ├── FUNDING.yml ├── LICENSE ├── README.MD ├── ansible.cfg ├── bootstrap.yml ├── docker.yml ├── files ├── sshd_config └── sudoer_ansible ├── git-init.sh ├── git-vault-check.sh ├── gitea.sh ├── github.sh ├── group_vars ├── adonalsium.yml ├── all.yaml ├── ambition.yml ├── autonomy.yml ├── cultivation.yml ├── design.yml ├── desktop.yml ├── dominion.yml ├── endowment.yml ├── hoid.yml ├── honor.yml ├── identity.yml ├── invention.yml ├── investiture.yml ├── ishap.yml ├── omada.yml ├── preservation.yml ├── unity.yml └── virtuosity.yml ├── hosts.ini ├── justfile ├── prereqs.sh ├── requirements.yaml ├── roles ├── adonalsium │ ├── files │ │ ├── backup.sh │ │ ├── etc │ │ │ └── issue │ │ └── trim.sh │ ├── tasks │ │ ├── disks.yml │ │ ├── infrastructure.yml │ │ └── main.yml │ └── templates │ │ ├── hooks.yaml.j2 │ │ └── smb.conf.j2 ├── ambition │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── Caddyfile.j2 │ │ ├── certs.sh.j2 │ │ ├── db_backup.sh.j2 │ │ └── db_restore.sh.j2 ├── autonomy │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── cast_frontdoor.sh.j2 │ │ ├── docker.sh.j2 │ │ ├── google_traffic.py.j2 │ │ ├── update_hass.sh.j2 │ │ ├── waze_daycare.py.j2 │ │ ├── waze_home.py.j2 │ │ └── waze_live.py.j2 ├── bash_alias │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── cronjobs │ ├── README.md │ └── tasks │ │ └── main.yml ├── cultivation │ └── tasks │ │ └── main.yml ├── design │ ├── files │ │ ├── WayofKings.jpg │ │ ├── face │ │ └── stormlight_arc1_wallpaper.png │ └── tasks │ │ └── main.yml ├── desktop │ ├── README.md │ ├── files │ │ ├── WayofKings.jpg │ │ ├── face │ │ ├── stormlight_arc1_wallpaper_uw.png │ │ └── szeth_logid.cfg │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── sudoers_main_user.j2 │ │ └── systemd │ │ ├── restic_backup.service.j2 │ │ ├── restic_backup.timer.j2 │ │ ├── restic_prune.service.j2 │ │ └── restic_prune.timer.j2 ├── dominion │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── update.sh.j2 ├── endowment │ ├── files │ │ └── nextcloud_cron.sh │ ├── tasks │ │ ├── disks.yml │ │ └── main.yml │ └── templates │ │ └── photos.sh.j2 ├── github │ └── tasks │ │ └── main.yml ├── honor │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── Caddyfile.j2 │ │ ├── certs.sh.j2 │ │ ├── ddns_vps.sh.j2 │ │ └── wg-check.sh.j2 ├── identity │ └── tasks │ │ └── main.yml ├── install │ ├── bitwarden_cli │ │ ├── README.md │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── crowdsec_bouncer │ │ └── tasks │ │ │ └── main.yml │ ├── lego │ │ ├── README.md │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── lnxlink │ │ ├── defaults │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ ├── config.yaml.j2 │ │ │ └── lnxlink.service.j2 │ ├── mergerfs │ │ ├── README.md │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── nag_removal │ │ ├── README.md │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── pip_packages │ │ └── tasks │ │ │ └── main.yml │ ├── runitor │ │ ├── README.md │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── tailscale │ │ ├── README.md │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ └── zsh │ │ ├── files │ │ ├── .zshrc │ │ └── p10k.zsh │ │ └── tasks │ │ └── main.yml ├── invention │ ├── tasks │ │ └── main.yml │ └── 
templates │ │ └── systemd │ │ ├── restic_backup.service.j2 │ │ ├── restic_backup.timer.j2 │ │ ├── restic_prune.service.j2 │ │ └── restic_prune.timer.j2 ├── ishap │ ├── files │ │ ├── etc │ │ │ ├── export │ │ │ └── issue │ │ └── trim.sh │ └── tasks │ │ ├── disks.yml │ │ ├── infrastructure.yml │ │ └── main.yml ├── omada │ └── tasks │ │ └── main.yml ├── preservation │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── db_backup.sh.j2 │ │ ├── db_restore.sh.j2 │ │ └── postgres_cleanup.sh.j2 ├── unity │ └── tasks │ │ └── main.yml └── virtuosity │ └── tasks │ └── main.yml ├── run.yml ├── services ├── adonalsium │ └── compose.yaml ├── ambition │ └── compose.yaml ├── autonomy │ └── compose.yaml ├── cultivation │ └── compose.yaml ├── endowment │ └── compose.yaml ├── honor │ └── compose.yaml ├── identity │ └── compose.yaml ├── invention │ └── compose.yaml ├── investiture │ └── compose.yaml ├── ishap │ └── compose.yaml ├── omada │ └── compose.yaml ├── preservation │ └── compose.yaml ├── unity │ └── compose.yaml └── virtuosity │ └── compose.yaml ├── templates ├── MQTT_Explorer.desktop.j2 ├── diun_config.yml.j2 ├── github.sh.j2 └── ssh_config.j2 ├── update.yml ├── vars └── vault.yaml └── vault.sh /.ansible-lint: -------------------------------------------------------------------------------- 1 | --- 2 | exclude_paths: 3 | - galaxy_roles/ 4 | - galaxy_collections/ 5 | 6 | skip_list: 7 | - no-relative-paths 8 | - literal-compare 9 | - command-instead-of-shell 10 | - risky-shell-pipe 11 | - no-changed-when 12 | - git-latest 13 | - risky-file-permissions 14 | -------------------------------------------------------------------------------- /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "enabledManagers": ["docker-compose"], 4 | "dependencyDashboard": true, 5 | "prConcurrentLimit": 0, 6 | "prHourlyLimit": 0, 7 | "baseBranches": ["main"], 8 | "enabled": true, 9 | "extends": [ 10 | "config:recommended" 11 | ], 12 | "packageRules": [ 13 | { 14 | "matchPackageNames": ["ghcr.io/umami-software/umami"], 15 | "versioning": "regex:^(?<compatibility>.*)-v?(?<major>\\d+)\\.(?<minor>\\d+)\\.(?<patch>\\d+)?$" 16 | }, 17 | { 18 | "matchPackageNames": ["/immich-app/postgres/"], 19 | "enabled": false 20 | }, 21 | { 22 | "matchPackageNames": ["/valkey/"], 23 | "enabled": false 24 | }, 25 | { 26 | "matchPackageNames": ["crocodilestick/calibre-web-automated"], 27 | "versioning": "regex:^(?<compatibility>.*)-?(?<major>\\d+)\\.(?<minor>\\d+)\\.(?<patch>\\d+)?$" 28 | }, 29 | { 30 | "matchPackageNames": ["corentinth/it-tools", "ghcr.io/wez/govee2mqtt"], 31 | "versioning": "regex:^(?<compatibility>.*)-?(?<major>\\d+)\\.(?<minor>\\d+)\\.(?<patch>\\d+)\\-(?<build>.+)?$" 32 | } 33 | ] 34 | } 35 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | galaxy_roles 2 | .vault-password 3 | test.yml 4 | #vault.yaml -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "roles/docker-compose-generator"] 2 | path = roles/docker-compose-generator 3 | url = https://github.com/ironicbadger/ansible-role-docker-compose-generator/ 4 | [submodule "roles/install/autorestic"] 5 | path = roles/install/autorestic 6 | url = https://github.com/FuzzyMistborn/ansible-role-autorestic/ --------------------------------------------------------------------------------
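Since `roles/docker-compose-generator` and `roles/install/autorestic` are git submodules per `.gitmodules` above, a fresh checkout needs them initialized before any playbook that references those roles will resolve. A minimal sketch, assuming the repo lives at `$HOME/infra` as `git-vault-check.sh` expects (the clone URL is an assumption inferred from the username in `FUNDING.yml`):

```bash
# clone, then pull in the role submodules declared in .gitmodules
git clone https://github.com/FuzzyMistborn/infra.git "$HOME/infra"
cd "$HOME/infra"
git submodule update --init --recursive
```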
/.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "files.associations": { 3 | "**/services/**/*.yaml": "jinja-yaml", 4 | "**/services/**/*.yml": "jinja-yaml" 5 | }, 6 | "[jinja-yaml]": { 7 | "editor.defaultFormatter": null 8 | } 9 | } -------------------------------------------------------------------------------- /.woodpecker.yml: -------------------------------------------------------------------------------- 1 | steps: 2 | - name: ansible-lint and yamllint 3 | image: fuzzymistborn/docker-linting:latest 4 | environment: 5 | ansible_vault_password: 6 | from_secret: ansible_vault_password 7 | commands: 8 | - ansible-galaxy role install -r requirements.yaml 9 | - ansible-galaxy collection install -r requirements.yaml 10 | - sed -i '8d' ansible.cfg 11 | - echo $ansible_vault_password > .vault-password 12 | - ansible-lint . 13 | - yamllint . 14 | - find . -maxdepth 1 -name '*.yml' | grep -v '.woodpecker.yml' | grep -v 'FUNDING.yml' | xargs ansible-playbook --syntax-check --list-tasks --vault-password-file .vault-password 15 | when: 16 | - event: push 17 | 18 | - name: Send TG Notification 19 | image: appleboy/drone-telegram 20 | settings: 21 | token: 22 | from_secret: tg_token 23 | to: 24 | from_secret: tg_id 25 | format: markdown 26 | message: "{{#success build.status}}✅ Build for `{{repo.name}}` was *successful*!{{else}}❌ Build for `{{repo.name}}` has *FAILED*!{{/success}} \n🌐 [Output]({{build.link}})\n👤 Author: {{ commit.author }}\n📝 Commit: {{ commit.message }}" 27 | when: 28 | event: push 29 | status: 30 | - success 31 | - failure -------------------------------------------------------------------------------- /.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | extends: default 3 | 4 | rules: 5 | braces: 6 | max-spaces-inside: 1 7 | level: error 8 | brackets: 9 | max-spaces-inside: 1 10 | level: error 11 | colons: 12 | max-spaces-after: -1 13 | level: error 14 | commas: 15 | max-spaces-after: -1 16 | level: error 17 | comments-indentation: disable 18 | document-start: 19 | ignore: 20 | - services/ 21 | empty-lines: 22 | max: 3 23 | level: error 24 | line-length: disable 25 | new-line-at-end-of-file: disable 26 | truthy: 27 | allowed-values: ['true', 'false', 'yes', 'no'] 28 | 29 | ignore: | 30 | galaxy_roles/ 31 | roles/docker-compose-generator 32 | test.yml 33 | requirements.yaml 34 | vars/vault.yaml 35 | FUNDING.yml 36 | .woodpecker.yml 37 | -------------------------------------------------------------------------------- /FUNDING.yml: -------------------------------------------------------------------------------- 1 | --- 2 | github: FuzzyMistborn 3 | ko_fi: FuzzyMistborn -------------------------------------------------------------------------------- /README.MD: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | This repo contains the code used to deploy and manage my various LXCs on Proxmox. Ansible is the main way I deploy things. 4 | 5 | A huge HUGE thanks to IronicBadger/AlexKTZ for his [infra GitHub repo](https://github.com/IronicBadger/infra). I learned a ton poking around that repo and some of the things are copied straight from there, so credit where credit is due.
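Everything below ultimately wraps plain `ansible-playbook` runs against `hosts.ini`; the `just` recipes are thin wrappers. As a rough sketch of what a single-host deploy boils down to (the real commands live in the `justfile`; `cultivation` is just an example host):

```bash
# roughly what `just build cultivation` is assumed to expand to;
# ansible.cfg already supplies the inventory, remote user, and vault script
ansible-playbook run.yml --limit cultivation
```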
6 | 7 | ## Requirements 8 | 9 | - Python 3 10 | - `ansible` installed on the system 11 | - [`just`](https://github.com/casey/just) 12 | - Lazy install: `curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | sudo bash -s -- --to /usr/local/bin` 13 | 14 | # Explanation of Shards/Names: 15 | 16 | Most of my servers are named after Shards of Adonalsium from my favorite fantasy series by Brandon Sanderson. So if you look at my roles you'll see a pattern. 17 | 18 | * **Adonalsium** - Primary Proxmox Node 19 | * **Autonomy** - Home Automation 20 | * **Cultivation** - Misc. Media 21 | * **Dominion** - Ansible primary dev/git 22 | * **Endowment** - Media distribution/collection 23 | * **Honor** - DMZ/Primary Adguard/Wireguard 24 | * **Preservation** - Databases 25 | * **Ambition** - Cloud VPS 26 | 27 | Then my desktops are named after some of my favorite characters from the Cosmere: Kelsier and Hoid. 28 | 29 | # First Deploy instructions: 30 | 31 | * `just install` - Installs a git pre-commit hook to prevent uploading an unencrypted Ansible vault file, adds the Ansible Galaxy roles, and sets up the Ansible vault password 32 | * `just bootstrap` - Sets up the hosts with a main user and an ansible user that can run sudo without a password 33 | * `just build_proxmox` - Creates all LXCs, sets up disks on Proxmox host 34 | * `just bootstrap HOSTNAME` - Bootstraps the indicated server/desktop/whatever 35 | * `just build HOSTNAME` - Sets up the various machines 36 | 37 | # Ansible Vault 38 | 39 | * `just encrypt` - Encrypts the Ansible vault 40 | * `just decrypt` - Decrypts the Ansible vault 41 | 42 | # Updates 43 | 44 | * `just update` - Runs apt update and apt upgrade on the servers, and pushes an updated docker-compose.yml file 45 | * `just docker` - Runs docker compose pull {{ containers }} and docker compose up -d {{ containers }} on the host and containers specified 46 | 47 | ### If you appreciate my work, please consider buying me a beer (or coffee, or whatever) 48 | [![ko-fi](https://ko-fi.com/img/githubbutton_sm.svg)](https://ko-fi.com/E1E5796VZ) 49 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | inventory = ./hosts.ini 3 | remote_user = ansible 4 | host_key_checking = False 5 | private_key_file = ~/.ssh/ansible_user 6 | roles_path = $PWD/galaxy_roles:$PWD/roles 7 | interpreter_python=/usr/bin/python3 8 | vault_password_file = ./vault.sh 9 | 10 | [privilege_escalation] 11 | #become_user = ansible 12 | become_ask_pass = False 13 | -------------------------------------------------------------------------------- /bootstrap.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Ubuntu 3 | - hosts: ubuntu 4 | remote_user: root 5 | gather_facts: true 6 | vars_files: 7 | - 'vars/vault.yaml' 8 | tasks: 9 | - name: Apt update 10 | apt: 11 | upgrade: 'yes' 12 | update_cache: yes 13 | cache_valid_time: 3600 14 | - name: Ensure user groups exist 15 | group: 16 | name: "{{ item }}" 17 | state: present 18 | loop: 19 | - "{{ main_username }}" 20 | - ansible 21 | - name: Add users 22 | user: 23 | name: "{{ item.user }}" 24 | password: "{{ item.pass }}" 25 | groups: 26 | - "{{ item.user }}" 27 | - sudo 28 | shell: /bin/bash 29 | loop: 30 | - {user: "{{ main_username }}", pass: "{{ secret_main_user_pass }}"} 31 | - {user: ansible, pass: "{{ secret_ansible_pass }}"} 32 | - name: Add sudoers file for ansible 33 | copy: 34
| src: sudoer_ansible 35 | dest: /etc/sudoers.d/ansible 36 | owner: root 37 | group: root 38 | mode: 0440 39 | - name: SSH Keys 40 | authorized_key: 41 | user: "{{ item.user }}" 42 | state: present 43 | key: "{{ item.ssh }}" 44 | loop: 45 | - {user: "{{ main_username }}", ssh: "{{ secret_main_user_ssh }}"} 46 | - {user: "{{ main_username }}", ssh: "{{ secret_main_user_alt_ssh }}"} 47 | - {user: "{{ main_username }}", ssh: "{{ secret_main_user_pixel_ssh }}"} 48 | - {user: ansible, ssh: "{{ secret_ansible_ssh }}"} 49 | - name: Add hardened SSH config 50 | copy: 51 | src: sshd_config 52 | dest: /etc/ssh/sshd_config 53 | owner: root 54 | group: root 55 | mode: 0600 56 | - name: Restart ssh 57 | service: 58 | name: sshd 59 | state: restarted 60 | 61 | ### Arch/Fedora 62 | - hosts: arch fedora 63 | remote_user: "{{ bootstrap_user }}" 64 | gather_facts: true 65 | vars_files: 66 | - 'vars/vault.yaml' 67 | tasks: 68 | - name: Add ansible user 69 | user: 70 | name: ansible 71 | password: "{{ secret_ansible_pass }}" 72 | uid: 666 73 | groups: 74 | - wheel 75 | system: yes 76 | shell: /bin/bash 77 | - name: Add sudoers file for ansible 78 | copy: 79 | src: sudoer_ansible 80 | dest: /etc/sudoers.d/ansible 81 | owner: root 82 | group: root 83 | mode: 0440 84 | - name: Add SSH Keys 85 | authorized_key: 86 | user: ansible 87 | state: present 88 | key: "{{ secret_ansible_ssh }}" 89 | - name: Add hardened SSH config 90 | copy: 91 | src: sshd_config 92 | dest: /etc/ssh/sshd_config 93 | owner: root 94 | group: root 95 | mode: 0600 96 | - name: Restart ssh 97 | service: 98 | name: sshd 99 | state: restarted 100 | 101 | ### Debian Bootstrap 102 | - hosts: debian 103 | remote_user: "{{ bootstrap_user }}" 104 | gather_facts: true 105 | vars_files: 106 | - 'vars/vault.yaml' 107 | tasks: 108 | - name: Apt update 109 | apt: 110 | upgrade: 'yes' 111 | update_cache: yes 112 | cache_valid_time: 3600 113 | - name: Add ansible user 114 | user: 115 | name: ansible 116 | password: "{{ secret_ansible_pass }}" 117 | uid: 666 118 | groups: 119 | - sudo 120 | system: yes 121 | shell: /bin/bash 122 | - name: Add sudoers file for ansible 123 | copy: 124 | src: sudoer_ansible 125 | dest: /etc/sudoers.d/ansible 126 | owner: root 127 | group: root 128 | mode: 0440 129 | - name: SSH Keys 130 | authorized_key: 131 | user: "{{ item.user }}" 132 | state: present 133 | key: "{{ item.ssh }}" 134 | loop: 135 | - {user: "{{ main_username }}", ssh: "{{ secret_main_user_ssh }}"} 136 | - {user: "{{ main_username }}", ssh: "{{ secret_main_user_alt_ssh }}"} 137 | - {user: "{{ main_username }}", ssh: "{{ secret_main_user_pixel_ssh }}"} 138 | - {user: ansible, ssh: "{{ secret_ansible_ssh }}"} 139 | - name: Add hardened SSH config 140 | copy: 141 | src: sshd_config 142 | dest: /etc/ssh/sshd_config 143 | owner: root 144 | group: root 145 | mode: 0600 146 | - name: Restart ssh 147 | service: 148 | name: sshd 149 | state: restarted 150 | -------------------------------------------------------------------------------- /docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: "{{ host }}" 4 | become: true 5 | vars_files: 6 | - 'vars/vault.yaml' 7 | gather_facts: false 8 | vars_prompt: 9 | - name: host 10 | prompt: Which host? 11 | private: no 12 | - name: containers_to_update 13 | prompt: Which containers? 
14 | private: no 15 | tasks: 16 | - name: Docker Pull 17 | command: "docker compose pull {{ containers_to_update }}" 18 | args: 19 | chdir: /home/{{ main_username }}/ 20 | 21 | - name: Docker Update 22 | command: "docker compose up -d {{ containers_to_update }}" 23 | args: 24 | chdir: /home/{{ main_username }}/ 25 | 26 | - name: Docker prune 27 | command: "docker image prune -af" 28 | roles: 29 | - role: docker-compose-generator 30 | -------------------------------------------------------------------------------- /files/sshd_config: -------------------------------------------------------------------------------- 1 | # $OpenBSD: sshd_config,v 1.101 2017/03/14 07:19:07 djm Exp $ 2 | 3 | # This is the sshd server system-wide configuration file. See 4 | # sshd_config(5) for more information. 5 | 6 | # This sshd was compiled with PATH=/usr/bin:/bin:/usr/sbin:/sbin 7 | 8 | # The strategy used for options in the default sshd_config shipped with 9 | # OpenSSH is to specify options with their default value where 10 | # possible, but leave them commented. Uncommented options override the 11 | # default value. 12 | 13 | Protocol 2 14 | #Port 22 15 | #AddressFamily any 16 | #ListenAddress 0.0.0.0 17 | #ListenAddress :: 18 | 19 | #HostKey /etc/ssh/ssh_host_rsa_key 20 | #HostKey /etc/ssh/ssh_host_ecdsa_key 21 | #HostKey /etc/ssh/ssh_host_ed25519_key 22 | 23 | # Ciphers and keying 24 | #RekeyLimit default none 25 | 26 | # Logging 27 | #SyslogFacility AUTH 28 | #LogLevel INFO 29 | 30 | # Authentication: 31 | 32 | #LoginGraceTime 2m 33 | PermitRootLogin no 34 | #StrictModes yes 35 | MaxAuthTries 3 36 | #MaxSessions 10 37 | 38 | #PubkeyAuthentication yes 39 | 40 | # Expect .ssh/authorized_keys2 to be disregarded by default in future. 41 | #AuthorizedKeysFile .ssh/authorized_keys .ssh/authorized_keys2 42 | 43 | #AuthorizedPrincipalsFile none 44 | 45 | #AuthorizedKeysCommand none 46 | #AuthorizedKeysCommandUser nobody 47 | 48 | # For this to work you will also need host keys in /etc/ssh/ssh_known_hosts 49 | #HostbasedAuthentication no 50 | # Change to yes if you don't trust ~/.ssh/known_hosts for 51 | # HostbasedAuthentication 52 | #IgnoreUserKnownHosts no 53 | # Don't read the user's ~/.rhosts and ~/.shosts files 54 | #IgnoreRhosts yes 55 | 56 | # To disable tunneled clear text passwords, change to no here! 57 | PasswordAuthentication no 58 | PermitEmptyPasswords no 59 | 60 | # Change to yes to enable challenge-response passwords (beware issues with 61 | # some PAM modules and threads) 62 | ChallengeResponseAuthentication no 63 | 64 | # Kerberos options 65 | #KerberosAuthentication no 66 | #KerberosOrLocalPasswd yes 67 | #KerberosTicketCleanup yes 68 | #KerberosGetAFSToken no 69 | 70 | # GSSAPI options 71 | #GSSAPIAuthentication no 72 | #GSSAPICleanupCredentials yes 73 | #GSSAPIStrictAcceptorCheck yes 74 | #GSSAPIKeyExchange no 75 | 76 | # Set this to 'yes' to enable PAM authentication, account processing, 77 | # and session processing. If this is enabled, PAM authentication will 78 | # be allowed through the ChallengeResponseAuthentication and 79 | # PasswordAuthentication. Depending on your PAM configuration, 80 | # PAM authentication via ChallengeResponseAuthentication may bypass 81 | # the setting of "PermitRootLogin without-password". 82 | # If you just want the PAM account and session checks to run without 83 | # PAM authentication, then enable this but set PasswordAuthentication 84 | # and ChallengeResponseAuthentication to 'no'. 
85 | UsePAM yes 86 | 87 | #AllowAgentForwarding yes 88 | #AllowTcpForwarding yes 89 | #GatewayPorts no 90 | X11Forwarding yes 91 | #X11DisplayOffset 10 92 | #X11UseLocalhost yes 93 | #PermitTTY yes 94 | PrintMotd no 95 | #PrintLastLog yes 96 | #TCPKeepAlive yes 97 | #UseLogin no 98 | #PermitUserEnvironment no 99 | #Compression delayed 100 | #ClientAliveInterval 0 101 | #ClientAliveCountMax 3 102 | #UseDNS no 103 | #PidFile /var/run/sshd.pid 104 | #MaxStartups 10:30:100 105 | #PermitTunnel no 106 | #ChrootDirectory none 107 | #VersionAddendum none 108 | 109 | # no default banner path 110 | #Banner none 111 | 112 | # Allow client to pass locale environment variables 113 | AcceptEnv LANG LC_* 114 | 115 | # override default of no subsystems 116 | Subsystem sftp /usr/lib/openssh/sftp-server 117 | 118 | # Example of overriding settings on a per-user basis 119 | #Match User anoncvs 120 | # X11Forwarding no 121 | # AllowTcpForwarding no 122 | # PermitTTY no 123 | # ForceCommand cvs server 124 | -------------------------------------------------------------------------------- /files/sudoer_ansible: -------------------------------------------------------------------------------- 1 | ansible ALL=(ALL) NOPASSWD: ALL 2 | -------------------------------------------------------------------------------- /git-init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # sets up a pre-commit hook to ensure that vault.yaml is encrypted 3 | # 4 | 5 | if [ -d .git/ ]; then 6 | rm -f .git/hooks/pre-commit 7 | fi 8 | cp git-vault-check.sh .git/hooks/pre-commit 9 | 10 | chmod +x .git/hooks/pre-commit -------------------------------------------------------------------------------- /git-vault-check.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | green=$'\e[0;32m' 3 | red=$'\e[0;31m' 4 | reset=$'\e[0m' 5 | 6 | if ( cat $HOME/infra/vars/vault.yaml | grep -q "\$ANSIBLE_VAULT;" ); then 7 | echo "${green}Vault Encrypted. Safe to commit.${reset}" 8 | else 9 | echo "${red}Vault not encrypted! Run 'just encrypt' and try again.${reset}" 10 | exit 1 11 | fi -------------------------------------------------------------------------------- /gitea.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | green=$'\e[0;32m' 3 | red=$'\e[0;31m' 4 | reset=$'\e[0m' 5 | 6 | if ( cat vars/vault.yaml | grep -q "\$ANSIBLE_VAULT;" ); then 7 | echo "${green}Vault Encrypted. Safe to commit.${reset}" 8 | else 9 | echo "${red}Vault not encrypted! Run 'just encrypt' and try again.${reset}" 10 | exit 1 11 | fi 12 | read -p "Commit description: " desc 13 | git add . && \ 14 | git commit -m "$desc" && \ 15 | git push gitea dev 16 | -------------------------------------------------------------------------------- /github.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | green=$'\e[0;32m' 3 | red=$'\e[0;31m' 4 | reset=$'\e[0m' 5 | 6 | if ( cat vars/vault.yaml | grep -q "\$ANSIBLE_VAULT;" ); then 7 | echo "${green}Vault Encrypted. Safe to commit.${reset}" 8 | else 9 | echo "${red}Vault not encrypted! Run 'just encrypt' and try again.${reset}" 10 | exit 1 11 | fi 12 | read -p "Commit description: " desc 13 | git add .
&& \ 14 | git commit -m "$desc" && \ 15 | git push github main 16 | -------------------------------------------------------------------------------- /group_vars/all.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # generic settings 3 | main_username: "{{ secret_fuzzy_name }}" 4 | main_groupname: "{{ main_username }}" 5 | main_uid: "1000" 6 | main_gid: "{{ main_uid }}" 7 | 8 | # grog.package 9 | package_list: 10 | - name: curl 11 | - name: git 12 | - name: htop 13 | - name: lm-sensors 14 | - name: net-tools 15 | - name: ncdu 16 | - name: sudo 17 | - name: wget 18 | 19 | # geerlingguy.docker 20 | docker_packages_state: latest 21 | 22 | # bash aliases 23 | bash_aliases: 24 | - alias: c 25 | command: clear 26 | - alias: e 27 | command: exit 28 | 29 | bash_autorestic_aliases: 30 | - alias: arsnapshot 31 | command: autorestic exec -av -- snapshots 32 | - alias: arbackup 33 | command: autorestic backup -a 34 | - alias: arprune 35 | command: autorestic forget -a -- prune 36 | 37 | bash_apt_aliases: 38 | - alias: agi 39 | command: sudo apt install $c 40 | - alias: agr 41 | command: sudo apt remove $c 42 | - alias: agu 43 | command: sudo apt update && sudo apt upgrade 44 | 45 | bash_git_aliases: 46 | - alias: ga 47 | command: git add --all 48 | - alias: gp 49 | command: git push $c 50 | - alias: gpdry 51 | command: git push --dry-run 52 | - alias: gpf 53 | command: git push --force 54 | - alias: gb 55 | command: git branch 56 | - alias: gbd 57 | command: git branch -d $c 58 | - alias: gcam 59 | command: git commit -am $c 60 | - alias: gcm 61 | command: git commit -m $c 62 | - alias: gcheck 63 | command: git checkout 64 | - alias: gm 65 | command: git merge $c 66 | - alias: gms 67 | command: git merge --squash $c 68 | - alias: gst 69 | command: git status 70 | - alias: gpl 71 | command: git pull 72 | - alias: glast 73 | command: git log -1 HEAD --stat 74 | - alias: greset 75 | command: git reset HEAD~1 76 | 77 | bash_systemctl_aliases: 78 | - alias: sc-start 79 | command: sudo systemctl start $c 80 | - alias: sc-stop 81 | command: sudo systemctl stop $c 82 | - alias: sc-restart 83 | command: sudo systemctl restart $c 84 | - alias: sc-enable 85 | command: sudo systemctl enable $c 86 | - alias: sc-disable 87 | command: sudo systemctl disable $c 88 | - alias: sc-status 89 | command: sudo systemctl status $c 90 | - alias: sc-reload 91 | command: sudo systemctl daemon-reload 92 | 93 | bash_docker_aliases: 94 | - alias: dc-up 95 | command: docker compose up -d $c 96 | - alias: dc-down 97 | command: docker compose down $c 98 | - alias: dc-pull 99 | command: docker compose pull $c 100 | - alias: dc-exec 101 | command: docker compose exec $c 102 | - alias: dc-ps 103 | command: docker compose ps 104 | - alias: dc-start 105 | command: docker compose start $c 106 | - alias: dc-stop 107 | command: docker compose stop $c 108 | - alias: dc-restart 109 | command: docker compose restart $c 110 | - alias: dc-rm 111 | command: docker compose rm $c 112 | 113 | bash_pacman_aliases: 114 | - alias: pacup 115 | command: sudo pacman -Syu 116 | - alias: pacin 117 | command: sudo pacman -S 118 | - alias: pacloc 119 | command: sudo pacman -Qi 120 | - alias: pacmir 121 | command: sudo pacman -Syy 122 | - alias: pacrem 123 | command: sudo pacman -Rns 124 | 125 | bash_yay_aliases: 126 | - alias: yaup 127 | command: yay -Syu 128 | - alias: yain 129 | command: yay -S 130 | - alias: yaloc 131 | command: yay -Qi 132 | - alias: yamir 133 | command: yay -Syy 134 | - alias: yarem 135 | 
command: yay -Rns 136 | 137 | bash_dnf_aliases: 138 | - alias: dnfs 139 | command: dnf search 140 | - alias: dnfu 141 | command: sudo dnf upgrade 142 | - alias: dnfi 143 | command: sudo dnf install 144 | - alias: dnfr 145 | command: sudo dnf remove 146 | - alias: dnfc 147 | command: sudo dnf clean all 148 | 149 | # autorestic_ver: 1.1.2 150 | autorestic_config_user: "{{ main_username }}" 151 | # restic_ver: 0.12.1 152 | # runitor_ver: 0.8.0 153 | runitor_url: "{{ secret_hc_ping_url }}" 154 | 155 | ### Github API 156 | github_api_user: fuzzymistborn 157 | github_api_pass: "{{ secret_github_api_pass }}" 158 | github_api_auth: yes 159 | 160 | # geerlingguy.ntp 161 | ntp_timezone: America/New_York 162 | 163 | # geerlingguy.docker 164 | docker_users: 165 | - "{{ main_username }}" 166 | 167 | ### Docker-Compose with docker-compose-generator 168 | appdata_path: /home/{{ main_username }}/docker 169 | docker_compose_generator_uid: "{{ main_uid }}" 170 | docker_compose_generator_gid: "{{ main_gid }}" 171 | docker_compose_generator_output_path: /home/{{ main_username }} 172 | docker_compose_hostname: "{{ hostname }}" 173 | -------------------------------------------------------------------------------- /group_vars/ambition.yml: -------------------------------------------------------------------------------- 1 | --- 2 | target_os: ubuntu 3 | hostname: ambition 4 | 5 | ### Install packages with grog.package 6 | package_list: 7 | - name: curl 8 | - name: iperf3 9 | - name: restic 10 | - name: net-tools 11 | - name: python3 12 | - name: wget 13 | - name: wireguard 14 | 15 | ### Bash Aliases 16 | bash_docker: true 17 | bash_autorestic: true 18 | bash_systemctl: true 19 | bash_apt: true 20 | 21 | pull_backup: true 22 | # hugo_ver: 0.88.1 23 | # lego_ver: 4.4.0 24 | 25 | ### Cronjobs 26 | cronjobs: 27 | - name: Backup 28 | job: /usr/local/bin/runitor -uuid {{ secret_hc_ambition_db_backup }} -- /home/{{ main_username }}/scripts/db_backup.sh 29 | user: "{{ main_username }}" 30 | minute: 0 31 | hour: 5 32 | - name: Restic Prune 33 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_prune_ambition }} -- /usr/local/bin/autorestic forget -a -- prune 34 | user: "{{ main_username }}" 35 | minute: 0 36 | hour: 6 37 | weekday: 1 38 | - name: Restic Check 39 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_check_ambition }} -- /usr/local/bin/autorestic exec -a -- check 40 | user: "{{ main_username }}" 41 | minute: 45 42 | hour: 5 43 | day: 1 44 | - name: Cert Update 45 | job: /usr/local/bin/runitor -uuid {{ secret_hc_ambition_certs }} -- /home/{{ main_username }}/scripts/certs.sh 46 | user: root 47 | minute: 0 48 | hour: 19 49 | day: 1,15 50 | - name: Data Backup 51 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_backup_ambition }} -- /usr/local/bin/autorestic backup -a -c /home/{{ main_username }}/.autorestic.yml 52 | user: root 53 | minute: 15 54 | hour: 5 55 | 56 | ### UFW Rules 57 | ufw: 58 | - comment: 'ssh' 59 | rule: 'allow' 60 | port: '22' 61 | proto: 'any' 62 | route: 'no' 63 | - comment: 'reverse proxy' 64 | rule: 'allow' 65 | port: '80' 66 | proto: 'tcp' 67 | route: 'no' 68 | - comment: 'reverse proxy' 69 | rule: 'allow' 70 | port: '443' 71 | proto: 'tcp' 72 | route: 'no' 73 | - comment: 'iperf' 74 | rule: 'allow' 75 | port: '5201' 76 | proto: 'tcp' 77 | route: 'no' 78 | - comment: 'wireguard tunnel' 79 | rule: 'allow' 80 | port: '51282' 81 | proto: 'udp' 82 | route: 'no' 83 | - comment: 'gitea' 84 | rule: 'allow' 85 | port: '222' 86 | proto: 'tcp' 87 | route: 'no' 88 | - comment: 
'portainer' 89 | rule: 'allow' 90 | port: '9001' 91 | proto: 'tcp' 92 | route: 'no' 93 | - comment: 'tailscale adapter' 94 | rule: 'allow' 95 | interface: tailscale0 96 | direction: in 97 | proto: 'any' 98 | route: 'no' 99 | 100 | ### Caddy Config 101 | caddy_systemd_capabilities_enabled: true 102 | caddy_systemd_network_dependency: false 103 | caddy_packages: ["github.com/hslatman/caddy-crowdsec-bouncer"] 104 | caddy_conf_dir: /home/{{ main_username }}/caddy 105 | caddy_config: "{{ lookup('template', 'roles/ambition/templates/Caddyfile.j2') }}" 106 | 107 | ### Autorestic Config 108 | autorestic_config_user: "{{ main_username}}" 109 | autorestic_config_directory: /home/"{{ main_username}}" 110 | autorestic_run_check: false 111 | autorestic_config_yaml: 112 | version: 2 113 | backends: 114 | b2_docker: 115 | type: b2 116 | path: "{{ secret_restic_b2_bucket }}:/linode/docker" 117 | key: "{{ secret_restic_repo_password }}" 118 | env: 119 | - B2_ACCOUNT_ID: "{{ secret_restic_b2_account_id }}" 120 | - B2_ACCOUNT_KEY: "{{ secret_restic_b2_account_key }}" 121 | b2_db: 122 | type: b2 123 | path: "{{ secret_restic_b2_bucket }}:/linode/db" 124 | key: "{{ secret_restic_repo_password }}" 125 | env: 126 | - B2_ACCOUNT_ID: "{{ secret_restic_b2_account_id }}" 127 | - B2_ACCOUNT_KEY: "{{ secret_restic_b2_account_key }}" 128 | b2_certs: 129 | type: b2 130 | path: "{{ secret_restic_b2_bucket }}:/linode/certs" 131 | key: "{{ secret_restic_repo_password }}" 132 | env: 133 | - B2_ACCOUNT_ID: "{{ secret_restic_b2_account_id }}" 134 | - B2_ACCOUNT_KEY: "{{ secret_restic_b2_account_key }}" 135 | locations: 136 | docker: 137 | from: '/home/{{ main_username }}/docker' 138 | to: 139 | - b2_docker 140 | options: 141 | backup: 142 | exclude: 143 | - '/home/{{ main_username }}/docker/umami' 144 | - '/home/{{ main_username }}/docker/gitea/mariadb' 145 | forget: 146 | keep-daily: 1 147 | keep-weekly: 6 148 | keep-monthly: 3 149 | db: 150 | from: '/home/{{ main_username }}/db_backups' 151 | to: 152 | - b2_db 153 | options: 154 | forget: 155 | keep-daily: 1 156 | keep-weekly: 6 157 | keep-monthly: 3 158 | certs: 159 | from: '/home/{{ main_username }}/lego' 160 | to: 161 | - b2_certs 162 | options: 163 | forget: 164 | keep-daily: 1 165 | keep-weekly: 4 166 | -------------------------------------------------------------------------------- /group_vars/cultivation.yml: -------------------------------------------------------------------------------- 1 | --- 2 | target_os: ubuntu 3 | hostname: cultivation 4 | 5 | ### Install packages with grog.package 6 | package_list: 7 | - name: acl 8 | - name: curl 9 | - name: git 10 | - name: htop 11 | - name: lm-sensors 12 | - name: net-tools 13 | - name: python 14 | - name: restic 15 | - name: sudo 16 | - name: wget 17 | 18 | pull_backup: false 19 | 20 | ### LNXLink 21 | lnxlink_diskuse: true 22 | 23 | ### Bash Aliases 24 | bash_docker: true 25 | bash_autorestic: true 26 | bash_systemctl: true 27 | bash_apt: true 28 | 29 | ### Cronjobs 30 | cronjobs: 31 | - name: Restic Prune 32 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_prune_cultivation }} -- /usr/local/bin/autorestic forget -a -- prune 33 | user: "{{ main_username }}" 34 | minute: 15 35 | hour: 22 36 | weekday: 1 37 | - name: Restic Check 38 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_check_cultivation }} -- /usr/local/bin/autorestic exec -a -- check 39 | user: "{{ main_username }}" 40 | minute: 15 41 | hour: 2 42 | day: 1 43 | - name: Media Backup 44 | job: /usr/local/bin/runitor -uuid {{ 
secret_hc_restic_backup_cultivation }} -- /usr/local/bin/autorestic backup -a -c /home/{{ main_username }}/.autorestic.yml 45 | user: root 46 | minute: 15 47 | hour: 0 48 | - name: Invidious Reboot 49 | job: docker container restart invidious > /dev/null 2>&1 50 | user: "{{ main_username }}" 51 | minute: 0 52 | hour: "*/12" 53 | 54 | ### Autorestic Config 55 | autorestic_config_user: "{{ main_username}}" 56 | autorestic_config_directory: /home/"{{ main_username}}" 57 | autorestic_run_check: false 58 | autorestic_config_yaml: 59 | version: 2 60 | backends: 61 | local_docker: 62 | type: rest 63 | path: 'http://192.168.1.5:8500/cultivation' 64 | key: "{{ secret_restic_repo_password }}" 65 | rest: 66 | user: "{{ secret_restic_rest_user }}" 67 | password: "{{ secret_restic_rest_password }}" 68 | b2_docker: 69 | type: b2 70 | path: "{{ secret_restic_b2_bucket }}:/cultivation" 71 | key: "{{ secret_restic_repo_password }}" 72 | env: 73 | - B2_ACCOUNT_ID: "{{ secret_restic_b2_account_id }}" 74 | - B2_ACCOUNT_KEY: "{{ secret_restic_b2_account_key }}" 75 | locations: 76 | docker: 77 | from: /home/{{ main_username }}/docker 78 | to: 79 | - local_docker 80 | - b2_docker 81 | options: 82 | forget: 83 | keep-daily: 1 84 | keep-weekly: 8 85 | keep-monthly: 4 86 | -------------------------------------------------------------------------------- /group_vars/design.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### Variables 3 | hostname: design 4 | pull_backup: false 5 | 6 | ### LNXLink 7 | lnxlink_shutdown: true 8 | lnxlink_diskuse: true 9 | lnxlink_desktop: true 10 | 11 | ### Install packages with grog.package 12 | package_list: 13 | - name: "@core" 14 | - name: "@multimedia" 15 | - name: "@sound-and-video" 16 | - name: intel-media-driver 17 | - name: audacity 18 | - name: cifs-utils 19 | - name: curl 20 | - name: ethtool 21 | - name: firefox 22 | - name: flameshot 23 | - name: flatpak 24 | - name: gcc 25 | - name: gimp 26 | - name: git 27 | - name: hddtemp 28 | - name: htop 29 | - name: kate 30 | - name: pinta 31 | - name: lm_sensors 32 | - name: nano 33 | - name: ncdu 34 | - name: net-tools 35 | - name: nfs-utils 36 | - name: python 37 | - name: python-psutil 38 | - name: python3-devel 39 | - name: screen 40 | - name: sudo 41 | - name: vlc 42 | - name: wget 43 | 44 | flatpak_list: 45 | - name: com.vivaldi.Vivaldi 46 | - name: com.bambulab.BambuStudio 47 | 48 | ### Mounts 49 | mountpoints: 50 | - path: /mnt/Backup 51 | source: 192.168.1.5:/config 52 | fs: nfs 53 | opts: "defaults" 54 | - path: /mnt/NVR 55 | source: 192.168.1.5:/NVR 56 | fs: nfs 57 | opts: "defaults" 58 | - path: "/mnt/Media" 59 | source: "//192.168.1.10/Media" 60 | fs: cifs 61 | opts: "_netdev,noauto,nofail,x-systemd.automount,uid=1000,gid=1000,credentials=/home/{{ main_username }}/.smbcredentials" 62 | 63 | ### Variables 64 | pip_package: python-pip 65 | pip_packages: 66 | - konsave 67 | 68 | ### Bash Aliases 69 | bash_aliases: 70 | - alias: c 71 | command: clear 72 | - alias: e 73 | command: exit 74 | - alias: ssh-remove 75 | command: ssh-keygen -f /home/{{ main_username }}/.ssh/known_hosts -R $c 76 | - alias: konsave-install 77 | command: wget -qO - https://ansible.{{ secret_personal_internal_url }} | bash 78 | bash_autorestic: true 79 | bash_systemctl: true 80 | bash_git: true 81 | bash_dnf: true 82 | -------------------------------------------------------------------------------- /group_vars/desktop.yml:
-------------------------------------------------------------------------------- 1 | --- 2 | target_os: fedora 3 | bootstrap_user: "{{ main_username }}" 4 | pull_backup: false 5 | 6 | ### Install packages with grog.package 7 | package_list: 8 | - name: "@core" 9 | - name: "@multimedia" 10 | - name: "@sound-and-video" 11 | - name: audacity 12 | - name: calibre 13 | - name: chromium 14 | - name: cifs-utils 15 | - name: curl 16 | - name: ethtool 17 | - name: filezilla 18 | - name: firefox 19 | - name: flameshot 20 | - name: flatpak 21 | - name: flatseal 22 | - name: freerdp 23 | - name: gcc 24 | - name: gimp 25 | - name: git 26 | - name: hddtemp 27 | - name: htop 28 | - name: iperf3 29 | - name: kate 30 | - name: pinta 31 | - name: krename 32 | - name: libreoffice 33 | - name: lm_sensors 34 | - name: mkvtoolnix 35 | - name: mpv 36 | - name: mpv-mpris 37 | - name: nano 38 | - name: ncdu 39 | - name: net-tools 40 | - name: nextcloud-client 41 | - name: nfs-utils 42 | - name: pdfarranger 43 | - name: python 44 | - name: python-psutil 45 | - name: python3-devel 46 | - name: remmina 47 | - name: restic 48 | - name: screen 49 | - name: skanlite 50 | - name: solaar 51 | - name: spectacle 52 | - name: sudo 53 | - name: thunderbird 54 | - name: tree 55 | - name: vlc 56 | - name: wget 57 | - name: wireguard-tools 58 | - name: logiops 59 | - name: printer-driver-brlaser 60 | - name: pipx 61 | 62 | flatpak_list: 63 | - name: us.zoom.Zoom 64 | - name: com.github.IsmaelMartinez.teams_for_linux 65 | - name: com.github.iwalton3.jellyfin-media-player 66 | - name: com.mojang.Minecraft 67 | - name: md.obsidian.Obsidian 68 | - name: com.bambulab.BambuStudio 69 | - name: org.ferdium.Ferdium 70 | - name: com.system76.Popsicle 71 | - name: com.jgraph.drawio.desktop 72 | - name: org.raspberrypi.rpi-imager 73 | - name: com.moonlight_stream.Moonlight 74 | - name: io.github.janbar.noson 75 | - name: io.github.dweymouth.supersonic 76 | - name: org.bunkus.mkvtoolnix-gui 77 | 78 | ### Mounts 79 | mountpoints: 80 | - path: /mnt/Backup 81 | source: 192.168.1.5:/config 82 | fs: nfs 83 | opts: "defaults" 84 | - path: /mnt/NVR 85 | source: 192.168.1.5:/NVR 86 | fs: nfs 87 | opts: "defaults" 88 | - path: "/mnt/Media" 89 | source: "//192.168.1.10/Media" 90 | fs: cifs 91 | opts: "_netdev,noauto,nofail,x-systemd.automount,uid=1000,gid=1000,credentials=/home/{{ main_username }}/.smbcredentials" 92 | 93 | ### Variables 94 | pip_package: python-pip 95 | pip_packages: 96 | - konsave 97 | 98 | ### Bash Aliases 99 | bash_aliases: 100 | - alias: c 101 | command: clear 102 | - alias: e 103 | command: exit 104 | - alias: ssh-remove 105 | command: ssh-keygen -f /home/{{ main_username }}/.ssh/known_hosts -R $c 106 | - alias: logi-restart 107 | command: sudo systemctl restart logid 108 | - alias: konsave-install 109 | command: wget -qO - https://ansible.{{ secret_personal_internal_url }} | bash 110 | bash_autorestic: true 111 | bash_systemctl: true 112 | bash_git: true 113 | bash_dnf: true 114 | -------------------------------------------------------------------------------- /group_vars/dominion.yml: -------------------------------------------------------------------------------- 1 | --- 2 | target_os: ubuntu 3 | hostname: dominion 4 | 5 | ### Install packages with grog.package 6 | package_list: 7 | - name: python3 8 | - name: curl 9 | - name: sudo 10 | - name: git 11 | - name: wget 12 | - name: make 13 | - name: screen 14 | - name: unzip 15 | 16 | ### Pip Packages 17 | pipx_packages: 18 | - ansible 19 | - ansible-lint 20 | - yamllint 21 |
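# Note: dominion is the Ansible controller (see README), so pipx keeps
# ansible/ansible-lint/yamllint in isolated environments here; this list is
# presumably consumed by the roles/install/pip_packages role.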
22 | pull_backup: false 23 | 24 | ### Bash Aliases 25 | 26 | bash_aliases: 27 | - alias: c 28 | command: clear 29 | - alias: e 30 | command: exit 31 | - alias: gitcheck 32 | command: /bin/bash /home/{{ main_username }}/infra/git-vault-check.sh 33 | - alias: ssh-remove 34 | command: ssh-keygen -f /home/{{ main_username }}/.ssh/known_hosts -R $c 35 | bash_systemctl: true 36 | bash_apt: true 37 | bash_git: true 38 | 39 | ### Cronjobs 40 | cronjobs: 41 | - name: Ansible Update 42 | job: /usr/local/bin/runitor -uuid {{ secret_hc_ansible_update }} -- /home/{{ main_username }}/update.sh 43 | user: "{{ main_username }}" 44 | minute: 0 45 | hour: 20 46 | weekday: 3 47 | -------------------------------------------------------------------------------- /group_vars/endowment.yml: -------------------------------------------------------------------------------- 1 | --- 2 | target_os: ubuntu 3 | hostname: endowment 4 | 5 | ### Install packages with grog.package 6 | package_list: 7 | - name: acl 8 | - name: curl 9 | - name: cifs-utils 10 | - name: git 11 | - name: htop 12 | - name: lm-sensors 13 | - name: net-tools 14 | - name: python 15 | - name: restic 16 | - name: sudo 17 | - name: wget 18 | 19 | smb_mountpoints: 20 | - path: "/mnt/Media" 21 | source: "//192.168.1.10/Media" 22 | fs: cifs 23 | opts: "defaults,uid=1000,gid=1000,credentials=/home/{{ main_username }}/.smbcredentials" 24 | 25 | pull_backup: false 26 | 27 | ### LNXLink 28 | lnxlink_diskuse: true 29 | 30 | ### Bash Aliases 31 | bash_docker: true 32 | bash_autorestic: true 33 | bash_systemctl: true 34 | bash_apt: true 35 | 36 | ### Cronjobs 37 | cronjobs: 38 | - name: Nextcloud 39 | job: /home/{{ main_username }}/scripts/nextcloud_cron.sh 40 | user: "{{ main_username }}" 41 | minute: '*/5' 42 | - name: Restart Jellyfin 43 | job: docker container restart jellyfin > /dev/null 2>&1 44 | user: "{{ main_username }}" 45 | hour: 2 46 | minute: 30 47 | - name: Restic Prune 48 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_prune_endowment }} -- /usr/local/bin/autorestic forget -a -- prune 49 | user: "{{ main_username }}" 50 | minute: 30 51 | hour: 22 52 | weekday: 1 53 | - name: Restic Check 54 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_check_endowment }} -- /usr/local/bin/autorestic exec -a -- check 55 | user: "{{ main_username }}" 56 | minute: 30 57 | hour: 2 58 | day: 1 59 | - name: Photo Reorg 60 | job: /usr/local/bin/runitor -uuid {{ secret_hc_photo_reorg }} -- /home/{{ main_username }}/scripts/photos.sh 61 | user: "{{ main_username }}" 62 | minute: 0 63 | hour: 3 64 | - name: Media Backup 65 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_backup_endowment }} -- /usr/local/bin/autorestic backup -a -c /home/{{ main_username }}/.autorestic.yml 66 | user: root 67 | minute: 30 68 | hour: 0 69 | 70 | ### Autorestic Config 71 | autorestic_config_user: "{{ main_username}}" 72 | autorestic_config_directory: /home/"{{ main_username}}" 73 | autorestic_run_check: false 74 | autorestic_config_yaml: 75 | version: 2 76 | backends: 77 | local_docker: 78 | type: rest 79 | path: 'http://192.168.1.5:8500/endowment' 80 | key: "{{ secret_restic_repo_password }}" 81 | rest: 82 | user: "{{ secret_restic_rest_user }}" 83 | password: "{{ secret_restic_rest_password }}" 84 | b2_docker: 85 | type: b2 86 | path: "{{ secret_restic_b2_bucket }}:/endowment" 87 | key: "{{ secret_restic_repo_password }}" 88 | env: 89 | - B2_ACCOUNT_ID: "{{ secret_restic_b2_account_id }}" 90 | - B2_ACCOUNT_KEY: "{{ secret_restic_b2_account_key }}" 91 | locations: 92 | 
docker: 93 | from: '/home/{{ main_username }}/docker' 94 | to: 95 | - local_docker 96 | - b2_docker 97 | options: 98 | backup: 99 | exclude: 100 | - 'transcodes' 101 | forget: 102 | keep-daily: 1 103 | keep-weekly: 8 104 | keep-monthly: 4 105 | -------------------------------------------------------------------------------- /group_vars/hoid.yml: -------------------------------------------------------------------------------- 1 | --- 2 | target_os: fedora 3 | hostname: hoid 4 | bootstrap_user: "{{ main_username }}" 5 | pull_backup: false 6 | 7 | ### LNXLink 8 | lnxlink_shutdown: true 9 | lnxlink_diskuse: true 10 | lnxlink_desktop: true 11 | 12 | ### Autorestic Config 13 | autorestic_config_user: "{{ main_username}}" 14 | autorestic_config_directory: /home/"{{ main_username}}" 15 | autorestic_run_check: false 16 | autorestic_config_yaml: 17 | version: 2 18 | backends: 19 | local_files: 20 | type: rest 21 | path: 'http://192.168.1.5:8500/hoid' 22 | key: "{{ secret_restic_repo_password }}" 23 | rest: 24 | user: "{{ secret_restic_rest_user }}" 25 | password: "{{ secret_restic_rest_password }}" 26 | locations: 27 | files: 28 | from: /home/{{ main_username }} 29 | to: 30 | - local_files 31 | options: 32 | backup: 33 | exclude: 34 | - '.cache*' 35 | - 'Nextcloud' 36 | - 'Downloads' 37 | - 'git' 38 | - '*steam*' 39 | forget: 40 | keep-daily: 3 41 | keep-weekly: 4 42 | keep-monthly: 2 43 | 44 | desktop_hc_prune_id: "{{ secret_hc_restic_prune_hoid }}" 45 | desktop_hc_backup_id: "{{ secret_hc_restic_backup_hoid }}" 46 | 47 | ### Bash Aliases 48 | bash_aliases: 49 | - alias: fingerprint-enable 50 | command: sudo authselect enable-feature with-fingerprint 51 | - alias: fingerprint-disable 52 | command: sudo authselect disable-feature with-fingerprint 53 | 54 | ### Cronjobs 55 | cronjobs: 56 | - name: Restic Check 57 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_check_hoid }} -- /usr/local/bin/autorestic exec -a -- check 58 | user: "{{ main_username }}" 59 | minute: 20 60 | hour: 20 61 | day: 1 -------------------------------------------------------------------------------- /group_vars/honor.yml: -------------------------------------------------------------------------------- 1 | --- 2 | target_os: ubuntu 3 | hostname: honor 4 | 5 | ### Install packages with grog.package 6 | package_list: 7 | - name: curl 8 | - name: htop 9 | - name: net-tools 10 | - name: sudo 11 | - name: restic 12 | - name: wget 13 | - name: wireguard 14 | 15 | pull_backup: false 16 | # lego_ver: 4.4.0 17 | 18 | ### Bash Aliases 19 | 20 | bash_docker: true 21 | bash_autorestic: true 22 | bash_systemctl: true 23 | bash_apt: true 24 | 25 | ### Cronjobs 26 | cronjobs: 27 | - name: Restic Prune 28 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_prune_honor }} -- /usr/local/bin/autorestic forget -a -- prune 29 | user: "{{ main_username }}" 30 | minute: 15 31 | hour: 22 32 | weekday: 1 33 | - name: Restic Check 34 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_check_honor }} -- /usr/local/bin/autorestic exec -a -- check 35 | user: "{{ main_username }}" 36 | minute: 15 37 | hour: 2 38 | day: 1 39 | - name: Media Backup 40 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_backup_honor }} -- /usr/local/bin/autorestic backup -a -c /home/{{ main_username }}/.autorestic.yml 41 | user: root 42 | minute: 15 43 | hour: 0 44 | - name: Cert Update 45 | job: /usr/local/bin/runitor -uuid {{ secret_hc_honor_certs }} -- /home/{{ main_username }}/certs.sh 46 | user: root 47 | minute: 0 48 | hour: 19 49 | day: 1,15 50 | - 
name: Wireguard Tunnel Check/Restore 51 | job: /home/{{ main_username }}/wg-check.sh 52 | user: root 53 | minute: '*/5' 54 | - name: Ambition DynDNS 55 | job: /usr/local/bin/runitor -uuid {{ secret_hc_ambition_dyndns_check }} -- /home/{{ main_username }}/ddns_vps.sh 56 | user: "{{ main_username }}" 57 | minute: '*/10' 58 | 59 | ### Autorestic Config 60 | autorestic_config_user: "{{ main_username}}" 61 | autorestic_config_directory: /home/"{{ main_username}}" 62 | autorestic_run_check: false 63 | autorestic_config_yaml: 64 | version: 2 65 | backends: 66 | local_docker: 67 | type: rest 68 | path: 'http://192.168.1.5:8500/honor' 69 | key: "{{ secret_restic_repo_password }}" 70 | rest: 71 | user: "{{ secret_restic_rest_user }}" 72 | password: "{{ secret_restic_rest_password }}" 73 | b2_docker: 74 | type: b2 75 | path: "{{ secret_restic_b2_bucket }}:/honor" 76 | key: "{{ secret_restic_repo_password }}" 77 | env: 78 | - B2_ACCOUNT_ID: "{{ secret_restic_b2_account_id }}" 79 | - B2_ACCOUNT_KEY: "{{ secret_restic_b2_account_key }}" 80 | locations: 81 | docker: 82 | from: '/home/{{ main_username }}/docker' 83 | to: 84 | - local_docker 85 | - b2_docker 86 | options: 87 | forget: 88 | keep-daily: 1 89 | keep-weekly: 8 90 | keep-monthly: 4 91 | 92 | ### Caddy Config 93 | caddy_systemd_capabilities_enabled: true 94 | caddy_systemd_network_dependency: false 95 | caddy_conf_dir: /home/{{ main_username }}/caddy 96 | caddy_config: "{{ lookup('template', 'roles/honor/templates/Caddyfile.j2') }}" 97 | -------------------------------------------------------------------------------- /group_vars/identity.yml: -------------------------------------------------------------------------------- 1 | --- 2 | target_os: ubuntu 3 | hostname: identity 4 | 5 | ### Install packages with grog.package 6 | package_list: 7 | - name: sudo 8 | - name: git 9 | - name: curl 10 | - name: restic 11 | - name: wget 12 | 13 | pull_backup: false 14 | lnxlink_diskuse: true 15 | 16 | ### Bash Aliases 17 | 18 | bash_docker: true 19 | bash_autorestic: true 20 | bash_systemctl: true 21 | bash_apt: true 22 | 23 | ### Autorestic Config 24 | autorestic_config_user: "{{ main_username}}" 25 | autorestic_config_directory: /home/"{{ main_username}}" 26 | autorestic_run_check: false 27 | autorestic_config_yaml: 28 | version: 2 29 | backends: 30 | local_docker: 31 | type: rest 32 | path: 'http://192.168.1.5:8500/identity' 33 | key: "{{ secret_restic_repo_password }}" 34 | rest: 35 | user: "{{ secret_restic_rest_user }}" 36 | password: "{{ secret_restic_rest_password }}" 37 | locations: 38 | docker: 39 | from: '/home/{{ main_username }}/docker' 40 | to: 41 | - local_docker 42 | options: 43 | forget: 44 | keep-daily: 1 45 | keep-weekly: 4 46 | keep-monthly: 2 47 | 48 | ### Cronjobs 49 | cronjobs: 50 | - name: Restic Prune 51 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_prune_identity }} -- /usr/local/bin/autorestic forget -a -- prune 52 | user: "{{ main_username }}" 53 | minute: 20 54 | hour: 22 55 | weekday: 1 56 | - name: Restic Check 57 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_check_identity }} -- /usr/local/bin/autorestic exec -a -- check 58 | user: "{{ main_username }}" 59 | minute: 20 60 | hour: 2 61 | day: 1 62 | - name: Media Backup 63 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_backup_identity }} -- /usr/local/bin/autorestic backup -a -c /home/{{ main_username }}/.autorestic.yml 64 | user: root 65 | minute: 20 66 | hour: 0 67 | -------------------------------------------------------------------------------- 
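The autorestic stanza above repeats with minor variations across nearly every host: a `local_docker` REST backend on `192.168.1.5:8500` with a per-host path, usually a `b2_docker` offsite twin, and per-location `forget` policies, driven by the runitor-wrapped backup/prune/check cron jobs. A useful consequence is that restores look the same everywhere; a sketch using autorestic's restore command and the same config file the cron jobs pass (destination path is arbitrary):

```bash
# restore the latest snapshot of the 'docker' location from the local REST
# backend into a scratch directory for inspection
autorestic restore -l docker --from local_docker --to /tmp/restore-docker \
  -c "/home/$USER/.autorestic.yml"
```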
/group_vars/invention.yml: -------------------------------------------------------------------------------- 1 | --- 2 | target_os: ubuntu 3 | hostname: invention 4 | 5 | ### Install packages with grog.package 6 | package_list: 7 | - name: sudo 8 | - name: git 9 | - name: curl 10 | - name: restic 11 | - name: wget 12 | 13 | pull_backup: false 14 | 15 | ### Bash Aliases 16 | 17 | bash_docker: true 18 | bash_autorestic: true 19 | bash_systemctl: true 20 | bash_apt: true 21 | 22 | desktop_hc_prune_id: "{{ secret_hc_restic_prune_invention }}" 23 | desktop_hc_backup_id: "{{ secret_hc_restic_backup_invention }}" 24 | 25 | ### Autorestic Config 26 | autorestic_config_user: "{{ main_username}}" 27 | autorestic_config_directory: /home/"{{ main_username}}" 28 | autorestic_run_check: false 29 | autorestic_config_yaml: 30 | version: 2 31 | backends: 32 | local_docker: 33 | type: rest 34 | path: 'http://192.168.1.5:8500/invention' 35 | key: "{{ secret_restic_repo_password }}" 36 | rest: 37 | user: "{{ secret_restic_rest_user }}" 38 | password: "{{ secret_restic_rest_password }}" 39 | locations: 40 | docker: 41 | from: '/home/{{ main_username }}/docker' 42 | to: 43 | - local_docker 44 | options: 45 | forget: 46 | keep-daily: 1 47 | keep-weekly: 4 48 | keep-monthly: 2 49 | -------------------------------------------------------------------------------- /group_vars/investiture.yml: -------------------------------------------------------------------------------- 1 | --- 2 | target_os: ubuntu 3 | hostname: investiture 4 | 5 | ### Install packages with grog.package 6 | package_list: 7 | - name: curl 8 | - name: git 9 | - name: htop 10 | - name: net-tools 11 | - name: python 12 | - name: sudo 13 | - name: wget 14 | 15 | ### Bash Aliases 16 | 17 | bash_docker: true 18 | bash_autorestic: false 19 | bash_systemctl: true 20 | bash_apt: true 21 | 22 | ### Cronjobs 23 | cronjobs: 24 | - name: Docker Image Prune 25 | job: /usr/local/bin/runitor -uuid {{ secret_hc_investiture_docker_prune }} -- /usr/bin/docker system prune -a -f 26 | user: "{{ main_username }}" 27 | minute: 35 28 | hour: 5 29 | day: 1,15 30 | -------------------------------------------------------------------------------- /group_vars/omada.yml: -------------------------------------------------------------------------------- 1 | --- 2 | target_os: ubuntu 3 | hostname: omada 4 | 5 | ### Install packages with grog.package 6 | package_list: 7 | - name: sudo 8 | - name: git 9 | - name: curl 10 | - name: restic 11 | - name: wget 12 | 13 | pull_backup: false 14 | 15 | ### Bash Aliases 16 | 17 | bash_docker: true 18 | bash_autorestic: true 19 | bash_systemctl: true 20 | bash_apt: true 21 | 22 | ### Cronjobs 23 | cronjobs: 24 | - name: Restic Prune 25 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_prune_omada }} -- /usr/local/bin/autorestic forget -a -- prune 26 | user: "{{ main_username }}" 27 | minute: 20 28 | hour: 22 29 | weekday: 1 30 | - name: Restic Check 31 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_check_omada }} -- /usr/local/bin/autorestic exec -a -- check 32 | user: "{{ main_username }}" 33 | minute: 20 34 | hour: 2 35 | day: 1 36 | - name: Media Backup 37 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_backup_omada }} -- /usr/local/bin/autorestic backup -a -c /home/{{ main_username }}/.autorestic.yml 38 | user: root 39 | minute: 20 40 | hour: 0 41 | 42 | ### Autorestic Config 43 | autorestic_config_user: "{{ main_username}}" 44 | autorestic_config_directory: /home/"{{ main_username}}" 45 | autorestic_run_check: false 
46 | autorestic_config_yaml: 47 | version: 2 48 | backends: 49 | local_docker: 50 | type: rest 51 | path: 'http://192.168.1.5:8500/omada' 52 | key: "{{ secret_restic_repo_password }}" 53 | rest: 54 | user: "{{ secret_restic_rest_user }}" 55 | password: "{{ secret_restic_rest_password }}" 56 | b2_docker: 57 | type: b2 58 | path: "{{ secret_restic_b2_bucket }}:/omada" 59 | key: "{{ secret_restic_repo_password }}" 60 | env: 61 | - B2_ACCOUNT_ID: "{{ secret_restic_b2_account_id }}" 62 | - B2_ACCOUNT_KEY: "{{ secret_restic_b2_account_key }}" 63 | locations: 64 | docker: 65 | from: '/home/{{ main_username }}/docker' 66 | to: 67 | - local_docker 68 | - b2_docker 69 | options: 70 | forget: 71 | keep-daily: 1 72 | keep-weekly: 4 73 | keep-monthly: 2 74 | -------------------------------------------------------------------------------- /group_vars/preservation.yml: -------------------------------------------------------------------------------- 1 | --- 2 | target_os: ubuntu 3 | hostname: preservation 4 | 5 | ### Install packages with grog.package 6 | package_list: 7 | - name: acl 8 | - name: curl 9 | - name: htop 10 | - name: net-tools 11 | - name: sudo 12 | - name: restic 13 | - name: wget 14 | 15 | pull_backup: false 16 | 17 | ### LNXLink 18 | lnxlink_diskuse: true 19 | 20 | ### Bash Aliases 21 | bash_docker: true 22 | bash_autorestic: true 23 | bash_systemctl: true 24 | bash_apt: true 25 | 26 | ### Cronjobs 27 | cronjobs: 28 | - name: DB Backup 29 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_backup_preservation }} -- /home/{{ main_username }}/scripts/db_backup.sh 30 | user: "{{ main_username }}" 31 | minute: 45 32 | hour: 0 33 | - name: Restic Prune 34 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_prune_preservation }} -- /usr/local/bin/autorestic forget -a -- prune 35 | user: "{{ main_username }}" 36 | minute: 45 37 | hour: 22 38 | weekday: 1 39 | - name: Restic Check 40 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_check_preservation }} -- /usr/local/bin/autorestic exec -a -- check 41 | user: "{{ main_username }}" 42 | minute: 45 43 | hour: 2 44 | day: 1 45 | - name: PSQL Prune 46 | job: /usr/local/bin/runitor -uuid {{ secret_hc_invidious_psql_prune }} -- /home/{{ main_username }}/scripts/postgres_cleanup.sh 47 | user: "{{ main_username }}" 48 | minute: 30 49 | hour: 3 50 | weekday: 5 51 | 52 | ### Autorestic Config 53 | autorestic_config_user: "{{ main_username}}" 54 | autorestic_config_directory: /home/"{{ main_username}}" 55 | autorestic_run_check: false 56 | autorestic_config_yaml: 57 | version: 2 58 | backends: 59 | local_db: 60 | type: rest 61 | path: 'http://192.168.1.5:8500/preservation' 62 | key: "{{ secret_restic_repo_password }}" 63 | rest: 64 | user: "{{ secret_restic_rest_user }}" 65 | password: "{{ secret_restic_rest_password }}" 66 | b2_db: 67 | type: b2 68 | path: "{{ secret_restic_b2_bucket }}:/preservation" 69 | key: "{{ secret_restic_repo_password }}" 70 | env: 71 | - B2_ACCOUNT_ID: "{{ secret_restic_b2_account_id }}" 72 | - B2_ACCOUNT_KEY: "{{ secret_restic_b2_account_key }}" 73 | locations: 74 | db_backups: 75 | from: '/home/{{ main_username }}/db_backups' 76 | to: 77 | - local_db 78 | - b2_db 79 | options: 80 | forget: 81 | keep-daily: 1 82 | keep-weekly: 6 83 | keep-monthly: 3 84 | -------------------------------------------------------------------------------- /group_vars/unity.yml: -------------------------------------------------------------------------------- 1 | --- 2 | target_os: ubuntu 3 | hostname: unity 4 | 5 | ### Install packages 
with grog.package 6 | package_list: 7 | - name: acl 8 | - name: corosync-qnetd 9 | - name: corosync-qdevice 10 | - name: cron 11 | - name: curl 12 | - name: nano 13 | - name: git 14 | - name: htop 15 | - name: iputils-ping 16 | - name: lm-sensors 17 | - name: net-tools 18 | - name: python3 19 | - name: restic 20 | - name: sudo 21 | - name: wget 22 | 23 | ### Variables 24 | pull_backup: false 25 | 26 | ### Bash Aliases 27 | bash_docker: true 28 | bash_autorestic: true 29 | bash_systemctl: true 30 | bash_apt: true 31 | 32 | ### LNXLink 33 | lnxlink_temperature: true 34 | lnxlink_diskuse: true 35 | lnxlink_mounts: true 36 | 37 | ### Cronjobs 38 | cronjobs: 39 | - name: Restic Prune 40 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_prune_unity }} -- /usr/local/bin/autorestic forget -a -- prune 41 | user: "{{ main_username }}" 42 | minute: 0 43 | hour: 22 44 | weekday: 1 45 | - name: Restic Check 46 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_check_unity }} -- /usr/local/bin/autorestic exec -a -- check 47 | user: "{{ main_username }}" 48 | minute: 0 49 | hour: 2 50 | day: 1 51 | - name: Media Backup 52 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_backup_unity }} -- /usr/local/bin/autorestic backup -a -c /home/{{ main_username }}/.autorestic.yml 53 | user: root 54 | minute: 0 55 | hour: 0 56 | 57 | ### Autorestic Config 58 | autorestic_config_user: "{{ main_username }}" 59 | autorestic_config_directory: "/home/{{ main_username }}" 60 | autorestic_run_check: false 61 | autorestic_config_yaml: 62 | version: 2 63 | backends: 64 | local_docker: 65 | type: rest 66 | path: 'http://192.168.1.5:8500/unity' 67 | key: "{{ secret_restic_repo_password }}" 68 | rest: 69 | user: "{{ secret_restic_rest_user }}" 70 | password: "{{ secret_restic_rest_password }}" 71 | b2_docker: 72 | type: b2 73 | path: "{{ secret_restic_b2_bucket }}:/unity/docker" 74 | key: "{{ secret_restic_repo_password }}" 75 | env: 76 | - B2_ACCOUNT_ID: "{{ secret_restic_b2_account_id }}" 77 | - B2_ACCOUNT_KEY: "{{ secret_restic_b2_account_key }}" 78 | locations: 79 | docker: 80 | from: '/home/{{ main_username }}/docker' 81 | to: 82 | - local_docker 83 | - b2_docker 84 | options: 85 | forget: 86 | keep-daily: 1 87 | keep-weekly: 8 88 | keep-monthly: 4 89 | -------------------------------------------------------------------------------- /group_vars/virtuosity.yml: -------------------------------------------------------------------------------- 1 | --- 2 | target_os: ubuntu 3 | hostname: virtuosity 4 | 5 | nvidia_driver_version: 550 6 | 7 | ### Install packages with grog.package 8 | package_list: 9 | - name: curl 10 | - name: cron 11 | - name: cifs-utils 12 | - name: htop 13 | - name: net-tools 14 | - name: sudo 15 | - name: nano 16 | - name: restic 17 | - name: wget 18 | 19 | pull_backup: false 20 | lnxlink_diskuse: true 21 | 22 | ### Bash Aliases 23 | bash_docker: true 24 | bash_autorestic: true 25 | bash_systemctl: true 26 | bash_apt: true 27 | 28 | ### Cronjobs 29 | cronjobs: 30 | - name: Restic Prune 31 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_prune_virtuosity }} -- /usr/local/bin/autorestic forget -a -- prune 32 | user: "{{ main_username }}" 33 | minute: 20 34 | hour: 22 35 | weekday: 1 36 | - name: Restic Check 37 | job: /usr/local/bin/runitor -uuid {{ secret_hc_restic_check_virtuosity }} -- /usr/local/bin/autorestic exec -a -- check 38 | user: "{{ main_username }}" 39 | minute: 20 40 | hour: 2 41 | day: 1 42 | - name: Media Backup 43 | job: /usr/local/bin/runitor -uuid {{
secret_hc_restic_backup_virtuosity }} -- /usr/local/bin/autorestic backup -a -c /home/{{ main_username }}/.autorestic.yml 44 | user: root 45 | minute: 20 46 | hour: 0 47 | 48 | ### Mountpoints 49 | mountpoints: 50 | - path: "/mnt/immich-uploads" 51 | source: "//192.168.1.10/Media/Immich" 52 | fs: cifs 53 | opts: "defaults,uid=1000,gid=1000,credentials=/home/{{ main_username }}/.smbcredentials" 54 | - path: "/mnt/photos" 55 | source: "//192.168.1.10/Media/Photos" 56 | fs: cifs 57 | opts: "defaults,uid=1000,gid=1000,credentials=/home/{{ main_username }}/.smbcredentials" 58 | 59 | ### Autorestic Config 60 | autorestic_config_user: "{{ main_username }}" 61 | autorestic_config_directory: "/home/{{ main_username }}" 62 | autorestic_run_check: false 63 | autorestic_config_yaml: 64 | version: 2 65 | backends: 66 | local_docker: 67 | type: rest 68 | path: 'http://192.168.1.5:8500/virtuosity' 69 | key: "{{ secret_restic_repo_password }}" 70 | rest: 71 | user: "{{ secret_restic_rest_user }}" 72 | password: "{{ secret_restic_rest_password }}" 73 | b2_docker: 74 | type: b2 75 | path: "{{ secret_restic_b2_bucket }}:/virtuosity" 76 | key: "{{ secret_restic_repo_password }}" 77 | env: 78 | - B2_ACCOUNT_ID: "{{ secret_restic_b2_account_id }}" 79 | - B2_ACCOUNT_KEY: "{{ secret_restic_b2_account_key }}" 80 | locations: 81 | docker: 82 | from: '/home/{{ main_username }}/docker' 83 | to: 84 | - local_docker 85 | - b2_docker 86 | options: 87 | forget: 88 | keep-daily: 1 89 | keep-weekly: 4 90 | keep-monthly: 2 91 | -------------------------------------------------------------------------------- /hosts.ini: -------------------------------------------------------------------------------- 1 | [unity] 2 | 192.168.1.11 3 | 4 | [omada] 5 | 192.168.1.15 6 | 7 | [ishap] 8 | 192.168.1.5 9 | 10 | [adonalsium] 11 | 192.168.1.10 12 | 13 | [design] 14 | 192.168.10.16 15 | 16 | [dominion] 17 | 192.168.10.50 18 | 19 | [autonomy] 20 | 192.168.30.5 21 | 22 | [invention] 23 | 192.168.30.7 24 | 25 | [hoid] 26 | 192.168.10.32 27 | 28 | [honor] 29 | 192.168.50.10 30 | 31 | [investiture] 32 | 192.168.50.15 33 | 34 | [endowment] 35 | 192.168.50.20 36 | 37 | [cultivation] 38 | 192.168.50.21 39 | 40 | [preservation] 41 | 192.168.50.22 42 | 43 | [identity] 44 | 192.168.50.23 45 | 46 | [virtuosity] 47 | 192.168.50.25 48 | 49 | [ambition] 50 | remote.fuzzymistborn.com 51 | 52 | [shards:children] 53 | dominion 54 | endowment 55 | autonomy 56 | cultivation 57 | preservation 58 | honor 59 | omada 60 | investiture 61 | invention 62 | identity 63 | 64 | [docker:children] 65 | adonalsium 66 | endowment 67 | autonomy 68 | cultivation 69 | preservation 70 | omada 71 | honor 72 | unity 73 | ambition 74 | investiture 75 | invention 76 | identity 77 | 78 | [lxc:children] 79 | dominion 80 | honor 81 | endowment 82 | autonomy 83 | cultivation 84 | preservation 85 | omada 86 | investiture 87 | invention 88 | identity 89 | 90 | [desktop:children] 91 | design 92 | hoid 93 | 94 | [ubuntu:children] 95 | dominion 96 | honor 97 | endowment 98 | autonomy 99 | cultivation 100 | preservation 101 | omada 102 | investiture 103 | invention 104 | virtuosity 105 | identity 106 | 107 | #[arch:children] 108 | 109 | [fedora:children] 110 | hoid 111 | design -------------------------------------------------------------------------------- /justfile: -------------------------------------------------------------------------------- 1 | export PATH := justfile_directory() + "/env/bin:" + env_var("PATH") 2 | 3 | # Recipes 4 | @default: 5 | just --list 6 | 7 | ### Run/Builds 8
| build_adonalsium: 9 | ansible-playbook -u root -b run.yml --limit adonalsium --ask-pass 10 | 11 | build_ishap: 12 | ansible-playbook -u root -b run.yml --limit ishap --ask-pass 13 | 14 | build_virtuosity: 15 | ansible-playbook -u root -b run.yml --limit virtuosity --ask-pass --ask-become-pass 16 | 17 | build +HOST: 18 | ansible-playbook -b run.yml --limit {{ HOST }} 19 | 20 | ### Updates 21 | update: 22 | ansible-playbook update.yml 23 | 24 | docker: 25 | ansible-playbook docker.yml 26 | 27 | test: 28 | ansible-playbook -b test.yml 29 | 30 | ### Vault 31 | decrypt: 32 | ansible-vault decrypt vars/vault.yaml 33 | 34 | encrypt: 35 | ansible-vault encrypt vars/vault.yaml 36 | 37 | ### Lint 38 | yamllint: 39 | yamllint -s . 40 | 41 | ansible-lint: yamllint 42 | #!/usr/bin/env bash 43 | ansible-lint . 44 | ansible-playbook run.yml update.yml bootstrap.yml docker.yml --syntax-check 45 | 46 | ### Bootstrap/Setup 47 | bootstrap_lxc: 48 | ansible-playbook -b bootstrap.yml --limit lxc,ambition 49 | 50 | bootstrap +HOST: 51 | ansible-playbook -b bootstrap.yml --limit {{ HOST }} --ask-pass --ask-become-pass 52 | 53 | install: 54 | @./prereqs.sh 55 | @echo "Ansible Vault pre-hook script setup and vault password set" 56 | 57 | ### Git 58 | # git submodule - repo URL + optional local folder name 59 | add-submodule URL *NAME: 60 | #!/usr/bin/env sh 61 | if [ -z "{{NAME}}" ]; then 62 | # Extract repo name from URL if no name provided 63 | basename=$(basename "{{URL}}" .git) 64 | git submodule add {{URL}} "roles/${basename}" 65 | git submodule update --init --recursive 66 | git add .gitmodules "roles/${basename}" 67 | git commit -m "Adds ${basename} as a submodule" 68 | else 69 | git submodule add {{URL}} "roles/{{NAME}}" 70 | git submodule update --init --recursive 71 | git add .gitmodules "roles/{{NAME}}" 72 | git commit -m "Adds {{NAME}} as a submodule" 73 | fi 74 | -------------------------------------------------------------------------------- /prereqs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ansible-galaxy role install -r requirements.yaml 4 | echo "Ansible Galaxy roles installed" 5 | ansible-galaxy collection install -r requirements.yaml 6 | ansible-galaxy collection install community.general 7 | echo "Ansible Galaxy collections installed" 8 | ./git-init.sh 9 | echo "Ansible vault git pre-commit hook installed" 10 | 11 | read -p "Add Ansible Vault Password: " pass 12 | if [ -f .vault-password ]; then 13 | rm .vault-password 14 | fi 15 | 16 | cat <<EOT >> .vault-password 17 | $pass 18 | EOT 19 | 20 | chmod 0600 .vault-password -------------------------------------------------------------------------------- /requirements.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | roles: 3 | - src: ironicbadger.snapraid 4 | - src: caddy_ansible.caddy_ansible 5 | - src: grog.package 6 | - src: geerlingguy.docker 7 | - src: geerlingguy.ntp 8 | - src: geerlingguy.nfs 9 | - src: geerlingguy.pip 10 | - src: geerlingguy.samba 11 | 12 | collections: 13 | - community.docker 14 | - ansible.posix 15 | -------------------------------------------------------------------------------- /roles/adonalsium/files/backup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Mount HDD 3 | mount /dev/disk/by-id/ata-WDC_WD140EDGZ-11B1PA0_9MGWV2JJ-part1 /media/external_hdd 4 | mount /mnt/local_backup 5 | 6 | # Wait for mount 7 | sleep 10s 8 | 9 | # rsync script 10 | rsync -Aavx
--info=progress2 --delete --delete-excluded --exclude '.*' --exclude '.trickplay' --exclude 'TV Shows' --exclude 'Parents *' --exclude 'UHD' --exclude 'Frigate' --exclude 'Immich' /mnt/Media/ /media/external_hdd/Media 11 | rsync -Aavx --info=progress2 --delete --delete-excluded --exclude '.*' --exclude '.snapraid.content' /mnt/local_backup/ /media/external_hdd/Backup 12 | 13 | # Wait before unmounting 14 | sleep 10s 15 | 16 | # Unmount HDD 17 | umount /media/external_hdd 18 | umount /mnt/local_backup 19 | echo "Backup Complete!" 20 | -------------------------------------------------------------------------------- /roles/adonalsium/files/etc/issue: -------------------------------------------------------------------------------- 1 | ------------------------------------------------------------------------------ 2 | 3 | Welcome to the Proxmox Virtual Environment. Please use your web browser to 4 | configure this server - connect to: 5 | 6 | https://\4{vmbr0}:8006/ 7 | 8 | ------------------------------------------------------------------------------ -------------------------------------------------------------------------------- /roles/adonalsium/files/trim.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Trimming Honor" 4 | sudo pct fstrim 201 5 | 6 | echo "Trimming Invention" 7 | sudo pct fstrim 210 8 | 9 | echo "Trimming Endowment" 10 | sudo pct fstrim 250 11 | 12 | echo "Trimming Autonomy" 13 | sudo pct fstrim 251 14 | 15 | echo "Trimming Cultivation" 16 | sudo pct fstrim 252 17 | 18 | echo "Trimming Preservation" 19 | sudo pct fstrim 253 20 | -------------------------------------------------------------------------------- /roles/adonalsium/tasks/disks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: make sure disks unmounted 4 | mount: 5 | path: "{{ item.path }}" 6 | state: unmounted 7 | loop: "{{ data_disks + parity_disks + nfs_mount }}" 8 | 9 | - name: create /mnt points 10 | file: 11 | dest: "{{ item.path }}" 12 | state: directory 13 | owner: nobody 14 | group: nogroup 15 | mode: 0777 16 | loop: "{{ mergerfs_mount + data_disks + parity_disks + nfs_mount + external_mount }}" 17 | 18 | - name: mount disks 19 | mount: 20 | path: "{{ item.path }}" 21 | src: "{{ item.source }}" 22 | fstype: "{{ item.fs }}" 23 | opts: "{{ item.opts }}" 24 | # change to 'mounted' to auto mount versus 'present' for just loaded into fstab 25 | state: mounted 26 | loop: "{{ data_disks + parity_disks }}" 27 | 28 | - name: NFS mounts to fstab 29 | mount: 30 | path: "{{ item.path }}" 31 | src: "{{ item.source }}" 32 | fstype: "{{ item.fs }}" 33 | opts: "{{ item.opts }}" 34 | # change to 'mounted' to auto mount versus 'present' for just loaded into fstab 35 | state: present 36 | loop: "{{ nfs_mount }}" 37 | 38 | - name: mount mergerfs array 39 | mount: 40 | path: "{{ item.mountpoint }}" 41 | src: "{{ item.source }}" 42 | opts: "{{ item.opts }}" 43 | fstype: "{{ item.fs }}" 44 | # change to 'mounted' to auto mount versus 'present' for just loaded into fstab 45 | state: mounted 46 | loop: "{{ fstab_mergerfs }}" 47 | -------------------------------------------------------------------------------- /roles/adonalsium/tasks/infrastructure.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### Create LXCs 3 | 4 | - name: Update pveam 5 | command: pveam update 6 | 7 | - name: Download container templates 8 | command: pveam download NFS {{ item }} 9 | loop: 10
| - ubuntu-22.04-standard_22.04-1_amd64.tar.zst 11 | - debian-12-standard_12.2-1_amd64.tar.zst 12 | 13 | - name: Create LXCs w/ VLAN Tags 14 | proxmox: 15 | vmid: "{{ item.vmid }}" 16 | hostname: "{{ item.name }}" 17 | unprivileged: "{{ item.privileged }}" 18 | onboot: "{{ item.onboot | default ('true') }}" 19 | state: present 20 | node: adonalsium 21 | storage: local-lvm 22 | disk: "{{ item.disk }}" 23 | cpus: '1' 24 | cpuunits: '1000' 25 | cores: "{{ item.cores }}" 26 | memory: "{{ item.memory }}" 27 | swap: "{{ item.swap | default ('512') }}" 28 | api_user: root@pam 29 | api_host: localhost 30 | api_token_id: Ansible 31 | api_token_secret: "{{ secret_proxmox_api_token }}" 32 | pubkey: "{{ secret_proxmox_ct_ssh }}" 33 | password: "{{ secret_proxmox_pass }}" 34 | netif: "{'net0':'name=eth0,gw={{ item.gw }},ip={{ item.ip }}/24,tag={{ item.tag | default ('50') }},bridge=vmbr1'}" 35 | ostemplate: "{{ item.template | default ('NFS:vztmpl/ubuntu-22.04-standard_22.04-1_amd64.tar.zst') }}" 36 | features: 37 | - nesting={{ item.nesting | default ('1') }} 38 | # - keyctl={{ item.keyctl | default ('0') }} 39 | loop: "{{ lxc_vlans }}" 40 | 41 | - name: Create LXC CT w/ Mounts 42 | proxmox: 43 | vmid: "{{ item.vmid }}" 44 | hostname: "{{ item.name }}" 45 | unprivileged: "{{ item.privileged }}" 46 | onboot: "{{ item.onboot | default ('true') }}" 47 | state: present 48 | node: adonalsium 49 | storage: local-lvm 50 | disk: "{{ item.disk }}" 51 | cpus: '1' 52 | cpuunits: '1000' 53 | cores: "{{ item.cores }}" 54 | memory: "{{ item.memory }}" 55 | swap: "{{ item.swap | default ('512') }}" 56 | api_user: root@pam 57 | api_host: localhost 58 | api_token_id: Ansible 59 | api_token_secret: "{{ secret_proxmox_api_token }}" 60 | pubkey: "{{ secret_proxmox_ct_ssh }}" 61 | password: "{{ secret_proxmox_pass }}" 62 | netif: "{'net0':'name=eth0,gw={{ item.gw }},ip={{ item.ip }}/24,tag={{ item.tag | default ('50') }},bridge=vmbr1'}" 63 | ostemplate: "{{ item.template | default ('local:vztmpl/ubuntu-22.04-standard_22.04-1_amd64.tar.gz') }}" 64 | mounts: "{'mp0':'/dev/disk/by-id/ata-KINGSTON_SA400S37480G_50026B7785A010DF-part1,mp=/mnt/scratch,backup=0'}" 65 | features: 66 | - nesting={{ item.nesting | default ('0') }} 67 | - keyctl={{ item.keyctl | default ('0') }} 68 | - mount={{ item.fsmount }} 69 | loop: "{{ lxc_mounts }}" 70 | 71 | - name: Create VM with UEFI Secure Boot Enabled 72 | community.general.proxmox_kvm: 73 | api_user: root@pam 74 | api_host: localhost 75 | api_token_id: Ansible 76 | api_token_secret: "{{ secret_proxmox_api_token }}" 77 | name: "{{ item.name }}" 78 | node: adonalsium 79 | vmid: "{{ item.vmid }}" 80 | onboot: "{{ item.onboot | default ('false') }}" 81 | net: 82 | net0: "virtio,bridge={{ item.bridge | default ('vmbr1') }},tag={{ item.tag | default ('50') }},firewall=1" 83 | cores: "{{ item.cores }}" 84 | cpu: "{{ item.cpu_type | default ('kvm64') }}" 85 | memory: "{{ item.memory }}" 86 | balloon: "{{ item.balloon }}" 87 | bios: "{{ item.bios | default ('seabios') }}" 88 | scsihw: virtio-scsi-single 89 | scsi: 90 | scsi0: "{{ item.disk }},ssd=1,iothread=1" 91 | format: qcow2 92 | ide: 93 | ide2: "NFS:iso/{{ item.iso | default ('ubuntu-24.04-live-server-amd64.iso') }},media=cdrom" 94 | efidisk0: 95 | storage: local-lvm 96 | format: raw 97 | efitype: 4m 98 | pre_enrolled_keys: 1 99 | loop: "{{ vm_uefi }}" 100 | 101 | - name: Set boot order 102 | shell: pct set {{ item.lxc }} --startup order={{ item.order }} 103 | loop: 104 | - {"lxc":"200", "order":"5"} 105 | - {"lxc":"201", "order":"1"} 106
| - {"lxc":"202", "order":"4"} 107 | - {"lxc":"203", "order":"5"} 108 | - {"lxc":"211", "order":"3"} 109 | - {"lxc":"250", "order":"3"} 110 | - {"lxc":"251", "order":"3"} 111 | - {"lxc":"252", "order":"3"} 112 | - {"lxc":"253", "order":"2"} 113 | - {"lxc":"211", "order":"3"} 114 | 115 | - name: Endowment - Add iGPU passthrough/disable apparmor 116 | lineinfile: 117 | dest: "/etc/pve/lxc/250.conf" 118 | insertafter: EOF 119 | line: | 120 | lxc.cgroup2.devices.allow: c 226:0 rwm 121 | lxc.cgroup2.devices.allow: c 226:128 rwm 122 | lxc.cgroup2.devices.allow: c 29:0 rwm 123 | lxc.mount.entry: /dev/dri dev/dri none bind,optional,create=dir 124 | lxc.mount.entry: /dev/fb0 dev/fb0 none bind,optional,create=file 125 | lxc.apparmor.profile: unconfined 126 | lxc.cgroup2.devices.allow: a 127 | lxc.cap.drop: 128 | unsafe_writes: yes 129 | 130 | - name: Autonomy - Add USB devices 131 | lineinfile: 132 | dest: "/etc/pve/lxc/251.conf" 133 | insertafter: EOF 134 | line: | 135 | lxc.apparmor.profile: unconfined 136 | lxc.cgroup2.devices.allow: a 137 | lxc.cap.drop: 138 | unsafe_writes: yes 139 | -------------------------------------------------------------------------------- /roles/adonalsium/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | ### Basic Setup 4 | - name: Ensure user groups exists 5 | group: 6 | name: "{{ item }}" 7 | state: present 8 | loop: 9 | - "{{ main_username }}" 10 | - ansible 11 | 12 | - name: Add users 13 | user: 14 | name: "{{ item.user }}" 15 | password: "{{ item.pass }}" 16 | groups: 17 | - "{{ item.user }}" 18 | - sudo 19 | shell: /bin/bash 20 | loop: 21 | - {user: "{{ main_username }}", pass: "{{ secret_main_user_pass }}"} 22 | - {user: ansible, pass: "{{ secret_ansible_pass }}"} 23 | 24 | - name: Add samba users 25 | user: 26 | name: samba 27 | password: "!" 
28 | create_home: no 29 | 30 | - name: Add samba user password 31 | shell: "printf '{{ secret_samba_pass }}\n{{ secret_samba_pass }}\n' | smbpasswd -a samba" 32 | 33 | - name: Add sudoers file for ansible 34 | copy: 35 | src: sudoer_ansible 36 | dest: /etc/sudoers.d/ansible 37 | owner: root 38 | group: root 39 | mode: 0440 40 | 41 | - name: SSH Keys 42 | authorized_key: 43 | user: "{{ item.user }}" 44 | state: present 45 | key: "{{ item.ssh }}" 46 | loop: 47 | - {user: "{{ main_username }}", ssh: "{{ secret_main_user_ssh }}"} 48 | - {user: "{{ main_username }}", ssh: "{{ secret_main_user_alt_ssh }}"} 49 | - {user: ansible, ssh: "{{ secret_ansible_ssh }}"} 50 | 51 | - name: create main user directories 52 | file: 53 | path: /home/{{ main_username }}/{{ item }} 54 | state: directory 55 | owner: "{{ main_username }}" 56 | group: "{{ main_groupname }}" 57 | loop: 58 | - docker 59 | - docker/scrutiny 60 | 61 | - name: Copy scripts 62 | copy: 63 | src: "{{ item.src }}" 64 | dest: "{{ item.dest }}" 65 | mode: +x 66 | loop: 67 | - {src: "backup.sh", dest: "/home/{{ main_username }}/backup.sh"} 68 | - {src: "trim.sh", dest: "/home/{{ main_username }}/trim.sh"} 69 | 70 | # Source: https://www.reddit.com/r/Proxmox/comments/118i6ct/tutorialguide_how_to_make_the_prelogin_banner/ 71 | - name: Copy issue file to adjust displayed IP address 72 | copy: 73 | src: etc/issue 74 | dest: /etc/issue 75 | owner: root 76 | group: root 77 | mode: 0644 78 | 79 | ### Infrastructure 80 | - name: Set up disks/mounts 81 | include_tasks: disks.yml 82 | 83 | - name: Set up containers 84 | include_tasks: infrastructure.yml 85 | when: infrastructure == true 86 | 87 | - name: Install samba config 88 | template: 89 | src: smb.conf.j2 90 | dest: /etc/samba/smb.conf 91 | owner: root 92 | group: root 93 | - name: Restart samba 94 | service: 95 | name: smbd 96 | state: restarted 97 | 98 | ### Backup Restore 99 | - name: Create restore script 100 | copy: 101 | dest: /home/{{ main_username }}/restore.sh 102 | owner: "{{ main_username }}" 103 | group: "{{ main_username }}" 104 | mode: +x 105 | content: | 106 | #!/bin/bash 107 | /usr/local/bin/autorestic restore -f -l docker --from local_docker --to / 108 | when: pull_backup == true 109 | - name: Run restore script 110 | command: /bin/bash ./restore.sh 111 | args: 112 | chdir: "/home/{{ main_username }}" 113 | when: pull_backup == true 114 | - name: Remove restore script 115 | file: 116 | path: /home/{{ main_username }}/restore.sh 117 | state: absent 118 | when: pull_backup == true 119 | 120 | ### Wireguard/Misc 121 | - name: Enable IPv4 forwarding 122 | sysctl: 123 | name: net.ipv4.ip_forward 124 | value: 1 125 | reload: yes 126 | 127 | - name: Reboot Cronjob 128 | cron: 129 | name: "Set /dev/dri to 777" 130 | job: "chmod -R 777 /dev/dri" 131 | user: "root" 132 | special_time: reboot 133 | state: present 134 | 135 | - name: Fix e1000 driver hang 136 | command: ethtool -K eno1 tso off gso off 137 | become: yes 138 | become_user: root 139 | # See https://blog.f2h.cloud/how-to-fix-proxmox-detected-hardware-unit-hang/ and https://forum.proxmox.com/threads/e1000-driver-hang.58284/page-8 140 | # Need to figure out how to add this to /etc/network/interfaces to preserve it after reboots 141 | -------------------------------------------------------------------------------- /roles/adonalsium/templates/hooks.yaml.j2: -------------------------------------------------------------------------------- 1 | - id: reboot 2 | execute-command: "reboot" 3 | command-working-directory: "/usr/bin" 4 | 5 | - id:
start_trell 6 | execute-command: "qm" 7 | command-working-directory: "/usr/sbin" 8 | pass-arguments-to-command: 9 | [{ 10 | "source": "string", 11 | "name": "start" 12 | }, 13 | { 14 | "source": "string", 15 | "name": "300" 16 | }] 17 | 18 | - id: stop_trell 19 | execute-command: "qm" 20 | command-working-directory: "/usr/sbin" 21 | pass-arguments-to-command: 22 | [{ 23 | "source": "string", 24 | "name": "stop" 25 | }, 26 | { 27 | "source": "string", 28 | "name": "300" 29 | }] 30 | 31 | - id: start_invention 32 | execute-command: "pct" 33 | command-working-directory: "/usr/sbin" 34 | pass-arguments-to-command: 35 | [{ 36 | "source": "string", 37 | "name": "start" 38 | }, 39 | { 40 | "source": "string", 41 | "name": "210" 42 | }] 43 | 44 | - id: stop_invention 45 | execute-command: "pct" 46 | command-working-directory: "/usr/sbin" 47 | pass-arguments-to-command: 48 | [{ 49 | "source": "string", 50 | "name": "stop" 51 | }, 52 | { 53 | "source": "string", 54 | "name": "210" 55 | }] 56 | -------------------------------------------------------------------------------- /roles/adonalsium/templates/smb.conf.j2: -------------------------------------------------------------------------------- 1 | ## adonalsium samba configuration 2 | 3 | [global] 4 | server min protocol = SMB2_02 5 | workgroup = cosmere 6 | server string = adonalsium 7 | security = user 8 | guest ok = no 9 | map to guest = bad user 10 | 11 | log file = /var/log/samba/%m.log 12 | max log size = 50 13 | printcap name = /dev/null 14 | load printers = no 15 | 16 | [media] 17 | comment = Media on adonalsium 18 | public = yes 19 | writeable = yes 20 | path = /mnt/Media 21 | valid users = samba 22 | force user = {{ main_username }} 23 | force group = {{ main_username }} -------------------------------------------------------------------------------- /roles/ambition/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart wireguard service 3 | service: 4 | name: wg-quick@wg0 5 | enabled: 'true' 6 | state: restarted 7 | 8 | - name: start wireguard service 9 | service: 10 | name: wg-quick@wg0 11 | enabled: 'true' 12 | state: started 13 | -------------------------------------------------------------------------------- /roles/ambition/templates/certs.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #/usr/local/bin/lego --email="fuzzy@fuzzymistborn.com" --domains="*.fuzzymistborn.com" --dns="hetzner" --dns.resolvers="9.9.9.9:53" --accept-tos --path /home/{{ main_username }}/lego run 4 | #/usr/local/bin/lego --email="fuzzy@fuzzymistborn.com" --domains="*.ci.fuzzymistborn.com" --dns="hetzner" --dns.resolvers="9.9.9.9:53" --accept-tos --path /home/{{ main_username }}/lego run 5 | #/usr/local/bin/lego --email="{{ secret_new_email }}" --domains="*.{{ secret_personal_url }}" --dns="hetzner" --dns.resolvers="9.9.9.9:53" --accept-tos --path /home/{{ main_username }}/lego run 6 | #chown {{ main_username }}:user -R /home/{{ main_username }}/lego 7 | #chmod -R 0664 /home/{{ main_username }}/lego 8 | 9 | /usr/local/bin/lego --email="fuzzy@fuzzymistborn.com" --domains="*.fuzzymistborn.com" --dns="hetzner" --dns.resolvers="9.9.9.9:53" --path /home/{{ main_username }}/lego renew --days 45 10 | /usr/local/bin/lego --email='fuzzy@fuzzymistborn.com' --domains="*.ci.fuzzymistborn.com" --dns='hetzner' --dns.resolvers="9.9.9.9:53" --path /home/{{ main_username }}/lego renew --days 45 11 | /usr/local/bin/lego --email="{{ 
secret_new_email }}" --domains="*.{{ secret_personal_url }}" --dns="hetzner" --dns.resolvers="9.9.9.9:53" --path /home/{{ main_username }}/lego renew --days 45 12 | 13 | /usr/bin/systemctl daemon-reload 14 | 15 | /usr/bin/systemctl restart caddy 16 | 17 | sleep 10s -------------------------------------------------------------------------------- /roles/ambition/templates/db_backup.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #### Gitea DB Backup 4 | echo Backing up Gitea DB 5 | today=$(date -d "$date" +"%Y_%m_%d") 6 | docker exec gitea-db sh -c 'exec mariadb-dump --single-transaction -h localhost -u gitea -p{{ secret_gitea_db_pass }} gitea' > /home/{{ main_username }}/db_backups/gitea/giteaDB_$today.sql 7 | 8 | #### Umami DB Backup 9 | echo Backing up Umami DB 10 | today=$(date +"%Y_%m_%d") 11 | docker exec umami-db sh -c 'exec mariadb-dump --single-transaction -h localhost -u umami -p{{ secret_umami_db_pass }} umami' > /home/{{ main_username }}/db_backups/umami/umamiDB_$today.sql 12 | 13 | find /home/{{ main_username }}/db_backups/* -mtime +6 -type f -delete 14 | -------------------------------------------------------------------------------- /roles/ambition/templates/db_restore.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd /home/{{ main_username }} 4 | docker compose up -d gitea-db umami-db 5 | 6 | sleep 30s 7 | echo Restoring Gitea DB 8 | cd /home/{{ main_username }}/db_backups/gitea 9 | LAST_GITEA=$(ls -t | head -n 1) 10 | docker exec -i gitea-db mariadb -u gitea -p{{ secret_gitea_db_pass }} gitea < /home/{{ main_username }}/db_backups/gitea/$LAST_GITEA 11 | 12 | echo Restoring Umami DB 13 | cd /home/{{ main_username }}/db_backups/umami 14 | LAST_UMAMI=$(ls -t | head -n 1) 15 | docker exec -i umami-db mariadb -u umami -p{{ secret_umami_db_pass }} umami < /home/{{ main_username }}/db_backups/umami/$LAST_UMAMI -------------------------------------------------------------------------------- /roles/autonomy/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create config directories 3 | file: 4 | path: /home/{{ main_username }}/{{ item }} 5 | state: directory 6 | owner: "{{ main_username }}" 7 | group: "{{ main_groupname }}" 8 | loop: 9 | - .config/catt 10 | - docker 11 | - scripts 12 | 13 | ### SSH 14 | - name: SSH Keys 15 | authorized_key: 16 | user: "{{ item.user }}" 17 | state: present 18 | key: "{{ item.ssh }}" 19 | loop: 20 | - {user: "{{ main_username }}", ssh: "{{ secret_main_user_code_server_ssh }}"} 21 | 22 | #### Scripts 23 | 24 | - name: Generate weather cast bash file 25 | copy: 26 | dest: /home/{{ main_username }}/scripts/cast_weather.sh 27 | owner: "{{ main_username }}" 28 | group: "{{ main_username }}" 29 | mode: +x 30 | content: "{{ secret_catt_weather }}" 31 | 32 | - name: Create traffic cast bash file 33 | copy: 34 | dest: /home/{{ main_username }}/scripts/cast_traffic.sh 35 | owner: "{{ main_username }}" 36 | group: "{{ main_username }}" 37 | mode: +x 38 | content: "{{ secret_catt_traffic }}" 39 | 40 | - name: Copy script templates 41 | template: 42 | src: "{{ item.src }}" 43 | dest: /home/{{ main_username }}/scripts/{{ item.dest }} 44 | owner: "{{ main_username }}" 45 | group: "{{ main_username }}" 46 | mode: +x 47 | loop: 48 | - {src: 'cast_frontdoor.sh.j2', dest: 'cast_frontdoor.sh'} 49 | - {src: 'docker.sh.j2', dest: 'docker.sh'} 50 | - {src: 'update_hass.sh.j2', dest: 
'update_hass.sh'} 51 | - {src: 'google_traffic.py.j2', dest: 'google_traffic.py'} 52 | - {src: 'waze_daycare.py.j2', dest: 'waze_daycare.py'} 53 | - {src: 'waze_home.py.j2', dest: 'waze_home.py'} 54 | - {src: 'waze_live.py.j2', dest: 'waze_live.py'} 55 | 56 | #### HASS Git 57 | 58 | - name: Clone HASS GitHub Repo 59 | git: 60 | repo: git@github.com:FuzzyMistborn/hass-docker.git 61 | dest: /home/{{ main_username }}/hass_docker 62 | accept_hostkey: true 63 | key_file: /home/{{ main_username }}/.ssh/github 64 | when: clone_git == true 65 | 66 | - name: Clone NodeRed GitHub Repo 67 | git: 68 | repo: git@github.com:FuzzyMistborn/hass-nodered.git 69 | dest: /home/{{ main_username }}/docker/nodered 70 | accept_hostkey: true 71 | key_file: /home/{{ main_username }}/.ssh/github 72 | when: clone_git == true 73 | 74 | ### Backup Restore 75 | - name: Create restore script 76 | copy: 77 | dest: /home/{{ main_username }}/restore.sh 78 | owner: "{{ main_username }}" 79 | group: "{{ main_username }}" 80 | mode: +x 81 | content: | 82 | #!/bin/bash 83 | /usr/local/bin/autorestic restore -f -l docker --from local_docker --to / 84 | /usr/local/bin/autorestic restore -f -l hass_docker --from local_hass_docker --to / 85 | /usr/local/bin/autorestic restore -f -l hass_docker_beta --from local_hass_docker_beta --to / 86 | when: pull_backup == true 87 | - name: Run restore script 88 | command: /bin/bash ./restore.sh 89 | args: 90 | chdir: "/home/{{ main_username }}" 91 | when: pull_backup == true 92 | - name: Remove restore script 93 | file: 94 | path: /home/{{ main_username }}/restore.sh 95 | state: absent 96 | when: pull_backup == true 97 | 98 | ### Pip Virtualenv 99 | - name: Install selenium/splinter 100 | ansible.builtin.pip: 101 | name: 102 | - splinter 103 | - selenium 104 | virtualenv: .venv 105 | -------------------------------------------------------------------------------- /roles/autonomy/templates/cast_frontdoor.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | /home/{{ main_username }}/.local/bin/catt -d 192.168.30.70 cast_site "http://192.168.1.10:5000/api/frontdoor?fps=15&height=1080" 3 | 4 | sleep 45s 5 | 6 | /home/{{ main_username }}/.local/bin/catt -d 192.168.30.70 stop -------------------------------------------------------------------------------- /roles/autonomy/templates/docker.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | nextcloud_docker=$(/usr/local/bin/skopeo inspect docker://docker.io/nextcloud:latest | grep -o 'NEXTCLOUD_VERSION=[0-9]*.[0-9]*.[0-9]*' | cut -c 19-30) 4 | #hass_docker_beta=$(/usr/local/bin/skopeo inspect docker://docker.io/homeassistant/home-assistant:beta | grep -ioE '"io.hass.version": "[0-9]*.[0-9]*.0b[0-9]*"' | grep -o "\b[0-9]*.[0-9]*.0b[0-9]" 5 | 6 | mosquitto_pub -h 192.168.30.5 -u homeassistant -P {{ secret_mqtt_pass }} -t dockertags/Nextcloud --retain -m "$nextcloud_docker" 7 | #mosquitto_pub -h 192.168.30.5 -u homeassistant -P {{ secret_mqtt_pass }} -t dockertags/HomeAssistantBeta --retain -m "$hass_docker_beta" -------------------------------------------------------------------------------- /roles/autonomy/templates/google_traffic.py.j2: -------------------------------------------------------------------------------- 1 | from splinter import Browser 2 | 3 | browser = Browser(driver_name='remote', 4 | command_executor='http://localhost:4444/wd/hub', 5 | browser='firefox', 6 | wait_time=20 7 | ) 8 | browser.visit("https://traffic.{{
secret_personal_internal_url }}") 9 | 10 | # Solution to fix "wait" which is broken 11 | browser.find_by_css('Test') 12 | 13 | browser.screenshot("/home/{{ main_username }}/hass/tmp/google_traffic", full=True, unique_file=False) 14 | 15 | browser.quit() -------------------------------------------------------------------------------- /roles/autonomy/templates/update_hass.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd /home/{{ main_username }} 4 | docker compose pull hass 5 | docker compose up -d hass 6 | docker image prune -a -f -------------------------------------------------------------------------------- /roles/autonomy/templates/waze_daycare.py.j2: -------------------------------------------------------------------------------- 1 | from splinter import Browser 2 | 3 | browser = Browser(driver_name='remote', 4 | command_executor='http://localhost:4444/wd/hub', 5 | browser='firefox', 6 | wait_time=20 7 | ) 8 | browser.visit("{{ secret_waze_daycare }}") 9 | 10 | browser.is_text_present("Arrive") 11 | browser.find_by_text('Got it').click() 12 | browser.find_by_text('Got it').click() 13 | browser.find_by_xpath("/html/body/div[1]/div[2]/button").click() 14 | 15 | browser.screenshot("/home/{{ main_username }}/hass/tmp/waze_daycare", full=True, unique_file=False) 16 | 17 | browser.quit() -------------------------------------------------------------------------------- /roles/autonomy/templates/waze_home.py.j2: -------------------------------------------------------------------------------- 1 | from splinter import Browser 2 | 3 | browser = Browser(driver_name='remote', 4 | command_executor='http://localhost:4444/wd/hub', 5 | browser='firefox', 6 | wait_time=20 7 | ) 8 | browser.visit("{{ secret_waze_home }}") 9 | 10 | browser.is_text_present("Arrive") 11 | browser.find_by_text('Got it').click() 12 | browser.find_by_text('Got it').click() 13 | browser.find_by_xpath("/html/body/div[1]/div[2]/button").click() 14 | 15 | browser.screenshot("/home/{{ main_username }}/hass/tmp/waze_home", full=True, unique_file=False) 16 | 17 | browser.quit() -------------------------------------------------------------------------------- /roles/autonomy/templates/waze_live.py.j2: -------------------------------------------------------------------------------- 1 | from splinter import Browser 2 | import time 3 | 4 | browser = Browser(driver_name='remote', 5 | command_executor='http://localhost:4444/wd/hub', 6 | browser='firefox', 7 | wait_time=20 8 | ) 9 | browser.visit("{{ secret_waze_live }}") 10 | 11 | # Hack to wait for JavaScript to fully load 12 | time.sleep(10) 13 | 14 | browser.screenshot("/home/{{ main_username }}/hass/tmp/waze_live", full=True, unique_file=False) 15 | 16 | browser.quit() -------------------------------------------------------------------------------- /roles/bash_alias/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | bash_user: "{{ main_username }}" 3 | bash_aliases_path: "/home/{{ bash_user }}/.bash_aliases" 4 | bash_docker: false 5 | bash_autorestic: false 6 | bash_systemctl: false 7 | bash_apt: false 8 | bash_git: false 9 | bash_pacman: false 10 | bash_yay: false 11 | bash_dnf: false 12 | -------------------------------------------------------------------------------- /roles/bash_alias/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Bash Alias | Add general aliases 3 | lineinfile: 4 | dest: 
"{{ bash_aliases_path }}" 5 | create: yes 6 | owner: "{{ bash_user }}" 7 | group: "{{ bash_user }}" 8 | mode: 0644 9 | line: "alias {{ item.alias }}='{{ item.command }}'" 10 | regexp: "^alias {{ item.alias }}=" 11 | loop: "{{ bash_aliases }}" 12 | when: bash_aliases is defined 13 | 14 | - name: Bash Alias | Add docker aliases 15 | lineinfile: 16 | dest: "{{ bash_aliases_path }}" 17 | create: yes 18 | owner: "{{ bash_user }}" 19 | group: "{{ bash_user }}" 20 | mode: 0644 21 | line: "alias {{ item.alias }}='{{ item.command }}'" 22 | regexp: "^alias {{ item.alias }}=" 23 | loop: "{{ bash_docker_aliases }}" 24 | when: bash_docker != false 25 | 26 | - name: Bash Alias | Add systemctl aliases 27 | lineinfile: 28 | dest: "{{ bash_aliases_path }}" 29 | create: yes 30 | owner: "{{ bash_user }}" 31 | group: "{{ bash_user }}" 32 | mode: 0644 33 | line: "alias {{ item.alias }}='{{ item.command }}'" 34 | regexp: "^alias {{ item.alias }}=" 35 | loop: "{{ bash_systemctl_aliases }}" 36 | when: bash_systemctl != false 37 | 38 | - name: Bash Alias | Add apt aliases 39 | lineinfile: 40 | dest: "{{ bash_aliases_path }}" 41 | create: yes 42 | owner: "{{ bash_user }}" 43 | group: "{{ bash_user }}" 44 | mode: 0644 45 | line: "alias {{ item.alias }}='{{ item.command }}'" 46 | regexp: "^alias {{ item.alias }}=" 47 | loop: "{{ bash_apt_aliases }}" 48 | when: bash_apt != false 49 | 50 | - name: Bash Alias | Add git aliases 51 | lineinfile: 52 | dest: "{{ bash_aliases_path }}" 53 | create: yes 54 | owner: "{{ bash_user }}" 55 | group: "{{ bash_user }}" 56 | mode: 0644 57 | line: "alias {{ item.alias }}='{{ item.command }}'" 58 | regexp: "^alias {{ item.alias }}=" 59 | loop: "{{ bash_git_aliases }}" 60 | when: bash_git != false 61 | 62 | - name: Bash Alias | Add autorestic aliases 63 | lineinfile: 64 | dest: "{{ bash_aliases_path }}" 65 | create: yes 66 | owner: "{{ bash_user }}" 67 | group: "{{ bash_user }}" 68 | mode: 0644 69 | line: "alias {{ item.alias }}='{{ item.command }}'" 70 | regexp: "^alias {{ item.alias }}=" 71 | loop: "{{ bash_autorestic_aliases }}" 72 | when: bash_autorestic != false 73 | 74 | - name: Bash Alias | Add pacman aliases 75 | lineinfile: 76 | dest: "{{ bash_aliases_path }}" 77 | create: yes 78 | owner: "{{ bash_user }}" 79 | group: "{{ bash_user }}" 80 | mode: 0644 81 | line: "alias {{ item.alias }}='{{ item.command }}'" 82 | regexp: "^alias {{ item.alias }}=" 83 | loop: "{{ bash_pacman_aliases }}" 84 | when: bash_pacman != false 85 | 86 | - name: Bash Alias | Add yay aliases 87 | lineinfile: 88 | dest: "{{ bash_aliases_path }}" 89 | create: yes 90 | owner: "{{ bash_user }}" 91 | group: "{{ bash_user }}" 92 | mode: 0644 93 | line: "alias {{ item.alias }}='{{ item.command }}'" 94 | regexp: "^alias {{ item.alias }}=" 95 | loop: "{{ bash_yay_aliases }}" 96 | when: bash_yay != false 97 | 98 | - name: Bash Alias | Add dnf aliases 99 | lineinfile: 100 | dest: "{{ bash_aliases_path }}" 101 | create: yes 102 | owner: "{{ bash_user }}" 103 | group: "{{ bash_user }}" 104 | mode: 0644 105 | line: "alias {{ item.alias }}='{{ item.command }}'" 106 | regexp: "^alias {{ item.alias }}=" 107 | loop: "{{ bash_dnf_aliases }}" 108 | when: bash_dnf != false 109 | -------------------------------------------------------------------------------- /roles/cronjobs/README.md: -------------------------------------------------------------------------------- 1 | # Cronjob Role 2 | 3 | ## Example 4 | cronjobs: 5 | - name: Test 6 | job: echo test 7 | user: fuzzy 8 | minute: 0 9 | hour: */6 10 | day: 1 11 | month: 1 12 | 
weekday: 1 -------------------------------------------------------------------------------- /roles/cronjobs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Normal Cronjobs 3 | cron: 4 | name: "{{ item.name }}" 5 | job: "{{ item.job }}" 6 | user: "{{ item.user | default ('root') }}" 7 | minute: "{{ item.minute | default ('*') }}" 8 | hour: "{{ item.hour | default ('*') }}" 9 | day: "{{ item.day | default ('*') }}" 10 | month: "{{ item.month | default ('*') }}" 11 | weekday: "{{ item.weekday | default ('*') }}" 12 | state: present 13 | loop: "{{ cronjobs }}" 14 | when: item.special_time is not defined 15 | 16 | - name: Install Special Time Cronjobs 17 | cron: 18 | name: "{{ item.name }}" 19 | job: "{{ item.job }}" 20 | user: "{{ item.user | default ('root') }}" 21 | special_time: "{{ item.special_time }}" 22 | state: present 23 | loop: "{{ cronjobs }}" 24 | when: item.special_time is defined 25 | -------------------------------------------------------------------------------- /roles/cultivation/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: create config directories 4 | file: 5 | path: "/home/{{ main_username }}/{{ item }}" 6 | state: directory 7 | owner: "{{ main_username }}" 8 | group: "{{ main_groupname }}" 9 | loop: 10 | - docker 11 | - docker/diun 12 | - scripts 13 | 14 | #### DIUN 15 | - name: Copy DIUN Config Template 16 | template: 17 | src: diun_config.yml.j2 18 | dest: /home/{{ main_username }}/docker/diun/config.yml 19 | owner: "{{ main_username }}" 20 | group: "{{ main_username }}" 21 | 22 | ### Backup Restore 23 | - name: Create restore script 24 | copy: 25 | dest: /home/{{ main_username }}/restore.sh 26 | owner: "{{ main_username }}" 27 | group: "{{ main_username }}" 28 | mode: +x 29 | content: | 30 | #!/bin/bash 31 | /usr/local/bin/autorestic restore -f -l docker --from local_docker --to / 32 | when: pull_backup == true 33 | - name: Run restore script 34 | command: /bin/bash ./restore.sh 35 | args: 36 | chdir: "/home/{{ main_username }}" 37 | when: pull_backup == true 38 | - name: Remove restore script 39 | file: 40 | path: /home/{{ main_username }}/restore.sh 41 | state: absent 42 | when: pull_backup == true 43 | -------------------------------------------------------------------------------- /roles/design/files/WayofKings.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FuzzyMistborn/infra/755bf662dc6c67375ffb5e117fb286b51852bd3b/roles/design/files/WayofKings.jpg -------------------------------------------------------------------------------- /roles/design/files/face: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FuzzyMistborn/infra/755bf662dc6c67375ffb5e117fb286b51852bd3b/roles/design/files/face -------------------------------------------------------------------------------- /roles/design/files/stormlight_arc1_wallpaper.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FuzzyMistborn/infra/755bf662dc6c67375ffb5e117fb286b51852bd3b/roles/design/files/stormlight_arc1_wallpaper.png -------------------------------------------------------------------------------- /roles/desktop/README.md: -------------------------------------------------------------------------------- 1 | # Desktop Notes 2 | 3 | Things I can't ansible but want 
to remember: 4 | - Enable SSH 5 | - `sudo systemctl enable --now sshd` 6 | - Wake On Lan 7 | - https://wiki.archlinux.org/title/Wake-on-LAN#Enable_WoL_on_the_network_adapter 8 | - `nmcli con show` and then `nmcli c show "Wired connection 1" | grep 802-3-ethernet.wake-on-lan` 9 | - Finally `nmcli c modify "Wired connection 1" 802-3-ethernet.wake-on-lan magic` -------------------------------------------------------------------------------- /roles/desktop/files/WayofKings.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FuzzyMistborn/infra/755bf662dc6c67375ffb5e117fb286b51852bd3b/roles/desktop/files/WayofKings.jpg -------------------------------------------------------------------------------- /roles/desktop/files/face: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FuzzyMistborn/infra/755bf662dc6c67375ffb5e117fb286b51852bd3b/roles/desktop/files/face -------------------------------------------------------------------------------- /roles/desktop/files/stormlight_arc1_wallpaper_uw.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FuzzyMistborn/infra/755bf662dc6c67375ffb5e117fb286b51852bd3b/roles/desktop/files/stormlight_arc1_wallpaper_uw.png -------------------------------------------------------------------------------- /roles/desktop/files/szeth_logid.cfg: -------------------------------------------------------------------------------- 1 | // Location: /etc/logid.cfg 2 | devices: ({ 3 | name: "Wireless Mouse MX Master 3"; 4 | // A lower threshold number makes the wheel switch to free-spin mode 5 | // quicker when scrolling fast. 6 | smartshift: { on: true; threshold: 5; }; 7 | hiresscroll: { hires: true; invert: false; target: false; }; 8 | dpi: 1600; // max=4000 9 | buttons: ( 10 | // Thumb forward button 11 | { cid: 0x56; action = { type: "Gestures"; gestures: ( 12 | // Misc. 
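// Each gesture entry maps a swipe direction (performed while holding the
// button) to a key chord; direction "None" fires on a plain press/release.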
13 | { direction: "None"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_LEFTALT", "KEY_RIGHT" ];}}, 14 | { direction: "Up"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_LEFTCTRL", "KEY_LEFTSHIFT", "KEY_T" ];}}, 15 | { direction: "Down"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_LEFTCTRL", "KEY_LEFTSHIFT", "KEY_P" ];}}, 16 | { direction: "Left"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_LEFTCTRL", "KEY_C" ];}}, 17 | { direction: "Right"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_LEFTCTRL", "KEY_V" ];}} 18 | );};}, 19 | // Thumb back button 20 | { cid: 0x53; action = { type: "Gestures"; gestures: ( 21 | { direction: "None"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_LEFTALT", "KEY_LEFT" ];}}, 22 | # { direction: "Up"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_LEFTCTRL", "KEY_LEFTSHIFT", "KEY_LEFTMETA", "KEY_LEFT" ];}}, 23 | # { direction: "Down"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_LEFTCTRL", "KEY_LEFTSHIFT", "KEY_LEFTMETA", "KEY_RIGHT" ];}}, 24 | { direction: "Left"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_LEFTALT", "KEY_LEFTSHIFT", "KEY_TAB" ];}}, 25 | { direction: "Right"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_LEFTALT", "KEY_TAB" ];}} 26 | );};}, 27 | // Thumb button (Gesture button) 28 | { cid: 0xc3; action = { type: "Gestures"; gestures: ( 29 | // System Control 30 | { direction: "None"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_LEFTMETA", "KEY_D" ];}}, 31 | { direction: "Up"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_LEFTCTRL", "KEY_LEFTALT", "KEY_Z" ];}}, 32 | { direction: "Down"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_LEFTALT", "KEY_F4" ];}}, 33 | { direction: "Left"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_LEFTCTRL", "KEY_LEFTALT", "KEY_S" ];}}, 34 | { direction: "Right"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_LEFTCTRL", "KEY_LEFTALT", "KEY_H" ];}} 35 | );};}, 36 | // Top button (SmartShift Toggle) 37 | { cid: 0xc4; action = { type: "Gestures"; gestures: ( 38 | // Music Controls 39 | { direction: "None"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_MUTE" ];}}, 40 | { direction: "Up"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_VOLUMEUP" ];}}, 41 | { direction: "Down"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_VOLUMEDOWN" ];}}, 42 | { direction: "Left"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_PLAYPAUSE" ];}}, 43 | { direction: "Right"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_NEXTSONG" ];}} 44 | );};}, 45 | // Clickwheel Button 46 | { cid: 0x52; action = { type: "Gestures"; gestures: ( 47 | // Firefox 48 | { direction: "None"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_LEFTCTRL", "KEY_LEFTSHIFT", "KEY_L" ];}}, 49 | { direction: "Up"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_LEFTCTRL", "KEY_T" ];}}, 50 | { direction: "Down"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_LEFTCTRL", "KEY_W" ];}}, 51 | { direction: "Left"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_LEFTCTRL", "KEY_LEFTSHIFT", "KEY_TAB" ];}}, 52 | { direction: "Right"; mode: "OnRelease"; action = { type: "Keypress"; keys: [ "KEY_LEFTCTRL", "KEY_TAB" ];}} 53 | );};} 54 | ); 55 | }); -------------------------------------------------------------------------------- 
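The tasks that actually install szeth_logid.cfg presumably live in roles/desktop/tasks/main.yml, which is not included in this excerpt. As a rough sketch only (the task names and the "logid" systemd service name are assumptions based on how logiops is typically packaged; the /etc/logid.cfg destination comes from the comment at the top of the file), the deployment could look like:

- name: Copy logid config for the MX Master 3
  copy:
    src: szeth_logid.cfg
    dest: /etc/logid.cfg
    owner: root
    group: root
    mode: 0644

# Assumption: logiops runs as a systemd service named 'logid' that re-reads
# its config on restart.
- name: Restart logid to pick up the new config
  service:
    name: logid
    enabled: true
    state: restarted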
/roles/desktop/templates/sudoers_main_user.j2: -------------------------------------------------------------------------------- 1 | {{ main_username }} ALL= NOPASSWD: /bin/systemctl,/usr/sbin/grub-reboot -------------------------------------------------------------------------------- /roles/desktop/templates/systemd/restic_backup.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Run Restic Backup 3 | Wants= restic_backup.timer 4 | [Service] 5 | Type=oneshot 6 | WorkingDirectory=/home/{{ main_username }} 7 | ExecStartPre=/bin/sleep 20 8 | ExecStart= /usr/local/bin/runitor -uuid {{ desktop_hc_backup_id }} -- /usr/local/bin/autorestic backup -a 9 | EnvironmentFile=/etc/environment 10 | [Install] 11 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /roles/desktop/templates/systemd/restic_backup.timer.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Run Restic Backup 3 | After=network-online.target 4 | [Timer] 5 | OnCalendar=*-*-* 21:30:00 6 | Persistent=true 7 | [Install] 8 | WantedBy=timers.target 9 | -------------------------------------------------------------------------------- /roles/desktop/templates/systemd/restic_prune.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Run Restic Prune 3 | Wants= restic_prune.timer 4 | [Service] 5 | Type=oneshot 6 | WorkingDirectory=/home/{{ main_username }} 7 | ExecStartPre=/bin/sleep 300 8 | ExecStart= /usr/local/bin/runitor -uuid {{ desktop_hc_prune_id }} -- /usr/local/bin/autorestic forget -a -- prune 9 | EnvironmentFile=/etc/environment 10 | [Install] 11 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /roles/desktop/templates/systemd/restic_prune.timer.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Run Restic Prune 3 | After=network-online.target 4 | [Timer] 5 | OnCalendar=Mon *-*-* 20:30:00 6 | Persistent=true 7 | EnvironmentFile=/etc/environment 8 | [Install] 9 | WantedBy=timers.target -------------------------------------------------------------------------------- /roles/dominion/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Clone Ansible GitHub Repo 3 | git: 4 | repo: git@github.com:FuzzyMistborn/infra.git 5 | dest: /home/{{ main_username }}/infra 6 | accept_hostkey: true 7 | key_file: /home/{{ main_username }}/.ssh/github 8 | 9 | - name: Add Ambition SSH Key 10 | copy: 11 | dest: /home/{{ main_username }}/.ssh/ambition 12 | owner: "{{ main_username }}" 13 | group: "{{ main_username }}" 14 | mode: 0600 15 | content: "{{ secret_ambition_ssh }}" 16 | 17 | - name: Add Gitea SSH Key 18 | copy: 19 | dest: /home/{{ main_username }}/.ssh/gitea 20 | owner: "{{ main_username }}" 21 | group: "{{ main_username }}" 22 | mode: 0600 23 | content: "{{ secret_gitea_ssh }}" 24 | 25 | - name: Add ansible SSH Key 26 | copy: 27 | dest: /home/{{ main_username }}/.ssh/ansible_user 28 | owner: "{{ main_username }}" 29 | group: "{{ main_username }}" 30 | mode: 0600 31 | content: "{{ secret_ansible_key }}" 32 | 33 | - name: Add ssh config 34 | copy: 35 | dest: /home/{{ main_username }}/.ssh/config 36 | owner: "{{ main_username }}" 37 | group: "{{ main_username }}" 38 | mode: 0664 39 | content: | 40 | Host github.com 
41 | IdentityFile ~/.ssh/github 42 | Host git.fuzzymistborn.com 43 | user git 44 | HostName remote.fuzzymistborn.com 45 | Port 222 46 | IdentityFile ~/.ssh/gitea 47 | Host ambition 48 | HostName {{ secret_ambition_ip }} 49 | IdentityFile ~/.ssh/ambition 50 | 51 | - name: Copy update template script 52 | template: 53 | src: "{{ item.src }}" 54 | dest: /home/{{ main_username }}/{{ item.dest }} 55 | owner: "{{ main_username }}" 56 | group: "{{ main_username }}" 57 | mode: +x 58 | loop: 59 | - {src: 'update.sh.j2', dest: 'update.sh'} 60 | 61 | - name: Create update script 62 | copy: 63 | dest: /home/{{ main_username }}/update.sh 64 | owner: "{{ main_username }}" 65 | group: "{{ main_username }}" 66 | mode: +x 67 | content: | 68 | #!/bin/bash 69 | cd /home/{{ main_username }}/infra 70 | /usr/local/bin/ansible-playbook update.yml 71 | /usr/local/bin/ansible-playbook update.yml --tags "install" 72 | 73 | - name: Set BW CLI version 74 | shell: "bw --version" 75 | changed_when: false 76 | register: bw_ver 77 | 78 | - name: Set BW CLI Config 79 | copy: 80 | dest: "/home/{{ main_username }}/.config/Bitwarden CLI/data.json" 81 | owner: "{{ main_username }}" 82 | group: "{{ main_username }}" 83 | mode: 0644 84 | content: | 85 | { 86 | "installedVersion": "{{ bw_ver.stdout }}", 87 | "environmentUrls": { 88 | "base": "{{ secret_bitwarden_url }}", 89 | "api": null, 90 | "identity": null, 91 | "webVault": null, 92 | "icons": null, 93 | "notifications": null, 94 | "events": null, 95 | "keyConnector": null 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /roles/dominion/templates/update.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sed -i "s|vault_password_file = ./vault.sh|# \0|" /home/{{ main_username }}/infra/ansible.cfg 4 | echo "{{ secret_vault_pw }}" > /tmp/vault.txt 5 | 6 | cd /home/{{ main_username }}/infra 7 | /usr/local/bin/ansible-playbook --vault-password-file /tmp/vault.txt update.yml 8 | /usr/local/bin/ansible-playbook --vault-password-file /tmp/vault.txt update.yml --tags "install" 9 | 10 | sed -i "s|# \(vault_password_file = ./vault.sh\)|\1|" /home/{{ main_username }}/infra/ansible.cfg 11 | rm /tmp/vault.txt -------------------------------------------------------------------------------- /roles/endowment/files/nextcloud_cron.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | docker exec --user www-data nextcloud php cron.php -------------------------------------------------------------------------------- /roles/endowment/tasks/disks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: make sure disks unmounted 4 | mount: 5 | path: "{{ item.path }}" 6 | state: unmounted 7 | loop: "{{ smb_mountpoints }}" 8 | 9 | - name: create /mnt points 10 | file: 11 | dest: "{{ item.path }}" 12 | state: directory 13 | owner: nobody 14 | group: nogroup 15 | mode: 0777 16 | loop: "{{ smb_mountpoints }}" 17 | 18 | - name: Add smbcredentials 19 | copy: 20 | dest: /home/{{ main_username }}/.smbcredentials 21 | owner: "{{ main_username }}" 22 | group: "{{ main_username }}" 23 | mode: 0600 24 | content: | 25 | user=samba 26 | password={{ secret_samba_pass }} 27 | domain=cosmere 28 | 29 | - name: mount disks 30 | mount: 31 | path: "{{ item.path }}" 32 | src: "{{ item.source }}" 33 | fstype: "{{ item.fs }}" 34 | opts: "{{ item.opts }}" 35 | # change to 'mounted' to auto mount versus 'present' 
for just loaded into fstab 36 | state: mounted 37 | loop: "{{ smb_mountpoints }}" 38 | -------------------------------------------------------------------------------- /roles/endowment/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: create directories 4 | file: 5 | path: "/home/{{ main_username }}/{{ item }}" 6 | state: directory 7 | owner: "{{ main_username }}" 8 | group: "{{ main_groupname }}" 9 | loop: 10 | - docker 11 | - scripts 12 | 13 | - name: Copy scripts 14 | copy: 15 | src: "{{ item.path }}" 16 | dest: /home/{{ main_username }}/scripts/{{ item.path }} 17 | owner: "{{ main_username }}" 18 | group: "{{ main_username }}" 19 | mode: "{{ item.mode }}" 20 | loop: 21 | - {path: "nextcloud_cron.sh", mode: "+x"} 22 | 23 | - name: Copy photos script templates 24 | template: 25 | src: "{{ item.src }}" 26 | dest: /home/{{ main_username }}/scripts/{{ item.dest }} 27 | owner: "{{ main_username }}" 28 | group: "{{ main_username }}" 29 | mode: +x 30 | loop: 31 | - {src: 'photos.sh.j2', dest: 'photos.sh'} 32 | 33 | - name: Fix Jellyfin render group for iGPU passthrough 34 | shell: groupmod -g 150 postfix && groupmod -g 108 render && groupmod -g 107 postfix 35 | 36 | #### Mount disks 37 | - name: Set up disks/mounts 38 | include_tasks: disks.yml 39 | 40 | ### Backup Restore 41 | - name: Create restore script 42 | copy: 43 | dest: /home/{{ main_username }}/restore.sh 44 | owner: "{{ main_username }}" 45 | group: "{{ main_username }}" 46 | mode: +x 47 | content: | 48 | #!/bin/bash 49 | /usr/local/bin/autorestic restore -f -l docker --from local_docker --to / 50 | when: pull_backup == true 51 | - name: Run restore script 52 | command: /bin/bash ./restore.sh 53 | args: 54 | chdir: "/home/{{ main_username }}" 55 | when: pull_backup == true 56 | - name: Remove restore script 57 | file: 58 | path: /home/{{ main_username }}/restore.sh 59 | state: absent 60 | when: pull_backup == true 61 | -------------------------------------------------------------------------------- /roles/endowment/templates/photos.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd '{{ secret_photopath_1 }}' 4 | 5 | for file in $(find PXL_* -maxdepth 1 -type f -mtime +8); do 6 | # 1. Extract the date from the file name 7 | date="${file#PXL_}" 8 | date="${date%%_*}" 9 | 10 | # 2. Rearrange the date into the desired format 11 | year="$(echo $date | cut -c 1-4)" 12 | month="$(echo $date | cut -c 5-6)" 13 | dir="$year/$month" 14 | 15 | # 3. Move the file into the appropriate directory 16 | mkdir -p $dir 17 | # If older than 8 days, move 18 | mv "$file" -t $dir 19 | done 20 | 21 | cd '{{ secret_photopath_2 }}' 22 | 23 | for file in $(find PXL_* -maxdepth 1 -type f -mtime +8); do 24 | # 1. Extract the date from the file name 25 | date="${file#PXL_}" 26 | date="${date%%_*}" 27 | 28 | # 2. Rearrange the date into the desired format 29 | year="$(echo $date | cut -c 1-4)" 30 | month="$(echo $date | cut -c 5-6)" 31 | dir="$year/$month" 32 | 33 | # 3. 
Move the file into the appropriate directory 34 | mkdir -p $dir 35 | # If older than 8 days, move 36 | mv "$file" -t $dir 37 | done -------------------------------------------------------------------------------- /roles/github/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add GitHub SSH Key 3 | copy: 4 | dest: /home/{{ main_username }}/.ssh/github 5 | owner: "{{ main_username }}" 6 | group: "{{ main_username }}" 7 | mode: 0600 8 | content: "{{ secret_github_ssh }}" 9 | - name: Copy Github script 10 | template: 11 | src: github.sh.j2 12 | dest: /home/{{ main_username }}/github.sh 13 | owner: "{{ main_username }}" 14 | group: "{{ main_username }}" 15 | mode: +x 16 | - name: Add ssh config 17 | copy: 18 | dest: /home/{{ main_username }}/.ssh/config 19 | owner: "{{ main_username }}" 20 | group: "{{ main_username }}" 21 | mode: 0664 22 | content: | 23 | Host github.com 24 | IdentityFile ~/.ssh/github 25 | - name: Run Github Script 26 | command: /bin/bash ./github.sh 27 | args: 28 | chdir: "/home/{{ main_username }}" 29 | - name: Remove Github script 30 | file: 31 | path: /home/{{ main_username }}/github.sh 32 | state: absent 33 | -------------------------------------------------------------------------------- /roles/honor/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart wireguard service 3 | service: 4 | name: wg-quick@wg0 5 | enabled: 'true' 6 | state: restarted 7 | 8 | - name: start wireguard service 9 | service: 10 | name: wg-quick@wg0 11 | enabled: 'true' 12 | state: started 13 | -------------------------------------------------------------------------------- /roles/honor/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: SSH Keys - Add Work SSH Key 4 | authorized_key: 5 | user: "{{ main_username }}" 6 | state: present 7 | key: "{{ secret_work_ssh }}" 8 | 9 | - name: create config directories 10 | file: 11 | path: "/home/{{ main_username }}/{{ item }}" 12 | state: directory 13 | owner: "{{ main_username }}" 14 | group: "{{ main_groupname }}" 15 | loop: 16 | - docker 17 | - caddy 18 | 19 | - name: Copy certs script 20 | template: 21 | src: "{{ item.src }}" 22 | dest: /home/{{ main_username }}/{{ item.dest }} 23 | owner: "{{ main_username }}" 24 | group: "{{ main_username }}" 25 | mode: +x 26 | loop: 27 | - {src: 'certs.sh.j2', dest: 'certs.sh'} 28 | - {src: 'ddns_vps.sh.j2', dest: 'ddns_vps.sh'} 29 | 30 | ### Backup Restore 31 | - name: Create restore script 32 | copy: 33 | dest: /home/{{ main_username }}/restore.sh 34 | owner: "{{ main_username }}" 35 | group: "{{ main_username }}" 36 | mode: +x 37 | content: | 38 | #!/bin/bash 39 | /usr/local/bin/autorestic restore -f -l docker --from local_docker --to / 40 | when: pull_backup == true 41 | - name: Run restore script 42 | command: /bin/bash ./restore.sh 43 | args: 44 | chdir: "/home/{{ main_username }}" 45 | when: pull_backup == true 46 | - name: Remove restore script 47 | file: 48 | path: /home/{{ main_username }}/restore.sh 49 | state: absent 50 | when: pull_backup == true 51 | 52 | ### Wireguard 53 | - name: Enable IPv4 forwarding 54 | sysctl: 55 | name: net.ipv4.ip_forward 56 | value: 1 57 | reload: yes 58 | 59 | - name: Create wireguard conf 60 | copy: 61 | dest: /etc/wireguard/wg0.conf 62 | owner: root 63 | group: root 64 | mode: 0600 65 | content: "{{ secret_honor_wireguard_config }}" 66 | notify: 67 | - start 
wireguard service 68 | 69 | - name: Stop resolved 70 | service: 71 | name: systemd-resolved 72 | enabled: no 73 | state: stopped 74 | 75 | - name: Copy WG Tunnel Check Scripts 76 | template: 77 | src: wg-check.sh.j2 78 | dest: /home/{{ main_username }}/wg-check.sh 79 | owner: "{{ main_username }}" 80 | group: "{{ main_username }}" 81 | mode: +x 82 | -------------------------------------------------------------------------------- /roles/honor/templates/certs.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #/usr/local/bin/lego --email="{{ secret_new_email }}" --domains="*.{{ secret_personal_internal_url }}" --dns="hetzner" --accept-tos --path /home/{{ main_username }}/lego run 4 | #/usr/local/bin/lego --email="{{ secret_new_email }}" --domains="*.{{ secret_personal_url }}" --dns="hetzner" --accept-tos --path /home/{{ main_username }}/lego run 5 | 6 | /usr/local/bin/lego --email="{{ secret_new_email }}" --domains="*.{{ secret_personal_internal_url }}" --dns="hetzner" --dns.resolvers="9.9.9.9:53" --path /home/{{ main_username }}/lego renew --days 45 7 | /usr/local/bin/lego --email="{{ secret_new_email }}" --domains="*.{{ secret_personal_url }}" --dns="hetzner" --dns.resolvers="9.9.9.9:53" --path /home/{{ main_username }}/lego renew --days 45 8 | 9 | /bin/systemctl daemon-reload 10 | 11 | /bin/systemctl restart caddy 12 | 13 | sleep 10s -------------------------------------------------------------------------------- /roles/honor/templates/ddns_vps.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ### Get the IP address from anywhere that will echo it 4 | ip=`curl -s http://ipecho.net/plain` 5 | 6 | ### Set Hetzner Items 7 | # Obtain a Hetzner API key 8 | token={{ secret_hetzner_dyndns_api }} 9 | # Obtain zone_id via https://dns.hetzner.com/api-docs/#operation/GetZones 10 | zone_id={{ secret_hetzner_dyndns_zone_id }} 11 | # Obtain record_id via https://dns.hetzner.com/api-docs/#operation/GetRecords 12 | record_id={{ secret_hetzner_dyndns_record_id }} 13 | # The record you're trying to update 14 | dns_name=@ 15 | # Workaround for bash variable expansion inside quotes, do not change 16 | json_string='{"type":"A","zone_id":"'"$zone_id"'","name":"'"$dns_name"'","value":"'"$ip"'"}' 17 | 18 | curl -X PUT "https://dns.hetzner.com/api/v1/records/$record_id" \ 19 | -H 'Content-Type: application/json' \ 20 | -H "Auth-API-Token: $token" \ 21 | -d $json_string -------------------------------------------------------------------------------- /roles/honor/templates/wg-check.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Modified from https://mullvad.net/en/help/running-wireguard-router/ 3 | # and https://wiki.r-selfhosted.com/guides/virtual-private-networks/wireguard/ 4 | # ping Wireguard gateway to test for connection 5 | # if no contact, restart! 
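# (The gateway pinged below, 10.10.10.1, is assumed to be the WireGuard server's
# tunnel-side address from wg0.conf; adjust it if your tunnel subnet differs.)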
6 | 7 | PING=/bin/ping 8 | ## DEBIAN 9 | SERVICE=/usr/sbin/service 10 | 11 | tries=0 12 | while [[ $tries -lt 3 ]] 13 | do 14 | if $PING -c 1 10.10.10.1 15 | then 16 | echo "wg works" 17 | exit 0 18 | fi 19 | echo "wg fail" 20 | tries=$((tries+1)) 21 | done 22 | echo "wg failed 3 times - restarting tunnel" 23 | ## DEBIAN 24 | $SERVICE wg-quick@wg0 restart -------------------------------------------------------------------------------- /roles/identity/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create main user directories 3 | file: 4 | path: /home/{{ main_username }}/{{ item }} 5 | state: directory 6 | owner: "{{ main_username }}" 7 | group: "{{ main_groupname }}" 8 | loop: 9 | - docker 10 | - docker/dawarich 11 | 12 | ### Backup Restore 13 | - name: Create restore script 14 | copy: 15 | dest: /home/{{ main_username }}/restore.sh 16 | owner: "{{ main_username }}" 17 | group: "{{ main_username }}" 18 | mode: +x 19 | content: | 20 | #!/bin/bash 21 | /usr/local/bin/autorestic restore -f -l docker --from local_docker --to / 22 | when: pull_backup == true 23 | - name: Run restore script 24 | command: /bin/bash ./restore.sh 25 | args: 26 | chdir: "/home/{{ main_username }}" 27 | when: pull_backup == true 28 | - name: Remove restore script 29 | file: 30 | path: /home/{{ main_username }}/restore.sh 31 | state: absent 32 | when: pull_backup == true -------------------------------------------------------------------------------- /roles/install/bitwarden_cli/README.md: -------------------------------------------------------------------------------- 1 | # WARNING: Installs old version of BW CLI that does not support Argon2. Bitwarden moved the clients to a [single repo](https://github.com/bitwarden/clients/) and I have not had time to fix the installer to try to parse the GitHub API 2 | 3 | # Bitwarden CLI Installer 4 | 5 | An ansible role to install and configure [Bitwarden CLI](https://bitwarden.com/help/article/cli/). 6 | 7 | ## Features 8 | 9 | - Installation of `BW` binary. 10 | - Updating if there is an update and version is not pinned. 11 | 12 | ## Configuration 13 | 14 | This role has a number of variables that can be configured. 15 | 16 | | Variable | Description | Default | 17 | | ----------------------------------- | -------------------------------------------------------- | ----------------- | 18 | | **bw_download_latest_ver** | Whether to download the latest version from Github. | `true` 19 | | **bw_pinned_ver** | Desired version of BW CLI (overridden by above var). | `1.20.0` 20 | | **bw_distro** | Which distro to target for download. | `linux` 21 | | **bw_install_directory** | Where to install BW CLI binary. | `/usr/local/bin` 22 | 23 | By default the role fetches and installs the latest available version. You can disable this by pinning to a specific version. Here's an example if you wanted to set the version. 24 | 25 | ```yaml 26 | bw_download_latest_ver: false 27 | bw_pinned_ver: 1.20.0 28 | ``` 29 | By setting a pinned version, a version will only be pulled if the installed version does not match the pinned version. 30 | 31 | ## Github API 32 | 33 | This role utilizes the GitHub API to determine the latest release available. By default, the role utilizes unauthenticated requests, which are [limited by GitHub](https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting) to 60 requests per hour. Requests are associated with the originating IP address. For most use cases, this is not an issue.
However, you may find yourself rate limited. If you authenticate, you can make 5,000 requests per hour. 34 | 35 | To authenticate, you must obtain a [Personal Access Token](https://github.com/settings/tokens/new). The token does not need any scopes selected. Then add the following variables: 36 | 37 | ``` 38 | github_api_user: fuzzymistborn 39 | github_api_pass: YOUR_TOKEN 40 | github_api_auth: yes 41 | ``` 42 | 43 | That's it! 44 | -------------------------------------------------------------------------------- /roles/install/bitwarden_cli/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | bw_download_latest_ver: true # Change to 'false' to pin to a specific version 4 | bw_pinned_ver: 1.20.0 # Overridden by 'bw_download_latest_ver' variable 5 | bw_distro: linux 6 | 7 | bw_gh_url: https://github.com/bitwarden/cli/releases/download/ 8 | bw_download_directory: "/tmp/bw" 9 | 10 | bw_install_directory: /usr/local/bin 11 | bw_install_path: "{{ bw_install_directory }}/bw" -------------------------------------------------------------------------------- /roles/install/bitwarden_cli/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: get status of bw_install_path 4 | stat: 5 | path: "{{ bw_install_path }}" 6 | register: is_installed 7 | 8 | - name: set bw installed 9 | set_fact: 10 | bw_is_installed: "{{ is_installed.stat.exists }}" 11 | 12 | - name: check bw version 13 | shell: "{{ bw_install_path }} --version" 14 | changed_when: false 15 | register: installed_version_registered 16 | when: bw_is_installed == True 17 | 18 | - name: get latest release 19 | uri: 20 | url: https://api.github.com/repos/bitwarden/cli/releases/latest 21 | url_username: "{{ github_api_user | default (omit) }}" 22 | url_password: "{{ github_api_pass | default (omit) }}" 23 | return_content: true 24 | force_basic_auth: "{{ github_api_auth | default (omit) }}" 25 | register: release_version_registered 26 | when: bw_download_latest_ver == True 27 | 28 | - name: set bw version (latest) 29 | set_fact: 30 | bw_ver: "{{ release_version_registered.json.tag_name|regex_replace('v') }}" 31 | when: bw_download_latest_ver == True 32 | 33 | - name: set bw version (pinned) 34 | set_fact: 35 | bw_ver: "{{ bw_pinned_ver }}" 36 | when: bw_download_latest_ver == False 37 | 38 | - block: 39 | - name: ensure bw_download_directory does not exist 40 | file: 41 | path: "{{ bw_download_directory }}" 42 | state: absent 43 | 44 | - name: create bw_download_directory 45 | file: 46 | path: "{{ bw_download_directory }}" 47 | state: directory 48 | mode: 0755 49 | 50 | - name: download bw 51 | unarchive: 52 | src: "{{ bw_gh_url }}/v{{ bw_ver }}/bw-{{ bw_distro }}-{{ bw_ver }}.zip" 53 | dest: "{{ bw_download_directory }}" 54 | remote_src: yes 55 | owner: root 56 | group: root 57 | mode: +x 58 | 59 | - name: move to bw path 60 | copy: 61 | src: "{{ bw_download_directory }}/bw" 62 | dest: "{{ bw_install_directory }}" 63 | remote_src: yes 64 | mode: +x 65 | when: bw_is_installed == False or ( bw_is_installed == True and bw_download_latest_ver == True and installed_version_registered.stdout != bw_ver ) or ( bw_is_installed == True and bw_download_latest_ver == False and installed_version_registered.stdout != bw_pinned_ver ) 66 | -------------------------------------------------------------------------------- /roles/install/crowdsec_bouncer/tasks/main.yml:
-------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Add crowdsec apt-key 4 | ansible.builtin.apt_key: 5 | url: https://packagecloud.io/crowdsec/crowdsec/gpgkey 6 | state: present 7 | 8 | # https://packagecloud.io/crowdsec/crowdsec/install#manual-deb 9 | - name: Add crowdsec apt repository 10 | ansible.builtin.apt_repository: 11 | repo: deb https://packagecloud.io/crowdsec/crowdsec/any/ any main 12 | state: present 13 | filename: crowdsec 14 | update_cache: true 15 | 16 | - name: Install crowdsec bouncer 17 | ansible.builtin.apt: 18 | name: crowdsec-firewall-bouncer-iptables 19 | state: present 20 | update_cache: true 21 | 22 | ### only installs bouncer, still need to work on configuring -------------------------------------------------------------------------------- /roles/install/lego/README.md: -------------------------------------------------------------------------------- 1 | # LEGO Installer 2 | 3 | An ansible role to install and configure [LEGO](https://go-acme.github.io/lego/). 4 | 5 | ## Features 6 | 7 | - Installation of `lego` GO binary. 8 | - Updating if there is an update and version is not pinned. 9 | 10 | ## Configuration 11 | 12 | This role has a number of variables that can be configured. 13 | 14 | | Variable | Description | Default | 15 | | ----------------------------------- | -------------------------------------------------------- | ----------------- | 16 | | **lego_download_latest_ver** | Whether to download the latest version from Github. | `true` 17 | | **lego_pinned_ver** | Desired version of LEGO (overridden by above var). | `4.5.3` 18 | | **lego_distro** | Which distro to target for download. | `linux_amd64` 19 | | **lego_install_directory** | Where to install LEGO binary. | `/usr/local/bin` 20 | 21 | By default the role fetches and installs the latest available version. You can disable this by pinning to a specific version. Here's an example if you wanted to set the version. 22 | 23 | ```yaml 24 | lego_download_latest_ver: false 25 | lego_pinned_ver: 4.5.2 26 | ``` 27 | By setting a pinned version, a version will only be pulled if the installed version does not match the pinned version. 28 | 29 | ## Github API 30 | 31 | This role utilizes the GitHub API to determine the latest release available. By default, the role utilizes unauthenticated requests, which are [limited by GitHub](https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting) to 60 requests per hour. Requests are associated with the originating IP address. For most use cases, this is not an issue. However, you may find yourself rate limited. If you authenticate, you can make 5,000 requests per hour. 32 | 33 | To authenticate, you must obtain a [Personal Access Token](https://github.com/settings/tokens/new). The token does not need any scopes selected. Then add the following variables: 34 | 35 | ``` 36 | github_api_user: fuzzymistborn 37 | github_api_pass: YOUR_TOKEN 38 | github_api_auth: yes 39 | ``` 40 | 41 | That's it! 42 | 43 | ## To Do 44 | - Add cronjobs?
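One way that to-do could be tackled, sketched with Ansible's cron module. This is an assumption, not something the role ships: the schedule is arbitrary, and the `certs.sh` renewal wrapper path is borrowed from this repo's honor role rather than created by this installer.

```yaml
# Hypothetical renewal cronjob (not part of this role); assumes a lego
# wrapper script like the honor role's certs.sh handles the renew flags.
- name: Schedule lego certificate renewal
  ansible.builtin.cron:
    name: lego renewal
    user: root
    minute: "0"
    hour: "3"
    job: "/home/{{ main_username }}/certs.sh >/dev/null 2>&1"
```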
-------------------------------------------------------------------------------- /roles/install/lego/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | lego_download_latest_ver: true # Change to 'false' to pin to a specific version 4 | lego_pinned_ver: 4.5.3 # Overriden by 'lego_download_latest_ver' variable 5 | lego_distro: linux_amd64 6 | 7 | lego_gh_url: https://github.com/go-acme/lego/releases/download 8 | lego_download_directory: "/tmp/lego" 9 | 10 | lego_install_directory: /usr/local/bin 11 | lego_install_path: "{{ lego_install_directory }}/lego" -------------------------------------------------------------------------------- /roles/install/lego/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: get status of lego_install_path 4 | stat: 5 | path: "{{ lego_install_path }}" 6 | register: is_installed 7 | 8 | - name: set lego installed 9 | set_fact: 10 | lego_is_installed: "{{ is_installed.stat.exists }}" 11 | 12 | - name: check lego version 13 | shell: "{{ lego_install_path }} --version | awk '{ print $3 }' | cut -c 1-" 14 | changed_when: false 15 | register: installed_version_registered 16 | when: lego_is_installed == True 17 | 18 | - name: get latest release 19 | uri: 20 | url: https://api.github.com/repos/go-acme/lego/releases/latest 21 | url_username: "{{ github_api_user | default (omit) }}" 22 | url_password: "{{ github_api_pass | default (omit) }}" 23 | return_content: true 24 | force_basic_auth: "{{ github_api_auth | default (omit) }}" 25 | register: release_version_registered 26 | when: lego_download_latest_ver == True 27 | 28 | - name: set lego version (latest) 29 | set_fact: 30 | lego_ver: "{{ release_version_registered.json.tag_name|regex_replace('v') }}" 31 | when: lego_download_latest_ver == True 32 | 33 | - name: set lego version (pinned) 34 | set_fact: 35 | lego_ver: "{{ lego_pinned_ver }}" 36 | when: lego_download_latest_ver == False 37 | 38 | - block: 39 | - name: ensure lego_download_directory does not exist 40 | file: 41 | path: "{{ lego_download_directory }}" 42 | state: absent 43 | 44 | - name: create lego_download_directory 45 | file: 46 | path: "{{ lego_download_directory }}" 47 | state: directory 48 | mode: 0755 49 | 50 | - name: download lego 51 | unarchive: 52 | src: "{{ lego_gh_url }}/v{{ lego_ver }}/lego_v{{ lego_ver }}_{{ lego_distro }}.tar.gz" 53 | dest: "{{ lego_download_directory }}" 54 | remote_src: yes 55 | owner: root 56 | group: root 57 | mode: +x 58 | 59 | - name: move to lego path 60 | copy: 61 | src: "{{ lego_download_directory }}/lego" 62 | dest: "{{ lego_install_directory }}" 63 | remote_src: yes 64 | mode: +x 65 | when: lego_is_installed == False or ( lego_is_installed == True and lego_download_latest_ver == True and installed_version_registered.stdout != lego_ver ) or ( lego_is_installed == True and lego_download_latest_ver == False and installed_version_registered.stdout != lego_pinned_ver ) 66 | 67 | - name: populate /etc/environment 68 | lineinfile: 69 | dest: "/etc/environment" 70 | state: present 71 | regexp: "^LEGO_DISABLE_CNAME_SUPPORT=" 72 | line: "LEGO_DISABLE_CNAME_SUPPORT=true" 73 | when: lego_is_installed == False 74 | -------------------------------------------------------------------------------- /roles/install/lnxlink/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | lnxlink_desktop: false 3 | lnxlink_bash: false 4 | 
lnxlink_temperature: false 5 | lnxlink_mounts: false 6 | lnxlink_diskuse: false 7 | lnxlink_shutdown: false 8 | -------------------------------------------------------------------------------- /roles/install/lnxlink/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install pipx 3 | ansible.builtin.package: 4 | name: pipx 5 | state: present 6 | 7 | - name: Install LnxLink 8 | command: pipx install lnxlink 9 | become: yes 10 | become_user: "{{ main_username }}" 11 | 12 | - name: copy to /usr/local/bin 13 | copy: 14 | src: "/home/{{ main_username }}/.local/bin/lnxlink" 15 | dest: "/usr/local/bin" 16 | mode: +x 17 | remote_src: yes 18 | 19 | - name: create config directory 20 | file: 21 | path: /home/{{ main_username }}/.lnxlink 22 | state: directory 23 | owner: "{{ main_username }}" 24 | group: "{{ main_groupname }}" 25 | 26 | - name: Copy template files 27 | template: 28 | src: "{{ item.src }}" 29 | dest: "{{ item.dest }}" 30 | mode: "{{ item.mode }}" 31 | owner: "{{ main_username }}" 32 | group: "{{ main_username }}" 33 | loop: 34 | - {src: "config.yaml.j2", dest: "/home/{{ main_username }}/.lnxlink/config.yaml", mode: "0644"} 35 | 36 | - name: Copy systemd service file 37 | template: 38 | src: ../templates/lnxlink.service.j2 39 | dest: /etc/systemd/system/lnxlink.service 40 | owner: root 41 | group: root 42 | mode: 0644 43 | 44 | - name: Reload systemd services/timers 45 | systemd: 46 | enabled: true 47 | daemon-reload: true 48 | state: started 49 | name: lnxlink.service 50 | no_block: true -------------------------------------------------------------------------------- /roles/install/lnxlink/templates/config.yaml.j2: -------------------------------------------------------------------------------- 1 | mqtt: 2 | prefix: lnxlink 3 | clientId: {{ hostname }} 4 | server: 192.168.30.5 5 | port: 1883 6 | auth: 7 | user: homeassistant 8 | pass: {{ secret_mqtt_pass }} 9 | tls: false 10 | keyfile: '' 11 | certfile: '' 12 | ca_certs: '' 13 | discovery: 14 | enabled: true 15 | lwt: 16 | enabled: true 17 | qos: 1 18 | retain: true 19 | update_interval: 15 20 | hass_url: null 21 | hass_api: null 22 | modules: 23 | - network 24 | - cpu 25 | - update 26 | - memory 27 | - restart 28 | {% if lnxlink_desktop %} 29 | - microphone_used 30 | - speaker_used 31 | - suspend 32 | {% endif %} 33 | {% if lnxlink_shutdown %} 34 | - shutdown 35 | {% endif %} 36 | {% if lnxlink_temperature %} 37 | - temperature 38 | {% endif %} 39 | {% if lnxlink_mounts %} 40 | - mounts 41 | {% endif %} 42 | {% if lnxlink_diskuse %} 43 | - disk_usage 44 | {% endif %} 45 | {% if lnxlink_bash %} 46 | - bash 47 | {% endif %} 48 | custom_modules: null 49 | exclude: null 50 | settings: 51 | hotkeys: null 52 | {% if ansible_hostname == "ishap" %} 53 | mounts: 54 | autocheck: false 55 | directories: 56 | - /mnt/Backup 57 | disk_usage: 58 | include_disks: 59 | - /dev/mapper/pve-root 60 | exclude_disks: [] 61 | {% elif ansible_hostname == "adonalsium" %} 62 | mounts: 63 | autocheck: false 64 | directories: 65 | - /mnt/Media 66 | disk_usage: 67 | include_disks: 68 | - /dev/mapper/pve-root 69 | exclude_disks: [] 70 | {% else %} 71 | mounts: 72 | autocheck: false 73 | directories: [] 74 | disk_usage: 75 | include_disks: [] 76 | exclude_disks: [] 77 | {% endif %} 78 | {% if ansible_hostname == "ishap" %} 79 | bash: 80 | expose: 81 | - name: Start Trell 82 | command: /usr/sbin/qm start 300 83 | - name: Stop Trell 84 | command: /usr/sbin/qm stop 300 85 | {% elif ansible_hostname ==
"adonalsium" %} 86 | bash: 87 | expose: 88 | - name: Start Invention 89 | command: /usr/sbin/pct start 210 90 | - name: Stop Invention 91 | command: /usr/sbin/pct stop 210 92 | {% elif ansible_hostname == "Autonomy" %} 93 | bash: 94 | expose: 95 | - name: Cast Traffic 96 | command: /home/{{main_username}}/scripts/cast_traffic.sh 97 | - name: Cast Front Door 98 | command: /home/{{main_username}}/scripts/cast_frontdoor.sh 99 | - name: Cast Weather 100 | command: /home/{{main_username}}/scripts/cast_weather.sh 101 | - name: Update HASS 102 | command: /home/{{main_username}}/scripts/update_hass.sh 103 | {% else %} 104 | bash: 105 | expose: [] 106 | {% endif %} 107 | systemd: null 108 | gpio: 109 | inputs: null 110 | outputs: null 111 | statistics: https://analyzer.bkbilly.workers.dev 112 | -------------------------------------------------------------------------------- /roles/install/lnxlink/templates/lnxlink.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=LNXlink 3 | After=network-online.target multi-user.target graphical.target 4 | PartOf=graphical-session.target 5 | 6 | [Service] 7 | Type=simple 8 | Restart=always 9 | RestartSec=5 10 | 11 | ExecStart=/usr/local/bin/lnxlink -c /home/{{ main_username }}/.lnxlink/config.yaml 12 | 13 | [Install] 14 | WantedBy=default.target -------------------------------------------------------------------------------- /roles/install/mergerfs/README.md: -------------------------------------------------------------------------------- 1 | # MergerFS Installer 2 | 3 | An ansible role to install [MergerFS](https://github.com/trapexit/mergerfs/), a union filesystem that I use in combination with Snapraid to create a simple, reliable and redundant data store. See [PerfectMediaServer](https://perfectmediaserver.com/tech-stack/mergerfs.html) for more information. 4 | 5 | ## Features 6 | 7 | - Installation of `mergerfs` deb package. 8 | - Updating if there is an update and version is not pinned. 9 | 10 | ## Configuration 11 | 12 | Below are the configurable variables. 13 | 14 | ```yaml 15 | mergerfs_download_latest_ver: true # Change to 'false' to pin to a specific version 16 | mergerfs_pinned_ver: 2.32.6 # Overridden by 'mergerfs_download_latest_ver' variable 17 | mergerfs_distro: debian-bookworm_amd64 18 | ``` 19 | 20 | By default the role fetches and installs the latest available version. You can disable this by pinning to a specific version. Here's an example if you wanted to set the version. 21 | 22 | ```yaml 23 | mergerfs_download_latest_ver: false 24 | mergerfs_pinned_ver: 2.32.6 25 | ``` 26 | By setting a pinned version, a version will only be pulled if the installed version does not match the pinned version. 27 | 28 | ## Github API 29 | 30 | This role utilizes the GitHub API to determine the latest release available. By default, the role utilizes unauthenticated requests, which are [limited by GitHub](https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting) to 60 requests per hour. Requests are associated with the originating IP address. For most use cases, this is not an issue. However, you may find yourself rate limited. If you authenticate, you can make 5,000 requests per hour. 31 | 32 | To authenticate, you must obtain a [Personal Access Token](https://github.com/settings/tokens/new). The token does not need any scopes selected.
Then add the following variables: 33 | 34 | ``` 35 | github_api_user: fuzzymistborn 36 | github_api_pass: YOUR_TOKEN 37 | github_api_auth: yes 38 | ``` 39 | 40 | That's it! -------------------------------------------------------------------------------- /roles/install/mergerfs/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | mergerfs_download_latest_ver: true # Change to 'false' to pin to a specific version 3 | mergerfs_pinned_ver: 2.32.6 # Overriden by 'mergerfs_download_latest_ver' variable 4 | mergerfs_distro: debian-bookworm_amd64 5 | 6 | mergerfs_gh_url: https://github.com/trapexit/mergerfs/releases/download 7 | mergerfs_download_directory: "/tmp/mergerfs" -------------------------------------------------------------------------------- /roles/install/mergerfs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check whether mergerfs is installed 3 | shell: "dpkg-query -W 'mergerfs'" 4 | ignore_errors: true 5 | register: is_installed 6 | changed_when: "is_installed.rc != 0" 7 | 8 | - name: set mergerfs installed 9 | set_fact: 10 | mergerfs_is_installed: "{{ is_installed.failed == false }}" 11 | 12 | - name: check mergerfs version 13 | shell: "/usr/bin/mergerfs --version | awk '{ print $3 }' | cut -c 1-" 14 | changed_when: false 15 | register: installed_version_registered 16 | when: mergerfs_is_installed == True 17 | 18 | - name: get latest release 19 | uri: 20 | url: https://api.github.com/repos/trapexit/mergerfs/releases/latest 21 | url_username: "{{ github_api_user | default (omit) }}" 22 | url_password: "{{ github_api_pass | default (omit) }}" 23 | return_content: true 24 | force_basic_auth: "{{ github_api_auth | default (omit) }}" 25 | register: release_version_registered 26 | when: mergerfs_download_latest_ver == True 27 | 28 | - name: set mergerfs version (latest) 29 | set_fact: 30 | mergerfs_ver: "{{ release_version_registered.json.tag_name|regex_replace('v') }}" 31 | when: mergerfs_download_latest_ver == True 32 | 33 | - name: set mergerfs version (pinned) 34 | set_fact: 35 | mergerfs_ver: "{{ mergerfs_pinned_ver }}" 36 | when: mergerfs_download_latest_ver == False 37 | 38 | - block: 39 | - name: ensure mergerfs_download_directory does not exist 40 | file: 41 | path: "{{ mergerfs_download_directory }}" 42 | state: absent 43 | 44 | - name: create mergerfs_download_directory 45 | file: 46 | path: "{{ mergerfs_download_directory }}" 47 | state: directory 48 | mode: 0755 49 | 50 | - name: download mergerfs 51 | get_url: 52 | url: "{{ mergerfs_gh_url }}/{{ mergerfs_ver }}/mergerfs_{{ mergerfs_ver }}.{{ mergerfs_distro }}.deb" 53 | dest: "{{ mergerfs_download_directory }}" 54 | 55 | - name: install deb 56 | apt: 57 | deb: "{{ mergerfs_download_directory }}/mergerfs_{{ mergerfs_ver }}.{{ mergerfs_distro }}.deb" 58 | state: present 59 | when: mergerfs_is_installed == False or ( mergerfs_is_installed == True and mergerfs_download_latest_ver == True and installed_version_registered.stdout != mergerfs_ver ) or ( mergerfs_is_installed == True and mergerfs_download_latest_ver == False and installed_version_registered.stdout != mergerfs_pinned_ver ) 60 | -------------------------------------------------------------------------------- /roles/install/nag_removal/README.md: -------------------------------------------------------------------------------- 1 | # Proxmox Nag Removal Installer 2 | 3 | An ansible role to install 
[pve-fake-subscription](https://github.com/Jamesits/pve-fake-subscription/). I've tried many of the Proxmox nag removal tools out there; this one has been the most reliable. 4 | 5 | ## Features 6 | 7 | - Installation of `pve-fake-subscription` deb package. 8 | - Updating if there is an update and version is not pinned. 9 | 10 | ## Configuration 11 | 12 | Below are the configurable variables. Note that `nag_force_install` defaults to `false`. The purpose of that variable is to force an install to a specific version you pinned. 13 | 14 | ```yaml 15 | nag_ver: v0.0.7 16 | nag_force_install: false 17 | ``` 18 | Note that the `v` is necessary. 19 | 20 | ## Github API 21 | 22 | This role utilizes the GitHub API to determine the latest release available. By default, the role utilizes unauthenticated requests, which are [limited by GitHub](https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting) to 60 requests per hour. Requests are associated with the originating IP address. For most use cases, this is not an issue. However, you may find yourself rate limited. If you authenticate, you can make 5,000 requests per hour. 23 | 24 | To authenticate, you must obtain a [Personal Access Token](https://github.com/settings/tokens/new). The token does not need any scopes selected. Then add the following variables: 25 | 26 | ``` 27 | github_api_user: fuzzymistborn 28 | github_api_pass: YOUR_TOKEN 29 | github_api_auth: yes 30 | ``` 31 | 32 | That's it! -------------------------------------------------------------------------------- /roles/install/nag_removal/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | nag_force_install: false 4 | -------------------------------------------------------------------------------- /roles/install/nag_removal/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check whether package is installed 3 | shell: "dpkg-query -W 'pve-fake-subscription'" 4 | ignore_errors: true 5 | register: is_installed 6 | changed_when: "is_installed.rc != 0" 7 | 8 | - name: install package?
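  # Despite the name, this only computes a fact: force_install is true when the
  # package is absent or nag_force_install is set, and it gates every step below.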
9 | set_fact: 10 | force_install: "{{ nag_force_install == true or is_installed.failed == true }}" 11 | 12 | - name: get latest release 13 | uri: 14 | url: https://api.github.com/repos/Jamesits/pve-fake-subscription/releases/latest 15 | url_username: "{{ github_api_user | default (omit) }}" 16 | url_password: "{{ github_api_pass | default (omit) }}" 17 | return_content: true 18 | force_basic_auth: "{{ github_api_auth | default (omit) }}" 19 | register: release_latest 20 | when: nag_ver is undefined and force_install == True 21 | 22 | - name: set version if not already 23 | set_fact: nag_ver="{{ release_latest.json.tag_name }}" 24 | when: nag_ver is undefined and force_install == True 25 | 26 | - name: download release 27 | when: force_install == True 28 | get_url: 29 | url: https://github.com/Jamesits/pve-fake-subscription/releases/download/{{ nag_ver }}/pve-fake-subscription_{{ nag_ver | regex_replace('v') }}+git-1_all.deb 30 | dest: /tmp 31 | 32 | - name: set filename 33 | set_fact: file_name="pve-fake-subscription_{{ nag_ver | regex_replace('v') }}+git-1_all.deb" 34 | when: force_install == True 35 | 36 | - name: install deb 37 | apt: 38 | deb: /tmp/{{ file_name }} 39 | state: present 40 | when: force_install == True 41 | -------------------------------------------------------------------------------- /roles/install/pip_packages/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Pip Packages 3 | pip: 4 | name: "{{ item }}" 5 | extra_args: --break-system-packages 6 | loop: "{{ pip_packages }}" 7 | when: pip_packages is defined 8 | 9 | - name: Install Pipx Packages 10 | community.general.pipx: 11 | name: "{{ item }}" 12 | loop: "{{ pipx_packages }}" 13 | when: pipx_packages is defined -------------------------------------------------------------------------------- /roles/install/runitor/README.md: -------------------------------------------------------------------------------- 1 | # Runitor Installer 2 | 3 | An ansible role to install and configure [Runitor](https://github.com/bdd/runitor), a tool for simpler configuration and setup of your Healthchecks.io cronjobs. 4 | 5 | ## Features 6 | 7 | - Installation of `runitor` GO binary, as well as setting the API URL as an environment variable. 8 | - Updating if there is an update and version is not pinned. 9 | 10 | ## Configuration 11 | 12 | This role has a number of variables that can be configured. 13 | 14 | | Variable | Description | Default | 15 | | ----------------------------------- | -------------------------------------------------------- | ----------------- | 16 | | **runitor_download_latest_ver** | Whether to download the latest version from Github. | `true` 17 | | **runitor_pinned_ver** | Desired version of runitor (overridden by above var). | `0.8.0` 18 | | **runitor_distro** | Which distro to target for download. | `linux-amd64` 19 | | **runitor_install_directory** | Where to install runitor binary. | `/usr/local/bin` 20 | | **runitor_url** | The default URL used by runitor to ping | `https://hc-ping.com` 21 | 22 | By default the role fetches and installs the latest available version. You can disable this by pinning to a specific version. Here's an example if you wanted to set the version. 23 | 24 | ```yaml 25 | runitor_download_latest_ver: false 26 | runitor_pinned_ver: 0.8.0 27 | ``` 28 | By setting a pinned version, a version will only be pulled if the installed version does not match the pinned version.
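Once installed, wrapping a job is just a matter of prefixing the command with `runitor -uuid <check-uuid> --`, the same pattern this repo's restic systemd units use. A minimal sketch with Ansible's cron module; the schedule and the `hc_backup_id` variable are illustrative assumptions, not defined by this role:

```yaml
# Hypothetical cronjob: report a nightly autorestic run to Healthchecks.
- name: Schedule backup wrapped in runitor
  ansible.builtin.cron:
    name: autorestic backup
    user: root
    minute: "30"
    hour: "21"
    job: "/usr/local/bin/runitor -uuid {{ hc_backup_id }} -- /usr/local/bin/autorestic backup -a"
```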
29 | 30 | ## Github API 31 | 32 | This role utilizes the GitHub API to determine the latest release available. By default, the role utilizes unauthenticated requests, which are [limited by GitHub](https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting) to 60 requests per hour. Requests are associated with the originating IP address. For most use cases, this is not an issue. However, you may find yourself rate limited. If you authenticate, you can make 5,000 requests per hour. 33 | 34 | To authenticate, you must obtain a [Personal Access Token](https://github.com/settings/tokens/new). The token does not need any scopes selected. Then add the following variables: 35 | 36 | ``` 37 | github_api_user: fuzzymistborn 38 | github_api_pass: YOUR_TOKEN 39 | github_api_auth: yes 40 | ``` 41 | 42 | That's it! -------------------------------------------------------------------------------- /roles/install/runitor/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | runitor_download_latest_ver: true # Change to 'false' to pin to a specific version 3 | runitor_pinned_ver: 0.8.0 # Overridden by 'runitor_download_latest_ver' variable 4 | runitor_distro: linux-amd64 5 | 6 | runitor_gh_url: https://github.com/bdd/runitor/releases/download 7 | runitor_download_directory: "/tmp/runitor" 8 | 9 | runitor_install_directory: /usr/local/bin 10 | runitor_install_path: "{{ runitor_install_directory }}/runitor" 11 | runitor_url: https://hc-ping.com 12 | -------------------------------------------------------------------------------- /roles/install/runitor/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: get status of runitor_install_path 3 | stat: 4 | path: "{{ runitor_install_path }}" 5 | register: is_installed 6 | 7 | - name: set runitor installed 8 | set_fact: 9 | runitor_is_installed: "{{ is_installed.stat.exists }}" 10 | 11 | - name: check runitor version 12 | shell: "{{ runitor_install_path }} --version | awk '{ print $2 }' | cut -c 2-" 13 | changed_when: false 14 | register: installed_version_registered 15 | when: runitor_is_installed == True 16 | 17 | - name: get latest release 18 | uri: 19 | url: https://api.github.com/repos/bdd/runitor/releases/latest 20 | url_username: "{{ github_api_user | default (omit) }}" 21 | url_password: "{{ github_api_pass | default (omit) }}" 22 | return_content: true 23 | force_basic_auth: "{{ github_api_auth | default (omit) }}" 24 | register: release_version_registered 25 | when: runitor_download_latest_ver == True 26 | 27 | - name: set runitor version (latest) 28 | set_fact: 29 | runitor_ver: "{{ release_version_registered.json.tag_name|regex_replace('v') }}" 30 | when: runitor_download_latest_ver == True 31 | 32 | - name: set runitor version (pinned) 33 | set_fact: 34 | runitor_ver: "{{ runitor_pinned_ver }}" 35 | when: runitor_download_latest_ver == False 36 | 37 | - block: 38 | - name: ensure runitor_download_directory does not exist 39 | file: 40 | path: "{{ runitor_download_directory }}" 41 | state: absent 42 | 43 | - name: create runitor_download_directory 44 | file: 45 | path: "{{ runitor_download_directory }}" 46 | state: directory 47 | mode: 0755 48 | 49 | - name: download runitor 50 | get_url: 51 | url: "{{ runitor_gh_url }}/v{{ runitor_ver }}/runitor-v{{ runitor_ver }}-{{ runitor_distro }}" 52 | dest: "{{ runitor_download_directory }}" 53 | force: yes 54 | owner: root 55 | group: root 56 | mode: +x 57 | 58 | - name:
move to /usr/local/bin 59 | shell: "mv {{ runitor_download_directory }}/runitor-v{{ runitor_ver }}-{{ runitor_distro }} {{ runitor_install_path }}" 60 | 61 | - name: Fedora - SELinux Fix 62 | ansible.builtin.command: restorecon -Rv /usr/local/bin 63 | become: true 64 | when: ansible_distribution == 'Fedora' 65 | 66 | when: runitor_is_installed == False or ( runitor_is_installed == True and runitor_download_latest_ver == True and installed_version_registered.stdout != runitor_ver ) or ( runitor_is_installed == True and runitor_download_latest_ver == False and installed_version_registered.stdout != runitor_pinned_ver ) 67 | 68 | - name: populate /etc/environment 69 | lineinfile: 70 | dest: "/etc/environment" 71 | state: present 72 | regexp: "^HC_API_URL=" 73 | line: "HC_API_URL={{ runitor_url }}" 74 | when: runitor_is_installed == False 75 | -------------------------------------------------------------------------------- /roles/install/tailscale/README.md: -------------------------------------------------------------------------------- 1 | # Tailscale Installer 2 | 3 | An ansible role to install and configure Tailscale on *Ubuntu*-based systems. 4 | 5 | If you want to install on other systems, see [this one](https://github.com/artis3n/ansible-role-tailscale) by @artis3n. I stripped this one down as I only needed Tailscale on Ubuntu and nothing beyond that. -------------------------------------------------------------------------------- /roles/install/tailscale/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | apt_dependencies: 4 | - gnupg2 5 | - gnupg-agent 6 | - apt-transport-https 7 | - python3-apt 8 | 9 | apt_deb: deb https://pkgs.tailscale.com/stable/{{ ansible_distribution | lower }} {{ ansible_distribution_release | lower }} main 10 | apt_signkey: https://pkgs.tailscale.com/stable/{{ ansible_distribution | lower }}/{{ ansible_distribution_release | lower }}.noarmor.gpg 11 | 12 | tailscale_package: tailscale 13 | -------------------------------------------------------------------------------- /roles/install/tailscale/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Apt Update 3 | become: true 4 | ansible.builtin.apt: 5 | update_cache: true 6 | 7 | - name: Apt Dependencies 8 | become: true 9 | ansible.builtin.apt: 10 | name: "{{ apt_dependencies }}" 11 | state: present 12 | 13 | - name: Add Tailscale Signing Key 14 | become: true 15 | ansible.builtin.apt_key: 16 | url: "{{ apt_signkey }}" 17 | state: present 18 | 19 | - name: Add Tailscale Deb 20 | become: true 21 | ansible.builtin.apt_repository: 22 | repo: "{{ apt_deb }}" 23 | state: present 24 | 25 | - name: Install Tailscale 26 | become: true 27 | ansible.builtin.apt: 28 | name: "{{ tailscale_package }}" 29 | state: latest 30 | update_cache: true -------------------------------------------------------------------------------- /roles/install/zsh/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install ZSH and related packages 3 | become: true 4 | ansible.builtin.package: 5 | name: 6 | - zsh 7 | - acl 8 | # Not present on Ubuntu, at least not easily. 
Need to fix 9 | # - eza 10 | state: present 11 | 12 | - name: Download Oh My Zsh installation script 13 | get_url: 14 | url: https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh 15 | dest: /tmp/install_ohmyzsh.sh 16 | mode: +x 17 | 18 | - name: Run Oh My Zsh installation script 19 | command: sh /tmp/install_ohmyzsh.sh --unattended 20 | register: ohmyzsh_result 21 | become: true 22 | become_user: "{{ main_username }}" 23 | failed_when: "'FAILED' in ohmyzsh_result.stderr" 24 | 25 | - name: Install PowerLevel10k Theme 26 | ansible.builtin.git: 27 | repo: https://github.com/romkatv/powerlevel10k.git 28 | dest: /home/{{ main_username }}/.oh-my-zsh-custom/themes/powerlevel10k 29 | depth: 1 30 | 31 | - name: Install Autosuggestions Plugin 32 | ansible.builtin.git: 33 | repo: https://github.com/zsh-users/zsh-autosuggestions 34 | dest: /home/{{ main_username }}/.oh-my-zsh-custom/plugins/zsh-autosuggestions 35 | depth: 1 36 | 37 | - name: Install Syntax Highlighting Plugin 38 | ansible.builtin.git: 39 | repo: https://github.com/zsh-users/zsh-syntax-highlighting.git 40 | dest: /home/{{ main_username }}/.oh-my-zsh-custom/plugins/zsh-syntax-highlighting 41 | depth: 1 42 | 43 | - name: Copy config files 44 | copy: 45 | src: "{{ item.src }}" 46 | dest: "{{ item.dest }}" 47 | mode: 0644 48 | owner: "{{ main_username }}" 49 | group: "{{ main_username }}" 50 | loop: 51 | - src: ".zshrc" 52 | dest: "/home/{{ main_username }}/.zshrc" 53 | - src: "p10k.zsh" 54 | dest: "/home/{{ main_username }}/.p10k.zsh" 55 | 56 | - name: change user shell to zsh 57 | become: yes 58 | user: 59 | name: "{{ main_username }}" 60 | shell: /usr/bin/zsh -------------------------------------------------------------------------------- /roles/invention/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: create config directories 4 | file: 5 | path: "/home/{{ main_username }}/{{ item }}" 6 | state: directory 7 | owner: "{{ main_username }}" 8 | group: "{{ main_groupname }}" 9 | loop: 10 | - docker 11 | - docker/diun 12 | - docker/minecraft 13 | 14 | #### DIUN 15 | - name: Copy DIUN Config Template 16 | template: 17 | src: diun_config.yml.j2 18 | dest: /home/{{ main_username }}/docker/diun/config.yml 19 | owner: "{{ main_username }}" 20 | group: "{{ main_username }}" 21 | 22 | ### Backup Restore 23 | - name: Create restore script 24 | copy: 25 | dest: /home/{{ main_username }}/restore.sh 26 | owner: "{{ main_username }}" 27 | group: "{{ main_username }}" 28 | mode: +x 29 | content: | 30 | #!/bin/bash 31 | /usr/local/bin/autorestic restore -f -l docker --from local_docker --to / 32 | when: pull_backup == true 33 | - name: Run restore script 34 | command: /bin/bash ./restore.sh 35 | args: 36 | chdir: "/home/{{ main_username }}" 37 | when: pull_backup == true 38 | - name: Remove restore script 39 | file: 40 | path: /home/{{ main_username }}/restore.sh 41 | state: absent 42 | when: pull_backup == true 43 | 44 | ### Restic 45 | - name: Copy systemd service file 46 | template: 47 | src: ../templates//systemd/{{ item }}.j2 48 | dest: /etc/systemd/system/{{ item }} 49 | owner: root 50 | group: root 51 | mode: 0644 52 | loop: 53 | - restic_backup.service 54 | - restic_backup.timer 55 | - restic_prune.service 56 | - restic_prune.timer 57 | 58 | - name: Reload systemd services/timers 59 | systemd: 60 | enabled: true 61 | daemon-reload: true 62 | state: started 63 | name: "{{ item }}" 64 | no_block: true 65 | loop: 66 | - restic_backup.service 67 | - 
restic_backup.timer 68 | - restic_prune.service 69 | - restic_prune.timer 70 | -------------------------------------------------------------------------------- /roles/invention/templates/systemd/restic_backup.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Run Restic Backup 3 | Wants= restic_backup.timer 4 | [Service] 5 | Type=oneshot 6 | WorkingDirectory=/home/{{ main_username }} 7 | ExecStartPre=/bin/sleep 20 8 | ExecStart= /usr/local/bin/runitor -uuid {{ desktop_hc_backup_id }} -- /usr/local/bin/autorestic backup -a 9 | EnvironmentFile=/etc/environment 10 | [Install] 11 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /roles/invention/templates/systemd/restic_backup.timer.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Run Restic Backup 3 | After=network-online.target 4 | [Timer] 5 | OnCalendar=*-*-* 21:30:00 6 | Persistent=true 7 | [Install] 8 | WantedBy=timers.target 9 | -------------------------------------------------------------------------------- /roles/invention/templates/systemd/restic_prune.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Run Restic Prune 3 | Wants= restic_prune.timer 4 | [Service] 5 | Type=oneshot 6 | WorkingDirectory=/home/{{ main_username }} 7 | ExecStartPre=/bin/sleep 300 8 | ExecStart= /usr/local/bin/runitor -uuid {{ desktop_hc_prune_id }} -- /usr/local/bin/autorestic forget -a -- prune 9 | EnvironmentFile=/etc/environment 10 | [Install] 11 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /roles/invention/templates/systemd/restic_prune.timer.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Run Restic Prune 3 | After=network-online.target 4 | [Timer] 5 | OnCalendar=Mon *-*-* 20:30:00 6 | Persistent=true 7 | EnvironmentFile=/etc/environment 8 | [Install] 9 | WantedBy=timers.target -------------------------------------------------------------------------------- /roles/ishap/files/etc/export: -------------------------------------------------------------------------------- 1 | # /etc/exports: the access control list for filesystems which may be exported 2 | # to NFS clients. See exports(5). 3 | # 4 | # Example for NFSv2 and NFSv3: 5 | # /srv/homes hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check) 6 | # 7 | # Example for NFSv4: 8 | # /srv/nfs4 gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check) 9 | # /srv/nfs4/homes gss/krb5i(rw,sync,no_subtree_check) 10 | # 11 | /mnt/Backup *(rw,sync,fsid=0,no_root_squash,crossmnt,subtree_check,insecure) -------------------------------------------------------------------------------- /roles/ishap/files/etc/issue: -------------------------------------------------------------------------------- 1 | ------------------------------------------------------------------------------ 2 | 3 | Welcome to the Proxmox Virtual Environment. 
Please use your web browser to 4 | configure this server - connect to: 5 | 6 | https://\4{vmbr0}:8006/ 7 | 8 | ------------------------------------------------------------------------------ -------------------------------------------------------------------------------- /roles/ishap/files/trim.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Trimming Dominion" 4 | sudo pct fstrim 200 5 | 6 | echo "Trimming Drone" 7 | sudo pct fstrim 203 8 | 9 | echo "Trimming Omada" 10 | sudo pct fstrim 202 11 | -------------------------------------------------------------------------------- /roles/ishap/tasks/disks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: make sure disks unmounted 4 | mount: 5 | path: "{{ item.path }}" 6 | state: unmounted 7 | loop: "{{ data_disks + parity_disks }}" 8 | 9 | - name: create /mnt points 10 | file: 11 | dest: "{{ item.path }}" 12 | state: directory 13 | owner: nobody 14 | group: nogroup 15 | mode: 0777 16 | loop: "{{ mergerfs_mount + data_disks + parity_disks + external_mount }}" 17 | 18 | - name: mount disks 19 | mount: 20 | path: "{{ item.path }}" 21 | src: "{{ item.source }}" 22 | fstype: "{{ item.fs }}" 23 | opts: "{{ item.opts }}" 24 | # change to 'mounted' to auto mount versus 'present' for just loaded into fstab 25 | state: mounted 26 | loop: "{{ data_disks + parity_disks }}" 27 | 28 | - name: mount mergerfs array 29 | mount: 30 | path: "{{ item.mountpoint }}" 31 | src: "{{ item.source }}" 32 | opts: "{{ item.opts }}" 33 | fstype: "{{ item.fs }}" 34 | # change to 'mounted' to auto mount versus 'present' for just loaded into fstab 35 | state: mounted 36 | loop: "{{ fstab_mergerfs }}" 37 | 38 | - name: create mergerfs directories 39 | file: 40 | path: "{{ mergerfs_mount }}/{{ item }}" 41 | state: directory 42 | owner: "{{ main_username }}" 43 | group: "{{ main_groupname }}" 44 | loop: 45 | - proxmox 46 | - config 47 | - NVR 48 | - restic 49 | -------------------------------------------------------------------------------- /roles/ishap/tasks/infrastructure.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### Create LXCs 3 | 4 | - name: Update pveam 5 | command: pveam update 6 | 7 | - name: Download container templates 8 | command: pveam download NFS {{ item }} 9 | loop: 10 | - ubuntu-22.04-standard_22.04-1_amd64.tar.zst 11 | - debian-11-standard_11.6-1_amd64.tar.zst 12 | - debian-12-standard_12.2-1_amd64.tar.zst 13 | 14 | - name: Create LXCs w/ VLAN Tags 15 | proxmox: 16 | proxmox_default_behavior: no_defaults 17 | vmid: "{{ item.vmid }}" 18 | hostname: "{{ item.name }}" 19 | unprivileged: "{{ item.privileged }}" 20 | onboot: "{{ item.onboot | default ('true') }}" 21 | state: present 22 | node: ishap 23 | storage: local-lvm 24 | disk: "{{ item.disk }}" 25 | cpus: '1' 26 | cpuunits: '1000' 27 | cores: "{{ item.cores }}" 28 | memory: "{{ item.memory }}" 29 | swap: "{{ item.swap | default ('512') }}" 30 | api_user: root@pam 31 | api_host: localhost 32 | api_token_id: Ansible 33 | api_token_secret: "{{ secret_proxmox_api_token }}" 34 | pubkey: "{{ secret_proxmox_ct_ssh }}" 35 | password: "{{ secret_proxmox_pass }}" 36 | netif: "{'net0':'name=eth0,gw={{ item.gw }},ip={{ item.ip }}/24,tag={{ item.tag | default ('50') }},bridge=vmbr1'}" 37 | ostemplate: "{{ item.template| default ('NFS:vztmpl/ubuntu-22.04-standard_22.04-1_amd64.tar.zst') }}" 38 | features: 39 | - nesting={{ item.nesting |
default ('1') }} 40 | # - keyctl={{ item.keyctl | default ('0') }} 41 | loop: "{{ lxc_vlans }}" 42 | 43 | - name: Create LXCs on main VLAN 44 | proxmox: 45 | proxmox_default_behavior: no_defaults 46 | vmid: "{{ item.vmid }}" 47 | hostname: "{{ item.name }}" 48 | unprivileged: "{{ item.privileged }}" 49 | onboot: "{{ item.onboot | default ('true') }}" 50 | state: present 51 | node: ishap 52 | storage: local-lvm 53 | disk: "{{ item.disk }}" 54 | cpus: '1' 55 | cpuunits: '1000' 56 | cores: "{{ item.cores }}" 57 | memory: "{{ item.memory }}" 58 | swap: "{{ item.swap | default ('512') }}" 59 | api_user: root@pam 60 | api_host: localhost 61 | api_token_id: Ansible 62 | api_token_secret: "{{ secret_proxmox_api_token }}" 63 | pubkey: "{{ secret_proxmox_ct_ssh }}" 64 | password: "{{ secret_proxmox_pass }}" 65 | netif: "{'net0':'name=eth0,gw={{ item.gw }},ip={{ item.ip }}/24,bridge=vmbr0'}" 66 | ostemplate: "{{ item.template| default ('NFS:vztmpl/ubuntu-22.04-standard_22.04-1_amd64.tar.zst') }}" 67 | features: 68 | - nesting={{ item.nesting | default ('1') }} 69 | # - keyctl={{ item.keyctl | default ('0') }} 70 | loop: "{{ lxc_main }}" 71 | 72 | - name: Create VMs 73 | community.general.proxmox_kvm: 74 | api_user: root@pam 75 | api_host: localhost 76 | api_token_id: Ansible 77 | api_token_secret: "{{ secret_proxmox_api_token }}" 78 | pubkey: "{{ secret_proxmox_ct_ssh }}" 79 | password: "{{ secret_proxmox_pass }}" 80 | name: Virtuosity 81 | node: ishap 82 | net: 83 | net0: 'virtio,bridge=vmbr1' 84 | cores: 6 85 | balloon: 2048 86 | cpu: host 87 | cpuunits: '1000' 88 | format: qcow2 89 | loop: "{{ vm }}" 90 | -------------------------------------------------------------------------------- /roles/ishap/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | ### Basic Setup 4 | - name: Ensure user groups exists 5 | group: 6 | name: "{{ item }}" 7 | state: present 8 | loop: 9 | - "{{ main_username }}" 10 | - ansible 11 | 12 | - name: Add users 13 | user: 14 | name: "{{ item.user }}" 15 | password: "{{ item.pass }}" 16 | groups: 17 | - "{{ item.user }}" 18 | - sudo 19 | shell: /bin/bash 20 | loop: 21 | - {user: "{{ main_username }}", pass: "{{ secret_main_user_pass }}"} 22 | - {user: ansible, pass: "{{ secret_ansible_pass }}"} 23 | 24 | - name: Add sudoers file for ansible 25 | copy: 26 | src: sudoer_ansible 27 | dest: /etc/sudoers.d/ansible 28 | owner: root 29 | group: root 30 | mode: 0440 31 | 32 | - name: SSH Keys 33 | authorized_key: 34 | user: "{{ item.user }}" 35 | state: present 36 | key: "{{ item.ssh }}" 37 | loop: 38 | - {user: "{{ main_username }}", ssh: "{{ secret_main_user_ssh }}"} 39 | - {user: "{{ main_username }}", ssh: "{{ secret_main_user_alt_ssh }}"} 40 | - {user: ansible, ssh: "{{ secret_ansible_ssh }}"} 41 | 42 | - name: create main user directories 43 | file: 44 | path: /home/{{ main_username }}/{{ item }} 45 | state: directory 46 | owner: "{{ main_username }}" 47 | group: "{{ main_groupname }}" 48 | loop: 49 | - docker 50 | - docker/scrutiny 51 | 52 | - name: Copy scripts 53 | copy: 54 | src: "{{ item.src }}" 55 | dest: "{{ item.dest }}" 56 | mode: +x 57 | loop: 58 | - {src: "trim.sh", dest: "/home/{{ main_username }}/trim.sh"} 59 | 60 | # Source: https://www.reddit.com/r/Proxmox/comments/118i6ct/tutorialguide_how_to_make_the_prelogin_banner/ 61 | - name: Copy issue file to adjust displayer IP address 62 | copy: 63 | src: etc/issue 64 | dest: /etc/issue 65 | owner: root 66 | group: root 67 | mode: 0644 68 | 69 | ### 
Infrastructure 70 | - name: Set up disks/mounts 71 | include_tasks: disks.yml 72 | 73 | - name: Set up containers 74 | include_tasks: infrastructure.yml 75 | when: infrastructure == true 76 | 77 | ### Backup Restore 78 | - name: Create restore script 79 | copy: 80 | dest: /home/{{ main_username }}/restore.sh 81 | owner: "{{ main_username }}" 82 | group: "{{ main_username }}" 83 | mode: +x 84 | content: | 85 | #!/bin/bash 86 | /usr/local/bin/autorestic restore -f -l docker --from local_docker --to / 87 | when: pull_backup == true 88 | - name: Run restore script 89 | command: /bin/bash ./restore.sh 90 | args: 91 | chdir: "/home/{{ main_username }}" 92 | when: pull_backup == true 93 | - name: Remove restore script 94 | file: 95 | path: /home/{{ main_username }}/restore.sh 96 | state: absent 97 | when: pull_backup == true 98 | 99 | ### Wireguard/Misc 100 | - name: Enable IPv4 forwarding 101 | sysctl: 102 | name: net.ipv4.ip_forward 103 | value: 1 104 | reload: yes 105 | 106 | - name: Reboot Cronjob 107 | cron: 108 | name: "Set /dev/dri to 777" 109 | job: "chmod -R 777 /dev/dri" 110 | user: "root" 111 | special_time: reboot 112 | state: present 113 | -------------------------------------------------------------------------------- /roles/omada/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Set hostname 4 | hostname: 5 | name: omada 6 | 7 | - name: create config directories 8 | file: 9 | path: "/home/{{ main_username }}/{{ item }}" 10 | state: directory 11 | owner: "{{ main_username }}" 12 | group: "{{ main_groupname }}" 13 | loop: 14 | - docker 15 | - docker/diun 16 | - docker/omada 17 | 18 | #### DIUN 19 | - name: Copy DIUN Config Template 20 | template: 21 | src: diun_config.yml.j2 22 | dest: /home/{{ main_username }}/docker/diun/config.yml 23 | owner: "{{ main_username }}" 24 | group: "{{ main_username }}" 25 | 26 | ### Backup Restore 27 | - name: Create restore script 28 | copy: 29 | dest: /home/{{ main_username }}/restore.sh 30 | owner: "{{ main_username }}" 31 | group: "{{ main_username }}" 32 | mode: +x 33 | content: | 34 | #!/bin/bash 35 | /usr/local/bin/autorestic restore -f -l docker --from local_docker --to / 36 | when: pull_backup == true 37 | - name: Run restore script 38 | command: /bin/bash ./restore.sh 39 | args: 40 | chdir: "/home/{{ main_username }}" 41 | when: pull_backup == true 42 | - name: Remove restore script 43 | file: 44 | path: /home/{{ main_username }}/restore.sh 45 | state: absent 46 | when: pull_backup == true 47 | -------------------------------------------------------------------------------- /roles/preservation/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: create config directories 4 | file: 5 | path: "/home/{{ main_username }}/{{ item }}" 6 | state: directory 7 | owner: "{{ main_username }}" 8 | group: "{{ main_groupname }}" 9 | loop: 10 | - docker 11 | - docker/bitwarden_db 12 | - docker/diun 13 | - docker/nextcloud_db 14 | - docker/hass_db 15 | - docker/influx_db 16 | - docker/invidious 17 | - db_backups 18 | - scripts 19 | 20 | #### DIUN 21 | - name: Copy DIUN Config Template 22 | template: 23 | src: diun_config.yml.j2 24 | dest: /home/{{ main_username }}/docker/diun/config.yml 25 | owner: "{{ main_username }}" 26 | group: "{{ main_username }}" 27 | 28 | - name: Copy db backup script template 29 | template: 30 | src: "{{ item.src }}" 31 | dest: /home/{{ main_username }}/scripts/{{ item.dest }} 32 | owner: "{{ 
main_username }}" 33 | group: "{{ main_username }}" 34 | mode: +x 35 | loop: 36 | - {src: 'db_backup.sh.j2', dest: 'db_backup.sh'} 37 | - {src: 'db_restore.sh.j2', dest: 'db_restore.sh'} 38 | - {src: 'postgres_cleanup.sh.j2', dest: 'postgres_cleanup.sh'} 39 | 40 | ### Backup Restore 41 | - name: Create restore script 42 | copy: 43 | dest: /home/{{ main_username }}/restore.sh 44 | owner: "{{ main_username }}" 45 | group: "{{ main_username }}" 46 | mode: +x 47 | content: | 48 | #!/bin/bash 49 | /usr/local/bin/autorestic restore -f -l db_backups --from local_db --to / 50 | when: pull_backup == true 51 | - name: Run restore script 52 | command: /bin/bash ./restore.sh 53 | args: 54 | chdir: "/home/{{ main_username }}" 55 | when: pull_backup == true 56 | - name: Remove restore script 57 | file: 58 | path: /home/{{ main_username }}/restore.sh 59 | state: absent 60 | when: pull_backup == true 61 | 62 | #### Restore Databases 63 | - name: Run DB Restore Script 64 | command: /bin/bash ./db_restore.sh 65 | args: 66 | chdir: "/home/{{ main_username }}/scripts" 67 | when: pull_backup == true 68 | -------------------------------------------------------------------------------- /roles/preservation/templates/db_backup.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Nextcloud DB Backup 4 | echo Backing up Nextcloud DB 5 | today=$(date +"%Y_%m_%d") 6 | docker exec nextcloud-db sh -c 'exec mariadb-dump --single-transaction -h localhost -u {{ main_username }} -p{{ secret_nextcloud_db_pass }} nextcloud' > /home/{{ main_username }}/db_backups/nextcloud/nextcloudDB_$today.sql 7 | 8 | #### Bitwarden DB Backup 9 | echo Backing up Bitwarden DB 10 | today=$(date +"%Y_%m_%d") 11 | docker exec bitwarden-db sh -c 'exec mariadb-dump --single-transaction -h localhost -u bitwarden -p{{ secret_bitwarden_db_pass }} bitwarden_rs' > /home/{{ main_username }}/db_backups/bitwarden/bitwardenDB_$today.sql 12 | 13 | #### Hass DB Backup 14 | echo Backing up Hass DB 15 | today=$(date +"%Y_%m_%d") 16 | docker exec hass-db sh -c 'exec mariadb-dump --single-transaction -h localhost -u hass -p{{ secret_hass_db_pass }} homeassistant' > /home/{{ main_username }}/db_backups/hass/hassDB_$today.sql 17 | 18 | #### Invidious DB Backup 19 | echo Backing up Invidious DB 20 | today=$(date +"%Y_%m_%d") 21 | docker exec invidious-db sh -c 'pg_dumpall -U kemal' > /home/{{ main_username }}/db_backups/indivious/invidiousDB_$today.sql 22 | 23 | #### InfluxDB Backup 24 | echo Backing up InfluxDB 25 | docker exec influx-db sh -c 'influx backup \ 26 | /media/backup/$(date +"%Y.%m.%d") \ 27 | -t {{ secret_influxdb_token }} && chmod 777 -R /media/backup' 28 | 29 | #### Delete Old 30 | find /home/{{ main_username }}/db_backups/* -mtime +6 -type f -delete 31 | find /home/{{ main_username }}/db_backups/influxdb -mtime +6 -type d -delete 32 | find /home/{{ main_username }}/db_backups/influxdb -type d -empty -delete 33 | 34 | #### Backup the backups 35 | /usr/local/bin/autorestic backup -a -c /home/{{ main_username }}/.autorestic.yml -------------------------------------------------------------------------------- /roles/preservation/templates/db_restore.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd /home/{{ main_username }} 4 | docker compose up -d nextcloud-db bitwarden-db hass-db influx-db 5 | 6 | sleep 30s 7 | echo Restoring Nextcloud DB 8 | cd /home/{{ main_username }}/db_backups/nextcloud 9 | LAST_NEXTCLOUD=$(ls -t | head -n 
1) 10 | docker exec -i nextcloud-db mariadb -u {{ main_username }} -p{{ secret_nextcloud_db_pass }} nextcloud < /home/{{ main_username }}/db_backups/nextcloud/$LAST_NEXTCLOUD 11 | 12 | echo Restoring Bitwarden DB 13 | cd /home/{{ main_username }}/db_backups/bitwarden 14 | LAST_BITWARDEN=$(ls -t | head -n 1) 15 | docker exec -i bitwarden-db mariadb -u bitwarden -p{{ secret_bitwarden_db_pass }} bitwarden_rs < /home/{{ main_username }}/db_backups/bitwarden/$LAST_BITWARDEN 16 | 17 | echo Restoring HASS DB 18 | cd /home/{{ main_username }}/db_backups/hass 19 | LAST_HASS=$(ls -t | head -n 1) 20 | docker exec -i hass-db mariadb -u hass -p{{ secret_hass_db_pass }} homeassistant < /home/{{ main_username }}/db_backups/hass/$LAST_HASS 21 | 22 | echo Restoring Invidious DB 23 | cd /home/{{ main_username }}/db_backups/invidious 24 | LAST_INVIDIOUS=$(ls -t | head -n 1) 25 | cat /home/{{ main_username }}/db_backups/invidious/$LAST_INVIDIOUS | docker exec -i invidious-db psql -U kemal -d invidious 26 | 27 | #echo Restoring InfluxDB 28 | # Run `docker exec -it influx-db sh` and then run below commands 29 | # influx setup -t {{ secret_influxdb_token }} 30 | # influx restore --full /media/backup/DATE 31 | # Lots of user input data, probably not easy to automate/ansible-ize -------------------------------------------------------------------------------- /roles/preservation/templates/postgres_cleanup.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | docker exec -i invidious-db /usr/local/bin/psql invidious -U kemal -c "DELETE FROM nonces * WHERE expire < current_timestamp" 4 | docker exec -i invidious-db /usr/local/bin/psql invidious -U kemal -c "TRUNCATE TABLE videos" -------------------------------------------------------------------------------- /roles/unity/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### udev Rules 3 | - name: Add udev rules 4 | copy: 5 | dest: /etc/udev/rules.d/99-usb-serial.rules 6 | owner: root 7 | group: root 8 | mode: 0644 9 | content: | 10 | SUBSYSTEM=="tty", ATTRS{idVendor}=="10c4", ATTRS{idProduct}=="ea60", SYMLINK+="zwave" 11 | SUBSYSTEM=="tty", ATTRS{idVendor}=="1a86", ATTRS{idProduct}=="55d4", SYMLINK+="zigbee" 12 | SUBSYSTEM=="tty", ATTRS{idVendor}=="051d", ATTRS{idProduct}=="0002", SYMLINK+="upsd" 13 | 14 | - name: Update udev 15 | shell: "sudo udevadm control --reload-rules && sudo udevadm trigger" 16 | 17 | #### DIUN 18 | - name: create config directories 19 | file: 20 | path: /home/{{ main_username }}/{{ item }} 21 | state: directory 22 | owner: "{{ main_username }}" 23 | group: "{{ main_groupname }}" 24 | loop: 25 | - docker 26 | - docker/adguard 27 | 28 | ### Networking 29 | - name: Enable IPv4 forwarding 30 | sysctl: 31 | name: net.ipv4.ip_forward 32 | value: 1 33 | reload: yes 34 | - name: Enable IPv6 forwarding 35 | sysctl: 36 | name: net.ipv6.conf.all.forwarding 37 | value: 1 38 | reload: yes 39 | 40 | # - name: Stop resolved 41 | # service: 42 | # name: systemd-resolved 43 | # enabled: no 44 | # state: stopped -------------------------------------------------------------------------------- /roles/virtuosity/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | ### Basic Setup/Bootstrap 4 | - name: Ensure user groups exist 5 | group: 6 | name: "{{ item }}" 7 | state: present 8 | loop: 9 | - "{{ main_username }}" 10 | - ansible 11 | 12 | - name: Add users 13 | user: 14 | name: "{{
item.user }}" 15 | password: "{{ item.pass }}" 16 | groups: 17 | - "{{ item.user }}" 18 | - sudo 19 | shell: /bin/bash 20 | loop: 21 | - {user: "{{ main_username }}", pass: "{{ secret_main_user_pass }}"} 22 | - {user: ansible, pass: "{{ secret_ansible_pass }}"} 23 | 24 | - name: Add sudoers file for ansible 25 | copy: 26 | src: sudoer_ansible 27 | dest: /etc/sudoers.d/ansible 28 | owner: root 29 | group: root 30 | mode: 0440 31 | 32 | - name: SSH Keys 33 | authorized_key: 34 | user: "{{ item.user }}" 35 | state: present 36 | key: "{{ item.ssh }}" 37 | loop: 38 | - {user: "{{ main_username }}", ssh: "{{ secret_main_user_ssh }}"} 39 | - {user: "{{ main_username }}", ssh: "{{ secret_main_user_alt_ssh }}"} 40 | - {user: ansible, ssh: "{{ secret_ansible_ssh }}"} 41 | 42 | - name: create main user directories 43 | file: 44 | path: /home/{{ main_username }}/{{ item }} 45 | state: directory 46 | owner: "{{ main_username }}" 47 | group: "{{ main_groupname }}" 48 | loop: 49 | - docker 50 | - docker/immich 51 | - docker/ollama 52 | 53 | ### GPU 54 | - name: Add Nvidia PPA repo 55 | apt_repository: 56 | repo: ppa:graphics-drivers/ppa 57 | state: present 58 | 59 | - name: Install NVIDIA driver and utils 60 | apt: 61 | pkg: nvidia-driver-{{ nvidia_driver_version }} 62 | install_recommends: no 63 | state: present 64 | update_cache: yes 65 | autoremove: yes 66 | 67 | - name: Add Nvidia repo gpg key 68 | apt_key: 69 | url: https://nvidia.github.io/libnvidia-container/gpgkey 70 | state: present 71 | 72 | - name: Add Nvidia apt repo 73 | get_url: 74 | url: https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list 75 | dest: /etc/apt/sources.list.d/nvidia-docker.list 76 | mode: 0644 77 | 78 | - name: Install Nvidia container toolkit 79 | apt: 80 | name: 81 | - nvidia-container-toolkit 82 | state: present 83 | update_cache: yes 84 | 85 | ### fstab/disk mounts 86 | - name: make sure disks unmounted 87 | mount: 88 | path: "{{ item.path }}" 89 | state: unmounted 90 | loop: "{{ mountpoints }}" 91 | 92 | - name: create /mnt points 93 | file: 94 | dest: "{{ item.path }}" 95 | state: directory 96 | owner: "{{ main_username }}" 97 | group: "{{ main_username }}" 98 | mode: 0777 99 | loop: "{{ mountpoints }}" 100 | 101 | - name: Add smbcredentials 102 | copy: 103 | dest: /home/{{ main_username }}/.smbcredentials 104 | owner: "{{ main_username }}" 105 | group: "{{ main_username }}" 106 | mode: 0600 107 | content: | 108 | user=samba 109 | password={{ secret_samba_pass }} 110 | domain=cosmere 111 | 112 | - name: mount disks 113 | mount: 114 | path: "{{ item.path }}" 115 | src: "{{ item.source }}" 116 | fstype: "{{ item.fs }}" 117 | opts: "{{ item.opts }}" 118 | # change to 'mounted' to auto mount versus 'present' for just loaded into fstab 119 | state: present 120 | loop: "{{ mountpoints }}" 121 | -------------------------------------------------------------------------------- /services/adonalsium/compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | portainer: 3 | image: portainer/portainer-ce:2.30.1 4 | container_name: portainer 5 | volumes: 6 | - /srv/docker/portainer:/data 7 | - /var/run/docker.sock:/var/run/docker.sock 8 | ports: 9 | - 9000:9000 10 | - 8000:8000 11 | environment: 12 | - AGENT_SECRET={{ secret_portainer_key }} 13 | mem_limit: 200M 14 | restart: unless-stopped 15 | scrutiny: 16 | image: ghcr.io/analogj/scrutiny:v0.8.1-omnibus 17 | container_name: scrutiny 18 | privileged: true 19 | cap_add: 20 | - SYS_RAWIO 21 | - 
SYS_ADMIN 22 | devices: 23 | - /dev/sda:/dev/sda 24 | - /dev/sdb:/dev/sdb 25 | - /dev/sdc:/dev/sdc 26 | - /dev/sdd:/dev/sdd 27 | - /dev/sde:/dev/sde 28 | - /dev/nvme0:/dev/nvme0 29 | volumes: 30 | - "{{ appdata_path }}/scrutiny:/opt/scrutiny/config" 31 | - /run/udev:/run/udev:ro 32 | ports: 33 | - 8080:8080 34 | restart: unless-stopped 35 | uptime: 36 | image: louislam/uptime-kuma:1.23.16 37 | container_name: uptime 38 | volumes: 39 | - "{{ appdata_path }}/uptime:/app/data" 40 | ports: 41 | - 3001:3001 42 | restart: unless-stopped 43 | handbrake: 44 | image: ghcr.io/jlesage/handbrake:v25.02.3 45 | container_name: handbrake 46 | devices: 47 | - /dev/dri/renderD128:/dev/dri/renderD128 48 | volumes: 49 | - "{{ appdata_path }}/handbrake/config:/config" 50 | - "{{ appdata_path }}/handbrake/storage:/storage" 51 | - /mnt/Media/Movies/1-Processing/Handbrake/Watch:/watch 52 | - /mnt/Media/Movies/1-Processing/Handbrake/Output:/output 53 | - "/mnt/Media/Kids Movies:/stuff/kids_movies:ro" 54 | - "/mnt/Media/Kids TV:/stuff/kids_tv:ro" 55 | - "/mnt/Media/Movies:/stuff/movies:ro" 56 | - "/mnt/Media/TV Shows:/stuff/tv:ro" 57 | ports: 58 | - 5800:5800 59 | environment: 60 | - TZ={{ ntp_timezone }} 61 | restart: unless-stopped 62 | calibre: 63 | image: lscr.io/linuxserver/calibre:8.4.0 64 | container_name: calibre 65 | volumes: 66 | - "{{ appdata_path }}/calibre:/config" 67 | - /mnt/Media/Ebooks:/books 68 | ports: 69 | - 8085:8080 70 | - 8181:8181 71 | - 8081:8081 72 | environment: 73 | - UID={{ main_uid }} 74 | - GID={{ main_gid }} 75 | - PUID={{ main_uid }} 76 | - PGID={{ main_gid }} 77 | - TZ={{ ntp_timezone }} 78 | restart: unless-stopped 79 | frigate: 80 | image: ghcr.io/blakeblackshear/frigate:0.15.1 81 | container_name: frigate 82 | privileged: true 83 | devices: 84 | - /dev/bus/usb:/dev/bus/usb 85 | - /dev/dri/renderD128:/dev/dri/renderD128 86 | volumes: 87 | - "{{ appdata_path }}/frigate/config:/config" 88 | - /mnt/Media/Frigate:/media/frigate 89 | - /etc/localtime:/etc/localtime:ro 90 | ports: 91 | - 8971:8971 92 | - 5000:5000 93 | - 8554:8554 94 | - 8555:8555/tcp 95 | - 8555:8555/udp 96 | - 1984:1984 97 | environment: 98 | - FRIGATE_RTSP_PASSWORD='password' 99 | - PLUS_API_KEY={{ secret_frigate_plus_api }} 100 | - FRIGATE_JWT_SECRET={{ secret_frigate_jwt }} 101 | shm_size: 200mb 102 | tmpfs: 103 | - /tmp/cache 104 | restart: unless-stopped 105 | nut-ups: 106 | image: fuzzymistborn/nut-upsd:2.8.2 107 | container_name: nut-ups 108 | devices: 109 | - /dev/bus/usb/001/003:/dev/bus/usb/001/003 110 | ports: 111 | - 3493:3493 112 | environment: 113 | - API_USER=homeassistant 114 | - API_PASSWORD={{ secret_nut_api_pw }} 115 | restart: unless-stopped 116 | -------------------------------------------------------------------------------- /services/autonomy/compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | hass: 3 | image: ghcr.io/home-assistant/home-assistant:stable 4 | container_name: hass 5 | network_mode: host 6 | volumes: 7 | - /home/{{ main_username }}/hass:/config 8 | - /etc/localtime:/etc/localtime:ro 9 | - /var/run/docker.sock:/var/run/docker.sock 10 | - /mnt/Media:/media:ro 11 | depends_on: 12 | - mqtt 13 | restart: unless-stopped 14 | hass-beta: 15 | image: ghcr.io/home-assistant/home-assistant:beta 16 | container_name: HASS-Beta 17 | network_mode: host 18 | volumes: 19 | - /home/{{ main_username }}/hass_docker_beta:/config 20 | - /etc/localtime:/etc/localtime:ro 21 | ports: 22 | - 5316:5316 23 | depends_on: 24 | - mqtt 25 | restart: 
unless-stopped 26 | selenium: 27 | image: selenium/standalone-firefox:138.0 28 | container_name: selenium 29 | ports: 30 | - 4444:4444 31 | environment: 32 | - SE_NODE_MAX_SESSIONS=2 33 | - TZ={{ ntp_timezone }} 34 | shm_size: 2g 35 | restart: unless-stopped 36 | esphome: 37 | image: ghcr.io/esphome/esphome:2025.5.1 38 | container_name: esphome 39 | network_mode: host 40 | volumes: 41 | - "{{ appdata_path }}/esphome:/config" 42 | restart: unless-stopped 43 | mqtt: 44 | image: eclipse-mosquitto:2.0.21 45 | container_name: MQTT 46 | volumes: 47 | - "{{ appdata_path }}/mqtt/config:/mosquitto/config/" 48 | - "{{ appdata_path }}/mqtt/data:/mosquitto/data/" 49 | - "{{ appdata_path }}/mqtt/log:/mosquitto/log/" 50 | ports: 51 | - 1883:1883 52 | restart: unless-stopped 53 | govee2mqtt: 54 | image: ghcr.io/wez/govee2mqtt:2025.04.13-17d43d72 55 | container_name: govee2mqtt 56 | network_mode: host 57 | environment: 58 | - TZ={{ ntp_timezone }} 59 | - RUST_LOG_STYLE=always 60 | - GOVEE_EMAIL={{ secret_govee_user }} 61 | - GOVEE_PASSWORD={{ secret_govee_pass }} 62 | - GOVEE_API_KEY={{ secret_govee_api }} 63 | - GOVEE_MQTT_HOST=192.168.30.5 64 | - GOVEE_MQTT_PORT=1883 65 | - GOVEE_MQTT_USER=homeassistant 66 | - GOVEE_MQTT_PASSWORD={{ secret_mqtt_pass }} 67 | restart: unless-stopped 68 | signal-api: 69 | image: bbernhard/signal-cli-rest-api:0.92 70 | container_name: signal-api 71 | volumes: 72 | - "{{ appdata_path }}/signal-api:/home/.local/share/signal-cli" 73 | ports: 74 | - 8080:8080 75 | environment: 76 | - MODE=json-rpc 77 | restart: unless-stopped 78 | nodered: 79 | image: nodered/node-red:4.0.9 80 | container_name: nodered 81 | volumes: 82 | - "{{ appdata_path }}/nodered:/data" 83 | - /etc/localtime:/etc/localtime:ro 84 | ports: 85 | - 1880:1880 86 | environment: 87 | - TZ={{ ntp_timezone }} 88 | depends_on: 89 | - hass 90 | restart: unless-stopped 91 | tasmota: 92 | image: ghcr.io/tasmoadmin/tasmoadmin:v4.3.0 93 | container_name: tasmota 94 | volumes: 95 | - "{{ appdata_path }}/tasmota:/data" 96 | ports: 97 | - 5555:80 98 | restart: unless-stopped 99 | anylist: 100 | image: kevdliu/anylist:latest 101 | container_name: anylist 102 | volumes: 103 | - "{{ appdata_path }}/anylist:/data" 104 | ports: 105 | - 9500:9000 106 | environment: 107 | - PORT=9000 108 | - EMAIL={{ secret_email }} 109 | - PASSWORD={{ secret_anylist_pw }} 110 | restart: unless-stopped 111 | music-assistant: 112 | image: ghcr.io/music-assistant/server:2.5.2 113 | container_name: music-assistant 114 | network_mode: host 115 | privileged: true 116 | volumes: 117 | - "{{ appdata_path }}/music-assistant:/data" 118 | restart: unless-stopped 119 | piper: 120 | image: rhasspy/wyoming-piper:1.5.4 121 | container_name: piper 122 | volumes: 123 | - "{{ appdata_path }}/piper:/data" 124 | ports: 125 | - 10200:10200 126 | command: --voice en_US-hfc_female-medium 127 | restart: unless-stopped 128 | crowdsec: 129 | image: ghcr.io/crowdsecurity/crowdsec:v1.6.8 130 | container_name: crowdsec 131 | volumes: 132 | - "{{ appdata_path }}/crowdsec/data/acquis.yaml:/etc/crowdsec/acquis.yaml" 133 | - /home/{{ main_username }}/hass/:/var/log/homeassistant:ro 134 | - "{{ appdata_path }}/crowdsec/db:/var/lib/crowdsec/data/" 135 | environment: 136 | - COLLECTIONS=crowdsecurity/home-assistant 137 | - GID=1000 138 | - DISABLE_LOCAL_API=true 139 | - AGENT_USERNAME={{ hostname }} 140 | - AGENT_PASSWORD={{ secret_crowdsec_autonomy_pw }} 141 | - LOCAL_API_URL=http://{{ secret_ambition_ip }}:8080 142 | restart: unless-stopped 143 | portainer_agent: 144 | image: 
portainer/agent:2.30.1 145 | container_name: portainer_agent 146 | volumes: 147 | - /var/run/docker.sock:/var/run/docker.sock 148 | - /var/lib/docker/volumes:/var/lib/docker/volumes 149 | ports: 150 | - 9001:9001 151 | environment: 152 | - AGENT_SECRET={{ secret_portainer_key }} 153 | restart: unless-stopped -------------------------------------------------------------------------------- /services/honor/compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | adguard: 3 | image: adguard/adguardhome:v0.107.62 4 | container_name: adguard 5 | cap_add: 6 | - NET_ADMIN 7 | volumes: 8 | - "{{ appdata_path }}/adguard/conf:/opt/adguardhome/conf" 9 | - "{{ appdata_path }}/adguard/work:/opt/adguardhome/work" 10 | ports: 11 | - 53:53/tcp 12 | - 53:53/udp 13 | - 853:853/tcp 14 | - 81:80/tcp 15 | - 444:443/tcp 16 | - 3000:3000/tcp 17 | restart: unless-stopped 18 | crowdsec: 19 | image: ghcr.io/crowdsecurity/crowdsec:v1.6.8 20 | container_name: crowdsec 21 | volumes: 22 | - "{{ appdata_path }}/crowdsec/data/acquis.yaml:/etc/crowdsec/acquis.yaml" 23 | - /var/log/auth.log:/var/log/ssh/auth.log:ro 24 | - "{{ appdata_path }}/crowdsec/db:/var/lib/crowdsec/data/" 25 | environment: 26 | - COLLECTIONS=crowdsecurity/http-cve crowdsecurity/whitelist-good-actors crowdsecurity/sshd 27 | - GID=1000 28 | - DISABLE_LOCAL_API=true 29 | - AGENT_USERNAME={{ hostname }} 30 | - AGENT_PASSWORD={{ secret_crowdsec_honor_pw }} 31 | - LOCAL_API_URL=http://{{ secret_ambition_ip }}:8080 32 | restart: unless-stopped 33 | portainer_agent: 34 | image: portainer/agent:2.30.1 35 | container_name: portainer_agent 36 | volumes: 37 | - /var/run/docker.sock:/var/run/docker.sock 38 | - /var/lib/docker/volumes:/var/lib/docker/volumes 39 | ports: 40 | - 9001:9001 41 | environment: 42 | - AGENT_SECRET={{ secret_portainer_key }} 43 | restart: unless-stopped 44 | adguard-sync: 45 | image: ghcr.io/linuxserver/adguardhome-sync:0.7.6 46 | container_name: adguard-sync 47 | volumes: 48 | - "{{ appdata_path }}/adguardsync/config:/config" 49 | ports: 50 | - 8080:8080 51 | environment: 52 | - UID={{ main_uid }} 53 | - GID={{ main_gid }} 54 | - PUID={{ main_uid }} 55 | - PGID={{ main_gid }} 56 | - TZ={{ ntp_timezone }} 57 | restart: unless-stopped -------------------------------------------------------------------------------- /services/identity/compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | dawarich-app: 3 | image: freikin/dawarich:0.26.6 4 | container_name: dawarich-app 5 | volumes: 6 | - "{{ appdata_path }}/dawarich/public:/var/app/public" 7 | - "{{ appdata_path }}/dawarich/imports:/var/app/tmp/imports/watched" 8 | - "{{ appdata_path }}/dawarich/storage:/var/app/storage" 9 | ports: 10 | - 3000:3000 11 | environment: 12 | - RAILS_ENV=development 13 | - REDIS_URL=redis://dawarich-redis:6379/0 14 | - DATABASE_HOST=dawarich-db 15 | - DATABASE_USERNAME=dawarich 16 | - DATABASE_PASSWORD={{ secret_dawarich_db_pass }} 17 | - DATABASE_NAME=dawarich 18 | - MIN_MINUTES_SPENT_IN_CITY=60 19 | - APPLICATION_HOST=localhost 20 | - APPLICATION_HOSTS=localhost,track.{{ secret_personal_internal_url }} 21 | - TIME_ZONE={{ ntp_timezone }} 22 | - APPLICATION_PROTOCOL=http 23 | - REVERSE_GEOCODING_ENABLED=true 24 | - PHOTON_API_HOST=192.168.50.23:2322 25 | - PHOTON_API_USE_HTTPS=false 26 | - SELF_HOSTED=true 27 | stdin_open: true 28 | tty: true 29 | entrypoint: web-entrypoint.sh 30 | command: ['bin/rails', 'server', '-p', '3000', '-b', '::'] 31 |
logging: 32 | driver: json-file 33 | options: 34 | max-size: 100m 35 | max-file: 5 36 | restart: unless-stopped 37 | dawarich-db: 38 | image: postgis/postgis:17-3.5-alpine 39 | container_name: dawarich-db 40 | volumes: 41 | - "{{ appdata_path }}/dawarich/db17:/var/lib/postgresql/data" 42 | - "{{ appdata_path }}/dawarich/shared:/var/shared" 43 | - "{{ appdata_path }}/dawarich/postgresql.conf:/etc/postgresql/postgresql.conf" 44 | environment: 45 | - POSTGRES_DB=dawarich 46 | - POSTGRES_USER=dawarich 47 | - POSTGRES_PASSWORD={{ secret_dawarich_db_pass }} 48 | command: postgres -c config_file=/etc/postgresql/postgresql.conf 49 | mem_limit: 400M 50 | shm_size: 1G 51 | restart: unless-stopped 52 | dawarich-redis: 53 | image: redis:8.0.1 54 | container_name: dawarich-redis 55 | volumes: 56 | - "{{ appdata_path }}/dawarich/shared:/var/shared/redis" 57 | command: redis-server 58 | restart: unless-stopped 59 | dawarich-sidekiq: 60 | image: freikin/dawarich:0.26.6 61 | container_name: dawarich-sidekiq 62 | volumes: 63 | - "{{ appdata_path }}/dawarich/public:/var/app/public" 64 | - "{{ appdata_path }}/dawarich/imports:/var/app/tmp/imports/watched" 65 | - "{{ appdata_path }}/dawarich/storage:/var/app/storage" 66 | environment: 67 | - RAILS_ENV=development 68 | - REDIS_URL=redis://dawarich-redis:6379/0 69 | - DATABASE_HOST=dawarich-db 70 | - DATABASE_USERNAME=dawarich 71 | - DATABASE_PASSWORD={{ secret_dawarich_db_pass }} 72 | - DATABASE_NAME=dawarich 73 | - APPLICATION_HOST=localhost 74 | - APPLICATION_HOSTS=localhost 75 | - BACKGROUND_PROCESSING_CONCURRENCY=10 76 | - APPLICATION_PROTOCOL=http 77 | - REVERSE_GEOCODING_ENABLED=true 78 | - PHOTON_API_HOST=192.168.50.23:2322 79 | - PHOTON_API_USE_HTTPS=false 80 | - SELF_HOSTED=true 81 | stdin_open: true 82 | tty: true 83 | entrypoint: sidekiq-entrypoint.sh 84 | command: ['bundle', 'exec', 'sidekiq'] 85 | logging: 86 | driver: json-file 87 | options: 88 | max-size: 100m 89 | max-file: 5 90 | restart: unless-stopped 91 | photon: 92 | image: ghcr.io/rtuszik/photon-docker:0.6.2-rev.3 93 | container_name: photon 94 | environment: 95 | - COUNTRY_CODE=us 96 | volumes: 97 | - /mnt/external_data/photon:/photon/photon_data 98 | restart: unless-stopped 99 | ports: 100 | - 2322:2322 101 | portainer_agent: 102 | image: portainer/agent:2.30.1 103 | container_name: portainer_agent 104 | volumes: 105 | - /var/run/docker.sock:/var/run/docker.sock 106 | - /var/lib/docker/volumes:/var/lib/docker/volumes 107 | ports: 108 | - 9001:9001 109 | environment: 110 | - AGENT_SECRET={{ secret_portainer_key }} 111 | restart: unless-stopped 112 | -------------------------------------------------------------------------------- /services/invention/compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | diun: 3 | image: ghcr.io/crazy-max/diun:4.29.0 4 | container_name: diun 5 | volumes: 6 | - "{{ appdata_path }}/diun/data:/data" 7 | - "{{ appdata_path }}/diun/config.yml:/diun.yml:ro" 8 | - /var/run/docker.sock:/var/run/docker.sock 9 | environment: 10 | - UID={{ main_uid }} 11 | - GID={{ main_gid }} 12 | - PUID={{ main_uid }} 13 | - PGID={{ main_gid }} 14 | - TZ={{ ntp_timezone }} 15 | - LOG_LEVEL=info 16 | - LOG_JSON=false 17 | hostname: invention 18 | restart: unless-stopped 19 | portainer_agent: 20 | image: portainer/agent:2.30.1 21 | container_name: portainer_agent 22 | volumes: 23 | - /var/run/docker.sock:/var/run/docker.sock 24 | - /var/lib/docker/volumes:/var/lib/docker/volumes 25 | ports: 26 | - 9001:9001 27 | environment: 28 
| - AGENT_SECRET={{ secret_portainer_key }} 29 | restart: unless-stopped 30 | minecraft: 31 | image: 05jchambers/legendary-minecraft-geyser-floodgate:latest 32 | container_name: minecraft 33 | volumes: 34 | - "{{ appdata_path }}/minecraft:/minecraft" 35 | labels: 36 | - diun.enable=true 37 | - diun.regopt=docker.io 38 | ports: 39 | - 25565:25565 40 | - 19132:19132/udp 41 | - 19132:19132 42 | restart: unless-stopped -------------------------------------------------------------------------------- /services/investiture/compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | woodpecker-agent-github: 3 | image: quay.io/woodpeckerci/woodpecker-agent:v3.6.0 4 | container_name: woodpecker-agent-github 5 | volumes: 6 | - /var/run/docker.sock:/var/run/docker.sock 7 | - /etc/localtime:/etc/localtime:ro 8 | ports: 9 | - 4300:3000 10 | environment: 11 | - WOODPECKER_SERVER=githubagent.ci.fuzzymistborn.com 12 | - WOODPECKER_GRPC_SECURE=true 13 | - WOODPECKER_AGENT_SECRET={{ secret_investiture_rpc }} 14 | - WOODPECKER_HOSTNAME=Ishap-Github 15 | - WOODPECKER_MAX_WORKFLOWS=2 16 | - WOODPECKER_PLUGINS_PRIVILEGED=woodpeckerci/plugin-docker-buildx:5.0.0 17 | restart: unless-stopped 18 | woodpecker-agent-gitea: 19 | image: quay.io/woodpeckerci/woodpecker-agent:v3.6.0 20 | container_name: woodpecker-agent-gitea 21 | volumes: 22 | - /var/run/docker.sock:/var/run/docker.sock 23 | - /etc/localtime:/etc/localtime:ro 24 | ports: 25 | - 4301:3000 26 | environment: 27 | - WOODPECKER_SERVER=giteaagent.ci.fuzzymistborn.com 28 | - WOODPECKER_GRPC_SECURE=true 29 | - WOODPECKER_AGENT_SECRET={{ secret_investiture_rpc }} 30 | - WOODPECKER_HOSTNAME=Ishap-Gitea 31 | - WOODPECKER_MAX_WORKFLOWS=2 32 | - WOODPECKER_PLUGINS_PRIVILEGED=woodpeckerci/plugin-docker-buildx:5.0.0 33 | restart: unless-stopped 34 | portainer_agent: 35 | image: portainer/agent:2.30.1 36 | container_name: portainer_agent 37 | volumes: 38 | - /var/run/docker.sock:/var/run/docker.sock 39 | - /var/lib/docker/volumes:/var/lib/docker/volumes 40 | ports: 41 | - 9001:9001 42 | environment: 43 | - AGENT_SECRET={{ secret_portainer_key }} 44 | restart: unless-stopped -------------------------------------------------------------------------------- /services/ishap/compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | gluetun: 3 | image: qmcgaw/gluetun:v3.40.0 4 | container_name: gluetun 5 | cap_add: 6 | - NET_ADMIN 7 | devices: 8 | - /dev/net/tun:/dev/net/tun 9 | volumes: 10 | - "{{ appdata_path }}/gluetun:/gluetun" 11 | environment: 12 | - VPN_SERVICE_PROVIDER=mullvad 13 | - VPN_TYPE=wireguard 14 | - WIREGUARD_PRIVATE_KEY={{ secret_mullvad_key }} 15 | - WIREGUARD_ADDRESSES={{ secret_mullvad_url }} 16 | - SERVER_CITIES={{ secret_mullvad_cities }} 17 | - DNS_ADDRESS=192.168.1.11 18 | - TZ={{ ntp_timezone }} 19 | - UPDATER_PERIOD=24h 20 | restart: unless-stopped 21 | tailscale: 22 | image: ghcr.io/tailscale/tailscale:v1.84.0 23 | container_name: tailscale 24 | # network_mode: service:gluetun 25 | cap_add: 26 | - net_admin 27 | - sys_module 28 | devices: 29 | - /dev/net/tun:/dev/net/tun 30 | volumes: 31 | - "{{ appdata_path }}/tailscale:/var/lib/tailscale" 32 | environment: 33 | - TS_HOSTNAME=ishap 34 | - TS_AUTHKEY={{ secret_ishap_tailscale_key }} 35 | - TS_EXTRA_ARGS= --advertise-exit-node --accept-routes 36 | - TS_STATE_DIR=/var/lib/tailscale 37 | - TS_USERSPACE=true 38 | restart: unless-stopped 39 | imap-backup: 40 | image: 
ghcr.io/joeyates/imap-backup:v15.0.2 41 | container_name: imap-backup 42 | volumes: 43 | - /mnt/Backup/config/Email:/data 44 | - "{{ appdata_path }}/imap/config:/config" 45 | environment: 46 | - UID={{ main_uid }} 47 | - GID={{ main_gid }} 48 | command: imap-backup backup --config /config/personal.json 49 | restart: unless-stopped 50 | portainer_agent: 51 | image: portainer/agent:2.30.1 52 | container_name: portainer_agent 53 | volumes: 54 | - /var/run/docker.sock:/var/run/docker.sock 55 | - /var/lib/docker/volumes:/var/lib/docker/volumes 56 | ports: 57 | - 9001:9001 58 | environment: 59 | - AGENT_SECRET={{ secret_portainer_key }} 60 | restart: unless-stopped 61 | scrutiny: 62 | image: ghcr.io/analogj/scrutiny:v0.8.1-omnibus 63 | container_name: scrutiny 64 | privileged: true 65 | cap_add: 66 | - SYS_RAWIO 67 | - SYS_ADMIN 68 | devices: 69 | - /dev/sda:/dev/sda 70 | - /dev/sdb:/dev/sdb 71 | - /dev/sdc:/dev/sdc 72 | - /dev/sdd:/dev/sdd 73 | - /dev/sde:/dev/sde 74 | - /dev/sdf:/dev/sdf 75 | - /dev/nvme0:/dev/nvme0 76 | volumes: 77 | - "{{ appdata_path }}/scrutiny:/opt/scrutiny/config" 78 | - /run/udev:/run/udev:ro 79 | ports: 80 | - 8080:8080 81 | restart: unless-stopped 82 | restic-server: 83 | image: restic/rest-server:0.13.0 84 | container_name: restic-server 85 | volumes: 86 | - /mnt/Backup/restic:/data 87 | ports: 88 | - 8500:8000 89 | restart: unless-stopped 90 | -------------------------------------------------------------------------------- /services/omada/compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | diun: 3 | image: ghcr.io/crazy-max/diun:4.29.0 4 | container_name: diun 5 | volumes: 6 | - "{{ appdata_path }}/diun/data:/data" 7 | - "{{ appdata_path }}/diun/config.yml:/diun.yml:ro" 8 | - /var/run/docker.sock:/var/run/docker.sock 9 | environment: 10 | - UID={{ main_uid }} 11 | - GID={{ main_gid }} 12 | - PUID={{ main_uid }} 13 | - PGID={{ main_gid }} 14 | - TZ={{ ntp_timezone }} 15 | - LOG_LEVEL=info 16 | - LOG_JSON=false 17 | hostname: omada 18 | restart: unless-stopped 19 | portainer_agent: 20 | image: portainer/agent:2.30.1 21 | container_name: portainer_agent 22 | volumes: 23 | - /var/run/docker.sock:/var/run/docker.sock 24 | - /var/lib/docker/volumes:/var/lib/docker/volumes 25 | ports: 26 | - 9001:9001 27 | environment: 28 | - AGENT_SECRET={{ secret_portainer_key }} 29 | restart: unless-stopped 30 | omada-controller: 31 | image: mbentley/omada-controller:5.15 32 | container_name: omada-controller 33 | network_mode: host 34 | volumes: 35 | - "{{ appdata_path }}/omada/data:/opt/tplink/EAPController/data" 36 | - "{{ appdata_path }}/omada/work:/opt/tplink/EAPController/work" 37 | - "{{ appdata_path }}/omada/logs:/opt/tplink/EAPController/logs" 38 | labels: 39 | - diun.enable=true 40 | - diun.regopt=docker.io 41 | environment: 42 | - UID={{ main_uid }} 43 | - GID={{ main_gid }} 44 | - PUID={{ main_uid }} 45 | - PGID={{ main_gid }} 46 | - TZ={{ ntp_timezone }} 47 | - MANAGE_HTTP_PORT=8088 48 | - MANAGE_HTTPS_PORT=8043 49 | - PORTAL_HTTP_PORT=8088 50 | - PORTAL_HTTPS_PORT=8843 51 | - SHOW_SERVER_LOGS=true 52 | - SHOW_MONGODB_LOGS=false 53 | restart: unless-stopped 54 | -------------------------------------------------------------------------------- /services/preservation/compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | hass-db: 3 | image: mariadb:11.7.2 4 | container_name: hass-db 5 | volumes: 6 | - "{{ appdata_path }}/hass_db:/var/lib/mysql" 7 | labels: 8 | 
- diun.enable=true 9 | - diun.regopt=docker.io 10 | ports: 11 | - 3307:3306 12 | environment: 13 | - MYSQL_ROOT_PASSWORD={{ secret_db_root_pass }} 14 | - MYSQL_DATABASE=homeassistant 15 | - MYSQL_USER=hass 16 | - MYSQL_PASSWORD={{ secret_hass_db_pass }} 17 | restart: unless-stopped 18 | influx-db: 19 | image: influxdb:2.7.12 20 | container_name: influx-db 21 | volumes: 22 | - /home/{{ main_username }}/db_backups/influxdb:/media/backup 23 | - "{{ appdata_path }}/influx_db/data:/var/lib/influxdb2" 24 | - "{{ appdata_path }}/influx_db/config:/etc/influxdb2" 25 | ports: 26 | - 8086:8086 27 | environment: 28 | # - DOCKER_INFLUXDB_INIT_MODE=setup 29 | - DOCKER_INFLUXDB_INIT_USERNAME={{ main_username }} 30 | - DOCKER_INFLUXDB_INIT_PASSWORD={{ secret_influxdb_pass }} 31 | - DOCKER_INFLUXDB_INIT_ORG=cosmere 32 | - DOCKER_INFLUXDB_INIT_BUCKET=Default 33 | - DOCKER_INFLUXDB_INIT_RETENTION=168h 34 | - DOCKER_INFLUXDB_INIT_ADMIN_TOKEN={{ secret_influxdb_token }} 35 | restart: unless-stopped 36 | # image: quay.io/influxdb/influxdb:2.1-nightly 37 | # NOTE: quay.io image does not include influx client. To install, run following: 38 | # apt install wget -y 39 | # wget https://dl.influxdata.com/influxdb/releases/influxdb2-client-2.2.0-linux-amd64.tar.gz 40 | # tar xvzf influxdb2-client-2.2.0-linux-amd64.tar.gz 41 | # cp influxdb2-client-2.2.0-linux-amd64/influx /usr/local/bin/ 42 | # rm -r influxdb2-client-2.2.0-linux-amd64 43 | # rm influxdb2-client-2.2.0-linux-amd64.tar.gz 44 | bitwarden-db: 45 | image: mariadb:11.7.2 46 | container_name: bitwarden-db 47 | volumes: 48 | - "{{ appdata_path }}/bitwarden_db:/var/lib/mysql" 49 | labels: 50 | - diun.enable=true 51 | - diun.regopt=docker.io 52 | ports: 53 | - 3308:3306 54 | environment: 55 | - MYSQL_ROOT_PASSWORD={{ secret_db_root_pass }} 56 | - MYSQL_DATABASE=bitwarden_rs 57 | - MYSQL_USER=bitwarden 58 | - MYSQL_PASSWORD={{ secret_bitwarden_db_pass }} 59 | mem_limit: 300M 60 | restart: unless-stopped 61 | nextcloud-db: 62 | image: mariadb:11.7.2 63 | container_name: nextcloud-db 64 | volumes: 65 | - "{{ appdata_path }}/nextcloud_db:/var/lib/mysql" 66 | ports: 67 | - 3306:3306 68 | environment: 69 | - MYSQL_ROOT_PASSWORD={{ secret_db_root_pass }} 70 | - MYSQL_DATABASE=nextcloud 71 | - MYSQL_USER={{ main_username }} 72 | - MYSQL_PASSWORD={{ secret_nextcloud_db_pass }} 73 | mem_limit: 300M 74 | restart: unless-stopped 75 | invidious-db: 76 | image: postgres:15.13-alpine 77 | container_name: invidious-db 78 | volumes: 79 | - "{{ appdata_path }}/invidious/data:/var/lib/postgresql/data" 80 | - "{{ appdata_path }}/invidious/config/sql:/config/sql" 81 | - "{{ appdata_path }}/invidious/init-invidious-db.sh:/docker-entrypoint-initdb.d/init-invidious-db.sh" 82 | labels: 83 | - diun.enable=true 84 | - diun.regopt=docker.io 85 | ports: 86 | - 5432:5432 87 | environment: 88 | - POSTGRES_DB=invidious 89 | - POSTGRES_USER=kemal 90 | - POSTGRES_PASSWORD={{ secret_invidious_db_pass }} 91 | mem_limit: 200M 92 | restart: unless-stopped 93 | phpmyadmin: 94 | image: ghcr.io/linuxserver/phpmyadmin:5.2.2 95 | container_name: phpmyadmin 96 | volumes: 97 | - "{{ appdata_path }}/phpmyadmin:/config" 98 | ports: 99 | - 8000:80/tcp 100 | environment: 101 | - UID={{ main_uid }} 102 | - GID={{ main_gid }} 103 | - PUID={{ main_uid }} 104 | - PGID={{ main_gid }} 105 | - TZ={{ ntp_timezone }} 106 | - PMA_ARBITRARY=1 107 | restart: unless-stopped 108 | diun: 109 | image: ghcr.io/crazy-max/diun:4.29.0 110 | container_name: diun 111 | volumes: 112 | - "{{ appdata_path }}/diun/data:/data" 113 | - 
"{{ appdata_path }}/diun/config.yml:/diun.yml:ro" 114 | - /var/run/docker.sock:/var/run/docker.sock 115 | environment: 116 | - UID={{ main_uid }} 117 | - GID={{ main_gid }} 118 | - PUID={{ main_uid }} 119 | - PGID={{ main_gid }} 120 | - TZ={{ ntp_timezone }} 121 | - LOG_LEVEL=info 122 | - LOG_JSON=false 123 | hostname: preservation 124 | restart: unless-stopped 125 | portainer_agent: 126 | image: portainer/agent:2.30.1 127 | container_name: portainer_agent 128 | volumes: 129 | - /var/run/docker.sock:/var/run/docker.sock 130 | - /var/lib/docker/volumes:/var/lib/docker/volumes 131 | ports: 132 | - 9001:9001 133 | environment: 134 | - AGENT_SECRET={{ secret_portainer_key }} 135 | restart: unless-stopped -------------------------------------------------------------------------------- /services/unity/compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | zwavejs: 3 | image: ghcr.io/zwave-js/zwave-js-ui:10.5.1 4 | container_name: zwavejs 5 | devices: 6 | - /dev/zwave:/dev/zwave 7 | volumes: 8 | - "{{ appdata_path }}/zwavejs:/usr/src/app/store" 9 | ports: 10 | - 8091:8091 11 | - 3500:3000 12 | environment: 13 | - TZ={{ ntp_timezone }} 14 | restart: unless-stopped 15 | zigbee2mqtt: 16 | image: ghcr.io/koenkk/zigbee2mqtt:2.3.0 17 | container_name: zigbee2mqtt 18 | devices: 19 | - /dev/zigbee:/dev/zigbee 20 | volumes: 21 | - "{{ appdata_path }}/zigbee2mqtt:/app/data" 22 | - /run/udev:/run/udev:ro 23 | ports: 24 | - 8080:8080 25 | environment: 26 | - TZ={{ ntp_timezone }} 27 | restart: unless-stopped 28 | adguard: 29 | image: adguard/adguardhome:v0.107.62 30 | container_name: adguard 31 | cap_add: 32 | - NET_ADMIN 33 | volumes: 34 | - "{{ appdata_path }}/adguard/conf:/opt/adguardhome/conf" 35 | - "{{ appdata_path }}/adguard/work:/opt/adguardhome/work" 36 | ports: 37 | - 53:53/tcp 38 | - 53:53/udp 39 | - 853:853/tcp 40 | - 81:80/tcp 41 | - 444:443/tcp 42 | - 3000:3000/tcp 43 | restart: unless-stopped 44 | portainer_agent: 45 | image: portainer/agent:2.30.1 46 | container_name: portainer_agent 47 | volumes: 48 | - /var/run/docker.sock:/var/run/docker.sock 49 | - /var/lib/docker/volumes:/var/lib/docker/volumes 50 | ports: 51 | - 9001:9001 52 | environment: 53 | - AGENT_SECRET={{ secret_portainer_key }} 54 | restart: unless-stopped 55 | nut-ups: 56 | image: fuzzymistborn/nut-upsd:2.8.2 57 | container_name: nut-ups 58 | devices: 59 | - /dev/bus/usb/001/005:/dev/bus/usb/001/005 60 | ports: 61 | - 3493:3493 62 | environment: 63 | - API_USER=homeassistant 64 | - API_PASSWORD={{ secret_nut_api_pw }} 65 | restart: unless-stopped 66 | -------------------------------------------------------------------------------- /services/virtuosity/compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | portainer_agent: 3 | image: portainer/agent:2.30.1 4 | container_name: portainer_agent 5 | volumes: 6 | - /var/run/docker.sock:/var/run/docker.sock 7 | - /var/lib/docker/volumes:/var/lib/docker/volumes 8 | ports: 9 | - 9001:9001 10 | environment: 11 | - AGENT_SECRET={{ secret_portainer_key }} 12 | restart: unless-stopped 13 | ollama-server: 14 | image: ollama/ollama:0.7.1 15 | container_name: ollama-server 16 | volumes: 17 | - /mnt/llms/ollama:/root/.ollama 18 | ports: 19 | - 11434:11434 20 | deploy: 21 | resources: 22 | reservations: 23 | devices: 24 | - driver: nvidia 25 | count: 1 26 | capabilities: 27 | - gpu 28 | restart: unless-stopped 29 | open-webui: 30 | image: ghcr.io/open-webui/open-webui:v0.6.11 31 | 
container_name: open-webui 32 | extra_hosts: 33 | - host.docker.internal:host-gateway 34 | volumes: 35 | - "{{ appdata_path }}/ollama-webui:/app/backend/data" 36 | ports: 37 | - 3010:8080 38 | restart: unless-stopped 39 | stable-diffusion-webui: 40 | image: universonic/stable-diffusion-webui:minimal 41 | container_name: stable-diffusion-webui 42 | cap_add: 43 | - NET_BIND_SERVICE 44 | cap_drop: 45 | - ALL 46 | volumes: 47 | - "{{ appdata_path }}/stable-diffusion/inputs:/app/stable-diffusion-webui/inputs" 48 | - "{{ appdata_path }}/stable-diffusion/textual_inversion_templates:/app/stable-diffusion-webui/textual_inversion_templates" 49 | - "{{ appdata_path }}/stable-diffusion/embeddings:/app/stable-diffusion-webui/embeddings" 50 | - "{{ appdata_path }}/stable-diffusion/extensions:/app/stable-diffusion-webui/extensions" 51 | - /mnt/llms/stable-diffusion/models:/app/stable-diffusion-webui/models 52 | - /mnt/llms/stable-diffusion/outputs:/app/stable-diffusion-webui/outputs 53 | - "{{ appdata_path }}/stable-diffusion/localizations:/app/stable-diffusion-webui/localizations" 54 | ports: 55 | - 8080:8080/tcp 56 | command: --no-half --no-half-vae --precision full --medvram-sdxl --xformers --opt-sdp-attention --opt-sdp-no-mem-attention --opt-split-attention --opt-channelslast 57 | deploy: 58 | resources: 59 | reservations: 60 | devices: 61 | - driver: nvidia 62 | count: 1 63 | capabilities: 64 | - gpu 65 | restart: unless-stopped 66 | crowdsec: 67 | image: ghcr.io/crowdsecurity/crowdsec:v1.6.8 68 | container_name: crowdsec 69 | volumes: 70 | - /var/run/docker.sock:/var/run/docker.sock 71 | - "{{ appdata_path }}/crowdsec/data/acquis.yaml:/etc/crowdsec/acquis.yaml" 72 | - "{{ appdata_path }}/crowdsec/db:/var/lib/crowdsec/data/" 73 | environment: 74 | - COLLECTIONS=gauth-fr/immich 75 | - GID=1000 76 | - DISABLE_LOCAL_API=true 77 | - AGENT_USERNAME={{ hostname }} 78 | - AGENT_PASSWORD={{ secret_crowdsec_virtuosity_pw }} 79 | - LOCAL_API_URL=http://{{ secret_ambition_ip }}:8080 80 | restart: unless-stopped 81 | immich-server: 82 | image: ghcr.io/immich-app/immich-server:v1.134.0 83 | container_name: immich-server 84 | volumes: 85 | - /mnt/immich-uploads:/usr/src/app/upload 86 | - /mnt/photos:/mnt/photos:ro 87 | - /etc/localtime:/etc/localtime:ro 88 | ports: 89 | - 2283:2283 90 | environment: 91 | - DB_PASSWORD={{ secret_immich_db_pass }} 92 | - DB_USERNAME=postgres 93 | - DB_DATABASE_NAME=immich 94 | - REDIS_HOSTNAME=immich-redis 95 | - DB_HOSTNAME=immich-database 96 | depends_on: 97 | - immich-redis 98 | - immich-database 99 | deploy: 100 | resources: 101 | reservations: 102 | devices: 103 | - driver: nvidia 104 | count: 1 105 | capabilities: 106 | - gpu 107 | - compute 108 | - video 109 | restart: unless-stopped 110 | immich-machine-learning: 111 | image: ghcr.io/immich-app/immich-machine-learning:v1.134.0-cuda 112 | container_name: immich_machine_learning 113 | volumes: 114 | - "{{ appdata_path }}/immich/model-cache:/cache" 115 | depends_on: 116 | - immich-redis 117 | - immich-database 118 | deploy: 119 | resources: 120 | reservations: 121 | devices: 122 | - driver: nvidia 123 | count: 1 124 | capabilities: 125 | - gpu 126 | restart: unless-stopped 127 | immich-redis: 128 | image: docker.io/valkey/valkey:8-bookworm@sha256:ff21bc0f8194dc9c105b769aeabf9585fea6a8ed649c0781caeac5cb3c247884 129 | container_name: immich_redis 130 | restart: unless-stopped 131 | immich-database: 132 | image: 
ghcr.io/immich-app/postgres:14-vectorchord0.3.0-pgvectors0.2.0@sha256:fa4f6e0971f454cd95fec5a9aaed2ed93d8f46725cc6bc61e0698e97dba96da1 133 | container_name: immich_database 134 | volumes: 135 | - "{{ appdata_path }}/immich/database:/var/lib/postgresql/data" 136 | environment: 137 | - POSTGRES_PASSWORD={{ secret_immich_db_pass }} 138 | - POSTGRES_USER=postgres 139 | - POSTGRES_DB=immich 140 | - POSTGRES_INITDB_ARGS='--data-checksums' 141 | restart: unless-stopped 142 | whisper: 143 | image: lscr.io/linuxserver/faster-whisper:2.4.0-gpu 144 | container_name: whisper 145 | environment: 146 | - UID={{ main_uid }} 147 | - GID={{ main_gid }} 148 | - PUID={{ main_uid }} 149 | - PGID={{ main_gid }} 150 | - TZ={{ ntp_timezone }} 151 | - WHISPER_MODEL=base-int8 152 | - WHISPER_BEAM=5 153 | - WHISPER_LANG=en 154 | volumes: 155 | - "{{ appdata_path }}/whisper:/data" 156 | ports: 157 | - 10300:10300 158 | deploy: 159 | resources: 160 | reservations: 161 | devices: 162 | - driver: nvidia 163 | count: 1 164 | capabilities: [gpu] 165 | restart: unless-stopped 166 | -------------------------------------------------------------------------------- /templates/MQTT_Explorer.desktop.j2: -------------------------------------------------------------------------------- 1 | [Desktop Entry] 2 | Name=MQTT Explorer 3 | Exec=/home/{{ main_username }}/.local/bin/MQTT_Explorer.AppImage 4 | Icon=/home/{{ main_username }}/.local/bin/MQTT_Explorer.png 5 | Type=Application 6 | Categories=Utility;Network; -------------------------------------------------------------------------------- /templates/diun_config.yml.j2: -------------------------------------------------------------------------------- 1 | watch: 2 | workers: 20 3 | schedule: "0 */6 * * *" 4 | 5 | providers: 6 | docker: 7 | watchByDefault: false 8 | watchStopped: false 9 | 10 | notif: 11 | telegram: 12 | token: "{{ secret_diun_tg_api }}" 13 | chatIDs: 14 | - "{{ secret_diun_tg_chat }}" 15 | 16 | regopts: 17 | - name: "docker.io" 18 | selector: image 19 | username: fuzzymistborn 20 | password: "{{ secret_diun_docker_pass }}" 21 | - name: "ghcr.io" 22 | selector: image 23 | - name: "quay.io" 24 | selector: image 25 | - name: "registry.gitlab.com" 26 | selector: image -------------------------------------------------------------------------------- /templates/github.sh.j2: -------------------------------------------------------------------------------- 1 | #! 
/bin/sh 2 | 3 | eval `ssh-agent -s` 4 | ssh-add /home/{{ main_username }}/.ssh/github 5 | 6 | git config --global user.email "fuzzy@fuzzymistborn.com" 7 | git config --global user.name "FuzzyMistborn" -------------------------------------------------------------------------------- /templates/ssh_config.j2: -------------------------------------------------------------------------------- 1 | # Read more about SSH config files: https://linux.die.net/man/5/ssh_config 2 | Host omada 3 | HostName 192.168.1.15 4 | User {{ main_username }} 5 | IdentityFile ~/.ssh/desktop_lxcs 6 | 7 | Host adonalsium 8 | HostName 192.168.1.10 9 | User {{ main_username }} 10 | IdentityFile ~/.ssh/desktop_lxcs 11 | 12 | Host ishap 13 | HostName 192.168.1.5 14 | User {{ main_username }} 15 | IdentityFile ~/.ssh/desktop_lxcs 16 | 17 | Host endowment 18 | HostName 192.168.50.20 19 | User {{ main_username }} 20 | IdentityFile ~/.ssh/desktop_lxcs 21 | 22 | Host autonomy 23 | HostName 192.168.30.5 24 | User {{ main_username }} 25 | IdentityFile ~/.ssh/desktop_lxcs 26 | 27 | Host investiture 28 | HostName 192.168.50.15 29 | User {{ main_username }} 30 | IdentityFile ~/.ssh/desktop_lxcs 31 | 32 | Host cultivation 33 | HostName 192.168.50.21 34 | User {{ main_username }} 35 | IdentityFile ~/.ssh/desktop_lxcs 36 | 37 | Host preservation 38 | HostName 192.168.50.22 39 | User {{ main_username }} 40 | IdentityFile ~/.ssh/desktop_lxcs 41 | 42 | Host dominion 43 | HostName 192.168.10.50 44 | User {{ main_username }} 45 | IdentityFile ~/.ssh/desktop_lxcs 46 | 47 | Host honor 48 | HostName 192.168.50.10 49 | User {{ main_username }} 50 | IdentityFile ~/.ssh/desktop_lxcs 51 | 52 | Host invention 53 | HostName 192.168.30.7 54 | User {{ main_username }} 55 | IdentityFile ~/.ssh/desktop_lxcs 56 | 57 | Host whimsy 58 | HostName 192.168.10.13 59 | User {{ main_username }} 60 | IdentityFile ~/.ssh/desktop_lxcs 61 | 62 | Host unity 63 | HostName 192.168.1.11 64 | User {{ main_username }} 65 | IdentityFile ~/.ssh/desktop_lxcs 66 | 67 | Host identity 68 | HostName 192.168.50.23 69 | User {{ main_username }} 70 | IdentityFile ~/.ssh/desktop_lxcs 71 | 72 | Host virtuosity 73 | HostName 192.168.50.25 74 | User {{ main_username }} 75 | IdentityFile ~/.ssh/desktop_lxcs 76 | 77 | Host ambition 78 | HostName remote.fuzzymistborn.com 79 | User {{ main_username }} 80 | IdentityFile ~/.ssh/desktop_ambition 81 | 82 | Host github.com 83 | IdentityFile ~/.ssh/github 84 | -------------------------------------------------------------------------------- /update.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: shards ambition unity virtuosity 4 | tasks: 5 | - name: update all packages 6 | become: true 7 | apt: 8 | update_cache: yes 9 | cache_valid_time: 86400 10 | upgrade: 'yes' 11 | autoremove: yes 12 | 13 | - hosts: adonalsium ishap 14 | vars_files: 15 | - 'vars/vault.yaml' 16 | - 'group_vars/all.yaml' 17 | roles: 18 | - role: install/mergerfs 19 | become: true 20 | - role: install/runitor 21 | tasks: 22 | - name: update all packages 23 | become: true 24 | apt: 25 | update_cache: yes 26 | cache_valid_time: 86400 27 | upgrade: dist 28 | autoremove: yes 29 | 30 | - hosts: ambition 31 | vars_files: 32 | - 'vars/vault.yaml' 33 | - 'group_vars/all.yaml' 34 | roles: 35 | - role: install/lego 36 | - role: install/runitor 37 | 38 | - hosts: honor 39 | vars_files: 40 | - 'vars/vault.yaml' 41 | - 'group_vars/all.yaml' 42 | roles: 43 | - role: install/lego 44 | - role: install/runitor 45 | 46 | - hosts:
dominion omada 47 | vars_files: 48 | - 'vars/vault.yaml' 49 | - 'group_vars/all.yaml' 50 | roles: 51 | - role: install/runitor 52 | 53 | - hosts: endowment autonomy cultivation preservation 54 | vars_files: 55 | - 'vars/vault.yaml' 56 | - 'group_vars/all.yaml' 57 | roles: 58 | - role: install/runitor 59 | -------------------------------------------------------------------------------- /vault.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if bw unlock --check --session "$(cat /tmp/bw.token)" | grep -q 'Vault is unlocked!' ; then 4 | bw get password "Ansible Infra" --session "$(grep '^' /tmp/bw.token)" 5 | else 6 | bw unlock --raw > /tmp/bw.token 7 | bw get password "Ansible Infra" --session "$(grep '^' /tmp/bw.token)" 8 | fi 9 | --------------------------------------------------------------------------------
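A note on how vault.sh fits in: Ansible treats an executable vault password file as a script to run, reading the vault password from its stdout, so the Bitwarden lookup above can stand in for a plaintext .vault-password file. A minimal sketch of the relevant ansible.cfg wiring (hypothetical here, since ansible.cfg itself is not reproduced above; the keys are standard Ansible settings):

# ansible.cfg (sketch, assuming vault.sh sits at the repo root)
[defaults]
inventory = hosts.ini
# executable: Ansible runs it and reads the vault password from stdout
vault_password_file = ./vault.sh

With that in place, a run such as `ansible-playbook run.yml` decrypts vars/vault.yaml transparently, provided the Bitwarden CLI (`bw`) is installed and logged in so that `bw unlock --raw` can mint the session token that vault.sh caches in /tmp/bw.token.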