├── ansible.cfg
├── roles
├── libvirt
│ ├── defaults
│ │ └── main.yml
│ ├── README.md
│ └── tasks
│ │ └── main.yml
├── openssh
│ ├── defaults
│ │ └── main.yml
│ ├── README.md
│ └── tasks
│ │ ├── homed.yml
│ │ └── main.yml
├── nas
│ ├── defaults
│ │ └── main.yml
│ ├── templates
│ │ ├── btrfs_scrub_service_override.conf.j2
│ │ ├── btrfs_scrub_timer_override.conf.j2
│ │ ├── btrfs_scrub_report.sh.j2
│ │ └── smartd_notify.sh.j2
│ ├── tasks
│ │ ├── main.yml
│ │ ├── smartd.yml
│ │ ├── btrfs_scrub.yml
│ │ ├── raid.yml
│ │ ├── nfs.yml
│ │ └── samba.yml
│ └── README.md
├── gui
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── main.yml
│ │ ├── dotfiles.yml
│ │ ├── Fedora.yml
│ │ ├── flatpak.yml
│ │ ├── paru.yml
│ │ ├── snapper.yml
│ │ └── Archlinux.yml
│ ├── files
│ │ ├── failure-notification@.service
│ │ ├── flatpak-update.timer
│ │ └── flatpak-update.service
│ └── README.md
├── podman
│ ├── defaults
│ │ └── main.yml
│ ├── legacy
│ │ ├── nextcloud-cron.service.j2
│ │ ├── nextcloud-cron.timer.j2
│ │ ├── radarr.yml
│ │ ├── sonarr.yml
│ │ ├── deluge.yml
│ │ ├── radicale.yml
│ │ ├── prowlarr.yml
│ │ ├── thelounge.service.j2
│ │ ├── prowlarr.service.j2
│ │ ├── radarr.service.j2
│ │ ├── sonarr.service.j2
│ │ ├── deluge.service.j2
│ │ ├── nextcloud-pod.service.j2
│ │ ├── radicale.service.j2
│ │ ├── postgres.service.j2
│ │ ├── nextcloud.service.j2
│ │ ├── homeassistant.service.j2
│ │ ├── swag.yml
│ │ ├── swag.service.j2
│ │ ├── jellyfin.yml
│ │ ├── homeassistant.yml
│ │ ├── jellyfin.service.j2
│ │ └── nextcloud.yml
│ ├── templates
│ │ ├── podman-system-prune.timer.j2
│ │ ├── podman-system-prune.service.j2
│ │ ├── unifi-db-init-mongo.js.j2
│ │ ├── copy-ssl.service.j2
│ │ ├── paperless-ngx.pod.j2
│ │ ├── paperless-ngx-redis.container.j2
│ │ ├── grafana-prometheus.pod.j2
│ │ ├── unifi.pod.j2
│ │ ├── grafana.container.j2
│ │ ├── prometheus-server.container.j2
│ │ ├── unifi-db.container.j2
│ │ ├── letsencrypt.container.j2
│ │ ├── qbittorrent.container.j2
│ │ ├── syncthing.container.j2
│ │ ├── paperless-ngx.container.j2
│ │ ├── autobrr.container.j2
│ │ ├── unifi.container.j2
│ │ ├── thelounge.container.j2
│ │ ├── prometheus-node-exporter.container.j2
│ │ ├── transmission.container.j2
│ │ ├── tailscale-traefik.container.j2
│ │ ├── traefik.container.j2
│ │ ├── nextcloud-aio.container.j2
│ │ └── gluetun.container.j2
│ └── tasks
│ │ ├── main.yml
│ │ ├── podman_install.yml
│ │ ├── unifi-init.yml
│ │ ├── autobrr.yml
│ │ ├── thelounge.yml
│ │ ├── qbittorrent.yml
│ │ ├── transmission.yml
│ │ ├── nextcloud.yml
│ │ ├── letsencrypt.yml
│ │ ├── gluetun.yml
│ │ ├── paperless.yml
│ │ ├── syncthing.yml
│ │ ├── podman_setup.yml
│ │ ├── grafana_prometheus.yml
│ │ └── unifi.yml
├── auto-update
│ ├── tasks
│ │ ├── main.yml
│ │ ├── Archlinux.yml
│ │ ├── Fedora.yml
│ │ └── Debian.yml
│ ├── files
│ │ └── auto-update.service
│ ├── templates
│ │ ├── auto-update.timer.j2
│ │ ├── dnf5-automatic-timer-override.conf.j2
│ │ └── auto-update.sh.j2
│ └── README.md
├── docker
│ └── tasks
│ │ ├── main.yml
│ │ ├── docker_compose.yml
│ │ └── Debian_install.yml
├── nut
│ ├── templates
│ │ ├── 50-ups.rules.j2
│ │ ├── nut_notify.sh.j2
│ │ └── msmtprc.j2
│ ├── README.md
│ └── tasks
│ │ └── main.yml
├── archlinux_common
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── paru.yml
│ │ ├── snapper.yml
│ │ └── main.yml
│ ├── files
│ │ └── zz-signed_uki_backup.hook
│ └── README.md
├── wpa_supplicant
│ ├── tasks
│ │ ├── Archlinux_prepare.yml
│ │ ├── Debian_prepare.yml
│ │ ├── main.yml
│ │ └── Fedora_prepare.yml
│ ├── files
│ │ └── wpa_supplicant@fedora.service
│ └── README.md
├── systemd_networkd
│ ├── tasks
│ │ ├── Archlinux-prepare.yml
│ │ ├── Fedora-prepare.yml
│ │ ├── Debian-prepare.yml
│ │ └── main.yml
│ ├── templates
│ │ └── en0.network.j2
│ └── README.md
└── msmtp
│ ├── tasks
│ └── main.yml
│ ├── templates
│ └── msmtprc.j2
│ └── README.md
├── inventory.yml
├── iommu.sh
├── virsh_undefine.sh
├── headless_example.yml
├── .gitignore
├── tasks
└── update.yml
├── gui_example.yml
├── homed.sh
├── virt-install_arch.sh
├── mkarchiso.sh
├── fedora_post_install.sh
├── debian_post_install.sh
└── host_vars
├── gui_example.yml
└── headless_example.yml

--------------------------------------------------------------------------------
/ansible.cfg:
--------------------------------------------------------------------------------
[defaults]
inventory = inventory.yml

--------------------------------------------------------------------------------
/roles/libvirt/defaults/main.yml:
--------------------------------------------------------------------------------
---
homed: false

--------------------------------------------------------------------------------
/roles/openssh/defaults/main.yml:
--------------------------------------------------------------------------------
---
homed: false

--------------------------------------------------------------------------------
/roles/nas/defaults/main.yml:
--------------------------------------------------------------------------------
---
#firewalld_default_zone: public

--------------------------------------------------------------------------------
/roles/gui/defaults/main.yml:
--------------------------------------------------------------------------------
---
homed: false
paru_chroot: false

--------------------------------------------------------------------------------
/roles/podman/defaults/main.yml:
--------------------------------------------------------------------------------
---
homed: false
#firewalld_default_zone: public

--------------------------------------------------------------------------------
/roles/gui/tasks/main.yml:
--------------------------------------------------------------------------------
---
- include_tasks: "{{ ansible_facts.distribution }}.yml"

--------------------------------------------------------------------------------
/roles/auto-update/tasks/main.yml:
--------------------------------------------------------------------------------
---
- include_tasks: "{{ ansible_facts.distribution }}.yml"

--------------------------------------------------------------------------------
/roles/nas/templates/btrfs_scrub_service_override.conf.j2:
--------------------------------------------------------------------------------
[Service]
ExecStop=/usr/local/bin/btrfs_scrub_report.sh {{ item.path }}

--------------------------------------------------------------------------------
/roles/docker/tasks/main.yml:
--------------------------------------------------------------------------------
---
- include_tasks: "{{ ansible_facts.distribution }}_install.yml"

- import_tasks: docker_compose.yml

--------------------------------------------------------------------------------
/roles/nut/templates/50-ups.rules.j2:
--------------------------------------------------------------------------------
SUBSYSTEM=="usb", ATTR{idVendor}=="{{ ups_vender_id }}", ATTR{idProduct}=="{{ ups_product_id }}", GROUP="nut"
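The udev rule above tags the UPS's USB device so the `nut` group can access it; `ups_vender_id` and `ups_product_id` come from host variables. A quick way to look the two IDs up (the device shown here is hypothetical):

```sh
$ lsusb
Bus 001 Device 004: ID 0764:0501 Cyber Power System, Inc. CP1500 AVR UPS
#                      ^^^^ ^^^^
#                      idVendor:idProduct
```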
--------------------------------------------------------------------------------
/roles/auto-update/files/auto-update.service:
--------------------------------------------------------------------------------
[Unit]
Description=upgrade system

[Service]
Type=simple
ExecStart=/usr/local/bin/auto-update.sh

--------------------------------------------------------------------------------
/roles/archlinux_common/defaults/main.yml:
--------------------------------------------------------------------------------
---
snapper_root_hourly: 5
snapper_root_daily: 7
snapper_root_weekly: 0
snapper_root_monthly: 0
snapper_root_yearly: 0

--------------------------------------------------------------------------------
/roles/wpa_supplicant/tasks/Archlinux_prepare.yml:
--------------------------------------------------------------------------------
---
- name: pacman -S wpa_supplicant
  community.general.pacman: name=wpa_supplicant state=present
  become: true

--------------------------------------------------------------------------------
/roles/nas/templates/btrfs_scrub_timer_override.conf.j2:
--------------------------------------------------------------------------------
[Timer]
OnCalendar=
OnCalendar={{ btrfs_scrub_time }}
AccuracySec=1min
RandomizedDelaySec=0
Persistent=false

--------------------------------------------------------------------------------
/roles/auto-update/templates/auto-update.timer.j2:
--------------------------------------------------------------------------------
[Unit]
Description=upgrade system

[Timer]
OnCalendar={{ auto_update_time }}

[Install]
WantedBy=timers.target

--------------------------------------------------------------------------------
/roles/podman/legacy/nextcloud-cron.service.j2:
--------------------------------------------------------------------------------
[Unit]
Description=Nextcloud cron.php job

[Service]
ExecStart=/usr/bin/podman exec nextcloud /usr/local/bin/php -f /var/www/html/cron.php

--------------------------------------------------------------------------------
/roles/gui/files/failure-notification@.service:
--------------------------------------------------------------------------------
[Unit]
Description=Send a notification about a failed systemd unit

[Service]
Type=simple
ExecStart=/usr/bin/notify-send "service %i failed"

--------------------------------------------------------------------------------
/roles/gui/files/flatpak-update.timer:
--------------------------------------------------------------------------------
[Unit]
Description=Update Flatpak

[Timer]
OnBootSec=1m
OnCalendar=daily
Persistent=true

[Install]
WantedBy=timers.target

--------------------------------------------------------------------------------
/roles/auto-update/templates/dnf5-automatic-timer-override.conf.j2:
--------------------------------------------------------------------------------
[Timer]
OnCalendar=
OnCalendar={{ auto_update_time }}
RandomizedDelaySec=
RandomizedDelaySec=0
Persistent=
Persistent=false

--------------------------------------------------------------------------------
/roles/nas/templates/btrfs_scrub_report.sh.j2:
--------------------------------------------------------------------------------
#!/usr/bin/bash

echo -e "To: {{ msmtp_to }}\nFrom: {{ msmtp_from }}\nSubject: btrfs scrub report\n\n$(btrfs scrub status $1)" | msmtp --read-recipients --read-envelope-from
--------------------------------------------------------------------------------
/roles/nut/templates/nut_notify.sh.j2:
--------------------------------------------------------------------------------
#!/usr/bin/bash

echo -e "To: {{ msmtp_to }}\nFrom: {{ msmtp_from }}\nSubject: NUT notification\n\n$1" | msmtp --file='/etc/nut/msmtprc' --read-recipients --read-envelope-from

--------------------------------------------------------------------------------
/roles/nas/templates/smartd_notify.sh.j2:
--------------------------------------------------------------------------------
#!/usr/bin/bash

echo -e "To: {{ msmtp_to }}\nFrom: {{ msmtp_from }}\nSubject: S.M.A.R.T Error ${SMARTD_FAILTYPE}\n\n${SMARTD_FULLMESSAGE}" | msmtp --read-recipients --read-envelope-from

--------------------------------------------------------------------------------
/inventory.yml:
--------------------------------------------------------------------------------
---
all:
  hosts:

    headless_example:
      ansible_host: 192.168.122.2
      ansible_user: tux
      ansible_port: 22
      ansible_ssh_private_key_file: /home/tux/.ssh/id_ed25519

--------------------------------------------------------------------------------
/roles/podman/templates/podman-system-prune.timer.j2:
--------------------------------------------------------------------------------
[Unit]
Description=podman system prune timer

[Timer]
OnCalendar={{ podman_user.podman_system_prune_timer }}

[Install]
WantedBy=timers.target

--------------------------------------------------------------------------------
/roles/podman/templates/podman-system-prune.service.j2:
--------------------------------------------------------------------------------
[Unit]
Description=Remove all unused pods, containers, images, networks, and volume data

[Service]
ExecStart=/usr/bin/podman system prune --all --force --filter "until=240h"

--------------------------------------------------------------------------------
/roles/podman/tasks/main.yml:
--------------------------------------------------------------------------------
---
- import_tasks: podman_install.yml

- include_tasks: podman_setup.yml
  loop: "{{ podman_users }}"
  loop_control:
    loop_var: podman_user
  when: podman_users is defined

--------------------------------------------------------------------------------
/roles/podman/legacy/nextcloud-cron.timer.j2:
--------------------------------------------------------------------------------
[Unit]
Description=Run Nextcloud cron.php every 5 minutes

[Timer]
OnBootSec=5min
OnUnitActiveSec=5min
Unit=nextcloud-cron.service

[Install]
WantedBy=timers.target

--------------------------------------------------------------------------------
/roles/libvirt/README.md:
--------------------------------------------------------------------------------
Set up [libvirt](https://wiki.archlinux.org/title/Libvirt) on Arch Linux.

## Tasks
- Install libvirt and qemu packages.
- Disable copy on write (COW) on the `/var/lib/libvirt/images` directory when using btrfs.
- Enable default NAT.
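For reference, the COW-disabling step amounts to something like the following (a sketch; the `C` attribute only affects files created after it is set, so it is best applied while the directory is still empty):

```sh
# Disable copy-on-write for newly created VM images on btrfs
chattr +C /var/lib/libvirt/images
lsattr -d /var/lib/libvirt/images   # should now show the 'C' flag
```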
--------------------------------------------------------------------------------
/roles/docker/tasks/docker_compose.yml:
--------------------------------------------------------------------------------
---
- name: Copy docker-compose.yml
  ansible.builtin.copy:
    src: "{{ docker_compose_file }}"
    dest: /opt/docker-compose.yml
    owner: root
    group: root
    mode: '0600'
  become: true

--------------------------------------------------------------------------------
/iommu.sh:
--------------------------------------------------------------------------------
#!/usr/bin/bash
shopt -s nullglob
for g in $(find /sys/kernel/iommu_groups/* -maxdepth 0 -type d | sort -V); do
    echo "IOMMU Group ${g##*/}:"
    for d in $g/devices/*; do
        echo -e "\t$(lspci -nns ${d##*/})"
    done;
done;

--------------------------------------------------------------------------------
/roles/systemd_networkd/tasks/Archlinux-prepare.yml:
--------------------------------------------------------------------------------
---
- name: Delete default network configuration on Archlinux
  ansible.builtin.file:
    path: /etc/systemd/network/20-ethernet.network
    state: absent
  become: true
  when: ansible_facts["distribution"] == "Archlinux"

--------------------------------------------------------------------------------
/roles/wpa_supplicant/tasks/Debian_prepare.yml:
--------------------------------------------------------------------------------
---
- name: apt install wpasupplicant
  ansible.builtin.apt: name=wpasupplicant state=present
  become: true

- name: systemctl daemon-reload
  ansible.builtin.systemd:
    daemon_reload: true
  become: true

--------------------------------------------------------------------------------
/roles/archlinux_common/tasks/paru.yml:
--------------------------------------------------------------------------------
---
- name: Create paru directory
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    mode: '0755'
  loop:
    - "{{ ansible_facts.user_dir }}/.cache/paru/clone"
    - "{{ ansible_facts.user_dir }}/.cache/paru/chroot"

--------------------------------------------------------------------------------
/roles/podman/templates/unifi-db-init-mongo.js.j2:
--------------------------------------------------------------------------------
db.getSiblingDB("unifi").createUser({user: "unifi", pwd: "{{ podman_user.unifi_db_pass }}", roles: [{role: "dbOwner", db: "unifi"}]});
db.getSiblingDB("unifi_stat").createUser({user: "unifi", pwd: "{{ podman_user.unifi_db_pass }}", roles: [{role: "dbOwner", db: "unifi_stat"}]});

--------------------------------------------------------------------------------
/roles/archlinux_common/files/zz-signed_uki_backup.hook:
--------------------------------------------------------------------------------
[Trigger]
Operation = Upgrade
Operation = Install
Operation = Remove
Type = Package
Target = *

[Action]
Depends = rsync
Description = Backing up /efi...
When = PostTransaction
Exec = /usr/bin/rsync --archive --delete /efi/ /.efibackup

--------------------------------------------------------------------------------
/roles/msmtp/tasks/main.yml:
--------------------------------------------------------------------------------
---
- name: pacman -S msmtp
  community.general.pacman: name=msmtp state=present
  become: true

- name: msmtp user config
  ansible.builtin.template:
    src: msmtprc.j2
    dest: /root/.msmtprc
    owner: root
    group: root
    mode: '0600'
  become: true

--------------------------------------------------------------------------------
/virsh_undefine.sh:
--------------------------------------------------------------------------------
#!/usr/bin/bash
# Remove a virtual machine and its storage

if [[ -z $1 ]] ; then
    echo "ERROR, please provide VM name/domain."
    echo "virsh_undefine.sh VM_name"
    exit 1
fi

virsh destroy "$1"
sleep 1
virsh undefine "$1" --nvram --storage "/var/lib/libvirt/images/$1.qcow2"

--------------------------------------------------------------------------------
/roles/systemd_networkd/templates/en0.network.j2:
--------------------------------------------------------------------------------
[Match]
Name={{ networkd_static.nic }}

[Network]
DHCP=no
Address={{ networkd_static.ip }}
{% if networkd_static.gateway is defined %}
Gateway={{ networkd_static.gateway }}
{% endif %}
{% if networkd_static.dns is defined %}
DNS={{ networkd_static.dns }}
{% endif %}

--------------------------------------------------------------------------------
/headless_example.yml:
--------------------------------------------------------------------------------
---
- hosts: headless_example

  vars_files:
    - host_vars/headless_example.yml

  pre_tasks:
    - import_tasks: tasks/update.yml

  roles:
    - archlinux_common
    - openssh
    - systemd_networkd
    - nas
    - msmtp
    - auto-update
    - nut
    - podman

--------------------------------------------------------------------------------
/roles/podman/templates/copy-ssl.service.j2:
--------------------------------------------------------------------------------
[Unit]
Description=Copy SSL certificate to Traefik

[Service]
Restart=no
Type=simple
ExecStart=/usr/bin/rsync \
    --recursive \
    --copy-links \
    --delete \
    --chown=traefik:traefik \
    {{ podman_user.letsencrypt_config_dir }}/live/ {{ podman_user.traefik_config_dir }}/ssl

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
*
!/.gitignore
!/LICENSE
!/README.md

!/ansible.cfg
!/inventory.yml
!*_example*

!/roles
!/roles/**
!/tasks
!/tasks/*
!/host_vars
!/host_vars/*example.yml

!/arch_install.sh
!/arch_install_bcachefs.sh
!/debian_post_install.sh
!/fedora_post_install.sh
!/homed.sh
!/iommu.sh
!/mkarchiso.sh
!/virsh_undefine.sh
!/virt-install_arch.sh

--------------------------------------------------------------------------------
/roles/nut/templates/msmtprc.j2:
--------------------------------------------------------------------------------
# Set default values for all following accounts.
defaults
auth on
tls_trust_file /etc/ssl/certs/ca-certificates.crt
logfile /etc/nut/msmtp.log

account {{ msmtp_account }}
host {{ msmtp_host }}
port {{ msmtp_port }}
tls {{ msmtp_tls }}
tls_starttls {{ msmtp_tls_starttls }}
from {{ msmtp_from }}
user {{ msmtp_user }}
password {{ msmtp_password }}

--------------------------------------------------------------------------------
/roles/msmtp/templates/msmtprc.j2:
--------------------------------------------------------------------------------
# Set default values for all following accounts.
defaults
auth on
tls_trust_file /etc/ssl/certs/ca-certificates.crt
logfile /var/log/msmtp.log

account {{ msmtp_account }}
host {{ msmtp_host }}
port {{ msmtp_port }}
tls {{ msmtp_tls }}
tls_starttls {{ msmtp_tls_starttls }}
from {{ msmtp_from }}
user {{ msmtp_user }}
password {{ msmtp_password }}

--------------------------------------------------------------------------------
/roles/wpa_supplicant/files/wpa_supplicant@fedora.service:
--------------------------------------------------------------------------------
[Unit]
Description=WPA supplicant daemon (interface-specific version)
Requires=sys-subsystem-net-devices-%i.device
After=sys-subsystem-net-devices-%i.device
Before=network.target
Wants=network.target

# NetworkManager users will probably want the dbus version instead.

[Service]
Type=simple
ExecStart=/usr/sbin/wpa_supplicant -c/etc/wpa_supplicant/wpa_supplicant-%I.conf -i%I

[Install]
WantedBy=multi-user.target

--------------------------------------------------------------------------------
/roles/gui/files/flatpak-update.service:
--------------------------------------------------------------------------------
[Unit]
Description=Update Flatpak and clean up unused runtimes and applications.
Wants=network-online.target
After=network-online.target nss-lookup.target
StartLimitIntervalSec=3h
StartLimitBurst=5
OnFailure=failure-notification@%n

[Service]
Type=oneshot
#ExecStart=ping -c 1 flathub.org
ExecStart=/usr/bin/flatpak update --user --noninteractive --assumeyes
ExecStart=/usr/bin/flatpak uninstall --user --unused --noninteractive --assumeyes
Restart=on-failure
RestartSec=30min

--------------------------------------------------------------------------------
/tasks/update.yml:
--------------------------------------------------------------------------------
---
- name: pacman -Syu
  community.general.pacman:
    update_cache: true
    upgrade: true
  become: true
  when: ansible_facts["distribution"] == "Archlinux"

- name: dnf update
  ansible.builtin.dnf:
    name: "*"
    state: latest
  become: true
  when: ansible_facts["distribution"] == "Fedora"

- name: apt update && apt dist-upgrade
  ansible.builtin.apt:
    update_cache: true
    upgrade: dist
  become: true
  when: ansible_facts["distribution"] == "Debian"

--------------------------------------------------------------------------------
/roles/podman/templates/paperless-ngx.pod.j2:
--------------------------------------------------------------------------------
[Unit]
Description=Paperless-ngx and Redis Pod

[Pod]
PodName=paperless-ngx

UIDMap=1000:0:1
UIDMap=0:1:1000
UIDMap=1001:1001:64536

HostName=paperless-ngx
PublishPort=127.0.0.1:{{ podman_user.paperless_webui_port }}:{{ podman_user.paperless_webui_port }}/tcp


[Service]
Restart=on-failure
RestartSec=5
RestartMaxDelaySec=1h
RestartSteps=10

# Extend Timeout to allow time to pull the image
TimeoutStartSec=300

[Install]
WantedBy=default.target

--------------------------------------------------------------------------------
/roles/wpa_supplicant/tasks/main.yml:
--------------------------------------------------------------------------------
---
- include_tasks: "{{ ansible_facts.distribution }}_prepare.yml"

- name: Copy wpa_supplicant configs
  ansible.builtin.copy:
    src: "{{ wpa_supplicant_config_file }}"
    dest: "/etc/wpa_supplicant/wpa_supplicant-{{ wireless_interface }}.conf"
    owner: root
    group: root
    mode: '0600'
  become: true

- name: systemctl enable --now wpa_supplicant@{{ wireless_interface }}.service
  ansible.builtin.systemd: name=wpa_supplicant@{{ wireless_interface }}.service state=started enabled=true
  become: true

--------------------------------------------------------------------------------
/roles/wpa_supplicant/tasks/Fedora_prepare.yml:
--------------------------------------------------------------------------------
---
- name: dnf install wpa_supplicant
  ansible.builtin.dnf: name=wpa_supplicant state=present
  become: true

# Fedora does not ship wpa_supplicant@.service; create one
- name: Create wpa_supplicant@.service
  ansible.builtin.copy:
    src: wpa_supplicant@fedora.service
    dest: /etc/systemd/system/wpa_supplicant@.service
    owner: root
    group: root
    mode: '0644'
  become: true

- name: systemctl daemon-reload
  ansible.builtin.systemd:
    daemon_reload: true
  become: true
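The tasks above copy a user-supplied configuration to `/etc/wpa_supplicant/wpa_supplicant-<interface>.conf` and enable the matching service instance. A minimal example of such a file, assuming a plain WPA2-PSK network (SSID and passphrase are placeholders):

```
ctrl_interface=/run/wpa_supplicant
update_config=1

network={
    ssid="MyHomeNetwork"
    psk="correct-horse-battery-staple"
}
```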
--------------------------------------------------------------------------------
/roles/openssh/README.md:
--------------------------------------------------------------------------------
Harden the OpenSSH server.

## Tasks
- Force public key authentication and disable password login.
- Optionally, limit which users are allowed to log in.
- Optionally, set up firewall rules.

## Variables
```yaml
# Limit login users if defined
# AllowUsers in /etc/ssh/sshd_config
#ssh_allowusers: 'user1 user2 user3'


# Set the host key
# HostKey in /etc/ssh/sshd_config
#ssh_hostkey: ed25519


# Only allow SSH connections from these IP addresses
#ssh_accept_source_ipv4:
# - 192.168.122.0/24
# - 192.168.123.1
```

--------------------------------------------------------------------------------
/roles/nas/tasks/main.yml:
--------------------------------------------------------------------------------
---
- name: "import_tasks: raid.yml"
  import_tasks: raid.yml
  when: crypttab_entries is defined or fstab_entries is defined

- name: "import_tasks: btrfs_scrub.yml"
  import_tasks: btrfs_scrub.yml
  when: btrfs_scrub_path is defined

- name: "import_tasks: smartd.yml"
  import_tasks: smartd.yml
  when: smartd_time is defined

- name: "import_tasks: nfs.yml"
  import_tasks: nfs.yml
  when: nfs_mount_point is defined

- name: "include_tasks: samba.yml"
  include_tasks: samba.yml
  when: smb_share is defined

--------------------------------------------------------------------------------
/roles/podman/templates/paperless-ngx-redis.container.j2:
--------------------------------------------------------------------------------
[Unit]
Description=Paperless-ngx Redis container

[Container]
ContainerName=paperless-ngx-redis
Pod=paperless-ngx.pod
Image=docker.io/library/redis:latest
AutoUpdate=registry
Timezone={{ TZ }}

User=1000
Group=1000

Volume={{ podman_user.paperless_config_dir }}/redis:/data:Z


[Service]
Restart=on-failure
RestartSec=5
RestartMaxDelaySec=1h
RestartSteps=10

# Extend Timeout to allow time to pull the image
TimeoutStartSec=300

[Install]
WantedBy=default.target

--------------------------------------------------------------------------------
/roles/podman/templates/grafana-prometheus.pod.j2:
--------------------------------------------------------------------------------
[Unit]
Description=Grafana and Prometheus Pod
Wants=network-online.target
After=network-online.target nss-lookup.target

[Pod]
PodName=grafana-prometheus

UserNS=keep-id

HostName=grafana-prometheus
PublishPort=127.0.0.1:{{ podman_user.grafana_webui_port }}:{{ podman_user.grafana_webui_port }}/tcp


[Service]
Restart=on-failure
RestartSec=5
RestartMaxDelaySec=1h
RestartSteps=10

# Extend Timeout to allow time to pull the image
TimeoutStartSec=300

[Install]
WantedBy=default.target

--------------------------------------------------------------------------------
/roles/podman/templates/unifi.pod.j2:
--------------------------------------------------------------------------------
[Unit]
Description=UniFi and MongoDB Pod

[Pod]
PodName=unifi

HostName=unifi
PublishPort=127.0.0.1:{{ podman_user.unifi_webui_port }}:{{ podman_user.unifi_webui_port }}/tcp
PublishPort=3478:3478/udp
PublishPort=10001:10001/udp
PublishPort=8080:8080/tcp

UIDMap=1000:0:1
UIDMap=0:1:1000
UIDMap=1001:1001:64536


[Service]
Restart=on-failure
RestartSec=5
RestartMaxDelaySec=1h
RestartSteps=10

# Extend Timeout to allow time to pull the image
TimeoutStartSec=300

[Install]
WantedBy=default.target

--------------------------------------------------------------------------------
/roles/systemd_networkd/tasks/Fedora-prepare.yml:
--------------------------------------------------------------------------------
---
- name: dnf install systemd-networkd
  ansible.builtin.dnf: name=systemd-networkd state=present
  become: true

- name: systemctl disable NetworkManager
  ansible.builtin.systemd: name=NetworkManager enabled=false
  become: true

- name: systemctl start systemd-resolved
  ansible.builtin.systemd: name=systemd-resolved state=started
  become: true

- name: ln -s /run/systemd/resolve/stub-resolv.conf /etc/resolv.conf
  ansible.builtin.file:
    src: /run/systemd/resolve/stub-resolv.conf
    dest: /etc/resolv.conf
    state: link
  become: true

--------------------------------------------------------------------------------
/gui_example.yml:
--------------------------------------------------------------------------------
---
- hosts: localhost
  connection: local

  vars_prompt:
    - name: ansible_become_password
      private: yes

  vars_files:
    - host_vars/gui_example.yml

  pre_tasks:
    #- name: Enable multilib repo
    #  blockinfile:
    #    path: /etc/pacman.conf
    #    insertafter: '^#\s*\[multilib\]'
    #    block: |
    #      [multilib]
    #      Include = /etc/pacman.d/mirrorlist
    #  become: yes

    - import_tasks: tasks/update.yml

  # Check more roles under roles/ directory
  roles:
    - archlinux_common
    - gui
    - podman

--------------------------------------------------------------------------------
/roles/wpa_supplicant/README.md:
--------------------------------------------------------------------------------
Set up [wpa_supplicant](https://wiki.archlinux.org/title/Wpa_supplicant) __when using systemd-networkd__.

## Tasks
### Arch Linux
- Install `wpa_supplicant`.
- Copy wpa_supplicant configuration file.
- Enable `wpa_supplicant@interface.service`.

### Fedora
- Install `wpa_supplicant`.
- Create `wpa_supplicant@.service` file.
- Copy wpa_supplicant configuration file.
- Enable `wpa_supplicant@interface.service`.


## Variables
```yaml
# The wpa_supplicant configuration file
wpa_supplicant_config_file: "files/wpa_supplicant.conf"

# wireless NIC name
wireless_interface: wlan0
```

--------------------------------------------------------------------------------
/homed.sh:
--------------------------------------------------------------------------------
#!/usr/bin/bash

systemctl enable --now systemd-homed.service
read -p "Tell me your username: " username
read -p "uid: (default 1000)" uid
uid="${uid:-1000}"
read -p "Tell me the filesystem inside your home directory (btrfs or ext4): " fstype
homectl create "$username" --uid="$uid" --member-of=wheel --shell=/bin/bash --storage=luks --fs-type="$fstype"

read -p "Do you want to disable root account? [y/N] " disable_root
disable_root="${disable_root:-n}"
disable_root="${disable_root,,}"
if [[ $disable_root == y ]] ; then
    # https://wiki.archlinux.org/title/Sudo#Disable_root_login
    echo "Disabling root ..."
    passwd -d root
    passwd -l root
fi

--------------------------------------------------------------------------------
/roles/podman/templates/grafana.container.j2:
--------------------------------------------------------------------------------
[Unit]
Description=Grafana container
Wants=network-online.target
After=network-online.target nss-lookup.target

[Container]
ContainerName=grafana
Pod=grafana-prometheus.pod
Image=docker.io/grafana/grafana:latest
AutoUpdate=registry
Timezone={{ TZ }}

User={{ podman_user.uid }}
Group={{ podman_user.uid }}

Volume={{ podman_user.grafana_prometheus_config_dir }}/grafana:/var/lib/grafana:Z


[Service]
Restart=on-failure
RestartSec=5
RestartMaxDelaySec=1h
RestartSteps=10

# Extend Timeout to allow time to pull the image
TimeoutStartSec=300

[Install]
WantedBy=default.target

--------------------------------------------------------------------------------
/roles/nas/tasks/smartd.yml:
--------------------------------------------------------------------------------
---
- name: pacman -S smartmontools
  community.general.pacman: name=smartmontools state=present
  become: true

- name: /etc/smartd.conf
  ansible.builtin.lineinfile:
    path: /etc/smartd.conf
    regexp: '^DEVICESCAN'
    line: "DEVICESCAN -m -M daily -M exec /usr/local/bin/smartd_notify.sh -s {{ smartd_time }}"
    state: present
  become: true

- name: /usr/local/bin/smartd_notify.sh
  ansible.builtin.template:
    src: smartd_notify.sh.j2
    dest: /usr/local/bin/smartd_notify.sh
    owner: root
    group: root
    mode: '0700'
  become: true

- name: systemctl enable smartd.service
  ansible.builtin.systemd: name=smartd enabled=true
  become: true

--------------------------------------------------------------------------------
/virt-install_arch.sh:
--------------------------------------------------------------------------------
#!/usr/bin/bash
# Install an Arch Linux virtual machine with the given name/domain.
# No graphics; serial output only.

if [[ -z $1 ]] ; then
    echo "ERROR, please provide VM name/domain."
7 | echo "virt-install_arch.sh VM_name" 8 | exit 1 9 | fi 10 | 11 | virt-install \ 12 | --name "$1" \ 13 | --memory 2048 \ 14 | --sysinfo host \ 15 | --cpu host-passthrough,cache.mode=passthrough,topology.sockets=1,topology.cores=4,topology.threads=2 \ 16 | --graphics none \ 17 | --autoconsole text \ 18 | --os-variant name='archlinux' \ 19 | --cdrom "/tmp/archlinux-$(date +'%Y.%m.%d')-x86_64.iso" \ 20 | --network network=default,model.type=virtio \ 21 | --boot uefi \ 22 | --disk path="/var/lib/libvirt/images/$1.qcow2",size=16,bus=virtio \ 23 | --tpm default \ 24 | 25 | -------------------------------------------------------------------------------- /roles/systemd_networkd/tasks/Debian-prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: apt install systemd-resolved 3 | ansible.builtin.apt: name=systemd-resolved state=present 4 | become: true 5 | when: ansible_facts["distribution"] == "Debian" 6 | 7 | - name: Check /etc/network/interfaces on Debian 8 | ansible.builtin.stat: 9 | path: /etc/network/interfaces 10 | register: network_interfaces 11 | become: true 12 | when: ansible_facts["distribution"] == "Debian" 13 | 14 | - name: Remove /etc/network/interfaces on Debian 15 | ansible.builtin.command: mv /etc/network/interfaces /etc/network/interfaces.save 16 | become: true 17 | when: ansible_facts["distribution"] == "Debian" and network_interfaces.stat.exists 18 | 19 | - name: systemctl disable NetworkManager 20 | ansible.builtin.systemd: name=NetworkManager enabled=false 21 | become: true 22 | 23 | -------------------------------------------------------------------------------- /roles/podman/templates/prometheus-server.container.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Prometheus server container 3 | Wants=network-online.target 4 | After=network-online.target nss-lookup.target 5 | 6 | [Container] 7 | ContainerName=prometheus-server 8 | Pod=grafana-prometheus.pod 9 | Image=quay.io/prometheus/prometheus:latest 10 | AutoUpdate=registry 11 | Timezone={{ TZ }} 12 | 13 | User={{ podman_user.uid }} 14 | Group={{ podman_user.uid }} 15 | 16 | Volume={{ podman_user.grafana_prometheus_config_dir }}/prometheus:/prometheus:Z 17 | Volume={{ podman_user.grafana_prometheus_config_dir }}/prometheus.yml:/etc/prometheus/prometheus.yml:Z 18 | 19 | 20 | [Service] 21 | Restart=on-failure 22 | RestartSec=5 23 | RestartMaxDelaySec=1h 24 | RestartSteps=10 25 | 26 | # Extend Timeout to allow time to pull the image 27 | TimeoutStartSec=300 28 | 29 | [Install] 30 | WantedBy=default.target 31 | -------------------------------------------------------------------------------- /roles/podman/templates/unifi-db.container.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=MongoDB for UniFi container 3 | 4 | [Container] 5 | ContainerName=unifi-db 6 | Pod=unifi.pod 7 | Image=docker.io/mongo:7.0 8 | AutoUpdate=registry 9 | Timezone={{ TZ }} 10 | 11 | User=1000 12 | Group=1000 13 | 14 | Volume={{ podman_user.unifi_config_dir }}/unifi-db:/data/db:Z 15 | {% if not unifi_database_dir.stat.exists %} 16 | Volume={{ unifi_db_tmp_dir.path }}/init-mongo.js:/docker-entrypoint-initdb.d/init-mongo.js:Z,ro 17 | {% endif %} 18 | 19 | 20 | [Service] 21 | Restart=on-failure 22 | RestartSec=5 23 | RestartMaxDelaySec=1h 24 | RestartSteps=10 25 | 26 | # Remove unifi-db container and all containers depend on unifi-db 27 | ExecStartPre=-/usr/bin/podman rm 
# Extend Timeout to allow time to pull the image
TimeoutStartSec=300

[Install]
WantedBy=default.target

--------------------------------------------------------------------------------
/roles/auto-update/README.md:
--------------------------------------------------------------------------------
Update the system and reboot if necessary.

## Variables

### Arch Linux
Updates with the script [`auto-update.sh`](templates/auto-update.sh.j2).
To enable email notifications, set up [`roles/msmtp`](/roles/msmtp/).
This will send the `pacman -Syu` log to the email address specified in [`roles/msmtp`](/roles/msmtp/).
```yaml
# Auto-update time, in the format of systemd-timer OnCalendar=
auto_update_time: '01:00:00'
```

### Debian
Set up [unattended upgrades](https://wiki.debian.org/UnattendedUpgrades).
```yaml
# Optional auto-update time, in the format of systemd-timer OnCalendar=
#auto_update_time: '01:00:00'
```

### Fedora
Set up [dnf-automatic](https://dnf.readthedocs.io/en/latest/automatic.html), with the specified reboot time.
```yaml
# Optional auto-update time, in the format of systemd-timer OnCalendar=
#auto_update_time: '01:00:00'
```

--------------------------------------------------------------------------------
/roles/podman/templates/letsencrypt.container.j2:
--------------------------------------------------------------------------------
[Unit]
Description=letsencrypt certbot container
Wants=network-online.target
After=network-online.target nss-lookup.target

[Container]
ContainerName=letsencrypt-certbot
Image=docker.io/certbot/dns-cloudflare:latest
Timezone={{ TZ }}

Volume={{ podman_user.letsencrypt_config_dir }}:/etc/letsencrypt:Z

HostName=letsencrypt

Exec=certonly \
    --dns-cloudflare \
    --dns-cloudflare-credentials /etc/letsencrypt/cloudflare.ini \
    --dns-cloudflare-propagation-seconds 60 \
    --email {{ podman_user.letsencrypt_email }} \
{% for domain in podman_user.letsencrypt_domains %}
    --domains '{{ domain }}' \
{% endfor %}
    --agree-tos \
    --keep-until-expiring \
    --expand \
    --non-interactive


[Service]
Restart=no

# Extend Timeout to allow time to pull the image
TimeoutStartSec=300

--------------------------------------------------------------------------------
/roles/docker/tasks/Debian_install.yml:
--------------------------------------------------------------------------------
---
- name: apt install ca-certificates curl gnupg
  ansible.builtin.apt:
    name:
      - ca-certificates
      - curl
      - gnupg
    state: present
  become: true

- name: Add Docker’s official GPG key
  ansible.builtin.apt_key:
    url: https://download.docker.com/linux/debian/gpg
    state: present
  become: true

- name: Add Docker repository
  ansible.builtin.apt_repository:
    repo: "deb https://download.docker.com/linux/debian {{ ansible_facts.distribution_release }} stable"
    state: present
  become: true

- name: apt install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
  ansible.builtin.apt:
    name:
      - docker-ce
      - docker-ce-cli
      - containerd.io
      - docker-buildx-plugin
      - docker-compose-plugin
    state: present
  become: true
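After the role runs, a quick ad-hoc check that the engine and the compose plugin installed correctly might look like this (`headless_example` stands in for whatever host the role was applied to):

```sh
ansible headless_example -m ansible.builtin.command -a "docker compose version" --become
```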
--------------------------------------------------------------------------------
/roles/podman/templates/qbittorrent.container.j2:
--------------------------------------------------------------------------------
[Unit]
Description=qBittorrent container
Wants=gluetun.service
After=gluetun.service
Requires=gluetun.service

[Container]
ContainerName=qbittorrent
Image=lscr.io/linuxserver/qbittorrent:latest
AutoUpdate=registry
Timezone={{ TZ }}

Volume={{ podman_user.qbittorrent_config_dir }}:/config:Z
Volume={{ podman_user.qbittorrent_downloads_dir }}:/downloads:Z

HostName=qbittorrent
Network=container:gluetun

Environment=PUID=1000
Environment=PGID=1000
Environment=TZ={{ TZ }}
Environment=WEBUI_PORT={{ podman_user.qbittorrent_webui_port }}

UIDMap=1000:0:1
UIDMap=0:1:1000
UIDMap=1001:1001:64536


[Service]
Restart=on-failure
RestartSec=5
RestartMaxDelaySec=1h
RestartSteps=10

# Extend Timeout to allow time to pull the image
TimeoutStartSec=300

[Install]
WantedBy=default.target

--------------------------------------------------------------------------------
/roles/podman/legacy/radarr.yml:
--------------------------------------------------------------------------------
---
- name: radarr container config directory
  ansible.builtin.file:
    path: "{{ podman_user.radarr_config_dir }}"
    state: directory
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0700'
  become: true

- name: radarr.service
  ansible.builtin.template:
    src: radarr.service.j2
    dest: "/home/{{ podman_user.name }}/.config/systemd/user/radarr.service"
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0600'
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user daemon-reload
  ansible.builtin.systemd: daemon_reload=true scope=user
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user enable --now radarr.service
  ansible.builtin.systemd: name=radarr enabled=true state=started scope=user
  become: true
  become_user: "{{ podman_user.name }}"

--------------------------------------------------------------------------------
/roles/podman/legacy/sonarr.yml:
--------------------------------------------------------------------------------
---
- name: sonarr container config directory
  ansible.builtin.file:
    path: "{{ podman_user.sonarr_config_dir }}"
    state: directory
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0700'
  become: true

- name: sonarr.service
  ansible.builtin.template:
    src: sonarr.service.j2
    dest: "/home/{{ podman_user.name }}/.config/systemd/user/sonarr.service"
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0600'
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user daemon-reload
  ansible.builtin.systemd: daemon_reload=true scope=user
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user enable --now sonarr.service
  ansible.builtin.systemd: name=sonarr enabled=true state=started scope=user
  become: true
  become_user: "{{ podman_user.name }}"
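These legacy units (and the quadlet templates above) run as systemd user services, which only start at boot if lingering is enabled for the Podman user. If `podman_setup.yml` does not already take care of that, a task along these lines would (a sketch; the `creates` guard keeps it idempotent):

```yaml
- name: loginctl enable-linger
  ansible.builtin.command:
    cmd: "loginctl enable-linger {{ podman_user.name }}"
    creates: "/var/lib/systemd/linger/{{ podman_user.name }}"
  become: true
```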
--------------------------------------------------------------------------------
/roles/podman/legacy/deluge.yml:
--------------------------------------------------------------------------------
---
- name: deluge container config directory
  ansible.builtin.file:
    path: "{{ podman_user.deluge_config_dir }}"
    state: directory
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0700'
  become: true

- name: deluge.service
  ansible.builtin.template:
    src: deluge.service.j2
    dest: "/home/{{ podman_user.name }}/.config/systemd/user/deluge.service"
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0600'
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user daemon-reload
  ansible.builtin.systemd: daemon_reload=true scope=user
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user enable --now deluge.service
  ansible.builtin.systemd: name=deluge.service enabled=true state=started scope=user
  become: true
  become_user: "{{ podman_user.name }}"

--------------------------------------------------------------------------------
/roles/systemd_networkd/tasks/main.yml:
--------------------------------------------------------------------------------
---
- include_tasks: "{{ ansible_facts.distribution }}-prepare.yml"

- name: "Create static network"
  ansible.builtin.template:
    src: en0.network.j2
    dest: /etc/systemd/network/{{ networkd_static.nic }}.network
    owner: root
    group: systemd-network
    mode: '0640'
  become: true
  when: networkd_static_ip is defined
  loop: "{{ networkd_static_ip }}"
  loop_control:
    loop_var: networkd_static

- name: Copy systemd-networkd configs
  ansible.builtin.copy:
    src: "{{ networkd_configs_dir }}"
    dest: /etc/systemd/network/
    owner: root
    group: systemd-network
    mode: '0640'
  become: true
  when: networkd_configs_dir is defined

- name: systemctl enable systemd-networkd
  ansible.builtin.systemd: name=systemd-networkd enabled=true
  become: true

- name: systemctl enable systemd-resolved
  ansible.builtin.systemd: name=systemd-resolved enabled=true
  become: true

--------------------------------------------------------------------------------
/roles/podman/templates/syncthing.container.j2:
--------------------------------------------------------------------------------
[Unit]
Description=Syncthing container

[Container]
ContainerName=syncthing
Image=lscr.io/linuxserver/syncthing:latest
AutoUpdate=registry
Timezone={{ TZ }}

Volume={{ podman_user.syncthing_config_dir }}:/config:Z
{% for item in podman_user.syncthing_data_dirs %}
Volume={{ item.src }}:{{ item.dest }}:Z
{% endfor %}

HostName=syncthing
PublishPort=127.0.0.1:{{ podman_user.syncthing_webui_port }}:8384/tcp
{% if podman_user.syncthing_sync_port is defined %}
PublishPort={{ podman_user.syncthing_sync_port }}:22000/tcp
{% else %}
PublishPort=22000:22000/tcp
{% endif %}

Environment=PUID=1000
Environment=PGID=1000
Environment=TZ={{ TZ }}

UIDMap=1000:0:1
UIDMap=0:1:1000
UIDMap=1001:1001:64536


[Service]
Restart=on-failure
RestartSec=5
RestartMaxDelaySec=1h
RestartSteps=10

# Extend Timeout to allow time to pull the image
TimeoutStartSec=300

[Install]
WantedBy=default.target

--------------------------------------------------------------------------------
/roles/podman/templates/paperless-ngx.container.j2:
--------------------------------------------------------------------------------
[Unit]
Description=Paperless-ngx container
Wants=paperless-ngx-redis.service
After=paperless-ngx-redis.service

[Container]
ContainerName=paperless-ngx
Pod=paperless-ngx.pod
Image=ghcr.io/paperless-ngx/paperless-ngx:latest
AutoUpdate=registry
Timezone={{ TZ }}

Volume={{ podman_user.paperless_config_dir }}/paperless:/usr/src/paperless/data:Z

Environment=USERMAP_UID=1000
Environment=USERMAP_GID=1000
Environment=PAPERLESS_REDIS=redis://localhost:6379
{% if podman_user.paperless_url is defined %}
Environment=PAPERLESS_URL={{ podman_user.paperless_url }}
Environment=PAPERLESS_USE_X_FORWARD_HOST=true
Environment=PAPERLESS_USE_X_FORWARD_PORT=true
Environment=PAPERLESS_PROXY_SSL_HEADER='["HTTP_X_FORWARDED_PROTO", "https"]'
{% endif %}


[Service]
Restart=on-failure
RestartSec=5
RestartMaxDelaySec=1h
RestartSteps=10

# Extend Timeout to allow time to pull the image
TimeoutStartSec=300

[Install]
WantedBy=default.target

--------------------------------------------------------------------------------
/roles/auto-update/tasks/Archlinux.yml:
--------------------------------------------------------------------------------
---
- name: pacman -S archlinux-contrib
  community.general.pacman: name=archlinux-contrib state=present
  become: true

- name: auto-update.service
  ansible.builtin.copy:
    src: auto-update.service
    dest: /etc/systemd/system/auto-update.service
    owner: root
    group: root
    mode: '0644'
  become: true

- name: auto-update.timer
  ansible.builtin.template:
    src: auto-update.timer.j2
    dest: /etc/systemd/system/auto-update.timer
    owner: root
    group: root
    mode: '0644'
  become: true

- name: auto-update.sh
  ansible.builtin.template:
    src: auto-update.sh.j2
    dest: /usr/local/bin/auto-update.sh
    owner: root
    group: root
    mode: '0700'
  become: true

- name: systemctl daemon-reload
  ansible.builtin.systemd: daemon_reload=true
  become: true

- name: systemctl enable auto-update.timer
  ansible.builtin.systemd: name=auto-update.timer enabled=true state=started
  become: true

--------------------------------------------------------------------------------
/roles/podman/legacy/radicale.yml:
--------------------------------------------------------------------------------
---
- name: radicale container config/data directory
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0700'
  become: true
  loop:
    - "{{ podman_user.radicale_data_dir }}"
    - "{{ podman_user.radicale_config_dir }}"

- name: radicale.service
  ansible.builtin.template:
    src: radicale.service.j2
    dest: "/home/{{ podman_user.name }}/.config/systemd/user/radicale.service"
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0600'
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user daemon-reload
  ansible.builtin.systemd: daemon_reload=true scope=user
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user enable --now radicale.service
  ansible.builtin.systemd: name=radicale enabled=true state=started scope=user
  become: true
  become_user: "{{ podman_user.name }}"

--------------------------------------------------------------------------------
/roles/podman/templates/autobrr.container.j2:
--------------------------------------------------------------------------------
[Unit]
Description=autobrr container
{% if "gluetun" in podman_user.containers and podman_user.autobrr_gluetun_proxy %}
Wants=gluetun.service
After=gluetun.service
Requires=gluetun.service
{% endif %}

[Container]
ContainerName=autobrr
Image=ghcr.io/autobrr/autobrr:latest
AutoUpdate=registry
Timezone={{ TZ }}

User=1000
Group=1000
UserNS=keep-id:uid=1000,gid=1000

Volume={{ podman_user.autobrr_config_dir }}:/config:Z

HostName=autobrr
{% if "gluetun" in podman_user.containers and podman_user.autobrr_gluetun_proxy %}
Network=container:gluetun
{% else %}
PublishPort=127.0.0.1:{{ podman_user.autobrr_webui_port }}:{{ podman_user.autobrr_webui_port }}/tcp
{% endif %}

Environment=TZ={{ TZ }}
Environment=AUTOBRR__HOST=0.0.0.0


[Service]
Restart=on-failure
RestartSec=5
RestartMaxDelaySec=1h
RestartSteps=10

# Extend Timeout to allow time to pull the image
TimeoutStartSec=300

[Install]
WantedBy=default.target

--------------------------------------------------------------------------------
/roles/podman/templates/unifi.container.j2:
--------------------------------------------------------------------------------
[Unit]
Description=UniFi container
Wants=unifi-db.service
After=unifi-db.service
Requires=unifi-db.service

[Container]
ContainerName=unifi
Pod=unifi.pod
Image=lscr.io/linuxserver/unifi-network-application:latest
AutoUpdate=registry
Timezone={{ TZ }}

Volume={{ podman_user.unifi_config_dir }}/unifi:/config:Z

Environment=PUID=1000
Environment=PGID=1000
Environment=TZ={{ TZ }}

{% if not unifi_database_dir.stat.exists %}
Environment=MONGO_USER=unifi
Environment=MONGO_PASS={{ podman_user.unifi_db_pass }}
Environment=MONGO_HOST=localhost
Environment=MONGO_PORT=27017
Environment=MONGO_DBNAME=unifi
{% endif %}

[Service]
Restart=on-failure
RestartSec=5
RestartMaxDelaySec=1h
RestartSteps=10

{% if not unifi_database_dir.stat.exists %}
# Wait for MongoDB initialization
ExecStartPre=/usr/bin/sleep 5

{% endif %}
# Extend Timeout to allow time to pull the image
TimeoutStartSec=300

[Install]
WantedBy=default.target

--------------------------------------------------------------------------------
/roles/msmtp/README.md:
--------------------------------------------------------------------------------
Set up [msmtp](https://wiki.archlinux.org/title/Msmtp) for sending email notifications.

## Variables
### Arch Linux
This will install the `msmtp` package and create the `/root/.msmtprc` file, owned by `root` with permissions `600`.
The password is stored in plain text, so only the root user is allowed to read it.
Since this file is used to send email notifications automatically, storing an encrypted password here is pointless,
because it would have to be decrypted automatically anyway.

```yaml
# account name
msmtp_account: gmail

# smtp server
msmtp_host: smtp.gmail.com

# smtp port
msmtp_port: 465

# Enable or disable TLS/SSL
msmtp_tls: on

# Enable or disable STARTTLS for TLS
msmtp_tls_starttls: off

# From email address
msmtp_from: username@gmail.com

# username and password
# If you are using Gmail, set up an app password:
# https://myaccount.google.com/apppasswords
msmtp_user: username
msmtp_password: !unsafe plain-text-password

# To email address
# Not in the /root/.msmtprc file
msmtp_to: username@gmail.com
```

--------------------------------------------------------------------------------
/roles/podman/templates/thelounge.container.j2:
--------------------------------------------------------------------------------
[Unit]
Description=The Lounge container
{% if "gluetun" in podman_user.containers and podman_user.thelounge_gluetun_proxy %}
Wants=gluetun.service
After=gluetun.service
Requires=gluetun.service
{% endif %}

[Container]
ContainerName=thelounge
Image=lscr.io/linuxserver/thelounge:latest
AutoUpdate=registry
Timezone={{ TZ }}

Volume={{ podman_user.thelounge_config_dir }}:/config:Z

HostName=thelounge
{% if "gluetun" in podman_user.containers and podman_user.thelounge_gluetun_proxy %}
Network=container:gluetun
{% else %}
PublishPort=127.0.0.1:{{ podman_user.thelounge_webui_port }}:{{ podman_user.thelounge_webui_port }}/tcp
{% endif %}

Environment=PUID=1000
Environment=PGID=1000
Environment=TZ={{ TZ }}

UIDMap=1000:0:1
UIDMap=0:1:1000
UIDMap=1001:1001:64536


[Service]
Restart=on-failure
RestartSec=5
RestartMaxDelaySec=1h
RestartSteps=10

# Extend Timeout to allow time to pull the image
TimeoutStartSec=300

[Install]
WantedBy=default.target

--------------------------------------------------------------------------------
/roles/podman/templates/prometheus-node-exporter.container.j2:
--------------------------------------------------------------------------------
[Unit]
Description=Prometheus Node Exporter container
Wants=network-online.target
After=network-online.target nss-lookup.target

[Container]
ContainerName=prometheus-node-exporter
{% if podman_user.prometheus_host_mode is defined and podman_user.prometheus_host_mode %}
Network=host
PodmanArgs=--pid=host
{% else %}
Pod=grafana-prometheus.pod
{% endif %}
Image=quay.io/prometheus/node-exporter:latest
AutoUpdate=registry
Timezone={{ TZ }}

User={{ podman_user.uid }}
Group={{ podman_user.uid }}

{% if podman_user.prometheus_host_mode is defined and podman_user.prometheus_host_mode %}
Volume=/:/host:ro,rslave
Exec=--path.rootfs=/host
Exec=--path.procfs=/host/proc
Exec=--path.sysfs=/host/sys
Exec=--collector.filesystem.mount-points-exclude=^/(dev|proc|sys)($$|/)
Exec=--collector.processes
Exec=--collector.systemd
{% endif %}


[Service]
Restart=on-failure
RestartSec=5
RestartMaxDelaySec=1h
RestartSteps=10

# Extend Timeout to allow time to pull the image
38 | # Extend Timeout to allow time to pull the image 39 | TimeoutStartSec=300 40 | 41 | [Install] 42 | WantedBy=default.target 43 | -------------------------------------------------------------------------------- /roles/podman/templates/transmission.container.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Transmission container 3 | Wants=gluetun.service 4 | After=gluetun.service 5 | Requires=gluetun.service 6 | 7 | [Container] 8 | ContainerName=transmission 9 | Image=lscr.io/linuxserver/transmission:latest 10 | AutoUpdate=registry 11 | Timezone={{ TZ }} 12 | 13 | Volume={{ podman_user.transmission_config_dir }}:/config:Z 14 | Volume={{ podman_user.transmission_downloads_dir }}:/downloads:Z 15 | {% if podman_user.transmission_watch_dir is defined %} 16 | Volume={{ podman_user.transmission_watch_dir }}:/watch:Z 17 | {% endif %} 18 | 19 | HostName=transmission 20 | Network=container:gluetun 21 | 22 | Environment=PUID=1000 23 | Environment=PGID=1000 24 | Environment=TZ={{ TZ }} 25 | {% if podman_user.transmission_user is defined %} 26 | Environment=USER={{ podman_user.transmission_user }} 27 | Environment=PASS='{{ podman_user.transmission_pass }}' 28 | {% endif %} 29 | 30 | UIDMap=1000:0:1 31 | UIDMap=0:1:1000 32 | UIDMap=1001:1001:64536 33 | 34 | 35 | [Service] 36 | Restart=on-failure 37 | RestartSec=5 38 | RestartMaxDelaySec=1h 39 | RestartSteps=10 40 | 41 | # Extend Timeout to allow time to pull the image 42 | TimeoutStartSec=300 43 | 44 | [Install] 45 | WantedBy=default.target 46 | -------------------------------------------------------------------------------- /roles/nut/README.md: -------------------------------------------------------------------------------- 1 | Set up [Network UPS Tools](https://wiki.archlinux.org/title/Network_UPS_Tools) (NUT) and configure it to send email notifications on Arch Linux. 2 | 3 | This role depends on [`roles/msmtp`](/roles/msmtp/). 4 | 5 | ## Tasks 6 | - Install `nut`. 7 | - Edit `/etc/nut/ups.conf`, `/etc/nut/upsd.users`, and `/etc/nut/upsmon.conf`. 8 | - Create `/etc/nut/msmtprc` for sending email, using the variables from [`roles/msmtp`](/roles/msmtp). 9 | Because NUT runs as the `nut` user, which cannot read the `/root/.msmtprc` file, create the same file at `/etc/nut/msmtprc` for the `nut` user to read. 10 | - Copy [`nut_notify.sh`](templates/nut_notify.sh.j2), which will be executed to send email notifications. 11 | - Enable various systemd services. 12 | 13 | ## Variables 14 | ```yaml 15 | # UPS password in /etc/nut/upsd.users 16 | # https://wiki.archlinux.org/title/Network_UPS_Tools#upsd_configuration 17 | ups_password: !unsafe 1234546 18 | 19 | 20 | # Optional variable to fix _Can't claim USB device error_ 21 | # https://wiki.archlinux.org/title/Network_UPS_Tools#Can't_claim_USB_device_error 22 | # These are the USB device manufacturer and product IDs. 23 | # You can get these IDs [XXXX:YYYY] with the `lsusb` command.
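# For example, a CyberPower unit might show up in the `lsusb` output as
#   Bus 001 Device 004: ID 0764:0501 Cyber Power System, Inc. CP1500 AVR UPS
# i.e. vendor ID 0764 and product ID 0501 (illustrative values, yours will differ).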
24 | #ups_vender_id: XXXX 25 | #ups_product_id: YYYY 26 | ``` 27 | 28 | -------------------------------------------------------------------------------- /roles/podman/templates/tailscale-traefik.container.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=tailscale container 3 | Wants=network-online.target 4 | After=network-online.target nss-lookup.target 5 | Wants=traefik.service 6 | After=traefik.service 7 | Requires=traefik.service 8 | 9 | [Container] 10 | ContainerName=tailscale-traefik 11 | Image=ghcr.io/tailscale/tailscale:latest 12 | AutoUpdate=registry 13 | Timezone={{ TZ }} 14 | 15 | AddCapability=NET_ADMIN 16 | AddDevice=/dev/net/tun:/dev/net/tun 17 | 18 | Volume={{ podman_user.tailscale_config_dir }}/:/var/lib/tailscale:Z 19 | 20 | Network=container:traefik 21 | 22 | Environment=TS_STATE_DIR=/var/lib/tailscale 23 | {% if podman_user.tailscale_hostname is defined %} 24 | Environment=TS_HOSTNAME={{ podman_user.tailscale_hostname }} 25 | {% else %} 26 | Environment=TS_HOSTNAME=tailscale-traefik-container 27 | {% endif %} 28 | {% if podman_user.tailscale_args is defined %} 29 | {% for arg in podman_user.tailscale_args %} 30 | Environment=TS_EXTRA_ARGS={{ arg }} 31 | {% endfor %} 32 | {% endif %} 33 | 34 | 35 | [Service] 36 | Restart=on-failure 37 | RestartSec=5 38 | RestartMaxDelaySec=1h 39 | RestartSteps=10 40 | 41 | # Extend Timeout to allow time to pull the image 42 | TimeoutStartSec=300 43 | 44 | [Install] 45 | WantedBy=default.target 46 | -------------------------------------------------------------------------------- /roles/podman/legacy/prowlarr.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: prowlarr container config directory 3 | ansible.builtin.file: 4 | path: "{{ podman_user.prowlarr_config_dir }}" 5 | state: directory 6 | owner: "{{ podman_user.name }}" 7 | group: "{{ podman_user.name }}" 8 | mode: '0700' 9 | become: true 10 | 11 | - name: '*Arrs media directory' 12 | ansible.builtin.file: 13 | path: "{{ podman_user.arr_media_dir }}" 14 | state: directory 15 | owner: "{{ podman_user.name }}" 16 | group: "{{ podman_user.name }}" 17 | mode: '0755' 18 | become: true 19 | 20 | - name: prowlarr.service 21 | ansible.builtin.template: 22 | src: prowlarr.service.j2 23 | dest: "/home/{{ podman_user.name }}/.config/systemd/user/prowlarr.service" 24 | owner: "{{ podman_user.name }}" 25 | group: "{{ podman_user.name }}" 26 | mode: '0600' 27 | become: true 28 | become_user: "{{ podman_user.name }}" 29 | 30 | - name: systemctl --user daemon-reload 31 | ansible.builtin.systemd: daemon_reload=true scope=user 32 | become: true 33 | become_user: "{{ podman_user.name }}" 34 | 35 | - name: systemctl --user enable --now prowlarr.service 36 | ansible.builtin.systemd: name=prowlarr enabled=true state=started scope=user 37 | become: true 38 | become_user: "{{ podman_user.name }}" 39 | -------------------------------------------------------------------------------- /roles/podman/legacy/thelounge.service.j2: -------------------------------------------------------------------------------- 1 | # thelounge.service 2 | 3 | [Unit] 4 | Description=Podman thelounge.service 5 | Documentation=man:podman-generate-systemd(1) 6 | Wants=network-online.target 7 | After=network-online.target 8 | RequiresMountsFor=%t/containers 9 | 10 | [Service] 11 | Environment=PODMAN_SYSTEMD_UNIT=%n 12 | Restart=on-failure 13 | RestartSec=5 14 | TimeoutStopSec=70 15 | ExecStartPre=/bin/rm \ 16 | -f %t/%n.ctr-id
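# The --uidmap trio below maps container UID 1000 to the invoking user's own UID
# (so files in /config stay owned by the host user), while container root and the
# remaining UIDs land in the user's subordinate UID range.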
17 | ExecStart=/usr/bin/podman run \ 18 | --cidfile=%t/%n.ctr-id \ 19 | --cgroups=no-conmon \ 20 | --rm \ 21 | --sdnotify=conmon \ 22 | --detach \ 23 | --replace \ 24 | --label io.containers.autoupdate=registry \ 25 | --uidmap 1000:0:1 \ 26 | --uidmap 0:1:1000 \ 27 | --uidmap 1001:1001:64536 \ 28 | --env PUID=1000 \ 29 | --env PGID=1000 \ 30 | --env TZ={{ TZ }} \ 31 | --name thelounge \ 32 | --publish 127.0.0.1:9000:9000/tcp \ 33 | --volume {{ podman_user.thelounge_config_dir }}:/config:Z \ 34 | lscr.io/linuxserver/thelounge:latest 35 | ExecStop=/usr/bin/podman stop \ 36 | --ignore \ 37 | --time 10 \ 38 | --cidfile=%t/%n.ctr-id 39 | ExecStopPost=/usr/bin/podman rm \ 40 | -f \ 41 | --ignore --time 10 \ 42 | --cidfile=%t/%n.ctr-id 43 | Type=notify 44 | NotifyAccess=all 45 | 46 | [Install] 47 | WantedBy=default.target 48 | -------------------------------------------------------------------------------- /roles/podman/legacy/prowlarr.service.j2: -------------------------------------------------------------------------------- 1 | # container-prowlarr.service 2 | 3 | [Unit] 4 | Description=Podman container-prowlarr.service 5 | Documentation=man:podman-generate-systemd(1) 6 | Wants=network-online.target 7 | After=network-online.target 8 | RequiresMountsFor=%t/containers 9 | 10 | [Service] 11 | Environment=PODMAN_SYSTEMD_UNIT=%n 12 | Restart=on-failure 13 | TimeoutStopSec=70 14 | ExecStartPre=/bin/rm \ 15 | -f %t/%n.ctr-id 16 | ExecStart=/usr/bin/podman run \ 17 | --cidfile=%t/%n.ctr-id \ 18 | --cgroups=no-conmon \ 19 | --rm \ 20 | --sdnotify=conmon \ 21 | --detach \ 22 | --replace \ 23 | --label io.containers.autoupdate=registry \ 24 | --uidmap 1000:0:1 \ 25 | --uidmap 0:1:1000 \ 26 | --uidmap 1001:1001:64536 \ 27 | --name=prowlarr \ 28 | --hostname=prowlarr \ 29 | --env PUID=1000 \ 30 | --env PGID=1000 \ 31 | --env TZ={{ TZ }} \ 32 | --publish 127.0.0.1:9696:9696/tcp \ 33 | --volume {{ podman_user.prowlarr_config_dir }}:/config:Z \ 34 | lscr.io/linuxserver/prowlarr:latest 35 | ExecStop=/usr/bin/podman stop \ 36 | --ignore \ 37 | --time=10 \ 38 | --cidfile=%t/%n.ctr-id 39 | ExecStopPost=/usr/bin/podman rm \ 40 | --force \ 41 | --ignore \ 42 | --time=10 \ 43 | --cidfile=%t/%n.ctr-id 44 | Type=notify 45 | NotifyAccess=all 46 | 47 | [Install] 48 | WantedBy=default.target 49 | -------------------------------------------------------------------------------- /roles/podman/legacy/radarr.service.j2: -------------------------------------------------------------------------------- 1 | # radarr.service 2 | 3 | [Unit] 4 | Description=Podman radarr.service 5 | Documentation=man:podman-generate-systemd(1) 6 | Wants=network-online.target 7 | After=network-online.target 8 | RequiresMountsFor=%t/containers 9 | 10 | [Service] 11 | Environment=PODMAN_SYSTEMD_UNIT=%n 12 | Restart=on-failure 13 | TimeoutStopSec=70 14 | ExecStartPre=/bin/rm \ 15 | -f %t/%n.ctr-id 16 | ExecStart=/usr/bin/podman run \ 17 | --cidfile=%t/%n.ctr-id \ 18 | --cgroups=no-conmon \ 19 | --rm \ 20 | --sdnotify=conmon \ 21 | --detach \ 22 | --replace \ 23 | --label io.containers.autoupdate=registry \ 24 | --uidmap 1000:0:1 \ 25 | --uidmap 0:1:1000 \ 26 | --uidmap 1001:1001:64536 \ 27 | --name=radarr \ 28 | --hostname=radarr \ 29 | --env PUID=1000 \ 30 | --env PGID=1000 \ 31 | --env TZ={{ TZ }} \ 32 | --publish 127.0.0.1:7878:7878/tcp \ 33 | --volume {{ podman_user.radarr_config_dir }}:/config:Z \ 34 | --volume {{ podman_user.arr_media_dir }}:/data:rw \ 35 | lscr.io/linuxserver/radarr:latest 36 | ExecStop=/usr/bin/podman stop \ 37 | --ignore \ 38 | 
--time=10 \ 39 | --cidfile=%t/%n.ctr-id 40 | ExecStopPost=/usr/bin/podman rm \ 41 | --force \ 42 | --ignore \ 43 | --time=10 \ 44 | --cidfile=%t/%n.ctr-id 45 | Type=notify 46 | NotifyAccess=all 47 | 48 | [Install] 49 | WantedBy=default.target 50 | -------------------------------------------------------------------------------- /roles/podman/legacy/sonarr.service.j2: -------------------------------------------------------------------------------- 1 | # sonarr.service 2 | 3 | [Unit] 4 | Description=Podman sonarr.service 5 | Documentation=man:podman-generate-systemd(1) 6 | Wants=network-online.target 7 | After=network-online.target 8 | RequiresMountsFor=%t/containers 9 | 10 | [Service] 11 | Environment=PODMAN_SYSTEMD_UNIT=%n 12 | Restart=on-failure 13 | TimeoutStopSec=70 14 | ExecStartPre=/bin/rm \ 15 | -f %t/%n.ctr-id 16 | ExecStart=/usr/bin/podman run \ 17 | --cidfile=%t/%n.ctr-id \ 18 | --cgroups=no-conmon \ 19 | --rm \ 20 | --sdnotify=conmon \ 21 | --detach \ 22 | --replace \ 23 | --label io.containers.autoupdate=registry \ 24 | --uidmap 1000:0:1 \ 25 | --uidmap 0:1:1000 \ 26 | --uidmap 1001:1001:64536 \ 27 | --name=sonarr \ 28 | --hostname=sonarr \ 29 | --env PUID=1000 \ 30 | --env PGID=1000 \ 31 | --env TZ={{ TZ }} \ 32 | --publish 127.0.0.1:8989:8989/tcp \ 33 | --volume {{ podman_user.sonarr_config_dir }}:/config:Z \ 34 | --volume {{ podman_user.arr_media_dir }}:/data:rw \ 35 | lscr.io/linuxserver/sonarr:latest 36 | ExecStop=/usr/bin/podman stop \ 37 | --ignore \ 38 | --time=10 \ 39 | --cidfile=%t/%n.ctr-id 40 | ExecStopPost=/usr/bin/podman rm \ 41 | --force \ 42 | --ignore \ 43 | --time=10 \ 44 | --cidfile=%t/%n.ctr-id 45 | Type=notify 46 | NotifyAccess=all 47 | 48 | [Install] 49 | WantedBy=default.target 50 | -------------------------------------------------------------------------------- /roles/gui/tasks/dotfiles.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: pacman -S git 3 | community.general.pacman: name=git state=present 4 | become: true 5 | when: ansible_facts["distribution"] == "Archlinux" 6 | 7 | - name: dnf install git 8 | ansible.builtin.dnf: name=git state=present 9 | become: true 10 | when: ansible_facts["distribution"] == "Fedora" 11 | 12 | - name: Restore dotfiles 13 | ansible.builtin.shell: | 14 | #!/usr/bin/bash 15 | git clone --bare {{ dotfiles_repo.https }} $HOME/.dotfiles 16 | function dotfiles { 17 | /usr/bin/git --git-dir=$HOME/.dotfiles/ --work-tree=$HOME $@ 18 | } 19 | # delete conflicted files 20 | #dotfiles checkout 2>&1 | grep -E "\s+\." 
| awk {'print $1'} | xargs -I{} rm {} 21 | dotfiles checkout 22 | dotfiles config --local status.showUntrackedFiles no 23 | args: 24 | chdir: "{{ ansible_facts.user_dir }}" 25 | executable: /usr/bin/bash 26 | creates: "{{ ansible_facts.user_dir }}/.dotfiles/config" 27 | 28 | - name: Set ssh connection in .dotfiles/config 29 | ansible.builtin.lineinfile: 30 | path: "{{ ansible_facts.user_dir }}/.dotfiles/config" 31 | regexp: '^\surl =' 32 | insertafter: '^\[remote' 33 | line: "\turl = {{ dotfiles_repo.ssh }}" 34 | 35 | - name: pacman -S openssh 36 | community.general.pacman: name=openssh state=present 37 | become: true 38 | when: ansible_facts["distribution"] == "Archlinux" 39 | -------------------------------------------------------------------------------- /roles/podman/templates/traefik.container.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=traefik container 3 | Wants=network-online.target 4 | After=network-online.target nss-lookup.target 5 | {% if podman_user.traefik_tailscale_enable is defined and podman_user.traefik_tailscale_enable %} 6 | Wants=tailscale-traefik.service 7 | Before=tailscale-traefik.service 8 | {% endif %} 9 | 10 | [Container] 11 | ContainerName=traefik 12 | Image=docker.io/library/traefik:v2.10 13 | AutoUpdate=registry 14 | Timezone={{ TZ }} 15 | 16 | Volume={{ podman_user.traefik_config_dir }}/static_conf.yml:/etc/traefik/traefik.yml:Z,ro 17 | Volume={{ podman_user.traefik_config_dir }}/dynamic_conf.yml:/etc/traefik/dynamic_conf.yml:Z,ro 18 | Volume={{ podman_user.traefik_config_dir }}/ssl:/etc/traefik/ssl:Z,ro 19 | 20 | HostName=traefik 21 | PublishPort=443:443/tcp 22 | {% if podman_user.traefik_forward_ports is defined %} 23 | Network=pasta: 24 | {%- for port in podman_user.traefik_forward_ports -%} 25 | --tcp-ns,{{ port }}{{ "," if not loop.last else "" }} 26 | {%- endfor %} 27 | {% endif %} 28 | 29 | 30 | 31 | [Service] 32 | Restart=on-failure 33 | RestartSec=5 34 | RestartMaxDelaySec=1h 35 | RestartSteps=10 36 | 37 | # Remove the traefik container and all containers that depend on traefik 38 | ExecStartPre=-/usr/bin/podman rm --force --depend traefik 39 | 40 | # Extend Timeout to allow time to pull the image 41 | TimeoutStartSec=300 42 | 43 | [Install] 44 | WantedBy=default.target 45 | -------------------------------------------------------------------------------- /roles/podman/legacy/deluge.service.j2: -------------------------------------------------------------------------------- 1 | # deluge.service 2 | 3 | [Unit] 4 | Description=Podman deluge.service 5 | Documentation=man:podman-generate-systemd(1) 6 | Wants=network-online.target gluetun.service 7 | After=network-online.target gluetun.service 8 | Requires=gluetun.service 9 | RequiresMountsFor=%t/containers 10 | 11 | [Service] 12 | Environment=PODMAN_SYSTEMD_UNIT=%n 13 | Restart=on-failure 14 | RestartSec=5 15 | TimeoutStopSec=70 16 | ExecStartPre=/bin/rm \ 17 | -f %t/%n.ctr-id 18 | ExecStart=/usr/bin/podman run \ 19 | --cidfile=%t/%n.ctr-id \ 20 | --cgroups=no-conmon \ 21 | --rm \ 22 | --sdnotify=conmon \ 23 | --detach \ 24 | --replace \ 25 | --label io.containers.autoupdate=registry \ 26 | --uidmap 1000:0:1 \ 27 | --uidmap 0:1:1000 \ 28 | --uidmap 1001:1001:64536 \ 29 | --name=deluge \ 30 | --network=container:gluetun \ 31 | --env PUID=1000 \ 32 | --env PGID=1000 \ 33 | --env TZ={{ TZ }} \ 34 | --volume {{ podman_user.deluge_config_dir }}:/config:Z \ 35 | --volume {{ podman_user.deluge_downloads_dir }}:/downloads:Z \ 36 |
lscr.io/linuxserver/deluge:latest 37 | ExecStop=/usr/bin/podman stop \ 38 | --ignore \ 39 | --time=10 \ 40 | --cidfile=%t/%n.ctr-id 41 | ExecStopPost=/usr/bin/podman rm \ 42 | --force \ 43 | --ignore \ 44 | --time=10 \ 45 | --cidfile=%t/%n.ctr-id 46 | Type=notify 47 | NotifyAccess=all 48 | 49 | [Install] 50 | WantedBy=default.target 51 | -------------------------------------------------------------------------------- /roles/podman/legacy/nextcloud-pod.service.j2: -------------------------------------------------------------------------------- 1 | # nextcloud-pod.service 2 | 3 | [Unit] 4 | Description=Podman nextcloud-pod.service 5 | Documentation=man:podman-generate-systemd(1) 6 | Wants=network-online.target 7 | After=network-online.target 8 | RequiresMountsFor=%t/containers 9 | Wants=nextcloud.service postgres.service 10 | Before=nextcloud.service postgres.service 11 | 12 | [Service] 13 | Environment=PODMAN_SYSTEMD_UNIT=%n 14 | Restart=on-failure 15 | TimeoutStopSec=70 16 | #ExecStartPre=/usr/bin/podman pod create \ 17 | # --infra-conmon-pidfile %t/nextcloud-pod.pid \ 18 | # --pod-id-file %t/nextcloud-pod.pod-id \ 19 | # --exit-policy=stop \ 20 | # --replace \ 21 | # --uidmap 1000:0:1 \ 22 | # --uidmap 0:1:1000 \ 23 | # --uidmap 1001:1001:64536 \ 24 | # --publish 1443:443 \ 25 | # --name nextcloud-pod 26 | ExecStartPre=/usr/bin/podman pod create \ 27 | --infra-conmon-pidfile %t/nextcloud-pod.pid \ 28 | --pod-id-file %t/nextcloud-pod.pod-id \ 29 | --exit-policy=stop \ 30 | --userns keep-id:uid=33,gid=33 \ 31 | --replace \ 32 | --publish 127.0.0.1:4108:80 \ 33 | --name nextcloud-pod 34 | ExecStart=/usr/bin/podman pod start \ 35 | --pod-id-file %t/nextcloud-pod.pod-id 36 | ExecStop=/usr/bin/podman pod stop \ 37 | --ignore \ 38 | --pod-id-file %t/nextcloud-pod.pod-id \ 39 | --time=10 40 | ExecStopPost=/usr/bin/podman pod rm \ 41 | --ignore \ 42 | --force \ 43 | --pod-id-file %t/nextcloud-pod.pod-id 44 | PIDFile=%t/nextcloud-pod.pid 45 | Type=forking 46 | 47 | [Install] 48 | WantedBy=default.target 49 | -------------------------------------------------------------------------------- /mkarchiso.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | # Build an Arch Linux ISO file with optional extra kernel parameters. 4 | 5 | # extra kernel command line parameters 6 | KERNEL_CMD="console=ttyS0" 7 | 8 | # create a temp directory in the current directory 9 | tempdir=$(mktemp -d --tmpdir=.)
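# mktemp prints the path of the directory it just created (something like
# ./tmp.rQ3xYz); the directory is removed again at the end of this script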
10 | 11 | # copy the releng profile used in the monthly ISO release 12 | cp -r /usr/share/archiso/configs/releng/ "$tempdir/archlive" 13 | 14 | # copy installation scripts to /root/ 15 | cp arch_install.sh "$tempdir/archlive/airootfs/root/" 16 | cp arch_install_bcachefs.sh "$tempdir/archlive/airootfs/root/" 17 | cp homed.sh "$tempdir/archlive/airootfs/root/" 18 | 19 | # Add bcachefs packages 20 | if [[ $(grep '^bcachefs-tools$' $tempdir/archlive/packages.x86_64 | wc -l) == 0 ]] ; then 21 | echo "bcachefs-tools" >> $tempdir/archlive/packages.x86_64 22 | fi 23 | if [[ $(grep '^bcachefs-dkms$' $tempdir/archlive/packages.x86_64 | wc -l) == 0 ]] ; then 24 | echo "bcachefs-dkms" >> $tempdir/archlive/packages.x86_64 25 | fi 26 | if [[ $(grep '^linux-headers$' $tempdir/archlive/packages.x86_64 | wc -l) == 0 ]] ; then 27 | echo "linux-headers" >> $tempdir/archlive/packages.x86_64 28 | fi 29 | 30 | if [[ -n "$KERNEL_CMD" ]] ; then 31 | # add kernel command line to systemd-boot 32 | echo "options $KERNEL_CMD" >> "$tempdir/archlive/efiboot/loader/entries/01-archiso-x86_64-linux.conf" 33 | # add kernel command line to grub 34 | sed -i "/\\s*linux.*archisodevice=UUID=\${ARCHISO_UUID}\$/ s|\$| $KERNEL_CMD|" "$tempdir/archlive/grub/grub.cfg" 35 | fi 36 | 37 | mkarchiso -v -w "$tempdir/work" -o /tmp "$tempdir/archlive" 38 | 39 | rm -r "$tempdir" 40 | -------------------------------------------------------------------------------- /roles/podman/legacy/radicale.service.j2: -------------------------------------------------------------------------------- 1 | # radicale.service 2 | 3 | [Unit] 4 | Description=Podman radicale.service 5 | Documentation=man:podman-generate-systemd(1) 6 | Wants=network-online.target 7 | After=network-online.target 8 | RequiresMountsFor=%t/containers 9 | 10 | [Service] 11 | Environment=PODMAN_SYSTEMD_UNIT=%n 12 | Restart=on-failure 13 | TimeoutStopSec=70 14 | ExecStartPre=/bin/rm \ 15 | -f %t/%n.ctr-id 16 | ExecStart=/usr/bin/podman run \ 17 | --cidfile=%t/%n.ctr-id \ 18 | --cgroups=no-conmon \ 19 | --rm \ 20 | --sdnotify=conmon \ 21 | --detach \ 22 | --replace \ 23 | --name radicale \ 24 | --label io.containers.autoupdate=registry \ 25 | --publish 127.0.0.1:5232:5232 \ 26 | --userns=keep-id:uid=2999,gid=2999 \ 27 | --read-only \ 28 | --security-opt=no-new-privileges \ 29 | --cap-drop ALL \ 30 | --cap-add CHOWN \ 31 | --cap-add SETUID \ 32 | --cap-add SETGID \ 33 | --cap-add KILL \ 34 | --pids-limit 50 \ 35 | --memory 256M \ 36 | --health-cmd="curl --fail http://localhost:5232 || exit 1" \ 37 | --health-interval=30s \ 38 | --health-retries=3 \ 39 | --volume {{ podman_user.radicale_data_dir }}:/data:Z \ 40 | --volume {{ podman_user.radicale_config_dir }}:/config:Z,ro \ 41 | docker.io/tomsquest/docker-radicale 42 | ExecStop=/usr/bin/podman stop \ 43 | --ignore \ 44 | --time=10 \ 45 | --cidfile=%t/%n.ctr-id 46 | ExecStopPost=/usr/bin/podman rm \ 47 | --force \ 48 | --ignore \ 49 | --time=10 \ 50 | --cidfile=%t/%n.ctr-id 51 | Type=notify 52 | NotifyAccess=all 53 | 54 | [Install] 55 | WantedBy=default.target 56 | -------------------------------------------------------------------------------- /roles/podman/legacy/postgres.service.j2: -------------------------------------------------------------------------------- 1 | # postgres.service 2 | 3 | [Unit] 4 | Description=Podman postgres.service 5 | Documentation=man:podman-generate-systemd(1) 6 | Wants=network-online.target 7 | After=network-online.target 8 | RequiresMountsFor=%t/containers 9 | {% if "nextcloud" in podman_user.containers %} 10 |
BindsTo=nextcloud-pod.service 11 | After=nextcloud-pod.service 12 | {% endif %} 13 | 14 | [Service] 15 | Environment=PODMAN_SYSTEMD_UNIT=%n 16 | Restart=on-failure 17 | RestartSec=5 18 | TimeoutStopSec=70 19 | ExecStart=/usr/bin/podman run \ 20 | --cidfile=%t/%n.ctr-id \ 21 | --cgroups=no-conmon \ 22 | --rm \ 23 | {% if "nextcloud" in podman_user.containers %} 24 | --pod-id-file %t/nextcloud-pod.pod-id \ 25 | {% endif %} 26 | --sdnotify=conmon \ 27 | --detach \ 28 | --replace \ 29 | --label io.containers.autoupdate=registry \ 30 | --name=postgres \ 31 | {% if "nextcloud" in podman_user.containers %} 32 | --user 33:33 \ 33 | {% else %} 34 | --user 1000:1000 \ 35 | {% endif %} 36 | --volume {{ podman_user.postgres_config_dir }}:/var/lib/postgresql/data:Z \ 37 | --env POSTGRES_DB={{ podman_user.db_name }} \ 38 | --env POSTGRES_USER={{ podman_user.db_user }} \ 39 | --env POSTGRES_PASSWORD='{{ podman_user.db_password }}' \ 40 | docker.io/library/postgres:15-alpine 41 | ExecStop=/usr/bin/podman stop \ 42 | --ignore \ 43 | --time=10 \ 44 | --cidfile=%t/%n.ctr-id 45 | ExecStopPost=/usr/bin/podman rm \ 46 | --force \ 47 | --ignore \ 48 | --time=10 \ 49 | --cidfile=%t/%n.ctr-id 50 | Type=notify 51 | NotifyAccess=all 52 | 53 | [Install] 54 | WantedBy=default.target 55 | -------------------------------------------------------------------------------- /roles/podman/templates/nextcloud-aio.container.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Nextcloud AIO Master Container 3 | Documentation=https://github.com/nextcloud/all-in-one/blob/main/docker-rootless.md 4 | After=local-fs.target 5 | Requires=podman.socket 6 | 7 | [Container] 8 | ContainerName=nextcloud-aio-mastercontainer 9 | Image=docker.io/nextcloud/all-in-one:latest 10 | AutoUpdate=registry 11 | Timezone={{ TZ }} 12 | SecurityLabelDisable=true 13 | 14 | HostName=nextcloud-aio 15 | Network=bridge 16 | PublishPort=127.0.0.1:{{ podman_user.nextcloud_web_admin_port }}:8080/tcp 17 | 18 | Volume=nextcloud_aio_mastercontainer:/mnt/docker-aio-config 19 | Volume=/run/user/{{ podman_user.uid }}/podman/podman.sock:/var/run/docker.sock:ro,Z 20 | 21 | 22 | Environment=APACHE_PORT={{ podman_user.nextcloud_webui_port }} 23 | Environment=APACHE_IP_BINDING=0.0.0.0 24 | Environment=WATCHTOWER_DOCKER_SOCKET_PATH=/run/user/{{ podman_user.uid }}/podman/podman.sock 25 | {% if podman_user.nextcloud_skip_domain_validation is defined and podman_user.nextcloud_skip_domain_validation %} 26 | Environment=SKIP_DOMAIN_VALIDATION=true 27 | {% endif %} 28 | {% if podman_user.nextcloud_backup_retention is defined %} 29 | Environment=BORG_RETENTION_POLICY="{{ podman_user.nextcloud_backup_retention }}" 30 | {% endif %} 31 | {% if podman_user.nextcloud_memory_limit is defined %} 32 | Environment=NEXTCLOUD_MEMORY_LIMIT={{ podman_user.nextcloud_memory_limit }} 33 | {% endif %} 34 | 35 | [Service] 36 | Restart=on-failure 37 | RestartSec=5 38 | RestartMaxDelaySec=1h 39 | RestartSteps=10 40 | 41 | # Extend Timeout to allow time to pull the image 42 | TimeoutStartSec=300 43 | 44 | [Install] 45 | WantedBy=default.target 46 | -------------------------------------------------------------------------------- /roles/nas/tasks/btrfs_scrub.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: btrfs_scrub_report.sh 3 | ansible.builtin.template: 4 | src: btrfs_scrub_report.sh.j2 5 | dest: /usr/local/bin/btrfs_scrub_report.sh 6 | owner: root 7 | group: root 8 | mode: '0700' 9 | 
become: true 10 | 11 | - name: Create btrfs-scrub@.service.d 12 | ansible.builtin.file: 13 | path: "/etc/systemd/system/btrfs-scrub@{{ item.escape }}.service.d" 14 | state: directory 15 | owner: root 16 | group: root 17 | mode: '0755' 18 | loop: "{{ btrfs_scrub_path }}" 19 | become: true 20 | 21 | - name: Create btrfs-scrub@.timer.d 22 | ansible.builtin.file: 23 | path: "/etc/systemd/system/btrfs-scrub@{{ item.escape }}.timer.d" 24 | state: directory 25 | owner: root 26 | group: root 27 | mode: '0755' 28 | loop: "{{ btrfs_scrub_path }}" 29 | become: true 30 | 31 | - name: Modify btrfs-scrub service to send email when finished 32 | ansible.builtin.template: 33 | src: btrfs_scrub_service_override.conf.j2 34 | dest: /etc/systemd/system/btrfs-scrub@{{ item.escape }}.service.d/override.conf 35 | owner: root 36 | group: root 37 | mode: '0644' 38 | loop: "{{ btrfs_scrub_path }}" 39 | become: true 40 | 41 | - name: Modify btrfs-scrub timer 42 | ansible.builtin.template: 43 | src: btrfs_scrub_timer_override.conf.j2 44 | dest: /etc/systemd/system/btrfs-scrub@{{ item.escape }}.timer.d/override.conf 45 | owner: root 46 | group: root 47 | mode: '0644' 48 | loop: "{{ btrfs_scrub_path }}" 49 | become: true 50 | 51 | - name: systemctl enable btrfs-scrub@.timer 52 | ansible.builtin.systemd: name=btrfs-scrub@{{ item.escape }}.timer enabled=true 53 | loop: "{{ btrfs_scrub_path }}" 54 | become: true 55 | -------------------------------------------------------------------------------- /roles/archlinux_common/README.md: -------------------------------------------------------------------------------- 1 | Common [post-installation configuration](https://wiki.archlinux.org/title/User:Bai-Chiang/Installation_guide_(full_disk_encryption,secure_boot,unified_kernel_image,btrfs)#Post-installation) for Arch Linux. 2 | 3 | ## Tasks 4 | - Set up [time synchronization](https://wiki.archlinux.org/title/Systemd-timesyncd). 5 | - Enable [pacman parallel downloads](https://wiki.archlinux.org/title/Pacman#Enabling_parallel_downloads). 6 | - Enable [reflector](https://wiki.archlinux.org/title/Reflector) to auto-update the pacman mirror list. 7 | - Enable [paccache](https://wiki.archlinux.org/title/Pacman#Cleaning_the_package_cache) to auto-clean the pacman package cache. 8 | - Enable [Periodic TRIM](https://wiki.archlinux.org/title/Solid_state_drive#Periodic_TRIM) for SSDs. 9 | - Enable [native build](https://wiki.archlinux.org/title/Makepkg#Building_optimized_binaries), [parallel compilation](https://wiki.archlinux.org/title/Makepkg#Parallel_compilation), and `-O3` optimization when building AUR packages. 10 | - Set up [snapper](https://wiki.archlinux.org/title/Snapper) for the root partition if using btrfs. 11 | 12 | 13 | ## Variables 14 | ```yaml 15 | # Specify `reflector --country` 16 | # https://man.archlinux.org/man/reflector.1#EXAMPLES 17 | # Restrict pacman mirrors to selected countries. Countries may be given by name or country code, or a mix of both. 18 | # Use `reflector --list-countries` to get a list of available countries and country codes.
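# With the example value below, the role would end up invoking something like
#   reflector --country us,France,Germany --save /etc/pacman.d/mirrorlist
# (illustrative only; the exact flags are whatever the role passes to reflector)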
19 | reflector_country: us,France,Germany 20 | 21 | 22 | # Snapshot limits 23 | # https://wiki.archlinux.org/title/Snapper#Set_snapshot_limits 24 | # Default values given below 25 | #snapper_root_hourly: 5 26 | #snapper_root_daily: 7 27 | #snapper_root_weekly: 0 28 | #snapper_root_monthly: 0 29 | #snapper_root_yearly: 0 30 | ``` 31 | 32 | -------------------------------------------------------------------------------- /roles/gui/tasks/Fedora.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install shell packages 3 | ansible.builtin.dnf: name={{ shell_pkgs }} state=present 4 | become: true 5 | 6 | - name: change shell 7 | ansible.builtin.user: 8 | name: "{{ ansible_facts.user_id }}" 9 | shell: "{{ default_shell }}" 10 | become: true 11 | 12 | # Create symlinks so zsh-syntax-highlighting and zsh-autosuggestions are at the same location as on Arch Linux 13 | - name: Create /usr/share/zsh/plugins directory 14 | ansible.builtin.file: 15 | path: /usr/share/zsh/plugins 16 | state: directory 17 | become: true 18 | when: '"zsh" in shell_pkgs' 19 | 20 | - name: symlink zsh-syntax-highlighting and zsh-autosuggestions 21 | ansible.builtin.file: 22 | src: "{{ item.src }}" 23 | dest: "{{ item.dest }}" 24 | state: link 25 | become: true 26 | loop: 27 | - { src: /usr/share/zsh-syntax-highlighting, dest: /usr/share/zsh/plugins/zsh-syntax-highlighting } 28 | - { src: /usr/share/zsh-autosuggestions, dest: /usr/share/zsh/plugins/zsh-autosuggestions } 29 | when: '"zsh" in shell_pkgs' 30 | 31 | - name: Install WM packages 32 | ansible.builtin.dnf: name={{ wm_pkgs }} state=present 33 | become: true 34 | 35 | - name: Install other packages 36 | ansible.builtin.dnf: name={{ other_pkgs }} state=present 37 | become: true 38 | 39 | - include_tasks: dotfiles.yml 40 | 41 | - include_tasks: flatpak.yml 42 | 43 | - name: Create screenshots directories 44 | ansible.builtin.file: 45 | path: "{{ ansible_facts.user_dir }}/screenshots" 46 | state: directory 47 | when: '"grim" in wm_pkgs' 48 | 49 | - name: Create Ranger image previews cache directory 50 | ansible.builtin.file: 51 | path: "{{ ansible_facts.user_dir }}/.cache/ranger" 52 | state: directory 53 | when: '"ranger" in other_pkgs' 54 | -------------------------------------------------------------------------------- /roles/gui/tasks/flatpak.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: pacman -S flatpak 3 | community.general.pacman: name=flatpak state=present 4 | become: true 5 | when: ansible_facts["distribution"] == "Archlinux" 6 | 7 | - name: dnf install flatpak 8 | ansible.builtin.dnf: name=flatpak state=present 9 | become: true 10 | when: ansible_facts["distribution"] == "Fedora" 11 | 12 | - name: Add flathub repo 13 | community.general.flatpak_remote: 14 | name: flathub 15 | state: present 16 | flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo 17 | method: user 18 | ignore_errors: "{{ ansible_check_mode }}" 19 | 20 | - name: Install flatpak packages 21 | community.general.flatpak: 22 | name: "{{ flatpak_pkgs }}" 23 | state: present 24 | remote: flathub 25 | method: user 26 | ignore_errors: "{{ ansible_check_mode }}" 27 | 28 | - name: setup systemd user directory 29 | ansible.builtin.file: 30 | path: "/home/{{ ansible_facts.user_id }}/.config/systemd/user" 31 | state: directory 32 | owner: "{{ ansible_facts.user_id }}" 33 | group: "{{ ansible_facts.user_id }}" 34 | mode: '0700' 35 | 36 | - name: flatpak-update.service 37 |
ansible.builtin.copy: 38 | src: "{{ item }}" 39 | dest: "/home/{{ ansible_facts.user_id }}/.config/systemd/user/{{ item }}" 40 | owner: "{{ ansible_facts.user_id }}" 41 | group: "{{ ansible_facts.user_id }}" 42 | mode: '0600' 43 | loop: 44 | - flatpak-update.service 45 | - flatpak-update.timer 46 | - failure-notification@.service 47 | 48 | - name: systemctl --user daemon-reload 49 | ansible.builtin.systemd: daemon_reload=true scope=user 50 | 51 | - name: systemctl enable --user flatpak-update.timer 52 | ansible.builtin.systemd: name=flatpak-update.timer enabled=true scope=user 53 | ignore_errors: "{{ ansible_check_mode }}" 54 | 55 | -------------------------------------------------------------------------------- /roles/auto-update/tasks/Fedora.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: dnf install dnf-automatic 3 | ansible.builtin.dnf: name=dnf-automatic state=present 4 | become: true 5 | 6 | - name: Check file /etc/dnf/automatic.conf 7 | ansible.builtin.stat: 8 | path: /etc/dnf/automatic.conf 9 | register: etc_dnf_automatic_conf 10 | 11 | - name: Copy /usr/share/dnf5/dnf5-plugins/automatic.conf to /etc/dnf/automatic.conf 12 | ansible.builtin.copy: 13 | src: /usr/share/dnf5/dnf5-plugins/automatic.conf 14 | dest: /etc/dnf/automatic.conf 15 | remote_src: true 16 | become: true 17 | when: not etc_dnf_automatic_conf.stat.exists 18 | 19 | - name: Edit /etc/dnf/automatic.conf 20 | ansible.builtin.lineinfile: 21 | path: /etc/dnf/automatic.conf 22 | regexp: "{{ item.regexp }}" 23 | line: "{{ item.line }}" 24 | become: true 25 | loop: 26 | - { regexp: '^apply_updates =', line: "apply_updates = yes" } 27 | - { regexp: '^reboot =', line: "reboot = when-needed" } 28 | 29 | - name: Create /etc/systemd/system/dnf5-automatic.timer.d/ directory 30 | ansible.builtin.file: 31 | path: /etc/systemd/system/dnf5-automatic.timer.d 32 | state: directory 33 | owner: root 34 | group: root 35 | mode: '0755' 36 | become: true 37 | 38 | - name: Override default dnf5-automatic.timer 39 | ansible.builtin.template: 40 | src: dnf5-automatic-timer-override.conf.j2 41 | dest: /etc/systemd/system/dnf5-automatic.timer.d/override.conf 42 | owner: root 43 | group: root 44 | mode: '0644' 45 | become: true 46 | when: auto_update_time is defined 47 | 48 | - name: systemctl daemon-reload 49 | ansible.builtin.systemd: daemon_reload=true 50 | become: true 51 | 52 | - name: systemctl enable dnf5-automatic.timer 53 | ansible.builtin.systemd: name=dnf5-automatic.timer enabled=true state=started 54 | become: true 55 | 56 | -------------------------------------------------------------------------------- /roles/podman/legacy/nextcloud.service.j2: -------------------------------------------------------------------------------- 1 | # nextcloud.service 2 | 3 | [Unit] 4 | Description=Podman nextcloud.service 5 | Documentation=man:podman-generate-systemd(1) 6 | Wants=network-online.target 7 | After=network-online.target 8 | RequiresMountsFor=%t/containers 9 | BindsTo=nextcloud-pod.service 10 | After=nextcloud-pod.service 11 | 12 | [Service] 13 | Environment=PODMAN_SYSTEMD_UNIT=%n 14 | Restart=on-failure 15 | RestartSec=5 16 | TimeoutStopSec=70 17 | #ExecStart=/usr/bin/podman run \ 18 | # --cidfile=%t/%n.ctr-id \ 19 | # --cgroups=no-conmon \ 20 | # --rm \ 21 | # --pod-id-file %t/nextcloud-pod.pod-id \ 22 | # --sdnotify=conmon \ 23 | # --detach \ 24 | # --replace \ 25 | # --label io.containers.autoupdate=registry \ 26 | # --name=nextcloud \ 27 | # --env PUID=1000 \ 28 | # --env 
PGID=1000 \ 29 | # --env TZ={{ TZ }} \ 30 | # --volume {{ podman_user.nextcloud_config_dir }}:/config:Z \ 31 | # --volume {{ podman_user.nextcloud_data_dir }}:/data:Z \ 32 | # lscr.io/linuxserver/nextcloud:latest 33 | ExecStart=/usr/bin/podman run \ 34 | --cidfile=%t/%n.ctr-id \ 35 | --cgroups=no-conmon \ 36 | --rm \ 37 | --pod-id-file %t/nextcloud-pod.pod-id \ 38 | --sdnotify=conmon \ 39 | --detach \ 40 | --replace \ 41 | --label io.containers.autoupdate=registry \ 42 | --sysctl net.ipv4.ip_unprivileged_port_start=80 \ 43 | --name=nextcloud \ 44 | --volume {{ podman_user.nextcloud_data_dir }}:/var/www/html/data:Z \ 45 | --volume {{ podman_user.nextcloud_config_dir }}:/var/www/html:Z \ 46 | docker.io/library/nextcloud:latest 47 | ExecStop=/usr/bin/podman stop \ 48 | --ignore \ 49 | --time=10 \ 50 | --cidfile=%t/%n.ctr-id 51 | ExecStopPost=/usr/bin/podman rm \ 52 | --force \ 53 | --ignore \ 54 | --time=10 \ 55 | --cidfile=%t/%n.ctr-id 56 | Type=notify 57 | NotifyAccess=all 58 | 59 | [Install] 60 | WantedBy=default.target 61 | -------------------------------------------------------------------------------- /roles/podman/tasks/podman_install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: pacman -S podman aardvark-dns 3 | community.general.pacman: 4 | name: 5 | - podman 6 | - aardvark-dns 7 | state: present 8 | become: true 9 | when: ansible_facts["distribution"] == "Archlinux" 10 | 11 | - name: apt install podman 12 | ansible.builtin.apt: name=podman state=present 13 | become: true 14 | when: ansible_facts["distribution"] == "Debian" 15 | 16 | - name: dnf install podman 17 | ansible.builtin.dnf: name=podman state=present 18 | become: true 19 | when: ansible_facts["distribution"] == "Fedora" 20 | 21 | # install the acl package so Ansible can become another unprivileged user 22 | - name: apt install acl 23 | ansible.builtin.apt: name=acl state=present 24 | become: true 25 | when: ansible_facts["distribution"] == "Debian" 26 | 27 | - name: dnf install acl 28 | ansible.builtin.dnf: name=acl state=present 29 | become: true 30 | when: ansible_facts["distribution"] == "Fedora" 31 | 32 | - name: dnf install policycoreutils-python-utils 33 | ansible.builtin.dnf: name=policycoreutils-python-utils state=present 34 | become: true 35 | when: ansible_facts["distribution"] == "Fedora" 36 | 37 | #- name: Get file system type of / 38 | # ansible.builtin.command: stat --file-system --format=%T / 39 | # become: true 40 | # register: root_fstype 41 | # changed_when: false 42 | # check_mode: false 43 | # 44 | #- name: Get file system type of ~/ 45 | # ansible.builtin.command: "stat --file-system --format=%T {{ ansible_facts.user_dir }}" 46 | # become: true 47 | # register: home_fstype 48 | # changed_when: false 49 | # check_mode: false 50 | # 51 | #- name: Set container storage driver 52 | # ansible.builtin.lineinfile: 53 | # path: /etc/containers/storage.conf 54 | # regexp: '^driver\s*=' 55 | # line: 'driver = "btrfs"' 56 | # become: true 57 | # when: 58 | # - root_fstype.stdout == 'btrfs' 59 | # - home_fstype.stdout == 'btrfs' 60 | 61 | -------------------------------------------------------------------------------- /roles/podman/legacy/homeassistant.service.j2: -------------------------------------------------------------------------------- 1 | # homeassistant.service 2 | 3 | [Unit] 4 | Description=Podman homeassistant.service 5 | Documentation=man:podman-generate-systemd(1) 6 | Wants=network-online.target 7 | After=network-online.target 8 |
RequiresMountsFor=%t/containers 9 | 10 | [Service] 11 | Environment=PODMAN_SYSTEMD_UNIT=%n 12 | Restart=on-failure 13 | TimeoutStopSec=70 14 | ExecStartPre=/bin/rm \ 15 | -f %t/%n.ctr-id 16 | #ExecStart=/usr/bin/podman run \ 17 | # --cidfile=%t/%n.ctr-id \ 18 | # --cgroups=no-conmon \ 19 | # --rm \ 20 | # --sdnotify=conmon \ 21 | # --detach \ 22 | # --replace \ 23 | # --label io.containers.autoupdate=registry \ 24 | # --uidmap 1000:0:1 \ 25 | # --uidmap 0:1:1000 \ 26 | # --uidmap 1001:1001:64536 \ 27 | # --env PUID=1000 \ 28 | # --env PGID=1000 \ 29 | # --env TZ={{ TZ }} \ 30 | # --name=homeassistant \ 31 | # --hostname=homeassistant \ 32 | # --publish 8123:8123/tcp \ 33 | # --volume {{ podman_user.homeassistant_config_dir }}:/config:Z \ 34 | # lscr.io/linuxserver/homeassistant:latest 35 | ExecStart=/usr/bin/podman run \ 36 | --cidfile=%t/%n.ctr-id \ 37 | --cgroups=no-conmon \ 38 | --rm \ 39 | --sdnotify=conmon \ 40 | --detach \ 41 | --replace \ 42 | --label io.containers.autoupdate=registry \ 43 | --userns keep-id:uid=1000,gid=1000 \ 44 | --user 1000:1000 \ 45 | --name=homeassistant \ 46 | --hostname=homeassistant \ 47 | --publish 8123:8123/tcp \ 48 | --env TZ={{ TZ }} \ 49 | --volume {{ podman_user.homeassistant_config_dir }}:/config:Z \ 50 | ghcr.io/home-assistant/home-assistant:stable 51 | ExecStop=/usr/bin/podman stop \ 52 | --ignore \ 53 | --time=10 \ 54 | --cidfile=%t/%n.ctr-id 55 | ExecStopPost=/usr/bin/podman rm \ 56 | --force \ 57 | --ignore \ 58 | --time=10 \ 59 | --cidfile=%t/%n.ctr-id 60 | Type=notify 61 | NotifyAccess=all 62 | 63 | 64 | [Install] 65 | WantedBy=default.target 66 | -------------------------------------------------------------------------------- /roles/openssh/tasks/homed.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: | 3 | Force both password and public key authentication for systemd-homed setup. While the user is unlocked run 4 | homectl update username --ssh-authorized-keys=@/path/to/mounted/home/.ssh/authorized_keys 5 | to enroll keys. 
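  # Note: listing both methods in AuthenticationMethods means a client must present
  # a valid public key AND the account password to log in.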
6 | ansible.builtin.lineinfile: 7 | path: /etc/ssh/sshd_config 8 | regexp: "{{ item.regexp }}" 9 | line: "{{ item.line }}" 10 | insertafter: "{{ item.insertafter }}" 11 | validate: /usr/sbin/sshd -T -f %s 12 | loop: 13 | - { regexp: '^PasswordAuthentication ', line: PasswordAuthentication yes, insertafter: '#\s*PasswordAuthentication ' } 14 | - { regexp: '^PubkeyAuthentication ', line: PubkeyAuthentication yes, insertafter: '#\s*PubkeyAuthentication ' } 15 | - { regexp: '^AuthenticationMethods ', line: 'AuthenticationMethods publickey,password', insertafter: '^PasswordAuthentication ' } 16 | - { regexp: '^AuthorizedKeysCommandUser ', line: 'AuthorizedKeysCommandUser root', insertafter: '^AuthorizedKeysCommandUser ' } 17 | - { regexp: '^AuthorizedKeysCommand ', line: 'AuthorizedKeysCommand /usr/bin/userdbctl ssh-authorized-keys %u', insertafter: '^AuthorizedKeysCommand ' } 18 | become: true 19 | 20 | - name: Check whether ~/.ssh/authorized_keys exists 21 | ansible.builtin.stat: 22 | path: "{{ ansible_facts.user_dir }}/.ssh/authorized_keys" 23 | register: ssh_authorized_keys_file 24 | 25 | - name: Get all public keys in ~/.ssh/authorized_keys 26 | ansible.builtin.shell: "cat {{ ansible_facts.user_dir }}/.ssh/authorized_keys" 27 | changed_when: false 28 | register: ssh_authorized_keys 29 | when: ssh_authorized_keys_file.stat.exists 30 | 31 | - name: Enroll authorized public keys with homectl if the file exists 32 | community.general.homectl: 33 | name: "{{ ansible_facts.user_id }}" 34 | password: "{{ ansible_become_password }}" 35 | sshkeys: "{{ ssh_authorized_keys.stdout }}" 36 | become: true 37 | when: ssh_authorized_keys_file.stat.exists 38 | -------------------------------------------------------------------------------- /roles/podman/legacy/swag.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: swag container config directory 3 | ansible.builtin.file: 4 | path: "{{ podman_user.swag_config_dir }}" 5 | state: directory 6 | owner: "{{ podman_user.name}}" 7 | group: "{{ podman_user.name }}" 8 | mode: '0700' 9 | become: true 10 | 11 | - name: Allow rootless podman to access port 443 12 | ansible.builtin.copy: 13 | content: | 14 | net.ipv4.ip_unprivileged_port_start=443 15 | dest: /etc/sysctl.d/unprivileged_port_start.conf 16 | owner: root 17 | group: root 18 | mode: '0644' 19 | become: true 20 | register: unprivileged_port_start 21 | 22 | - name: sysctl net.ipv4.ip_unprivileged_port_start=443 23 | ansible.builtin.command: sysctl net.ipv4.ip_unprivileged_port_start=443 24 | become: true 25 | when: unprivileged_port_start is changed 26 | 27 | - name: swag.service 28 | ansible.builtin.template: 29 | src: swag.service.j2 30 | dest: "/home/{{ podman_user.name }}/.config/systemd/user/swag.service" 31 | owner: "{{ podman_user.name}}" 32 | group: "{{ podman_user.name }}" 33 | mode: '0600' 34 | become: true 35 | become_user: "{{ podman_user.name }}" 36 | 37 | - name: systemctl --user daemon-reload 38 | ansible.builtin.systemd: daemon_reload=true scope=user 39 | become: true 40 | become_user: "{{ podman_user.name }}" 41 | 42 | - name: systemctl --user enable --now swag.service 43 | ansible.builtin.systemd: name=swag.service enabled=true state=started scope=user 44 | become: true 45 | become_user: "{{ podman_user.name }}" 46 | 47 | - name: Set firewall rules for https 48 | ansible.posix.firewalld: 49 | rich_rule: rule family="ipv4" source address="{{ item }}" service name="https" accept 50 | #zone: "{{ firewalld_default_zone }}" 51 | permanent: true 52 |
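    # together with `immediate` below, the rule is written to the permanent
    # config and applied to the running firewall right away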
immediate: true 53 | state: enabled 54 | loop: "{{ podman_user.https_accept_source_ipv4 }}" 55 | become: true 56 | when: 57 | - podman_user.https_accept_source_ipv4 is defined 58 | -------------------------------------------------------------------------------- /roles/podman/legacy/swag.service.j2: -------------------------------------------------------------------------------- 1 | # swag.service 2 | 3 | [Unit] 4 | Description=Podman swag.service 5 | Documentation=man:podman-generate-systemd(1) 6 | Wants=network-online.target 7 | After=network-online.target nss-lookup.target 8 | RequiresMountsFor=%t/containers 9 | 10 | [Service] 11 | Environment=PODMAN_SYSTEMD_UNIT=%n 12 | Restart=on-failure 13 | TimeoutStopSec=70 14 | ExecStart=/usr/bin/podman run \ 15 | --cidfile=%t/%n.ctr-id \ 16 | --cgroups=no-conmon \ 17 | --rm \ 18 | --sdnotify=conmon \ 19 | --detach \ 20 | --replace \ 21 | --label io.containers.autoupdate=registry \ 22 | --uidmap 1000:0:1 \ 23 | --uidmap 0:1:1000 \ 24 | --uidmap 1001:1001:64536 \ 25 | --name=swag \ 26 | --cap-add=NET_ADMIN \ 27 | --env PUID=1000 \ 28 | --env PGID=1000 \ 29 | --env TZ={{ TZ }} \ 30 | --publish 443:443/tcp \ 31 | --volume {{ podman_user.swag_config_dir }}:/config:Z \ 32 | --env URL={{ podman_user.swag_domain }} \ 33 | --env VALIDATION=dns \ 34 | --env SUBDOMAINS={{ podman_user.swag_subdomains }} \ 35 | --env CERTPROVIDER=letsencrypt \ 36 | --env DNSPLUGIN=cloudflare \ 37 | --env EMAIL={{ podman_user.swag_email }} \ 38 | --env ONLY_SUBDOMAINS=true \ 39 | --env STAGING=false \ 40 | --env PROPAGATION=60 \ 41 | {% if podman_user.swag_mods is defined %} 42 | --env DOCKER_MODS='{{ podman_user.swag_mods }}' \ 43 | {% endif %} 44 | {% if podman_user.swag_CROWDSEC_API_KEY is defined %} 45 | --env CROWDSEC_API_KEY='{{ podman_user.swag_CROWDSEC_API_KEY }}' \ 46 | --env CROWDSEC_LAPI_URL='{{ podman_user.swag_CROWDSEC_LAPI_URL }}' \ 47 | {% endif %} 48 | lscr.io/linuxserver/swag:latest 49 | ExecStop=/usr/bin/podman stop \ 50 | --ignore \ 51 | --time=10 \ 52 | --cidfile=%t/%n.ctr-id 53 | ExecStopPost=/usr/bin/podman rm \ 54 | --force \ 55 | --ignore \ 56 | --time=10 \ 57 | --cidfile=%t/%n.ctr-id 58 | Type=notify 59 | NotifyAccess=all 60 | 61 | [Install] 62 | WantedBy=default.target 63 | -------------------------------------------------------------------------------- /roles/systemd_networkd/README.md: -------------------------------------------------------------------------------- 1 | Set up [systemd-networkd](https://wiki.archlinux.org/title/Systemd-networkd). 2 | For a single-NIC static IP setup, specify the static IP address, gateway address, and DNS server address. 3 | For an advanced setup, it will copy all configuration files inside `{{ networkd_configs_dir }}` to `/etc/systemd/network`. 4 | The configuration files will have permission `640` with owner `root` and group `systemd-network`. 5 | This prevents leaking private keys when setting up WireGuard with systemd-networkd. 6 | 7 | ## Tasks 8 | ### Arch Linux 9 | - Remove the default configuration file created by the [arch_install.sh](arch_install.sh) script. 10 | - Create a simple static IP configuration (if the `{{ networkd_configs_dir }}` variable is undefined) 11 | or copy all configuration files under `{{ networkd_configs_dir }}` to `/etc/systemd/network`. 12 | 13 | ### Fedora 14 | - Install `systemd-networkd` and enable `systemd-resolved.service`.
15 | - Disable `NetworkManager.service`. 16 | - Create a simple static IP configuration (if the `{{ networkd_configs_dir }}` variable is undefined) 17 | or copy all configuration files under `{{ networkd_configs_dir }}` to `/etc/systemd/network`. 18 | 19 | ### Debian 20 | - Install `systemd-resolved`. 21 | - Remove the `/etc/network/interfaces` configuration. 22 | - Create a simple static IP configuration (if the `{{ networkd_configs_dir }}` variable is undefined) 23 | or copy all configuration files under `{{ networkd_configs_dir }}` to `/etc/systemd/network`. 24 | 25 | 26 | ## Variables 27 | ### Single NIC static IP 28 | ```yaml 29 | networkd_static_ip: 30 | # NIC name 31 | - nic: enp1s0 32 | 33 | # IP address with its prefix length 34 | ip: 192.168.122.2/24 35 | 36 | # Gateway address 37 | gateway: 192.168.122.1 38 | 39 | # DNS server address 40 | dns: 9.9.9.9 41 | ``` 42 | 43 | ### Advanced setup 44 | ```yaml 45 | # Copy all configuration files under this directory to /etc/systemd/network 46 | networkd_configs_dir: "files/systemd-networkd/" 47 | ``` 48 | 49 | -------------------------------------------------------------------------------- /roles/podman/tasks/unifi-init.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create a tmp directory to store database credentials during initial setup 3 | ansible.builtin.tempfile: 4 | state: directory 5 | become: true 6 | become_user: "{{ podman_user.name }}" 7 | register: unifi_db_tmp_dir 8 | 9 | - name: init-mongo.js for initial setup 10 | ansible.builtin.template: 11 | src: "unifi-db-init-mongo.js.j2" 12 | dest: "{{ unifi_db_tmp_dir.path }}/init-mongo.js" 13 | owner: "{{ podman_user.name}}" 14 | group: "{{ podman_user.name }}" 15 | mode: '0644' 16 | become: true 17 | 18 | - name: unifi.pod, unifi-db.container and unifi.container 19 | ansible.builtin.template: 20 | src: "{{ item }}.j2" 21 | dest: "/home/{{ podman_user.name }}/.config/containers/systemd/{{ item }}" 22 | owner: "{{ podman_user.name}}" 23 | group: "{{ podman_user.name }}" 24 | mode: '0600' 25 | become: true 26 | become_user: "{{ podman_user.name }}" 27 | loop: 28 | - unifi.pod 29 | - unifi.container 30 | - unifi-db.container 31 | 32 | - name: systemctl --user daemon-reload 33 | ansible.builtin.systemd: daemon_reload=true scope=user 34 | become: true 35 | become_user: "{{ podman_user.name }}" 36 | 37 | - name: systemctl --user start unifi-pod.service 38 | ansible.builtin.systemd: name=unifi-pod state=started scope=user 39 | become: true 40 | become_user: "{{ podman_user.name }}" 41 | ignore_errors: "{{ ansible_check_mode }}" 42 | 43 | - name: systemctl --user start unifi-db.service 44 | ansible.builtin.systemd: name=unifi-db state=started scope=user 45 | become: true 46 | become_user: "{{ podman_user.name }}" 47 | ignore_errors: "{{ ansible_check_mode }}" 48 | 49 | - name: systemctl --user start unifi.service 50 | ansible.builtin.systemd: name=unifi state=started scope=user 51 | become: true 52 | become_user: "{{ podman_user.name }}" 53 | ignore_errors: "{{ ansible_check_mode }}" 54 | 55 | 56 | - name: Reset variable unifi_database_dir 57 | ansible.builtin.stat: 58 | path: "{{ podman_user.unifi_config_dir }}/unifi-db" 59 | register: unifi_database_dir 60 | 61 | -------------------------------------------------------------------------------- /fedora_post_install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | dnf install -y policycoreutils-python-utils python3-libdnf5 
python3-dnf 4 | 5 | # set ssh port 6 | echo "ssh port? (22)" 7 | read ssh_port 8 | ssh_port="${ssh_port:-22}" 9 | if [[ $ssh_port -ne 22 ]] ; then 10 | sed -i "s/^#Port.*/Port ${ssh_port}/" /etc/ssh/sshd_config 11 | semanage port -a -t ssh_port_t -p tcp $ssh_port 12 | sed "/port=/s/port=\"22\"/port=\"${ssh_port}\"/" /usr/lib/firewalld/services/ssh.xml > /etc/firewalld/services/ssh.xml 13 | firewall-cmd --reload 14 | fi 15 | 16 | read -p "Do you want to set default firewall zone to drop? [y/N] " firewall_drop 17 | firewall_drop="${firewall_drop:-n}" 18 | firewall_drop="${firewall_drop,,}" 19 | if [[ $firewall_drop == y ]] ; then 20 | firewall-cmd --set-default-zone=drop 21 | 22 | read -p "Allow ICMP echo-request and echo-reply (respond ping)? [Y/n] " allow_ping 23 | allow_ping="${allow_ping:-y}" 24 | allow_ping="${allow_ping,,}" 25 | if [[ $allow_ping == y ]] ; then 26 | firewall-cmd --permanent --zone=drop --add-icmp-block-inversion 27 | echo -e "\nallow ping source ip address (example 192.168.1.0/24) empty to allow all" 28 | read ping_source 29 | if [[ -n $ping_source ]] ; then 30 | firewall-cmd --permanent --zone=drop --add-rich-rule="rule family='ipv4' source address='${ping_source}' icmp-type name='echo-request' accept" 31 | firewall-cmd --permanent --zone=drop --add-rich-rule="rule family='ipv4' source address='${ping_source}' icmp-type name='echo-reply' accept" 32 | else 33 | firewall-cmd --permanent --zone=drop --add-icmp-block=echo-request 34 | firewall-cmd --permanent --zone=drop --add-icmp-block=echo-reply 35 | fi 36 | fi 37 | fi 38 | 39 | echo -e "\nssh allow source ip address (example 192.168.1.0/24), empty to skip" 40 | read ssh_source 41 | if [[ -n $ssh_source ]]; then 42 | firewall-cmd --permanent --add-rich-rule="rule family='ipv4' source address='${ssh_source}' service name='ssh' accept" 43 | firewall-cmd --permanent --remove-service ssh 44 | fi 45 | firewall-cmd --reload 46 | 47 | -------------------------------------------------------------------------------- /roles/podman/legacy/jellyfin.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: jellyfin container config and cache directory 3 | ansible.builtin.file: 4 | path: "{{ item }}" 5 | state: directory 6 | owner: "{{ podman_user.name }}" 7 | group: "{{ podman_user.name }}" 8 | mode: '0700' 9 | become: true 10 | loop: 11 | - "{{ podman_user.jellyfin_config_dir }}" 12 | - "{{ podman_user.jellyfin_cache_dir }}" 13 | 14 | - name: jellyfin.service 15 | ansible.builtin.template: 16 | src: jellyfin.service.j2 17 | dest: "/home/{{ podman_user.name }}/.config/systemd/user/jellyfin.service" 18 | owner: "{{ podman_user.name }}" 19 | group: "{{ podman_user.name }}" 20 | mode: '0600' 21 | become: true 22 | become_user: "{{ podman_user.name }}" 23 | 24 | - name: systemctl --user daemon-reload 25 | ansible.builtin.systemd: daemon_reload=true scope=user 26 | become: true 27 | become_user: "{{ podman_user.name }}" 28 | 29 | - name: systemctl --user enable --now jellyfin.service 30 | ansible.builtin.systemd: name=jellyfin enabled=true state=started scope=user 31 | become: true 32 | become_user: "{{ podman_user.name }}" 33 | 34 | # 8096/tcp jellyfin port 35 | - name: add jellyfin firewalld service file jellyfin.xml 36 | ansible.builtin.copy: 37 | content: | 38 | <?xml version="1.0" encoding="utf-8"?> 39 | <service> 40 | <short>Jellyfin</short> 41 | <description>Jellyfin</description> 42 | <port protocol="tcp" port="8096"/> 43 | </service> 44 | dest: /etc/firewalld/services/jellyfin.xml 45 | owner: root 46 | group: root 47 | mode: '0644' 48 | become: true 49 | register: jellyfin_firewalld_file 50 | 51 | - name: Reload 
firewalld when jellyfin.xml changed 52 | ansible.builtin.command: firewall-cmd --reload 53 | become: true 54 | when: jellyfin_firewalld_file.changed 55 | 56 | - name: Set firewall rules for jellyfin listening port (TCP) 57 | ansible.posix.firewalld: 58 | rich_rule: rule family="ipv4" source address="{{ reverse_proxy_ipv4 }}" service name="jellyfin" accept 59 | #zone: "{{ firewalld_default_zone }}" 60 | permanent: true 61 | immediate: true 62 | state: enabled 63 | become: true 64 | when: reverse_proxy_ipv4 is defined 65 | -------------------------------------------------------------------------------- /roles/podman/legacy/homeassistant.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: homeassistant container config and data directory 3 | ansible.builtin.file: 4 | path: "{{ podman_user.homeassistant_config_dir }}" 5 | state: directory 6 | owner: "{{ podman_user.name }}" 7 | group: "{{ podman_user.name }}" 8 | mode: '0700' 9 | become: true 10 | 11 | - name: homeassistant.service 12 | ansible.builtin.template: 13 | src: homeassistant.service.j2 14 | dest: "/home/{{ podman_user.name }}/.config/systemd/user/homeassistant.service" 15 | owner: "{{ podman_user.name }}" 16 | group: "{{ podman_user.name }}" 17 | mode: '0600' 18 | become: true 19 | become_user: "{{ podman_user.name }}" 20 | 21 | - name: systemctl --user daemon-reload 22 | ansible.builtin.systemd: daemon_reload=true scope=user 23 | become: true 24 | become_user: "{{ podman_user.name }}" 25 | 26 | - name: systemctl --user enable --now homeassistant.service 27 | ansible.builtin.systemd: name=homeassistant enabled=true state=started scope=user 28 | become: true 29 | become_user: "{{ podman_user.name }}" 30 | 31 | 32 | # 8123/tcp homeassistant port 33 | - name: add homeassistant firewalld service file homeassistant.xml 34 | ansible.builtin.copy: 35 | content: | 36 | <?xml version="1.0" encoding="utf-8"?> 37 | <service> 38 | <short>Homeassistant</short> 39 | <description>Homeassistant</description> 40 | <port protocol="tcp" port="8123"/> 41 | </service> 42 | dest: /etc/firewalld/services/homeassistant.xml 43 | owner: root 44 | group: root 45 | mode: '0644' 46 | become: true 47 | register: homeassistant_firewalld_file 48 | 49 | - name: Reload firewalld when homeassistant.xml changed 50 | ansible.builtin.command: firewall-cmd --reload 51 | become: true 52 | when: homeassistant_firewalld_file.changed 53 | 54 | - name: Set firewall rules for homeassistant listening port (TCP) 55 | ansible.posix.firewalld: 56 | rich_rule: rule family="ipv4" source address="{{ reverse_proxy_ipv4 }}" service name="homeassistant" accept 57 | #zone: "{{ firewalld_default_zone }}" 58 | permanent: true 59 | immediate: true 60 | state: enabled 61 | become: true 62 | when: reverse_proxy_ipv4 is defined 63 | -------------------------------------------------------------------------------- /roles/podman/tasks/autobrr.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # # Time zone 3 | # TZ: "UTC" 4 | # 5 | # # Run podman under these users. 6 | # # If the user does not exist it will create a new user. 
7 | # # To manage systemd services under different users add `-M username@` to the `systemctl` command, 8 | # # for example: 9 | # # sudo systemctl --user -M tux@ status autobrr.service 10 | # # To view journal under different user with UID 10000 11 | # # sudo journalctl _UID=10000 _SYSTEMD_USER_UNIT=autobrr.service 12 | # podman_users: 13 | # 14 | # # Run autobrr under user `tux` 15 | # - name: tux 16 | # 17 | # # UID of the user 18 | # uid: 10000 19 | # 20 | # # Enable lingering or not 21 | # enable_lingering: true 22 | # 23 | # # How often to clean up old podman images/containers. 24 | # # This is the OnCalendar= option in podman-system-prune.timer 25 | # podman_system_prune_timer: daily 26 | # 27 | # # List of containers that will run under user `tux` 28 | # containers: 29 | # - autobrr 30 | # 31 | # # autobrr Web UI port 32 | # autobrr_webui_port: 7474 33 | # 34 | # # Path to store autobrr container config 35 | # autobrr_config_dir: "/path/to/container/config/autobrr" 36 | 37 | - name: autobrr container config directory 38 | ansible.builtin.file: 39 | path: "{{ podman_user.autobrr_config_dir }}" 40 | state: directory 41 | owner: "{{ podman_user.name }}" 42 | group: "{{ podman_user.name }}" 43 | mode: '0700' 44 | become: true 45 | 46 | - name: autobrr.container 47 | ansible.builtin.template: 48 | src: autobrr.container.j2 49 | dest: "/home/{{ podman_user.name }}/.config/containers/systemd/autobrr.container" 50 | owner: "{{ podman_user.name }}" 51 | group: "{{ podman_user.name }}" 52 | mode: '0600' 53 | become: true 54 | become_user: "{{ podman_user.name }}" 55 | 56 | - name: systemctl --user daemon-reload 57 | ansible.builtin.systemd: daemon_reload=true scope=user 58 | become: true 59 | become_user: "{{ podman_user.name }}" 60 | 61 | - name: systemctl --user enable --now autobrr.service 62 | ansible.builtin.systemd: name=autobrr.service enabled=true state=started scope=user 63 | become: true 64 | become_user: "{{ podman_user.name }}" 65 | ignore_errors: "{{ ansible_check_mode }}" 66 | -------------------------------------------------------------------------------- /roles/podman/legacy/jellyfin.service.j2: -------------------------------------------------------------------------------- 1 | # jellyfin.service 2 | 3 | [Unit] 4 | Description=Podman jellyfin.service 5 | Documentation=man:podman-generate-systemd(1) 6 | Wants=network-online.target 7 | After=network-online.target 8 | RequiresMountsFor=%t/containers 9 | 10 | [Service] 11 | Environment=PODMAN_SYSTEMD_UNIT=%n 12 | Restart=on-failure 13 | TimeoutStopSec=70 14 | ExecStartPre=/bin/rm \ 15 | -f %t/%n.ctr-id 16 | #ExecStart=/usr/bin/podman run \ 17 | # --cidfile=%t/%n.ctr-id \ 18 | # --cgroups=no-conmon \ 19 | # --rm \ 20 | # --sdnotify=conmon \ 21 | # --detach \ 22 | # --replace \ 23 | # --label io.containers.autoupdate=registry \ 24 | # --uidmap 1000:0:1 \ 25 | # --uidmap 0:1:1000 \ 26 | # --uidmap 1001:1001:64536 \ 27 | # --env PUID=1000 \ 28 | # --env PGID=1000 \ 29 | # --env TZ={{ TZ }} \ 30 | # --name=jellyfin \ 31 | # --hostname=jellyfin \ 32 | # --publish 8096:8096/tcp \ 33 | # --volume {{ podman_user.jellyfin_config_dir }}:/config:Z \ 34 | # --volume {{ podman_user.jellyfin_data_dir }}:/media:ro,z \ 35 | # lscr.io/linuxserver/jellyfin:latest 36 | ExecStart=/usr/bin/podman run \ 37 | --cidfile=%t/%n.ctr-id \ 38 | --cgroups=no-conmon \ 39 | --rm \ 40 | --sdnotify=conmon \ 41 | --detach \ 42 | --replace \ 43 | --label io.containers.autoupdate=registry \ 44 | --userns keep-id:uid=1000,gid=1000 \ 45 | --user 1000:1000 \ 46 | 
--name=jellyfin \ 47 | --hostname=jellyfin \ 48 | --publish 8096:8096/tcp \ 49 | --volume {{ podman_user.jellyfin_config_dir }}:/config:Z \ 50 | --volume {{ podman_user.jellyfin_cache_dir }}:/cache:Z \ 51 | --volume {{ podman_user.jellyfin_data_dir }}:/media:ro,z \ 52 | docker.io/jellyfin/jellyfin:latest 53 | ExecStop=/usr/bin/podman stop \ 54 | --ignore \ 55 | --time=10 \ 56 | --cidfile=%t/%n.ctr-id 57 | ExecStopPost=/usr/bin/podman rm \ 58 | --force \ 59 | --ignore \ 60 | --time=10 \ 61 | --cidfile=%t/%n.ctr-id 62 | Type=notify 63 | NotifyAccess=all 64 | 65 | # Security Features 66 | #PrivateTmp=yes 67 | #NoNewPrivileges=yes 68 | #ProtectSystem=strict 69 | #ProtectHome=yes 70 | #ProtectKernelTunables=yes 71 | #ProtectControlGroups=yes 72 | #PrivateMounts=yes 73 | #ProtectHostname=yes 74 | 75 | [Install] 76 | WantedBy=default.target 77 | -------------------------------------------------------------------------------- /roles/podman/tasks/thelounge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # # Time zone 3 | # TZ: "UTC" 4 | # 5 | # # Run podman under this users. 6 | # # If the user does not exist it will create a new user. 7 | # # To manage systemd services under different users add `-M username@` to `systemctl` command, 8 | # # for example: 9 | # # sudo systemctl --user -M tux@ status thelounge.service 10 | # # To view journal under different user with UID 10000 11 | # # sudo journalctl _UID=10000 _SYSTEMD_USER_UNIT=thelounge.service 12 | # podman_users: 13 | # 14 | # # Run Syncthing under user `tux` 15 | # - name: tux 16 | # 17 | # # UID of the user 18 | # uid: 10000 19 | # 20 | # # Enable lingering or not 21 | # enable_lingering: true 22 | # 23 | # # How often to clean up old podman images/containers. 
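# # (Takes any systemd calendar expression, e.g. `weekly` or `Sun *-*-* 03:00:00`.)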
24 | # # This is the OnCalendar= option in podman-system-prune.timer 25 | # podman_system_prune_timer: daily 26 | # 27 | # # List of containers that will run under user `tux` 28 | # containers: 29 | # - thelounge 30 | # 31 | # # thelounge Web UI port 32 | # thelounge_webui_port: 9000 33 | # 34 | # # Path to store thelounge container config 35 | # thelounge_config_dir: "/path/to/container/config/thelounge" 36 | 37 | - name: thelounge container config directory 38 | ansible.builtin.file: 39 | path: "{{ podman_user.thelounge_config_dir }}" 40 | state: directory 41 | owner: "{{ podman_user.name }}" 42 | group: "{{ podman_user.name }}" 43 | mode: '0700' 44 | become: true 45 | 46 | - name: thelounge.container 47 | ansible.builtin.template: 48 | src: thelounge.container.j2 49 | dest: "/home/{{ podman_user.name }}/.config/containers/systemd/thelounge.container" 50 | owner: "{{ podman_user.name }}" 51 | group: "{{ podman_user.name }}" 52 | mode: '0600' 53 | become: true 54 | become_user: "{{ podman_user.name }}" 55 | 56 | - name: systemctl --user daemon-reload 57 | ansible.builtin.systemd: daemon_reload=true scope=user 58 | become: true 59 | become_user: "{{ podman_user.name }}" 60 | 61 | - name: systemctl --user enable --now thelounge.service 62 | ansible.builtin.systemd: name=thelounge.service enabled=true state=started scope=user 63 | become: true 64 | become_user: "{{ podman_user.name }}" 65 | ignore_errors: "{{ ansible_check_mode }}" 66 | -------------------------------------------------------------------------------- /roles/nut/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: pacman -S nut 3 | community.general.pacman: name=nut state=present 4 | become: true 5 | 6 | - name: /etc/nut/ups.conf 7 | ansible.builtin.blockinfile: 8 | path: /etc/nut/ups.conf 9 | block: | 10 | [myups] 11 | driver = usbhid-ups 12 | port = auto 13 | state: present 14 | become: true 15 | 16 | - name: Change UPS device permission 17 | ansible.builtin.template: 18 | src: 50-ups.rules.j2 19 | dest: /etc/udev/rules.d/50-ups.rules 20 | owner: root 21 | group: root 22 | mode: '0644' 23 | become: true 24 | when: ups_product_id is defined 25 | 26 | - name: /etc/nut/upsd.users 27 | ansible.builtin.blockinfile: 28 | path: /etc/nut/upsd.users 29 | block: | 30 | [admin] 31 | password = {{ ups_password }} 32 | upsmon primary 33 | actions = SET 34 | instcmds = ALL 35 | state: present 36 | become: true 37 | 38 | - name: /etc/nut/upsmon.conf 39 | ansible.builtin.blockinfile: 40 | path: /etc/nut/upsmon.conf 41 | block: | 42 | MONITOR myups@localhost 1 admin {{ ups_password }} primary 43 | NOTIFYCMD /etc/nut/nut_notify.sh 44 | NOTIFYFLAG ONLINE SYSLOG+EXEC 45 | NOTIFYFLAG ONBATT SYSLOG+EXEC 46 | NOTIFYFLAG LOWBATT SYSLOG+EXEC 47 | NOTIFYFLAG FSD SYSLOG+EXEC 48 | NOTIFYFLAG SHUTDOWN SYSLOG+EXEC 49 | NOTIFYFLAG REPLBATT SYSLOG+EXEC 50 | NOTIFYFLAG NOCOMM SYSLOG+EXEC 51 | state: present 52 | create: true 53 | become: true 54 | 55 | - name: msmtp config 56 | ansible.builtin.template: 57 | src: msmtprc.j2 58 | dest: /etc/nut/msmtprc 59 | owner: nut 60 | group: nut 61 | mode: '0600' 62 | become: true 63 | 64 | - name: /etc/nut/nut_notify.sh 65 | ansible.builtin.template: 66 | src: nut_notify.sh.j2 67 | dest: /etc/nut/nut_notify.sh 68 | owner: nut 69 | group: nut 70 | mode: '0700' 71 | become: true 72 | 73 | - name: systemctl enable nut-driver-enumerator.service nut-server.service nut-monitor.service nut.target nut-driver.target 74 | ansible.builtin.systemd: name={{ item }} 
enabled=true 75 | loop: 76 | - nut-driver-enumerator.service 77 | - nut-server.service 78 | - nut-monitor.service 79 | - nut.target 80 | - nut-driver.target 81 | become: true 82 | -------------------------------------------------------------------------------- /roles/openssh/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Allow access only for some users 3 | ansible.builtin.lineinfile: 4 | path: /etc/ssh/sshd_config 5 | regexp: '^AllowUsers' 6 | line: AllowUsers {{ ssh_allowusers }} 7 | insertafter: '#\s*AllowUsers' 8 | validate: /usr/sbin/sshd -T -f %s 9 | become: true 10 | when: ssh_allowusers is defined 11 | 12 | - name: Set host key 13 | ansible.builtin.lineinfile: 14 | path: /etc/ssh/sshd_config 15 | regexp: '^HostKey' 16 | line: "HostKey /etc/ssh/ssh_host_{{ ssh_hostkey }}_key" 17 | insertafter: '#\s*HostKey' 18 | validate: /usr/sbin/sshd -T -f %s 19 | become: true 20 | when: ssh_hostkey is defined 21 | 22 | #- name: Set ssh port 23 | # ansible.builtin.lineinfile: 24 | # path: /etc/ssh/sshd_config 25 | # regexp: '^Port' 26 | # line: "Port {{ ansible_port }}" 27 | # insertafter: '#\s*Port' 28 | # validate: /usr/sbin/sshd -T -f %s 29 | # become: true 30 | 31 | - name: Force public key authentication 32 | ansible.builtin.lineinfile: 33 | path: /etc/ssh/sshd_config 34 | regexp: "{{ item.regexp }}" 35 | line: "{{ item.line }}" 36 | insertafter: "{{ item.insertafter }}" 37 | validate: /usr/sbin/sshd -T -f %s 38 | loop: 39 | - { regexp: '^PasswordAuthentication ', line: PasswordAuthentication no, insertafter: '^#\s*PasswordAuthentication ' } 40 | - { regexp: '^KbdInteractiveAuthentication ', line: KbdInteractiveAuthentication no, insertafter: '^#\s*KbdInteractiveAuthentication ' } 41 | - { regexp: '^AuthenticationMethods ', line: AuthenticationMethods publickey, insertafter: '^PasswordAuthentication ' } 42 | become: true 43 | when: not homed 44 | 45 | #- name: Add custom ssh rule to UFW 46 | # ansible.builtin.blockinfile: 47 | # path: /etc/ufw/applications.d/ufw-custom 48 | # block: | 49 | # [SSH-custom] 50 | # title=SSH server 51 | # description=SSH server 52 | # ports={{ ansible_port }}/tcp 53 | # create: true 54 | # marker: "; SSH {mark} ANSIBLE MANAGED BLOCK" 55 | 56 | - name: Firewall rule for ssh 57 | ansible.posix.firewalld: 58 | rich_rule: rule family="ipv4" source address="{{ item }}" service name="ssh" accept 59 | permanent: true 60 | immediate: true 61 | state: enabled 62 | loop: "{{ ssh_accept_source_ipv4 }}" 63 | become: true 64 | when: ssh_accept_source_ipv4 is defined 65 | 66 | 67 | - include_tasks: "homed.yml" 68 | when: homed 69 | -------------------------------------------------------------------------------- /roles/podman/tasks/qbittorrent.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # # Time zone 3 | # TZ: "UTC" 4 | # 5 | # # Run podman under this users. 6 | # # If the user does not exist it will create a new user. 
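# # (With `enable_lingering: true` below, the user's services start at boot and keep running after logout.)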
7 | # # To manage systemd services under different users add `-M username@` to the `systemctl` command, 8 | # # for example: 9 | # # sudo systemctl --user -M tux@ status qbittorrent.service 10 | # # To view journal under different user with UID 10000 11 | # # sudo journalctl _UID=10000 _SYSTEMD_USER_UNIT=qbittorrent.service 12 | # podman_users: 13 | # 14 | # # Run qbittorrent under user `tux` 15 | # - name: tux 16 | # 17 | # # UID of the user 18 | # uid: 10000 19 | # 20 | # # Enable lingering or not 21 | # enable_lingering: true 22 | # 23 | # # How often to clean up old podman images/containers. 24 | # # This is the OnCalendar= option in podman-system-prune.timer 25 | # podman_system_prune_timer: daily 26 | # 27 | # # List of containers that will run under user `tux` 28 | # containers: 29 | # - qbittorrent 30 | # 31 | # # qbittorrent Web UI port 32 | # qbittorrent_webui_port: 8081 33 | # 34 | # # Path to store qbittorrent container config 35 | # qbittorrent_config_dir: "/path/to/container/config/qbittorrent" 36 | # 37 | # # Path to qbittorrent download directory 38 | # qbittorrent_downloads_dir: "/path/to/qbittorrent/download/dir" 39 | 40 | - name: qbittorrent container config directory 41 | ansible.builtin.file: 42 | path: "{{ podman_user.qbittorrent_config_dir }}" 43 | state: directory 44 | owner: "{{ podman_user.name }}" 45 | group: "{{ podman_user.name }}" 46 | mode: '0700' 47 | become: true 48 | 49 | - name: qbittorrent.container 50 | ansible.builtin.template: 51 | src: qbittorrent.container.j2 52 | dest: "/home/{{ podman_user.name }}/.config/containers/systemd/qbittorrent.container" 53 | owner: "{{ podman_user.name }}" 54 | group: "{{ podman_user.name }}" 55 | mode: '0600' 56 | become: true 57 | become_user: "{{ podman_user.name }}" 58 | 59 | - name: systemctl --user daemon-reload 60 | ansible.builtin.systemd: daemon_reload=true scope=user 61 | become: true 62 | become_user: "{{ podman_user.name }}" 63 | 64 | - name: systemctl --user enable --now qbittorrent.service 65 | ansible.builtin.systemd: name=qbittorrent.service enabled=true state=started scope=user 66 | become: true 67 | become_user: "{{ podman_user.name }}" 68 | ignore_errors: "{{ ansible_check_mode }}" 69 | -------------------------------------------------------------------------------- /roles/nas/tasks/raid.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: dnf install cryptsetup 3 | ansible.builtin.dnf: name=cryptsetup state=present 4 | become: true 5 | when: 6 | - ansible_facts["distribution"] == "Fedora" 7 | - crypttab_entries is defined 8 | 9 | - name: Create /etc/crypttab file 10 | ansible.builtin.file: 11 | path: /etc/crypttab 12 | state: touch 13 | owner: root 14 | group: root 15 | mode: '0644' 16 | modification_time: preserve 17 | access_time: preserve 18 | become: true 19 | when: 20 | - ansible_facts["distribution"] == "Fedora" 21 | - crypttab_entries is defined 22 | 23 | - name: Edit /etc/crypttab 24 | ansible.builtin.lineinfile: 25 | path: /etc/crypttab 26 | regexp: "^{{ item.device_mapper_name }}" 27 | line: "{{ item.device_mapper_name }} UUID={{ item.UUID }} {{ item.keyfile }}" 28 | state: present 29 | when: crypttab_entries is defined 30 | loop: "{{ crypttab_entries }}" 31 | become: true 32 | 33 | - name: Create mount points 34 | ansible.builtin.file: 35 | path: "{{ item.mount_point }}" 36 | state: directory 37 | owner: "{{ item.owner }}" 38 | group: "{{ item.group }}" 39 | mode: "{{ item.mode }}" 40 | loop: "{{ fstab_entries }}" 41 | become: true 42 | 43 | - 
name: Edit /etc/fstab 44 | ansible.builtin.lineinfile: 45 | path: /etc/fstab 46 | regexp: "^{{ item.device }}\\s+{{ item.mount_point }}\\s+" 47 | line: "{{ item.device }} {{ item.mount_point }} {{ item.fs }} {{ item.mount_opts }} 0 0" 48 | state: present 49 | loop: "{{ fstab_entries }}" 50 | become: true 51 | register: fstab 52 | 53 | - name: remount 54 | ansible.builtin.shell: | 55 | systemctl daemon-reload 56 | mount --all 57 | become: true 58 | when: fstab.changed 59 | 60 | - name: Set mount points permissions 61 | ansible.builtin.file: 62 | path: "{{ item.mount_point }}" 63 | state: directory 64 | owner: "{{ item.owner }}" 65 | group: "{{ item.group }}" 66 | mode: "{{ item.mode }}" 67 | loop: "{{ fstab_entries }}" 68 | become: true 69 | 70 | - name: Set spindown timeout for disk 71 | ansible.builtin.copy: 72 | content: | 73 | ACTION=="add|change", KERNEL=="sd[a-z]", ATTRS{queue/rotational}=="1", RUN+="/usr/bin/hdparm -S {{ hdparm_spindown }} /dev/%k" 74 | dest: /etc/udev/rules.d/69-hdparm.rules 75 | owner: root 76 | group: root 77 | mode: '0644' 78 | become: true 79 | when: hdparm_spindown is defined 80 | -------------------------------------------------------------------------------- /roles/podman/templates/gluetun.container.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=gluetun container 3 | Wants=network-online.target 4 | After=network-online.target nss-lookup.target 5 | {% if "transmission" in podman_user.containers %} 6 | Wants=transmission.service 7 | {% endif %} 8 | {% if "qbittorrent" in podman_user.containers %} 9 | Wants=qbittorrent.service 10 | {% endif %} 11 | {% if "autobrr" in podman_user.containers and podman_user.autobrr_gluetun_proxy %} 12 | Wants=autobrr.service 13 | {% endif %} 14 | {% if "thelounge" in podman_user.containers and podman_user.thelounge_gluetun_proxy %} 15 | Wants=thelounge.service 16 | {% endif %} 17 | 18 | [Container] 19 | ContainerName=gluetun 20 | Image=ghcr.io/qdm12/gluetun:v3 21 | AutoUpdate=registry 22 | Timezone={{ TZ }} 23 | 24 | AddCapability=NET_ADMIN 25 | AddDevice=/dev/net/tun:/dev/net/tun 26 | 27 | HostName=gluetun 28 | {% if "transmission" in podman_user.containers %} 29 | PublishPort=127.0.0.1:{{ podman_user.transmission_webui_port }}:9091/tcp 30 | {% endif %} 31 | {% if "qbittorrent" in podman_user.containers %} 32 | PublishPort=127.0.0.1:{{ podman_user.qbittorrent_webui_port }}:{{ podman_user.qbittorrent_webui_port }}/tcp 33 | {% endif %} 34 | {% if "autobrr" in podman_user.containers and podman_user.autobrr_gluetun_proxy %} 35 | PublishPort=127.0.0.1:{{ podman_user.autobrr_webui_port }}:{{ podman_user.autobrr_webui_port }}/tcp 36 | {% endif %} 37 | {% if "thelounge" in podman_user.containers and podman_user.thelounge_gluetun_proxy %} 38 | PublishPort=127.0.0.1:{{ podman_user.thelounge_webui_port }}:{{ podman_user.thelounge_webui_port }}/tcp 39 | {% endif %} 40 | 41 | Environment=TZ={{ TZ }} 42 | {% for item in podman_user.gluetun_vpn_provider_env %} 43 | Environment={{ item }} 44 | {% endfor %} 45 | 46 | {% if podman_user.gluetun_httpproxy is defined and podman_user.gluetun_httpproxy %} 47 | Environment=HTTPPROXY=on 48 | Environment=HTTPPROXY_STEALTH=on 49 | {% if podman_user.gluetun_httpproxy_port is defined %} 50 | PublishPort=127.0.0.1:{{ podman_user.gluetun_httpproxy_port }}:8888/tcp 51 | {% else %} 52 | PublishPort=127.0.0.1:8888:8888/tcp 53 | {% endif %} 54 | 55 | {% endif %} 56 | 57 | [Service] 58 | Restart=on-failure 59 | RestartSec=5 60 | RestartMaxDelaySec=1h 
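# The restart delay grows exponentially from RestartSec= up to the 1h cap above, over the number of steps below (RestartSteps=/RestartMaxDelaySec= need systemd 254 or newer).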
61 | RestartSteps=10 62 | 63 | # Remove the gluetun container and all containers that depend on gluetun 64 | ExecStartPre=-/usr/bin/podman rm --force --depend gluetun 65 | 66 | # Extend Timeout to allow time to pull the image 67 | TimeoutStartSec=300 68 | 69 | [Install] 70 | WantedBy=default.target 71 | -------------------------------------------------------------------------------- /roles/auto-update/templates/auto-update.sh.j2: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | {% if msmtp_to is defined and msmtp_from is defined %} 4 | TO="{{ msmtp_to }}" 5 | FROM="{{ msmtp_from }}" 6 | SUBJECT="Auto-update $HOSTNAME $(/usr/bin/date '+%F %T')" 7 | {% endif %} 8 | 9 | # Check whether a reboot is needed 10 | NUM_PKG=$(/usr/bin/checkupdates | grep -E 'linux|systemd' | wc -l) 11 | if [[ $NUM_PKG -eq 0 ]] ; then 12 | NEED_REBOOT=0 13 | else 14 | NEED_REBOOT=1 15 | fi 16 | 17 | # Create empty auto-update.log 18 | echo '' > /var/log/auto-update.log 19 | 20 | {% if btrfs_scrub_time is defined %} 21 | # systemctl is-active returns 0 if at least one is active 22 | /usr/bin/systemctl is-active --quiet --all "btrfs-scrub@*.service" 23 | BTRFS_SCRUB_EXIT=$? 24 | if [[ $BTRFS_SCRUB_EXIT -eq 0 ]] ; then 25 | echo -e "btrfs-scrub is running. Skipped update.\n" >> /var/log/auto-update.log 26 | else 27 | # Update system 28 | echo -e "# pacman -Syu --noconfirm --noprogressbar\n" >> /var/log/auto-update.log 29 | /usr/bin/pacman -Syu --noconfirm --noprogressbar &>> /var/log/auto-update.log 30 | echo -e "\n\n# checkservices -aP\n" >> /var/log/auto-update.log 31 | /usr/bin/checkservices -aP &>> /var/log/auto-update.log 32 | if [[ $NEED_REBOOT -eq 1 ]] ; then 33 | echo -e "\n\n# systemctl reboot" >> /var/log/auto-update.log 34 | fi 35 | fi 36 | {% else %} 37 | # Update system 38 | echo -e "# pacman -Syu --noconfirm --noprogressbar\n" >> /var/log/auto-update.log 39 | /usr/bin/pacman -Syu --noconfirm --noprogressbar &>> /var/log/auto-update.log 40 | echo -e "\n\n# checkservices -aP\n" >> /var/log/auto-update.log 41 | /usr/bin/checkservices -aP &>> /var/log/auto-update.log 42 | if [[ $NEED_REBOOT -eq 1 ]] ; then 43 | echo -e "\n\n# systemctl reboot" >> /var/log/auto-update.log 44 | fi 45 | {% endif %} 46 | 47 | {% if msmtp_to is defined and msmtp_from is defined %} 48 | # send email 49 | update_msg="$(cat /var/log/auto-update.log)" 50 | echo -e "To: ${TO}\nFrom: ${FROM}\nSubject: ${SUBJECT}\n${update_msg}" | msmtp --read-recipients --read-envelope-from 51 | while [[ $? 
-ne 0 ]]; do 52 | # if failed try resend 5min later 53 | sleep 300 54 | echo -e "To: ${TO}\nFrom: ${FROM}\nSubject: ${SUBJECT}\n${update_msg}" | msmtp --read-recipients --read-envelope-from 55 | done 56 | {% endif %} 57 | 58 | sleep 5 59 | 60 | {% if btrfs_scrub_time is defined %} 61 | if [[ $BTRFS_SCRUB_EXIT -ne 0 && $NEED_REBOOT -eq 1 ]] ; then 62 | # Reboot if necessary 63 | systemctl reboot 64 | fi 65 | {% else %} 66 | if [[ $NEED_REBOOT -eq 1 ]] ; then 67 | systemctl reboot 68 | fi 69 | {% endif %} 70 | 71 | exit 0 72 | -------------------------------------------------------------------------------- /roles/auto-update/tasks/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: apt install unattended-upgrades 3 | ansible.builtin.apt: name=unattended-upgrades state=present 4 | become: true 5 | 6 | - name: Check /etc/apt/apt.conf.d/20auto-upgrades stats 7 | ansible.builtin.stat: 8 | path: /etc/apt/apt.conf.d/20auto-upgrades 9 | register: auto_upgrades 10 | become: true 11 | 12 | - name: Create /etc/apt/apt.conf.d/20auto-upgrades if not exists 13 | ansible.builtin.shell: | 14 | echo unattended-upgrades unattended-upgrades/enable_auto_updates boolean true | debconf-set-selections 15 | dpkg-reconfigure -f noninteractive unattended-upgrades 16 | become: true 17 | when: not auto_upgrades.stat.exists 18 | 19 | - name: 20auto-upgrades config 20 | ansible.builtin.lineinfile: 21 | path: /etc/apt/apt.conf.d/20auto-upgrades 22 | line: "{{ item.line }}" 23 | regexp: "{{ item.regexp }}" 24 | loop: 25 | - { line: 'APT::Periodic::Update-Package-Lists "1";', regexp: '^APT::Periodic::Update-Package-Lists' } 26 | - { line: 'APT::Periodic::Unattended-Upgrade "1";', regexp: '^APT::Periodic::Unattended-Upgrade' } 27 | become: true 28 | 29 | - name: /etc/apt/apt.conf.d/50unattended-upgrades set Automatic-Reboot-Time 30 | ansible.builtin.lineinfile: 31 | path: /etc/apt/apt.conf.d/50unattended-upgrades 32 | regexp: "{{ item.regexp }}" 33 | line: "{{ item.line }}" 34 | insertafter: "{{ item.insertafter }}" 35 | loop: 36 | - { regexp: '^Unattended-Upgrade::Automatic-Reboot ', insertafter: '^//Unattended-Upgrade::Automatic-Reboot ', line: 'Unattended-Upgrade::Automatic-Reboot "true";' } 37 | - { regexp: '^Unattended-Upgrade::Automatic-Reboot-WithUsers ', insertafter: '^//Unattended-Upgrade::Automatic-Reboot-WithUsers ', line: 'Unattended-Upgrade::Automatic-Reboot-WithUsers "true";' } 38 | - { regexp: '^Unattended-Upgrade::Remove-Unused-Dependencies ', insertafter: '^//Unattended-Upgrade::Remove-Unused-Dependencies ', line: 'Unattended-Upgrade::Remove-Unused-Dependencies "true";' } 39 | become: true 40 | 41 | - name: Create override directory for apt-daily-upgrade.timer 42 | ansible.builtin.file: 43 | path: /etc/systemd/system/apt-daily-upgrade.timer.d 44 | state: directory 45 | owner: root 46 | group: root 47 | mode: '0755' 48 | become: true 49 | when: auto_update_time is defined 50 | 51 | - name: Set upgrade time 52 | ansible.builtin.copy: 53 | dest: /etc/systemd/system/apt-daily-upgrade.timer.d/override.conf 54 | content: | 55 | [Timer] 56 | OnCalendar= 57 | OnCalendar={{ auto_update_time }} 58 | RandomizedDelaySec=0 59 | become: true 60 | when: auto_update_time is defined 61 | 62 | -------------------------------------------------------------------------------- /debian_post_install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | apt update 4 | apt dist-upgrade 5 | 6 | apt install vim rsync 
htop 7 | 8 | # install firewalld 9 | apt install firewalld 10 | 11 | # set ssh port 12 | echo "ssh port? (22)" 13 | read ssh_port 14 | ssh_port="${ssh_port:-22}" 15 | if [[ $ssh_port -ne 22 ]] ; then 16 | sed -i "s/^#Port.*/Port ${ssh_port}/" /etc/ssh/sshd_config 17 | sed "/port=/s/port=\"22\"/port=\"${ssh_port}\"/" /usr/lib/firewalld/services/ssh.xml > /etc/firewalld/services/ssh.xml 18 | firewall-cmd --reload 19 | fi 20 | 21 | read -p "Do you want to set default firewall zone to drop? [y/N] " firewall_drop 22 | firewall_drop="${firewall_drop:-n}" 23 | firewall_drop="${firewall_drop,,}" 24 | if [[ $firewall_drop == y ]] ; then 25 | firewall-cmd --set-default-zone=drop 26 | 27 | read -p "Allow ICMP echo-request and echo-reply (respond ping)? [Y/n] " allow_ping 28 | allow_ping="${allow_ping:-y}" 29 | allow_ping="${allow_ping,,}" 30 | if [[ $allow_ping == y ]] ; then 31 | firewall-cmd --permanent --zone=drop --add-icmp-block-inversion 32 | echo -e "\nallow ping source ip address (example 192.168.1.0/24), empty to allow all" 33 | read ping_source 34 | if [[ -n $ping_source ]] ; then 35 | firewall-cmd --permanent --zone=drop --add-rich-rule="rule family='ipv4' source address='${ping_source}' icmp-type name='echo-request' accept" 36 | firewall-cmd --permanent --zone=drop --add-rich-rule="rule family='ipv4' source address='${ping_source}' icmp-type name='echo-reply' accept" 37 | else 38 | firewall-cmd --permanent --zone=drop --add-icmp-block=echo-request 39 | firewall-cmd --permanent --zone=drop --add-icmp-block=echo-reply 40 | fi 41 | fi 42 | fi 43 | 44 | echo -e "\nssh allow source ip address (example 192.168.1.0/24), empty to skip" 45 | read ssh_source 46 | if [[ -n $ssh_source ]]; then 47 | firewall-cmd --permanent --add-rich-rule="rule family='ipv4' source address='${ssh_source}' service name='ssh' accept" 48 | firewall-cmd --permanent --remove-service ssh 49 | fi 50 | firewall-cmd --reload 51 | 52 | # raspberry pi 53 | # create a sudo user and disable the root user 54 | if [[ ${HOSTNAME:0:3} == rpi ]] ; then 55 | apt install sudo 56 | 57 | read -p "Tell me your username: " username 58 | useradd -m -G sudo -s /usr/bin/bash "$username" 59 | passwd "$username" 60 | 61 | echo "Disabling root ..." 
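# Delete root's password, then lock the account: direct root logins stop
# working and admin access goes through the sudo user created above.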
62 | passwd -d root 63 | passwd -l root 64 | 65 | echo -e "\n\nPlease tell me the hostname:" 66 | read hostname 67 | echo "$hostname" > /etc/hostname 68 | echo -e "127.0.0.1\t$hostname" >> /etc/hosts 69 | fi 70 | 71 | -------------------------------------------------------------------------------- /roles/libvirt/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check directory /var/lib/libvirt/images 3 | ansible.builtin.stat: 4 | path: /var/lib/libvirt/images 5 | register: libvirt_images_dir 6 | become: true 7 | 8 | - name: Get file system type of /var/lib 9 | ansible.builtin.command: stat --file-system --format=%T /var/lib 10 | become: true 11 | register: var_lib_fstype 12 | changed_when: false 13 | check_mode: false 14 | 15 | - name: Create /var/lib/libvirt/images subvolume if it does not exist, and disable CoW 16 | ansible.builtin.shell: | 17 | mkdir -p /var/lib/libvirt 18 | btrfs subvolume create /var/lib/libvirt/images 19 | chattr +C /var/lib/libvirt/images 20 | become: true 21 | when: 22 | - not libvirt_images_dir.stat.exists 23 | - var_lib_fstype.stdout == 'btrfs' 24 | 25 | - name: Install libvirt packages 26 | community.general.pacman: 27 | name: 28 | # libvirt 29 | - qemu-desktop 30 | - libvirt 31 | - virt-manager 32 | # default NAT/DHCP networking support 33 | - iptables-nft 34 | - dnsmasq 35 | - dmidecode 36 | # bridged networking support 37 | - bridge-utils 38 | # UEFI support 39 | - edk2-ovmf 40 | state: present 41 | become: true 42 | 43 | - name: Add user to libvirt group (non systemd-homed) 44 | ansible.builtin.user: 45 | name: "{{ ansible_facts.user_id }}" 46 | groups: libvirt 47 | append: true 48 | become: true 49 | when: not homed 50 | 51 | - name: Add user to libvirt group (systemd-homed) 52 | community.general.homectl: 53 | name: "{{ ansible_facts.user_id }}" 54 | password: "{{ ansible_become_password }}" 55 | memberof: "{{ ansible_facts.user_id }},wheel,libvirt" 56 | become: true 57 | when: homed 58 | 59 | - name: systemctl enable libvirtd.service 60 | ansible.builtin.systemd: name=libvirtd.service enabled=true 61 | register: libvirtd 62 | become: true 63 | 64 | - name: systemctl start libvirtd.service virtlogd.service 65 | ansible.builtin.systemd: name={{ item }} state=started 66 | when: libvirtd.changed 67 | become: true 68 | loop: 69 | - libvirtd.service 70 | - virtlogd.service 71 | 72 | - name: Check whether default NAT is enabled 73 | ansible.builtin.shell: virsh net-info --network default | grep '^Autostart:.*yes' | wc -l 74 | become: true 75 | register: libvirt_default_net 76 | changed_when: false 77 | 78 | - name: Enable and start default NAT 79 | ansible.builtin.shell: | 80 | virsh net-autostart default 81 | 82 | # reload firewall and start default network 83 | firewall-cmd --reload 84 | virsh net-start default 85 | become: true 86 | when: libvirt_default_net.stdout == '0' 87 | 88 | -------------------------------------------------------------------------------- /roles/podman/legacy/nextcloud.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: nextcloud container config and data directory 3 | ansible.builtin.file: 4 | path: "{{ item }}" 5 | state: directory 6 | owner: "{{ podman_user.name }}" 7 | group: "{{ podman_user.name }}" 8 | mode: '0700' 9 | become: true 10 | loop: 11 | - "{{ podman_user.nextcloud_config_dir }}" 12 | - "{{ podman_user.nextcloud_data_dir }}" 13 | - "{{ podman_user.postgres_config_dir }}" 14 | 15 | 
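# NOTE: legacy podman-generate-systemd user units; the quadlet-based roles/podman/tasks/nextcloud.yml (Nextcloud AIO) supersedes this setup.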
16 | - name: nextcloud-pod.service nextcloud.service postgres.service nextcloud-cron.service nextcloud-cron.timer 17 | ansible.builtin.template: 18 | src: "{{ item }}.j2" 19 | dest: "/home/{{ podman_user.name }}/.config/systemd/user/{{ item }}" 20 | owner: "{{ podman_user.name }}" 21 | group: "{{ podman_user.name }}" 22 | mode: '0600' 23 | become: true 24 | become_user: "{{ podman_user.name }}" 25 | loop: 26 | - nextcloud-pod.service 27 | - nextcloud.service 28 | - postgres.service 29 | - nextcloud-cron.service 30 | - nextcloud-cron.timer 31 | 32 | - name: systemctl --user daemon-reload 33 | ansible.builtin.systemd: daemon_reload=true scope=user 34 | become: true 35 | become_user: "{{ podman_user.name }}" 36 | 37 | - name: systemctl --user enable --now nextcloud-pod.service nextcloud.service postgres.service nextcloud-cron.timer 38 | ansible.builtin.systemd: name={{ item }} state=started enabled=true scope=user 39 | become: true 40 | become_user: "{{ podman_user.name }}" 41 | loop: 42 | - nextcloud-pod.service 43 | - nextcloud.service 44 | - postgres.service 45 | - nextcloud-cron.timer 46 | 47 | # 4108/tcp nextcloud port 48 | - name: add nextcloud firewalld service file nextcloud.xml 49 | ansible.builtin.copy: 50 | content: | 51 | <?xml version="1.0" encoding="utf-8"?> 52 | <service> 53 | <short>Nextcloud</short> 54 | <description>Nextcloud</description> 55 | <port protocol="tcp" port="4108"/> 56 | </service> 57 | dest: /etc/firewalld/services/nextcloud.xml 58 | owner: root 59 | group: root 60 | mode: '0644' 61 | become: true 62 | register: nextcloud_firewalld_file 63 | 64 | - name: Reload firewalld when nextcloud.xml changed 65 | ansible.builtin.command: firewall-cmd --reload 66 | become: true 67 | when: nextcloud_firewalld_file.changed 68 | 69 | - name: Set firewall rules for nextcloud listening port (TCP) 70 | ansible.posix.firewalld: 71 | rich_rule: rule family="ipv4" source address="{{ reverse_proxy_ipv4 }}" service name="nextcloud" accept 72 | #zone: "{{ firewalld_default_zone }}" 73 | permanent: true 74 | immediate: true 75 | state: enabled 76 | become: true 77 | when: reverse_proxy_ipv4 is defined 78 | -------------------------------------------------------------------------------- /roles/gui/tasks/paru.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # manually clone and install paru 3 | 4 | - name: Get file system type of /var/cache 5 | ansible.builtin.command: "stat --file-system --format=%T /var/cache" 6 | register: var_cache_fstype 7 | changed_when: false 8 | 9 | - name: Check /var/cache/paru directory 10 | ansible.builtin.stat: 11 | path: "/var/cache/paru" 12 | register: paru_cache_dir 13 | 14 | # Create /var/cache/paru subvolume so it won't be included in snapshots 15 | - name: Create /var/cache/paru subvolume (if using btrfs) 16 | ansible.builtin.command: 17 | cmd: "btrfs subvolume create paru" 18 | chdir: "/var/cache" 19 | become: true 20 | when: not paru_cache_dir.stat.exists and var_cache_fstype.stdout == 'btrfs' 21 | 22 | 23 | - name: Create paru directory 24 | ansible.builtin.file: 25 | path: "{{ item }}" 26 | state: directory 27 | owner: "{{ ansible_facts.user_id }}" 28 | group: "{{ ansible_facts.user_id }}" 29 | mode: '0755' 30 | become: true 31 | loop: 32 | - "/var/lib/paru" 33 | - "/var/cache/paru" 34 | - "/var/lib/paru/repo" 35 | - "/var/cache/paru/clone" 36 | - "/var/cache/paru/chroot" 37 | - "/var/cache/paru/pkg" 38 | 39 | - name: Create empty aur.db repo file 40 | ansible.builtin.file: 41 | path: /var/lib/paru/repo/aur.db 42 | state: touch 43 | modification_time: preserve 44 | access_time: preserve 45 | 46 | - name: Edit CacheDir /etc/pacman.conf 47 | ansible.builtin.lineinfile: 48 | path: 
/etc/pacman.conf 49 | regexp: "{{ item.regexp }}" 50 | line: "{{ item.line }}" 51 | insertafter: "{{ item.insertafter }}" 52 | become: true 53 | loop: 54 | - { regexp: '^CacheDir\s*=\s*/var/cache/pacman/pkg/', insertafter: '^#\s*CacheDir', line: 'CacheDir = /var/cache/pacman/pkg/' } 55 | - { regexp: '^CacheDir\s*=\s*/var/cache/paru/pkg/', insertafter: '^CacheDir\s*=\s*/var/cache/pacman/pkg/', line: 'CacheDir = /var/cache/paru/pkg/' } 56 | 57 | - name: Add LocalRepo 58 | ansible.builtin.blockinfile: 59 | path: /etc/pacman.conf 60 | marker: "# {mark} ANSIBLE MANAGED BLOCK aur LocalRepo" 61 | block: | 62 | [aur] 63 | SigLevel = PackageOptional DatabaseOptional 64 | Server = file:///var/lib/paru/repo 65 | become: true 66 | 67 | - name: Edit /etc/paru.conf 68 | ansible.builtin.lineinfile: 69 | path: /etc/paru.conf 70 | regexp: "{{ item.regexp }}" 71 | line: "{{ item.line }}" 72 | insertafter: "{{ item.insertafter }}" 73 | loop: 74 | - { regexp: '^LocalRepo', insertafter: '^#\s*LocalRepo', line: LocalRepo = aur } 75 | - { regexp: '^Chroot', insertafter: '^#\s*Chroot', line: Chroot = /var/cache/paru/chroot } 76 | - { regexp: '^CloneDir', insertafter: '^Chroot', line: CloneDir = /var/cache/paru/clone } 77 | become: true 78 | -------------------------------------------------------------------------------- /roles/podman/tasks/transmission.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # # Time zone 3 | # TZ: "UTC" 4 | # 5 | # # Run podman under this users. 6 | # # If the user does not exist it will create a new user. 7 | # # To manage systemd services under different users add `-M username@` to `systemctl` command, 8 | # # for example: 9 | # # sudo systemctl --user -M tux@ status transmission.service 10 | # # To view journal under different user with UID 10000 11 | # # sudo journalctl _UID=10000 _SYSTEMD_USER_UNIT=transmission.service 12 | # podman_users: 13 | # 14 | # # Run Syncthing under user `tux` 15 | # - name: tux 16 | # 17 | # # UID of the user 18 | # uid: 10000 19 | # 20 | # # Enable lingering or not 21 | # enable_lingering: true 22 | # 23 | # # How often to clean up old podman images/containers. 
24 | # # This is the OnCalendar= option in podman-system-prune.timer 25 | # podman_system_prune_timer: daily 26 | # 27 | # # List of containers that will run under user `tux` 28 | # containers: 29 | # - transmission 30 | # 31 | # # transmission Web UI port 32 | # transmission_webui_port: 9091 33 | # 34 | # # Path to store transmission container config 35 | # transmission_config_dir: "/path/to/container/config/transmission" 36 | # 37 | # # Path to transmission download directory 38 | # transmission_downloads_dir: "/path/to/transmission/download/dir" 39 | # 40 | # # Optionally, transmission watch directory 41 | # #transmission_watch_dir: "/path/to/transmission/watch/dir" 42 | # 43 | # # Optionally, add auth to transmission Web UI 44 | # transmission_user: tux 45 | # transmission_pass: !unsafe mypassword 46 | 47 | - name: transmission container config directory 48 | ansible.builtin.file: 49 | path: "{{ podman_user.transmission_config_dir }}" 50 | state: directory 51 | owner: "{{ podman_user.name }}" 52 | group: "{{ podman_user.name }}" 53 | mode: '0700' 54 | become: true 55 | 56 | - name: transmission.container 57 | ansible.builtin.template: 58 | src: transmission.container.j2 59 | dest: "/home/{{ podman_user.name }}/.config/containers/systemd/transmission.container" 60 | owner: "{{ podman_user.name }}" 61 | group: "{{ podman_user.name }}" 62 | mode: '0600' 63 | become: true 64 | become_user: "{{ podman_user.name }}" 65 | 66 | - name: systemctl --user daemon-reload 67 | ansible.builtin.systemd: daemon_reload=true scope=user 68 | become: true 69 | become_user: "{{ podman_user.name }}" 70 | 71 | - name: systemctl --user enable --now transmission.service 72 | ansible.builtin.systemd: name=transmission.service enabled=true state=started scope=user 73 | become: true 74 | become_user: "{{ podman_user.name }}" 75 | ignore_errors: "{{ ansible_check_mode }}" 76 | -------------------------------------------------------------------------------- /roles/gui/tasks/snapper.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check ~/.cache directory 3 | ansible.builtin.stat: 4 | path: "{{ ansible_facts.user_dir }}/.cache" 5 | register: cache_dir 6 | 7 | - name: Check ~/Downloads/ directory 8 | ansible.builtin.stat: 9 | path: "{{ ansible_facts.user_dir }}/Downloads" 10 | register: downloads_dir 11 | 12 | # Create ~/.cache subvolume so it won't be included in snapshots 13 | - name: Create ~/.cache subvolume (if using btrfs) 14 | ansible.builtin.command: 15 | cmd: "btrfs subvolume create .cache" 16 | chdir: "{{ ansible_facts.user_dir }}" 17 | when: not cache_dir.stat.exists 18 | 19 | # Create ~/Downloads subvolume so it won't be included in snapshots 20 | - name: Create ~/Downloads subvolume (if using btrfs) 21 | ansible.builtin.command: 22 | cmd: "btrfs subvolume create Downloads" 23 | chdir: "{{ ansible_facts.user_dir }}" 24 | when: not downloads_dir.stat.exists 25 | 26 | - name: check /etc/snapper/configs/home 27 | ansible.builtin.stat: 28 | path: /etc/snapper/configs/home 29 | register: snapper_home 30 | become: true 31 | 32 | - name: Create snapper for /home/{{ ansible_facts.user_id }} (systemd-homed) 33 | ansible.builtin.shell: snapper -c home create-config /home/{{ ansible_facts.user_id }} 34 | args: 35 | executable: /usr/bin/bash 36 | become: true 37 | when: 38 | - not snapper_home.stat.exists 39 | - homed 40 | 41 | - name: Create snapper for /home (non systemd-homed) 42 | ansible.builtin.shell: snapper -c home create-config /home 43 | args: 44 | 
executable: /usr/bin/bash 45 | become: true 46 | when: 47 | - not snapper_home.stat.exists 48 | - not homed 49 | 50 | - name: Edit /etc/snapper/configs/home 51 | ansible.builtin.lineinfile: 52 | path: /etc/snapper/configs/home 53 | regexp: "{{ item.regexp }}" 54 | line: "{{ item.line }}" 55 | loop: 56 | - { regexp: '^TIMELINE_CREATE=', line: 'TIMELINE_CREATE="yes"' } 57 | - { regexp: '^TIMELINE_CLEANUP=', line: 'TIMELINE_CLEANUP="yes"' } 58 | - { regexp: '^NUMBER_MIN_AGE=', line: 'NUMBER_MIN_AGE="1800"' } 59 | - { regexp: '^NUMBER_LIMIT=', line: 'NUMBER_LIMIT="10"' } 60 | - { regexp: '^NUMBER_LIMIT_IMPORTANT=', line: 'NUMBER_LIMIT_IMPORTANT="10"' } 61 | - { regexp: '^TIMELINE_MIN_AGE=', line: 'TIMELINE_MIN_AGE="1800"' } 62 | - { regexp: '^TIMELINE_LIMIT_HOURLY=', line: 'TIMELINE_LIMIT_HOURLY="5"' } 63 | - { regexp: '^TIMELINE_LIMIT_DAILY=', line: 'TIMELINE_LIMIT_DAILY="7"' } 64 | - { regexp: '^TIMELINE_LIMIT_WEEKLY=', line: 'TIMELINE_LIMIT_WEEKLY="0"' } 65 | - { regexp: '^TIMELINE_LIMIT_MONTHLY=', line: 'TIMELINE_LIMIT_MONTHLY="0"' } 66 | - { regexp: '^TIMELINE_LIMIT_YEARLY=', line: 'TIMELINE_LIMIT_YEARLY="0"' } 67 | become: true 68 | -------------------------------------------------------------------------------- /roles/podman/tasks/nextcloud.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # # Time zone 3 | # TZ: "UTC" 4 | # 5 | # # Run podman under this users. 6 | # # If the user does not exist it will create a new user. 7 | # # To manage systemd services under different users add `-M username@` to `systemctl` command, 8 | # # for example: 9 | # # sudo systemctl --user -M tux@ status nextcloud-aio.service 10 | # # To view journal under different user with UID 10000 11 | # # sudo journalctl _UID=10000 _SYSTEMD_USER_UNIT=nextcloud-aio.service 12 | # podman_users: 13 | # 14 | # # Run Syncthing under user `tux` 15 | # - name: tux 16 | # 17 | # # UID of the user 18 | # uid: 10000 19 | # 20 | # # Enable lingering or not 21 | # enable_lingering: true 22 | # 23 | # # How often to clean up old podman images/containers. 
24 | # # This is the OnCalendar= option in podman-system-prune.timer 25 | # podman_system_prune_timer: daily 26 | # 27 | # # List of containers that will run under user `tux` 28 | # containers: 29 | # - nextcloud 30 | # 31 | # # The Nextcloud Web UI port 32 | # nextcloud_webui_port: 11000 33 | # 34 | # # The Nextcloud AIO Web admin port 35 | # nextcloud_web_admin_port: 8088 36 | # 37 | # # Some optional environment variables passed to nextcloud-aio-mastercontainer 38 | # # https://github.com/nextcloud/all-in-one/blob/main/compose.yaml 39 | # 40 | # # SKIP_DOMAIN_VALIDATION 41 | # nextcloud_skip_domain_validation: true 42 | # 43 | # # BORG_RETENTION_POLICY 44 | # nextcloud_backup_retention: "--keep-within=7d --keep-weekly=4 --keep-monthly=0" 45 | # 46 | # # NEXTCLOUD_MEMORY_LIMIT 47 | # nextcloud_memory_limit: 1024M 48 | 49 | - name: nextcloud-aio.container 50 | ansible.builtin.template: 51 | src: nextcloud-aio.container.j2 52 | dest: "/home/{{ podman_user.name }}/.config/containers/systemd/nextcloud-aio.container" 53 | owner: "{{ podman_user.name }}" 54 | group: "{{ podman_user.name }}" 55 | mode: '0600' 56 | become: true 57 | 58 | - name: nextcloud-aio-mastercontainer.volume 59 | ansible.builtin.copy: 60 | content: | 61 | [Volume] 62 | VolumeName=nextcloud_aio_mastercontainer 63 | dest: "/home/{{ podman_user.name }}/.config/containers/systemd/nextcloud-aio-mastercontainer.volume" 64 | owner: "{{ podman_user.name }}" 65 | group: "{{ podman_user.name }}" 66 | mode: '0600' 67 | become: true 68 | 69 | - name: systemctl --user daemon-reload 70 | ansible.builtin.systemd: daemon_reload=true scope=user 71 | become: true 72 | become_user: "{{ podman_user.name }}" 73 | 74 | - name: systemctl --user enable --now nextcloud-aio.service 75 | ansible.builtin.systemd: name=nextcloud-aio.service state=started enabled=true scope=user 76 | become: true 77 | become_user: "{{ podman_user.name }}" 78 | ignore_errors: "{{ ansible_check_mode }}" 79 | 80 | -------------------------------------------------------------------------------- /roles/archlinux_common/tasks/snapper.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check /etc/snapper directory 3 | ansible.builtin.stat: 4 | path: /etc/snapper 5 | become: true 6 | register: snapper_dir 7 | 8 | - name: pacman.conf NoExtract = etc/cron.daily/snapper etc/cron.hourly/snapper 9 | ansible.builtin.lineinfile: 10 | path: /etc/pacman.conf 11 | regexp: '^NoExtract' 12 | line: NoExtract = etc/cron.daily/snapper etc/cron.hourly/snapper 13 | insertafter: '^#\s*NoExtract' 14 | become: true 15 | when: not snapper_dir.stat.exists 16 | 17 | - name: pacman -S snapper snap-pac rsync 18 | community.general.pacman: 19 | name: 20 | - snapper 21 | - snap-pac 22 | - rsync 23 | state: present 24 | become: true 25 | 26 | - name: Create snapper for / 27 | ansible.builtin.shell: | 28 | umount /.snapshots 29 | rm -r /.snapshots 30 | snapper -c root create-config / 31 | btrfs subvolume delete /.snapshots 32 | mkdir /.snapshots 33 | mount -a 34 | chmod -R 750 /.snapshots 35 | args: 36 | executable: /usr/bin/bash 37 | become: true 38 | when: not snapper_dir.stat.exists 39 | 40 | - name: Edit /etc/snapper/configs/root 41 | ansible.builtin.lineinfile: 42 | path: /etc/snapper/configs/root 43 | regexp: "{{ item.regexp }}" 44 | line: "{{ item.line }}" 45 | loop: 46 | - { regexp: '^TIMELINE_CREATE=', line: 'TIMELINE_CREATE="yes"' } 47 | - { regexp: '^TIMELINE_CLEANUP=', line: 'TIMELINE_CLEANUP="yes"' } 48 | - { regexp: '^NUMBER_MIN_AGE=', line: 
'NUMBER_MIN_AGE="1800"' } 49 | - { regexp: '^NUMBER_LIMIT=', line: 'NUMBER_LIMIT="10"' } 50 | - { regexp: '^NUMBER_LIMIT_IMPORTANT=', line: 'NUMBER_LIMIT_IMPORTANT="10"' } 51 | - { regexp: '^TIMELINE_MIN_AGE=', line: 'TIMELINE_MIN_AGE="1800"' } 52 | - { regexp: '^TIMELINE_LIMIT_HOURLY=', line: "TIMELINE_LIMIT_HOURLY=\"{{ snapper_root_hourly }}\"" } 53 | - { regexp: '^TIMELINE_LIMIT_DAILY=', line: "TIMELINE_LIMIT_DAILY=\"{{ snapper_root_daily }}\"" } 54 | - { regexp: '^TIMELINE_LIMIT_WEEKLY=', line: "TIMELINE_LIMIT_WEEKLY=\"{{ snapper_root_weekly }}\"" } 55 | - { regexp: '^TIMELINE_LIMIT_MONTHLY=', line: "TIMELINE_LIMIT_MONTHLY=\"{{ snapper_root_monthly }}\"" } 56 | - { regexp: '^TIMELINE_LIMIT_YEARLY=', line: "TIMELINE_LIMIT_YEARLY=\"{{ snapper_root_yearly }}\"" } 57 | become: true 58 | 59 | - name: systemctl enable snapper-timeline.timer 60 | ansible.builtin.systemd: name=snapper-timeline.timer enabled=true 61 | become: true 62 | 63 | - name: systemctl enable snapper-cleanup.timer 64 | ansible.builtin.systemd: name=snapper-cleanup.timer enabled=true 65 | become: true 66 | 67 | - name: Create /etc/pacman.d/hooks/ if it does not exist 68 | ansible.builtin.file: 69 | path: /etc/pacman.d/hooks 70 | state: directory 71 | owner: root 72 | group: root 73 | mode: '0755' 74 | become: true 75 | 76 | - name: zz-signed_uki_backup.hook 77 | ansible.builtin.copy: 78 | src: zz-signed_uki_backup.hook 79 | dest: /etc/pacman.d/hooks/zz-signed_uki_backup.hook 80 | owner: root 81 | group: root 82 | mode: '0644' 83 | become: true 84 | 85 | -------------------------------------------------------------------------------- /roles/podman/tasks/letsencrypt.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # # Time zone 3 | # TZ: "UTC" 4 | # 5 | # # Run podman under this users. 6 | # # If the user does not exist it will create a new user. 7 | # # To manage systemd services under different users add `-M username@` to `systemctl` command, 8 | # # for example: 9 | # # sudo systemctl --user -M tux@ status letsencrypt.service 10 | # # To view journal under different user with UID 10000 11 | # # sudo journalctl _UID=10000 _SYSTEMD_USER_UNIT=letsencrypt.service 12 | # podman_users: 13 | # 14 | # # Run Syncthing under user `tux` 15 | # - name: tux 16 | # 17 | # # UID of the user 18 | # uid: 10000 19 | # 20 | # # Enable lingering or not 21 | # enable_lingering: true 22 | # 23 | # # How often to clean up old podman images/containers. 
24 | # # This is the OnCalendar= option in podman-system-prune.timer 25 | # podman_system_prune_timer: daily 26 | # 27 | # # List of containers that will run under user `tux` 28 | # containers: 29 | # - letsencrypt 30 | # 31 | # # Path to store letsencrypt container config 32 | # letsencrypt_config_dir: "/path/to/container/config/letsencrypt" 33 | # # Also create `/path/to/container/config/letsencrypt/cloudflare.ini` that 34 | # # contains a single line: 35 | # # dns_cloudflare_api_token = xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 36 | # 37 | # # Email address for letsencrypt expiration notification 38 | # letsencrypt_email: "email@domain.com" 39 | # 40 | # # Domains contained in the letsencrypt certificate 41 | # letsencrypt_domains: 42 | # - '*.mydomain.example' 43 | 44 | - name: letsencrypt container config directory 45 | ansible.builtin.file: 46 | path: "{{ podman_user.letsencrypt_config_dir }}" 47 | state: directory 48 | owner: "{{ podman_user.name }}" 49 | group: "{{ podman_user.name }}" 50 | mode: '0700' 51 | become: true 52 | 53 | - name: letsencrypt.container 54 | ansible.builtin.template: 55 | src: letsencrypt.container.j2 56 | dest: "/home/{{ podman_user.name }}/.config/containers/systemd/letsencrypt.container" 57 | owner: "{{ podman_user.name }}" 58 | group: "{{ podman_user.name }}" 59 | mode: '0600' 60 | become: true 61 | become_user: "{{ podman_user.name }}" 62 | 63 | - name: letsencrypt.timer 64 | ansible.builtin.copy: 65 | content: | 66 | [Unit] 67 | Description=letsencrypt certbot 68 | 69 | [Timer] 70 | OnCalendar=daily 71 | 72 | [Install] 73 | WantedBy=timers.target 74 | dest: "/home/{{ podman_user.name }}/.config/systemd/user/letsencrypt.timer" 75 | owner: "{{ podman_user.name }}" 76 | group: "{{ podman_user.name }}" 77 | mode: '0600' 78 | become: true 79 | become_user: "{{ podman_user.name }}" 80 | 81 | - name: systemctl --user daemon-reload 82 | ansible.builtin.systemd: daemon_reload=true scope=user 83 | become: true 84 | become_user: "{{ podman_user.name }}" 85 | 86 | - name: systemctl --user enable letsencrypt.timer 87 | ansible.builtin.systemd: name=letsencrypt.timer enabled=true scope=user 88 | become: true 89 | become_user: "{{ podman_user.name }}" 90 | ignore_errors: "{{ ansible_check_mode }}" 91 | 92 | -------------------------------------------------------------------------------- /roles/podman/tasks/gluetun.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # # Time zone 3 | # TZ: "UTC" 4 | # 5 | # # Run podman under these users. 6 | # # If the user does not exist it will create a new user. 7 | # # To manage systemd services under different users add `-M username@` to the `systemctl` command, 8 | # # for example: 9 | # # sudo systemctl --user -M tux@ status gluetun.service 10 | # # To view journal under different user with UID 10000 11 | # # sudo journalctl _UID=10000 _SYSTEMD_USER_UNIT=gluetun.service 12 | # podman_users: 13 | # 14 | # # Run gluetun under user `tux` 15 | # - name: tux 16 | # 17 | # # UID of the user 18 | # uid: 10000 19 | # 20 | # # Enable lingering or not 21 | # enable_lingering: true 22 | # 23 | # # How often to clean up old podman images/containers. 24 | # # This is the OnCalendar= option in podman-system-prune.timer 25 | # podman_system_prune_timer: daily 26 | # 27 | # # List of containers that will run under user `tux` 28 | # containers: 29 | # - gluetun 30 | # 31 | # # gluetun VPN provider env variables, here is a vanilla wireguard example. 
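# # (The `xxxx...` and `1.2.3.4` values below are placeholders; substitute your provider's real endpoint, keys, and addresses.)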
32 | # # see https://github.com/qdm12/gluetun-wiki/tree/main/setup/providers 33 | # # and https://github.com/qdm12/gluetun-wiki/blob/main/setup/options/port-forwarding.md 34 | # gluetun_vpn_provider_env: 35 | # - VPN_SERVICE_PROVIDER=custom 36 | # - VPN_TYPE=wireguard 37 | # - VPN_ENDPOINT_IP='1.2.3.4' 38 | # - VPN_ENDPOINT_PORT=51820 39 | # - WIREGUARD_PUBLIC_KEY='xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' 40 | # - WIREGUARD_PRIVATE_KEY='xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' 41 | # - WIREGUARD_ADDRESSES='ipv4/mask,ipv6/mask' 42 | # - VPN_PORT_FORWARDING_LISTENING_PORT='1234' 43 | # 44 | # # Optionally, enable gluetun http proxy with default HTTPPROXY_LISTENING_ADDRESS (8888) 45 | # # https://github.com/qdm12/gluetun-wiki/blob/main/setup/options/http-proxy.md 46 | # #gluetun_httpproxy: true 47 | 48 | - name: setsebool -P container_use_devices=true 49 | ansible.posix.seboolean: 50 | name: container_use_devices 51 | state: true 52 | persistent: true 53 | when: ansible_facts["selinux"]["status"] == "enabled" 54 | become: true 55 | 56 | - name: setsebool -P domain_kernel_load_modules=true 57 | ansible.posix.seboolean: 58 | name: domain_kernel_load_modules 59 | state: true 60 | persistent: true 61 | when: ansible_facts["selinux"]["status"] == "enabled" 62 | become: true 63 | 64 | - name: gluetun.container 65 | ansible.builtin.template: 66 | src: gluetun.container.j2 67 | dest: "/home/{{ podman_user.name }}/.config/containers/systemd/gluetun.container" 68 | owner: "{{ podman_user.name }}" 69 | group: "{{ podman_user.name }}" 70 | mode: '0600' 71 | become: true 72 | become_user: "{{ podman_user.name }}" 73 | 74 | - name: systemctl --user daemon-reload 75 | ansible.builtin.systemd: daemon_reload=true scope=user 76 | become: true 77 | become_user: "{{ podman_user.name }}" 78 | 79 | - name: systemctl --user enable --now gluetun.service 80 | ansible.builtin.systemd: name=gluetun enabled=true state=started scope=user 81 | become: true 82 | become_user: "{{ podman_user.name }}" 83 | ignore_errors: "{{ ansible_check_mode }}" 84 | -------------------------------------------------------------------------------- /roles/gui/tasks/Archlinux.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Get file system type of ~/ 3 | ansible.builtin.command: "stat --file-system --format=%T {{ ansible_facts.user_dir }}" 4 | register: home_fstype 5 | changed_when: false 6 | check_mode: false 7 | 8 | - include_tasks: snapper.yml 9 | when: home_fstype.stdout == 'btrfs' 10 | 11 | - name: Install GPU driver 12 | community.general.pacman: name={{ gpu_drivers }} state=present 13 | become: true 14 | 15 | - name: Install shell packages 16 | community.general.pacman: name={{ shell_pkgs }} state=present 17 | become: true 18 | when: shell_pkgs is defined 19 | 20 | - name: change shell (non systemd-homed) 21 | ansible.builtin.user: 22 | name: "{{ ansible_facts.user_id }}" 23 | shell: "{{ default_shell }}" 24 | become: true 25 | when: not homed 26 | 27 | - name: change shell (systemd-homed) 28 | community.general.homectl: 29 | name: "{{ ansible_facts.user_id }}" 30 | password: "{{ ansible_become_password }}" 31 | shell: "{{ default_shell }}" 32 | become: true 33 | when: homed 34 | 35 | - name: Install audio packages 36 | community.general.pacman: name={{ audio_pkgs }} state=present 37 | become: true 38 | when: audio_pkgs is defined 39 | 40 | - name: Install font packages 41 | community.general.pacman: name={{ fonts_pkgs }} state=present 42 | become: true 43 | when: 
fonts_pkgs is defined 44 | 45 | - name: Install WM packages 46 | community.general.pacman: name={{ wm_pkgs }} state=present 47 | become: true 48 | 49 | - name: systemctl enable sddm.service 50 | ansible.builtin.systemd: name=sddm.service enabled=true 51 | become: true 52 | when: '"sddm" in wm_pkgs' 53 | 54 | - name: systemctl enable gdm.service 55 | ansible.builtin.systemd: name=gdm.service enabled=true 56 | become: true 57 | when: '"gdm" in wm_pkgs' 58 | 59 | - name: Install other packages 60 | community.general.pacman: name={{ other_pkgs }} state=present 61 | become: true 62 | when: other_pkgs is defined 63 | 64 | - name: systemctl enable cups.socket 65 | ansible.builtin.systemd: name=cups.socket enabled=true 66 | become: true 67 | when: '"cups" in other_pkgs' 68 | 69 | - include_tasks: dotfiles.yml 70 | when: dotfiles_repo is defined 71 | 72 | - include_tasks: flatpak.yml 73 | when: flatpak_pkgs is defined 74 | 75 | - name: systemctl enable --now bluetooth.service 76 | ansible.builtin.systemd: name=bluetooth.service enabled=true state=started 77 | become: true 78 | when: '"bluez" in other_pkgs or "bluez-utils" in other_pkgs' 79 | 80 | - name: Create paru clone directory 81 | ansible.builtin.file: 82 | path: "{{ ansible_facts.user_dir }}/.cache/paru/clone" 83 | state: directory 84 | 85 | #- include_tasks: paru.yml 86 | # when: paru_chroot 87 | 88 | - name: Create screenshots directories 89 | ansible.builtin.file: 90 | path: "{{ ansible_facts.user_dir }}/screenshots" 91 | state: directory 92 | when: '"grim" in wm_pkgs' 93 | 94 | - name: Create Downloads directory 95 | ansible.builtin.file: 96 | path: "{{ ansible_facts.user_dir }}/Downloads" 97 | state: directory 98 | owner: "{{ ansible_facts.user_id }}" 99 | group: "{{ ansible_facts.user_id }}" 100 | #mode: '0700' 101 | 102 | -------------------------------------------------------------------------------- /roles/podman/tasks/paperless.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # # Time zone 3 | # TZ: "UTC" 4 | # 5 | # # Run podman under this users. 6 | # # If the user does not exist it will create a new user. 7 | # # To manage systemd services under different users add `-M username@` to `systemctl` command, 8 | # # for example: 9 | # # sudo systemctl --user -M tux@ status paperless-ngx.service 10 | # # sudo systemctl --user -M tux@ status paperless-ngx-redis.service 11 | # # sudo systemctl --user -M tux@ status paperless-ngx-pod.service 12 | # # To view journal under different user with UID 10000 13 | # # sudo journalctl _UID=10000 _SYSTEMD_USER_UNIT=paperless-ngx.service 14 | # # sudo journalctl _UID=10000 _SYSTEMD_USER_UNIT=paperless-ngx-redis.service 15 | # # sudo journalctl _UID=10000 _SYSTEMD_USER_UNIT=paperless-ngx-pod.service 16 | # podman_users: 17 | # 18 | # # Run Syncthing under user `tux` 19 | # - name: tux 20 | # 21 | # # UID of the user 22 | # uid: 10000 23 | # 24 | # # Enable lingering or not 25 | # enable_lingering: true 26 | # 27 | # # How often to clean up old podman images/containers. 
28 | # # This is the OnCalendar= option in podman-system-prune.timer 29 | # podman_system_prune_timer: daily 30 | # 31 | # # List of containers that will run under user `tux` 32 | # containers: 33 | # - paperless 34 | # 35 | # # paperless Web UI port 36 | # paperless_webui_port: 8000 37 | # 38 | # # Path to store paperless container config 39 | # paperless_config_dir: "/path/to/container/config/paperless" 40 | # 41 | # # Set PAPERLESS_URL when using with reverse proxy 42 | # paperless_url: https://paperless.domain.com 43 | 44 | - name: paperless config directory 45 | ansible.builtin.file: 46 | path: "{{ podman_user.paperless_config_dir }}/{{ item }}" 47 | state: directory 48 | owner: "{{ podman_user.name }}" 49 | group: "{{ podman_user.name }}" 50 | mode: '0700' 51 | become: true 52 | loop: 53 | - paperless 54 | - redis 55 | 56 | - name: paperless and redis containers 57 | ansible.builtin.template: 58 | src: "{{ item }}.j2" 59 | dest: "/home/{{ podman_user.name }}/.config/containers/systemd/{{ item }}" 60 | owner: "{{ podman_user.name }}" 61 | group: "{{ podman_user.name }}" 62 | mode: '0600' 63 | become: true 64 | become_user: "{{ podman_user.name }}" 65 | loop: 66 | - paperless-ngx.pod 67 | - paperless-ngx.container 68 | - paperless-ngx-redis.container 69 | 70 | - name: systemctl --user daemon-reload 71 | ansible.builtin.systemd: daemon_reload=true scope=user 72 | become: true 73 | become_user: "{{ podman_user.name }}" 74 | 75 | - name: systemctl --user enable --now paperless-ngx-pod.service 76 | ansible.builtin.systemd: name=paperless-ngx-pod.service state=started enabled=true scope=user 77 | become: true 78 | become_user: "{{ podman_user.name }}" 79 | ignore_errors: "{{ ansible_check_mode }}" 80 | 81 | - name: systemctl --user enable --now paperless-ngx-redis.service 82 | ansible.builtin.systemd: name=paperless-ngx-redis.service state=started enabled=true scope=user 83 | become: true 84 | become_user: "{{ podman_user.name }}" 85 | ignore_errors: "{{ ansible_check_mode }}" 86 | 87 | - name: systemctl --user enable --now paperless-ngx.service 88 | ansible.builtin.systemd: name=paperless-ngx.service state=started enabled=true scope=user 89 | become: true 90 | become_user: "{{ podman_user.name }}" 91 | ignore_errors: "{{ ansible_check_mode }}" 92 | 93 | -------------------------------------------------------------------------------- /roles/nas/tasks/nfs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: pacman -S nfs-utils 3 | community.general.pacman: name=nfs-utils state=present 4 | become: true 5 | when: ansible_facts["distribution"] == "Archlinux" 6 | 7 | - name: dnf install nfs-utils 8 | ansible.builtin.dnf: name=nfs-utils state=present 9 | become: true 10 | when: ansible_facts["distribution"] == "Fedora" 11 | 12 | - name: Create NFS directories 13 | ansible.builtin.file: 14 | path: "{{ item.bind }}" 15 | state: directory 16 | loop: "{{ nfs_mount_point }}" 17 | become: true 18 | when: item.bind is defined 19 | 20 | - name: Add bind mount to /etc/fstab 21 | ansible.builtin.lineinfile: 22 | path: /etc/fstab 23 | regexp: "^{{ item.target }}\\s+{{ item.bind }}" 24 | line: "{{ item.target }} {{ item.bind }} none bind 0 0" 25 | state: present 26 | loop: "{{ nfs_mount_point }}" 27 | become: true 28 | when: item.bind is defined 29 | 30 | - name: add root mount to /etc/exports 31 | ansible.builtin.lineinfile: 32 | path: /etc/exports 33 | regexp: "^{{ nfs_root }}\\s+" 34 | line: "{{ nfs_root }} {{ nfs_root_ip_opt }}" 35 | state: present 36 |
become: true 37 | 38 | - name: add other mount points to /etc/exports (bind mount) 39 | ansible.builtin.lineinfile: 40 | path: /etc/exports 41 | regexp: "^{{ item.bind }}\\s+" 42 | line: "{{ item.bind }} {{ item.ip_opt }}" 43 | state: present 44 | loop: "{{ nfs_mount_point }}" 45 | become: true 46 | when: item.bind is defined 47 | 48 | - name: add other mount points to /etc/exports (no bind mount) 49 | ansible.builtin.lineinfile: 50 | path: /etc/exports 51 | regexp: "^{{ item.target }}\\s+" 52 | line: "{{ item.target }} {{ item.ip_opt }}" 53 | state: present 54 | loop: "{{ nfs_mount_point }}" 55 | become: true 56 | when: item.bind is not defined 57 | 58 | #- name: Add custom NFS rule to UFW 59 | # ansible.builtin.blockinfile: 60 | # path: /etc/ufw/applications.d/ufw-custom 61 | # block: | 62 | # [NFS-custom] 63 | # title=NFS server 64 | # description=NFS server 65 | # ports=2049/tcp 66 | # create: true 67 | # marker: "; NFS {mark} ANSIBLE MANAGED BLOCK" 68 | # 69 | #- name: Configure firewall for NFS 70 | # community.general.ufw: 71 | # rule: allow 72 | # direction: in 73 | # name: NFS-custom 74 | # from: "{{ item }}" 75 | # comment: "Allow NFS from {{ item }}" 76 | # loop: "{{ nfs_allow_ip }}" 77 | # 78 | - name: Set firewall rules for NFS 79 | ansible.posix.firewalld: 80 | rich_rule: rule family="ipv4" source address="{{ item }}" service name="nfs" accept 81 | #zone: "{{ firewalld_default_zone }}" 82 | permanent: true 83 | immediate: true 84 | state: enabled 85 | loop: "{{ nfs_accept_source_ipv4 }}" 86 | when: nfs_accept_source_ipv4 is defined 87 | become: true 88 | 89 | - name: (ArchLinux) systemctl enable nfsv4-server.service 90 | ansible.builtin.systemd: name=nfsv4-server.service enabled=true 91 | become: true 92 | when: ansible_facts["distribution"] == "Archlinux" 93 | 94 | - name: (ArchLinux) systemctl mask rpcbind.service rpcbind.socket nfs-server.service 95 | ansible.builtin.systemd: name={{ item }} enabled=false masked=true 96 | become: true 97 | loop: 98 | - rpcbind.service 99 | - rpcbind.socket 100 | - nfs-server.service 101 | when: ansible_facts["distribution"] == "Archlinux" 102 | 103 | - name: (Fedora) systemctl enable nfs-server.service 104 | ansible.builtin.systemd: name=nfs-server.service enabled=true 105 | become: true 106 | when: ansible_facts["distribution"] == "Fedora" 107 | 108 | -------------------------------------------------------------------------------- /roles/podman/tasks/syncthing.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # # Time zone 3 | # TZ: "UTC" 4 | # 5 | # # Run podman under these users. 6 | # # If the user does not exist, it will be created. 7 | # # To manage systemd services of a different user, add `-M username@` to the `systemctl` command, 8 | # # for example: 9 | # # sudo systemctl --user -M tux@ status syncthing.service 10 | # # To view the journal of a user with UID 10000 11 | # # sudo journalctl _UID=10000 _SYSTEMD_USER_UNIT=syncthing.service 12 | # podman_users: 13 | # 14 | # # Run Syncthing under user `tux` 15 | # - name: tux 16 | # 17 | # # UID of the user 18 | # uid: 10000 19 | # 20 | # # Enable lingering or not 21 | # enable_lingering: true 22 | # 23 | # # How often to clean up old podman images/containers.
24 | # # This is the OnCalendar= option in podman-system-prune.timer 25 | # podman_system_prune_timer: daily 26 | # 27 | # # List of containers that will run under user `tux` 28 | # containers: 29 | # - syncthing 30 | # 31 | # # syncthing Web UI port 32 | # syncthing_webui_port: 8384 33 | # 34 | # # Path to store syncthing container config 35 | # syncthing_config_dir: "/path/to/container/config/syncthing" 36 | # 37 | # # List of directories to map into syncthing container 38 | # syncthing_data_dirs: 39 | # - { src: /path/on/host/machine, dest: /path/in/container } 40 | # - { src: /another/path/on/host, dest: /another/path/in/container } 41 | 42 | - name: syncthing container config directory 43 | ansible.builtin.file: 44 | path: "{{ podman_user.syncthing_config_dir }}" 45 | state: directory 46 | owner: "{{ podman_user.name }}" 47 | group: "{{ podman_user.name }}" 48 | mode: '0700' 49 | become: true 50 | 51 | - name: syncthing.container 52 | ansible.builtin.template: 53 | src: syncthing.container.j2 54 | dest: "/home/{{ podman_user.name }}/.config/containers/systemd/syncthing.container" 55 | owner: "{{ podman_user.name }}" 56 | group: "{{ podman_user.name }}" 57 | mode: '0600' 58 | become: true 59 | become_user: "{{ podman_user.name }}" 60 | 61 | - name: systemctl --user daemon-reload 62 | ansible.builtin.systemd: daemon_reload=true scope=user 63 | become: true 64 | become_user: "{{ podman_user.name }}" 65 | 66 | - name: systemctl --user enable --now syncthing.service 67 | ansible.builtin.systemd: name=syncthing enabled=true state=started scope=user 68 | become: true 69 | become_user: "{{ podman_user.name }}" 70 | ignore_errors: "{{ ansible_check_mode }}" 71 | 72 | 73 | # 22000/tcp Syncthing Listening port 74 | - name: add syncthing firewalld service file syncthing.xml 75 | ansible.builtin.copy: 76 | content: | 77 | <?xml version="1.0" encoding="utf-8"?> 78 | <service> 79 | <short>Syncthing</short> 80 | <description>Syncthing</description> 81 | <port protocol="tcp" port="22000"/> 82 | </service> 83 | dest: /etc/firewalld/services/syncthing.xml 84 | owner: root 85 | group: root 86 | mode: '0644' 87 | become: true 88 | register: syncthing_firewalld_file 89 | 90 | - name: Reload firewalld when syncthing.xml changed 91 | ansible.builtin.command: firewall-cmd --reload 92 | become: true 93 | when: syncthing_firewalld_file.changed 94 | 95 | - name: Set firewall rules for Syncthing Listening port (TCP) 96 | ansible.posix.firewalld: 97 | rich_rule: rule family="ipv4" source address="{{ item }}" service name="syncthing" accept 98 | #zone: "{{ firewalld_default_zone }}" 99 | permanent: true 100 | immediate: true 101 | state: enabled 102 | loop: "{{ podman_user.syncthing_accept_source_ipv4 }}" 103 | become: true 104 | when: podman_user.syncthing_accept_source_ipv4 is defined -------------------------------------------------------------------------------- /roles/archlinux_common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add time servers 3 | ansible.builtin.lineinfile: 4 | path: /etc/systemd/timesyncd.conf 5 | regexp: "{{ item.regexp }}" 6 | line: "{{ item.line }}" 7 | insertafter: "{{ item.insertafter }}" 8 | loop: 9 | - { regexp: '^NTP=', insertafter: '^#\s*NTP=', line: NTP=0.arch.pool.ntp.org 1.arch.pool.ntp.org 2.arch.pool.ntp.org 3.arch.pool.ntp.org } 10 | - { regexp: '^FallbackNTP=', insertafter: '^#\s*FallbackNTP=', line: FallbackNTP=0.pool.ntp.org 1.pool.ntp.org 0.fr.pool.ntp.org } 11 | become: true 12 | 13 | - name: systemctl enable --now systemd-timesyncd.service 14 | ansible.builtin.systemd: name=systemd-timesyncd state=started enabled=true 15 | become:
true 16 | 17 | - name: Enable pacman parallel downloads 18 | ansible.builtin.lineinfile: 19 | path: /etc/pacman.conf 20 | regexp: '^ParallelDownloads =' 21 | line: ParallelDownloads = 5 22 | insertafter: '^#\s*ParallelDownloads = 5' 23 | become: true 24 | 25 | - name: pacman -S reflector 26 | community.general.pacman: name=reflector state=present 27 | become: true 28 | when: ansible_facts["architecture"] == 'x86_64' 29 | 30 | - name: Configure reflector 31 | ansible.builtin.lineinfile: 32 | path: /etc/xdg/reflector/reflector.conf 33 | regexp: '^--country' 34 | line: "--country {{ reflector_country }}" 35 | insertafter: '^#\s*--country' 36 | become: true 37 | when: ansible_facts["architecture"] == 'x86_64' 38 | ignore_errors: "{{ ansible_check_mode }}" 39 | 40 | - name: systemctl enable reflector.service 41 | ansible.builtin.systemd: name=reflector enabled=true 42 | become: true 43 | when: ansible_facts["architecture"] == 'x86_64' 44 | ignore_errors: "{{ ansible_check_mode }}" 45 | 46 | - name: systemctl enable --now reflector.timer 47 | ansible.builtin.systemd: name=reflector.timer enabled=true state=started 48 | become: true 49 | when: ansible_facts["architecture"] == 'x86_64' 50 | ignore_errors: "{{ ansible_check_mode }}" 51 | 52 | - name: pacman -S pacman-contrib for paccache 53 | community.general.pacman: name=pacman-contrib state=present 54 | become: true 55 | 56 | - name: systemctl enable --now paccache.timer 57 | ansible.builtin.systemd: name=paccache.timer enabled=true state=started 58 | become: true 59 | ignore_errors: "{{ ansible_check_mode }}" 60 | 61 | - name: systemctl enable --now fstrim.timer 62 | ansible.builtin.systemd: name=fstrim.timer enabled=true state=started 63 | become: true 64 | 65 | - name: systemctl enable --now restorecond.service (SELinux) 66 | ansible.builtin.systemd: name=restorecond.service enabled=true state=started 67 | become: true 68 | when: ansible_facts["selinux"]["status"] == "enabled" 69 | 70 | - name: Optimize AUR building CFLAGS 71 | ansible.builtin.lineinfile: 72 | path: /etc/makepkg.conf 73 | regexp: "{{ item.regexp }}" 74 | line: "{{ item.line }}" 75 | backrefs: "{{ item.backrefs }}" 76 | insertafter: "{{ item.insertafter }}" 77 | loop: 78 | - { regexp: '^(CFLAGS=.*-march=).*( -pipe.*)', line: '\1native -O3\2', backrefs: true, insertafter: '' } 79 | - { regexp: '^MAKEFLAGS=', line: 'MAKEFLAGS="-j$(nproc)"', backrefs: false, insertafter: '^#MAKEFLAGS=' } 80 | become: true 81 | 82 | - name: Optimize AUR building RUSTFLAGS 83 | ansible.builtin.lineinfile: 84 | path: /etc/makepkg.conf.d/rust.conf 85 | regexp: '^(RUSTFLAGS=").*(-Cforce-frame-pointers=yes.*)' 86 | line: '\1-C opt-level=3 -C target-cpu=native \2' 87 | backrefs: true 88 | insertafter: '^#RUSTFLAGS=' 89 | become: true 90 | 91 | - name: Get file system type of / 92 | ansible.builtin.command: stat --file-system --format=%T / 93 | become: true 94 | register: root_fstype 95 | changed_when: false 96 | check_mode: false 97 | 98 | - include_tasks: snapper.yml 99 | when: root_fstype.stdout == 'btrfs' 100 | 101 | -------------------------------------------------------------------------------- /roles/podman/tasks/podman_setup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create separate podman user 3 | # disable the podman account ( don't allow login ) 4 | # shell: "/usr/bin/nologin" 5 | ansible.builtin.user: 6 | name: "{{ podman_user.name }}" 7 | uid: "{{ podman_user.uid }}" 8 | password: '!' 
9 | password_lock: true 10 | create_home: true 11 | become: true 12 | when: 13 | - podman_user.name != ansible_facts["user_id"] 14 | - podman_user.name != "root" 15 | 16 | - name: Check lingering 17 | ansible.builtin.stat: 18 | path: "/var/lib/systemd/linger/{{ podman_user.name }}" 19 | register: podman_user_lingering 20 | become: true 21 | when: 22 | - podman_user.enable_lingering 23 | - podman_user.name != "root" 24 | 25 | - name: Enable lingering 26 | ansible.builtin.command: "loginctl enable-linger {{ podman_user.name }}" 27 | become: true 28 | when: 29 | - podman_user.enable_lingering 30 | - podman_user.name != "root" 31 | - not podman_user_lingering.stat.exists 32 | 33 | - name: Set /etc/subuid if using systemd-homed 34 | ansible.builtin.lineinfile: 35 | line: "{{ podman_user.name }}:{{ (podman_user.uid - 1000) * 65536 + 524288 }}:65536" 36 | regexp: "^{{ podman_user.name }}:" 37 | dest: /etc/subuid 38 | create: true 39 | owner: root 40 | group: root 41 | mode: '0644' 42 | become: true 43 | when: 44 | - ansible_facts["distribution"] == "Archlinux" 45 | - homed 46 | 47 | - name: Set /etc/subgid if using systemd-homed 48 | ansible.builtin.lineinfile: 49 | line: "{{ podman_user.name }}:{{ (podman_user.uid - 1000) * 65536 + 524288 }}:65536" 50 | regexp: "^{{ podman_user.name }}:" 51 | dest: /etc/subgid 52 | create: true 53 | owner: root 54 | group: root 55 | mode: '0644' 56 | become: true 57 | when: 58 | - ansible_facts["distribution"] == "Archlinux" 59 | - homed 60 | 61 | #- name: setup container configs directory 62 | # ansible.builtin.file: 63 | # path: "{{ podman_user.container_configs_dir }}" 64 | # state: directory 65 | # owner: "{{ podman_user.name }}" 66 | # group: "{{ podman_user.name }}" 67 | # mode: '0700' 68 | # become: true 69 | 70 | - name: setup systemd user directory 71 | ansible.builtin.file: 72 | path: "/home/{{ podman_user.name }}/.config/systemd/user" 73 | state: directory 74 | owner: "{{ podman_user.name }}" 75 | group: "{{ podman_user.name }}" 76 | mode: '0700' 77 | become: true 78 | become_user: "{{ podman_user.name }}" 79 | 80 | - name: Create podman user unit directory 81 | ansible.builtin.file: 82 | path: "/home/{{ podman_user.name }}/.config/containers/systemd" 83 | state: directory 84 | owner: "{{ podman_user.name }}" 85 | group: "{{ podman_user.name }}" 86 | mode: '0700' 87 | become: true 88 | become_user: "{{ podman_user.name }}" 89 | 90 | - name: create podman-system-prune.service/timer 91 | ansible.builtin.template: 92 | src: "{{ item }}.j2" 93 | dest: "/home/{{ podman_user.name }}/.config/systemd/user/{{ item }}" 94 | owner: "{{ podman_user.name }}" 95 | group: "{{ podman_user.name }}" 96 | mode: '0600' 97 | loop: 98 | - podman-system-prune.service 99 | - podman-system-prune.timer 100 | become: true 101 | become_user: "{{ podman_user.name }}" 102 | 103 | - name: systemctl --user enable podman-system-prune.timer 104 | ansible.builtin.systemd: name=podman-system-prune.timer enabled=true scope=user 105 | become: true 106 | become_user: "{{ podman_user.name }}" 107 | ignore_errors: "{{ ansible_check_mode }}" 108 | 109 | - name: systemctl --user enable podman-auto-update.timer 110 | ansible.builtin.systemd: name=podman-auto-update.timer enabled=true scope=user 111 | become: true 112 | become_user: "{{ podman_user.name }}" 113 | ignore_errors: "{{ ansible_check_mode }}" 114 | 115 | - include_tasks: "{{ container }}.yml" 116 | loop: "{{ podman_user.containers }}" 117 | loop_control: 118 | loop_var: container 119 | --------------------------------------------------------------------------------
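A note on the subordinate ID arithmetic in `podman_setup.yml` above: the expression `(podman_user.uid - 1000) * 65536 + 524288` gives each podman user its own block of 65536 subordinate UIDs/GIDs, so rootless containers belonging to different users never map overlapping host IDs. A worked example of the expression, under its apparent assumption that regular UIDs are counted from 1000 (user names illustrative):

```yaml
# (uid - 1000) * 65536 + 524288
# uid 1000  -> tux:524288:65536        subordinate IDs 524288..589823
# uid 1001  -> tux2:589824:65536       subordinate IDs 589824..655359
# uid 10000 -> penguin:590348288:65536
```

Consecutive UIDs land exactly 65536 apart, so the ranges tile without gaps or overlap.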
/roles/gui/README.md: -------------------------------------------------------------------------------- 1 | Install desktop environment or window manager, restore dotfiles, install Flatpak, etc. 2 | 3 | ## Tasks 4 | ### Arch Linux 5 | - Set up snapper for home directory if using btrfs. 6 | - Install GPU driver, audio packages. 7 | - Install and configure default shell. 8 | - Install desktop environment or window manager and fonts. 9 | - Install other packages. 10 | - Set up printer or bluetooth if the `cups` or `bluez` package is installed. 11 | - Set up Flatpak. 12 | - Restore dotfiles. 13 | 14 | ### Fedora 15 | - Install and configure default shell. 16 | - Install desktop environment or window manager. 17 | - Install other packages. 18 | - Set up Flatpak. 19 | - Restore dotfiles. 20 | 21 | 22 | ## Variables 23 | ### Arch Linux 24 | ```yaml 25 | gpu_drivers: 26 | - mesa 27 | - vulkan-radeon 28 | - libva-mesa-driver 29 | 30 | 31 | shell_pkgs: 32 | - zsh 33 | - zsh-completions 34 | - zsh-syntax-highlighting 35 | - zsh-autosuggestions 36 | - grml-zsh-config 37 | 38 | default_shell: /usr/bin/zsh 39 | 40 | 41 | # Packages for your Desktop environment or window manager. 42 | wm_pkgs: 43 | 44 | # Example: sway 45 | - sway 46 | - swaylock 47 | - swayidle 48 | - waybar 49 | - xdg-utils 50 | - xdg-desktop-portal 51 | - xorg-xwayland 52 | - wl-clipboard 53 | - xdg-desktop-portal-wlr 54 | # Firefox file chooser needs xdg-desktop-portal-gtk 55 | - xdg-desktop-portal-gtk 56 | # python-i3ipc needed for https://github.com/Bai-Chiang/dotfiles/blob/main/.config/sway/inactive-windows-transparency.py 57 | - python-i3ipc 58 | # grim and slurp for screenshot 59 | - grim 60 | - slurp 61 | # notification daemon 62 | - mako 63 | # app launcher 64 | - fuzzel 65 | 66 | # Example: hyprland 67 | - hyprland 68 | - xdg-desktop-portal-hyprland 69 | - xdg-desktop-portal-gtk 70 | - waybar 71 | - xdg-utils 72 | - xorg-xwayland 73 | - wl-clipboard 74 | - swaylock 75 | - swayidle 76 | - fuzzel 77 | # grim and slurp for screenshot 78 | - grim 79 | - slurp 80 | 81 | # Example: KDE 82 | - plasma-meta 83 | - plasma-wayland-session 84 | - sddm 85 | - phonon-qt5-vlc 86 | 87 | # Example: gnome 88 | - gnome-shell 89 | - xdg-desktop-portal-gnome 90 | - gnome-control-center 91 | - gdm 92 | - nautilus 93 | - gvfs 94 | - gvfs-nfs 95 | - gvfs-smb 96 | - gnome-tweaks 97 | - gnome-backgrounds 98 | 99 | 100 | dotfiles_repo: 101 | # https link to your dotfiles repo. This should be a public repo. 102 | https: 'https://github.com/username/dotfiles.git' 103 | 104 | # ssh link to your dotfiles repo. The playbook will replace the https link with the ssh link after cloning all dotfiles. 105 | # To push updates to GitHub, you need to create an ssh key, then add it to your GitHub account.
106 | ssh: 'git@github.com:username/dotfiles.git' 107 | 108 | 109 | audio_pkgs: 110 | - pipewire 111 | - pipewire-audio 112 | - pipewire-alsa 113 | - pipewire-pulse 114 | - pipewire-jack 115 | - wireplumber 116 | 117 | 118 | fonts_pkgs: 119 | - ttf-dejavu 120 | - noto-fonts-cjk 121 | - ttf-font-awesome 122 | - noto-fonts-emoji 123 | 124 | 125 | other_pkgs: 126 | - htop 127 | - neofetch 128 | 129 | # bluetooth 130 | - bluez 131 | - bluez-utils 132 | 133 | # printer 134 | - cups 135 | 136 | 137 | flatpak_pkgs: 138 | - com.github.tchx84.Flatseal 139 | - io.gitlab.librewolf-community 140 | - org.mozilla.firefox 141 | 142 | - com.valvesoftware.Steam 143 | - com.valvesoftware.Steam.CompatibilityTool.Proton-GE 144 | ``` 145 | 146 | ### Fedora 147 | ```yaml 148 | shell_pkgs: 149 | - zsh 150 | default_shell: /usr/bin/zsh 151 | 152 | 153 | wm_pkgs: 154 | # KDE 155 | - '@kde-desktop-environment' 156 | 157 | # Sway 158 | - '@sway-desktop-environment' 159 | 160 | 161 | other_pkgs: 162 | - htop 163 | 164 | 165 | dotfiles_repo: 166 | # https link to your dotfiles repo. This should be a public repo. 167 | https: 'https://github.com/username/dotfiles.git' 168 | 169 | # ssh link to your dotfiles repo. The playbook will replace the https link with the ssh link after cloning all dotfiles. 170 | # To push updates to GitHub, you need to create an ssh key, then add it to your GitHub account. 171 | ssh: 'git@github.com:username/dotfiles.git' 172 | ``` 173 | 174 | -------------------------------------------------------------------------------- /roles/podman/tasks/grafana_prometheus.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # # Time zone 3 | # TZ: "UTC" 4 | # 5 | # # Run podman under these users. 6 | # # If the user does not exist, it will be created. 7 | # # To manage systemd services of a different user, add `-M username@` to the `systemctl` command, 8 | # # for example: 9 | # # sudo systemctl --user -M tux@ status grafana.service 10 | # # sudo systemctl --user -M tux@ status prometheus-server.service 11 | # # sudo systemctl --user -M tux@ status prometheus-node-exporter.service 12 | # # sudo systemctl --user -M tux@ status grafana-prometheus-pod.service 13 | # # To view the journal of a user with UID 10000 14 | # # sudo journalctl _UID=10000 _SYSTEMD_USER_UNIT=grafana.service 15 | # # sudo journalctl _UID=10000 _SYSTEMD_USER_UNIT=prometheus-server.service 16 | # # sudo journalctl _UID=10000 _SYSTEMD_USER_UNIT=prometheus-node-exporter.service 17 | # # sudo journalctl _UID=10000 _SYSTEMD_USER_UNIT=grafana-prometheus-pod.service 18 | # podman_users: 19 | # 20 | # # Run Grafana and Prometheus under user `tux` 21 | # - name: tux 22 | # 23 | # # UID of the user 24 | # uid: 10000 25 | # 26 | # # Enable lingering or not 27 | # enable_lingering: true 28 | # 29 | # # How often to clean up old podman images/containers.
30 | # # This is the OnCalendar= option in podman-system-prune.timer 31 | # podman_system_prune_timer: daily 32 | # 33 | # # List of containers that will run under user `tux` 34 | # containers: 35 | # - grafana_prometheus 36 | # 37 | # # Grafana Web UI port 38 | # grafana_webui_port: 3000 39 | # 40 | # # Path to store Grafana and Prometheus container config 41 | # grafana_prometheus_config_dir: "/path/to/container/config/grafana_prometheus" 42 | # 43 | # # Prometheus.yml file 44 | # # see example in roles/podman/README.md 45 | # prometheus_yml: "files/prometheus.yml" 46 | # 47 | # # If only monitoring CPU and memory usage, set prometheus_host_mode to false. 48 | # # If you need to query more host info like network traffic, system processes, etc., 49 | # # set prometheus_host_mode to true. This will run the prometheus-node-exporter container 50 | # # with more privileges. 51 | # prometheus_host_mode: false 52 | 53 | - name: grafana and prometheus config directory 54 | ansible.builtin.file: 55 | path: "{{ podman_user.grafana_prometheus_config_dir }}/{{ item }}" 56 | state: directory 57 | owner: "{{ podman_user.name }}" 58 | group: "{{ podman_user.name }}" 59 | mode: '0700' 60 | become: true 61 | loop: 62 | - grafana 63 | - prometheus 64 | 65 | - name: Copy prometheus.yml file 66 | ansible.builtin.copy: 67 | src: "{{ podman_user.prometheus_yml }}" 68 | dest: "{{ podman_user.grafana_prometheus_config_dir }}/prometheus.yml" 69 | owner: "{{ podman_user.name }}" 70 | group: "{{ podman_user.name }}" 71 | mode: '0600' 72 | become: true 73 | 74 | - name: grafana prometheus containers 75 | ansible.builtin.template: 76 | src: "{{ item }}.j2" 77 | dest: "/home/{{ podman_user.name }}/.config/containers/systemd/{{ item }}" 78 | owner: "{{ podman_user.name }}" 79 | group: "{{ podman_user.name }}" 80 | mode: '0600' 81 | become: true 82 | become_user: "{{ podman_user.name }}" 83 | loop: 84 | - grafana-prometheus.pod 85 | - prometheus-server.container 86 | - prometheus-node-exporter.container 87 | - grafana.container 88 | 89 | - name: systemctl --user daemon-reload 90 | ansible.builtin.systemd: daemon_reload=true scope=user 91 | become: true 92 | become_user: "{{ podman_user.name }}" 93 | 94 | - name: systemctl --user enable --now grafana-prometheus-pod.service 95 | ansible.builtin.systemd: name=grafana-prometheus-pod.service state=started enabled=true scope=user 96 | become: true 97 | become_user: "{{ podman_user.name }}" 98 | ignore_errors: "{{ ansible_check_mode }}" 99 | 100 | - name: systemctl --user enable --now prometheus-node-exporter.service 101 | ansible.builtin.systemd: name=prometheus-node-exporter.service state=started enabled=true scope=user 102 | become: true 103 | become_user: "{{ podman_user.name }}" 104 | ignore_errors: "{{ ansible_check_mode }}" 105 | when: 106 | - podman_user.prometheus_host_mode is defined 107 | - podman_user.prometheus_host_mode 108 | 109 | -------------------------------------------------------------------------------- /roles/nas/tasks/samba.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: pacman -S samba 3 | community.general.pacman: name=samba state=present 4 | become: true 5 | when: ansible_facts["distribution"] == "Archlinux" 6 | 7 | - name: dnf install samba 8 | ansible.builtin.dnf: name=samba state=present 9 | become: true 10 | when: ansible_facts["distribution"] == "Fedora" 11 | 12 | - name: Create /etc/samba/smb.conf 13 | ansible.builtin.template: 14 | src: smb.conf.j2 15 | dest: /etc/samba/smb.conf 16 | validate: testparm
-s %s 17 | become: true 18 | 19 | - name: Create samba share directories 20 | ansible.builtin.file: 21 | path: "/srv/smb/{{ item.name }}" 22 | state: directory 23 | become: true 24 | loop: "{{ smb_share }}" 25 | 26 | - name: Add bind mount to /etc/fstab 27 | ansible.builtin.lineinfile: 28 | path: /etc/fstab 29 | regexp: "^{{ item.path }}\\s+/srv/smb/{{ item.name }}" 30 | line: "{{ item.path }} /srv/smb/{{ item.name }} none bind 0 0" 31 | state: present 32 | become: true 33 | loop: "{{ smb_share }}" 34 | 35 | #- name: Add custom Samba rule to UFW 36 | # ansible.builtin.blockinfile: 37 | # path: /etc/ufw/applications.d/ufw-custom 38 | # block: | 39 | # [Samba-custom] 40 | # title=SMB/CIFS server 41 | # description=SMB/CIFS server 42 | # ports=445/tcp 43 | # create: true 44 | # marker: "; SMB/CIFS {mark} ANSIBLE MANAGED BLOCK" 45 | # 46 | #- name: Configure firewall for CIFS 47 | # community.general.ufw: 48 | # rule: allow 49 | # direction: in 50 | # name: Samba-custom 51 | # from: "{{ item }}" 52 | # comment: "Allow SMB/CIFS from {{ item }}" 53 | # loop: "{{ CIFS_allow_ip }}" 54 | 55 | - name: Create samba user 56 | ansible.builtin.user: 57 | name: "{{ item.name }}" 58 | password: '!' 59 | password_lock: true 60 | create_home: false 61 | uid: "{{ item.uid }}" 62 | shell: "/usr/bin/nologin" 63 | become: true 64 | when: 65 | - item.name != ansible_facts["user_id"] 66 | - item.name != "root" 67 | loop: "{{ smb_users }}" 68 | 69 | - name: Add samba user 70 | ansible.builtin.shell: (echo {{ item.passwd }}; echo {{ item.passwd }}) | smbpasswd -s -a {{ item.name }} 71 | become: true 72 | register: samba_user 73 | changed_when: "'Added user' in samba_user.stdout" 74 | loop: "{{ smb_users }}" 75 | 76 | - name: Modify samba firewalld service file samba.xml 77 | ansible.builtin.copy: 78 | content: | 79 | <?xml version="1.0" encoding="utf-8"?> 80 | <service> 81 | <short>Samba</short> 82 | <description>Modified Samba service that only accepts port 445/tcp.</description> 83 | <port protocol="tcp" port="445"/> 84 | </service> 85 | dest: /etc/firewalld/services/samba.xml 86 | owner: root 87 | group: root 88 | mode: '0644' 89 | become: true 90 | register: samba_firewalld_file 91 | 92 | - name: Reload firewalld when samba.xml changed 93 | ansible.builtin.command: firewall-cmd --reload 94 | become: true 95 | when: samba_firewalld_file.changed 96 | 97 | - name: Set firewall rules for samba 98 | ansible.posix.firewalld: 99 | rich_rule: rule family="ipv4" source address="{{ item }}" service name="samba" accept 100 | #zone: "{{ firewalld_default_zone }}" 101 | permanent: true 102 | immediate: true 103 | state: enabled 104 | become: true 105 | loop: "{{ samba_accept_source_ipv4 }}" 106 | when: samba_accept_source_ipv4 is defined 107 | 108 | - name: systemctl enable smb.service 109 | ansible.builtin.systemd: name=smb enabled=true 110 | become: true 111 | 112 | - name: dnf install policycoreutils-python-utils 113 | ansible.builtin.dnf: name=policycoreutils-python-utils state=present 114 | become: true 115 | when: ansible_facts["distribution"] == "Fedora" 116 | 117 | - name: Set SELinux file context to shared directories 118 | community.general.sefcontext: 119 | target: "{{ item.path }}(/.*)?"
120 | setype: samba_share_t 121 | state: present 122 | become: true 123 | when: ansible_facts["selinux"]["status"] == "enabled" 124 | loop: "{{ smb_share }}" 125 | register: samba_share_sefcontext 126 | 127 | - name: Apply SELinux file context to shared directories 128 | ansible.builtin.command: "restorecon -R {{ item.path }}" 129 | become: true 130 | when: 131 | - ansible_facts["selinux"]["status"] == "enabled" 132 | - samba_share_sefcontext.changed 133 | loop: "{{ smb_share }}" 134 | 135 | -------------------------------------------------------------------------------- /host_vars/gui_example.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ############################################################################### 3 | # roles/archlinux_common 4 | # Use Reflector to automatically update the mirror list. 5 | # https://wiki.archlinux.org/title/Reflector 6 | # 7 | # reflector_country: France,Germany 8 | # will only use mirrors from France and Germany 9 | # To get all country names and codes, run 10 | # reflector --list-countries 11 | reflector_country: us 12 | 13 | ############################################################################### 14 | # roles/gui 15 | 16 | # Whether the home directory is mounted using systemd-homed 17 | homed: false 18 | 19 | gpu_drivers: 20 | - mesa 21 | 22 | shell_pkgs: 23 | - zsh 24 | - zsh-completions 25 | - zsh-syntax-highlighting 26 | - zsh-autosuggestions 27 | - grml-zsh-config 28 | 29 | default_shell: /usr/bin/zsh 30 | 31 | # Packages for your Desktop environment or window manager. 32 | wm_pkgs: 33 | 34 | # Example: sway 35 | - sway 36 | - swaylock 37 | - swayidle 38 | - waybar 39 | - polkit 40 | - xdg-utils 41 | - xdg-desktop-portal 42 | - xorg-xwayland 43 | - wl-clipboard 44 | - xdg-desktop-portal-wlr 45 | # Firefox file chooser needs xdg-desktop-portal-gtk 46 | - xdg-desktop-portal-gtk 47 | # python-i3ipc needed for https://github.com/Bai-Chiang/dotfiles/blob/main/.config/sway/inactive-windows-transparency.py 48 | - python-i3ipc 49 | # grim and slurp for screenshot 50 | - grim 51 | - slurp 52 | # notification daemon 53 | - mako 54 | # app launcher 55 | - fuzzel 56 | 57 | # hyprland 58 | - hyprland 59 | - xdg-desktop-portal-hyprland 60 | - xdg-desktop-portal-gtk 61 | - waybar 62 | - xdg-utils 63 | - xorg-xwayland 64 | - wl-clipboard 65 | - swaylock 66 | - swayidle 67 | - swaybg 68 | - fuzzel 69 | # grim and slurp for screenshot 70 | - grim 71 | - slurp 72 | 73 | # KDE 74 | - plasma-meta 75 | - sddm 76 | - phonon-qt6-vlc 77 | 78 | # gnome 79 | - gnome-shell 80 | - xdg-desktop-portal-gnome 81 | - gnome-control-center 82 | - gdm 83 | - nautilus 84 | - gvfs 85 | - gvfs-nfs 86 | - gvfs-smb 87 | - gnome-tweaks 88 | - gnome-backgrounds 89 | 90 | dotfiles_repo: 91 | # https link to your dotfiles repo. This should be a public repo. 92 | https: 'https://github.com/username/dotfiles.git' 93 | 94 | # ssh link to your dotfiles repo. The playbook will replace the https link with the ssh link after cloning all dotfiles. 95 | # To push updates to GitHub, you need to create an ssh key, then add it to your GitHub account.
96 | ssh: 'git@github.com:username/dotfiles.git' 97 | 98 | audio_pkgs: 99 | - pipewire 100 | - pipewire-audio 101 | - pipewire-alsa 102 | - pipewire-pulse 103 | - pipewire-jack 104 | - wireplumber 105 | 106 | fonts_pkgs: 107 | - ttf-dejavu 108 | - noto-fonts-cjk 109 | - ttf-font-awesome 110 | - noto-fonts-emoji 111 | 112 | other_pkgs: 113 | - kitty 114 | - htop 115 | - pass 116 | 117 | # bluetooth 118 | - bluez 119 | - bluez-utils 120 | 121 | # screen brightness 122 | - brightnessctl 123 | # printer config 124 | - system-config-printer 125 | # scanning tool 126 | - gscan2pdf 127 | 128 | # yt-dlp 129 | - yt-dlp 130 | - ffmpeg 131 | 132 | flatpak_pkgs: 133 | - com.github.tchx84.Flatseal 134 | - io.mpv.Mpv 135 | - io.gitlab.librewolf-community 136 | - org.mozilla.firefox 137 | 138 | - org.kde.dolphin 139 | - org.kde.gwenview 140 | - org.kde.okular 141 | - org.kde.kwrite 142 | 143 | 144 | ############################################################################### 145 | # roles/podman 146 | 147 | # TZ environment variable for LSIO images. 148 | TZ: "UTC" 149 | 150 | # Running podman as rootless user 151 | # name and uid specify the username and UID value. 152 | # The user will be created if it does not exist. 153 | podman_users: 154 | - name: tux 155 | uid: 1000 156 | 157 | # Enabling lingering for the user will automatically start the systemd user instance on start-up. 158 | enable_lingering: false 159 | 160 | # The frequency of running podman system prune, in systemd-timer format. 161 | podman_system_prune_timer: daily 162 | 163 | # List of all containers running under this user. 164 | containers: 165 | - syncthing 166 | 167 | # lscr.io/linuxserver/syncthing:latest 168 | syncthing_data_dirs: 169 | - { src: /home/tux/data, dest: /data } 170 | - { src: /home/tux/other_data, dest: /other_data } 171 | 172 | # Path to syncthing configs 173 | syncthing_config_dir: "/home/tux/data/container_configs/syncthing" 174 | 175 | # Syncthing Web UI port 176 | syncthing_webui_port: 8384 177 | 178 | 179 | -------------------------------------------------------------------------------- /roles/podman/tasks/unifi.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # # Time zone 3 | # TZ: "UTC" 4 | # 5 | # # Run podman under these users. 6 | # # If the user does not exist, it will be created. 7 | # # To manage systemd services of a different user, add `-M username@` to the `systemctl` command, 8 | # # for example: 9 | # # sudo systemctl --user -M tux@ status unifi.service 10 | # # sudo systemctl --user -M tux@ status unifi-db.service 11 | # # sudo systemctl --user -M tux@ status unifi-pod.service 12 | # # To view the journal of a user with UID 10000 13 | # # sudo journalctl _UID=10000 _SYSTEMD_USER_UNIT=unifi.service 14 | # # sudo journalctl _UID=10000 _SYSTEMD_USER_UNIT=unifi-db.service 15 | # # sudo journalctl _UID=10000 _SYSTEMD_USER_UNIT=unifi-pod.service 16 | # podman_users: 17 | # 18 | # # Run UniFi under user `tux` 19 | # - name: tux 20 | # 21 | # # UID of the user 22 | # uid: 10000 23 | # 24 | # # Enable lingering or not 25 | # enable_lingering: true 26 | # 27 | # # How often to clean up old podman images/containers.
28 | # # This is the OnCalendar= option in podman-system-prune.timer 29 | # podman_system_prune_timer: daily 30 | # 31 | # # List of containers that will run under user `tux` 32 | # containers: 33 | # - unifi 34 | # 35 | # # unifi Web UI port 36 | # unifi_webui_port: 4443 37 | # 38 | # # Path to store unifi container config 39 | # unifi_config_dir: "/path/to/container/config/unifi" 40 | # 41 | # # UniFi MongoDB password 42 | # unifi_db_pass: !unsafe xj9jKbfptRprydNwq9dmKjTPsDvxau 43 | # 44 | # # Allow device communication on these ports 45 | # # 3478/udp 46 | # # 10001/udp 47 | # # 8080/tcp 48 | # unifi_accept_source_ipv4: 49 | # - 192.168.1.0/24 50 | 51 | - name: Check unifi database directory 52 | ansible.builtin.stat: 53 | path: "{{ podman_user.unifi_config_dir }}/unifi-db" 54 | register: unifi_database_dir 55 | become: true 56 | become_user: "{{ podman_user.name }}" 57 | 58 | - name: unifi-controller container config directory 59 | ansible.builtin.file: 60 | path: "{{ podman_user.unifi_config_dir }}/{{ item }}" 61 | state: directory 62 | owner: "{{ podman_user.name }}" 63 | group: "{{ podman_user.name }}" 64 | mode: '0700' 65 | become: true 66 | loop: 67 | - unifi 68 | - unifi-db 69 | 70 | - name: First time setup 71 | include_tasks: unifi-init.yml 72 | when: not unifi_database_dir.stat.exists 73 | 74 | - name: unifi.pod, unifi-db.container and unifi.container 75 | ansible.builtin.template: 76 | src: "{{ item }}.j2" 77 | dest: "/home/{{ podman_user.name }}/.config/containers/systemd/{{ item }}" 78 | owner: "{{ podman_user.name }}" 79 | group: "{{ podman_user.name }}" 80 | mode: '0600' 81 | become: true 82 | become_user: "{{ podman_user.name }}" 83 | loop: 84 | - unifi.pod 85 | - unifi.container 86 | - unifi-db.container 87 | 88 | - name: systemctl --user daemon-reload 89 | ansible.builtin.systemd: daemon_reload=true scope=user 90 | become: true 91 | become_user: "{{ podman_user.name }}" 92 | 93 | - name: systemctl --user enable --now unifi-pod.service 94 | ansible.builtin.systemd: name=unifi-pod enabled=true state=started scope=user 95 | become: true 96 | become_user: "{{ podman_user.name }}" 97 | ignore_errors: "{{ ansible_check_mode }}" 98 | 99 | - name: systemctl --user enable --now unifi-db.service 100 | ansible.builtin.systemd: name=unifi-db enabled=true state=started scope=user 101 | become: true 102 | become_user: "{{ podman_user.name }}" 103 | ignore_errors: "{{ ansible_check_mode }}" 104 | 105 | - name: systemctl --user enable --now unifi.service 106 | ansible.builtin.systemd: name=unifi enabled=true state=started scope=user 107 | become: true 108 | become_user: "{{ podman_user.name }}" 109 | ignore_errors: "{{ ansible_check_mode }}" 110 | 111 | 112 | # 3478/udp UniFi STUN port 113 | # 10001/udp UniFi AP discovery 114 | # 8080/tcp UniFi device communication 115 | - name: add unifi firewalld service file unifi.xml 116 | ansible.builtin.copy: 117 | content: | 118 | <?xml version="1.0" encoding="utf-8"?> 119 | <service> 120 | <short>UniFi</short> 121 | <description>UniFi controller.</description>
122 | <port protocol="udp" port="3478"/> 123 | <port protocol="udp" port="10001"/> 124 | <port protocol="tcp" port="8080"/> 125 | </service> 126 | dest: /etc/firewalld/services/unifi.xml 127 | owner: root 128 | group: root 129 | mode: '0644' 130 | become: true 131 | register: unifi_firewalld_file 132 | 133 | - name: Reload firewalld when unifi.xml changed 134 | ansible.builtin.command: firewall-cmd --reload 135 | become: true 136 | when: unifi_firewalld_file.changed 137 | 138 | - name: Enable firewall rules for UniFi 139 | ansible.posix.firewalld: 140 | rich_rule: rule family="ipv4" source address="{{ item }}" service name="unifi" accept 141 | #zone: "{{ firewalld_default_zone }}" 142 | permanent: true 143 | immediate: true 144 | state: enabled 145 | loop: "{{ podman_user.unifi_accept_source_ipv4 }}" 146 | become: true 147 | when: podman_user.unifi_accept_source_ipv4 is defined 148 | 149 | -------------------------------------------------------------------------------- /roles/nas/README.md: -------------------------------------------------------------------------------- 1 | NAS and file server related tasks for Arch Linux. 2 | 3 | [`raid.yml`](tasks/raid.yml) and [`samba.yml`](tasks/samba.yml) should also work on Fedora. 4 | Since Fedora uses SELinux, the Samba share can only access files with the `samba_share_t` context. 5 | [`samba.yml`](tasks/samba.yml) will re-label those directories, 6 | but if the directory or one of its subdirectories is mounted into a podman container, its context will become `container_file_t`, so Samba won't have permission to access it. 7 | 8 | 9 | ## Tasks 10 | - RAID 11 | - Edit `/etc/crypttab` to decrypt hard drives on boot. 12 | - Edit `/etc/fstab` to create mount points. 13 | - btrfs scrub 14 | - Enable `btrfs-scrub@.timer` to automatically scrub btrfs volumes. 15 | - Create [`btrfs_scrub_report.sh`](templates/btrfs_scrub_report.sh.j2) to send the scrub result in an email. 16 | The email is configured with [`roles/msmtp`](/roles/msmtp/). 17 | - This task depends on [`roles/msmtp`](/roles/msmtp/). 18 | - [S.M.A.R.T.](https://wiki.archlinux.org/title/S.M.A.R.T.) status 19 | - Create self-test schedule. 20 | - Create [`smartd_notify.sh`](templates/smartd_notify.sh.j2) to let smartd send email warnings. 21 | The email is configured with [`roles/msmtp`](/roles/msmtp/). 22 | - This task depends on [`roles/msmtp`](/roles/msmtp/). 23 | - [NFS](https://wiki.archlinux.org/title/NFS) file server 24 | - Edit `/etc/fstab` and create bind mounts. 25 | - Edit `/etc/exports`. 26 | - Set up firewall rules for NFS. 27 | - [Samba](https://wiki.archlinux.org/title/Samba) 28 | - Edit `/etc/fstab` and create bind mounts. 29 | - Create `/etc/samba/smb.conf`. 30 | - Create samba user. 31 | - Set up samba firewall rules. 32 | 33 | ## Variables 34 | ### RAID 35 | Run [`raid.yml`](tasks/raid.yml) when `{{ crypttab_entries }}` or `{{ fstab_entries }}` is defined. 36 | ```yaml 37 | # decrypt disks (optional) 38 | # Skip if not defined 39 | crypttab_entries: 40 | 41 | # device mapper name, the decrypted volume will be /dev/mapper/cryptdisk0 42 | - device_mapper_name: cryptdisk0 43 | 44 | # here UUID is the UUID of the luks volume /dev/sda1 45 | UUID: 0a659df5-5f33-4fc9-bd20-9f32bc945f19 46 | 47 | # path to decrypt keyfile. Using a keyfile allows the drive to be decrypted automatically.
48 | keyfile: /path/to/keyfile 49 | 50 | # another device mapper name, the decrypted volume will be /dev/mapper/cryptdisk1 51 | - device_mapper_name: cryptdisk1 52 | 53 | # here UUID is the UUID of the luks volume /dev/sda2 54 | UUID: 3195bd48-c9c5-4523-98f5-f2b14ba481aa 55 | 56 | # path to decrypt keyfile 57 | keyfile: /path/to/keyfile 58 | 59 | 60 | # Add entries to /etc/fstab file (optional) 61 | # Skip if not defined 62 | fstab_entries: 63 | 64 | # here UUID is the UUID of the decrypted volume /dev/mapper/cryptdisk0 65 | - device: UUID=f55c9ddb-e245-430a-a902-13f8dd688458 66 | 67 | # The mount point 68 | mount_point: /home/tux/data 69 | 70 | # filesystem type 71 | fs: btrfs 72 | 73 | # mount options 74 | mount_opts: "noatime,compress=zstd:3,space_cache=v2,autodefrag,subvol=@data,nodev,nosuid,noexec" 75 | 76 | # The owner, group and permission for the mount point /home/tux/data 77 | # The playbook will create the mount point with this permission if it does not exist. 78 | owner: tux 79 | group: tux 80 | mode: '0700' 81 | 82 | 83 | # spindown timeout for the drive (optional) 84 | # Skip if not defined 85 | # The number will be passed to hdparm -S 86 | # 242 will set the timeout to 60 min 87 | # https://wiki.archlinux.org/title/Hdparm#Power_management_configuration 88 | hdparm_spindown: 242 89 | ``` 90 | 91 | ### btrfs scrub 92 | Set up btrfs scrub when `{{ btrfs_scrub_path }}` is defined. 93 | ```yaml 94 | # btrfs scrub paths. Use systemd-escape -p /path/to/mountpoint to get the escaped path 95 | btrfs_scrub_path: 96 | - { path: '/', escape: '-' } 97 | - { path: '/home/tux/data', escape: 'home-tux-data' } 98 | 99 | # Schedule btrfs scrub with systemd-timer format 100 | btrfs_scrub_time: 'Sun *-*-* 01:00:00' 101 | ``` 102 | 103 | ### S.M.A.R.T. 104 | Set up S.M.A.R.T. monitoring when `{{ smartd_time }}` is defined. 105 | ```yaml 106 | # schedule S.M.A.R.T. self-tests 107 | # https://wiki.archlinux.org/title/S.M.A.R.T.#Schedule_self-tests 108 | # smartd will also monitor all drives, and send email notifications with the information specified in roles/msmtp 109 | # The following example will schedule a short self-test every day between 00:00 and 01:00. 110 | smartd_time: '(S/../.././00)' 111 | ``` 112 | 113 | ### NFS 114 | Set up NFS when `{{ nfs_mount_point }}` is defined. 115 | ```yaml 116 | # NFS server 117 | # The root directory for NFSv4 118 | nfs_root: /srv/nfs 119 | 120 | # NFS root export options 121 | nfs_root_ip_opt: '192.168.122.1(rw,sync,nocrossmnt,fsid=0)' 122 | 123 | # NFS mount points 124 | nfs_mount_point: 125 | 126 | # The directory to be shared 127 | - target: /home/tux/data 128 | 129 | # Bind mount address of the target. See https://wiki.archlinux.org/title/NFS#Server 130 | bind: /srv/nfs/data 131 | 132 | # options for the mount point, same format as in /etc/exports 133 | ip_opt: '192.168.122.1(rw,sync,all_squash,anonuid=1000,anongid=1000)' 134 | 135 | # (Optional) Firewall rule, only allow NFS connections from these IP addresses. 136 | nfs_accept_source_ipv4: 137 | - 192.168.122.1 138 | ``` 139 | 140 | ### Samba 141 | Set up Samba when `{{ smb_share }}` is defined.
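The role renders `/etc/samba/smb.conf` from these variables (validating it with `testparm` before installing it) and exposes each share through a bind mount under `/srv/smb`; for the `data` share below, `tasks/samba.yml` adds the `/etc/fstab` line `/home/tux/data /srv/smb/data none bind 0 0`.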
142 | ```yaml 143 | # Samba share in /etc/samba/smb.conf 144 | # The following example will create a samba share 145 | # [data] 146 | # comment = data 147 | # path = /srv/smb/data 148 | # valid users = smb_username, smb_username2 149 | # public = no 150 | # browseable = no 151 | # printable = no 152 | # read only = no 153 | # create mask = 0664 154 | # directory mask = 2755 155 | # force create mode = 0644 156 | # force directory mode = 2755 157 | # /srv/smb/data is a bind mount, pointing to /home/tux/data 158 | smb_share: 159 | - name: data 160 | comment: data 161 | path: /home/tux/data # no trailing slash at the end 162 | valid_users: smb_username, smb_username2 163 | read_only: 'no' 164 | 165 | # Samba user with UID and password 166 | smb_users: 167 | - name: smb_username 168 | passwd: !unsafe pa$sw0r6 169 | uid: 10001 170 | - name: smb_username2 171 | passwd: !unsafe p@$sw0r6 172 | uid: 10002 173 | 174 | # (Optional) Firewall rule, only allow Samba connections from these IP addresses. 175 | samba_accept_source_ipv4: 176 | - 192.168.122.1 177 | ``` 178 | 179 | -------------------------------------------------------------------------------- /host_vars/headless_example.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This file contains credentials; it should be encrypted using ansible-vault 3 | 4 | ############################################################################### 5 | #roles/archlinux_common 6 | # Use Reflector to automatically update the mirror list. 7 | # https://wiki.archlinux.org/title/Reflector 8 | # 9 | # reflector_country: France,Germany 10 | # will only use mirrors from France and Germany 11 | # To get all country names and codes, run 12 | # reflector --list-countries 13 | reflector_country: us 14 | 15 | 16 | ############################################################################### 17 | #roles/openssh 18 | 19 | # Only allow these users to connect through ssh 20 | ssh_allowusers: 'tux user1 user2' 21 | 22 | # Set ssh HOSTKEY algorithm 23 | ssh_hostkey: ed25519 24 | 25 | # ssh PORT 26 | ssh_port: 22 27 | 28 | # firewall rule will only allow ssh connections from these addresses 29 | ssh_accept_source_ipv4: 30 | - 192.168.122.1 31 | 32 | 33 | ############################################################################### 34 | #roles/systemd_networkd 35 | # Static ip address with systemd-networkd 36 | 37 | networkd_static_ip: 38 | # interface name 39 | - nic: enp1s0 40 | 41 | # Static ip address with CIDR notation 42 | ip: 192.168.122.2/24 43 | 44 | # Gateway 45 | gateway: 192.168.122.1 46 | 47 | # DNS server 48 | dns: 192.168.122.1 49 | 50 | 51 | ############################################################################### 52 | #roles/msmtp 53 | # https://wiki.archlinux.org/title/Msmtp 54 | # Simple smtp client to send email notifications. 55 | # Using Gmail as an example. 56 | 57 | # account name 58 | msmtp_account: gmail 59 | 60 | # smtp server 61 | msmtp_host: smtp.gmail.com 62 | 63 | # smtp port 64 | msmtp_port: 587 65 | 66 | # Enable msmtp TLS 67 | msmtp_tls: "on" 68 | 69 | # Enable msmtp STARTTLS 70 | msmtp_tls_starttls: "on" 71 | 72 | # Your Gmail address 73 | msmtp_from: username@gmail.com 74 | 75 | # The email address you want to send to. Could be the same as the Gmail address.
76 | msmtp_to: account@domain.com 77 | 78 | # Your Gmail username without @gmail.com 79 | msmtp_user: username 80 | 81 | # Create an app password for Gmail https://myaccount.google.com/apppasswords 82 | msmtp_password: GmailAppPasswd 83 | 84 | 85 | ############################################################################### 86 | #roles/auto-update 87 | # Automatically update the system and send an email notification with the update result, as configured in roles/msmtp 88 | 89 | # The auto update time in systemd timer format. 90 | auto_update_time: '*-*-* 01:00:00' 91 | 92 | 93 | ############################################################################### 94 | #roles/nas 95 | # Storage related tasks, like editing the /etc/fstab file to mount disks, scheduling btrfs 96 | # scrub jobs, S.M.A.R.T. notifications, NFS and samba servers, etc. 97 | 98 | # decrypt disks (optional) 99 | crypttab_entries: 100 | 101 | # device mapper name, the decrypted volume will be /dev/mapper/cryptdisk0 102 | - device_mapper_name: cryptdisk0 103 | 104 | # here UUID is the UUID of the luks volume /dev/sda1 105 | UUID: 0a659df5-5f33-4fc9-bd20-9f32bc945f19 106 | 107 | # path to decrypt keyfile 108 | keyfile: /path/to/keyfile 109 | 110 | # another device mapper name, the decrypted volume will be /dev/mapper/cryptdisk1 111 | - device_mapper_name: cryptdisk1 112 | 113 | # here UUID is the UUID of the luks volume /dev/sda2 114 | UUID: 3195bd48-c9c5-4523-98f5-f2b14ba481aa 115 | 116 | # path to decrypt keyfile 117 | keyfile: /path/to/keyfile 118 | 119 | 120 | # Add entries to /etc/fstab file 121 | fstab_entries: 122 | 123 | # here UUID is the UUID of the decrypted volume /dev/mapper/cryptdisk0 124 | - device: UUID=f55c9ddb-e245-430a-a902-13f8dd688458 125 | 126 | # The mount point 127 | mount_point: /home/tux/data 128 | 129 | # filesystem type 130 | fs: btrfs 131 | 132 | # mount options 133 | mount_opts: "noatime,compress=zstd:3,space_cache=v2,autodefrag,subvol=@data,nodev,nosuid,noexec" 134 | 135 | # The owner, group and permission for the mount point /home/tux/data 136 | # The playbook will create the mount point with this permission if it does not exist. 137 | owner: tux 138 | group: tux 139 | mode: '0700' 140 | 141 | 142 | # btrfs scrub paths. Use systemd-escape -p /path/to/mountpoint to get the escaped path 143 | btrfs_scrub_path: 144 | - { path: '/', escape: '-' } 145 | - { path: '/home/tux/data', escape: 'home-tux-data' } 146 | 147 | # Schedule btrfs scrub with systemd-timer format 148 | btrfs_scrub_time: 'Sun *-*-* 01:00:00' 149 | 150 | 151 | # schedule S.M.A.R.T. self-tests 152 | # https://wiki.archlinux.org/title/S.M.A.R.T.#Schedule_self-tests 153 | # smartd will also monitor all drives, and send email notifications with the information specified in roles/msmtp 154 | smartd_time: '(S/../.././00)' 155 | 156 | 157 | # NFS server 158 | # The root directory for NFSv4 159 | nfs_root: /srv/nfs 160 | 161 | # NFS mount points 162 | nfs_mount_point: 163 | 164 | # The directory to be shared 165 | - target: /home/tux/data 166 | 167 | # Bind mount address of the target. See https://wiki.archlinux.org/title/NFS#Server 168 | bind: /srv/nfs/data 169 | 170 | # options for the mount point, same format as in /etc/exports 171 | ip_opt: '192.168.122.1(rw,sync,all_squash,anonuid=1000,anongid=1000)' 172 | 173 | # Firewall rule, only allow NFS connections from these IP addresses.
174 | nfs_accept_source_ipv4: 175 | - 192.168.122.1 176 | 177 | 178 | ############################################################################### 179 | #roles/ups 180 | # uninterruptible power supply setup 181 | # The email notifications will be sent via roles/msmtp 182 | 183 | # To prevent "Can't claim USB device error", add a udev rule. 184 | # Use lsusb to get the vendor and product IDs 185 | # See https://wiki.archlinux.org/title/Network_UPS_Tools#Can't_claim_USB_device_error 186 | ups_vender_id: '0764' 187 | ups_product_id: '0423' 188 | 189 | # Set UPS password in /etc/nut/upsd.users 190 | ups_password: upspassword 191 | 192 | 193 | ############################################################################### 194 | # roles/podman 195 | 196 | # TZ environment variable for LSIO images. 197 | TZ: "UTC" 198 | 199 | # Running podman as rootless user 200 | # name and uid specify the username and UID value. 201 | # The user will be created if it does not exist. 202 | podman_users: 203 | - name: tux 204 | uid: 1000 205 | 206 | # Enabling lingering for the user will automatically start the systemd user instance on start-up. 207 | enable_lingering: true 208 | 209 | # The frequency of running podman system prune, in systemd-timer format. 210 | podman_system_prune_timer: daily 211 | 212 | # List of all containers running under this user. 213 | containers: 214 | - syncthing 215 | 216 | # lscr.io/linuxserver/syncthing:latest 217 | syncthing_data_dirs: 218 | - { src: /home/tux/data, dest: /data } 219 | - { src: /home/tux/other_data, dest: /other_data } 220 | 221 | # Set up firewall rules to only allow incoming connections from these IP addresses. 222 | syncthing_accept_source_ipv4: 223 | - 192.168.122.1 224 | - 192.168.123.0/24 225 | 226 | # Path to syncthing configs 227 | syncthing_config_dir: "/podman_configs/syncthing" 228 | 229 | # Syncthing Web UI port 230 | syncthing_webui_port: 8384 231 | 232 | --------------------------------------------------------------------------------
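Since `podman_users` is a list, the same host can run several rootless users side by side, each with its own lingering setting, prune timer, and container set. A minimal sketch extending the example above with a hypothetical second user `penguin` running paperless-ngx (all names, UIDs, and paths are illustrative, using the variables documented in roles/podman/tasks/paperless.yml):

```yaml
podman_users:
  - name: tux
    uid: 1000
    enable_lingering: true
    podman_system_prune_timer: daily
    containers:
      - syncthing
    syncthing_config_dir: "/podman_configs/syncthing"
    syncthing_webui_port: 8384

  # hypothetical second rootless user
  - name: penguin
    uid: 10000
    enable_lingering: true
    podman_system_prune_timer: weekly
    containers:
      - paperless
    # variables consumed by roles/podman/tasks/paperless.yml
    paperless_webui_port: 8000
    paperless_config_dir: "/podman_configs/paperless"
    paperless_url: https://paperless.domain.com
```

Each user's units are then managed separately, e.g. `sudo systemctl --user -M penguin@ status paperless-ngx-pod.service`.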