├── .gitignore
├── LICENSE
├── README.md
├── ansible.cfg
├── arch_install.sh
├── arch_install_bcachefs.sh
├── debian_post_install.sh
├── fedora_post_install.sh
├── gui_example.yml
├── headless_example.yml
├── homed.sh
├── host_vars
├── gui_example.yml
└── headless_example.yml
├── inventory.yml
├── iommu.sh
├── mkarchiso.sh
├── roles
├── archlinux_common
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── zz-signed_uki_backup.hook
│ └── tasks
│ │ ├── main.yml
│ │ ├── paru.yml
│ │ └── snapper.yml
├── auto-update
│ ├── README.md
│ ├── files
│ │ └── auto-update.service
│ ├── tasks
│ │ ├── Archlinux.yml
│ │ ├── Debian.yml
│ │ ├── Fedora.yml
│ │ └── main.yml
│ └── templates
│ │ ├── auto-update.sh.j2
│ │ ├── auto-update.timer.j2
│ │ └── dnf-automatic-timer-override.conf.j2
├── docker
│ └── tasks
│ │ ├── Debian_install.yml
│ │ ├── docker_compose.yml
│ │ └── main.yml
├── gui
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ ├── failure-notification@.service
│ │ ├── flatpak-update.service
│ │ └── flatpak-update.timer
│ └── tasks
│ │ ├── Archlinux.yml
│ │ ├── Fedora.yml
│ │ ├── dotfiles.yml
│ │ ├── flatpak.yml
│ │ ├── main.yml
│ │ ├── paru.yml
│ │ └── snapper.yml
├── libvirt
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── msmtp
│ ├── README.md
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ └── msmtprc.j2
├── nas
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── btrfs_scrub.yml
│ │ ├── main.yml
│ │ ├── nfs.yml
│ │ ├── raid.yml
│ │ ├── samba.yml
│ │ └── smartd.yml
│ └── templates
│ │ ├── btrfs_scrub_report.sh.j2
│ │ ├── btrfs_scrub_service_override.conf.j2
│ │ ├── btrfs_scrub_timer_override.conf.j2
│ │ ├── smartd_notify.sh.j2
│ │ └── smb.conf.j2
├── nut
│ ├── README.md
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ ├── 50-ups.rules.j2
│ │ ├── msmtprc.j2
│ │ └── nut_notify.sh.j2
├── openssh
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ ├── homed.yml
│ │ └── main.yml
├── podman
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── legacy
│ │ ├── deluge.service.j2
│ │ ├── deluge.yml
│ │ ├── homeassistant.service.j2
│ │ ├── homeassistant.yml
│ │ ├── jellyfin.service.j2
│ │ ├── jellyfin.yml
│ │ ├── nextcloud-cron.service.j2
│ │ ├── nextcloud-cron.timer.j2
│ │ ├── nextcloud-pod.service.j2
│ │ ├── nextcloud.service.j2
│ │ ├── nextcloud.yml
│ │ ├── postgres.service.j2
│ │ ├── prowlarr.service.j2
│ │ ├── prowlarr.yml
│ │ ├── radarr.service.j2
│ │ ├── radarr.yml
│ │ ├── radicale.service.j2
│ │ ├── radicale.yml
│ │ ├── sonarr.service.j2
│ │ ├── sonarr.yml
│ │ ├── swag.service.j2
│ │ ├── swag.yml
│ │ └── thelounge.service.j2
│ ├── tasks
│ │ ├── autobrr.yml
│ │ ├── gluetun.yml
│ │ ├── letsencrypt.yml
│ │ ├── main.yml
│ │ ├── nextcloud.yml
│ │ ├── podman_install.yml
│ │ ├── podman_setup.yml
│ │ ├── qbittorrent.yml
│ │ ├── syncthing.yml
│ │ ├── thelounge.yml
│ │ ├── traefik.yml
│ │ ├── transmission.yml
│ │ ├── unifi-init.yml
│ │ └── unifi.yml
│ └── templates
│ │ ├── autobrr.container.j2
│ │ ├── copy-ssl.service.j2
│ │ ├── gluetun.container.j2
│ │ ├── letsencrypt.container.j2
│ │ ├── nextcloud-aio.container.j2
│ │ ├── podman-system-prune.service.j2
│ │ ├── podman-system-prune.timer.j2
│ │ ├── qbittorrent.container.j2
│ │ ├── syncthing.container.j2
│ │ ├── tailscale-traefik.container.j2
│ │ ├── thelounge.container.j2
│ │ ├── traefik.container.j2
│ │ ├── transmission.container.j2
│ │ ├── unifi-db-init-mongo.js.j2
│ │ ├── unifi-db.container.j2
│ │ └── unifi.container.j2
├── systemd_networkd
│ ├── README.md
│ ├── tasks
│ │ ├── Archlinux-prepare.yml
│ │ ├── Debian-prepare.yml
│ │ ├── Fedora-prepare.yml
│ │ └── main.yml
│ └── templates
│ │ └── en0.network.j2
└── wpa_supplicant
│ ├── README.md
│ ├── files
│ └── wpa_supplicant@fedora.service
│ └── tasks
│ ├── Archlinux_prepare.yml
│ ├── Fedora_prepare.yml
│ └── main.yml
├── tasks
└── update.yml
├── virsh_undefine.sh
└── virt-install_arch.sh
/.gitignore:
--------------------------------------------------------------------------------
1 | *
2 | !/.gitignore
3 | !/LICENSE
4 | !/README.md
5 |
6 | !/ansible.cfg
7 | !/inventory.yml
8 | !*_example*
9 |
10 | !/roles
11 | !/roles/**
12 | !/tasks
13 | !/tasks/*
14 | !/host_vars
15 | !/host_vars/*example.yml
16 |
17 | !/arch_install.sh
18 | !/arch_install_bcachefs.sh
19 | !/debian_post_install.sh
20 | !/fedora_post_install.sh
21 | !/homed.sh
22 | !/iommu.sh
23 | !/mkarchiso.sh
24 | !/virsh_undefine.sh
25 | !/virt-install_arch.sh
26 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | This repository is a collection of scripts and Ansible playbooks that I used to provision __all__ of my machines, from laptop to servers.
2 |
3 | - [`arch_install.sh`](arch_install.sh) script will install Arch Linux, based on my installation [notes](https://wiki.archlinux.org/title/User:Bai-Chiang/Arch_Linux_installation_with_unified_kernel_image_(UKI),_full_disk_encryption,_secure_boot,_btrfs_snapshots,_and_common_setups).
4 | This script will cover the [_Pre-installation_](https://wiki.archlinux.org/title/User:Bai-Chiang/Arch_Linux_installation_with_unified_kernel_image_(UKI),_full_disk_encryption,_secure_boot,_btrfs_snapshots,_and_common_setups#Pre-installation), [_Installation_](https://wiki.archlinux.org/title/User:Bai-Chiang/Arch_Linux_installation_with_unified_kernel_image_(UKI),_full_disk_encryption,_secure_boot,_btrfs_snapshots,_and_common_setups#Installation), and [_Configure the system_](https://wiki.archlinux.org/title/User:Bai-Chiang/Arch_Linux_installation_with_unified_kernel_image_(UKI),_full_disk_encryption,_secure_boot,_btrfs_snapshots,_and_common_setups#Configure_the_system) sections.
5 | It will also configure OpenSSH server, firewall, and user creation in the [_Post-installation_](https://wiki.archlinux.org/title/User:Bai-Chiang/Arch_Linux_installation_with_unified_kernel_image_(UKI),_full_disk_encryption,_secure_boot,_btrfs_snapshots,_and_common_setups#Post-installation) section.
6 | The remaining [_Post-installation_](https://wiki.archlinux.org/title/User:Bai-Chiang/Arch_Linux_installation_with_unified_kernel_image_(UKI),_full_disk_encryption,_secure_boot,_btrfs_snapshots,_and_common_setups#Post-installation) steps are covered by Ansible [`roles/archlinux_common`](roles/archlinux_common) and [`roles/gui`](roles/gui/).
7 |
8 | - [`arch_install_bcachefs.sh`](arch_install_bcachefs.sh) script will install Arch Linux with bcachefs instead, based on my installation [notes](https://wiki.archlinux.org/title/User:Bai-Chiang/Arch_Linux_installation_with_Bcachefs,_unified_kernel_image_(UKI),_secure_boot,_and_common_setups).
9 | Bcachefs is still considered as experimental, so make sure you have a __working__ backup.
10 | Similarly, it will cover the [_Pre-installation_](https://wiki.archlinux.org/title/User:Bai-Chiang/Arch_Linux_installation_with_Bcachefs,_unified_kernel_image_(UKI),_secure_boot,_and_common_setups#Pre-installation), [_Installation_](https://wiki.archlinux.org/title/User:Bai-Chiang/Arch_Linux_installation_with_Bcachefs,_unified_kernel_image_(UKI),_secure_boot,_and_common_setups#Installation), and [_Configure the system_](https://wiki.archlinux.org/title/User:Bai-Chiang/Arch_Linux_installation_with_Bcachefs,_unified_kernel_image_(UKI),_secure_boot,_and_common_setups#Configure_the_system) sections.
11 | In addition, it will also configure OpenSSH server, firewall, and user creation in the [_Post-installation_](https://wiki.archlinux.org/title/User:Bai-Chiang/Arch_Linux_installation_with_Bcachefs,_unified_kernel_image_(UKI),_secure_boot,_and_common_setups#Post-installation) section.
12 | The remaining [_Post-installation_](https://wiki.archlinux.org/title/User:Bai-Chiang/Arch_Linux_installation_with_Bcachefs,_unified_kernel_image_(UKI),_secure_boot,_and_common_setups#Post-installation) steps are covered by Ansible [`roles/archlinux_common`](roles/archlinux_common) and [`roles/gui`](roles/gui/).
13 | The script is designed to work with bcachefs on either a single-drive or a multi-drive setup.
14 | But currently only the single-drive setup without encryption works.
15 |
16 | - [`mkarchiso.sh`](mkarchiso.sh) will create the latest Arch Linux ISO under the `/tmp` directory, optionally with additional kernel parameters.
17 | It will also copy installation scripts to `/root` in the ISO image.
18 | ```
19 | sudo bash mkarchiso.sh
20 | ```
21 | [`virt-install_arch.sh`](virt-install_arch.sh) will create a virtual machine using ISO files created by `mkarchiso.sh`.
22 | It will not have graphics output, only serial output to terminal.
23 | This requires adding kernel parameter `console=ttyS0` to ISO image.
24 | ```
25 | sudo bash virt-install_arch.sh vm_name
26 | ```
27 | [`virsh_undefine.sh`](virsh_undefine.sh) will remove a virtual machine and its storage.
28 | ```
29 | sudo bash virsh_undefine.sh vm_name
30 | ```
31 | I use these scripts to test my Arch Linux installation scripts.
32 |
33 | - [`fedora_post_install.sh`](fedora_post_install.sh) and [`debian_post_install.sh`](debian_post_install.sh) will configure OpenSSH server port and firewall.
34 |
35 | - [`roles/`](roles/) directory contains various Ansible roles.
36 | You could find documentation of each Ansible role under its directory.
37 |
38 | # Usage
39 | ## Arch Linux Installation script
40 | - Boot into live ISO
41 | - Download the `arch_install.sh` file
42 | ```
43 | curl -LO https://raw.githubusercontent.com/Bai-Chiang/homelab_automation/main/arch_install.sh
44 | ```
45 | If you want to use systemd-homed, also download `homed.sh`
46 | ```
47 | curl -LO https://raw.githubusercontent.com/Bai-Chiang/homelab_automation/main/homed.sh
48 | ```
49 | - Run the installation script
50 | ```
51 | bash arch_install.sh
52 | ```
53 | If using systemd-homed the installation script will only set up a root account, and create `/root/homed.sh`.
54 | You need to reboot into newly installed system and login as root, then run
55 | ```
56 | bash homed.sh
57 | ```
58 |
59 | ## Ansible playbooks
60 | To run Ansible playbooks locally.
61 | - Download necessary packages
62 | ```
63 | pacman -S --needed git ansible
64 | ```
65 | - Clone this repository
66 | ```
67 | git clone https://github.com/Bai-Chiang/homelab_automation.git
68 | cd homelab_automation
69 | ```
70 | - Edit `gui_example.yml` and `host_vars/gui_example.yml`.
71 | You may also check `headless_example.yml` and `host_vars/headless_example.yml`.
72 | - Run ansible playbooks locally with
73 | ```
74 | ansible-playbook gui_example.yml
75 | ```
76 |
77 |
78 | # Ansible roles
79 | Here is the brief introduction of all Ansible roles, detailed documentation of each Ansible role listed under its directory, including all Ansible variables and examples.
80 | All Ansible roles listed below are tested with Arch Linux.
81 | Some are also tested with Fedora or Debian.
82 | - [`archlinux_common`](roles/archlinux_common/) contains common/sane [post-installation configuration](https://wiki.archlinux.org/title/User:Bai-Chiang/Installation_notes#Post-installation) for Arch Linux.
83 | - [`auto-update`](roles/auto-update/) will auto-update your system and reboot if necessary.
84 | For Arch Linux it will send an email contains `pacman -Syu` log to the email address configured in [`roles/msmtp`](roles/msmtp/).
85 | - [`gui`](roles/gui/) related tasks, like installing GPU driver, PipeWire, desktop environment, Flatpak, restore dotfiles, and setup snapshot for your home directory.
86 | - [`msmtp`](roles/msmtp/) configures a simple SMTP client, used to send email notification.
87 | - [`nas`](roles/nas/) will edit fstab to mount extra disk and set up btrfs-scrub timer.
88 | It will send btrfs scrub result and S.M.A.R.T. notification to an email address configured with [`roles/msmtp`](roles/msmtp/).
89 | It can also set up NFS and Samba servers.
90 | - [`podman`](roles/podman/) rootless containers that I used in my homelab. These containers could run as different users.
91 | - [`nut`](roles/nut/)(Network UPS Tools) will monitor UPS status and send email notification configured with [`roles/msmtp`](roles/msmtp/).
92 | - [`openssh`](roles/openssh/) server limit allowed login user, only allow public key authentication, set up firewall rules.
93 | - [`systemd_networkd`](roles/systemd_networkd/) configuration, either single NIC with static IP or custom setup.
94 | - [`wpa_supplicant`](roles/wpa_supplicant/) setup when using [systemd-networkd](https://wiki.archlinux.org/title/Systemd-networkd) as network manager. __DOES NOT__ work with [NetworkManager](https://wiki.archlinux.org/title/NetworkManager).
95 | - [`libvirt`](roles/libvirt/) virtualization.
96 |
97 |
--------------------------------------------------------------------------------
/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory = inventory.yml
3 |
--------------------------------------------------------------------------------
/debian_post_install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 | # Post-installation setup for Debian: update the system, install basic
3 | # tools, configure a custom OpenSSH port, and set up firewalld rules.
4 | 
5 | apt update
6 | apt dist-upgrade
7 | 
8 | apt install vim rsync htop
9 | 
10 | # install firewalld
11 | apt install firewalld
12 | 
13 | # set ssh port
14 | echo "ssh port? (22)"
15 | read ssh_port
16 | ssh_port="${ssh_port:-22}"
17 | if [[ $ssh_port -ne 22 ]] ; then
18 | sed -i "s/^#Port.*/Port ${ssh_port}/" /etc/ssh/sshd_config
19 | # rewrite the firewalld ssh service definition with the custom port
20 | sed "/port=/s/port=\"22\"/port=\"${ssh_port}\"/" /usr/lib/firewalld/services/ssh.xml > /etc/firewalld/services/ssh.xml
21 | firewall-cmd --reload
22 | # restart the OpenSSH server so the new port takes effect without a reboot
23 | # (run this script from a local console, not over the old ssh port)
24 | systemctl restart ssh.service
25 | fi
26 | 
27 | read -p "Do you want to set default firewall zone to drop? [y/N] " firewall_drop
28 | firewall_drop="${firewall_drop:-n}"
29 | firewall_drop="${firewall_drop,,}"
30 | if [[ $firewall_drop == y ]] ; then
31 | firewall-cmd --set-default-zone=drop
32 | fi
33 | 
34 | echo -e "\nssh allow source ip address (example 192.168.1.0/24), empty to skip"
35 | read ssh_source
36 | if [[ -n $ssh_source ]]; then
37 | firewall-cmd --permanent --add-rich-rule="rule family='ipv4' source address='${ssh_source}' service name='ssh' accept"
38 | firewall-cmd --permanent --remove-service ssh
39 | fi
40 | firewall-cmd --reload
41 | 
42 | # raspberry pi
43 | # create wheel user and disable root user
44 | if [[ ${HOSTNAME:0:3} == rpi ]] ; then
45 | apt install sudo
46 | 
47 | read -p "Tell me your username: " username
48 | useradd -m -G sudo -s /usr/bin/bash "$username"
49 | passwd "$username"
50 | 
51 | echo "Disabling root ..."
52 | passwd -d root
53 | passwd -l root
54 | 
55 | echo -e "\n\nPlease tell me the hostname:"
56 | read hostname
57 | echo "$hostname" > /etc/hostname
58 | echo -e "127.0.0.1\t$hostname" >> /etc/hosts
59 | fi
60 | 
61 | 
--------------------------------------------------------------------------------
/fedora_post_install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 | # Post-installation setup for Fedora: configure a custom OpenSSH port
3 | # (including the SELinux port label) and firewalld rules.
4 | 
5 | dnf install -y policycoreutils-python-utils
6 | 
7 | # set ssh port
8 | echo "ssh port? (22)"
9 | read ssh_port
10 | ssh_port="${ssh_port:-22}"
11 | if [[ $ssh_port -ne 22 ]] ; then
12 | sed -i "s/^#Port.*/Port ${ssh_port}/" /etc/ssh/sshd_config
13 | # allow sshd to bind the custom port under SELinux
14 | semanage port -a -t ssh_port_t -p tcp $ssh_port
15 | # rewrite the firewalld ssh service definition with the custom port
16 | sed "/port=/s/port=\"22\"/port=\"${ssh_port}\"/" /usr/lib/firewalld/services/ssh.xml > /etc/firewalld/services/ssh.xml
17 | firewall-cmd --reload
18 | # restart the OpenSSH server so the new port takes effect without a reboot
19 | # (run this script from a local console, not over the old ssh port)
20 | systemctl restart sshd.service
21 | fi
22 | 
23 | read -p "Do you want to set default firewall zone to drop? [y/N] " firewall_drop
24 | firewall_drop="${firewall_drop:-n}"
25 | firewall_drop="${firewall_drop,,}"
26 | if [[ $firewall_drop == y ]] ; then
27 | firewall-cmd --set-default-zone=drop
28 | fi
29 | 
30 | echo -e "\nssh allow source ip address (example 192.168.1.0/24), empty to skip"
31 | read ssh_source
32 | if [[ -n $ssh_source ]]; then
33 | firewall-cmd --permanent --add-rich-rule="rule family='ipv4' source address='${ssh_source}' service name='ssh' accept"
34 | firewall-cmd --permanent --remove-service ssh
35 | fi
36 | firewall-cmd --reload
37 | 
38 | 
--------------------------------------------------------------------------------
/gui_example.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | 
5 | vars_prompt:
6 | - name: ansible_become_password
7 | private: yes
8 | 
9 | vars_files:
10 | - host_vars/gui_example.yml
11 | 
12 | pre_tasks:
13 | #- name: Enable multilib repo
14 | # blockinfile:
15 | # path: /etc/pacman.conf
16 | # insertafter: '^#\s*\[multilib\]'
17 | # block: |
18 | # [multilib]
19 | # Include = /etc/pacman.d/mirrorlist
20 | # become: yes
21 | 
22 | - import_tasks: tasks/update.yml
23 | 
24 | # Check more roles under roles/ directory
25 | roles:
26 | - archlinux_common
27 | - gui
28 | - podman
29 | 
30 | 
31 | 
--------------------------------------------------------------------------------
/headless_example.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: headless_example
3 | 
4 | vars_files:
5 | - host_vars/headless_example.yml
6 | 
7 | pre_tasks:
8 | - import_tasks: tasks/update.yml
9 | 
10 | roles:
11 | - archlinux_common
12 | - openssh
13 | - systemd_networkd
14 | - nas
15 | - msmtp
16 | - auto-update
17 | - nut
18 | - podman
19 | 
20 | 
--------------------------------------------------------------------------------
/homed.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 | # Create a systemd-homed user (LUKS-backed home directory) and
3 | # optionally disable the root account afterwards.
4 | 
5 | systemctl enable --now systemd-homed.service
6 | read -p "Tell me your username: " username
7 | read -p "uid: (default 1000)" uid
8 | uid="${uid:-1000}"
9 | read -p "Tell me the filesystem inside your home directory (btrfs or ext4): " fstype
10 | homectl create "$username" --uid="$uid" --member-of=wheel --shell=/bin/bash --storage=luks --fs-type="$fstype"
11 | 
12 | read -p "Do you want to disable root account? [Y/n] " disable_root
13 | # the prompt advertises Yes as the default, so an empty answer means y
14 | disable_root="${disable_root:-y}"
15 | disable_root="${disable_root,,}"
16 | if [[ $disable_root == y ]] ; then
17 | # https://wiki.archlinux.org/title/Sudo#Disable_root_login
18 | echo "Disabling root ..."
19 | passwd -d root
20 | passwd -l root
21 | fi
22 | 
--------------------------------------------------------------------------------
/host_vars/gui_example.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # roles/common
4 | # Use Reflector to automatically update the mirror list.
5 | # https://wiki.archlinux.org/title/Reflector
6 | #
7 | # reflector_country: France,Germany
8 | # will only use mirrors from France and Germany
9 | # To get all country names and codes, run
10 | # reflector --list-countries
11 | reflector_country: us
12 |
13 | ###############################################################################
14 | # roles/gui
15 |
16 | # Whether the home directory is mounted using systemd-homed
17 | homed: false
18 |
19 | gpu_drivers:
20 | - mesa
21 |
22 | shell_pkgs:
23 | - zsh
24 | - zsh-completions
25 | - zsh-syntax-highlighting
26 | - zsh-autosuggestions
27 | - grml-zsh-config
28 |
29 | default_shell: /usr/bin/zsh
30 |
31 | # Packages for your Desktop environment or window manager.
32 | wm_pkgs:
33 |
34 | # Example: sway
35 | - sway
36 | - swaylock
37 | - swayidle
38 | - waybar
39 | - xdg-utils
40 | - xdg-desktop-portal
41 | - xorg-xwayland
42 | - wl-clipboard
43 | - xdg-desktop-portal-wlr
44 | # Firefox file chooser needs xdg-desktop-portal-gtk
45 | - xdg-desktop-portal-gtk
46 | # python-i3ipc needed for https://github.com/Bai-Chiang/dotfiles/blob/main/.config/sway/inactive-windows-transparency.py
47 | - python-i3ipc
48 | # grim and slurp for screenshots
49 | - grim
50 | - slurp
51 | # notification daemon
52 | - mako
53 | # app launcher
54 | - fuzzel
55 |
56 | # hyprland
57 | - hyprland
58 | - xdg-desktop-portal-hyprland
59 | - xdg-desktop-portal-gtk
60 | - waybar
61 | - xdg-utils
62 | - xorg-xwayland
63 | - wl-clipboard
64 | - swaylock
65 | - swayidle
66 | - swaybg
67 | - fuzzel
68 | # grim and slurp for screenshots
69 | - grim
70 | - slurp
71 |
72 | # KDE
73 | - plasma-meta
74 | - sddm
75 | - phonon-qt6-vlc
76 |
77 | # gnome
78 | - gnome-shell
79 | - xdg-desktop-portal-gnome
80 | - gnome-control-center
81 | - gdm
82 | - nautilus
83 | - gvfs
84 | - gvfs-nfs
85 | - gvfs-smb
86 | - gnome-tweaks
87 | - gnome-backgrounds
88 |
89 | dotfiles_repo:
90 | # https link to your dotfiles repo. This should be a public repo.
91 | https: 'https://github.com/username/dotfiles.git'
92 |
93 | # ssh link to your dotfiles repo. The playbook will replace the https link with ssh link after clone all dotfiles.
94 | # To push updates to GitHub, you need to create an ssh key then add the key to your GitHub account.
95 | ssh: 'git@github.com:username/dotfiles.git'
96 |
97 | audio_pkgs:
98 | - pipewire
99 | - pipewire-audio
100 | - pipewire-alsa
101 | - pipewire-pulse
102 | - pipewire-jack
103 | - wireplumber
104 |
105 | fonts_pkgs:
106 | - ttf-dejavu
107 | - noto-fonts-cjk
108 | - ttf-font-awesome
109 | - noto-fonts-emoji
110 |
111 | other_pkgs:
112 | - kitty
113 | - htop
114 | - pass
115 |
116 | # bluetooth
117 | - bluez
118 | - bluez-utils
119 |
120 | # screen brightness
121 | - brightnessctl
122 | # printer config
123 | - system-config-printer
124 | # scanning tool
125 | - gscan2pdf
126 |
127 | # yt-dlp
128 | - yt-dlp
129 | - ffmpeg
130 |
131 | flatpak_pkgs:
132 | - com.github.tchx84.Flatseal
133 | - io.mpv.Mpv
134 | - io.gitlab.librewolf-community
135 | - org.mozilla.firefox
136 |
137 | - org.kde.dolphin
138 | - org.kde.gwenview
139 | - org.kde.okular
140 | - org.kde.kwrite
141 |
142 |
143 | ###############################################################################
144 | # roles/podman
145 |
146 | # TZ environment variable for LSIO images.
147 | TZ: "US/Eastern"
148 |
149 | # Running podman as rootless user
150 | # name and uid specify the username and UID value.
151 | # The user will be created if not exists.
152 | podman_users:
153 | - name: tux
154 | uid: 1000
155 |
156 | # Enabling lingering for the user will automatically start the systemd user instance on start-up.
157 | enable_lingering: false
158 |
159 | # The frequency of running podman system prune, in systemd-timer format.
160 | podman_system_prune_timer: daily
161 |
162 | # List of all containers running under this user.
163 | containers:
164 | - syncthing
165 |
166 | # lscr.io/linuxserver/syncthing:latest
167 | syncthing_data_dirs:
168 | - { src: /home/tux/data, dest: /data }
169 | - { src: /home/tux/other_data, dest: /other_data }
170 |
171 | # Path to syncthing configs
172 | syncthing_config_dir: "/home/tux/data/container_configs/syncthing"
173 |
174 |
175 |
--------------------------------------------------------------------------------
/host_vars/headless_example.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This file contains credentials, should be encrypted using ansible-vault
3 |
4 | ###############################################################################
5 | #role/archlinux_common
6 | # Use Reflector to automatically update the mirror list.
7 | # https://wiki.archlinux.org/title/Reflector
8 | #
9 | # reflector_country: France,Germany
10 | # will only use mirrors from France and Germany
11 | # To get all country names and codes, run
12 | # reflector --list-countries
13 | reflector_country: us
14 |
15 |
16 | ###############################################################################
17 | #role/openssh
18 |
19 | # Only allow these users connect through ssh
20 | ssh_allowusers: 'tux user1 user2'
21 |
22 | # Set ssh HOSTKEY algorithm
23 | ssh_hostkey: ed25519
24 |
25 | # ssh PORT
26 | ssh_port: 22
27 |
28 | # firewall rule will only allow ssh connection from these address
29 | ssh_accept_source_ipv4:
30 | - 192.168.122.1
31 |
32 |
33 | ###############################################################################
34 | #role/networkd_static
35 | # Static ip address with systemd-networkd
36 |
37 | # interface name
38 | static_nic: enp1s0
39 |
40 | # Static ip address with CIDR notation
41 | static_ip: 192.168.122.2/24
42 |
43 | # Gateway
44 | static_gateway: 192.168.122.1
45 |
46 | # DNS server
47 | static_dns: 192.168.122.1
48 |
49 |
50 | ###############################################################################
51 | #role/msmtp
52 | # https://wiki.archlinux.org/title/Msmtp
53 | # Simple smtp client to send email notification.
54 | # Using Gmail as an example.
55 |
56 | # account name
57 | msmtp_account: gmail
58 |
59 | # smtp server
60 | msmtp_host: smtp.gmail.com
61 |
62 | # smtp port
63 | msmtp_port: 587
64 |
65 | # Enable msmtp TLS
66 | msmtp_tls: "on"
67 |
68 | # Enable msmtp STARTTLS
69 | msmtp_tls_starttls: "on"
70 |
71 | # Your Gmail address
72 | msmtp_from: username@gmail.com
73 |
74 | # The email address you want to send to. Could be the same as Gmail address.
75 | msmtp_to: account@domain.com
76 |
77 | # Your Gmail username without @gmail.com
78 | msmtp_user: username
79 |
80 | # Create an app password for Gmail https://myaccount.google.com/apppasswords
81 | msmtp_password: GmailAppPasswd
82 |
83 |
84 | ###############################################################################
85 | #role/auto-update
86 | # Automatically update system and send an email notification with update result specified in roles/msmtp
87 |
88 | # The auto update time in systemd timer format.
89 | auto_update_time: '*-*-* 01:00:00'
90 |
91 |
92 | ###############################################################################
93 | #role/nas
94 | # Storage related tasks, like edit /etc/fstab file to mount disks, schedule btrfs
95 | # scrub jobs, S.M.A.R.T notifications, NFS and samba server, etc.
96 |
97 | # decrypt disks (optional)
98 | crypttab_entries:
99 |
100 | # device mapper name, the decrypted volume will be /dev/mapper/cryptdisk0
101 | - device_mapper_name: cryptdisk0
102 |
103 | # here UUID are the UUID of luks volume /dev/sda1
104 | UUID: 0a659df5-5f33-4fc9-bd20-9f32bc945f19
105 |
106 | # path to decrypt keyfile
107 | keyfile: /path/to/keyfile
108 |
109 | # another device mapper name, the decrypted volume will be /dev/mapper/cryptdisk1
110 | - device_mapper_name: cryptdisk1
111 |
112 | # here UUID are the UUID of luks volume /dev/sda2
113 | UUID: 3195bd48-c9c5-4523-98f5-f2b14ba481aa
114 |
115 | # path to decrypt keyfile
116 | keyfile: /path/to/keyfile
117 |
118 |
119 | # Add entries to /etc/fstab file
120 | fstab_entries:
121 |
122 | # here UUID are the uuid of decrypted volume /dev/mapper/cryptdisk0
123 | - device: UUID=f55c9ddb-e245-430a-a902-13f8dd688458
124 |
125 | # The mount point
126 | mount_point: /home/tux/data
127 |
128 | # filesystem type
129 | fs: btrfs
130 |
131 | # mount options
132 | mount_opts: "noatime,compress=zstd:3,space_cache=v2,autodefrag,subvol=@data,nodev,nosuid,noexec"
133 |
134 | # The owner, group and permission for the mount point /home/tux/data
135 | # The playbook will create the mount point with this permission if it does not exist.
136 | owner: tux
137 | group: tux
138 | mode: '0700'
139 |
140 |
141 | # btrfs scrub paths. Use systemd-escape -p /path/to/mountpoint to get escape path
142 | btrfs_scrub_path:
143 | - { path: '/', escape: '-' }
144 | - { path: '/home/tux/data', escape: 'home-tux-data' }
145 |
146 | # Schedule btrfs scrub with systemd-timer format
147 | btrfs_scrub_time: 'Sun *-*-* 01:00:00'
148 |
149 |
150 | # schedule S.M.A.R.T. self-tests
151 | # https://wiki.archlinux.org/title/S.M.A.R.T.#Schedule_self-tests
152 | # smartd will also monitor all drives, and send email notification with information specified in roles/msmtp
153 | smartd_time: '(S/../.././00)'
154 |
155 |
156 | # NFS server
157 | # The root directory for NFSv4
158 | nfs_root: /srv/nfs
159 |
160 | # NFS mount points
161 | nfs_mount_point:
162 |
163 | # The directory to be shared
164 | - target: /home/tux/data
165 |
166 | # Bind mount address of the target. See https://wiki.archlinux.org/title/NFS#Server
167 | bind: /srv/nfs/data
168 |
169 | # options for the mount point, same format as in /etc/exports
170 | ip_opt: '192.168.122.1(rw,sync,all_squash,anonuid=1000,anongid=1000)'
171 |
172 | # Firewall rule, only allow NFS connection from these ip address.
173 | nfs_accept_source_ipv4:
174 | - 192.168.122.1
175 |
176 |
177 | ###############################################################################
178 | #role/ups
179 | # uninterruptible power supply setup
180 | # The email notifications will be send via roles/msmtp
181 |
182 | # To prevent "Can't claim USB device error", add a udev rule.
183 | # Use lsusb to get the vendor_id:product_id pair
184 | # See https://wiki.archlinux.org/title/Network_UPS_Tools#Can't_claim_USB_device_error
185 | ups_vender_id: '0764'
186 | ups_product_id: '0423'
187 |
188 | # Set UPS password in /etc/nut/upsd.users
189 | ups_password: upspassword
190 |
191 |
192 | ###############################################################################
193 | # role/podman
194 |
195 | # TZ environment variable for LSIO images.
196 | TZ: "US/Eastern"
197 |
198 | # Running podman as rootless user
199 | # name and uid specify the username and UID value.
200 | # The user will be created if not exists.
201 | podman_users:
202 | - name: tux
203 | uid: 1000
204 |
205 | # Enabling lingering for the user will automatically start the systemd user instance on start-up.
206 | enable_lingering: true
207 |
208 | # The frequency of running podman system prune, in systemd-timer format.
209 | podman_system_prune_timer: daily
210 |
211 | # List of all containers running under this user.
212 | containers:
213 | - syncthing
214 |
215 | # lscr.io/linuxserver/syncthing:latest
216 | syncthing_data_dirs:
217 | - { src: /home/tux/data, dest: /data }
218 | - { src: /home/tux/other_data, dest: /other_data }
219 |
220 | # Set up firewall rules to only allow incoming connections from these ip addresses.
221 | syncthing_accept_source_ipv4:
222 | - 192.168.122.1
223 | - 192.168.123.0/24
224 |
225 | # Path to syncthing configs
226 | syncthing_config_dir: "/podman_configs/syncthing"
227 |
228 |
--------------------------------------------------------------------------------
/inventory.yml:
--------------------------------------------------------------------------------
1 | ---
2 | all:
3 | hosts:
4 |
5 | headless_example:
6 | ansible_host: 192.168.122.2
7 | ansible_user: tux
8 | ansible_port: 22
9 | ansible_ssh_private_key_file: /home/tux/.ssh/id_ed25519
10 |
--------------------------------------------------------------------------------
/iommu.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 | shopt -s nullglob  # unmatched globs expand to nothing instead of themselves
3 | for g in $(find /sys/kernel/iommu_groups/* -maxdepth 0 -type d | sort -V); do  # every IOMMU group directory, in version-number order
4 | echo "IOMMU Group ${g##*/}:"
5 | for d in $g/devices/*; do  # PCI devices belonging to this group
6 | echo -e "\t$(lspci -nns ${d##*/})"  # device description with [vendor:device] IDs
7 | done;
8 | done;
9 | 
10 | 
--------------------------------------------------------------------------------
/mkarchiso.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 | 
3 | # Build archlinux ISO file with optional extra kernel parameters.
4 | 
5 | # extra kernel command line parameters, appended to every ISO boot entry
6 | KERNEL_CMD="console=ttyS0"
7 | 
8 | # create a temp directory in current directory
9 | tempdir=$(mktemp -d --tmpdir=.)
10 | 
11 | # copy the releng profile used for the monthly ISO release
12 | cp -r /usr/share/archiso/configs/releng/ "$tempdir/archlive"
13 | 
14 | # copy installation scripts to /root/
15 | cp arch_install.sh "$tempdir/archlive/airootfs/root/"
16 | cp arch_install_bcachefs.sh "$tempdir/archlive/airootfs/root/"
17 | cp homed.sh "$tempdir/archlive/airootfs/root/"
18 | 
19 | if [[ -n "$KERNEL_CMD" ]] ; then
20 | # add kernel command to systemd-boot
21 | echo "options $KERNEL_CMD" >> "$tempdir/archlive/efiboot/loader/entries/01-archiso-x86_64-linux.conf"
22 | # add kernel command to grub
23 | sed -i "/\\s*linux.*archisodevice=UUID=\${ARCHISO_UUID}\$/ s|\$| $KERNEL_CMD|" "$tempdir/archlive/grub/grub.cfg"
24 | fi
25 | 
26 | mkarchiso -v -w "$tempdir/work" -o /tmp "$tempdir/archlive"
27 | 
28 | rm -r "$tempdir"
29 | 
--------------------------------------------------------------------------------
/roles/archlinux_common/README.md:
--------------------------------------------------------------------------------
1 | Common [post-installation configuration](https://wiki.archlinux.org/title/User:Bai-Chiang/Installation_guide_(full_disk_encryption,secure_boot,unified_kernel_image,btrfs)#Post-installation) for Arch Linux.
2 |
3 | ## Tasks
4 | - Set up [time synchronization](https://wiki.archlinux.org/title/Systemd-timesyncd).
5 | - Enable [pacman parallel downloads](https://wiki.archlinux.org/title/Pacman#Enabling_parallel_downloads).
6 | - Enable [reflector](https://wiki.archlinux.org/title/Reflector) to auto update pacman mirror list.
7 | - Enable [paccache](https://wiki.archlinux.org/title/Pacman#Cleaning_the_package_cache) to auto clean up pacman package cache.
8 | - Enable [Periodic TRIM](https://wiki.archlinux.org/title/Solid_state_drive#Periodic_TRIM) for SSD.
9 | - Enable [native build](https://wiki.archlinux.org/title/Makepkg#Building_optimized_binaries) and [parallel compilation](https://wiki.archlinux.org/title/Makepkg#Parallel_compilation) and `-O3` optimization when building AUR packages.
10 | - Set up [snapper](https://wiki.archlinux.org/title/Snapper) for root partition if using btrfs.
11 |
12 |
13 | ## Variables
14 | ```yaml
15 | # Specify `reflector --country`
16 | # https://man.archlinux.org/man/reflector.1#EXAMPLES
17 | # Restrict pacman mirrors to selected countries. Countries may be given by name or country code, or a mix of both.
18 | # Use `reflector --list-countries` get a list of available countries and country codes.
19 | reflector_country: us,France,Germany
20 |
21 |
22 | # Snapshot limits
23 | # https://wiki.archlinux.org/title/Snapper#Set_snapshot_limits
24 | # Default values given below
25 | #snapper_root_hourly: 5
26 | #snapper_root_daily: 7
27 | #snapper_root_weekly: 0
28 | #snapper_root_monthly: 0
29 | #snapper_root_yearly: 0
30 | ```
31 |
32 |
--------------------------------------------------------------------------------
/roles/archlinux_common/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | snapper_root_hourly: 5
3 | snapper_root_daily: 7
4 | snapper_root_weekly: 0
5 | snapper_root_monthly: 0
6 | snapper_root_yearly: 0
7 |
--------------------------------------------------------------------------------
/roles/archlinux_common/files/zz-signed_uki_backup.hook:
--------------------------------------------------------------------------------
1 | [Trigger]
2 | Operation = Upgrade
3 | Operation = Install
4 | Operation = Remove
5 | Type = Package
6 | Target = *
7 |
8 | [Action]
9 | Depends = rsync
10 | Description = Backing up /efi...
11 | When = PostTransaction
12 | Exec = /usr/bin/rsync --archive --delete /efi/ /.efibackup
13 |
--------------------------------------------------------------------------------
/roles/archlinux_common/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Add time servers
3 | ansible.builtin.lineinfile:
4 | path: /etc/systemd/timesyncd.conf
5 | regexp: "{{ item.regexp }}"
6 | line: "{{ item.line }}"
7 | insertafter: "{{ item.insertafter }}"
8 | loop:
9 | - { regexp: '^NTP=', insertafter: '^#\s*NTP=', line: NTP=0.arch.pool.ntp.org 1.arch.pool.ntp.org 2.arch.pool.ntp.org 3.arch.pool.ntp.org }
10 | - { regexp: '^FallbackNTP=', insertafter: '^#\s*FallbackNTP=', line: FallbackNTP=0.pool.ntp.org 1.pool.ntp.org 0.fr.pool.ntp.org }
11 | become: true
12 |
13 | - name: systemctl enable --now systemd-timesyncd.service
14 | ansible.builtin.systemd: name=systemd-timesyncd state=started enabled=true
15 | become: true
16 |
17 | - name: Enable pacman parallel downloads
18 | ansible.builtin.lineinfile:
19 | path: /etc/pacman.conf
20 | regexp: '^ParallelDownloads ='
21 | line: ParallelDownloads = 5
22 | insertafter: '^#\s*ParallelDownloads = 5'
23 | become: true
24 |
25 | - name: pacman -S reflector
26 | community.general.pacman: name=reflector state=present
27 | become: true
28 | when: ansible_architecture == 'x86_64'
29 |
30 | - name: Configure reflector
31 | ansible.builtin.lineinfile:
32 | path: /etc/xdg/reflector/reflector.conf
33 | regexp: '^--country'
34 | line: "--country {{ reflector_country }}"
35 | insertafter: '^#\s*--country'
36 | become: true
37 | when: ansible_architecture == 'x86_64'
38 | ignore_errors: "{{ ansible_check_mode }}"
39 |
40 | - name: systemctl enable reflector.service
41 | ansible.builtin.systemd: name=reflector enabled=true
42 | become: true
43 | when: ansible_architecture == 'x86_64'
44 | ignore_errors: "{{ ansible_check_mode }}"
45 |
46 | - name: systemctl enable --now reflector.timer
47 | ansible.builtin.systemd: name=reflector.timer enabled=true state=started
48 | become: true
49 | when: ansible_architecture == 'x86_64'
50 | ignore_errors: "{{ ansible_check_mode }}"
51 |
52 | - name: pacman -S pacman-contrib for paccache
53 | community.general.pacman: name=pacman-contrib state=present
54 | become: true
55 |
56 | - name: systemctl enable --now paccache.timer
57 | ansible.builtin.systemd: name=paccache.timer enabled=true state=started
58 | become: true
59 | ignore_errors: "{{ ansible_check_mode }}"
60 |
61 | - name: systemctl enable --now fstrim.timer
62 | ansible.builtin.systemd: name=fstrim.timer enabled=true state=started
63 | become: true
64 |
65 | - name: systemctl enable --now restorecond.service (SELinux)
66 | ansible.builtin.systemd: name=restorecond.service enabled=true state=started
67 | become: true
68 | when: ansible_selinux.status == "enabled"
69 |
70 | - name: Optimize AUR building CFLAGS
71 | ansible.builtin.lineinfile:
72 | path: /etc/makepkg.conf
73 | regexp: "{{ item.regexp }}"
74 | line: "{{ item.line }}"
75 | backrefs: "{{ item.backrefs }}"
76 | insertafter: "{{ item.insertafter }}"
77 | loop:
78 | - { regexp: '^(CFLAGS=.*-march=).*( -pipe.*)', line: '\1native -O3\2', backrefs: true, insertafter: '' }
79 | - { regexp: '^MAKEFLAGS=', line: 'MAKEFLAGS="-j$(nproc)"', backrefs: false, insertafter: '^#MAKEFLAGS=' }
80 | become: true
81 |
82 | - name: Optimize AUR building RUSTFLAGS
83 | ansible.builtin.lineinfile:
84 | path: /etc/makepkg.conf.d/rust.conf
85 | regexp: '^(RUSTFLAGS=").*(-Cforce-frame-pointers=yes.*)'
86 | line: '\1-C opt-level=3 -C target-cpu=native \2'
87 | backrefs: true
88 | insertafter: '^#RUSTFLAGS='
89 | become: true
90 |
91 | - name: Get file system type of /
92 | ansible.builtin.command: stat --file-system --format=%T /
93 | become: true
94 | register: root_fstype
95 | changed_when: false
96 | check_mode: false
97 |
98 | - include_tasks: snapper.yml
99 | when: root_fstype.stdout == 'btrfs'
100 |
101 |
--------------------------------------------------------------------------------
/roles/archlinux_common/tasks/paru.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create paru directory
3 | ansible.builtin.file:
4 | path: "{{ item }}"
5 | state: directory
6 | mode: '0755'
7 | loop:
8 | - "{{ ansible_user_dir }}/.cache/paru/clone"
9 | - "{{ ansible_user_dir }}/.cache/paru/chroot"
10 |
11 |
--------------------------------------------------------------------------------
/roles/archlinux_common/tasks/snapper.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: check /etc/snapper directory
3 | ansible.builtin.stat:
4 | path: /etc/snapper
5 | become: true
6 | register: snapper_dir
7 |
8 | - name: pacman.conf NoExtract = etc/cron.daily/snapper etc/cron.hourly/snapper
9 | ansible.builtin.lineinfile:
10 | path: /etc/pacman.conf
11 | regexp: '^NoExtract'
12 | line: NoExtract = etc/cron.daily/snapper etc/cron.hourly/snapper
13 | insertafter: '^#\s*NoExtract'
14 | become: true
15 | when: not snapper_dir.stat.exists
16 |
17 | - name: pacman -S snapper snap-pac rsync
18 | community.general.pacman:
19 | name:
20 | - snapper
21 | - snap-pac
22 | - rsync
23 | state: present
24 | become: true
25 |
26 | - name: Create snapper for /
27 | ansible.builtin.shell: |
28 | umount /.snapshots
29 | rm -r /.snapshots
30 | snapper -c root create-config /
31 | btrfs subvolume delete /.snapshots
32 | mkdir /.snapshots
33 | mount -a
34 | chmod -R 750 /.snapshots
35 | args:
36 | executable: /usr/bin/bash
37 | become: true
38 | when: not snapper_dir.stat.exists
39 |
40 | - name: Edit /etc/snapper/configs/root
41 | ansible.builtin.lineinfile:
42 | path: /etc/snapper/configs/root
43 | regexp: "{{ item.regexp }}"
44 | line: "{{ item.line }}"
45 | loop:
46 | - { regexp: '^TIMELINE_CREATE=', line: 'TIMELINE_CREATE="yes"' }
47 | - { regexp: '^TIMELINE_CLEANUP=', line: 'TIMELINE_CLEANUP="yes"' }
48 | - { regexp: '^NUMBER_MIN_AGE=', line: 'NUMBER_MIN_AGE="1800"' }
49 | - { regexp: '^NUMBER_LIMIT=', line: 'NUMBER_LIMIT="10"' }
50 | - { regexp: '^NUMBER_LIMIT_IMPORTANT=', line: 'NUMBER_LIMIT_IMPORTANT="10"' }
51 | - { regexp: '^TIMELINE_MIN_AGE=', line: 'TIMELINE_MIN_AGE="1800"' }
52 | - { regexp: '^TIMELINE_LIMIT_HOURLY=', line: "TIMELINE_LIMIT_HOURLY=\"{{ snapper_root_hourly }}\"" }
53 | - { regexp: '^TIMELINE_LIMIT_DAILY=', line: "TIMELINE_LIMIT_DAILY=\"{{ snapper_root_daily }}\"" }
54 | - { regexp: '^TIMELINE_LIMIT_WEEKLY=', line: "TIMELINE_LIMIT_WEEKLY=\"{{ snapper_root_weekly }}\"" }
55 | - { regexp: '^TIMELINE_LIMIT_MONTHLY=', line: "TIMELINE_LIMIT_MONTHLY=\"{{ snapper_root_monthly }}\"" }
56 | - { regexp: '^TIMELINE_LIMIT_YEARLY=', line: "TIMELINE_LIMIT_YEARLY=\"{{ snapper_root_yearly }}\"" }
57 | become: true
58 |
59 | - name: systemctl enable snapper-timeline.timer
60 | ansible.builtin.systemd: name=snapper-timeline.timer enabled=true
61 | become: true
62 |
63 | - name: systemctl enable snapper-cleanup.timer
64 | ansible.builtin.systemd: name=snapper-cleanup.timer enabled=true
65 | become: true
66 |
67 | - name: Create /etc/pacman.d/hooks/ if it does not exist
68 | ansible.builtin.file:
69 | path: /etc/pacman.d/hooks
70 | state: directory
71 | owner: root
72 | group: root
73 | mode: '0755'
74 | become: true
75 |
76 | - name: zz-signed_uki_backup.hook
77 | ansible.builtin.copy:
78 | src: zz-signed_uki_backup.hook
79 | dest: /etc/pacman.d/hooks/zz-signed_uki_backup.hook
80 | owner: root
81 | group: root
82 | mode: '0644'
83 | become: true
84 |
85 |
--------------------------------------------------------------------------------
/roles/auto-update/README.md:
--------------------------------------------------------------------------------
1 | Update the system and reboot if necessary.
2 |
3 | ## Variables
4 |
5 | ### Arch Linux
6 | Update with script [`auto-update.sh`](templates/auto-update.sh.j2).
7 | To enable email notification set up [`roles/msmtp`](/roles/msmtp/).
8 | This will send `pacman -Syu` log to the email address specified in [`roles/msmtp`](/roles/msmtp/).
9 | ```yaml
10 | # Auto-update time. With format of systemd-timer OnCalendar=
11 | auto_update_time: '01:00:00'
12 | ```
13 |
14 | ### Debian
15 | Set up [unattended upgrades](https://wiki.debian.org/UnattendedUpgrades).
16 | ```yaml
17 | # Optional auto-update time. With format of systemd-timer OnCalendar=
18 | #auto_update_time: '01:00:00'
19 | ```
20 |
21 | ### Fedora
22 | Set up [dnf-automatic](https://dnf.readthedocs.io/en/latest/automatic.html), with a specified reboot time.
23 | ```yaml
24 | # Optional auto-update time. With format of systemd-timer OnCalendar=
25 | #auto_update_time: '01:00:00'
26 | ```
27 |
--------------------------------------------------------------------------------
/roles/auto-update/files/auto-update.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=upgrade system
3 |
4 | [Service]
5 | Type=simple
6 | ExecStart=/usr/local/bin/auto-update.sh
7 |
8 |
--------------------------------------------------------------------------------
/roles/auto-update/tasks/Archlinux.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: pacman -S archlinux-contrib
3 | community.general.pacman: name=archlinux-contrib state=present
4 | become: true
5 |
6 | - name: auto-update.service
7 | ansible.builtin.copy:
8 | src: auto-update.service
9 | dest: /etc/systemd/system/auto-update.service
10 | owner: root
11 | group: root
12 | mode: '0644'
13 | become: true
14 |
15 | - name: auto-update.timer
16 | ansible.builtin.template:
17 | src: auto-update.timer.j2
18 | dest: /etc/systemd/system/auto-update.timer
19 | owner: root
20 | group: root
21 | mode: '0644'
22 | become: true
23 |
24 | - name: auto-update.sh
25 | ansible.builtin.template:
26 | src: auto-update.sh.j2
27 | dest: /usr/local/bin/auto-update.sh
28 | owner: root
29 | group: root
30 | mode: '0700'
31 | become: true
32 |
33 | - name: systemctl daemon-reload
34 | ansible.builtin.systemd: daemon_reload=true
35 | become: true
36 |
37 | - name: systemctl enable auto-update.timer
38 | ansible.builtin.systemd: name=auto-update.timer enabled=true state=started
39 | become: true
40 |
--------------------------------------------------------------------------------
/roles/auto-update/tasks/Debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: apt install unattended-upgrades
3 | ansible.builtin.apt: name=unattended-upgrades state=present
4 | become: true
5 |
6 | - name: Check /etc/apt/apt.conf.d/20auto-upgrades stats
7 | ansible.builtin.stat:
8 | path: /etc/apt/apt.conf.d/20auto-upgrades
9 | register: auto_upgrades
10 | become: true
11 |
12 | - name: Create /etc/apt/apt.conf.d/20auto-upgrades if not exists
13 | ansible.builtin.shell: |
14 | echo unattended-upgrades unattended-upgrades/enable_auto_updates boolean true | debconf-set-selections
15 | dpkg-reconfigure -f noninteractive unattended-upgrades
16 | become: true
17 | when: not auto_upgrades.stat.exists
18 |
19 | - name: 20auto-upgrades config
20 | ansible.builtin.lineinfile:
21 | path: /etc/apt/apt.conf.d/20auto-upgrades
22 | line: "{{ item.line }}"
23 | regexp: "{{ item.regexp }}"
24 | loop:
25 | - { line: 'APT::Periodic::Update-Package-Lists "1";', regexp: '^APT::Periodic::Update-Package-Lists' }
26 | - { line: 'APT::Periodic::Unattended-Upgrade "1";', regexp: '^APT::Periodic::Unattended-Upgrade' }
27 | become: true
28 |
29 | - name: /etc/apt/apt.conf.d/50unattended-upgrades set Automatic-Reboot-Time
30 | ansible.builtin.lineinfile:
31 | path: /etc/apt/apt.conf.d/50unattended-upgrades
32 | regexp: "{{ item.regexp }}"
33 | line: "{{ item.line }}"
34 | insertafter: "{{ item.insertafter }}"
35 | loop:
36 | - { regexp: '^Unattended-Upgrade::Automatic-Reboot ', insertafter: '^//Unattended-Upgrade::Automatic-Reboot ', line: 'Unattended-Upgrade::Automatic-Reboot "true";' }
37 | - { regexp: '^Unattended-Upgrade::Automatic-Reboot-WithUsers ', insertafter: '^//Unattended-Upgrade::Automatic-Reboot-WithUsers ', line: 'Unattended-Upgrade::Automatic-Reboot-WithUsers "true";' }
38 | - { regexp: '^Unattended-Upgrade::Remove-Unused-Dependencies ', insertafter: '^//Unattended-Upgrade::Remove-Unused-Dependencies ', line: 'Unattended-Upgrade::Remove-Unused-Dependencies "true";' }
39 | become: true
40 |
41 | - name: Create override directory for apt-daily-upgrade.timer
42 | ansible.builtin.file:
43 | path: /etc/systemd/system/apt-daily-upgrade.timer.d
44 | state: directory
45 | owner: root
46 | group: root
47 | mode: '0755'
48 | become: true
49 | when: auto_update_time is defined
50 |
51 | - name: Set upgrade time
52 | ansible.builtin.copy:
53 | dest: /etc/systemd/system/apt-daily-upgrade.timer.d/override.conf
54 | content: |
55 | [Timer]
56 | OnCalendar=
57 | OnCalendar={{ auto_update_time }}
58 | RandomizedDelaySec=0
59 | become: true
60 | when: auto_update_time is defined
61 |
62 |
--------------------------------------------------------------------------------
/roles/auto-update/tasks/Fedora.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: dnf install dnf-automatic
3 | ansible.builtin.dnf: name=dnf-automatic state=present
4 | become: true
5 |
6 | - name: Edit /etc/dnf/automatic.conf
7 | ansible.builtin.lineinfile:
8 | path: /etc/dnf/automatic.conf
9 | regexp: "{{ item.regexp }}"
10 | line: "{{ item.line }}"
11 | become: true
12 | loop:
13 | - { regexp: '^apply_updates =', line: "apply_updates = yes" }
14 | - { regexp: '^reboot =', line: "reboot = when-needed" }
15 |
16 | - name: Create /etc/systemd/system/dnf-automatic.timer.d/ directory
17 | ansible.builtin.file:
18 | path: /etc/systemd/system/dnf-automatic.timer.d
19 | state: directory
20 | owner: root
21 | group: root
22 | mode: '0755'
23 | become: true
24 |
25 | - name: Override default dnf-automatic.timer
26 | ansible.builtin.template:
27 | src: dnf-automatic-timer-override.conf.j2
28 | dest: /etc/systemd/system/dnf-automatic.timer.d/override.conf
29 | owner: root
30 | group: root
31 | mode: '0644'
32 | become: true
33 | when: auto_update_time is defined
34 |
35 | - name: systemctl enable dnf-automatic.timer
36 | ansible.builtin.systemd: name=dnf-automatic.timer enabled=true state=started
37 | become: true
38 |
39 | - name: systemctl daemon-reload
40 | ansible.builtin.systemd: daemon_reload=true
41 | become: true
42 |
--------------------------------------------------------------------------------
/roles/auto-update/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - include_tasks: "{{ ansible_distribution }}.yml"
3 |
--------------------------------------------------------------------------------
/roles/auto-update/templates/auto-update.sh.j2:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 |
3 | {% if msmtp_to is defined and msmtp_from is defined %}
4 | TO="{{ msmtp_to }}"
5 | FROM="{{ msmtp_from }}"
6 | SUBJECT="Auto-update $HOSTNAME $(/usr/bin/date '+%F %T')"
7 | {% endif %}
8 |
9 | # Check need reboot
10 | NUM_PKG=$(/usr/bin/checkupdates | grep -E 'linux|systemd' | wc -l)
11 | if [[ $NUM_PKG -eq 0 ]] ; then
12 | NEED_REBOOT=0
13 | else
14 | NEED_REBOOT=1
15 | fi
16 |
17 | # Create empty auto-update.log
18 | echo '' > /var/log/auto-update.log
19 |
20 | {% if btrfs_scrub_time is defined %}
21 | # systemctl is-active returns 0 if at least one btrfs-scrub instance is active
22 | /usr/bin/systemctl is-active --quiet --all "btrfs-scrub@*.service"
23 | BTRFS_SCRUB_EXIT=$?
24 | if [[ $BTRFS_SCRUB_EXIT -eq 0 ]] ; then
25 |     echo -e "btrfs-scrub is running. Skipped update.\n" >> /var/log/auto-update.log
26 | else
27 |     # Update system (echo -e so \n is interpreted, not written literally)
28 |     echo -e "# pacman -Syu --noconfirm --noprogressbar\n" >> /var/log/auto-update.log
29 |     /usr/bin/pacman -Syu --noconfirm --noprogressbar &>> /var/log/auto-update.log
30 |     echo -e "\n\n# checkservices -aP\n" >> /var/log/auto-update.log
31 |     /usr/bin/checkservices -aP &>> /var/log/auto-update.log
32 |     if [[ $NEED_REBOOT -eq 1 ]] ; then
33 |         echo -e "\n\n# systemctl reboot" >> /var/log/auto-update.log
34 |     fi
35 | fi
36 | {% else %}
37 | # Update system (echo -e so \n is interpreted, not written literally)
38 | echo -e "# pacman -Syu --noconfirm --noprogressbar\n" >> /var/log/auto-update.log
39 | /usr/bin/pacman -Syu --noconfirm --noprogressbar &>> /var/log/auto-update.log
40 | echo -e "\n\n# checkservices -aP\n" >> /var/log/auto-update.log
41 | /usr/bin/checkservices -aP &>> /var/log/auto-update.log
42 | if [[ $NEED_REBOOT -eq 1 ]] ; then
43 |     echo -e "\n\n# systemctl reboot" >> /var/log/auto-update.log
44 | fi
45 | {% endif %}
46 |
47 | {% if msmtp_to is defined and msmtp_from is defined %}
48 | # send email
49 | update_msg="$(cat /var/log/auto-update.log)"
50 | echo -e "To: ${TO}\nFrom: ${FROM}\nSubject: ${SUBJECT}\n${update_msg}" | msmtp --read-recipients --read-envelope-from
51 | while [[ $? -ne 0 ]]; do
52 | # if failed try resend 5min later
53 | sleep 300
54 | echo -e "To: ${TO}\nFrom: ${FROM}\nSubject: ${SUBJECT}\n${update_msg}" | msmtp --read-recipients --read-envelope-from
55 | done
56 | {% endif %}
57 |
58 | sleep 5
59 |
60 | {% if btrfs_scrub_time is defined %}
61 | if [[ $BTRFS_SCRUB_EXIT -ne 0 && $NEED_REBOOT -eq 1 ]] ; then
62 | # Reboot if necessary
63 | systemctl reboot
64 | fi
65 | {% else %}
66 | if [[ $NEED_REBOOT -eq 1 ]] ; then
67 | systemctl reboot
68 | fi
69 | {% endif %}
70 |
71 | exit 0
72 |
--------------------------------------------------------------------------------
/roles/auto-update/templates/auto-update.timer.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=upgrade system
3 |
4 | [Timer]
5 | OnCalendar={{ auto_update_time }}
6 |
7 | [Install]
8 | WantedBy=timers.target
9 |
--------------------------------------------------------------------------------
/roles/auto-update/templates/dnf-automatic-timer-override.conf.j2:
--------------------------------------------------------------------------------
1 | [Timer]
2 | OnCalendar=
3 | OnCalendar={{ auto_update_time }}
4 | RandomizedDelaySec=
5 | RandomizedDelaySec=0
6 | Persistent=
7 | Persistent=false
8 |
--------------------------------------------------------------------------------
/roles/docker/tasks/Debian_install.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: apt install ca-certificates curl gnupg
3 | ansible.builtin.apt:
4 | name:
5 | - ca-certificates
6 | - curl
7 | - gnupg
8 | state: present
9 | become: true
10 |
11 | - name: Add Docker’s official GPG key
12 | ansible.builtin.apt_key:
13 | url: https://download.docker.com/linux/debian/gpg
14 | state: present
15 | become: true
16 |
17 | - name: Add Docker repository
18 | ansible.builtin.apt_repository:
19 | repo: "deb https://download.docker.com/linux/debian {{ ansible_distribution_release }} stable"
20 | state: present
21 | become: true
22 |
23 | - name: apt install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
24 | ansible.builtin.apt:
25 | name:
26 | - docker-ce
27 | - docker-ce-cli
28 | - containerd.io
29 | - docker-buildx-plugin
30 | - docker-compose-plugin
31 | state: present
32 | become: true
33 |
34 |
--------------------------------------------------------------------------------
/roles/docker/tasks/docker_compose.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Copy docker-compose.yml
3 | ansible.builtin.copy:
4 | src: "{{ docker_compose_file }}"
5 | dest: /opt/docker-compose.yml
6 | owner: root
7 | group: root
8 | mode: '0600'
9 | become: true
10 |
11 |
--------------------------------------------------------------------------------
/roles/docker/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - include_tasks: "{{ ansible_distribution }}_install.yml"
3 |
4 | - import_tasks: docker_compose.yml
5 |
6 |
--------------------------------------------------------------------------------
/roles/gui/README.md:
--------------------------------------------------------------------------------
1 | Install desktop environment or window manager, restore dotfiles, install Flatpak etc.
2 |
3 | ## Tasks
4 | ### Arch Linux
5 | - Set up snapper for home directory if using btrfs.
6 | - Install GPU driver, audio packages.
7 | - Install and configure default shell.
8 | - Install desktop environment or window manager and fonts.
9 | - Install other packages.
10 | - Set up printer or bluetooth if `cups` or `bluez` package is installed.
11 | - Set up Flatpak.
12 | - Restore dotfiles.
13 |
14 | ### Fedora
15 | - Install and configure default shell.
16 | - Install desktop environment or window manager.
17 | - Install other packages.
18 | - Set up Flatpak.
19 | - Restore dotfiles.
20 |
21 |
22 | ## Variables
23 | ### Arch Linux
24 | ```yaml
25 | gpu_drivers:
26 | - mesa
27 | - vulkan-radeon
28 | - libva-mesa-driver
29 |
30 |
31 | shell_pkgs:
32 | - zsh
33 | - zsh-completions
34 | - zsh-syntax-highlighting
35 | - zsh-autosuggestions
36 | - grml-zsh-config
37 |
38 | default_shell: /usr/bin/zsh
39 |
40 |
41 | # Packages for your Desktop environment or window manager.
42 | wm_pkgs:
43 |
44 | # Example: sway
45 | - sway
46 | - swaylock
47 | - swayidle
48 | - waybar
49 | - xdg-utils
50 | - xdg-desktop-portal
51 | - xorg-xwayland
52 | - wl-clipboard
53 | - xdg-desktop-portal-wlr
54 | # Firefox file chooser needs xdg-desktop-portal-gtk
55 | - xdg-desktop-portal-gtk
56 | # python-i3ipc needed for https://github.com/Bai-Chiang/dotfiles/blob/main/.config/sway/inactive-windows-transparency.py
57 | - python-i3ipc
58 | # grim and slurp for screenshot
59 | - grim
60 | - slurp
61 | # notification daemon
62 | - mako
63 | # app launcher
64 | - fuzzel
65 |
66 | # Example: hyprland
67 | - hyprland
68 | - xdg-desktop-portal-hyprland
69 | - xdg-desktop-portal-gtk
70 | - waybar
71 | - xdg-utils
72 | - xorg-xwayland
73 | - wl-clipboard
74 | - swaylock
75 | - swayidle
76 | - fuzzel
77 | # grim and slurp for screenshot
78 | - grim
79 | - slurp
80 |
81 | # Example: KDE
82 | - plasma-meta
83 | - plasma-wayland-session
84 | - sddm
85 | - phonon-qt5-vlc
86 |
87 | # Example: gnome
88 | - gnome-shell
89 | - xdg-desktop-portal-gnome
90 | - gnome-control-center
91 | - gdm
92 | - nautilus
93 | - gvfs
94 | - gvfs-nfs
95 | - gvfs-smb
96 | - gnome-tweaks
97 | - gnome-backgrounds
98 |
99 |
100 | dotfiles_repo:
101 | # https link to your dotfiles repo. This should be a public repo.
102 | https: 'https://github.com/username/dotfiles.git'
103 |
104 | # ssh link to your dotfiles repo. The playbook will replace the https link with ssh link after clone all dotfiles.
105 | # To push updates to GitHub, you need to create an ssh key then add the key to your GitHub account.
106 | ssh: 'git@github.com:username/dotfiles.git'
107 |
108 |
109 | audio_pkgs:
110 | - pipewire
111 | - pipewire-audio
112 | - pipewire-alsa
113 | - pipewire-pulse
114 | - pipewire-jack
115 | - wireplumber
116 |
117 |
118 | fonts_pkgs:
119 | - ttf-dejavu
120 | - noto-fonts-cjk
121 | - ttf-font-awesome
122 | - noto-fonts-emoji
123 |
124 |
125 | other_pkgs:
126 | - htop
127 | - neofetch
128 |
129 | # bluetooth
130 | - bluez
131 | - bluez-utils
132 |
133 | # printer
134 | - cups
135 |
136 |
137 | flatpak_pkgs:
138 | - com.github.tchx84.Flatseal
139 | - io.gitlab.librewolf-community
140 | - org.mozilla.firefox
141 |
142 | - com.valvesoftware.Steam
143 | - com.valvesoftware.Steam.CompatibilityTool.Proton-GE
144 | ```
145 |
146 | ### Fedora
147 | ```yaml
148 | shell_pkgs:
149 | - zsh
150 | default_shell: /usr/bin/zsh
151 |
152 |
153 | wm_pkgs:
154 | # KDE
155 | - @kde-desktop-environment
156 |
157 | # Sway
158 | - @sway-desktop-environment
159 |
160 |
161 | other_pkgs:
162 | - htop
163 |
164 |
165 | dotfiles_repo:
166 | # https link to your dotfiles repo. This should be a public repo.
167 | https: 'https://github.com/username/dotfiles.git'
168 |
169 | # ssh link to your dotfiles repo. The playbook will replace the https link with ssh link after clone all dotfiles.
170 | # To push updates to GitHub, you need to create an ssh key then add the key to your GitHub account.
171 | ssh: 'git@github.com:username/dotfiles.git'
172 | ```
173 |
174 |
--------------------------------------------------------------------------------
/roles/gui/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | homed: false
3 | paru_chroot: false
4 |
--------------------------------------------------------------------------------
/roles/gui/files/failure-notification@.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Send a notification about a failed systemd unit
3 |
4 | [Service]
5 | Type=simple
6 | ExecStart=/usr/bin/notify-send "service %i failed"
7 |
--------------------------------------------------------------------------------
/roles/gui/files/flatpak-update.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Update Flatpak and cleanup unused runtimes and applications.
3 | Wants=network-online.target
4 | After=network-online.target nss-lookup.target
5 | StartLimitIntervalSec=3h
6 | StartLimitBurst=5
7 | OnFailure=failure-notification@%n
8 | 
9 | [Service]
10 | Type=oneshot
11 | ExecStart=/usr/bin/ping -c 1 flathub.org
12 | ExecStart=/usr/bin/flatpak update --user --noninteractive --assumeyes
13 | ExecStart=/usr/bin/flatpak uninstall --user --unused --noninteractive --assumeyes
14 | Restart=on-failure
15 | RestartSec=30min
16 |
--------------------------------------------------------------------------------
/roles/gui/files/flatpak-update.timer:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Update Flatpak
3 |
4 | [Timer]
5 | OnBootSec=1m
6 | OnCalendar=daily
7 | Persistent=true
8 |
9 | [Install]
10 | WantedBy=timers.target
11 |
--------------------------------------------------------------------------------
/roles/gui/tasks/Archlinux.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Get file system type of ~/
3 | ansible.builtin.command: "stat --file-system --format=%T {{ ansible_user_dir }}"
4 | register: home_fstype
5 | changed_when: false
6 | check_mode: false
7 |
8 | - include_tasks: snapper.yml
9 | when: home_fstype.stdout == 'btrfs'
10 |
11 | - name: Install GPU driver
12 | community.general.pacman: name={{ gpu_drivers }} state=present
13 | become: true
14 |
15 | - name: Install shell packages
16 | community.general.pacman: name={{ shell_pkgs }} state=present
17 | become: true
18 | when: shell_pkgs is defined
19 |
20 | - name: change shell (non systemd-homed)
21 | ansible.builtin.user:
22 | name: "{{ ansible_user_id }}"
23 | shell: "{{ default_shell }}"
24 | become: true
25 | when: not homed
26 |
27 | - name: change shell (systemd-homed)
28 | community.general.homectl:
29 | name: "{{ ansible_user_id }}"
30 | password: "{{ ansible_become_password }}"
31 | shell: "{{ default_shell }}"
32 | become: true
33 | when: homed
34 |
35 | - name: Install audio packages
36 | community.general.pacman: name={{ audio_pkgs }} state=present
37 | become: true
38 | when: audio_pkgs is defined
39 |
40 | - name: Install font packages
41 | community.general.pacman: name={{ fonts_pkgs }} state=present
42 | become: true
43 | when: fonts_pkgs is defined
44 |
45 | - name: Install WM packages
46 | community.general.pacman: name={{ wm_pkgs }} state=present
47 | become: true
48 |
49 | - name: systemctl enable sddm.service
50 | ansible.builtin.systemd: name=sddm.service enabled=true
51 | become: true
52 | when: '"sddm" in wm_pkgs'
53 |
54 | - name: systemctl enable gdm.service
55 | ansible.builtin.systemd: name=gdm.service enabled=true
56 | become: true
57 | when: '"gdm" in wm_pkgs'
58 |
59 | - name: Install other packages
60 | community.general.pacman: name={{ other_pkgs }} state=present
61 | become: true
62 | when: other_pkgs is defined
63 |
64 | - name: systemctl enable cups.socket
65 |   ansible.builtin.systemd: name=cups.socket enabled=true
66 |   become: true
67 |   when: other_pkgs is defined and "cups" in other_pkgs
68 |
69 | - include_tasks: dotfiles.yml
70 | when: dotfiles_repo is defined
71 |
72 | - include_tasks: flatpak.yml
73 | when: flatpak_pkgs is defined
74 |
75 | - name: systemctl enable --now bluetooth.service
76 |   ansible.builtin.systemd: name=bluetooth.service enabled=true state=started
77 |   become: true
78 |   when: other_pkgs is defined and ("bluez" in other_pkgs or "bluez-utils" in other_pkgs)
79 |
# paru clones AUR packages under ~/.cache/paru/clone.
- name: Create paru clone directory
  ansible.builtin.file:
    path: "{{ ansible_user_dir }}/.cache/paru/clone"
    state: directory

#- include_tasks: paru.yml
#  when: paru_chroot

# Target directory for grim (screenshot tool installed via wm_pkgs).
- name: Create screenshots directories
  ansible.builtin.file:
    path: "{{ ansible_user_dir }}/screenshots"
    state: directory
  when: '"grim" in wm_pkgs'

- name: Create Downloads directory
  ansible.builtin.file:
    path: "{{ ansible_user_dir }}/Downloads"
    state: directory
    owner: "{{ ansible_user_id }}"
    group: "{{ ansible_user_id }}"
    #mode: '0700'
101 |
102 |
--------------------------------------------------------------------------------
/roles/gui/tasks/Fedora.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install shell packages
3 | ansible.builtin.dnf: name={{ shell_pkgs }} state=present
4 | become: true
5 |
6 | - name: change shell
7 | ansible.builtin.user:
8 | name: "{{ ansible_user_id }}"
9 | shell: "{{ default_shell }}"
10 | become: true
11 |
12 | # Create symlink so zsh-syntax-highlighting and zsh-autosuggestions at the same location as Arch Linux
13 | - name: Create /usr/share/zsh/plugins directory
14 | ansible.builtin.file:
15 | path: /usr/share/zsh/plugins
16 | state: directory
17 | become: true
18 | when: '"zsh" in shell_pkgs'
19 |
20 | - name: symlink zsh-syntax-highlighting and zsh-autosuggestions
21 | ansible.builtin.file:
22 | src: "{{ item.src }}"
23 | dest: "{{ item.dest }}"
24 | state: link
25 | become: true
26 | loop:
27 | - { src: /usr/share/zsh-syntax-highlighting, dest: /usr/share/zsh/plugins/zsh-syntax-highlighting }
28 | - { src: /usr/share/zsh-autosuggestions, dest: /usr/share/zsh/plugins/zsh-autosuggestions }
29 | when: '"zsh" in shell_pkgs'
30 |
31 | - name: Install WM packages
32 | ansible.builtin.dnf: name={{ wm_pkgs }} state=present
33 | become: true
34 |
35 | - name: Install other packages
36 | ansible.builtin.dnf: name={{ other_pkgs }} state=present
37 | become: true
38 |
# Guard the includes like the Arch Linux task list does: dotfiles.yml
# and flatpak.yml dereference dotfiles_repo / flatpak_pkgs
# unconditionally and would fail on hosts that do not define them.
- include_tasks: dotfiles.yml
  when: dotfiles_repo is defined

- include_tasks: flatpak.yml
  when: flatpak_pkgs is defined
42 |
# Target directory for grim (screenshot tool installed via wm_pkgs).
- name: Create screenshots directories
  ansible.builtin.file:
    path: "{{ ansible_user_dir }}/screenshots"
    state: directory
  when: '"grim" in wm_pkgs'

# ranger stores generated image previews in this cache directory.
- name: Create Ranger image previews cache directory
  ansible.builtin.file:
    path: "{{ ansible_user_dir }}/.cache/ranger"
    state: directory
  when: '"ranger" in other_pkgs'
54 |
--------------------------------------------------------------------------------
/roles/gui/tasks/dotfiles.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: pacman -S git
3 | community.general.pacman: name=git state=present
4 | become: true
5 | when: ansible_distribution == "Archlinux"
6 |
7 | - name: dnf install git
8 | ansible.builtin.dnf: name=git state=present
9 | become: true
10 | when: ansible_distribution == "Fedora"
11 |
12 | - name: Restore dotfiles
13 | ansible.builtin.shell: |
14 | #!/usr/bin/bash
15 | git clone --bare {{ dotfiles_repo.https }} $HOME/.dotfiles
16 | function dotfiles {
17 | /usr/bin/git --git-dir=$HOME/.dotfiles/ --work-tree=$HOME $@
18 | }
19 | # delete conflicted files
20 | #dotfiles checkout 2>&1 | grep -E "\s+\." | awk {'print $1'} | xargs -I{} rm {}
21 | dotfiles checkout
22 | dotfiles config --local status.showUntrackedFiles no
23 | args:
24 | chdir: "{{ ansible_user_dir }}"
25 | executable: /usr/bin/bash
26 | creates: "{{ ansible_user_dir }}/.dotfiles/config"
27 |
28 | - name: Set ssh connection in .dotfiles/config
29 | ansible.builtin.lineinfile:
30 | path: "{{ ansible_user_dir }}/.dotfiles/config"
31 | regexp: '^\surl ='
32 | insertafter: '^\[remote'
33 | line: "\turl = {{ dotfiles_repo.ssh }}"
34 |
35 | - name: pacman -S openssh
36 | community.general.pacman: name=openssh state=present
37 | become: true
38 | when: ansible_distribution == "Archlinux"
39 |
--------------------------------------------------------------------------------
/roles/gui/tasks/flatpak.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: pacman -S flatpak
3 | community.general.pacman: name=flatpak state=present
4 | become: true
5 | when: ansible_distribution == "Archlinux"
6 |
7 | - name: dnf install flatpak
8 | ansible.builtin.dnf: name=flatpak state=present
9 | become: true
10 | when: ansible_distribution == "Fedora"
11 |
12 | - name: Add flathub repo
13 | community.general.flatpak_remote:
14 | name: flathub
15 | state: present
16 | flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo
17 | method: user
18 | ignore_errors: "{{ ansible_check_mode }}"
19 |
20 | - name: Install flatpak packages
21 | community.general.flatpak:
22 | name: "{{ flatpak_pkgs }}"
23 | state: present
24 | remote: flathub
25 | method: user
26 | ignore_errors: "{{ ansible_check_mode }}"
27 |
# Systemd user units live under ~/.config/systemd/user. Use
# ansible_user_dir (as the rest of this role does) instead of
# hard-coding /home/<user>, so non-standard home locations still work.
- name: setup systemd user directory
  ansible.builtin.file:
    path: "{{ ansible_user_dir }}/.config/systemd/user"
    state: directory
    owner: "{{ ansible_user_id }}"
    group: "{{ ansible_user_id }}"
    mode: '0700'

# Install the user units: unattended flatpak updates plus a notification
# unit triggered on failure.
- name: flatpak-update.service
  ansible.builtin.copy:
    src: "{{ item }}"
    dest: "{{ ansible_user_dir }}/.config/systemd/user/{{ item }}"
    owner: "{{ ansible_user_id }}"
    group: "{{ ansible_user_id }}"
    mode: '0600'
  loop:
    - flatpak-update.service
    - flatpak-update.timer
    - failure-notification@.service
47 |
# Pick up the freshly copied user units, then enable the update timer.
- name: systemctl --user daemon-reload
  ansible.builtin.systemd: daemon_reload=true scope=user

# Enabling can fail in check mode before the unit files exist.
- name: systemctl enable --user flatpak-update.timer
  ansible.builtin.systemd: name=flatpak-update.timer enabled=true scope=user
  ignore_errors: "{{ ansible_check_mode }}"
54 |
55 |
--------------------------------------------------------------------------------
/roles/gui/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - include_tasks: "{{ ansible_distribution }}.yml"
3 |
4 |
--------------------------------------------------------------------------------
/roles/gui/tasks/paru.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # manually clone and install paru
3 |
4 | - name: Get file system type of /var/cache
5 | ansible.builtin.command: "stat --file-system --format=%T /var/cache"
6 | register: var_cache_fstype
7 | changed_when: false
8 |
9 | - name: Check /var/cache/paru directory
10 | ansible.builtin.stat:
11 | path: "/var/cache/paru"
12 | register: paru_cache_dir
13 |
14 | # Create /var/cache/paru subvolume so it won't be included in snapshots
15 | - name: Create /var/cache/paru subvolume (if using btrfs)
16 | ansible.builtin.command:
17 | cmd: "btrfs subvolume create paru"
18 | chdir: "/var/cache"
19 | become: true
20 | when: not paru_cache_dir.stat.exists
21 |
22 |
# /var/lib/paru/repo holds the local [aur] pacman repository database;
# /var/cache/paru holds the clone/, chroot/ and pkg/ directories used by
# the paru.conf settings below.
- name: Create paru directory
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    owner: "{{ ansible_user_id }}"
    group: "{{ ansible_user_id }}"
    mode: '0755'
  become: true
  loop:
    - "/var/lib/paru"
    - "/var/cache/paru"
    - "/var/lib/paru/repo"
    - "/var/cache/paru/clone"
    - "/var/cache/paru/chroot"
    - "/var/cache/paru/pkg"

# An empty database file lets pacman accept the [aur] repo before the
# first package is built.
- name: Create empty aur.db repo file
  ansible.builtin.file:
    path: /var/lib/paru/repo/aur.db
    state: touch
    modification_time: preserve
    access_time: preserve
45 |
# Keep the default pacman cache and add paru's package cache as a second
# CacheDir (the second entry anchors after the first).
- name: Edit CacheDir /etc/pacman.conf
  ansible.builtin.lineinfile:
    path: /etc/pacman.conf
    regexp: "{{ item.regexp }}"
    line: "{{ item.line }}"
    insertafter: "{{ item.insertafter }}"
  become: true
  loop:
    - { regexp: '^CacheDir\s*=\s*/var/cache/pacman/pkg/', insertafter: '^#\s*CacheDir', line: 'CacheDir = /var/cache/pacman/pkg/' }
    - { regexp: '^CacheDir\s*=\s*/var/cache/paru/pkg/', insertafter: '^CacheDir\s*=\s*/var/cache/pacman/pkg/', line: 'CacheDir = /var/cache/paru/pkg/' }

# Register the local file-backed [aur] repository with pacman.
- name: Add LocalRepo
  ansible.builtin.blockinfile:
    path: /etc/pacman.conf
    marker: "# {mark} ANSIBLE MANAGED BLOCK aur LocalRepo"
    block: |
      [aur]
      SigLevel = PackageOptional DatabaseOptional
      Server = file:///var/lib/paru/repo
  become: true

# Point paru at the local repo and the chroot/clone dirs created above.
- name: Edit /etc/paru.conf
  ansible.builtin.lineinfile:
    path: /etc/paru.conf
    regexp: "{{ item.regexp }}"
    line: "{{ item.line }}"
    insertafter: "{{ item.insertafter }}"
  loop:
    - { regexp: '^LocalRepo', insertafter: '^#\s*LocalRepo', line: LocalRepo = aur }
    - { regexp: '^Chroot', insertafter: '^#\s*Chroot', line: Chroot = /var/cache/paru/chroot }
    - { regexp: '^CloneDir', insertafter: '^Chroot', line: CloneDir = /var/cache/paru/clone }
  become: true
78 |
--------------------------------------------------------------------------------
/roles/gui/tasks/snapper.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Check ~/.cache directory
3 | ansible.builtin.stat:
4 | path: "{{ ansible_user_dir }}/.cache"
5 | register: cache_dir
6 |
7 | - name: Check ~/Downloads/ directory
8 | ansible.builtin.stat:
9 | path: "{{ ansible_user_dir }}/Downloads"
10 | register: downloads_dir
11 |
12 | # Create ~/.cache subvolume so it won't be included in snapshots
13 | - name: Create ~/.cache subvolume (if using btrfs)
14 | ansible.builtin.command:
15 | cmd: "btrfs subvolume create .cache"
16 | chdir: "{{ ansible_user_dir }}"
17 | when: not cache_dir.stat.exists
18 |
19 | # Create ~/Downloads subvolume so it won't be included in snapshots
20 | - name: Create ~/Downloads subvolume (if using btrfs)
21 | ansible.builtin.command:
22 | cmd: "btrfs subvolume create Downloads"
23 | chdir: "{{ ansible_user_dir }}"
24 | when: not downloads_dir.stat.exists
25 |
26 | - name: check /etc/snapper/configs/home
27 | ansible.builtin.stat:
28 | path: /etc/snapper/configs/home
29 | register: snapper_home
30 | become: true
31 |
# Create the snapper "home" config. With systemd-homed the config
# targets /home/<user>; otherwise /home itself.
# ansible.builtin.command instead of shell: no shell feature is used, so
# command avoids a needless shell invocation and its quoting pitfalls.
- name: Create snapper for /home/{{ ansible_user_id }} (systemd-homed)
  ansible.builtin.command: snapper -c home create-config /home/{{ ansible_user_id }}
  become: true
  when:
    - not snapper_home.stat.exists
    - homed

- name: Create snapper for /home (non systemd-homed)
  ansible.builtin.command: snapper -c home create-config /home
  become: true
  when:
    - not snapper_home.stat.exists
    - not homed
49 |
# Tune the "home" snapper config: enable timeline snapshots and bound
# how many snapshots are kept (hourly/daily only, nothing longer-term).
- name: Edit /etc/snapper/configs/home
  ansible.builtin.lineinfile:
    path: /etc/snapper/configs/home
    regexp: "{{ item.regexp }}"
    line: "{{ item.line }}"
  loop:
    - { regexp: '^TIMELINE_CREATE=', line: 'TIMELINE_CREATE="yes"' }
    - { regexp: '^TIMELINE_CLEANUP=', line: 'TIMELINE_CLEANUP="yes"' }
    - { regexp: '^NUMBER_MIN_AGE=', line: 'NUMBER_MIN_AGE="1800"' }
    - { regexp: '^NUMBER_LIMIT=', line: 'NUMBER_LIMIT="10"' }
    - { regexp: '^NUMBER_LIMIT_IMPORTANT=', line: 'NUMBER_LIMIT_IMPORTANT="10"' }
    - { regexp: '^TIMELINE_MIN_AGE=', line: 'TIMELINE_MIN_AGE="1800"' }
    - { regexp: '^TIMELINE_LIMIT_HOURLY=', line: 'TIMELINE_LIMIT_HOURLY="5"' }
    - { regexp: '^TIMELINE_LIMIT_DAILY=', line: 'TIMELINE_LIMIT_DAILY="7"' }
    - { regexp: '^TIMELINE_LIMIT_WEEKLY=', line: 'TIMELINE_LIMIT_WEEKLY="0"' }
    - { regexp: '^TIMELINE_LIMIT_MONTHLY=', line: 'TIMELINE_LIMIT_MONTHLY="0"' }
    - { regexp: '^TIMELINE_LIMIT_YEARLY=', line: 'TIMELINE_LIMIT_YEARLY="0"' }
  become: true
68 |
--------------------------------------------------------------------------------
/roles/libvirt/README.md:
--------------------------------------------------------------------------------
1 | Set up [libvirt](https://wiki.archlinux.org/title/Libvirt) on Arch Linux.
2 |
3 | ## Tasks
4 | - Install libvirt and qemu packages.
5 | - Disable copy on write (COW) on `/var/lib/libvirt/images` directory if using btrfs.
6 | - Enable default NAT.
7 |
--------------------------------------------------------------------------------
/roles/libvirt/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | homed: false
3 |
--------------------------------------------------------------------------------
/roles/libvirt/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Check directory /var/lib/libvirt/images
3 | ansible.builtin.stat:
4 | path: /var/lib/libvirt/images
5 | register: libvirt_images_dir
6 | become: true
7 |
8 | - name: Get file system type of /var/lib
9 | ansible.builtin.command: stat --file-system --format=%T /var/lib
10 | become: true
11 | register: var_lib_fstype
12 | changed_when: false
13 | check_mode: false
14 |
15 | - name: Create /var/lib/libvirt/images subvolume if not exist, and disable CoW
16 | ansible.builtin.shell: |
17 | mkdir -p /var/lib/libvirt
18 | btrfs subvolume create /var/lib/libvirt/images
19 | chattr +C /var/lib/libvirt/images
20 | become: true
21 | when:
22 | - not libvirt_images_dir.stat.exists
23 | - var_lib_fstype.stdout == 'btrfs'
24 |
25 | - name: Install libvirt packages
26 | community.general.pacman:
27 | name:
28 | # libvirt
29 | - qemu-desktop
30 | - libvirt
31 | - virt-manager
32 | # default NAT/DHCP networking support
33 | - iptables-nft
34 | - dnsmasq
35 | - dmidecode
36 | # bridged networking support
37 | - bridge-utils
38 | # UEFI support
39 | - edk2-ovmf
40 | state: present
41 | become: true
42 |
# libvirt group membership grants access to the libvirtd socket.
- name: Add user to libvirt group (non systemd-homed)
  ansible.builtin.user:
    name: "{{ ansible_user_id }}"
    groups: libvirt
    append: true
  become: true
  when: not homed

# NOTE(review): unlike the append above, memberof sets the complete
# group list — any group beyond <user>,wheel,libvirt would be dropped;
# verify this matches the host's expected groups.
- name: Add user to libvirt group (systemd-homed)
  community.general.homectl:
    name: "{{ ansible_user_id }}"
    password: "{{ ansible_become_password }}"
    memberof: "{{ ansible_user_id }},wheel,libvirt"
  become: true
  when: homed
58 |
- name: systemctl enable libvirtd.service
  ansible.builtin.systemd: name=libvirtd.service enabled=true
  register: libvirtd
  become: true

# Start the daemons only when enablement just changed, so an
# already-running libvirtd is left alone on later runs.
- name: systemctl start libvirtd.service virtlogd.service
  ansible.builtin.systemd: name={{ item }} state=started
  when: libvirtd.changed
  become: true
  loop:
    - libvirtd.service
    - virtlogd.service

# stdout is "1" when the default network is already set to autostart.
- name: Check default NAT is enabled or not
  ansible.builtin.shell: virsh net-info --network default | grep '^Autostart:.*yes' | wc -l
  become: true
  register: libvirt_default_net
  changed_when: false

- name: Enable and start default NAT
  ansible.builtin.shell: |
    virsh net-autostart default

    # reload firewall and start default network
    firewall-cmd --reload
    virsh net-start default
  become: true
  when: libvirt_default_net.stdout == '0'
87 |
88 |
--------------------------------------------------------------------------------
/roles/msmtp/README.md:
--------------------------------------------------------------------------------
1 | Set up [msmtp](https://wiki.archlinux.org/title/Msmtp) for sending email notification.
2 |
3 | ## Variables
4 | ### Arch Linux
5 | This will install the `msmtp` package and create `/root/.msmtprc` file with owner `root` permission `600`.
6 | The password will be stored as plain text so we only allow root user to read it.
Since this is for automatically sending email notifications, storing an encrypted password here would be pointless,
because it would have to be decrypted automatically anyway.
9 |
10 | ```yaml
11 | # account name
12 | msmtp_account: gmail
13 |
14 | # smtp server
15 | msmtp_host: smtp.gmail.com
16 |
17 | # smtp port
18 | msmtp_port: 465
19 |
# Enable or disable TLS/SSL
# Quote the value: a bare on/off is parsed as a YAML boolean and would
# render as "True"/"False" in the generated .msmtprc, which msmtp rejects.
msmtp_tls: 'on'

# Enable or disable STARTTLS for TLS
msmtp_tls_starttls: 'off'
25 |
26 | # From email address
27 | msmtp_from: username@gmail.com
28 |
29 | # username and password
30 | # If you are using Gmail, set up an app password:
31 | # https://myaccount.google.com/apppasswords
32 | msmtp_user: username
33 | msmtp_password: !unsafe plain-text-password
34 |
35 | # To email address
36 | # Not in the /root/.msmtprc file
37 | msmtp_to: username@gmail.com
38 | ```
39 |
--------------------------------------------------------------------------------
/roles/msmtp/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: pacman -S msmtp
3 | community.general.pacman: name=msmtp state=present
4 | become: true
5 |
6 | - name: msmtp user config
7 | ansible.builtin.template:
8 | src: msmtprc.j2
9 | dest: /root/.msmtprc
10 | owner: root
11 | group: root
12 | mode: '0600'
13 | become: true
14 |
--------------------------------------------------------------------------------
/roles/msmtp/templates/msmtprc.j2:
--------------------------------------------------------------------------------
1 | # Set default values for all following accounts.
2 | defaults
3 | auth on
4 | tls_trust_file /etc/ssl/certs/ca-certificates.crt
5 | logfile /var/log/msmtp.log
6 |
7 | account {{ msmtp_account }}
8 | host {{ msmtp_host }}
9 | port {{ msmtp_port }}
10 | tls {{ msmtp_tls }}
11 | tls_starttls {{ msmtp_tls_starttls }}
12 | from {{ msmtp_from }}
13 | user {{ msmtp_user }}
14 | password {{ msmtp_password }}
15 |
16 |
--------------------------------------------------------------------------------
/roles/nas/README.md:
--------------------------------------------------------------------------------
1 | NAS and file server related tasks for Arch Linux.
2 |
[`raid.yml`](tasks/raid.yml) and [`samba.yml`](tasks/samba.yml) should also work on Fedora.
Since Fedora uses SELinux, a Samba share can only access files with the `samba_share_t` context.
[`samba.yml`](tasks/samba.yml) will re-label those directories,
but if a directory or one of its subdirectories is mounted into a podman container, its context becomes `container_file_t`, and Samba then has no permission to access it.
7 |
8 |
9 | ## Tasks
10 | - RAID
11 | - Edit `/etc/crypttab` to decrypt hard drives on boot.
12 | - Edit `/etc/fstab` create mount points.
13 | - btrfs scrub
14 | - Enable `btrfs-scrub@.timer` to automatically scrub btrfs volumes.
15 | - Create [`btrfs_scrub_report.sh`](templates/btrfs_scrub_report.sh.j2) to send scrub result with an email.
16 | The email is configured with [`roles/msmtp`](/roles/msmtp/).
17 | - This task depends on [`roles/msmtp`](/roles/msmtp/).
18 | - [S.M.A.R.T.](https://wiki.archlinux.org/title/S.M.A.R.T.) status
19 | - Create self-test schedule.
20 | - Create [`smartd_notify.sh`](templates/smartd_notify.sh.j2) allow smartd send email warnings.
21 | The email is configured with [`roles/msmtp`](/roles/msmtp/).
22 | - This task depends on [`roles/msmtp`](/roles/msmtp/).
23 | - [NFS](https://wiki.archlinux.org/title/NFS) file server
24 | - Edit `/etc/fstab` and create bind mounts.
25 | - Edit `/etc/exports`.
26 | - Set up firewall rules for NFS.
27 | - [Samba](https://wiki.archlinux.org/title/Samba)
28 | - Edit `/etc/fstab` and create bind mounts.
29 | - Create `/etc/samba/smb.conf`.
30 | - Create samba user.
31 | - Set up samba firewall rules.
32 |
33 | ## Variables
34 | ### RAID
35 | Run [`raid.yml`](tasks/raid.yml) when `{{ crypttab_entries }}` or `{{ fstab_entries }}` is defined.
36 | ```yaml
37 | # decrypt disks (optional)
38 | # Skip if not defined
39 | crypttab_entries:
40 |
41 | # device mapper name, the decrypted volume will be /dev/mapper/cryptdisk0
42 | - device_mapper_name: cryptdisk0
43 |
44 | # here UUID are the UUID of luks volume /dev/sda1
45 | UUID: 0a659df5-5f33-4fc9-bd20-9f32bc945f19
46 |
47 | # path to decrypt keyfile. Using keyfile allow automatically decrypt drive.
48 | keyfile: /path/to/keyfile
49 |
50 | # another device mapper name, the decrypted volume will be /dev/mapper/cryptdisk1
51 | - device_mapper_name: cryptdisk1
52 |
53 | # here UUID are the UUID of luks volume /dev/sda2
54 | UUID: 3195bd48-c9c5-4523-98f5-f2b14ba481aa
55 |
56 | # path to decrypt keyfile
57 | keyfile: /path/to/keyfile
58 |
59 |
60 | # Add entries to /etc/fstab file (optional)
61 | # Skip if not defined
62 | fstab_entries:
63 |
64 | # here UUID are the uuid of decrypted volume /dev/mapper/cryptdisk0
65 | - device: UUID=f55c9ddb-e245-430a-a902-13f8dd688458
66 |
67 | # The mount point
68 | mount_point: /home/tux/data
69 |
70 | # filesystem type
71 | fs: btrfs
72 |
73 | # mount options
74 | mount_opts: "noatime,compress=zstd:3,space_cache=v2,autodefrag,subvol=@data,nodev,nosuid,noexec"
75 |
  # The owner, group and permissions for the mount point /home/tux/data
  # The playbook will create the mount point with these permissions if it does not exist.
78 | owner: tux
79 | group: tux
80 | mode: '0700'
81 |
82 |
83 | # spindown timeout for the drive (optional)
84 | # Skip if not defined
85 | # The number will pass to hdparm -S
86 | # 242 will set timeout to be 60 min
87 | # https://wiki.archlinux.org/title/Hdparm#Power_management_configuration
88 | hdparm_spindown: 242
89 | ```
90 |
91 | ### btrfs scrub
Set up btrfs scrub when `{{ btrfs_scrub_path }}` is defined.
93 | ```yaml
94 | # btrfs scrub paths. Use systemd-escape -p /path/to/mountpoint to get escape path
95 | btrfs_scrub_path:
96 | - { path: '/', escape: '-' }
97 | - { path: '/home/tux/data', escape: 'home-tux-data' }
98 |
99 | # Schedule btrfs scrub with systemd-timer format
100 | btrfs_scrub_time: 'Sun *-*-* 01:00:00'
101 | ```
102 |
103 | ### S.M.A.R.T.
Set up S.M.A.R.T. monitoring when `{{ smartd_time }}` is defined.
105 | ```yaml
106 | # schedule S.M.A.R.T. self-tests
107 | # https://wiki.archlinux.org/title/S.M.A.R.T.#Schedule_self-tests
108 | # smartd will also monitor all drives, and send email notification with information specified in roles/msmtp
109 | # The following example will schedule a short self-test every day at 00:00 to 01:00.
110 | smartd_time: '(S/../.././00)'
111 | ```
112 |
113 | ### NFS
114 | Set up NFS when `{{ nfs_mount_point }}` is defined.
115 | ```yaml
116 | # NFS server
117 | # The root directory for NFSv4
118 | nfs_root: /srv/nfs
119 |
120 | # NFS mount points
121 | nfs_mount_point:
122 |
123 | # The directory to be shared
124 | - target: /home/tux/data
125 |
126 | # Bind mount address of the target. See https://wiki.archlinux.org/title/NFS#Server
127 | bind: /srv/nfs/data
128 |
  # options for the mount point, same format as in /etc/exports
  ip_opt: '192.168.122.1(rw,sync,all_squash,anonuid=1000,anongid=1000)'
131 |
132 | # (Optional) Firewall rule, only allow NFS connection from these IP address.
133 | nfs_accept_source_ipv4:
134 | - 192.168.122.1
135 | ```
136 |
137 | ### Samba
138 | Set up Samba when `{{ smb_share }}` is defined.
139 | ```yaml
140 | # Samba share in /etc/samba/smb.conf
141 | # Following example will create a samba share
142 | # [data]
143 | # comment = data
144 | # path = /srv/smb/data
145 | # valid users = smb_username
146 | # public = no
147 | # browseable = no
148 | # printable = no
149 | # read only = no
150 | # create mask = 0664
151 | # directory mask = 2755
152 | # force create mode = 0644
153 | # force directory mode = 2755
154 | # /srv/smb/data is a bind mount, point to /home/tux/data
155 | smb_share:
156 | - name: data
157 | comment: data
158 | path: /home/tux/data # no trailing slash at the end
159 | valid_users: smb_username
160 | read_only: 'no'
161 |
162 | # Samba user with UID and password
163 | smb_users:
164 | - name: smb_username
165 | passwd: !unsafe pa$sw0r6
166 | uid: 10001
167 |
168 | # (Optional) Firewall rule, only allow Samba connection from these IP address.
169 | samba_accept_source_ipv4:
170 | - 192.168.122.1
171 | ```
172 |
173 |
--------------------------------------------------------------------------------
/roles/nas/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #firewalld_default_zone: public
3 |
--------------------------------------------------------------------------------
/roles/nas/tasks/btrfs_scrub.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: btrfs_scrub_report.sh
3 | ansible.builtin.template:
4 | src: btrfs_scrub_report.sh.j2
5 | dest: /usr/local/bin/btrfs_scrub_report.sh
6 | owner: root
7 | group: root
8 | mode: '0700'
9 | become: true
10 |
11 | - name: Create btrfs-scrub@.service.d
12 | ansible.builtin.file:
13 | path: "/etc/systemd/system/btrfs-scrub@{{ item.escape }}.service.d"
14 | state: directory
15 | owner: root
16 | group: root
17 | mode: '0755'
18 | loop: "{{ btrfs_scrub_path }}"
19 | become: true
20 |
21 | - name: Create btrfs-scrub@.timer.d
22 | ansible.builtin.file:
23 | path: "/etc/systemd/system/btrfs-scrub@{{ item.escape }}.timer.d"
24 | state: directory
25 | owner: root
26 | group: root
27 | mode: '0755'
28 | loop: "{{ btrfs_scrub_path }}"
29 | become: true
30 |
31 | - name: Modify btrfs-scrub service to send email when finished
32 | ansible.builtin.template:
33 | src: btrfs_scrub_service_override.conf.j2
34 | dest: /etc/systemd/system/btrfs-scrub@{{ item.escape }}.service.d/override.conf
35 | owner: root
36 | group: root
37 | mode: '0644'
38 | loop: "{{ btrfs_scrub_path }}"
39 | become: true
40 |
41 | - name: Modify btrfs-scrub timer
42 | ansible.builtin.template:
43 | src: btrfs_scrub_timer_override.conf.j2
44 | dest: /etc/systemd/system/btrfs-scrub@{{ item.escape }}.timer.d/override.conf
45 | owner: root
46 | group: root
47 | mode: '0644'
48 | loop: "{{ btrfs_scrub_path }}"
49 | become: true
50 |
# daemon_reload so systemd picks up the override.conf drop-ins written
# above before the timers are enabled; without it the overrides do not
# take effect until the next manual daemon-reload or reboot.
- name: systemctl enable btrfs-scrub@.timer
  ansible.builtin.systemd: name=btrfs-scrub@{{ item.escape }}.timer enabled=true daemon_reload=true
  loop: "{{ btrfs_scrub_path }}"
  become: true
55 |
--------------------------------------------------------------------------------
/roles/nas/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "import_tasks: raid.yml"
3 | import_tasks: raid.yml
4 | when: crypttab_entries is defined or fstab_entries is defined
5 |
6 | - name: "import_tasks: btrfs_scrub.yml"
7 | import_tasks: btrfs_scrub.yml
8 | when: btrfs_scrub_path is defined
9 |
10 | - name: "import_tasks: smartd.yml"
11 | import_tasks: smartd.yml
12 | when: smartd_time is defined
13 |
14 | - name: "import_tasks: nfs.yml"
15 | import_tasks: nfs.yml
16 | when: nfs_mount_point is defined
17 |
18 | - name: "include_tasks: samba.yml"
19 | include_tasks: samba.yml
20 | when: smb_share is defined
21 |
22 |
--------------------------------------------------------------------------------
/roles/nas/tasks/nfs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: pacman -S nfs-utils
3 | community.general.pacman: name=nfs-utils state=present
4 | become: true
5 |
6 | - name: Create NFS directories
7 | ansible.builtin.file:
8 | path: "{{ item.bind }}"
9 | state: directory
10 | loop: "{{ nfs_mount_point }}"
11 | become: true
12 |
13 | - name: Add bind mount to /etc/fstab
14 | ansible.builtin.lineinfile:
15 | path: /etc/fstab
16 | regexp: "^{{ item.target }}\\s+{{item.bind }}"
17 | line: "{{ item.target }} {{item.bind }} none bind 0 0"
18 | state: present
19 | loop: "{{ nfs_mount_point }}"
20 | become: true
21 |
22 | - name: add root mount to /etc/exports
23 | ansible.builtin.lineinfile:
24 | path: /etc/exports
25 | regexp: "^{{ nfs_root }}\\s+"
26 | line: "{{ nfs_root }} {{ nfs_root_ip_opt }}"
27 | state: present
28 | become: true
29 |
30 | - name: add other mount points to /etc/exports
31 | ansible.builtin.lineinfile:
32 | path: /etc/exports
33 | regexp: "^{{ item.bind }}\\s+"
34 | line: "{{ item.bind }} {{ item.ip_opt }}"
35 | state: present
36 | loop: "{{ nfs_mount_point }}"
37 | become: true
38 |
39 | #- name: Add custom NFS rule to UFW
40 | # ansible.builtin.blockinfile:
41 | # path: /etc/ufw/applications.d/ufw-custom
42 | # block: |
43 | # [NFS-custom]
44 | # title=NFS server
45 | # description=NFS server
46 | # ports=2049/tcp
47 | # create: true
48 | # marker: "; NFS {mark} ANSIBLE MANAGED BLOCK"
49 | #
50 | #- name: Configure firewall for NFS
51 | # community.general.ufw:
52 | # rule: allow
53 | # direction: in
54 | # name: NFS-custom
55 | # from: "{{ item }}"
56 | # comment: "Allow NFS from {{ item }}"
57 | # loop: "{{ nfs_allow_ip }}"
58 | #
# Restrict NFS to the listed source addresses. Loop over default([])
# instead of using an "is defined" when-guard: Ansible evaluates the
# loop expression before the per-item when condition, so looping over an
# undefined variable errors out before the guard can skip the task.
- name: Set firewall rules for NFS
  ansible.posix.firewalld:
    rich_rule: rule family="ipv4" source address="{{ item }}" service name="nfs" accept
    #zone: "{{ firewalld_default_zone }}"
    permanent: true
    immediate: true
    state: enabled
  loop: "{{ nfs_accept_source_ipv4 | default([]) }}"
  become: true
69 |
- name: systemctl enable nfsv4-server.service
  ansible.builtin.systemd: name=nfsv4-server.service enabled=true
  become: true

# NFSv4-only setup: mask rpcbind and the v3 server unit so they can
# never be pulled in.
- name: systemctl mask rpcbind.service rpcbind.socket nfs-server.service
  ansible.builtin.systemd: name={{ item }} enabled=false masked=true
  become: true
  loop:
    - rpcbind.service
    - rpcbind.socket
    - nfs-server.service
81 |
--------------------------------------------------------------------------------
/roles/nas/tasks/raid.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: dnf install cryptsetup
3 | ansible.builtin.dnf: name=cryptsetup state=present
4 | become: true
5 | when:
6 | - ansible_distribution == "Fedora"
7 | - crypttab_entries is defined
8 |
9 | - name: Create /etc/crypttab file
10 | ansible.builtin.file:
11 | path: /etc/crypttab
12 | state: touch
13 | owner: root
14 | group: root
15 | mode: '0644'
16 | modification_time: preserve
17 | access_time: preserve
18 | become: true
19 | when:
20 | - ansible_distribution == "Fedora"
21 | - crypttab_entries is defined
22 |
# One crypttab line per encrypted disk: the keyfile lets it unlock at
# boot. Loop over default([]) instead of the old "is defined" guard:
# the loop expression is evaluated before the when condition, so an
# undefined crypttab_entries errored out even though this file also
# runs when only fstab_entries is defined.
- name: Edit /etc/crypttab
  ansible.builtin.lineinfile:
    path: /etc/crypttab
    regexp: "^{{ item.device_mapper_name }}"
    line: "{{ item.device_mapper_name}} UUID={{ item.UUID }} {{ item.keyfile }}"
    state: present
  loop: "{{ crypttab_entries | default([]) }}"
  become: true
32 |
# Mount points must exist before "mount --all" below. default([]):
# raid.yml runs when either crypttab_entries OR fstab_entries is
# defined, so fstab_entries may be absent and looping over it directly
# would fail.
- name: Create mount points
  ansible.builtin.file:
    path: "{{ item.mount_point }}"
    state: directory
    owner: "{{ item.owner }}"
    group: "{{ item.group }}"
    mode: "{{ item.mode }}"
  loop: "{{ fstab_entries | default([]) }}"
  become: true
42 |
# default([]): fstab_entries may be undefined when only crypttab_entries
# triggered this file; looping over an undefined variable fails before
# any when guard could skip the task.
- name: Edit /etc/fstab
  ansible.builtin.lineinfile:
    path: /etc/fstab
    regexp: "^{{ item.device }}\\s+{{ item.mount_point }}\\s+"
    line: "{{ item.device }} {{ item.mount_point }} {{ item.fs }} {{ item.mount_opts }} 0 0"
    state: present
  loop: "{{ fstab_entries | default([]) }}"
  become: true
  register: fstab

# Apply new fstab entries immediately; daemon-reload first so systemd
# regenerates its mount units from the edited fstab.
- name: remount
  ansible.builtin.shell: |
    systemctl daemon-reload
    mount --all
  become: true
  when: fstab.changed
59 |
# Re-apply ownership/permissions after mounting: once a filesystem is
# mounted, its root's permissions replace those of the underlying
# directory set earlier. default([]) for the same reason as above —
# fstab_entries may be undefined when only crypttab_entries is set.
- name: Set mount points permissions
  ansible.builtin.file:
    path: "{{ item.mount_point }}"
    state: directory
    owner: "{{ item.owner }}"
    group: "{{ item.group }}"
    mode: "{{ item.mode }}"
  loop: "{{ fstab_entries | default([]) }}"
  become: true
69 |
# udev rule: set the standby (spindown) timeout via hdparm -S on every
# rotational sd[a-z] disk when it appears.
- name: Set spindown timeout for disk
  ansible.builtin.copy:
    content: |
      ACTION=="add|change", KERNEL=="sd[a-z]", ATTRS{queue/rotational}=="1", RUN+="/usr/bin/hdparm -S {{ hdparm_spindown }} /dev/%k"
    dest: /etc/udev/rules.d/69-hdparm.rules
    owner: root
    group: root
    mode: '0644'
  become: true
  when: hdparm_spindown is defined
80 |
--------------------------------------------------------------------------------
/roles/nas/tasks/samba.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: pacman -S samba
3 | community.general.pacman: name=samba state=present
4 | become: true
5 | when: ansible_distribution == "Archlinux"
6 |
7 | - name: dnf install samba
8 | ansible.builtin.dnf: name=samba state=present
9 | become: true
10 | when: ansible_distribution == "Fedora"
11 |
# Render the Samba configuration. Explicit owner/group/mode are set so the
# result does not depend on the remote umask; testparm validates the rendered
# file before it replaces the live config.
- name: Create /etc/samba/smb.conf
  ansible.builtin.template:
    src: smb.conf.j2
    dest: /etc/samba/smb.conf
    owner: root
    group: root
    mode: '0644'
    validate: testparm -s %s
  become: true
18 |
# Create the bind-mount target directory for every share under /srv/smb/.
# NOTE(review): no owner/mode is given, so root:root with the default umask is
# used; these directories only serve as mount points for the bind mounts below.
- name: Create samba share directories
  ansible.builtin.file:
    path: "/srv/smb/{{ item.name }}"
    state: directory
  become: true
  loop: "{{ smb_share }}"
25 |
# Bind-mount each shared path into /srv/smb/<name> via fstab so the real data
# location stays outside the Samba tree.
- name: Add bind mount to /etc/fstab
  ansible.builtin.lineinfile:
    path: /etc/fstab
    # regex_escape() keeps paths with regex metacharacters matching literally.
    regexp: "^{{ item.path | regex_escape() }}\\s+/srv/smb/{{ item.name }}"
    line: "{{ item.path }} /srv/smb/{{ item.name }} none bind 0 0"
    state: present
  become: true
  loop: "{{ smb_share }}"
34 |
35 | #- name: Add custom Samba rule to UFW
36 | # ansible.builtin.blockinfile:
37 | # path: /etc/ufw/applications.d/ufw-custom
38 | # block: |
39 | # [Samba-custom]
40 | # title=SMB/CIFS server
41 | # description=SMB/CIFS server
42 | # ports=445/tcp
43 | # create: true
44 | # marker: "; SMB/CIFS {mark} ANSIBLE MANAGED BLOCK"
45 | #
46 | #- name: Configure firewall for CIFS
47 | # community.general.ufw:
48 | # rule: allow
49 | # direction: in
50 | # name: Samba-custom
51 | # from: "{{ item }}"
52 | # comment: "Allow SMB/CIFS from {{ item }}"
53 | # loop: "{{ CIFS_allow_ip }}"
54 |
# Create a dedicated locked, no-login system account for each Samba user.
# Skipped for the connecting Ansible user and root, which already exist.
- name: Create samba user
  ansible.builtin.user:
    name: "{{ item.name }}"
    password: '!'  # '!' marks the shadow password field as locked (no hash)
    password_lock: true
    create_home: false
    uid: "{{ item.uid }}"
    shell: "/usr/bin/nologin"
  become: true
  when:
    - item.name != ansible_user_id
    - item.name != "root"
  loop: "{{ smb_users }}"
68 |
# Register each user in the Samba password database. smbpasswd -s reads the
# password twice from stdin; "Added user" appears in stdout only on first add,
# which makes the task idempotent-ish via changed_when.
- name: Add samba user
  # Quote the password so shell metacharacters/whitespace survive intact.
  ansible.builtin.shell: (echo '{{ item.passwd }}'; echo '{{ item.passwd }}') | smbpasswd -s -a {{ item.name }}
  become: true
  register: samba_user
  changed_when: "'Added user' in samba_user.stdout"
  # Keep plaintext passwords out of logs and ansible output.
  no_log: true
  loop: "{{ smb_users }}"
75 |
# Replace the stock firewalld samba service definition with one that opens
# only 445/tcp (no NetBIOS ports). Files in /etc/firewalld/services/ override
# the copies shipped in /usr/lib/firewalld/services/.
# NOTE(review): the original content block appears to have lost its XML markup
# (only the text nodes survived); reconstructed per firewalld.service(5).
- name: Modify samba firewalld service file samba.xml
  ansible.builtin.copy:
    content: |
      <?xml version="1.0" encoding="utf-8"?>
      <service>
        <short>Samba</short>
        <description>Modified samba server only accept 445/tcp port.</description>
        <port protocol="tcp" port="445"/>
      </service>
    dest: /etc/firewalld/services/samba.xml
    owner: root
    group: root
    mode: '0644'
  become: true
  register: samba_firewalld_file
91 |
# firewalld only re-reads service definitions on reload; run it whenever the
# samba.xml override above was (re)written.
- name: Reload firewalld when samba.xml changed
  ansible.builtin.command: firewall-cmd --reload
  become: true
  when: samba_firewalld_file.changed
96 |
# Permit the (port-restricted) samba service only from the listed IPv4
# sources, both in the runtime and permanent firewalld configuration.
- name: Set firewall rules for samba
  ansible.posix.firewalld:
    rich_rule: rule family="ipv4" source address="{{ item }}" service name="samba" accept
    #zone: "{{ firewalld_default_zone }}"
    permanent: true
    immediate: true
    state: enabled
  become: true
  loop: "{{ samba_accept_source_ipv4 }}"
  when: samba_accept_source_ipv4 is defined
107 |
# Enable the Samba server daemon to start on boot.
- name: systemctl enable smb.service
  ansible.builtin.systemd:
    name: smb
    enabled: true
  become: true
111 |
# Provides the SELinux tooling (semanage) required by the sefcontext task.
- name: dnf install policycoreutils-python-utils
  ansible.builtin.dnf:
    name: policycoreutils-python-utils
    state: present
  become: true
  when: ansible_distribution == "Fedora"
116 |
# Register a persistent samba_share_t file-context mapping for every shared
# path so Samba may serve it while SELinux is enforcing. This only records the
# mapping; the next task applies it to existing files.
- name: Set SELinux file context to shared directories
  community.general.sefcontext:
    target: "{{ item.path }}(/.*)?"
    setype: samba_share_t
    state: present
  become: true
  when: ansible_selinux.status == "enabled"
  loop: "{{ smb_share }}"
  register: samba_share_sefcontext
126 |
# restorecon relabels existing files to match the mapping added above.
# For a looped register, .changed is true if any iteration reported a change.
- name: Apply SELinux file context to shared directories
  ansible.builtin.command: "restorecon -R {{ item.path }}"
  become: true
  when:
    - ansible_selinux.status == "enabled"
    - samba_share_sefcontext.changed
  loop: "{{ smb_share }}"
134 |
135 |
--------------------------------------------------------------------------------
/roles/nas/tasks/smartd.yml:
--------------------------------------------------------------------------------
1 | ---
# Install smartmontools (provides smartd and smartctl).
- name: pacman -S smartmontools
  community.general.pacman:
    name: smartmontools
    state: present
  become: true
5 |
# Replace the stock DEVICESCAN directive: monitor all devices, send daily
# reminders while a problem persists (-M daily), run our notify script instead
# of mail (-M exec), and self-test per {{ smartd_time }} (-s).
# NOTE(review): smartd's -m normally takes a mail-address argument; here -m is
# immediately followed by -M. Verify this parses as intended (the documented
# form is e.g. `-m root -M exec /path/to/script`) with `smartd -q onecheck`.
- name: /etc/smartd.conf
  ansible.builtin.lineinfile:
    path: /etc/smartd.conf
    regexp: '^DEVICESCAN'
    line: "DEVICESCAN -m -M daily -M exec /usr/local/bin/smartd_notify.sh -s {{ smartd_time }}"
    state: present
  become: true
13 |
# Install the notification script that smartd executes on failures
# (referenced by the -M exec directive above). Task name corrected to match
# the actual destination file name.
- name: /usr/local/bin/smartd_notify.sh
  ansible.builtin.template:
    src: smartd_notify.sh.j2
    dest: /usr/local/bin/smartd_notify.sh
    owner: root
    group: root
    mode: '0700'
  become: true
22 |
# Start smartd monitoring at boot.
- name: systemctl enable smartd.service
  ansible.builtin.systemd:
    name: smartd
    enabled: true
  become: true
26 |
27 |
--------------------------------------------------------------------------------
/roles/nas/templates/btrfs_scrub_report.sh.j2:
--------------------------------------------------------------------------------
#!/usr/bin/bash

# Mail a btrfs scrub status report for the filesystem path given as $1.
# msmtp takes recipient and envelope sender from the generated mail headers.
# "$1" is quoted so paths containing spaces or glob characters stay intact.
echo -e "To: {{ msmtp_to }}\nFrom: {{ msmtp_from }}\nSubject: btrfs scrub report\n\n$(btrfs scrub status "$1")" | msmtp --read-recipients --read-envelope-from
4 |
--------------------------------------------------------------------------------
/roles/nas/templates/btrfs_scrub_service_override.conf.j2:
--------------------------------------------------------------------------------
[Service]
# Runs when the scrub service unit stops, mailing a status report for the
# scrubbed path; item.path is presumably filled per-filesystem by the looping
# task that installs this override — confirm against btrfs_scrub.yml.
ExecStop=/usr/local/bin/btrfs_scrub_report.sh {{ item.path }}
3 |
--------------------------------------------------------------------------------
/roles/nas/templates/btrfs_scrub_timer_override.conf.j2:
--------------------------------------------------------------------------------
[Timer]
# Empty assignment clears the distribution's default schedule, then set ours.
OnCalendar=
OnCalendar={{ btrfs_scrub_time }}
# Fire close to the configured time, with no random jitter and no catch-up
# run after downtime.
AccuracySec=1min
RandomizedDelaySec=0
Persistent=false
7 |
--------------------------------------------------------------------------------
/roles/nas/templates/smartd_notify.sh.j2:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 |
3 | echo -e "To: {{ msmtp_to }}\nFrom: {{ msmtp_from }}\nSubject: S.M.A.R.T Error ${SMARTD_FAILTYPE}\n\n${SMARTD_FULLMESSAGE}" | msmtp --read-recipients --read-envelope-from
4 |
--------------------------------------------------------------------------------
/roles/nas/templates/smb.conf.j2:
--------------------------------------------------------------------------------
1 | # This is the main Samba configuration file. You should read the
2 | # smb.conf(5) manual page in order to understand the options listed
3 | # here. Samba has a huge number of configurable options (perhaps too
4 | # many!) most of which are not shown in this example
5 | #
6 | # For a step to step guide on installing, configuring and using samba,
7 | # read the Samba-HOWTO-Collection. This may be obtained from:
8 | # http://www.samba.org/samba/docs/Samba-HOWTO-Collection.pdf
9 | #
10 | # Many working examples of smb.conf files can be found in the
11 | # Samba-Guide which is generated daily and can be downloaded from:
12 | # http://www.samba.org/samba/docs/Samba-Guide.pdf
13 | #
14 | # Any line which starts with a ; (semi-colon) or a # (hash)
15 | # is a comment and is ignored. In this example we will use a #
16 | # for commentry and a ; for parts of the config file that you
17 | # may wish to enable
18 | #
19 | # NOTE: Whenever you modify this file you should run the command "testparm"
20 | # to check that you have not made any basic syntactic errors.
21 | #
22 | #======================= Global Settings =====================================
23 | [global]
24 |
25 | # workgroup = NT-Domain-Name or Workgroup-Name, eg: MIDEARTH
26 | workgroup = SAMBAGROUP
27 |
28 | # server string is the equivalent of the NT Description field
29 | server string = Samba Server
30 |
31 | # Server role. Defines in which mode Samba will operate. Possible
32 | # values are "standalone server", "member server", "classic primary
33 | # domain controller", "classic backup domain controller", "active
34 | # directory domain controller".
35 | #
36 | # Most people will want "standalone server" or "member server".
37 | # Running as "active directory domain controller" will require first
38 | # running "samba-tool domain provision" to wipe databases and create a
39 | # new domain.
40 | server role = standalone server
41 |
42 | # This option is important for security. It allows you to restrict
43 | # connections to machines which are on your local network. The
44 | # following example restricts access to two C class networks and
45 | # the "loopback" interface. For more examples of the syntax see
46 | # the smb.conf man page
47 | ; hosts allow = 192.168.1. 192.168.2. 127.
48 |
49 | # Uncomment this if you want a guest account, you must add this to /etc/passwd
50 | # otherwise the user "nobody" is used
51 | ; guest account = pcguest
52 |
53 | # this tells Samba to use a separate log file for each machine
54 | # that connects
55 | ; log file = /usr/local/samba/var/log.%m
56 | logging = systemd
57 |
58 | # Put a capping on the size of the log files (in Kb).
59 | max log size = 50
60 |
61 | # Specifies the Kerberos or Active Directory realm the host is part of
62 | ; realm = MY_REALM
63 |
64 | # Backend to store user information in. New installations should
65 | # use either tdbsam or ldapsam. smbpasswd is available for backwards
66 | # compatibility. tdbsam requires no further configuration.
67 | ; passdb backend = tdbsam
68 |
69 | # Using the following line enables you to customise your configuration
70 | # on a per machine basis. The %m gets replaced with the netbios name
71 | # of the machine that is connecting.
72 | # Note: Consider carefully the location in the configuration file of
73 | # this line. The included file is read at that point.
74 | ; include = /usr/local/samba/lib/smb.conf.%m
75 |
76 | # Configure Samba to use multiple interfaces
77 | # If you have multiple network interfaces then you must list them
78 | # here. See the man page for details.
79 | ; interfaces = 192.168.12.2/24 192.168.13.2/24
80 |
81 | # Where to store roving profiles (only for Win95 and WinNT)
82 | # %L substitutes for this servers netbios name, %U is username
83 | # You must uncomment the [Profiles] share below
84 | ; logon path = \\%L\Profiles\%U
85 |
86 | # Windows Internet Name Serving Support Section:
87 | # WINS Support - Tells the NMBD component of Samba to enable it's WINS Server
88 | ; wins support = yes
89 |
90 | # WINS Server - Tells the NMBD components of Samba to be a WINS Client
91 | # Note: Samba can be either a WINS Server, or a WINS Client, but NOT both
92 | ; wins server = w.x.y.z
93 |
94 | # WINS Proxy - Tells Samba to answer name resolution queries on
95 | # behalf of a non WINS capable client, for this to work there must be
96 | # at least one WINS Server on the network. The default is NO.
97 | ; wins proxy = yes
98 |
99 | # DNS Proxy - tells Samba whether or not to try to resolve NetBIOS names
100 | # via DNS nslookups. The default is NO.
101 | dns proxy = no
102 |
103 | # These scripts are used on a domain controller or stand-alone
104 | # machine to add or delete corresponding unix accounts
105 | ; add user script = /usr/sbin/useradd %u
106 | ; add group script = /usr/sbin/groupadd %g
107 | ; add machine script = /usr/sbin/adduser -n -g machines -c Machine -d /dev/null -s /bin/false %u
108 | ; delete user script = /usr/sbin/userdel %u
109 | ; delete user from group script = /usr/sbin/deluser %u %g
110 | ; delete group script = /usr/sbin/groupdel %g
111 |
112 | # Disable printer sharing
113 | load printers = no
114 | printing = bsd
115 | printcap name = /dev/null
116 | disable spoolss = yes
117 | show add printer wizard = no
118 |
119 | # Restrict protocols for better security
120 | server min protocol = SMB3
121 |
122 | # Use native SMB transport encryption
123 | server smb encrypt = required
124 |
125 |
126 | #============================ Share Definitions ==============================
127 | ;[homes]
128 | ; comment = Home Directories
129 | ; browseable = no
130 | ; writable = yes
131 |
132 | # Un-comment the following and create the netlogon directory for Domain Logons
133 | ; [netlogon]
134 | ; comment = Network Logon Service
135 | ; path = /usr/local/samba/lib/netlogon
136 | ; guest ok = yes
137 | ; writable = no
138 | ; share modes = no
139 |
140 |
141 | # Un-comment the following to provide a specific roving profile share
142 | # the default is to use the user's home directory
143 | ;[Profiles]
144 | ; path = /usr/local/samba/profiles
145 | ; browseable = no
146 | ; guest ok = yes
147 |
148 |
149 | # NOTE: If you have a BSD-style print system there is no need to
150 | # specifically define each individual printer
151 | ;[printers]
152 | ; comment = All Printers
153 | ; path = /usr/spool/samba
154 | ; browseable = no
155 | # Set public = yes to allow user 'guest account' to print
156 | ; guest ok = no
157 | ; writable = no
158 | ; printable = yes
159 |
160 | # This one is useful for people to share files
161 | ;[tmp]
162 | ; comment = Temporary file space
163 | ; path = /tmp
164 | ; read only = no
165 | ; public = yes
166 |
167 | # A publicly accessible directory, but read only, except for people in
168 | # the "staff" group
169 | ;[public]
170 | ; comment = Public Stuff
171 | ; path = /home/samba
172 | ; public = yes
173 | ; writable = no
174 | ; printable = no
175 | ; write list = @staff
176 |
177 | # Other examples.
178 | #
179 | # A private printer, usable only by fred. Spool data will be placed in fred's
180 | # home directory. Note that fred must have write access to the spool directory,
181 | # wherever it is.
182 | ;[fredsprn]
183 | ; comment = Fred's Printer
184 | ; valid users = fred
185 | ; path = /homes/fred
186 | ; printer = freds_printer
187 | ; public = no
188 | ; writable = no
189 | ; printable = yes
190 |
191 | # A private directory, usable only by fred. Note that fred requires write
192 | # access to the directory.
193 | ;[fredsdir]
194 | ; comment = Fred's Service
195 | ; path = /usr/somewhere/private
196 | ; valid users = fred
197 | ; public = no
198 | ; writable = yes
199 | ; printable = no
200 |
201 | # a service which has a different directory for each machine that connects
202 | # this allows you to tailor configurations to incoming machines. You could
203 | # also use the %U option to tailor it by user name.
204 | # The %m gets replaced with the machine name that is connecting.
205 | ;[pchome]
206 | ; comment = PC Directories
207 | ; path = /usr/pc/%m
208 | ; public = no
209 | ; writable = yes
210 |
211 | # A publicly accessible directory, read/write to all users. Note that all files
212 | # created in the directory by users will be owned by the default user, so
213 | # any user with access can delete any other user's files. Obviously this
214 | # directory must be writable by the default user. Another user could of course
215 | # be specified, in which case all files would be owned by that user instead.
216 | ;[public]
217 | ; path = /usr/somewhere/else/public
218 | ; public = yes
219 | ; only guest = yes
220 | ; writable = yes
221 | ; printable = no
222 |
223 | # The following two entries demonstrate how to share a directory so that two
224 | # users can place files there that will be owned by the specific users. In this
225 | # setup, the directory should be writable by both users and should have the
226 | # sticky bit set on it to prevent abuse. Obviously this could be extended to
227 | # as many users as required.
228 | ;[myshare]
229 | ; comment = Mary's and Fred's stuff
230 | ; path = /usr/somewhere/shared
231 | ; valid users = mary fred
232 | ; public = no
233 | ; writable = yes
234 | ; printable = no
235 | ; create mask = 0765
236 |
{# One share section per smb_share entry; the directory is created and
   bind-mounted to /srv/smb/<name> by roles/nas/tasks/samba.yml. #}
{% for item in smb_share %}
[{{ item.name }}]
    comment = {{ item.comment }}
    path = /srv/smb/{{ item.name }}
    valid users = {{ item.valid_users }}
    public = no
    browseable = no
    printable = no
    read only = {{ item.read_only }}
{% if item.read_only == 'no' %}
{# Writable shares get setgid (2xxx) directories so group ownership is
   inherited. NOTE(review): create mask 0664 vs force create mode 0644 differ
   in the group-write bit — confirm which is intended. #}
    create mask = 0664
    directory mask = 2755
    force create mode = 0644
    force directory mode = 2755
{% endif %}
{% endfor %}
253 |
254 |
--------------------------------------------------------------------------------
/roles/nut/README.md:
--------------------------------------------------------------------------------
1 | Set up [Network UPS Tools](https://wiki.archlinux.org/title/Network_UPS_Tools) (NUT) and configure it to send email notification on Arch Linux.
2 |
3 | This role depends on [`roles/msmtp`](/roles/msmtp/).
4 |
5 | ## Tasks
6 | - Install `nut`.
7 | - Edit `/etc/nut/ups.conf`, `/etc/nut/upsd.users`, `/etc/nut/upsmon.conf`
8 | - Create `/etc/nut/msmtprc` for sending email, use the variables from [`roles/msmtp`](/roles/msmtp).
Because NUT runs as user `nut`, who cannot read the `/root/.msmtprc` file, create an identical file at `/etc/nut/msmtprc` for the `nut` user to read.
10 | - Copy [`nut_notify.sh`](templates/nut_notify.sh.j2), which will be executed to send email notification.
11 | - Enable various systemd services.
12 |
13 | ## Variables
14 | ```yaml
15 | # UPS password in the /etc/nut/upsd.conf
16 | # https://wiki.archlinux.org/title/Network_UPS_Tools#upsd_configuration
17 | ups_password: !unsafe 1234546
18 |
19 |
20 | # Optional variable to fix _Can't claim USB device error_
21 | # https://wiki.archlinux.org/title/Network_UPS_Tools#Can't_claim_USB_device_error
22 | # These are the USB device manufacturer and product IDs.
23 | # You can get these IDs [XXXX:YYYY] by `lsusb` command.
24 | #ups_vender_id: XXXX
25 | #ups_product_id: YYYY
26 | ```
27 |
28 |
--------------------------------------------------------------------------------
/roles/nut/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
# Install Network UPS Tools.
- name: pacman -S nut
  community.general.pacman:
    name: nut
    state: present
  become: true
5 |
# Declare a single USB HID UPS; `port = auto` lets the driver locate the
# device itself.
- name: /etc/nut/ups.conf
  ansible.builtin.blockinfile:
    path: /etc/nut/ups.conf
    block: |
      [myups]
      driver = usbhid-ups
      port = auto
    state: present
  become: true
15 |
# Install a udev rule granting the "nut" group access to the UPS USB device
# (fixes the "Can't claim USB device" error).
- name: Change UPS device permission
  ansible.builtin.template:
    src: 50-ups.rules.j2
    dest: /etc/udev/rules.d/50-ups.rules
    owner: root
    group: root
    mode: '0644'
  become: true
  # The template references both IDs; require both so templating cannot fail
  # on an undefined variable.
  when:
    - ups_vender_id is defined
    - ups_product_id is defined
25 |
# Declare the NUT admin account that upsmon (primary mode) authenticates
# with; SET/instcmds allow changing variables and running instant commands.
- name: /etc/nut/upsd.users
  ansible.builtin.blockinfile:
    path: /etc/nut/upsd.users
    block: |
      [admin]
      password = {{ ups_password }}
      upsmon primary
      actions = SET
      instcmds = ALL
    state: present
  become: true
37 |
# Monitor the locally attached UPS and run nut_notify.sh (in addition to
# syslog) for every notification type NUT emits.
- name: /etc/nut/upsmon.conf
  ansible.builtin.blockinfile:
    path: /etc/nut/upsmon.conf
    block: |
      MONITOR myups@localhost 1 admin {{ ups_password }} primary
      NOTIFYCMD /etc/nut/nut_notify.sh
      NOTIFYFLAG ONLINE SYSLOG+EXEC
      NOTIFYFLAG ONBATT SYSLOG+EXEC
      NOTIFYFLAG LOWBATT SYSLOG+EXEC
      NOTIFYFLAG FSD SYSLOG+EXEC
      NOTIFYFLAG SHUTDOWN SYSLOG+EXEC
      NOTIFYFLAG REPLBATT SYSLOG+EXEC
      NOTIFYFLAG NOCOMM SYSLOG+EXEC
    state: present
    create: true
  become: true
54 |
# Private msmtp config for the nut user — upsmon runs as "nut" and cannot
# read /root/.msmtprc. Mode 0600 keeps the SMTP password private.
- name: msmtp config
  ansible.builtin.template:
    src: msmtprc.j2
    dest: /etc/nut/msmtprc
    owner: nut
    group: nut
    mode: '0600'
  become: true
63 |
# Install the NOTIFYCMD script referenced in upsmon.conf; executable only by
# the nut user.
- name: /etc/nut/nut_notify.sh
  ansible.builtin.template:
    src: nut_notify.sh.j2
    dest: /etc/nut/nut_notify.sh
    owner: nut
    group: nut
    mode: '0700'
  become: true
72 |
# Enable the full NUT stack (driver enumeration, server, monitor and targets)
# to start on boot.
- name: systemctl enable nut-driver-enumerator.service nut-server.service nut-monitor.service nut.target nut-driver.target
  ansible.builtin.systemd:
    name: "{{ item }}"
    enabled: true
  loop:
    - nut-driver-enumerator.service
    - nut-server.service
    - nut-monitor.service
    - nut.target
    - nut-driver.target
  become: true
82 |
--------------------------------------------------------------------------------
/roles/nut/templates/50-ups.rules.j2:
--------------------------------------------------------------------------------
1 | SUBSYSTEM=="usb", ATTR{idVendor}=="{{ ups_vender_id }}", ATTR{idProduct}=="{{ ups_product_id }}", GROUP="nut"
2 |
--------------------------------------------------------------------------------
/roles/nut/templates/msmtprc.j2:
--------------------------------------------------------------------------------
# msmtp configuration for the NUT user (upsmon runs as "nut" and cannot read
# /root/.msmtprc); the msmtp_* values are shared with roles/msmtp.
# Set default values for all following accounts.
defaults
auth on
tls_trust_file /etc/ssl/certs/ca-certificates.crt
logfile /etc/nut/msmtp.log

account {{ msmtp_account }}
host {{ msmtp_host }}
port {{ msmtp_port }}
tls {{ msmtp_tls }}
tls_starttls {{ msmtp_tls_starttls }}
from {{ msmtp_from }}
user {{ msmtp_user }}
password {{ msmtp_password }}
16 |
--------------------------------------------------------------------------------
/roles/nut/templates/nut_notify.sh.j2:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 |
3 | echo -e "To: {{ msmtp_to }}\nFrom: {{ msmtp_from }}\nSubject: NUT notification\n\n$1" | msmtp --file='/etc/nut/msmtprc' --read-recipients --read-envelope-from
4 |
5 |
--------------------------------------------------------------------------------
/roles/openssh/README.md:
--------------------------------------------------------------------------------
1 | Hardening OpenSSH server
2 |
3 | ## Tasks
- Force public key authentication and disable password login.
5 | - Optionally, limit allowed login user.
6 | - Optionally, set up firewall rule.
7 |
8 | ## Variables
9 | ```yaml
10 | # Limit login users if defined
11 | # AllowUsers in /etc/ssh/sshd_config
12 | #ssh_allowusers: 'user1 user2 user3'
13 |
14 |
15 | # Set hostkey
16 | # HostKey in /etc/ssh/sshd_config
17 | #ssh_hostkey: ed25519
18 |
19 |
20 | # Only allow ssh connection from these ip address
21 | #ssh_accept_source_ipv4:
22 | # - 192.168.122.0/24
23 | # - 192.168.123.1
24 | ```
25 |
26 |
--------------------------------------------------------------------------------
/roles/openssh/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | homed: false
3 |
--------------------------------------------------------------------------------
/roles/openssh/tasks/homed.yml:
--------------------------------------------------------------------------------
1 | ---
- name: |
    Force both password and public key authentication for systemd-homed setup. While the user is unlocked run
    homectl update username --ssh-authorized-keys=@/path/to/mounted/home/.ssh/authorized_keys
    to enroll keys.
  ansible.builtin.lineinfile:
    path: /etc/ssh/sshd_config
    regexp: "{{ item.regexp }}"
    line: "{{ item.line }}"
    insertafter: "{{ item.insertafter }}"
    # Refuse to write a config sshd cannot parse.
    validate: /usr/sbin/sshd -T -f %s
  loop:
    # Each entry replaces an active directive, or inserts after the matching
    # commented-out default (falling back to end-of-file if no match).
    - { regexp: '^PasswordAuthentication ', line: PasswordAuthentication yes, insertafter: '#\s*PasswordAuthentication ' }
    - { regexp: '^PubkeyAuthentication ', line: PubkeyAuthentication yes, insertafter: '#\s*PubkeyAuthentication ' }
    - { regexp: '^AuthenticationMethods ', line: 'AuthenticationMethods publickey,password', insertafter: '^PasswordAuthentication ' }
    # userdbctl serves the keys enrolled in the homed user record to sshd.
    - { regexp: '^AuthorizedKeysCommandUser ', line: 'AuthorizedKeysCommandUser root', insertafter: '^AuthorizedKeysCommandUser ' }
    - { regexp: '^AuthorizedKeysCommand ', line: 'AuthorizedKeysCommand /usr/bin/userdbctl ssh-authorized-keys %u', insertafter: '^AuthorizedKeysCommand ' }
  become: true
19 |
# Check whether the connecting user already has an authorized_keys file; the
# next two tasks only run when it exists.
- name: Check ~/.ssh/authorized_keys exists or not
  ansible.builtin.stat:
    path: "{{ ansible_user_dir }}/.ssh/authorized_keys"
  register: ssh_authorized_keys_file
24 |
# Read the existing authorized_keys content so it can be enrolled with
# homectl below. `command` suffices — no shell features are used — and the
# registered result keeps the same .stdout shape as before.
- name: Get all public keys in ~/.ssh/authorized_keys
  ansible.builtin.command: "cat {{ ansible_user_dir }}/.ssh/authorized_keys"
  changed_when: false
  register: ssh_authorized_keys
  when: ssh_authorized_keys_file.stat.exists
30 |
# Enroll the collected public keys into the homed user record, so sshd's
# AuthorizedKeysCommand (userdbctl) can serve them even while the encrypted
# home is locked. The homed password is required to modify the record.
- name: Enroll authorized public keys if file exist with homectl
  community.general.homectl:
    name: "{{ ansible_user_id }}"
    password: "{{ ansible_become_password }}"
    sshkeys: "{{ ssh_authorized_keys.stdout }}"
  become: true
  when: ssh_authorized_keys_file.stat.exists
38 |
--------------------------------------------------------------------------------
/roles/openssh/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
# Restrict ssh logins to the whitelisted users (space-separated list).
- name: Allow access only for some users
  ansible.builtin.lineinfile:
    path: /etc/ssh/sshd_config
    regexp: '^AllowUsers'
    line: AllowUsers {{ ssh_allowusers }}
    insertafter: '#\s*AllowUsers'
    # Refuse to write a config sshd cannot parse.
    validate: /usr/sbin/sshd -T -f %s
  become: true
  when: ssh_allowusers is defined
11 |
# Pin sshd to a single host key algorithm, e.g. ssh_hostkey: ed25519 selects
# /etc/ssh/ssh_host_ed25519_key.
- name: Set host key
  ansible.builtin.lineinfile:
    path: /etc/ssh/sshd_config
    regexp: '^HostKey'
    line: "HostKey /etc/ssh/ssh_host_{{ ssh_hostkey }}_key"
    insertafter: '#\s*HostKey'
    validate: /usr/sbin/sshd -T -f %s
  become: true
  when: ssh_hostkey is defined
21 |
22 | #- name: Set ssh port
23 | # ansible.builtin.lineinfile:
24 | # path: /etc/ssh/sshd_config
25 | # regexp: '^Port'
26 | # line: "Port {{ ansible_port }}"
27 | # insertafter: '#\s*Port'
28 | # validate: /usr/sbin/sshd -T -f %s
29 | # become: true
30 |
# Disable password and keyboard-interactive logins, leaving public key as the
# only authentication method. Skipped for systemd-homed hosts, where
# homed.yml configures mixed publickey+password auth instead.
- name: Force public key authentication
  ansible.builtin.lineinfile:
    path: /etc/ssh/sshd_config
    regexp: "{{ item.regexp }}"
    line: "{{ item.line }}"
    insertafter: "{{ item.insertafter }}"
    # Refuse to write a config sshd cannot parse.
    validate: /usr/sbin/sshd -T -f %s
  loop:
    # Replace the active directive, or insert after its commented-out default.
    - { regexp: '^PasswordAuthentication ', line: PasswordAuthentication no, insertafter: '^#\s*PasswordAuthentication ' }
    - { regexp: '^KbdInteractiveAuthentication ', line: KbdInteractiveAuthentication no, insertafter: '^#\s*KbdInteractiveAuthentication ' }
    - { regexp: '^AuthenticationMethods ', line: AuthenticationMethods publickey, insertafter: '^PasswordAuthentication ' }
  become: true
  when: not homed
44 |
45 | #- name: Add custom ssh rule to UFW
46 | # ansible.builtin.blockinfile:
47 | # path: /etc/ufw/applications.d/ufw-custom
48 | # block: |
49 | # [SSH-custom]
50 | # title=SSH server
51 | # description=SSH server
52 | # ports={{ ansible_port }}/tcp
53 | # create: true
54 | # marker: "; SSH {mark} ANSIBLE MANAGED BLOCK"
55 |
# Permit the ssh service only from the listed IPv4 sources, in both the
# runtime and permanent firewalld configuration (same pattern as the samba
# role's rich rules).
- name: Firewall rule for ssh
  ansible.posix.firewalld:
    rich_rule: rule family="ipv4" source address="{{ item }}" service name="ssh" accept
    permanent: true
    immediate: true
    state: enabled
  loop: "{{ ssh_accept_source_ipv4 }}"
  become: true
  when: ssh_accept_source_ipv4 is defined
65 |
66 |
# Apply the systemd-homed specific sshd configuration when enabled.
# FQCN and an explicit task name added for lint compliance and readable output.
- name: Configure sshd for systemd-homed
  ansible.builtin.include_tasks: homed.yml
  when: homed
69 |
--------------------------------------------------------------------------------
/roles/podman/README.md:
--------------------------------------------------------------------------------
1 | Install [podman](https://wiki.archlinux.org/title/Podman) and set up rootless containers with [Quadlet](https://wiki.archlinux.org/title/Podman#Quadlet).
Since containers run as non-root users, we can run different containers under different users for further isolation.
3 |
This role should work on Arch Linux and Fedora.
5 |
6 | ## Tasks
7 | - Install `podman` and `aardvark-dns` packages.
8 | - [Enable lingering](https://wiki.archlinux.org/title/Systemd/User#Automatic_start-up_of_systemd_user_instances) for each user with `{{ enable_lingering }}` set to `true`.
9 | - Create [`podman-system-prune.service`](templates/podman-system-prune.service.j2) and [`podman-system-prune.timer`](templates/podman-system-prune.timer.j2) to automatically cleanup old images and containers.
10 | - Enable `podman-auto-update.timer` to auto-update containers.
11 |
12 |
13 | ## Variables and examples
14 |
15 | ### Syncthing
16 | ```yaml
17 | # Time zone, used in LinuxServer.io images
18 | TZ: "US/Eastern"
19 |
20 |
21 | # Run podman under these users.
22 | # If the user does not exist it will create a new user.
23 | # Here containers are spread under different users as an example, you could
24 | # group them under few users.
25 | # To manage systemd services under different users add `-M username@` to `systemctl` command,
26 | # for example:
27 | # sudo systemctl --user -M user1@ status xxxx.service
28 | # To view journal under different user with UID 1001
29 | # sudo journalctl _UID=1001 _SYSTEMD_USER_UNIT=xxxx.service
30 | podman_users:
31 |
32 | # Run Syncthing under user `tux`
33 | - name: tux
34 |
35 | # UID of the user
36 | uid: 10000
37 |
38 | # Enable lingering or not
39 | enable_lingering: true
40 |
41 | # How often to clean up old podman images/containers.
42 | # This is the OnCalendar= option in podman-system-prune.timer
43 | podman_system_prune_timer: daily
44 |
45 | # List of containers that will run under user `tux`
46 | containers:
47 | - syncthing
48 |
49 | # Path to store syncthing container config
50 | syncthing_config_dir: "/path/to/container/config/syncthing"
51 |
52 | # List of directories to map into syncthing container
53 | syncthing_data_dirs:
54 | - { src: /path/on/host/machine, dest: /path/in/container }
55 | - { src: /another/path/on/host, dest: /another/path/in/container }
56 | ```
57 |
58 | ### Linux ISOs
59 | ```yaml
60 | # Time zone, used in LinuxServer.io images
61 | TZ: "US/Eastern"
62 |
63 | podman_users:
64 | - name: tux1
65 | uid: 10001
66 | enable_lingering: true
67 | podman_system_prune_timer: daily
68 |
69 | containers:
70 | - gluetun
71 | - transmission
72 | - qbittorrent
73 |
74 | # gluetun VPN provider env variables, here is a vanilla wireguard example.
75 | # see https://github.com/qdm12/gluetun-wiki/tree/main/setup/providers
76 | # and https://github.com/qdm12/gluetun-wiki/blob/main/setup/options/port-forwarding.md
77 | gluetun_vpn_provider_env:
78 | - VPN_SERVICE_PROVIDER=custom
79 | - VPN_TYPE=wireguard
80 | - VPN_ENDPOINT_IP='1.2.3.4'
81 | - VPN_ENDPOINT_PORT=51820
82 | - WIREGUARD_PUBLIC_KEY='xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
83 | - WIREGUARD_PRIVATE_KEY='xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
84 | - WIREGUARD_ADDRESSES='ipv4/mask,ipv6/mask'
85 | - VPN_PORT_FORWARDING_LISTENING_PORT='1234'
86 |
87 | # Optionally, enable gluetun http proxy with default HTTPPROXY_LISTENING_ADDRESS (8888)
88 | # https://github.com/qdm12/gluetun-wiki/blob/main/setup/options/http-proxy.md
89 | #gluetun_httpproxy: true
90 |
91 | # Path to store transmission config
92 | transmission_config_dir: "/path/to/container/config/transmission"
93 |
94 | # Path to transmission download directory
95 | transmission_downloads_dir: "/path/to/transmission/download/dir"
96 |
97 | # Optionally, specify web UI port (default 9091)
98 | #transmission_web_port: 9091
99 |
# Optionally, transmission watch directory
101 | #transmission_watch_dir: "/path/to/transmission/watch/dir"
102 |
103 | # Optionally, add auth to transmission web UI
104 | #transmission_user: tux
105 | #transmission_pass: !unsafe mypassword
106 |
107 | # Path to store qbittorrent config
108 | qbittorrent_config_dir: "/path/to/container/config/qbittorrent"
109 |
110 | # Path to qbittorrent download directory
111 | qbittorrent_downloads_dir: "/path/to/qbittorrent/download/dir"
112 |
113 | # Optionally, specify web UI port (default 8090)
114 | #qbittorrent_web_port: 8090
115 | ```
116 |
117 |
118 | ### Nextcloud AIO, traefik2 reverse proxy and Letsencrypt running as different users
119 |
120 | ```yaml
121 | podman_users:
122 |
123 | # Generate letsencrypt certificates under user `tux2` with cloudflare DNS challenge.
124 | # This way other user can't access your DNS token.
125 | - name: tux2
126 | uid: 10002
127 | enable_lingering: true
128 | podman_system_prune_timer: daily
129 |
130 | containers:
131 | - letsencrypt
132 |
133 | # Path to store letsencrypt container config
134 | letsencrypt_config_dir: "/path/to/container/config/letsencrypt"
# Also create `/path/to/container/config/letsencrypt/cloudflare.ini` that
136 | # contains single line:
137 | # dns_cloudflare_api_token = xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
138 |
139 | # Email address for letsencrypt expiration notification
140 | letsencrypt_email: "email@domain.com"
141 |
142 | # Domains contained in letsencrypt certification
143 | letsencrypt_domains:
144 | - '*.mydomain.example'
145 |
146 |
147 | # Reverse proxy runs under user `tux3`
148 | - name: tux3
149 | uid: 10003
150 | enable_lingering: true
151 | podman_system_prune_timer: daily
152 |
153 | containers:
154 | - traefik
155 |
156 | # Path to store traefik container config
157 | traefik_config_dir: "/path/to/container/config/traefik"
158 |
159 | # traefik static config file
160 | # see example at the end
161 | traefik_static_config: "files/traefik_static_config.yml"
162 |
163 | # traefik dynamic config file
164 | # see example at the end
165 | traefik_dynamic_config: "files/traefik_dynamic_config.yml"
166 |
167 | # Path to store letsencrypt container config
168 | # If letsencrypt and traefik are running as different users, traefik won't be
169 | # able to access letsencrypt certificates, with advantage being traefik
170 | # also won't have access to your DNS token.
171 | # To accommodate this we create a copy-ssl.service running as root and
172 | # only copy generated certificates to `{{ traefik_config_dir }}`.
173 | letsencrypt_config_dir: "/path/to/container/config/letsencrypt"
174 |
175 | # firewall rules only allow connection from these ipv4 address
176 | https_accept_source_ipv4:
177 | - 192.168.1.0/24
178 | - 192.168.2.1
179 |
180 |
181 | # Nextcloud AIO runs under user `tux4`
182 | - name: tux4
183 | uid: 10004
184 | enable_lingering: true
185 | podman_system_prune_timer: daily
186 |
187 | containers:
188 | - nextcloud
189 |
190 | # The Nextcloud AIO web admin port
191 | nextcloud_aio_port: 11001
192 |
193 | # Some optional environment variables pass to nextcloud-aio-mastercontainer
194 | # https://github.com/nextcloud/all-in-one/blob/main/compose.yaml
195 |
196 | # SKIP_DOMAIN_VALIDATION
197 | nextcloud_skip_domain_validation: true
198 |
199 | # BORG_RETENTION_POLICY
200 | nextcloud_backup_retention: "--keep-within=7d --keep-weekly=4 --keep-monthly=0"
201 |
202 | # NEXTCLOUD_MEMORY_LIMIT
203 | nextcloud_memory_limit: 1024M
204 | ```
205 |
206 |
207 | Traefik static configuration file example
208 | ```yaml
209 | providers:
210 | file:
211 | # Don't change this path.
212 | # The dynamic configuration file specified in Ansible will be copied and
213 | # mapped to `/etc/traefik/dynamic_conf.yml` inside the container
214 | filename: "/etc/traefik/dynamic_conf.yml"
215 |
216 | # redirect http to https
217 | entryPoints:
218 | http:
219 | address: :80
220 | http:
221 | redirections:
222 | entryPoint:
223 | to: https
224 | scheme: https
225 |
226 | https:
227 | address: :443
228 |
229 | # Disable SSL verification between traefik and backends
230 | serversTransport:
231 | insecureSkipVerify: true
232 | ```
233 |
234 | Traefik dynamic configuration file example
235 | ```yaml
236 | http:
237 |
238 | routers:
239 | nextcloud:
240 | entryPoints:
241 | - https
242 | rule: "Host(`nextcloud.mydomain.example`)"
243 | service: nextcloud
244 | middlewares:
245 | - secureHeader
246 | - nextcloud-redirectregex
247 | tls:
248 | options: default
249 | domains:
250 | - main: "mydomain.example"
251 | sans:
252 | - "*.mydomain.example"
253 |
254 | services:
255 | nextcloud:
256 | loadBalancer:
257 | passHostHeader: true
258 | servers:
259 | - url: "http://10.0.2.2:11000"
260 |
261 | middlewares:
262 | secureHeader:
263 | headers:
264 | stsSeconds: 15552000
265 | stsIncludeSubdomains: true
266 | forceSTSHeader: true
267 | customFrameOptionsValue: "SAMEORIGIN"
268 | contentTypeNosniff: true
269 | browserXssFilter: true
270 | referrerPolicy: "strict-origin"
271 | customResponseHeaders:
272 | X-Robots-Tag: "noindex,nofollow,nosnippet,noarchive,notranslate,noimageindex"
273 |
274 | nextcloud-redirectregex:
275 | redirectRegex:
276 | permanent: true
277 | regex: "https://(.*)/.well-known/(?:card|cal)dav"
278 | replacement: "https://${1}/remote.php/dav"
279 |
280 |
281 | tls:
282 | certificates:
283 | - certFile: /etc/traefik/ssl/mydomain.example/fullchain.pem
284 | keyFile: /etc/traefik/ssl/mydomain.example/privkey.pem
285 | stores:
286 | - default
287 | stores:
288 | default:
289 | defaultCertificate:
290 | certFile: /etc/traefik/ssl/mydomain.example/fullchain.pem
291 | keyFile: /etc/traefik/ssl/mydomain.example/privkey.pem
292 | options:
293 | default:
294 | minVersion: VersionTLS13
295 | sniStrict: true
296 | ```
297 |
--------------------------------------------------------------------------------
/roles/podman/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | homed: false
3 | #firewalld_default_zone: public
4 |
--------------------------------------------------------------------------------
/roles/podman/legacy/deluge.service.j2:
--------------------------------------------------------------------------------
1 | # deluge.service
2 |
3 | [Unit]
4 | Description=Podman deluge.service
5 | Documentation=man:podman-generate-systemd(1)
6 | Wants=network-online.target gluetun.service
7 | After=network-online.target gluetun.service
8 | Requires=gluetun.service
9 | RequiresMountsFor=%t/containers
10 |
11 | [Service]
12 | Environment=PODMAN_SYSTEMD_UNIT=%n
13 | Restart=on-failure
14 | RestartSec=5
15 | TimeoutStopSec=70
16 | ExecStartPre=/bin/rm \
17 | -f %t/%n.ctr-id
18 | ExecStart=/usr/bin/podman run \
19 | --cidfile=%t/%n.ctr-id \
20 | --cgroups=no-conmon \
21 | --rm \
22 | --sdnotify=conmon \
23 | --detach \
24 | --replace \
25 | --label io.containers.autoupdate=registry \
26 | --uidmap 1000:0:1 \
27 | --uidmap 0:1:1000 \
28 | --uidmap 1001:1001:64536 \
29 | --name=deluge \
30 | --network=container:gluetun \
31 | --env PUID=1000 \
32 | --env PGID=1000 \
33 | --env TZ={{ TZ }} \
34 | --volume {{ podman_user.deluge_config_dir }}:/config:Z \
35 | --volume {{ podman_user.deluge_downloads_dir }}:/downloads:Z \
36 | lscr.io/linuxserver/deluge:latest
37 | ExecStop=/usr/bin/podman stop \
38 | --ignore \
39 | --time=10 \
40 | --cidfile=%t/%n.ctr-id
41 | ExecStopPost=/usr/bin/podman rm \
42 | --force \
43 | --ignore \
44 | --time=10 \
45 | --cidfile=%t/%n.ctr-id
46 | Type=notify
47 | NotifyAccess=all
48 |
49 | [Install]
50 | WantedBy=default.target
51 |
--------------------------------------------------------------------------------
/roles/podman/legacy/deluge.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: deluge container config directory
3 | ansible.builtin.file:
4 | path: "{{ podman_user.deluge_config_dir }}"
5 | state: directory
6 | owner: "{{ podman_user.name }}"
7 | group: "{{ podman_user.name }}"
8 | mode: '0700'
9 | become: true
10 |
11 | - name: deluge.service
12 | ansible.builtin.template:
13 | src: deluge.service.j2
14 | dest: "/home/{{ podman_user.name }}/.config/systemd/user/deluge.service"
15 | owner: "{{ podman_user.name }}"
16 | group: "{{ podman_user.name }}"
17 | mode: '0600'
18 | become: true
19 | become_user: "{{ podman_user.name }}"
20 |
21 | - name: systemctl --user daemon-reload
22 | ansible.builtin.systemd: daemon_reload=true scope=user
23 | become: true
24 | become_user: "{{ podman_user.name }}"
25 |
26 | - name: systemctl --user enable --now deluge.service
27 | ansible.builtin.systemd: name=deluge.service enabled=true state=started scope=user
28 | become: true
29 | become_user: "{{ podman_user.name }}"
30 |
--------------------------------------------------------------------------------
/roles/podman/legacy/homeassistant.service.j2:
--------------------------------------------------------------------------------
1 | # homeassistant.service
2 |
3 | [Unit]
4 | Description=Podman homeassistant.service
5 | Documentation=man:podman-generate-systemd(1)
6 | Wants=network-online.target
7 | After=network-online.target
8 | RequiresMountsFor=%t/containers
9 |
10 | [Service]
11 | Environment=PODMAN_SYSTEMD_UNIT=%n
12 | Restart=on-failure
13 | TimeoutStopSec=70
14 | ExecStartPre=/bin/rm \
15 | -f %t/%n.ctr-id
16 | #ExecStart=/usr/bin/podman run \
17 | # --cidfile=%t/%n.ctr-id \
18 | # --cgroups=no-conmon \
19 | # --rm \
20 | # --sdnotify=conmon \
21 | # --detach \
22 | # --replace \
23 | # --label io.containers.autoupdate=registry \
24 | # --uidmap 1000:0:1 \
25 | # --uidmap 0:1:1000 \
26 | # --uidmap 1001:1001:64536 \
27 | # --env PUID=1000 \
28 | # --env PGID=1000 \
29 | # --env TZ={{ TZ }} \
30 | # --name=homeassistant \
31 | # --hostname=homeassistant \
32 | # --publish 8123:8123/tcp \
33 | # --volume {{ podman_user.homeassistant_config_dir }}:/config:Z \
34 | # lscr.io/linuxserver/homeassistant:latest
35 | ExecStart=/usr/bin/podman run \
36 | --cidfile=%t/%n.ctr-id \
37 | --cgroups=no-conmon \
38 | --rm \
39 | --sdnotify=conmon \
40 | --detach \
41 | --replace \
42 | --label io.containers.autoupdate=registry \
43 | --userns keep-id:uid=1000,gid=1000 \
44 | --user 1000:1000 \
45 | --name=homeassistant \
46 | --hostname=homeassistant \
47 | --publish 8123:8123/tcp \
48 | --env TZ={{ TZ }} \
49 | --volume {{ podman_user.homeassistant_config_dir }}:/config:Z \
50 | ghcr.io/home-assistant/home-assistant:stable
51 | ExecStop=/usr/bin/podman stop \
52 | --ignore \
53 | --time=10 \
54 | --cidfile=%t/%n.ctr-id
55 | ExecStopPost=/usr/bin/podman rm \
56 | --force \
57 | --ignore \
58 | --time=10 \
59 | --cidfile=%t/%n.ctr-id
60 | Type=notify
61 | NotifyAccess=all
62 |
63 |
64 | [Install]
65 | WantedBy=default.target
66 |
--------------------------------------------------------------------------------
/roles/podman/legacy/homeassistant.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: homeassistant container config and data directory
3 | ansible.builtin.file:
4 | path: "{{ podman_user.homeassistant_config_dir }}"
5 | state: directory
6 | owner: "{{ podman_user.name }}"
7 | group: "{{ podman_user.name }}"
8 | mode: '0700'
9 | become: true
10 |
11 | - name: homeassistant.service
12 | ansible.builtin.template:
13 | src: homeassistant.service.j2
14 | dest: "/home/{{ podman_user.name }}/.config/systemd/user/homeassistant.service"
15 | owner: "{{ podman_user.name }}"
16 | group: "{{ podman_user.name }}"
17 | mode: '0600'
18 | become: true
19 | become_user: "{{ podman_user.name }}"
20 |
21 | - name: systemctl --user daemon-reload
22 | ansible.builtin.systemd: daemon_reload=true scope=user
23 | become: true
24 | become_user: "{{ podman_user.name }}"
25 |
26 | - name: systemctl --user enable --now homeassistant.service
27 | ansible.builtin.systemd: name=homeassistant enabled=true state=started scope=user
28 | become: true
29 | become_user: "{{ podman_user.name }}"
30 |
31 |
32 | # 8123/tcp homeassistant port
33 | - name: add homeassistant firewalld service file homeassistant.xml
34 | ansible.builtin.copy:
35 |     content: |
36 |       <?xml version="1.0" encoding="utf-8"?>
37 |       <service>
38 |         <short>Homeassistant</short>
39 |         <description>Homeassistant</description>
40 |         <port protocol="tcp" port="8123"/>
41 |       </service>
42 | dest: /etc/firewalld/services/homeassistant.xml
43 | owner: root
44 | group: root
45 | mode: '0644'
46 | become: true
47 | register: homeassistant_firewalld_file
48 |
49 | - name: Reload firewalld when homeassistant.xml changed
50 | ansible.builtin.command: firewall-cmd --reload
51 | become: true
52 | when: homeassistant_firewalld_file.changed
53 |
54 | - name: Set firewall rules for homeassistant listening port (TCP)
55 | ansible.posix.firewalld:
56 | rich_rule: rule family="ipv4" source address="{{ reverse_proxy_ipv4 }}" service name="homeassistant" accept
57 | #zone: "{{ firewalld_default_zone }}"
58 | permanent: true
59 | immediate: true
60 | state: enabled
61 | become: true
62 | when: reverse_proxy_ipv4 is defined
63 |
--------------------------------------------------------------------------------
/roles/podman/legacy/jellyfin.service.j2:
--------------------------------------------------------------------------------
1 | # jellyfin.service
2 |
3 | [Unit]
4 | Description=Podman jellyfin.service
5 | Documentation=man:podman-generate-systemd(1)
6 | Wants=network-online.target
7 | After=network-online.target
8 | RequiresMountsFor=%t/containers
9 |
10 | [Service]
11 | Environment=PODMAN_SYSTEMD_UNIT=%n
12 | Restart=on-failure
13 | TimeoutStopSec=70
14 | ExecStartPre=/bin/rm \
15 | -f %t/%n.ctr-id
16 | #ExecStart=/usr/bin/podman run \
17 | # --cidfile=%t/%n.ctr-id \
18 | # --cgroups=no-conmon \
19 | # --rm \
20 | # --sdnotify=conmon \
21 | # --detach \
22 | # --replace \
23 | # --label io.containers.autoupdate=registry \
24 | # --uidmap 1000:0:1 \
25 | # --uidmap 0:1:1000 \
26 | # --uidmap 1001:1001:64536 \
27 | # --env PUID=1000 \
28 | # --env PGID=1000 \
29 | # --env TZ={{ TZ }} \
30 | # --name=jellyfin \
31 | # --hostname=jellyfin \
32 | # --publish 8096:8096/tcp \
33 | # --volume {{ podman_user.jellyfin_config_dir }}:/config:Z \
34 | # --volume {{ podman_user.jellyfin_data_dir }}:/media:ro,z \
35 | # lscr.io/linuxserver/jellyfin:latest
36 | ExecStart=/usr/bin/podman run \
37 | --cidfile=%t/%n.ctr-id \
38 | --cgroups=no-conmon \
39 | --rm \
40 | --sdnotify=conmon \
41 | --detach \
42 | --replace \
43 | --label io.containers.autoupdate=registry \
44 | --userns keep-id:uid=1000,gid=1000 \
45 | --user 1000:1000 \
46 | --name=jellyfin \
47 | --hostname=jellyfin \
48 | --publish 8096:8096/tcp \
49 | --volume {{ podman_user.jellyfin_config_dir }}:/config:Z \
50 | --volume {{ podman_user.jellyfin_cache_dir }}:/cache:Z \
51 | --volume {{ podman_user.jellyfin_data_dir }}:/media:ro,z \
52 | docker.io/jellyfin/jellyfin:latest
53 | ExecStop=/usr/bin/podman stop \
54 | --ignore \
55 | --time=10 \
56 | --cidfile=%t/%n.ctr-id
57 | ExecStopPost=/usr/bin/podman rm \
58 | --force \
59 | --ignore \
60 | --time=10 \
61 | --cidfile=%t/%n.ctr-id
62 | Type=notify
63 | NotifyAccess=all
64 |
65 | # Security Features
66 | #PrivateTmp=yes
67 | #NoNewPrivileges=yes
68 | #ProtectSystem=strict
69 | #ProtectHome=yes
70 | #ProtectKernelTunables=yes
71 | #ProtectControlGroups=yes
72 | #PrivateMounts=yes
73 | #ProtectHostname=yes
74 |
75 | [Install]
76 | WantedBy=default.target
77 |
--------------------------------------------------------------------------------
/roles/podman/legacy/jellyfin.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: jellyfin container config and cache directory
3 | ansible.builtin.file:
4 | path: "{{ item }}"
5 | state: directory
6 | owner: "{{ podman_user.name }}"
7 | group: "{{ podman_user.name }}"
8 | mode: '0700'
9 | become: true
10 | loop:
11 | - "{{ podman_user.jellyfin_config_dir }}"
12 | - "{{ podman_user.jellyfin_cache_dir }}"
13 |
14 | - name: jellyfin.service
15 | ansible.builtin.template:
16 | src: jellyfin.service.j2
17 | dest: "/home/{{ podman_user.name }}/.config/systemd/user/jellyfin.service"
18 | owner: "{{ podman_user.name }}"
19 | group: "{{ podman_user.name }}"
20 | mode: '0600'
21 | become: true
22 | become_user: "{{ podman_user.name }}"
23 |
24 | - name: systemctl --user daemon-reload
25 | ansible.builtin.systemd: daemon_reload=true scope=user
26 | become: true
27 | become_user: "{{ podman_user.name }}"
28 |
29 | - name: systemctl --user enable --now jellyfin.service
30 | ansible.builtin.systemd: name=jellyfin enabled=true state=started scope=user
31 | become: true
32 | become_user: "{{ podman_user.name }}"
33 |
34 | # 8096/tcp jellyfin port
35 | - name: add jellyfin firewalld service file jellyfin.xml
36 | ansible.builtin.copy:
37 |     content: |
38 |       <?xml version="1.0" encoding="utf-8"?>
39 |       <service>
40 |         <short>Jellyfin</short>
41 |         <description>Jellyfin</description>
42 |         <port protocol="tcp" port="8096"/>
43 |       </service>
44 | dest: /etc/firewalld/services/jellyfin.xml
45 | owner: root
46 | group: root
47 | mode: '0644'
48 | become: true
49 | register: jellyfin_firewalld_file
50 |
51 | - name: Reload firewalld when jellyfin.xml changed
52 | ansible.builtin.command: firewall-cmd --reload
53 | become: true
54 | when: jellyfin_firewalld_file.changed
55 |
56 | - name: Set firewall rules for jellyfin listening port (TCP)
57 | ansible.posix.firewalld:
58 | rich_rule: rule family="ipv4" source address="{{ reverse_proxy_ipv4 }}" service name="jellyfin" accept
59 | #zone: "{{ firewalld_default_zone }}"
60 | permanent: true
61 | immediate: true
62 | state: enabled
63 | become: true
64 | when: reverse_proxy_ipv4 is defined
65 |
--------------------------------------------------------------------------------
/roles/podman/legacy/nextcloud-cron.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Nextcloud cron.php job
3 |
4 | [Service]
5 | ExecStart=/usr/bin/podman exec nextcloud /usr/local/bin/php -f /var/www/html/cron.php
6 |
--------------------------------------------------------------------------------
/roles/podman/legacy/nextcloud-cron.timer.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Run Nextcloud cron.php every 5 minutes
3 |
4 | [Timer]
5 | OnBootSec=5min
6 | OnUnitActiveSec=5min
7 | Unit=nextcloud-cron.service
8 |
9 | [Install]
10 | WantedBy=timers.target
11 |
--------------------------------------------------------------------------------
/roles/podman/legacy/nextcloud-pod.service.j2:
--------------------------------------------------------------------------------
1 | # nextcloud-pod.service
2 |
3 | [Unit]
4 | Description=Podman nextcloud-pod.service
5 | Documentation=man:podman-generate-systemd(1)
6 | Wants=network-online.target
7 | After=network-online.target
8 | RequiresMountsFor=%t/containers
9 | Wants=nextcloud.service postgres.service
10 | Before=nextcloud.service postgres.service
11 |
12 | [Service]
13 | Environment=PODMAN_SYSTEMD_UNIT=%n
14 | Restart=on-failure
15 | TimeoutStopSec=70
16 | #ExecStartPre=/usr/bin/podman pod create \
17 | # --infra-conmon-pidfile %t/nextcloud-pod.pid \
18 | # --pod-id-file %t/nextcloud-pod.pod-id \
19 | # --exit-policy=stop \
20 | # --replace \
21 | # --uidmap 1000:0:1 \
22 | # --uidmap 0:1:1000 \
23 | # --uidmap 1001:1001:64536 \
24 | # --publish 1443:443 \
25 | # --name nextcloud-pod
26 | ExecStartPre=/usr/bin/podman pod create \
27 | --infra-conmon-pidfile %t/nextcloud-pod.pid \
28 | --pod-id-file %t/nextcloud-pod.pod-id \
29 | --exit-policy=stop \
30 | --userns keep-id:uid=33,gid=33 \
31 | --replace \
32 | --publish 127.0.0.1:4108:80 \
33 | --name nextcloud-pod
34 | ExecStart=/usr/bin/podman pod start \
35 | --pod-id-file %t/nextcloud-pod.pod-id
36 | ExecStop=/usr/bin/podman pod stop \
37 | --ignore \
38 | --pod-id-file %t/nextcloud-pod.pod-id \
39 | --time=10
40 | ExecStopPost=/usr/bin/podman pod rm \
41 | --ignore \
42 | --force \
43 | --pod-id-file %t/nextcloud-pod.pod-id
44 | PIDFile=%t/nextcloud-pod.pid
45 | Type=forking
46 |
47 | [Install]
48 | WantedBy=default.target
49 |
--------------------------------------------------------------------------------
/roles/podman/legacy/nextcloud.service.j2:
--------------------------------------------------------------------------------
1 | # nextcloud.service
2 |
3 | [Unit]
4 | Description=Podman nextcloud.service
5 | Documentation=man:podman-generate-systemd(1)
6 | Wants=network-online.target
7 | After=network-online.target
8 | RequiresMountsFor=%t/containers
9 | BindsTo=nextcloud-pod.service
10 | After=nextcloud-pod.service
11 |
12 | [Service]
13 | Environment=PODMAN_SYSTEMD_UNIT=%n
14 | Restart=on-failure
15 | RestartSec=5
16 | TimeoutStopSec=70
17 | #ExecStart=/usr/bin/podman run \
18 | # --cidfile=%t/%n.ctr-id \
19 | # --cgroups=no-conmon \
20 | # --rm \
21 | # --pod-id-file %t/nextcloud-pod.pod-id \
22 | # --sdnotify=conmon \
23 | # --detach \
24 | # --replace \
25 | # --label io.containers.autoupdate=registry \
26 | # --name=nextcloud \
27 | # --env PUID=1000 \
28 | # --env PGID=1000 \
29 | # --env TZ={{ TZ }} \
30 | # --volume {{ podman_user.nextcloud_config_dir }}:/config:Z \
31 | # --volume {{ podman_user.nextcloud_data_dir }}:/data:Z \
32 | # lscr.io/linuxserver/nextcloud:latest
33 | ExecStart=/usr/bin/podman run \
34 | --cidfile=%t/%n.ctr-id \
35 | --cgroups=no-conmon \
36 | --rm \
37 | --pod-id-file %t/nextcloud-pod.pod-id \
38 | --sdnotify=conmon \
39 | --detach \
40 | --replace \
41 | --label io.containers.autoupdate=registry \
42 | --sysctl net.ipv4.ip_unprivileged_port_start=80 \
43 | --name=nextcloud \
44 | --volume {{ podman_user.nextcloud_data_dir }}:/var/www/html/data:Z \
45 | --volume {{ podman_user.nextcloud_config_dir }}:/var/www/html:Z \
46 | docker.io/library/nextcloud:latest
47 | ExecStop=/usr/bin/podman stop \
48 | --ignore \
49 | --time=10 \
50 | --cidfile=%t/%n.ctr-id
51 | ExecStopPost=/usr/bin/podman rm \
52 | --force \
53 | --ignore \
54 | --time=10 \
55 | --cidfile=%t/%n.ctr-id
56 | Type=notify
57 | NotifyAccess=all
58 |
59 | [Install]
60 | WantedBy=default.target
61 |
--------------------------------------------------------------------------------
/roles/podman/legacy/nextcloud.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: nextcloud container config and data directory
3 | ansible.builtin.file:
4 | path: "{{ item }}"
5 | state: directory
6 | owner: "{{ podman_user.name}}"
7 | group: "{{ podman_user.name }}"
8 | mode: '0700'
9 | become: true
10 | loop:
11 | - "{{ podman_user.nextcloud_config_dir }}"
12 | - "{{ podman_user.nextcloud_data_dir }}"
13 | - "{{ podman_user.postgres_config_dir }}"
14 |
15 |
16 | - name: nextcloud-pod.service nextcloud.service postgres.service nextcloud-cron.service nextcloud-cron.timer
17 | ansible.builtin.template:
18 | src: "{{ item }}.j2"
19 | dest: "/home/{{ podman_user.name }}/.config/systemd/user/{{ item }}"
20 | owner: "{{ podman_user.name}}"
21 | group: "{{ podman_user.name }}"
22 | mode: '0600'
23 | become: true
24 | become_user: "{{ podman_user.name }}"
25 | loop:
26 | - nextcloud-pod.service
27 | - nextcloud.service
28 | - postgres.service
29 | - nextcloud-cron.service
30 | - nextcloud-cron.timer
31 |
32 | - name: systemctl --user daemon-reload
33 | ansible.builtin.systemd: daemon_reload=true scope=user
34 | become: true
35 | become_user: "{{ podman_user.name }}"
36 |
37 | - name: systemctl --user enable --now nextcloud-pod.service nextcloud.service postgres.service nextcloud-cron.timer
38 | ansible.builtin.systemd: name={{ item }} state=started enabled=true scope=user
39 | become: true
40 | become_user: "{{ podman_user.name }}"
41 | loop:
42 | - nextcloud-pod.service
43 | - nextcloud.service
44 | - postgres.service
45 | - nextcloud-cron.timer
46 |
47 | # 4108/tcp nextcloud port
48 | - name: add nextcloud firewalld service file nextcloud.xml
49 | ansible.builtin.copy:
50 |     content: |
51 |       <?xml version="1.0" encoding="utf-8"?>
52 |       <service>
53 |         <short>Nextcloud</short>
54 |         <description>Nextcloud</description>
55 |         <port protocol="tcp" port="4108"/>
56 |       </service>
57 | dest: /etc/firewalld/services/nextcloud.xml
58 | owner: root
59 | group: root
60 | mode: '0644'
61 | become: true
62 | register: nextcloud_firewalld_file
63 |
64 | - name: Reload firewalld when nextcloud.xml changed
65 | ansible.builtin.command: firewall-cmd --reload
66 | become: true
67 | when: nextcloud_firewalld_file.changed
68 |
69 | - name: Set firewall rules for nextcloud listening port (TCP)
70 | ansible.posix.firewalld:
71 | rich_rule: rule family="ipv4" source address="{{ reverse_proxy_ipv4 }}" service name="nextcloud" accept
72 | #zone: "{{ firewalld_default_zone }}"
73 | permanent: true
74 | immediate: true
75 | state: enabled
76 | become: true
77 | when: reverse_proxy_ipv4 is defined
78 |
--------------------------------------------------------------------------------
/roles/podman/legacy/postgres.service.j2:
--------------------------------------------------------------------------------
1 | # postgres.service
2 |
3 | [Unit]
4 | Description=Podman postgres.service
5 | Documentation=man:podman-generate-systemd(1)
6 | Wants=network-online.target
7 | After=network-online.target
8 | RequiresMountsFor=%t/containers
9 | {% if "nextcloud" in podman_user.containers %}
10 | BindsTo=nextcloud-pod.service
11 | After=nextcloud-pod.service
12 | {% endif %}
13 |
14 | [Service]
15 | Environment=PODMAN_SYSTEMD_UNIT=%n
16 | Restart=on-failure
17 | RestartSec=5
18 | TimeoutStopSec=70
19 | ExecStart=/usr/bin/podman run \
20 | --cidfile=%t/%n.ctr-id \
21 | --cgroups=no-conmon \
22 | --rm \
23 | {% if "nextcloud" in podman_user.containers %}
24 | --pod-id-file %t/nextcloud-pod.pod-id \
25 | {% endif %}
26 | --sdnotify=conmon \
27 | --detach \
28 | --replace \
29 | --label io.containers.autoupdate=registry \
30 | --name=postgres \
31 | {% if "nextcloud" in podman_user.containers %}
32 | --user 33:33 \
33 | {% else %}
34 | --user 1000:1000 \
35 | {% endif %}
36 | --volume {{ podman_user.postgres_config_dir }}:/var/lib/postgresql/data:Z \
37 | --env POSTGRES_DB={{ podman_user.db_name }} \
38 | --env POSTGRES_USER={{ podman_user.db_user }} \
39 | --env POSTGRES_PASSWORD='{{ podman_user.db_password }}' \
40 | docker.io/library/postgres:15-alpine
41 | ExecStop=/usr/bin/podman stop \
42 | --ignore \
43 | --time=10 \
44 | --cidfile=%t/%n.ctr-id
45 | ExecStopPost=/usr/bin/podman rm \
46 | --force \
47 | --ignore \
48 | --time=10 \
49 | --cidfile=%t/%n.ctr-id
50 | Type=notify
51 | NotifyAccess=all
52 |
53 | [Install]
54 | WantedBy=default.target
55 |
--------------------------------------------------------------------------------
/roles/podman/legacy/prowlarr.service.j2:
--------------------------------------------------------------------------------
1 | # container-prowlarr.service
2 |
3 | [Unit]
4 | Description=Podman container-prowlarr.service
5 | Documentation=man:podman-generate-systemd(1)
6 | Wants=network-online.target
7 | After=network-online.target
8 | RequiresMountsFor=%t/containers
9 |
10 | [Service]
11 | Environment=PODMAN_SYSTEMD_UNIT=%n
12 | Restart=on-failure
13 | TimeoutStopSec=70
14 | ExecStartPre=/bin/rm \
15 | -f %t/%n.ctr-id
16 | ExecStart=/usr/bin/podman run \
17 | --cidfile=%t/%n.ctr-id \
18 | --cgroups=no-conmon \
19 | --rm \
20 | --sdnotify=conmon \
21 | --detach \
22 | --replace \
23 | --label io.containers.autoupdate=registry \
24 | --uidmap 1000:0:1 \
25 | --uidmap 0:1:1000 \
26 | --uidmap 1001:1001:64536 \
27 | --name=prowlarr \
28 | --hostname=prowlarr \
29 | --env PUID=1000 \
30 | --env PGID=1000 \
31 | --env TZ={{ TZ }} \
32 | --publish 127.0.0.1:9696:9696/tcp \
33 | --volume {{ podman_user.prowlarr_config_dir }}:/config:Z \
34 | lscr.io/linuxserver/prowlarr:latest
35 | ExecStop=/usr/bin/podman stop \
36 | --ignore \
37 | --time=10 \
38 | --cidfile=%t/%n.ctr-id
39 | ExecStopPost=/usr/bin/podman rm \
40 | --force \
41 | --ignore \
42 | --time=10 \
43 | --cidfile=%t/%n.ctr-id
44 | Type=notify
45 | NotifyAccess=all
46 |
47 | [Install]
48 | WantedBy=default.target
49 |
--------------------------------------------------------------------------------
/roles/podman/legacy/prowlarr.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: prowlarr container config directory
3 | ansible.builtin.file:
4 | path: "{{ podman_user.prowlarr_config_dir }}"
5 | state: directory
6 | owner: "{{ podman_user.name }}"
7 | group: "{{ podman_user.name }}"
8 | mode: '0700'
9 | become: true
10 |
11 | - name: '*Arrs media directory'
12 | ansible.builtin.file:
13 | path: "{{ podman_user.arr_media_dir }}"
14 | state: directory
15 | owner: "{{ podman_user.name }}"
16 | group: "{{ podman_user.name }}"
17 | mode: '0755'
18 | become: true
19 |
20 | - name: prowlarr.service
21 | ansible.builtin.template:
22 | src: prowlarr.service.j2
23 | dest: "/home/{{ podman_user.name }}/.config/systemd/user/prowlarr.service"
24 | owner: "{{ podman_user.name }}"
25 | group: "{{ podman_user.name }}"
26 | mode: '0600'
27 | become: true
28 | become_user: "{{ podman_user.name }}"
29 |
30 | - name: systemctl --user daemon-reload
31 | ansible.builtin.systemd: daemon_reload=true scope=user
32 | become: true
33 | become_user: "{{ podman_user.name }}"
34 |
35 | - name: systemctl --user enable --now prowlarr.service
36 | ansible.builtin.systemd: name=prowlarr enabled=true state=started scope=user
37 | become: true
38 | become_user: "{{ podman_user.name }}"
39 |
--------------------------------------------------------------------------------
/roles/podman/legacy/radarr.service.j2:
--------------------------------------------------------------------------------
1 | # radarr.service
2 |
3 | [Unit]
4 | Description=Podman radarr.service
5 | Documentation=man:podman-generate-systemd(1)
6 | Wants=network-online.target
7 | After=network-online.target
8 | RequiresMountsFor=%t/containers
9 |
10 | [Service]
11 | Environment=PODMAN_SYSTEMD_UNIT=%n
12 | Restart=on-failure
13 | TimeoutStopSec=70
14 | ExecStartPre=/bin/rm \
15 | -f %t/%n.ctr-id
16 | ExecStart=/usr/bin/podman run \
17 | --cidfile=%t/%n.ctr-id \
18 | --cgroups=no-conmon \
19 | --rm \
20 | --sdnotify=conmon \
21 | --detach \
22 | --replace \
23 | --label io.containers.autoupdate=registry \
24 | --uidmap 1000:0:1 \
25 | --uidmap 0:1:1000 \
26 | --uidmap 1001:1001:64536 \
27 | --name=radarr \
28 | --hostname=radarr \
29 | --env PUID=1000 \
30 | --env PGID=1000 \
31 | --env TZ={{ TZ }} \
32 | --publish 127.0.0.1:7878:7878/tcp \
33 | --volume {{ podman_user.radarr_config_dir }}:/config:Z \
34 | --volume {{ podman_user.arr_media_dir }}:/data:rw \
35 | lscr.io/linuxserver/radarr:latest
36 | ExecStop=/usr/bin/podman stop \
37 | --ignore \
38 | --time=10 \
39 | --cidfile=%t/%n.ctr-id
40 | ExecStopPost=/usr/bin/podman rm \
41 | --force \
42 | --ignore \
43 | --time=10 \
44 | --cidfile=%t/%n.ctr-id
45 | Type=notify
46 | NotifyAccess=all
47 |
48 | [Install]
49 | WantedBy=default.target
50 |
--------------------------------------------------------------------------------
/roles/podman/legacy/radarr.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: radarr container config directory
3 | ansible.builtin.file:
4 | path: "{{ podman_user.radarr_config_dir }}"
5 | state: directory
6 | owner: "{{ podman_user.name }}"
7 | group: "{{ podman_user.name }}"
8 | mode: '0700'
9 | become: true
10 |
11 | - name: radarr.service
12 | ansible.builtin.template:
13 | src: radarr.service.j2
14 | dest: "/home/{{ podman_user.name }}/.config/systemd/user/radarr.service"
15 | owner: "{{ podman_user.name }}"
16 | group: "{{ podman_user.name }}"
17 | mode: '0600'
18 | become: true
19 | become_user: "{{ podman_user.name }}"
20 |
21 | - name: systemctl --user daemon-reload
22 | ansible.builtin.systemd: daemon_reload=true scope=user
23 | become: true
24 | become_user: "{{ podman_user.name }}"
25 |
26 | - name: systemctl --user enable --now radarr.service
27 | ansible.builtin.systemd: name=radarr enabled=true state=started scope=user
28 | become: true
29 | become_user: "{{ podman_user.name }}"
30 |
--------------------------------------------------------------------------------
/roles/podman/legacy/radicale.service.j2:
--------------------------------------------------------------------------------
# radicale.service — rootless Podman user unit for the Radicale CalDAV/CardDAV
# server, written in the style of `podman generate systemd` output.

[Unit]
Description=Podman radicale.service
Documentation=man:podman-generate-systemd(1)
Wants=network-online.target
After=network-online.target
RequiresMountsFor=%t/containers

[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=on-failure
TimeoutStopSec=70
# Remove any stale container-id file left behind by an unclean shutdown.
ExecStartPre=/bin/rm \
    -f %t/%n.ctr-id
ExecStart=/usr/bin/podman run \
    --cidfile=%t/%n.ctr-id \
    --cgroups=no-conmon \
    --rm \
    --sdnotify=conmon \
    --detach \
    --replace \
    --name radicale \
    --label io.containers.autoupdate=registry \
    --publish 127.0.0.1:5232:5232 \
    --userns=keep-id:uid=2999,gid=2999 \
    --read-only \
    --security-opt=no-new-privileges \
    --cap-drop ALL \
    --cap-add CHOWN \
    --cap-add SETUID \
    --cap-add SETGID \
    --cap-add KILL \
    --pids-limit 50 \
    --memory 256M \
    --health-cmd="curl --fail http://localhost:5232 || exit 1" \
    --health-interval=30s \
    --health-retries=3 \
    --volume {{ podman_user.radicale_data_dir }}:/data:Z \
    --volume {{ podman_user.radicale_config_dir }}:/config:Z,ro \
    docker.io/tomsquest/docker-radicale
ExecStop=/usr/bin/podman stop \
    --ignore \
    --time=10 \
    --cidfile=%t/%n.ctr-id
ExecStopPost=/usr/bin/podman rm \
    --force \
    --ignore \
    --time=10 \
    --cidfile=%t/%n.ctr-id
Type=notify
NotifyAccess=all

[Install]
WantedBy=default.target
--------------------------------------------------------------------------------
/roles/podman/legacy/radicale.yml:
--------------------------------------------------------------------------------
---
# Deploy the Radicale container as a systemd *user* service (legacy, pre-Quadlet).

- name: radicale container config/data directory
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0700'
  become: true
  loop:
    - "{{ podman_user.radicale_data_dir }}"
    - "{{ podman_user.radicale_config_dir }}"

- name: radicale.service
  ansible.builtin.template:
    src: radicale.service.j2
    dest: "/home/{{ podman_user.name }}/.config/systemd/user/radicale.service"
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0600'
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user daemon-reload
  ansible.builtin.systemd: daemon_reload=true scope=user
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user enable --now radicale.service
  ansible.builtin.systemd: name=radicale enabled=true state=started scope=user
  become: true
  become_user: "{{ podman_user.name }}"
--------------------------------------------------------------------------------
/roles/podman/legacy/sonarr.service.j2:
--------------------------------------------------------------------------------
# sonarr.service — rootless Podman user unit for Sonarr (linuxserver.io image),
# written in the style of `podman generate systemd` output.

[Unit]
Description=Podman sonarr.service
Documentation=man:podman-generate-systemd(1)
Wants=network-online.target
After=network-online.target
RequiresMountsFor=%t/containers

[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=on-failure
TimeoutStopSec=70
# Remove any stale container-id file left behind by an unclean shutdown.
ExecStartPre=/bin/rm \
    -f %t/%n.ctr-id
ExecStart=/usr/bin/podman run \
    --cidfile=%t/%n.ctr-id \
    --cgroups=no-conmon \
    --rm \
    --sdnotify=conmon \
    --detach \
    --replace \
    --label io.containers.autoupdate=registry \
    --uidmap 1000:0:1 \
    --uidmap 0:1:1000 \
    --uidmap 1001:1001:64536 \
    --name=sonarr \
    --hostname=sonarr \
    --env PUID=1000 \
    --env PGID=1000 \
    --env TZ={{ TZ }} \
    --publish 127.0.0.1:8989:8989/tcp \
    --volume {{ podman_user.sonarr_config_dir }}:/config:Z \
    --volume {{ podman_user.arr_media_dir }}:/data:rw \
    lscr.io/linuxserver/sonarr:latest
ExecStop=/usr/bin/podman stop \
    --ignore \
    --time=10 \
    --cidfile=%t/%n.ctr-id
ExecStopPost=/usr/bin/podman rm \
    --force \
    --ignore \
    --time=10 \
    --cidfile=%t/%n.ctr-id
Type=notify
NotifyAccess=all

[Install]
WantedBy=default.target
--------------------------------------------------------------------------------
/roles/podman/legacy/sonarr.yml:
--------------------------------------------------------------------------------
---
# Deploy the Sonarr container as a systemd *user* service (legacy, pre-Quadlet).

- name: sonarr container config directory
  ansible.builtin.file:
    path: "{{ podman_user.sonarr_config_dir }}"
    state: directory
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0700'
  become: true

- name: sonarr.service
  ansible.builtin.template:
    src: sonarr.service.j2
    dest: "/home/{{ podman_user.name }}/.config/systemd/user/sonarr.service"
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0600'
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user daemon-reload
  ansible.builtin.systemd: daemon_reload=true scope=user
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user enable --now sonarr.service
  ansible.builtin.systemd: name=sonarr enabled=true state=started scope=user
  become: true
  become_user: "{{ podman_user.name }}"
--------------------------------------------------------------------------------
/roles/podman/legacy/swag.service.j2:
--------------------------------------------------------------------------------
# swag.service — rootless Podman user unit for SWAG (nginx + certbot).
# Fix: added the ExecStartPre stale-cidfile cleanup that every sibling unit in
# this role has; without it a leftover %t/%n.ctr-id from an unclean shutdown
# can break the next start.

[Unit]
Description=Podman swag.service
Documentation=man:podman-generate-systemd(1)
Wants=network-online.target
After=network-online.target nss-lookup.target
RequiresMountsFor=%t/containers

[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=on-failure
TimeoutStopSec=70
# Remove any stale container-id file left behind by an unclean shutdown.
ExecStartPre=/bin/rm \
    -f %t/%n.ctr-id
ExecStart=/usr/bin/podman run \
    --cidfile=%t/%n.ctr-id \
    --cgroups=no-conmon \
    --rm \
    --sdnotify=conmon \
    --detach \
    --replace \
    --label io.containers.autoupdate=registry \
    --uidmap 1000:0:1 \
    --uidmap 0:1:1000 \
    --uidmap 1001:1001:64536 \
    --name=swag \
    --cap-add=NET_ADMIN \
    --env PUID=1000 \
    --env PGID=1000 \
    --env TZ={{ TZ }} \
    --network=slirp4netns:port_handler=slirp4netns \
    --publish 443:443/tcp \
    --volume {{ podman_user.swag_config_dir }}:/config:Z \
    --env URL={{ podman_user.swag_domain }} \
    --env VALIDATION=dns \
    --env SUBDOMAINS={{ podman_user.swag_subdomains }} \
    --env CERTPROVIDER=letsencrypt \
    --env DNSPLUGIN=cloudflare \
    --env EMAIL={{ podman_user.swag_email }} \
    --env ONLY_SUBDOMAINS=true \
    --env STAGING=false \
    --env PROPAGATION=60 \
{% if podman_user.swag_mods is defined %}
    --env DOCKER_MODS='{{ podman_user.swag_mods }}' \
{% endif %}
{% if podman_user.swag_CROWDSEC_API_KEY is defined %}
    --env CROWDSEC_API_KEY='{{ podman_user.swag_CROWDSEC_API_KEY }}' \
    --env CROWDSEC_LAPI_URL='{{ podman_user.swag_CROWDSEC_LAPI_URL }}' \
{% endif %}
    lscr.io/linuxserver/swag:latest
ExecStop=/usr/bin/podman stop \
    --ignore \
    --time=10 \
    --cidfile=%t/%n.ctr-id
ExecStopPost=/usr/bin/podman rm \
    --force \
    --ignore \
    --time=10 \
    --cidfile=%t/%n.ctr-id
Type=notify
NotifyAccess=all

[Install]
WantedBy=default.target
--------------------------------------------------------------------------------
/roles/podman/legacy/swag.yml:
--------------------------------------------------------------------------------
---
# Deploy the SWAG reverse proxy as a systemd *user* service (legacy, pre-Quadlet).

- name: swag container config directory
  ansible.builtin.file:
    path: "{{ podman_user.swag_config_dir }}"
    state: directory
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0700'
  become: true

# Rootless podman cannot bind ports below 1024 by default; lower the threshold
# to 443 so the container can publish HTTPS directly.
- name: Allow rootless podman access 443 port
  ansible.builtin.copy:
    content: |
      net.ipv4.ip_unprivileged_port_start=443
    dest: /etc/sysctl.d/unprivileged_port_start.conf
    owner: root
    group: root
    mode: '0644'
  become: true
  register: unprivileged_port_start

# Apply the sysctl immediately; the .conf file only takes effect on reboot.
- name: sysctl net.ipv4.ip_unprivileged_port_start=443
  ansible.builtin.command: sysctl net.ipv4.ip_unprivileged_port_start=443
  become: true
  when: unprivileged_port_start is changed

- name: swag.service
  ansible.builtin.template:
    src: swag.service.j2
    dest: "/home/{{ podman_user.name }}/.config/systemd/user/swag.service"
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0600'
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user daemon-reload
  ansible.builtin.systemd: daemon_reload=true scope=user
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user enable --now swag.service
  ansible.builtin.systemd: name=swag.service enabled=true state=started scope=user
  become: true
  become_user: "{{ podman_user.name }}"

- name: Set firewall rules for https
  ansible.posix.firewalld:
    rich_rule: rule family="ipv4" source address="{{ item }}" service name="https" accept
    #zone: "{{ firewalld_default_zone }}"
    permanent: true
    immediate: true
    state: enabled
  loop: "{{ podman_user.https_accept_source_ipv4 }}"
  become: true
  when:
    - podman_user.https_accept_source_ipv4 is defined
--------------------------------------------------------------------------------
/roles/podman/legacy/thelounge.service.j2:
--------------------------------------------------------------------------------
# thelounge.service — rootless Podman user unit for The Lounge IRC client.
# Fix: normalized `-f` / `--time 10` to the long `--force` / `--time=10` forms
# used by every sibling unit in this role (same podman behavior, consistent style).

[Unit]
Description=Podman thelounge.service
Documentation=man:podman-generate-systemd(1)
Wants=network-online.target
After=network-online.target
RequiresMountsFor=%t/containers

[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=on-failure
RestartSec=5
TimeoutStopSec=70
# Remove any stale container-id file left behind by an unclean shutdown.
ExecStartPre=/bin/rm \
    -f %t/%n.ctr-id
ExecStart=/usr/bin/podman run \
    --cidfile=%t/%n.ctr-id \
    --cgroups=no-conmon \
    --rm \
    --sdnotify=conmon \
    --detach \
    --replace \
    --label io.containers.autoupdate=registry \
    --uidmap 1000:0:1 \
    --uidmap 0:1:1000 \
    --uidmap 1001:1001:64536 \
    --env PUID=1000 \
    --env PGID=1000 \
    --env TZ={{ TZ }} \
    --name thelounge \
    --publish 127.0.0.1:9000:9000/tcp \
    --volume {{ podman_user.thelounge_config_dir }}:/config:Z \
    lscr.io/linuxserver/thelounge:latest
ExecStop=/usr/bin/podman stop \
    --ignore \
    --time=10 \
    --cidfile=%t/%n.ctr-id
ExecStopPost=/usr/bin/podman rm \
    --force \
    --ignore \
    --time=10 \
    --cidfile=%t/%n.ctr-id
Type=notify
NotifyAccess=all

[Install]
WantedBy=default.target
--------------------------------------------------------------------------------
/roles/podman/tasks/autobrr.yml:
--------------------------------------------------------------------------------
---
# Deploy the autobrr container via a Quadlet .container unit.

- name: autobrr container config directory
  ansible.builtin.file:
    path: "{{ podman_user.autobrr_config_dir }}"
    state: directory
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0700'
  become: true

- name: autobrr.container
  ansible.builtin.template:
    src: autobrr.container.j2
    dest: "/home/{{ podman_user.name }}/.config/containers/systemd/autobrr.container"
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0600'
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user daemon-reload
  ansible.builtin.systemd: daemon_reload=true scope=user
  become: true
  become_user: "{{ podman_user.name }}"

# Quadlet-generated services cannot be "enabled"; this only verifies/starts it.
# In check mode the generated unit does not exist yet, hence ignore_errors.
- name: systemctl --user enable --now autobrr.service
  ansible.builtin.systemd: name=autobrr.service enabled=true state=started scope=user
  become: true
  become_user: "{{ podman_user.name }}"
  ignore_errors: "{{ ansible_check_mode }}"
--------------------------------------------------------------------------------
/roles/podman/tasks/gluetun.yml:
--------------------------------------------------------------------------------
---
# Deploy the gluetun VPN-client container via a Quadlet .container unit.
# The container needs /dev/net/tun and kernel module loading under SELinux.

- name: setsebool -P container_use_devices=true
  ansible.posix.seboolean:
    name: container_use_devices
    state: true
    persistent: true
  # default('') guards hosts where SELinux facts are absent.
  when: ansible_selinux.status | default('') == "enabled"
  become: true

- name: setsebool -P domain_kernel_load_modules=true
  ansible.posix.seboolean:
    name: domain_kernel_load_modules
    state: true
    persistent: true
  when: ansible_selinux.status | default('') == "enabled"
  become: true

- name: gluetun.container
  ansible.builtin.template:
    src: gluetun.container.j2
    dest: "/home/{{ podman_user.name }}/.config/containers/systemd/gluetun.container"
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0600'
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user daemon-reload
  ansible.builtin.systemd: daemon_reload=true scope=user
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user enable --now gluetun.service
  # Explicit .service suffix for consistency with the other container tasks.
  ansible.builtin.systemd: name=gluetun.service enabled=true state=started scope=user
  become: true
  become_user: "{{ podman_user.name }}"
  ignore_errors: "{{ ansible_check_mode }}"
--------------------------------------------------------------------------------
/roles/podman/tasks/letsencrypt.yml:
--------------------------------------------------------------------------------
---
# Deploy a certbot (letsencrypt) container via Quadlet, run daily by a timer.

- name: letsencrypt container config directory
  ansible.builtin.file:
    path: "{{ podman_user.letsencrypt_config_dir }}"
    state: directory
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0700'
  become: true

- name: letsencrypt.container
  ansible.builtin.template:
    src: letsencrypt.container.j2
    dest: "/home/{{ podman_user.name }}/.config/containers/systemd/letsencrypt.container"
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0600'
  become: true
  become_user: "{{ podman_user.name }}"

- name: letsencrypt.timer
  ansible.builtin.copy:
    content: |
      [Unit]
      Description=letsencrypt certbot

      [Timer]
      OnCalendar=daily

      [Install]
      WantedBy=timers.target
    dest: "/home/{{ podman_user.name }}/.config/systemd/user/letsencrypt.timer"
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0600'
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user daemon-reload
  ansible.builtin.systemd: daemon_reload=true scope=user
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user enable letsencrypt.timer
  ansible.builtin.systemd: name=letsencrypt.timer enabled=true scope=user
  become: true
  become_user: "{{ podman_user.name }}"
  ignore_errors: "{{ ansible_check_mode }}"
--------------------------------------------------------------------------------
/roles/podman/tasks/main.yml:
--------------------------------------------------------------------------------
---
# Role entry point: install podman, then run per-user rootless setup for each
# entry in podman_users (each iteration sees its user dict as `podman_user`).

- name: Install podman and helper packages
  ansible.builtin.import_tasks: podman_install.yml

- name: Configure rootless podman per user
  ansible.builtin.include_tasks: podman_setup.yml
  loop: "{{ podman_users }}"
  loop_control:
    loop_var: podman_user
  when: podman_users is defined
--------------------------------------------------------------------------------
/roles/podman/tasks/nextcloud.yml:
--------------------------------------------------------------------------------
---
# Deploy Nextcloud AIO via Quadlet; the mastercontainer volume is declared as a
# .volume unit. Starting the service is intentionally left manual (see below).

- name: nextcloud-aio.container
  ansible.builtin.template:
    src: nextcloud-aio.container.j2
    dest: "/home/{{ podman_user.name }}/.config/containers/systemd/nextcloud-aio.container"
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0600'
  become: true

- name: nextcloud-aio-mastercontainer.volume
  ansible.builtin.copy:
    content: |
      [Volume]
      VolumeName=nextcloud_aio_mastercontainer
    dest: "/home/{{ podman_user.name }}/.config/containers/systemd/nextcloud-aio-mastercontainer.volume"
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0600'
  become: true

- name: systemctl --user daemon-reload
  ansible.builtin.systemd: daemon_reload=true scope=user
  become: true
  become_user: "{{ podman_user.name }}"

#- name: systemctl --user enable --now nextcloud-aio.service
#  ansible.builtin.systemd: name=nextcloud-aio.service state=started enabled=true scope=user
#  become: true
#  become_user: "{{ podman_user.name }}"
#  ignore_errors: "{{ ansible_check_mode }}"
--------------------------------------------------------------------------------
/roles/podman/tasks/podman_install.yml:
--------------------------------------------------------------------------------
---
# Install podman (plus per-distro helpers) on Arch, Debian and Fedora hosts.

- name: pacman -S podman aardvark-dns
  community.general.pacman:
    name:
      - podman
      - aardvark-dns
    state: present
  become: true
  when: ansible_distribution == "Archlinux"

- name: apt install podman
  ansible.builtin.apt: name=podman state=present
  become: true
  when: ansible_distribution == "Debian"

- name: dnf install podman
  ansible.builtin.dnf: name=podman state=present
  become: true
  when: ansible_distribution == "Fedora"

# install acl package for ansible to become other unprivileged user
- name: apt install acl
  ansible.builtin.apt: name=acl state=present
  become: true
  when: ansible_distribution == "Debian"

- name: dnf install acl
  ansible.builtin.dnf: name=acl state=present
  become: true
  when: ansible_distribution == "Fedora"

# Provides semanage & friends, needed for SELinux-related tasks on Fedora.
- name: dnf install policycoreutils-python-utils
  ansible.builtin.dnf: name=policycoreutils-python-utils state=present
  become: true
  when: ansible_distribution == "Fedora"

#- name: Get file system type of /
#  ansible.builtin.command: stat --file-system --format=%T /
#  become: true
#  register: root_fstype
#  changed_when: false
#  check_mode: false
#
#- name: Get file system type of ~/
#  ansible.builtin.command: "stat --file-system --format=%T {{ ansible_user_dir }}"
#  become: true
#  register: home_fstype
#  changed_when: false
#  check_mode: false
#
#- name: Set container storage driver
#  ansible.builtin.lineinfile:
#    path: /etc/containers/storage.conf
#    regexp: '^driver\s*='
#    line: 'driver = "btrfs"'
#  become: true
#  when:
#    - root_fstype.stdout == 'btrfs'
#    - home_fstype.stdout == 'btrfs'
--------------------------------------------------------------------------------
/roles/podman/tasks/podman_setup.yml:
--------------------------------------------------------------------------------
---
# Per-user rootless podman setup: dedicated login-disabled account, lingering,
# subuid/subgid ranges (systemd-homed hosts), user-unit directories, prune and
# auto-update timers, then one include per container listed for the user.
# Fix: bare `stat:`/`command:` now use the ansible.builtin FQCN like every
# other task in this role.

- name: create separate podman user
  # disable the podman account ( don't allow login )
  # shell: "/usr/bin/nologin"
  ansible.builtin.user:
    name: "{{ podman_user.name }}"
    uid: "{{ podman_user.uid }}"
    password: '!'
    password_lock: true
    create_home: true
  become: true
  when:
    - podman_user.name != ansible_user_id
    - podman_user.name != "root"

- name: Check lingering
  ansible.builtin.stat:
    path: "/var/lib/systemd/linger/{{ podman_user.name }}"
  register: podman_user_lingering
  become: true
  when:
    - podman_user.enable_lingering
    - podman_user.name != "root"

# Lingering lets the user's systemd instance (and its containers) run without
# an active login session.
- name: Enable lingering
  ansible.builtin.command: "loginctl enable-linger {{ podman_user.name }}"
  become: true
  when:
    - podman_user.enable_lingering
    - podman_user.name != "root"
    - not podman_user_lingering.stat.exists

# NOTE(review): the range math assumes uids start at 1000 and allocates
# disjoint 65536-wide blocks per user starting at 524288 — confirm against
# existing /etc/subuid entries on the host.
- name: Set /etc/subuid if using systemd-homed
  ansible.builtin.lineinfile:
    line: "{{ podman_user.name }}:{{ (podman_user.uid - 1000) * 65536 + 524288 }}:65536"
    regexp: "^{{ podman_user.name }}:"
    dest: /etc/subuid
    create: true
    owner: root
    group: root
    mode: '0644'
  become: true
  when:
    - ansible_distribution == "Archlinux"
    - homed

- name: Set /etc/subgid if using systemd-homed
  ansible.builtin.lineinfile:
    line: "{{ podman_user.name }}:{{ (podman_user.uid - 1000) * 65536 + 524288 }}:65536"
    regexp: "^{{ podman_user.name }}:"
    dest: /etc/subgid
    create: true
    owner: root
    group: root
    mode: '0644'
  become: true
  when:
    - ansible_distribution == "Archlinux"
    - homed

#- name: setup container configs directory
#  ansible.builtin.file:
#    path: "{{ podman_user.container_configs_dir }}"
#    state: directory
#    owner: "{{ podman_user.name }}"
#    group: "{{ podman_user.name }}"
#    mode: '0700'
#  become: true

- name: setup systemd user directory
  ansible.builtin.file:
    path: "/home/{{ podman_user.name }}/.config/systemd/user"
    state: directory
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0700'
  become: true
  become_user: "{{ podman_user.name }}"

- name: Create podman user unit directory
  ansible.builtin.file:
    path: "/home/{{ podman_user.name }}/.config/containers/systemd"
    state: directory
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0700'
  become: true
  become_user: "{{ podman_user.name }}"

- name: create podman-system-prune.service/timer
  ansible.builtin.template:
    src: "{{ item }}.j2"
    dest: "/home/{{ podman_user.name }}/.config/systemd/user/{{ item }}"
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0600'
  loop:
    - podman-system-prune.service
    - podman-system-prune.timer
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user enable podman-system-prune.timer
  ansible.builtin.systemd: name=podman-system-prune.timer enabled=true scope=user
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user enable podman-auto-update.timer
  ansible.builtin.systemd: name=podman-auto-update.timer enabled=true scope=user
  become: true
  become_user: "{{ podman_user.name }}"

- name: Include per-container tasks
  ansible.builtin.include_tasks: "{{ container }}.yml"
  loop: "{{ podman_user.containers }}"
  loop_control:
    loop_var: container
--------------------------------------------------------------------------------
/roles/podman/tasks/qbittorrent.yml:
--------------------------------------------------------------------------------
---
# Deploy the qBittorrent container via a Quadlet .container unit.

- name: qbittorrent container config directory
  ansible.builtin.file:
    path: "{{ podman_user.qbittorrent_config_dir }}"
    state: directory
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0700'
  become: true

- name: qbittorrent.container
  ansible.builtin.template:
    src: qbittorrent.container.j2
    dest: "/home/{{ podman_user.name }}/.config/containers/systemd/qbittorrent.container"
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0600'
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user daemon-reload
  ansible.builtin.systemd: daemon_reload=true scope=user
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user enable --now qbittorrent.service
  ansible.builtin.systemd: name=qbittorrent.service enabled=true state=started scope=user
  become: true
  become_user: "{{ podman_user.name }}"
  ignore_errors: "{{ ansible_check_mode }}"
--------------------------------------------------------------------------------
/roles/podman/tasks/syncthing.yml:
--------------------------------------------------------------------------------
---
# Deploy the Syncthing container via Quadlet and open its listening port.

- name: syncthing container config directory
  ansible.builtin.file:
    path: "{{ podman_user.syncthing_config_dir }}"
    state: directory
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0700'
  become: true

- name: syncthing.container
  ansible.builtin.template:
    src: syncthing.container.j2
    dest: "/home/{{ podman_user.name }}/.config/containers/systemd/syncthing.container"
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0600'
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user daemon-reload
  ansible.builtin.systemd: daemon_reload=true scope=user
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user enable --now syncthing.service
  ansible.builtin.systemd: name=syncthing enabled=true state=started scope=user
  become: true
  become_user: "{{ podman_user.name }}"
  ignore_errors: "{{ ansible_check_mode }}"


# 22000/tcp Syncthing Listening port
# NOTE(review): the XML body below was reconstructed — the markup had been
# stripped from this file; verify the port list against your deployment
# (Syncthing also uses 22000/udp and 21027/udp for discovery).
- name: add syncthing firewalld service file syncthing.xml
  ansible.builtin.copy:
    content: |
      <?xml version="1.0" encoding="utf-8"?>
      <service>
        <short>Syncthing</short>
        <description>Syncthing</description>
        <port protocol="tcp" port="22000"/>
      </service>
    dest: /etc/firewalld/services/syncthing.xml
    owner: root
    group: root
    mode: '0644'
  become: true
  register: syncthing_firewalld_file

# firewalld only picks up new service definitions after a reload.
- name: Reload firewalld when syncthing.xml changed
  ansible.builtin.command: firewall-cmd --reload
  become: true
  when: syncthing_firewalld_file.changed

- name: Set firewall rules for Syncthing Listening port (TCP)
  ansible.posix.firewalld:
    rich_rule: rule family="ipv4" source address="{{ item }}" service name="syncthing" accept
    #zone: "{{ firewalld_default_zone }}"
    permanent: true
    immediate: true
    state: enabled
  loop: "{{ podman_user.syncthing_accept_source_ipv4 }}"
  become: true
  when: podman_user.syncthing_accept_source_ipv4 is defined
--------------------------------------------------------------------------------
/roles/podman/tasks/thelounge.yml:
--------------------------------------------------------------------------------
---
# Deploy The Lounge IRC client container via a Quadlet .container unit.

- name: thelounge container config directory
  ansible.builtin.file:
    path: "{{ podman_user.thelounge_config_dir }}"
    state: directory
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0700'
  become: true

- name: thelounge.container
  ansible.builtin.template:
    src: thelounge.container.j2
    dest: "/home/{{ podman_user.name }}/.config/containers/systemd/thelounge.container"
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0600'
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user daemon-reload
  ansible.builtin.systemd: daemon_reload=true scope=user
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user enable --now thelounge.service
  ansible.builtin.systemd: name=thelounge.service enabled=true state=started scope=user
  become: true
  become_user: "{{ podman_user.name }}"
  ignore_errors: "{{ ansible_check_mode }}"
--------------------------------------------------------------------------------
/roles/podman/tasks/traefik.yml:
--------------------------------------------------------------------------------
---
# Deploy Traefik (and optional Tailscale sidecar) via Quadlet, plus a root-level
# copy-ssl timer that copies certificates out of the container's config tree.
# Fix: removed a duplicate `systemctl --user daemon-reload` task — the single
# reload before enabling the services covers all previously written units.

# Rootless podman cannot bind ports below 1024 by default; lower the threshold
# to 443 so the container can publish HTTPS directly.
- name: Allow rootless podman access 443 port
  ansible.builtin.copy:
    content: |
      net.ipv4.ip_unprivileged_port_start=443
    dest: /etc/sysctl.d/unprivileged_port_start.conf
    owner: root
    group: root
    mode: '0644'
  become: true
  register: unprivileged_port_start

# Apply the sysctl immediately; the .conf file only takes effect on reboot.
- name: sysctl net.ipv4.ip_unprivileged_port_start=443
  ansible.builtin.command: sysctl net.ipv4.ip_unprivileged_port_start=443
  become: true
  when: unprivileged_port_start is changed

- name: traefik config directory
  ansible.builtin.file:
    path: "{{ podman_user.traefik_config_dir }}"
    state: directory
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0700'
  become: true

- name: traefik static config file
  ansible.builtin.copy:
    src: "{{ podman_user.traefik_static_config }}"
    dest: "{{ podman_user.traefik_config_dir }}/static_conf.yml"
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0600'
  become: true

- name: traefik dynamic config file
  ansible.builtin.copy:
    src: "{{ podman_user.traefik_dynamic_config }}"
    dest: "{{ podman_user.traefik_config_dir }}/dynamic_conf.yml"
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0600'
  become: true

- name: traefik.container
  ansible.builtin.template:
    src: traefik.container.j2
    dest: "/home/{{ podman_user.name }}/.config/containers/systemd/traefik.container"
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0600'
  become: true
  become_user: "{{ podman_user.name }}"

- name: tailscale-traefik.container
  ansible.builtin.template:
    src: tailscale-traefik.container.j2
    dest: "/home/{{ podman_user.name }}/.config/containers/systemd/tailscale-traefik.container"
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0600'
  become: true
  become_user: "{{ podman_user.name }}"
  when:
    - podman_user.traefik_tailscale_enable is defined
    - podman_user.traefik_tailscale_enable

- name: copy-ssl.service
  ansible.builtin.template:
    src: copy-ssl.service.j2
    dest: "/etc/systemd/system/copy-ssl.service"
    owner: "root"
    group: "root"
    mode: '0600'
  become: true
  register: copy_ssl_service

- name: copy-ssl.timer
  ansible.builtin.copy:
    content: |
      [Unit]
      Description=Copy SSL certificates to Caddy

      [Timer]
      OnCalendar=00:10:00

      [Install]
      WantedBy=timers.target
    dest: "/etc/systemd/system/copy-ssl.timer"
    owner: "root"
    group: "root"
    mode: '0600'
  become: true

- name: systemctl daemon-reload
  ansible.builtin.systemd: daemon_reload=true
  become: true

- name: systemctl enable copy-ssl.timer
  ansible.builtin.systemd: name=copy-ssl.timer enabled=true
  become: true

- name: systemctl start copy-ssl.service
  ansible.builtin.systemd: name=copy-ssl.service state=started
  become: true
  when: copy_ssl_service.changed

- name: systemctl --user daemon-reload
  ansible.builtin.systemd: daemon_reload=true scope=user
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user enable --now traefik.service
  ansible.builtin.systemd: name=traefik.service state=started enabled=true scope=user
  become: true
  become_user: "{{ podman_user.name }}"
  ignore_errors: "{{ ansible_check_mode }}"

- name: systemctl --user enable --now tailscale-traefik.service
  ansible.builtin.systemd: name=tailscale-traefik.service state=started enabled=true scope=user
  become: true
  become_user: "{{ podman_user.name }}"
  when:
    - podman_user.traefik_tailscale_enable is defined
    - podman_user.traefik_tailscale_enable

- name: Set firewall rules for https
  ansible.posix.firewalld:
    rich_rule: rule family="ipv4" source address="{{ item }}" service name="https" accept
    #zone: "{{ firewalld_default_zone }}"
    permanent: true
    immediate: true
    state: enabled
  loop: "{{ podman_user.https_accept_source_ipv4 }}"
  become: true
  when:
    - podman_user.https_accept_source_ipv4 is defined
--------------------------------------------------------------------------------
/roles/podman/tasks/transmission.yml:
--------------------------------------------------------------------------------
---
# Deploy the Transmission container via a Quadlet .container unit.

- name: transmission container config directory
  ansible.builtin.file:
    path: "{{ podman_user.transmission_config_dir }}"
    state: directory
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0700'
  become: true

- name: transmission.container
  ansible.builtin.template:
    src: transmission.container.j2
    dest: "/home/{{ podman_user.name }}/.config/containers/systemd/transmission.container"
    owner: "{{ podman_user.name }}"
    group: "{{ podman_user.name }}"
    mode: '0600'
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user daemon-reload
  ansible.builtin.systemd: daemon_reload=true scope=user
  become: true
  become_user: "{{ podman_user.name }}"

- name: systemctl --user enable --now transmission.service
  ansible.builtin.systemd: name=transmission.service enabled=true state=started scope=user
  become: true
  become_user: "{{ podman_user.name }}"
  ignore_errors: "{{ ansible_check_mode }}"
--------------------------------------------------------------------------------
/roles/podman/tasks/unifi-init.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # One-time UniFi bootstrap: create the MongoDB users before the first start
3 | # of the unifi-db container (mongo only runs /docker-entrypoint-initdb.d
4 | # scripts while its data directory is still empty).
5 | - name: Create a tmp directory to store database credentials during initial setup
6 |   ansible.builtin.tempfile:
7 |     state: directory
8 |   become: true
9 |   become_user: "{{ podman_user.name }}"
10 |   register: unifi_db_tmp_dir
11 |
12 | - name: init-mongo.js for initial setup
13 |   ansible.builtin.template:
14 |     src: "unifi-db-init-mongo.js.j2"
15 |     dest: "{{ unifi_db_tmp_dir.path }}/init-mongo.js"
16 |     owner: "{{ podman_user.name }}"
17 |     group: "{{ podman_user.name }}"
18 |     mode: '0644'
19 |   become: true
20 |
21 | # The templates mount the init script only while the database is
22 | # uninitialized (they check unifi_database_dir.stat.exists).
23 | - name: unifi-db.container and unifi.container
24 |   ansible.builtin.template:
25 |     src: "{{ item }}.j2"
26 |     dest: "/home/{{ podman_user.name }}/.config/containers/systemd/{{ item }}"
27 |     owner: "{{ podman_user.name }}"
28 |     group: "{{ podman_user.name }}"
29 |     mode: '0600'
30 |   become: true
31 |   become_user: "{{ podman_user.name }}"
32 |   loop:
33 |     - unifi-db.container
34 |     - unifi.container
35 |
36 | - name: systemctl --user daemon-reload
37 |   ansible.builtin.systemd: daemon_reload=true scope=user
38 |   become: true
39 |   become_user: "{{ podman_user.name }}"
40 |
41 | - name: systemctl --user start unifi-db.service
42 |   ansible.builtin.systemd: name=unifi-db state=started scope=user
43 |   become: true
44 |   become_user: "{{ podman_user.name }}"
45 |   ignore_errors: "{{ ansible_check_mode }}"
46 |
47 | - name: systemctl --user start unifi.service
48 |   ansible.builtin.systemd: name=unifi state=started scope=user
49 |   become: true
50 |   become_user: "{{ podman_user.name }}"
51 |   ignore_errors: "{{ ansible_check_mode }}"
52 |
53 | # Re-stat so unifi.yml re-renders the quadlets without the init-only parts.
54 | - name: Reset variable unifi_database_dir
55 |   ansible.builtin.stat:
56 |     path: "{{ podman_user.unifi_db_dir }}"
57 |   register: unifi_database_dir
58 |
--------------------------------------------------------------------------------
/roles/podman/tasks/unifi.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Deploy the UniFi Network Application plus its MongoDB backend as rootless
3 | # Podman quadlets and open the required firewall ports.
4 | - name: Check unifi database directory
5 |   ansible.builtin.stat:
6 |     path: "{{ podman_user.unifi_db_dir }}"
7 |   register: unifi_database_dir
8 |
9 | - name: unifi database directory
10 |   ansible.builtin.file:
11 |     path: "{{ podman_user.unifi_db_dir }}"
12 |     state: directory
13 |     owner: "{{ podman_user.name }}"
14 |     group: "{{ podman_user.name }}"
15 |     mode: '0700'
16 |   become: true
17 |
18 | - name: unifi-controller container config directory
19 |   ansible.builtin.file:
20 |     path: "{{ podman_user.unifi_config_dir }}"
21 |     state: directory
22 |     owner: "{{ podman_user.name }}"
23 |     group: "{{ podman_user.name }}"
24 |     mode: '0700'
25 |   become: true
26 |
27 | # Seed the MongoDB users on the very first run (empty database directory).
28 | - name: First time setup
29 |   include_tasks: unifi-init.yml
30 |   when: not unifi_database_dir.stat.exists
31 |
32 | - name: unifi-db.container and unifi.container
33 |   ansible.builtin.template:
34 |     src: "{{ item }}.j2"
35 |     dest: "/home/{{ podman_user.name }}/.config/containers/systemd/{{ item }}"
36 |     owner: "{{ podman_user.name }}"
37 |     group: "{{ podman_user.name }}"
38 |     mode: '0600'
39 |   become: true
40 |   become_user: "{{ podman_user.name }}"
41 |   loop:
42 |     - unifi-db.container
43 |     - unifi.container
44 |
45 | - name: systemctl --user daemon-reload
46 |   ansible.builtin.systemd: daemon_reload=true scope=user
47 |   become: true
48 |   become_user: "{{ podman_user.name }}"
49 |
50 | - name: systemctl --user enable --now unifi-db.service
51 |   ansible.builtin.systemd: name=unifi-db enabled=true state=started scope=user
52 |   become: true
53 |   become_user: "{{ podman_user.name }}"
54 |   ignore_errors: "{{ ansible_check_mode }}"
55 |
56 | - name: systemctl --user enable --now unifi.service
57 |   ansible.builtin.systemd: name=unifi enabled=true state=started scope=user
58 |   become: true
59 |   become_user: "{{ podman_user.name }}"
60 |   ignore_errors: "{{ ansible_check_mode }}"
61 |
62 | # 3478/udp UniFi STUN port
63 | # 10001/udp UniFi AP discovery
64 | # 8080/tcp UniFi device communication
65 | - name: add unifi firewalld service file unifi.xml
66 |   ansible.builtin.copy:
67 |     content: |
68 |       <?xml version="1.0" encoding="utf-8"?>
69 |       <service>
70 |         <short>UniFi</short>
71 |         <description>UniFi controller.</description>
72 |         <port protocol="udp" port="3478"/>
73 |         <port protocol="udp" port="10001"/>
74 |         <port protocol="tcp" port="8080"/>
75 |       </service>
76 |     dest: /etc/firewalld/services/unifi.xml
77 |     owner: root
78 |     group: root
79 |     mode: '0644'
80 |   become: true
81 |   register: unifi_firewalld_file
82 |
83 | # firewalld only picks up new service definitions after a reload.
84 | - name: Reload firewalld when unifi.xml changed
85 |   ansible.builtin.command: firewall-cmd --reload
86 |   become: true
87 |   when: unifi_firewalld_file.changed
88 |
89 | - name: Enable firewall rules for UniFi
90 |   ansible.posix.firewalld:
91 |     rich_rule: rule family="ipv4" source address="{{ item }}" service name="unifi" accept
92 |     #zone: "{{ firewalld_default_zone }}"
93 |     permanent: true
94 |     immediate: true
95 |     state: enabled
96 |   loop: "{{ podman_user.unifi_accept_source_ipv4 }}"
97 |   become: true
98 |   when: podman_user.unifi_accept_source_ipv4 is defined
99 |
--------------------------------------------------------------------------------
/roles/podman/templates/autobrr.container.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=autobrr container
3 | {% if "gluetun" in podman_user.containers and podman_user.autobrr_gluetun_proxy %}
4 | Wants=gluetun.service
5 | After=gluetun.service
6 | Requires=gluetun.service
7 | {% endif %}
8 |
9 | [Container]
10 | ContainerName=autobrr
11 | Image=ghcr.io/autobrr/autobrr:latest
12 | AutoUpdate=registry
13 |
14 | User=1000:1000
15 | UserNS=keep-id:uid=1000,gid=1000
16 |
17 | Volume={{ podman_user.autobrr_config_dir }}:/config:Z
18 |
19 | HostName=autobrr
20 | {% if "gluetun" in podman_user.containers and podman_user.autobrr_gluetun_proxy %}
21 | Network=container:gluetun
22 | {% else %}
23 | PublishPort=127.0.0.1:7474:7474/tcp
24 | {% endif %}
25 |
26 | Environment=TZ={{ TZ }}
27 | Environment=AUTOBRR__HOST=0.0.0.0
28 |
29 |
30 | [Service]
31 | Restart=on-failure
32 | RestartSec=5
33 | RestartMaxDelaySec=1h
34 | RestartSteps=10
35 |
36 | # Extend Timeout to allow time to pull the image
37 | TimeoutStartSec=300
38 |
39 | [Install]
40 | WantedBy=default.target
41 |
--------------------------------------------------------------------------------
/roles/podman/templates/copy-ssl.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Copy SSL certificate to Traefik
3 |
4 | [Service]
5 | Restart=no
6 | Type=simple
7 | # Mirror the letsencrypt "live" directory into traefik's ssl directory,
8 | # dereferencing the cert symlinks (--copy-links) and chowning for the
9 | # traefik user so the container can read them.
10 | ExecStart=/usr/bin/rsync \
11 | --recursive \
12 | --copy-links \
13 | --delete \
14 | --chown=traefik:traefik \
15 | {{ podman_user.letsencrypt_config_dir }}/live/ {{ podman_user.traefik_config_dir }}/ssl
16 |
--------------------------------------------------------------------------------
/roles/podman/templates/gluetun.container.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=gluetun container
3 | Wants=network-online.target
4 | After=network-online.target nss-lookup.target
5 | {% if "transmission" in podman_user.containers %}
6 | Wants=transmission.service
7 | {% endif %}
8 | {% if "qbittorrent" in podman_user.containers %}
9 | Wants=qbittorrent.service
10 | {% endif %}
11 | {% if "autobrr" in podman_user.containers and podman_user.autobrr_gluetun_proxy %}
12 | Wants=autobrr.service
13 | {% endif %}
14 | {% if "thelounge" in podman_user.containers and podman_user.thelounge_gluetun_proxy %}
15 | Wants=thelounge.service
16 | {% endif %}
17 |
18 | [Container]
19 | ContainerName=gluetun
20 | Image=ghcr.io/qdm12/gluetun:v3
21 | AutoUpdate=registry
22 |
23 | AddCapability=NET_ADMIN
24 | AddDevice=/dev/net/tun:/dev/net/tun
25 |
26 | HostName=gluetun
27 | {% if "transmission" in podman_user.containers %}
28 | {% if podman_user.transmission_web_port is defined %}
29 | PublishPort=127.0.0.1:{{ podman_user.transmission_web_port }}:9091/tcp
30 | {% else %}
31 | PublishPort=127.0.0.1:9091:9091/tcp
32 | {% endif %}
33 | {% endif %}
34 | {% if "qbittorrent" in podman_user.containers %}
35 | {% if podman_user.qbittorrent_web_port is defined %}
36 | PublishPort=127.0.0.1:{{ podman_user.qbittorrent_web_port }}:{{ podman_user.qbittorrent_web_port }}/tcp
37 | {% else %}
38 | PublishPort=127.0.0.1:8090:8090/tcp
39 | {% endif %}
40 | {% endif %}
41 | {% if "autobrr" in podman_user.containers and podman_user.autobrr_gluetun_proxy %}
42 | PublishPort=127.0.0.1:7474:7474/tcp
43 | {% endif %}
44 | {% if "thelounge" in podman_user.containers and podman_user.thelounge_gluetun_proxy %}
45 | PublishPort=127.0.0.1:9000:9000/tcp
46 | {% endif %}
47 |
48 | Environment=TZ={{ TZ }}
49 | {% for item in podman_user.gluetun_vpn_provider_env %}
50 | Environment={{ item }}
51 | {% endfor %}
52 |
53 | {% if podman_user.gluetun_httpproxy is defined and podman_user.gluetun_httpproxy %}
54 | Environment=HTTPPROXY=on
55 | Environment=HTTPPROXY_STEALTH=on
56 | {% if podman_user.gluetun_httpproxy_port is defined %}
57 | PublishPort=127.0.0.1:{{ podman_user.gluetun_httpproxy_port }}:8888/tcp
58 | {% else %}
59 | PublishPort=127.0.0.1:8888:8888/tcp
60 | {% endif %}
61 |
62 | {% endif %}
63 |
64 | [Service]
65 | Restart=on-failure
66 | RestartSec=5
67 | RestartMaxDelaySec=1h
68 | RestartSteps=10
69 |
70 | # Remove gluetun container and all containers depend on gluetun
71 | ExecStartPre=-/usr/bin/podman rm --force --depend gluetun
72 |
73 | # Extend Timeout to allow time to pull the image
74 | TimeoutStartSec=300
75 |
76 | [Install]
77 | WantedBy=default.target
78 |
--------------------------------------------------------------------------------
/roles/podman/templates/letsencrypt.container.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=letsencrypt certbot container
3 | Wants=network-online.target
4 | After=network-online.target nss-lookup.target
5 |
6 | [Container]
7 | ContainerName=letsencrypt-certbot
8 | # NOTE(review): unlike the long-running containers this one has no
9 | # AutoUpdate=registry - presumably intentional for a one-shot job; confirm.
10 | Image=docker.io/certbot/dns-cloudflare:latest
11 |
12 | # cloudflare.ini (DNS API credentials) is expected inside this directory.
13 | Volume={{ podman_user.letsencrypt_config_dir }}:/etc/letsencrypt:Z
14 |
15 | HostName=letsencrypt
16 |
17 | # DNS-01 challenge through Cloudflare; --keep-until-expiring makes
18 | # repeated runs a no-op while the certificate is still valid.
19 | Exec=certonly \
20 | --dns-cloudflare \
21 | --dns-cloudflare-credentials /etc/letsencrypt/cloudflare.ini \
22 | --dns-cloudflare-propagation-seconds 60 \
23 | --email {{ podman_user.letsencrypt_email }} \
24 | {% for domain in podman_user.letsencrypt_domains %}
25 | --domains '{{ domain }}' \
26 | {% endfor %}
27 | --agree-tos \
28 | --keep-until-expiring \
29 | --expand \
30 | --non-interactive
31 |
32 |
33 | [Service]
34 | Restart=no
35 |
36 | # Extend Timeout to allow time to pull the image
37 | TimeoutStartSec=300
38 |
--------------------------------------------------------------------------------
/roles/podman/templates/nextcloud-aio.container.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Nextcloud AIO Master Container
3 | Documentation=https://github.com/nextcloud/all-in-one/blob/main/docker-rootless.md
4 | After=local-fs.target
5 | Requires=podman.socket
6 |
7 | [Container]
8 | ContainerName=nextcloud-aio-mastercontainer
9 | Image=docker.io/nextcloud/all-in-one:latest
10 | AutoUpdate=registry
11 | SecurityLabelDisable=true
12 |
13 | HostName=nextcloud-aio
14 | Network=bridge
15 | {% if podman_user.nextcloud_aio_port is defined %}
16 | PublishPort=127.0.0.1:{{ podman_user.nextcloud_aio_port }}:8080/tcp
17 | {% else %}
18 | PublishPort=127.0.0.1:11001:8080/tcp
19 | {% endif %}
20 |
21 | Volume=nextcloud_aio_mastercontainer:/mnt/docker-aio-config
22 | Volume=/run/user/{{ podman_user.uid }}/podman/podman.sock:/var/run/docker.sock:ro,Z
23 |
24 |
25 | Environment=APACHE_PORT=11000
26 | Environment=APACHE_IP_BINDING=0.0.0.0
27 | Environment=WATCHTOWER_DOCKER_SOCKET_PATH=/run/user/{{ podman_user.uid }}/podman/podman.sock
28 | {% if podman_user.nextcloud_skip_domain_validation is defined and podman_user.nextcloud_skip_domain_validation %}
29 | Environment=SKIP_DOMAIN_VALIDATION=true
30 | {% endif %}
31 | {% if podman_user.nextcloud_backup_retention is defined %}
32 | Environment=BORG_RETENTION_POLICY="{{ podman_user.nextcloud_backup_retention }}"
33 | {% endif %}
34 | {% if podman_user.nextcloud_memory_limit is defined %}
35 | Environment=NEXTCLOUD_MEMORY_LIMIT={{ podman_user.nextcloud_memory_limit }}
36 | {% endif %}
37 |
38 | [Service]
39 | Restart=on-failure
40 | RestartSec=5
41 | RestartMaxDelaySec=1h
42 | RestartSteps=10
43 |
44 | # Extend Timeout to allow time to pull the image
45 | TimeoutStartSec=300
46 |
47 | [Install]
48 | WantedBy=default.target
49 |
--------------------------------------------------------------------------------
/roles/podman/templates/podman-system-prune.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Remove all unused pods, containers, images, and networks
3 |
4 | [Service]
5 | # --filter "until=240h" limits pruning to objects unused for over 10 days.
6 | # Note: named volumes are NOT pruned (that would require --volumes).
7 | ExecStart=/usr/bin/podman system prune --all --force --filter "until=240h"
8 |
--------------------------------------------------------------------------------
/roles/podman/templates/podman-system-prune.timer.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=podman system prune timer
3 |
4 | [Timer]
5 | # Schedule comes from the per-user variable, e.g. "weekly".
6 | OnCalendar={{ podman_user.podman_system_prune_timer }}
7 |
8 | [Install]
9 | WantedBy=timers.target
10 |
--------------------------------------------------------------------------------
/roles/podman/templates/qbittorrent.container.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=qBittorrent container
3 | Wants=gluetun.service
4 | After=gluetun.service
5 | Requires=gluetun.service
6 |
7 | [Container]
8 | ContainerName=qbittorrent
9 | Image=lscr.io/linuxserver/qbittorrent:latest
10 | AutoUpdate=registry
11 |
12 | Volume={{ podman_user.qbittorrent_config_dir }}:/config:Z
13 | Volume={{ podman_user.qbittorrent_downloads_dir }}:/downloads:Z
14 |
15 | HostName=qbittorrent
16 | Network=container:gluetun
17 |
18 | Environment=PUID=1000
19 | Environment=PGID=1000
20 | Environment=TZ={{ TZ }}
21 | {% if podman_user.qbittorrent_web_port is defined %}
22 | Environment=WEBUI_PORT={{ podman_user.qbittorrent_web_port }}
23 | {% else %}
24 | Environment=WEBUI_PORT=8090
25 | {% endif %}
26 |
27 | UIDMap=1000:0:1
28 | UIDMap=0:1:1000
29 | UIDMap=1001:1001:64536
30 |
31 |
32 | [Service]
33 | Restart=on-failure
34 | RestartSec=5
35 | RestartMaxDelaySec=1h
36 | RestartSteps=10
37 |
38 | # Extend Timeout to allow time to pull the image
39 | TimeoutStartSec=300
40 |
41 | [Install]
42 | WantedBy=default.target
43 |
--------------------------------------------------------------------------------
/roles/podman/templates/syncthing.container.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Syncthing container
3 |
4 | [Container]
5 | ContainerName=syncthing
6 | Image=lscr.io/linuxserver/syncthing:latest
7 | AutoUpdate=registry
8 |
9 | Volume={{ podman_user.syncthing_config_dir }}:/config:Z
10 | {% for item in podman_user.syncthing_data_dirs %}
11 | Volume={{ item.src }}:{{ item.dest }}:Z
12 | {% endfor %}
13 |
14 | HostName=syncthing
15 | {% if podman_user.syncthing_web_port is defined %}
16 | PublishPort=127.0.0.1:{{ podman_user.syncthing_web_port }}:8384/tcp
17 | {% else %}
18 | PublishPort=127.0.0.1:8384:8384/tcp
19 | {% endif %}
20 | {% if podman_user.syncthing_sync_port is defined %}
21 | PublishPort={{ podman_user.syncthing_sync_port }}:22000/tcp
22 | {% else %}
23 | PublishPort=22000:22000/tcp
24 | {% endif %}
25 |
26 | Environment=PUID=1000
27 | Environment=PGID=1000
28 | Environment=TZ={{ TZ }}
29 |
30 | UIDMap=1000:0:1
31 | UIDMap=0:1:1000
32 | UIDMap=1001:1001:64536
33 |
34 |
35 | [Service]
36 | Restart=on-failure
37 | RestartSec=5
38 | RestartMaxDelaySec=1h
39 | RestartSteps=10
40 |
41 | # Extend Timeout to allow time to pull the image
42 | TimeoutStartSec=300
43 |
44 | [Install]
45 | WantedBy=default.target
46 |
--------------------------------------------------------------------------------
/roles/podman/templates/tailscale-traefik.container.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=tailscale container
3 | Wants=network-online.target
4 | After=network-online.target nss-lookup.target
5 | Wants=traefik.service
6 |
7 | [Container]
8 | ContainerName=tailscale-traefik
9 | Image=ghcr.io/tailscale/tailscale:latest
10 | AutoUpdate=registry
11 |
12 | AddCapability=NET_ADMIN
13 | AddDevice=/dev/net/tun:/dev/net/tun
14 |
15 | Volume={{ podman_user.tailscale_config_dir }}/:/var/lib/tailscale:Z
16 |
17 | HostName=tailscale-traefik
18 | Network=slirp4netns:port_handler=slirp4netns,allow_host_loopback=true
19 | PublishPort=443:443/tcp
20 |
21 | Environment=TS_STATE_DIR=/var/lib/tailscale
22 | {% if podman_user.tailscale_hostname is defined %}
23 | Environment=TS_HOSTNAME={{ podman_user.tailscale_hostname }}
24 | {% else %}
25 | Environment=TS_HOSTNAME=tailscale-traefik-container
26 | {% endif %}
27 | {% if podman_user.tailscale_args is defined %}
28 | # Only the last assignment of a repeated environment variable survives,
29 | # so all extra args must be joined into a single TS_EXTRA_ARGS value.
30 | Environment=TS_EXTRA_ARGS={{ podman_user.tailscale_args | join(' ') }}
31 | {% endif %}
32 |
33 |
34 | [Service]
35 | Restart=on-failure
36 | RestartSec=5
37 | RestartMaxDelaySec=1h
38 | RestartSteps=10
39 |
40 | # Remove tailscale-traefik container and all containers depend on tailscale-traefik
41 | ExecStartPre=-/usr/bin/podman rm --force --depend tailscale-traefik
42 |
43 | # Extend Timeout to allow time to pull the image
44 | TimeoutStartSec=300
45 |
46 | [Install]
47 | WantedBy=default.target
48 |
--------------------------------------------------------------------------------
/roles/podman/templates/thelounge.container.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=The Lounge container
3 | {% if "gluetun" in podman_user.containers and podman_user.thelounge_gluetun_proxy %}
4 | Wants=gluetun.service
5 | After=gluetun.service
6 | Requires=gluetun.service
7 | {% endif %}
8 |
9 | [Container]
10 | ContainerName=thelounge
11 | Image=lscr.io/linuxserver/thelounge:latest
12 | AutoUpdate=registry
13 |
14 | Volume={{ podman_user.thelounge_config_dir }}:/config:Z
15 |
16 | HostName=thelounge
17 | {% if "gluetun" in podman_user.containers and podman_user.thelounge_gluetun_proxy %}
18 | Network=container:gluetun
19 | {% else %}
20 | PublishPort=127.0.0.1:9000:9000/tcp
21 | {% endif %}
22 |
23 | Environment=PUID=1000
24 | Environment=PGID=1000
25 | Environment=TZ={{ TZ }}
26 |
27 | UIDMap=1000:0:1
28 | UIDMap=0:1:1000
29 | UIDMap=1001:1001:64536
30 |
31 |
32 | [Service]
33 | Restart=on-failure
34 | RestartSec=5
35 | RestartMaxDelaySec=1h
36 | RestartSteps=10
37 |
38 | # Extend Timeout to allow time to pull the image
39 | TimeoutStartSec=300
40 |
41 | [Install]
42 | WantedBy=default.target
43 |
--------------------------------------------------------------------------------
/roles/podman/templates/traefik.container.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=traefik container
3 | Wants=network-online.target
4 | After=network-online.target nss-lookup.target
5 | {% if podman_user.traefik_tailscale_enable is defined and podman_user.traefik_tailscale_enable %}
6 | Wants=tailscale-traefik.service
7 | After=tailscale-traefik.service
8 | Requires=tailscale-traefik.service
9 | {% endif %}
10 |
11 | [Container]
12 | ContainerName=traefik
13 | Image=docker.io/library/traefik:v2.10
14 | AutoUpdate=registry
15 |
16 | Volume={{ podman_user.traefik_config_dir }}/static_conf.yml:/etc/traefik/traefik.yml:Z,ro
17 | Volume={{ podman_user.traefik_config_dir }}/dynamic_conf.yml:/etc/traefik/dynamic_conf.yml:Z,ro
18 | Volume={{ podman_user.traefik_config_dir }}/ssl:/etc/traefik/ssl:Z,ro
19 |
20 | {% if podman_user.traefik_tailscale_enable is defined and podman_user.traefik_tailscale_enable %}
21 | Network=container:tailscale-traefik
22 | {% else %}
23 | Network=slirp4netns:port_handler=slirp4netns,allow_host_loopback=true
24 | PublishPort=443:443/tcp
25 | {% endif %}
26 |
27 |
28 | [Service]
29 | Restart=on-failure
30 | RestartSec=5
31 | RestartMaxDelaySec=1h
32 | RestartSteps=10
33 |
34 | # Extend Timeout to allow time to pull the image
35 | TimeoutStartSec=300
36 |
37 | [Install]
38 | WantedBy=default.target
39 |
--------------------------------------------------------------------------------
/roles/podman/templates/transmission.container.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Transmission container
3 | Wants=gluetun.service
4 | After=gluetun.service
5 | Requires=gluetun.service
6 |
7 | [Container]
8 | ContainerName=transmission
9 | Image=lscr.io/linuxserver/transmission:latest
10 | AutoUpdate=registry
11 |
12 | Volume={{ podman_user.transmission_config_dir }}:/config:Z
13 | Volume={{ podman_user.transmission_downloads_dir }}:/downloads:Z
14 | {% if podman_user.transmission_watch_dir is defined %}
15 | Volume={{ podman_user.transmission_watch_dir }}:/watch:Z
16 | {% endif %}
17 |
18 | HostName=transmission
19 | Network=container:gluetun
20 |
21 | Environment=PUID=1000
22 | Environment=PGID=1000
23 | Environment=TZ={{ TZ }}
24 | {% if podman_user.transmission_user is defined %}
25 | Environment=USER={{ podman_user.transmission_user }}
26 | Environment=PASS='{{ podman_user.transmission_pass }}'
27 | {% endif %}
28 |
29 | UIDMap=1000:0:1
30 | UIDMap=0:1:1000
31 | UIDMap=1001:1001:64536
32 |
33 |
34 | [Service]
35 | Restart=on-failure
36 | RestartSec=5
37 | RestartMaxDelaySec=1h
38 | RestartSteps=10
39 |
40 | # Extend Timeout to allow time to pull the image
41 | TimeoutStartSec=300
42 |
43 | [Install]
44 | WantedBy=default.target
45 |
--------------------------------------------------------------------------------
/roles/podman/templates/unifi-db-init-mongo.js.j2:
--------------------------------------------------------------------------------
1 | db.getSiblingDB("unifi").createUser({user: "unifi", pwd: "{{ podman_user.unifi_db_pass }}", roles: [{role: "dbOwner", db: "unifi"}]});
2 | db.getSiblingDB("unifi_stat").createUser({user: "unifi", pwd: "{{ podman_user.unifi_db_pass }}", roles: [{role: "dbOwner", db: "unifi_stat"}]});
3 |
--------------------------------------------------------------------------------
/roles/podman/templates/unifi-db.container.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=MongoDB for UniFi container
3 | Wants=unifi.service
4 | Before=unifi.service
5 |
6 | [Container]
7 | ContainerName=unifi-db
8 | Image=docker.io/mongo:7.0
9 | AutoUpdate=registry
10 |
11 | UserNS=keep-id
12 |
13 | Volume={{ podman_user.unifi_db_dir }}:/data/db:Z
14 | {% if not unifi_database_dir.stat.exists %}
15 | # First start only: mongo runs this script (rendered by unifi-init.yml)
16 | # to create the UniFi database users while /data/db is still empty.
17 | Volume={{ unifi_db_tmp_dir.path }}/init-mongo.js:/docker-entrypoint-initdb.d/init-mongo.js:Z,ro
18 | {% endif %}
19 |
20 | HostName=unifi-db
21 | # The unifi container joins this network namespace
22 | # (Network=container:unifi-db), so the UniFi application ports
23 | # (3478/udp STUN, 10001/udp discovery, 8080/tcp device communication)
24 | # are published here rather than on the unifi container itself.
25 | PublishPort=127.0.0.1:8443:8443/tcp
26 | PublishPort=3478:3478/udp
27 | PublishPort=10001:10001/udp
28 | PublishPort=8080:8080/tcp
29 |
30 |
31 | [Service]
32 | Restart=on-failure
33 | RestartSec=5
34 | RestartMaxDelaySec=1h
35 | RestartSteps=10
36 |
37 | # Remove unifi-db container and all containers depend on unifi-db
38 | ExecStartPre=-/usr/bin/podman rm --force --depend unifi-db
39 |
40 | # Extend Timeout to allow time to pull the image
41 | TimeoutStartSec=300
42 |
43 | [Install]
44 | WantedBy=default.target
45 |
--------------------------------------------------------------------------------
/roles/podman/templates/unifi.container.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=UniFi container
3 | Wants=unifi-db.service
4 | After=unifi-db.service
5 | Requires=unifi-db.service
6 |
7 | [Container]
8 | ContainerName=unifi
9 | Image=lscr.io/linuxserver/unifi-network-application:latest
10 | AutoUpdate=registry
11 |
12 | Volume={{ podman_user.unifi_config_dir }}:/config:Z
13 |
14 | HostName=unifi-network-application
15 | # Shares the database container's network namespace; the UniFi ports are
16 | # published by unifi-db.container instead of here.
17 | Network=container:unifi-db
18 |
19 | Environment=PUID=1000
20 | Environment=PGID=1000
21 | Environment=TZ={{ TZ }}
22 |
23 | # NOTE(review): appears to remap container uid 1000 onto the host user's
24 | # own uid (same mapping used by the other quadlets) - confirm.
25 | UIDMap=1000:0:1
26 | UIDMap=0:1:1000
27 | UIDMap=1001:1001:64536
28 |
29 | # First run only: connect with the MongoDB users seeded by
30 | # unifi-db-init-mongo.js; MONGO_HOST is localhost because the network
31 | # namespace is shared with unifi-db.
32 | {% if not unifi_database_dir.stat.exists %}
33 | Environment=MONGO_USER=unifi
34 | Environment=MONGO_PASS={{ podman_user.unifi_db_pass }}
35 | Environment=MONGO_HOST=localhost
36 | Environment=MONGO_PORT=27017
37 | Environment=MONGO_DBNAME=unifi
38 | {% endif %}
39 |
40 | [Service]
41 | Restart=on-failure
42 | RestartSec=5
43 | RestartMaxDelaySec=1h
44 | RestartSteps=10
45 |
46 | {% if not unifi_database_dir.stat.exists %}
47 | # Wait MongoDB initialization
48 | ExecStartPre=/usr/bin/sleep 5
49 |
50 | {% endif %}
51 | # Extend Timeout to allow time to pull the image
52 | TimeoutStartSec=300
53 |
54 | [Install]
55 | WantedBy=default.target
56 |
--------------------------------------------------------------------------------
/roles/systemd_networkd/README.md:
--------------------------------------------------------------------------------
1 | Set up [systemd-networkd](https://wiki.archlinux.org/title/Systemd-networkd).
2 | For single NIC static IP setup, specify the static IP address, gateway address, and DNS server address.
3 | For advanced setup, it will copy all configuration files inside the `{{ networkd_configs_dir }}` to `/etc/systemd/network`.
4 | The configuration file will have permission `640` with owner `root` and group `systemd-network`.
5 | This is for preventing the leaking of private keys when setting up WireGuard using systemd-networkd.
6 |
7 | ## Tasks
8 | ### Arch Linux
9 | - Remove default configuration file created by the [arch_install.sh](arch_install.sh) script.
10 | - Create a simple static IP configuration (if `{{ networkd_configs_dir }}` variable is undefined)
11 | or copy all configuration files under `{{ networkd_configs_dir }}` to `/etc/systemd/network`.
12 |
13 | ### Fedora
14 | - Install `systemd-networkd` and enable `systemd-resolved.service`.
15 | - Disable `NetworkManager.service`
16 | - Create a simple static IP configuration (if `{{ networkd_configs_dir }}` variable is undefined)
17 | or copy all configuration files under `{{ networkd_configs_dir }}` to `/etc/systemd/network`.
18 |
19 | ### Debian
20 | - Install `systemd-resolved.service`.
21 | - Remove `/etc/network/interfaces` configuration.
22 | - Create a simple static IP configuration (if `{{ networkd_configs_dir }}` variable is undefined)
23 | or copy all configuration files under `{{ networkd_configs_dir }}` to `/etc/systemd/network`.
24 |
25 |
26 | ## Variables
27 | ### Single NIC static IP
28 | ```yaml
29 | # NIC name
30 | static_nic: enp1s0
31 |
32 | # IP address with its prefix length
33 | static_ip: 192.168.122.2/24
34 |
35 | # Gateway address
36 | static_gateway: 192.168.122.1
37 |
38 | # DNS server address
39 | static_dns: 9.9.9.9
40 | ```
41 |
42 | ### Advanced setup
43 | ```yaml
44 | # Copy all configuration files under this directory to /etc/systemd/network
45 | networkd_configs_dir: "files/systemd-networkd/"
46 | ```
47 |
48 |
--------------------------------------------------------------------------------
/roles/systemd_networkd/tasks/Archlinux-prepare.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # arch_install.sh leaves a default 20-ethernet.network behind; remove it
3 | # so this role's configuration is the only one in effect (see role README).
4 | - name: Delete default network configuration on Archlinux
5 | ansible.builtin.file:
6 | path: /etc/systemd/network/20-ethernet.network
7 | state: absent
8 | become: true
9 | # Guard is redundant (main.yml already dispatches on distribution),
10 | # kept as a safety net.
11 | when: ansible_distribution == "Archlinux"
12 |
13 |
--------------------------------------------------------------------------------
/roles/systemd_networkd/tasks/Debian-prepare.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Debian ships systemd-resolved as a separate package.
3 | - name: apt install systemd-resolved
4 | ansible.builtin.apt: name=systemd-resolved state=present
5 | become: true
6 | when: ansible_distribution == "Debian"
7 |
8 | - name: Check /etc/network/interfaces on Debian
9 | ansible.builtin.stat:
10 | path: /etc/network/interfaces
11 | register: network_interfaces
12 | become: true
13 | when: ansible_distribution == "Debian"
14 |
15 | # Move the ifupdown config out of the way (kept as interfaces.save);
16 | # the stat guard above keeps this command task idempotent.
17 | - name: Remove /etc/network/interfaces on Debian
18 | ansible.builtin.command: mv /etc/network/interfaces /etc/network/interfaces.save
19 | become: true
20 | when: ansible_distribution == "Debian" and network_interfaces.stat.exists
18 |
--------------------------------------------------------------------------------
/roles/systemd_networkd/tasks/Fedora-prepare.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # systemd-networkd is not part of the default Fedora install.
3 | - name: dnf install systemd-networkd
4 | ansible.builtin.dnf: name=systemd-networkd state=present
5 | become: true
6 |
7 | # NOTE(review): disabled but not stopped - NetworkManager keeps managing
8 | # the link until reboot; presumably intentional to avoid dropping the
9 | # current connection mid-play. Confirm.
10 | - name: systemctl disable NetworkManager
11 | ansible.builtin.systemd: name=NetworkManager enabled=false
12 | become: true
13 |
14 | - name: systemctl start systemd-resolved
15 | ansible.builtin.systemd: name=systemd-resolved state=started
16 | become: true
17 |
18 | # Point /etc/resolv.conf at resolved's stub resolver.
19 | - name: ln -s /run/systemd/resolve/stub-resolv.conf /etc/resolv.conf
20 | ansible.builtin.file:
21 | src: /run/systemd/resolve/stub-resolv.conf
22 | dest: /etc/resolv.conf
23 | state: link
24 | become: true
25 |
26 |
21 |
--------------------------------------------------------------------------------
/roles/systemd_networkd/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Distro-specific preparation (package install / old config cleanup).
3 | - include_tasks: "{{ ansible_distribution }}-prepare.yml"
4 |
5 | # Simple single-NIC static IP setup, used when no directory of custom
6 | # networkd configs is supplied.
7 | - name: "Create {{ static_nic }}.network"
8 | ansible.builtin.template:
9 | src: en0.network.j2
10 | dest: /etc/systemd/network/{{ static_nic }}.network
11 | owner: root
12 | group: systemd-network
13 | mode: '0640'
14 | become: true
15 | when: networkd_configs_dir is not defined
16 |
17 | # Advanced setup: copy all user-provided configs. group systemd-network
18 | # with mode 0640 keeps secrets (e.g. WireGuard private keys) unreadable
19 | # to other users (see role README).
20 | - name: Copy systemd-networkd configs
21 | ansible.builtin.copy:
22 | src: "{{ networkd_configs_dir }}"
23 | dest: /etc/systemd/network/
24 | owner: root
25 | group: systemd-network
26 | mode: '0640'
27 | become: true
28 | when: networkd_configs_dir is defined
29 |
30 | - name: systemctl enable systemd-networkd
31 | ansible.builtin.systemd: name=systemd-networkd enabled=true
32 | become: true
33 |
34 | - name: systemctl enable systemd-resolved
35 | ansible.builtin.systemd: name=systemd-resolved enabled=true
36 | become: true
37 |
38 |
--------------------------------------------------------------------------------
/roles/systemd_networkd/templates/en0.network.j2:
--------------------------------------------------------------------------------
1 | [Match]
2 | Name={{ static_nic }}
3 |
4 | # Fully static addressing; DHCP is disabled explicitly.
5 | [Network]
6 | DHCP=no
7 | Address={{ static_ip }}
8 | Gateway={{ static_gateway }}
9 | DNS={{ static_dns }}
9 |
--------------------------------------------------------------------------------
/roles/wpa_supplicant/README.md:
--------------------------------------------------------------------------------
1 | Set up [wpa_supplicant](https://wiki.archlinux.org/title/Wpa_supplicant) __when using systemd-networkd__.
2 |
3 | ## Tasks
4 | ### Arch Linux
5 | - Install `wpa_supplicant`.
6 | - Copy wpa_supplicant configuration file.
7 | - Enable `wpa_supplicant@interface.service`.
8 |
9 | ### Fedora
10 | - Install `wpa_supplicant`.
11 | - Create `wpa_supplicant@.service` file.
12 | - Copy wpa_supplicant configuration file.
13 | - Enable `wpa_supplicant@interface.service`.
14 |
15 |
16 | ## Variables
17 | ```yaml
18 | # The wpa_supplicant configuration file
19 | wpa_supplicant_config_file: "files/wpa_supplicant.conf"
20 |
21 | # wireless NIC name
22 | wireless_interface: wlan0
23 | ```
24 |
25 |
--------------------------------------------------------------------------------
/roles/wpa_supplicant/files/wpa_supplicant@fedora.service:
--------------------------------------------------------------------------------
# Interface-templated wpa_supplicant unit. Fedora's package does not ship
# wpa_supplicant@.service, so the wpa_supplicant role installs this copy
# to /etc/systemd/system/wpa_supplicant@.service.
[Unit]
Description=WPA supplicant daemon (interface-specific version)
# Only start once the network device matching the instance name exists.
Requires=sys-subsystem-net-devices-%i.device
After=sys-subsystem-net-devices-%i.device
Before=network.target
Wants=network.target

# NetworkManager users will probably want the dbus version instead.

[Service]
Type=simple
# %I is the unescaped instance name (the interface, e.g. wlan0); the role
# places the matching config at /etc/wpa_supplicant/wpa_supplicant-<iface>.conf.
ExecStart=/usr/sbin/wpa_supplicant -c/etc/wpa_supplicant/wpa_supplicant-%I.conf -i%I

[Install]
WantedBy=multi-user.target
16 |
--------------------------------------------------------------------------------
/roles/wpa_supplicant/tasks/Archlinux_prepare.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: pacman -S wpa_supplicant
3 | community.general.pacman: name=wpa_supplicant state=present
4 | become: true
5 |
6 |
--------------------------------------------------------------------------------
/roles/wpa_supplicant/tasks/Fedora_prepare.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: dnf install wpa_supplicant
3 | ansible.builtin.dnf: name=wpa_supplicant state=present
4 | become: true
5 |
6 | # Fedora does not have wpa_supplicant@.service create one
7 | - name: Create wpa_supplicant@.service
8 | ansible.builtin.copy:
9 | src: wpa_supplicant@fedora.service
10 | dest: /etc/systemd/system/wpa_supplicant@.service
11 | owner: root
12 | group: root
13 | mode: '0644'
14 | become: true
15 |
16 | - name: systemctl daemon-reload
17 | ansible.builtin.systemd:
18 | daemon_reload: true
19 | become: true
20 |
21 |
--------------------------------------------------------------------------------
/roles/wpa_supplicant/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - include_tasks: "{{ ansible_distribution }}_prepare.yml"
3 |
4 | - name: Copy wpa_supplicant configs
5 | ansible.builtin.copy:
6 | src: "{{ wpa_supplicant_config_file }}"
7 | dest: "/etc/wpa_supplicant/wpa_supplicant-{{ wireless_interface }}.conf"
8 | owner: root
9 | group: root
10 | mode: '0600'
11 | become: true
12 |
13 | - name: systemctl enable --now wpa_supplicant@{{ wireless_interface }}.service
14 | ansible.builtin.systemd: name=wpa_supplicant@{{ wireless_interface }}.service state=started enabled=true
15 | become: true
16 |
--------------------------------------------------------------------------------
/tasks/update.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: pacman -Syu
3 | community.general.pacman:
4 | update_cache: true
5 | upgrade: true
6 | become: true
7 | when: ansible_distribution == "Archlinux"
8 |
9 | - name: dnf update
10 | ansible.builtin.dnf:
11 | name: "*"
12 | state: latest
13 | become: true
14 | when: ansible_distribution == "Fedora"
15 |
16 | - name: apt update && apt dist-upgrade
17 | ansible.builtin.apt:
18 | update_cache: true
19 | upgrade: dist
20 | become: true
21 | when: ansible_distribution == "Debian"
22 |
23 |
--------------------------------------------------------------------------------
/virsh_undefine.sh:
--------------------------------------------------------------------------------
#!/usr/bin/bash
# Remove a virtual machine and its storage.
#
# Usage: virsh_undefine.sh VM_name

if [[ -z $1 ]] ; then
    echo "ERROR, please provide VM name/domain."
    echo "virsh_undefine.sh VM_name"
    exit 1
fi

# Best-effort force-stop: destroy fails if the domain is already shut off,
# and we deliberately continue so a stopped VM can still be undefined.
virsh destroy "$1"
# Give libvirt a moment to settle before undefining.
sleep 1
# Remove the domain definition, its NVRAM (UEFI variable store) and its
# disk image; the script exits with this command's status.
virsh undefine "$1" --nvram --storage "/var/lib/libvirt/images/$1.qcow2"
13 |
--------------------------------------------------------------------------------
/virt-install_arch.sh:
--------------------------------------------------------------------------------
#!/usr/bin/bash
# Install an Arch Linux virtual machine with given name/domain.
# No graphics, serial console output only.
#
# Usage: virt-install_arch.sh VM_name
# NOTE(review): expects an ISO at /tmp/archlinux-<today>-x86_64.iso,
# presumably produced the same day by mkarchiso.sh -- confirm.

if [[ -z $1 ]] ; then
    echo "ERROR, please provide VM name/domain."
    echo "virt-install_arch.sh VM_name"
    exit 1
fi

# UEFI boot, VirtIO disk/network, host CPU passthrough (1 socket, 4 cores,
# 2 threads) and a default emulated TPM. The trailing backslash that used
# to follow "--tpm default" has been removed: it continued the command onto
# the next (blank) line, which would silently swallow any line added after it.
virt-install \
    --name "$1" \
    --memory 2048 \
    --sysinfo host \
    --cpu host-passthrough,cache.mode=passthrough,topology.sockets=1,topology.cores=4,topology.threads=2 \
    --graphics none \
    --autoconsole text \
    --os-variant name='archlinux' \
    --cdrom "/tmp/archlinux-$(date +'%Y.%m.%d')-x86_64.iso" \
    --network network=default,model.type=virtio \
    --boot uefi \
    --disk path="/var/lib/libvirt/images/$1.qcow2",size=16,bus=virtio \
    --tpm default
24 |
25 |
--------------------------------------------------------------------------------