├── .gitignore
├── caddy
│   └── docker-compose.yaml
├── fediverse
│   └── docker-compose.yaml
├── home
│   └── docker-compose.yaml
├── media
│   └── docker-compose.yaml
├── nocodb
│   └── docker-compose.yaml
├── readme.md
├── storage
│   └── docker-compose.yaml
├── vpn
│   └── docker-compose.yaml
├── warrior
│   └── docker-compose.yaml
└── web
    └── docker-compose.yaml

/.gitignore:
--------------------------------------------------------------------------------
# general
*/.settings.env
*/data/

# broken / testing
watchtower/

# fediverse
fediverse/.settings-ge.env
fediverse/.settings-se.env

# pihole
pihole/*

# utilities
utilities/*

# homeassistant
homeassistant/.shopping_list.json
homeassistant/android
homeassistant/*.pickle
homeassistant/deps/*
homeassistant/.uuid
homeassistant/*.log
homeassistant/*.db
homeassistant/*.sqlite
homeassistant/*.xml
homeassistant/.google.token
homeassistant/.HA_VERSION
homeassistant/known_devices.yaml
homeassistant/secrets.yaml
homeassistant/nest.conf
homeassistant/plex.conf
homeassistant/emulated_hue_ids.json
homeassistant/gas-meter.jpg
homeassistant/gas-meter-invert.jpg
homeassistant/gas-meter-optimised.jpg
homeassistant/automations/notify-coin-*
homeassistant/automations/notify-exchange-rate.yaml
homeassistant/home-assistant_v2.db-shm
homeassistant/home-assistant_v2.db-wal
homeassistant/.cloud/*
homeassistant/.storage
homeassistant/custom_components/**/__pycache__/
homeassistant/custom_components/**/.translations/
homeassistant/google_calendars.yaml
homeassistant/www/Plex_Recently_Added
homeassistant/ip_bans.yaml
homeassistant/home-assistant.log.1
homeassistant/home-assistant.log.fault
homeassistant/camera

--------------------------------------------------------------------------------
/caddy/docker-compose.yaml:
--------------------------------------------------------------------------------
version: '3.3'

services:
  caddy:
    image: caddy:latest
    restart: always
    container_name: caddy
    volumes:
      - ./data/Caddyfile:/etc/caddy/Caddyfile:Z
      - ./data/config:/config:Z
      - ./data/data:/data:Z
      - ./data/sites:/srv:Z
    ports:
      - 80:80
      - 443:443

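The `Caddyfile` mounted above is not tracked in the repo (it lives under `./data/`). As a rough sketch of the kind of content it might hold for the `.lan` names mentioned in the readme — the IP is a placeholder for the host's LAN address, and the ports come from Home Assistant's default and the media compose file:

```
homeassistant.lan {
    tls internal
    reverse_proxy 192.168.1.10:8123
}

miniflux.lan {
    tls internal
    reverse_proxy 192.168.1.10:8082
}
```

`tls internal` makes Caddy issue locally-trusted certificates instead of attempting a public ACME challenge for names that only resolve via the Pi-hole.
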
--------------------------------------------------------------------------------
/fediverse/docker-compose.yaml:
--------------------------------------------------------------------------------
# start: podman-compose up -d
x-podman:
  in_pod: false

services:
  gotosocial-se:
    image: superseriousbusiness/gotosocial:latest
    container_name: gotosocial-se
    hostname: gotosocialse
    env_file: ./.settings-se.env
    user: 1000:1000
    userns_mode: "keep-id:uid=1000,gid=1000"
    restart: always
    volumes:
      - ./data/db-se:/gotosocial/storage:Z
    networks:
      fediverse:
        ipv4_address: 172.2.1.2

  gotosocial-ge:
    image: superseriousbusiness/gotosocial:latest
    container_name: gotosocial-ge
    hostname: gotosocialge
    env_file: ./.settings-ge.env
    user: 1000:1000
    userns_mode: "keep-id:uid=1000,gid=1000"
    restart: always
    volumes:
      - ./data/db-ge:/gotosocial/storage:Z
    networks:
      fediverse:
        ipv4_address: 172.2.1.3

  cloudflared-fediverse:
    image: cloudflare/cloudflared:latest
    container_name: cloudflared-fediverse
    hostname: cloudflaredfediverse
    command: tunnel --no-autoupdate run --token ${CF_FEDIVERSE_TOKEN}
    restart: always
    depends_on:
      - gotosocial-se
      - gotosocial-ge
    networks:
      fediverse:
        ipv4_address: 172.2.1.4

networks:
  fediverse:
    driver: bridge
    ipam:
      driver: default
      config:
        - subnet: 172.2.1.0/16
          gateway: 172.2.1.1

--------------------------------------------------------------------------------
/home/docker-compose.yaml:
--------------------------------------------------------------------------------
services:
  mqtt:
    image: eclipse-mosquitto
    container_name: mqtt
    hostname: mqtt
    restart: always
    volumes:
      - "./data/mqtt/config/mosquitto.conf:/mosquitto/config/mosquitto.conf:ro,Z"
      - "./data/mqtt/config/password.txt:/mosquitto/config/password.txt:ro,Z"
      - "./data/mqtt/log:/mosquitto/log:Z"
      - "./data/mqtt/data:/mosquitto/data:Z"
    ports:
      - "1883:1883"
      - "9001:9001"

  esphome:
    image: esphome/esphome:latest
    container_name: esphome
    hostname: esphome
    restart: always
    env_file: ./.settings.env
    volumes:
      - "./data/esphome:/config:z"
    ports:
      - "6052:6052"

  homeassistant:
    image: homeassistant/home-assistant:latest
    container_name: homeassistant
    network_mode: host
    privileged: true
    restart: always
    volumes:
      - "/etc/localtime:/etc/localtime:ro"
      - "./data/homeassistant:/config:z"
      - "/run/user/1000/podman/podman.sock:/var/run/docker.sock:z"
    depends_on:
      - mqtt
      - esphome
      # - vosk
      - whisper
      - piper

  # vosk:
  #   image: rhasspy/wyoming-vosk
  #   container_name: vosk
  #   restart: always
  #   command: --data-dir=/data --data-dir=/share/vosk/models --sentences-dir=/share/vosk/sentences --correct-sentences=0 --language=uk --preload-language=uk
  #   env_file: ./.settings.env
  #   volumes:
  #     - "./data/vosk:/data:Z"
  #   ports:
  #     - "10300:10300"

  whisper:
    image: rhasspy/wyoming-whisper
    container_name: whisper
    restart: always
    command: --model tiny.en --language en
    env_file: ./.settings.env
    volumes:
      - "./data/whisper:/data:Z"
    ports:
      - "10300:10300"

  piper:
    image: rhasspy/wyoming-piper
    container_name: piper
    restart: always
    command: --voice en-gb-southern_english_female-low
    env_file: ./.settings.env
    volumes:
      - "./data/piper:/data:Z"
    ports:
      - "10200:10200"

  openwakeword:
    image: rhasspy/wyoming-openwakeword:latest
    container_name: openwakeword
    restart: always
    command: --custom-model-dir /custom
    env_file: ./.settings.env
    volumes:
      - "./data/wakeword:/custom"
    ports:
      - "10400:10400"

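The compose file above expects a `mosquitto.conf` and a `password.txt` under `./data/mqtt/config/`, neither of which is in the repo. A minimal sketch that matches the mounted paths and published ports could look like this (drop the websockets listener if you don't use it):

```
allow_anonymous false
password_file /mosquitto/config/password.txt

persistence true
persistence_location /mosquitto/data/
log_dest file /mosquitto/log/mosquitto.log

# plain MQTT
listener 1883

# MQTT over websockets
listener 9001
protocol websockets
```

The password file is the output of `mosquitto_passwd -c password.txt <user>`; since it is mounted read-only, generate it before starting the container.
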
--------------------------------------------------------------------------------
/media/docker-compose.yaml:
--------------------------------------------------------------------------------
version: '3.3'

services:

  miniflux:
    image: miniflux/miniflux:latest
    container_name: miniflux
    hostname: miniflux
    env_file: ./.settings.env
    restart: always
    ports:
      - 8082:8080
    depends_on:
      - db

  db:
    image: postgres:13.7
    container_name: miniflux_db
    restart: always
    env_file: ./.settings.env
    healthcheck:
      test: ["CMD", "pg_isready", "-U", "miniflux"]
      interval: 10s
      start_period: 30s
    volumes:
      - ./data/miniflux:/var/lib/postgresql/data:Z

  photoprism:
    image: photoprism/photoprism:latest
    container_name: photoprism
    hostname: photoprism
    restart: always
    env_file: ./.settings.env
    working_dir: /photoprism
    depends_on:
      - photoprism_db
    security_opt:
      - seccomp:unconfined
      - apparmor:unconfined
    ports:
      - 8084:2342
    volumes:
      - /media/sloan/data/Photos:/photoprism/originals:z
      - ./data/photoprism/storage:/photoprism/storage:Z

  photoprism_db:
    image: mariadb:10.8
    container_name: photoprism_db
    hostname: photoprismdb
    restart: always
    env_file: ./.settings.env
    security_opt: # see https://github.com/MariaDB/mariadb-docker/issues/434#issuecomment-1136151239
      - seccomp:unconfined
      - apparmor:unconfined
    command: mysqld --innodb-buffer-pool-size=512M --transaction-isolation=READ-COMMITTED --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci --max-connections=512 --innodb-rollback-on-timeout=OFF --innodb-lock-wait-timeout=120
    volumes:
      - ./data/photoprism/db:/var/lib/mysql:Z

  jellyfin:
    image: jellyfin/jellyfin:latest
    container_name: jellyfin
    hostname: jellyfin
    privileged: true
    restart: always
    env_file: ./.settings.env
    mem_limit: 1000m
    volumes:
      - /media/sloan/data/TV:/media/TV:ro
      - /media/sloan/data/Music:/media/Music:ro
      - /media/sloan/data/Films:/media/Films:ro
      - /media/sloan/data/Books:/media/Books:ro
      - ./data/jellyfin/cache:/cache:Z
      - ./data/jellyfin/config:/config:Z
    ports:
      - 8085:8096
    devices:
      - /dev/dri/renderD128:/dev/dri/renderD128
      - /dev/dri/card0:/dev/dri/card0

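The shared `.settings.env` for this pod is gitignored. For the Miniflux/Postgres pair specifically, the variables it needs look roughly like the following (values are placeholders, and the sketch assumes the database is reachable by its service name `db`); PhotoPrism, MariaDB and Jellyfin read their own settings from the same file:

```
# postgres
POSTGRES_USER=miniflux
POSTGRES_PASSWORD=changeme
POSTGRES_DB=miniflux

# miniflux
DATABASE_URL=postgres://miniflux:changeme@db/miniflux?sslmode=disable
RUN_MIGRATIONS=1
CREATE_ADMIN=1
ADMIN_USERNAME=admin
ADMIN_PASSWORD=changeme
```
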
--------------------------------------------------------------------------------
/nocodb/docker-compose.yaml:
--------------------------------------------------------------------------------
services:
  nocodb:
    image: nocodb/nocodb:latest
    container_name: nocodb
    hostname: nocodb
    restart: always
    env_file: ./.settings.env
    ports:
      - 8087:8080
    volumes:
      - ./data:/usr/app/data/:Z

--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
# HAM (Home Automation Machine)

This is version two of HAM ([see version one](https://github.com/scottsweb/ham/tree/master)). The host machine now runs [Fedora Silverblue](https://silverblue.fedoraproject.org/) and uses [Podman](https://podman.io/) instead of Docker. This allows the containers to run rootless and gives me a slightly more stable system upon which to build my #homelab. I have also upgraded from a Raspberry Pi to an x86 system with a small footprint and low power demands.

I am mostly just publishing the relevant `docker-compose` / `podman-compose` files, grouped into pods around certain pieces of functionality such as home automation or media. I think this makes more sense, especially as more of my Home Assistant config moves from YAML to the database.

## General / OS tweaks

### Packages

I layer the following packages:

```
rpm-ostree install sshfs docker-compose podman-docker podman-compose wireguard-tools cronie
```

`podman-docker` allows you to use the [Monitor Docker Home Assistant component](https://github.com/ualex73/monitor_docker) for automating and monitoring containers.

### Podman

Enable the Podman socket and set the `DOCKER_HOST` environment variable for more Docker-like behaviour:

```
systemctl --user enable podman.socket
systemctl --user start podman.socket
systemctl --user status podman.socket
```

Add the following to your `~/.bash_profile`:

```
export DOCKER_HOST=unix:///run/user/$UID/podman/podman.sock
```

Reference: [Use Docker Compose with Podman](https://fedoramagazine.org/use-docker-compose-with-podman-to-orchestrate-containers-on-fedora/)

Test the Docker API with:

```
sudo curl -H "Content-Type: application/json" --unix-socket /run/user/1000/podman/podman.sock http://localhost/_ping
```

Reference: [Using Podman and Docker Compose](https://www.redhat.com/sysadmin/podman-docker-compose)

Start the Podman restart service (it restarts containers set to `restart: always` after a reboot):

```
systemctl --user enable podman-restart.service
systemctl --user start podman-restart.service
```

With the release of Podman 5 (Fedora 40), there are [some breaking changes with the switch from slirp4netns to pasta](https://blog.podman.io/2024/03/podman-5-0-breaking-changes-in-detail/). This caused two of my containers to break: Caddy and WireGuard. I still need to read more into this, but for now the fix is to switch back to slirp4netns for container networking by adding the following to `~/.config/containers/containers.conf`:

```
[network]
default_rootless_network_cmd = "slirp4netns"
```

After a reboot the containers behaved as before.

### Allow long running tasks

As Silverblue is a desktop OS, it tries to shut down long-running tasks (including Podman containers). This can be turned off by running `loginctl enable-linger`; check the status with `ls /var/lib/systemd/linger`, then reboot.

### Connectivity check

Fedora has a built-in connectivity check that phones home rather frequently. It's probably more useful on a system that uses WiFi, but as this machine is connected via Ethernet I decided to turn it off. `sudo nano /etc/NetworkManager/NetworkManager.conf`:

```
[connectivity]
enabled=false
uri=http://fedoraproject.org/static/hotspot.txt
response=OK
interval=300
```

Then run `systemctl restart NetworkManager` for the changes to be picked up.

### System time

I found that my system time was drifting out of sync, so I enabled NTP:

```
timedatectl set-ntp yes
timedatectl
```

Reference: [Configuring date and time](https://docs.fedoraproject.org/en-US/fedora/latest/system-administrators-guide/basic-system-configuration/Configuring_the_Date_and_Time/)

### SSH

Tweak some SSH settings to restrict access:

```
PermitRootLogin no
PermitEmptyPasswords no
X11Forwarding no
PasswordAuthentication no
AllowUsers username@192.168.1.* username@10.80.x.x (example for more IPs)
```

These tweaks can be added to `/etc/ssh/sshd_config.d/50-redhat.conf` and applied with `sudo systemctl reload sshd`.

### Cron

`cronie` is a layered package (`rpm-ostree install cronie`).

```
sudo nano /etc/cron.allow
# add your username

systemctl enable crond.service
systemctl start crond.service

crontab -e
```

Reference: [Scheduling tasks with Cron](https://fedoramagazine.org/scheduling-tasks-with-cron/), [Automating System Tasks](https://docs.fedoraproject.org/en-US/fedora/latest/system-administrators-guide/monitoring-and-automation/Automating_System_Tasks/)

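The actual cron jobs are not part of this repo, but purely as an illustration of the format, a weekly tidy-up of unused container images could look like:

```
# minute hour day-of-month month day-of-week  command
0 4 * * 1 podman image prune -af
```
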
### USB sleep / Auto suspend

To save power, Fedora will suspend USB devices. We need to turn this off for deCONZ and the Zigbee USB stick (ConBee II):

```
sudo rpm-ostree kargs --append=usbcore.autosuspend=-1

# check
cat /sys/module/usbcore/parameters/autosuspend
```

### Gnome suspends after 15 minutes

Since Fedora 38, the server started auto-suspending after 15 minutes. This was due to a settings change that can be adjusted with:

```
# check state of power settings
sudo -u gdm dbus-run-session gsettings list-recursively org.gnome.settings-daemon.plugins.power | grep sleep

# set to 0 to disable autosuspend on power
sudo -u gdm dbus-run-session gsettings set org.gnome.settings-daemon.plugins.power sleep-inactive-ac-timeout 0
```

Reference: [Gnome suspends after 15 minutes](https://discussion.fedoraproject.org/t/gnome-suspends-after-15-minutes-of-user-inactivity-even-on-ac-power/79801)

## Containers

### Caddy

I am using Caddy as a local reverse proxy. Along with the Pi-hole, it allows custom domains like `homeassistant.lan` for each of the services. Caddy needs access to ports 80 and 443, so the firewall needs to be opened:

```
sudo firewall-cmd --zone=FedoraWorkstation --add-service=http
sudo firewall-cmd --zone=FedoraWorkstation --add-service=http --permanent
sudo firewall-cmd --zone=FedoraWorkstation --add-service=https
sudo firewall-cmd --zone=FedoraWorkstation --add-service=https --permanent
```

`firewall-cmd --get-default-zone` will let you know which zone you are currently using.

### Pi-hole

Pi-hole also needs a few tweaks (including a hole in the local firewall):

```
sudo sysctl net.ipv4.ip_unprivileged_port_start=53
sudo nano /etc/sysctl.conf
# add net.ipv4.ip_unprivileged_port_start=53

sudo nano /etc/systemd/resolved.conf
# add DNSStubListener=no

sudo systemctl restart systemd-resolved
sudo systemctl restart NetworkManager

sudo firewall-cmd --zone=FedoraWorkstation --add-port=53/udp
sudo firewall-cmd --zone=FedoraWorkstation --add-port=53/tcp
sudo firewall-cmd --zone=FedoraWorkstation --add-port=67/udp
sudo firewall-cmd --permanent --zone=FedoraWorkstation --add-port=53/udp
sudo firewall-cmd --permanent --zone=FedoraWorkstation --add-port=53/tcp
sudo firewall-cmd --permanent --zone=FedoraWorkstation --add-port=67/udp
```

Reference: [Using firewalld](https://docs.fedoraproject.org/en-US/quick-docs/firewalld/), [Running Pi-hole in a Podman container](https://jreypo.io/2021/03/12/running-pihole-as-a-podman-container-in-fedora/)

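The Pi-hole compose file itself is gitignored, so it is not published here. A minimal sketch in the same style as the other pods might look something like the following — the image options, web port mapping and data paths are assumptions and should be checked against the Pi-hole container documentation:

```
services:
  pihole:
    image: pihole/pihole:latest
    container_name: pihole
    hostname: pihole
    restart: always
    env_file: ./.settings.env
    ports:
      - 53:53/tcp
      - 53:53/udp
      - 8053:80/tcp
    volumes:
      - ./data/etc-pihole:/etc/pihole:Z
      - ./data/etc-dnsmasq.d:/etc/dnsmasq.d:Z
```

The `ip_unprivileged_port_start` sysctl above is what lets the rootless container bind port 53 in the first place.
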
### Home Assistant

The `nmap` scanner has to run in unprivileged mode. To do this, modify the Nmap Tracker options in the Home Assistant UI and add `--unprivileged` to the raw configurable scan options.

The container needs access to USB, which is a little tricky with Podman. You need to create a udev rule that changes the group and owner of the USB device when it's plugged in so the container can access it.

```
# set policy for containers to use USB devices in SELinux
setsebool -P container_use_devices on

# check the current owner of the device
ls -l /dev/ttyUSB0

# find the vendor and product IDs, then create a udev rule
# that changes the owner of /dev/ttyUSB0 to your username
lsusb
sudo nano /etc/udev/rules.d/99-skyconnect.rules

# add the following to the rule file (based on what you get from lsusb)
SUBSYSTEM=="tty", ATTRS{idVendor}=="10c4", ATTRS{idProduct}=="ea60", OWNER="username", GROUP="username"

# apply changes
sudo udevadm control --reload-rules
sudo udevadm trigger
```

Reference: [Access USB from rootless container](https://bugzilla.redhat.com/show_bug.cgi?id=1770553), [udev rule tips](https://gist.github.com/edro15/1c6cd63894836ed982a7d88bef26e4af)

### Samba

The [Samba container](https://github.com/crazy-max/docker-samba) requires a hole in the firewall:

```
sudo dnf install samba
sudo systemctl enable smb --now
firewall-cmd --get-active-zones
sudo firewall-cmd --permanent --zone=FedoraWorkstation --add-service=samba
sudo firewall-cmd --reload
```

Reference: [Setting up Samba on Fedora](https://docs.fedoraproject.org/en-US/quick-docs/samba/)

### VPN / WireGuard

It took some time to get WireGuard running rootless, but the `docker-compose.yaml` file in the `vpn` folder is now working. [I spent a great deal of time experimenting with this](https://github.com/containers/podman/issues/15120) and I cannot exactly remember all the steps I took. I think the main thing you will need to do is enable the kernel module for WireGuard if it's not already enabled... and a few others for IP management:

```
# see which modules are loaded
lsmod

# enable required kernel modules
sudo touch /etc/modules-load.d/wireguard.conf
```

Add the following to `wireguard.conf`:

```
# load wireguard at boot
wireguard

# modules for nat
ip_tables
iptable_filter
iptable_nat
xt_MASQUERADE
xt_nat
```

Reboot and the container should now start.

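The compose file in the `vpn` folder publishes `51820/udp` (the tunnel) and `51821/tcp` (the wg-easy UI). If the tunnel is not reachable from other machines, those ports probably also need opening in firewalld, along the same lines as the other services (assuming the same zone as above):

```
sudo firewall-cmd --zone=FedoraWorkstation --add-port=51820/udp
sudo firewall-cmd --permanent --zone=FedoraWorkstation --add-port=51820/udp
```

Only open `51821/tcp` as well if the web UI should be reachable from the rest of the network.
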
## Useful links

* [User IDs and (rootless) containers with Podman](https://blog.christophersmart.com/2021/01/26/user-ids-and-rootless-containers-with-podman/)
* [Debugging volumes in rootless containers](https://www.redhat.com/sysadmin/debug-rootless-podman-mounted-volumes)
* [Using volumes with rootless Podman](https://www.tutorialworks.com/podman-rootless-volumes/)
* [Podman troubleshooting](https://github.com/containers/podman/blob/main/troubleshooting.md)
* [When to use :z or :Z on Podman mounts](https://unix.stackexchange.com/questions/651198/podman-volume-mounts-when-to-use-the-z-or-z-suffix)
* [Tips on fixing SELinux labels](https://unix.stackexchange.com/questions/240813/how-to-remove-selinux-label/240884#240884?s=bfed9cf1b8634c10aad788109dbd0930)
* [Updating images with compose](https://stackoverflow.com/questions/49316462/how-to-update-existing-images-with-docker-compose)
* [LaMetric icon reference](https://developer.lametric.com/icons)
* [Fix excessive ASUS router requests to dns.msftncsi.com](https://www.healey.io/blog/excessive-dns-msftncsi-com-requests/)

--------------------------------------------------------------------------------
/storage/docker-compose.yaml:
--------------------------------------------------------------------------------
x-podman:
  in_pod: false

services:
  jottacloud:
    image: bluet/jottacloud:latest
    container_name: jottacloud
    hostname: jottacloud
    env_file: ./.settings.env
    restart: always
    volumes:
      - /media/sloan/data/Music/:/backup/Music:ro
      - /media/sloan/data/Photos/:/backup/Photos:ro
      - /media/sloan/data/TV/:/backup/TV:ro
      - /media/sloan/data/Films/:/backup/Films:ro
      - /media/sloan/data/Software/:/backup/Software:z,ro
      - ./data/jottacloud:/data/jottad:Z

  samba:
    image: crazymax/samba:latest
    container_name: samba
    restart: always
    env_file: ./.settings.env
    network_mode: host
    volumes:
      - ./data/samba:/data:Z
      - /media/sloan/data/Software:/samba/games:z,ro
      - /media/sloan/data/Photos/Uploads:/samba/uploads:z

--------------------------------------------------------------------------------
/vpn/docker-compose.yaml:
--------------------------------------------------------------------------------
version: "3.3"

services:
  wireguard:
    image: ghcr.io/wg-easy/wg-easy
    container_name: wireguard
    hostname: wireguard
    restart: always
    env_file: ./.settings.env
    sysctls:
      - net.ipv4.conf.all.forwarding=1
      - net.ipv4.conf.all.src_valid_mark=1
    ports:
      - 51820:51820/udp
      - 51821:51821/tcp
    cap_add:
      - NET_ADMIN
      - NET_RAW
      - SYS_MODULE
    volumes:
      - ./data/wireguard:/etc/wireguard:Z

--------------------------------------------------------------------------------
/warrior/docker-compose.yaml:
--------------------------------------------------------------------------------
version: '3.3'

services:
  warrior:
    image: atdr.meo.ws/archiveteam/warrior-dockerfile
    container_name: warrior
    hostname: warrior
    env_file: ./.settings.env
    restart: on-failure
    volumes:
      - ./data/warrior:/data/data:Z
    ports:
      - "8001:8001"

--------------------------------------------------------------------------------
/web/docker-compose.yaml:
--------------------------------------------------------------------------------
version: "3.3"

services:
  web:
    image: joseluisq/static-web-server:latest
    hostname: web
    container_name: web
    env_file: ./.settings.env
    restart: always
    command: -g info
    volumes:
      - ./data/web:/public:ro,Z
    networks:
      web:
        ipv4_address: 172.1.1.2

  cloudflared-web:
    image: cloudflare/cloudflared:latest
    container_name: cloudflared-web
    hostname: cloudflaredweb
    command: tunnel --no-autoupdate run --token ${CF_WEB_TOKEN}
    restart: always
    depends_on:
      - web
    networks:
      web:
        ipv4_address: 172.1.1.3

networks:
  web:
    driver: bridge
    ipam:
      driver: default
      config:
        - subnet: 172.1.1.0/16
          gateway: 172.1.1.1

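Both this pod and the `fediverse` pod take a Cloudflare tunnel token via variable substitution (`${CF_WEB_TOKEN}`, `${CF_FEDIVERSE_TOKEN}`). Compose picks these up from the shell environment, and docker-compose (as well as recent podman-compose) will also read an `.env` file sitting next to the compose file, so one option is the following — the token values are placeholders, and the file should be kept out of git:

```
# web/.env
CF_WEB_TOKEN=<token from the Cloudflare Zero Trust dashboard>

# fediverse/.env
CF_FEDIVERSE_TOKEN=<token>
```
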
--------------------------------------------------------------------------------