├── .gitignore ├── AlmaLinux.md ├── LICENSE ├── README.md ├── Ubuntu.md ├── quadlets ├── README.md ├── actual │ ├── actual.container │ ├── actual.network │ └── actual.volume ├── adguard │ ├── adguard.container │ ├── adguard.network │ └── adguard.volume ├── anubis │ ├── README.md │ ├── anubis.container │ ├── anubis.env │ └── anubis.network ├── apprise │ ├── apprise.container │ ├── apprise.network │ └── apprise.volume ├── audiobookshelf │ ├── audiobookshelf.container │ ├── audiobookshelf.network │ └── audiobookshelf.volume ├── authelia │ ├── authelia.container │ └── authelia.network ├── betanin │ ├── betanin.container │ ├── betanin.network │ └── betanin.volume ├── blinko │ ├── blinko-db.container │ ├── blinko.container │ ├── blinko.network │ └── data.volume ├── caddy │ ├── Caddyfile │ ├── Containerfile │ ├── README.md │ ├── caddy.build │ ├── caddy.container │ ├── caddy.volume │ └── reverse-proxy.network ├── calibre-web │ ├── README.md │ ├── calibre-web-config.volume │ ├── calibre-web-data.volume │ └── calibre-web.container ├── calibre │ ├── calibre.container │ ├── calibre.network │ └── config.volume ├── chartdb │ ├── chartdb.container │ └── chartdb.network ├── checkmate │ ├── checkmate-mongo.container │ ├── checkmate-redis.container │ ├── checkmate-server.container │ ├── checkmate.container │ ├── checkmate.network │ └── checkmate.volume ├── dashdot │ ├── dashdot-nvidia.container │ ├── dashdot.container │ ├── dashdot.env │ └── dashdot.network ├── dashy │ ├── dashy.container │ ├── dashy.network │ └── user-data │ │ └── conf.yml ├── filebrowser │ ├── filebrowser.container │ ├── filebrowser.network │ └── filebrowser.volume ├── filestash │ ├── filestash-wopi.container │ ├── filestash.container │ └── filestash.network ├── forgejo │ ├── forgejo-data.volume │ ├── forgejo.container │ └── forgejo.network ├── foundryvtt │ ├── foundryvtt.container │ ├── foundryvtt.network │ └── foundryvtt.volume ├── freshrss │ ├── fivefilters.container │ ├── freshrss.container │ ├── 
freshrss.network │ └── freshrss.volume ├── gaseous │ ├── gaseous-mariadb.container │ ├── gaseous.container │ ├── gaseous.network │ └── gaseous.volume ├── glance │ ├── assets │ │ └── user.css │ ├── config │ │ ├── glance.yml │ │ └── home.yml │ ├── glance.container │ ├── glance.env │ └── glance.volume ├── glances │ ├── glances.conf │ ├── glances.container │ └── glances.network ├── gluetun │ ├── config.toml │ └── gluetun.container ├── graphite │ ├── graphite.container │ ├── graphite.env │ ├── graphite.network │ └── graphite.volume ├── graylog │ ├── graylog-datanode.container │ ├── graylog-db.container │ ├── graylog.container │ └── graylog.network ├── grocy │ ├── grocy.container │ ├── grocy.network │ └── grocy.volume ├── healthchecks │ ├── healthchecks-postgres.container │ ├── healthchecks.container │ ├── healthchecks.env │ ├── healthchecks.network │ └── healthchecks.volume ├── hoarder │ ├── hoarder-chrome.container │ ├── hoarder-meilisearch.container │ ├── hoarder.container │ ├── hoarder.env │ └── hoarder.volume ├── homarr │ ├── homarr.container │ ├── homarr.network │ └── homarr.volume ├── homepage │ ├── homepage.container │ ├── homepage.network │ └── homepage.volume ├── homer │ ├── homer.container │ ├── homer.network │ └── homer.volume ├── it-tools │ ├── it-tools.container │ └── it-tools.network ├── joplin │ ├── joplin-db.container │ ├── joplin.container │ ├── joplin.network │ └── joplin.volume ├── kavita │ ├── kavita.container │ ├── kavita.network │ └── kavita.volume ├── kibitzr │ ├── kibitzr.container │ ├── kibitzr.network │ └── kibitzr.volume ├── komga │ ├── komga.container │ ├── komga.network │ └── komga.volume ├── lazylibrarian │ ├── lazylibrarian.container │ ├── lazylibrarian.network │ └── lazylibrarian.volume ├── leantime │ ├── leantime-mysql.container │ ├── leantime.container │ ├── leantime.env │ ├── leantime.network │ └── leantime.volume ├── librenms │ ├── README.md │ ├── db.env │ ├── librenms-db.container │ ├── librenms-dispatcher.container │ ├── 
librenms-msmtpd.container │ ├── librenms-redist.container │ ├── librenms-snmptrapd.container │ ├── librenms-syslogng.container │ ├── librenms.container │ ├── librenms.env │ ├── librenms.volume │ ├── mariadb.env │ ├── msmtpd.env │ └── tz.env ├── librespeed │ ├── librespeed.container │ └── librespeed.network ├── linkwarden │ ├── linkwarden-database.container │ ├── linkwarden.container │ ├── linkwarden.env │ └── linkwarden.volume ├── matrix │ ├── matrix-db.container │ ├── matrix.container │ ├── matrix.network │ └── matrix.volume ├── maxun │ ├── maxun-backend.container │ ├── maxun-minio.container │ ├── maxun-postgres.container │ ├── maxun-redis.container │ ├── maxun.container │ ├── maxun.env │ ├── maxun.network │ └── maxun.volume ├── mealie │ ├── mealie.container │ ├── mealie.network │ └── mealie.volume ├── memos │ ├── memos.container │ ├── memos.network │ └── memos.volume ├── miniflux │ ├── miniflux-postgres.container │ ├── miniflux.container │ ├── miniflux.network │ └── miniflux.volume ├── minio │ ├── minio.container │ ├── minio.network │ └── minio.volume ├── mirotalk │ ├── mirotalk.container │ ├── mirotalk.env │ └── mirotalk.network ├── n8n │ ├── n8n.container │ ├── n8n.network │ └── n8n.volume ├── nebula │ └── nebula.container ├── netboot-xyz │ ├── netboot-xyz.container │ ├── netboot-xyz.network │ └── netboot-xyz.volume ├── netdata │ ├── netdata.container │ └── netdata.volume ├── nginx │ ├── nginx.container │ ├── nginx.network │ └── nginx.volume ├── openobserve │ ├── openobserve.container │ ├── openobserve.network │ └── openobserve.volume ├── openspeedtest │ └── openspeedtest.container ├── penpot │ ├── penpot-backend.container │ ├── penpot-backend.env │ ├── penpot-exporter.container │ ├── penpot-mailcatcher.container │ ├── penpot-minio.container │ ├── penpot-postgres.container │ ├── penpot-redis.container │ ├── penpot.container │ ├── penpot.env │ ├── penpot.network │ └── penpot.volume ├── pi-hole │ ├── pihole.container │ └── pihole.network ├── pocket-id │ ├── 
pocket-id.container │ ├── pocket-id.network │ └── pocket-id.volume ├── pointspend │ └── pointspend.container ├── postiz │ ├── postiz-postgres.container │ ├── postiz-redis.container │ ├── postiz.container │ ├── postiz.network │ └── postiz.volume ├── prometheus │ ├── prometheus.container │ └── prometheus.volume ├── protonmail-bridge │ ├── protonmail-bridge.container │ ├── protonmail-bridge.network │ └── protonmail-bridge.volume ├── prowlarr │ ├── prowlarr.container │ ├── prowlarr.network │ └── prowlarr.volume ├── qbit_manage │ ├── config.volume │ ├── config.yml │ ├── qbit_manage.container │ └── qbit_manage.env ├── qbittorrent-port-forward-gluetun-server │ └── qbittorrent-port-forward-gluetun-server.container ├── qbittorrent │ ├── qbittorrent.container │ └── qbittorrent.volume ├── radarr │ ├── radarr.container │ ├── radarr.network │ └── radarr.volume ├── romm │ ├── romm-mariadb.container │ ├── romm.container │ ├── romm.env │ ├── romm.network │ └── romm.volume ├── seedboxapi │ └── seedboxapi.container ├── snowflake │ ├── snowflake.container │ └── snowflake.network ├── sogebot │ ├── sogebot-backend.container │ ├── sogebot-dashboard.container │ ├── sogebot.env │ ├── sogebot.network │ └── sogebot.volume ├── sonarr │ ├── sonarr.container │ ├── sonarr.network │ └── sonarr.volume ├── speedtest-tracker │ ├── speedtest-tracker.container │ ├── speedtest-tracker.env │ ├── speedtest-tracker.network │ └── speedtest-tracker.volume ├── stalwart │ ├── stalwart.container │ ├── stalwart.network │ └── stalwart.volume ├── stirlingPDF │ ├── stirlingpdf.container │ └── stirlingpdf.network ├── syslog-ng │ ├── syslog-ng.container │ └── syslog-ng.network ├── tandoor │ ├── tandoor-db.container │ ├── tandoor-nginx.container │ ├── tandoor.container │ ├── tandoor.env │ ├── tandoor.network │ └── tandoor.volume ├── termix │ ├── termix.container │ ├── termix.network │ └── termix.volume ├── thelounge │ ├── thelounge.container │ ├── thelounge.network │ └── thelounge.volume ├── traggo │ ├── 
traggo.container │ └── traggo.network ├── vaultwarden │ ├── vaultwarden.container │ ├── vaultwarden.network │ └── vaultwarden.volume ├── vector │ ├── vector.container │ ├── vector.network │ └── vector.yaml ├── vikunja │ ├── vikunja-mariadb.container │ ├── vikunja.container │ ├── vikunja.network │ └── vikunja.volume └── weechat │ ├── weechat.container │ └── weechat.network └── templates ├── template.container ├── template.env ├── template.network ├── template.pod └── template.volume /.gitignore: -------------------------------------------------------------------------------- 1 | .zk 2 | -------------------------------------------------------------------------------- /AlmaLinux.md: -------------------------------------------------------------------------------- 1 | # AlmaLinux 2 | 3 | My proof of concept server running this container stack is built on AlmaLinux 4 | 9.4. 5 | 6 | > [!WARNING] 7 | > Perform `dnf update` immediately 8 | 9 | ## [Repositories](https://wiki.almalinux.org/repos/) 10 | 11 | These may not really be necessary to set up, but you should absolutely review 12 | them and decide for yourself. 13 | 14 | - [AlmaLinux](https://wiki.almalinux.org/repos/AlmaLinux.html) 15 | - [CentOS SIGs](https://wiki.almalinux.org/repos/CentOS.html) 16 | - [Extra](https://wiki.almalinux.org/repos/Extras.html) 17 | - EPEL and CRB 18 | - `dnf install epel-release` 19 | - `dnf config-manager --set-enabled crb` 20 | - ELRepo 21 | - `dnf install elrepo-release` 22 | - [RPM Fusion](https://wiki.almalinux.org/documentation/epel-and-rpmfusion.html) 23 | 24 | ## Disks 25 | 26 | ### Partitions 27 | 28 | Repeat the following steps for all disks that you want to join together into 29 | one single logical volume. 
30 | 31 | ```bash 32 | # Find /dev/sdX paths for disks 33 | # WARNING: Make sure you confirm the disk is correct 34 | lsblk -f 35 | # Clear the partition table 36 | dd if=/dev/zero of=/dev/sdX bs=512 count=1 conv=notrunc 37 | dd if=/dev/zero of=/dev/sdY bs=512 count=1 conv=notrunc 38 | ``` 39 | 40 | ### LVM 41 | 42 | ```bash 43 | # Create physical volume 44 | pvcreate /dev/sdX 45 | # Create volume group for disks 46 | vgcreate library /dev/sdX 47 | # Add more disks to volume group 48 | vgextend library /dev/sdY 49 | # Create logical volume across all disks in volume group 50 | lvcreate -l100%FREE -n books library 51 | # Add filesystem to logical volume 52 | mke2fs -t ext4 /dev/library/books 53 | # Check it 54 | e2fsck -f /dev/library/books 55 | ``` 56 | 57 | ### /etc/systemd/system/volumes-books.mount 58 | 59 | ```ini 60 | [Mount] 61 | What=/dev/library/books 62 | Where=/volumes/books 63 | Type=ext4 64 | 65 | [Install] 66 | WantedBy=default.target 67 | ``` 68 | 69 | > [!NOTE] 70 | > We could use a different filesystem that allows mount options to set the 71 | > permissions 72 | 73 | ```bash 74 | chown -R $ctuser:$ctuser /volumes 75 | ``` 76 | 77 | ## SSH 78 | 79 | SSH is optional, but highly encouraged. Cockpit gives you a terminal too, but 80 | that's nowhere near as good as what you can do with a real terminal emulator 81 | and ssh clients. 82 | 83 | ```bash 84 | dnf install openssh-server 85 | 86 | ## Generate strong key on your laptop or workstation/desktop 87 | ssh-keygen -t ed25519 -a 32 -f ~/.ssh/$localhost-to-$remotehost 88 | 89 | ## Copy key to AlmaLinux 90 | ssh-copy-id -i ~/.ssh/$localhost-to-$remotehost $user@$remotehost 91 | ``` 92 | 93 | ## Override `sshd` config 94 | 95 | We don't want to allow anyone to login as root remotely ever. You must be a 96 | `sudoer` with public key auth to elevate to root. 
97 | 98 | ```bash 99 | printf '%s\n' 'PermitRootLogin no' > /etc/ssh/sshd_config.d/01-root.conf 100 | printf '%s\n' \ 101 | 'PubkeyAuthentication yes' \ 102 | 'PasswordAuthentication no' > /etc/ssh/sshd_config.d/01-pubkey.conf 103 | ``` 104 | 105 | ## Cockpit -> https://ip-addr:9090 106 | 107 | > [!WARNING] 108 | > I run behind an existing firewall, not in a VPS or cloud provider. 109 | > ```bash 110 | > systemctl disable --now firewalld 111 | > ``` 112 | 113 | > [!NOTE] 114 | > Should be able to set up good firewall with only 22/80/443 open. 115 | 116 | Enable the socket-activated cockpit service and allow it through the firewall. 117 | 118 | ```bash 119 | systemctl enable --now cockpit.socket 120 | 121 | # FIXME: Unnecessary? Default works? 122 | firewall-cmd --permanent --zone=public --add-service=cockpit 123 | firewall-cmd --reload 124 | ``` 125 | 126 | ## Add SSH keys 127 | 128 | > [!TIP] 129 | > Skip if you copied your keys with `ssh-copy-id` above. 130 | 131 | `Accounts` -> `Your account` -> `Authorized public SSH keys` -> `Add Key` 132 | 133 | ## Install SELinux troubleshoot tool 134 | 135 | This is a component for Cockpit. 136 | 137 | ```bash 138 | dnf install setroubleshoot-server 139 | ``` 140 | 141 | ## Podman 142 | 143 | Podman is a daemonless container hypervisor. This document prepares a fully 144 | rootless environment for our containers to run in. 145 | 146 | ## Install 147 | 148 | ```bash 149 | dnf install podman 150 | systemctl enable --now podman 151 | ``` 152 | 153 | > [!NOTE] 154 | > Read the docs. 155 | > `man podman-systemd.unit` 156 | 157 | ## Prepare host networking stack 158 | 159 | ### slirp4netns 160 | 161 | > [!NOTE] 162 | > This may not be necessary but my system is currently using it. 163 | 164 | ```bash 165 | dnf install slirp4netns 166 | ``` 167 | 168 | ### Install DNS server for `podman` 169 | 170 | > [!NOTE] 171 | > Not sure how to resolve these correctly yet but the journal logs it 172 | > so it's running for something. 
173 | 174 | ```bash 175 | dnf install aardvark-dns 176 | ``` 177 | 178 | ### Allow rootless binding port 80+ 179 | 180 | > [!NOTE] 181 | > This is only necessary if you are setting up the reverse proxy. 182 | 183 | ```bash 184 | printf '%s\n' 'net.ipv4.ip_unprivileged_port_start=80' > /etc/sysctl.d/99-unprivileged-port-binding.conf 185 | sysctl -w net.ipv4.ip_unprivileged_port_start=80 186 | ``` 187 | 188 | ### Allow containers to route within multiple networks 189 | 190 | ```bash 191 | printf '%s\n' 'net.ipv4.conf.all.rp_filter=2' > /etc/sysctl.d/99-reverse-path-loose.conf 192 | sysctl -w net.ipv4.conf.all.rp_filter=2 193 | ``` 194 | 195 | ## Prepare container user 196 | 197 | This user will be the owner of all containers with no login shell or root 198 | privileges. 199 | 200 | ```bash 201 | # Prepare a group id outside of the normal range 202 | groupadd --gid 2000 $ctuser 203 | # Create user with restrictions 204 | # We need the $HOME to live in 205 | useradd --create-home \ 206 | --shell /usr/bin/false \ 207 | --password $ctuser_pw \ 208 | --no-user-group \ 209 | --gid $ctuser \ 210 | --groups systemd-journal \ 211 | --uid 2000 \ 212 | $ctuser 213 | # Lock user from password login 214 | usermod --lock $ctuser 215 | # Add container sub-ids 216 | usermod --add-subuids 200000-299999 --add-subgids 200000-299999 $ctuser 217 | # Start $ctuser session at boot without login 218 | loginctl enable-linger $ctuser 219 | ``` 220 | 221 | > [!TIP] 222 | > Optionally setup ssh keys to directly login to $ctuser. 223 | 224 | ### Setup $ctuser env 225 | 226 | > [!NOTE] 227 | > The login shell doesn't exist. Launch `bash -l` manually to get a shell or 228 | > else your `ssh` will exit with a status of 1. 
229 | 230 | ```bash 231 | # Switch to user (`-i` doesn't work without a login shell) 232 | machinectl shell $ctuser@ /bin/bash 233 | # Create dirs 234 | mkdir -p ~/.config/{containers/systemd,environment.d} 235 | # Prepare `systemd --user` env 236 | echo 'XDG_RUNTIME_DIR=/run/user/2000' >> ~/.config/environment.d/10-xdg.conf 237 | # Enable container auto-update 238 | podman system migrate 239 | # WARNING: Set strict versions for all containers or risk catastrophe 240 | systemctl --user enable --now podman-auto-update 241 | exit 242 | ``` 243 | 244 | > [!WARNING] 245 | > I disabled SELinux to not deal with this for every container. 246 | > /etc/selinux/config -> `SELINUX=disabled` 247 | 248 | > [!TIP] 249 | > Set up the correct policies permanently instead of disabling SELinux 250 | 251 | Temporarily set SELinux policy to allow containers to use devices. 252 | 253 | ```bash 254 | setsebool -P container_use_devices 1 255 | ``` 256 | 257 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | This is free and unencumbered software released into the public domain. 2 | 3 | Anyone is free to copy, modify, publish, use, compile, sell, or 4 | distribute this software, either in source code form or as a compiled 5 | binary, for any purpose, commercial or non-commercial, and by any 6 | means. 7 | 8 | In jurisdictions that recognize copyright laws, the author or authors 9 | of this software dedicate any and all copyright interest in the 10 | software to the public domain. We make this dedication for the benefit 11 | of the public at large and to the detriment of our heirs and 12 | successors. We intend this dedication to be an overt act of 13 | relinquishment in perpetuity of all present and future rights to this 14 | software under copyright law. 
15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 19 | IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 | OTHER DEALINGS IN THE SOFTWARE. 23 | 24 | For more information, please refer to 25 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # podbox 2 | 3 | ## Table of Contents 4 | 5 | - [What is this?](#what-is-this) 6 | - [Table of Contents](#table-of-contents) 7 | - [Getting started](#getting-started) 8 | - [Dependencies](#dependencies) 9 | - [Quickstart](#quickstart) 10 | - [Hello, world](#hello-world) 11 | - [Running real apps](#running-real-apps) 12 | - [Example](#example) 13 | - [Coming soon](#coming-soon) 14 | - [Acknowledgments](#acknowledgments) 15 | 16 | ## What is this? 17 | 18 | [Make `systemd` better for Podman with Quadlet](https://www.redhat.com/en/blog/quadlet-podman) 19 | 20 | This is a repository of ready-to-use `quadlets`. They allow you to run any 21 | container with `podman` using `systemd` unit files in your user session. 22 | 23 | This means no root user is ever invoked from the host system. Everything runs 24 | under the same user permissions as yourself, from within your own `$HOME`. 25 | 26 | > [!NOTE] 27 | > It is recommended to create another user specifically for running these 28 | > containers, but it is not strictly required. 
Details for setting up a system 29 | > from scratch are located in [AlmaLinux.md](./AlmaLinux.md) or 30 | > [Ubuntu.md](./Ubuntu.md 31 | ) 32 | 33 | ## Getting started 34 | 35 | ### Dependencies 36 | 37 | - `podman>=4.3.0` 38 | - `systemd` 39 | 40 | You may need to add a large range of subuids and subgids, because `podman` will 41 | use them when users are generated inside the containers. 42 | 43 | ```bash 44 | sudo usermod --add-subuids 200000-299999 --add-subgids 200000-299999 $USER 45 | ``` 46 | 47 | ### Quickstart 48 | 49 | #### Hello, world 50 | 51 | Create the following unit file at `~/.config/containers/systemd/helloworld.container`. 52 | 53 | ```ini 54 | [Unit] 55 | Description=Hello, world 56 | 57 | [Service] 58 | Restart=on-failure 59 | TimeoutStartSec=900 60 | 61 | [Install] 62 | WantedBy=default.target 63 | 64 | [Container] 65 | Image=quay.io/podman/hello 66 | ContainerName=helloworld 67 | ``` 68 | 69 | Run the following commands to load and run the container. 70 | 71 | ```bash 72 | systemctl --user daemon-reload 73 | systemctl --user start helloworld 74 | journalctl -e 75 | ``` 76 | 77 | You should see the following in your journal! 78 | 79 | ```bash 80 | Dec 01 08:42:05 perseus systemd[1362]: Started hello world. 81 | Dec 01 08:42:05 perseus helloworld[1143334]: !... Hello Podman World ...! 82 | Dec 01 08:42:05 perseus helloworld[1143334]: 83 | Dec 01 08:42:05 perseus helloworld[1143334]: .--"--. 84 | Dec 01 08:42:05 perseus helloworld[1143334]: / - - \ 85 | Dec 01 08:42:05 perseus helloworld[1143334]: / (O) (O) \ 86 | Dec 01 08:42:05 perseus helloworld[1143334]: ~~~| -=(,Y,)=- | 87 | Dec 01 08:42:05 perseus helloworld[1143334]: .---. /` \ |~~ 88 | Dec 01 08:42:05 perseus helloworld[1143334]: ~/ o o \~~~~.----. 
~~ 89 | Dec 01 08:42:05 perseus helloworld[1143334]: | =(X)= |~ / (O (O) \ 90 | Dec 01 08:42:05 perseus helloworld[1143334]: ~~~~~~~ ~| =(Y_)=- | 91 | Dec 01 08:42:05 perseus helloworld[1143334]: ~~~~ ~~~| U |~~ 92 | Dec 01 08:42:05 perseus helloworld[1143334]: 93 | Dec 01 08:42:05 perseus helloworld[1143334]: Project: https://github.com/containers/podman 94 | Dec 01 08:42:05 perseus helloworld[1143334]: Website: https://podman.io 95 | Dec 01 08:42:05 perseus helloworld[1143334]: Desktop: https://podman-desktop.io 96 | Dec 01 08:42:05 perseus helloworld[1143334]: Documents: https://docs.podman.io 97 | Dec 01 08:42:05 perseus helloworld[1143334]: YouTube: https://youtube.com/@Podman 98 | Dec 01 08:42:05 perseus helloworld[1143334]: X/Twitter: @Podman_io 99 | Dec 01 08:42:05 perseus helloworld[1143334]: Mastodon: @Podman_io@fosstodon.org 100 | ``` 101 | 102 | ### Running real apps 103 | 104 | 1. Copy the `quadlets/$app/` you want to run to 105 | `$XDG_CONFIG_HOME/containers/systemd/quadlets/` 106 | 2. Edit the files to match your system 107 | - Set your `Network=...` for containers that need to share a network 108 | namespace 109 | - Set `Volume=...:...` to a path that exists on your system if you need to 110 | access it within that container 111 | - Modify environment variables with `Environment=...` or use an env file with `EnvironmentFile=./path/to/foo.env` 112 | 3. Load the updated container definition into `systemd` 113 | 4. Launch the container 114 | 115 | #### Example 116 | 117 | ```bash 118 | # Step 1 119 | git clone --depth=1 https://github.com/redbeardymcgee/podbox 120 | cp -a podbox/quadlets/thelounge "$XDG_CONFIG_HOME"/containers/systemd/ 121 | # Step 2 122 | $EDITOR "$XDG_CONFIG_HOME"/containers/systemd/thelounge/* 123 | # Step 3 124 | systemctl --user daemon-reload 125 | # Step 4 126 | systemctl --user start thelounge 127 | ``` 128 | 129 | Navigate to `http://localhost:9000` in your browser. 
130 | 131 | > [!WARNING] 132 | > If the application is not found, confirm that the service is listening on 133 | > port 9000 with `ss -tunlp`. You should see something similar to the 134 | > following in your output: 135 | > 136 | > ```bash 137 | > Netid State Recv-Q Send-Q Local Address:Port Peer Address:PortProcess 138 | > tcp LISTEN 0 4096 *:9000 *:* users:(("rootlessport",pid=913878,fd=10)) 139 | > ``` 140 | 141 | ## Coming soon 142 | 143 | These services are on my radar for implementation. Please suggest your 144 | favorites, and I welcome [pull 145 | requests](https://git.mcgee.red/redbeardymcgee/podbox/pulls). 146 | 147 | - [ ] [ArgoCD](https://github.com/argoproj/argo-cd) 148 | - [ ] [Authelia](https://www.authelia.com/) 149 | - [ ] [Authentik](https://goauthentik.io/) 150 | - [ ] [Duplicacy](https://duplicacy.com/) 151 | - [ ] [Duplicati](https://duplicati.com/) 152 | - [ ] [Immich](https://immich.app/) 153 | - [ ] [Keycloak](https://www.keycloak.org) 154 | - [ ] [Netbird](https://netbird.io/) 155 | - [ ] [Note Mark](https://github.com/enchant97/note-mark) 156 | - [ ] [Notesnook](https://github.com/streetwriters/notesnook-sync-server) 157 | - [ ] [Pod Arcade](https://www.pod-arcade.com/) 158 | - [ ] [Seafile](https://www.seafile.com) 159 | - [ ] [Shiori](https://github.com/go-shiori/shiori) 160 | - [ ] [SimpleX](https://simplex.chat/) 161 | - [ ] [solidtime](https://docs.solidtime.io/self-hosting/intro) 162 | - [ ] [Ubooquity](https://vaemendis.net/ubooquity/) 163 | - [ ] [Umami](https://umami.is/) 164 | - [ ] [UrBackup](https://urbackup.org) 165 | - [ ] [Wazuh](https://wazuh.com/) 166 | - [ ] [wiki.js](https://js.wiki) 167 | - [ ] [wger](https://wger.de/) 168 | - [ ] [Zenoss](https://www.zenoss.com/) 169 | - [ ] [Zitadel](https://zitadel.com/) 170 | 171 | ## Acknowledgments 172 | 173 | Thanks to these users for their examples and contributions! 
174 | 175 | - [@fpatrick](https://github.com/fpatrick)/[podman-quadlet](https://github.com/fpatrick/podman-quadlet) 176 | - [@dwedia](https://github.com/dwedia)/[podmanQuadlets](https://github.com/dwedia/podmanQuadlets) 177 | - [@sudo-kraken](https://github.com/sudo-kraken) 178 | - [@EphemeralDev](https://github.com/EphemeralDev) 179 | -------------------------------------------------------------------------------- /Ubuntu.md: -------------------------------------------------------------------------------- 1 | # Ubuntu Server 2 | 3 | Setting up rootless podman on a fresh Ubuntu 24.10 server. 4 | 5 | > [!WARNING] 6 | > Perform `sudo apt update && sudo apt upgrade` immediately. Reboot system. 7 | 8 | ## SSH 9 | 10 | SSH is optional, but highly encouraged. OpenSSH is installed by default and sshd 11 | is running by default. 12 | 13 | ```bash 14 | ## Generate strong key on your laptop or workstation/desktop 15 | ## If you already have keys DO NOT overwrite your previous keys 16 | 17 | ssh-keygen -t ed25519 -a 32 -f ~/.ssh/$localhost-to-$remotehost 18 | 19 | ## Optionally set a passphrase 20 | 21 | ## Copy key to Ubuntu 22 | ssh-copy-id username@remote_host 23 | ``` 24 | 25 | ## Override `sshd` config 26 | 27 | We don't want to allow anyone to login as root remotely ever. You must be a 28 | `sudoer` with public key auth to elevate to root. 29 | 30 | SSH into your server and run 31 | 32 | ```bash 33 | printf '%s\n' 'PermitRootLogin no' | sudo tee /etc/ssh/sshd_config.d/01-root.conf 34 | printf '%s\n' \ 35 | 'PubkeyAuthentication yes' \ 36 | 'PasswordAuthentication no' | sudo tee /etc/ssh/sshd_config.d/01-pubkey.conf 37 | ``` 38 | 39 | Save file and then run `systemctl restart ssh` Before closing your session, open 40 | a new terminal and test SSH is functioning correctly. 41 | 42 | ## Podman 43 | 44 | Podman is a daemonless container hypervisor. This document prepares a fully 45 | rootless environment for our containers to run in. 
46 | 47 | ## Install 48 | 49 | ```bash 50 | sudo apt install podman systemd-container 51 | 52 | ## Make sure podman is running 53 | systemctl enable --now podman 54 | ``` 55 | 56 | > [!NOTE] 57 | > Read the docs. `man podman-systemd.unit` 58 | 59 | ## Prepare host networking stack 60 | 61 | ## Pasta or slirp4netns 62 | 63 | > [!NOTE] 64 | > As of Podman 5.0 Pasta is the default rootless networking tool. 65 | > 66 | > Podman 5.0 is available in standard Ubuntu repo since 24.10. 67 | > 68 | > Both are installed with podman see 69 | > [rootless networking for configuration](https://github.com/containers/podman/blob/main/docs/tutorials/rootless_tutorial.md#networking-configuration) 70 | 71 | ## Allow rootless binding port 80+ 72 | 73 | ### Modify range of unprivileged ports 74 | 75 | > [!NOTE] 76 | > This is only necessary if you are setting up the reverse proxy (or any service 77 | > on ports <1024). 78 | 79 | ```bash 80 | printf '%s\n' 'net.ipv4.ip_unprivileged_port_start=80' | sudo tee /etc/sysctl.d/99-unprivileged-port-binding.conf 81 | sysctl -w 'net.ipv4.ip_unprivileged_port_start=80' 82 | ``` 83 | 84 | ## Prepare container user 85 | 86 | This user will be the owner of all containers with no login shell or root 87 | privileges. 88 | 89 | Container user should have range of uid/gid automatically generated. See 90 | [subuid and subgid tutorial](https://github.com/containers/podman/blob/main/docs/tutorials/rootless_tutorial.md#etcsubuid-and-etcsubgid-configuration) 91 | to verify range or create if it does not exist. 
92 | 93 | Note $ctuser is a placeholder, replace with your username 94 | 95 | ```bash 96 | # Prepare a group id outside of the normal range 97 | sudo groupadd --gid 2000 $ctuser 98 | # Create user with restrictions 99 | # We need the $HOME to live in 100 | sudo useradd --create-home \ 101 | --shell /usr/bin/false \ 102 | --password $ctuser_pw \ 103 | --no-user-group \ 104 | --gid $ctuser \ 105 | --groups systemd-journal \ 106 | --uid 2000 \ 107 | $ctuser 108 | # Lock user from password login 109 | sudo usermod --lock $ctuser 110 | # Start $ctuser session at boot without login 111 | loginctl enable-linger $ctuser 112 | ``` 113 | 114 | > [!NOTE] 115 | > Consider removing bash history entry that contains the password entered above 116 | 117 | ## Setup $ctuser env 118 | 119 | > [!NOTE] 120 | > Use machinectl instead of sudo or su to get a shell that is fully isolated 121 | > from the original session. See the developers comments on the problem 122 | > [with su](https://github.com/systemd/systemd/issues/825#issuecomment-127917622) 123 | > as well as the purpose of 124 | > [machinectl shell](https://github.com/systemd/systemd/pull/1022#issuecomment-136133244) 125 | 126 | ```bash 127 | # Switch to $ctuser 128 | # Note do not remove the trailing @ 129 | machinectl shell $ctuser@ /bin/bash 130 | # Create dirs 131 | mkdir -p ~/.config/{containers/systemd,environment.d} 132 | # Prepare `systemd --user` env 133 | echo 'XDG_RUNTIME_DIR=/run/user/2000' >> ~/.config/environment.d/10-xdg.conf 134 | # Enable container auto-update 135 | podman system migrate 136 | # WARNING: Set strict versions for all containers or risk catastrophe 137 | systemctl --user enable --now podman-auto-update 138 | exit 139 | ``` 140 | 141 | ## Podman fails autostart 142 | 143 | In Podman < 5.3 containers may fail to autostart because user level units cannot depend on system level units (in this case `network-online.target`) 144 | 145 | Podman >= 5.3 should ship with a workaround user unit that can be 
used `podman-user-wait-network-online.service`, use that instead of the fix below. 146 | 147 | See [this github issue](https://github.com/containers/podman/issues/22197) for workarounds, the workaround below is what worked for me. The google.com ping can be replaced with your preferred (reachable) ip/host 148 | 149 | To fix this, create the following 150 | 151 | ```bash 152 | # ~/.config/systemd/user/network-online.service 153 | [Unit] 154 | Description=User-level proxy to system-level network-online.target 155 | 156 | [Service] 157 | Type=oneshot 158 | ExecStart=sh -c 'until ping -c 1 google.com; do sleep 5; done' 159 | 160 | [Install] 161 | WantedBy=default.target 162 | ``` 163 | ```bash 164 | # ~/.config/systemd/user/network-online.target 165 | [Unit] 166 | Description=User-level network-online.target 167 | Requires=network-online.service 168 | Wants=network-online.service 169 | After=network-online.service 170 | ``` 171 | Then enable the service `systemctl --user enable network-online.service` 172 | 173 | In quadlets add the following: 174 | 175 | ```bash 176 | [Unit] 177 | After=network-online.target 178 | ``` 179 | -------------------------------------------------------------------------------- /quadlets/README.md: -------------------------------------------------------------------------------- 1 | ## Quadlets 2 | 3 | Quadlets go in `~/.config/containers/systemd`. 
4 | -------------------------------------------------------------------------------- /quadlets/actual/actual.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Actual budget management 3 | 4 | 5 | [Service] 6 | Restart=on-failure 7 | TimeoutStartSec=900 8 | 9 | [Install] 10 | WantedBy=default.target 11 | 12 | [Container] 13 | Image=docker.io/actualbudget/actual-server:latest 14 | ContainerName=actual 15 | 16 | Network=actual.network 17 | HostName=actual 18 | 19 | Volume=actual-data:/data 20 | 21 | #Environment=ACTUAL_HTTPS_KEY= 22 | #Environment=ACTUAL_HTTPS_CERT= 23 | #Environment=ACTUAL_PORT= 24 | #Environment=ACTUAL_UPLOAD_FILE_SYNC_SIZE_LIMIT_MB= 25 | #Environment=ACTUAL_UPLOAD_SYNC_ENCRYPTED_FILE_SYNC_SIZE_LIMIT_MB= 26 | #Environment=ACTUAL_UPLOAD_FILE_SIZE_LIMIT= 27 | 28 | -------------------------------------------------------------------------------- /quadlets/actual/actual.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/actual/actual.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=actual-data 3 | -------------------------------------------------------------------------------- /quadlets/adguard/adguard.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Adguard Quadlet 3 | 4 | [Service] 5 | Restart=always 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/adguard/adguardhome:latest 13 | ContainerName=adguard 14 | 15 | Network=adguard.network 16 | HostName=adguard 17 | PublishPort=53:53/tcp 18 | PublishPort=53:53/udp 19 | PublishPort=784:784/udp 20 | PublishPort=853:853/tcp 21 | PublishPort=3000:3000/tcp 22 | PublishPort=8844:80/tcp 23 | 
PublishPort=8443:443/tcp 24 | 25 | Volume=adguard-config:/opt/adguardhome/conf 26 | Volume=adguard-work:/opt/adguardhome/work 27 | Volume=/var/log/AdGuardHome.log:/var/log/AdGuardHome.log 28 | -------------------------------------------------------------------------------- /quadlets/adguard/adguard.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/adguard/adguard.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=adguard-config 3 | VolumeName=adguard-work -------------------------------------------------------------------------------- /quadlets/anubis/README.md: -------------------------------------------------------------------------------- 1 | # Anubis 2 | 3 | ## Necessary modifications 4 | 5 | Ensure that `anubis.env` has at least set `REDIRECT_DOMAINS` and `TARGET`. The 6 | target must be within the same container network as `anubis`, which should be 7 | in the same network as your reverse-proxy such as `nginx` or `caddy`. You may 8 | use multiple `Network` keys in `anubis.container` to achieve this. 9 | 10 | ## Optional 11 | 12 | Create `botPolicy.yaml` and uncomment `POLICY_FNAME` to supply your own 13 | custom rules. 14 | 15 | ## Note 16 | 17 | You will require a unique instance of Anubis for each domain you wish to 18 | protect against AI crawlers.
19 | -------------------------------------------------------------------------------- /quadlets/anubis/anubis.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Anubis 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=ghcr.io/techarohq/anubis:latest 13 | ContainerName=anubis 14 | AutoUpdate=registry 15 | 16 | Network=anubis.network 17 | HostName=anubis 18 | 19 | #Volume=./botPolicy.yaml:/data/cfg/botPolicy.yaml:ro 20 | 21 | EnvironmentFile=anubis.env 22 | 23 | -------------------------------------------------------------------------------- /quadlets/anubis/anubis.env: -------------------------------------------------------------------------------- 1 | BIND=:8923 2 | DIFFICULTY=4 3 | METRICS_BIND=:9090 4 | SERVE_ROBOTS_TXT=true 5 | TARGET=http://host:port 6 | #POLICY_FNAME=/data/cfg/botPolicy.yaml 7 | OG_PASSTHROUGH=true 8 | OG_EXPIRY_TIME=24h 9 | REDIRECT_DOMAINS=mydomain.example.com,another.example.com 10 | -------------------------------------------------------------------------------- /quadlets/anubis/anubis.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | 3 | -------------------------------------------------------------------------------- /quadlets/apprise/apprise.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Apprise API 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/caronc/apprise:latest 13 | ContainerName=apprise 14 | AutoUpdate=registry 15 | 16 | Network=apprise.network 17 | HostName=apprise 18 | PublishPort=8000:8000 19 | 20 | Volume=apprise-config:/config 21 | Volume=apprise-plugin:/plugin 22 | Volume=apprise-attach:/attach 23 | 24 |
Environment=APPRISE_STATEFUL_MODE=simple 25 | Environment=APPRISE_WORKER_COUNT=1 26 | -------------------------------------------------------------------------------- /quadlets/apprise/apprise.network: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Apprise network 3 | 4 | [Network] 5 | NetworkName=apprise 6 | 7 | -------------------------------------------------------------------------------- /quadlets/apprise/apprise.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=apprise-config 3 | VolumeName=apprise-plugin 4 | VolumeName=apprise-attach 5 | 6 | -------------------------------------------------------------------------------- /quadlets/audiobookshelf/audiobookshelf.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Audiobookshelf Quadlet 3 | 4 | [Service] 5 | Restart=always 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/advplyr/audiobookshelf:latest 13 | ContainerName=audiobookshelf 14 | 15 | Network=audiobookshelf.network 16 | HostName=audiobookshelf 17 | PublishPort=13378:80 18 | 19 | Volume=audiobookshelf-config:/config 20 | Volume=audiobookshelf-metadata:/metadata 21 | Volume=audiobookshelf-audiobooks:/audiobooks 22 | -------------------------------------------------------------------------------- /quadlets/audiobookshelf/audiobookshelf.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/audiobookshelf/audiobookshelf.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=audiobookshelf-config 3 | VolumeName=audiobookshelf-metadata 4 | VolumeName=audiobookshelf-audiobooks
-------------------------------------------------------------------------------- /quadlets/authelia/authelia.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Authelia 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/authelia/authelia 13 | ContainerName=authelia 14 | AutoUpdate=registry 15 | 16 | Network=authelia.network 17 | HostName=authelia 18 | 19 | Volume=./config:/config 20 | 21 | Environment=AUTHELIA_IDENTITY_VALIDATION_RESET_PASSWORD_JWT_SECRET_FILE=/secrets/JWT_SECRET 22 | Environment=AUTHELIA_SESSION_SECRET_FILE=/secrets/SESSION_SECRET 23 | Environment=AUTHELIA_STORAGE_POSTGRES_PASSWORD_FILE=/secrets/STORAGE_PASSWORD 24 | Environment=AUTHELIA_STORAGE_ENCRYPTION_KEY_FILE=/secrets/STORAGE_ENCRYPTION_KEY 25 | 26 | Secret=authelia-jwt-secret,type=mount,target=/secrets/JWT_SECRET 27 | Secret=authelia-session-secret,type=mount,target=/secrets/SESSION_SECRET 28 | Secret=authelia-storage-password,type=mount,target=/secrets/STORAGE_PASSWORD 29 | Secret=authelia-storage-encryption-key,type=mount,target=/secrets/STORAGE_ENCRYPTION_KEY 30 | -------------------------------------------------------------------------------- /quadlets/authelia/authelia.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/betanin/betanin.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=betanin 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/sentriz/betanin 13 | ContainerName=betanin 14 | AutoUpdate=registry 15 | 16 | Network=betanin.network 17 | HostName=betanin 18 | PublishPort=9393:9393 19 | 20 | 
Volume=betanin-data:/b/.local/share/betanin 21 | Volume=betanin-config:/b/.local/share/config 22 | Volume=betanin-beets:/b/.local/share/beets 23 | Volume=/path/to/music:/music 24 | Volume=/path/to/downloads:/downloads 25 | -------------------------------------------------------------------------------- /quadlets/betanin/betanin.network: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=betanin network 3 | 4 | [Network] 5 | NetworkName=betanin 6 | 7 | -------------------------------------------------------------------------------- /quadlets/betanin/betanin.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=betanin-data 3 | VolumeName=betanin-config 4 | VolumeName=betanin-beets 5 | 6 | -------------------------------------------------------------------------------- /quadlets/blinko/blinko-db.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Postgres for Blinko 3 | Wants=blinko.service 4 | 5 | [Service] 6 | Restart=on-failure 7 | TimeoutStartSec=900 8 | 9 | [Install] 10 | WantedBy=default.target 11 | 12 | [Container] 13 | Image=docker.io/postgres 14 | ContainerName=blinko-db 15 | 16 | Network=blinko.network 17 | HostName=blinko-db 18 | PublishPort=5435:5432 19 | 20 | Volume=blinko-db:/var/lib/postgresql/data 21 | 22 | Environment=POSTGRES_DB=postgres 23 | Environment=POSTGRES_USER=postgres 24 | Environment=TZ=Etc/UTC 25 | 26 | Secret=blinko-db-pw,type=env,target=POSTGRES_PASSWORD 27 | -------------------------------------------------------------------------------- /quadlets/blinko/blinko.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Blinko 3 | Requires=blinko-db.service 4 | After=blinko-db.service 5 | 6 | [Service] 7 | Restart=on-failure 8 | TimeoutStartSec=900 9 | 10 | [Install] 11 | 
WantedBy=default.target 12 | 13 | [Container] 14 | Image=docker.io/blinkospace/blinko 15 | ContainerName=blinko 16 | 17 | Network=blinko.network 18 | HostName=blinko 19 | PublishPort=1111:1111 20 | 21 | Volume=blinko-data:/app/.blinko 22 | 23 | Environment=NODE_ENV=production 24 | #Environment=NEXTAUTH_URL=http://localhost:1111 25 | #Environment=NEXT_PUBLIC_BASE_URL=http://localhost:1111 26 | Environment=DATABASE_URL=postgresql://postgres:$mysecretpassword@blinko-db:5432/postgres 27 | 28 | Secret=blinko-nextauth-secret,type=env,target=NEXTAUTH_SECRET 29 | -------------------------------------------------------------------------------- /quadlets/blinko/blinko.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/blinko/data.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=blinko-data 3 | VolumeName=blinko-db 4 | 5 | -------------------------------------------------------------------------------- /quadlets/caddy/Caddyfile: -------------------------------------------------------------------------------- 1 | { 2 | acme_dns $provider $api_key 3 | } 4 | 5 | qb.$domain.$tld { 6 | reverse_proxy localhost:8080 7 | } 8 | -------------------------------------------------------------------------------- /quadlets/caddy/Containerfile: -------------------------------------------------------------------------------- 1 | FROM docker.io/caddy:$version-builder AS builder 2 | 3 | RUN xcaddy build \ 4 | --with github.com/caddy-dns/$module 5 | 6 | FROM docker.io/caddy:$version 7 | 8 | COPY --from=builder /usr/bin/caddy /usr/bin/caddy 9 | -------------------------------------------------------------------------------- /quadlets/caddy/README.md: -------------------------------------------------------------------------------- 1 | # podman 5.2.5 2 | 3 | ```bash 4 | dnf -y install 
btrfs-progs-devel passt 5 | curl -fsSL \ 6 | -o podman-5.2.5.tar.gz \ 7 | https://github.com/containers/podman/archive/refs/tags/v5.2.5.tar.gz 8 | tar xzf podman-5.2.5.tar.gz 9 | cd podman-5.2.5 10 | make BUILDTAGS="selinux seccomp" PREFIX=/usr 11 | ``` 12 | -------------------------------------------------------------------------------- /quadlets/caddy/caddy.build: -------------------------------------------------------------------------------- 1 | [Build] 2 | ImageTag=localhost/caddy-njalla 3 | SetWorkingDirectory=unit 4 | 5 | -------------------------------------------------------------------------------- /quadlets/caddy/caddy.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Reverse proxy 3 | 4 | [Service] 5 | Restart=on-failure 6 | 7 | [Install] 8 | WantedBy=default.target 9 | 10 | [Container] 11 | Image=caddy.build 12 | ContainerName=caddy 13 | 14 | Network=reverse-proxy.network 15 | HostName=caddy 16 | PublishPort=80:80 17 | PublishPort=443:443 18 | PublishPort=443:443/udp 19 | 20 | Volume=caddy-config:/config 21 | Volume=caddy-data:/data 22 | 23 | Volume=./Caddyfile:/etc/caddy/Caddyfile 24 | 25 | -------------------------------------------------------------------------------- /quadlets/caddy/caddy.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=caddy-config 3 | VolumeName=caddy-data 4 | -------------------------------------------------------------------------------- /quadlets/caddy/reverse-proxy.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/calibre-web/README.md: -------------------------------------------------------------------------------- 1 | # calibre-web 2 | 3 | ## Known issues 4 | 5 | ### The starter metadata.db is required even if you do not use `calibre` 6 | 7 | 
> [!WARNING] 8 | > This should be run as your `$ctuser` or it will have the wrong owner and 9 | > permissions 10 | 11 | ```bash 12 | curl -fLSs -o /home/$ctuser/.local/share/containers/storage/volumes/calibre-web-database/metadata.db https://github.com/janeczku/calibre-web/raw/master/library/metadata.db 13 | ``` 14 | -------------------------------------------------------------------------------- /quadlets/calibre-web/calibre-web-config.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=calibre-web-config 3 | 4 | -------------------------------------------------------------------------------- /quadlets/calibre-web/calibre-web-data.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=calibre-web-data 3 | 4 | -------------------------------------------------------------------------------- /quadlets/calibre-web/calibre-web.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=calibre-web 3 | 4 | 5 | [Service] 6 | Restart=on-failure 7 | TimeoutStartSec=900 8 | 9 | [Install] 10 | WantedBy=default.target 11 | 12 | [Container] 13 | Image=lscr.io/linuxserver/calibre-web:latest 14 | ContainerName=calibre-web 15 | HostName=calibre-web 16 | 17 | PublishPort=8083 18 | 19 | Volume=/volumes/books:/books 20 | Volume=calibre-web-config:/config 21 | Volume=calibre-config:/database 22 | 23 | Environment=TZ=Etc/UTC 24 | 25 | 26 | -------------------------------------------------------------------------------- /quadlets/calibre/calibre.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Ebook manager 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=lscr.io/linuxserver/calibre:latest 13 | ContainerName=calibre 14 | 15 | 
Network=calibre.network 16 | HostName=calibre 17 | PublishPort=8080 18 | 19 | Volume=calibre-config:/config 20 | 21 | Environment=TZ=Etc/UTC 22 | -------------------------------------------------------------------------------- /quadlets/calibre/calibre.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/calibre/config.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=calibre-config 3 | -------------------------------------------------------------------------------- /quadlets/chartdb/chartdb.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=ChartDB diagramming editor 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=ghcr.io/chartdb/chartdb 13 | ContainerName=chartdb 14 | 15 | Network=chartdb.network 16 | HostName=chartdb 17 | PublishPort=8080:80 18 | 19 | Secret=openai-api-key,type=env,target=OPENAI_API_KEY 20 | -------------------------------------------------------------------------------- /quadlets/chartdb/chartdb.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/checkmate/checkmate-mongo.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Checkmate mongodb 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/bluewaveuptime/uptime_database_mongo:latest 13 | ContainerName=checkmate-mongodb 14 | AutoUpdate=registry 15 | 16 | Network=checkmate.network 17 | HostName=checkmate-mongodb 18 
| PublishPort=27017:27017 19 | 20 | Volume=checkmate-mongodb:/data/db 21 | 22 | Exec=mongod --quiet 23 | -------------------------------------------------------------------------------- /quadlets/checkmate/checkmate-redis.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Checkmate Redis 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/bluewaveuptime/uptime_redis:latest 13 | ContainerName=checkmate-redis 14 | AutoUpdate=registry 15 | 16 | Network=checkmate.network 17 | HostName=checkmate-redis 18 | PublishPort=6379:6379 19 | 20 | Volume=checkmate-redis:/data 21 | -------------------------------------------------------------------------------- /quadlets/checkmate/checkmate-server.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Checkmate server 3 | Requires=checkmate-mongodb.service 4 | Requires=checkmate-redis.service 5 | After=checkmate-mongodb.service 6 | After=checkmate-redis.service 7 | 8 | [Service] 9 | Restart=on-failure 10 | TimeoutStartSec=900 11 | 12 | [Install] 13 | WantedBy=default.target 14 | 15 | [Container] 16 | Image=docker.io/bluewaveuptime/uptime_server:latest 17 | ContainerName=checkmate-server 18 | AutoUpdate=registry 19 | 20 | Network=checkmate.network 21 | HostName=checkmate-server 22 | PublishPort=5000:5000 23 | 24 | Volume=%t/podman/podman.sock:/run/user/1000/podman/podman.sock:ro 25 | 26 | Environment=REDIS_HOST=checkmate-redis 27 | Environment=DB_CONNECTION_STRING=mongodb://checkmate-mongodb:27017/uptime_db 28 | -------------------------------------------------------------------------------- /quadlets/checkmate/checkmate.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Checkmate 3 | Requires=checkmate-server.service 4 | 
After=checkmate-server.service 5 | 6 | [Service] 7 | Restart=on-failure 8 | TimeoutStartSec=900 9 | 10 | [Install] 11 | WantedBy=default.target 12 | 13 | [Container] 14 | Image=docker.io/bluewaveuptime/uptime_client:latest 15 | ContainerName=checkmate 16 | AutoUpdate=registry 17 | 18 | Network=checkmate.network 19 | HostName=checkmate 20 | PublishPort=80:80 21 | PublishPort=443:443 22 | 23 | Environment=UPTIME_APP_API_BASE_URL=http://localhost:5000/api/v1 24 | -------------------------------------------------------------------------------- /quadlets/checkmate/checkmate.network: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Checkmate network 3 | 4 | [Network] 5 | NetworkName=checkmate 6 | 7 | -------------------------------------------------------------------------------- /quadlets/checkmate/checkmate.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=checkmate-mongodb 3 | VolumeName=checkmate-redis 4 | 5 | -------------------------------------------------------------------------------- /quadlets/dashdot/dashdot-nvidia.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=dashdot-nvidia 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/mauricenino/dashdot:nvidia 13 | ContainerName=dashdot-nvidia 14 | AutoUpdate=registry 15 | 16 | Network=dashdot.network 17 | HostName=dashdot 18 | PublishPort=3001:3001 19 | 20 | ## FIXME: compose uses super weird syntax to find the gpu instead of mapping it directly 21 | # AddDevice=/dev/dri/renderD129:/dev/dri/renderD129 22 | 23 | Volume=/:/mnt/host:ro 24 | 25 | EnvironmentFile=dashdot.env 26 | 27 | -------------------------------------------------------------------------------- /quadlets/dashdot/dashdot.container: 
-------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=dashdot 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/mauricenino/dashdot 13 | ContainerName=dashdot 14 | AutoUpdate=registry 15 | 16 | Network=dashdot.network 17 | HostName=dashdot 18 | PublishPort=3001:3001 19 | 20 | Volume=/:/mnt/host:ro 21 | 22 | EnvironmentFile=dashdot.env 23 | 24 | -------------------------------------------------------------------------------- /quadlets/dashdot/dashdot.env: -------------------------------------------------------------------------------- 1 | # basic 2 | DASHDOT_WIDGET_LIST=os,cpu,storage,ram,network 3 | DASHDOT_PORT= 4 | DASHDOT_PAGE_TITLE= 5 | DASHDOT_DISABLE_INTEGRATIONS= 6 | DASHDOT_SHOW_DASH_VERSION= 7 | DASHDOT_USE_IMPERIAL= 8 | DASHDOT_ALWAYS_SHOW_PERCENTAGES= 9 | 10 | # server 11 | DASHDOT_OS_LABEL_LIST= 12 | DASHDOT_SHOW_HOST= 13 | DASHDOT_CUSTOM_HOST= 14 | 15 | ## styles 16 | DASHDOT_OS_WIDGET_GROW= 17 | DASHDOT_OS_WIDGET_MIN_WIDTH= 18 | 19 | ## overrides 20 | DASHDOT_OVERRIDE_OS= 21 | DASHDOT_OVERRIDE_ARCH= 22 | 23 | # cpu 24 | DASHDOT_CPU_LABEL_LIST= 25 | DASHDOT_ENABLE_CPU_TEMPS= 26 | DASHDOT_CPU_TEMPS_MODE= 27 | DASHDOT_CPU_CORES_TOGGLE_MODE= 28 | 29 | ## styles 30 | DASHDOT_CPU_WIDGET_GROW= 31 | DASHDOT_CPU_WIDGET_MIN_WIDTH= 32 | DASHDOT_CPU_SHOWN_DATAPOINTS= 33 | DASHDOT_CPU_POLL_INTERVAL= 34 | 35 | ## overrides 36 | DASHDOT_OVERRIDE_CPU_BRAND= 37 | DASHDOT_OVERRIDE_CPU_MODEL= 38 | DASHDOT_OVERRIDE_CPU_CORES= 39 | DASHDOT_OVERRIDE_CPU_THREADS= 40 | DASHDOT_OVERRIDE_CPU_FREQUENCY= 41 | 42 | # storage 43 | DASHDOT_STORAGE_LABEL_LIST= 44 | DASHDOT_FS_DEVICE_FILTER= 45 | DASHDOT_FS_TYPE_FILTER= 46 | DASHDOT_FS_VIRTUAL_MOUNTS= 47 | 48 | ## styles 49 | DASHDOT_STORAGE_WIDGET_ITEMS_PER_PAGE= 50 | DASHDOT_STORAGE_WIDGET_GROW= 51 | DASHDOT_STORAGE_WIDGET_MIN_WIDTH= 52 | DASHDOT_STORAGE_POLL_INTERVAL= 53 | 54 
| ## overrides 55 | DASHDOT_OVERRIDE_STORAGE_BRANDS= 56 | DASHDOT_OVERRIDE_STORAGE_SIZES= 57 | DASHDOT_OVERRIDE_STORAGE_TYPES= 58 | 59 | # ram 60 | DASHDOT_RAM_LABEL_LIST= 61 | 62 | ## styles 63 | DASHDOT_RAM_WIDGET_GROW= 64 | DASHDOT_RAM_WIDGET_MIN_WIDTH= 65 | DASHDOT_RAM_SHOWN_DATAPOINTS= 66 | DASHDOT_RAM_POLL_INTERVAL= 67 | 68 | ## overrides 69 | DASHDOT_OVERRIDE_RAM_BRAND= 70 | DASHDOT_OVERRIDE_RAM_SIZE= 71 | DASHDOT_OVERRIDE_RAM_TYPE= 72 | DASHDOT_OVERRIDE_RAM_FREQUENCY= 73 | 74 | # network 75 | DASHDOT_NETWORK_LABEL_LIST= 76 | DASHDOT_ACCEPT_OOKLA_EULA= 77 | DASHDOT_USE_NETWORK_INTERFACE= 78 | DASHDOT_SPEED_TEST_FROM_PATH= 79 | DASHDOT_NETWORK_SPEED_AS_BYTES= 80 | 81 | ## styles 82 | DASHDOT_SPEED_TEST_INTERVAL= 83 | DASHDOT_SPEED_TEST_INTERVAL_CRON= 84 | DASHDOT_NETWORK_WIDGET_GROW= 85 | DASHDOT_NETWORK_WIDGET_MIN_WIDTH= 86 | DASHDOT_NETWORK_POLL_INTERVAL= 87 | 88 | ## overrides 89 | DASHDOT_OVERRIDE_NETWORK_TYPE= 90 | DASHDOT_OVERRIDE_NETWORK_SPEED_UP= 91 | DASHDOT_OVERRIDE_NETWORK_SPEED_DOWN= 92 | DASHDOT_OVERRIDE_NETWORK_INTERFACE_SPEED= 93 | DASHDOT_OVERRIDE_NETWORK_PUBLIC_IP= 94 | 95 | # gpu 96 | DASHDOT_GPU_LABEL_LIST= 97 | 98 | ## styles 99 | DASHDOT_GPU_WIDGET_GROW= 100 | DASHDOT_GPU_WIDGET_MIN_WIDTH= 101 | DASHDOT_GPU_SHOWN_DATAPOINTS= 102 | DASHDOT_GPU_POLL_INTERVAL= 103 | 104 | ## overrides 105 | DASHDOT_OVERRIDE_GPU_BRANDS= 106 | DASHDOT_OVERRIDE_GPU_MODELS= 107 | DASHDOT_OVERRIDE_GPU_MEMORIES= 108 | 109 | 110 | -------------------------------------------------------------------------------- /quadlets/dashdot/dashdot.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/dashy/dashy.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Dashboard 3 | After=caddy.service 4 | 5 | [Service] 6 | Restart=on-failure 7 | TimeoutStartSec=900 8 | 9 | 
[Install] 10 | WantedBy=default.target 11 | 12 | [Container] 13 | Image=docker.io/lissy93/dashy:$dashy_version 14 | ContainerName=dashy 15 | AutoUpdate=registry 16 | 17 | Network=dashy.network 18 | HostName=dashy 19 | 20 | Volume=./user-data:/app/user-data 21 | 22 | Environment=NODE_ENV=production 23 | -------------------------------------------------------------------------------- /quadlets/dashy/dashy.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/dashy/user-data/conf.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Page meta info, like heading, footer text and nav links 3 | pageInfo: 4 | title: Dashy 5 | description: Welcome to your new dashboard! 6 | navLinks: 7 | - title: GitHub 8 | path: https://github.com/Lissy93/dashy 9 | - title: Documentation 10 | path: https://dashy.to/docs 11 | 12 | # Optional app settings and configuration 13 | appConfig: 14 | theme: colorful 15 | 16 | # Main content - An array of sections, each containing an array of items 17 | sections: 18 | - name: Getting Started 19 | icon: fas fa-rocket 20 | items: 21 | - title: Dashy Live 22 | description: Development and project management links for Dashy 23 | icon: https://i.ibb.co/qWWpD0v/astro-dab-128.png 24 | url: https://live.dashy.to/ 25 | target: newtab 26 | - title: GitHub 27 | description: Source Code, Issues and Pull Requests 28 | url: https://github.com/lissy93/dashy 29 | icon: favicon 30 | - title: Docs 31 | description: Configuring & Usage Documentation 32 | provider: Dashy.to 33 | icon: far fa-book 34 | url: https://dashy.to/docs 35 | - title: Showcase 36 | description: See how others are using Dashy 37 | url: https://github.com/Lissy93/dashy/blob/master/docs/showcase.md 38 | icon: far fa-grin-hearts 39 | - title: Config Guide 40 | description: See full list of configuration options
41 | url: https://github.com/Lissy93/dashy/blob/master/docs/configuring.md 42 | icon: fas fa-wrench 43 | - title: Support 44 | description: Get help with Dashy, raise a bug, or get in contact 45 | url: https://github.com/Lissy93/dashy/blob/master/.github/SUPPORT.md 46 | icon: far fa-hands-helping 47 | 48 | -------------------------------------------------------------------------------- /quadlets/filebrowser/filebrowser.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Filebrowser Quadlet 3 | 4 | [Service] 5 | Restart=always 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=multi-user.target default.target 10 | 11 | [Container] 12 | Image=docker.io/hurlenko/filebrowser:latest 13 | ContainerName=filebrowser 14 | 15 | Network=filebrowser.network 16 | Hostname=filebrowser 17 | 18 | Volume=/path/to/what/you/want/to/share:/data:z 19 | Volume=fb-config:/config:z 20 | Volume=fb-branding:/branding:z 21 | 22 | PublishPort=8008:8080 23 | -------------------------------------------------------------------------------- /quadlets/filebrowser/filebrowser.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/filebrowser/filebrowser.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=fb-config 3 | VolumeName=fb-branding -------------------------------------------------------------------------------- /quadlets/filestash/filestash-wopi.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Filestash wopi 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/collabora/code:24.04.10.2.1 13 | ContainerName=filestash-wopi 14 | AutoUpdate=registry 
15 | 16 | Network=filestash.network 17 | HostName=filestash-wopi 18 | PublishPort=9980:9980 19 | 20 | Environment=extra_params=--o:ssl.enable=false 21 | Environment=aliasgroup1="https://.*:443" 22 | 23 | Exec=bash -c '/start-collabora-online.sh cool' 24 | -------------------------------------------------------------------------------- /quadlets/filestash/filestash.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Filestash 3 | Wants=filestash-wopi.service 4 | 5 | [Service] 6 | Restart=on-failure 7 | TimeoutStartSec=900 8 | 9 | [Install] 10 | WantedBy=default.target 11 | 12 | [Container] 13 | Image=docker.io/machines/filestash:latest 14 | ContainerName=filestash 15 | AutoUpdate=registry 16 | 17 | Network=filestash.network 18 | HostName=filestash 19 | PublishPort=8334:8334 20 | 21 | Volume=filestash:/app/data/state 22 | 23 | Environment=APPLICATION_URL=https://filestash.example.com 24 | Environment=CANARY=true 25 | Environment=OFFICE_URL=http://filestash-wopi:9980 26 | Environment=OFFICE_FILESTASH_URL=http://filestash:8334 27 | Environment=OFFICE_REWRITE_URL=http://127.0.0.1:9980 28 | 29 | -------------------------------------------------------------------------------- /quadlets/filestash/filestash.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/forgejo/forgejo-data.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=forgejo-data 3 | -------------------------------------------------------------------------------- /quadlets/forgejo/forgejo.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Forgejo 3 | After= 4 | 5 | [Service] 6 | Restart=on-failure 7 | TimeoutStartSec=900 8 | 9 | [Install] 10 | WantedBy=default.target 11 
| 12 | [Container] 13 | Image=codeberg.org/forgejo/forgejo:10 14 | ContainerName=forgejo 15 | AutoUpdate=registry 16 | 17 | Network=forgejo.network 18 | HostName=forgejo 19 | PublishPort=222:22 20 | PublishPort=3000:3000 21 | 22 | Volume=forgejo-data:/data 23 | -------------------------------------------------------------------------------- /quadlets/forgejo/forgejo.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/foundryvtt/foundryvtt.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Foundry VTT Quadlet 3 | 4 | [Service] 5 | Restart=always 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/felddy/foundryvtt:release 13 | ContainerName=foundryvtt 14 | AutoUpdate=registry 15 | 16 | Network=foundryvtt.network 17 | HostName=foundryvtt 18 | Volume=foundryvtt-data:/data 19 | 20 | PublishPort=30000:30000 21 | 22 | Environment=TIMEZONE=Etc/UTC 23 | Secret=foundry-password,type=env,target=FOUNDRY_PASSWORD 24 | Environment=FOUNDRY_USERNAME= 25 | Secret=foundry-admin-key,type=env,target=FOUNDRY_ADMIN_KEY 26 | Secret=foundry-license-key,type=env,target=FOUNDRY_LICENSE_KEY 27 | Environment=FOUNDRY_HOT_RELOAD=false 28 | Environment=CONTAINER_PRESERVE_CONFIG=true 29 | Environment=CONTAINER_CACHE="/data/container_cache" 30 | -------------------------------------------------------------------------------- /quadlets/foundryvtt/foundryvtt.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/foundryvtt/foundryvtt.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 |
VolumeName=foundryvtt-data -------------------------------------------------------------------------------- /quadlets/freshrss/fivefilters.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Five Filters Quadlet 3 | 4 | [Service] 5 | Restart=always 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/heussd/fivefilters-full-text-rss:latest 13 | ContainerName=fivefilters 14 | 15 | Network=freshrss.network 16 | HostName=fivefilters 17 | PublishPort=5000:80 18 | 19 | Secret=ftr-admin-password,type=env,target=FTR_ADMIN_PASSWORD 20 | -------------------------------------------------------------------------------- /quadlets/freshrss/freshrss.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=FreshRSS Quadlet 3 | Requires=fivefilters.service 4 | After=fivefilters.service 5 | 6 | [Service] 7 | Restart=always 8 | TimeoutStartSec=900 9 | 10 | [Install] 11 | WantedBy=default.target 12 | 13 | [Container] 14 | Image=docker.io/linuxserver/freshrss:latest 15 | ContainerName=freshrss 16 | 17 | Network=freshrss.network 18 | HostName=freshrss 19 | PublishPort=4422:80 20 | 21 | Volume=freshrss-config:/config 22 | 23 | Environment=TZ=Etc/UTC 24 | -------------------------------------------------------------------------------- /quadlets/freshrss/freshrss.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/freshrss/freshrss.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=freshrss-config -------------------------------------------------------------------------------- /quadlets/gaseous/gaseous-mariadb.container: 
-------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Gaseous MariaDB 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/mariadb:latest 13 | ContainerName=gaseous-mariadb 14 | AutoUpdate=registry 15 | 16 | Network=gaseous.network 17 | HostName=gaseous-mariadb 18 | 19 | Volume=gaseous-mariadb:/var/lib/mysql 20 | 21 | Environment=MARIADB_ROOT_PASSWORD=gaseous 22 | Environment=MARIADB_USER=gaseous 23 | Environment=MARIADB_PASSWORD=gaseous 24 | -------------------------------------------------------------------------------- /quadlets/gaseous/gaseous.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Gaseous ROM manager 3 | Requires=gaseous-mariadb.service 4 | After=gaseous-mariadb.service 5 | 6 | [Service] 7 | Restart=on-failure 8 | TimeoutStartSec=900 9 | 10 | [Install] 11 | WantedBy=default.target 12 | 13 | [Container] 14 | Image=docker.io/gaseousgames/gaseousserver:latest 15 | ContainerName=gaseous 16 | AutoUpdate=registry 17 | 18 | Network=gaseous.network 19 | HostName=gaseous 20 | PublishPort=5198:80 21 | 22 | Volume=gaseous:/root/.gaseous-server 23 | 24 | Environment=TZ=Etc/UTC 25 | Environment=dbhost=gaseous-mariadb 26 | Environment=dbuser=root 27 | Environment=dbpass=gaseous 28 | Environment=igdbclientid= 29 | Environment=igdbclientsecret= 30 | -------------------------------------------------------------------------------- /quadlets/gaseous/gaseous.network: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Gaseous ROMs network 3 | 4 | [Network] 5 | NetworkName=gaseous 6 | 7 | -------------------------------------------------------------------------------- /quadlets/gaseous/gaseous.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | 
VolumeName=gaseous 3 | VolumeName=gaseous-mariadb 4 | 5 | -------------------------------------------------------------------------------- /quadlets/glance/assets/user.css: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redbeardymcgee/podbox/951b282f7867c4a3317c6d56a234ba9628319f08/quadlets/glance/assets/user.css -------------------------------------------------------------------------------- /quadlets/glance/config/glance.yml: -------------------------------------------------------------------------------- 1 | server: 2 | assets-path: /app/assets 3 | 4 | theme: 5 | # Note: assets are cached by the browser, changes to the CSS file 6 | # will not be reflected until the browser cache is cleared (Ctrl+F5) 7 | custom-css-file: /assets/user.css 8 | 9 | pages: 10 | # It's not necessary to create a new file for each page and include it, you can simply 11 | # put its contents here, though multiple pages are easier to manage when separated 12 | !include: home.yml 13 | -------------------------------------------------------------------------------- /quadlets/glance/config/home.yml: -------------------------------------------------------------------------------- 1 | - name: Home 2 | # Optionally, if you only have a single page you can hide the desktop navigation for a cleaner look 3 | # hide-desktop-navigation: true 4 | columns: 5 | - size: small 6 | widgets: 7 | - type: calendar 8 | first-day-of-week: monday 9 | 10 | - type: rss 11 | limit: 10 12 | collapse-after: 3 13 | cache: 12h 14 | feeds: 15 | - url: https://selfh.st/rss/ 16 | title: selfh.st 17 | - url: https://ciechanow.ski/atom.xml 18 | - url: https://www.joshwcomeau.com/rss.xml 19 | title: Josh Comeau 20 | - url: https://samwho.dev/rss.xml 21 | - url: https://ishadeed.com/feed.xml 22 | title: Ahmad Shadeed 23 | 24 | - type: twitch-channels 25 | channels: 26 | - theprimeagen 27 | - j_blow 28 | - piratesoftware 29 | - cohhcarnage 30 | - christitustech 
31 | - EJ_SA 32 | 33 | - size: full 34 | widgets: 35 | - type: group 36 | widgets: 37 | - type: hacker-news 38 | - type: lobsters 39 | 40 | - type: videos 41 | channels: 42 | - UCXuqSBlHAE6Xw-yeJA0Tunw # Linus Tech Tips 43 | - UCR-DXc1voovS8nhAvccRZhg # Jeff Geerling 44 | - UCsBjURrPoezykLs9EqgamOA # Fireship 45 | - UCBJycsmduvYEL83R_U4JriQ # Marques Brownlee 46 | - UCHnyfMqiRRG1u-2MsSQLbXA # Veritasium 47 | 48 | - type: group 49 | widgets: 50 | - type: reddit 51 | subreddit: technology 52 | show-thumbnails: true 53 | - type: reddit 54 | subreddit: selfhosted 55 | show-thumbnails: true 56 | 57 | - size: small 58 | widgets: 59 | - type: weather 60 | location: London, United Kingdom 61 | units: metric # alternatively "imperial" 62 | hour-format: 12h # alternatively "24h" 63 | # Optionally hide the location from being displayed in the widget 64 | # hide-location: true 65 | 66 | - type: markets 67 | markets: 68 | - symbol: SPY 69 | name: S&P 500 70 | - symbol: BTC-USD 71 | name: Bitcoin 72 | - symbol: NVDA 73 | name: NVIDIA 74 | - symbol: AAPL 75 | name: Apple 76 | - symbol: MSFT 77 | name: Microsoft 78 | 79 | - type: releases 80 | cache: 1d 81 | # Without authentication the Github API allows for up to 60 requests per hour. You can create a 82 | # read-only token from your Github account settings and use it here to increase the limit. 83 | # token: ... 
84 | repositories: 85 | - glanceapp/glance 86 | - go-gitea/gitea 87 | - immich-app/immich 88 | - syncthing/syncthing 89 | -------------------------------------------------------------------------------- /quadlets/glance/glance.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Glance dashboard 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/glanceapp/glance 13 | ContainerName=glance 14 | AutoUpdate=registry 15 | 16 | Network=glance.network 17 | HostName=glance 18 | PublishPort=8080:8080 19 | 20 | Volume=glance-config:/app/config 21 | 22 | -------------------------------------------------------------------------------- /quadlets/glance/glance.env: -------------------------------------------------------------------------------- 1 | # Variables defined here will be available to use anywhere in the config with the syntax ${MY_SECRET_TOKEN} 2 | # Note: making changes to this file requires re-running docker compose up 3 | MY_SECRET_TOKEN=123456 4 | -------------------------------------------------------------------------------- /quadlets/glance/glance.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=glance-config 3 | -------------------------------------------------------------------------------- /quadlets/glances/glances.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=System monitoring at a glance 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/nicolargo/glances:$version_tag 13 | ContainerName=glances 14 | AutoUpdate=registry 15 | 16 | Network=glances.network 17 | HostName=glances 18 | 19 | Volume=%t/podman/podman.sock:/run/user/1000/podman/podman.sock:ro 20 
| Volume=/etc/os-release:/etc/os-release:ro 21 | Volume=./glances.conf:/glances/conf/glances.conf 22 | 23 | Environment=GLANCES_OPT="-C /glances/conf/glances.conf -w" 24 | 25 | -------------------------------------------------------------------------------- /quadlets/glances/glances.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/gluetun/config.toml: -------------------------------------------------------------------------------- 1 | [[roles]] 2 | name = "qbittorrent" 3 | # Define a list of routes with the syntax "Http-Method /path" 4 | routes = ["GET /v1/openvpn/portforwarded"] 5 | # Define an authentication method with its parameters 6 | auth = "basic" 7 | username = "myusername" 8 | password = "mypassword" 9 | -------------------------------------------------------------------------------- /quadlets/gluetun/gluetun.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=gluetun VPN 3 | After=protonvpn-network.service 4 | PartOf=protonvpn-network.service 5 | 6 | [Service] 7 | Restart=on-failure 8 | TimeoutStartSec=900 9 | 10 | [Install] 11 | WantedBy=default.target 12 | 13 | [Container] 14 | Image=docker.io/qmcgaw/gluetun 15 | ContainerName=gluetun 16 | HostName=gluetun 17 | AutoUpdate=registry 18 | AddCapability=NET_ADMIN 19 | AddDevice=/dev/net/tun:/dev/net/tun 20 | 21 | Volume=./config.toml:/gluetun/auth/config.toml 22 | 23 | Environment=TZ=Etc/UTC 24 | Environment=UPDATER_PERIOD=24h 25 | Environment=UPDATER_VPN_SERVICE_PROVIDERS=protonvpn 26 | Environment=VPN_SERVICE_PROVIDER=protonvpn 27 | # The trailing `+pmp` is for port forwarding 28 | Environment=OPENVPN_USER=${openvpn_user}+pmp 29 | Environment=OPENVPN_PASSWORD=$openvpn_password 30 | Environment=OPENVPN_CIPHERS=aes-256-gcm 31 | Environment=SERVER_COUNTRIES=$countries 32 | 
Environment=VPN_PORT_FORWARDING=on 33 | Environment=FIREWALL_DEBUG=on 34 | 35 | -------------------------------------------------------------------------------- /quadlets/graphite/graphite.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Graphite monitoring 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/graphiteapp/graphite-statsd 13 | ContainerName=graphite 14 | AutoUpdate=registry 15 | 16 | Network=graphite.network 17 | HostName=graphite 18 | PublishPort=80:80 19 | PublishPort=2003-2004:2003-2004 20 | PublishPort=2023-2024:2023-2024 21 | PublishPort=8125:8125/udp 22 | PublishPort=8125:8125 23 | 24 | Volume=graphite-conf:/opt/graphite/conf 25 | 26 | EnvironmentFile=graphite.env 27 | -------------------------------------------------------------------------------- /quadlets/graphite/graphite.env: -------------------------------------------------------------------------------- 1 | EXAMPLE=value 2 | -------------------------------------------------------------------------------- /quadlets/graphite/graphite.network: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Graphite network 3 | 4 | [Network] 5 | NetworkName=graphite 6 | 7 | -------------------------------------------------------------------------------- /quadlets/graphite/graphite.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=graphite-conf 3 | 4 | -------------------------------------------------------------------------------- /quadlets/graylog/graylog-datanode.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Graylog datanode 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 
| [Container] 12 | Image=docker.io/graylog/graylog-datanode:6.1 13 | ContainerName=graylog-datanode 14 | AutoUpdate=registry 15 | 16 | Network=graylog.network 17 | HostName=graylog-datanode 18 | PublishPort=8999:8999 19 | PublishPort=9200:9200 20 | PublishPort=9300:9300 21 | 22 | Volume=graylog-datanode:/var/lib/graylog-datanode 23 | 24 | Environment=GRAYLOG_DATANODE_ID_FILE=/var/lib/graylog-datanode/node-id 25 | Environment=GRAYLOG_DATANODE_MONGODB_URI=mongodb://graylog-db:27017/graylog 26 | 27 | Secret=graylog-password-secret,type=env,target=GRAYLOG_DATANODE_PASSWORD_SECRET 28 | 29 | -------------------------------------------------------------------------------- /quadlets/graylog/graylog-db.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Graylog MongoDB 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/mongo:6.0 13 | ContainerName=graylog-db 14 | AutoUpdate=registry 15 | 16 | Network=graylog.network 17 | HostName=graylog-db 18 | 19 | Volume=graylog-db-data:/data/db 20 | Volume=graylog-db-config:/data/configdb 21 | 22 | -------------------------------------------------------------------------------- /quadlets/graylog/graylog.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Graylog 3 | Wants=graylog-db.service 4 | Wants=graylog-datanode.service 5 | After=graylog-db.service 6 | After=graylog-datanode.service 7 | 8 | [Service] 9 | Restart=on-failure 10 | TimeoutStartSec=900 11 | 12 | [Install] 13 | WantedBy=default.target 14 | 15 | [Container] 16 | Image=docker.io/graylog/graylog:6.1 17 | ContainerName=graylog 18 | AutoUpdate=registry 19 | 20 | Network=graylog.network 21 | HostName=graylog 22 | PublishPort=5044:5044/tcp 23 | PublishPort=5140:5140/udp 24 | PublishPort=5140:5140/tcp 25 | PublishPort=5555:5555/tcp 26 | PublishPort=5555:5555/udp 27 | 
PublishPort=9000:9000/tcp 28 | PublishPort=12201:12201/tcp 29 | PublishPort=12201:12201/udp 30 | PublishPort=13301:13301/tcp 31 | PublishPort=13302:13302/tcp 32 | 33 | Volume=graylog-data:/usr/share/graylog/data/data 34 | 35 | Environment=GRAYLOG_NODE_ID_FILE=/usr/share/graylog/data/data/node-id 36 | Environment=GRAYLOG_HTTP_BIND_ADDRESS=0.0.0.0:9000 37 | Environment=GRAYLOG_HTTP_EXTERNAL_URI=http://localhost:9000/ 38 | Environment=GRAYLOG_MONGODB_URI=mongodb://graylog-db:27017/graylog 39 | 40 | Secret=graylog-password-secret,type=env,target=GRAYLOG_PASSWORD_SECRET 41 | Secret=graylog-root-password-sha2,type=env,target=GRAYLOG_ROOT_PASSWORD_SHA2 42 | 43 | Entrypoint=/usr/bin/tini 44 | Exec=-- /docker-entrypoint.sh 45 | 46 | -------------------------------------------------------------------------------- /quadlets/graylog/graylog.network: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Graylog network 3 | 4 | [Network] 5 | NetworkName=graylog 6 | Subnet=172.25.1.0/29 7 | Gateway=172.25.1.1 8 | -------------------------------------------------------------------------------- /quadlets/grocy/grocy.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Grocy grocery management 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=lscr.io/linuxserver/grocy 13 | ContainerName=grocy 14 | AutoUpdate=registry 15 | 16 | Network=grocy.network 17 | HostName=grocy 18 | PublishPort=9283:80 19 | 20 | Volume=grocy-config:/config 21 | 22 | Environment=TZ=Etc/UTC 23 | -------------------------------------------------------------------------------- /quadlets/grocy/grocy.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- 
/quadlets/grocy/grocy.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=grocy-config 3 | 4 | -------------------------------------------------------------------------------- /quadlets/healthchecks/healthchecks-postgres.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Healthchecks postgres 3 | 4 | 5 | 6 | [Service] 7 | Restart=on-failure 8 | TimeoutStartSec=900 9 | 10 | [Install] 11 | WantedBy=default.target 12 | 13 | [Container] 14 | Image=docker.io/postgres:16 15 | ContainerName=healthchecks-postgres 16 | AutoUpdate=registry 17 | 18 | Network=healthchecks.network 19 | HostName=healthchecks-postgres 20 | PublishPort=5432:5432 21 | 22 | Volume=healthchecks-postgres:/var/lib/postgresql/data 23 | 24 | Environment=POSTGRES_DB=healthchecks 25 | 26 | Secret=healthchecks-postgres-password,type=env,target=POSTGRES_PASSWORD 27 | -------------------------------------------------------------------------------- /quadlets/healthchecks/healthchecks.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Healthchecks 3 | Requires=healthchecks-postgres.service 4 | After=healthchecks-postgres.service 5 | 6 | [Service] 7 | Restart=on-failure 8 | TimeoutStartSec=900 9 | 10 | [Install] 11 | WantedBy=default.target 12 | 13 | [Container] 14 | Image=docker.io/healthchecks/healthchecks:latest 15 | ContainerName=healthchecks 16 | AutoUpdate=registry 17 | 18 | Network=healthchecks.network 19 | HostName=healthchecks 20 | PublishPort=8000:8000 21 | 22 | EnvironmentFile=healthchecks.env 23 | 24 | Exec=uwsgi /opt/healthchecks/docker/uwsgi.ini 25 | -------------------------------------------------------------------------------- /quadlets/healthchecks/healthchecks.env: 
-------------------------------------------------------------------------------- 1 | ALLOWED_HOSTS=localhost 2 | APPRISE_ENABLED=False 3 | DB=postgres 4 | DB_CONN_MAX_AGE=0 5 | DB_HOST=healthchecks-postgres 6 | DB_NAME=healthchecks 7 | # Use Secret=... 8 | # DB_PASSWORD=fixme-postgres-password 9 | DB_PORT=5432 10 | DB_SSLMODE=prefer 11 | DB_TARGET_SESSION_ATTRS=read-write 12 | DB_USER=postgres 13 | DEBUG=False 14 | DEFAULT_FROM_EMAIL=healthchecks@example.org 15 | DISCORD_CLIENT_ID= 16 | DISCORD_CLIENT_SECRET= 17 | EMAIL_HOST= 18 | EMAIL_HOST_PASSWORD= 19 | EMAIL_HOST_USER= 20 | EMAIL_PORT=587 21 | EMAIL_USE_TLS=True 22 | EMAIL_USE_VERIFICATION=True 23 | INTEGRATIONS_ALLOW_PRIVATE_IPS=False 24 | LINENOTIFY_CLIENT_ID= 25 | LINENOTIFY_CLIENT_SECRET= 26 | MASTER_BADGE_LABEL=Mychecks 27 | MATRIX_ACCESS_TOKEN= 28 | MATRIX_HOMESERVER= 29 | MATRIX_USER_ID= 30 | MATTERMOST_ENABLED=True 31 | MSTEAMS_ENABLED=True 32 | OPSGENIE_ENABLED=True 33 | PAGERTREE_ENABLED=True 34 | PD_APP_ID= 35 | PD_ENABLED=True 36 | PING_BODY_LIMIT=10000 37 | PING_EMAIL_DOMAIN=localhost 38 | PING_ENDPOINT=http://localhost:8000/ping/ 39 | PROMETHEUS_ENABLED=True 40 | PUSHBULLET_CLIENT_ID= 41 | PUSHBULLET_CLIENT_SECRET= 42 | PUSHOVER_API_TOKEN= 43 | PUSHOVER_EMERGENCY_EXPIRATION=86400 44 | PUSHOVER_EMERGENCY_RETRY_DELAY=300 45 | PUSHOVER_SUBSCRIPTION_URL= 46 | REGISTRATION_OPEN=True 47 | REMOTE_USER_HEADER= 48 | ROCKETCHAT_ENABLED=True 49 | RP_ID= 50 | S3_ACCESS_KEY= 51 | S3_BUCKET= 52 | S3_ENDPOINT= 53 | S3_REGION= 54 | S3_SECRET_KEY= 55 | S3_TIMEOUT=60 56 | S3_SECURE=True 57 | SECRET_KEY=--- 58 | SHELL_ENABLED=False 59 | SIGNAL_CLI_SOCKET= 60 | SITE_LOGO_URL= 61 | SITE_NAME=Mychecks 62 | SITE_ROOT=http://localhost:8000 63 | SLACK_CLIENT_ID= 64 | SLACK_CLIENT_SECRET= 65 | SLACK_ENABLED=True 66 | # SMTPD_PORT= 67 | SPIKE_ENABLED=True 68 | TELEGRAM_BOT_NAME=ExampleBot 69 | TELEGRAM_TOKEN= 70 | TRELLO_APP_KEY= 71 | TWILIO_ACCOUNT= 72 | TWILIO_AUTH= 73 | TWILIO_FROM= 74 | TWILIO_USE_WHATSAPP=False 75 | USE_PAYMENTS=False 76 | 
VICTOROPS_ENABLED=True 77 | WEBHOOKS_ENABLED=True 78 | WHATSAPP_DOWN_CONTENT_SID= 79 | WHATSAPP_UP_CONTENT_SID= 80 | ZULIP_ENABLED=True 81 | -------------------------------------------------------------------------------- /quadlets/healthchecks/healthchecks.network: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Healthchecks network 3 | 4 | [Network] 5 | NetworkName=healthchecks 6 | 7 | -------------------------------------------------------------------------------- /quadlets/healthchecks/healthchecks.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=healthchecks-postgres 3 | 4 | -------------------------------------------------------------------------------- /quadlets/hoarder/hoarder-chrome.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Hoarder chrome browser 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/zenika/alpine-chrome 13 | ContainerName=hoarder-chrome 14 | AutoUpdate=registry 15 | 16 | Network=hoarder.network 17 | HostName=hoarder-chrome 18 | 19 | Exec=--no-sandbox --disable-gpu --disable-dev-shm-usage --remote-debugging-address=0.0.0.0 --remote-debugging-port=9222 --hide-scrollbars 20 | -------------------------------------------------------------------------------- /quadlets/hoarder/hoarder-meilisearch.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Hoarder requirement 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/getmeili/meilisearch:v1.6 13 | ContainerName=hoarder-meilisearch 14 | AutoUpdate=registry 15 | 16 | Network=hoarder.network 17 | HostName=hoarder-meilisearch 18 | 19 
| EnvironmentFile=hoarder.env 20 | 21 | Volume=hoarder-meilisearch:/meili_data 22 | -------------------------------------------------------------------------------- /quadlets/hoarder/hoarder.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Bookmark Manager 3 | 4 | 5 | [Service] 6 | Restart=on-failure 7 | TimeoutStartSec=900 8 | 9 | [Install] 10 | WantedBy=default.target 11 | 12 | [Container] 13 | Image=ghcr.io/hoarder-app/hoarder:release 14 | ContainerName=hoarder 15 | AutoUpdate=registry 16 | 17 | Network=hoarder.network 18 | HostName=hoarder 19 | PublishPort=3000:3000 20 | 21 | Volume=hoarder-data:/data 22 | 23 | EnvironmentFile=hoarder.env 24 | 25 | Secret=nextauth-secret,type=env,target=NEXTAUTH_SECRET 26 | Secret=meili-master-key,type=env,target=MEILI_MASTER_KEY 27 | Secret=openai-api-key,type=env,target=OPENAI_API_KEY 28 | -------------------------------------------------------------------------------- /quadlets/hoarder/hoarder.env: -------------------------------------------------------------------------------- 1 | # https://docs.hoarder.app/configuration/ 2 | DATA_DIR=/data 3 | # Change according to your domain 4 | NEXTAUTH_URL=http://localhost:3000 5 | #NEXTAUTH_SECRET= 6 | MEILI_ADDR=http://hoarder-meilisearch:7700 7 | #MEILI_MASTER_KEY= 8 | MEILI_NO_ANALYTICS=true 9 | #MAX_ASSET_SIZE_MB= 10 | #DISABLE_NEW_RELEASE_CHECK= 11 | 12 | # Authentication / Signup 13 | #DISABLE_SIGNUPS= 14 | #DISABLE_PASSWORD_AUTH= 15 | #OAUTH_WELLKNOWN_URL= 16 | #OAUTH_CLIENT_SECRET= 17 | #OAUTH_CLIENT_ID= 18 | #OAUTH_SCOPE= 19 | #OAUTH_PROVIDER_NAME= 20 | #OAUTH_ALLOW_DANGEROUS_EMAIL_ACCOUNT_LINKING= 21 | 22 | # Inference 23 | #OPENAI_API_KEY= 24 | #OPENAI_BASE_URL= 25 | #OLLAMA_BASE_URL= 26 | #OLLAMA_KEEP_ALIVE= 27 | #INFERENCE_TEXT_MODEL= 28 | #INFERENCE_IMAGE_MODEL= 29 | #INFERENCE_CONTEXT_LENGTH= 30 | #INFERENCE_LANG= 31 | #INFERENCE_JOB_TIMEOUT_SEC= 32 | 33 | # Crawler 34 | #CRAWLER_NUM_WORKERS= 35 | 
BROWSER_WEB_URL=http://hoarder-chrome:9222 36 | #BROWSER_WEBSOCKET_URL= 37 | #BROWSER_CONNECT_ONDEMAND= 38 | #CRAWLER_DOWNLOAD_BANNER_IMAGE= 39 | #CRAWLER_STORE_SCREENSHOT= 40 | #CRAWLER_FULL_PAGE_SCREENSHOT= 41 | #CRAWLER_FULL_PAGE_ARCHIVE= 42 | #CRAWLER_JOB_TIMEOUT_SEC= 43 | #CRAWLER_NAVIGATE_TIMEOUT_SEC= 44 | #CRAWLER_VIDEO_DOWNLOAD= 45 | #CRAWLER_VIDEO_DOWNLOAD_MAX_SIZE= 46 | #CRAWLER_VIDEO_DOWNLOAD_TIMEOUT_SEC= 47 | 48 | # OCR 49 | #OCR_CACHE_DIR= 50 | #OCR_LANGS= 51 | #OCR_CONFIDENCE_THRESHOLD= 52 | 53 | -------------------------------------------------------------------------------- /quadlets/hoarder/hoarder.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=hoarder-data 3 | VolumeName=hoarder-meilisearch 4 | -------------------------------------------------------------------------------- /quadlets/homarr/homarr.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Homarr 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=ghcr.io/ajnart/homarr:latest 13 | ContainerName=homarr 14 | 15 | Network=homarr.network 16 | HostName=homarr 17 | 18 | Volume=homarr-configs:/app/data/configs 19 | Volume=homarr-icons:/app/public/icons 20 | -------------------------------------------------------------------------------- /quadlets/homarr/homarr.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/homarr/homarr.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=homarr-configs 3 | VolumeName=homarr-data 4 | 5 | -------------------------------------------------------------------------------- /quadlets/homepage/homepage.container: 
-------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Homepage 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=ghcr.io/gethomepage/homepage:latest 13 | ContainerName=homepage 14 | AutoUpdate=registry 15 | 16 | Network=homepage.network 17 | HostName=homepage 18 | PublishPort=3000:3000 19 | 20 | Volume=homepage-config:/app/config 21 | 22 | Environment=HOMEPAGE_ALLOWED_HOSTS=homepage.example.com 23 | 24 | -------------------------------------------------------------------------------- /quadlets/homepage/homepage.network: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Homepage network 3 | 4 | [Network] 5 | NetworkName=homepage 6 | 7 | -------------------------------------------------------------------------------- /quadlets/homepage/homepage.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=homepage-config 3 | 4 | -------------------------------------------------------------------------------- /quadlets/homer/homer.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Homer 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/b4bz/homer 13 | ContainerName=homer 14 | AutoUpdate=registry 15 | 16 | Network=homer.network 17 | HostName=homer 18 | PublishPort=8080:8080 19 | 20 | Volume=homer-assets:/www/assets 21 | 22 | Environment=TZ=Etc/UTC 23 | -------------------------------------------------------------------------------- /quadlets/homer/homer.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- 
/quadlets/homer/homer.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=homer-assets 3 | -------------------------------------------------------------------------------- /quadlets/it-tools/it-tools.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=IT Tools Quadlet 3 | 4 | [Service] 5 | Restart=always 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/corentinth/it-tools:latest 13 | ContainerName=it-tools 14 | 15 | Network=it-tools.network 16 | HostName=it-tools 17 | PublishPort=8088:80 18 | -------------------------------------------------------------------------------- /quadlets/it-tools/it-tools.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/joplin/joplin-db.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Joplin DB Quadlet 3 | WantedBy=joplin.service 4 | 5 | [Service] 6 | Restart=always 7 | TimeoutStartSec=900 8 | 9 | [Install] 10 | WantedBy=default.target 11 | 12 | [Container] 13 | Image=docker.io/library/postgres:15 14 | ContainerName=joplin-db 15 | HostName=joplin-db 16 | 17 | Volume=joplindb-data:/var/lib/postgresql/data 18 | 19 | Environment=POSTGRES_USER=postgres 20 | Environment=POSTGRES_DB=joplin 21 | 22 | Secret=joplin-db-pw,type=env,target=POSTGRES_PASSWORD 23 | -------------------------------------------------------------------------------- /quadlets/joplin/joplin.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Joplin server 3 | 4 | [Service] 5 | Restart=always 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | 
Image=docker.io/joplin/server:latest 13 | ContainerName=joplin 14 | AutoUpdate=registry 15 | 16 | Network=joplin.network 17 | HostName=joplin 18 | PublishPort=22300:22300 19 | 20 | Environment=APP_BASE_URL=https://joplin.example.com 21 | Environment=APP_PORT=22300 22 | Environment=DB_CLIENT=pg 23 | Environment=POSTGRES_DATABASE=joplin 24 | Environment=POSTGRES_USER=postgres 25 | Environment=POSTGRES_PORT=5432 26 | Environment=POSTGRES_HOST=joplin-db 27 | 28 | Secret=joplin-db-pw,type=env,target=POSTGRES_PASSWORD 29 | -------------------------------------------------------------------------------- /quadlets/joplin/joplin.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/joplin/joplin.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=joplindb-data -------------------------------------------------------------------------------- /quadlets/kavita/kavita.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Ebook reader 3 | After=caddy.service 4 | 5 | [Service] 6 | Restart=on-failure 7 | TimeoutStartSec=900 8 | 9 | [Install] 10 | WantedBy=default.target 11 | 12 | [Container] 13 | Image=ghcr.io/kareadita/kavita:latest 14 | ContainerName=kavita 15 | AutoUpdate=registry 16 | 17 | Network=kavita.network 18 | HostName=kavita 19 | PublishPort=5000 20 | 21 | Volume=kavita-config:/kavita/config 22 | Volume=/volumes/books:/library 23 | 24 | Environment=TZ=Etc/UTC 25 | 26 | -------------------------------------------------------------------------------- /quadlets/kavita/kavita.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/kavita/kavita.volume: 
-------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=kavita-config 3 | -------------------------------------------------------------------------------- /quadlets/kibitzr/kibitzr.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Web assistant 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/peterdemin/kibitzr:latest 13 | ContainerName=kibitzr 14 | AutoUpdate=registry 15 | 16 | Network=kibitzr.network 17 | HostName=kibitzr 18 | 19 | Volume=kibitzr-root:/root 20 | 21 | # NOTE: Initialize a default kibitzr.yml and kibitzr-creds.yml 22 | #Exec=init 23 | Exec=run 24 | -------------------------------------------------------------------------------- /quadlets/kibitzr/kibitzr.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/kibitzr/kibitzr.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=kibitzr-root 3 | 4 | -------------------------------------------------------------------------------- /quadlets/komga/komga.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Komga comic reader 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/gotson/komga:latest 13 | ContainerName=komga 14 | AutoUpdate=registry 15 | 16 | Network=komga.network 17 | HostName=komga 18 | PublishPort=25600:25600 19 | 20 | Volume=komga-config:/config 21 | Volume=komga-data:/data 22 | 23 | Environment=TZ=Etc/UTC 24 | -------------------------------------------------------------------------------- 
/quadlets/komga/komga.network: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Komga network 3 | 4 | [Network] 5 | NetworkName=komga 6 | 7 | -------------------------------------------------------------------------------- /quadlets/komga/komga.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | # NOTE(review): a .volume quadlet defines a single volume and the last VolumeName wins; komga-data should move to its own .volume file — confirm. 3 | VolumeName=komga-config 4 | VolumeName=komga-data 5 | 6 | -------------------------------------------------------------------------------- /quadlets/lazylibrarian/lazylibrarian.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Lazy Librarian 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | ContainerName=lazylibrarian 13 | Image=lscr.io/linuxserver/lazylibrarian:latest 14 | AutoUpdate=registry 15 | 16 | Network=lazylibrarian.network 17 | HostName=lazylibrarian 18 | PublishPort=5299 19 | 20 | Volume=lazylibrarian-config:/config 21 | Volume=/volumes/books:/books 22 | 23 | Environment=TZ=Etc/UTC 24 | -------------------------------------------------------------------------------- /quadlets/lazylibrarian/lazylibrarian.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/lazylibrarian/lazylibrarian.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=lazylibrarian-config 3 | 4 | -------------------------------------------------------------------------------- /quadlets/leantime/leantime-mysql.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Leantime MySQL 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | 
WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/mysql:8.4 13 | ContainerName=leantime-mysql 14 | AutoUpdate=registry 15 | 16 | Network=leantime.network 17 | HostName=leantime-mysql 18 | 19 | Volume=leantime-mysql:/var/lib/mysql 20 | 21 | EnvironmentFile=leantime.env 22 | 23 | Secret=leantime-db-password,type=env,target=MYSQL_PASSWORD 24 | Secret=leantime-db-root-password,type=env,target=MYSQL_ROOT_PASSWORD 25 | 26 | Exec=--character-set-server=UTF8MB4 --collation-server=UTF8MB4_unicode_ci 27 | -------------------------------------------------------------------------------- /quadlets/leantime/leantime.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Leantime task management 3 | Requires=leantime-mysql.service 4 | After=leantime-mysql.service 5 | 6 | [Service] 7 | Restart=on-failure 8 | TimeoutStartSec=900 9 | 10 | [Install] 11 | WantedBy=default.target 12 | 13 | [Container] 14 | Image=docker.io/leantime/leantime:latest 15 | ContainerName=leantime 16 | AutoUpdate=registry 17 | 18 | AddCapability=CAP_NET_BIND_SERVICE 19 | AddCapability=CAP_CHOWN 20 | AddCapability=CAP_SETGID 21 | AddCapability=CAP_SETUID 22 | 23 | Network=leantime.network 24 | HostName=leantime 25 | PublishPort=8080:8080 26 | 27 | Volume=leantime-public-userfiles:/var/www/html/public/userfiles 28 | Volume=leantime-userfiles:/var/www/html/userfiles 29 | Volume=leantime-plugins:/var/www/html/app/Plugins 30 | Volume=leantime-logs:/var/www/html/storage/logs 31 | 32 | EnvironmentFile=leantime.env 33 | 34 | Secret=leantime-session-password,type=env,target=LEAN_SESSION_PASSWORD 35 | Secret=leantime-db-password,type=env,target=LEAN_DB_PASSWORD 36 | -------------------------------------------------------------------------------- /quadlets/leantime/leantime.env: -------------------------------------------------------------------------------- 1 | # This is a sample configuration file with all possible configuration options. 
2 | # If you dont want to maintain a file like this you can pass in all variables via Server Variables 3 | 4 | LEAN_PORT=8080 # The port to expose and access Leantime 5 | LEAN_APP_URL=https://leantime.example.com # Base URL, needed for subfolder or proxy installs (including http:// or https://) 6 | LEAN_APP_DIR= # Base of application without trailing slash (used for cookies), e.g, /leantime 7 | 8 | LEAN_DEBUG=0 # Debug flag 9 | 10 | # Database - MySQL container 11 | # MYSQL_ROOT_PASSWORD=changeme123 # MySQL root password 12 | MYSQL_DATABASE=leantime # Database name 13 | MYSQL_USER=leantime # Database username 14 | # MYSQL_PASSWORD=changeme123 # Database password 15 | 16 | # Database - leantime container 17 | LEAN_DB_HOST=leantime-mysql # Database host 18 | LEAN_DB_USER=leantime # Database username (needs to be the same as MYSQL_USER) 19 | # LEAN_DB_PASSWORD=changeme123 # Database password (needs to be the same as MYSQL_PASSWORD) 20 | LEAN_DB_DATABASE=leantime # Database name (needs to be the same as MYSQL_DATABASE) 21 | LEAN_DB_PORT=3306 # Database port 22 | 23 | ## Session Management 24 | # LEAN_SESSION_PASSWORD=3evBlq9zdUEuzKvVJHWWx3QzsQhturBApxwcws2m # Salting sessions, replace with a strong password 25 | LEAN_SESSION_EXPIRATION=28800 # How many seconds after inactivity should we logout? 28800seconds = 8hours 26 | LEAN_SESSION_SECURE=false # Serve cookies via https only? Set to true when using https, set to false when using http. 
27 | 28 | 29 | ## Optional Configuration, you may omit these from your .env file 30 | 31 | ## Default Settings 32 | LEAN_SITENAME=Leantime # Name of your site, can be changed later 33 | LEAN_LANGUAGE=en-US # Default language 34 | LEAN_DEFAULT_TIMEZONE=Etc/UTC # Set default timezone 35 | LEAN_LOG_PATH= # Default Log Path (including filename), if not set /logs/error.log will be used 36 | LEAN_DISABLE_LOGIN_FORM=false # If true then dont show the login form (useful only if additional auth method[s] are available) 37 | 38 | ## Look & Feel, these settings are available in the UI and can be overwritten there. 39 | LEAN_LOGO_PATH=/dist/images/logo.svg # Default logo path, can be changed later 40 | LEAN_PRINT_LOGO_URL=/dist/images/logo.png # Default logo URL use for printing (must be jpg or png format) 41 | LEAN_DEFAULT_THEME=default # Default theme 42 | LEAN_PRIMARY_COLOR=#006d9f # Primary Theme color 43 | LEAN_SECONDARY_COLOR = #00a886 # Secondary Theme Color 44 | 45 | 46 | ## Fileuploads 47 | 48 | # Local File Uploads 49 | LEAN_USER_FILE_PATH=userfiles/ # Local relative path to store uploaded files (if not using S3) 50 | LEAN_DB_BACKUP_PATH=backupdb/ # Local relative path to store backup files, need permission to write 51 | 52 | # S3 File Uploads 53 | LEAN_USE_S3=false # Set to true if you want to use S3 instead of local files 54 | LEAN_S3_KEY= # S3 Key 55 | LEAN_S3_SECRET= # S3 Secret 56 | LEAN_S3_BUCKET= # Your S3 bucket 57 | LEAN_S3_USE_PATH_STYLE_ENDPOINT=false # Sets the endpoint style: false => https://[bucket].[endpoint] ; true => https://[endpoint]/[bucket] 58 | LEAN_S3_REGION= # S3 region 59 | LEAN_S3_FOLDER_NAME= # Foldername within S3 (can be empty) 60 | LEAN_S3_END_POINT=null # S3 EndPoint S3 Compatible (https://sfo2.digitaloceanspaces.com) 61 | 62 | ## Email 63 | LEAN_EMAIL_RETURN= # Return email address, needs to be valid email address format 64 | LEAN_EMAIL_USE_SMTP=false # Use SMTP? 
If set to false, the default php mail() function will be used 65 | LEAN_EMAIL_SMTP_HOSTS= # SMTP host 66 | LEAN_EMAIL_SMTP_AUTH=true # SMTP authentication required 67 | LEAN_EMAIL_SMTP_USERNAME= # SMTP username 68 | LEAN_EMAIL_SMTP_PASSWORD= # SMTP password 69 | LEAN_EMAIL_SMTP_AUTO_TLS=true # SMTP Enable TLS encryption automatically if a server supports it 70 | LEAN_EMAIL_SMTP_SECURE= # SMTP Security protocol (usually one of: TLS, SSL, STARTTLS) 71 | LEAN_EMAIL_SMTP_SSLNOVERIFY=false # SMTP Allow insecure SSL: Dont verify certificate, accept self-signed, etc. 72 | LEAN_EMAIL_SMTP_PORT= # Port (usually one of 25, 465, 587, 2526) 73 | 74 | ## LDAP 75 | LEAN_LDAP_USE_LDAP=false # Set to true if you want to use LDAP 76 | LEAN_LDAP_LDAP_DOMAIN= # Domain name after username@ so users can login without domain definition 77 | LEAN_LDAP_LDAP_TYPE=OL # Select the correct directory type. Currently Supported: OL - OpenLdap, AD - Active Directory 78 | LEAN_LDAP_HOST= # FQDN 79 | LEAN_LDAP_PORT=389 # Default Port 80 | LEAN_LDAP_URI= # LDAP URI as alternative to hostname and port. Uses ldap://hostname:port 81 | LEAN_LDAP_DN= # Location of users, example: CN=users,DC=example,DC=com 82 | # Leantime->Ldap attribute mapping 83 | LEAN_LDAP_KEYS="{ 84 | \"username\":\"uid\", 85 | \"groups\":\"memberOf\", 86 | \"email\":\"mail\", 87 | \"firstname\":\"displayname\", 88 | \"lastname\":\"\", 89 | \"phone\":\"telephoneNumber\", 90 | \"jobTitle\":\"title\" 91 | \"jobLevel\":\"level\" 92 | \"department\":\"department\" 93 | 94 | }" 95 | 96 | # For AD use these default attributes 97 | # LEAN_LDAP_KEYS="{ 98 | # \"username\":\"cn\", 99 | # \"groups\":\"memberOf\", 100 | # \"email\":\"mail\", 101 | # \"firstname\":\"givenName\", 102 | # \"lastname\":\"sn\", 103 | # \"phone\":\"telephoneNumber\", 104 | # \"jobTitle\":\"title\" 105 | # \"jobLevel\":\"level\" 106 | # \"department\":\"department\" 107 | # }" 108 | 109 | LEAN_LDAP_DEFAULT_ROLE_KEY=20; # Default Leantime Role on creation. 
(set to editor) 110 | 111 | # Default role assignments upon first login. 112 | # optional - Can be updated later in user settings for each user 113 | LEAN_LDAP_GROUP_ASSIGNMENT="{ 114 | \"5\": { 115 | \"ltRole\":\"readonly\", 116 | \"ldapRole\":\"readonly\" 117 | }, 118 | \"10\": { 119 | \"ltRole\":\"commenter\", 120 | \"ldapRole\":\"commenter\" 121 | }, 122 | \"20\": { 123 | \"ltRole\":\"editor\", 124 | \"ldapRole\":\"editor\" 125 | }, 126 | \"30\": { 127 | \"ltRole\":\"manager\", 128 | \"ldapRole\":\"manager\" 129 | }, 130 | \"40\": { 131 | \"ltRole\":\"admin\", 132 | \"ldapRole\":\"administrators\" 133 | }, 134 | \"50\": { 135 | \"ltRole\":\"owner\", 136 | \"ldapRole\":\"administrators\" 137 | } 138 | }" 139 | 140 | ## OpenID Connect 141 | # required 142 | LEAN_OIDC_ENABLE=false 143 | LEAN_OIDC_CLIENT_ID = 144 | LEAN_OIDC_CLIENT_SECRET = 145 | 146 | # required - the URL for your provider (examples down below) 147 | #LEAN_OIDC_PROVIDER_URL = 148 | 149 | #Create User if it doesnt exist in Leantime db, otherwise fail login 150 | LEAN_OIDC_CREATE_USER=false 151 | 152 | # Default role for users created via OIDC (20 is editor) 153 | LEAN_OIDC_DEFAULT_ROLE=20 154 | 155 | # optional - these will be read from the well-known configuration if possible 156 | #LEAN_OIDC_AUTH_URL_OVERRIDE = 157 | #LEAN_OIDC_TOKEN_URL_OVERRIDE = 158 | #LEAN_OIDC_JWKS_URL_OVERRIDE = 159 | #LEAN_OIDC_USERINFO_URL_OVERRIDE = 160 | 161 | # optional - override the public key for RSA validation 162 | #LEAN_OIDC_CERTIFICATE_STRING = 163 | #LEAN_OIDC_CERTIFICATE_FILE = 164 | 165 | # optional - override the requested scopes 166 | #LEAN_OIDC_SCOPES = 167 | 168 | # optional - override the keys used for these fields 169 | #LEAN_OIDC_FIELD_EMAIL = 170 | #LEAN_OIDC_FIELD_FIRSTNAME = 171 | #LEAN_OIDC_FIELD_LASTNAME = 172 | #LEAN_OIDC_FIELD_PHONE = 173 | #LEAN_OIDC_FIELD_JOBTITLE = 174 | #LEAN_OIDC_FIELD_JOBLEVEL= 175 | #LEAN_OIDC_FIELD_DEPARTMENT = 176 | 177 | ## OpenID Connect setting for GitHub 178 | 
#LEAN_OIDC_PROVIDER_URL=https://token.actions.githubusercontent.com/ 179 | #LEAN_OIDC_AUTH_URL_OVERRIDE=https://github.com/login/oauth/authorize 180 | #LEAN_OIDC_TOKEN_URL_OVERRIDE=https://github.com/login/oauth/access_token 181 | #LEAN_OIDC_USERINFO_URL_OVERRIDE=https://api.github.com/user,https://api.github.com/user/emails 182 | #LEAN_OIDC_SCOPES=user:email,read:user 183 | #LEAN_OIDC_FIELD_EMAIL=0.email 184 | #LEAN_OIDC_FIELD_FIRSTNAME=name 185 | 186 | 187 | ## Redis (for session storage and cache) 188 | LEAN_USE_REDIS=false # Set to true to use redis as session cache 189 | LEAN_REDIS_URL= # Add URL path such as tcp://1.2.3.4:6379. If you are using a password, add ?auth=yourverycomplexpasswordhere to your URL 190 | LEAN_REDIS_HOST= 191 | LEAN_REDIS_PORT=6379 192 | LEAN_REDIS_PASSWORD= 193 | LEAN_REDIS_SCHEME= 194 | 195 | ## Rate limiting 196 | LEAN_RATELIMIT_GENERAL=1000 197 | LEAN_RATELIMIT_API=10 198 | LEAN_RATELIMIT_AUTH=20 199 | 200 | 201 | 202 | -------------------------------------------------------------------------------- /quadlets/leantime/leantime.network: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Leantime network 3 | 4 | [Network] 5 | -------------------------------------------------------------------------------- /quadlets/leantime/leantime.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=leantime-public-userfiles 3 | VolumeName=leantime-userfiles 4 | VolumeName=leantime-plugins 5 | VolumeName=leantime-logs 6 | VolumeName=leantime-mysql 7 | -------------------------------------------------------------------------------- /quadlets/librenms/README.md: -------------------------------------------------------------------------------- 1 | # LibreNMS 2 | 3 | ## Create DB password secret 4 | 5 | ```bash 6 | printf 'supersecretpassword' | podman secret create librenms-db-pw - 7 | ``` 8 | 9 | ## Known Issues 10 | 11 | 
-------------------------------------------------------------------------------- /quadlets/librenms/db.env: -------------------------------------------------------------------------------- 1 | DB_HOST=librenms-db 2 | DB_NAME=librenms 3 | DB_USER=librenms 4 | DB_TIMEOUT=60 5 | -------------------------------------------------------------------------------- /quadlets/librenms/librenms-db.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=LibreNMS DB 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/mariadb/mariadb:10 13 | ContainerName=librenms-db 14 | AutoUpdate=registry 15 | 16 | Network= 17 | HostName=librenms-db 18 | 19 | Volume=librenms-db:/var/lib/mysql 20 | 21 | EnvironmentFile=tz.env 22 | EnvironmentFile=mariadb.env 23 | 24 | Secret=librenms-db-pw,type=env,target=MYSQL_PASSWORD 25 | 26 | Exec=mysqld --innodb-file-per-table=1 --lower-case-table-names=0 --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci 27 | -------------------------------------------------------------------------------- /quadlets/librenms/librenms-dispatcher.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=LibreNMS dispatcher 3 | After=librenms.container 4 | After=librenms-redis.container 5 | 6 | [Service] 7 | Restart=on-failure 8 | TimeoutStartSec=900 9 | 10 | [Install] 11 | WantedBy=default.target 12 | 13 | [Container] 14 | Image=docker.io/librenms/librenms:latest 15 | ContainerName=librenms-dispatcher 16 | AutoUpdate=registry 17 | 18 | Network= 19 | HostName=librenms-dispatcher 20 | 21 | Volume=librenms-data:/data 22 | 23 | EnvironmentFile=librenms.env 24 | EnvironmentFile=tz.env 25 | EnvironmentFile=db.env 26 | 27 | Environment=DISPATCHER_NODE_ID=dispatcher1 28 | Environment=SIDECAR_DISPATCHER=1 29 | 30 | 
Secret=librenms-db-pw,type=env,target=DB_PASSWORD 31 | -------------------------------------------------------------------------------- /quadlets/librenms/librenms-msmtpd.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=LibreNMS msmtpd 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/crazymax/msmtpd:latest 13 | ContainerName=librenms-msmtpd 14 | AutoUpdate=registry 15 | 16 | Network= 17 | HostName=librenms-msmtpd 18 | 19 | EnvironmentFile=msmtpd.env 20 | -------------------------------------------------------------------------------- /quadlets/librenms/librenms-redist.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=LibreNMS Redis 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/redis/redis:7.2-alpine 13 | ContainerName=librenms-redis 14 | AutoUpdate=registry 15 | 16 | Network= 17 | HostName=librenms-redis 18 | 19 | EnvironmentFile=tz.env 20 | -------------------------------------------------------------------------------- /quadlets/librenms/librenms-snmptrapd.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=LibreNMS snmptrapd 3 | After=librenms.container 4 | After=librenms-redis.container 5 | 6 | [Service] 7 | Restart=on-failure 8 | TimeoutStartSec=900 9 | 10 | [Install] 11 | WantedBy=default.target 12 | 13 | [Container] 14 | Image=docker.io/librenms/librenms:latest 15 | ContainerName=librenms-snmptrapd 16 | AutoUpdate=registry 17 | 18 | Network= 19 | HostName=librenms-snmptrapd 20 | PublishPort=162:162/tcp 21 | PublishPort=162:162/udp 22 | 23 | AddCapability=NET_ADMIN 24 | AddCapability=NET_RAW 25 | 26 | Volume=librenms-data:/data 27 | 28 | 
EnvironmentFile=librenms.env 29 | EnvironmentFile=tz.env 30 | EnvironmentFile=db.env 31 | 32 | Environment=SIDECAR_SNMPTRAPD=1 33 | 34 | Secret=librenms-db-pw,type=env,target=DB_PASSWORD 35 | -------------------------------------------------------------------------------- /quadlets/librenms/librenms-syslogng.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=LibreNMS syslogng 3 | After=librenms.container 4 | After=librenms-redis.container 5 | 6 | [Service] 7 | Restart=on-failure 8 | TimeoutStartSec=900 9 | 10 | [Install] 11 | WantedBy=default.target 12 | 13 | [Container] 14 | Image=docker.io/librenms/librenms:latest 15 | ContainerName=librenms-syslogng 16 | AutoUpdate=registry 17 | 18 | Network= 19 | HostName=librenms-syslogng 20 | PublishPort=514:514/tcp 21 | PublishPort=514:514/udp 22 | 23 | AddCapability=NET_ADMIN 24 | AddCapability=NET_RAW 25 | 26 | Volume=librenms-data:/data 27 | 28 | EnvironmentFile=librenms.env 29 | EnvironmentFile=db.env 30 | EnvironmentFile=tz.env 31 | 32 | Environment=SIDECAR_SYSLOGNG=1 33 | 34 | Secret=librenms-db-pw,type=env,target=DB_PASSWORD 35 | -------------------------------------------------------------------------------- /quadlets/librenms/librenms.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=LibreNMS 3 | After=librenms-db.container 4 | After=librenms-redis.container 5 | After=librenms-msmtpd.container 6 | Wants=librenms-dispatcher.container 7 | Wants=librenms-syslogng.container 8 | Wants=librenms-snmptrapd.container 9 | 10 | [Service] 11 | Restart=on-failure 12 | TimeoutStartSec=900 13 | 14 | [Install] 15 | WantedBy=default.target 16 | 17 | [Container] 18 | Image=docker.io/librenms/librenms:latest 19 | ContainerName=librenms 20 | AutoUpdate=registry 21 | 22 | AddCapability=NET_ADMIN 23 | AddCapability=NET_RAW 24 | 25 | Network= 26 | HostName=librenms 27 | PublishPort=8000:8000/tcp 28 | 29 
| Volume=librenms-data:/data 30 | 31 | EnvironmentFile=librenms.env 32 | EnvironmentFile=tz.env 33 | EnvironmentFile=db.env 34 | 35 | Secret=librenms-db-pw,type=env,target=DB_PASSWORD 36 | -------------------------------------------------------------------------------- /quadlets/librenms/librenms.env: -------------------------------------------------------------------------------- 1 | MEMORY_LIMIT=256M 2 | MAX_INPUT_VARS=1000 3 | UPLOAD_MAX_SIZE=16M 4 | OPCACHE_MEM_SIZE=128 5 | REAL_IP_FROM=0.0.0.0/32 6 | REAL_IP_HEADER=X-Forwarded-For 7 | LOG_IP_VAR=remote_addr 8 | 9 | CACHE_DRIVER=redis 10 | SESSION_DRIVER=redis 11 | REDIS_HOST=librenms-redis 12 | 13 | LIBRENMS_SNMP_COMMUNITY=LibreNMS 14 | 15 | LIBRENMS_WEATHERMAP=false 16 | LIBRENMS_WEATHERMAP_SCHEDULE=*/5 * * * * 17 | -------------------------------------------------------------------------------- /quadlets/librenms/librenms.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=librenms-data 3 | 4 | -------------------------------------------------------------------------------- /quadlets/librenms/mariadb.env: -------------------------------------------------------------------------------- 1 | MYSQL_DATABASE=librenms 2 | MYSQL_USER=librenms 3 | MARIADB_RANDOM_ROOT_PASSWORD=yes 4 | 5 | -------------------------------------------------------------------------------- /quadlets/librenms/msmtpd.env: -------------------------------------------------------------------------------- 1 | # https://github.com/crazy-max/docker-msmtpd 2 | SMTP_HOST=smtp.gmail.com 3 | SMTP_PORT=587 4 | SMTP_TLS=on 5 | SMTP_STARTTLS=on 6 | SMTP_TLS_CHECKCERT=on 7 | SMTP_AUTH=on 8 | SMTP_USER=foo 9 | SMTP_PASSWORD=bar 10 | SMTP_FROM=foo@gmail.com 11 | -------------------------------------------------------------------------------- /quadlets/librenms/tz.env: -------------------------------------------------------------------------------- 1 | TZ=Etc/UTC 2 | 
-------------------------------------------------------------------------------- /quadlets/librespeed/librespeed.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Speedtest server 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=ghcr.io/librespeed/speedtest:latest 13 | ContainerName=librespeed 14 | AutoUpdate=registry 15 | 16 | Network=librespeed.network 17 | HostName=librespeed 18 | 19 | Volume=/volumes/librespeed/database:/database 20 | 21 | Environment=MODE=standalone 22 | Environment=TITLE=Librespeed 23 | Environment=TELEMETRY=false 24 | Environment=DB_TYPE= 25 | Environment=ENABLE_ID_OBFUSCATION=false 26 | Environment=REDACT_IP_ADDRESSES=false 27 | Environment=PASSWORD= 28 | Environment=EMAIL= 29 | Environment=DISABLE_IPINFO=false 30 | Environment=IPINFO_APIKEY= 31 | Environment=DISTANCE=km 32 | 33 | -------------------------------------------------------------------------------- /quadlets/librespeed/librespeed.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/linkwarden/linkwarden-database.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Linkwarden database 3 | Wants=linkwarden.service 4 | 5 | [Service] 6 | Restart=on-failure 7 | TimeoutStartSec=900 8 | 9 | [Install] 10 | WantedBy=default.target 11 | 12 | [Container] 13 | Image=docker.io/postgres:16-alpine 14 | ContainerName=linkwarden-database 15 | AutoUpdate=registry 16 | 17 | Network=linkwarden.network 18 | HostName=linkwarden-database 19 | 20 | Volume=linkwarden-database:/var/lib/postgresql/data 21 | 22 | EnvironmentFile=linkwarden.env 23 | 24 | -------------------------------------------------------------------------------- 
/quadlets/linkwarden/linkwarden.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Bookmark preservation 3 | Requires=linkwarden-database.service 4 | 5 | [Service] 6 | Restart=on-failure 7 | TimeoutStartSec=900 8 | 9 | [Install] 10 | WantedBy=default.target 11 | 12 | [Container] 13 | Image=ghcr.io/linkwarden/linkwarden:latest 14 | ContainerName=linkwarden 15 | AutoUpdate=registry 16 | 17 | Network=linkwarden.network 18 | HostName=linkwarden 19 | PublishPort=3000 20 | 21 | Volume=linkwarden-data:/data/data 22 | 23 | EnvironmentFile=linkwarden.env 24 | 25 | -------------------------------------------------------------------------------- /quadlets/linkwarden/linkwarden.env: -------------------------------------------------------------------------------- 1 | NEXTAUTH_URL=http://localhost:3000/api/v1/auth 2 | NEXTAUTH_SECRET= 3 | 4 | # Manual installation database settings 5 | # Example: DATABASE_URL=postgresql://user:password@localhost:5432/linkwarden 6 | DATABASE_URL= 7 | 8 | # Docker installation database settings 9 | POSTGRES_PASSWORD= 10 | 11 | # Additional Optional Settings 12 | PAGINATION_TAKE_COUNT= 13 | STORAGE_FOLDER= 14 | AUTOSCROLL_TIMEOUT= 15 | NEXT_PUBLIC_DISABLE_REGISTRATION= 16 | NEXT_PUBLIC_CREDENTIALS_ENABLED= 17 | DISABLE_NEW_SSO_USERS= 18 | RE_ARCHIVE_LIMIT= 19 | MAX_LINKS_PER_USER= 20 | ARCHIVE_TAKE_COUNT= 21 | BROWSER_TIMEOUT= 22 | IGNORE_UNAUTHORIZED_CA= 23 | IGNORE_HTTPS_ERRORS= 24 | IGNORE_URL_SIZE_LIMIT= 25 | NEXT_PUBLIC_DEMO= 26 | NEXT_PUBLIC_DEMO_USERNAME= 27 | NEXT_PUBLIC_DEMO_PASSWORD= 28 | NEXT_PUBLIC_ADMIN= 29 | NEXT_PUBLIC_MAX_FILE_BUFFER= 30 | MONOLITH_MAX_BUFFER= 31 | MONOLITH_CUSTOM_OPTIONS= 32 | PDF_MAX_BUFFER= 33 | SCREENSHOT_MAX_BUFFER= 34 | READABILITY_MAX_BUFFER= 35 | PREVIEW_MAX_BUFFER= 36 | IMPORT_LIMIT= 37 | PLAYWRIGHT_LAUNCH_OPTIONS_EXECUTABLE_PATH= 38 | MAX_WORKERS= 39 | 40 | # AWS S3 Settings 41 | SPACES_KEY= 42 | SPACES_SECRET= 43 | SPACES_ENDPOINT= 44 | 
SPACES_BUCKET_NAME= 45 | SPACES_REGION= 46 | SPACES_FORCE_PATH_STYLE= 47 | 48 | # SMTP Settings 49 | NEXT_PUBLIC_EMAIL_PROVIDER= 50 | EMAIL_FROM= 51 | EMAIL_SERVER= 52 | BASE_URL= 53 | 54 | # Proxy settings 55 | PROXY= 56 | PROXY_USERNAME= 57 | PROXY_PASSWORD= 58 | PROXY_BYPASS= 59 | 60 | # PDF archive settings 61 | PDF_MARGIN_TOP= 62 | PDF_MARGIN_BOTTOM= 63 | 64 | ################# 65 | # SSO Providers # 66 | ################# 67 | 68 | # 42 School 69 | NEXT_PUBLIC_FORTYTWO_ENABLED= 70 | FORTYTWO_CUSTOM_NAME= 71 | FORTYTWO_CLIENT_ID= 72 | FORTYTWO_CLIENT_SECRET= 73 | 74 | # Apple 75 | NEXT_PUBLIC_APPLE_ENABLED= 76 | APPLE_CUSTOM_NAME= 77 | APPLE_ID= 78 | APPLE_SECRET= 79 | 80 | # Atlassian 81 | NEXT_PUBLIC_ATLASSIAN_ENABLED= 82 | ATLASSIAN_CUSTOM_NAME= 83 | ATLASSIAN_CLIENT_ID= 84 | ATLASSIAN_CLIENT_SECRET= 85 | ATLASSIAN_SCOPE= 86 | 87 | # Auth0 88 | NEXT_PUBLIC_AUTH0_ENABLED= 89 | AUTH0_CUSTOM_NAME= 90 | AUTH0_ISSUER= 91 | AUTH0_CLIENT_SECRET= 92 | AUTH0_CLIENT_ID= 93 | 94 | # Authelia 95 | NEXT_PUBLIC_AUTHELIA_ENABLED="" 96 | AUTHELIA_CLIENT_ID="" 97 | AUTHELIA_CLIENT_SECRET="" 98 | AUTHELIA_WELLKNOWN_URL="" 99 | 100 | # Authentik 101 | NEXT_PUBLIC_AUTHENTIK_ENABLED= 102 | AUTHENTIK_CUSTOM_NAME= 103 | AUTHENTIK_ISSUER= 104 | AUTHENTIK_CLIENT_ID= 105 | AUTHENTIK_CLIENT_SECRET= 106 | 107 | # Azure AD B2C 108 | NEXT_PUBLIC_AZURE_AD_B2C_ENABLED= 109 | AZURE_AD_B2C_TENANT_NAME= 110 | AZURE_AD_B2C_CLIENT_ID= 111 | AZURE_AD_B2C_CLIENT_SECRET= 112 | AZURE_AD_B2C_PRIMARY_USER_FLOW= 113 | 114 | # Azure AD 115 | NEXT_PUBLIC_AZURE_AD_ENABLED= 116 | AZURE_AD_CLIENT_ID= 117 | AZURE_AD_CLIENT_SECRET= 118 | AZURE_AD_TENANT_ID= 119 | 120 | # Battle.net 121 | NEXT_PUBLIC_BATTLENET_ENABLED= 122 | BATTLENET_CUSTOM_NAME= 123 | BATTLENET_CLIENT_ID= 124 | BATTLENET_CLIENT_SECRET= 125 | BATTLENET_ISSUER= 126 | 127 | # Box 128 | NEXT_PUBLIC_BOX_ENABLED= 129 | BOX_CUSTOM_NAME= 130 | BOX_CLIENT_ID= 131 | BOX_CLIENT_SECRET= 132 | 133 | # Bungie 134 | NEXT_PUBLIC_BUNGIE_ENABLED= 135 | 
BUNGIE_CUSTOM_NAME= 136 | BUNGIE_CLIENT_ID= 137 | BUNGIE_CLIENT_SECRET= 138 | BUNGIE_API_KEY= 139 | 140 | # Cognito 141 | NEXT_PUBLIC_COGNITO_ENABLED= 142 | COGNITO_CUSTOM_NAME= 143 | COGNITO_CLIENT_ID= 144 | COGNITO_CLIENT_SECRET= 145 | COGNITO_ISSUER= 146 | 147 | # Coinbase 148 | NEXT_PUBLIC_COINBASE_ENABLED= 149 | COINBASE_CUSTOM_NAME= 150 | COINBASE_CLIENT_ID= 151 | COINBASE_CLIENT_SECRET= 152 | 153 | # Discord 154 | NEXT_PUBLIC_DISCORD_ENABLED= 155 | DISCORD_CUSTOM_NAME= 156 | DISCORD_CLIENT_ID= 157 | DISCORD_CLIENT_SECRET= 158 | 159 | # Dropbox 160 | NEXT_PUBLIC_DROPBOX_ENABLED= 161 | DROPBOX_CUSTOM_NAME= 162 | DROPBOX_CLIENT_ID= 163 | DROPBOX_CLIENT_SECRET= 164 | 165 | # DuendeIndentityServer6 166 | NEXT_PUBLIC_DUENDE_IDS6_ENABLED= 167 | DUENDE_IDS6_CUSTOM_NAME= 168 | DUENDE_IDS6_CLIENT_ID= 169 | DUENDE_IDS6_CLIENT_SECRET= 170 | DUENDE_IDS6_ISSUER= 171 | 172 | # EVE Online 173 | NEXT_PUBLIC_EVEONLINE_ENABLED= 174 | EVEONLINE_CUSTOM_NAME= 175 | EVEONLINE_CLIENT_ID= 176 | EVEONLINE_CLIENT_SECRET= 177 | 178 | # Facebook 179 | NEXT_PUBLIC_FACEBOOK_ENABLED= 180 | FACEBOOK_CUSTOM_NAME= 181 | FACEBOOK_CLIENT_ID= 182 | FACEBOOK_CLIENT_SECRET= 183 | 184 | # FACEIT 185 | NEXT_PUBLIC_FACEIT_ENABLED= 186 | FACEIT_CUSTOM_NAME= 187 | FACEIT_CLIENT_ID= 188 | FACEIT_CLIENT_SECRET= 189 | 190 | # Foursquare 191 | NEXT_PUBLIC_FOURSQUARE_ENABLED= 192 | FOURSQUARE_CUSTOM_NAME= 193 | FOURSQUARE_CLIENT_ID= 194 | FOURSQUARE_CLIENT_SECRET= 195 | FOURSQUARE_APIVERSION= 196 | 197 | # Freshbooks 198 | NEXT_PUBLIC_FRESHBOOKS_ENABLED= 199 | FRESHBOOKS_CUSTOM_NAME= 200 | FRESHBOOKS_CLIENT_ID= 201 | FRESHBOOKS_CLIENT_SECRET= 202 | 203 | # FusionAuth 204 | NEXT_PUBLIC_FUSIONAUTH_ENABLED= 205 | FUSIONAUTH_CUSTOM_NAME= 206 | FUSIONAUTH_CLIENT_ID= 207 | FUSIONAUTH_CLIENT_SECRET= 208 | FUSIONAUTH_ISSUER= 209 | FUSIONAUTH_TENANT_ID= 210 | 211 | # GitHub 212 | NEXT_PUBLIC_GITHUB_ENABLED= 213 | GITHUB_CUSTOM_NAME= 214 | GITHUB_ID= 215 | GITHUB_SECRET= 216 | 217 | # GitLab 218 | 
NEXT_PUBLIC_GITLAB_ENABLED= 219 | GITLAB_CUSTOM_NAME= 220 | GITLAB_CLIENT_ID= 221 | GITLAB_CLIENT_SECRET= 222 | 223 | # Google 224 | NEXT_PUBLIC_GOOGLE_ENABLED= 225 | GOOGLE_CUSTOM_NAME= 226 | GOOGLE_CLIENT_ID= 227 | GOOGLE_CLIENT_SECRET= 228 | 229 | # HubSpot 230 | NEXT_PUBLIC_HUBSPOT_ENABLED= 231 | HUBSPOT_CUSTOM_NAME= 232 | HUBSPOT_CLIENT_ID= 233 | HUBSPOT_CLIENT_SECRET= 234 | 235 | # IdentityServer4 236 | NEXT_PUBLIC_IDS4_ENABLED= 237 | IDS4_CUSTOM_NAME= 238 | IDS4_CLIENT_ID= 239 | IDS4_CLIENT_SECRET= 240 | IDS4_ISSUER= 241 | 242 | # Kakao 243 | NEXT_PUBLIC_KAKAO_ENABLED= 244 | KAKAO_CUSTOM_NAME= 245 | KAKAO_CLIENT_ID= 246 | KAKAO_CLIENT_SECRET= 247 | 248 | # Keycloak 249 | NEXT_PUBLIC_KEYCLOAK_ENABLED= 250 | KEYCLOAK_CUSTOM_NAME= 251 | KEYCLOAK_ISSUER= 252 | KEYCLOAK_CLIENT_ID= 253 | KEYCLOAK_CLIENT_SECRET= 254 | 255 | # LINE 256 | NEXT_PUBLIC_LINE_ENABLED= 257 | LINE_CUSTOM_NAME= 258 | LINE_CLIENT_ID= 259 | LINE_CLIENT_SECRET= 260 | 261 | # LinkedIn 262 | NEXT_PUBLIC_LINKEDIN_ENABLED= 263 | LINKEDIN_CUSTOM_NAME= 264 | LINKEDIN_CLIENT_ID= 265 | LINKEDIN_CLIENT_SECRET= 266 | 267 | # Mailchimp 268 | NEXT_PUBLIC_MAILCHIMP_ENABLED= 269 | MAILCHIMP_CUSTOM_NAME= 270 | MAILCHIMP_CLIENT_ID= 271 | MAILCHIMP_CLIENT_SECRET= 272 | 273 | # Mail.ru 274 | NEXT_PUBLIC_MAILRU_ENABLED= 275 | MAILRU_CUSTOM_NAME= 276 | MAILRU_CLIENT_ID= 277 | MAILRU_CLIENT_SECRET= 278 | 279 | # Naver 280 | NEXT_PUBLIC_NAVER_ENABLED= 281 | NAVER_CUSTOM_NAME= 282 | NAVER_CLIENT_ID= 283 | NAVER_CLIENT_SECRET= 284 | 285 | # Netlify 286 | NEXT_PUBLIC_NETLIFY_ENABLED= 287 | NETLIFY_CUSTOM_NAME= 288 | NETLIFY_CLIENT_ID= 289 | NETLIFY_CLIENT_SECRET= 290 | 291 | # Okta 292 | NEXT_PUBLIC_OKTA_ENABLED= 293 | OKTA_CUSTOM_NAME= 294 | OKTA_CLIENT_ID= 295 | OKTA_CLIENT_SECRET= 296 | OKTA_ISSUER= 297 | 298 | # OneLogin 299 | NEXT_PUBLIC_ONELOGIN_ENABLED= 300 | ONELOGIN_CUSTOM_NAME= 301 | ONELOGIN_CLIENT_ID= 302 | ONELOGIN_CLIENT_SECRET= 303 | ONELOGIN_ISSUER= 304 | 305 | # Osso 306 | NEXT_PUBLIC_OSSO_ENABLED= 
307 | OSSO_CUSTOM_NAME= 308 | OSSO_CLIENT_ID= 309 | OSSO_CLIENT_SECRET= 310 | OSSO_ISSUER= 311 | 312 | # osu! 313 | NEXT_PUBLIC_OSU_ENABLED= 314 | OSU_CUSTOM_NAME= 315 | OSU_CLIENT_ID= 316 | OSU_CLIENT_SECRET= 317 | 318 | # Patreon 319 | NEXT_PUBLIC_PATREON_ENABLED= 320 | PATREON_CUSTOM_NAME= 321 | PATREON_CLIENT_ID= 322 | PATREON_CLIENT_SECRET= 323 | 324 | # Pinterest 325 | NEXT_PUBLIC_PINTEREST_ENABLED= 326 | PINTEREST_CUSTOM_NAME= 327 | PINTEREST_CLIENT_ID= 328 | PINTEREST_CLIENT_SECRET= 329 | 330 | # Pipedrive 331 | NEXT_PUBLIC_PIPEDRIVE_ENABLED= 332 | PIPEDRIVE_CUSTOM_NAME= 333 | PIPEDRIVE_CLIENT_ID= 334 | PIPEDRIVE_CLIENT_SECRET= 335 | 336 | # Reddit 337 | NEXT_PUBLIC_REDDIT_ENABLED= 338 | REDDIT_CUSTOM_NAME= 339 | REDDIT_CLIENT_ID= 340 | REDDIT_CLIENT_SECRET= 341 | 342 | # Salesforce 343 | NEXT_PUBLIC_SALESFORCE_ENABLED= 344 | SALESFORCE_CUSTOM_NAME= 345 | SALESFORCE_CLIENT_ID= 346 | SALESFORCE_CLIENT_SECRET= 347 | 348 | # Slack 349 | NEXT_PUBLIC_SLACK_ENABLED= 350 | SLACK_CUSTOM_NAME= 351 | SLACK_CLIENT_ID= 352 | SLACK_CLIENT_SECRET= 353 | 354 | # Spotify 355 | NEXT_PUBLIC_SPOTIFY_ENABLED= 356 | SPOTIFY_CUSTOM_NAME= 357 | SPOTIFY_CLIENT_ID= 358 | SPOTIFY_CLIENT_SECRET= 359 | 360 | # Strava 361 | NEXT_PUBLIC_STRAVA_ENABLED= 362 | STRAVA_CUSTOM_NAME= 363 | STRAVA_CLIENT_ID= 364 | STRAVA_CLIENT_SECRET= 365 | 366 | # Todoist 367 | NEXT_PUBLIC_TODOIST_ENABLED= 368 | TODOIST_CUSTOM_NAME= 369 | TODOIST_CLIENT_ID= 370 | TODOIST_CLIENT_SECRET= 371 | 372 | # Twitch 373 | NEXT_PUBLIC_TWITCH_ENABLED= 374 | TWITCH_CUSTOM_NAME= 375 | TWITCH_CLIENT_ID= 376 | TWITCH_CLIENT_SECRET= 377 | 378 | # United Effects 379 | NEXT_PUBLIC_UNITED_EFFECTS_ENABLED= 380 | UNITED_EFFECTS_CUSTOM_NAME= 381 | UNITED_EFFECTS_CLIENT_ID= 382 | UNITED_EFFECTS_CLIENT_SECRET= 383 | UNITED_EFFECTS_ISSUER= 384 | 385 | 386 | # VK 387 | NEXT_PUBLIC_VK_ENABLED= 388 | VK_CUSTOM_NAME= 389 | VK_CLIENT_ID= 390 | VK_CLIENT_SECRET= 391 | 392 | # Wikimedia 393 | NEXT_PUBLIC_WIKIMEDIA_ENABLED= 394 | 
WIKIMEDIA_CUSTOM_NAME= 395 | WIKIMEDIA_CLIENT_ID= 396 | WIKIMEDIA_CLIENT_SECRET= 397 | 398 | 399 | # Wordpress.com 400 | NEXT_PUBLIC_WORDPRESS_ENABLED= 401 | WORDPRESS_CUSTOM_NAME= 402 | WORDPRESS_CLIENT_ID= 403 | WORDPRESS_CLIENT_SECRET= 404 | 405 | # Yandex 406 | NEXT_PUBLIC_YANDEX_ENABLED= 407 | YANDEX_CUSTOM_NAME= 408 | YANDEX_CLIENT_ID= 409 | YANDEX_CLIENT_SECRET= 410 | 411 | # Zitadel 412 | NEXT_PUBLIC_ZITADEL_ENABLED= 413 | ZITADEL_CUSTOM_NAME= 414 | ZITADEL_CLIENT_ID= 415 | ZITADEL_CLIENT_SECRET= 416 | ZITADEL_ISSUER= 417 | 418 | # Zoho 419 | NEXT_PUBLIC_ZOHO_ENABLED= 420 | ZOHO_CUSTOM_NAME= 421 | ZOHO_CLIENT_ID= 422 | ZOHO_CLIENT_SECRET= 423 | 424 | # Zoom 425 | NEXT_PUBLIC_ZOOM_ENABLED= 426 | ZOOM_CUSTOM_NAME= 427 | ZOOM_CLIENT_ID= 428 | ZOOM_CLIENT_SECRET= 429 | 430 | -------------------------------------------------------------------------------- /quadlets/linkwarden/linkwarden.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=linkwarden-data 3 | VolumeName=linkwarden-database 4 | -------------------------------------------------------------------------------- /quadlets/matrix/matrix-db.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Matrix Synapse DB Quadlet 3 | Before=matrix.service 4 | 5 | [Service] 6 | Restart=always 7 | TimeoutStartSec=900 8 | 9 | [Install] 10 | WantedBy=default.target 11 | 12 | [Container] 13 | Image=docker.io/library/postgres:15 14 | ContainerName=matrix-db 15 | AutoUpdate=registry 16 | 17 | Network=matrix.network 18 | HostName=matrix-db 19 | PublishPort=5432:5432 20 | 21 | Volume=matrix-db:/var/lib/postgresql/data 22 | 23 | Environment=POSTGRES_PASSWORD=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 24 | Environment=POSTGRES_USER=synapse_user 25 | Environment=POSTGRES_DB=synapse 26 | Environment=LANG=C 27 | Environment=LC_COLLATE=C 28 | Environment=LC_CTYPE=C 29 | 
Environment=POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C 30 | -------------------------------------------------------------------------------- /quadlets/matrix/matrix.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Matrix Synapse Quadlet 3 | Requires=matrix-db.service 4 | After=matrix-db.service 5 | 6 | [Service] 7 | Restart=always 8 | TimeoutStartSec=900 9 | 10 | [Install] 11 | WantedBy=default.target 12 | 13 | [Container] 14 | Image=docker.io/matrixdotorg/synapse:latest 15 | ContainerName=matrix 16 | AutoUpdate=registry 17 | 18 | Network=matrix.network 19 | HostName=matrix 20 | PublishPort=8008:8008 21 | 22 | Volume=matrix-data:/data 23 | 24 | Environment=VIRTUAL_HOST=matrix.example.com 25 | Environment=VIRTUAL_PORT=8008 26 | Environment=LETSENCRYPT_HOST=matrix.example.com 27 | Environment=SYNAPSE_SERVER_NAME=matrix.example.com 28 | Environment=SYNAPSE_REPORT_STATS=no 29 | -------------------------------------------------------------------------------- /quadlets/matrix/matrix.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/matrix/matrix.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=matrix-db 3 | VolumeName=matrix-data 4 | -------------------------------------------------------------------------------- /quadlets/maxun/maxun-backend.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Maxun backend 3 | Requires=maxun-postgres.service 4 | Requires=maxun-redis.service 5 | Requires=maxun-minio.service 6 | After=maxun-postgres.service 7 | After=maxun-redis.service 8 | After=maxun-minio.service 9 | 10 | [Service] 11 | Restart=on-failure 12 | TimeoutStartSec=900 13 | 14 | [Install] 15 | WantedBy=default.target 16 | 17 | 
[Container] 18 | Image=docker.io/getmaxun/maxun-backend:latest 19 | ContainerName=maxun-backend 20 | AutoUpdate=registry 21 | 22 | Network=maxun.network 23 | HostName=maxun-backend 24 | PublishPort=8080:8080 25 | 26 | Volume=/var/run/dbus:/var/run/dbus 27 | 28 | EnvironmentFile=maxun.env 29 | Environment=PLAYWRIGHT_BROWSERS_PATH=/ms-playwright 30 | Environment=PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD=0 31 | Environment=CHROMIUM_FLAGS="--disable-gpu --no-sandbox --headless=new" 32 | -------------------------------------------------------------------------------- /quadlets/maxun/maxun-minio.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Maxun Minio 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/minio/minio 13 | ContainerName=maxun-minio 14 | AutoUpdate=registry 15 | 16 | Network=maxun.network 17 | HostName=maxun-minio 18 | PublishPort=9000:9000 19 | PublishPort=9001:9001 20 | 21 | Volume=maxun-minio:/data 22 | 23 | Environment=MINIO_ROOT_USER=minio-root-user 24 | 25 | Secret=maxun-minio-root-password,type=env,target=MINIO_ROOT_PASSWORD 26 | -------------------------------------------------------------------------------- /quadlets/maxun/maxun-postgres.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Maxun postgres 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/postgres:13 13 | ContainerName=maxun-postgres 14 | AutoUpdate=registry 15 | 16 | Network=maxun.network 17 | HostName=maxun-postgres 18 | PublishPort=5432:5432 19 | 20 | Volume=maxun-postgres:/var/lib/postgresql/data 21 | -------------------------------------------------------------------------------- /quadlets/maxun/maxun-redis.container: 
-------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Maxun Redis 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/redis:6 13 | ContainerName=maxun-redis 14 | AutoUpdate=registry 15 | 16 | Network=maxun.network 17 | HostName=maxun-redis 18 | PublishPort=6379:6379 19 | 20 | Volume=maxun-redis:/data 21 | 22 | EnvironmentFile=maxun.env 23 | -------------------------------------------------------------------------------- /quadlets/maxun/maxun.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Maxun frontend 3 | Requires=maxun-backend.service 4 | After=maxun-backend.service 5 | 6 | [Service] 7 | Restart=on-failure 8 | TimeoutStartSec=900 9 | 10 | [Install] 11 | WantedBy=default.target 12 | 13 | [Container] 14 | Image=docker.io/getmaxun/maxun-frontend:latest 15 | ContainerName=maxun 16 | AutoUpdate=registry 17 | 18 | Network=maxun.network 19 | HostName=maxun 20 | PublishPort=5173:5173 21 | 22 | EnvironmentFile=maxun.env 23 | -------------------------------------------------------------------------------- /quadlets/maxun/maxun.env: -------------------------------------------------------------------------------- 1 | # App Setup 2 | NODE_ENV=production # Set to 'development' or 'production' as required 3 | JWT_SECRET= # Replace with a secure JWT secret key 4 | DB_NAME=maxun # Your PostgreSQL database name 5 | DB_USER=postgres # PostgreSQL username 6 | DB_PASSWORD=postgres # PostgreSQL password 7 | DB_HOST=postgres # Host for PostgreSQL in Docker 8 | DB_PORT=5432 # Port for PostgreSQL (default: 5432) 9 | ENCRYPTION_KEY=f4d5e6a7b8c9d0e1f23456789abcdef01234567890abcdef123456789abcdef0 # Key for encrypting sensitive data (passwords and proxies) 10 | MINIO_ENDPOINT=minio # MinIO endpoint in Docker 11 | MINIO_PORT=9000 # Port for MinIO (default: 9000) 12 | 
MINIO_CONSOLE_PORT=9001 # Web UI Port for MinIO (default: 9001) 13 | MINIO_ACCESS_KEY=minio_access_key # MinIO access key 14 | MINIO_SECRET_KEY=minio_secret_key # MinIO secret key 15 | REDIS_HOST=redis # Redis host in Docker 16 | REDIS_PORT=6379 # Redis port (default: 6379) 17 | REDIS_PASSWORD=redis_password # Redis password (This is optional. Needed to authenticate with a password-protected Redis instance; if not set, Redis will connect without authentication.) 18 | 19 | # Backend and Frontend URLs and Ports 20 | BACKEND_PORT=8080 # Port to run backend on. Needed for Docker setup 21 | FRONTEND_PORT=5173 # Port to run frontend on. Needed for Docker setup 22 | BACKEND_URL=http://localhost:8080 # URL on which the backend runs. You can change it based on your needs. 23 | PUBLIC_URL=http://localhost:5173 # URL on which the frontend runs. You can change it based on your needs. 24 | VITE_BACKEND_URL=http://localhost:8080 # URL used by frontend to connect to backend. It should always have the same value as BACKEND_URL 25 | VITE_PUBLIC_URL=http://localhost:5173 # URL used by backend to connect to frontend. It should always have the same value as PUBLIC_URL 26 | 27 | # Optional Google OAuth settings for Google Sheet Integration 28 | GOOGLE_CLIENT_ID=your_google_client_id 29 | GOOGLE_CLIENT_SECRET=your_google_client_secret 30 | GOOGLE_REDIRECT_URI=your_google_redirect_uri 31 | 32 | # Optional Airtable OAuth settings for Airtable Integration 33 | 34 | AIRTABLE_CLIENT_ID=your_airtable_client_id 35 | AIRTABLE_REDIRECT_URI=http://localhost:8080/auth/airtable/callback 36 | 37 | # Telemetry Settings - Please keep it enabled. Keeping it enabled helps us understand how the product is used and assess the impact of any new changes. 
38 | MAXUN_TELEMETRY=true 39 | 40 | -------------------------------------------------------------------------------- /quadlets/maxun/maxun.network: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Maxun network 3 | 4 | [Network] 5 | NetworkName=maxun 6 | 7 | -------------------------------------------------------------------------------- /quadlets/maxun/maxun.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=maxun-postgres 3 | VolumeName=maxun-redis 4 | VolumeName=maxun-minio 5 | 6 | -------------------------------------------------------------------------------- /quadlets/mealie/mealie.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Mealie Quadlet 3 | 4 | [Service] 5 | Restart=always 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/hkotel/mealie:latest 13 | ContainerName=mealie 14 | 15 | Network=mealie.network 16 | HostName=mealie 17 | 18 | Volume=mealie-data:/app/data 19 | 20 | Environment=BASE_URL=https://mealie.example.com 21 | Environment=RECIPE_PUBLIC='true' 22 | Environment=RECIPE_SHOW_NUTRITION='true' 23 | Environment=RECIPE_SHOW_ASSETS='true' 24 | Environment=RECIPE_LANDSCAPE_VIEW='true' 25 | Environment=RECIPE_DISABLE_COMMENTS='false' 26 | Environment=RECIPE_DISABLE_AMOUNT='false' 27 | 28 | -------------------------------------------------------------------------------- /quadlets/mealie/mealie.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/mealie/mealie.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=mealie-data 3 | 
-------------------------------------------------------------------------------- /quadlets/memos/memos.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Memos note taking 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/neosmemo/memos:stable 13 | ContainerName=memos 14 | 15 | Network=memos.network 16 | HostName=memos 17 | PublishPort=5230 18 | 19 | Volume=memos-data:/var/opt/memos 20 | 21 | -------------------------------------------------------------------------------- /quadlets/memos/memos.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/memos/memos.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=memos-data 3 | -------------------------------------------------------------------------------- /quadlets/miniflux/miniflux-postgres.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=miniflux postgres 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/postgres:17-alpine 13 | ContainerName=miniflux-postgres 14 | AutoUpdate=registry 15 | 16 | Network=miniflux.network 17 | HostName=miniflux-postgres 18 | 19 | Volume=miniflux-postgres:/var/lib/postgresql/data 20 | 21 | Environment=POSTGRES_USER=miniflux 22 | Environment=POSTGRES_DB=miniflux 23 | 24 | Secret=miniflux-postgres-password,type=env,target=POSTGRES_PASSWORD 25 | -------------------------------------------------------------------------------- /quadlets/miniflux/miniflux.container: -------------------------------------------------------------------------------- 1 | [Unit] 
2 | Description=miniflux 3 | Requires=miniflux-postgres.service 4 | After=miniflux-postgres.service 5 | 6 | [Service] 7 | Restart=on-failure 8 | TimeoutStartSec=900 9 | 10 | [Install] 11 | WantedBy=default.target 12 | 13 | [Container] 14 | Image=docker.io/miniflux/miniflux:latest 15 | ContainerName=miniflux 16 | AutoUpdate=registry 17 | 18 | Network=miniflux.network 19 | HostName=miniflux 20 | PublishPort=80:8080 21 | 22 | Environment=DATABASE_URL=postgres://miniflux:secret@miniflux-postgres/miniflux?sslmode=disable 23 | Environment=RUN_MIGRATIONS=1 24 | Environment=CREATE_ADMIN=1 25 | Environment=ADMIN_USERNAME=admin 26 | 27 | Secret=miniflux-admin-password,type=env,target=ADMIN_PASSWORD 28 | -------------------------------------------------------------------------------- /quadlets/miniflux/miniflux.network: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=miniflux network 3 | 4 | [Network] 5 | NetworkName=miniflux 6 | 7 | -------------------------------------------------------------------------------- /quadlets/miniflux/miniflux.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=miniflux-postgres 3 | 4 | -------------------------------------------------------------------------------- /quadlets/minio/minio.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=A minio server container for local S3 compatible storage 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | ContainerName=minio 13 | Image=docker.io/minio/minio 14 | AutoUpdate=registry 15 | 16 | Network=minio.network 17 | HostName=minio 18 | PublishPort=9000:9000 19 | PublishPort=9001:9001 20 | 21 | Volume=minio-data:/data 22 | 23 | Environment=MINIO_ROOT_USER=admin 24 | Environment=MINIO_VOLUMES=/data 25 | 26 | 
Secret=minio-root-pw,type=env,target=MINIO_ROOT_PASSWORD 27 | 28 | Exec=server --console-address ":9001" 29 | 30 | -------------------------------------------------------------------------------- /quadlets/minio/minio.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/minio/minio.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=minio-data 3 | -------------------------------------------------------------------------------- /quadlets/mirotalk/mirotalk.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Mirotalk whiteboard 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/mirotalk/p2p 13 | ContainerName=mirotalk 14 | AutoUpdate=registry 15 | 16 | Network=mirotalk.network 17 | HostName=mirotalk 18 | PublishPort=3000:3000 19 | 20 | Volume=./mirotalk.env:/src/.env 21 | -------------------------------------------------------------------------------- /quadlets/mirotalk/mirotalk.env: -------------------------------------------------------------------------------- 1 | # Domain 2 | 3 | HOST=localhost 4 | 5 | # Signaling Server listen port 6 | 7 | PORT=3000 8 | 9 | # Enable self-signed certs (app/ssl) 10 | 11 | HTTPS=false # true or false 12 | 13 | # Time Zone corresponding to timezone identifiers from the IANA Time Zone Database es Europe/Rome default UTC 14 | 15 | TZ=UTC 16 | 17 | # Logs 18 | 19 | LOGS_DEBUG=true # true or false 20 | LOGS_COLORS=true # true or false 21 | 22 | # Cors 23 | # Origin: Allow specified origin es '["https://example.com", "https://subdomain.example.com", "http://localhost:3000"]' or 24 | # all origins '*' if not specified as per default. 
25 | # Methods: Allow only GET and POST methods 26 | 27 | CORS_ORIGIN='*' 28 | CORS_METHODS='["GET", "POST"]' 29 | 30 | # IP whitelist 31 | # Access to the instance is restricted to only the specified IP addresses in the allowed list. This feature is disabled by default. 32 | 33 | IP_WHITELIST_ENABLED=false # true or false 34 | IP_WHITELIST_ALLOWED='["127.0.0.1", "::1"]' 35 | 36 | # OIDC - OpenID Connect 37 | # 1. Sign up for an account at https://auth0.com. 38 | # 2. Navigate to https://manage.auth0.com/ to create a new application tailored to your specific requirements. 39 | # For those seeking an open-source solution, check out: https://github.com/panva/node-oidc-provider 40 | 41 | OIDC_ENABLED=false # true or false 42 | OIDC_ISSUER_BASE_URL='https://server.example.com' 43 | OIDC_BASE_URL='http://localhost:3000' # https://p2p.mirotalk.com 44 | OIDC_CLIENT_ID='ClientID' 45 | OIDC_CLIENT_SECRET='ClientSecret' 46 | OIDC_AUTH_REUIRED=false # set to true if authentication is required for all routes 47 | SESSION_SECRET='mirotalk-p2p-oidc-secret' 48 | 49 | # Host protection 50 | # HOST_PROTECTED: 51 | # - When set to true, it requires a valid username and password from the HOST_USERS list to initialize or join a room. 52 | # - When OIDC_ENABLED is utilized alongside host protection, the authenticated user will be recognized as valid.# HOST_USER_AUTH: When set to true, it also requires a valid username and password for joining the room. 53 | # HOST_USERS: This is the list of valid users along with their credentials. 54 | 55 | HOST_PROTECTED=false # true or false 56 | HOST_USER_AUTH=false # true or false 57 | HOST_USERS='[{"username": "username", "password": "password"},{"username": "username2", "password": "password2"}]' 58 | 59 | # JWT token config 60 | 61 | JWT_KEY=mirotalkp2p_jwt_secret 62 | JWT_EXP=1h 63 | 64 | # Presenters list 65 | # In our virtual room, the first participant to join will assume the role of the presenter. 
66 | # Additionally, we have the option to include more presenters and co-presenters, each identified by their username. 67 | 68 | PRESENTERS='["Miroslav Pejic", "miroslav.pejic.85@gmail.com"]' 69 | 70 | # Ngrok 71 | # 1. Goto https://ngrok.com 72 | # 2. Get started for free 73 | # 3. Copy YourNgrokAuthToken: https://dashboard.ngrok.com/get-started/your-authtoken 74 | 75 | NGROK_ENABLED=false # true or false 76 | NGROK_AUTH_TOKEN=YourNgrokAuthToken 77 | 78 | # Stun 79 | # About: https://bloggeek.me/webrtcglossary/stun/ 80 | # Check: https://webrtc.github.io/samples/src/content/peerconnection/trickle-ice/ 81 | 82 | STUN_SERVER_ENABLED=true # true or false 83 | STUN_SERVER_URL=stun:stun.l.google.com:19302 84 | 85 | # Turn 86 | # About: https://bloggeek.me/webrtcglossary/turn/ 87 | # Recommended: https://github.com/coturn/coturn 88 | # Installation: https://github.com/miroslavpejic85/mirotalk/blob/master/docs/coturn.md 89 | # Free one: https://www.metered.ca/tools/openrelay/ (Please, create your own account) 90 | # Check: https://webrtc.github.io/samples/src/content/peerconnection/trickle-ice/ 91 | 92 | TURN_SERVER_ENABLED=true # true or false 93 | TURN_SERVER_URL=turn:a.relay.metered.ca:443 94 | TURN_SERVER_USERNAME=e8dd65b92c62d3e36cafb807 95 | TURN_SERVER_CREDENTIAL=uWdWNmkhvyqTEswO 96 | 97 | # IP lookup 98 | # Using GeoJS to get more info about peer by IP 99 | # Doc: https://www.geojs.io/docs/v1/endpoints/geo/ 100 | 101 | IP_LOOKUP_ENABLED=false # true or false 102 | 103 | # API 104 | # The response will give you a entrypoint / Room URL for your meeting. 105 | # curl -X POST "http://localhost:3000/api/v1/meeting" -H "authorization: mirotalkp2p_default_secret" -H "Content-Type: application/json" 106 | 107 | API_KEY_SECRET=mirotalkp2p_default_secret 108 | API_DISABLED='["token", "meetings"]' 109 | 110 | # Survey URL 111 | # Using to redirect the client after close the call (feedbacks, website...) 
112 | 113 | SURVEY_ENABLED=false # true or false 114 | SURVEY_URL=https://www.questionpro.com/t/AUs7VZq00L 115 | 116 | # Redirect URL on leave room 117 | # Upon leaving the room, users who either opt out of providing feedback or if the survey is disabled 118 | # will be redirected to a specified URL. If enabled false the default '/newrcall' URL will be used. 119 | 120 | REDIRECT_ENABLED=false # true or false 121 | REDIRECT_URL='https://p2p.mirotalk.com' 122 | 123 | # Sentry (optional) 124 | # 1. Goto https://sentry.io/ 125 | # 2. Create account 126 | # 3. Goto Settings/Projects/YourProjectName/Client Keys (DSN) 127 | 128 | SENTRY_ENABLED=false # true or false 129 | SENTRY_DSN=YourClientKeyDSN 130 | SENTRY_TRACES_SAMPLE_RATE=1.0 131 | 132 | # Slack Integration (optional) 133 | # 1. Goto https://api.slack.com/apps/ 134 | # 2. Create your app 135 | # 3. On Settings - Basic Information - App Credentials chose your Signing Secret 136 | # 4. Create a Slash Commands and put as Request URL: https://your.domain.name/slack 137 | 138 | SLACK_ENABLED=false # true or false 139 | SLACK_SIGNING_SECRET=YourSlackSigningSecret 140 | 141 | # Mattermost Integration (optional) 142 | # 1. Navigate to Main Menu > Integrations > Slash Commands in Mattermost. 143 | # 2. Click on Add Slash Command and configure the following settings: 144 | # - Title: Enter a descriptive title (e.g., `P2P Command`). 145 | # - Command Trigger Word: Set the trigger word to `p2p`. 146 | # - Callback URLs: Enter the URL for your Express server (e.g., `https://yourserver.com/mattermost`). 147 | # - Request Method: Select POST. 148 | # - Enable Autocomplete: Check the box for Autocomplete. 149 | # - Autocomplete Description: Provide a brief description (e.g., `Get MiroTalk P2P meeting room`). 150 | # 3. Save the slash command and copy the generated token here as MATTERMOST_TOKEN. 
151 | 152 | MATTERMOST_ENABLED=false # true or false 153 | MATTERMOST_SERVER_URL=YourMattermostServerUrl 154 | MATTERMOST_USERNAME=YourMattermostUsername 155 | MATTERMOST_PASSWORD=YourMattermostPassword 156 | MATTERMOST_TOKEN=YourMettarmostToken 157 | 158 | # ChatGPT/OpenAI 159 | # 1. Goto https://platform.openai.com/ 160 | # 2. Create your account 161 | # 3. Generate your APIKey https://platform.openai.com/account/api-keys 162 | 163 | CHATGPT_ENABLED=false # true or false 164 | CHATGPT_BASE_PATH=https://api.openai.com/v1/ 165 | CHATGPT_APIKEY=YourOpenAiApiKey 166 | CHATGPT_MODEL=gpt-3.5-turbo 167 | CHATGPT_MAX_TOKENS=1000 168 | CHATGPT_TEMPERATURE=0 169 | 170 | # Configure email settings for notifications or alerts 171 | # Refer to the documentation for Gmail configuration: https://support.google.com/mail/answer/185833?hl=en 172 | 173 | EMAIL_ALERT=false # true or false 174 | EMAIL_HOST=smtp.gmail.com 175 | EMAIL_PORT=587 176 | EMAIL_USERNAME=your_username 177 | EMAIL_PASSWORD=your_password 178 | EMAIL_SEND_TO=p2p.mirotalk@gmail.com 179 | 180 | # Stats 181 | # Umami: https://github.com/umami-software/umami 182 | # We use our Self-hosted Umami to track aggregated usage statistics in order to improve our service. 
183 | 184 | STATS_ENABLED=false # true or false 185 | STATS_SCR=https://stats.mirotalk.com/script.js 186 | STATS_ID=c7615aa7-ceec-464a-baba-54cb605d7261 187 | -------------------------------------------------------------------------------- /quadlets/mirotalk/mirotalk.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/n8n/n8n.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=n8n 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.n8n.io/n8nio/n8n 13 | ContainerName=n8n 14 | AutoUpdate=registry 15 | 16 | Network=n8n.network 17 | HostName=n8n 18 | PublishPort=5678:5678 19 | 20 | Volume=n8n-data:/home/node/.n8n 21 | -------------------------------------------------------------------------------- /quadlets/n8n/n8n.network: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=n8n network 3 | 4 | [Network] 5 | NetworkName=n8n 6 | 7 | -------------------------------------------------------------------------------- /quadlets/n8n/n8n.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=n8n-data 3 | 4 | -------------------------------------------------------------------------------- /quadlets/nebula/nebula.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Nebula overlay network 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/nebulaoss/nebula 13 | ContainerName=nebula 14 | AddCapability=NET_ADMIN 15 | 16 | HostName=nebula 17 | Network=host 18 | 19 | Volume=nebula-config:/config 
20 | -------------------------------------------------------------------------------- /quadlets/netboot-xyz/netboot-xyz.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Netboot.xyz Quadlet 3 | 4 | [Service] 5 | Restart=always 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=lscr.io/linuxserver/netbootxyz:latest 13 | ContainerName=netboot-xyz 14 | AutoUpdate=registry 15 | 16 | Network=netboot-xyz.network 17 | HostName=netboot-xyz 18 | PublishPort=3000:3000 19 | PublishPort=8082:80 20 | PublishPort=69:69/udp 21 | 22 | Volume=netboot-xyz-config:/config 23 | Volume=netboot-xyz-assets:/assets:z 24 | 25 | Environment=TZ=Etc/UTC 26 | Environment=PORT_RANGE=30000:30010 27 | Environment=SUBFOLDER=/ 28 | Environment=WEB_APP_PORT=3000 29 | 30 | 31 | -------------------------------------------------------------------------------- /quadlets/netboot-xyz/netboot-xyz.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/netboot-xyz/netboot-xyz.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=netboot-xyz-config 3 | VolumeName=netboot-xyz-assets -------------------------------------------------------------------------------- /quadlets/netdata/netdata.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Netdata monitoring 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/netdata/netdata:stable 13 | ContainerName=netdata 14 | AutoUpdate=registry 15 | 16 | AddCapability=SYS_PTRACE 17 | AddCapability=SYS_ADMIN 18 | 19 | Network=host 20 | HostName=netdata 21 | 22 | 
Volume=netdata-config:/etc/netdata 23 | Volume=netdata-lib:/var/lib/netdata 24 | Volume=netdata-cache:/var/cache/netdata 25 | Volume=/:/host/root:ro,rslave 26 | Volume=/etc/passwd:/host/etc/passwd:ro 27 | Volume=/etc/group:/host/etc/group:ro 28 | Volume=/etc/localtime:/etc/localtime:ro 29 | Volume=/proc:/host/proc:ro 30 | Volume=/sys:/host/sys:ro 31 | Volume=/etc/os-release:/host/etc/os-release:ro 32 | Volume=/var/log:/host/var/log:ro 33 | Volume=%t/podman/podman.sock:/run/user/1000/podman/podman.sock:ro 34 | 35 | -------------------------------------------------------------------------------- /quadlets/netdata/netdata.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=netdata-cache 3 | VolumeName=netdata-config 4 | VolumeName=netdata-lib 5 | 6 | -------------------------------------------------------------------------------- /quadlets/nginx/nginx.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Nginx container 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/nginx 13 | ContainerName=nginx 14 | AutoUpdate=registry 15 | 16 | Network=nginx.network 17 | HostName=nginx 18 | PublishPort=80:80 19 | PublishPort=443:443 20 | 21 | Volume=nginx-html:/usr/share/nginx/html 22 | 23 | Environment=TZ=Etc/UTC 24 | -------------------------------------------------------------------------------- /quadlets/nginx/nginx.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/nginx/nginx.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=nginx-html 3 | -------------------------------------------------------------------------------- 
/quadlets/openobserve/openobserve.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=OpenObserve 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=public.ecr.aws/zinclabs/openobserve:latest 13 | ContainerName=openobserve 14 | AutoUpdate=registry 15 | 16 | Network=openobserve.network 17 | HostName=openobserve 18 | PublishPort=5080:5080 19 | 20 | Volume=openobserve-data:/data 21 | 22 | Environment=ZO_ROOT_USER_EMAIL=root@example.com 23 | 24 | Secret=openobserve-root-user-password,type=env,target=ZO_ROOT_USER_PASSWORD 25 | -------------------------------------------------------------------------------- /quadlets/openobserve/openobserve.network: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=OpenObserve network 3 | 4 | [Network] 5 | NetworkName=openobserve 6 | 7 | -------------------------------------------------------------------------------- /quadlets/openobserve/openobserve.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=openobserve-data 3 | 4 | -------------------------------------------------------------------------------- /quadlets/openspeedtest/openspeedtest.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=OpenSpeedTest server 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/openspeedtest/latest 13 | ContainerName=openspeedtest 14 | AutoUpdate=registry 15 | 16 | Network=openspeedtest.network 17 | HostName=openspeedtest 18 | PublishPort=80:3000 19 | PublishPort=443:3001 20 | 21 | Environment=ENABLE_LETSENCRYPT=True 22 | Environment=DOMAIN_NAME=openspeedtest.example.com 23 | 
Environment=USER_EMAIL=you@example.com 24 | -------------------------------------------------------------------------------- /quadlets/penpot/penpot-backend.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Penpot backend 3 | Before=penpot.service 4 | Requires=penpot-postgres.service 5 | Requires=penpot-redis.service 6 | After=penpot-postgres.service 7 | After=penpot-redis.service 8 | 9 | [Service] 10 | Restart=on-failure 11 | TimeoutStartSec=900 12 | 13 | [Install] 14 | WantedBy=default.target 15 | 16 | [Container] 17 | Image=docker.io/penpotapp/backend 18 | ContainerName=penpot-backend 19 | AutoUpdate=registry 20 | 21 | Network=penpot.network 22 | HostName=penpot-backend 23 | 24 | Volume=penpot-assets:/opt/data/assets 25 | 26 | EnvironmentFile=penpot-backend.env 27 | EnvironmentFile=penpot.env 28 | -------------------------------------------------------------------------------- /quadlets/penpot/penpot-backend.env: -------------------------------------------------------------------------------- 1 | ## Penpot SECRET KEY. It serves as a master key from which other keys for subsystems 2 | ## (eg http sessions, or invitations) are derived. 3 | ## 4 | ## If you leave it commented, all created sessions and invitations will 5 | ## become invalid on container restart. 6 | ## 7 | ## If you are going to uncomment this, we recommend to use a truly randomly generated 8 | ## 512 bits base64 encoded string here. You can generate one with: 9 | ## 10 | ## python3 -c "import secrets; print(secrets.token_urlsafe(64))" 11 | 12 | # PENPOT_SECRET_KEY=my-insecure-key 13 | 14 | ## The PREPL host. Mainly used for external programmatic access to penpot backend 15 | ## (example=admin). By default it will listen on `localhost` but if you are going to use 16 | ## the `admin`, you will need to uncomment this and set the host to `0.0.0.0`. 17 | 18 | PENPOT_PREPL_HOST=0.0.0.0 19 | 20 | ## Database connection parameters.
Don't touch them unless you are using custom 21 | ## postgresql connection parameters. 22 | 23 | PENPOT_DATABASE_URI=postgresql://penpot-postgres/penpot 24 | PENPOT_DATABASE_USERNAME=penpot 25 | PENPOT_DATABASE_PASSWORD=penpot 26 | 27 | ## Redis is used for the websockets notifications. Don't touch unless the redis 28 | ## container has different parameters or different name. 29 | 30 | PENPOT_REDIS_URI=redis://penpot-redis/0 31 | 32 | ## Default configuration for assets storage=using filesystem based with all files 33 | ## stored in a docker volume. 34 | 35 | PENPOT_ASSETS_STORAGE_BACKEND=assets-fs 36 | PENPOT_STORAGE_ASSETS_FS_DIRECTORY=/opt/data/assets 37 | 38 | ## Also can be configured to use an S3 compatible storage 39 | ## service like MinIO. Look below for minio service setup. 40 | 41 | # AWS_ACCESS_KEY_ID= 42 | # AWS_SECRET_ACCESS_KEY= 43 | # PENPOT_ASSETS_STORAGE_BACKEND=assets-s3 44 | # PENPOT_STORAGE_ASSETS_S3_ENDPOINT=http://penpot-minio:9000 45 | # PENPOT_STORAGE_ASSETS_S3_BUCKET= 46 | 47 | ## Telemetry. When enabled, a periodical process will send anonymous data about this 48 | ## instance. Telemetry data will enable us to learn how the application is used, 49 | ## based on real scenarios. If you want to help us, please leave it enabled. You can 50 | ## audit what data we send with the code available on github. 51 | 52 | PENPOT_TELEMETRY_ENABLED=true 53 | PENPOT_TELEMETRY_REFERER=compose 54 | 55 | ## Example SMTP/Email configuration. By default, emails are sent to the mailcatch 56 | ## service, but for production usage it is recommended to setup a real SMTP 57 | ## provider. Emails are used to confirm user registrations & invitations. Look below 58 | ## how the mailcatch service is configured.
59 | 60 | PENPOT_SMTP_DEFAULT_FROM=no-reply@penpot.example.com 61 | PENPOT_SMTP_DEFAULT_REPLY_TO=no-reply@penpot.example.com 62 | PENPOT_SMTP_HOST=penpot-mailcatcher 63 | PENPOT_SMTP_PORT=1025 64 | PENPOT_SMTP_USERNAME= 65 | PENPOT_SMTP_PASSWORD= 66 | PENPOT_SMTP_TLS=false 67 | PENPOT_SMTP_SSL=false 68 | 69 | -------------------------------------------------------------------------------- /quadlets/penpot/penpot-exporter.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Penpot exporter 3 | Requires=penpot-redis.service 4 | After=penpot-redis.service 5 | 6 | [Service] 7 | Restart=on-failure 8 | TimeoutStartSec=900 9 | 10 | [Install] 11 | WantedBy=default.target 12 | 13 | [Container] 14 | Image=docker.io/penpotapp/exporter 15 | ContainerName=penpot-exporter 16 | AutoUpdate=registry 17 | 18 | Network=penpot.network 19 | HostName=penpot-exporter 20 | 21 | EnvironmentFile=penpot.env 22 | Environment=PENPOT_REDIS_URI=redis://penpot-redis/0 23 | -------------------------------------------------------------------------------- /quadlets/penpot/penpot-mailcatcher.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Penpot mailcatcher 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/sj26/mailcatcher 13 | ContainerName=penpot-mailcatcher 14 | AutoUpdate=registry 15 | 16 | Network=penpot.network 17 | HostName=penpot-mailcatcher 18 | PublishPort=1025:1025 19 | PublishPort=1080:1080 20 | -------------------------------------------------------------------------------- /quadlets/penpot/penpot-minio.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Penpot MinIO 3 | Before=penpot-backend.service 4 | 5 | [Service] 6 | Restart=on-failure 7 | TimeoutStartSec=900 8 | 9 | [Install]
10 | WantedBy=default.target 11 | 12 | [Container] 13 | Image=docker.io/minio/minio 14 | ContainerName=penpot-minio 15 | AutoUpdate=registry 16 | 17 | Network=penpot.network 18 | HostName=penpot-minio 19 | PublishPort=9000:9000 20 | PublishPort=9001:9001 21 | 22 | Volume=penpot-minio:/mnt/data 23 | 24 | Environment=MINIO_ROOT_USER=minioadmin 25 | 26 | Secret=penpot-minio-root-password,type=env,target=MINIO_ROOT_PASSWORD 27 | -------------------------------------------------------------------------------- /quadlets/penpot/penpot-postgres.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Penpot postgres 3 | Before=penpot-backend.service 4 | 5 | [Service] 6 | Restart=on-failure 7 | TimeoutStartSec=900 8 | 9 | [Install] 10 | WantedBy=default.target 11 | 12 | [Container] 13 | Image=docker.io/postgres:15 14 | ContainerName=penpot-postgres 15 | AutoUpdate=registry 16 | 17 | Network=penpot.network 18 | HostName=penpot-postgres 19 | 20 | Volume=penpot-postgres:/var/lib/postgresql/data 21 | 22 | Environment=POSTGRES_INITDB_ARGS=--data-checksums 23 | Environment=POSTGRES_DB=penpot 24 | Environment=POSTGRES_USER=penpot 25 | 26 | Secret=penpot-postgres-password,type=env,target=POSTGRES_PASSWORD 27 | -------------------------------------------------------------------------------- /quadlets/penpot/penpot-redis.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Penpot redis 3 | Before=penpot-backend.service 4 | 5 | [Service] 6 | Restart=on-failure 7 | TimeoutStartSec=900 8 | 9 | [Install] 10 | WantedBy=default.target 11 | 12 | [Container] 13 | Image=docker.io/redis:7.2 14 | ContainerName=penpot-redis 15 | AutoUpdate=registry 16 | 17 | Network=penpot.network 18 | HostName=penpot-redis 19 | -------------------------------------------------------------------------------- /quadlets/penpot/penpot.container:
-------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Penpot 3 | Requires=penpot-backend.service 4 | Requires=penpot-exporter.service 5 | After=penpot-backend.service 6 | After=penpot-exporter.service 7 | 8 | [Service] 9 | Restart=on-failure 10 | TimeoutStartSec=900 11 | 12 | [Install] 13 | WantedBy=default.target 14 | 15 | [Container] 16 | Image=docker.io/penpotapp/frontend 17 | ContainerName=penpot 18 | AutoUpdate=registry 19 | 20 | Network=penpot.network 21 | HostName=penpot 22 | PublishPort=9001:8080 23 | 24 | Volume=penpot-assets:/opt/data/assets 25 | 26 | EnvironmentFile=penpot.env 27 | -------------------------------------------------------------------------------- /quadlets/penpot/penpot.env: -------------------------------------------------------------------------------- 1 | PENPOT_FLAGS=disable-email-verification enable-smtp enable-prepl-server disable-secure-session-cookies 2 | PENPOT_PUBLIC_URI=https://penpot.example.com 3 | PENPOT_HTTP_SERVER_MAX_BODY_SIZE=31457280 4 | PENPOT_HTTP_SERVER_MAX_MULTIPART_BODY_SIZE=367001600 5 | -------------------------------------------------------------------------------- /quadlets/penpot/penpot.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | 3 | -------------------------------------------------------------------------------- /quadlets/penpot/penpot.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=penpot-postgres 3 | VolumeName=penpot-assets 4 | #VolumeName=penpot-traefik 5 | #VolumeName=penpot-minio 6 | 7 | 8 | -------------------------------------------------------------------------------- /quadlets/pi-hole/pihole.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Pi-hole 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | 
WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/pihole/pihole:latest 13 | ContainerName=pihole 14 | ## WARNING: This should not be enabled unless you have good backups and 15 | # recovery plans. DNS is a critical piece of your network. 16 | # AutoUpdate=registry 17 | 18 | # DHCP 19 | # AddCapability=CAP_NET_ADMIN 20 | # NTP 21 | # AddCapability=CAP_SYS_TIME 22 | # Optional 23 | # AddCapability=CAP_SYS_NICE 24 | 25 | Network=pihole.network 26 | HostName=pihole 27 | # DNS 28 | PublishPort=53:53/tcp 29 | PublishPort=53:53/udp 30 | # HTTP 31 | # PublishPort=80:80/tcp 32 | # HTTPS 33 | # PublishPort=443:443/tcp 34 | # DHCP 35 | # PublishPort=67:67/udp 36 | # NTP 37 | # PublishPort=123:123/udp 38 | 39 | Volume=pihole-etc:/etc/pihole 40 | 41 | Environment=TZ=Etc/UTC 42 | Environment=FTLCONF_dns_listeningMode=all 43 | 44 | Secret=pihole-api-password,type=env,target=FTLCONF_webserver_api_password 45 | 46 | -------------------------------------------------------------------------------- /quadlets/pi-hole/pihole.network: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Pi-hole network 3 | 4 | [Network] 5 | NetworkName=pihole 6 | 7 | -------------------------------------------------------------------------------- /quadlets/pocket-id/pocket-id.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Pocket ID 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=ghcr.io/pocket-id/pocket-id 13 | ContainerName=pocket-id 14 | AutoUpdate=registry 15 | 16 | Network=pocket-id.network 17 | HostName=pocket-id 18 | PublishPort=3000:80 19 | 20 | Volume=pocket-id-data:/app/backend/data 21 | -------------------------------------------------------------------------------- /quadlets/pocket-id/pocket-id.network:
-------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/pocket-id/pocket-id.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=pocket-id-data 3 | 4 | -------------------------------------------------------------------------------- /quadlets/pointspend/pointspend.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Bonus points spender 3 | After=qbittorrent.service 4 | After=gluetun.service 5 | BindsTo=gluetun.service 6 | BindsTo=qbittorrent.service 7 | 8 | [Service] 9 | Restart=on-failure 10 | TimeoutStartSec=900 11 | 12 | [Install] 13 | WantedBy=default.target 14 | 15 | [Container] 16 | Image=docker.io/myanonamouse/pointspend:latest 17 | ContainerName=pointspend 18 | AutoUpdate=registry 19 | 20 | Network=container:gluetun 21 | HostName=pointspend 22 | 23 | Environment=BUFFER=1000 24 | Environment=WEDGEHOURS=0 25 | Environment=VIP=1 26 | 27 | Secret=mam_id,type=env,target=MAMID 28 | -------------------------------------------------------------------------------- /quadlets/postiz/postiz-postgres.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Postiz DB 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/postgres:17-alpine 13 | ContainerName=postiz-postgres 14 | AutoUpdate=registry 15 | 16 | Network=postiz.network 17 | HostName=postiz-postgres 18 | 19 | Volume=postiz-db:/var/lib/postgresql/data 20 | 21 | Environment=POSTGRES_USER=postiz-user 22 | Environment=POSTGRES_DB=postiz-db-local 23 | 24 | Secret=postiz-password,type=env,target=POSTGRES_PASSWORD 25 | -------------------------------------------------------------------------------- 
/quadlets/postiz/postiz-redis.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Postiz Redis 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/redis:7.2 13 | ContainerName=postiz-redis 14 | AutoUpdate=registry 15 | 16 | Network=postiz.network 17 | HostName=postiz-redis 18 | 19 | Volume=postiz-redis-data:/data 20 | -------------------------------------------------------------------------------- /quadlets/postiz/postiz.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Postiz social media management 3 | Requires=postiz-postgres.service 4 | Requires=postiz-redis.service 5 | After=postiz-postgres.service 6 | After=postiz-redis.service 7 | 8 | [Service] 9 | Restart=on-failure 10 | TimeoutStartSec=900 11 | 12 | [Install] 13 | WantedBy=default.target 14 | 15 | [Container] 16 | Image=ghcr.io/gitroomhq/postiz-app:latest 17 | ContainerName=postiz 18 | AutoUpdate=registry 19 | 20 | Network=postiz.network 21 | HostName=postiz 22 | PublishPort=5000:5000 23 | 24 | Volume=postiz-config:/config 25 | Volume=postiz-uploads:/uploads 26 | 27 | Environment=MAIN_URL=https://postiz.example.com 28 | Environment=FRONTEND_URL=https://postiz.example.com 29 | Environment=NEXT_PUBLIC_BACKEND_URL=https://postiz.example.com/api 30 | 31 | ## Make this whole env var a podman secret to hide the password 32 | ## Secrets do not populate the env before Environment is established 33 | ## Secret=postiz-db-url,type=env,target=DATABASE_URL 34 | Environment=DATABASE_URL=postgresql://postiz-user:postiz-password@postiz-postgres:5432/postiz-db-local 35 | Environment=REDIS_URL=redis://postiz-redis:6379 36 | Environment=BACKEND_INTERNAL_URL=http://localhost:3000 37 | Environment=IS_GENERAL=true 38 | 39 | Environment=STORAGE_PROVIDER=local 40 | Environment=UPLOAD_DIRECTORY=/uploads 41 
| Environment=NEXT_PUBLIC_UPLOAD_DIRECTORY=/uploads 42 | 43 | Secret=postiz-jwt,type=env,target=JWT_SECRET 44 | -------------------------------------------------------------------------------- /quadlets/postiz/postiz.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/postiz/postiz.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=postiz-config 3 | VolumeName=postiz-uploads 4 | VolumeName=postiz-redis-data 5 | VolumeName=postiz-db 6 | 7 | -------------------------------------------------------------------------------- /quadlets/prometheus/prometheus.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Prometheus monitoring 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=quay.io/prometheus/prometheus 13 | ContainerName=prometheus 14 | AutoUpdate=registry 15 | 16 | Network=prometheus.network 17 | HostName=prometheus 18 | PublishPort=9090:9090 19 | 20 | Volume=prometheus-data:/prometheus 21 | 22 | -------------------------------------------------------------------------------- /quadlets/prometheus/prometheus.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=prometheus-data 3 | 4 | -------------------------------------------------------------------------------- /quadlets/protonmail-bridge/protonmail-bridge.container: -------------------------------------------------------------------------------- 1 | ## NOTE: Requires `podman run --rm -it -v protonmail-bridge-root:/root docker.io/shenxn/protonmail-bridge init` to bootstrap 2 | ## Use `login` to set up your account, then `info` to find your password 3 | [Unit] 4 | 
Description=Protonmail secure bridge 5 | 6 | [Service] 7 | Restart=on-failure 8 | TimeoutStartSec=900 9 | 10 | [Install] 11 | WantedBy=default.target 12 | 13 | [Container] 14 | Image=docker.io/shenxn/protonmail-bridge 15 | ContainerName=protonmail-bridge 16 | AutoUpdate=registry 17 | 18 | Network=protonmail-bridge.network 19 | HostName=protonmail-bridge 20 | PublishPort=1143:143/tcp 21 | PublishPort=1025:25/tcp 22 | 23 | Volume=protonmail-bridge-root:/root 24 | -------------------------------------------------------------------------------- /quadlets/protonmail-bridge/protonmail-bridge.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/protonmail-bridge/protonmail-bridge.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=protonmail-bridge-root 3 | -------------------------------------------------------------------------------- /quadlets/prowlarr/prowlarr.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Manage indexers 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=ghcr.io/hotio/prowlarr 13 | ContainerName=prowlarr 14 | AutoUpdate=registry 15 | 16 | Network=prowlarr.network 17 | HostName=prowlarr 18 | PublishPort=9696 19 | 20 | Volume=prowlarr-config:/config 21 | Volume=/path/to/video:/data 22 | 23 | -------------------------------------------------------------------------------- /quadlets/prowlarr/prowlarr.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/prowlarr/prowlarr.volume: 
-------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=prowlarr-config 3 | -------------------------------------------------------------------------------- /quadlets/qbit_manage/config.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=qbit_manage-config 3 | 4 | -------------------------------------------------------------------------------- /quadlets/qbit_manage/config.yml: -------------------------------------------------------------------------------- 1 | # This is an example configuration file that documents all the options. 2 | # It will need to be modified for your specific use case. 3 | # Please refer to the link below for more details on how to set up the configuration file 4 | # https://github.com/StuffAnThings/qbit_manage/wiki/Config-Setup 5 | 6 | commands: 7 | # The commands defined below will IGNORE any commands used in command line and docker env variables. 8 | dry_run: True 9 | cross_seed: False 10 | recheck: False 11 | cat_update: False 12 | tag_update: False 13 | rem_unregistered: False 14 | tag_tracker_error: False 15 | rem_orphaned: False 16 | tag_nohardlinks: False 17 | share_limits: False 18 | skip_qb_version_check: False 19 | skip_cleanup: False 20 | 21 | qbt: 22 | # qBittorrent parameters 23 | host: "localhost:8080" 24 | user: "username" 25 | pass: "password" 26 | 27 | settings: 28 | force_auto_tmm: False # Will force qBittorrent to enable Automatic Torrent Management for each torrent. 29 | force_auto_tmm_ignore_tags: #Torrents with these tags will be ignored when force_auto_tmm is enabled. 30 | - cross-seed 31 | - Upload 32 | tracker_error_tag: issue # Will set the tag of any torrents that do not have a working tracker. 33 | nohardlinks_tag: noHL # Will set the tag of any torrents with no hardlinks. 
34 | share_limits_tag: ~share_limit # Will add this tag when applying share limits to provide an easy way to filter torrents by share limit group/priority for each torrent 35 | share_limits_min_seeding_time_tag: MinSeedTimeNotReached # Tag to be added to torrents that have not yet reached the minimum seeding time 36 | share_limits_min_num_seeds_tag: MinSeedsNotMet # Tag to be added to torrents that have not yet reached the minimum number of seeds 37 | share_limits_last_active_tag: LastActiveLimitNotReached # Tag to be added to torrents that have not yet reached the last active limit 38 | cross_seed_tag: cross-seed # Will set the tag of any torrents that are added by cross-seed command 39 | cat_filter_completed: True # Filters for completed torrents only when running cat_update command 40 | share_limits_filter_completed: True # Filters for completed torrents only when running share_limits command 41 | tag_nohardlinks_filter_completed: True # Filters for completed torrents only when running tag_nohardlinks command 42 | cat_update_all: True # Checks and updates all torrent categories if set to True when running cat_update command, otherwise only update torrents that are uncategorized 43 | disable_qbt_default_share_limits: True # Allows QBM to handle share limits by disabling qBittorrents default Share limits. Only active when the share_limits command is set to True 44 | 45 | directory: 46 | # Do not remove these 47 | # Cross-seed var: # Output directory of cross-seed 48 | # root_dir var: # Root downloads directory used to check for orphaned files, noHL, and RecycleBin. 49 | # remote_dir var: # Path of docker host mapping of root_dir. 50 | # remote_dir must be set if you're running qbit_manage locally and qBittorrent/cross_seed is in a docker 51 | # remote_dir should not be set if qbit_manage is running in a container 52 | # recycle_bin var: # Path of the RecycleBin folder. 
Default location is set to remote_dir/.RecycleBin 53 | # torrents_dir var: # Path of your qbittorrent torrents directory. Required for `save_torrents` attribute in recyclebin 54 | # orphaned_dir var: # Path of the Orphaned Data folder. This is similar to RecycleBin, but only for orphaned data. 55 | cross_seed: "/your/path/here/" 56 | root_dir: "/data/torrents/" 57 | remote_dir: "/mnt/user/data/torrents/" 58 | recycle_bin: "/mnt/user/data/torrents/.RecycleBin" 59 | torrents_dir: "/qbittorrent/data/BT_backup" 60 | orphaned_dir: "/data/torrents/orphaned_data" 61 | 62 | cat: 63 | # Category & Path Parameters 64 | # All save paths in qbittorrent must be populated below. 65 | # If you want to leave a save_path as uncategorized you can use the key 'Uncategorized' as the name of the category. 66 | # : # Path of your save directory. 67 | movies: "/data/torrents/Movies" 68 | tv: "/data/torrents/TV" 69 | 70 | cat_change: 71 | # This moves all the torrents from one category to another category. This executes on --cat-update 72 | # WARNING: if the paths are different and Default Torrent Management Mode is set to automatic the files could be moved !!! 73 | # : 74 | Radarr-HD.cross-seed: movies-hd 75 | Radarr-UHD.cross-seed: movies-uhd 76 | movies-hd.cross-seed: movies-hd 77 | movies-uhd.cross-seed: movies-uhd 78 | 79 | tracker: 80 | # Mandatory 81 | # Tag Parameters 82 | # : # This is the keyword in the tracker url. You can define multiple tracker urls by splitting with `|` delimiter 83 | # Set tag name. Can be a list of tags or a single tag 84 | # tag: 85 | # Set the category based on tracker URL. This category option takes priority over the category defined by save directory 86 | # cat: 87 | # Set this to the notifiarr react name.
This is used to add indexer reactions to the notifications sent by Notifiarr 88 | # notifiarr: 89 | animebytes.tv: 90 | tag: AnimeBytes 91 | notifiarr: animebytes 92 | avistaz: 93 | tag: 94 | - Avistaz 95 | - tag2 96 | - tag3 97 | notifiarr: avistaz 98 | beyond-hd: 99 | tag: [Beyond-HD, tag2, tag3] 100 | cat: movies 101 | notifiarr: beyondhd 102 | blutopia: 103 | tag: Blutopia 104 | notifiarr: blutopia 105 | cartoonchaos: 106 | tag: CartoonChaos 107 | digitalcore: 108 | tag: DigitalCore 109 | notifiarr: digitalcore 110 | gazellegames: 111 | tag: GGn 112 | hdts: 113 | tag: HDTorrents 114 | landof.tv: 115 | tag: BroadcasTheNet 116 | notifiarr: broadcasthenet 117 | myanonamouse: 118 | tag: MaM 119 | passthepopcorn: 120 | tag: PassThePopcorn 121 | notifiarr: passthepopcorn 122 | privatehd: 123 | tag: PrivateHD 124 | notifiarr: 125 | torrentdb: 126 | tag: TorrentDB 127 | notifiarr: torrentdb 128 | torrentleech|tleechreload: 129 | tag: TorrentLeech 130 | notifiarr: torrentleech 131 | tv-vault: 132 | tag: TV-Vault 133 | # The "other" key is a special keyword and if defined will tag any other trackers that don't match the above trackers into this tag 134 | other: 135 | tag: other 136 | 137 | nohardlinks: 138 | # Tag Movies/Series that are not hard linked outside the root directory 139 | # Mandatory to fill out directory parameter above to use this function (root_dir/remote_dir) 140 | # This variable should be set to your category name of your completed movies/completed series in qbit. Acceptable variable can be any category you would like to tag if there are no hardlinks found 141 | movies-completed-4k: 142 | series-completed-4k: 143 | movies-completed: 144 | # exclude_tags var: Will exclude torrents with any of the following tags when searching through the category. 145 | exclude_tags: 146 | - Beyond-HD 147 | - AnimeBytes 148 | - MaM 149 | # ignore_root_dir var: Will ignore any hardlinks detected in the same root_dir (Default True). 
150 | ignore_root_dir: true 151 | # Can have additional categories set with separate ratio/seeding times defined. 152 | series-completed: 153 | # exclude_tags var: Will exclude torrents with any of the following tags when searching through the category. 154 | exclude_tags: 155 | - Beyond-HD 156 | - BroadcasTheNet 157 | # ignore_root_dir var: Will ignore any hardlinks detected in the same root_dir (Default True). 158 | ignore_root_dir: true 159 | 160 | share_limits: 161 | # Control how torrent share limits are set depending on the priority of your grouping 162 | # Each torrent will be matched with the share limit group with the highest priority that meets the group filter criteria. 163 | # Each torrent can only be matched with one share limit group 164 | # This variable is mandatory and is a text defining the name of your grouping. This can be any string you want 165 | noHL: 166 | # priority: # This is the priority of your grouping. The lower the number the higher the priority 167 | priority: 1 168 | # include_all_tags: # Filter the group based on one or more tags. Multiple include_all_tags are checked with an AND condition 169 | # All tags defined here must be present in the torrent for it to be included in this group 170 | include_all_tags: 171 | - noHL 172 | # include_any_tags: # Filter the group based on one or more tags. Multiple include_any_tags are checked with an OR condition 173 | # Any tags defined here must be present in the torrent for it to be included in this group 174 | include_any_tags: 175 | - noHL 176 | # exclude_all_tags: # Filter by excluding one or more tags. Multiple exclude_all_tags are checked with an AND condition 177 | # This is useful to combine with the category filter to exclude one or more tags from an entire category 178 | # All tags defined here must be present in the torrent for it to be excluded in this group 179 | exclude_all_tags: 180 | - Beyond-HD 181 | # exclude_any_tags: # Filter by excluding one or more tags. 
Multiple exclude_any_tags are checked with an OR condition 182 | # This is useful to combine with the category filter to exclude one or more tags from an entire category 183 | # Any tags defined here must be present in the torrent for it to be excluded in this group 184 | exclude_any_tags: 185 | - Beyond-HD 186 | # categories: # Filter by including one or more categories. Multiple categories are checked with an OR condition 187 | # Since one torrent can only be associated with a single category, multiple categories are checked with an OR condition 188 | categories: 189 | - RadarrComplete 190 | - SonarrComplete 191 | # max_ratio : Will set the torrent Maximum share ratio until torrent is stopped from seeding/uploading and may be cleaned up / removed if the minimums have been met. 192 | # Will default to -1 (no limit) if not specified for the group. 193 | max_ratio: 5.0 194 | # max_seeding_time : Will set the torrent Maximum seeding time until torrent is stopped from seeding/uploading and may be cleaned up / removed if the minimums have been met. 195 | # See Some examples of valid time expressions (https://github.com/onegreyonewhite/pytimeparse2) 196 | # 32m, 2h32m, 3d2h32m, 1w3d2h32m 197 | # Will default to -1 (no limit) if not specified for the group. (Max value of 1 year (525600 minutes)) 198 | max_seeding_time: 90d 199 | # min_seeding_time : Will prevent torrent deletion by cleanup variable if torrent has not yet minimum seeding time (minutes). 200 | # This should only be set if you are using this in conjunction with max_seeding_time and max_ratio. If you are not setting a max_ratio, then use max_seeding_time instead. 201 | # If the torrent has not yet reached this minimum seeding time, it will change the share limits back to no limits and resume the torrent to continue seeding. 
202 | # See Some examples of valid time expressions (https://github.com/onegreyonewhite/pytimeparse2) 203 | # 32m, 2h32m, 3d2h32m, 1w3d2h32m 204 | # Will default to 0 if not specified for the group. 205 | min_seeding_time: 30d 206 | # last_active : Will prevent torrent deletion by cleanup variable if torrent has been active within the last x minutes. 207 | # If the torrent has been active within the last x minutes, it will change the share limits back to no limits and resume the torrent to continue seeding. 208 | # See Some examples of valid time expressions (https://github.com/onegreyonewhite/pytimeparse2) 209 | # 32m, 2h32m, 3d2h32m, 1w3d2h32m 210 | # Will default to 0 if not specified for the group. 211 | last_active: 30d 212 | # Limit Upload Speed : Will limit the upload speed KiB/s (KiloBytes/second) (`-1` : No Limit) 213 | limit_upload_speed: 0 214 | # Enable Group Upload Speed : Upload speed limits are applied at the group level. This will take limit_upload_speed defined and divide it equally among the number of torrents in the group. 215 | enable_group_upload_speed: false 216 | # cleanup : WARNING!! Setting this as true Will remove and delete contents of any torrents that satisfies the share limits (max time OR max ratio) 217 | cleanup: false 218 | # resume_torrent_after_change : This variable will resume your torrent after changing share limits. Default is true 219 | resume_torrent_after_change: true 220 | # add_group_to_tag : This adds your grouping as a tag with a prefix defined in settings . Default is true 221 | # Example: A grouping defined as noHL will have a tag set to ~share_limit.noHL (if using the default prefix) 222 | add_group_to_tag: true 223 | # min_num_seeds : Will prevent torrent deletion by cleanup variable if the number of seeds is less than the value set here. 224 | # If the torrent has less number of seeds than the min_num_seeds, the share limits will be changed back to no limits and resume the torrent to continue seeding. 
225 | # Will default to 0 if not specified for the group. 226 | min_num_seeds: 0 227 | # custom_tag : Apply a custom tag name for this particular group. **WARNING (This tag MUST be unique as it will be used to determine share limits. Please ensure it does not overlap with any other tags in qbt)** 228 | custom_tag: sharelimits_noHL 229 | cross-seed: 230 | priority: 2 231 | include_all_tags: 232 | - cross-seed 233 | max_seeding_time: 7d 234 | cleanup: false 235 | PTP: 236 | priority: 3 237 | include_all_tags: 238 | - PassThePopcorn 239 | max_ratio: 2.0 240 | max_seeding_time: 90d 241 | cleanup: false 242 | default: 243 | priority: 999 244 | max_ratio: -1 245 | max_seeding_time: -1 246 | cleanup: false 247 | 248 | recyclebin: 249 | # Recycle Bin method of deletion will move files into the recycle bin (Located in /root_dir/.RecycleBin) instead of directly deleting them in qbit 250 | # By default the Recycle Bin will be emptied on every run of the qbit_manage script if empty_after_x_days is defined. 251 | enabled: true 252 | # empty_after_x_days var: 253 | # Will automatically remove all files and folders in recycle bin after x days. (Checks every script run) 254 | # If this variable is not defined it, the RecycleBin will never be emptied. 255 | # WARNING: Setting this variable to 0 will delete all files immediately upon script run! 256 | empty_after_x_days: 60 257 | # save_torrents var: 258 | # If this option is set to true you MUST fill out the torrents_dir in the directory attribute. 259 | # This will save a copy of your .torrent and .fastresume file in the recycle bin before deleting it from qbittorrent 260 | save_torrents: true 261 | # split_by_category var: 262 | # This will split the recycle bin folder by the save path defined in the `cat` attribute 263 | # and add the base folder name of the recycle bin that was defined in the `recycle_bin` sub-attribute under directory. 
264 | split_by_category: false 265 | 266 | orphaned: 267 | # Orphaned files are those in the root_dir download directory that are not referenced by any active torrents. 268 | # Will automatically remove all files and folders in orphaned data after x days. (Checks every script run) 269 | # If this variable is not defined it, the orphaned data will never be emptied. 270 | # WARNING: Setting this variable to 0 will delete all files immediately upon script run! 271 | empty_after_x_days: 60 272 | # File patterns that will not be considered orphaned files. Handy for generated files that aren't part of the torrent but belong with the torrent's files 273 | exclude_patterns: 274 | - "**/.DS_Store" 275 | - "**/Thumbs.db" 276 | - "**/@eaDir" 277 | - "/data/torrents/temp/**" 278 | - "**/*.!qB" 279 | - "**/*_unpackerred" 280 | # Set your desired threshold for the maximum number of orphaned files qbm will delete in a single run. (-1 to disable safeguards) 281 | # This will help reduce the number of accidental large amount orphaned deletions in a single run 282 | # WARNING: Setting this variable to -1 will not safeguard against any deletions 283 | max_orphaned_files_to_delete: 50 284 | 285 | apprise: 286 | # Apprise integration with webhooks 287 | # Leave Empty/Blank to disable 288 | # Mandatory to fill out the url of your apprise API endpoint 289 | api_url: http://apprise-api:8000 290 | # Mandatory to fill out the notification url/urls based on the notification services provided by apprise. 
https://github.com/caronc/apprise/wiki 291 | notify_url: 292 | 293 | notifiarr: 294 | # Notifiarr integration with webhooks 295 | # Leave Empty/Blank to disable 296 | # Mandatory to fill out API Key 297 | apikey: #################################### 298 | # Set to a unique value (could be your username on notifiarr for example) 299 | instance: 300 | 301 | webhooks: 302 | # Webhook notifications: 303 | # Possible values: 304 | # Set value to notifiarr if using notifiarr integration 305 | # Set value to apprise if using apprise integration 306 | # Set value to a valid webhook URL 307 | # Set value to nothing (leave Empty/Blank) to disable 308 | error: https://mywebhookurl.com/qbt_manage 309 | run_start: notifiarr 310 | run_end: apprise 311 | function: 312 | cross_seed: https://mywebhookurl.com/qbt_manage 313 | recheck: notifiarr 314 | cat_update: apprise 315 | tag_update: notifiarr 316 | rem_unregistered: notifiarr 317 | tag_tracker_error: notifiarr 318 | rem_orphaned: notifiarr 319 | tag_nohardlinks: notifiarr 320 | share_limits: notifiarr 321 | cleanup_dirs: notifiarr 322 | 323 | -------------------------------------------------------------------------------- /quadlets/qbit_manage/qbit_manage.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=qBittorrent manager 3 | Wants=qbittorrent.service 4 | After=qbittorrent.service 5 | 6 | [Service] 7 | Restart=on-failure 8 | TimeoutStartSec=900 9 | 10 | [Install] 11 | WantedBy=default.target 12 | 13 | [Container] 14 | ContainerName=qbit_manage 15 | Image=ghcr.io/stuffanthings/qbit_manage:latest 16 | AutoUpdate=registry 17 | HostName=qbit_manage 18 | 19 | Volume=qbit_manage-config:/config 20 | Volume=/volumes/books/qbittorrent/downloads:/data/torrents 21 | Volume=qbittorrent-config:/qbittorrent 22 | 23 | EnvironmentFile=qbit_manage.env 24 | --------------------------------------------------------------------------------
/quadlets/qbit_manage/qbit_manage.env: -------------------------------------------------------------------------------- 1 | QBT_RUN=false 2 | QBT_SCHEDULE=1440 3 | QBT_CONFIG=config.yml 4 | QBT_LOGFILE=activity.log 5 | QBT_CROSS_SEED=false 6 | QBT_RECHECK=false 7 | QBT_CAT_UPDATE=false 8 | QBT_TAG_UPDATE=false 9 | QBT_REM_UNREGISTERED=false 10 | QBT_REM_ORPHANED=false 11 | QBT_TAG_TRACKER_ERROR=false 12 | QBT_TAG_NOHARDLINKS=false 13 | QBT_SHARE_LIMITS=false 14 | QBT_SKIP_CLEANUP=false 15 | QBT_DRY_RUN=false 16 | QBT_LOG_LEVEL=INFO 17 | QBT_DIVIDER== 18 | QBT_WIDTH=100 19 | 20 | -------------------------------------------------------------------------------- /quadlets/qbittorrent-port-forward-gluetun-server/qbittorrent-port-forward-gluetun-server.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Port forward updater for qbittorrent over gluetun 3 | After=gluetun.service 4 | After=qbittorrent.service 5 | BindsTo=gluetun.service 6 | BindsTo=qbittorrent.service 7 | 8 | [Service] 9 | Restart=on-failure 10 | TimeoutStartSec=900 11 | 12 | [Install] 13 | WantedBy=default.target 14 | 15 | [Container] 16 | Image=docker.io/mjmeli/qbittorrent-port-forward-gluetun-server:latest 17 | ContainerName=qbittorrent-port-forward-gluetun-server 18 | AutoUpdate=registry 19 | 20 | Network=container:gluetun 21 | HostName=qbittorrent-port-forward-gluetun-server 22 | 23 | Environment=QBT_USERNAME=$qbt_user 24 | Environment=QBT_ADDR=http://localhost:8080 25 | Environment=GTN_ADDR=http://localhost:8000 26 | 27 | Secret=qbt_pw,type=env,target=QBT_PASSWORD 28 | -------------------------------------------------------------------------------- /quadlets/qbittorrent/qbittorrent.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=qbittorrent client 3 | Requires=gluetun.service 4 | After=gluetun.service 5 | 6 | [Service] 7 | Restart=on-failure 8 | TimeoutStartSec=900 
9 | 10 | [Install] 11 | WantedBy=default.target 12 | 13 | [Container] 14 | Image=docker.io/qbittorrentofficial/qbittorrent-nox:$qbt_version 15 | ContainerName=qbittorrent 16 | AutoUpdate=registry 17 | 18 | Network=container:gluetun 19 | HostName=qbittorrent 20 | 21 | Volume=qbittorrent-config:/config 22 | Volume=qbittorrent-downloads:/downloads 23 | 24 | Environment=QBT_LEGAL_NOTICE=confirm 25 | Environment=QBT_VERSION=$qbt_version 26 | Environment=TZ=Etc/UTC 27 | 28 | -------------------------------------------------------------------------------- /quadlets/qbittorrent/qbittorrent.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=qbittorrent-config 3 | VolumeName=qbittorrent-downloads 4 | 5 | -------------------------------------------------------------------------------- /quadlets/radarr/radarr.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Movie metadata manager 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=ghcr.io/hotio/radarr 13 | ContainerName=radarr 14 | AutoUpdate=registry 15 | 16 | Network=radarr.network 17 | HostName=radarr 18 | PublishPort=7878 19 | 20 | Volume=radarr-config:/config 21 | Volume=/path/to/media:/data 22 | -------------------------------------------------------------------------------- /quadlets/radarr/radarr.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/radarr/radarr.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=radarr-config 3 | -------------------------------------------------------------------------------- /quadlets/romm/romm-mariadb.container: 
-------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=ROMM database 3 | Wants=romm.service 4 | 5 | [Service] 6 | Restart=on-failure 7 | TimeoutStartSec=900 8 | 9 | [Install] 10 | WantedBy=default.target 11 | 12 | [Container] 13 | Image=docker.io/mariadb:latest 14 | ContainerName=romm-mariadb 15 | AutoUpdate=registry 16 | 17 | Network=romm.network 18 | HostName=romm-mariadb 19 | 20 | Volume=romm-mariadb-data:/var/lib/mysql 21 | 22 | Environment=MARIADB_DATABASE=romm 23 | Environment=MARIADB_USER=romm-user 24 | 25 | Secret=romm-mariadb-root-password,type=env,target=MARIADB_ROOT_PASSWORD 26 | Secret=romm-mariadb-password,type=env,target=MARIADB_PASSWORD 27 | -------------------------------------------------------------------------------- /quadlets/romm/romm.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=ROMM rom manager 3 | Requires=romm-mariadb.service 4 | After=romm-mariadb.service 5 | 6 | [Service] 7 | Restart=on-failure 8 | TimeoutStartSec=900 9 | 10 | [Install] 11 | WantedBy=default.target 12 | 13 | [Container] 14 | Image=docker.io/rommapp/romm:latest 15 | ContainerName=romm 16 | AutoUpdate=registry 17 | 18 | Network=romm.network 19 | HostName=romm 20 | PublishPort=80:8080 21 | 22 | Volume=romm-resources:/romm/resources 23 | Volume=romm-redis-data:/redis-data 24 | Volume=romm-library:/romm/library 25 | Volume=romm-assets:/romm/assets 26 | Volume=romm-config:/romm/config 27 | 28 | EnvironmentFile=romm.env 29 | 30 | Secret=romm-mariadb-password,type=env,target=DB_PASSWD 31 | Secret=romm-auth-secret-key,type=env,target=ROMM_AUTH_SECRET_KEY 32 | Secret=romm-igdb-client-secret,type=env,target=IGDB_CLIENT_SECRET 33 | Secret=romm-mobygames-api-key,type=env,target=MOBYGAMES_API_KEY 34 | Secret=romm-steamgriddb-api-key,type=env,target=STEAMGRIDDB_API_KEY 35 | Secret=romm-screenscraper-password,type=env,target=SCREENSCRAPER_PASSWORD 36 | 
-------------------------------------------------------------------------------- /quadlets/romm/romm.env: -------------------------------------------------------------------------------- 1 | DB_HOST=romm-mariadb 2 | DB_NAME=romm 3 | DB_USER=romm-user 4 | #DB_PASSWD= 5 | 6 | # ROMM_AUTH_SECRET_KEY= 7 | IGDB_CLIENT_ID= 8 | # IGDB_CLIENT_SECRET= 9 | # MOBYGAMES_API_KEY= 10 | # STEAMGRIDDB_API_KEY= 11 | SCREENSCRAPER_USER= 12 | # SCREENSCRAPER_PASSWORD= 13 | 14 | 15 | -------------------------------------------------------------------------------- /quadlets/romm/romm.network: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=ROMM network 3 | 4 | [Network] 5 | NetworkName=romm 6 | 7 | -------------------------------------------------------------------------------- /quadlets/romm/romm.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=romm-mariadb-data 3 | VolumeName=romm-resources 4 | VolumeName=romm-redis-data 5 | VolumeName=romm-library 6 | VolumeName=romm-assets 7 | VolumeName=romm-config 8 | 9 | -------------------------------------------------------------------------------- /quadlets/seedboxapi/seedboxapi.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Update qbittorrent session IP for tracker 3 | After=qbittorrent.service 4 | After=gluetun.service 5 | BindsTo=gluetun.service 6 | BindsTo=qbittorrent.service 7 | 8 | [Service] 9 | Restart=on-failure 10 | TimeoutStartSec=900 11 | 12 | [Install] 13 | WantedBy=default.target 14 | 15 | [Container] 16 | Image=docker.io/myanonamouse/seedboxapi:latest 17 | ContainerName=seedboxapi 18 | AutoUpdate=registry 19 | 20 | Network=container:gluetun 21 | HostName=seedboxapi 22 | 23 | Volume=/volumes/books/seedboxapi/config:/config 24 | 25 | Environment=DEBUG=1 26 | Environment=interval=1 27 | 28 | Secret=mam_id,type=env,target=mam_id 29 | 
-------------------------------------------------------------------------------- /quadlets/snowflake/snowflake.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Snowflake proxy 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/thetorproject/snowflake-proxy 13 | ContainerName=snowflake 14 | AutoUpdate=registry 15 | 16 | Network=snowflake.network 17 | HostName=snowflake 18 | -------------------------------------------------------------------------------- /quadlets/snowflake/snowflake.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/sogebot/sogebot-backend.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=sogeBot backend 3 | Wants=sogebot-dashboard.service 4 | 5 | [Service] 6 | Restart=on-failure 7 | TimeoutStartSec=900 8 | 9 | [Install] 10 | WantedBy=default.target 11 | 12 | [Container] 13 | Image=docker.io/sogebot/release 14 | ContainerName=sogebot-backend 15 | AutoUpdate=registry 16 | 17 | Network=sogebot.network 18 | HostName=sogebot-backend 19 | PublishPort=20000:20000 20 | ## For --inspect 21 | #PublishPort=9229:9229 22 | 23 | Volume=sogebot-shared:/app/shared 24 | Volume=sogebot-logs:/app/logs 25 | 26 | EnvironmentFile=sogebot.env 27 | Environment=LANG=en_US.UTF-8 28 | ## For 4GB RAM 29 | #Environment=NODE_OPTIONS=--max-old-space-size=4096 30 | 31 | ## For --inspect 32 | #Environment=PROFILER=y 33 | -------------------------------------------------------------------------------- /quadlets/sogebot/sogebot-dashboard.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=sogeBot frontend 3 | 
Requires=sogebot-backend.service 4 | After=sogebot-backend.service 5 | 6 | [Service] 7 | Restart=on-failure 8 | TimeoutStartSec=900 9 | 10 | [Install] 11 | WantedBy=default.target 12 | 13 | [Container] 14 | Image=docker.io/sogebot/dashboard 15 | ContainerName=sogebot-dashboard 16 | AutoUpdate=registry 17 | 18 | Network=sogebot.network 19 | HostName=sogebot-dashboard 20 | PublishPort=12345:80 21 | -------------------------------------------------------------------------------- /quadlets/sogebot/sogebot.env: -------------------------------------------------------------------------------- 1 | TYPEORM_CONNECTION=better-sqlite3 2 | TYPEORM_DATABASE=./shared/sogebot.db 3 | 4 | # DON'T CHANGE ANYTHING BELOW THIS LINE 5 | TYPEORM_ENTITIES=dest/database/entity/*.js 6 | TYPEORM_MIGRATIONS=dest/database/migration/sqlite/*.js 7 | TYPEORM_SUBSCRIBERS=dest/database/entity/*.js 8 | 9 | TYPEORM_ENTITIES_DIR=src/bot/database/entity 10 | TYPEORM_MIGRATIONS_DIR=src/bot/database/migration/sqlite 11 | TYPEORM_SUBSCRIBERS_DIR=src/bot/database/entity 12 | -------------------------------------------------------------------------------- /quadlets/sogebot/sogebot.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/sogebot/sogebot.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=sogebot-shared 3 | VolumeName=sogebot-logs 4 | 5 | -------------------------------------------------------------------------------- /quadlets/sonarr/sonarr.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=TV show metadata manager 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=ghcr.io/hotio/sonarr 13 | ContainerName=sonarr 14 | 15 | 
Network=sonarr.network 16 | HostName=sonarr 17 | PublishPort=8989 18 | 19 | Volume=sonarr-config:/config 20 | Volume=/volumes/media:/data 21 | -------------------------------------------------------------------------------- /quadlets/sonarr/sonarr.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/sonarr/sonarr.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=sonarr-config 3 | -------------------------------------------------------------------------------- /quadlets/speedtest-tracker/speedtest-tracker.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Speedtest tracker 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=lscr.io/linuxserver/speedtest-tracker 13 | ContainerName=speedtest-tracker 14 | AutoUpdate=registry 15 | 16 | Network=speedtest-tracker.network 17 | HostName=speedtest-tracker 18 | PublishPort=8080:80 19 | PublishPort=8443:443 20 | 21 | Volume=speedtest-config:/config 22 | ## NOTE: enable only with your own cert; cannot share the /config mount destination with speedtest-config 23 | #Volume=speedtest-ssl:/config 24 | EnvironmentFile=speedtest-tracker.env 25 | -------------------------------------------------------------------------------- /quadlets/speedtest-tracker/speedtest-tracker.env: -------------------------------------------------------------------------------- 1 | ## https://docs.speedtest-tracker.dev/getting-started/environment-variables 2 | APP_NAME="Speedtest Tracker" 3 | APP_ENV=local 4 | ## NOTE: 5 | ## Fill this in with the real value. This isn't a shell script. 
6 | APP_KEY=$(printf %s "base64:" "$(openssl rand -base64 32)") 7 | APP_DEBUG=false 8 | APP_URL=http://localhost 9 | 10 | APP_LOCALE=en 11 | APP_FALLBACK_LOCALE=en 12 | APP_FAKER_LOCALE=en_US 13 | 14 | APP_MAINTENANCE_DRIVER=file 15 | APP_MAINTENANCE_STORE=database 16 | 17 | PHP_CLI_SERVER_WORKERS=4 18 | 19 | BCRYPT_ROUNDS=12 20 | 21 | LOG_CHANNEL=stack 22 | LOG_STACK=single 23 | LOG_DEPRECATIONS_CHANNEL=null 24 | LOG_LEVEL=debug 25 | 26 | ## NOTE: 27 | ## Change this if you choose a different database 28 | DB_CONNECTION=sqlite 29 | 30 | BROADCAST_CONNECTION=log 31 | CACHE_STORE=database 32 | FILESYSTEM_DISK=local 33 | QUEUE_CONNECTION=database 34 | 35 | SESSION_DRIVER=database 36 | SESSION_LIFETIME=120 37 | SESSION_ENCRYPT=false 38 | SESSION_PATH=/ 39 | SESSION_DOMAIN=null 40 | 41 | MAIL_MAILER=smtp 42 | MAIL_HOST=mailhog 43 | MAIL_PORT=1025 44 | MAIL_USERNAME=null 45 | MAIL_PASSWORD=null 46 | MAIL_SCHEME=null 47 | MAIL_FROM_ADDRESS="hello@example.com" 48 | MAIL_FROM_NAME="Speedtest Tracker" 49 | -------------------------------------------------------------------------------- /quadlets/speedtest-tracker/speedtest-tracker.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/speedtest-tracker/speedtest-tracker.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=speedtest-config 3 | ## NOTE: 4 | ## Enable this if you bring your own cert.crt and cert.key 5 | #VolumeName=speedtest-ssl 6 | 7 | -------------------------------------------------------------------------------- /quadlets/stalwart/stalwart.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Stalwart mail server 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | 
[Container] 12 | Image=docker.io/stalwartlabs/mail-server:latest 13 | ContainerName=stalwart 14 | AutoUpdate=registry 15 | 16 | Network=stalwart.network 17 | HostName=stalwart 18 | PublishPort=443:443 19 | PublishPort=8080:8080 20 | PublishPort=25:25 21 | PublishPort=587:587 22 | PublishPort=465:465 23 | PublishPort=143:143 24 | PublishPort=993:993 25 | PublishPort=4190:4190 26 | 27 | Volume=stalwart:/opt/stalwart-mail 28 | -------------------------------------------------------------------------------- /quadlets/stalwart/stalwart.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/stalwart/stalwart.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=stalwart 3 | 4 | -------------------------------------------------------------------------------- /quadlets/stirlingPDF/stirlingpdf.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=StirlingPDF 3 | 4 | [Service] 5 | Restart=always 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy= default.target 10 | 11 | [Container] 12 | Image=docker.io/frooodle/s-pdf:latest 13 | ContainerName=stirlingPDF 14 | AutoUpdate=registry 15 | 16 | Network=stirlingpdf.network 17 | HostName=stirlingPDF 18 | 19 | Volume=/volumes/stirlingPDF/usr/share/tesseract-ocr/5/tessdata:/usr/share/tesseract-ocr/5/tessdata 20 | Volume=/volumes/stirlingPDF/configs:/configs 21 | Volume=/volumes/stirlingPDF/logs:/logs 22 | 23 | PodmanArgs=-e DOCKER_ENABLE_SECURITY=false -e INSTALL_BOOK_AND_ADVANCED_HTML_OPS=false -e LANGS=en_DK 24 | -------------------------------------------------------------------------------- /quadlets/stirlingPDF/stirlingpdf.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | 
-------------------------------------------------------------------------------- /quadlets/syslog-ng/syslog-ng.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Syslog-ng 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=lscr.io/linuxserver/syslog-ng:latest 13 | ContainerName=syslog-ng 14 | AutoUpdate=registry 15 | 16 | Network=syslog-ng.network 17 | HostName=syslog-ng 18 | PublishPort=514:5514/udp 19 | PublishPort=601:6601/tcp 20 | PublishPort=6514:6514/tcp 21 | 22 | Volume=syslog-ng-config:/config 23 | Volume=/var/log:/var/log 24 | 25 | Environment=TZ=Etc/UTC 26 | Environment=LOG_TO_STDOUT=true 27 | -------------------------------------------------------------------------------- /quadlets/syslog-ng/syslog-ng.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/tandoor/tandoor-db.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Tandoor postgres 3 | Wants=tandoor.service 4 | 5 | [Service] 6 | Restart=on-failure 7 | TimeoutStartSec=900 8 | 9 | [Install] 10 | WantedBy=default.target 11 | 12 | [Container] 13 | Image=docker.io/postgres:16-alpine 14 | ContainerName=tandoor-db 15 | AutoUpdate=registry 16 | 17 | Network=tandoor.network 18 | HostName=tandoor-db 19 | 20 | Volume=tandoor-db:/var/lib/postgresql/data 21 | 22 | EnvironmentFile=tandoor.env 23 | -------------------------------------------------------------------------------- /quadlets/tandoor/tandoor-nginx.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Tandoor nginx 3 | Wants=tandoor.service 4 | Wants=tandoor-db.service 5 | 6 | [Service] 7 | Restart=on-failure 8 | 
TimeoutStartSec=900 9 | 10 | [Install] 11 | WantedBy=default.target 12 | 13 | [Container] 14 | Image=docker.io/nginx:mainline-alpine 15 | ContainerName=tandoor-nginx 16 | AutoUpdate=registry 17 | 18 | Network=tandoor.network 19 | HostName=tandoor-nginx 20 | 21 | Volume=tandoor-nginx-config:/etc/nginx/conf.d 22 | Volume=tandoor-static:/static 23 | Volume=tandoor-media:/media 24 | Volume=tandoor-external:/opt/recipes/externalfiles 25 | 26 | EnvironmentFile=tandoor.env 27 | 28 | -------------------------------------------------------------------------------- /quadlets/tandoor/tandoor.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Tandoor 3 | Requires=tandoor-db.service 4 | Requires=tandoor-nginx.service 5 | After=tandoor-db.service 6 | After=tandoor-nginx.service 7 | 8 | [Service] 9 | Restart=on-failure 10 | TimeoutStartSec=900 11 | 12 | [Install] 13 | WantedBy=default.target 14 | 15 | [Container] 16 | Image=docker.io/vabene1111/recipes 17 | ContainerName=tandoor 18 | AutoUpdate=registry 19 | 20 | Network=tandoor.network 21 | HostName=tandoor 22 | 23 | EnvironmentFile=tandoor.env 24 | 25 | Volume=tandoor-db:/var/lib/postgresql/data 26 | Volume=tandoor-static:/opt/recipes/staticfiles 27 | Volume=tandoor-media:/opt/recipes/mediafiles 28 | Volume=tandoor-external:/opt/recipes/externalfiles 29 | Volume=tandoor-nginx-config:/opt/recipes/nginx/conf.d 30 | -------------------------------------------------------------------------------- /quadlets/tandoor/tandoor.env: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------- 2 | # This template contains only required options. 
3 | # Visit the docs to find more https://docs.tandoor.dev/system/configuration/ 4 | # --------------------------------------------------------------------------- 5 | 6 | # random secret key, use for example `base64 /dev/urandom | head -c50` to generate one 7 | SECRET_KEY= 8 | 9 | # allowed hosts (see documentation), should be set to your hostname(s) but might be * (default) for some proxies/providers 10 | # ALLOWED_HOSTS=recipes.mydomain.com 11 | 12 | # add only a database password if you want to run with the default postgres, otherwise change settings accordingly 13 | DB_ENGINE=django.db.backends.postgresql 14 | POSTGRES_HOST=tandoor-db 15 | POSTGRES_DB=djangodb 16 | POSTGRES_PORT=5432 17 | POSTGRES_USER=djangouser 18 | POSTGRES_PASSWORD= 19 | -------------------------------------------------------------------------------- /quadlets/tandoor/tandoor.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/tandoor/tandoor.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=tandoor-static 3 | VolumeName=tandoor-nginx-config 4 | VolumeName=tandoor-media 5 | VolumeName=tandoor-db 6 | 7 | -------------------------------------------------------------------------------- /quadlets/termix/termix.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Termix terminals 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=ghcr.io/lukegus/termix:latest 13 | ContainerName=termix 14 | AutoUpdate=registry 15 | 16 | Network=termix.network 17 | HostName=termix 18 | PublishPort=8080:8080 19 | 20 | Volume=termix-db:/data/db 21 | 22 | Secret=termix-salt,type=env,target=SALT 23 | 
-------------------------------------------------------------------------------- /quadlets/termix/termix.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/termix/termix.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=termix-db 3 | 4 | -------------------------------------------------------------------------------- /quadlets/thelounge/thelounge.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=IRC client 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=ghcr.io/thelounge/thelounge:latest 13 | ContainerName=thelounge 14 | AutoUpdate=registry 15 | 16 | Network=thelounge.network 17 | HostName=thelounge 18 | PublishPort=9000 19 | 20 | Volume=thelounge-data:/var/opt/thelounge 21 | 22 | -------------------------------------------------------------------------------- /quadlets/thelounge/thelounge.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/thelounge/thelounge.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=thelounge-data 3 | -------------------------------------------------------------------------------- /quadlets/traggo/traggo.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Tag-based time tracking 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/traggo/server:latest 13 | ContainerName=traggo 14 | 
AutoUpdate=registry 15 | 16 | Network=traggo.network 17 | HostName=traggo 18 | 19 | Volume=/volumes/traggo/opt/traggo/data:/opt/traggo/data 20 | 21 | Secret=traggo-pw,type=env,target=TRAGGO_DEFAULT_USER_PASS 22 | 23 | Environment=TRAGGO_DEFAULT_USER_NAME=$traggo_admin 24 | -------------------------------------------------------------------------------- /quadlets/traggo/traggo.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/vaultwarden/vaultwarden.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Vaultwarden 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/vaultwarden/server:latest 13 | ContainerName=vaultwarden 14 | AutoUpdate=registry 15 | 16 | Network=vaultwarden.network 17 | HostName=vaultwarden 18 | PublishPort=80:80 19 | 20 | Volume=vaultwarden-data:/data 21 | 22 | Environment=DOMAIN=https://vaultwarden.example.com 23 | -------------------------------------------------------------------------------- /quadlets/vaultwarden/vaultwarden.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/vaultwarden/vaultwarden.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=vaultwarden-data 3 | 4 | -------------------------------------------------------------------------------- /quadlets/vector/vector.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Vector observability 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 
10 | 11 | [Container] 12 | Image=docker.io/timberio/vector:latest-alpine 13 | ContainerName=vector 14 | AutoUpdate=registry 15 | 16 | Network=vector.network 17 | HostName=vector 18 | PublishPort=8686:8686 19 | 20 | Volume=./vector.yaml:/etc/vector/vector.yaml:ro 21 | -------------------------------------------------------------------------------- /quadlets/vector/vector.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /quadlets/vector/vector.yaml: -------------------------------------------------------------------------------- 1 | api: 2 | enabled: true 3 | address: 0.0.0.0:8686 4 | sources: 5 | demo_logs: 6 | type: demo_logs 7 | interval: 1 8 | format: json 9 | sinks: 10 | console: 11 | inputs: 12 | - demo_logs 13 | target: stdout 14 | type: console 15 | encoding: 16 | codec: json 17 | 18 | -------------------------------------------------------------------------------- /quadlets/vikunja/vikunja-mariadb.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Vikunja mariadb 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/mariadb:10 13 | ContainerName=vikunja-mariadb 14 | AutoUpdate=registry 15 | 16 | Network=vikunja.network 17 | HostName=vikunja-mariadb 18 | 19 | Volume=vikunja-mariadb:/var/lib/mysql 20 | 21 | Environment=MYSQL_USER=vikunja 22 | Environment=MYSQL_DATABASE=vikunja 23 | 24 | Secret=vikunja-mysql-root-password,type=env,target=MYSQL_ROOT_PASSWORD 25 | Secret=vikunja-mysql-password,type=env,target=MYSQL_PASSWORD 26 | 27 | Exec=--character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci 28 | -------------------------------------------------------------------------------- /quadlets/vikunja/vikunja.container: 
-------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Vikunja 3 | Requires=vikunja-mariadb.service 4 | After=vikunja-mariadb.service 5 | 6 | [Service] 7 | Restart=on-failure 8 | TimeoutStartSec=900 9 | 10 | [Install] 11 | WantedBy=default.target 12 | 13 | [Container] 14 | Image=docker.io/vikunja/vikunja:latest 15 | ContainerName=vikunja 16 | AutoUpdate=registry 17 | 18 | Network=vikunja.network 19 | HostName=vikunja 20 | PublishPort=3456:3456 21 | 22 | Volume=vikunja-files:/app/vikunja/files 23 | 24 | Environment=VIKUNJA_SERVICE_PUBLICURL=https://vikunja.example.com 25 | Environment=VIKUNJA_DATABASE_HOST=vikunja-mariadb 26 | Environment=VIKUNJA_DATABASE_TYPE=mysql 27 | Environment=VIKUNJA_DATABASE_USER=vikunja 28 | Environment=VIKUNJA_DATABASE_DATABASE=vikunja 29 | 30 | Secret=vikunja-mysql-password,type=env,target=VIKUNJA_DATABASE_PASSWORD 31 | Secret=vikunja-service-jwtsecret,type=env,target=VIKUNJA_SERVICE_JWTSECRET 32 | -------------------------------------------------------------------------------- /quadlets/vikunja/vikunja.network: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Vikunja network 3 | 4 | [Network] 5 | NetworkName=vikunja 6 | 7 | -------------------------------------------------------------------------------- /quadlets/vikunja/vikunja.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName=vikunja-files 3 | VolumeName=vikunja-mariadb 4 | 5 | 6 | -------------------------------------------------------------------------------- /quadlets/weechat/weechat.container: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=IRC client 3 | 4 | [Service] 5 | Restart=on-failure 6 | TimeoutStartSec=900 7 | 8 | [Install] 9 | WantedBy=default.target 10 | 11 | [Container] 12 | Image=docker.io/weechat/weechat:latest-alpine-slim 13 | 
ContainerName=weechat 14 | AutoUpdate=registry 15 | 16 | Network=weechat.network 17 | HostName=weechat 18 | 19 | Volume=/volumes/books/weechat/home/user:/home/user 20 | 21 | # FIXME: Better way to attach stdin and tty. Quadlets don't seem to support 22 | # this well, probably because attaching to containers is weirder than `exec sh` 23 | # into it. This works the first time, but detaching doesn't work right and 24 | # reattaching a second time is pretty broken 25 | PodmanArgs=-a stdin --tty=true 26 | -------------------------------------------------------------------------------- /quadlets/weechat/weechat.network: -------------------------------------------------------------------------------- 1 | [Network] 2 | -------------------------------------------------------------------------------- /templates/template.container: -------------------------------------------------------------------------------- 1 | # original: https://github.com/fpatrick/podman-quadlet 2 | [Unit] 3 | Description= # (Optional) A brief description of the service 4 | Wants= # (Optional) Services you want to run with this one 5 | After= # (Optional) Services that need to start before this one 6 | 7 | [Service] 8 | Restart=on-failure # (Optional) Set to 'always' or 'on-failure' to restart on failure 9 | TimeoutStartSec=900 # (Optional) Time to wait before considering a failure 10 | 11 | [Install] 12 | WantedBy=default.target # (Optional) Target to start with (default: multi-user.target). 
For graphical user interface systems default.target 13 | 14 | [Container] 15 | Image= # (Mandatory) The container image to use (e.g., docker.io/library/alpine) 16 | ContainerName= # (Mandatory) The container's name 17 | AutoUpdate=registry 18 | 19 | AddCapability= # (Optional) Extra capabilities to add to the container 20 | AddDevice= # (Optional) Add host devices to the container 21 | SecurityLabelDisable= # (Optional) Disable SELinux labels 22 | User= # (Optional) Run as a specific user inside the container 23 | Label= # (Optional) Add metadata labels to the container 24 | UIDMap= # (Optional) User ID mapping. Example: 0:10000:10 (Inside:Outside:Range) 25 | GIDMap= # (Optional) Group ID mapping Example: 0:10000:10 (Inside:Outside:Range) 26 | 27 | Network= # (Optional) Custom network for the container 28 | HostName= # (Optional) The containers hostname 29 | PublishPort= # (Optional) Ports to expose (host:container) 30 | 31 | Volume= # (Optional) Persistent storage paths (host:container) 32 | 33 | EnvironmentFile= # (Optional) Path to an .env file 34 | Environment= # (Optional) Key=value pairs for environment variables 35 | 36 | PodmanArgs= # (Optional) Additional Podman arguments 37 | Exec= # (Optional) Custom command to run in the container 38 | -------------------------------------------------------------------------------- /templates/template.env: -------------------------------------------------------------------------------- 1 | EXAMPLE=value 2 | -------------------------------------------------------------------------------- /templates/template.network: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description= 3 | 4 | [Network] 5 | NetworkName= 6 | Subnet= 7 | Gateway= 8 | 9 | -------------------------------------------------------------------------------- /templates/template.pod: -------------------------------------------------------------------------------- 1 | [Pod] 2 | AddHost=example.com:192.168.10.11 
# Add host-to-IP mapping to /etc/hosts. 3 | ContainersConfModule=/etc/nvd.conf # Load the specified containers.conf(5) module. 4 | DNS=192.168.55.1 # Set network-scoped DNS resolver/nameserver for containers in this pod 5 | DNSOption=ndots:1 # Set custom DNS options. 6 | DNSSearch=example.com # Set custom DNS search domains. Use DNSSearch=. to remove the search domain. 7 | GIDMap=0:10000:10 # Create the pod in a new user namespace using the supplied GID mapping. 8 | GlobalArgs=--log-level=debug # This key contains a list of arguments passed directly between podman and pod in the generated file. 9 | IP=192.5.0.1 # Specify a static IPv4 address for the pod. 10 | IP6=2001:db8::1 # Specify a static IPv6 address for the pod. 11 | Network=host # Specify a custom network for the pod. 12 | NetworkAlias=name # Add a network-scoped alias for the pod. 13 | PodmanArgs=--cpus=2 # This key contains a list of arguments passed directly to the end of the podman pod create command in the generated file. 14 | PodName=name # The name of the Podman pod. 15 | PublishPort=8080:80 # Exposes a port, or a range of ports (e.g. 50-59), from the pod to the host. 16 | ServiceName=name # By default, Quadlet will name the systemd service unit by appending -pod to the name of the Quadlet. 17 | SubGIDMap=gtest # Create the pod in a new user namespace using the map with name in the /etc/subgid file. 18 | SubUIDMap=utest # Create the pod in a new user namespace using the map with name in the /etc/subuid file. 19 | UIDMap=0:10000:10 # Create the pod in a new user namespace using the supplied UID mapping. 20 | UserNS=keep-id:uid=200,gid=210 # Set the user namespace mode for the pod. 21 | Volume=/source:/dest # Mount a volume in the pod. 
22 | -------------------------------------------------------------------------------- /templates/template.volume: -------------------------------------------------------------------------------- 1 | [Volume] 2 | VolumeName= 3 | 4 | --------------------------------------------------------------------------------