├── .ansible-lint
├── .github
│   ├── ISSUE_TEMPLATE
│   │   └── bug_report.md
│   ├── extra-vars.yml
│   └── workflows
│       ├── deploy-docs.yml
│       ├── run-playbook.yml
│       ├── scripts
│       │   ├── check_containers.py
│       │   └── requirements.txt
│       └── test-deploy-docs.yml
├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── galaxy-requirements.yml
├── gen-docs
│   ├── .gitignore
│   ├── README.md
│   ├── docs
│   │   ├── Examples
│   │   │   └── container-connections.md
│   │   ├── config-docs
│   │   │   ├── Authentik.md
│   │   │   ├── Cloudflare
│   │   │   │   ├── ddns.md
│   │   │   │   └── tunnel.md
│   │   │   ├── Homepage.md
│   │   │   ├── Netdata.md
│   │   │   ├── Networking.md
│   │   │   ├── Tailscale.md
│   │   │   ├── _category_.json
│   │   │   └── traefik
│   │   │       ├── _category_.json
│   │   │       ├── other-svcs.md
│   │   │       ├── security.md
│   │   │       └── ssl.md
│   │   ├── container-list.md
│   │   ├── container-map.md
│   │   ├── design-decisions.md
│   │   ├── getting-started
│   │   │   ├── GPU.md
│   │   │   ├── _category_.json
│   │   │   ├── app-bootstrap.md
│   │   │   ├── dns-setup.md
│   │   │   ├── install.md
│   │   │   ├── network-shares
│   │   │   │   ├── NAS.md
│   │   │   │   ├── _category_.json
│   │   │   │   └── additional-nas.md
│   │   │   ├── requirements.md
│   │   │   ├── updating.md
│   │   │   └── vpn-and-dl-clients
│   │   │       ├── _category_.json
│   │   │       ├── deluge.md
│   │   │       ├── qbittorrent.md
│   │   │       └── transmission.md
│   │   ├── intro.md
│   │   ├── paths.md
│   │   ├── release-notes
│   │   │   ├── _category_.json
│   │   │   ├── v0.1.md
│   │   │   ├── v0.2.md
│   │   │   ├── v0.3.md
│   │   │   ├── v1.4.md
│   │   │   ├── v1.5.md
│   │   │   ├── v1.6.md
│   │   │   ├── v1.7.md
│   │   │   ├── v1.8.md
│   │   │   └── v1.9.md
│   │   └── static
│   │       └── img
│   │           ├── cloudflare_tunnel_example.png
│   │           ├── cloudflare_tunnel_token.png
│   │           ├── container_connect_example.png
│   │           ├── hmsd.png
│   │           └── proxy_example.png
│   ├── docusaurus.config.js
│   ├── package-lock.json
│   ├── package.json
│   ├── sidebars.js
│   ├── src
│   │   └── css
│   │       └── custom.css
│   └── static
│       ├── .nojekyll
│       ├── CNAME
│       └── img
│           ├── favicon.ico
│           └── hmsd.png
├── hms-docker.yml
├── inventory
│   └── hosts.yml
└── roles
    ├── _archive
    ├── docker
    │   ├── defaults
    │   │   └── main.yml
    │   ├── files
    │   │   └── docker-daemon.json
    │   ├── handlers
    │   │   └── main.yml
    │   └── tasks
    │       ├── debian.yml
    │       ├── main.yml
    │       └── redhat.yml
    ├── gpu
    │   ├── defaults
    │   │   └── main.yml
    │   ├── handlers
    │   │   └── main.yml
    │   └── tasks
    │       ├── debian.yml
    │       ├── main.yml
    │       └── redhat.yml
    └── hmsdocker
        ├── defaults
        │   └── main
        │       ├── app_bootstrap.yml
        │       ├── authentik.yml
        │       ├── cloudflare.yml
        │       ├── container_map.yml
        │       ├── container_settings.yml
        │       ├── gpu.yml
        │       ├── hmsd_advanced.yml
        │       ├── homepage_api_keys.yml
        │       ├── main.yml
        │       ├── nas.yml
        │       ├── nas_additional.yml
        │       ├── nas_cifs.yml
        │       ├── nas_nfs.yml
        │       ├── plex.yml
        │       ├── scripts.yml
        │       ├── service_misc.yml
        │       ├── tailscale.yml
        │       ├── traefik.yml
        │       ├── transmission.yml
        │       └── vpn.yml
        ├── files
        │   ├── homepage_docker.yaml
        │   ├── homepage_services.yaml
        │   └── scripts
        │       ├── monitoring
        │       │   └── check_media_availability.py
        │       ├── requirements.txt
        │       └── traefik_cert_convert.py
        ├── handlers
        │   └── main.yml
        ├── tasks
        │   ├── app_api_key_reader.yml
        │   ├── app_inits
        │   │   ├── app_init.yml
        │   │   ├── lidarr.yml
        │   │   ├── prowlarr.yml
        │   │   ├── radarr.yml
        │   │   ├── readarr.yml
        │   │   └── sonarr.yml
        │   ├── container_postreqs
        │   │   ├── checkrr.yml
        │   │   ├── plex.yml
        │   │   ├── sabnzbd.yml
        │   │   └── traefik.yml
        │   ├── container_prereqs
        │   │   ├── authentik.yml
        │   │   ├── homepage.yml
        │   │   ├── qbittorrent.yml
        │   │   ├── tautulli.yml
        │   │   └── traefik.yml
        │   ├── generate_compose_files.yml
        │   ├── main.yml
        │   ├── nas
        │   │   ├── nas_additional_cifs.yml
        │   │   ├── nas_additional_local.yml
        │   │   ├── nas_additional_nfs.yml
        │   │   ├── nas_cifs.yml
        │   │   ├── nas_local.yml
        │   │   └── nas_nfs.yml
        │   ├── scripts.yml
        │   ├── versioning.yml
        │   ├── vpn_setup.yml
        │   ├── vpn_validation.yml
        │   └── youtube_downloaders.yml
        ├── templates
        │   ├── authentik_outpost.j2
        │   ├── authentik_outpost_4k.j2
        │   ├── authentik_outpost_ext_host.j2
        │   ├── authentik_secret.j2
        │   ├── cifs_creds.j2
        │   ├── container_configs
        │   │   ├── checkrr_config.yaml.j2
        │   │   └── qbittorrent_config.conf.j2
        │   ├── containers
        │   │   ├── authentik.yml.j2
        │   │   ├── autobrr.yml.j2
        │   │   ├── bazarr.yml.j2
        │   │   ├── calibre.yml.j2
        │   │   ├── checkrr.yml.j2
        │   │   ├── cloudflare.yml.j2
        │   │   ├── deluge.yml.j2
        │   │   ├── emby.yml.j2
        │   │   ├── flaresolverr.yml.j2
        │   │   ├── heimdall.yml.j2
        │   │   ├── homepage.yml.j2
        │   │   ├── huntarr.yml.j2
        │   │   ├── jellyfin.yml.j2
        │   │   ├── jellyseerr.yml.j2
        │   │   ├── kavita.yml.j2
        │   │   ├── lidarr.yml.j2
        │   │   ├── maintainerr.yml.j2
        │   │   ├── netdata.yml.j2
        │   │   ├── notifiarr.yml.j2
        │   │   ├── nzbget.yml.j2
        │   │   ├── overseerr.yml.j2
        │   │   ├── pasta.yml.j2
        │   │   ├── pinchflat.yml.j2
        │   │   ├── plex.yml.j2
        │   │   ├── portainer.yml.j2
        │   │   ├── prowlarr.yml.j2
        │   │   ├── qbittorrent.yml.j2
        │   │   ├── radarr.yml.j2
        │   │   ├── readarr.yml.j2
        │   │   ├── recyclarr.yml.j2
        │   │   ├── requestrr.yml.j2
        │   │   ├── sabnzbd.yml.j2
        │   │   ├── sonarr.yml.j2
        │   │   ├── speedtest-tracker.yml.j2
        │   │   ├── tailscale.yml.j2
        │   │   ├── tautulli.yml.j2
        │   │   ├── tdarr.yml.j2
        │   │   ├── tinymediamanager.yml.j2
        │   │   ├── traefik.yml.j2
        │   │   ├── transmission.yml.j2
        │   │   ├── tubearchivist.yml.j2
        │   │   ├── unpackerr.yml.j2
        │   │   ├── uptimekuma.yml.j2
        │   │   ├── watchtower.yml.j2
        │   │   └── wizarr.yml.j2
        │   ├── docker-compose.yml.j2
        │   ├── env.j2
        │   ├── hmsd_traefik_middlewares.yml.j2
        │   ├── recyclarr_conf.yml.j2
        │   ├── traefik.yml.j2
        │   └── traefik_additional_routes.yml.j2
        └── vars
            └── main.yml
/.ansible-lint:
--------------------------------------------------------------------------------
1 | skip_list:
2 | - yaml[line-length]
3 | - yaml[colons]
4 | - role-name[path]
5 |
6 | exclude_paths:
7 | - ./galaxy-roles
8 | - ./gen-docs
9 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a bug report to help us improve
4 | title: ''
5 | labels: bug
6 | assignees: ahembree
7 |
8 | ---
9 |
10 | Please only submit issues in English, thank you.
11 |
12 | ---
13 |
14 | Have you updated your local code to the latest available from the repo: yes/no
15 | Have you run `make update` after updating your local code: yes/no
16 | What version of HMS-Docker are you currently using (run `cat /opt/hms-docker/.hmsd-version`):
17 |
18 | **Describe the bug**
19 | A clear and concise description of what the bug is.
20 |
21 | **Expected behavior**
22 | A clear and concise description of what you expected to happen.
23 |
24 | **List any applicable variables and their values**
25 | Please be sure to redact any sensitive information
26 |
27 | **Additional context**
28 | Add any other context about the problem here.
29 |
--------------------------------------------------------------------------------
/.github/workflows/deploy-docs.yml:
--------------------------------------------------------------------------------
1 | name: Deploy to GitHub Pages
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 | paths:
8 | - 'gen-docs/**'
9 |
10 | jobs:
11 | build:
12 | name: Build Docusaurus
13 | runs-on: ubuntu-22.04
14 | steps:
15 | - uses: actions/checkout@v4
16 | with:
17 | fetch-depth: 0
18 |
19 | - name: Set up Node.js
20 | uses: actions/setup-node@v4
21 | with:
22 | node-version: 20
23 | cache: npm
24 | cache-dependency-path: gen-docs/package-lock.json
25 |
26 | - name: Install dependencies
27 | run: |
28 | cd gen-docs
29 | npm ci
30 |
31 | - name: Build website
32 | run: |
33 | cd gen-docs
34 | npm run build
35 |
36 | - name: Upload Build Artifact
37 | uses: actions/upload-pages-artifact@v3
38 | with:
39 | path: gen-docs/build
40 |
41 | deploy:
42 | name: Deploy to GitHub Pages
43 | needs: build
44 | concurrency: build-deploy-pages
45 |
46 | # Grant GITHUB_TOKEN the permissions required to make a Pages deployment
47 | permissions:
48 | pages: write
49 | id-token: write
50 |
51 | environment:
52 | name: github-pages
53 | url: ${{ steps.deployment.outputs.page_url }}
54 |
55 | runs-on: ubuntu-22.04
56 | steps:
57 | - name: Deploy to GitHub Pages
58 | id: deployment
59 | uses: actions/deploy-pages@v4
60 |
--------------------------------------------------------------------------------
/.github/workflows/run-playbook.yml:
--------------------------------------------------------------------------------
1 | name: Ubuntu Deployment Tests
2 | run-name: Test Ubuntu Deployments
3 | on:
4 | push:
5 | paths-ignore:
6 | - 'gen-docs/**'
7 | pull_request:
8 | schedule:
9 | - cron: '23 9 * * 0'
10 |
11 | permissions:
12 | contents: read
13 |
14 | jobs:
15 | Ubuntu_22:
16 | name: Test Ubuntu 22.04
17 | runs-on: ubuntu-22.04
18 | steps:
19 | - name: Check out repo code
20 | uses: actions/checkout@v4
21 |
22 | - name: Add hosts to /etc/hosts
23 | run: |
24 | for name in sonarr radarr sonarr-4k radarr-4k prowlarr transmission bazarr readarr plex overseerr requestrr traefik nzbget sabnzbd authentik tautulli tdarr homepage uptime-kuma qbittorrent deluge lidarr; do echo "127.0.0.1 $name.home.local" | sudo tee -a /etc/hosts; done
25 |
26 | - name: Install ansible
27 | run: |
28 | sudo apt update
29 | sudo apt install -y software-properties-common
30 | sudo add-apt-repository --yes --update ppa:ansible/ansible
31 | sudo apt install -y ansible
32 |
33 | - name: Ensure base playbook requirements
34 | # does not use the `make` command since it requires input and we cannot input in a Runner
35 | run: |
36 | mkdir -p ./inventory/group_vars/all
37 | cp ./roles/hmsdocker/defaults/main/*.yml ./inventory/group_vars/all
38 | chmod 0600 ./inventory/group_vars/all/*.yml
39 | make install-reqs
40 |
41 | - name: Run playbook in check mode
42 | run: >-
43 | sudo ansible-playbook
44 | -i inventory/hosts.yml
45 | hms-docker.yml
46 | --diff
47 | --check
48 | --extra-vars "@.github/extra-vars.yml"
49 |
50 | - name: Run playbook
51 | run: >-
52 | sudo ansible-playbook
53 | -i inventory/hosts.yml
54 | hms-docker.yml
55 | --diff
56 | --extra-vars "@.github/extra-vars.yml"
57 |
58 | - name: Check containers
59 | run: |
60 | sleep 60
61 | sudo pip3 install -r .github/workflows/scripts/requirements.txt
62 | sudo make verify-containers
63 |
64 | - name: Run playbook again to ensure API keys are retrieved
65 | run: >-
66 | sudo ansible-playbook
67 | -i inventory/hosts.yml
68 | hms-docker.yml
69 | --diff
70 | --extra-vars "@.github/extra-vars.yml"
71 |
--------------------------------------------------------------------------------
/.github/workflows/scripts/requirements.txt:
--------------------------------------------------------------------------------
1 | certifi==2024.7.4
2 | charset-normalizer==3.3.2
3 | docker==7.1.0
4 | idna==3.7
5 | python-dotenv==1.0.1
6 | requests==2.32.2
7 | urllib3==2.2.2
8 |
--------------------------------------------------------------------------------
/.github/workflows/test-deploy-docs.yml:
--------------------------------------------------------------------------------
1 | name: Test Docusaurus Deployment
2 |
3 | on:
4 | push:
5 | paths:
6 | - 'gen-docs/**'
7 | pull_request:
8 | branches:
9 | - master
10 | paths:
11 | - 'gen-docs/**'
12 |
13 | jobs:
14 | test-deploy:
15 | name: Test Install and Build
16 | runs-on: ubuntu-22.04
17 | steps:
18 | - uses: actions/checkout@v4
19 | with:
20 | fetch-depth: 0
21 | - uses: actions/setup-node@v4
22 | with:
23 | node-version: 20
24 | cache: npm
25 | cache-dependency-path: gen-docs/package-lock.json
26 |
27 | - name: Install dependencies
28 | run: |
29 | cd gen-docs
30 | npm ci
31 |
32 | - name: Test build website
33 | run: |
34 | cd gen-docs
35 | npm run build
36 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # cache files
2 | .dccache
3 |
4 | # env stuff
5 | .venv/
6 |
7 | # python stuff
8 | __pycache__/
9 |
10 | galaxy-roles/
11 | vars/custom*
12 | inventory/group_vars/all/*
13 | inventory/hosts-custom.yml
14 | .docusaurus/
15 | build/
16 | node_modules/
17 | # others to ignore
18 | *.log
19 | *.env
20 | .vscode/
21 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | SHELL := /bin/bash
2 |
3 | .DEFAULT_GOAL:=help
4 |
5 | DEFAULT_CONFS = vars/default/*.yml
6 | ADVANCED_CONFS = roles/hmsdocker/defaults/main/*.yml
7 |
8 | BASEDIR=$(shell pwd)
9 |
10 | CUSTOM_CONF_DIR = inventory/group_vars/all
11 |
12 | # Found and modified from: https://gist.github.com/Pierstoval/b2539c387c467c017bf2b0ace5a2e79b
13 | # To use the "confirm" target inside another target,
14 | # use the " if $(MAKE) -s confirm ; " syntax.
15 | # The CI environment variable can be set to a non-empty string,
16 | # it'll bypass this command that will "return true", as a "yes" answer.
17 | confirm:
18 | REPLY="" ; \
19 | read -p "⚠ This will overwrite all existing files in '$(CUSTOM_CONF_DIR)', are you sure? [y/n] > " -r ; \
20 | if [[ ! $$REPLY =~ ^[Yy]$$ ]]; then \
21 | printf $(_ERROR) "FAIL" "Stopping" ; \
22 | exit 1; \
23 | else \
24 | printf $(_TITLE) "OK" "Copying files..." ; \
25 | exit 0; \
26 | fi
27 | _WARN := "\033[33m[%s]\033[0m %s\n" # Yellow text for "printf"
28 | _TITLE := "\033[32m[%s]\033[0m %s\n" # Green text for "printf"
29 | _ERROR := "\033[31m[%s]\033[0m %s\n" # Red text for "printf"
30 |
31 | config:
32 | @if $(MAKE) -s confirm ; then \
33 | mkdir -p $(CUSTOM_CONF_DIR); \
34 | cp $(ADVANCED_CONFS) $(CUSTOM_CONF_DIR); \
35 | chmod 0600 $(CUSTOM_CONF_DIR)/*.yml; \
36 | fi
37 |
38 | check: install-reqs
39 | @ansible-playbook -i inventory/hosts.yml hms-docker.yml --diff --check
40 |
41 | apply: install-reqs
42 | @ansible-playbook -i inventory/hosts.yml hms-docker.yml --diff
43 |
44 | install-reqs:
45 | @ansible-galaxy install -r galaxy-requirements.yml -p ./galaxy-roles
46 |
47 | verify-containers:
48 | @sudo python3 .github/workflows/scripts/check_containers.py
49 |
50 | update:
51 | @echo Updating from Git repo... && git pull
52 | @echo Updating variable names
53 | @echo Updating Traefik variables
54 | @sed -i 's|traefik_ext_hosts_configs_path:|hmsdocker_traefik_static_config_location:|g' $(CUSTOM_CONF_DIR)/traefik.yml
55 | @sed -i 's|hms_docker_library_path|hmsdocker_library_path|g' $(CUSTOM_CONF_DIR)/hmsd_advanced.yml
56 | @sed -i 's|transmission_vpn_provider:|hmsdocker_vpn_provider:|g' $(CUSTOM_CONF_DIR)/transmission.yml
57 | @sed -i 's|transmission_vpn_user:|hmsdocker_vpn_user:|g' $(CUSTOM_CONF_DIR)/transmission.yml
58 | @sed -i 's|transmission_vpn_pass:|hmsdocker_vpn_pass:|g' $(CUSTOM_CONF_DIR)/transmission.yml
59 | @sed -i 's|transmission_ovpn_config_local_path:|transmission_ovpn_config_local_dir:|g' $(CUSTOM_CONF_DIR)/transmission.yml
60 | @grep -q '^hmsdocker_vpn_type:' $(CUSTOM_CONF_DIR)/vpn.yml || echo "hmsdocker_vpn_type: ''" >> $(CUSTOM_CONF_DIR)/vpn.yml
61 | @echo Update finished
62 |
63 | help:
64 | @echo make config :: copy default config files
65 | @echo make check :: check for any changes without doing anything \(diff\)
66 | @echo make apply :: apply any changes identified in the diff
67 | @echo make install-reqs :: installs ansible galaxy role requirements
68 | @echo make verify-containers :: checks containers exposed ports \(used in GitHub Actions\)
69 | @echo make update :: updates from the git repo and updates variable names \(if they were changed\)
70 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | [Discord](https://discord.gg/MqXTXvWSkZ)
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 | # HMS-Docker
16 |
17 | Ansible Playbook to set up an automated Home Media Server stack running on Docker across a variety of platforms, with support for GPUs, SSL, SSO, DDNS, and more.
18 |
19 | ## Features
20 |
21 | - GPU acceleration for media transcoding
22 | - Intel and Nvidia GPU support
23 | - You must install the drivers for your Nvidia GPU yourself; driver installation is not included in this playbook, but the playbook will verify that GPU acceleration is available
24 | - Automatic Docker installation
25 | - Automatic container updates
26 | - Automatic App Initialization (Sonarr, Radarr, Prowlarr, etc.)
27 | - Dynamic DNS updates with Cloudflare
28 | - Wildcard SSL certificate generation
29 | - Support for multiple network shares
30 | - Single Sign-On with Authentik
31 | - Support for separate 4K instances of Sonarr and Radarr
32 | - Automated dashboard configuration in [Homepage](https://gethomepage.dev/)
33 | - Custom scripts
34 | - Advanced monitoring script(s) for Uptime-Kuma to detect if media is actually accessible by the Plex container
35 | - Convert Traefik certificate file to a Plex-supported certificate file (PKCS12)
36 |
37 | ## Getting Started
38 |
39 | Please see the docs page at: https://docs.hmsdocker.dev
40 |
41 | ## Contributing
42 |
43 | Pull requests are always welcome!
44 |
45 | If you have suggestions for containers to add or any other improvements, please submit a [Discussion Post](https://github.com/ahembree/ansible-hms-docker/discussions)
46 |
--------------------------------------------------------------------------------
/galaxy-requirements.yml:
--------------------------------------------------------------------------------
1 | - src: geerlingguy.docker
2 |
--------------------------------------------------------------------------------
/gen-docs/.gitignore:
--------------------------------------------------------------------------------
1 | # Dependencies
2 | /node_modules
3 |
4 | # Production
5 | /build
6 |
7 | # Generated files
8 | .docusaurus
9 | .cache-loader
10 |
11 | # Misc
12 | .DS_Store
13 | .env.local
14 | .env.development.local
15 | .env.test.local
16 | .env.production.local
17 |
18 | npm-debug.log*
19 | yarn-debug.log*
20 | yarn-error.log*
21 |
--------------------------------------------------------------------------------
/gen-docs/README.md:
--------------------------------------------------------------------------------
1 | # Website
2 |
3 | This website is built using [Docusaurus](https://docusaurus.io/), a modern static website generator.
4 |
5 | ### Installation
6 |
7 | ```
8 | $ npm install
9 | ```
10 |
11 | ### Local Development
12 |
13 | ```
14 | $ npm run start
15 | ```
16 |
17 | This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.
18 |
19 | ### Build
20 |
21 | ```
22 | $ npm run build
23 | ```
24 |
25 | This command generates static content into the `build` directory, which can be served using any static content hosting service.
26 |
--------------------------------------------------------------------------------
/gen-docs/docs/Examples/container-connections.md:
--------------------------------------------------------------------------------
1 | # Connecting the Containers
2 |
3 | ## HTTP Proxy for Arr Services
4 |
5 | Transmission, qBittorrent, and Deluge all have an HTTP proxy that will route requests through the VPN connection.
6 |
7 | You can configure other apps to connect to these containers' proxy services by using `transmission:8888`, `qbittorrent:8118`, or `deluge:8118`, depending on which you are using.
8 |
9 | ![Proxy example](../static/img/proxy_example.png)
10 |
11 | ## Connecting the Containers to each other
12 |
13 | When connecting containers to each other, such as Prowlarr to Sonarr and Radarr, or Sonarr/Radarr to Overseerr, you can use the name of the container (e.g. `prowlarr` or `radarr`) and then define the container port to connect to (e.g. `prowlarr:9696` or `radarr:7878`).
14 |
15 | For the name and port of each container, use the `Container Name` and `Container Port` values from the **[Container Map](../container-map)**.
16 |
17 | Here's an example within Prowlarr:
18 |
19 | ![Container connection example](../static/img/container_connect_example.png)
20 |
--------------------------------------------------------------------------------
/gen-docs/docs/config-docs/Cloudflare/ddns.md:
--------------------------------------------------------------------------------
1 | # Cloudflare DDNS
2 |
3 | Below is how to configure DDNS (Dynamic DNS) for Cloudflare.
4 |
5 | ## Requirements
6 |
7 | - A Cloudflare account and Cloudflare configured as your domain's DNS servers
8 | - API keys for your account with the correct permissions
9 | - Requires `Zone.DNS:Edit` permissions for the correct zone
10 |
11 | ## Enabling
12 |
13 | In `inventory/group_vars/all/cloudflare_ddns.yml`:
14 |
15 | - `cloudflare_ddns_enabled` : `yes` or `no` to enable/disable Cloudflare DDNS (default: `no`)
16 | - `cloudflare_api_token` : the API token of the Cloudflare account
17 | - `cloudflare_zone` : the domain name of the Cloudflare zone (e.g. `example.com`)
18 | - `cloudflare_ddns_subdomain` : the subdomain record (e.g. `overseerr` would be created as `overseerr.example.com`) (default: `overseerr`)
19 | - `cloudflare_ddns_proxied` : `'true'` or `'false'` to enable/disable proxying the traffic through Cloudflare (default: `'true'`)
20 | - NOTE: This value must be in quotes
21 |
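22 | For example, a filled-out `inventory/group_vars/all/cloudflare_ddns.yml` using the variables above might look like this (the token and domain are placeholders):
23 |
24 | ```yaml
25 | cloudflare_ddns_enabled: yes
26 | # API token with Zone.DNS:Edit permission on the zone below
27 | cloudflare_api_token: "your-cloudflare-api-token"
28 | cloudflare_zone: example.com
29 | # record will be created as overseerr.example.com
30 | cloudflare_ddns_subdomain: overseerr
31 | # must be quoted
32 | cloudflare_ddns_proxied: 'true'
33 | ```
34 |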
--------------------------------------------------------------------------------
/gen-docs/docs/config-docs/Cloudflare/tunnel.md:
--------------------------------------------------------------------------------
1 | # Cloudflare Tunnel
2 |
3 | ## Requirements and Enabling
4 |
5 | 1. You will need to first generate a token by following the steps [here](https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/install-and-setup/tunnel-guide/remote/#1-create-a-tunnel)
6 |
7 | a. You can find the token here:
8 |
9 | ![Cloudflare Tunnel token](../../static/img/cloudflare_tunnel_token.png)
10 |
11 | 2. Once you've generated the token, update the variables in `inventory/group_vars/all/cloudflare.yml`:
12 |
13 | - `cloudflare_tunnel_enabled` to `yes`
14 | - `cloudflare_tunnel_token` to your token
15 |
16 | 3. After the container has been started, you should now see an active Connector in your Cloudflare dashboard
17 |
18 | 4. Follow [the steps here](https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/install-and-setup/tunnel-guide/remote/#2-connect-an-application) to link containers to the tunnel, following the [container map](../../container-map.md) for the available container names and ports (use the container name as the "Service" name in the Cloudflare webgui, and append the port, e.g. `overseerr:5055`)
19 |
20 | Example:
21 |
22 | ![Cloudflare Tunnel example](../../static/img/cloudflare_tunnel_example.png)
23 |
24 | ## Important Notes
25 |
26 | :::tip
27 |
28 | The "public hostname" subdomain you use does not need to match any Traefik proxy rule as **this traffic does NOT pass through Traefik**, it goes directly from the container -> Cloudflare Tunnel via the internal Docker network.
29 |
30 | :::
31 |
32 | :::warning
33 |
34 | This also means that **SSO using Authentik will not work for any container configured to go through the Tunnel** due to the authentication middleware being applied by Traefik. In order to use Authentik with a publicly accessible container, you will need to port forward.
35 |
36 | :::
37 |
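38 | As a recap of step 2, the two variables in `inventory/group_vars/all/cloudflare.yml` end up looking like this (the token value is a placeholder):
39 |
40 | ```yaml
41 | cloudflare_tunnel_enabled: yes
42 | # token generated when creating the tunnel in the Cloudflare dashboard
43 | cloudflare_tunnel_token: "your-tunnel-token"
44 | ```
45 |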
--------------------------------------------------------------------------------
/gen-docs/docs/config-docs/Homepage.md:
--------------------------------------------------------------------------------
1 | # Homepage
2 |
3 | ## Using Homepage
4 |
5 | Homepage is able to integrate directly with Docker, allowing it to "auto-discover" the running containers/services.
6 |
7 | Homepage can also integrate with a very large number of the containers in this project, so setting up the connection is very easy.
8 |
9 | You just need to define each service's API key in the `inventory/group_vars/all/homepage_api_keys.yml` file. These keys can be found in each application's settings page.
10 |
11 | It is also _highly_ recommended to ensure the permissions and ownership of this file are locked down. You can do this by running:
12 |
13 | ```bash
14 | chmod 0600 inventory/group_vars/all/homepage_api_keys.yml
15 | ```
16 |
17 | ```bash
18 | chown $(whoami):$(whoami) inventory/group_vars/all/homepage_api_keys.yml
19 | ```
20 |
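21 | As a sketch, a filled-in entry might look like the following (the variable name here is hypothetical; check the file for the actual names):
22 |
23 | ```yaml
24 | # hypothetical example; see homepage_api_keys.yml for the real variable names
25 | sonarr_api_key: "0123456789abcdef0123456789abcdef"
26 | ```
27 |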
--------------------------------------------------------------------------------
/gen-docs/docs/config-docs/Netdata.md:
--------------------------------------------------------------------------------
1 | # Netdata
2 |
3 | ## Claiming Instance
4 |
5 | To claim your Netdata node when prompted in the webpage, run this command:
6 |
7 | ```bash
8 | docker exec -it netdata cat /var/lib/netdata/netdata_random_session_id
9 | ```
10 |
11 | ## Accessing via Traefik
12 |
13 | Since Netdata uses the `host` network mode, it cannot be connected to Traefik via the Docker internal network which results in it not being "auto-discovered" by Traefik.
14 |
15 | Instead, you will have to treat it as an "external service" to Traefik. More detail on configuring external services is available in the [External Services](./traefik/other-svcs.md) Traefik documentation, but the below should help.
16 |
17 | In `inventory/group_vars/all/traefik.yml`, set `traefik_ext_hosts_enabled` to `yes` and uncomment the lines in `traefik_ext_hosts_list` that are related to Netdata (like below):
18 |
19 | ```yml
20 | ...
21 | {
22 | friendly_name: netdata,
23 | subdomain_name: netdata,
24 | backend_url: "http://netdata.{{ hms_docker_domain }}:19999",
25 | enabled: yes,
26 | authentik: no,
27 | authentik_provider_type: proxy
28 | }
29 | ```
30 |
--------------------------------------------------------------------------------
/gen-docs/docs/config-docs/Networking.md:
--------------------------------------------------------------------------------
1 | # Networking
2 |
3 | :::tip
4 |
5 | Ports are _NOT_ exposed by default (with the exception of Traefik, `80/443`) on the host.
6 |
7 | :::
8 |
9 | The service ports (such as for Sonarr (`8989`) or Radarr (`7878`)) will be **exposed/open** on the host machine if:
10 |
11 | * `container_expose_ports` is set to `yes` in `inventory/group_vars/all/container_settings.yml`
12 |
13 | * Traefik is disabled entirely
14 |
15 | * Traefik is disabled on that specific container in `inventory/group_vars/all/container_map.yml`
16 |
17 | See the **[Container Map](../container-map.md)** for each service's `Host Port` value; some services share the same default port, so the port exposed on the host may differ from the application's usual default.
18 |
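19 | For example, to expose each service's port on the host even while Traefik is enabled, set the following in `inventory/group_vars/all/container_settings.yml`:
20 |
21 | ```yaml
22 | # exposes each container's service port on the host
23 | container_expose_ports: yes
24 | ```
25 |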
--------------------------------------------------------------------------------
/gen-docs/docs/config-docs/Tailscale.md:
--------------------------------------------------------------------------------
1 | # Tailscale
2 |
3 | ## Important Notes
4 |
5 | - You must generate an Ephemeral auth key in the Tailscale admin console; you can find [instructions here](https://tailscale.com/kb/1111/ephemeral-nodes/#step-1-generate-an-ephemeral-auth-key).
6 | - If your Tailnet requires manual approval of new nodes, you must approve the node within a couple of minutes (or configure the key to automatically approve new nodes); otherwise the container will keep registering a new node on your Tailnet every few minutes.
7 |
8 | - Tailscale auth keys are valid for at most 90 days.
9 |
--------------------------------------------------------------------------------
/gen-docs/docs/config-docs/_category_.json:
--------------------------------------------------------------------------------
1 | {
2 | "label": "Additional Configuration",
3 | "position": 5,
4 | "collapsible": true,
5 | "collapsed": false,
6 | "link": {
7 | "type": "generated-index",
8 | "description": "Getting started with ansible-hms-docker"
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/gen-docs/docs/config-docs/traefik/_category_.json:
--------------------------------------------------------------------------------
1 | {
2 | "label": "Traefik",
3 | "link": {
4 | "type": "generated-index",
5 | "description": "How to configure Traefik"
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/gen-docs/docs/config-docs/traefik/other-svcs.md:
--------------------------------------------------------------------------------
1 | # Other Containers and Services
2 |
3 | ## Adding other Containers to Traefik
4 |
5 | If a container exists outside of this Compose project but on the same host, you can add it to Traefik so it can also have TLS/SSL
6 |
7 | 1. Add the `hms-docker_proxy_net` (default) network to the container along with required labels:
8 |
9 | ```yml
10 | services:
11 |   mycontainer:
12 |     image: mycontainerimage:latest
13 |     ...
14 |     networks:
15 |       - hms-docker_proxy_net
16 |     labels:
17 |       - traefik.enable=true
18 |       - traefik.http.services.<service_name>.loadbalancer.server.port=<container_port>
19 |       - traefik.http.routers.<service_name>.rule=Host(`<subdomain>.${HMSD_DOMAIN}`)
20 |       - traefik.http.routers.<service_name>.middlewares=internal-ipallowlist@file
21 |     ...
22 | networks:
23 |   hms-docker_proxy_net:
24 |     external: true
25 | ...
26 | ```
27 |
28 | :::note
29 |
30 | If you changed the `project_name` in the `hmsd_advanced.yml` config file, use that `project_name` instead of `hms-docker`
31 |
32 | :::
33 |
34 | 2. [Add DNS records](../../getting-started/dns-setup.md) (if necessary)
35 |
36 | 3. Restart the containers you just added labels to
37 |
38 | 4. Check to see if it is working correctly
39 |
40 | ## Adding External Services to Traefik
41 |
42 | You can add external services (such as services running on another host/server, like an external Grafana server) to this project's Traefik config.
43 |
44 | In `inventory/group_vars/all/traefik.yml` you _must_ set `traefik_ext_hosts_enabled` to `yes`, and add the correct items to the `traefik_ext_hosts_list` array.
45 |
46 | :::warning
47 |
48 | All traffic between the host that runs Traefik and the target external service will be **unencrypted**:
49 |
50 | > Traefik will terminate the SSL connections (meaning that it will send decrypted data to the services).
51 |
52 | **_[Source](https://doc.traefik.io/traefik/routing/routers/#general)_**.
53 |
54 | You may be able to add additional Traefik configuration to configure certificates for HTTPS or bypass certificate checking, but that is outside this scope.
55 |
56 | :::
57 |
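58 | As an example, an external Grafana server could be added with an entry like the sketch below (the structure mirrors the Netdata example in the [Netdata docs](../Netdata.md); the hostname and port are placeholders):
59 |
60 | ```yml
61 | traefik_ext_hosts_enabled: yes
62 | traefik_ext_hosts_list:
63 |   [
64 |     {
65 |       friendly_name: grafana,
66 |       subdomain_name: grafana,
67 |       backend_url: "http://grafana.example.com:3000",
68 |       enabled: yes,
69 |       authentik: no,
70 |       authentik_provider_type: proxy
71 |     },
72 |   ]
73 | ```
74 |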
--------------------------------------------------------------------------------
/gen-docs/docs/config-docs/traefik/security.md:
--------------------------------------------------------------------------------
1 | # Security
2 |
3 | ## How the containers are protected
4 |
5 | There is an allowlist configured within Traefik that only allows private (RFC1918) IPs to access the containers via Traefik. However, if you choose to route a container through a [Cloudflare Tunnel](../Cloudflare/tunnel.md) (recommended so you don't have to port forward), that traffic is no longer routed through Traefik.
6 |
7 | This is controlled on a per-container basis in the `inventory/group_vars/all/container_map.yml` file as the `expose_to_public` variable for each container. If you set this to `yes`, it will allow all IPs (`0.0.0.0/0`) to access them.
8 |
9 | ## SSO
10 |
11 | To configure SSO (Single Sign-On) for certain containers, see the [Authentik docs](../Authentik.md)
12 |
13 | ## TLS Versions
14 |
15 | `traefik_security_hardening`: When enabled, this disables TLS 1.0 and TLS 1.1 and sets TLS 1.2 as the new minimum.
16 |
17 |
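18 | For example, enabling the TLS hardening (this variable lives alongside the other Traefik settings, assumed here to be `inventory/group_vars/all/traefik.yml`):
19 |
20 | ```yaml
21 | # disables TLS 1.0/1.1; TLS 1.2 becomes the minimum
22 | traefik_security_hardening: yes
23 | ```
24 |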
--------------------------------------------------------------------------------
/gen-docs/docs/design-decisions.md:
--------------------------------------------------------------------------------
1 | # Design Decisions
2 |
3 | Below are some of the ideas behind why certain decisions were made within this project (and for me to remember why I did things this way)
4 |
5 | ## Variable Layout
6 |
7 | The variables being stored in `inventory/group_vars/all` was decided due to [Ansible variable precedence](https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable).
8 |
9 | This layout allows someone to specify multiple hosts in the `inventory/hosts.yml` file (or another custom inventory file), each with their own host-specific variable settings on top of a "common" shared config (see the inventory sketch at the bottom of this page). See the [Install Docs](getting-started/install.md#remote-host).
10 |
11 | Use case: development instance with different domain and/or SSL certificate but all other settings the same
12 |
13 | ## Containers
14 |
15 | ### Gluetun
16 |
17 | Gluetun was not implemented because adding `network_mode: "service:gluetun"` to other containers, such as qBittorrent, did not fully protect the traffic (see [this discussion post](https://github.com/ahembree/ansible-hms-docker/discussions/116#discussioncomment-12888175)).
18 |
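19 | As a reference for the multi-host use case above, here is a minimal sketch of such an `inventory/hosts.yml` (the host names and the choice of overridden variable are illustrative):
20 |
21 | ```yaml
22 | all:
23 |   hosts:
24 |     hms-prod:
25 |       hms_docker_domain: example.com
26 |     hms-dev:
27 |       hms_docker_domain: dev.example.com
28 | ```
29 |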
--------------------------------------------------------------------------------
/gen-docs/docs/getting-started/GPU.md:
--------------------------------------------------------------------------------
1 | # GPU
2 |
3 | Supported containers:
4 |
5 | - Plex
6 | - Emby
7 | - Jellyfin
8 | - Tdarr
9 |
10 | The variables listed below for each supported GPU type should exist in your `inventory/group_vars/all/gpu.yml` file
11 |
12 | ## Tdarr
13 |
14 | You can also control Tdarr GPU support individually in the `inventory/group_vars/all/service_misc.yml` file. By default, it will use the values defined in the `gpu.yml` file mentioned above.
15 |
16 | ```yaml
17 | tdarr_enable_nvidia_gpu: true
18 | tdarr_enable_intel_gpu: true
19 | ```
20 |
21 | ## Intel GPU
22 |
23 | If you have a supported Intel processor, you can enable Intel Quick Sync Video for use within containers.
24 |
25 | ```yaml
26 | # inventory/group_vars/all/gpu.yml
27 |
28 | enable_intel_gpu: true # or yes
29 | ```
30 |
31 | ## Nvidia GPU
32 |
33 | If you have a [supported Nvidia graphics card](https://developer.nvidia.com/video-encode-and-decode-gpu-support-matrix-new), you can enable Nvidia GPU transcoding.
34 |
35 | :::note
36 |
37 | You must install the correct Nvidia driver for your system _before_ running this playbook with the Nvidia GPU support enabled as shown below.
38 |
39 | This playbook does _not_ install the required driver.
40 |
41 | :::
42 |
43 | ```yaml
44 | # inventory/group_vars/all/gpu.yml
45 |
46 | enable_nvidia_gpu: true # or yes
47 | ```
48 |
--------------------------------------------------------------------------------
/gen-docs/docs/getting-started/_category_.json:
--------------------------------------------------------------------------------
1 | {
2 | "label": "Getting Started",
3 | "position": 3,
4 | "collapsible": true,
5 | "collapsed": false,
6 | "link": {
7 | "type": "generated-index",
8 | "description": "Getting started with ansible-hms-docker"
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/gen-docs/docs/getting-started/dns-setup.md:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_position: 5
3 | ---
4 | # Configuring DNS
5 |
6 | :::tip
7 |
8 | It is recommended to use an internal DNS server (such as a Pi-hole or AdGuard Home) to resolve requests that point to an internal private IP address
9 |
10 | :::
11 |
12 | The domain used is defined in the variable `hms_docker_domain` in `inventory/group_vars/all/main.yml`
13 |
14 | ## Accessing the Containers
15 |
16 | ### Internally
17 |
18 | If you do not already have a "wildcard" DNS record (`*.example.com`) set up for the domain you used on your LOCAL DNS server, create this `A` record to point to the private IP address of the server.
19 |
20 | You can also create individual `A` records for each container listed in the [Container Map](../container-map.md), or have 1 `A` record with multiple `CNAME` records pointed to the `A` record. This will allow you to change 1 DNS record if the IP were to ever change, instead of having to change many individual records.
21 |
22 | If the appropriate DNS records exist (you can test by running `nslookup <hostname>`, or optionally `nslookup <hostname> <dns_server>` to query a specific DNS server), you can then access the containers from your network by going to `<proxy_host_rule>.<domain>`, where `<proxy_host_rule>` is the `proxy_host_rule` value (from the container map config file) and `<domain>` is the domain you used for the variable `hms_docker_domain`.
23 |
24 | You can also change the name/subdomain of each application within the `hms_docker_container_map` in the container's `proxy_host_rule` value.
25 |
26 | ### Externally
27 |
28 | If you enabled Cloudflare DDNS, an `overseerr` public `A` record will be created automatically that points to your network's _public_ IP.
29 |
30 | - This default `A` record can be changed in the `cloudflare_ddns_subdomain` variable located in `inventory/group_vars/all/cloudflare.yml`.
31 |
32 | :::note
33 |
34 | The below only applies if you are NOT using a [Cloudflare Tunnel](../config-docs/Cloudflare/tunnel.md):
35 |
36 | Although this DNS record is created automatically, you will need to set the `expose_to_public` value to `yes` for the `overseerr` container in the [Container Map](../container-map.md) config file if you want Overseerr to be public
37 |
38 | :::
39 |
40 | Unless ports `80` and `443` are forwarded on the router to your host, accessing this public address from outside your main network will not work.
41 |
42 | To grant public access to other containers, you will need to:
43 |
44 | - Create a public DNS record for it that is either:
45 |
46 | a.) `A` record that points to the public IP
47 |
48 | b.) `CNAME` record that points to the `<cloudflare_ddns_subdomain>.<domain>` record (e.g. `overseerr.example.com`)
49 |
50 | - Set the `expose_to_public` value to `yes` for the specific container in the [Container Map](../container-map.md)
51 |
52 | - OR use a [Cloudflare Tunnel](../config-docs/Cloudflare/tunnel.md)
53 |
--------------------------------------------------------------------------------
/gen-docs/docs/getting-started/network-shares/NAS.md:
--------------------------------------------------------------------------------
1 | # NAS
2 |
3 | If you have more than 1 share you need to mount, see the **[Additional NAS docs](./additional-nas.md)** after doing this configuration.
4 |
5 | The main mount point is defined in `hms_docker_mount_path` in file `inventory/group_vars/all/main.yml`
6 |
7 | The main NAS share type is defined in `hms_docker_media_share_type` in file `inventory/group_vars/all/main.yml`
8 |
9 | ## NFS Shares
10 |
11 | ### NFS Requirements
12 |
13 | Required settings for the `hms_docker_media_share_type` of `nfs`:
14 |
15 | In `inventory/group_vars/all/nas_nfs.yml`
16 |
17 | - `nas_client_remote_nfs_path` : the path to the network share (e.g. `nas.example.com:/share`)
18 | - `nas_client_nfs_opts` : the options for the network share (Google can help you find the correct options)
19 |
20 | ## CIFS Shares
21 |
22 | :::warning
23 |
24 | The CIFS credentials will be stored in plaintext within the `hms_docker_data_path` folder, but the file will be owned by `root:root` with `0600` permissions, so only those with root or sudo access can read it
25 |
26 | :::
27 |
28 | ### CIFS Requirements
29 |
30 | Required settings for the `hms_docker_media_share_type` of `cifs`:
31 |
32 | In `inventory/group_vars/all/nas_cifs.yml`
33 |
34 | - `nas_client_remote_cifs_path` : the path to the network share (e.g. `//nas.example.com/share`)
35 | - `nas_client_cifs_username` : the username of the network share
36 | - `nas_client_cifs_password` : the password of the network share
37 | - `nas_client_cifs_opts` : the options for the network share (Google can help you find the correct options)
38 |
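39 | For reference, here is a sketch of the two config files using the variables above (hostnames, paths, and credentials are placeholders):
40 |
41 | ```yaml
42 | # inventory/group_vars/all/nas_nfs.yml
43 | nas_client_remote_nfs_path: "nas.example.com:/share"
44 | nas_client_nfs_opts: "defaults"
45 |
46 | # inventory/group_vars/all/nas_cifs.yml
47 | nas_client_remote_cifs_path: "//nas.example.com/share"
48 | nas_client_cifs_username: "shareuser"
49 | nas_client_cifs_password: "sharepassword"
50 | nas_client_cifs_opts: "rw,soft"
51 | ```
52 |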
--------------------------------------------------------------------------------
/gen-docs/docs/getting-started/network-shares/_category_.json:
--------------------------------------------------------------------------------
1 | {
2 | "label": "Network Shares",
3 | "link": {
4 | "type": "generated-index",
5 | "description": "How to configure network shares for the containers"
6 | },
7 | "position": 3
8 | }
9 |
--------------------------------------------------------------------------------
/gen-docs/docs/getting-started/network-shares/additional-nas.md:
--------------------------------------------------------------------------------
1 | # Additional NAS Mounts
2 |
3 | If you have more than 1 network share you want to mount, set `nas_client_use_additional_paths` in `inventory/group_vars/all/nas_additional.yml` to `yes`
4 |
5 | ## List of Variables
6 |
7 | Use the below variables to create a list of mappings in the `nas_client_remote_additional_paths` variable in `inventory/group_vars/all/nas_additional.yml`.
8 |
9 | Confused? See the [Example below](#example); a version of it already exists in `inventory/group_vars/all/nas_additional.yml`, so just modify that.
10 |
11 | ### Local Folder
12 |
13 | - `name`: Friendly name of the path
14 | - `local_mount_path`: Local path to the folder
15 | - `type`: Type of path, valid: `local`
16 |
17 | ### NFS Share Variables
18 |
19 | - `name`: Friendly name of the path
20 | - `remote_path`: Remote path to the folder
21 | - `local_mount_path`: Local path to where it will be mounted
22 | - `type`: Type of path, valid: `nfs`
23 | - `nfs_opts`: NFS options, default: `defaults`
24 |
25 | ### CIFS Share Variables
26 |
27 | - `name`: Friendly name of the path
28 | - `remote_path`: Remote path to the folder
29 | - `local_mount_path`: Local path to where it will be mounted
30 | - `type`: Type of path, valid: `cifs`
31 | - `cifs_username`: CIFS username, default: `""`
32 | - `cifs_password`: CIFS password, default: `""`
33 | - `cifs_opts`: CIFS options, default: `rw,soft`
34 |
35 | ## Example
36 |
37 | Below is an example configuration that defines both an NFS mount (the first) and a CIFS mount (the second) that also uses the default mounting location defined in the variable `hms_docker_mount_path` in file `inventory/group_vars/all/main.yml`
38 |
39 | ```yaml
40 | nas_client_remote_additional_paths:
41 | [
42 | {
43 | name: "Media 4K",
44 | remote_path: "192.168.1.5:/Media_4K",
45 | local_mount_path: "{{ hms_docker_mount_path }}/Media_4k",
46 | type: nfs,
47 | nfs_opts: "rw,defaults"
48 | },
49 | {
50 | name: "Media NAS 3",
51 | remote_path: "//nas.example.com/media_3",
52 | local_mount_path: "{{ hms_docker_mount_path }}_custom_path_3",
53 | type: cifs,
54 | cifs_username: "insecureusername",
55 | cifs_password: "veryinsecurepassword",
56 | cifs_opts: "rw,soft",
57 | },
58 | ]
59 | ```
60 |
--------------------------------------------------------------------------------
/gen-docs/docs/getting-started/requirements.md:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_position: 1
3 | ---
4 |
5 | # Requirements
6 |
7 | ## Supported Platforms
8 |
9 | Currently only Ubuntu 22.04 LTS is actively supported and is used for GitHub Actions testing.
10 |
11 | Ubuntu 24.04 LTS may work; please submit a [GitHub Issue](https://github.com/ahembree/ansible-hms-docker/issues) if you encounter any issues.
12 |
13 | I've confirmed this repo also works on a Raspberry Pi 5 with 8GB RAM, but have not tested against other ARM-based systems (Apple Silicon, NAS systems, etc).
14 |
15 | RHEL based systems (CentOS 8, Fedora, Alma Linux, Rocky Linux) may work, but are no longer being tested against and are not officially supported.
16 |
17 | ## Hardware
18 |
19 | - Minimum 4 CPU Cores
20 | - Minimum 4GB RAM (2GB additional if using Authentik)
21 | - Minimum 8GB free disk space
22 |
23 | ## Software / Services
24 |
25 | - [Supported Platform](#supported-platforms)
26 | - `root` or `sudo` access
27 | - [Ansible](https://docs.ansible.com/ansible/latest/installation_guide/installation_distros.html)
28 | - You own a domain name and are able to modify DNS `A` and `TXT` records (if you want SSL and/or dynamic DNS)
29 | - (Preferred) Have an internal DNS server that can resolve some or all of your domain name/zone.
30 | - You use a [supported VPN provider](https://haugene.github.io/docker-transmission-openvpn/supported-providers/#internal_providers) (if Transmission is enabled)
31 | - You use a [supported DNS provider](https://doc.traefik.io/traefik/https/acme/#providers) (if SSL is enabled)
32 | - You have a Cloudflare account with the correct DNS zones and API keys configured (if Tunnel or dynamic DNS and/or SSL is enabled)
33 | - Nvidia GPU drivers already installed (if using Nvidia GPU acceleration)
34 |
35 | ## Network
36 |
37 | If you plan to make Plex and/or Overseerr available outside your local network, the following ports must be forwarded in your router to the IP of the server that will be running these containers.
38 |
39 | Instructions for forwarding ports to the correct device are outside the scope of this project, as every router/gateway has different instructions.
40 |
41 | This is in no way guaranteed to be the best or most secure way to do this, and this assumes your ISP does not block these ports.
42 |
43 | Ports required to port forward:
44 |
45 | - `32400/tcp` (Plex)
46 | - `80/tcp` (HTTP) (Not required if using Cloudflare Tunnel)
47 | - `443/tcp` (HTTPS) (Not required if using Cloudflare Tunnel)
48 |
49 | ## Technical Skills
50 |
51 | - Familiarity with editing config files (mainly YAML format)
52 | - Familiarity with Linux (installing packages, troubleshooting, etc)
53 | - Familiarity with Docker/containers (debugging, starting/stopping, getting a shell/CLI)
54 |
--------------------------------------------------------------------------------
/gen-docs/docs/getting-started/updating.md:
--------------------------------------------------------------------------------
1 | # Updating
2 |
3 | To easily update from this git repo _**and**_ update your custom variable names (due to deprecating/renaming variables) if you are on a previous release, run:
4 |
5 | ```bash
6 | make update
7 | ```
8 |
9 | Previous variable names will still work for at least a year after the change and will be noted as deprecated within the default configs. Please update your variable names to resolve these notices.
10 |
11 | Please see the [Release Notes](../category/release-notes) if you are updating from a previous version.
12 |
13 | ## New Containers
14 |
15 | When a new container is added, you will need to manually add it to your [Container Map](../container-map.md) file; the entry will look something like this:
16 |
17 | ```yaml
18 | ...
19 | newcontainername:
20 | enabled: yes
21 | proxy_host_rule: new-container-name
22 | directory: yes
23 | traefik: yes
24 | authentik: no
25 | authentik_provider_type: proxy
26 | expose_to_public: no
27 | homepage: yes
28 | homepage_stats: no
29 | ...
30 | ```
31 |
32 | :::note
33 |
34 | If the key (such as `newcontainername:`) does not match an available container, an error may be thrown.
35 |
36 | :::
37 |
--------------------------------------------------------------------------------
/gen-docs/docs/getting-started/vpn-and-dl-clients/_category_.json:
--------------------------------------------------------------------------------
1 | {
2 | "label": "VPN and Download Clients",
3 | "link": {
4 | "type": "generated-index",
5 | "description": "How to configure VPN and Download Clients"
6 | },
7 | "position": 4
8 | }
9 |
--------------------------------------------------------------------------------
/gen-docs/docs/getting-started/vpn-and-dl-clients/deluge.md:
--------------------------------------------------------------------------------
1 | # Deluge
2 |
3 | This setup uses the container maintained here: [binhex/arch-delugevpn](https://github.com/binhex/arch-delugevpn)
4 |
5 | Settings mentioned below should already exist in your `inventory/group_vars/all/vpn.yml` file
6 |
7 | The default password is `deluge`
8 |
9 | :::note
10 |
11 | If you update the Deluge password _and_ are using the automatic app bootstrap, you will need to update the password for the `HMSD - Deluge` download client in the Sonarr and Radarr app(s)
12 |
13 | :::
14 |
15 | :::note
16 |
17 | After updating the VPN config file, run `docker restart deluge`
18 |
19 | :::
20 |
21 | ## OpenVPN
22 |
23 | Requires the following variables:
24 |
25 | * `hmsdocker_vpn_type`: `openvpn`
26 | * `hmsdocker_vpn_user`: Your VPN account/service account username
27 | * `hmsdocker_vpn_pass`: Your VPN account/service account password
28 |
29 | If using an OpenVPN config file for your VPN connection, please update or place the `.ovpn` file and any other required files in the directory: `/opt/hms-docker/apps/deluge/config/openvpn` (default)
30 |
31 | This folder will not exist until the playbook is run or the container runs
32 |
33 | ## WireGuard
34 |
35 | Requires the following variables:
36 |
37 | * `hmsdocker_vpn_type`: `wireguard`
38 |
39 | If using WireGuard for your VPN connection, please update or place the `wg0.conf` file and any other required files in the directory: `/opt/hms-docker/apps/deluge/config/wireguard` (default)
40 |
41 | This folder will not exist until the playbook is run or the container runs
42 |
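43 | For reference, the OpenVPN case above in `inventory/group_vars/all/vpn.yml` would look something like this (credentials are placeholders):
44 |
45 | ```yaml
46 | hmsdocker_vpn_type: openvpn
47 | hmsdocker_vpn_user: "vpn-service-username"
48 | hmsdocker_vpn_pass: "vpn-service-password"
49 | ```
50 |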
--------------------------------------------------------------------------------
/gen-docs/docs/getting-started/vpn-and-dl-clients/qbittorrent.md:
--------------------------------------------------------------------------------
1 | # qBittorrent
2 |
3 | This setup uses the container maintained here: [binhex/arch-qbittorrentvpn](https://github.com/binhex/arch-qbittorrentvpn)
4 |
5 | Settings mentioned below should already exist in your `inventory/group_vars/all/vpn.yml` file
6 |
7 | :::note
8 |
9 | After updating the VPN config file, run `docker restart qbittorrent`
10 |
11 | :::
12 |
13 | :::warning
14 |
15 | Authentication will be **disabled** by default for private (RFC1918) IP space:
16 |
17 | - `10.0.0.0/8`
18 | - `172.16.0.0/12`
19 | - `192.168.0.0/16`
20 |
21 | This is to allow the automatic app bootstrap to work and to reduce the complexity of retrieving the temporary admin password.
22 |
23 | If you wish to have authentication, you can configure this in the WebUI settings of qBittorrent. You will also need to update the qBittorrent download client in the Sonarr and Radarr apps.
24 |
25 | If you enabled authentication _before_ running the app bootstrap tasks, they will fail. To resolve this, turn off authentication in qBittorrent, run the bootstrap tasks, and then configure the qBittorrent download client username/password in the specific apps.
26 |
27 | :::
28 |
29 | ## OpenVPN
30 |
31 | Requires the following variables:
32 |
33 | * `hmsdocker_vpn_type`: `openvpn`
34 | * `hmsdocker_vpn_user`: Your VPN account/service account username
35 | * `hmsdocker_vpn_pass`: Your VPN account/service account password
36 |
37 | If using OpenVPN for your VPN connection, please update or place the `.ovpn` file and any other required files in the directory: `/opt/hms-docker/apps/qbittorrent/config/openvpn` (default)
38 |
39 | This folder will not exist until the playbook is run or the container runs
40 |
41 | ## WireGuard
42 |
43 | Requires the following variables:
44 |
45 | * `hmsdocker_vpn_type`: `wireguard`
46 |
47 | If using WireGuard for your VPN connection, please update or place the `wg0.conf` file and any other required files in the directory: `/opt/hms-docker/apps/qbittorrent/config/wireguard` (default)
48 |
49 | This folder will not exist until the playbook is run or the container runs
50 |
--------------------------------------------------------------------------------
/gen-docs/docs/getting-started/vpn-and-dl-clients/transmission.md:
--------------------------------------------------------------------------------
1 | # Transmission
2 |
3 | This setup uses the container maintained here: [haugene/docker-transmission-openvpn](https://github.com/haugene/docker-transmission-openvpn)
4 |
5 | Settings mentioned below should already exist in your `inventory/group_vars/all/vpn.yml` or `inventory/group_vars/all/transmission.yml` file
6 |
7 | ## OpenVPN
8 |
9 | Requires the following variables:
10 |
11 | * `hmsdocker_vpn_provider`: see [the official docs page](https://haugene.github.io/docker-transmission-openvpn/supported-providers/)
12 | * `hmsdocker_vpn_user`: Your VPN account/service account username
13 | * `hmsdocker_vpn_pass`: Your VPN account/service account password
14 |
15 | ### Using a different VPN location or type
16 |
17 | For supported providers, you can change the server location and/or type.
18 |
19 | 1. Make sure `hmsdocker_vpn_provider` is set to your correct provider
20 |
21 | a. You can find supported providers at [the official docs page](https://haugene.github.io/docker-transmission-openvpn/supported-providers/)
22 |
23 | 2. Find your VPN providers folder in [this github repo](https://github.com/haugene/vpn-configs-contrib/tree/main/openvpn)
24 |
25 | 3. Find the correct VPN config you want to use, and use this as the value for `transmission_ovpn_config_file`, and remove the `.ovpn` from the end
26 |
27 | For example, if you wanted to use the US Chicago server for mullvad:
28 |
29 | ```yml
30 | hmsdocker_vpn_provider: MULLVAD
31 | ...
32 | transmission_ovpn_config_file: us_chi
33 | ```
34 |
35 | ### Custom Provider
36 |
37 | #### Using a local OpenVPN config file
38 |
39 | 1. Change `hmsdocker_vpn_provider` to `custom`
40 |
41 | 2. Change `transmission_ovpn_config_file` to the `.ovpn` file name, and remove the `.ovpn` from the end
42 |
43 | 3. Change `transmission_ovpn_config_local_dir` to the folder path where the above file is stored
44 |
45 | a. If needed by your provider/server, make sure certificate files and any others are also in the same folder
46 |
47 | For example, if you had a custom file named `test-vpn.ovpn` located in `/opt/hms-docker/vpn_configs` (this folder does not exist by default, just an example):
48 |
49 | ```yml
50 | transmission_ovpn_config_file: test-vpn
51 | transmission_ovpn_config_local_dir: /opt/hms-docker/vpn_configs
52 | ```
53 |
--------------------------------------------------------------------------------
/gen-docs/docs/intro.md:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_label: Introduction
3 | sidebar_position: 1
4 | slug: /
5 | ---
6 |
7 | # HMS-Docker
8 |
9 | ![HMS-Docker logo](./static/img/hmsd.png)
10 |
11 | ## Introduction
12 |
13 | ansible-hms-docker helps set up a home media server automation pipeline using many containers.
14 |
15 | The idea was to go from a fresh OS install to a fully running media server after only installing Ansible and configuring variables.
16 |
17 | Setting up the individual container configurations, such as for Sonarr, Radarr, Overseerr, Prowlarr, etc., is outside the scope of this project. The purpose of this project is to ensure the necessary base containers are running with the appropriate configs. There is a basic outline of how to connect the containers together in the [Container Connections](./Examples/container-connections.md) doc.
18 |
19 | ## Features
20 |
21 | - Automatic Docker installation
22 | - Automatic container/service updates
23 | - Wildcard SSL certificate generation
24 | - Dynamic DNS updates with Cloudflare
25 | - GPU acceleration for media transcoding
26 | - Intel and Nvidia GPU support
27 | - You must install the drivers for your Nvidia GPU yourself; driver installation is not included in this playbook, but the playbook will verify that GPU acceleration is available
28 | - Support for multiple network shares
29 | - Single Sign-On with Authentik
30 | - Support for separate 4K instances of Sonarr and Radarr
31 | - Automated dashboard configuration in [Homepage](https://gethomepage.dev/)
32 | - Custom scripts
33 | - Advanced monitoring script(s) for Uptime-Kuma to detect if media is actually accessible by the Plex container
34 | - Convert Traefik certificate file to a Plex-supported certificate file (PKCS12)
35 |
36 | ## Contributing
37 |
38 | Pull requests are always welcome!
39 |
40 | If you have suggestions for containers to add or any other improvements, please submit a [Discussion Post](https://github.com/ahembree/ansible-hms-docker/discussions)
41 |
42 | ## Disclaimer
43 |
44 | By using or running this code, you acknowledge and agree that it is provided **"as-is"** without any warranties or guarantees. The author(s) of this project are not responsible for any damages, data loss, or any other consequences that may result from the use or misuse of this code.
45 |
46 | You use this code at your own risk. It is your responsibility to review and test the code in a safe environment before deploying it in production or on any system where data loss, corruption, or security risks could occur.
47 |
48 | If you choose to run or implement this code, you do so with full knowledge of the potential risks involved.
49 |
50 | Also the logos were generated by AI because I'm terrible at designing stuff.
51 |
--------------------------------------------------------------------------------
/gen-docs/docs/paths.md:
--------------------------------------------------------------------------------
1 | # File/Directory Layout
2 |
3 | By default, the content is in the following directory structure; if you wish to change the install location, you must change the `hms_docker_data_path` variable in `inventory/group_vars/all/hmsd_advanced.yml`
4 |
5 | Generated compose file location: `/opt/hms-docker/docker-compose.yml`
6 |
7 | Container data directory: `/opt/hms-docker/apps/`
8 |
9 | Default mount path for local share (known as the `mount_path` in this readme): `/opt/hms-docker/media_data/`
10 |
11 | Media folder that contains movie and TV show folders (known as the `media_path` in this readme): `<mount_path>/_library`
12 |
13 | Movie folder: `<media_path>/Movies`
14 |
15 | TV Show folder: `<media_path>/TV_Shows`
16 |
17 | Secrets file (where sensitive key material is stored, other than the ansible variable files in `inventory/group_vars/all`): `/opt/hms-docker/.env`
18 |
19 | - This file's default ownership and permissions require you to enter the sudo/root password every time you run a `docker compose` command within the project directory
20 |
21 | - If you wish to get around this (and reduce security), you can change the `secrets_env_user`, `secrets_env_group`, and `secrets_env_mode` within the `inventory/group_vars/all/hmsd_advanced.yml` file
22 |
23 | - These recommended values (if you wish to do this) will allow all users with `docker` access to read the file, and thus run `docker compose` commands without needing to run as sudo/root, but will not allow them to modify it (see the example after this list).
24 |
25 | - `secrets_env_user: root`
26 |
27 | - `secrets_env_group: docker`
28 |
29 | - `secrets_env_mode: 0640`
30 |
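31 | For reference, applying the recommended values above in `inventory/group_vars/all/hmsd_advanced.yml` would look like this (a minimal sketch):
32 | 
33 | ```yml
34 | # Allow members of the "docker" group to read (but not modify) the secrets file
35 | secrets_env_user: root
36 | secrets_env_group: docker
37 | secrets_env_mode: "0640"
38 | ```
39 | 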
31 | ## File Paths
32 |
33 | As of release [Version 0.2](release-notes/v0.2.md), file paths were changed in order to support [hardlinks and instant/atomic-moves](https://trash-guides.info/Hardlinks/Hardlinks-and-Instant-Moves/).
34 |
35 | Application configs (Sonarr, Radarr, Plex, etc.) are stored in `/opt/hms-docker/apps/<app_name>/config` by default.
36 |
37 | Network drives will be mounted to a folder within the path specified in the `hms_docker_mount_path` variable in `inventory/group_vars/all/main.yml`. The parent directory of all the mounted folders (`hms_docker_mount_path`) is what is mounted into the required containers at `/data`.
38 |
39 | Hard links are supported as long as the downloads folder and media content folder are on the same filesystem. This means that if you want to put media content on another NAS network share that is on a different underlying filesystem (such as a different physical NAS), you must change the download client to download files to that NAS share's folder within the container.
40 |
41 | ---
42 |
43 | If you were running the playbook before versioning was implemented (August 2024) and then update your local code with the new code in the repo, you will be prompted multiple times with warnings. It is highly recommended to read these warnings and understand any changes being made, as you will likely have to update the paths inside the apps (Sonarr, Radarr, etc.) to point to the new directory locations.
44 |
--------------------------------------------------------------------------------
/gen-docs/docs/release-notes/_category_.json:
--------------------------------------------------------------------------------
1 | {
2 | "label": "Release Notes",
3 | "link": {
4 | "type": "generated-index",
5 | "description": "Release notes for ansible-hms-docker"
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/gen-docs/docs/release-notes/v0.1.md:
--------------------------------------------------------------------------------
1 | # Version 0.1
2 |
3 | Initial versioning implementation; no changes other than writing a new `.hmsd-version` file that will be read on every playbook run.
4 |
5 | If your version is behind the current version, you will be prompted to continue so you are aware of any changes that may be made. It is highly recommended to always read through any changes.
6 |
--------------------------------------------------------------------------------
/gen-docs/docs/release-notes/v0.2.md:
--------------------------------------------------------------------------------
1 | # Version 0.2
2 |
3 | Major changes to file paths inside and outside containers to standardize on one path for all apps in order to support hard links
4 |
5 | * No more multiple volumes per container for data, all data (including multiple NAS paths) is now under 1 `/data` folder in each container
6 |
7 | To update Plex config (source: https://support.plex.tv/articles/201154537-move-media-content-to-a-new-location/):
8 |
9 | * Go to Settings -> Library -> Disable "Empty trash automatically after every scan"
10 |
11 | * Apply playbook
12 |
13 | * Add new Movie and TV paths to the appropriate Plex library
14 |
15 | * Scan for library changes
16 |
17 | * Once scan is finished and media is available, remove the old path from the library config
18 |
19 | To update Arr app configs:
20 |
21 | * Add the new root folder
22 |
23 | * Mass edit the content and change the root path to the new one, select "No, I'll move the files myself" when prompted
24 |
25 | * Edit your lists to also change the root paths
26 |
27 | Remove NAS Transmission download path toggle
28 |
29 | Remove custom Transmission download path
30 |
31 | Remove usenet app optional volume mounts
32 |
--------------------------------------------------------------------------------
/gen-docs/docs/release-notes/v0.3.md:
--------------------------------------------------------------------------------
1 | # Version 1.3
2 |
3 | No changes other than the number; I didn't realize there were already "Tags" for Releases on GitHub, so this just aligned them.
4 |
--------------------------------------------------------------------------------
/gen-docs/docs/release-notes/v1.4.md:
--------------------------------------------------------------------------------
1 | # Version 1.4
2 |
3 | New containers:
4 |
5 | * Maintainerr
6 | * Unpackerr
7 | * Lidarr
8 | * Autobrr
9 | * Speedtest-Tracker
10 | * Recyclarr
11 | * tinyMediaManager
12 | * PASTA
13 | * Netdata
14 |
15 | I now realize while writing these notes that version 0.2 was merged into 1.3 without a new Tag release, whoops...
16 |
--------------------------------------------------------------------------------
/gen-docs/docs/release-notes/v1.5.md:
--------------------------------------------------------------------------------
1 | # Version 1.5
2 |
3 | Modularize the main Docker Compose file to import other containers from files in order to drastically reduce line count and make it easier to modify individual containers
4 |
5 | New container:
6 |
7 | * Wizarr
8 |
9 | ## Version 1.5.1
10 |
11 | New container:
12 |
13 | * Jellyseerr
14 |
--------------------------------------------------------------------------------
/gen-docs/docs/release-notes/v1.6.md:
--------------------------------------------------------------------------------
1 | # Version 1.6
2 |
3 | - Updated documentation pages; new website replaces the readme
4 |
5 | - Removed different "basic" and "advanced" configuration deployments to reduce maintenance; the "advanced" configuration is now the default
6 |
7 | - Fix Overseerr and Jellyseerr Traefik allowlist configuration to obey container map settings
8 |
9 | - Add additional check for new install to bypass update prompt
10 |
11 | - Align Tailscale compose file to official docs
12 |
13 | - Add `/dev/net/tun` device to Transmission container ([#80](https://github.com/ahembree/ansible-hms-docker/issues/80))
14 |
--------------------------------------------------------------------------------
/gen-docs/docs/release-notes/v1.7.md:
--------------------------------------------------------------------------------
1 | # Version 1.7
2 |
3 | ## Breaking changes
4 |
5 | Renamed the following variables, as some are no longer used only for Transmission.
6 |
7 | If running a previous version, run `make update` to pull the latest version of the code; it will then also replace these variables in the `inventory/group_vars/all/transmission.yml` file automatically.
8 |
9 | * `transmission_vpn_provider` --> `hmsdocker_vpn_provider`
10 | * `transmission_vpn_user` --> `hmsdocker_vpn_user`
11 | * `transmission_vpn_pass` --> `hmsdocker_vpn_pass`
12 | * `transmission_ovpn_config_local_path` --> `transmission_ovpn_config_local_dir`
13 |
14 | ## New Containers
15 |
16 | * qbittorrent
17 | * deluge
18 |
19 | Both of these containers come with a built-in VPN, like the Transmission container has had, but these also support WireGuard
20 |
21 | If you have an existing install, there is a new required variable for these: `hmsdocker_vpn_type`. Add it to a `vpn.yml` file, or any other variable file you want, in `inventory/group_vars/all/` (see the example below).
22 |
23 | This variable accepts these values and selects which VPN type to use for the new containers:
24 |
25 | * `openvpn`
26 | * `wireguard`
27 |
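28 | For example, selecting WireGuard for the new containers would look like this (a minimal sketch):
29 | 
30 | ```yml
31 | # inventory/group_vars/all/vpn.yml
32 | hmsdocker_vpn_type: wireguard
33 | ```
34 | 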
28 | ## Other changes
29 |
30 | * change and rename default VPN creds file from `transmission.yml` to `vpn.yml`
31 | * bump docusaurus versions
32 | * add a debug mode that outputs more task info, but note it will also expose "secrets" in the output when enabled
33 | * reduce playbook output size so it's easier to see which containers it is acting on
34 |
35 | ## Misc
36 |
37 | * remove a lot of containers from GitHub Actions due to reduced disk size
38 | * update ansible-lint ignores
39 | * update some docs
40 | * add proxy connection guide
41 | * add info on adding other containers on same host to traefik
42 | * add docs for new containers
43 | * update handlers to use `docker_compose_v2` module instead of shell command
44 | * rename `transmission.yml` task to `vpn_validation.yml`
45 |
--------------------------------------------------------------------------------
/gen-docs/docs/release-notes/v1.8.md:
--------------------------------------------------------------------------------
1 | # Version 1.8
2 |
3 | ## New Containers
4 |
5 | * Huntarr
6 | * Also comes with Swaparr built in
7 | * Tubearchivist
8 | * Pinchflat
9 | * Checkrr
10 |
11 | ## Other changes
12 |
13 | * Created new prereq and postreq container-specific tasks to better manage individual container requirements
14 | * fix small typo in `hmsdocker_netdata_claim_token`
15 | * remove option to specify a "4K instance suffix" for Sonarr and Radarr
16 | * add docs around how to add new containers
17 |
--------------------------------------------------------------------------------
/gen-docs/docs/release-notes/v1.9.md:
--------------------------------------------------------------------------------
1 | # Version 1.9
2 |
3 | ## Changes
4 |
5 | - Add automatic app bootstrapping for the apps (see [the docs](../getting-started/app-bootstrap.md)):
6 | - Sonarr
7 | - Radarr
8 | - Prowlarr
9 | - Lidarr
10 | - Readarr
11 | - Move FlareSolverr toggle to container map
12 | - Will still use existing variable if previous install
13 |
14 | - Move NAS tasks to folder
15 | - Move Plex SSL tasks to Plex Postreqs tasks
16 | - Add Discord invite link
17 | - New container postreqs for Traefik
18 | - New container prereqs for qBittorrent
19 |
20 | ## New Variables
21 |
22 | To support the new app bootstrapping, there is a new `inventory/group_vars/all/app_bootstrap.yml` file with these variables:
23 |
24 | ```yml
25 | # If the playbook should attempt to connect supported apps together automatically.
26 | hmsdocker_app_bootstrap: false
27 |
28 | # If the created connections in the apps should be removed if the target resource is disabled
29 | # Example: Disabling qBittorrent after having it enabled will remove the "HMSD - qBittorrent" download client from Sonarr and Radarr
30 | hmsdocker_app_bootstrap_delete_unused: true
31 |
32 | hmsdocker_app_bootstrap_prowlarr_enabled: true
33 | hmsdocker_app_bootstrap_sonarr_enabled: true
34 | hmsdocker_app_bootstrap_radarr_enabled: true
35 | hmsdocker_app_bootstrap_lidarr_enabled: true
36 | hmsdocker_app_bootstrap_readarr_enabled: true
37 | ```
38 |
--------------------------------------------------------------------------------
/gen-docs/docs/static/img/cloudflare_tunnel_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahembree/ansible-hms-docker/e1df1fd1e4270f10169617199e558b126e9e5293/gen-docs/docs/static/img/cloudflare_tunnel_example.png
--------------------------------------------------------------------------------
/gen-docs/docs/static/img/cloudflare_tunnel_token.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahembree/ansible-hms-docker/e1df1fd1e4270f10169617199e558b126e9e5293/gen-docs/docs/static/img/cloudflare_tunnel_token.png
--------------------------------------------------------------------------------
/gen-docs/docs/static/img/container_connect_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahembree/ansible-hms-docker/e1df1fd1e4270f10169617199e558b126e9e5293/gen-docs/docs/static/img/container_connect_example.png
--------------------------------------------------------------------------------
/gen-docs/docs/static/img/hmsd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahembree/ansible-hms-docker/e1df1fd1e4270f10169617199e558b126e9e5293/gen-docs/docs/static/img/hmsd.png
--------------------------------------------------------------------------------
/gen-docs/docs/static/img/proxy_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahembree/ansible-hms-docker/e1df1fd1e4270f10169617199e558b126e9e5293/gen-docs/docs/static/img/proxy_example.png
--------------------------------------------------------------------------------
/gen-docs/docusaurus.config.js:
--------------------------------------------------------------------------------
1 | // @ts-check
2 | // `@type` JSDoc annotations allow editor autocompletion and type checking
3 | // (when paired with `@ts-check`).
4 | // There are various equivalent ways to declare your Docusaurus config.
5 | // See: https://docusaurus.io/docs/api/docusaurus-config
6 |
7 | import {themes as prismThemes} from 'prism-react-renderer';
8 |
9 | // This runs in Node.js - Don't use client-side code here (browser APIs, JSX...)
10 |
11 | /** @type {import('@docusaurus/types').Config} */
12 | const config = {
13 | title: 'HMS-Docker Docs',
14 | tagline: 'Automate Everything',
15 | favicon: 'img/favicon.ico',
16 |
17 | url: 'https://docs.hmsdocker.dev/',
18 | baseUrl: '/',
19 | trailingSlash: false,
20 |
21 | organizationName: 'ahembree',
22 | projectName: 'ansible-hms-docker',
23 | deploymentBranch: 'gh-pages',
24 |
25 | onBrokenLinks: 'throw',
26 | onBrokenMarkdownLinks: 'warn',
27 |
28 | i18n: {
29 | defaultLocale: 'en',
30 | locales: ['en'],
31 | },
32 |
33 | presets: [
34 | [
35 | 'classic',
36 | /** @type {import('@docusaurus/preset-classic').Options} */
37 | ({
38 | docs: {
39 | routeBasePath: '/',
40 | },
41 | blog: false,
42 | theme: {
43 | customCss: './src/css/custom.css',
44 | },
45 | }),
46 | ],
47 | ],
48 |
49 | themes: [
50 | [
51 | '@easyops-cn/docusaurus-search-local',
52 | /** @type {import("@easyops-cn/docusaurus-search-local").PluginOptions} */
53 | {
54 | hashed: true,
55 | indexBlog: false,
56 | docsDir: './docs',
57 | docsRouteBasePath: '/',
58 | explicitSearchResultPath: true,
59 | },
60 | ],
61 | ],
62 |
63 | themeConfig:
64 | /** @type {import('@docusaurus/preset-classic').ThemeConfig} */
65 | ({
66 | image: 'img/hmsd.png',
67 | colorMode: {
68 | defaultMode: 'dark',
69 | respectPrefersColorScheme: false,
70 | },
71 | navbar: {
72 | title: 'HMS-Docker Docs',
73 | logo: {
74 | alt: 'HMS-Docker Logo',
75 | src: 'img/hmsd.png',
76 | },
77 | items: [
78 | {
79 | href: 'https://github.com/ahembree/ansible-hms-docker',
80 | label: 'GitHub',
81 | position: 'right',
82 | },
83 | ],
84 | },
85 | footer: {
86 | style: 'dark',
87 | copyright: `Copyright © ${new Date().getFullYear()}. Built with Docusaurus.`,
88 | },
89 | prism: {
90 | theme: prismThemes.github,
91 | darkTheme: prismThemes.dracula,
92 | },
93 | }),
94 | };
95 |
96 | export default config;
97 |
--------------------------------------------------------------------------------
/gen-docs/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "gen-docs",
3 | "version": "0.0.0",
4 | "private": true,
5 | "scripts": {
6 | "docusaurus": "docusaurus",
7 | "start": "docusaurus start",
8 | "build": "docusaurus build",
9 | "swizzle": "docusaurus swizzle",
10 | "deploy": "docusaurus deploy",
11 | "clear": "docusaurus clear",
12 | "serve": "docusaurus serve",
13 | "write-translations": "docusaurus write-translations",
14 | "write-heading-ids": "docusaurus write-heading-ids"
15 | },
16 | "dependencies": {
17 | "@docusaurus/core": "^3.7.0",
18 | "@docusaurus/preset-classic": "^3.7.0",
19 | "@easyops-cn/docusaurus-search-local": "^0.46.1",
20 | "@mdx-js/react": "^3.0.0",
21 | "clsx": "^2.0.0",
22 | "prism-react-renderer": "^2.4.1",
23 | "react": "^18.0.0",
24 | "react-dom": "^18.0.0"
25 | },
26 | "devDependencies": {
27 | "@docusaurus/module-type-aliases": "^3.6.3",
28 | "@docusaurus/types": "^3.6.3"
29 | },
30 | "browserslist": {
31 | "production": [
32 | ">0.5%",
33 | "not dead",
34 | "not op_mini all"
35 | ],
36 | "development": [
37 | "last 3 chrome version",
38 | "last 3 firefox version",
39 | "last 5 safari version"
40 | ]
41 | },
42 | "engines": {
43 | "node": ">=18.0"
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/gen-docs/sidebars.js:
--------------------------------------------------------------------------------
1 | // @ts-check
2 |
3 | // This runs in Node.js - Don't use client-side code here (browser APIs, JSX...)
4 |
5 | /**
6 | *
7 | @type {import('@docusaurus/plugin-content-docs').SidebarsConfig}
8 | */
9 | const sidebars = {
10 | sidebar: [{type: 'autogenerated', dirName: '.'}],
11 | };
12 |
13 | export default sidebars;
14 |
--------------------------------------------------------------------------------
/gen-docs/src/css/custom.css:
--------------------------------------------------------------------------------
1 | /**
2 | * Any CSS included here will be global. The classic template
3 | * bundles Infima by default. Infima is a CSS framework designed to
4 | * work well for content-centric websites.
5 | */
6 |
7 | /* You can override the default Infima variables here. */
8 | :root {
9 | --ifm-color-primary: #2e8555;
10 | --ifm-color-primary-dark: #29784c;
11 | --ifm-color-primary-darker: #277148;
12 | --ifm-color-primary-darkest: #205d3b;
13 | --ifm-color-primary-light: #33925d;
14 | --ifm-color-primary-lighter: #359962;
15 | --ifm-color-primary-lightest: #3cad6e;
16 | --ifm-code-font-size: 95%;
17 | --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1);
18 | }
19 |
20 | /* For readability concerns, you should choose a lighter palette in dark mode. */
21 | [data-theme='dark'] {
22 | --ifm-color-primary: #25c2a0;
23 | --ifm-color-primary-dark: #21af90;
24 | --ifm-color-primary-darker: #1fa588;
25 | --ifm-color-primary-darkest: #1a8870;
26 | --ifm-color-primary-light: #29d5b0;
27 | --ifm-color-primary-lighter: #32d8b4;
28 | --ifm-color-primary-lightest: #4fddbf;
29 | --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3);
30 | }
31 |
--------------------------------------------------------------------------------
/gen-docs/static/.nojekyll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahembree/ansible-hms-docker/e1df1fd1e4270f10169617199e558b126e9e5293/gen-docs/static/.nojekyll
--------------------------------------------------------------------------------
/gen-docs/static/CNAME:
--------------------------------------------------------------------------------
1 | docs.hmsdocker.dev
2 |
--------------------------------------------------------------------------------
/gen-docs/static/img/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahembree/ansible-hms-docker/e1df1fd1e4270f10169617199e558b126e9e5293/gen-docs/static/img/favicon.ico
--------------------------------------------------------------------------------
/gen-docs/static/img/hmsd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahembree/ansible-hms-docker/e1df1fd1e4270f10169617199e558b126e9e5293/gen-docs/static/img/hmsd.png
--------------------------------------------------------------------------------
/hms-docker.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: HMS-Docker
3 | hosts: all
4 | become: true
5 | gather_facts: true
6 | vars:
7 | hmsd_current_version: 1.9
8 | hmsd_version_file: "{{ hms_docker_data_path }}/.hmsd-version"
9 | regex: '[^A-Za-z0-9._-]'
10 | replace: '_'
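11 | # Matches the API key between the <ApiKey> tags in an Arr app's config.xml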
11 | arr_apikey: '(?<=\<ApiKey\>)\w*(?=\<\/ApiKey\>)'
12 | debug_mode: false
13 |
14 | roles:
15 | - galaxy-roles/geerlingguy.docker
16 |
17 | tasks:
18 | - name: Ensure Nvidia GPU role if enabled
19 | ansible.builtin.import_role:
20 | name: gpu
21 | when: enable_nvidia_gpu | default(false)
22 |
23 | - name: Ensure HMS-Docker role
24 | ansible.builtin.import_role:
25 | name: hmsdocker
26 |
--------------------------------------------------------------------------------
/inventory/hosts.yml:
--------------------------------------------------------------------------------
1 | all:
2 | hosts:
3 | localhost:
4 | ansible_connection: local
5 |
6 |
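7 | # Example (illustrative) entry for managing a remote host instead of localhost:
8 | #   media-server:
9 | #     ansible_host: 192.168.1.10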
--------------------------------------------------------------------------------
/roles/_archive/docker/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | docker_platform_agnostic_requirements_state: present # default: present // valid: present, absent
3 | docker_prereq_packages_state: present # default: present // valid: present, absent
4 | docker_package_state: present # default: present
5 | docker_service_state: started # default: started
6 | docker_service_enabled: yes # default: yes
7 | docker_compose_version: "v2.23.3" # default: 'v2.23.3'
8 | docker_repo_state: present # default: present
9 | docker_enable_live_restore: no # default: no
10 | docker_pip_package_state: present # default: present
11 | distribution_override: "{{ 'centos' if ansible_distribution | lower == 'almalinux' else ansible_distribution | lower }}"
12 | hmsdocker_family_override: "{{ 'redhat' if ansible_os_family | lower == 'almalinux' else ansible_os_family | lower }}"
13 |
--------------------------------------------------------------------------------
/roles/_archive/docker/files/docker-daemon.json:
--------------------------------------------------------------------------------
1 | {
2 | "live-restore": true
3 | }
4 |
--------------------------------------------------------------------------------
/roles/_archive/docker/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart docker
3 | service:
4 | name: docker
5 | state: restarted
6 |
--------------------------------------------------------------------------------
/roles/_archive/docker/tasks/debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure previous Debian Docker packages are absent.
3 | package:
4 | name:
5 | - docker
6 | - docker-engine
7 | - docker.io
8 | - containerd
9 | - runc
10 | state: absent
11 |
12 | - name: Ensure Debian Docker requirements.
13 | package:
14 | name:
15 | - apt-transport-https
16 | - ca-certificates
17 | - curl
18 | - gnupg
19 | - lsb-release
20 | state: "{{ docker_prereq_packages_state }}"
21 |
22 | - name: Ensure Debian Docker GPG Key.
23 | apt_key:
24 | url: https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }}/gpg
25 | state: "{{ docker_repo_state }}"
26 |
27 | - name: Ensure Debian Docker stable repository.
28 | apt_repository:
29 | repo: deb https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }} {{ ansible_facts['distribution_release'] }} stable
30 | state: "{{ docker_repo_state }}"
31 |
--------------------------------------------------------------------------------
/roles/_archive/docker/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure platform agnostic requirements.
3 | package:
4 | name:
5 | - python3-pip
6 | state: "{{ docker_platform_agnostic_requirements_state }}"
7 |
8 | - name: Run RHEL-based Docker install.
9 | import_tasks: redhat.yml
10 | when: hmsdocker_family_override | lower == "redhat"
11 |
12 | - name: Run Debian-based Docker install.
13 | import_tasks: debian.yml
14 | when: ansible_facts["os_family"] | lower == "debian"
15 |
16 | - name: Ensure Docker packages.
17 | package:
18 | name:
19 | - docker-ce
20 | - docker-ce-cli
21 | - containerd.io
22 | state: "{{ docker_package_state }}"
23 | when: not ansible_check_mode
24 |
25 | - name: Ensure Docker daemon config
26 | template:
27 | src: docker-daemon.json
28 | dest: /etc/docker/daemon.json
29 | notify: Restart docker
30 | when: docker_enable_live_restore
31 |
32 | - name: Ensure docker-compose.
33 | get_url:
34 | url: https://github.com/docker/compose/releases/download/{{ docker_compose_version }}/docker-compose-{{ ansible_system }}-{{ ansible_machine }}
35 | dest: /usr/local/bin/docker-compose
36 | owner: root
37 | group: root
38 | mode: 0755
39 |
40 | - name: Ensure docker-compose symlink.
41 | file:
42 | src: /usr/local/bin/docker-compose
43 | dest: /usr/bin/docker-compose
44 | owner: root
45 | group: root
46 | state: link
47 | when: not ansible_check_mode
48 |
49 | - name: Ensure pip Docker packages.
50 | pip:
51 | name:
52 | - docker==6.1.3
53 | - docker-compose
54 | state: "{{ docker_pip_package_state }}"
55 |
56 | - name: Ensure Docker service.
57 | service:
58 | name: docker
59 | state: "{{ docker_service_state }}"
60 | enabled: "{{ docker_service_enabled }}"
61 | when: not ansible_check_mode
62 |
63 | - name: Ensure docker users are in the docker group.
64 | user:
65 | name: "{{ item }}"
66 | groups:
67 | - docker
68 | append: yes
69 | with_items: "{{ docker_users }}"
70 | when:
71 | - docker_users is defined
72 | - not ansible_check_mode
73 |
--------------------------------------------------------------------------------
/roles/_archive/docker/tasks/redhat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure previous RHEL Docker packages are absent.
3 | package:
4 | name:
5 | - docker
6 | - docker-client
7 | - docker-client-latest
8 | - docker-common
9 | - docker-latest
10 | - docker-latest-logrotate
11 | - docker-logrotate
12 | - docker-engine
13 | state: absent
14 |
15 | - name: Ensure RHEL required packages.
16 | package:
17 | name:
18 | - epel-release
19 | state: "{{ docker_prereq_packages_state }}"
20 |
21 | - name: Ensure CentOS requirements.
22 | block:
23 | - name: Ensure yum-utils.
24 | package:
25 | name: yum-utils
26 | state: "{{ docker_prereq_packages_state }}"
27 | when:
28 | - distribution_override == "centos"
29 |
30 | - name: Ensure Fedora requirements.
31 | block:
32 | - name: Ensure previous Fedora Docker packages are absent.
33 | package:
34 | name:
35 | - docker-selinux
36 | - docker-engine-selinux
37 | state: absent
38 |
39 | - name: Ensure Fedora dnf-plugins-core.
40 | package:
41 | name: dnf-plugins-core
42 | state: "{{ docker_prereq_packages_state }}"
43 | when:
44 | - ansible_facts['distribution'] | lower == "fedora"
45 |
46 | - name: Ensure RHEL Docker repo.
47 | yum_repository:
48 | name: docker-ce-stable
49 | description: "Docker CE Stable - $basearch"
50 | baseurl: https://download.docker.com/linux/{{ distribution_override | lower }}/$releasever/$basearch/stable
51 | enabled: yes
52 | gpgcheck: yes
53 | gpgkey: https://download.docker.com/linux/{{ distribution_override | lower }}/gpg
54 | state: "{{ docker_repo_state }}"
55 |
56 |
--------------------------------------------------------------------------------
/roles/gpu/defaults/main.yml:
--------------------------------------------------------------------------------
1 | gpu_prereq_packages_state: present
2 | gpu_distribution_override: "{{ 'centos' if ansible_distribution | lower == 'almalinux' else ansible_distribution | lower }}"
3 | gpu_family_override: "{{ 'redhat' if ansible_os_family | lower == 'almalinux' else ansible_os_family | lower }}"
4 |
--------------------------------------------------------------------------------
/roles/gpu/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart docker
3 | ansible.builtin.service:
4 | name: docker
5 | state: restarted
6 |
--------------------------------------------------------------------------------
/roles/gpu/tasks/debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure nvidia-container-toolkit apt GPG key
3 | ansible.builtin.apt_key:
4 | url: https://nvidia.github.io/libnvidia-container/gpgkey
5 | keyring: /etc/apt/trusted.gpg.d/libnvidia-container.gpg
6 | state: present
7 |
8 | - name: Ensure nvidia-container-toolkit repo
9 | ansible.builtin.apt_repository:
10 | repo: deb [signed-by=/etc/apt/trusted.gpg.d/libnvidia-container.gpg] https://nvidia.github.io/libnvidia-container/stable/deb/$(ARCH) /
11 | state: present
12 | filename: nvidia-container-toolkit
13 |
--------------------------------------------------------------------------------
/roles/gpu/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Run RHEL-based install.
3 | ansible.builtin.import_tasks: redhat.yml
4 | when: gpu_family_override | lower == "redhat"
5 |
6 | - name: Run Debian-based install.
7 | ansible.builtin.import_tasks: debian.yml
8 | when: ansible_facts["os_family"] | lower == "debian"
9 |
10 | - name: Ensure nvidia-container-toolkit package
11 | ansible.builtin.package:
12 | name: nvidia-container-toolkit
13 | update_cache: true
14 | state: "{{ gpu_prereq_packages_state }}"
15 |
16 | - name: Verify nvidia-container-runtime-hook is in $PATH
17 | ansible.builtin.shell: "which nvidia-container-runtime-hook"
18 | register: nvidia_container_runtime_hook_path
19 | changed_when: false
20 |
21 | - name: Exit if nvidia-container-runtime-hook is not in $PATH
22 | ansible.builtin.fail:
23 | msg: "nvidia-container-runtime-hook not found in $PATH"
24 | when: nvidia_container_runtime_hook_path.rc != 0
25 |
26 | - name: Check if Docker daemon config exists
27 | ansible.builtin.stat:
28 | path: '/etc/docker/daemon.json'
29 | register: docker_daemon_config_path
30 |
31 | - name: Get existing Docker daemon config
32 | ansible.builtin.slurp:
33 | path: '/etc/docker/daemon.json'
34 | register: docker_daemon_config
35 | when: docker_daemon_config_path.stat.exists
36 |
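37 | # Note: `nvidia-ctk runtime configure` typically adds a "nvidia" entry under "runtimes"
38 | # in /etc/docker/daemon.json, e.g. "runtimes": { "nvidia": { "path": "nvidia-container-runtime" } },
39 | # which is what the regex_search below checks for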
37 | - name: Run nvidia-ctk command if existing docker daemon config doesn't exist or does not contain nvidia-container-runtime
38 | ansible.builtin.shell:
39 | cmd: sudo nvidia-ctk runtime configure --runtime=docker
40 | when: not docker_daemon_config_path.stat.exists or not docker_daemon_config['content'] | b64decode | regex_search('nvidia-container-runtime') | default(false)
41 | notify: Restart docker
42 |
43 | # Flush handlers to run them immediately
44 | - name: Flush handlers to force run Restart Docker handler
45 | ansible.builtin.meta: flush_handlers
46 |
47 | - name: Verify CUDA container works
48 | community.docker.docker_container:
49 | name: nvidia-gpu-validation
50 | image: ubuntu
51 | command: nvidia-smi
52 | runtime: nvidia
53 | state: started
54 | device_requests:
55 | - driver: nvidia
56 | count: -1
57 | capabilities:
58 | - gpu
59 |
60 | - name: Remove CUDA container
61 | community.docker.docker_container:
62 | name: nvidia-gpu-validation
63 | state: absent
64 |
--------------------------------------------------------------------------------
/roles/gpu/tasks/redhat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure Packages
3 | ansible.builtin.package:
4 | name: "{{ item }}"
5 | state: present
6 | loop:
7 | - tar
8 | - bzip2
9 | - make
10 | - automake
11 | - gcc
12 | - gcc-c++
13 | - vim
14 | - pciutils
15 | - elfutils-libelf-devel
16 | - libglvnd-devel
17 | - kernel-devel
18 |
19 | - name: Ensure libnvidia Docker repo
20 | ansible.builtin.yum_repository:
21 | name: nvidia-container-toolkit
22 | description: nvidia-container-toolkit
23 | baseurl: https://nvidia.github.io/libnvidia-container/stable/rpm/$basearch
24 | enabled: 1
25 | gpgcheck: 0
26 | repo_gpgcheck: 1
27 | gpgkey: https://nvidia.github.io/libnvidia-container/gpgkey
28 | sslverify: 1
29 | sslcacert: /etc/pki/tls/certs/ca-bundle.crt
30 |
--------------------------------------------------------------------------------
/roles/hmsdocker/defaults/main/app_bootstrap.yml:
--------------------------------------------------------------------------------
1 | # If the playbook should attempt to connect supported apps together automatically.
2 | hmsdocker_app_bootstrap: false
3 |
4 | # If the created connections in the apps should be removed if the target resource is disabled
5 | # Example: Disabling qBittorrent after having it enabled will remove the "HMSD - qBittorrent" download client from Sonarr and Radarr
6 | hmsdocker_app_bootstrap_delete_unused: true
7 |
8 | hmsdocker_app_bootstrap_prowlarr_enabled: true
9 | hmsdocker_app_bootstrap_sonarr_enabled: true
10 | hmsdocker_app_bootstrap_radarr_enabled: true
11 | hmsdocker_app_bootstrap_lidarr_enabled: true
12 | hmsdocker_app_bootstrap_readarr_enabled: true
13 |
--------------------------------------------------------------------------------
/roles/hmsdocker/defaults/main/authentik.yml:
--------------------------------------------------------------------------------
1 | ### Authentik settings
2 | # This OR the option in the container map will enable or disable the Authentik container
3 | authentik_enabled: no
4 | authentik_geoip_account_id: ""
5 | authentik_geoip_license_key: ""
6 |
7 | # Advanced Authentik settings
8 | authentik_pg_user: authentik
9 | authentik_pg_db: authentik
10 | authentik_key_path: "{{ hms_docker_data_path }}/.authentik.key"
11 | authentik_pgpass_path: "{{ hms_docker_data_path }}/.authentik.pgpass"
12 | authentik_external_host: 'https://{{ hms_docker_container_map["authentik"]["proxy_host_rule"] }}.{{ hms_docker_domain }}' # This needs to match the host rule that routes traffic to the Authentik container
13 |
--------------------------------------------------------------------------------
/roles/hmsdocker/defaults/main/cloudflare.yml:
--------------------------------------------------------------------------------
1 | ### Cloudflare settings
2 |
3 | ## DDNS
4 | # Enables or disables the Cloudflare DDNS (Dynamic-DNS) container // default: "no"
5 | cloudflare_ddns_enabled: no
6 |
7 | # Your Cloudflare API token that has read/write permissions for your DNS zone
8 | # This will also be provided to Traefik if using SSL (assuming Cloudflare is your DNS provider)
9 | cloudflare_api_token: ""
10 |
11 | # The domain that will be used for the Cloudflare DDNS container // default: "{{ hms_docker_domain }}"
12 | # Automatically uses the same domain defined in `main.yml`
13 | cloudflare_ddns_domain: "{{ hms_docker_domain }}"
14 |
15 | # The A (or AAAA) record to be created // default: "overseerr"
16 | cloudflare_ddns_subdomain: overseerr
17 |
18 | # Whether to proxy the above record through Cloudflare // default: "true"
19 | cloudflare_ddns_proxied: "true"
20 |
21 | # Deletes the record when the container is stopped // default: "false"
22 | cloudflare_ddns_delete_record_on_stop: "false"
23 |
24 | # Creates an AAAA record for IPv6 // default: "no"
25 | cloudflare_ddns_create_ipv6_aaaa_record: no
26 |
27 |
28 | ## Cloudflare Tunnel
29 | # Enables or disables the Cloudflare Tunnel container // default: "no"
30 | cloudflare_tunnel_enabled: no
31 |
32 | # Your Cloudflare Tunnel token, see https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/install-and-setup/tunnel-guide/remote
33 | # Be sure to only add the actual token, it will be a very long string of random characters
34 | cloudflare_tunnel_token: ""
35 |
--------------------------------------------------------------------------------
/roles/hmsdocker/defaults/main/container_settings.yml:
--------------------------------------------------------------------------------
1 | ### Container settings
2 | # The "state" of all containers // default: "present" // valid: "present" // "absent"
3 | hms_docker_compose_container_state: present
4 |
5 | # Controls how the containers are restarted upon server or docker restart // default: "unless-stopped" // valid: "always", "unless-stopped", "on-failure"
6 | container_restart_policy: unless-stopped
7 |
8 | # Enables the "watchtower" container to automatically download and apply container updates // default: "yes"
9 | container_enable_auto_updates: yes
10 |
11 | # The cron-style schedule for the watchtower container to check for updates in UTC time // default: "0 0 9 * * *"
12 | container_auto_update_schedule: "0 0 9 * * *"
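13 | # Note: watchtower uses a 6-field cron expression (with a leading seconds field), so the default above runs daily at 09:00:00 UTC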
13 |
14 | # Will stop and remove containers that you disable within the container map (defined below) // default: "yes"
15 | container_remove_orphans: yes
16 |
17 | # The timezone to use for the containers // default: "America/New_York"
18 | container_timezone: America/New_York
19 |
20 | # User and Group ID's to use for the running processes in the containers (may cause or be the cause of permissions issues)
21 | # default: "1234"
22 | container_uid: 1234
23 | container_gid: 1234
24 |
25 | # This will expose each container's individual ports on the host (check the README for more info on which ports map to which containers) // default: "no"
26 | # If you disable traefik within the container map, the playbook will automatically override this setting and expose the ports
27 | container_expose_ports: no
28 |
--------------------------------------------------------------------------------
/roles/hmsdocker/defaults/main/gpu.yml:
--------------------------------------------------------------------------------
1 | ### GPU settings
2 | # Requires supported hardware
3 |
4 | # Enables Nvidia GPU hardware acceleration within the Plex container // default: "no"
5 | # Requires the Nvidia drivers to already be installed and working
6 | enable_nvidia_gpu: no
7 |
8 | # Enables Intel GPU hardware acceleration within the Plex container // default: "no"
9 | enable_intel_gpu: no
10 |
--------------------------------------------------------------------------------
/roles/hmsdocker/defaults/main/hmsd_advanced.yml:
--------------------------------------------------------------------------------
1 | # The `project_name` controls the name of the base folders created (e.g. "/opt/<project_name>/" and "/mnt/<project_name>/") // default: "hms-docker"
2 | project_name: hms-docker
3 |
4 | # Where container data and configs are stored // default: "/opt/{{ project_name }}"
5 | hms_docker_data_path: "/opt/{{ project_name }}"
6 |
7 | # Where the container data is stored // default: "{{ hms_docker_data_path }}/apps"
8 | hms_docker_apps_path: "{{ hms_docker_data_path }}/apps"
9 |
10 | # Ownership of the secrets (.env) file
11 | secrets_env_user: root
12 | secrets_env_group: root
13 | secrets_env_mode: "0600"
14 |
15 | # Most home networks are a "/24" network, so this is the default // default: "24"
16 | # If you don't know what this means, leave the next 2 lines alone
17 | # If you know your specific network mask, you can change that here
18 | # These are used to generate Traefik allow-list rules for the proxy so only internal networks are permitted to access certain containers
19 | # Also controls how some containers treat local network traffic (e.g. Plex, transmission)
20 | hms_docker_subnet_mask: "24"
21 | hms_docker_network_subnet: "{{ ansible_default_ipv4.network }}/{{ hms_docker_subnet_mask }}"
22 |
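23 | # Example (illustrative): a host at 192.168.1.50 with the default /24 mask above
24 | # resolves hms_docker_network_subnet to "192.168.1.0/24"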
--------------------------------------------------------------------------------
/roles/hmsdocker/defaults/main/homepage_api_keys.yml:
--------------------------------------------------------------------------------
1 | # Other keys not listed here may be retrieved during a playbook run from the app's config file
2 |
3 | # NZB
4 | homepage_nzbget_key:
5 |
6 | # Authentik
7 | homepage_authentik_key:
8 |
9 | # Portainer
10 | homepage_portainer_key:
11 |
12 | # Jellyfin
13 | homepage_jellyfin_key:
14 |
15 | # Emby
16 | homepage_emby_key:
17 |
18 | # Autobrr
19 | homepage_autobrr_key:
20 |
--------------------------------------------------------------------------------
/roles/hmsdocker/defaults/main/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ### Any variables such as "{{ }}" will reference a variable that was defined earlier in this file (or one that exists in any file in the `roles/hmsdocker/defaults/main` directory)
3 |
4 | #######################################################################
5 | ### HMS-Docker settings
6 |
7 | # The domain used for the Traefik proxy Host rules, SSL certificate (if enabled), and DDNS (if enabled) // default: "home.local"
8 | hms_docker_domain: home.local
9 |
10 | # The type of "media share" (if using a NAS) to use for reading/writing media data, such as movies and tv // default: "local"
11 | # Options:
12 | # `local`: local folder path
13 | # `cifs`: CIFS or SMB share
14 | # `nfs`: NFS share
15 | hms_docker_media_share_type: local
16 |
17 | # The path where all media-related data (including external shares) will be mounted as subdirectories // default: "/opt/{{ project_name }}"
18 | # Recommended to change this if using `cifs` or `nfs` to a local mount point, such as "/mnt/{{ project_name }}"
19 | hms_docker_mount_path: "/opt/{{ project_name }}"
20 |
21 | # The name of the "primary" mounted folder
22 | hms_docker_primary_mount_name: "media_data"
23 |
24 | # The name of the folder that will have the library folders defined below // default: "_library"
25 | hms_docker_library_folder_name: "_library"
26 | # The name of the folder that will store the usenet and torrent downloads // default: "_downloads"
27 | hms_docker_downloads_folder_name: "_downloads"
28 |
29 | # The name of the library folders that will be created within the library folder that was defined above
30 | # Valid `type` values are:
31 | # `movies`
32 | # `tv_shows`
33 |
34 | # The `folder_name` is the name of the folder that will be created for that `type` of library
35 | hms_docker_library_folders:
36 | [
37 | {
38 | type: "movies",
39 | folder_name: "Movies"
40 | },
41 | {
42 | type: "tv_shows",
43 | folder_name: "TV_Shows"
44 | }
45 | ]
46 |
47 | ### End of HMS-Docker settings
48 | #######################################################################
49 |
--------------------------------------------------------------------------------
/roles/hmsdocker/defaults/main/nas.yml:
--------------------------------------------------------------------------------
1 | ### NAS settings
2 | ## Only used if the `hms_docker_media_share_type` is NOT set to `local`
3 |
4 | ## NAS client general settings
5 | # Controls the "install state" of the required package // default: "present" // valid: "present", "latest", "absent"
6 | nas_client_package_state: present
7 |
8 | # Controls the "mount state" of the remote share // default: "mounted" // valid: present, absent, mounted, unmounted, remounted
9 | nas_client_mount_state: mounted
10 |
11 | # Controls if the mount will be enabled on boot // default: "yes"
12 | nas_client_mount_on_boot: yes
13 |
--------------------------------------------------------------------------------
/roles/hmsdocker/defaults/main/nas_additional.yml:
--------------------------------------------------------------------------------
1 | # If you have more than one remote share path to use, you can enable this setting and define them below. Please be sure to read the required values for each share type (CIFS, NFS, local)
2 | nas_client_use_additional_paths: no
3 |
4 | ### Follow the example template after these required values
5 | # Required values for `local` folder type:
6 | # `name`: Friendly name of the path
7 | # `local_mount_path`: Local path to the folder
8 | # `type`: Type of path, valid: local
9 |
10 | # Required values for `nfs` folder type:
11 | # `name`: Friendly name of the path
12 | # `remote_path`: Remote path to the folder
13 | # `local_mount_path`: Local path to where it will be mounted
14 | # `type`: Type of path, valid: nfs
15 | # `nfs_opts`: NFS options, default: defaults
16 |
17 | # Required values for `cifs` folder type:
18 | # `name`: Friendly name of the path
19 | # `remote_path`: Remote path to the folder
20 | # `local_mount_path`: Local path to where it will be mounted
21 | # `type`: Type of path, valid: cifs
22 | # `cifs_username`: CIFS username, default: ""
23 | # `cifs_password`: CIFS password, default: ""
24 | # `cifs_opts`: CIFS options, default: rw,soft
25 |
26 | # This should be scalable to as many different shares as you want
27 | # This is an example template
28 |
29 | # It is HIGHLY recommended to set the `local_mount_path` to a subdirectory of `{{ hms_docker_mount_path }}` in order to support hard links
30 | nas_client_remote_additional_paths:
31 | [
32 | {
33 | name: "Media NAS 2",
34 | remote_path: "nas.example.com:/volume1/media_2",
35 | local_mount_path: "{{ hms_docker_mount_path }}/nas_share_2",
36 | type: nfs,
37 | nfs_opts: defaults,
38 | },
39 | #{
40 | # name: "Media NAS 3",
41 | # remote_path: "//nas.example.com/media_3",
42 | # local_mount_path: "{{ hms_docker_mount_path }}/nas_share_3",
43 | # type: cifs,
44 | # cifs_username: "",
45 | # cifs_password: "",
46 | # cifs_opts: "rw,soft",
47 | #},
48 | ]
49 |
--------------------------------------------------------------------------------
/roles/hmsdocker/defaults/main/nas_cifs.yml:
--------------------------------------------------------------------------------
1 | ## NAS client CIFS settings, only if `hms_docker_media_share_type` is set to `cifs`
2 | # The CIFS/SMB remote share path to connect to // default: "//nas.example.com/share"
3 | nas_client_remote_cifs_path: "//nas.example.com/share"
4 |
5 | ## WARNING: these credentials will be stored in plaintext within the `hms_docker_data_path` folder, but will be owned by `root:root` with `0600` permissions, so only those with root or sudo access can read
6 | # The username to use when connecting to the remote share
7 | nas_client_cifs_username: ""
8 | # The password to use when connecting to the remote share
9 | nas_client_cifs_password: ""
10 |
11 | # The CIFS options to use for the mount, Google should be able to help troubleshoot // default: "rw,soft"
12 | nas_client_cifs_opts: rw,soft
13 |
--------------------------------------------------------------------------------
/roles/hmsdocker/defaults/main/nas_nfs.yml:
--------------------------------------------------------------------------------
1 | ## NAS client NFS settings, only if `hms_docker_media_share_type` is set to `nfs`
2 | # The NFS remote share path to connect to // default: "nas.example.com:/share"
3 | nas_client_remote_nfs_path: "nas.example.com:/share"
4 |
5 | # The NFS options to use for the mount, Google should be able to help troubleshoot // default: "defaults"
6 | nas_client_nfs_opts: defaults
7 |
--------------------------------------------------------------------------------
/roles/hmsdocker/defaults/main/plex.yml:
--------------------------------------------------------------------------------
1 | ### Plex Settings
2 |
3 | # Visit https://plex.tv/claim to obtain this token specific to your account
4 | plex_claim_token: ""
5 |
6 | # This will grab the default interface IPv4 address of the host // default: "{{ ansible_default_ipv4.address }}"
7 | plex_advertise_ip: "{{ ansible_default_ipv4.address }}"
8 |
9 | # Transcode folder location for Plex.
10 | # If you have enough RAM, you can change this to "/dev/shm/plex_transcode" to use RAM for transcoding
11 | plex_transcode_folder: "/tmp/{{ project_name }}/plex_transcode" # default: "/tmp/{{ project_name }}/plex_transcode"
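12 | # plex_transcode_folder: "/dev/shm/plex_transcode"  # commented example of the RAM-backed option mentioned above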
12 |
13 | # Adds a script to convert a Traefik certificate file to a Plex-supported certificate file so you can define a custom certificate for Plex
14 | # This script will also automatically update the Plex config file to point to the converted certificate file (if enabled a little further below)
15 | # This script runs at 5am
16 | # NOTE: You must also have "traefik_ssl_enabled" set to "yes"
17 | # Enables the script and other resources // default: no // valid: yes, no
18 | hms_docker_plex_ssl_enabled: no
19 | # The subdomain that will be published to Plex and is within scope of the certificates being generated // default: plex
20 | # Example: "plex" if the subdomain record were "plex.example.com"
21 | hms_docker_plex_ssl_subdomain: plex
22 | # If the script should update the Plex server config file to point to the newly converted certificate // default: no // valid: yes, no
23 | hms_docker_plex_ssl_update_config: no
24 | # If the script should restart the Plex server after modifying the config // default: no // valid: yes, no
25 | hms_docker_plex_ssl_restart_plex: no
26 | # The passphrase used to protect the certificate, leave as "null" if there should not be one // default: null
27 | hms_docker_plex_ssl_cert_pass: null
28 |
--------------------------------------------------------------------------------
/roles/hmsdocker/defaults/main/scripts.yml:
--------------------------------------------------------------------------------
1 | # Enable custom scripts
2 | custom_scripts_enabled: no
3 |
4 |
5 | ##### Monitoring scripts
6 | monitoring_scripts_enabled: no
7 |
8 | ### Media Availability script settings
9 | ## Checks to see if a specific media item is actually available on disk (one that you know you'll never delete)
10 | ## Useful for when there is an issue with a network share that causes media to show as unavailable, but the plex service is still online
11 | media_avail_script_enabled: no
12 |
13 | # The URL from Uptime Kuma for a "Push" notification
14 | # Remove the "?status=up&msg=OK&ping=" from the end; that part is added by the script
15 | media_avail_kuma_push_url:
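16 | # Example (illustrative): media_avail_kuma_push_url: "https://uptime.example.com/api/push/AbC123dEf4"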
16 |
17 |
--------------------------------------------------------------------------------
/roles/hmsdocker/defaults/main/tailscale.yml:
--------------------------------------------------------------------------------
1 | ### Tailscale Settings
2 | tailscale_enabled: no # default: `no`
3 | tailscale_auth_key:
4 | tailscale_enable_subnet_routes: no # default: `no`
5 | tailscale_subnet_routes: '{{ hms_docker_network_subnet }}' # comma-separated list of subnets to expose (such as '192.168.1.0/24,192.168.2.0/24'), default: '{{ hms_docker_network_subnet }}'
6 | tailscale_advertise_exit_node: no # default: `no`
7 |
--------------------------------------------------------------------------------
/roles/hmsdocker/defaults/main/transmission.yml:
--------------------------------------------------------------------------------
1 | # Transmission-specific settings
2 |
3 | # Transmission seed ratio settings
4 | transmission_ratio_limit: "1" # default: "1"
5 | transmission_ratio_enabled: "true" # default: "true"
6 |
7 | # DNS servers to use for the transmission container
8 | transmission_dns_1: "8.8.8.8"
9 | transmission_dns_2: "8.8.4.4"
10 |
11 | # Additional environment variables for Transmission (see https://haugene.github.io/docker-transmission-openvpn/ for available options)
12 | transmission_additional_env_vars:
13 | {
14 | "TRANSMISSION_DOWNLOAD_QUEUE_SIZE": "25",
15 | "TRANSMISSION_MAX_PEERS_GLOBAL": "3000",
16 | "TRANSMISSION_PEER_LIMIT_GLOBAL": "3000",
17 | "TRANSMISSION_PEER_LIMIT_PER_TORRENT": "300",
18 | }
19 |
20 |
21 | ## External Providers and Custom VPN Options
22 | # These are for changing the VPN config to a different server or type for example
23 | # For help with these variables, see the docs located in `docs/Transmission.md`
24 |
25 | # default: ""
26 | transmission_ovpn_config_file: ""
27 |
28 | # default: ""
29 | transmission_ovpn_config_local_dir: ""
30 |
31 | # The git repo where the .ovpn file is stored, see: https://github.com/haugene/vpn-configs-contrib/blob/main/CONTRIBUTING.md
32 | # If this is left blank, it will use the default that comes with the container # default: ""
33 | transmission_ovpn_config_source_repo: ""
34 |
--------------------------------------------------------------------------------
/roles/hmsdocker/defaults/main/vpn.yml:
--------------------------------------------------------------------------------
1 | # VPN-specific settings
2 |
3 | # For `qbittorrent` or `deluge` container, you can use either: `openvpn`, `wireguard`
4 | # The Transmission container only supports `openvpn` or `custom`
5 | # `custom` requires variables to be configured in `inventory/group_vars/all/transmission.yml`
6 | hmsdocker_vpn_type: openvpn
7 |
8 | # If using `transmission`, find your provider here: https://haugene.github.io/docker-transmission-openvpn/supported-providers/
9 | # If using `qbittorrent` or `deluge`, you must copy your VPN config file
10 | # to the container config directory (default: /opt/hms-docker/apps/<container_name>/config/)
11 | # NOTE: these directories will be automatically generated after the first playbook run
12 | # Restart the container using `docker restart <container_name>` after modifying the files
13 | hmsdocker_vpn_provider:
14 |
15 | # Your VPN credentials if authentication is not embedded in the config file
16 | hmsdocker_vpn_user:
17 | hmsdocker_vpn_pass:
18 |
--------------------------------------------------------------------------------
/roles/hmsdocker/files/homepage_docker.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # For configuration options and examples, please see:
3 | # https://gethomepage.dev/en/configs/docker/
4 |
5 | hms-docker:
6 | socket: /var/run/docker.sock
7 |
--------------------------------------------------------------------------------
/roles/hmsdocker/files/homepage_services.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # For configuration options and examples, please see:
3 | # https://gethomepage.dev/en/configs/services
4 |
--------------------------------------------------------------------------------
/roles/hmsdocker/files/scripts/requirements.txt:
--------------------------------------------------------------------------------
1 | certifi==2024.7.4
2 | cffi==1.16.0
3 | charset-normalizer==3.3.2
4 | cryptography==44.0.1
5 | docker==7.1.0
6 | idna==3.7
7 | pycparser==2.22
8 | python-dotenv==1.0.1
9 | requests==2.32.2
10 | urllib3==2.2.2
11 | xmltodict==0.13.0
12 |
--------------------------------------------------------------------------------
/roles/hmsdocker/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart traefik
3 | community.docker.docker_compose_v2:
4 | project_src: "{{ hms_docker_data_path }}"
5 | project_name: "{{ project_name }}"
6 | state: restarted
7 | remove_orphans: "{{ container_remove_orphans }}"
8 | services:
9 | - traefik
10 | when:
11 | - not traefik_certs_dir.changed
12 | - not ansible_check_mode
13 |
14 | - name: restart sabnzbd
15 | community.docker.docker_compose_v2:
16 | project_src: "{{ hms_docker_data_path }}"
17 | project_name: "{{ project_name }}"
18 | state: restarted
19 | remove_orphans: "{{ container_remove_orphans }}"
20 | services:
21 | - sabnzbd
22 | when: not ansible_check_mode
23 |
24 | - name: Restart checkrr
25 | community.docker.docker_compose_v2:
26 | project_src: "{{ hms_docker_data_path }}"
27 | project_name: "{{ project_name }}"
28 | state: restarted
29 | remove_orphans: "{{ container_remove_orphans }}"
30 | services:
31 | - checkrr
32 | when: not ansible_check_mode
33 |
--------------------------------------------------------------------------------
/roles/hmsdocker/tasks/app_inits/app_init.yml:
--------------------------------------------------------------------------------
1 | - name: Import API key reader task to retrieve API keys (again)
2 | ansible.builtin.import_tasks: "app_api_key_reader.yml"
3 |
4 | - name: App init - Get list of app bootstrap task files in the role
5 | ansible.builtin.set_fact:
6 | container_init_task_files: "{{ lookup('fileglob', role_path + '/tasks/app_inits/*.yml', wantlist=True) }}"
7 |
8 | - name: App init - Get names of available app bootstrap files
9 | ansible.builtin.set_fact:
10 | container_init_task_names: >-
11 | {{ container_init_task_files
12 | | map('basename')
13 | | map('regex_replace', '\.yml$', '')
14 | | list }}
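15 | # e.g. ".../app_inits/sonarr.yml" becomes "sonarr"; these names are then intersected
16 | # with the enabled containers below to decide which app inits to run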
15 |
16 | - name: App init - Slim variables and set global app variables
17 | ansible.builtin.set_fact:
18 | app_inits: "{{ enabled_containers | intersect(container_init_task_names) }}"
19 | hmsd_app_init_config_prefix: "HMSD - "
20 |
21 | - name: App init - Run app inits
22 | ansible.builtin.include_tasks: "app_inits/{{ container_init_name }}.yml"
23 | loop: '{{ app_inits }}'
24 | loop_control:
25 | loop_var: container_init_name
26 |
--------------------------------------------------------------------------------
/roles/hmsdocker/tasks/container_postreqs/checkrr.yml:
--------------------------------------------------------------------------------
1 | - name: Checkrr Postreq - Ensure config file exists
2 | ansible.builtin.template:
3 | src: "container_configs/checkrr_config.yaml.j2"
4 | dest: "{{ hms_docker_apps_path }}/checkrr/config/checkrr.yaml"
5 | owner: "{{ container_uid }}"
6 | group: "{{ container_gid }}"
7 | mode: '600'
8 | lstrip_blocks: true
9 | trim_blocks: true
10 | vars:
11 | sonarr_key: "{{ slurped_api_key_sonarr['content'] | b64decode | regex_search(arr_apikey) | default('') }}"
12 | sonarr_4k_key: "{{ slurped_api_key_sonarr_4k['content'] | b64decode | regex_search(arr_apikey) | default('') }}"
13 | radarr_key: "{{ slurped_api_key_radarr['content'] | b64decode | regex_search(arr_apikey) | default('') }}"
14 | radarr_4k_key: "{{ slurped_api_key_radarr_4k['content'] | b64decode | regex_search(arr_apikey) | default('') }}"
15 | lidarr_key: "{{ slurped_api_key_lidarr['content'] | b64decode | regex_search(arr_apikey) | default('') }}"
16 | notify: Restart checkrr
17 |
--------------------------------------------------------------------------------
/roles/hmsdocker/tasks/container_postreqs/plex.yml:
--------------------------------------------------------------------------------
1 | - name: Plex Postreq - Ensure Plex SSL script and resources
2 | when: hms_docker_plex_ssl_enabled and traefik_ssl_enabled
3 | block:
4 | - name: Plex Postreq - Plex SSL - Ensure requirements file
5 | ansible.builtin.copy:
6 | src: scripts/requirements.txt
7 | dest: "{{ hms_docker_data_path }}/requirements.txt"
8 | mode: '644'
9 | register: scripts_req_file_dest
10 |
11 | - name: Plex Postreq - Plex SSL - Ensure venv pip package
12 | ansible.builtin.package:
13 | name:
14 | - python3-venv
15 | state: present
16 |
17 | - name: Plex Postreq - Plex SSL - Ensure pip is updated first
18 | ansible.builtin.pip:
19 | virtualenv: "{{ hms_docker_data_path }}/.venv"
20 | virtualenv_command: python3 -m venv
21 | name:
22 | - pip
23 | state: latest
24 |
25 | - name: Plex Postreq - Plex SSL - Ensure pip environment
26 | ansible.builtin.pip:
27 | requirements: "{{ scripts_req_file_dest.dest | default(hms_docker_data_path + '/requirements.txt') }}"
28 | virtualenv: "{{ hms_docker_data_path }}/.venv"
29 | virtualenv_command: python3 -m venv
30 |
31 | - name: Plex Postreq - Plex SSL - Ensure pkcs12 script
32 | ansible.builtin.copy:
33 | src: scripts/traefik_cert_convert.py
34 | dest: "{{ hms_docker_data_path }}"
35 | mode: '700'
36 | owner: "{{ container_uid }}"
37 | group: "{{ container_gid }}"
38 | register: cert_script
39 |
40 | - name: Plex Postreq - Plex SSL - Ensure cron job
41 | ansible.builtin.cron:
42 | name: "{{ project_name }}-plex-ssl"
43 | user: root
44 | job: "{{ hms_docker_data_path }}/.venv/bin/python {{ cert_script.dest | default(hms_docker_data_path + '/traefik_cert_convert.py') }}"
45 | minute: 0
46 | hour: 5
47 |
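Assuming a project_name of hms-docker and an hms_docker_data_path of /opt/hms-docker (both illustrative), the resulting root crontab entry would look like:

    #Ansible: hms-docker-plex-ssl
    0 5 * * * /opt/hms-docker/.venv/bin/python /opt/hms-docker/traefik_cert_convert.py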
--------------------------------------------------------------------------------
/roles/hmsdocker/tasks/container_postreqs/sabnzbd.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Sabnzbd Postreq - Check if config file exists
3 | ansible.builtin.stat:
4 | path: "{{ hms_docker_apps_path }}/sabnzbd/config/sabnzbd.ini"
5 | register: sabnzbd_config_path
6 |
7 | - name: Sabnzbd Postreq - Restart sabnzbd inline if config does not exist
8 | when: not sabnzbd_config_path.stat.exists
9 | community.docker.docker_compose_v2:
10 | project_src: "{{ hms_docker_data_path }}"
11 | project_name: "{{ project_name }}"
12 | state: restarted
13 | remove_orphans: "{{ container_remove_orphans }}"
14 | services:
15 | - sabnzbd
16 |
17 | - name: Sabnzbd Postreq - Wait 5 seconds for Sabnzbd to initialize
18 | ansible.builtin.wait_for:
19 | timeout: 5
20 |
21 | - name: Sabnzbd Postreq - Check if config file exists after container restart
22 | ansible.builtin.stat:
23 | path: "{{ hms_docker_apps_path }}/sabnzbd/config/sabnzbd.ini"
24 | register: sabnzbd_config_path
25 |
26 | - name: Sabnzbd Postreq - Configure sabnzbd config file
27 | when: sabnzbd_config_path.stat.exists
28 | block:
29 | - name: Sabnzbd Postreq - Get current sabnzbd config file contents
30 | ansible.builtin.slurp:
31 | path: "{{ sabnzbd_config_path.stat.path }}"
32 | register: slurped_sabnzbd_data
33 | check_mode: false
34 | no_log: "{{ not debug_mode }}"
35 |
36 | - name: Sabnzbd Postreq - Pull current sabnzbd allowed hosts
37 | ansible.builtin.set_fact:
38 | slurped_sabnzbd_hostlist: "{{ slurped_sabnzbd_data['content'] | b64decode | regex_search('(?<=host_whitelist = ).*') | split(',') }}"
39 | cacheable: false
40 | when: slurped_sabnzbd_data['content'] is defined
41 | no_log: "{{ not debug_mode }}"
42 |
43 | - name: Sabnzbd Postreq - Ensure sabnzbd container config has allowlisted hostname
44 | ansible.builtin.replace:
45 | path: "{{ sabnzbd_config_path.stat.path }}"
46 | regexp: '(?<=host_whitelist = ).*'
47 | replace: "{{ slurped_sabnzbd_hostlist | join(',') }}{{ hms_docker_container_map['sabnzbd']['proxy_host_rule'] }}.{{ hms_docker_domain }}"
48 | when: (hms_docker_container_map['sabnzbd']['proxy_host_rule'] + "." + hms_docker_domain) not in slurped_sabnzbd_hostlist
49 | notify: restart sabnzbd
50 |
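A sketch of the rewrite, assuming a proxy_host_rule of sabnzbd, a domain of home.local, and an existing value ending in a comma (all illustrative):

    # before
    host_whitelist = sabnzbd-host,
    # after
    host_whitelist = sabnzbd-host,sabnzbd.home.local

Note that the replacement appends the new hostname directly after the joined list, so it relies on that trailing comma (an empty final list element) for separation.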
--------------------------------------------------------------------------------
/roles/hmsdocker/tasks/container_postreqs/traefik.yml:
--------------------------------------------------------------------------------
1 | - name: Traefik Postreq - Extend Traefik check list if 4K instances are enabled
2 | ansible.builtin.set_fact:
3 | traefik_enabled_subdomains: "{{ traefik_enabled_subdomains + [item + '-4k'] }}"
4 | when:
5 | - separate_4k_instances_enable
6 | - item in enabled_containers
7 | loop:
8 | - sonarr
9 | - radarr
10 |
11 | - name: Traefik Postreq - Verify all Traefik endpoints are accessible
12 | ansible.builtin.uri:
13 | url: http://{{ item }}.{{ hms_docker_domain }}
14 | status_code: 200,302,401,403
15 | validate_certs: false
16 | loop: "{{ traefik_enabled_subdomains }}"
17 | when:
18 | - hmsdocker_container_enabled_traefik
19 | - traefik_verify_endpoints | default(false)
20 | ignore_errors: true
21 | register: traefik_successful_endpoints
22 |
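As a worked example with illustrative values: if traefik_enabled_subdomains starts as ['sonarr', 'radarr', 'overseerr'] and 4K instances are enabled with both sonarr and radarr deployed, the first task extends it to ['sonarr', 'radarr', 'overseerr', 'sonarr-4k', 'radarr-4k'], and the second probes http://<subdomain>.<domain> for each entry, accepting 200/302/401/403 responses.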
--------------------------------------------------------------------------------
/roles/hmsdocker/tasks/container_prereqs/authentik.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Authentik Prereq - Ensure authentik secret key file
3 | ansible.builtin.template:
4 | src: authentik_secret.j2
5 | dest: "{{ authentik_key_path }}"
6 | mode: 0600
7 | owner: root
8 | group: root
9 | backup: yes
10 | force: no
11 | register: authentik_key_template_output
12 | no_log: "{{ not debug_mode }}"
13 | vars:
14 | key: "{{ lookup('ansible.builtin.password', '/dev/null', chars=['ascii_letters', 'digits'], length=50) }}"
15 |
16 | - name: Authentik Prereq - Ensure authentik postgres password file
17 | ansible.builtin.template:
18 | src: authentik_secret.j2
19 | dest: "{{ authentik_pgpass_path }}"
20 | mode: 0600
21 | owner: root
22 | group: root
23 | backup: yes
24 | force: no
25 | register: authentik_pgpass_template_output
26 | no_log: "{{ not debug_mode }}"
27 | vars:
28 | key: "{{ lookup('ansible.builtin.password', '/dev/null', chars=['ascii_letters', 'digits'], length=50) }}"
29 |
30 | - name: Authentik Prereq - Slurp authentik secret key data
31 | ansible.builtin.slurp:
32 | src: "{{ authentik_key_template_output.dest }}"
33 | register: slurped_key_data
34 | check_mode: false
35 | when: authentik_key_template_output.dest is defined
36 | no_log: "{{ not debug_mode }}"
37 |
38 | - name: Authentik Prereq - Slurp authentik postgres password data
39 | ansible.builtin.slurp:
40 | src: "{{ authentik_pgpass_template_output.dest }}"
41 | register: slurped_pg_pass_data
42 | check_mode: false
43 | when: authentik_pgpass_template_output.dest is defined
44 | no_log: "{{ not debug_mode }}"
45 |
46 | - name: Authentik Prereq - Ensure Outposts directory
47 | ansible.builtin.file:
48 | path: "{{ hms_docker_apps_path }}/authentik/outposts"
49 | state: directory
50 | mode: 0755
51 | owner: root
52 | group: root
53 |
54 | - name: Authentik Prereq - Ensure authentik Outpost configs
55 | ansible.builtin.template:
56 | src: authentik_outpost.j2
57 | dest: "{{ hms_docker_apps_path }}/authentik/outposts/authentik-{{ item.name }}.outpost.yml"
58 | mode: 0644
59 | loop: "{{ authentik_proxy_enabled_containers }}"
60 |
61 | - name: Authentik Prereq - Ensure authentik Outpost configs for Traefik external hosts
62 | ansible.builtin.template:
63 | src: authentik_outpost_ext_host.j2
64 | dest: "{{ hms_docker_apps_path }}/authentik/outposts/authentik-{{ item.friendly_name | map('regex_replace', regex, replace) | list | join }}.outpost.yml"
65 | mode: 0644
66 | with_items: "{{ traefik_ext_hosts_list }}"
67 | when:
68 | - item.enabled | default(false)
69 | - item.authentik | default(false)
70 | - item.authentik_provider_type | default('proxy') == 'proxy'
71 |
72 | - name: Authentik Prereq - Ensure authentik Outpost configs for 4K containers
73 | ansible.builtin.template:
74 | src: authentik_outpost_4k.j2
75 | dest: "{{ hms_docker_apps_path }}/authentik/outposts/authentik-{{ item.name }}-4k.outpost.yml"
76 | mode: 0644
77 | loop: "{{ authentik_proxy_enabled_containers }}"
78 | when:
79 | - item.name in ['sonarr', 'radarr']
80 | - separate_4k_instances_enable | default(false)
81 |
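For example (illustrative values): an authentik_proxy_enabled_containers entry with name: sonarr renders outposts/authentik-sonarr.outpost.yml; with separate_4k_instances_enable true, a companion authentik-sonarr-4k.outpost.yml is rendered as well; and an external host entry renders an outpost file keyed on its sanitized friendly_name.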
--------------------------------------------------------------------------------
/roles/hmsdocker/tasks/container_prereqs/homepage.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Homepage Prereq - Ensure Watchtower API key
3 | ansible.builtin.template:
4 | src: authentik_secret.j2
5 | dest: "{{ hms_docker_data_path }}/.watchtower.key"
6 | mode: 0600
7 | owner: "{{ container_uid }}"
8 | group: "{{ container_gid }}"
9 | force: no
10 | no_log: "{{ not debug_mode }}"
11 | register: watchtower_key_output
12 | vars:
13 | key: "{{ lookup('password', '/dev/null length=32 chars=ascii_letters') }}"
14 |
15 | - name: Homepage Prereq - Slurp Watchtower key data
16 | ansible.builtin.slurp:
17 | src: "{{ watchtower_key_output.dest }}"
18 | register: slurped_watchtower_key_data
19 | check_mode: false
20 | when: watchtower_key_output.dest is defined
21 | no_log: "{{ not debug_mode }}"
22 |
23 | - name: Homepage Prereq - Ensure homepage docker config
24 | ansible.builtin.copy:
25 | src: homepage_docker.yaml
26 | dest: "{{ hms_docker_apps_path }}/homepage/config/docker.yaml"
27 | mode: 0644
28 | owner: "{{ container_uid }}"
29 | group: "{{ container_gid }}"
30 | force: no
31 |
32 | - name: Homepage Prereq - Ensure homepage services config
33 | ansible.builtin.copy:
34 | src: homepage_services.yaml
35 | dest: "{{ hms_docker_apps_path }}/homepage/config/services.yaml"
36 | mode: 0644
37 | owner: "{{ container_uid }}"
38 | group: "{{ container_gid }}"
39 | force: no
40 |
--------------------------------------------------------------------------------
/roles/hmsdocker/tasks/container_prereqs/qbittorrent.yml:
--------------------------------------------------------------------------------
1 | - name: Ensure qBittorrent config exists before first start to disable authentication
2 | ansible.builtin.template:
3 | src: container_configs/qbittorrent_config.conf.j2
4 | dest: "{{ hms_docker_apps_path }}/qbittorrent/config/qBittorrent/config/qBittorrent.conf"
5 | owner: "{{ container_uid }}"
6 | group: "{{ container_gid }}"
7 | mode: '0644'
8 | force: false
9 | lstrip_blocks: true
10 | trim_blocks: true
11 |
--------------------------------------------------------------------------------
/roles/hmsdocker/tasks/container_prereqs/tautulli.yml:
--------------------------------------------------------------------------------
1 | - name: Tautulli Prereq - Ensure JBOPS if enabled
2 | when: tautulli_jbops_enabled | default(false)
3 | block:
4 | - name: Ensure global git config ignores dubious ownership permissions
5 | community.general.git_config:
6 | name: safe.directory
7 | scope: global
8 | value: "{{ tautulli_jbops_install_path }}"
9 |
10 | - name: Ensure JBOPS repo
11 | ansible.builtin.git:
12 | repo: https://github.com/blacktwin/JBOPS
13 | clone: true
14 | dest: "{{ tautulli_jbops_install_path }}"
15 | diff: false
16 | changed_when: false
17 |
--------------------------------------------------------------------------------
/roles/hmsdocker/tasks/container_prereqs/traefik.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Traefik Prereq - Ensure Traefik config
3 | ansible.builtin.template:
4 | src: traefik.yml.j2
5 | dest: "{{ hms_docker_apps_path }}/traefik/config/traefik.yml"
6 | mode: 0644
7 | group: docker
8 | backup: yes
9 | lstrip_blocks: yes
10 | trim_blocks: yes
11 | notify: restart traefik
12 |
13 | - name: Traefik Prereq - Ensure Traefik certs directory
14 | ansible.builtin.file:
15 | path: "{{ hms_docker_apps_path }}/traefik/config/certs"
16 | state: directory
17 | mode: 0755
18 | register: traefik_certs_dir
19 |
20 | - name: Traefik Prereq - Ensure Traefik static config directory
21 | ansible.builtin.file:
22 | path: "{{ hmsdocker_traefik_static_config_location }}"
23 | state: directory
24 | mode: 0755
25 | group: docker
26 | register: traefik_static_config_dir
27 |
28 | - name: Traefik Prereq - Ensure Traefik static config file
29 | ansible.builtin.template:
30 | src: hmsd_traefik_middlewares.yml.j2
31 | dest: "{{ traefik_static_config_dir.path }}/hmsd_middlewares.yml"
32 | mode: 0644
33 | group: docker
34 | backup: yes
35 | lstrip_blocks: yes
36 | trim_blocks: yes
37 |
38 | - name: Traefik Prereq - Ensure Traefik static external host configs
39 | ansible.builtin.template:
40 | src: traefik_additional_routes.yml.j2
41 | dest: "{{ traefik_static_config_dir.path }}/{{ item.friendly_name | map('regex_replace', regex, replace) | list | join }}.yml"
42 | mode: 0644
43 | group: docker
44 | backup: yes
45 | with_items: "{{ traefik_ext_hosts_list }}"
46 | when:
47 | - traefik_ext_hosts_enabled
48 | - item.friendly_name is defined and item.friendly_name != ''
49 | - item.subdomain_name is defined and item.subdomain_name != ''
50 | - item.backend_url is defined and item.backend_url != ''
51 | - item.enabled is defined and item.enabled
52 |
53 | - name: Traefik Prereq - Ensure disabled Traefik static external host configs are removed
54 | ansible.builtin.file:
55 | path: "{{ traefik_static_config_dir.path }}/{{ item.friendly_name | map('regex_replace', regex, replace) | list | join }}.yml"
56 | state: absent
57 | with_items: "{{ traefik_ext_hosts_list }}"
58 | when:
59 | - not item.enabled or not traefik_ext_hosts_enabled
60 |
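A sketch of the traefik_ext_hosts_list shape these tasks consume; the keys are the ones checked above, the values are illustrative:

    traefik_ext_hosts_enabled: true
    traefik_ext_hosts_list:
      - friendly_name: Proxmox Host     # sanitized to build the rendered file name
        subdomain_name: proxmox
        backend_url: https://192.168.1.10:8006
        enabled: true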
--------------------------------------------------------------------------------
/roles/hmsdocker/tasks/generate_compose_files.yml:
--------------------------------------------------------------------------------
1 | - name: Generate Docker Compose files for Watchtower
2 | when: container_enable_auto_updates
3 | ansible.builtin.template:
4 | src: containers/watchtower.yml.j2
5 | dest: "{{ compose_dir.dest | default(hms_docker_data_path + '/compose_files') }}/watchtower.yml"
6 | owner: "{{ container_uid }}"
7 | group: "{{ container_gid }}"
8 | mode: 0644
9 | trim_blocks: yes
10 | lstrip_blocks: yes
11 | register: watchtower_compose_file_path
12 |
13 | - name: Generate Docker Compose files for Cloudflare
14 | when: cloudflare_ddns_enabled | default(false) or cloudflare_tunnel_enabled | default(false)
15 | ansible.builtin.template:
16 | src: containers/cloudflare.yml.j2
17 | dest: "{{ compose_dir.dest | default(hms_docker_data_path + '/compose_files') }}/cloudflare.yml"
18 | owner: "{{ container_uid }}"
19 | group: "{{ container_gid }}"
20 | mode: 0644
21 | trim_blocks: yes
22 | lstrip_blocks: yes
23 | register: cloudflare_compose_file_path
24 |
25 | - name: Generate Docker Compose files for Authentik
26 | when: hmsdocker_authentik_enabled_globally
27 | ansible.builtin.template:
28 | src: containers/authentik.yml.j2
29 | dest: "{{ compose_dir.dest | default(hms_docker_data_path + '/compose_files') }}/authentik.yml"
30 | owner: "{{ container_uid }}"
31 | group: "{{ container_gid }}"
32 | mode: 0644
33 | trim_blocks: yes
34 | lstrip_blocks: yes
35 | register: authentik_compose_file_path
36 |
37 | - name: Generate Docker Compose files for Tailscale
38 | when: hmsdocker_container_enabled_tailscale
39 | ansible.builtin.template:
40 | src: containers/tailscale.yml.j2
41 | dest: "{{ compose_dir.dest | default(hms_docker_data_path + '/compose_files') }}/tailscale.yml"
42 | owner: "{{ container_uid }}"
43 | group: "{{ container_gid }}"
44 | mode: 0644
45 | trim_blocks: yes
46 | lstrip_blocks: yes
47 | register: tailscale_compose_file_path
48 |
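With all four toggles enabled and an illustrative data path of /opt/hms-docker, the rendered files land in:

    /opt/hms-docker/compose_files/
    ├── watchtower.yml
    ├── cloudflare.yml
    ├── authentik.yml
    └── tailscale.yml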
--------------------------------------------------------------------------------
/roles/hmsdocker/tasks/nas/nas_additional_cifs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: NAS - Additional CIFS - Ensure CIFS packages.
3 | ansible.builtin.package:
4 | name:
5 | - cifs-utils
6 | - samba-client
7 | - samba-common
8 | state: "{{ nas_client_package_state }}"
9 |
10 | - name: NAS - Additional CIFS - Ensure additional CIFS paths exist if defined
11 | ansible.builtin.file:
12 | path: "{{ mount_path.local_mount_path }}"
13 | state: directory
14 | with_items:
15 | - "{{ nas_client_remote_additional_paths }}"
16 | loop_control:
17 | loop_var: mount_path
18 | when:
19 | - nas_client_remote_additional_paths is defined
20 | - mount_path.type == "cifs"
21 | register: cifs_additional_paths
22 |
23 | - name: NAS - Additional CIFS - Ensure additional CIFS credential files if defined
24 | ansible.builtin.template:
25 | dest: "{{ hms_docker_data_path }}/.{{ project_name }}.{{ cifs_creds.name | map('regex_replace', regex, replace) | list | join }}.cifs_creds"
26 | src: cifs_creds.j2
27 | owner: root
28 | group: root
29 | mode: 0600
30 | with_items:
31 | - "{{ nas_client_remote_additional_paths }}"
32 | loop_control:
33 | loop_var: cifs_creds
34 | when:
35 | - nas_client_remote_additional_paths is defined
36 | - cifs_creds.type == "cifs"
37 | vars:
38 | username: "{{ cifs_creds.cifs_username }}"
39 | password: "{{ cifs_creds.cifs_password }}"
40 | register: cifs_additional_creds
41 |
42 | - name: NAS - Additional CIFS - Ensure additional CIFS fstab entries if defined
43 | ansible.builtin.mount:
44 | backup: yes
45 | boot: "{{ nas_client_mount_on_boot }}"
46 | path: "{{ cifs_mount[0].path }}"
47 | src: "{{ cifs_mount[0].mount_path.remote_path }}"
48 | fstype: cifs
49 | opts: credentials={{ cifs_mount[1].dest }},{{ cifs_mount[0].mount_path.cifs_opts }}
50 | state: "{{ nas_client_mount_state }}"
51 | with_nested:
52 | - "{{ cifs_additional_paths.results }}"
53 | - "{{ cifs_additional_creds.results }}"
54 | loop_control:
55 | loop_var: cifs_mount
56 | when:
57 | - cifs_additional_paths is defined
58 | - cifs_mount[1].dest is defined
59 | - cifs_mount[0].mount_path.type == "cifs"
60 | - not ansible_check_mode
61 |
62 | - name: NAS - Additional CIFS - Ensure library folders for additional paths
63 | ansible.builtin.file:
64 | path: "{{ cifs_library_folder[0].path }}/{{ hms_docker_library_folder_name }}/{{ cifs_library_folder[1].folder_name }}"
65 | state: directory
66 | with_nested:
67 | - "{{ cifs_additional_paths.results }}"
68 | - "{{ hms_docker_library_folders }}"
69 | loop_control:
70 | loop_var: cifs_library_folder
71 | when:
72 | - cifs_additional_paths is defined
73 | - cifs_library_folder[0].mount_path.type == "cifs"
74 | - not ansible_check_mode
75 |
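A sketch of a nas_client_remote_additional_paths entry for the CIFS case; the keys are the ones referenced above, the values are illustrative (NFS entries use nfs_opts in place of the cifs_* keys):

    nas_client_remote_additional_paths:
      - name: media2                     # used in the credentials file name
        type: cifs                       # cifs | nfs | local
        local_mount_path: /mnt/media2
        remote_path: //nas.local/media2
        cifs_opts: rw,uid=1000,gid=1000
        cifs_username: shareuser
        cifs_password: sharepass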
--------------------------------------------------------------------------------
/roles/hmsdocker/tasks/nas/nas_additional_local.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: NAS - Additional Local - Ensure additional local paths exist if defined
3 | ansible.builtin.file:
4 | path: "{{ mount_path.local_mount_path }}"
5 | state: directory
6 | with_items:
7 | - "{{ nas_client_remote_additional_paths }}"
8 | loop_control:
9 | loop_var: mount_path
10 | when:
11 | - nas_client_remote_additional_paths is defined
12 | - mount_path.type == "local"
13 | register: local_additional_paths
14 |
15 | - name: NAS - Additional Local - Ensure library folders for additional paths
16 | ansible.builtin.file:
17 | path: "{{ library_folders[0].path }}/{{ hms_docker_library_folder_name }}/{{ library_folders[1].folder_name }}"
18 | state: directory
19 | with_nested:
20 | - "{{ local_additional_paths.results }}"
21 | - "{{ hms_docker_library_folders }}"
22 | loop_control:
23 | loop_var: library_folders
24 | when:
25 | - local_additional_paths is defined
26 | - library_folders[0].mount_path.type == "local"
27 |
--------------------------------------------------------------------------------
/roles/hmsdocker/tasks/nas/nas_additional_nfs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: NAS - Additional NFS - Ensure RedHat NFS packages.
3 | ansible.builtin.package:
4 | name:
5 | - nfs-utils
6 | state: "{{ nas_client_package_state }}"
7 | when: hmsdocker_family_override | lower == "redhat"
8 |
9 | - name: NAS - Additional NFS - Ensure Debian NFS packages.
10 | ansible.builtin.package:
11 | name:
12 | - nfs-common
13 | state: "{{ nas_client_package_state }}"
14 | when: ansible_facts['os_family'] | lower == "debian"
15 |
16 | - name: NAS - Additional NFS - Ensure additional NFS paths exist if defined
17 | ansible.builtin.file:
18 | path: "{{ mount_path.local_mount_path }}"
19 | state: directory
20 | with_items:
21 | - "{{ nas_client_remote_additional_paths }}"
22 | loop_control:
23 | loop_var: mount_path
24 | when:
25 | - nas_client_remote_additional_paths is defined
26 | - mount_path.type == "nfs"
27 | register: nfs_additional_paths
28 |
29 | - name: NAS - Additional NFS - Ensure additional NFS fstab entries if defined
30 | ansible.posix.mount:
31 | backup: yes
32 | boot: "{{ nas_client_mount_on_boot }}"
33 | path: "{{ nfs_mount.path }}"
34 | src: "{{ nfs_mount.mount_path.remote_path }}"
35 | fstype: nfs
36 | opts: "{{ nfs_mount.mount_path.nfs_opts }}"
37 | state: "{{ nas_client_mount_state }}"
38 | with_items:
39 | - "{{ nfs_additional_paths.results }}"
40 | loop_control:
41 | loop_var: nfs_mount
42 | when:
43 | - nfs_additional_paths is defined
44 | - nfs_mount.mount_path.type == "nfs"
45 | - not ansible_check_mode
46 |
47 | - name: NAS - Additional NFS - Ensure library folders for additional paths
48 | ansible.builtin.file:
49 | path: "{{ library_folders[0].path }}/{{ hms_docker_library_folder_name }}/{{ library_folders[1].folder_name }}"
50 | state: directory
51 | with_nested:
52 | - "{{ nfs_additional_paths.results }}"
53 | - "{{ hms_docker_library_folders }}"
54 | loop_control:
55 | loop_var: library_folders
56 | when:
57 | - nfs_additional_paths is defined
58 | - library_folders[0].mount_path.type == "nfs"
59 | - not ansible_check_mode
60 |
--------------------------------------------------------------------------------
/roles/hmsdocker/tasks/nas/nas_cifs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: NAS - CIFS - Ensure CIFS packages.
3 | ansible.builtin.package:
4 | name:
5 | - cifs-utils
6 | - samba-client
7 | - samba-common
8 | state: "{{ nas_client_package_state }}"
9 |
10 | - name: NAS - CIFS - Ensure CIFS credentials file.
11 | ansible.builtin.template:
12 | dest: "{{ hms_docker_data_path }}/.{{ project_name }}.cifs_creds"
13 | src: cifs_creds.j2
14 | owner: root
15 | group: root
16 | mode: 0600
17 | vars:
18 | username: "{{ nas_client_cifs_username }}"
19 | password: "{{ nas_client_cifs_password }}"
20 | register: cifs_creds_file
21 | no_log: "{{ not debug_mode }}"
22 |
23 | - name: NAS - CIFS - Ensure CIFS fstab entry.
24 | ansible.posix.mount:
25 | backup: yes
26 | boot: "{{ nas_client_mount_on_boot }}"
27 | path: "{{ hmsdocker_primary_mount_path }}"
28 | src: "{{ nas_client_remote_cifs_path }}"
29 | fstype: cifs
30 | opts: credentials={{ cifs_creds_file.dest }},{{ nas_client_cifs_opts }}
31 | state: "{{ nas_client_mount_state }}"
32 | when:
33 | - cifs_creds_file is defined
34 | - not ansible_check_mode
35 |
--------------------------------------------------------------------------------
/roles/hmsdocker/tasks/nas/nas_local.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: NAS - Local - Using local path
3 | ansible.builtin.debug:
4 | msg: Using local path "{{ hmsdocker_primary_mount_path }}" for media files
5 |
--------------------------------------------------------------------------------
/roles/hmsdocker/tasks/nas/nas_nfs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: NAS - NFS - Ensure RedHat NFS packages.
3 | ansible.builtin.package:
4 | name:
5 | - nfs-utils
6 | state: "{{ nas_client_package_state }}"
7 | when: hmsdocker_family_override | lower == "redhat"
8 |
9 | - name: NAS - NFS - Ensure Debian NFS packages.
10 | ansible.builtin.package:
11 | name:
12 | - nfs-common
13 | state: "{{ nas_client_package_state }}"
14 | when: ansible_facts['os_family'] | lower == "debian"
15 |
16 | - name: NAS - NFS - Ensure NFS fstab entry and mount.
17 | ansible.posix.mount:
18 | backup: yes
19 | boot: "{{ nas_client_mount_on_boot }}"
20 | path: "{{ hmsdocker_primary_mount_path }}"
21 | src: "{{ nas_client_remote_nfs_path }}"
22 | fstype: nfs
23 | opts: "{{ nas_client_nfs_opts }}"
24 | state: "{{ nas_client_mount_state }}"
25 |
--------------------------------------------------------------------------------
/roles/hmsdocker/tasks/scripts.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure scripts folder
3 | ansible.builtin.file:
4 | path: "{{ hms_docker_data_path }}/scripts"
5 | state: directory
6 | owner: root
7 | group: root
8 | mode: 0755
9 | register: scripts_path
10 |
11 | - name: Ensure monitoring scripts
12 | when: monitoring_scripts_enabled | default(false)
13 | block:
14 | - name: Ensure monitoring directory
15 | ansible.builtin.file:
16 | path: "{{ scripts_path.path }}/monitoring"
17 | state: directory
18 | owner: root
19 | group: root
20 | mode: 0755
21 | register: monitoring_scripts_path
22 |
23 | - name: Ensure env symlink
24 | ansible.builtin.file:
25 | state: link
26 | src: "{{ compose_env.dest | default(hms_docker_data_path + '/.env') }}"
27 | dest: "{{ monitoring_scripts_path.path }}/.env"
28 | force: "{{ ansible_check_mode }}"
29 |
30 | - name: Ensure monitoring script files
31 | ansible.builtin.copy:
32 | src: scripts/monitoring
33 | dest: "{{ scripts_path.path }}"
34 | mode: 0700
35 |
36 | - name: Ensure media availability cron
37 | ansible.builtin.cron:
38 | name: "{{ project_name }}-media-availability-monitoring"
39 | user: root
40 | job: "{{ hms_docker_data_path }}/.venv/bin/python3 {{ monitoring_scripts_path.path }}/check_media_availability.py"
41 | minute: "*"
42 |
--------------------------------------------------------------------------------
/roles/hmsdocker/tasks/vpn_setup.yml:
--------------------------------------------------------------------------------
1 | - name: Ensure VPN config type directories exist before start
2 | ansible.builtin.file:
3 | path: '{{ hms_docker_apps_path }}/{{ item }}/config/{{ hmsdocker_vpn_type }}'
4 | state: directory
5 | owner: '{{ container_uid }}'
6 | group: '{{ container_gid }}'
7 | mode: '755'
8 | loop:
9 | - transmission
10 | - qbittorrent
11 | - deluge
12 | when: (item in enabled_containers and item != 'transmission') or (item == 'transmission' and hmsdocker_vpn_provider == 'custom' and hmsdocker_vpn_type == 'openvpn')
13 |
14 | - name: Ensure WireGuard config exists with restricted permissions
15 | ansible.builtin.file:
16 | path: '{{ hms_docker_apps_path }}/{{ item }}/config/{{ hmsdocker_vpn_type }}/wg0.conf'
17 | state: touch
18 | owner: '{{ container_uid }}'
19 | group: '{{ container_gid }}'
20 | mode: '600'
21 | when:
22 | - item in enabled_containers
23 | - hmsdocker_vpn_type == 'wireguard'
24 | loop:
25 | - qbittorrent
26 | - deluge
27 | changed_when: false
28 |
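For example, with hmsdocker_vpn_type of wireguard and both qbittorrent and deluge in enabled_containers, the first task creates .../qbittorrent/config/wireguard and .../deluge/config/wireguard, and the second touches an empty, owner-only (mode 600) wg0.conf inside each so the expected file is present before the containers first start.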
--------------------------------------------------------------------------------
/roles/hmsdocker/tasks/vpn_validation.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Obtain host current public IP
3 | community.general.ipify_facts:
4 | register: public_ip
5 | ignore_errors: true
6 |
7 | - name: Wait 15 secs for VPNs to init
8 | ansible.builtin.wait_for:
9 | timeout: 15
10 |
11 | - name: Get VPN IPs from containers
12 | community.docker.docker_container_exec:
13 | container: '{{ item }}'
14 | command: sh -c 'command -v curl >/dev/null && curl -s api.ipify.org || wget -qO- api.ipify.org'
15 | loop:
16 | - transmission
17 | - qbittorrent
18 | - deluge
19 | when:
20 | - item in enabled_containers
21 | register: vpn_public_ip
22 | changed_when: false
23 |
24 | - name: Initialize empty variable for IP results
25 | ansible.builtin.set_fact:
26 | slimmed_vpn_public_ip_results: []
27 | no_log: "{{ not debug_mode }}"
28 |
29 | - name: Append IP results to slim output
30 | ansible.builtin.set_fact:
31 | slimmed_vpn_public_ip_results: "{{ slimmed_vpn_public_ip_results + [{'item': item.item | default(''), 'vpn_ip': item.stdout | default(''), 'skipped': item.skipped | default(false)}] }}"
32 | loop: "{{ vpn_public_ip.results }}"
33 | no_log: "{{ not debug_mode }}"
34 |
35 | - name: Ensure public IP and VPN public IP are different
36 | ansible.builtin.debug:
37 | msg:
38 | - Your public IP is protected in {{ item.item }}!
39 | - "Current public IP: {{ ansible_facts['ipify_public_ip'] }}"
40 | - "Current {{ item.item }} VPN IP: {{ item.vpn_ip }}"
41 | when:
42 | - not ansible_check_mode
43 | - not item.skipped
44 | - ansible_facts['ipify_public_ip'] is defined
45 | - ansible_facts['ipify_public_ip'] != item.vpn_ip
46 | loop: "{{ slimmed_vpn_public_ip_results }}"
47 |
48 | - name: Stop VPN container if public IP and VPN IP match
49 | community.docker.docker_compose_v2:
50 | project_src: "{{ hms_docker_data_path }}"
51 | project_name: "{{ project_name }}"
52 | state: stopped
53 | remove_orphans: "{{ container_remove_orphans }}"
54 | services:
55 | - "{{ item.item }}"
56 | loop: "{{ slimmed_vpn_public_ip_results }}"
57 | when:
58 | - not item.skipped | default(True)
59 | - item.vpn_ip is defined
60 | - ansible_facts['ipify_public_ip'] == item.vpn_ip or ansible_facts['ipify_public_ip'] == '' or ansible_facts['ipify_public_ip'] is undefined
61 |
62 | - name: Print error message if public IP and VPN IP are identical
63 | ansible.builtin.debug:
64 | msg:
65 | - 🔴 Your public IP is NOT protected in {{ item.item }}! 🔴
66 | - "Current public IP: {{ ansible_facts['ipify_public_ip'] }}"
67 | - "Current {{ item.item }} VPN IP: {{ item.vpn_ip }}"
68 | - 🔴 The {{ item.item }} container has been stopped 🔴
69 | loop: "{{ slimmed_vpn_public_ip_results }}"
70 | when:
71 | - not item.skipped | default(True)
72 | - item.vpn_ip is defined
73 | - ansible_facts['ipify_public_ip'] == item.vpn_ip or ansible_facts['ipify_public_ip'] == '' or ansible_facts['ipify_public_ip'] is undefined
74 |
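A sketch of the slimmed_vpn_public_ip_results structure the later tasks iterate over (values illustrative):

    slimmed_vpn_public_ip_results:
      - item: qbittorrent
        vpn_ip: 203.0.113.10
        skipped: false
      - item: transmission
        vpn_ip: ''
        skipped: true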
--------------------------------------------------------------------------------
/roles/hmsdocker/tasks/youtube_downloaders.yml:
--------------------------------------------------------------------------------
1 | - name: Ensure YouTube media dir exists
2 | ansible.builtin.file:
3 | path: "{{ hms_docker_mount_path }}/{{ hms_docker_primary_mount_name }}/{{ hms_docker_library_folder_name }}/youtube"
4 | state: directory
5 | when: '"tubearchivist" in enabled_containers or "pinchflat" in enabled_containers'
6 |
7 | - name: Ensure YouTube downloader media dir exists
8 | ansible.builtin.file:
9 | path: "{{ hms_docker_mount_path }}/{{ hms_docker_primary_mount_name }}/{{ hms_docker_library_folder_name }}/youtube/{{ item }}"
10 | state: directory
11 | loop:
12 | - tubearchivist
13 | - pinchflat
14 | when: item in enabled_containers
15 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/authentik_outpost.j2:
--------------------------------------------------------------------------------
1 | log_level: info
2 | docker_labels:
3 | traefik.enable: "true"
4 | traefik.http.services.authentik-proxy-{{ project_name }}-{{ item.name }}-service.loadbalancer.server.port: "9000"
5 | traefik.http.routers.authentik-proxy-{{ project_name }}-{{ item.name }}-router.rule: Host(`{{ item.proxy_host_rule }}.{{ hms_docker_domain }}`) && PathPrefix(`/outpost.goauthentik.io/`)
6 | traefik.http.middlewares.authentik-proxy-{{ project_name }}-{{ item.name }}-midware.forwardauth.address: http://authentik-proxy-{{ project_name }}-{{ item.name }}:9000/outpost.goauthentik.io/auth/traefik
7 | traefik.http.middlewares.authentik-proxy-{{ project_name }}-{{ item.name }}-midware.forwardauth.trustForwardHeader: "true"
8 | traefik.http.middlewares.authentik-proxy-{{ project_name }}-{{ item.name }}-midware.forwardauth.authResponseHeaders: X-authentik-username,X-authentik-groups,X-authentik-email,X-authentik-name,X-authentik-uid,X-authentik-jwt,X-authentik-meta-jwks,X-authentik-meta-outpost,X-authentik-meta-provider,X-authentik-meta-app,X-authentik-meta-version
9 | authentik_host: https://authentik-server:9443
10 | authentik_host_insecure: true
11 | authentik_host_browser: {{ authentik_external_host }}
12 | docker_network: {{ project_name }}_proxy_net
13 | container_image: null
14 | docker_map_ports: false
15 | kubernetes_replicas: 1
16 | kubernetes_namespace: default
17 | object_naming_template: authentik-proxy-{{ project_name }}-{{ item.name }}
18 | kubernetes_service_type: ClusterIP
19 | kubernetes_image_pull_secrets: []
20 | kubernetes_disabled_components: []
21 | kubernetes_ingress_annotations: {}
22 | kubernetes_ingress_secret_name: authentik-outpost-tls
23 |
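Rendered with illustrative values (project_name=hms-docker, item.name=sonarr, item.proxy_host_rule=sonarr, hms_docker_domain=home.local), the router rule and naming template above become:

    traefik.http.routers.authentik-proxy-hms-docker-sonarr-router.rule: Host(`sonarr.home.local`) && PathPrefix(`/outpost.goauthentik.io/`)
    object_naming_template: authentik-proxy-hms-docker-sonarr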
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/authentik_outpost_4k.j2:
--------------------------------------------------------------------------------
1 | log_level: info
2 | docker_labels:
3 | traefik.enable: "true"
4 | traefik.http.services.authentik-proxy-{{ project_name }}-{{ item.name }}-4k-service.loadbalancer.server.port: "9000"
5 | traefik.http.routers.authentik-proxy-{{ project_name }}-{{ item.name }}-4k-router.rule: Host(`{{ item.proxy_host_rule }}-4k.{{ hms_docker_domain }}`) && PathPrefix(`/outpost.goauthentik.io/`)
6 | traefik.http.middlewares.authentik-proxy-{{ project_name }}-{{ item.name }}-4k-midware.forwardauth.address: http://authentik-proxy-{{ project_name }}-{{ item.name }}-4k:9000/outpost.goauthentik.io/auth/traefik
7 | traefik.http.middlewares.authentik-proxy-{{ project_name }}-{{ item.name }}-4k-midware.forwardauth.trustForwardHeader: "true"
8 | traefik.http.middlewares.authentik-proxy-{{ project_name }}-{{ item.name }}-4k-midware.forwardauth.authResponseHeaders: X-authentik-username,X-authentik-groups,X-authentik-email,X-authentik-name,X-authentik-uid,X-authentik-jwt,X-authentik-meta-jwks,X-authentik-meta-outpost,X-authentik-meta-provider,X-authentik-meta-app,X-authentik-meta-version
9 | authentik_host: https://authentik-server:9443
10 | authentik_host_insecure: true
11 | authentik_host_browser: {{ authentik_external_host }}
12 | docker_network: {{ project_name }}_proxy_net
13 | container_image: null
14 | docker_map_ports: false
15 | kubernetes_replicas: 1
16 | kubernetes_namespace: default
17 | object_naming_template: authentik-proxy-{{ project_name }}-{{ item.name }}-4k
18 | kubernetes_service_type: ClusterIP
19 | kubernetes_image_pull_secrets: []
20 | kubernetes_disabled_components: []
21 | kubernetes_ingress_annotations: {}
22 | kubernetes_ingress_secret_name: authentik-outpost-tls
23 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/authentik_outpost_ext_host.j2:
--------------------------------------------------------------------------------
1 | log_level: info
2 | docker_labels:
3 | traefik.enable: "true"
4 | traefik.http.services.authentik-proxy-{{ project_name }}-{{ item.friendly_name | map('regex_replace', regex, replace) | list | join }}-service.loadbalancer.server.port: "9000"
5 | traefik.http.routers.authentik-proxy-{{ project_name }}-{{ item.friendly_name | map('regex_replace', regex, replace) | list | join }}-router.rule: Host(`{{ item.subdomain_name }}.{{ hms_docker_domain }}`) && PathPrefix(`/outpost.goauthentik.io/`)
6 | traefik.http.middlewares.authentik-proxy-{{ project_name }}-{{ item.friendly_name | map('regex_replace', regex, replace) | list | join }}-midware.forwardauth.address: http://authentik-proxy-{{ project_name }}-{{ item.friendly_name | map('regex_replace', regex, replace) | list | join }}:9000/outpost.goauthentik.io/auth/traefik
7 | traefik.http.middlewares.authentik-proxy-{{ project_name }}-{{ item.friendly_name | map('regex_replace', regex, replace) | list | join }}-midware.forwardauth.trustForwardHeader: "true"
8 | traefik.http.middlewares.authentik-proxy-{{ project_name }}-{{ item.friendly_name | map('regex_replace', regex, replace) | list | join }}-midware.forwardauth.authResponseHeaders: X-authentik-username,X-authentik-groups,X-authentik-email,X-authentik-name,X-authentik-uid,X-authentik-jwt,X-authentik-meta-jwks,X-authentik-meta-outpost,X-authentik-meta-provider,X-authentik-meta-app,X-authentik-meta-version
9 | authentik_host: https://authentik-server:9443
10 | authentik_host_insecure: true
11 | authentik_host_browser: {{ authentik_external_host }}
12 | docker_network: {{ project_name }}_proxy_net
13 | container_image: null
14 | docker_map_ports: false
15 | kubernetes_replicas: 1
16 | kubernetes_namespace: default
17 | object_naming_template: authentik-proxy-{{ project_name }}-{{ item.friendly_name | map('regex_replace', regex, replace) | list | join }}
18 | kubernetes_service_type: ClusterIP
19 | kubernetes_image_pull_secrets: []
20 | kubernetes_disabled_components: []
21 | kubernetes_ingress_annotations: {}
22 | kubernetes_ingress_secret_name: authentik-outpost-tls
23 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/authentik_secret.j2:
--------------------------------------------------------------------------------
1 | {{ key }}
2 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/cifs_creds.j2:
--------------------------------------------------------------------------------
1 | username={{ username }}
2 | password={{ password }}
3 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/container_configs/qbittorrent_config.conf.j2:
--------------------------------------------------------------------------------
1 | [Preferences]
2 | WebUI\Address=*
3 | WebUI\AuthSubnetWhitelist=10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16
4 | WebUI\AuthSubnetWhitelistEnabled=true
5 | WebUI\CSRFProtection=false
6 | WebUI\LocalHostAuth=false
7 | WebUI\Port=8086
8 | WebUI\ServerDomains=*
9 | WebUI\UseUPnP=false
10 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/autobrr.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | autobrr:
3 | container_name: autobrr
4 | image: ghcr.io/autobrr/autobrr:latest
5 | restart: ${RESTART_POLICY}
6 | networks:
7 | - proxy_net
8 | - download_net
9 | logging:
10 | options:
11 | max-size: "12m"
12 | max-file: "5"
13 | driver: json-file
14 | user: ${PUID}:${PGID}
15 | environment:
16 | - TZ=${TIMEZONE}
17 | volumes:
18 | - ${HMSD_APPS_PATH}/autobrr/config:/config
19 | {% if hmsdocker_expose_ports_enabled_autobrr %}
20 | ports:
21 | - 7474:7474
22 | {% endif %}
23 | {% if hmsdocker_traefik_enabled_autobrr or hmsdocker_homepage_enabled_autobrr %}
24 | labels:
25 | {% if hmsdocker_traefik_enabled_autobrr %}
26 | - traefik.enable=true
27 | - traefik.http.services.autobrr-${COMPOSE_PROJECT}.loadbalancer.server.port=7474
28 | - traefik.http.routers.autobrr-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['autobrr']['proxy_host_rule'] | default('autobrr') }}.${HMSD_DOMAIN}`)
29 | {% if not hmsdocker_expose_public_enabled_autobrr %}
30 | - traefik.http.routers.autobrr-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
31 | {% endif %}
32 | {% if hmsdocker_authentik_enabled_autobrr %}
33 | - traefik.http.routers.autobrr-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-autobrr-midware@docker
34 | {% endif %}
35 | {% endif %}
36 | {% if hmsdocker_homepage_enabled_autobrr %}
37 | - homepage.group=Downloads
38 | - homepage.name=Autobrr
39 | - homepage.icon=autobrr.png
40 | - homepage.href=http://{{ hms_docker_container_map['autobrr']['proxy_host_rule'] | default('autobrr') }}.${HMSD_DOMAIN}
41 | - homepage.description=Torrent Automation
42 | - homepage.widget.type=autobrr
43 | - homepage.widget.url=http://autobrr:7474
44 | - homepage.widget.key=${AUTOBRR_KEY:-apikeyapikeyapikey}
45 | {% if hmsdocker_homepage_stats_enabled_autobrr %}
46 | - homepage.showStats=true
47 | {% endif %}
48 | {% endif %}
49 | {% endif %}
50 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/bazarr.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | bazarr:
3 | image: linuxserver/bazarr:latest
4 | container_name: bazarr
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - proxy_net
13 | {% if hmsdocker_expose_ports_enabled_bazarr %}
14 | ports:
15 | - 6767:6767
16 | {% endif %}
17 | environment:
18 | - PUID=${PUID}
19 | - PGID=${PGID}
20 | - TZ=${TIMEZONE}
21 | volumes:
22 | - ${HMSD_APPS_PATH}/bazarr/config:/config
23 | - ${HMSD_MOUNT_PATH}:/data
24 | {% if hmsdocker_traefik_enabled_bazarr or hmsdocker_homepage_enabled_bazarr %}
25 | labels:
26 | {% if hmsdocker_traefik_enabled_bazarr %}
27 | - traefik.enable=true
28 | - traefik.http.services.bazarr-${COMPOSE_PROJECT}.loadbalancer.server.port=6767
29 | - traefik.http.routers.bazarr-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['bazarr']['proxy_host_rule'] | default('bazarr') }}.${HMSD_DOMAIN}`)
30 | {% if not hmsdocker_expose_public_enabled_bazarr %}
31 | - traefik.http.routers.bazarr-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
32 | {% endif %}
33 | {% if hmsdocker_authentik_enabled_bazarr %}
34 | - traefik.http.routers.bazarr-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-bazarr-midware@docker
35 | {% endif %}
36 | {% endif %}
37 | {% if hmsdocker_homepage_enabled_bazarr %}
38 | - homepage.group=Managers
39 | - homepage.name=Bazarr
40 | - homepage.icon=bazarr.png
41 | - homepage.href=http://{{ hms_docker_container_map['bazarr']['proxy_host_rule'] | default('bazarr') }}.${HMSD_DOMAIN}
42 | - homepage.description=Subtitle Manager
43 | - homepage.widget.type=bazarr
44 | - homepage.widget.url=http://bazarr:6767
45 | - homepage.widget.key=${BAZARR_KEY:-apikeyapikeyapikey}
46 | {% if hmsdocker_homepage_stats_enabled_bazarr %}
47 | - homepage.showStats=true
48 | {% endif %}
49 | {% endif %}
50 | {% endif %}
51 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/calibre.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | calibre:
3 | image: lscr.io/linuxserver/calibre:latest
4 | container_name: calibre
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - proxy_net
13 | environment:
14 | - PUID=${PUID}
15 | - PGID=${PGID}
16 | - TZ=${TIMEZONE}
17 | volumes:
18 | - ${HMSD_APPS_PATH}/calibre/config:/config
19 | - ${HMSD_MOUNT_PATH}:/data
20 | {% if hmsdocker_expose_ports_enabled_calibre %}
21 | ports:
22 | - 8083:8080 # remote desktop http
23 | - 8182:8181 # remote desktop https
24 | - 8084:8081 # webserver (must be enabled in settings using remote desktop)
25 | {% endif %}
26 | {% if hmsdocker_traefik_enabled_calibre %}
27 | labels:
28 | - traefik.enable=true
29 | - traefik.http.services.calibre-${COMPOSE_PROJECT}.loadbalancer.server.port=8181
30 | - traefik.http.services.calibre-${COMPOSE_PROJECT}.loadbalancer.server.scheme=https
31 | - traefik.http.routers.calibre-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['calibre']['proxy_host_rule'] | default('calibre') }}.${HMSD_DOMAIN}`)
32 | - traefik.http.routers.calibre-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
33 | {% if hmsdocker_authentik_enabled_calibre %}
34 | - traefik.http.routers.calibre-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-calibre-midware@docker
35 | {% endif %}
36 | {% endif %}
37 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/checkrr.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | checkrr:
3 | image: aetaric/checkrr:latest
4 | container_name: checkrr
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - proxy_net
13 | {% if hmsdocker_expose_ports_enabled_checkrr %}
14 | ports:
15 | - 8585:8585
16 | {% endif %}
17 | volumes:
18 | - ${HMSD_APPS_PATH}/checkrr/config:/etc/checkrr
19 | - ${HMSD_MOUNT_PATH}:/data:ro
20 | {% if hmsdocker_traefik_enabled_checkrr or hmsdocker_homepage_enabled_checkrr %}
21 | labels:
22 | {% if hmsdocker_traefik_enabled_checkrr %}
23 | - traefik.enable=true
24 | - traefik.http.services.checkrr-${COMPOSE_PROJECT}.loadbalancer.server.port=8585
25 | - traefik.http.routers.checkrr-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['checkrr']['proxy_host_rule'] | default('checkrr') }}.${HMSD_DOMAIN}`)
26 | {% if not hmsdocker_expose_public_enabled_checkrr %}
27 | - traefik.http.routers.checkrr-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
28 | {% endif %}
29 | {% if hmsdocker_authentik_enabled_checkrr %}
30 | - traefik.http.routers.checkrr-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-checkrr-midware@docker
31 | {% endif %}
32 | {% endif %}
33 | {% if hmsdocker_homepage_enabled_checkrr %}
34 | - homepage.group=Managers
35 | - homepage.name=Checkrr
36 | - homepage.icon=checkrr.png
37 | - homepage.href=https://{{ hms_docker_container_map['checkrr']['proxy_host_rule'] | default('checkrr') }}.${HMSD_DOMAIN}
38 | - homepage.description=Media Integrity Scanner
39 | {% if hmsdocker_homepage_stats_enabled_checkrr %}
40 | - homepage.showStats=true
41 | {% endif %}
42 | {% endif %}
43 | {% endif %}
44 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/cloudflare.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | {% if cloudflare_tunnel_enabled and cloudflare_tunnel_token != "" %}
3 | cloudflare-tunnel:
4 | image: cloudflare/cloudflared:latest
5 | container_name: cloudflare-tunnel
6 | restart: ${RESTART_POLICY}
7 | networks:
8 | - proxy_net
9 | logging:
10 | options:
11 | max-size: "12m"
12 | max-file: "5"
13 | driver: json-file
14 | command: tunnel --no-autoupdate run --token ${CLOUDFLARE_TUNNEL_TOKEN}
15 | {% endif %}
16 |
17 | {% if cloudflare_ddns_enabled %}
18 | cloudflare-ddns:
19 | image: oznu/cloudflare-ddns:latest
20 | container_name: cloudflare-ddns
21 | restart: ${RESTART_POLICY}
22 | logging:
23 | options:
24 | max-size: "12m"
25 | max-file: "5"
26 | driver: json-file
27 | environment:
28 | - API_KEY=${CLOUDFLARE_API_TOKEN}
29 | - ZONE=${CLOUDFLARE_DOMAIN}
30 | - DELETE_ON_STOP={{ cloudflare_ddns_delete_record_on_stop }}
31 | {% if cloudflare_ddns_create_ipv6_aaaa_record %}
32 | - RRTYPE=AAAA
33 | {% endif %}
34 | {% if cloudflare_ddns_subdomain %}
35 | - SUBDOMAIN={{ cloudflare_ddns_subdomain }}
36 | {% endif %}
37 | - PROXIED={{ cloudflare_ddns_proxied }}
38 | {% endif %}
39 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/deluge.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | deluge:
3 | image: binhex/arch-delugevpn
4 | container_name: deluge
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - download_net
13 | - proxy_net
14 | {% if hmsdocker_vpn_type == 'wireguard' %}
15 | privileged: true
16 | sysctls:
17 | - net.ipv4.conf.all.src_valid_mark=1
18 | {% else %}
19 | cap_add:
20 | - NET_ADMIN
21 | {% endif %}
22 | environment:
23 | - PUID=${PUID}
24 | - PGID=${PGID}
25 | - TZ=${TIMEZONE}
26 | - VPN_ENABLED=yes
27 | - VPN_USER=${VPN_USER}
28 | - VPN_PASS=${VPN_PASS}
29 | - VPN_PROV=custom
30 | - VPN_CLIENT={{ hmsdocker_vpn_type }}
31 | - ENABLE_PRIVOXY=yes
32 | - LAN_NETWORK={{ hms_docker_network_subnet }}
33 | volumes:
34 | - ${HMSD_APPS_PATH}/deluge/config:/config
35 | - ${HMSD_MOUNT_PATH}:/data
36 | {% if hmsdocker_expose_ports_enabled_deluge %}
37 | ports:
38 | - 8112:8112
39 | - 8119:8118 # privoxy
40 | {% endif %}
41 | {% if hmsdocker_traefik_enabled_deluge or hmsdocker_homepage_enabled_deluge %}
42 | labels:
43 | {% if hmsdocker_traefik_enabled_deluge %}
44 | - traefik.enable=true
45 | - traefik.http.services.deluge-${COMPOSE_PROJECT}.loadbalancer.server.port=8112
46 | - traefik.http.routers.deluge-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['deluge']['proxy_host_rule'] | default('deluge') }}.${HMSD_DOMAIN}`)
47 | {% if not hmsdocker_expose_public_enabled_deluge %}
48 | - traefik.http.routers.deluge-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
49 | {% endif %}
50 | {% if hmsdocker_authentik_enabled_deluge %}
51 | - traefik.http.routers.deluge-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-deluge-midware@docker
52 | {% endif %}
53 | {% endif %}
54 | {% if hmsdocker_homepage_enabled_deluge %}
55 | - homepage.group=Downloads
56 | - homepage.name=deluge
57 | - homepage.icon=deluge.png
58 | - homepage.href=http://{{ hms_docker_container_map['deluge']['proxy_host_rule'] | default('deluge') }}.${HMSD_DOMAIN}
59 | - homepage.description=Torrent Management
60 | - homepage.widget.type=deluge
61 | - homepage.widget.url=http://deluge:8112
62 | {% if hmsdocker_homepage_stats_enabled_deluge %}
63 | - homepage.showStats=true
64 | {% endif %}
65 | {% endif %}
66 | {% endif %}
67 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/emby.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | emby:
3 | image: lscr.io/linuxserver/emby:latest
4 | container_name: emby
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - proxy_net
13 | {% if enable_intel_gpu %}
14 | devices:
15 | - /dev/dri:/dev/dri
16 | {% endif %}
17 | environment:
18 | - PUID=${PUID}
19 | - PGID=${PGID}
20 | - TZ=${TIMEZONE}
21 | {% if enable_nvidia_gpu %}
22 | - NVIDIA_VISIBLE_DEVICES=all
23 | runtime: nvidia
24 | {% endif %}
25 | volumes:
26 | - ${HMSD_APPS_PATH}/emby/config:/config
27 | - {{ plex_transcode_folder }}/cache:/cache
28 | # media folder where all movies and series are stored
29 | - ${HMSD_MOUNT_PATH}:/data
30 | {% if hmsdocker_expose_ports_enabled_emby %}
31 | ports:
32 | {% if hmsdocker_expose_ports_enabled_jellyfin %}
33 | - 8097:8096
34 | - 8921:8920 #optional
35 | {% else %}
36 | - 8096:8096
37 | - 8920:8920 #optional
38 | {% endif %}
39 | {% endif %}
40 | {% if hmsdocker_traefik_enabled_emby or hmsdocker_homepage_enabled_emby %}
41 | labels:
42 | {% if hmsdocker_traefik_enabled_emby %}
43 | - traefik.enable=true
44 | - traefik.http.services.emby-${COMPOSE_PROJECT}.loadbalancer.server.port=8096
45 | - traefik.http.routers.emby-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['emby']['proxy_host_rule'] | default('emby') }}.${HMSD_DOMAIN}`)
46 | {% if not hmsdocker_expose_public_enabled_emby %}
47 | - traefik.http.routers.emby-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
48 | {% endif %}
49 | {% if hmsdocker_authentik_enabled_emby %}
50 | - traefik.http.routers.emby-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-emby-midware@docker
51 | {% endif %}
52 | {% endif %}
53 | {% if hmsdocker_homepage_enabled_emby %}
54 | - homepage.group=Media
55 | - homepage.name=Emby
56 | - homepage.icon=emby.png
57 | - homepage.href=http://{{ hms_docker_container_map['emby']['proxy_host_rule'] | default('emby') }}.${HMSD_DOMAIN}
58 | - homepage.description=Media Server
59 | - homepage.widget.type=emby
60 | - homepage.widget.url=http://emby:8096
61 | - homepage.widget.key=${EMBY_KEY:-apikeyapikeyapikey}
62 | {% if hmsdocker_homepage_stats_enabled_emby %}
63 | - homepage.showStats=true
64 | {% endif %}
65 | {% endif %}
66 | {% endif %}
67 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/flaresolverr.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | flaresolverr:
3 | image: ghcr.io/flaresolverr/flaresolverr:latest
4 | container_name: flaresolverr
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - "download_net"
13 | environment:
14 | - LOG_LEVEL=info
15 | - LOG_HTML=false
16 | - CAPTCHA_SOLVER=${CAPTCHA_SOLVER:-none}
17 | - TZ=${TIMEZONE}
18 | {% if hmsdocker_expose_ports_enabled_flaresolverr %}
19 | ports:
20 | - 8191:8191
21 | {% endif %}
22 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/heimdall.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | heimdall:
3 | image: lscr.io/linuxserver/heimdall:latest
4 | container_name: heimdall
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - proxy_net
13 | {% if hmsdocker_expose_ports_enabled_heimdall %}
14 | ports:
15 | - 8000:80
16 | - 8443:443
17 | {% endif %}
18 | environment:
19 | - PUID=${PUID}
20 | - PGID=${PGID}
21 | - TZ=${TIMEZONE}
22 | volumes:
23 | - ${HMSD_APPS_PATH}/heimdall/config:/config
24 | {% if hmsdocker_traefik_enabled_heimdall %}
25 | labels:
26 | - traefik.enable=true
27 | - traefik.http.services.heimdall-${COMPOSE_PROJECT}.loadbalancer.server.port=443
28 | - traefik.http.services.heimdall-${COMPOSE_PROJECT}.loadbalancer.server.scheme=https
29 | - traefik.http.routers.heimdall-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['heimdall']['proxy_host_rule'] | default('heimdall') }}.${HMSD_DOMAIN}`)
30 | {% if not hmsdocker_expose_public_enabled_heimdall %}
31 | - traefik.http.routers.heimdall-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
32 | {% endif %}
33 | {% if hmsdocker_authentik_enabled_heimdall %}
34 | - traefik.http.routers.heimdall-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-heimdall-midware@docker
35 | {% endif %}
36 | {% endif %}
37 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/homepage.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | homepage:
3 | image: ghcr.io/gethomepage/homepage:latest
4 | container_name: homepage
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - proxy_net
13 | volumes:
14 | - /var/run/docker.sock:/var/run/docker.sock
15 | - ${HMSD_APPS_PATH}/homepage/config:/app/config
16 | environment:
17 | - TZ=${TIMEZONE}
18 | - HOMEPAGE_ALLOWED_HOSTS={{ hms_docker_container_map['homepage']['proxy_host_rule'] | default('homepage') }}.${HMSD_DOMAIN},{{ ansible_default_ipv4.address }}:3000
19 | {% if hmsdocker_expose_ports_enabled_homepage %}
20 | ports:
21 | - 3000:3000
22 | {% endif %}
23 | {% if hmsdocker_traefik_enabled_homepage %}
24 | labels:
25 | - traefik.enable=true
26 | - traefik.http.services.homepage-${COMPOSE_PROJECT}.loadbalancer.server.port=3000
27 | - traefik.http.routers.homepage-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['homepage']['proxy_host_rule'] | default('homepage') }}.${HMSD_DOMAIN}`)
28 | {% if not hmsdocker_expose_public_enabled_homepage %}
29 | - traefik.http.routers.homepage-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
30 | {% endif %}
31 | {% if hmsdocker_authentik_enabled_homepage %}
32 | - traefik.http.routers.homepage-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-homepage-midware@docker
33 | {% endif %}
34 | {% endif %}
35 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/huntarr.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | huntarr:
3 | image: huntarr/huntarr:latest
4 | container_name: huntarr
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - proxy_net
13 | {% if hmsdocker_expose_ports_enabled_huntarr %}
14 | ports:
15 | - 9705:9705
16 | {% endif %}
17 | environment:
18 | - PUID=${PUID}
19 | - PGID=${PGID}
20 | - TZ=${TIMEZONE}
21 | volumes:
22 | - ${HMSD_APPS_PATH}/huntarr/config:/config
23 | {% if hmsdocker_traefik_enabled_huntarr or hmsdocker_homepage_enabled_huntarr %}
24 | labels:
25 | {% if hmsdocker_traefik_enabled_huntarr %}
26 | - traefik.enable=true
27 | - traefik.http.services.huntarr-${COMPOSE_PROJECT}.loadbalancer.server.port=9705
28 | - traefik.http.routers.huntarr-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['huntarr']['proxy_host_rule'] | default('huntarr') }}.${HMSD_DOMAIN}`)
29 | {% if not hmsdocker_expose_public_enabled_huntarr %}
30 | - traefik.http.routers.huntarr-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
31 | {% endif %}
32 | {% if hmsdocker_authentik_enabled_huntarr %}
33 | - traefik.http.routers.huntarr-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-huntarr-midware@docker
34 | {% endif %}
35 | {% endif %}
36 | {% if hmsdocker_homepage_enabled_huntarr %}
37 | - homepage.group=Managers
38 | - homepage.name=Huntarr
39 | - homepage.icon=huntarr.png
40 | - homepage.href=http://{{ hms_docker_container_map['huntarr']['proxy_host_rule'] | default('huntarr') }}.${HMSD_DOMAIN}
41 | - homepage.description=Media Download Manager
42 | {% if hmsdocker_homepage_stats_enabled_huntarr %}
43 | - homepage.showStats=true
44 | {% endif %}
45 | {% endif %}
46 | {% endif %}
47 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/jellyfin.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | jellyfin:
3 | image: lscr.io/linuxserver/jellyfin:latest
4 | container_name: jellyfin
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - proxy_net
13 | {% if enable_intel_gpu %}
14 | devices:
15 | - /dev/dri:/dev/dri
16 | {% endif %}
17 | {% if hmsdocker_expose_ports_enabled_jellyfin %}
18 | ports:
19 | - 8096:8096
20 | - 8920:8920 # optional
21 | - 7359:7359/udp # optional
22 | {% endif %}
23 | volumes:
24 | - ${HMSD_APPS_PATH}/jellyfin/config:/config
25 | - {{ plex_transcode_folder }}/cache:/cache
26 | # media folder where all movies and series are stored
27 | - ${HMSD_MOUNT_PATH}:/data
28 | environment:
29 | - TZ=${TIMEZONE}
30 | - PUID=${PUID}
31 | - PGID=${PGID}
32 | {% if enable_nvidia_gpu %}
33 | - NVIDIA_VISIBLE_DEVICES=all
34 | runtime: nvidia
35 | {% endif %}
36 | {% if hmsdocker_traefik_enabled_jellyfin or hmsdocker_homepage_enabled_jellyfin %}
37 | labels:
38 | {% if hmsdocker_traefik_enabled_jellyfin %}
39 | - traefik.enable=true
40 | - traefik.http.services.jellyfin-${COMPOSE_PROJECT}.loadbalancer.server.port=8096
41 | - traefik.http.routers.jellyfin-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['jellyfin']['proxy_host_rule'] | default('jellyfin') }}.${HMSD_DOMAIN}`)
42 | {% if not hmsdocker_expose_public_enabled_jellyfin %}
43 | - traefik.http.routers.jellyfin-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
44 | {% endif %}
45 | {% if hmsdocker_authentik_enabled_jellyfin %}
46 | - traefik.http.routers.jellyfin-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-jellyfin-midware@docker
47 | {% endif %}
48 | {% endif %}
49 | {% if hmsdocker_homepage_enabled_jellyfin %}
50 | - homepage.group=Media
51 | - homepage.name=Jellyfin
52 | - homepage.icon=jellyfin.png
53 | - homepage.href=http://{{ hms_docker_container_map['jellyfin']['proxy_host_rule'] | default('jellyfin') }}.${HMSD_DOMAIN}
54 | - homepage.description=Media Server
55 | - homepage.widget.type=jellyfin
56 | - homepage.widget.url=https://jellyfin:8096
57 | - homepage.widget.key=${JELLYFIN_KEY:-apikeyapikeyapikey}
58 | {% if hmsdocker_homepage_stats_enabled_jellyfin %}
59 | - homepage.showStats=true
60 | {% endif %}
61 | {% endif %}
62 | {% endif %}
63 |
--------------------------------------------------------------------------------
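A note on the enable_nvidia_gpu branch above: in the source template, `- NVIDIA_VISIBLE_DEVICES=all` is indented as an environment entry while `runtime: nvidia` is indented at the service level, so (assuming the usual indentation, which this listing flattens) the rendered YAML is valid:

    environment:
      - TZ=${TIMEZONE}
      - PUID=${PUID}
      - PGID=${PGID}
      - NVIDIA_VISIBLE_DEVICES=all
    runtime: nvidia

The same construct appears in the plex and tdarr templates.
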
/roles/hmsdocker/templates/containers/jellyseerr.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | jellyseerr:
3 | image: fallenbagel/jellyseerr:latest
4 | container_name: jellyseerr
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - proxy_net
13 | environment:
14 | - LOG_LEVEL=info
15 | - PUID=${PUID}
16 | - PGID=${PGID}
17 | - TZ=${TIMEZONE}
18 | {% if hmsdocker_traefik_enabled_jellyseerr or hmsdocker_homepage_enabled_jellyseerr %}
19 | labels:
20 | {% if hmsdocker_traefik_enabled_jellyseerr %}
21 | - traefik.enable=true
22 | - traefik.http.services.jellyseerr-${COMPOSE_PROJECT}.loadbalancer.server.port=5055
23 | - traefik.http.routers.jellyseerr-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['jellyseerr']['proxy_host_rule'] | default('jellyseerr') }}.${HMSD_DOMAIN}`)
24 | {% if not hmsdocker_expose_public_enabled_jellyseerr %}
25 | - traefik.http.routers.jellyseerr-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
26 | {% endif %}
27 | {% if hmsdocker_authentik_enabled_jellyseerr %}
28 | - traefik.http.routers.jellyseerr-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-jellyseerr-midware@docker
29 | {% endif %}
30 | {% endif %}
31 | {% if hmsdocker_homepage_enabled_jellyseerr %}
32 | - homepage.group=Managers
33 | - homepage.name=Jellyseerr
34 | - homepage.icon=jellyseerr.png
35 | - homepage.href=http://{{ hms_docker_container_map['jellyseerr']['proxy_host_rule'] | default('jellyseerr') }}.${HMSD_DOMAIN}
36 | - homepage.description=Request Manager
37 | - homepage.widget.type=jellyseerr
38 | - homepage.widget.url=http://jellyseerr:5055
39 | - homepage.widget.key=${JELLYSEERR_KEY:-apikeyapikeyapikey}
40 | {% if hmsdocker_homepage_stats_enabled_jellyseerr %}
41 | - homepage.showStats=true
42 | {% endif %}
43 | {% endif %}
44 | {% endif %}
45 | volumes:
46 | - ${HMSD_APPS_PATH}/jellyseerr/config:/app/config
47 | {% if hmsdocker_expose_ports_enabled_jellyseerr %}
48 | ports:
49 | - 5056:5055
50 | {% endif %}
51 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/kavita.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | kavita:
3 | image: lscr.io/linuxserver/kavita:latest
4 | container_name: kavita
5 | networks:
6 | - proxy_net
7 | restart: ${RESTART_POLICY}
8 | logging:
9 | options:
10 | max-size: "12m"
11 | max-file: "5"
12 | driver: json-file
13 | environment:
14 | - PUID=${PUID}
15 | - PGID=${PGID}
16 | - TZ=${TIMEZONE}
17 | volumes:
18 | - ${HMSD_APPS_PATH}/kavita/config:/config
19 | - ${HMSD_MOUNT_PATH}:/data
20 | {% if hmsdocker_expose_ports_enabled_kavita %}
21 | ports:
22 | - 5000:5000
23 | {% endif %}
24 | {% if hmsdocker_traefik_enabled_kavita %}
25 | labels:
26 | - traefik.enable=true
27 | - traefik.http.services.kavita-${COMPOSE_PROJECT}.loadbalancer.server.port=5000
28 | - traefik.http.routers.kavita-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['kavita']['proxy_host_rule'] | default('kavita') }}.${HMSD_DOMAIN}`)
29 | - traefik.http.routers.kavita-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
30 | {% if hmsdocker_authentik_enabled_kavita %}
31 | - traefik.http.routers.kavita-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-kavita-midware@docker
32 | {% endif %}
33 | {% endif %}
34 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/lidarr.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | lidarr:
3 | image: lscr.io/linuxserver/lidarr:latest
4 | container_name: lidarr
5 | restart: ${RESTART_POLICY}
6 | networks:
7 | - proxy_net
8 | - download_net
9 | logging:
10 | options:
11 | max-size: "12m"
12 | max-file: "5"
13 | driver: json-file
14 | {% if hmsdocker_expose_ports_enabled_lidarr %}
15 | ports:
16 | - 8686:8686
17 | {% endif %}
18 | volumes:
19 | - ${HMSD_APPS_PATH}/lidarr/config:/config
20 | - ${HMSD_MOUNT_PATH}:/data
21 | environment:
22 | - PUID=${PUID}
23 | - PGID=${PGID}
24 | - TZ=${TIMEZONE}
25 | {% if hmsdocker_traefik_enabled_lidarr or hmsdocker_homepage_enabled_lidarr %}
26 | labels:
27 | {% if hmsdocker_traefik_enabled_lidarr %}
28 | - traefik.enable=true
29 | - traefik.http.services.lidarr-${COMPOSE_PROJECT}.loadbalancer.server.port=8686
30 | - traefik.http.routers.lidarr-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['lidarr']['proxy_host_rule'] | default('lidarr') }}.${HMSD_DOMAIN}`)
31 | {% if not hmsdocker_expose_public_enabled_lidarr %}
32 | - traefik.http.routers.lidarr-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
33 | {% endif %}
34 | {% if hmsdocker_authentik_enabled_lidarr %}
35 | - traefik.http.routers.lidarr-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-lidarr-midware@docker
36 | {% endif %}
37 | {% endif %}
38 | {% if hmsdocker_homepage_enabled_lidarr %}
39 | - homepage.group=Managers
40 | - homepage.name=Lidarr
41 | - homepage.icon=lidarr.png
42 | - homepage.href=http://{{ hms_docker_container_map['lidarr']['proxy_host_rule'] | default('lidarr') }}.${HMSD_DOMAIN}
43 | - homepage.description=Music Manager
44 | - homepage.widget.type=lidarr
45 | - homepage.widget.url=http://lidarr:8686
46 | - homepage.widget.key=${LIDARR_KEY:-apikeyapikeyapikey}
47 | {% if hmsdocker_homepage_stats_enabled_lidarr %}
48 | - homepage.showStats=true
49 | {% endif %}
50 | {% endif %}
51 | {% endif %}
52 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/maintainerr.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | maintainerr:
3 | image: ghcr.io/jorenn92/maintainerr:latest
4 | container_name: maintainerr
5 | restart: ${RESTART_POLICY}
6 | networks:
7 | - proxy_net
8 | logging:
9 | options:
10 | max-size: "12m"
11 | max-file: "5"
12 | driver: json-file
13 | user: ${PUID}:${PGID}
14 | volumes:
15 | - ${HMSD_APPS_PATH}/maintainerr/config:/opt/data
16 | environment:
17 | - TZ=${TIMEZONE}
18 | {% if hmsdocker_expose_ports_enabled_maintainerr %}
19 | ports:
20 | - 6246:6246
21 | {% endif %}
22 | {% if hmsdocker_traefik_enabled_maintainerr %}
23 | labels:
24 | - traefik.enable=true
25 | - traefik.http.services.maintainerr-${COMPOSE_PROJECT}.loadbalancer.server.port=6246
26 | - traefik.http.routers.maintainerr-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['maintainerr']['proxy_host_rule'] | default('maintainerr') }}.${HMSD_DOMAIN}`)
27 | {% if not hmsdocker_expose_public_enabled_maintainerr %}
28 | - traefik.http.routers.maintainerr-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
29 | {% endif %}
30 | {% if hmsdocker_authentik_enabled_maintainerr %}
31 | - traefik.http.routers.maintainerr-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-maintainerr-midware@docker
32 | {% endif %}
33 | {% endif %}
34 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/netdata.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | netdata:
3 | image: netdata/netdata
4 | container_name: netdata
5 | pid: host
6 | restart: ${RESTART_POLICY}
7 | network_mode: host
8 | logging:
9 | options:
10 | max-size: "12m"
11 | max-file: "5"
12 | driver: json-file
13 | cap_add:
14 | - SYS_PTRACE
15 | - SYS_ADMIN
16 | security_opt:
17 | - apparmor:unconfined
18 | volumes:
19 | - netdataconfig:/etc/netdata
20 | - netdatalib:/var/lib/netdata
21 | - netdatacache:/var/cache/netdata
22 | - /:/host/root:ro,rslave
23 | - /etc/passwd:/host/etc/passwd:ro
24 | - /etc/group:/host/etc/group:ro
25 | - /etc/localtime:/etc/localtime:ro
26 | - /proc:/host/proc:ro
27 | - /sys:/host/sys:ro
28 | - /etc/os-release:/host/etc/os-release:ro
29 | - /var/log:/host/var/log:ro
30 | - /var/run/docker.sock:/var/run/docker.sock:ro
31 | environment:
32 | - NETDATA_CLAIM_TOKEN=${NETDATA_CLAIM_TOKEN}
33 | - NETDATA_CLAIM_URL=${NETDATA_CLAIM_URL}
34 | - NETDATA_CLAIM_ROOMS=${NETDATA_CLAIM_ROOMS}
35 |
36 | volumes:
37 | netdataconfig:
38 | netdatalib:
39 | netdatacache:
40 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/nzbget.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | nzbget:
3 | image: lscr.io/linuxserver/nzbget:latest
4 | container_name: nzbget
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - proxy_net
13 | - download_net
14 | environment:
15 | - PUID=${PUID}
16 | - PGID=${PGID}
17 | - TZ=${TIMEZONE}
18 | volumes:
19 | - ${HMSD_APPS_PATH}/nzbget/config:/config
20 | - ${HMSD_MOUNT_PATH}:/data
21 | {% if hmsdocker_expose_ports_enabled_nzbget %}
22 | ports:
23 | - 6789:6789
24 | {% endif %}
25 | {% if hmsdocker_traefik_enabled_nzbget or hmsdocker_homepage_enabled_nzbget %}
26 | labels:
27 | {% if hmsdocker_traefik_enabled_nzbget %}
28 | - traefik.enable=true
29 | - traefik.http.services.nzbget-${COMPOSE_PROJECT}.loadbalancer.server.port=6789
30 | - traefik.http.routers.nzbget-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['nzbget']['proxy_host_rule'] | default('nzbget') }}.${HMSD_DOMAIN}`)
31 | {% if not hmsdocker_expose_public_enabled_nzbget %}
32 | - traefik.http.routers.nzbget-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
33 | {% endif %}
34 | {% if hmsdocker_authentik_enabled_nzbget %}
35 | - traefik.http.routers.nzbget-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-nzbget-midware@docker
36 | {% endif %}
37 | {% endif %}
38 | {% if hmsdocker_homepage_enabled_nzbget %}
39 | - homepage.group=Downloads
40 | - homepage.name=NZBget
41 | - homepage.icon=nzbget.png
42 | - homepage.href=http://{{ hms_docker_container_map['nzbget']['proxy_host_rule'] | default('nzbget') }}.${HMSD_DOMAIN}
43 | - homepage.description=NZB Manager
44 | - homepage.widget.type=nzbget
45 | - homepage.widget.url=http://nzbget:8080
46 | - homepage.widget.key=${NZBGET_KEY:-apikeyapikeyapikey}
47 | {% if hmsdocker_homepage_stats_enabled_nzbget %}
48 | - homepage.showStats=true
49 | {% endif %}
50 | {% endif %}
51 | {% endif %}
52 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/overseerr.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | overseerr:
3 | image: linuxserver/overseerr:latest
4 | container_name: overseerr
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - proxy_net
13 | environment:
14 | - PUID=${PUID}
15 | - PGID=${PGID}
16 | - TZ=${TIMEZONE}
17 | {% if hmsdocker_traefik_enabled_overseerr or hmsdocker_homepage_enabled_overseerr %}
18 | labels:
19 | {% if hmsdocker_traefik_enabled_overseerr %}
20 | - traefik.enable=true
21 | - traefik.http.services.overseerr-${COMPOSE_PROJECT}.loadbalancer.server.port=5055
22 | - traefik.http.routers.overseerr-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['overseerr']['proxy_host_rule'] | default('overseerr') }}.${HMSD_DOMAIN}`)
23 | {% if not hmsdocker_expose_public_enabled_overseerr %}
24 | - traefik.http.routers.overseerr-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
25 | {% endif %}
26 | {% if hmsdocker_authentik_enabled_overseerr %}
27 | - traefik.http.routers.overseerr-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-overseerr-midware@docker
28 | {% endif %}
29 | {% endif %}
30 | {% if hmsdocker_homepage_enabled_overseerr %}
31 | - homepage.group=Managers
32 | - homepage.name=Overseerr
33 | - homepage.icon=overseerr.png
34 | - homepage.href=http://{{ hms_docker_container_map['overseerr']['proxy_host_rule'] | default('overseerr') }}.${HMSD_DOMAIN}
35 | - homepage.description=Request Manager
36 | - homepage.widget.type=overseerr
37 | - homepage.widget.url=http://overseerr:5055
38 | - homepage.widget.key=${OVERSEERR_KEY:-apikeyapikeyapikey}
39 | {% if hmsdocker_homepage_stats_enabled_overseerr %}
40 | - homepage.showStats=true
41 | {% endif %}
42 | {% endif %}
43 | {% endif %}
44 | volumes:
45 | - ${HMSD_APPS_PATH}/overseerr/config:/config
46 | {% if hmsdocker_expose_ports_enabled_overseerr %}
47 | ports:
48 | - 5055:5055
49 | {% endif %}
50 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/pasta.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | pasta:
3 | image: cglatot/pasta:latest
4 | container_name: pasta
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - proxy_net
13 | {% if hmsdocker_expose_ports_enabled_pasta %}
14 | ports:
15 | - 8085:80
16 | {% endif %}
17 | {% if hmsdocker_traefik_enabled_pasta %}
18 | labels:
19 | - traefik.enable=true
20 | - traefik.http.services.pasta-${COMPOSE_PROJECT}.loadbalancer.server.port=80
21 | - traefik.http.routers.pasta-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['pasta']['proxy_host_rule'] | default('pasta') }}.${HMSD_DOMAIN}`)
22 | {% if not hmsdocker_expose_public_enabled_pasta %}
23 | - traefik.http.routers.pasta-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
24 | {% endif %}
25 | {% if hmsdocker_authentik_enabled_pasta %}
26 | - traefik.http.routers.pasta-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-pasta-midware@docker
27 | {% endif %}
28 | {% endif %}
29 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/pinchflat.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | pinchflat:
3 | image: ghcr.io/kieraneglin/pinchflat:latest
4 | container_name: pinchflat
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - proxy_net
13 | environment:
14 | - TZ=${TIMEZONE}
15 | {% if hmsdocker_expose_ports_enabled_pinchflat %}
16 | ports:
17 | - 8945:8945
18 | {% endif %}
19 | volumes:
20 | - ${HMSD_APPS_PATH}/pinchflat/config:/config
21 | - ${HMSD_MOUNT_PATH}/{{ hms_docker_primary_mount_name }}/{{ hms_docker_library_folder_name }}/youtube/pinchflat:/downloads
22 | {% if hmsdocker_traefik_enabled_pinchflat or hmsdocker_homepage_enabled_pinchflat %}
23 | labels:
24 | {% if hmsdocker_traefik_enabled_pinchflat %}
25 | - traefik.enable=true
26 | - traefik.http.services.pinchflat-${COMPOSE_PROJECT}.loadbalancer.server.port=8945
27 | - traefik.http.routers.pinchflat-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['pinchflat']['proxy_host_rule'] | default('pinchflat') }}.${HMSD_DOMAIN}`)
28 | {% if not hmsdocker_expose_public_enabled_pinchflat %}
29 | - traefik.http.routers.pinchflat-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
30 | {% endif %}
31 | {% if hmsdocker_authentik_enabled_pinchflat %}
32 | - traefik.http.routers.pinchflat-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-pinchflat-midware@docker
33 | {% endif %}
34 | {% endif %}
35 | {% if hmsdocker_homepage_enabled_pinchflat %}
36 | - homepage.group=Managers
37 | - homepage.name=Pinchflat
38 | - homepage.icon=pinchflat.png
39 | - homepage.href=https://{{ hms_docker_container_map['pinchflat']['proxy_host_rule'] | default('pinchflat') }}.${HMSD_DOMAIN}
40 | - homepage.description=YouTube Archiver
41 | {% if hmsdocker_homepage_stats_enabled_pinchflat %}
42 | - homepage.showStats=true
43 | {% endif %}
44 | {% endif %}
45 | {% endif %}
46 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/plex.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | plex:
3 | image: lscr.io/linuxserver/plex:latest
4 | container_name: plex
5 | restart: ${RESTART_POLICY}
6 | {% if enable_intel_gpu %}
7 | devices:
8 | - /dev/dri:/dev/dri
9 | {% endif %}
10 | logging:
11 | options:
12 | max-size: "12m"
13 | max-file: "5"
14 | driver: json-file
15 | networks:
16 | - proxy_net
17 | ports:
18 | - 32400:32400/tcp
19 | - 3005:3005/tcp
20 | - 8324:8324/tcp
21 | - 32469:32469/tcp
22 | - 1900:1900/udp
23 | - 32410:32410/udp
24 | - 32412:32412/udp
25 | - 32413:32413/udp
26 | - 32414:32414/udp
27 | environment:
28 | - VERSION=docker
29 | - TZ=${TIMEZONE}
30 | - PUID=${PUID}
31 | - PGID=${PGID}
32 | - PLEX_CLAIM=${PLEX_CLAIM_TOKEN}
33 | - ADVERTISE_IP={{ plex_advertise_ip }}:32400
34 | - ALLOWED_NETWORKS={{ hms_docker_network_subnet }}
35 | {% if enable_nvidia_gpu %}
36 | - NVIDIA_VISIBLE_DEVICES=all
37 | runtime: nvidia
38 | {% endif %}
39 | volumes:
40 | - ${HMSD_APPS_PATH}/plex/config:/config
41 | # temp folder for Plex transcoding
42 | - {{ plex_transcode_folder }}:/transcode
43 | # media folder where all movies and series are stored
44 | - ${HMSD_MOUNT_PATH}:/data
45 | {% if hmsdocker_traefik_enabled_plex or hmsdocker_homepage_enabled_plex %}
46 | labels:
47 | {% if hmsdocker_traefik_enabled_plex %}
48 | - traefik.enable=true
49 | - traefik.http.services.plex-${COMPOSE_PROJECT}.loadbalancer.server.port=32400
50 | - traefik.http.services.plex-${COMPOSE_PROJECT}.loadbalancer.server.scheme=https
51 | - traefik.http.routers.plex-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['plex']['proxy_host_rule'] | default('plex') }}.${HMSD_DOMAIN}`)
52 | {% if not hmsdocker_expose_public_enabled_plex %}
53 | - traefik.http.routers.plex-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
54 | {% endif %}
55 | {% if hmsdocker_authentik_enabled_plex %}
56 | - traefik.http.routers.plex-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-plex-midware@docker
57 | {% endif %}
58 | {% endif %}
59 | {% if hmsdocker_homepage_enabled_plex %}
60 | - homepage.group=Media
61 | - homepage.name=Plex
62 | - homepage.icon=plex.png
63 | - homepage.href=http://{{ hms_docker_container_map['plex']['proxy_host_rule'] | default('plex') }}.${HMSD_DOMAIN}
64 | - homepage.description=Media Server
65 | - homepage.widget.type=plex
66 | - homepage.widget.url=https://plex:32400
67 | - homepage.widget.key=${PLEX_KEY:-apikeyapikeyapikey}
68 | {% if hmsdocker_homepage_stats_enabled_plex %}
69 | - homepage.showStats=true
70 | {% endif %}
71 | {% endif %}
72 | {% endif %}
73 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/portainer.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | portainer:
3 | image: portainer/portainer-ce:latest
4 | container_name: portainer
5 | command: -H unix:///var/run/docker.sock
6 | restart: ${RESTART_POLICY}
7 | logging:
8 | options:
9 | max-size: "12m"
10 | max-file: "5"
11 | driver: json-file
12 | networks:
13 | - proxy_net
14 | {% if hmsdocker_expose_ports_enabled_portainer %}
15 | ports:
16 | - 9000:9000
17 | {% endif %}
18 | volumes:
19 | - /var/run/docker.sock:/var/run/docker.sock
20 | - ${HMSD_APPS_PATH}/portainer/config:/data
21 | {% if hmsdocker_traefik_enabled_portainer or hmsdocker_homepage_enabled_portainer %}
22 | labels:
23 | {% if hmsdocker_traefik_enabled_portainer %}
24 | - traefik.enable=true
25 | - traefik.http.services.portainer-${COMPOSE_PROJECT}.loadbalancer.server.port=9000
26 | - traefik.http.routers.portainer-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['portainer']['proxy_host_rule'] | default('portainer') }}.${HMSD_DOMAIN}`)
27 | {% if not hmsdocker_expose_public_enabled_portainer %}
28 | - traefik.http.routers.portainer-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
29 | {% endif %}
30 | {% if hmsdocker_authentik_enabled_portainer %}
31 | - traefik.http.routers.portainer-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-portainer-midware@docker
32 | {% endif %}
33 | {% endif %}
34 | {% if hmsdocker_homepage_enabled_portainer %}
35 | - homepage.group=Infrastructure
36 | - homepage.name=Portainer
37 | - homepage.icon=portainer.png
38 | - homepage.href=http://{{ hms_docker_container_map['portainer']['proxy_host_rule'] | default('portainer') }}.${HMSD_DOMAIN}
39 | - homepage.description=Container Management
40 | - homepage.widget.type=portainer
41 | - homepage.widget.url=https://portainer:9443
42 | - homepage.widget.env=1
43 | - homepage.widget.key=${PORTAINER_KEY:-apikeyapikeyapikey}
44 | {% if hmsdocker_homepage_stats_enabled_portainer %}
45 | - homepage.showStats=true
46 | {% endif %}
47 | {% endif %}
48 | {% endif %}
49 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/prowlarr.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | prowlarr:
3 | image: linuxserver/prowlarr:latest
4 | container_name: prowlarr
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - download_net
13 | - proxy_net
14 | environment:
15 | - PUID=${PUID}
16 | - PGID=${PGID}
17 | - TZ=${TIMEZONE}
18 | volumes:
19 | - ${HMSD_APPS_PATH}/prowlarr/config:/config
20 | {% if hmsdocker_expose_ports_enabled_prowlarr %}
21 | ports:
22 | - 9696:9696
23 | {% endif %}
24 | {% if hmsdocker_traefik_enabled_prowlarr or hmsdocker_homepage_enabled_prowlarr %}
25 | labels:
26 | {% if hmsdocker_traefik_enabled_prowlarr %}
27 | - traefik.enable=true
28 | - traefik.http.services.prowlarr-${COMPOSE_PROJECT}.loadbalancer.server.port=9696
29 | - traefik.http.routers.prowlarr-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['prowlarr']['proxy_host_rule'] | default('prowlarr') }}.${HMSD_DOMAIN}`)
30 | {% if not hmsdocker_expose_public_enabled_prowlarr %}
31 | - traefik.http.routers.prowlarr-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
32 | {% endif %}
33 | {% if hmsdocker_authentik_enabled_prowlarr %}
34 | - traefik.http.routers.prowlarr-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-prowlarr-midware@docker
35 | {% endif %}
36 | {% endif %}
37 | {% if hmsdocker_homepage_enabled_prowlarr %}
38 | - homepage.group=Managers
39 | - homepage.name=Prowlarr
40 | - homepage.icon=prowlarr.png
41 | - homepage.href=http://{{ hms_docker_container_map['prowlarr']['proxy_host_rule'] | default('prowlarr') }}.${HMSD_DOMAIN}
42 | - homepage.description=Indexer Management
43 | - homepage.widget.type=prowlarr
44 | - homepage.widget.url=http://prowlarr:9696
45 | - homepage.widget.key=${PROWLARR_KEY:-apikeyapikeyapikey}
46 | {% if hmsdocker_homepage_stats_enabled_prowlarr %}
47 | - homepage.showStats=true
48 | {% endif %}
49 | {% endif %}
50 | {% endif %}
51 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/qbittorrent.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | qbittorrent:
3 | image: binhex/arch-qbittorrentvpn
4 | container_name: qbittorrent
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - download_net
13 | - proxy_net
14 | {% if hmsdocker_vpn_type == 'wireguard' %}
15 | privileged: true
16 | sysctls:
17 | - net.ipv4.conf.all.src_valid_mark=1
18 | {% else %}
19 | cap_add:
20 | - NET_ADMIN
21 | {% endif %}
22 | environment:
23 | - PUID=${PUID}
24 | - PGID=${PGID}
25 | - TZ=${TIMEZONE}
26 | - WEBUI_PORT=8086
27 | - VPN_ENABLED=yes
28 | - VPN_USER=${VPN_USER}
29 | - VPN_PASS=${VPN_PASS}
30 | - VPN_PROV=custom
31 | - VPN_CLIENT={{ hmsdocker_vpn_type }}
32 | - ENABLE_PRIVOXY=yes
33 | - LAN_NETWORK={{ hms_docker_network_subnet }}
34 | volumes:
35 | - ${HMSD_APPS_PATH}/qbittorrent/config:/config
36 | - ${HMSD_MOUNT_PATH}:/data
37 | {% if hmsdocker_expose_ports_enabled_qbittorrent %}
38 | ports:
39 | - 8086:8086
40 | - 8118:8118 # privoxy
41 | {% endif %}
42 | {% if hmsdocker_traefik_enabled_qbittorrent or hmsdocker_homepage_enabled_qbittorrent %}
43 | labels:
44 | {% if hmsdocker_traefik_enabled_qbittorrent %}
45 | - traefik.enable=true
46 | - traefik.http.services.qbittorrent-${COMPOSE_PROJECT}.loadbalancer.server.port=8086
47 | - traefik.http.routers.qbittorrent-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['qbittorrent']['proxy_host_rule'] | default('qbittorrent') }}.${HMSD_DOMAIN}`)
48 | {% if not hmsdocker_expose_public_enabled_qbittorrent %}
49 | - traefik.http.routers.qbittorrent-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
50 | {% endif %}
51 | {% if hmsdocker_authentik_enabled_qbittorrent %}
52 | - traefik.http.routers.qbittorrent-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-qbittorrent-midware@docker
53 | {% endif %}
54 | {% endif %}
55 | {% if hmsdocker_homepage_enabled_qbittorrent %}
56 | - homepage.group=Downloads
57 | - homepage.name=qBittorrent
58 | - homepage.icon=qbittorrent.png
59 | - homepage.href=http://{{ hms_docker_container_map['qbittorrent']['proxy_host_rule'] | default('qbittorrent') }}.${HMSD_DOMAIN}
60 | - homepage.description=Torrent Management
61 | - homepage.widget.type=qbittorrent
62 | - homepage.widget.url=http://qbittorrent:8086
63 | {% if hmsdocker_homepage_stats_enabled_qbittorrent %}
64 | - homepage.showStats=true
65 | {% endif %}
66 | {% endif %}
67 | {% endif %}
68 |
--------------------------------------------------------------------------------
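The VPN branch above swaps the container's privilege model by tunnel type: WireGuard needs a privileged container plus the src_valid_mark sysctl, while OpenVPN only needs the NET_ADMIN capability. With hmsdocker_vpn_type set to wireguard the service renders with:

    privileged: true
    sysctls:
      - net.ipv4.conf.all.src_valid_mark=1

and with any other value (i.e. openvpn):

    cap_add:
      - NET_ADMIN
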
/roles/hmsdocker/templates/containers/readarr.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | readarr:
3 | image: lscr.io/linuxserver/readarr:develop
4 | restart: ${RESTART_POLICY}
5 | container_name: readarr
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - proxy_net
13 | - download_net
14 | environment:
15 | - PUID=${PUID}
16 | - PGID=${PGID}
17 | - TZ=${TIMEZONE}
18 | volumes:
19 | - ${HMSD_APPS_PATH}/readarr/config:/config
20 | - ${HMSD_MOUNT_PATH}:/data
21 | {% if hmsdocker_expose_ports_enabled_readarr %}
22 | ports:
23 | - 8787:8787
24 | {% endif %}
25 | {% if hmsdocker_traefik_enabled_readarr or hmsdocker_homepage_enabled_readarr %}
26 | labels:
27 | {% if hmsdocker_traefik_enabled_readarr %}
28 | - traefik.enable=true
29 | - traefik.http.services.readarr-${COMPOSE_PROJECT}.loadbalancer.server.port=8787
30 | - traefik.http.routers.readarr-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['readarr']['proxy_host_rule'] | default('readarr') }}.${HMSD_DOMAIN}`)
31 | - traefik.http.routers.readarr-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
32 | {% if hmsdocker_authentik_enabled_readarr %}
33 | - traefik.http.routers.readarr-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-readarr-midware@docker
34 | {% endif %}
35 | {% endif %}
36 | {% if hmsdocker_homepage_enabled_readarr %}
37 | - homepage.group=Managers
38 | - homepage.name=Readarr
39 | - homepage.icon=readarr.png
40 | - homepage.href=http://{{ hms_docker_container_map['readarr']['proxy_host_rule'] | default('readarr') }}.${HMSD_DOMAIN}
41 | - homepage.description=Book Manager
42 | - homepage.widget.type=readarr
43 | - homepage.widget.url=http://readarr:8787
44 | - homepage.widget.key=${READARR_KEY:-apikeyapikeyapikey}
45 | {% if hmsdocker_homepage_stats_enabled_readarr %}
46 | - homepage.showStats=true
47 | {% endif %}
48 | {% endif %}
49 | {% endif %}
50 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/recyclarr.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | recyclarr:
3 | image: ghcr.io/recyclarr/recyclarr
4 | container_name: recyclarr
5 | restart: ${RESTART_POLICY}
6 | security_opt: ['no-new-privileges:true']
7 | logging:
8 | options:
9 | max-size: "12m"
10 | max-file: "5"
11 | driver: json-file
12 | user: ${PUID}:${PGID}
13 | networks:
14 | - proxy_net
15 | volumes:
16 | - ${HMSD_APPS_PATH}/recyclarr/config:/config
17 | environment:
18 | - TZ=${TIMEZONE}
19 | - RECYCLARR_CREATE_CONFIG=true
20 | - SONARR_KEY=${SONARR_KEY}
21 | - SONARR_4K_KEY=${SONARR_4K_KEY}
22 | - RADARR_KEY=${RADARR_KEY}
23 | - RADARR_4K_KEY=${RADARR_4K_KEY}
24 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/requestrr.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | requestrr:
3 | image: thomst08/requestrr:latest
4 | container_name: requestrr
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - proxy_net
13 | environment:
14 | - PUID=${PUID}
15 | - PGID=${PGID}
16 | - TZ=${TIMEZONE}
17 | volumes:
18 | - ${HMSD_APPS_PATH}/requestrr/config:/root/config
19 | {% if hmsdocker_expose_ports_enabled_requestrr %}
20 | ports:
21 | - 4545:4545
22 | {% endif %}
23 | {% if hmsdocker_traefik_enabled_requestrr %}
24 | labels:
25 | - traefik.enable=true
26 | - traefik.http.services.requestrr-${COMPOSE_PROJECT}.loadbalancer.server.port=4545
27 | - traefik.http.routers.requestrr-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['requestrr']['proxy_host_rule'] | default('requestrr') }}.${HMSD_DOMAIN}`)
28 | {% if not hmsdocker_expose_public_enabled_requestrr %}
29 | - traefik.http.routers.requestrr-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
30 | {% endif %}
31 | {% if hmsdocker_authentik_enabled_requestrr %}
32 | - traefik.http.routers.requestrr-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-requestrr-midware@docker
33 | {% endif %}
34 | {% endif %}
35 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/sabnzbd.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | sabnzbd:
3 | image: lscr.io/linuxserver/sabnzbd:latest
4 | container_name: sabnzbd
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - proxy_net
13 | - download_net
14 | environment:
15 | - PUID=${PUID}
16 | - PGID=${PGID}
17 | - TZ=${TIMEZONE}
18 | volumes:
19 | - ${HMSD_APPS_PATH}/sabnzbd/config:/config
20 | - ${HMSD_MOUNT_PATH}:/data
21 | {% if hmsdocker_expose_ports_enabled_sabnzbd %}
22 | ports:
23 | - 8082:8080
24 | {% endif %}
25 | {% if hmsdocker_traefik_enabled_sabnzbd or hmsdocker_homepage_enabled_sabnzbd %}
26 | labels:
27 | {% if hmsdocker_traefik_enabled_sabnzbd %}
28 | - traefik.enable=true
29 | - traefik.http.services.sabnzbd-${COMPOSE_PROJECT}.loadbalancer.server.port=8080
30 | - traefik.http.routers.sabnzbd-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['sabnzbd']['proxy_host_rule'] | default('sabnzbd') }}.${HMSD_DOMAIN}`)
31 | {% if not hmsdocker_expose_public_enabled_sabnzbd %}
32 | - traefik.http.routers.sabnzbd-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
33 | {% endif %}
34 | {% if hmsdocker_authentik_enabled_sabnzbd %}
35 | - traefik.http.routers.sabnzbd-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-sabnzbd-midware@docker
36 | {% endif %}
37 | {% endif %}
38 | {% if hmsdocker_homepage_enabled_sabnzbd %}
39 | - homepage.group=Downloads
40 | - homepage.name=SABnzbd
41 | - homepage.icon=sabnzbd.png
42 | - homepage.href=http://{{ hms_docker_container_map['sabnzbd']['proxy_host_rule'] | default('sabnzbd') }}.${HMSD_DOMAIN}
43 | - homepage.description=NZB Manager
44 | - homepage.widget.type=sabnzbd
45 | - homepage.widget.url=http://sabnzbd:8080
46 | - homepage.widget.key=${SABNZBD_KEY:-apikeyapikeyapikey}
47 | {% if hmsdocker_homepage_stats_enabled_sabnzbd %}
48 | - homepage.showStats=true
49 | {% endif %}
50 | {% endif %}
51 | {% endif %}
52 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/speedtest-tracker.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | speedtest-tracker:
3 | image: lscr.io/linuxserver/speedtest-tracker:latest
4 | container_name: speedtest-tracker
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - proxy_net
13 | {% if hmsdocker_expose_ports_enabled_speedtest %}
14 | ports:
15 | - 8090:80
16 | - 8444:443
17 | {% endif %}
18 | environment:
19 | - PUID=${PUID}
20 | - PGID=${PGID}
21 | - APP_KEY=${SPEEDTEST_API_KEY}
22 | - APP_URL=http://{{ hms_docker_container_map['speedtest-tracker']['proxy_host_rule'] | default('speedtest') }}.${HMSD_DOMAIN}
23 | - DB_CONNECTION=sqlite
24 | - SPEEDTEST_SCHEDULE=${SPEEDTEST_SCHEDULE}
25 | - APP_TIMEZONE=${TIMEZONE}
26 | - DISPLAY_TIMEZONE=${TIMEZONE}
27 | volumes:
28 | - ${HMSD_APPS_PATH}/speedtest-tracker/config:/config
29 | {% if hmsdocker_traefik_enabled_speedtest %}
30 | labels:
31 | - traefik.enable=true
32 | - traefik.http.services.speedtest-${COMPOSE_PROJECT}.loadbalancer.server.port=80
33 | - traefik.http.routers.speedtest-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['speedtest-tracker']['proxy_host_rule'] | default('speedtest') }}.${HMSD_DOMAIN}`)
34 | {% if not hmsdocker_expose_public_enabled_speedtest %}
35 | - traefik.http.routers.speedtest-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
36 | {% endif %}
37 | {% if hmsdocker_authentik_enabled_speedtest %}
38 | - traefik.http.routers.speedtest-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-speedtest-midware@docker
39 | {% endif %}
40 | {% endif %}
41 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/tailscale.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | tailscale:
3 | image: tailscale/tailscale:latest
4 | container_name: tailscale
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | cap_add:
12 | - NET_ADMIN
13 | - NET_RAW
14 | volumes:
15 | - '/var/lib:/var/lib'
16 | devices:
17 | - /dev/net/tun:/dev/net/tun
18 | environment:
19 | - TS_AUTHKEY=${TAILSCALE_AUTH_KEY}
20 | {% if tailscale_enable_subnet_routes %}
21 | - TS_ROUTES={{ tailscale_subnet_routes }}
22 | {% endif %}
23 | {% if tailscale_advertise_exit_node %}
24 | - TS_EXTRA_ARGS=--advertise-exit-node
25 | {% endif %}
26 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/tautulli.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | tautulli:
3 | image: tautulli/tautulli:latest
4 | container_name: tautulli
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - proxy_net
13 | environment:
14 | - PUID=${PUID}
15 | - PGID=${PGID}
16 | - TZ=${TIMEZONE}
17 | {% if hmsdocker_traefik_enabled_tautulli or hmsdocker_homepage_enabled_tautulli %}
18 | labels:
19 | {% if hmsdocker_traefik_enabled_tautulli %}
20 | - traefik.enable=true
21 | - traefik.http.services.tautulli-${COMPOSE_PROJECT}.loadbalancer.server.port=8181
22 | - traefik.http.routers.tautulli-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['tautulli']['proxy_host_rule'] | default('tautulli') }}.${HMSD_DOMAIN}`)
23 | {% if not hmsdocker_expose_public_enabled_tautulli %}
24 | - traefik.http.routers.tautulli-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
25 | {% endif %}
26 | {% if hmsdocker_authentik_enabled_tautulli %}
27 | - traefik.http.routers.tautulli-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-tautulli-midware@docker
28 | {% endif %}
29 | {% endif %}
30 | {% if hmsdocker_homepage_enabled_tautulli %}
31 | - homepage.group=Media
32 | - homepage.name=Tautulli
33 | - homepage.icon=tautulli.png
34 | - homepage.href=http://{{ hms_docker_container_map['tautulli']['proxy_host_rule'] | default('tautulli') }}.${HMSD_DOMAIN}
35 | - homepage.description=Media Analytics
36 | - homepage.widget.type=tautulli
37 | - homepage.widget.url=http://tautulli:8181
38 | - homepage.widget.key=${TAUTULLI_KEY:-apikeyapikeyapikey}
39 | {% if hmsdocker_homepage_stats_enabled_tautulli %}
40 | - homepage.showStats=true
41 | {% endif %}
42 | {% endif %}
43 | {% endif %}
44 | {% if hmsdocker_expose_ports_enabled_tautulli %}
45 | ports:
46 | - 8181:8181
47 | {% endif %}
48 | volumes:
49 | - ${HMSD_APPS_PATH}/tautulli/config:/config
50 | # Plex logs location
51 | - ${HMSD_APPS_PATH}/plex/config/Library/Application Support/Plex Media Server/Logs:/plex_logs:ro
52 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/tdarr.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | tdarr:
3 | container_name: tdarr
4 | image: ghcr.io/haveagitgat/tdarr:latest
5 | {% if tdarr_enable_intel_gpu %}
6 | devices:
7 | - /dev/dri:/dev/dri
8 | {% endif %}
9 | restart: ${RESTART_POLICY}
10 | logging:
11 | options:
12 | max-size: "12m"
13 | max-file: "5"
14 | driver: json-file
15 | networks:
16 | - proxy_net
17 | {% if hmsdocker_expose_ports_enabled_tdarr %}
18 | ports:
19 | - 8265:8265 # webUI port
20 | - 8266:8266 # server port
21 | #- 8267:8267 # Internal node port
22 | #- 8268:8268 # Example extra node port
23 | {% elif tdarr_enable_node_server %}
24 | ports:
25 | - 8266:8266
26 | {% endif %}
27 | environment:
28 | - PUID=${PUID}
29 | - PGID=${PGID}
30 | - TZ=${TIMEZONE}
31 | - UMASK_SET=002
32 | - serverIP=0.0.0.0
33 | - serverPort=8266
34 | - webUIPort=8265
35 | - internalNode=true
36 | - nodeName=MyInternalNode
37 | {% if tdarr_enable_nvidia_gpu %}
38 | - NVIDIA_VISIBLE_DEVICES=all
39 | - NVIDIA_DRIVER_CAPABILITIES=all
40 | runtime: nvidia
41 | {% endif %}
42 | {% if hmsdocker_traefik_enabled_tdarr or hmsdocker_homepage_enabled_tdarr %}
43 | labels:
44 | {% if hmsdocker_traefik_enabled_tdarr %}
45 | - traefik.enable=true
46 | - traefik.http.services.tdarr-${COMPOSE_PROJECT}.loadbalancer.server.port=8265
47 | - traefik.http.routers.tdarr-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['tdarr']['proxy_host_rule'] | default('tdarr') }}.${HMSD_DOMAIN}`)
48 | {% if not hmsdocker_expose_public_enabled_tdarr %}
49 | - traefik.http.routers.tdarr-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
50 | {% endif %}
51 | {% if hmsdocker_authentik_enabled_tdarr %}
52 | - traefik.http.routers.tdarr-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-tdarr-midware@docker
53 | {% endif %}
54 | {% endif %}
55 | {% if hmsdocker_homepage_enabled_tdarr %}
56 | - homepage.group=Managers
57 | - homepage.name=Tdarr
58 | - homepage.icon=tdarr.png
59 | - homepage.href=http://{{ hms_docker_container_map['tdarr']['proxy_host_rule'] | default('tdarr') }}.${HMSD_DOMAIN}
60 | - homepage.description=Transcode Manager
61 | - homepage.widget.type=tdarr
62 | - homepage.widget.url=http://tdarr:8265
63 | {% if hmsdocker_homepage_stats_enabled_tdarr %}
64 | - homepage.showStats=true
65 | {% endif %}
66 | {% endif %}
67 | {% endif %}
68 | volumes:
69 | - ${HMSD_APPS_PATH}/tdarr/config/server:/app/server
70 | - ${HMSD_APPS_PATH}/tdarr/config/app:/app/configs
71 | - ${HMSD_APPS_PATH}/tdarr/config/logs:/app/logs
72 | - {{ tdarr_transcode_folder }}:/temp_transcode
73 | - ${HMSD_MOUNT_PATH}:/data
74 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/tinymediamanager.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | tinymediamanager:
3 | image: tinymediamanager/tinymediamanager:latest
4 | container_name: tinymediamanager
5 | restart: ${RESTART_POLICY}
6 | {% if hmsdocker_expose_ports_enabled_tinymediamanager %}
7 | ports:
8 | - 5900:5900 # VNC port
9 | - 4000:4000 # web interface
10 | {% endif %}
11 | networks:
12 | - proxy_net
13 | logging:
14 | options:
15 | max-size: "12m"
16 | max-file: "5"
17 | driver: json-file
18 | environment:
19 | - USER_ID=${PUID}
20 | - GROUP_ID=${PGID}
21 | - ALLOW_DIRECT_VNC=true
22 | - LC_ALL=en_US.UTF-8 # force UTF8
23 | - LANG=en_US.UTF-8 # force UTF8
24 | - PASSWORD=${TMM_VNC_PASSWORD}
25 | - TZ=${TIMEZONE}
26 | volumes:
27 | - ${HMSD_APPS_PATH}/tinymediamanager/config:/data
28 | - ${HMSD_MOUNT_PATH}:/media
29 | {% if hmsdocker_traefik_enabled_tinymediamanager %}
30 | labels:
31 | - traefik.enable=true
32 | - traefik.http.services.tinymediamanager-${COMPOSE_PROJECT}.loadbalancer.server.port=4000
33 | - traefik.http.routers.tinymediamanager-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['tinymediamanager']['proxy_host_rule'] | default('tmm') }}.${HMSD_DOMAIN}`)
34 | {% if not hmsdocker_expose_public_enabled_tinymediamanager %}
35 | - traefik.http.routers.tinymediamanager-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
36 | {% endif %}
37 | {% if hmsdocker_authentik_enabled_tinymediamanager %}
38 | - traefik.http.routers.tinymediamanager-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-tinymediamanager-midware@docker
39 | {% endif %}
40 | {% endif %}
41 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/traefik.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | traefik:
3 | image: traefik:${TRAEFIK_TAG}
4 | container_name: traefik
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | ports:
12 | - 80:80
13 | - 443:443
14 | - 8080:8080
15 | environment:
16 | - TZ=${TIMEZONE}
17 | - PUID=${PUID}
18 | - PGID=${PGID}
19 | {% if traefik_ssl_dns_provider_environment_vars %}
20 | {% for key, value in traefik_ssl_dns_provider_environment_vars.items() %}
21 | {# Ansible would otherwise interpret the curly brackets itself, so 'raw' statements are used to render the ${} around the 'key' variable, letting the rendered label reference the .env file variable #}
22 | - {{ key }}={% raw %}${{% endraw %}{{ key }}{% raw %}}{% endraw +%}
23 | {% endfor %}
24 | {% endif %}
25 | networks:
26 | - proxy_net
27 | volumes:
28 | - /var/run/docker.sock:/var/run/docker.sock
29 | - ${HMSD_APPS_PATH}/traefik/config/traefik.yml:/etc/traefik/traefik.yml
30 | - ${HMSD_APPS_PATH}/traefik/config/certs/:/certs/
31 | - {{ hmsdocker_traefik_static_config_location }}:/etc/traefik/static_files
32 | {% if hmsdocker_traefik_enabled_traefik or hmsdocker_homepage_enabled_traefik %}
33 | labels:
34 | {% if hmsdocker_traefik_enabled_traefik %}
35 | - traefik.enable=true
36 | - traefik.http.routers.traefik-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['traefik']['proxy_host_rule'] | default('traefik') }}.${HMSD_DOMAIN}`)
37 | - traefik.http.services.traefik-${COMPOSE_PROJECT}.loadbalancer.server.port=8080
38 | - traefik.http.routers.traefik-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
39 | {% if hmsdocker_authentik_enabled_traefik %}
40 | - traefik.http.routers.traefik-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-traefik-midware@docker
41 | {% endif %}
42 | {% endif %}
43 | {% if hmsdocker_homepage_enabled_traefik %}
44 | - homepage.group=Infrastructure
45 | - homepage.name=Traefik
46 | - homepage.icon=traefik.png
47 | - homepage.href=http://{{ hms_docker_container_map['traefik']['proxy_host_rule'] | default('traefik') }}.${HMSD_DOMAIN}
48 | - homepage.description=Reverse Proxy
49 | - homepage.widget.type=traefik
50 | - homepage.widget.url=http://traefik:8080
51 | {% if hmsdocker_homepage_stats_enabled_traefik %}
52 | - homepage.showStats=true
53 | {% endif %}
54 | {% endif %}
55 | {% endif %}
56 |
--------------------------------------------------------------------------------
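To make the 'raw' workaround on line 22 concrete: assuming a hypothetical Cloudflare DNS setup where traefik_ssl_dns_provider_environment_vars maps CF_DNS_API_TOKEN to a secret, the loop renders:

    environment:
      - TZ=${TIMEZONE}
      - PUID=${PUID}
      - PGID=${PGID}
      - CF_DNS_API_TOKEN=${CF_DNS_API_TOKEN}

i.e. Ansible emits the literal ${CF_DNS_API_TOKEN}, and Docker Compose later resolves it from the .env file.
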
/roles/hmsdocker/templates/containers/unpackerr.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | unpackerr:
3 | image: golift/unpackerr:latest
4 | container_name: unpackerr
5 | logging:
6 | options:
7 | max-size: "12m"
8 | max-file: "5"
9 | driver: json-file
10 | restart: ${RESTART_POLICY}
11 | user: ${PUID}:${PGID}
12 | networks:
13 | - proxy_net
14 | volumes:
15 | - ${HMSD_MOUNT_PATH}:/data
16 | environment:
17 | - TZ=${TIMEZONE}
18 | {% for key, value in hmsdocker_unpackerr_additional_env_vars.items() %}
19 | - {{ key }}={{ value }}
20 | {% endfor %}
21 | {% if hmsdocker_container_enabled_sonarr %}
22 | ## Sonarr Settings
23 | - UN_SONARR_0_URL=http://sonarr:8989
24 | - UN_SONARR_0_API_KEY=${SONARR_KEY}
25 | - UN_SONARR_0_PATHS_0=/data
26 | - UN_SONARR_0_PROTOCOLS=torrent
27 | - UN_SONARR_0_TIMEOUT=10s
28 | - UN_SONARR_0_DELETE_DELAY=5m
29 | - UN_SONARR_0_DELETE_ORIG=false
30 | - UN_SONARR_0_SYNCTHING=false
31 | {% endif %}
32 | {% if hmsdocker_container_enabled_radarr %}
33 | ## Radarr Settings
34 | - UN_RADARR_0_URL=http://radarr:7878
35 | - UN_RADARR_0_API_KEY=${RADARR_KEY}
36 | - UN_RADARR_0_PATHS_0=/data
37 | - UN_RADARR_0_PROTOCOLS=torrent
38 | - UN_RADARR_0_TIMEOUT=10s
39 | - UN_RADARR_0_DELETE_DELAY=5m
40 | - UN_RADARR_0_DELETE_ORIG=false
41 | - UN_RADARR_0_SYNCTHING=false
42 | {% endif %}
43 | {% if hmsdocker_container_enabled_readarr %}
44 | ## Readarr Settings
45 | - UN_READARR_0_URL=http://readarr:8787
46 | - UN_READARR_0_API_KEY=${READARR_KEY}
47 | - UN_READARR_0_PATHS_0=/data
48 | - UN_READARR_0_PROTOCOLS=torrent
49 | - UN_READARR_0_TIMEOUT=10s
50 | - UN_READARR_0_DELETE_DELAY=5m
51 | - UN_READARR_0_DELETE_ORIG=false
52 | - UN_READARR_0_SYNCTHING=false
53 | {% endif %}
54 | {% if hmsdocker_container_enabled_lidarr %}
55 | ## Lidarr Settings
56 | - UN_LIDARR_0_URL=http://lidarr:8686
57 | - UN_LIDARR_0_API_KEY=${LIDARR_KEY}
58 | - UN_LIDARR_0_PATHS_0=/data
59 | - UN_LIDARR_0_PROTOCOLS=torrent
60 | - UN_LIDARR_0_TIMEOUT=10s
61 | - UN_LIDARR_0_DELETE_DELAY=5m
62 | - UN_LIDARR_0_DELETE_ORIG=false
63 | - UN_LIDARR_0_SYNCTHING=false
64 | {% endif %}
65 |
--------------------------------------------------------------------------------
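Unpackerr's env-based config is zero-indexed per app, which is why every variable above carries a _0_. A second Sonarr instance would presumably be wired up by incrementing the index (hypothetical sketch; the template only emits index 0):

      - UN_SONARR_1_URL=http://sonarr-4k:8989
      - UN_SONARR_1_API_KEY=${SONARR_4K_KEY}
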
/roles/hmsdocker/templates/containers/uptimekuma.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | uptime-kuma:
3 | image: louislam/uptime-kuma:latest
4 | container_name: uptime-kuma
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - proxy_net
13 | volumes:
14 | - ${HMSD_APPS_PATH}/uptimekuma/config:/app/data
15 | - /var/run/docker.sock:/var/run/docker.sock
16 | {% if hmsdocker_expose_ports_enabled_uptimekuma %}
17 | ports:
18 | - 3001:3001
19 | {% endif %}
20 | environment:
21 | - PUID=${PUID}
22 | - PGID=${PGID}
23 | - TZ=${TIMEZONE}
24 | {% if hmsdocker_traefik_enabled_uptimekuma %}
25 | labels:
26 | - traefik.enable=true
27 | - traefik.http.services.uptimekuma-${COMPOSE_PROJECT}.loadbalancer.server.port=3001
28 | - traefik.http.routers.uptimekuma-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['uptimekuma']['proxy_host_rule'] | default('uptime-kuma') }}.${HMSD_DOMAIN}`)
29 | {% if not hmsdocker_expose_public_enabled_uptimekuma %}
30 | - traefik.http.routers.uptimekuma-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
31 | {% endif %}
32 | {% if hmsdocker_authentik_enabled_uptimekuma %}
33 | - traefik.http.routers.uptimekuma-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-uptimekuma-midware@docker
34 | {% endif %}
35 | {% endif %}
36 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/containers/watchtower.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | watchtower:
3 | image: containrrr/watchtower:latest
4 | container_name: watchtower
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | command: --cleanup --schedule "{{ container_auto_update_schedule }}"
12 | volumes:
13 | - /var/run/docker.sock:/var/run/docker.sock
14 | {% if hmsdocker_container_enabled_homepage %}
15 | networks:
16 | - proxy_net
17 | environment:
18 | - WATCHTOWER_HTTP_API_TOKEN=${WATCHTOWER_KEY}
19 | - WATCHTOWER_HTTP_API_METRICS=true
20 | labels:
21 | - homepage.group=Infrastructure
22 | - homepage.name=Watchtower
23 | - homepage.icon=watchtower.png
24 | - homepage.description=Container Updates
25 | - homepage.widget.type=watchtower
26 | - homepage.widget.url=http://watchtower:8080
27 | - homepage.widget.key=${WATCHTOWER_KEY:-apikeyapikeyapikey}
28 | {% endif %}
29 |
--------------------------------------------------------------------------------
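Watchtower's --schedule flag takes a 6-field cron expression with a leading seconds field, so container_auto_update_schedule is expected to hold something like (illustrative value):

container_auto_update_schedule: "0 0 4 * * *"   # every day at 04:00:00
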
/roles/hmsdocker/templates/containers/wizarr.yml.j2:
--------------------------------------------------------------------------------
1 | services:
2 | wizarr:
3 | image: ghcr.io/wizarrrr/wizarr:latest
4 | container_name: wizarr
5 | restart: ${RESTART_POLICY}
6 | logging:
7 | options:
8 | max-size: "12m"
9 | max-file: "5"
10 | driver: json-file
11 | networks:
12 | - proxy_net
13 | volumes:
14 | - ${HMSD_APPS_PATH}/wizarr/config:/data/database
15 | environment:
16 | - TZ=${TIMEZONE}
17 | {% if hmsdocker_expose_ports_enabled_wizarr %}
18 | ports:
19 | - 5690:5690
20 | {% endif %}
21 | {% if hmsdocker_traefik_enabled_wizarr %}
22 | labels:
23 | - traefik.enable=true
24 | - traefik.http.services.wizarr-${COMPOSE_PROJECT}.loadbalancer.server.port=5690
25 | - traefik.http.routers.wizarr-${COMPOSE_PROJECT}.rule=Host(`{{ hms_docker_container_map['wizarr']['proxy_host_rule'] | default('wizarr') }}.${HMSD_DOMAIN}`)
26 | {% if not hmsdocker_expose_public_enabled_wizarr %}
27 | - traefik.http.routers.wizarr-${COMPOSE_PROJECT}.middlewares=internal-ipallowlist@file
28 | {% endif %}
29 | {% if hmsdocker_authentik_enabled_wizarr %}
30 | - traefik.http.routers.wizarr-${COMPOSE_PROJECT}.middlewares=authentik-proxy-${COMPOSE_PROJECT}-wizarr-midware@docker
31 | {% endif %}
32 | {% endif %}
33 |
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/docker-compose.yml.j2:
--------------------------------------------------------------------------------
1 | include:
2 | {# The default() filters are here because the ".dest" attribute is only available in non-check mode or when the file already exists (an issue when creating for the first time) #}
{% for file in compose_files_created.results %}
{% if file.item != 'authentik' %}
  - {{ file.dest | default(compose_dir.path + '/' + file.item + '.yml') }}
{% endif %}
{% endfor %}
{% if container_enable_auto_updates and watchtower_compose_file_path is defined %}
  - {{ watchtower_compose_file_path.dest | default(compose_dir.path + '/watchtower.yml') }}
{% endif %}
{% if (cloudflare_tunnel_enabled or cloudflare_ddns_enabled) and cloudflare_compose_file_path is defined %}
  - {{ cloudflare_compose_file_path.dest | default(compose_dir.path + '/cloudflare.yml') }}
{% endif %}
{% if hmsdocker_authentik_enabled_globally and authentik_compose_file_path is defined %}
  - {{ authentik_compose_file_path.dest | default(compose_dir.path + '/authentik.yml') }}
{% endif %}
{% if hmsdocker_container_enabled_tailscale and tailscale_compose_file_path is defined %}
  - {{ tailscale_compose_file_path.dest | default(compose_dir.path + '/tailscale.yml') }}
{% endif %}
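
{# Three isolated bridge networks keep download, media, and reverse-proxy traffic separated; only proxy_net is attachable. #}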
networks:
  download_net:
    driver: bridge
    attachable: false
  media_net:
    driver: bridge
    attachable: false
  proxy_net:
    driver: bridge
    attachable: true
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/hmsd_traefik_middlewares.yml.j2:
--------------------------------------------------------------------------------
http:
  middlewares:
    internal-ipallowlist:
      ipAllowList:
        sourceRange:
          - "127.0.0.1/32"
{% for address in traefik_subnet_allow_list | split(', ') %}
          - "{{ address }}"
{% endfor %}
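{# 0.0.0.0/0 matches every source address, so this allowlist is effectively a pass-through for routes that are intentionally public. #}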
    external-ipallowlist:
      ipAllowList:
        sourceRange:
          - "0.0.0.0/0"
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/recyclarr_conf.yml.j2:
--------------------------------------------------------------------------------
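{# API keys are injected at runtime through Recyclarr's !env_var YAML tag, so no secrets are rendered into this file. #}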
{% if hmsdocker_container_enabled_sonarr %}
sonarr:
  sonarr:
    base_url: http://sonarr:8989
    api_key: !env_var SONARR_KEY

{% if separate_4k_instances_enable %}
  sonarr-4k:
    base_url: http://sonarr-4k:8989
    api_key: !env_var SONARR_4K_KEY
{% endif %}
{% endif %}

{% if hmsdocker_container_enabled_radarr %}
radarr:
  radarr:
    base_url: http://radarr:7878
    api_key: !env_var RADARR_KEY

{% if separate_4k_instances_enable %}
  radarr-4k:
    base_url: http://radarr-4k:7878
    api_key: !env_var RADARR_4K_KEY
{% endif %}
{% endif %}
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/traefik.yml.j2:
--------------------------------------------------------------------------------
global:
  checkNewVersion: false
  sendAnonymousUsage: false

log:
  level: "{{ traefik_log_level }}"

{% if traefik_enable_access_logs %}
accessLog: {}
{% endif %}

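{# "insecure" mode serves the dashboard and API unauthenticated on Traefik's internal port 8080; it is presumably only reached through the proxied, allowlisted dashboard router rather than a directly published port. #}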
api:
  dashboard: true
  insecure: true

entryPoints:
  web:
    address: :80
{% if traefik_ssl_enabled %}
    http:
      redirections:
        entryPoint:
          to: websecure
          scheme: https

  websecure:
    address: :443
    http:
      tls:
        certResolver: letsencrypt
        domains:
          - main: "{{ traefik_ssl_dns_provider_zone }}"
            sans:
{% for san in traefik_ssl_sans %}
              - "{{ san }}"
{% endfor %}

certificatesResolvers:
  letsencrypt:
    acme:
      email: "{{ traefik_ssl_letsencrypt_email }}"
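{# Staging certificates go to a separate store, presumably so that switching back to the production CA does not reuse untrusted staging certs. #}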
{% if traefik_ssl_use_letsencrypt_staging_url %}
      caServer: "https://acme-staging-v02.api.letsencrypt.org/directory"
      storage: /certs/acme_staging.json
{% else %}
      storage: /certs/acme.json
{% endif %}
      dnsChallenge:
        provider: "{{ traefik_ssl_dns_provider_code }}"
        resolvers:
          - "{{ traefik_ssl_dns_resolver_1 }}"
          - "{{ traefik_ssl_dns_resolver_2 }}"
{% endif %}

{% if traefik_security_hardening and traefik_ssl_enabled %}
tls:
  options:
    default:
      minVersion: VersionTLS12

    mintls13:
      minVersion: VersionTLS13
{% endif %}

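{# Upstream TLS verification is skipped, typically because backends present self-signed certificates. #}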
serversTransport:
  insecureSkipVerify: true

providers:
  docker:
    network: "{{ project_name }}_proxy_net"
    endpoint: unix:///var/run/docker.sock
    exposedByDefault: false
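    {# The default rule below is a Go template (hence the raw blocks): containers are routed as <compose service name>.<domain>, falling back to the container name when the compose service label is absent. #}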
    defaultRule: {% raw %}Host(`{{if index .Labels "com.docker.compose.service" }}{{ index .Labels "com.docker.compose.service" }}{% endraw %}.{{ hms_docker_domain }}{% raw %}{{else}}{{ trimPrefix `/` .Name }}{% endraw %}.{{ hms_docker_domain }}{% raw %}{{end}}`){% endraw +%}
  file:
    directory: /etc/traefik/static_files
    watch: true
--------------------------------------------------------------------------------
/roles/hmsdocker/templates/traefik_additional_routes.yml.j2:
--------------------------------------------------------------------------------
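{# Rendered once per 'item' by the including task, which is also assumed to define 'regex' and 'replace'; mapping over the name string applies the substitution character by character to sanitize Traefik router and service names. #}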
http:
  routers:
    {{ item.friendly_name | map('regex_replace', regex, replace) | list | join }}-{{ project_name }}:
      entryPoints:
{% if traefik_ssl_enabled %}
        - websecure
{% else %}
        - web
{% endif %}
{% if hmsdocker_authentik_enabled_globally and item.authentik is defined and item.authentik %}
      middlewares:
        - authentik-proxy-{{ project_name }}-{{ item.friendly_name | map('regex_replace', regex, replace) | list | join }}-midware@docker
{% endif %}
      service: {{ (item.friendly_name + "_svc") | map('regex_replace', regex, replace) | list | join }}
      rule: Host(`{{ item.subdomain_name }}.{{ hms_docker_domain }}`)

  services:
    {{ (item.friendly_name + "_svc") | map('regex_replace', regex, replace) | list | join }}:
      loadBalancer:
        servers:
          - url: {{ item.backend_url }}
        passHostHeader: false
--------------------------------------------------------------------------------