├── .github ├── ISSUE_TEMPLATE.md ├── PULL_REQUEST_TEMPLATE.md └── workflows │ └── linting.yml ├── .gitignore ├── .markdownlint.yml ├── LICENSE ├── README.md ├── funnel-serve ├── funnel-example.json └── serve-example.json ├── images ├── tailscale-funnel.png └── tailscale-serve.png ├── services ├── adguardhome-sync │ ├── .env │ ├── README.md │ └── docker-compose.yml ├── adguardhome │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── audiobookshelf │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── bazarr │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── beszel │ ├── README.md │ ├── agent │ │ ├── .env │ │ └── docker-compose.yml │ └── hub │ │ ├── .env │ │ ├── config │ │ └── serve.json │ │ └── docker-compose.yml ├── caddy │ ├── .env │ ├── Caddyfile │ ├── README.md │ └── docker-compose.yml ├── changedetection │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── clipcascade │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── cyberchef │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── ddns-updater │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── donetick │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ ├── docker-compose.yml │ └── donetick-data │ │ └── config │ │ └── selfhosted.yaml ├── dozzle │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ ├── docker-compose.yml │ └── dozzle-data │ │ └── users.yml ├── dumbdo │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── eigenfocus │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── excalidraw │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── flatnotes │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── ghost │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── gokapi │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── haptic │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── homarr │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── home-assistant │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── isley │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── it-tools │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── jellyfin │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── kaneo │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── karakeep │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── languagetool │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── linkding │ ├── .env │ ├── .linkding.env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── metube │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── miniqr │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── nanote │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── nessus │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── 
nextcloud │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── nodered │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── pihole │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── pingvin-share │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── plex │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── pocket-id │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── portainer │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── qbittorrent │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── radarr │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── resilio-sync │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── searxng │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── slink │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── sonarr │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── speedtest-tracker │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ ├── docker-compose.yml │ └── nginx │ │ └── default.conf ├── stirlingpdf │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── tailscale-exit-node │ ├── .env │ ├── README.md │ └── docker-compose.yml ├── tautulli │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── technitium │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── traefik │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ ├── docker-compose.yml │ └── traefik │ │ └── app │ │ └── traefik.yml ├── uptime-kuma │ ├── .env │ ├── README.md │ ├── config │ │ └── serve.json │ └── docker-compose.yml └── vaultwarden │ ├── .env │ ├── README.md │ ├── config │ └── serve.json │ └── docker-compose.yml └── templates └── service-template ├── .env ├── README.md ├── config └── serve.json └── docker-compose.yml /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | # Issue Title: [Brief Summary of the Issue] 2 | 3 | ## Description 4 | 5 | Provide a clear and concise description of the issue: 6 | 7 | - What is the problem? 8 | - Where does it occur (specific service or file)? 9 | - Steps to reproduce (if applicable) 10 | 11 | ## Expected Behaviour 12 | 13 | Describe what you expected to happen. 14 | 15 | ## Actual Behaviour 16 | 17 | Describe what actually happened. 18 | 19 | ## Screenshots (if applicable) 20 | 21 | Include any relevant screenshots or error logs that might help in troubleshooting. 22 | 23 | ## Environment 24 | 25 | - **OS**: [e.g., Ubuntu 22.04] 26 | - **Docker Version**: [e.g., 20.10.8] 27 | - **Tailscale Version**: [e.g., 1.32.3] 28 | - **Other dependencies**: [Specify versions of relevant libraries or tools] 29 | 30 | ## Additional Context 31 | 32 | Add any other context about the problem here. 
33 | 34 | ## Checklist 35 | 36 | - [ ] I have searched for similar issues in this repository 37 | - [ ] I have updated to the latest version (if applicable) 38 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | # Pull Request Title: [Short Description of Changes] 2 | 3 | ## Description 4 | 5 | Provide a brief description of the changes made in this pull request. Include the following: 6 | 7 | - The purpose of the changes 8 | - Any background context or additional details 9 | 10 | ## Related Issues 11 | 12 | - Issue #[issue number] (e.g., Fixes #[issue number]) 13 | 14 | ## Type of Change 15 | 16 | - [ ] Bug fix 17 | - [ ] New feature 18 | - [ ] Documentation update 19 | - [ ] Refactoring 20 | 21 | ## How Has This Been Tested? 22 | 23 | Describe the tests you ran to verify your changes. Provide instructions for reproducing the testing process: 24 | 25 | 1. [Test case 1] 26 | 2. [Test case 2] 27 | 28 | ## Checklist 29 | 30 | - [ ] I have performed a self-review of my code 31 | - [ ] I have added tests that prove my fix or feature works 32 | - [ ] I have updated necessary documentation (e.g. frontpage `README.md`) 33 | - [ ] Any dependent changes have been merged and published in downstream modules 34 | 35 | ## Screenshots (if applicable) 36 | 37 | If visual changes are made, include screenshots to demonstrate them. 38 | 39 | ## Additional Notes 40 | 41 | Add any additional comments or information that may be relevant. 42 | -------------------------------------------------------------------------------- /.github/workflows/linting.yml: -------------------------------------------------------------------------------- 1 | name: Perform markdown linting 2 | on: 3 | workflow_dispatch: 4 | push: 5 | branches: 6 | - main 7 | paths-ignore: 8 | - '.github/**' 9 | # - 'README.md' 10 | - 'LICENSE' 11 | - '.gitignore' 12 | - '.gitattributes' 13 | - '.editorconfig' 14 | pull_request: 15 | branches: 16 | - main 17 | paths-ignore: 18 | - '.github/**' 19 | # - 'README.md' 20 | - 'LICENSE' 21 | - '.gitignore' 22 | - '.gitattributes' 23 | - '.editorconfig' 24 | 25 | jobs: 26 | lint: 27 | runs-on: ubuntu-latest 28 | steps: 29 | - name: Clone this repo 30 | uses: actions/checkout@v4 31 | 32 | - name: Markdown Linting Action 33 | id: lint 34 | uses: avto-dev/markdown-lint@v1.5.0 35 | with: 36 | config: "./.markdownlint.yml" 37 | args: "./services/**/*.md" 38 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by https://www.toptal.com/developers/gitignore/api/macos 2 | # Edit at https://www.toptal.com/developers/gitignore?templates=macos 3 | 4 | ### macOS ### 5 | # General 6 | .DS_Store 7 | .AppleDouble 8 | .LSOverride 9 | 10 | # Icon must end with two \r 11 | Icon 12 | 13 | 14 | # Thumbnails 15 | ._* 16 | 17 | # Files that might appear in the root of a volume 18 | .DocumentRevisions-V100 19 | .fseventsd 20 | .Spotlight-V100 21 | .TemporaryItems 22 | .Trashes 23 | .VolumeIcon.icns 24 | .com.apple.timemachine.donotpresent 25 | 26 | # Directories potentially created on remote AFP share 27 | .AppleDB 28 | .AppleDesktop 29 | Network Trash Folder 30 | Temporary Items 31 | .apdisk 32 | 33 | ### macOS Patch ### 34 | # iCloud generated files 35 | *.icloud 36 | 37 | # End of https://www.toptal.com/developers/gitignore/api/macos 
-------------------------------------------------------------------------------- /.markdownlint.yml: -------------------------------------------------------------------------------- 1 | { "default": true, "MD013": false, "MD033": false } 2 | 3 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 2Tiny2Scale 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /funnel-serve/funnel-example.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:80" 12 | } 13 | } 14 | } 15 | }, 16 | "AllowFunnel": { 17 | "${TS_CERT_DOMAIN}:443": true 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /funnel-serve/serve-example.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:80" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /images/tailscale-funnel.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/2Tiny2Scale/ScaleTail/8ac9389f0ba0e0882c1574c05e4936a41e93a5cb/images/tailscale-funnel.png -------------------------------------------------------------------------------- /images/tailscale-serve.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/2Tiny2Scale/ScaleTail/8ac9389f0ba0e0882c1574c05e4936a41e93a5cb/images/tailscale-serve.png -------------------------------------------------------------------------------- /services/adguardhome-sync/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=adguardhome-sync 5 | IMAGE_URL=ghcr.io/bakito/adguardhome-sync 6 | 
SERVICEPORT=8080 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/adguardhome-sync/README.md: -------------------------------------------------------------------------------- 1 | # AdGuardHome Sync with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up **[AdGuardHome Sync](https://github.com/bakito/adguardhome-sync)** with Tailscale as a sidecar container to securely synchronize your AdGuard Home instances over a private Tailscale network. By integrating Tailscale, you ensure that configuration data is transmitted securely between nodes and accessible only to authorized devices in your private network. 4 | 5 | ## AdGuardHome Sync 6 | 7 | [AdGuardHome Sync](https://github.com/bakito/adguardhome-sync) is a **lightweight tool for synchronizing configuration between multiple AdGuard Home servers**. It supports syncing DNS settings, clients, rules, rewrites, and more—making it ideal for managing AdGuard Home across multiple networks or locations. Whether you're managing redundant setups or simply keeping home and remote deployments in sync, this tool helps you maintain consistency and saves time. 8 | 9 | ## Key Features 10 | 11 | * **Multi-Node Syncing** – Keep multiple AdGuard Home instances in sync effortlessly. 12 | * **Granular Configuration** – Choose which parts of the configuration to sync (rules, clients, rewrites, etc.). 13 | * **Push or Pull Modes** – Use a master-push or node-pull strategy to fit your setup. 14 | * **Self-Hosted** – Fully local, no third-party service required. 15 | * **Secure Access with Tailscale** – Safely connect and sync instances across private networks using Tailscale. 16 | 17 | ## Configuration Overview 18 | 19 | In this setup, the `tailscale-adguardhome-sync` service runs Tailscale, which manages secure networking for the AdGuardHome Sync service. The `adguardhome-sync` container uses the Tailscale network stack via Docker’s `network_mode: service:` configuration. This ensures that all sync communication is confined to your private Tailscale network, preventing exposure to the public internet. 
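
For orientation, the sidecar pattern described above reduces to two Compose services sharing one network stack. Below is a minimal sketch distilled from the full `docker-compose.yml` files in this repository; the real files add health checks, a Tailscale Serve configuration, and optional DNS and port settings that are omitted here for brevity:

```yaml
services:
  tailscale:
    image: tailscale/tailscale:latest
    hostname: adguardhome-sync            # Name this node will use on your Tailnet
    environment:
      - TS_AUTHKEY=${TS_AUTHKEY}          # Auth key from the Tailscale admin console
      - TS_STATE_DIR=/var/lib/tailscale   # Persist the node identity across restarts
    volumes:
      - ${PWD}/ts/state:/var/lib/tailscale
    devices:
      - /dev/net/tun:/dev/net/tun         # Required for kernel-mode networking
    cap_add:
      - net_admin

  application:
    image: ghcr.io/bakito/adguardhome-sync
    network_mode: service:tailscale       # Reuse the sidecar's network stack
    depends_on:
      - tailscale
```

Because the `application` container has no network stack of its own, every connection it makes or accepts flows through the Tailscale node; nothing is published to the host unless you explicitly add a `ports:` mapping on the `tailscale` service.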
20 | -------------------------------------------------------------------------------- /services/adguardhome/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=adguardhome 5 | IMAGE_URL=adguard/adguardhome:latest 6 | SERVICEPORT=53 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 -------------------------------------------------------------------------------- /services/adguardhome/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:80" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/audiobookshelf/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=audiobookshelf 5 | IMAGE_URL=ghcr.io/advplyr/audiobookshelf 6 | SERVICEPORT=13378 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/audiobookshelf/README.md: -------------------------------------------------------------------------------- 1 | # Audiobookshelf with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Audiobookshelf](https://github.com/advplyr/audiobookshelf) with Tailscale as a sidecar container to securely access and manage your audiobook and podcast library over a private Tailscale network. By integrating Tailscale, you can ensure that your Audiobookshelf instance remains private and accessible only to devices within your Tailscale network. 4 | 5 | ## Audiobookshelf 6 | 7 | [Audiobookshelf](https://github.com/advplyr/audiobookshelf) is an open-source self-hosted application for managing and streaming audiobooks and podcasts. It offers features like multi-user support, playback progress sync, a web player, and mobile app integrations, making it easy to organize and enjoy your audiobook and podcast collection from anywhere. By adding Tailscale, you can protect your library from unauthorized access while maintaining seamless and secure connectivity for all your devices. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-audiobookshelf` service runs Tailscale, which manages secure networking for the Audiobookshelf service. The `audiobookshelf` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This ensures that Audiobookshelf’s web interface and streaming capabilities are only accessible through the Tailscale network (or locally, if preferred), providing an extra layer of security and privacy for your personal audiobook and podcast collection. 
12 | -------------------------------------------------------------------------------- /services/audiobookshelf/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:13378" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/audiobookshelf/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # Make sure you have updated/checked the .env file with the correct variables. 3 | # All the ${ xx } need to be defined there. 4 | # Tailscale Sidecar Configuration 5 | tailscale: 6 | image: tailscale/tailscale:latest # Image to be used 7 | container_name: tailscale-${SERVICE} # Name for local container management 8 | hostname: ${SERVICE} # Name used within your Tailscale environment 9 | environment: 10 | - TS_AUTHKEY=${TS_AUTHKEY} 11 | - TS_STATE_DIR=/var/lib/tailscale 12 | - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required 13 | - TS_USERSPACE=false 14 | - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" 15 | - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The address:port for the healthz endpoint 16 | #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS 17 | volumes: 18 | - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path 19 | - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path 20 | devices: 21 | - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work 22 | cap_add: 23 | - net_admin # Tailscale requirement 24 | - sys_module # Tailscale requirement 25 | #ports: 26 | # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required 27 | # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below 28 | # dns: 29 | # - ${DNS_SERVER} 30 | healthcheck: 31 | test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational 32 | interval: 1m # How often to perform the check 33 | timeout: 10s # Time to wait for the check to succeed 34 | retries: 3 # Number of retries before marking as unhealthy 35 | start_period: 10s # Time to wait before starting health checks 36 | restart: always 37 | 38 | # ${SERVICE} 39 | application: 40 | image: ${IMAGE_URL} # Image to be used 41 | network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale 42 | container_name: app-${SERVICE} # Name for local container management 43 | environment: 44 | - PUID=1000 45 | - PGID=1000 46 | - TZ=Europe/Amsterdam 47 | volumes: 48 | - ${PWD}/${SERVICE}-data/app/config:/config 49 | depends_on: 50 | tailscale: 51 | condition: service_healthy 52 | healthcheck: 53 | test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running 54 | interval: 1m # How often to perform the check 55 | timeout: 10s # Time to wait for the check to succeed 56 | retries: 3 # Number of retries before marking as unhealthy 57 | start_period: 30s # Time to wait before starting health checks 58 | restart: always 59 |
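
With the `.env` file populated (at minimum `TS_AUTHKEY`), a stack like the one above is typically started from its service directory with `docker compose up -d`. Once the Tailscale container reports healthy, the web interface should be reachable from your Tailnet at `https://audiobookshelf.<your-tailnet>.ts.net`, per the `serve.json` above; the hostname assumes the default `SERVICE=audiobookshelf` value from the `.env` file.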
-------------------------------------------------------------------------------- /services/bazarr/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=bazarr 5 | IMAGE_URL=lscr.io/linuxserver/bazarr:latest 6 | SERVICEPORT=6767 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 -------------------------------------------------------------------------------- /services/bazarr/README.md: -------------------------------------------------------------------------------- 1 | # Bazarr with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Bazarr](https://github.com/morpheus65535/bazarr) with Tailscale as a sidecar container to securely manage and access your subtitle management system over a private Tailscale network. By using Tailscale in a sidecar configuration, you can enhance the security and privacy of your Bazarr instance, ensuring that it is only accessible within your Tailscale network. 4 | 5 | ## Bazarr 6 | 7 | [Bazarr](https://github.com/morpheus65535/bazarr) is an open-source, self-hosted application for managing and downloading subtitles for your movies and TV shows. It works in conjunction with other media managers like Sonarr and Radarr to automatically search for and download subtitles from various sources. This configuration leverages Tailscale to securely connect to your Bazarr instance, ensuring that your subtitle management interface is protected from unauthorized access and that your instance is accessible only via your private Tailscale network. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-bazarr` service runs Tailscale, which manages secure networking for the Bazarr service. The `bazarr` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This setup ensures that Bazarr’s web interface and API are only accessible through the Tailscale network (or locally, if preferred), providing an extra layer of security and privacy for your self-hosted subtitle manager. 12 | -------------------------------------------------------------------------------- /services/bazarr/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:6767" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/beszel/README.md: -------------------------------------------------------------------------------- 1 | # Beszel with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration integrates [Beszel](https://github.com/henrygd/beszel) with Tailscale in a sidecar setup so that all monitoring traffic travels over a private Tailscale network. By utilizing Tailscale, this configuration ensures that the metrics and dashboards handled by Beszel remain secure and private within your Tailscale network. Thanks to @[henrygd](https://github.com/henrygd) for the tool development. 4 | 5 | ## Beszel 6 | 7 | [Beszel](https://github.com/henrygd/beszel) is a lightweight, open-source server monitoring platform with a hub-and-agent architecture.
It collects system metrics such as CPU, memory, disk, and network usage, along with per-container Docker statistics, and presents them in a web dashboard with historical data and alerts. This configuration leverages Beszel in conjunction with Tailscale to ensure that all monitoring data is transmitted over a private, encrypted network. 8 | 9 | ### Hub 10 | 11 | The [Hub](hub) is the core component of Beszel: a web application that receives metrics from connected agents, stores their history, and serves the monitoring dashboard. In this configuration, the Beszel Hub runs in its own Docker service and is secured by the Tailscale sidecar, ensuring that all traffic to and from the Hub is encrypted and restricted to your Tailscale network. 12 | 13 | ### Agent 14 | 15 | The [Agent](agent) is the client-side component that runs on each machine you want to monitor and reports its system and Docker metrics to the Hub. Multiple agents can connect to a single Hub, enabling monitoring across different devices. The Agent also benefits from the Tailscale sidecar, ensuring that its communication with the Hub is conducted over a secure, private network. 16 | 17 | ## Configuration Overview 18 | 19 | In this setup, the `tailscale-beszel` service runs Tailscale, which manages secure networking for the Beszel service. The `beszel` service connects to the Tailscale network stack using Docker's `network_mode: service:` configuration. This setup guarantees that Beszel's dashboard and agent connections are only accessible through the Tailscale network, providing an extra layer of security and privacy. 20 | -------------------------------------------------------------------------------- /services/beszel/agent/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=beszel-agent 5 | IMAGE_URL=henrygd/beszel-agent:latest 6 | SERVICEPORT=45876 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/beszel/agent/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # Make sure you have updated/checked the .env file with the correct variables. 3 | # All the ${ xx } need to be defined there.
4 | # Tailscale Sidecar Configuration 5 | tailscale: 6 | image: tailscale/tailscale:latest # Image to be used 7 | container_name: tailscale-${SERVICE} # Name for local container management 8 | hostname: ${SERVICE} # Name used within your Tailscale environment 9 | environment: 10 | - TS_AUTHKEY=${TS_AUTHKEY} 11 | - TS_STATE_DIR=/var/lib/tailscale 12 | - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required 13 | - TS_USERSPACE=false 14 | - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" 15 | - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The address:port for the healthz endpoint 16 | #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS 17 | volumes: 18 | - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path 19 | - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path 20 | devices: 21 | - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work 22 | cap_add: 23 | - net_admin # Tailscale requirement 24 | - sys_module # Tailscale requirement 25 | #ports: 26 | # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required 27 | # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below 28 | # dns: 29 | # - ${DNS_SERVER} 30 | healthcheck: 31 | test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational 32 | interval: 1m # How often to perform the check 33 | timeout: 10s # Time to wait for the check to succeed 34 | retries: 3 # Number of retries before marking as unhealthy 35 | start_period: 10s # Time to wait before starting health checks 36 | restart: always 37 | 38 | # ${SERVICE} 39 | application: 40 | image: ${IMAGE_URL} # Image to be used 41 | network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale 42 | container_name: app-${SERVICE} # Name for local container management 43 | environment: 44 | PORT: 45876 45 | KEY: "ssh-ed25519 " 46 | volumes: 47 | - /var/run/docker.sock:/var/run/docker.sock:ro # Read-only access to the docker.sock 48 | depends_on: 49 | tailscale: 50 | condition: service_healthy 51 | healthcheck: 52 | test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running 53 | interval: 1m # How often to perform the check 54 | timeout: 10s # Time to wait for the check to succeed 55 | retries: 3 # Number of retries before marking as unhealthy 56 | start_period: 30s # Time to wait before starting health checks 57 | restart: always -------------------------------------------------------------------------------- /services/beszel/hub/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=beszel-hub 5 | IMAGE_URL=henrygd/beszel:latest 6 | SERVICEPORT=8090 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 -------------------------------------------------------------------------------- /services/beszel/hub/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 |
"Proxy": "http://127.0.0.1:8090" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/beszel/hub/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # Make sure you have updated/checked the .env file with the correct variables. 3 | # All the ${ xx } need to be defined there. 4 | # Tailscale Sidecar Configuration 5 | tailscale: 6 | image: tailscale/tailscale:latest # Image to be used 7 | container_name: tailscale-${SERVICE} # Name for local container management 8 | hostname: ${SERVICE} # Name used within your Tailscale environment 9 | environment: 10 | - TS_AUTHKEY=${TS_AUTHKEY} 11 | - TS_STATE_DIR=/var/lib/tailscale 12 | - TS_SERVE_CONFIG=/config/serve.json # Tailsacale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required 13 | - TS_USERSPACE=false 14 | - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" 15 | - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The : for the healthz endpoint 16 | #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS 17 | volumes: 18 | - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path 19 | - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path 20 | devices: 21 | - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work 22 | cap_add: 23 | - net_admin # Tailscale requirement 24 | - sys_module # Tailscale requirement 25 | #ports: 26 | # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICE}PORT to the local network - may be removed if only exposure to your Tailnet is required 27 | # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below 28 | # dns: 29 | # - ${DNS_SERVER} 30 | healthcheck: 31 | test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational 32 | interval: 1m # How often to perform the check 33 | timeout: 10s # Time to wait for the check to succeed 34 | retries: 3 # Number of retries before marking as unhealthy 35 | start_period: 10s # Time to wait before starting health checks 36 | restart: always 37 | 38 | # ${SERVICE} 39 | application: 40 | image: ${IMAGE_URL} # Image to be used 41 | network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale 42 | container_name: app-${SERVICE} # Name for local container management 43 | environment: 44 | - PUID=1000 45 | - PGID=1000 46 | - TZ=Europe/Amsterdam 47 | volumes: 48 | - ${PWD}/${SERVICE}-data/beszel_data:/beszel_data # Work directory for Beszel Hub - you may need to change the path 49 | depends_on: 50 | tailscale: 51 | condition: service_healthy 52 | healthcheck: 53 | test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running 54 | interval: 1m # How often to perform the check 55 | timeout: 10s # Time to wait for the check to succeed 56 | retries: 3 # Number of retries before marking as unhealthy 57 | start_period: 30s # Time to wait before starting health checks 58 | restart: always -------------------------------------------------------------------------------- /services/caddy/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same 
infra 4 | SERVICE=caddy # If this is changed, update the healthcheck in docker-compose.yml (services:caddy_proxy:healthcheck) to use the literal string caddy, and update the FQDN in the Caddyfile. 5 | IMAGE_URL=caddy:latest 6 | SERVICEPORT=80 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/caddy/Caddyfile: -------------------------------------------------------------------------------- 1 | # ${SERVICE}.magicDNSname.ts.net 2 | http://caddy.MagicDNSname.ts.net { 3 | # respond "Hello, world!" 4 | reverse_proxy http://whoami 5 | } 6 | -------------------------------------------------------------------------------- /services/caddy/README.md: -------------------------------------------------------------------------------- 1 | # Caddy with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Caddy](https://github.com/caddyserver/caddy-docker) with Tailscale as a sidecar container to securely manage and route your traffic over a private Tailscale network. By integrating Tailscale, you can enhance the security and privacy of your Caddy instance, ensuring that access is restricted to devices within your Tailscale network. 4 | 5 | ## Caddy 6 | 7 | [Caddy](https://github.com/caddyserver/caddy-docker) is an extensible platform for deploying long-running services ("apps") using a single, unified configuration. It is enterprise-ready, open source, and provides automatic HTTPS. By incorporating Tailscale, your Caddy instance is safeguarded, ensuring that only authorized users and devices on your Tailscale network can access your applications and services. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-caddy` service runs Tailscale, which manages secure networking for the Caddy service. The `caddy_proxy` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This ensures that Caddy’s dashboard and routing functionalities are only accessible through the Tailscale network (or locally, if preferred), adding an extra layer of privacy and security to your network architecture. 12 | 13 | To get this working: 14 | 15 | - Update the FQDN in `Caddyfile` to match your `${SERVICE}.MagicDNSname.ts.net`. 16 | - Update `TS_AUTHKEY` in the `.env` file with your Tailscale auth key. 17 | 18 | If you change the `SERVICE=caddy` line in the `.env` file, the hostname portion of the FQDN in the Caddyfile must be updated as well. Additionally, replace `$SERVICE` in the `services:caddy_proxy:healthcheck` section of `docker-compose.yml` with the literal string `caddy`. 19 | 20 | The example `docker-compose.yml` uses a simple web server (`whoami`) for testing purposes. 21 | 22 | If you have [HTTPS](https://tailscale.com/kb/1153/enabling-https) and [MagicDNS](https://tailscale.com/kb/1081/magicdns) enabled in your Tailscale dashboard, remove the `http://` prefix from the Caddyfile and Caddy should automatically provision a public HTTPS certificate from Let's Encrypt via the Tailscale infrastructure. The certificate takes ~20 seconds to be procured upon first visit. This is further documented in [Caddy certificates on Tailscale](https://tailscale.com/kb/1190/caddy-certificates).
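
For reference, once HTTPS and MagicDNS are enabled, the example Caddyfile above reduces to the following; the hostname is the same placeholder used in the example and must be replaced with your actual MagicDNS name:

```
# Without the http:// prefix, Caddy provisions an HTTPS certificate via Tailscale
caddy.MagicDNSname.ts.net {
    reverse_proxy http://whoami
}
```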
23 | -------------------------------------------------------------------------------- /services/changedetection/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=changedetection 5 | IMAGE_URL=ghcr.io/dgtlmoon/changedetection.io 6 | SERVICEPORT=5000 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/changedetection/README.md: -------------------------------------------------------------------------------- 1 | # ChangeDetection.io with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [ChangeDetection.io](https://github.com/dgtlmoon/changedetection.io) with Tailscale as a sidecar container to securely monitor and access website changes over a private Tailscale network. By using Tailscale in a sidecar configuration, you can ensure that your ChangeDetection.io instance is only accessible within your Tailscale network, providing enhanced security and privacy. 4 | 5 | ## ChangeDetection.io 6 | 7 | [ChangeDetection.io](https://github.com/dgtlmoon/changedetection.io) is an open-source tool for tracking changes on websites. Whether monitoring prices, content updates, or new product launches, it provides an easy-to-use interface for tracking and alerting you to changes. By integrating Tailscale, you can securely connect to your ChangeDetection.io instance, ensuring that your sensitive tracking information and alerts are protected from unauthorized access. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-changedetection` service runs Tailscale, which manages secure networking for the ChangeDetection.io service. The `changedetection` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This setup ensures that ChangeDetection.io’s web interface is only accessible through the Tailscale network (or locally, if preferred), adding an extra layer of security and privacy to your website monitoring setup. 12 | -------------------------------------------------------------------------------- /services/changedetection/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:5000" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/changedetection/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # Make sure you have updated/checked the .env file with the correct variables. 3 | # All the ${ xx } need to be defined there. 
4 | # Tailscale Sidecar Configuration 5 | tailscale: 6 | image: tailscale/tailscale:latest # Image to be used 7 | container_name: tailscale-${SERVICE} # Name for local container management 8 | hostname: ${SERVICE} # Name used within your Tailscale environment 9 | environment: 10 | - TS_AUTHKEY=${TS_AUTHKEY} 11 | - TS_STATE_DIR=/var/lib/tailscale 12 | - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required 13 | - TS_USERSPACE=false 14 | - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" 15 | - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The address:port for the healthz endpoint 16 | #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS 17 | volumes: 18 | - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path 19 | - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path 20 | devices: 21 | - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work 22 | cap_add: 23 | - net_admin # Tailscale requirement 24 | - sys_module # Tailscale requirement 25 | #ports: 26 | # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required 27 | # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below 28 | # dns: 29 | # - ${DNS_SERVER} 30 | healthcheck: 31 | test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational 32 | interval: 1m # How often to perform the check 33 | timeout: 10s # Time to wait for the check to succeed 34 | retries: 3 # Number of retries before marking as unhealthy 35 | start_period: 10s # Time to wait before starting health checks 36 | restart: always 37 | 38 | # ${SERVICE} 39 | application: 40 | image: ${IMAGE_URL} # Image to be used 41 | network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale 42 | container_name: app-${SERVICE} # Name for local container management 43 | environment: 44 | - PUID=1000 45 | - PGID=1000 46 | - TZ=Europe/Amsterdam 47 | volumes: 48 | - ${PWD}/${SERVICE}-data/datastore:/datastore 49 | depends_on: 50 | tailscale: 51 | condition: service_healthy 52 | healthcheck: 53 | test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running 54 | interval: 1m # How often to perform the check 55 | timeout: 10s # Time to wait for the check to succeed 56 | retries: 3 # Number of retries before marking as unhealthy 57 | start_period: 30s # Time to wait before starting health checks 58 | restart: always -------------------------------------------------------------------------------- /services/clipcascade/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=clipcascade 5 | IMAGE_URL=sathvikrao/clipcascade 6 | SERVICEPORT=8080 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/clipcascade/README.md: -------------------------------------------------------------------------------- 1 | # ClipCascade with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up
[ClipCascade](https://github.com/Sathvik-Rao/ClipCascade) with Tailscale as a sidecar container to securely manage and access your clipboard history over a private Tailscale network. By integrating Tailscale, you can ensure that your ClipCascade instance remains private and accessible only to authorized devices on your Tailscale network. 4 | 5 | ## ClipCascade 6 | 7 | [ClipCascade](https://github.com/Sathvik-Rao/ClipCascade) is a self-hosted, open-source clipboard manager that synchronizes and organizes clipboard history across devices. It offers features like an intuitive web interface, multi-device clipboard synchronization, and searchable history, making it an essential tool for productivity and seamless workflows. By leveraging Tailscale, your ClipCascade instance remains secure and accessible only to devices within your private network. 8 | 9 | ## Key Features 10 | 11 | - **Multi-Device Sync**: Synchronize clipboard history across multiple devices. 12 | - **Searchable History**: Easily search and retrieve past clipboard entries. 13 | - **Self-Hosted Privacy**: Keep your clipboard data secure and private. 14 | - **User-Friendly Interface**: Manage clipboard history through an intuitive web interface. 15 | 16 | ## Configuration Overview 17 | 18 | In this setup, the `tailscale-clipcascade` service runs Tailscale, which manages secure networking for the ClipCascade service. The `clipcascade` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This ensures that ClipCascade’s web interface and functionality are only accessible through the Tailscale network (or locally, if preferred), providing enhanced privacy and security for managing your clipboard history. 19 | -------------------------------------------------------------------------------- /services/clipcascade/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:8080" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/cyberchef/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=cyberchef 5 | IMAGE_URL=ghcr.io/gchq/cyberchef 6 | SERVICEPORT=80 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/cyberchef/README.md: -------------------------------------------------------------------------------- 1 | # CyberChef with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [CyberChef](https://github.com/gchq/CyberChef) with Tailscale as a sidecar container to securely access your data analysis and manipulation tool over a private Tailscale network. By using Tailscale in a sidecar configuration, you can enhance the security and privacy of your CyberChef instance, ensuring that it is only accessible within your Tailscale network. 4 | 5 | ## CyberChef 6 | 7 | [CyberChef](https://github.com/gchq/CyberChef) is an open-source web application designed to simplify the process of carrying out complex data analysis and encoding/decoding operations. 
It features a user-friendly drag-and-drop interface that enables users to create "recipes" for analyzing and manipulating data. This configuration leverages Tailscale to securely connect to your CyberChef instance, ensuring that your powerful data tool is protected from unauthorized access and that it is only accessible via your private Tailscale network. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-cyberchef` service runs Tailscale, which manages secure networking for the CyberChef service. The `cyberchef` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This setup ensures that CyberChef’s web interface is only accessible through the Tailscale network (or locally, if preferred), providing an extra layer of security and privacy for your self-hosted data analysis tool. 12 | -------------------------------------------------------------------------------- /services/cyberchef/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:80" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/cyberchef/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # Make sure you have updated/checked the .env file with the correct variables. 3 | # All the ${ xx } need to be defined there. 4 | # Tailscale Sidecar Configuration 5 | tailscale: 6 | image: tailscale/tailscale:latest # Image to be used 7 | container_name: tailscale-${SERVICE} # Name for local container management 8 | hostname: ${SERVICE} # Name used within your Tailscale environment 9 | environment: 10 | - TS_AUTHKEY=${TS_AUTHKEY} 11 | - TS_STATE_DIR=/var/lib/tailscale 12 | - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required 13 | - TS_USERSPACE=false 14 | - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" 15 | - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The address:port for the healthz endpoint 16 | #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS 17 | volumes: 18 | - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path 19 | - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path 20 | devices: 21 | - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work 22 | cap_add: 23 | - net_admin # Tailscale requirement 24 | - sys_module # Tailscale requirement 25 | ports: 26 | - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required 27 | # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below 28 | # dns: 29 | # - ${DNS_SERVER} 30 | healthcheck: 31 | test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational 32 | interval: 1m # How often to perform the check 33 | timeout: 10s # Time to wait for the check to succeed 34 | retries: 3 # Number of retries before marking as unhealthy 35 | start_period: 10s # Time to wait before starting health checks 36 | restart: always 37 | 38 | # ${SERVICE} 39 | application: 40 | image:
${IMAGE_URL} # Image to be used 41 | network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale 42 | container_name: app-${SERVICE} # Name for local container management 43 | environment: 44 | - TZ=Europe/Amsterdam 45 | depends_on: 46 | tailscale: 47 | condition: service_healthy 48 | healthcheck: 49 | test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running 50 | interval: 1m # How often to perform the check 51 | timeout: 10s # Time to wait for the check to succeed 52 | retries: 3 # Number of retries before marking as unhealthy 53 | start_period: 30s # Time to wait before starting health checks 54 | restart: always -------------------------------------------------------------------------------- /services/ddns-updater/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=ddns-updater 5 | IMAGE_URL=qmcgaw/ddns-updater 6 | SERVICEPORT=8000 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/ddns-updater/README.md: -------------------------------------------------------------------------------- 1 | # DDNS Updater with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [DDNS Updater](https://github.com/qdm12/ddns-updater) with Tailscale as a sidecar container, enabling secure and private management of your dynamic DNS records over a Tailscale network. Integrating Tailscale ensures that your DDNS Updater instance is accessible only to authorized devices within your Tailnet, enhancing the security of your DNS management. 4 | 5 | ## DDNS Updater 6 | 7 | [DDNS Updater](https://github.com/qdm12/ddns-updater) is a lightweight, universal program designed to keep your DNS A and/or AAAA records updated across multiple DNS providers. It supports a wide range of DNS services, including Cloudflare, DuckDNS, and many others. Key features include: 8 | 9 | - **Periodic Updates**: Automatically updates DNS records at specified intervals to ensure they always point to your current IP address. 10 | - **Web User Interface**: Provides a user-friendly web UI for monitoring and managing your DNS records. 11 | - **Multi-Provider Support**: Compatible with numerous DNS providers, offering flexibility in your DNS management. 12 | - **Docker Compatibility**: Available as a lightweight Docker image, facilitating easy deployment and integration into existing setups. 13 | 14 | By combining DDNS Updater with Tailscale, you can securely manage your dynamic DNS records without exposing the service to the public internet. 15 | 16 | ## Configuration Overview 17 | 18 | In this setup, the `tailscale-ddns-updater` service runs Tailscale, providing a secure networking layer for the DDNS Updater service. The `ddns-updater` service utilizes Docker's `network_mode: service:` configuration to route all traffic through the Tailscale network. This setup ensures that the DDNS Updater's web interface and API are only accessible within your private Tailnet, adding an extra layer of security to your DNS management. 
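
Note that the records to update are configured in the updater itself rather than in this Compose setup: ddns-updater reads its provider settings from a `config.json` kept in its data directory (conventionally mounted at `/updater/data` inside the container). The sketch below shows a single DuckDNS record; field names vary per provider, the exact schema should be verified against the upstream ddns-updater documentation for your version, and the token shown is a placeholder:

```json
{
  "settings": [
    {
      "provider": "duckdns",
      "domain": "yourname.duckdns.org",
      "token": "00000000-0000-0000-0000-000000000000",
      "ip_version": "ipv4"
    }
  ]
}
```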
19 | -------------------------------------------------------------------------------- /services/ddns-updater/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:8000" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/donetick/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=donetick 5 | IMAGE_URL=donetick/donetick 6 | SERVICEPORT=2021 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 -------------------------------------------------------------------------------- /services/donetick/README.md: -------------------------------------------------------------------------------- 1 | # Donetick with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up **[Donetick](https://github.com/donetick/donetick)** with Tailscale as a sidecar container to securely manage and access your self-hosted task management system over a private Tailscale network. By integrating Tailscale, you can ensure that your Donetick instance remains private and accessible only to authorized devices within your Tailscale network. 4 | 5 | ## Donetick 6 | 7 | [Donetick](https://github.com/donetick/donetick) is a **self-hosted task and checklist manager** designed for simplicity and efficiency. It helps users stay organized with structured to-do lists and task tracking while ensuring full control over their data. With Donetick, you can create tasks, set deadlines, and track progress without relying on third-party services. By integrating Tailscale, you can further secure your Donetick instance by restricting access to only authorized devices within your private network. 8 | 9 | ## Key Features 10 | 11 | - **Task & Checklist Management** – Organize and track tasks efficiently. 12 | - **Collaborative Workflows** – Share tasks and checklists with team members. 13 | - **Self-Hosted Privacy** – Keep full control over your task management data. 14 | - **Minimalist & Lightweight** – A simple, distraction-free interface for productivity. 15 | - **Secure Access with Tailscale** – Restrict access to only authorized devices within your private network. 16 | 17 | ## Configuration Overview 18 | 19 | In this setup, the `tailscale-donetick` service runs Tailscale, which manages secure networking for the Donetick service. The `donetick` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This ensures that Donetick’s web interface is only accessible through the Tailscale network (or locally, if preferred), adding an extra layer of security and privacy for managing tasks and checklists. 
20 | -------------------------------------------------------------------------------- /services/donetick/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:2021" 12 | } 13 | } 14 | } 15 | } 16 | } -------------------------------------------------------------------------------- /services/donetick/donetick-data/config/selfhosted.yaml: -------------------------------------------------------------------------------- 1 | name: "selfhosted" 2 | is_done_tick_dot_com: false 3 | is_user_creation_disabled: false 4 | telegram: 5 | token: "" 6 | pushover: 7 | token: "" 8 | database: 9 | type: "sqlite" 10 | migration: true 11 | # these are only required for postgres 12 | host: "secret" 13 | port: 5432 14 | user: "secret" 15 | password: "secret" 16 | name: "secret" 17 | jwt: 18 | secret: "secret" 19 | session_time: 168h 20 | max_refresh: 168h 21 | server: 22 | port: 2021 23 | read_timeout: 10s 24 | write_timeout: 10s 25 | rate_period: 60s 26 | rate_limit: 300 27 | cors_allow_origins: 28 | - "http://localhost:5173" 29 | - "http://localhost:7926" 30 | # the below are required for the android app to work 31 | - "https://localhost" 32 | - "capacitor://localhost" 33 | serve_frontend: true 34 | scheduler_jobs: 35 | due_job: 30m 36 | overdue_job: 3h 37 | pre_due_job: 3h 38 | email: 39 | host: 40 | port: 41 | key: 42 | email: 43 | appHost: 44 | oauth2: 45 | client_id: 46 | client_secret: 47 | auth_url: 48 | token_url: 49 | user_info_url: 50 | redirect_url: 51 | name: -------------------------------------------------------------------------------- /services/dozzle/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=dozzle 5 | IMAGE_URL=amir20/dozzle 6 | SERVICEPORT=8080 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/dozzle/README.md: -------------------------------------------------------------------------------- 1 | # Dozzle with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Dozzle](https://github.com/amir20/dozzle) with Tailscale as a sidecar container to securely access your real-time Docker log viewer over a private Tailscale network. By using Tailscale in a sidecar configuration, you can enhance the security and privacy of your Dozzle instance, ensuring that it is only accessible within your Tailscale network. 4 | 5 | ## Dozzle 6 | 7 | [Dozzle](https://github.com/amir20/dozzle) is a lightweight, self-hosted application for viewing Docker container logs in real time. It offers an intuitive web interface that makes it easy to monitor logs from your Docker environment without the need for complex setups or additional dependencies. This configuration leverages Tailscale to securely connect to your Dozzle instance, ensuring that your log viewer is protected from unauthorized access and that your instance is only accessible via your private Tailscale network. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-dozzle` service runs Tailscale, which manages secure networking for the Dozzle service. 
The `dozzle` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This setup ensures that Dozzle’s web interface is only accessible through the Tailscale network (or locally, if preferred), providing an extra layer of security and privacy for your self-hosted log viewer. 12 | -------------------------------------------------------------------------------- /services/dozzle/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:8080" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/dozzle/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # Make sure you have updated/checked the .env file with the correct variables. 3 | # All the ${ xx } need to be defined there. 4 | # Tailscale Sidecar Configuration 5 | tailscale: 6 | image: tailscale/tailscale:latest # Image to be used 7 | container_name: tailscale-${SERVICE} # Name for local container management 8 | hostname: ${SERVICE} # Name used within your Tailscale environment 9 | environment: 10 | - TS_AUTHKEY=${TS_AUTHKEY} 11 | - TS_STATE_DIR=/var/lib/tailscale 12 | - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required 13 | - TS_USERSPACE=false 14 | - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" 15 | - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The address:port for the healthz endpoint 16 | #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS 17 | volumes: 18 | - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path 19 | - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path 20 | devices: 21 | - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work 22 | cap_add: 23 | - net_admin # Tailscale requirement 24 | - sys_module # Tailscale requirement 25 | ports: 26 | - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required 27 | # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below 28 | # dns: 29 | # - ${DNS_SERVER} 30 | healthcheck: 31 | test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational 32 | interval: 1m # How often to perform the check 33 | timeout: 10s # Time to wait for the check to succeed 34 | retries: 3 # Number of retries before marking as unhealthy 35 | start_period: 10s # Time to wait before starting health checks 36 | restart: always 37 | 38 | # ${SERVICE} 39 | application: 40 | image: ${IMAGE_URL} # Image to be used 41 | network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale 42 | container_name: app-${SERVICE} # Name for local container management 43 | environment: 44 | DOZZLE_AUTH_PROVIDER: simple 45 | volumes: 46 | - ${PWD}/${SERVICE}-data/dozzle-data:/data 47 | - /var/run/docker.sock:/var/run/docker.sock:ro 48 | depends_on: 49 | tailscale: 50 | condition: service_healthy 51 | healthcheck: 52 | test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running 53 |
interval: 1m # How often to perform the check 54 | timeout: 10s # Time to wait for the check to succeed 55 | retries: 3 # Number of retries before marking as unhealthy 56 | start_period: 30s # Time to wait before starting health checks 57 | restart: always 58 | -------------------------------------------------------------------------------- /services/dozzle/dozzle-data/users.yml: -------------------------------------------------------------------------------- 1 | ## Create user with: docker run amir20/dozzle generate --name YourNameHere --email YourEmailHere --password YourPasswordHere YourUsernameHere 2 | 3 | users: 4 | ExampleUser1: 5 | email: example@example.com 6 | name: exampleName 7 | password: $2a$11$OoyqEqK/bH8cBFnbD.UuB.t7HADiZOeX4z4_ExampleBCRYPT 8 | filter: "" -------------------------------------------------------------------------------- /services/dumbdo/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=dumbdo 5 | IMAGE_URL=dumbwareio/dumbdo:latest 6 | SERVICEPORT=3000 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/dumbdo/README.md: -------------------------------------------------------------------------------- 1 | # DumbDo with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [DumbDo](https://github.com/DumbWareio/DumbDo) with Tailscale as a sidecar container to securely manage and access your lightweight task manager over a private Tailscale network. By integrating Tailscale, you can ensure that your DumbDo instance remains private and accessible only to authorized devices within your Tailscale network. 4 | 5 | ## DumbDo 6 | 7 | [DumbDo](https://github.com/DumbWareio/DumbDo) is a self-hosted, minimalistic task management tool designed to provide a distraction-free experience for managing to-do lists and tasks. With its simple interface and lightweight nature, DumbDo allows users to focus on productivity without unnecessary complexity. By integrating Tailscale, you can keep your task manager secure and accessible only within your private network. 8 | 9 | ## Key Features 10 | 11 | - **Minimalist Task Management** – A straightforward approach to to-do lists without unnecessary complexity. 12 | - **Self-Hosted** – Maintain full control over your data with a locally hosted instance. 13 | - **Lightweight & Fast** – Designed for speed and efficiency without bloated features. 14 | - **Secure Integration** – Pair with Tailscale to restrict access to authorized devices only. 15 | 16 | ## Configuration Overview 17 | 18 | In this setup, the `tailscale-dumbdo` service runs Tailscale, which manages secure networking for the DumbDo service. The `dumbdo` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This ensures that DumbDo’s web interface is only accessible through the Tailscale network (or locally, if preferred), providing enhanced privacy and security for your task management system. 
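With the stack up, DumbDo is reached through the MagicDNS name that matches the sidecar's `hostname`; a sketch, where `example.ts.net` stands in for your own Tailnet domain:

```shell
# Tailscale Serve terminates HTTPS on :443 and proxies to DumbDo on 127.0.0.1:3000
curl -I https://dumbdo.example.ts.net
```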
19 | -------------------------------------------------------------------------------- /services/dumbdo/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:3000" 12 | } 13 | } 14 | } 15 | } 16 | } -------------------------------------------------------------------------------- /services/eigenfocus/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=eigenfocus 5 | IMAGE_URL=eigenfocus/eigenfocus:0.8.0 6 | SERVICEPORT=3000 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/eigenfocus/README.md: -------------------------------------------------------------------------------- 1 | # Eigenfocus with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up **[Eigenfocus](https://github.com/Eigenfocus/eigenfocus)** with Tailscale as a sidecar container to securely manage and access your self-hosted task and project management tool over a private Tailscale network. By integrating Tailscale, you can ensure that your Eigenfocus instance remains private and accessible only to authorized devices within your Tailscale network. 4 | 5 | ## Eigenfocus 6 | 7 | [Eigenfocus](https://github.com/Eigenfocus/eigenfocus) is a self-hosted, privacy-focused task and project management tool that helps individuals and teams stay organized. With its clean, minimalist interface and structured workflow, Eigenfocus is designed for those who prefer a lightweight yet powerful alternative to traditional project management apps. By integrating Tailscale, your Eigenfocus instance is secured, allowing access only from trusted devices within your private network. 8 | 9 | ## Key Features 10 | 11 | - **Task & Project Management** – Organize tasks, set deadlines, and track progress effortlessly. 12 | - **Privacy-Focused** – Self-hosted to keep your data secure and under your control. 13 | - **Minimalist Interface** – A distraction-free, efficient workflow for productivity. 14 | - **Collaboration Ready** – Share tasks and projects with team members. 15 | - **Secure Access with Tailscale** – Restrict access to only authorized devices within your private network. 16 | 17 | ## Configuration Overview 18 | 19 | In this setup, the `tailscale-eigenfocus` service runs Tailscale, which manages secure networking for the Eigenfocus service. The `eigenfocus` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This ensures that Eigenfocus’ web interface is only accessible through the Tailscale network (or locally, if preferred), adding an extra layer of security and privacy for managing tasks and projects. 
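Startup order matters in this pattern: the application container is held back until the Tailscale sidecar reports healthy, so the service only starts once the Tailnet connection is up. Condensed from the compose file in this folder:

```yaml
services:
  tailscale:
    healthcheck:   # the sidecar reports healthy once its /healthz endpoint responds
      test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"]

  application:
    network_mode: service:tailscale
    depends_on:
      tailscale:
        condition: service_healthy   # block startup until the sidecar is ready
```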
20 | -------------------------------------------------------------------------------- /services/eigenfocus/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:3000" 12 | } 13 | } 14 | } 15 | } 16 | } -------------------------------------------------------------------------------- /services/eigenfocus/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # Make sure you have updated/checked the .env file with the correct variables. 3 | # All the ${ xx } need to be defined there. 4 | # Tailscale Sidecar Configuration 5 | tailscale: 6 | image: tailscale/tailscale:latest # Image to be used 7 | container_name: tailscale-${SERVICE} # Name for local container management 8 | hostname: ${SERVICE} # Name used within your Tailscale environment 9 | environment: 10 | - TS_AUTHKEY=${TS_AUTHKEY} 11 | - TS_STATE_DIR=/var/lib/tailscale 12 | - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required 13 | - TS_USERSPACE=false 14 | - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" 15 | - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The address:port for the healthz endpoint 16 | #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS 17 | volumes: 18 | - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path 19 | - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path 20 | devices: 21 | - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work 22 | cap_add: 23 | - net_admin # Tailscale requirement 24 | - sys_module # Tailscale requirement 25 | #ports: 26 | # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required 27 | # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below 28 | #dns: 29 | # - ${DNS_SERVER} 30 | healthcheck: 31 | test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational 32 | interval: 1m # How often to perform the check 33 | timeout: 10s # Time to wait for the check to succeed 34 | retries: 3 # Number of retries before marking as unhealthy 35 | start_period: 10s # Time to wait before starting health checks 36 | restart: always 37 | 38 | # ${SERVICE} 39 | application: 40 | image: ${IMAGE_URL} # Image to be used 41 | network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale 42 | container_name: app-${SERVICE} # Name for local container management 43 | environment: 44 | - PUID=1000 45 | - PGID=1000 46 | - TZ=Europe/Amsterdam 47 | - DEFAULT_HOST_URL=http://127.0.0.1:3000 48 | volumes: 49 | - ${PWD}/${SERVICE}-data:/eigenfocus-app/app-data 50 | depends_on: 51 | tailscale: 52 | condition: service_healthy 53 | # healthcheck: 54 | # test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running 55 | # interval: 1m # How often to perform the check 56 | # timeout: 10s # Time to wait for the check to succeed 57 | # retries: 3 # Number of retries before marking as unhealthy 58 | # start_period: 30s # Time to wait before starting health checks 59 | restart: always
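Deploying Eigenfocus then follows the same two steps as the other services in this repository; a typical invocation, assuming you are in `services/eigenfocus` and have set `TS_AUTHKEY` in `.env`:

```shell
docker compose up -d                 # start the tailscale sidecar and eigenfocus
docker compose logs -f tailscale     # watch the sidecar authenticate and join the Tailnet
```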
-------------------------------------------------------------------------------- /services/excalidraw/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=excalidraw 5 | IMAGE_URL=excalidraw/excalidraw 6 | SERVICEPORT=80 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/excalidraw/README.md: -------------------------------------------------------------------------------- 1 | # Excalidraw with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Excalidraw](https://github.com/excalidraw/excalidraw) with Tailscale as a sidecar container to securely collaborate on whiteboard diagrams over a private Tailscale network. By integrating Tailscale in a sidecar configuration, you can enhance the security and accessibility of your Excalidraw server, ensuring that it is only available within your Tailscale network. 4 | 5 | ## Excalidraw 6 | 7 | [Excalidraw](https://github.com/excalidraw/excalidraw) is an open-source virtual whiteboard tool designed for real-time collaboration. It allows users to create diagrams, sketches, and wireframes in a minimalist, hand-drawn style. Excalidraw is easy to set up and supports self-hosting for private, secure collaboration. This configuration incorporates Tailscale to provide a secure connection to your Excalidraw server, protecting your sessions from unauthorized access and enabling private collaboration. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-excalidraw` service runs Tailscale, which manages secure networking for the Excalidraw service. The `excalidraw` service utilizes the Tailscale network stack via Docker's `network_mode: service:` configuration. This design ensures that Excalidraw's collaboration and editing features are only accessible through the Tailscale network (or locally, if preferred), providing enhanced security and privacy for your self-hosted Excalidraw instance. 12 | -------------------------------------------------------------------------------- /services/excalidraw/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:80" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/excalidraw/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # Make sure you have updated/checked the .env file with the correct variables. 3 | # All the ${ xx } need to be defined there. 
4 | # Tailscale Sidecar Configuration 5 | tailscale: 6 | image: tailscale/tailscale:latest # Image to be used 7 | container_name: tailscale-${SERVICE} # Name for local container management 8 | hostname: ${SERVICE} # Name used within your Tailscale environment 9 | environment: 10 | - TS_AUTHKEY=${TS_AUTHKEY} 11 | - TS_STATE_DIR=/var/lib/tailscale 12 | - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required 13 | - TS_USERSPACE=false 14 | - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" 15 | - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The address:port for the healthz endpoint 16 | #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS 17 | volumes: 18 | - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path 19 | - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path 20 | devices: 21 | - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work 22 | cap_add: 23 | - net_admin # Tailscale requirement 24 | - sys_module # Tailscale requirement 25 | #ports: 26 | # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required 27 | # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below 28 | # dns: 29 | # - ${DNS_SERVER} 30 | healthcheck: 31 | test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational 32 | interval: 1m # How often to perform the check 33 | timeout: 10s # Time to wait for the check to succeed 34 | retries: 3 # Number of retries before marking as unhealthy 35 | start_period: 10s # Time to wait before starting health checks 36 | restart: always 37 | 38 | # ${SERVICE} 39 | application: 40 | image: ${IMAGE_URL} # Image to be used 41 | network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale 42 | container_name: app-${SERVICE} # Name for local container management 43 | environment: 44 | - NODE_ENV=production 45 | - TZ=Europe/Amsterdam 46 | stdin_open: true 47 | volumes: 48 | - ${PWD}/${SERVICE}-data/app/config:/config 49 | depends_on: 50 | tailscale: 51 | condition: service_healthy 52 | healthcheck: 53 | test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running 54 | interval: 1m # How often to perform the check 55 | timeout: 10s # Time to wait for the check to succeed 56 | retries: 3 # Number of retries before marking as unhealthy 57 | start_period: 30s # Time to wait before starting health checks 58 | restart: always -------------------------------------------------------------------------------- /services/flatnotes/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=flatnotes 5 | IMAGE_URL=dullage/flatnotes:latest 6 | SERVICEPORT=8080 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/flatnotes/README.md: -------------------------------------------------------------------------------- 1 | # Flatnotes with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up
**[Flatnotes](https://github.com/dullage/flatnotes)** with Tailscale as a sidecar container to securely manage and access your self-hosted note-taking application over a private Tailscale network. By integrating Tailscale, you can ensure that your Flatnotes instance remains private and accessible only to authorized devices within your Tailscale network. 4 | 5 | ## Flatnotes 6 | 7 | [Flatnotes](https://github.com/dullage/flatnotes) is a **lightweight, self-hosted note-taking app** that stores notes in plain text Markdown files. With a simple yet powerful interface, Flatnotes offers **tag-based organization**, **full-text search**, and a **distraction-free writing experience**. By integrating Tailscale, you can keep your notes private and secure, ensuring that only trusted devices can access them. 8 | 9 | ## Key Features 10 | 11 | - **Markdown-Based Notes** – Write and store notes in Markdown format for flexibility. 12 | - **Tag-Based Organization** – Easily categorize and manage notes with tags. 13 | - **Full-Text Search** – Quickly find notes with an efficient search function. 14 | - **Self-Hosted Privacy** – Keep full control of your data without relying on third-party services. 15 | - **Secure Access with Tailscale** – Restrict access to only authorized devices within your private network. 16 | 17 | ## Configuration Overview 18 | 19 | In this setup, the `tailscale-flatnotes` service runs Tailscale, which manages secure networking for the Flatnotes service. The `flatnotes` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This ensures that Flatnotes’ web interface and note storage are only accessible through the Tailscale network (or locally, if preferred), adding an extra layer of security and privacy for managing your notes. 20 | -------------------------------------------------------------------------------- /services/flatnotes/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:8080" 12 | } 13 | } 14 | } 15 | } 16 | } -------------------------------------------------------------------------------- /services/ghost/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=ghost 5 | IMAGE_URL=ghost:5-alpine 6 | SERVICEPORT=8080 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/ghost/README.md: -------------------------------------------------------------------------------- 1 | # Ghost with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up **[Ghost](https://github.com/TryGhost/Ghost)** with Tailscale as a sidecar container to securely manage and access your self-hosted publishing platform over a private Tailscale network. By integrating Tailscale, you can ensure that your Ghost instance remains private and accessible only to authorized devices within your Tailscale network. 4 | 5 | ## Ghost 6 | 7 | [Ghost](https://github.com/TryGhost/Ghost) is a modern, open-source publishing platform designed for professional blogs, newsletters, and online publications. 
It provides a sleek, minimalist editor, built-in SEO features, and powerful customization options. By integrating Tailscale, your Ghost instance remains secure and accessible only to authorized users, ensuring that your content is managed in a private environment. 8 | 9 | ## Key Features 10 | 11 | - **Minimalist & Fast** – A lightweight, streamlined writing experience for bloggers and content creators. 12 | - **Built-in SEO & Analytics** – Optimize content for search engines and track performance effortlessly. 13 | - **Customizable Themes & Integrations** – Extend Ghost with themes, memberships, and integrations. 14 | - **Self-Hosted Privacy** – Maintain full control over your content with a locally hosted instance. 15 | - **Secure Access with Tailscale** – Restrict access to only authorized devices within your private network. 16 | 17 | ## Configuration Overview 18 | 19 | In this setup, the `tailscale-ghost` service runs Tailscale, which manages secure networking for the Ghost service. The `ghost` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This ensures that Ghost’s web interface and publishing tools are only accessible through the Tailscale network (or locally, if preferred), adding an extra layer of security and privacy to your publishing workflow. 20 | -------------------------------------------------------------------------------- /services/ghost/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:80" 12 | } 13 | } 14 | } 15 | } 16 | } -------------------------------------------------------------------------------- /services/gokapi/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=gokapi 5 | IMAGE_URL=f0rc3/gokapi 6 | SERVICEPORT=53842 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/gokapi/README.md: -------------------------------------------------------------------------------- 1 | # Gokapi with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Gokapi](https://github.com/Forceu/Gokapi) with Tailscale as a sidecar container to securely manage and access your lightweight file-sharing service over a private Tailscale network. By using Tailscale in a sidecar configuration, you can enhance the security and privacy of your Gokapi instance, ensuring that it is only accessible within your Tailscale network. 4 | 5 | ## Gokapi 6 | 7 | [Gokapi](https://github.com/Forceu/Gokapi) is a lightweight, self-hosted file-sharing platform designed to provide a simple and secure way to share files with others. It features an intuitive web interface, token-based sharing, and the ability to control file expiry and download limits. This configuration leverages Tailscale to securely connect to your Gokapi instance, ensuring that your file-sharing activities remain private and protected from unauthorized access. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-gokapi` service runs Tailscale, which manages secure networking for the Gokapi service. 
The `gokapi` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This setup ensures that Gokapi's web interface and file-sharing services are only accessible through the Tailscale network (or locally, if preferred), providing an additional layer of security and privacy for your file-sharing solution. 12 | -------------------------------------------------------------------------------- /services/gokapi/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:53842" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/gokapi/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # Make sure you have updated/checked the .env file with the correct variables. 3 | # All the ${ xx } need to be defined there. 4 | # Tailscale Sidecar Configuration 5 | tailscale: 6 | image: tailscale/tailscale:latest # Image to be used 7 | container_name: tailscale-${SERVICE} # Name for local container management 8 | hostname: ${SERVICE} # Name used within your Tailscale environment 9 | environment: 10 | - TS_AUTHKEY=${TS_AUTHKEY} 11 | - TS_STATE_DIR=/var/lib/tailscale 12 | - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required 13 | - TS_USERSPACE=false 14 | - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" 15 | - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The address:port for the healthz endpoint 16 | #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS 17 | volumes: 18 | - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path 19 | - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path 20 | devices: 21 | - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work 22 | cap_add: 23 | - net_admin # Tailscale requirement 24 | - sys_module # Tailscale requirement 25 | ports: 26 | - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required 27 | # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below 28 | # dns: 29 | # - ${DNS_SERVER} 30 | healthcheck: 31 | test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational 32 | interval: 1m # How often to perform the check 33 | timeout: 10s # Time to wait for the check to succeed 34 | retries: 3 # Number of retries before marking as unhealthy 35 | start_period: 10s # Time to wait before starting health checks 36 | restart: always 37 | 38 | # ${SERVICE} 39 | application: 40 | image: ${IMAGE_URL} # Image to be used 41 | network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale 42 | container_name: app-${SERVICE} # Name for local container management 43 | environment: 44 | - TZ=Europe/Amsterdam 45 | volumes: 46 | - ${PWD}/${SERVICE}-data/gokapi-data:/app/data 47 | - ${PWD}/${SERVICE}-data/gokapi-config:/app/config 48 | depends_on: 49 | tailscale: 50 | condition: service_healthy 51 | healthcheck: 52 | test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if
${SERVICE} process is running 53 | interval: 1m # How often to perform the check 54 | timeout: 10s # Time to wait for the check to succeed 55 | retries: 3 # Number of retries before marking as unhealthy 56 | start_period: 30s # Time to wait before starting health checks 57 | restart: always -------------------------------------------------------------------------------- /services/haptic/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=haptic 5 | IMAGE_URL=chroxify/haptic-web:latest 6 | SERVICEPORT=3000 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/haptic/README.md: -------------------------------------------------------------------------------- 1 | # Haptic with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up **[Haptic](https://github.com/chroxify/haptic)** with Tailscale as a sidecar container to securely manage and access your self-hosted bookmark manager over a private Tailscale network. By integrating Tailscale, you can ensure that your Haptic instance remains private and accessible only to authorized devices within your Tailscale network. 4 | 5 | ## Haptic 6 | 7 | [Haptic](https://github.com/chroxify/haptic) is a modern, self-hosted bookmark manager designed for simplicity, speed, and privacy. It allows users to organize, search, and access saved links efficiently while providing a clean and user-friendly interface. With Haptic, you can take full control of your bookmarks without relying on third-party services. By integrating Tailscale, you can further secure your Haptic instance by ensuring access is restricted to authorized devices within your private network. 8 | 9 | ## Key Features 10 | 11 | - **Self-Hosted Bookmark Management** – Organize and store bookmarks securely. 12 | - **Full-Text Search** – Quickly find saved bookmarks with an intuitive search function. 13 | - **Minimalist & Fast** – Designed for speed and usability without unnecessary complexity. 14 | - **Privacy-Focused** – Keep your bookmarks safe and private with a self-hosted solution. 15 | - **Secure Access with Tailscale** – Restrict access to only authorized devices within your private network. 16 | 17 | ## Configuration Overview 18 | 19 | In this setup, the `tailscale-haptic` service runs Tailscale, which manages secure networking for the Haptic service. The `haptic` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This ensures that Haptic’s web interface is only accessible through the Tailscale network (or locally, if preferred), adding an extra layer of security and privacy for managing your bookmarks. 
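At runtime, Tailscale substitutes `${TS_CERT_DOMAIN}` in `serve.json` with this node's full MagicDNS name. With an illustrative Tailnet domain of `example.ts.net` (yours will differ), the effective Serve configuration below would map HTTPS on port 443 to Haptic's local port 3000:

```json
{
  "TCP": { "443": { "HTTPS": true } },
  "Web": {
    "haptic.example.ts.net:443": {
      "Handlers": { "/": { "Proxy": "http://127.0.0.1:3000" } }
    }
  }
}
```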
20 | -------------------------------------------------------------------------------- /services/haptic/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:3000" 12 | } 13 | } 14 | } 15 | } 16 | } -------------------------------------------------------------------------------- /services/haptic/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # Make sure you have updated/checked the .env file with the correct variables. 3 | # All the ${ xx } need to be defined there. 4 | # Tailscale Sidecar Configuration 5 | tailscale: 6 | image: tailscale/tailscale:latest # Image to be used 7 | container_name: tailscale-${SERVICE} # Name for local container management 8 | hostname: ${SERVICE} # Name used within your Tailscale environment 9 | environment: 10 | - TS_AUTHKEY=${TS_AUTHKEY} 11 | - TS_STATE_DIR=/var/lib/tailscale 12 | - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required 13 | - TS_USERSPACE=false 14 | - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" 15 | - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The address:port for the healthz endpoint 16 | #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS 17 | volumes: 18 | - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path 19 | - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path 20 | devices: 21 | - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work 22 | cap_add: 23 | - net_admin # Tailscale requirement 24 | - sys_module # Tailscale requirement 25 | #ports: 26 | # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required 27 | # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below 28 | #dns: 29 | # - ${DNS_SERVER} 30 | healthcheck: 31 | test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational 32 | interval: 1m # How often to perform the check 33 | timeout: 10s # Time to wait for the check to succeed 34 | retries: 3 # Number of retries before marking as unhealthy 35 | start_period: 10s # Time to wait before starting health checks 36 | restart: always 37 | 38 | # ${SERVICE} 39 | application: 40 | image: ${IMAGE_URL} # Image to be used 41 | network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale 42 | container_name: app-${SERVICE} # Name for local container management 43 | environment: 44 | - PUID=1000 45 | - PGID=1000 46 | - TZ=Europe/Amsterdam 47 | # volumes: 48 | # - ${PWD}/${SERVICE}-data/app/config:/config 49 | depends_on: 50 | tailscale: 51 | condition: service_healthy 52 | # healthcheck: 53 | # test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running 54 | # interval: 1m # How often to perform the check 55 | # timeout: 10s # Time to wait for the check to succeed 56 | # retries: 3 # Number of retries before marking as unhealthy 57 | # start_period: 30s # Time to wait before starting health checks 58 | restart: always
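Note that the application healthcheck is left commented out in this compose file. For a quick manual probe you can reuse the sidecar's tooling, since both containers share one network namespace; a sketch, assuming the default container name `tailscale-haptic` from this folder's conventions:

```shell
# The app shares the sidecar's network namespace, so probe Haptic via the tailscale container
docker exec tailscale-haptic wget --spider -q http://127.0.0.1:3000 && echo "haptic is up"
```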
-------------------------------------------------------------------------------- /services/homarr/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=homarr 5 | IMAGE_URL=ghcr.io/ajnart/homarr 6 | SERVICEPORT=7575 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/homarr/README.md: -------------------------------------------------------------------------------- 1 | # Homarr with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Homarr](https://github.com/ajnart/homarr) with Tailscale as a sidecar container to securely manage and access your dashboard over a private Tailscale network. By using Tailscale in a sidecar configuration, you can enhance the security and privacy of your Homarr instance, ensuring that it is only accessible within your Tailscale network. 4 | 5 | ## Homarr 6 | 7 | [Homarr](https://github.com/ajnart/homarr) is an open-source, self-hosted dashboard that integrates with all your self-hosted services, providing a centralized location to manage and access your apps, notifications, and more. It supports customization and can be extended with various plugins and integrations. This configuration leverages Tailscale to securely connect to your Homarr instance, ensuring that your dashboard interface is protected from unauthorized access and that your instance is accessible only via your private Tailscale network. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-homarr` service runs Tailscale, which manages secure networking for the Homarr service. The `homarr` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This setup ensures that Homarr’s web interface is only accessible through the Tailscale network (or locally, if preferred), providing an extra layer of security and privacy for your self-hosted dashboard.
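Exposure is controlled by the `ports:` block of the sidecar in the compose file: keeping it publishes Homarr's port 7575 on the local network in addition to the Tailnet, while removing it makes the dashboard Tailnet-only. A condensed sketch of the two options, using the port from this folder's `.env`:

```yaml
services:
  tailscale:
    # Option 1: LAN + Tailnet access
    ports:
      - 0.0.0.0:7575:7575   # publish Homarr's port on the local network

    # Option 2: Tailnet-only access - delete the ports block entirely and
    # rely on Tailscale Serve (config/serve.json) for HTTPS on :443
```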
12 | -------------------------------------------------------------------------------- /services/homarr/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:7575" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/home-assistant/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=home-assistant 5 | IMAGE_URL=ghcr.io/home-assistant/home-assistant:stable 6 | SERVICEPORT=8123 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/home-assistant/README.md: -------------------------------------------------------------------------------- 1 | # Home Assistant with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up **[Home Assistant](https://github.com/home-assistant/)** with Tailscale as a sidecar container to securely manage and access your smart home automation platform over a private Tailscale network. By integrating Tailscale, you can ensure that your Home Assistant instance remains private and accessible only to authorized devices within your Tailscale network. 4 | 5 | ## Home Assistant 6 | 7 | [Home Assistant](https://github.com/home-assistant/) is an open-source home automation platform that allows you to control and automate smart devices from a unified interface. With support for thousands of integrations, it provides powerful automation capabilities and privacy-focused self-hosted control over your smart home. Pairing Home Assistant with Tailscale ensures a secure, remote-accessible smart home without exposing it to the public internet. 8 | 9 | ## Key Features 10 | 11 | - **Local Control & Privacy** – Self-hosted and privacy-focused, keeping your data in your home. 12 | - **Extensive Integrations** – Supports thousands of smart home devices and services. 13 | - **Automation & Customization** – Create complex automations with YAML or visual editors. 14 | - **Secure Remote Access** – Pair with Tailscale to safely access your Home Assistant instance from anywhere. 15 | 16 | ## Configuration Overview 17 | 18 | In this setup, the `tailscale-homeassistant` service runs Tailscale, which manages secure networking for the Home Assistant service. The `homeassistant` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This ensures that Home Assistant’s web interface and smart home control features are only accessible through the Tailscale network (or locally, if preferred), adding an extra layer of security and privacy for your home automation system. 19 | 20 | ## Troubleshooting 21 | 22 | If you encounter a `400: Bad Request` after deployment, please alter the file `ha-data/config/configuration.yaml` to trust the reverse proxy configuration used by Tailscale. The `configuration.yaml` should look like this: 23 | 24 | ```plain 25 | $ cat ha-data/config/configuration.yaml 26 | 27 | # Loads default set of integrations. Do not remove.
28 | default_config: 29 | 30 | # Load frontend themes from the themes folder 31 | frontend: 32 | themes: !include_dir_merge_named themes 33 | 34 | automation: !include automations.yaml 35 | script: !include scripts.yaml 36 | scene: !include scenes.yaml 37 | 38 | http: 39 | use_x_forwarded_for: true 40 | trusted_proxies: 41 | - 127.0.0.1 42 | ``` 43 | -------------------------------------------------------------------------------- /services/home-assistant/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:8123" 12 | } 13 | } 14 | } 15 | } 16 | } -------------------------------------------------------------------------------- /services/isley/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=isley 5 | IMAGE_URL=dwot/isley 6 | SERVICEPORT=8080 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/isley/README.md: -------------------------------------------------------------------------------- 1 | # Isley with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Isley](https://github.com/dwot/isley) with Tailscale as a sidecar container, enabling secure and private access to your self-hosted cannabis grow journal over a Tailscale network. With Tailscale, you can ensure that your sensitive grow data and notes are only accessible by trusted devices within your Tailnet. 4 | 5 | ## Isley 6 | 7 | [Isley](https://github.com/dwot/isley) is a self-hosted cannabis grow journal designed for home growers to track and monitor their plants with ease. It replaces vendor apps, spreadsheets, and notepads by centralizing tools into a clean, intuitive interface. Isley helps growers with the following key features 🚀 8 | 9 | - **📒 Grow Logs**: Track plant growth, watering, and feeding schedules. 10 | - **🌡️ Environmental Monitoring**: View real-time data from grow equipment (AC Infinity, Ecowitt). 11 | - **📸 Image Uploads**: Attach photos to your grow logs for visual tracking. 12 | - **🌱 Seed Inventory**: Manage your seed collection and strain library. 13 | - **📊 Harvest Tracking**: Record harvest details and yields. 14 | - **📈 Graphs and Charts**: Visualize environmental data and plant progress over time. 15 | - **⚙️ Customizable Settings**: Add custom activities and measurements for your grow. 16 | - **📱 Mobile-Friendly**: Works on desktop and mobile devices for convenience. 17 | 18 | With integration options for popular grow equipment, Isley simplifies and elevates the grow experience by consolidating everything into one powerful and private tool. 19 | 20 | ## Default Credentials 21 | 22 | - **Default Username:** `admin` 23 | - **Default Password:** `isley` 24 | 25 | ## Configuration Overview 26 | 27 | In this setup, the `tailscale-isley` service runs Tailscale, which manages secure networking for the Isley service. The `isley` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This ensures that Isley's interface is not exposed to the public internet, protecting your grow journal and data with an additional layer of privacy.
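If you prefer the bind-mount directories to be owned by your user rather than created as root by Docker, pre-create them before the first start; a sketch, run from `services/isley` and matching the volume paths in this folder's compose file:

```shell
mkdir -p isley-data/isley-db isley-data/isley-uploads   # bind mounts referenced in docker-compose.yml
docker compose up -d
```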
28 | -------------------------------------------------------------------------------- /services/isley/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:8080" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/isley/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # Make sure you have updated/checked the .env file with the correct variables. 3 | # All the ${ xx } need to be defined there. 4 | # Tailscale Sidecar Configuration 5 | tailscale: 6 | image: tailscale/tailscale:latest # Image to be used 7 | container_name: tailscale-${SERVICE} # Name for local container management 8 | hostname: ${SERVICE} # Name used within your Tailscale environment 9 | environment: 10 | - TS_AUTHKEY=${TS_AUTHKEY} 11 | - TS_STATE_DIR=/var/lib/tailscale 12 | - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required 13 | - TS_USERSPACE=false 14 | - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" 15 | - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The address:port for the healthz endpoint 16 | #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS 17 | volumes: 18 | - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path 19 | - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path 20 | devices: 21 | - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work 22 | cap_add: 23 | - net_admin # Tailscale requirement 24 | - sys_module # Tailscale requirement 25 | ports: 26 | - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required 27 | # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below 28 | # dns: 29 | # - ${DNS_SERVER} 30 | healthcheck: 31 | test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational 32 | interval: 1m # How often to perform the check 33 | timeout: 10s # Time to wait for the check to succeed 34 | retries: 3 # Number of retries before marking as unhealthy 35 | start_period: 10s # Time to wait before starting health checks 36 | restart: always 37 | 38 | # ${SERVICE} 39 | application: 40 | image: ${IMAGE_URL} # Image to be used 41 | network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale 42 | container_name: app-${SERVICE} # Name for local container management 43 | environment: 44 | - TZ=Europe/Amsterdam 45 | volumes: 46 | - ${PWD}/${SERVICE}-data/isley-db:/app/data 47 | - ${PWD}/${SERVICE}-data/isley-uploads:/app/uploads 48 | depends_on: 49 | tailscale: 50 | condition: service_healthy 51 | healthcheck: 52 | test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running 53 | interval: 1m # How often to perform the check 54 | timeout: 10s # Time to wait for the check to succeed 55 | retries: 3 # Number of retries before marking as unhealthy 56 | start_period: 30s # Time to wait before starting health checks 57 | restart: always
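The application healthcheck in these compose files simply searches the process table for the service name. You can run the same probe by hand to see what it matches; a sketch, assuming the container name `app-isley` from this folder and that the image ships `pgrep` (the bundled healthcheck relies on it):

```shell
docker exec app-isley pgrep -f isley   # prints the PID(s) the healthcheck looks for
```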
-------------------------------------------------------------------------------- /services/it-tools/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=it-tools 5 | IMAGE_URL=corentinth/it-tools 6 | SERVICEPORT=80 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/it-tools/README.md: -------------------------------------------------------------------------------- 1 | # IT-Tools with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [IT-Tools](https://github.com/CorentinTh/it-tools) with Tailscale as a sidecar container to securely access your all-in-one developer utility over a private Tailscale network. By using Tailscale in a sidecar configuration, you can enhance the security and privacy of your IT-Tools instance, ensuring that it is only accessible within your Tailscale network. 4 | 5 | ## IT-Tools 6 | 7 | [IT-Tools](https://github.com/CorentinTh/it-tools) is an open-source collection of online utilities designed for developers and IT professionals. It includes a variety of tools such as encoders, converters, formatters, and more—all in one sleek, web-based application. This configuration leverages Tailscale to securely connect to your IT-Tools instance, ensuring that your suite of developer utilities is protected from unauthorized access and accessible only via your private Tailscale network. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-it-tools` service runs Tailscale, which manages secure networking for the IT-Tools service. The `it-tools` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This setup ensures that IT-Tools’ web interface is only accessible through the Tailscale network (or locally, if preferred), providing an extra layer of security and privacy for your self-hosted developer utilities. 12 | -------------------------------------------------------------------------------- /services/it-tools/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:80" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/it-tools/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # Make sure you have updated/checked the .env file with the correct variables. 3 | # All the ${ xx } need to be defined there. 
4 | # Tailscale Sidecar Configuration 5 | tailscale: 6 | image: tailscale/tailscale:latest # Image to be used 7 | container_name: tailscale-${SERVICE} # Name for local container management 8 | hostname: ${SERVICE} # Name used within your Tailscale environment 9 | environment: 10 | - TS_AUTHKEY=${TS_AUTHKEY} 11 | - TS_STATE_DIR=/var/lib/tailscale 12 | - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required 13 | - TS_USERSPACE=false 14 | - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" 15 | - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The address:port for the healthz endpoint 16 | #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS 17 | volumes: 18 | - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path 19 | - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path 20 | devices: 21 | - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work 22 | cap_add: 23 | - net_admin # Tailscale requirement 24 | - sys_module # Tailscale requirement 25 | ports: 26 | - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required 27 | # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below 28 | # dns: 29 | # - ${DNS_SERVER} 30 | healthcheck: 31 | test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational 32 | interval: 1m # How often to perform the check 33 | timeout: 10s # Time to wait for the check to succeed 34 | retries: 3 # Number of retries before marking as unhealthy 35 | start_period: 10s # Time to wait before starting health checks 36 | restart: always 37 | 38 | # ${SERVICE} 39 | application: 40 | image: ${IMAGE_URL} # Image to be used 41 | network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale 42 | container_name: app-${SERVICE} # Name for local container management 43 | environment: 44 | - TZ=Europe/Amsterdam 45 | depends_on: 46 | tailscale: 47 | condition: service_healthy 48 | healthcheck: 49 | test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running 50 | interval: 1m # How often to perform the check 51 | timeout: 10s # Time to wait for the check to succeed 52 | retries: 3 # Number of retries before marking as unhealthy 53 | start_period: 30s # Time to wait before starting health checks 54 | restart: always -------------------------------------------------------------------------------- /services/jellyfin/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=jellyfin 5 | IMAGE_URL=lscr.io/linuxserver/jellyfin 6 | SERVICEPORT=8096 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/jellyfin/README.md: -------------------------------------------------------------------------------- 1 | # Jellyfin with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Jellyfin](https://github.com/jellyfin/jellyfin) with Tailscale as a sidecar container to securely manage and access your media server over a private Tailscale
network. By using Tailscale in a sidecar configuration, you can enhance the security and privacy of your Jellyfin instance, ensuring that it is only accessible within your Tailscale network. 4 | 5 | ## Jellyfin 6 | 7 | [Jellyfin](https://github.com/jellyfin/jellyfin) is an open-source, self-hosted media server that allows you to manage and stream your media collection, including movies, TV shows, music, and more, to various devices. It provides a rich user interface and supports multiple clients, making it a powerful alternative to other media server solutions. This configuration leverages Tailscale to securely connect to your Jellyfin instance, ensuring that your media server interface is protected from unauthorized access and that your instance is accessible only via your private Tailscale network. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-jellyfin` service runs Tailscale, which manages secure networking for the Jellyfin service. The `jellyfin` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This setup ensures that Jellyfin’s web interface and API are only accessible through the Tailscale network (or locally, if preferred), providing an extra layer of security and privacy for your self-hosted media server. 12 | -------------------------------------------------------------------------------- /services/jellyfin/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:8096" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/kaneo/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=kaneo 5 | IMAGE_URL_BACKEND=ghcr.io/usekaneo/api:latest 6 | IMAGE_URL_FRONTEND=ghcr.io/usekaneo/web:latest 7 | SERVICEPORT=80 8 | TS_AUTHKEY= 9 | DNS_SERVER=9.9.9.9 10 | -------------------------------------------------------------------------------- /services/kaneo/README.md: -------------------------------------------------------------------------------- 1 | # Kaneo with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up **[Kaneo](https://github.com/usekaneo/kaneo)** with Tailscale as a sidecar container to securely manage and access your self-hosted project management platform over a private Tailscale network. By integrating Tailscale, you ensure that your Kaneo instance is only accessible to authorized devices within your Tailscale network, keeping your tasks, projects, and team discussions private. 4 | 5 | ## Kaneo 6 | 7 | [Kaneo](https://github.com/usekaneo/kaneo) is an **open-source, self-hosted project management platform** focused on simplicity, clean UI, and efficient workflows. Designed as an alternative to tools like Trello or Linear, Kaneo offers a modern and distraction-free environment to manage tasks, organize projects, and collaborate with your team. You can self-host and fully customize the platform to match your workflow—no vendor lock-in, no subscriptions. 8 | 9 | ## Key Features 10 | 11 | - **Project & Task Boards** – Kanban-style boards for managing tasks and workflows.
12 | - **Clean & Fast UI** – Minimalist design focused on usability and speed. 13 | - **Self-Hosted & Customizable** – Deploy on your own infrastructure and modify freely. 14 | - **Privacy-First** – No tracking, no external dependencies. 15 | - **Secure Access with Tailscale** – Limit access to authorized devices in your private network. 16 | 17 | ## Configuration Overview 18 | 19 | In this setup, the `tailscale-kaneo` service runs Tailscale, which manages secure networking for the Kaneo service. The `kaneo` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This ensures that Kaneo’s web interface is only accessible through the Tailscale network (or locally, if preferred), adding a strong layer of privacy and security to your self-hosted project management platform. 20 | -------------------------------------------------------------------------------- /services/kaneo/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:80" 12 | }, 13 | "/api": { 14 | "Proxy": "http://127.0.0.1:1337" 15 | } 16 | } 17 | } 18 | } 19 | } -------------------------------------------------------------------------------- /services/karakeep/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | # PLEASE READ MANUAL https://docs.karakeep.app/Installation/docker/#3-populate-the-environment-variables 5 | SERVICE=karakeep 6 | SERVICEPORT=3000 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | KARAKEEP_VERSION=release 10 | NEXTAUTH_SECRET=super_random_string 11 | MEILI_MASTER_KEY=another_random_string 12 | NEXTAUTH_URL=https:// 13 | MAX_ASSET_SIZE_MB=50 14 | DISABLE_SIGNUPS=false 15 | DISABLE_PASSWORD_AUTH=false -------------------------------------------------------------------------------- /services/karakeep/README.md: -------------------------------------------------------------------------------- 1 | # Karakeep with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up **[Karakeep](https://github.com/karakeep-app/karakeep)** with Tailscale as a sidecar container to securely manage and access your self-hosted bookmarking and archiving app over a private Tailscale network. By integrating Tailscale, you can ensure that your Karakeep instance is only accessible to authorized devices within your Tailscale network, protecting your saved links and notes from the public web. 4 | 5 | ## Karakeep 6 | 7 | [Karakeep](https://github.com/karakeep-app/karakeep) is an **open-source, self-hosted "bookmark everything" app** (formerly known as Hoarder) for saving links, notes, and images. It supports AI-based automatic tagging and full-text search, so your growing collection stays organized and easy to find. Karakeep offers a clean, modern interface and is ideal for individuals or teams who want a privacy-focused, cloud-free way to collect and manage content. 8 | 9 | ## Key Features 10 | 11 | - **Bookmark Everything** – Save links, notes, and images in one place. 12 | - **Automatic Tagging** – AI-assisted tagging keeps your collection organized. 13 | - **Full-Text Search** – Quickly find anything you have saved.
14 | - **Tagging and Lists** – Categorize and manage content with tags and lists. 15 | - **Self-Hosted & Private** – Own your data, free from third-party cloud services. 16 | - **Secure Access with Tailscale** – Restrict access to your bookmarks using your private Tailscale network. 17 | 18 | ## Configuration Overview 19 | 20 | In this setup, the `tailscale-karakeep` service runs Tailscale, which manages secure networking for the Karakeep service. The `karakeep` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This ensures that Karakeep’s web interface is only accessible through the Tailscale network (or locally, if preferred), enhancing the privacy and security of your bookmarks and saved content. 21 | -------------------------------------------------------------------------------- /services/karakeep/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:3000" 12 | } 13 | } 14 | } 15 | } 16 | } -------------------------------------------------------------------------------- /services/languagetool/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=languagetool 5 | IMAGE_URL=erikvl87/languagetool 6 | SERVICEPORT=8010 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/languagetool/README.md: -------------------------------------------------------------------------------- 1 | # LanguageTool with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [LanguageTool](https://languagetool.org) with Tailscale as a sidecar container to enhance secure networking. 4 | 5 | ## LanguageTool 6 | 7 | [LanguageTool](https://languagetool.org) is a powerful grammar and spell-checking tool available for many languages. It can be used in various applications, including web browsers, office suites, and as a standalone server for integration with other services. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-languagetool` service runs Tailscale, which manages secure networking for LanguageTool. The `languagetool` service utilizes the Tailscale network stack via Docker's `network_mode: service:`. This setup ensures that LanguageTool's service is only accessible through the Tailscale network (or locally, if preferred), providing an extra layer of security and privacy for your LanguageTool deployment. 12 | 13 | ## Using n-gram datasets 14 | 15 | > LanguageTool can make use of large n-gram data sets to detect errors with words that are often confused, like __their__ and __there__.
16 | 17 | *Source: [https://dev.languagetool.org/finding-errors-using-n-gram-data](https://dev.languagetool.org/finding-errors-using-n-gram-data)* 18 | 19 | [Download](http://languagetool.org/download/ngram-data/) the n-gram dataset(s) onto your local machine and unzip them into a local ngrams directory: 20 | 21 | ```plain 22 | home/ 23 | ├─ / 24 | │ ├─ ngrams/ 25 | │ │ ├─ en/ 26 | │ │ │ ├─ 1grams/ 27 | │ │ │ ├─ 2grams/ 28 | │ │ │ ├─ 3grams/ 29 | │ │ ├─ nl/ 30 | │ │ │ ├─ 1grams/ 31 | │ │ │ ├─ 2grams/ 32 | │ │ │ ├─ 3grams/ 33 | ``` 34 | 35 | Mount the local ngrams directory to the `/ngrams` directory in the Docker container [using the `-v` configuration](https://docs.docker.com/engine/reference/commandline/container_run/#read-only) and set the `languageModel` configuration to the `/ngrams` folder. 36 | -------------------------------------------------------------------------------- /services/languagetool/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:8010" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/linkding/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=linkding 5 | IMAGE_URL=sissbruecker/linkding 6 | SERVICEPORT=9090 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/linkding/.linkding.env: -------------------------------------------------------------------------------- 1 | # Docker container name 2 | #LD_CONTAINER_NAME=linkding 3 | # Port on the host system that the application should be published on 4 | #LD_HOST_PORT=9090 5 | # Directory on the host system that should be mounted as data dir into the Docker container 6 | LD_HOST_DATA_DIR=./data 7 | 8 | # Can be used to run linkding under a context path, for example: linkding/ 9 | # Must end with a slash `/` 10 | LD_CONTEXT_PATH= 11 | # Username of the initial superuser to create, leave empty to not create one 12 | LD_SUPERUSER_NAME= 13 | # Password for the initial superuser, leave empty to disable credentials authentication and rely on proxy authentication instead 14 | LD_SUPERUSER_PASSWORD= 15 | # Option to disable background tasks 16 | LD_DISABLE_BACKGROUND_TASKS=False 17 | # Option to disable URL validation for bookmarks completely 18 | LD_DISABLE_URL_VALIDATION=False 19 | # Enables support for authentication proxies such as Authelia 20 | LD_ENABLE_AUTH_PROXY=False 21 | # Name of the request header that the auth proxy passes to the application to identify the user 22 | # See docs/Options.md for more details 23 | LD_AUTH_PROXY_USERNAME_HEADER= 24 | # The URL that linkding should redirect to after a logout, when using an auth proxy 25 | # See docs/Options.md for more details 26 | LD_AUTH_PROXY_LOGOUT_URL= 27 | # List of trusted origins from which to accept POST requests 28 | # See docs/Options.md for more details 29 | LD_CSRF_TRUSTED_ORIGINS= 30 | 31 | # Database settings 32 | # These are currently only required for configuring PostgreSQL. 33 | # By default, linkding uses SQLite for which you don't need to configure anything.
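# Example values for a PostgreSQL setup (illustrative only; assumes a separate
# Postgres server reachable from the container under the hostname `db`):
# LD_DB_ENGINE=postgres
# LD_DB_DATABASE=linkding
# LD_DB_USER=linkding
# LD_DB_PASSWORD=change-me
# LD_DB_HOST=db
# LD_DB_PORT=5432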
34 | 35 | # Database engine, can be sqlite (default) or postgres 36 | LD_DB_ENGINE= 37 | # Database name (default: linkding) 38 | LD_DB_DATABASE= 39 | # Username to connect to the database server (default: linkding) 40 | LD_DB_USER= 41 | # Password to connect to the database server 42 | LD_DB_PASSWORD= 43 | # The hostname where the database is hosted (default: localhost) 44 | LD_DB_HOST= 45 | # Port used to connect to the database server 46 | # Should use the default port if not set 47 | LD_DB_PORT= 48 | # Any additional options to pass to the database (default: {}) 49 | LD_DB_OPTIONS= -------------------------------------------------------------------------------- /services/linkding/README.md: -------------------------------------------------------------------------------- 1 | # Linkding with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Linkding](https://github.com/sissbruecker/linkding) with Tailscale as a sidecar container to securely manage and access your self-hosted bookmark manager over a private Tailscale network. By integrating Tailscale, you can ensure that your Linkding instance remains private and accessible only to authorized devices on your Tailscale network. 4 | 5 | ## Linkding 6 | 7 | [Linkding](https://github.com/sissbruecker/linkding) is a lightweight, self-hosted bookmark manager designed to simplify saving and organizing links. It supports features like tagging, searching, and bookmark importing/exporting. It also includes a browser extension for quick access and management. With Tailscale, your Linkding instance is safeguarded, ensuring that your bookmarks are only accessible to you and authorized users within your private network. 8 | 9 | ## Key Features 10 | 11 | - **Tagging and Search**: Organize and find bookmarks effortlessly with tags and a robust search feature. 12 | - **Browser Integration**: Quickly save and manage bookmarks via browser extensions. 13 | - **Self-Hosted Privacy**: Keep your bookmarks secure and private with a locally hosted solution. 14 | - **Import/Export**: Easily migrate bookmarks to and from other services. 15 | 16 | ## Configuration Overview 17 | 18 | In this setup, the `tailscale-linkding` service runs Tailscale, which manages secure networking for the Linkding service. The `linkding` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This ensures that Linkding’s web interface is only accessible through the Tailscale network (or locally, if preferred), providing enhanced privacy and security for managing your bookmarks. 19 | -------------------------------------------------------------------------------- /services/linkding/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:9090" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/linkding/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # Make sure you have updated/checked the .env file with the correct variables. 3 | # All the ${ xx } need to be defined there.
4 | # Tailscale Sidecar Configuration 5 | tailscale: 6 | image: tailscale/tailscale:latest # Image to be used 7 | container_name: tailscale-${SERVICE} # Name for local container management 8 | hostname: ${SERVICE} # Name used within your Tailscale environment 9 | environment: 10 | - TS_AUTHKEY=${TS_AUTHKEY} 11 | - TS_STATE_DIR=/var/lib/tailscale 12 | - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required 13 | - TS_USERSPACE=false 14 | - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" 15 | - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The address:port for the healthz endpoint 16 | #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS 17 | volumes: 18 | - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path 19 | - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path 20 | devices: 21 | - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work 22 | cap_add: 23 | - net_admin # Tailscale requirement 24 | - sys_module # Tailscale requirement 25 | ports: 26 | - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required 27 | # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below 28 | # dns: 29 | # - ${DNS_SERVER} 30 | healthcheck: 31 | test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational 32 | interval: 1m # How often to perform the check 33 | timeout: 10s # Time to wait for the check to succeed 34 | retries: 3 # Number of retries before marking as unhealthy 35 | start_period: 10s # Time to wait before starting health checks 36 | restart: always 37 | 38 | # ${SERVICE} 39 | application: 40 | image: ${IMAGE_URL} # Image to be used 41 | network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale 42 | container_name: app-${SERVICE} # Name for local container management 43 | environment: 44 | - PUID=1000 45 | - PGID=1000 46 | - TZ=Europe/Amsterdam 47 | volumes: 48 | - ${PWD}/${SERVICE}-data/data:/etc/linkding/data 49 | env_file: 50 | - .linkding.env 51 | depends_on: 52 | tailscale: 53 | condition: service_healthy 54 | healthcheck: 55 | test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running 56 | interval: 1m # How often to perform the check 57 | timeout: 10s # Time to wait for the check to succeed 58 | retries: 3 # Number of retries before marking as unhealthy 59 | start_period: 30s # Time to wait before starting health checks 60 | restart: always -------------------------------------------------------------------------------- /services/metube/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=metube 5 | IMAGE_URL=ghcr.io/alexta69/metube 6 | SERVICEPORT=8081 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/metube/README.md: -------------------------------------------------------------------------------- 1 | # MeTube with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up
[MeTube](https://github.com/alexta69/metube) with Tailscale as a sidecar container to securely manage and access your self-hosted media downloader over a private Tailscale network. By integrating Tailscale, you can ensure that your MeTube instance remains private and accessible only to authorized devices on your Tailscale network. 4 | 5 | ## MeTube 6 | 7 | [MeTube](https://github.com/alexta69/metube) is a self-hosted YouTube downloader with playlist support. It lets you download videos from YouTube and dozens of other sites. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-metube` service runs Tailscale, which manages secure networking for the MeTube application. The `metube` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This ensures that MeTube’s web interface is only accessible through the Tailscale network (or locally, if preferred), providing enhanced privacy and security. 12 | -------------------------------------------------------------------------------- /services/metube/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:8081" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/metube/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # Make sure you have updated/checked the .env file with the correct variables. 3 | # All the ${ xx } need to be defined there. 4 | # Tailscale Sidecar Configuration 5 | tailscale: 6 | image: tailscale/tailscale:latest # Image to be used 7 | container_name: tailscale-${SERVICE} # Name for local container management 8 | hostname: ${SERVICE} # Name used within your Tailscale environment 9 | environment: 10 | - TS_AUTHKEY=${TS_AUTHKEY} 11 | - TS_STATE_DIR=/var/lib/tailscale 12 | - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required 13 | - TS_USERSPACE=false 14 | - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" 15 | - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The address:port for the healthz endpoint 16 | - TS_ACCEPT_DNS=true # Enabled here for MagicDNS 17 | volumes: 18 | - ./config:/config # Config folder used to store Tailscale files - you may need to change the path 19 | - ./ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path 20 | devices: 21 | - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work 22 | cap_add: 23 | - net_admin # Tailscale requirement 24 | - sys_module # Tailscale requirement 25 | #ports: 26 | # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required 27 | # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below 28 | #dns: 29 | # - ${DNS_SERVER} 30 | healthcheck: 31 | test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational 32 | interval: 1m # How often to perform the check 33 | timeout: 10s # Time to wait for the check to succeed 34 | retries: 3 # Number of retries before marking as unhealthy 35 | start_period: 10s # Time to wait before
starting health checks 36 | restart: always 37 | 38 | # ${SERVICE} 39 | application: 40 | image: ${IMAGE_URL} # Image to be used 41 | network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale 42 | container_name: app-${SERVICE} # Name for local container management 43 | environment: 44 | - PUID=1000 45 | - PGID=1000 46 | - TZ=Europe/Amsterdam 47 | volumes: 48 | - ./downloads:/downloads 49 | depends_on: 50 | tailscale: 51 | condition: service_healthy 52 | healthcheck: 53 | test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running 54 | interval: 1m # How often to perform the check 55 | timeout: 10s # Time to wait for the check to succeed 56 | retries: 3 # Number of retries before marking as unhealthy 57 | start_period: 30s # Time to wait before starting health checks 58 | restart: always 59 | -------------------------------------------------------------------------------- /services/miniqr/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=mini-qr 5 | IMAGE_URL=ghcr.io/lyqht/mini-qr:latest 6 | SERVICEPORT=8080 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/miniqr/README.md: -------------------------------------------------------------------------------- 1 | # Mini-QR with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up **[Mini-QR](https://github.com/lyqht/mini-qr)** with Tailscale as a sidecar container to securely access your self-hosted QR code generation tool over a private Tailscale network. By integrating Tailscale, you can ensure that your Mini-QR instance is only accessible to authorized devices within your private network, adding an extra layer of privacy and security. 4 | 5 | ## Mini-QR 6 | 7 | [Mini-QR](https://github.com/lyqht/mini-qr) is a **minimalist, self-hosted web app** for quickly generating QR codes on the fly. It features a sleek and simple interface that works well on both desktop and mobile, making it ideal for sharing links, text, or other short data via QR. It’s lightweight, fast, and requires no external services. Pairing it with Tailscale ensures that only trusted devices can access your QR generation tool—perfect for local, secure usage scenarios. 8 | 9 | ## Key Features 10 | 11 | - **Quick QR Generation** – Instantly generate QR codes from text or URLs. 12 | - **Mobile-Friendly UI** – Works smoothly on mobile and desktop devices. 13 | - **No Tracking, No Dependencies** – Lightweight and privacy-respecting. 14 | - **Self-Hosted** – Full control, no reliance on third-party QR services. 15 | - **Secure Access with Tailscale** – Limit access to authorized devices via your private network. 16 | 17 | ## Configuration Overview 18 | 19 | In this setup, the `tailscale-miniqr` service runs Tailscale, which handles secure networking for the Mini-QR service. The `mini-qr` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This setup ensures that the Mini-QR web interface is only accessible via your Tailscale network (or locally if preferred), giving you complete control over access and visibility. 
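As a quick sanity check after bringing the stack up, you can confirm that the sidecar is healthy and that Serve is forwarding as expected. A minimal sketch, assuming the container name `tailscale-mini-qr` produced by the compose file below and the default health-check address used throughout this repo:

```bash
# Start the stack from services/miniqr/ (set TS_AUTHKEY in .env first)
docker compose up -d

# Query the sidecar's health endpoint (the same check the compose healthcheck runs)
docker exec tailscale-mini-qr wget -qO- http://127.0.0.1:41234/healthz

# Confirm the node joined your tailnet and that Serve is forwarding 443 -> 8080
docker exec tailscale-mini-qr tailscale status
docker exec tailscale-mini-qr tailscale serve status
```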
20 | -------------------------------------------------------------------------------- /services/miniqr/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:8080" 12 | } 13 | } 14 | } 15 | } 16 | } -------------------------------------------------------------------------------- /services/miniqr/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # Make sure you have updated/checked the .env file with the correct variables. 3 | # All the ${ xx } need to be defined there. 4 | # Tailscale Sidecar Configuration 5 | tailscale: 6 | image: tailscale/tailscale:latest # Image to be used 7 | container_name: tailscale-${SERVICE} # Name for local container management 8 | hostname: ${SERVICE} # Name used within your Tailscale environment 9 | environment: 10 | - TS_AUTHKEY=${TS_AUTHKEY} 11 | - TS_STATE_DIR=/var/lib/tailscale 12 | - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required 13 | - TS_USERSPACE=false 14 | - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" 15 | - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The address:port for the healthz endpoint 16 | #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS 17 | volumes: 18 | - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path 19 | - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path 20 | devices: 21 | - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work 22 | cap_add: 23 | - net_admin # Tailscale requirement 24 | - sys_module # Tailscale requirement 25 | #ports: 26 | # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required 27 | # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below 28 | #dns: 29 | # - ${DNS_SERVER} 30 | healthcheck: 31 | test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational 32 | interval: 1m # How often to perform the check 33 | timeout: 10s # Time to wait for the check to succeed 34 | retries: 3 # Number of retries before marking as unhealthy 35 | start_period: 10s # Time to wait before starting health checks 36 | restart: always 37 | 38 | # ${SERVICE} 39 | application: 40 | image: ${IMAGE_URL} # Image to be used 41 | network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale 42 | container_name: app-${SERVICE} # Name for local container management 43 | environment: 44 | - PUID=1000 45 | - PGID=1000 46 | - TZ=Europe/Amsterdam 47 | depends_on: 48 | tailscale: 49 | condition: service_healthy 50 | healthcheck: 51 | test: ["CMD", "pgrep", "-f", "http-server"] # Check if ${SERVICE} process is running 52 | interval: 1m # How often to perform the check 53 | timeout: 10s # Time to wait for the check to succeed 54 | retries: 3 # Number of retries before marking as unhealthy 55 | start_period: 30s # Time to wait before starting health checks 56 | restart: always -------------------------------------------------------------------------------- /services/nanote/.env:
-------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=nanote 5 | IMAGE_URL=omarmir/nanote 6 | SERVICEPORT=3000 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/nanote/README.md: -------------------------------------------------------------------------------- 1 | # Nanote with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up **[Nanote](https://github.com/omarmir/nanote)** with Tailscale as a sidecar container to securely manage and access your self-hosted note-taking application over a private Tailscale network. By integrating Tailscale, you can ensure that your Nanote instance remains private and accessible only to authorized devices within your Tailscale network. 4 | 5 | ## Nanote 6 | 7 | [Nanote](https://github.com/omarmir/nanote) is a lightweight, self-hosted note-taking application designed for simplicity and speed. It provides a distraction-free environment to jot down quick notes, ideas, or reminders without the complexity of traditional note-taking apps. By integrating Tailscale, you can keep your Nanote instance secure and accessible only within your private network. 8 | 9 | ## Key Features 10 | 11 | - **Minimalist Design** – A clean and distraction-free interface for note-taking. 12 | - **Fast & Lightweight** – Optimized for quick note-taking without unnecessary bloat. 13 | - **Self-Hosted Privacy** – Keep your notes secure and under your control. 14 | - **Markdown Support** – Write notes in Markdown for easy formatting. 15 | - **Secure Access with Tailscale** – Restrict access to only authorized devices within your private network. 16 | 17 | ## Configuration Overview 18 | 19 | In this setup, the `tailscale-nanote` service runs Tailscale, which manages secure networking for the Nanote service. The `nanote` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This ensures that Nanote’s web interface and note storage are only accessible through the Tailscale network (or locally, if preferred), adding an extra layer of security and privacy to your note-taking workflow. 20 | -------------------------------------------------------------------------------- /services/nanote/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:3000" 12 | } 13 | } 14 | } 15 | } 16 | } -------------------------------------------------------------------------------- /services/nanote/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # Make sure you have updated/checked the .env file with the correct variables. 3 | # All the ${ xx } need to be defined there. 
4 | # Tailscale Sidecar Configuration 5 | tailscale: 6 | image: tailscale/tailscale:latest # Image to be used 7 | container_name: tailscale-${SERVICE} # Name for local container management 8 | hostname: ${SERVICE} # Name used within your Tailscale environment 9 | environment: 10 | - TS_AUTHKEY=${TS_AUTHKEY} 11 | - TS_STATE_DIR=/var/lib/tailscale 12 | - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required 13 | - TS_USERSPACE=false 14 | - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" 15 | - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The address:port for the healthz endpoint 16 | #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS 17 | volumes: 18 | - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path 19 | - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path 20 | devices: 21 | - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work 22 | cap_add: 23 | - net_admin # Tailscale requirement 24 | - sys_module # Tailscale requirement 25 | #ports: 26 | # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required 27 | # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below 28 | #dns: 29 | # - ${DNS_SERVER} 30 | healthcheck: 31 | test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational 32 | interval: 1m # How often to perform the check 33 | timeout: 10s # Time to wait for the check to succeed 34 | retries: 3 # Number of retries before marking as unhealthy 35 | start_period: 10s # Time to wait before starting health checks 36 | restart: always 37 | 38 | # ${SERVICE} 39 | application: 40 | image: ${IMAGE_URL} # Image to be used 41 | network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale 42 | container_name: app-${SERVICE} # Name for local container management 43 | environment: 44 | - PUID=1000 45 | - PGID=1000 46 | - TZ=Europe/Amsterdam 47 | - NOTES_PATH=/notes 48 | - SECRET_KEY= 49 | volumes: 50 | - ${PWD}/${SERVICE}-data:/notes 51 | depends_on: 52 | tailscale: 53 | condition: service_healthy 54 | healthcheck: 55 | test: ['CMD', 'curl', '-f', 'http://localhost:3000/api/health'] 56 | interval: 1m # How often to perform the check 57 | timeout: 10s # Time to wait for the check to succeed 58 | retries: 3 # Number of retries before marking as unhealthy 59 | start_period: 30s # Time to wait before starting health checks 60 | restart: always -------------------------------------------------------------------------------- /services/nessus/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=nessus 5 | IMAGE_URL=tenable/nessus:latest-ubuntu 6 | SERVICEPORT=8834 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 -------------------------------------------------------------------------------- /services/nessus/README.md: -------------------------------------------------------------------------------- 1 | # Nessus with Tailscale Sidecar Configuration 2 | 3 | > ⚠️ **Important:** This container has no persistent storage; your configuration will be lost
when restarting the instance. 4 | 5 | This Docker Compose configuration sets up **[Nessus](https://www.tenable.com/products/nessus)** with Tailscale as a sidecar container to securely manage and access your vulnerability assessment tool over a private Tailscale network. By integrating Tailscale, you can ensure that your Nessus instance remains private and accessible only to authorized devices on your Tailscale network. 6 | 7 | ## Nessus 8 | 9 | [Nessus](https://www.tenable.com/products/nessus) is one of the most widely used vulnerability assessment tools, designed to help identify and remediate security issues in IT environments. With powerful scanning capabilities, Nessus provides detailed reports on system vulnerabilities, configuration errors, and compliance issues. By pairing Nessus with Tailscale, you can further secure your vulnerability management setup by restricting access to authorized devices within your private network. 10 | 11 | ### Nessus Essentials: Free for Personal Use 12 | 13 | Nessus Essentials offers a free version of the tool for personal and home use, [request your license here](https://www.tenable.com/products/nessus/nessus-essentials). It allows scanning up to **16 IP addresses**, making it an excellent choice for individuals looking to improve the security of their home networks. Despite being a free version, Nessus Essentials provides access to many of the powerful scanning capabilities that Nessus is known for, making it ideal for learning or small-scale vulnerability assessments. 14 | 15 | ## Key Features 16 | 17 | - **Comprehensive Scanning**: Identify vulnerabilities, misconfigurations, and compliance violations across networks. 18 | - **Detailed Reporting**: Generate in-depth reports to prioritize and remediate security issues effectively. 19 | - **Self-Hosted**: Maintain full control over your scanning environment with a locally hosted instance. 20 | - **Customizable Policies**: Tailor scans to meet your organization’s unique security needs. 21 | - **Free Essentials Model**: Start for free with up to 16 IPs using Nessus Essentials. 22 | 23 | ## Configuration Overview 24 | 25 | In this setup, the `tailscale-nessus` service runs Tailscale, which manages secure networking for the Nessus service. The `nessus` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This ensures that Nessus’ web interface and scanning functionalities are only accessible through the Tailscale network (or locally, if preferred), adding an additional layer of security to your vulnerability management infrastructure. 26 | 27 | For additional configuration (environment variables) - please refer to the [Tenable documentation](https://docs.tenable.com/nessus/Content/DeployNessusDocker.htm). 28 | -------------------------------------------------------------------------------- /services/nessus/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "https+insecure://127.0.0.1:8834" 12 | } 13 | } 14 | } 15 | } 16 | } -------------------------------------------------------------------------------- /services/nessus/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # Make sure you have updated/checked the .env file with the correct variables. 3 | # All the ${ xx } need to be defined there. 
4 | # Tailscale Sidecar Configuration 5 | tailscale: 6 | image: tailscale/tailscale:latest # Image to be used 7 | container_name: tailscale-${SERVICE} # Name for local container management 8 | hostname: ${SERVICE} # Name used within your Tailscale environment 9 | environment: 10 | - TS_AUTHKEY=${TS_AUTHKEY} 11 | - TS_STATE_DIR=/var/lib/tailscale 12 | - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required 13 | - TS_USERSPACE=false 14 | - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" 15 | - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The address:port for the healthz endpoint 16 | #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS 17 | volumes: 18 | - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path 19 | - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path 20 | devices: 21 | - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work 22 | cap_add: 23 | - net_admin # Tailscale requirement 24 | - sys_module # Tailscale requirement 25 | #ports: 26 | # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required 27 | # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below 28 | #dns: 29 | # - ${DNS_SERVER} 30 | healthcheck: 31 | test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational 32 | interval: 1m # How often to perform the check 33 | timeout: 10s # Time to wait for the check to succeed 34 | retries: 3 # Number of retries before marking as unhealthy 35 | start_period: 10s # Time to wait before starting health checks 36 | restart: always 37 | 38 | # ${SERVICE} 39 | application: 40 | image: ${IMAGE_URL} # Image to be used 41 | network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale 42 | container_name: app-${SERVICE} # Name for local container management 43 | environment: 44 | - PUID=1000 45 | - PGID=1000 46 | - TZ=Europe/Amsterdam 47 | # volumes: 48 | # - ${PWD}/${SERVICE}-data/app/config:/config 49 | depends_on: 50 | tailscale: 51 | condition: service_healthy 52 | healthcheck: 53 | test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running 54 | interval: 1m # How often to perform the check 55 | timeout: 10s # Time to wait for the check to succeed 56 | retries: 3 # Number of retries before marking as unhealthy 57 | start_period: 30s # Time to wait before starting health checks 58 | restart: always -------------------------------------------------------------------------------- /services/nextcloud/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=nextcloud 5 | IMAGE_URL=nextcloud 6 | SERVICEPORT=80 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | MYSQL_ROOT_PASSWORD= //Insert super strong root password 10 | MYSQL_PASSWORD= //Insert super strong password -------------------------------------------------------------------------------- /services/nextcloud/README.md: -------------------------------------------------------------------------------- 1 | # Nextcloud Server with Tailscale Sidecar Configuration 2 | 3 | This
Docker Compose configuration sets up [Nextcloud Server](https://github.com/nextcloud/server) with Tailscale as a sidecar container to securely manage and access your personal cloud storage over a private Tailscale network. By using Tailscale in a sidecar configuration, you can enhance the security and privacy of your Nextcloud instance, ensuring that it is only accessible within your Tailscale network. 4 | 5 | ## Nextcloud Server 6 | 7 | [Nextcloud Server](https://github.com/nextcloud/server) is an open-source, self-hosted cloud storage platform that allows you to store, share, and sync your files across multiple devices. It provides a secure and private alternative to commercial cloud services, giving you full control over your data. This configuration leverages Tailscale to securely connect to your Nextcloud instance, protecting your files and personal data from unauthorized access. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-nextcloud` service runs Tailscale, which manages secure networking for the Nextcloud Server. The `nextcloud` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This setup ensures that Nextcloud's web interface and file synchronization services are only accessible through the Tailscale network (or locally, if preferred), providing an extra layer of security and privacy for your cloud storage solution. 12 | -------------------------------------------------------------------------------- /services/nextcloud/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:80" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/nodered/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=nodered 5 | IMAGE_URL=nodered/node-red:latest 6 | SERVICEPORT=1880 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/nodered/README.md: -------------------------------------------------------------------------------- 1 | # Node-RED with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Node-RED](https://github.com/node-red/node-red) with Tailscale as a sidecar container to securely access and manage your flow-based programming tool over a private Tailscale network. By using Tailscale in a sidecar configuration, you can enhance the security and privacy of your Node-RED instance, ensuring it is only accessible within your Tailscale network. 4 | 5 | ## Node-RED 6 | 7 | [Node-RED](https://github.com/node-red/node-red) is a low-code programming tool for event-driven applications, designed to connect devices, APIs, and online services through an intuitive, browser-based flow editor. It’s widely used for IoT, automation, and integration tasks, offering a powerful yet user-friendly way to build workflows.
This configuration leverages Tailscale to securely connect to your Node-RED instance, ensuring that your workflows and configurations are protected from unauthorized access and accessible only via your private Tailscale network. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-node-red` service runs Tailscale, which manages secure networking for the Node-RED service. The `node-red` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This ensures that Node-RED’s web interface is only accessible through the Tailscale network (or locally, if preferred), providing an additional layer of security and privacy for your flow-based programming environment. 12 | -------------------------------------------------------------------------------- /services/nodered/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:1880" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/nodered/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # Make sure you have updated/checked the .env file with the correct variables. 3 | # All the ${ xx } need to be defined there. 4 | # Tailscale Sidecar Configuration 5 | tailscale: 6 | image: tailscale/tailscale:latest # Image to be used 7 | container_name: tailscale-${SERVICE} # Name for local container management 8 | hostname: ${SERVICE} # Name used within your Tailscale environment 9 | environment: 10 | - TS_AUTHKEY=${TS_AUTHKEY} 11 | - TS_STATE_DIR=/var/lib/tailscale 12 | - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required 13 | - TS_USERSPACE=false 14 | - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" 15 | - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The address:port for the healthz endpoint 16 | #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS 17 | volumes: 18 | - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path 19 | - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path 20 | devices: 21 | - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work 22 | cap_add: 23 | - net_admin # Tailscale requirement 24 | - sys_module # Tailscale requirement 25 | ports: 26 | - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required 27 | # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below 28 | # dns: 29 | # - ${DNS_SERVER} 30 | healthcheck: 31 | test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational 32 | interval: 1m # How often to perform the check 33 | timeout: 10s # Time to wait for the check to succeed 34 | retries: 3 # Number of retries before marking as unhealthy 35 | start_period: 10s # Time to wait before starting health checks 36 | restart: always 37 | 38 | # ${SERVICE} 39 | application: 40 | image: ${IMAGE_URL} # Image to be used 41 | network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale 42 |
container_name: app-${SERVICE} # Name for local container management 43 | environment: 44 | - PUID=1000 45 | - PGID=1000 46 | - TZ=Europe/Amsterdam 47 | volumes: 48 | - ${PWD}/${SERVICE}-data/app/config:/data 49 | depends_on: 50 | tailscale: 51 | condition: service_healthy 52 | healthcheck: 53 | test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running 54 | interval: 1m # How often to perform the check 55 | timeout: 10s # Time to wait for the check to succeed 56 | retries: 3 # Number of retries before marking as unhealthy 57 | start_period: 30s # Time to wait before starting health checks 58 | restart: always -------------------------------------------------------------------------------- /services/pihole/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=pihole 5 | IMAGE_URL=pihole/pihole 6 | SERVICEPORT=80 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/pihole/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:80/admin" 12 | } 13 | } 14 | } 15 | } 16 | } -------------------------------------------------------------------------------- /services/pingvin-share/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=pingvin-share 5 | IMAGE_URL=stonith404/pingvin-share 6 | SERVICEPORT=3000 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/pingvin-share/README.md: -------------------------------------------------------------------------------- 1 | # Pingvin Share with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Pingvin Share](https://github.com/stonith404/pingvin-share) with Tailscale as a sidecar container to securely share files over a private Tailscale network. By using Tailscale in a sidecar configuration, you can ensure your file-sharing instance is accessible only within your Tailscale network, providing enhanced security and privacy. 4 | 5 | ## Pingvin Share 6 | 7 | [Pingvin Share](https://github.com/stonith404/pingvin-share) is a simple, open-source file-sharing application designed to make sharing files quick, easy, and efficient. It supports drag-and-drop uploads, expiring links, and a user-friendly web interface. With this setup, Tailscale ensures that your Pingvin Share instance remains secure and private, limiting access to only authorized devices on your Tailscale network. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-pingvin` service runs Tailscale, which manages secure networking for the Pingvin Share service. The `pingvin-share` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. 
This ensures that Pingvin Share’s web interface and file-sharing capabilities are only accessible through the Tailscale network (or locally, if preferred), providing an extra layer of security and privacy for your self-hosted file-sharing needs. 12 | -------------------------------------------------------------------------------- /services/pingvin-share/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:3000" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/plex/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=plex 5 | IMAGE_URL=lscr.io/linuxserver/plex 6 | SERVICEPORT=32400 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/plex/README.md: -------------------------------------------------------------------------------- 1 | # Plex with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Plex Media Server](https://hub.docker.com/r/linuxserver/plex) with Tailscale as a sidecar container to securely manage and stream your media over a private Tailscale network. By using Tailscale in a sidecar configuration, you can enhance the security and privacy of your media server, ensuring that it is only accessible within your Tailscale network. 4 | 5 | ## Plex Media Server 6 | 7 | [Plex Media Server](https://hub.docker.com/r/linuxserver/plex) is a versatile platform for organizing and streaming your personal media collection, including movies, TV shows, music, and photos. Plex makes it easy to access your media from any device, both locally and remotely, with a user-friendly interface and extensive device support. This configuration leverages Tailscale to securely connect to your Plex server, protecting your media streams from unauthorized access. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-plex` service runs Tailscale, which manages secure networking for the Plex Media Server. The `plex` service utilizes the Tailscale network stack via Docker's `network_mode: service:` configuration. This setup ensures that Plex's media streaming service is only accessible through the Tailscale network (or locally, if preferred), providing an extra layer of security and privacy for your media server. 
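Once the containers are running, the serve.json below terminates HTTPS on your tailnet and forwards requests to Plex on port 32400. As an illustrative check from another device on your tailnet (replace `<tailnet>` with your tailnet's MagicDNS domain):

```bash
# Inspect the active Serve configuration inside the sidecar
docker exec tailscale-plex tailscale serve status

# Plex answers on its web path via the HTTPS name Tailscale provisions
curl -I https://plex.<tailnet>.ts.net/web/index.html
```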
12 | -------------------------------------------------------------------------------- /services/plex/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:32400" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/pocket-id/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | #Find Specific Variables in documentation https://stonith404.github.io/pocket-id/configuration/environment-variables 5 | SERVICE=pocket-id 6 | IMAGE_URL=ghcr.io/pocket-id/pocket-id 7 | SERVICEPORT=80 8 | TS_AUTHKEY= 9 | DNS_SERVER=9.9.9.9 10 | -------------------------------------------------------------------------------- /services/pocket-id/README.md: -------------------------------------------------------------------------------- 1 | # Pocket ID with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Pocket ID](https://github.com/stonith404/pocket-id) with Tailscale as a sidecar container to securely manage and access your identity provider over a private Tailscale network. By integrating Tailscale, you can ensure that your Pocket ID instance remains private and accessible only to authorized devices within your Tailscale network. 4 | 5 | ## Pocket ID 6 | 7 | [Pocket ID](https://github.com/stonith404/pocket-id) is an open-source, self-hosted OIDC (OpenID Connect) provider that simplifies user authentication for your self-hosted applications. Instead of passwords, it authenticates users with passkeys, providing a secure, phishing-resistant, and privacy-first approach to signing in. With Pocket ID, you can act as the identity provider for your services and manage users and OIDC clients, all while maintaining complete control over your identity system. 8 | 9 | ## Key Features 10 | 11 | - **Passkey Authentication**: Users sign in with passkeys (WebAuthn), so there are no passwords to store, leak, or phish. 12 | - **OIDC Provider**: Acts as an OpenID Connect identity provider for the rest of your self-hosted stack. 13 | - **Lightweight and Simple**: A small, easy-to-run service with a straightforward admin interface. 14 | - **Self-Hosted**: Maintain full control over your identity solution by hosting it locally. 15 | - **Secure Integration**: Pair with Tailscale for enhanced security, limiting access to your identity services to authorized devices. 16 | 17 | ## Configuration Overview 18 | 19 | In this setup, the `tailscale-pocket-id` service runs Tailscale, which manages secure networking for the Pocket ID service. The `pocket-id` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This ensures that Pocket ID’s web interface and APIs are only accessible through the Tailscale network (or locally, if preferred), providing an extra layer of security and privacy for your identity management system.
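
Each service in this repo ships a `config/serve.json` like the one that follows. The sidecar loads it through the `TS_SERVE_CONFIG` environment variable, and the Tailscale container image substitutes `${TS_CERT_DOMAIN}` with the node's tailnet DNS name when it applies the config. Rendered as YAML purely for annotation (the actual file must remain JSON), the structure is:

```yaml
# serve.json structure, shown as YAML for annotation only.
TCP:
  "443":
    HTTPS: true                        # terminate HTTPS on the tailnet at port 443
Web:
  "${TS_CERT_DOMAIN}:443":             # expanded to e.g. pocket-id.your-tailnet.ts.net
    Handlers:
      "/":
        Proxy: "http://127.0.0.1:80"   # forward to the app's local port
```

From service to service only the `Proxy` port changes, and it must match the `SERVICEPORT` value in the corresponding `.env` file.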
20 | -------------------------------------------------------------------------------- /services/pocket-id/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:80" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/portainer/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=portainer 5 | IMAGE_URL=portainer/portainer-ce 6 | SERVICEPORT=9000 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/portainer/README.md: -------------------------------------------------------------------------------- 1 | # Portainer with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Portainer](https://github.com/portainer/portainer) with Tailscale as a sidecar container to securely manage and monitor your Docker environments over a private Tailscale network. By using Tailscale in a sidecar configuration, you can enhance the security and privacy of your Portainer instance, ensuring that it is only accessible within your Tailscale network. 4 | 5 | ## Portainer 6 | 7 | [Portainer](https://github.com/portainer/portainer) is an open-source management tool that provides a simple and easy-to-use interface for managing Docker environments. Whether you are deploying containers, managing networks, or monitoring your Docker services, Portainer offers a comprehensive solution for managing your containerized applications. This configuration leverages Tailscale to securely connect to your Portainer instance, protecting your Docker management interface from unauthorized access. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-portainer` service runs Tailscale, which manages secure networking for the Portainer service. The `portainer` service uses the Tailscale network stack via Docker’s `network_mode: service:` configuration. This setup ensures that Portainer’s management interface is only accessible through the Tailscale network (or locally, if preferred), providing an extra layer of security and privacy for managing your Docker environments. 12 | -------------------------------------------------------------------------------- /services/portainer/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:9000" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/portainer/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # Make sure you have updated/checked the .env file with the correct variables. 3 | # All the ${ xx } need to be defined there. 
4 | # Tailscale Sidecar Configuration 5 | tailscale: 6 | image: tailscale/tailscale:latest # Image to be used 7 | container_name: tailscale-${SERVICE} # Name for local container management 8 | hostname: ${SERVICE} # Name used within your Tailscale environment 9 | environment: 10 | - TS_AUTHKEY=${TS_AUTHKEY} 11 | - TS_STATE_DIR=/var/lib/tailscale 12 | - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required 13 | - TS_USERSPACE=false 14 | - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" 15 | - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The address:port used for the healthz endpoint 16 | #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS 17 | volumes: 18 | - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path 19 | - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path 20 | devices: 21 | - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work 22 | cap_add: 23 | - net_admin # Tailscale requirement 24 | - sys_module # Tailscale requirement 25 | #ports: 26 | # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required 27 | # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below 28 | # dns: 29 | # - ${DNS_SERVER} 30 | healthcheck: 31 | test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational 32 | interval: 1m # How often to perform the check 33 | timeout: 10s # Time to wait for the check to succeed 34 | retries: 3 # Number of retries before marking as unhealthy 35 | start_period: 10s # Time to wait before starting health checks 36 | restart: always 37 | 38 | # ${SERVICE} 39 | application: 40 | image: ${IMAGE_URL} # Image to be used 41 | network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale 42 | container_name: app-${SERVICE} # Name for local container management 43 | environment: 44 | - PUID=1000 45 | - PGID=1000 46 | - TZ=Europe/Amsterdam 47 | volumes: 48 | - /var/run/docker.sock:/var/run/docker.sock 49 | - ${PWD}/${SERVICE}-data/portainer_data:/data 50 | depends_on: 51 | tailscale: 52 | condition: service_healthy 53 | healthcheck: 54 | test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running 55 | interval: 1m # How often to perform the check 56 | timeout: 10s # Time to wait for the check to succeed 57 | retries: 3 # Number of retries before marking as unhealthy 58 | start_period: 30s # Time to wait before starting health checks 59 | restart: always -------------------------------------------------------------------------------- /services/qbittorrent/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=qbittorrent 5 | IMAGE_URL=lscr.io/linuxserver/qbittorrent 6 | SERVICEPORT=8080 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/qbittorrent/README.md: -------------------------------------------------------------------------------- 1 | # qBittorrent with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets
up [qBittorrent](https://www.qbittorrent.org/) with Tailscale as a sidecar container to securely manage and access your torrent client over a private Tailscale network. By using Tailscale in a sidecar configuration, you can enhance the security and privacy of your qBittorrent instance, ensuring that it is only accessible within your Tailscale network. 4 | 5 | ## qBittorrent 6 | 7 | [qBittorrent](https://www.qbittorrent.org/) is an open-source, cross-platform torrent client that offers a clean interface, powerful search capabilities, and support for most features found in modern BitTorrent clients. This configuration leverages Tailscale to securely connect to your qBittorrent instance, ensuring that your torrent management interface is protected from unauthorized access and that your instance is accessible only via your private Tailscale network. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the tailscale-qbittorrent service runs Tailscale, which manages secure networking for the qBittorrent service. The qbittorrent service uses the Tailscale network stack via Docker's network_mode: service: configuration. This setup ensures that qBittorrent’s web interface and API are only accessible through the Tailscale network (or locally, if preferred), providing an extra layer of security and privacy for your self-hosted torrent client. 12 | -------------------------------------------------------------------------------- /services/qbittorrent/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:8080" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/radarr/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=radarr 5 | IMAGE_URL=radarr/server 6 | SERVICEPORT=7878 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/radarr/README.md: -------------------------------------------------------------------------------- 1 | # Radarr with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Radarr](https://github.com/Radarr/Radarr) with Tailscale as a sidecar container to securely manage and access your media management system over a private Tailscale network. By using Tailscale in a sidecar configuration, you can enhance the security and privacy of your Radarr instance, ensuring that it is only accessible within your Tailscale network. 4 | 5 | ## Radarr 6 | 7 | [Radarr](https://github.com/Radarr/Radarr) is an open-source, self-hosted application for managing movies in your media collection. It allows you to automatically download movies from Usenet and BitTorrent sources and organize them in your media library. This configuration leverages Tailscale to securely connect to your Radarr instance, ensuring that your media management interface is protected from unauthorized access and that your instance is accessible only via your private Tailscale network. 
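
The application containers across these stacks also carry their own liveness probe, a simple process check condensed here from the repo's own compose files:

```yaml
# Application-side liveness probe used across the compose files in this repo.
services:
  application:
    healthcheck:
      test: ["CMD", "pgrep", "-f", "${SERVICE}"]   # healthy if a ${SERVICE} process exists
      interval: 1m          # how often to perform the check
      start_period: 30s     # grace period before checks begin
```

This only verifies that a matching process is running inside the container, not that the web UI actually responds, so treat it as a coarse liveness signal rather than a functional check.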
8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-radarr` service runs Tailscale, which manages secure networking for the Radarr service. The `radarr` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This setup ensures that Radarr’s web interface and API are only accessible through the Tailscale network (or locally, if preferred), providing an extra layer of security and privacy for your self-hosted media manager. 12 | -------------------------------------------------------------------------------- /services/radarr/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:7878" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/resilio-sync/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=resilio-sync 5 | IMAGE_URL=linuxserver/resilio-sync 6 | SERVICEPORT=8888 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/resilio-sync/README.md: -------------------------------------------------------------------------------- 1 | # Resilio Sync with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Resilio Sync](https://github.com/linuxserver/docker-resilio-sync) with Tailscale as a sidecar container to securely synchronize and share your files over a private Tailscale network. By using Tailscale in a sidecar configuration, you can enhance the security and privacy of your file synchronization, ensuring that Resilio Sync is only accessible within your Tailscale network. 4 | 5 | ## Resilio Sync 6 | 7 | [Resilio Sync](https://github.com/linuxserver/docker-resilio-sync) is a powerful, peer-to-peer file synchronization tool that allows you to sync files between devices or share them with others, without relying on cloud services. With its robust and flexible syncing capabilities, Resilio Sync is ideal for personal and professional use cases where secure, decentralized file sharing is required. This configuration leverages Tailscale to securely connect to your Resilio Sync instance, protecting your file transfers from unauthorized access. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-resilio-sync` service runs Tailscale, which manages secure networking for the Resilio Sync service. The `resilio-sync` service uses the Tailscale network stack via Docker’s `network_mode: service:` configuration. This setup ensures that Resilio Sync is only accessible through the Tailscale network (or locally, if preferred), providing an extra layer of security and privacy for your file synchronization and sharing tasks. 
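
By default none of these stacks publish anything on the host: the application is reachable only over the tailnet. If you also want plain LAN access, the compose files carry a commented-out `ports` block that can be re-enabled. Note that it sits on the `tailscale` service, because a container running with `network_mode: service:` cannot publish ports itself:

```yaml
# Re-enabling LAN exposure (optional); SERVICEPORT comes from the .env file.
services:
  tailscale:
    ports:
      - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT}   # publish the app's port on all host interfaces
```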
12 | -------------------------------------------------------------------------------- /services/resilio-sync/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:8888" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/searxng/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=searxng 5 | IMAGE_URL=docker.io/searxng/searxng 6 | SERVICEPORT=8080 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/searxng/README.md: -------------------------------------------------------------------------------- 1 | # searXNG with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [searXNG](https://github.com/searxng/searxng) with Tailscale as a sidecar container, enabling secure access to your private metasearch engine over a private Tailscale network. By integrating Tailscale in a sidecar configuration, you can ensure that your searXNG instance is accessible only within your Tailscale network, providing an additional layer of security and privacy for your searches. 4 | 5 | ## searXNG 6 | 7 | [searXNG](https://github.com/searxng/searxng) is a free, open-source metasearch engine that aggregates results from multiple search engines while protecting your privacy. With no user tracking and the ability to self-host, searXNG empowers you to take control of your search experience. By leveraging Tailscale, you can securely access your self-hosted searXNG instance from any of your devices, ensuring that your searches remain private and inaccessible to unauthorized users. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-searxng` service runs Tailscale, which manages secure networking for the searXNG service. The `searxng` service utilizes the Tailscale network stack via Docker’s `network_mode: service:` configuration. This setup ensures that searXNG is only accessible through your Tailscale network (or locally, if preferred). With this configuration, you can enjoy a private, secure, and customizable search engine experience, free from user tracking or external access. 
12 | 13 | ## References 14 | 15 | [![Replace Google with SearXNG - a privacy respecting, self-hosted search engine](https://img.youtube.com/vi/cg9d87PuanE/0.jpg)](https://www.youtube.com/watch?v=cg9d87PuanE) 16 | -------------------------------------------------------------------------------- /services/searxng/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:8080" 12 | } 13 | } 14 | } 15 | } 16 | } -------------------------------------------------------------------------------- /services/slink/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=slink 5 | IMAGE_URL=anirdev/slink 6 | SERVICEPORT=3000 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/slink/README.md: -------------------------------------------------------------------------------- 1 | # Slink with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Slink](https://github.com/andrii-kryvoviaz/slink) with Tailscale as a sidecar container to securely manage and access your self-hosted image-sharing service over a private Tailscale network. By integrating Tailscale in a sidecar configuration, you can ensure that your Slink instance is both secure and private, accessible only within your Tailscale network. 4 | 5 | ## Slink 6 | 7 | [Slink](https://github.com/andrii-kryvoviaz/slink) is a fast, self-hosted image-sharing platform that lets you upload images and share them via direct links, without relying on third-party servers, ensuring complete control and privacy. By combining Slink with Tailscale, this configuration provides a secure way to connect and share images exclusively within your private network. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-slink` service runs Tailscale, which manages secure networking for the Slink service. The `slink` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This ensures that Slink's image-sharing interface is only accessible through the Tailscale network, adding an extra layer of security and privacy for your self-hosted image-sharing system.
12 | -------------------------------------------------------------------------------- /services/slink/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:3000" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/sonarr/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=sonarr 5 | IMAGE_URL=lscr.io/linuxserver/sonarr 6 | SERVICEPORT=8989 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/sonarr/README.md: -------------------------------------------------------------------------------- 1 | # Sonarr with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Sonarr](https://github.com/Sonarr/Sonarr) with Tailscale as a sidecar container to securely manage and access your media management system over a private Tailscale network. By using Tailscale in a sidecar configuration, you can enhance the security and privacy of your Sonarr instance, ensuring that it is only accessible within your Tailscale network. 4 | 5 | ## Sonarr 6 | 7 | [Sonarr](https://github.com/Sonarr/Sonarr) is an open-source, self-hosted application for managing TV shows in your media collection. It allows you to automatically download TV episodes from Usenet and BitTorrent sources and organize them in your media library. This configuration leverages Tailscale to securely connect to your Sonarr instance, ensuring that your media management interface is protected from unauthorized access and that your instance is accessible only via your private Tailscale network. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the tailscale-sonarr service runs Tailscale, which manages secure networking for the Sonarr service. The sonarr service uses the Tailscale network stack via Docker's network_mode: service: configuration. This setup ensures that Sonarr’s web interface and API are only accessible through the Tailscale network (or locally, if preferred), providing an extra layer of security and privacy for your self-hosted media manager. 
12 | -------------------------------------------------------------------------------- /services/sonarr/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:8989" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/speedtest-tracker/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=speedtest-tracker 5 | IMAGE_URL=lscr.io/linuxserver/speedtest-tracker 6 | SERVICEPORT=8888 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 -------------------------------------------------------------------------------- /services/speedtest-tracker/README.md: -------------------------------------------------------------------------------- 1 | # Speedtest Tracker with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Speedtest Tracker](https://github.com/alexjustesen/speedtest-tracker) with Tailscale as a sidecar container to securely monitor and access your internet speed tracking tool over a private Tailscale network. By integrating Tailscale, you can ensure that your Speedtest Tracker instance remains private and accessible only to authorized devices on your Tailscale network. 4 | 5 | ## Speedtest Tracker 6 | 7 | [Speedtest Tracker](https://github.com/alexjustesen/speedtest-tracker) is an open-source, self-hosted tool designed to regularly test and monitor your internet connection speed. It logs historical speed test data and provides detailed visualizations, making it ideal for diagnosing network issues or keeping your ISP accountable. Adding Tailscale enhances the security of your Speedtest Tracker instance by ensuring access is limited to authorized devices within your private network. 8 | 9 | ## Key Features 10 | 11 | - **Automated Speed Tests**: Schedule regular speed tests for consistent monitoring. 12 | - **Data Logging**: Keep historical records of your upload, download, and ping stats. 13 | - **Detailed Visualizations**: View trends and performance over time with an intuitive web interface. 14 | - **Self-Hosted**: Maintain full control over your data with a locally hosted solution. 15 | 16 | ## Configuration Overview 17 | 18 | In this setup, the `tailscale-speedtest` service runs Tailscale, which manages secure networking for the Speedtest Tracker service. The `speedtest-tracker` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This ensures that Speedtest Tracker’s web interface is only accessible through the Tailscale network (or locally, if preferred), providing enhanced privacy and security for your internet speed monitoring. 
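
Startup ordering matters in a sidecar setup: if the application comes up before the sidecar has joined the tailnet, it briefly has no usable network. The compose files in this repo gate on the sidecar's health, roughly like this:

```yaml
# Health-gated startup, condensed from the compose files in this repo.
services:
  tailscale:
    environment:
      - TS_ENABLE_HEALTH_CHECK=true          # expose the "/healthz" endpoint
      - TS_LOCAL_ADDR_PORT=127.0.0.1:41234   # address:port that serves "/healthz"
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"]
      interval: 1m
  application:
    depends_on:
      tailscale:
        condition: service_healthy           # wait until the node is up on the tailnet
```

The `/healthz` endpoint only reports healthy once the node holds a tailnet IP, so `service_healthy` is a reasonable stand-in for "connected".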
19 | -------------------------------------------------------------------------------- /services/speedtest-tracker/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:8888" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/speedtest-tracker/nginx/default.conf: -------------------------------------------------------------------------------- 1 | ## Version 2024/07/16 - Changelog: https://github.com/linuxserver/docker-baseimage-alpine-nginx/commits/master/root/defaults/nginx/site-confs/default.conf.sample 2 | 3 | server { 4 | listen 8888 default_server; 5 | # listen [::]:8888 default_server; 6 | # listen 8443 ssl default_server; 7 | # listen [::]:443 ssl default_server; 8 | 9 | server_name _; 10 | 11 | include /config/nginx/ssl.conf; 12 | 13 | set $root /app/www/public; 14 | if (!-d /app/www/public) { 15 | set $root /config/www; 16 | } 17 | root $root; 18 | index index.html index.htm index.php; 19 | 20 | location / { 21 | # enable for basic auth 22 | #auth_basic "Restricted"; 23 | #auth_basic_user_file /config/nginx/.htpasswd; 24 | 25 | try_files $uri $uri/ /index.html /index.htm /index.php$is_args$args; 26 | } 27 | 28 | location ~ ^(.+\.php)(.*)$ { 29 | # enable the next two lines for http auth 30 | #auth_basic "Restricted"; 31 | #auth_basic_user_file /config/nginx/.htpasswd; 32 | 33 | fastcgi_split_path_info ^(.+\.php)(.*)$; 34 | if (!-f $document_root$fastcgi_script_name) { return 404; } 35 | fastcgi_pass 127.0.0.1:9000; 36 | fastcgi_index index.php; 37 | include /etc/nginx/fastcgi_params; 38 | } 39 | 40 | # deny access to .htaccess/.htpasswd files 41 | location ~ /\.ht { 42 | deny all; 43 | } 44 | } -------------------------------------------------------------------------------- /services/stirlingpdf/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=stirlingpdf 5 | IMAGE_URL=frooodle/s-pdf 6 | SERVICEPORT=8080 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/stirlingpdf/README.md: -------------------------------------------------------------------------------- 1 | # Stirling-PDF with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Stirling-PDF](https://github.com/Stirling-Tools/Stirling-PDF) with Tailscale as a sidecar container to securely manage and manipulate PDF files over a private Tailscale network. By using Tailscale in a sidecar configuration, you can enhance the security and privacy of your PDF processing, ensuring that the Stirling-PDF interface is only accessible within your Tailscale network. 4 | 5 | ## Stirling-PDF 6 | 7 | Stirling-PDF is a versatile, open-source toolkit that allows you to perform various PDF manipulations, such as merging, splitting, compressing, and converting PDF files. With an intuitive and user-friendly interface, Stirling-PDF simplifies complex PDF tasks, making it a valuable tool for both personal and professional use. 
This configuration leverages Tailscale to securely connect to your Stirling-PDF instance, protecting your sensitive document operations from unauthorized access. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-stirlingpdf` service runs Tailscale, which manages secure networking for the Stirling-PDF service. The `stirlingpdf` service uses the Tailscale network stack via Docker’s `network_mode: service:` configuration. This setup ensures that Stirling-PDF's interface is only accessible through the Tailscale network (or locally, if preferred), providing an extra layer of security and privacy for your PDF processing tasks. 12 | -------------------------------------------------------------------------------- /services/stirlingpdf/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:8080" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/tailscale-exit-node/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=tailscale-exit-node 5 | IMAGE_URL=tailscale/tailscale 6 | #SERVICEPORT=80 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/tailscale-exit-node/README.md: -------------------------------------------------------------------------------- 1 | # Tailscale Exit Node Configuration 2 | 3 | This Docker Compose configuration sets up a Tailscale Exit Node, allowing devices in your Tailscale network to route their internet traffic securely through this node. By configuring a Tailscale Exit Node, you can enhance the privacy and security of your internet browsing by routing traffic through a trusted network, such as your home or office, rather than relying on potentially less secure public networks. 4 | 5 | ## Tailscale Exit Node 6 | 7 | A Tailscale Exit Node is a device within your Tailscale network that other devices can use as a gateway to the internet. By setting up an Exit Node, you ensure that all traffic from connected devices is routed through a secure and private network, benefiting from the encryption and privacy that Tailscale provides. This configuration leverages Docker to easily deploy and manage a Tailscale Exit Node, offering a straightforward solution to secure your internet traffic. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale` service runs a Tailscale container configured as an Exit Node. The key configurations include: 12 | 13 | - **TS_AUTHKEY**: This environment variable is where you insert your Tailscale authentication key. 14 | - **TS_EXTRA_ARGS**: The `--advertise-exit-node` flag is used to designate this container as an Exit Node within your Tailscale network. 15 | - **Sysctls**: The system controls `net.ipv4.ip_forward` and `net.ipv6.conf.all.forwarding` are enabled to allow IP forwarding, which is necessary for routing traffic through the Exit Node. 16 | - **Network Mode**: The `bridge` network mode is used to create a virtual network interface for the container, enabling it to handle traffic routing. 
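
Two of these settings are what distinguish an exit node from a plain sidecar, shown here in isolation (both already appear in the compose file below):

```yaml
# Exit-node specifics, excerpted from the compose file below.
services:
  tailscale:
    environment:
      - TS_EXTRA_ARGS=--advertise-exit-node   # offer this node as an exit node
    sysctls:
      net.ipv4.ip_forward: 1                  # let the kernel route IPv4 traffic
      net.ipv6.conf.all.forwarding: 1         # let the kernel route IPv6 traffic
```

Advertising is only half the job: an administrator must still approve the exit node in the Tailscale admin console before other devices can route through it.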
17 | 18 | This configuration ensures that the Tailscale Exit Node is set up correctly, allowing devices connected to your Tailscale network to securely route their internet traffic through this node. 19 | -------------------------------------------------------------------------------- /services/tailscale-exit-node/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # Make sure you have updated/checked the .env file with the correct variables. 3 | # All the ${ xx } need to be defined there. 4 | # Tailscale Sidecar Configuration 5 | tailscale: 6 | image: tailscale/tailscale:latest # Image to be used 7 | container_name: tailscale-${SERVICE} # Name for local container management 8 | hostname: ${SERVICE} # Name used within your Tailscale environment 9 | environment: 10 | - TS_AUTHKEY=${TS_AUTHKEY} 11 | - TS_STATE_DIR=/var/lib/tailscale 12 | - TS_EXTRA_ARGS=--advertise-exit-node 13 | - TS_USERSPACE=false 14 | - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" 15 | - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The address:port used for the healthz endpoint 16 | #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS 17 | volumes: 18 | - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path 19 | - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path 20 | devices: 21 | - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work 22 | dns: 23 | - 9.9.9.9 # Can be changed to your desired DNS provider 24 | sysctls: 25 | net.ipv4.ip_forward: 1 26 | net.ipv6.conf.all.forwarding: 1 27 | cap_add: 28 | - net_admin # Tailscale requirement 29 | - sys_module # Tailscale requirement 30 | network_mode: bridge 31 | healthcheck: 32 | test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational 33 | interval: 1m # How often to perform the check 34 | timeout: 10s # Time to wait for the check to succeed 35 | retries: 3 # Number of retries before marking as unhealthy 36 | start_period: 10s # Time to wait before starting health checks 37 | restart: always -------------------------------------------------------------------------------- /services/tautulli/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=tautulli 5 | IMAGE_URL=lscr.io/linuxserver/tautulli 6 | SERVICEPORT=8181 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/tautulli/README.md: -------------------------------------------------------------------------------- 1 | # Tautulli with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Tautulli for Docker](https://hub.docker.com/r/linuxserver/tautulli) with Tailscale as a sidecar container to securely monitor and manage your Plex Media Server over a private Tailscale network. By integrating Tailscale in a sidecar configuration, you enhance the security and privacy of your Tautulli installation, ensuring that it is only accessible within your Tailscale network. 4 | 5 | ## Tautulli 6 | 7 | [Tautulli](https://tautulli.com/) is a popular monitoring and analytics tool for Plex Media Server.
It provides detailed insights into your server’s activity, including media consumption, user activity, and server health. Tautulli allows you to generate reports, send notifications, and manage users, making it an essential tool for Plex server administrators. This configuration leverages Tailscale to securely connect to your Tautulli interface, protecting your server's monitoring data from unauthorized access. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the tailscale-tautulli service runs Tailscale, which manages secure networking for the Tautulli service. The tautulli service utilizes the Tailscale network stack via Docker's network_mode: service: configuration. This setup ensures that Tautulli’s monitoring interface is only accessible through the Tailscale network (or locally, if preferred), providing an extra layer of security and privacy for your Plex server monitoring and management. 12 | -------------------------------------------------------------------------------- /services/tautulli/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:8181" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/tautulli/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # Make sure you have updated/checked the .env file with the correct variables. 3 | # All the ${ xx } need to be defined there. 4 | # Tailscale Sidecar Configuration 5 | tailscale: 6 | image: tailscale/tailscale:latest # Image to be used 7 | container_name: tailscale-${SERVICE} # Name for local container management 8 | hostname: ${SERVICE} # Name used within your Tailscale environment 9 | environment: 10 | - TS_AUTHKEY=${TS_AUTHKEY} 11 | - TS_STATE_DIR=/var/lib/tailscale 12 | - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required 13 | - TS_USERSPACE=false 14 | - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" 15 | - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The address:port used for the healthz endpoint 16 | #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS 17 | volumes: 18 | - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path 19 | - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path 20 | devices: 21 | - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work 22 | cap_add: 23 | - net_admin # Tailscale requirement 24 | - sys_module # Tailscale requirement 25 | #ports: 26 | # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required 27 | # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below 28 | # dns: 29 | # - ${DNS_SERVER} 30 | healthcheck: 31 | test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational 32 | interval: 1m # How often to perform the check 33 | timeout: 10s # Time to wait for the check to succeed 34 | retries: 3 # Number of retries before marking as unhealthy 35 | start_period: 10s # Time to wait before starting health checks 36 | restart:
always 37 | 38 | # ${SERVICE} 39 | application: 40 | image: ${IMAGE_URL} # Image to be used 41 | network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale 42 | container_name: app-${SERVICE} # Name for local container management 43 | environment: 44 | - PUID=1000 45 | - PGID=1000 46 | - TZ=Europe/Amsterdam 47 | volumes: 48 | - ${PWD}/${SERVICE}-data/app/config:/config 49 | depends_on: 50 | tailscale: 51 | condition: service_healthy 52 | healthcheck: 53 | test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running 54 | interval: 1m # How often to perform the check 55 | timeout: 10s # Time to wait for the check to succeed 56 | retries: 3 # Number of retries before marking as unhealthy 57 | start_period: 30s # Time to wait before starting health checks 58 | restart: always -------------------------------------------------------------------------------- /services/technitium/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | TS_AUTHKEY= 5 | SERVICE=technitium 6 | IMAGE_URL=technitium/dns-server:latest 7 | SERVICEPORT=5380 8 | #Specific to Technitium 9 | DNS_SERVER1=9.9.9.9 10 | DNS_SERVER2=8.8.4.4 11 | ADMIN_PASSWORD=ChangeME 12 | 13 | -------------------------------------------------------------------------------- /services/technitium/README.md: -------------------------------------------------------------------------------- 1 | # Technitium DNS server with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up a [Technitium DNS Server](https://github.com/TechnitiumSoftware/DnsServer) with Tailscale as a sidecar container to securely manage and access your DNS server over a private Tailscale network. By using Tailscale in a sidecar configuration, you can ensure that the Technitium web console is only accessible within your Tailscale network. 4 | 5 | ## Technitium 6 | 7 | [Technitium DNS Server](https://github.com/TechnitiumSoftware/DnsServer) is an open-source, self-hosted authoritative and recursive DNS server. It supports features such as ad and tracker blocking, DNS-over-HTTPS and DNS-over-TLS, and fine-grained zone management, all administered through a web console (served on port 5380 in this setup). 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-technitium` service runs Tailscale, which manages secure networking for the Technitium DNS Server. The `technitium` service utilizes the Tailscale network stack via Docker's `network_mode: service:` configuration. This setup ensures that Technitium's web console is only accessible through the Tailscale network (or locally, if preferred, with some modifications), providing an extra layer of security and privacy for your DNS infrastructure.
12 | -------------------------------------------------------------------------------- /services/technitium/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:5380" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/traefik/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=traefik 5 | IMAGE_URL=traefik:latest 6 | SERVICEPORT=80 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 -------------------------------------------------------------------------------- /services/traefik/README.md: -------------------------------------------------------------------------------- 1 | # Traefik with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Traefik](https://github.com/traefik/traefik) with Tailscale as a sidecar container to securely manage and route your traffic over a private Tailscale network. By integrating Tailscale, you can enhance the security and privacy of your Traefik instance, ensuring that access is restricted to devices within your Tailscale network. 4 | 5 | ## Traefik 6 | 7 | [Traefik](https://github.com/traefik/traefik) is a modern, open-source reverse proxy and load balancer that simplifies the deployment and management of services in dynamic environments. It supports a wide range of integrations with container orchestration platforms and cloud providers, offering features like automatic HTTPS, load balancing, and monitoring. By incorporating Tailscale, your Traefik instance is safeguarded, ensuring that only authorized users and devices on your Tailscale network can access your applications and services. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-traefik` service runs Tailscale, which manages secure networking for the Traefik service. The `traefik` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This ensures that Traefik’s dashboard and routing functionalities are only accessible through the Tailscale network (or locally, if preferred), adding an extra layer of privacy and security to your network architecture. 
12 | -------------------------------------------------------------------------------- /services/traefik/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:80" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/traefik/traefik/app/traefik.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/2Tiny2Scale/ScaleTail/8ac9389f0ba0e0882c1574c05e4936a41e93a5cb/services/traefik/traefik/app/traefik.yml -------------------------------------------------------------------------------- /services/uptime-kuma/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=uptime-kuma 5 | IMAGE_URL=louislam/uptime-kuma 6 | SERVICEPORT=3001 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/uptime-kuma/README.md: -------------------------------------------------------------------------------- 1 | # Uptime Kuma with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Uptime Kuma](https://github.com/louislam/uptime-kuma) with Tailscale as a sidecar container to securely monitor your services and websites over a private Tailscale network. By using Tailscale in a sidecar configuration, you can enhance the security and privacy of your monitoring dashboard, ensuring that it is only accessible within your Tailscale network. 4 | 5 | ## Uptime Kuma 6 | 7 | [Uptime Kuma](https://github.com/louislam/uptime-kuma) is a self-hosted monitoring tool that allows you to keep track of the uptime and performance of your websites, APIs, and services. With a sleek and user-friendly interface, Uptime Kuma provides real-time monitoring, notifications, and detailed reports to help you maintain the reliability of your infrastructure. This configuration leverages Tailscale to securely connect to your Uptime Kuma dashboard, protecting your monitoring data from unauthorized access. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-uptimekuma` service runs Tailscale, which manages secure networking for the Uptime Kuma service. The `uptimekuma` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This setup ensures that Uptime Kuma's monitoring dashboard is only accessible through the Tailscale network (or locally, if preferred), providing an extra layer of security and privacy for your monitoring solution. 
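
One detail worth noting in the compose file that follows: the application gets a read-only bind mount of the Docker socket, which lets Uptime Kuma's Docker monitor type query container state:

```yaml
# Volumes on the uptime-kuma application service, from the compose file below.
services:
  application:
    volumes:
      - ${PWD}/${SERVICE}-data/uptime-kuma-data:/app/data   # app data/configuration
      - /var/run/docker.sock:/var/run/docker.sock:ro        # Docker socket for container monitoring
```

Keep in mind that `:ro` only protects the socket file itself, not the Docker API behind it, so remove this mount if you do not use the Docker monitor.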
12 | -------------------------------------------------------------------------------- /services/uptime-kuma/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:3001" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/uptime-kuma/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # Make sure you have updated/checked the .env file with the correct variables. 3 | # All the ${ xx } need to be defined there. 4 | # Tailscale Sidecar Configuration 5 | tailscale: 6 | image: tailscale/tailscale:latest # Image to be used 7 | container_name: tailscale-${SERVICE} # Name for local container management 8 | hostname: ${SERVICE} # Name used within your Tailscale environment 9 | environment: 10 | - TS_AUTHKEY=${TS_AUTHKEY} 11 | - TS_STATE_DIR=/var/lib/tailscale 12 | - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required 13 | - TS_USERSPACE=false 14 | - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" 15 | - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The address:port used for the healthz endpoint 16 | #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS 17 | volumes: 18 | - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path 19 | - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path 20 | devices: 21 | - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work 22 | cap_add: 23 | - net_admin # Tailscale requirement 24 | - sys_module # Tailscale requirement 25 | #ports: 26 | # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required 27 | # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below 28 | # dns: 29 | # - ${DNS_SERVER} 30 | healthcheck: 31 | test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational 32 | interval: 1m # How often to perform the check 33 | timeout: 10s # Time to wait for the check to succeed 34 | retries: 3 # Number of retries before marking as unhealthy 35 | start_period: 10s # Time to wait before starting health checks 36 | restart: always 37 | 38 | # ${SERVICE} 39 | application: 40 | image: ${IMAGE_URL} # Image to be used 41 | network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale 42 | container_name: app-${SERVICE} # Name for local container management 43 | environment: 44 | - PUID=1000 45 | - PGID=1000 46 | - TZ=Europe/Amsterdam 47 | volumes: 48 | - ${PWD}/${SERVICE}-data/uptime-kuma-data:/app/data # uptime-kuma data/configuration folder 49 | - /var/run/docker.sock:/var/run/docker.sock:ro # Read-only access to the docker.sock 50 | depends_on: 51 | tailscale: 52 | condition: service_healthy 53 | restart: always 54 | -------------------------------------------------------------------------------- /services/vaultwarden/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 |
#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=vaultwarden 5 | IMAGE_URL=vaultwarden/server 6 | SERVICEPORT=80 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /services/vaultwarden/README.md: -------------------------------------------------------------------------------- 1 | # Vaultwarden with Tailscale Sidecar Configuration 2 | 3 | This Docker Compose configuration sets up [Vaultwarden](https://github.com/dani-garcia/vaultwarden) with Tailscale as a sidecar container to securely manage and access your password manager over a private Tailscale network. By using Tailscale in a sidecar configuration, you can enhance the security and privacy of your Vaultwarden instance, ensuring that it is only accessible within your Tailscale network. 4 | 5 | ## Vaultwarden 6 | 7 | [Vaultwarden](https://github.com/dani-garcia/vaultwarden) is an open-source, self-hosted alternative to Bitwarden, a popular password manager. Vaultwarden allows you to securely store and manage your passwords, notes, and other sensitive data. This configuration leverages Tailscale to securely connect to your Vaultwarden instance, ensuring that your passwords and sensitive information are protected from unauthorized access and that your instance is accessible only via your private Tailscale network. 8 | 9 | ## Configuration Overview 10 | 11 | In this setup, the `tailscale-vaultwarden` service runs Tailscale, which manages secure networking for the Vaultwarden service. The `vaultwarden` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This setup ensures that Vaultwarden’s web interface and API are only accessible through the Tailscale network (or locally, if preferred), providing an extra layer of security and privacy for your self-hosted password manager. 12 | -------------------------------------------------------------------------------- /services/vaultwarden/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:80" 12 | } 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /services/vaultwarden/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # Make sure you have updated/checked the .env file with the correct variables. 3 | # All the ${ xx } need to be defined there. 
4 | # Tailscale Sidecar Configuration 5 | tailscale: 6 | image: tailscale/tailscale:latest # Image to be used 7 | container_name: tailscale-${SERVICE} # Name for local container management 8 | hostname: ${SERVICE} # Name used within your Tailscale environment 9 | environment: 10 | - TS_AUTHKEY=${TS_AUTHKEY} 11 | - TS_STATE_DIR=/var/lib/tailscale 12 | - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required 13 | - TS_USERSPACE=false 14 | - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz" 15 | - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The address:port used for the healthz endpoint 16 | #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS 17 | volumes: 18 | - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path 19 | - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path 20 | devices: 21 | - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work 22 | cap_add: 23 | - net_admin # Tailscale requirement 24 | - sys_module # Tailscale requirement 25 | #ports: 26 | # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required 27 | # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below 28 | # dns: 29 | # - ${DNS_SERVER} 30 | healthcheck: 31 | test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational 32 | interval: 1m # How often to perform the check 33 | timeout: 10s # Time to wait for the check to succeed 34 | retries: 3 # Number of retries before marking as unhealthy 35 | start_period: 10s # Time to wait before starting health checks 36 | restart: always 37 | 38 | # ${SERVICE} 39 | application: 40 | image: ${IMAGE_URL} # Image to be used 41 | network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale 42 | container_name: app-${SERVICE} # Name for local container management 43 | environment: 44 | # DOMAIN: "https://vaultwarden.example.com" # required when using a reverse proxy; your domain; vaultwarden needs to know it's https to work properly with attachments 45 | SIGNUPS_ALLOWED: "true" # Deactivate this with "false" after you have created your account so that no strangers can register 46 | volumes: 47 | - ${PWD}/${SERVICE}-data/vw-data:/data 48 | depends_on: 49 | tailscale: 50 | condition: service_healthy 51 | restart: always -------------------------------------------------------------------------------- /templates/service-template/.env: -------------------------------------------------------------------------------- 1 | #version=1.0 2 | #url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs 3 | #COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra 4 | SERVICE=adguard 5 | IMAGE_URL=adguard/adguard-home 6 | SERVICEPORT=80 7 | TS_AUTHKEY= 8 | DNS_SERVER=9.9.9.9 9 | -------------------------------------------------------------------------------- /templates/service-template/README.md: -------------------------------------------------------------------------------- 1 | # Make sure you replace SERVICE. 2 | 3 | # SERVICE with Tailscale Sidecar Configuration 4 | 5 | This Docker Compose configuration sets up [SERVICE](LINK TO PAGE OF MAINTAINER) with Tailscale as a sidecar container ......
--------------------------------------------------------------------------------
/services/vaultwarden/docker-compose.yml:
--------------------------------------------------------------------------------

services:
  # Make sure you have updated/checked the .env file with the correct variables.
  # All the ${ xx } need to be defined there.
  # Tailscale Sidecar Configuration
  tailscale:
    image: tailscale/tailscale:latest # Image to be used
    container_name: tailscale-${SERVICE} # Name for local container management
    hostname: ${SERVICE} # Name used within your Tailscale environment
    environment:
      - TS_AUTHKEY=${TS_AUTHKEY}
      - TS_STATE_DIR=/var/lib/tailscale
      - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required
      - TS_USERSPACE=false
      - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz"
      - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The <addr>:<port> for the healthz endpoint
      #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS
    volumes:
      - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path
      - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path
    devices:
      - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work
    cap_add:
      - net_admin # Tailscale requirement
      - sys_module # Tailscale requirement
    #ports:
    #  - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binds port ${SERVICEPORT} on the local network - may be removed if exposure to your Tailnet only is required
    # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below
    #dns:
    #  - ${DNS_SERVER}
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational
      interval: 1m # How often to perform the check
      timeout: 10s # Time to wait for the check to succeed
      retries: 3 # Number of retries before marking as unhealthy
      start_period: 10s # Time to wait before starting health checks
    restart: always

  # ${SERVICE}
  application:
    image: ${IMAGE_URL} # Image to be used
    network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale
    container_name: app-${SERVICE} # Name for local container management
    environment:
      # DOMAIN: "https://vaultwarden.example.com" # Required when using a reverse proxy; Vaultwarden needs to know it is served over https for attachments to work properly
      SIGNUPS_ALLOWED: "true" # Set this to "false" after you have created your own account so that strangers cannot register
    volumes:
      - ${PWD}/${SERVICE}-data/vw-data:/data
    depends_on:
      tailscale:
        condition: service_healthy
    restart: always

--------------------------------------------------------------------------------
/templates/service-template/.env:
--------------------------------------------------------------------------------

#version=1.0
#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs
#COMPOSE_PROJECT_NAME= # only needed when running multiple deployments on the same infra
SERVICE=adguard
IMAGE_URL=adguard/adguard-home
SERVICEPORT=80
TS_AUTHKEY=
DNS_SERVER=9.9.9.9

--------------------------------------------------------------------------------
/templates/service-template/README.md:
--------------------------------------------------------------------------------

# Make sure you replace SERVICE.

# SERVICE with Tailscale Sidecar Configuration

This Docker Compose configuration sets up [SERVICE](LINK TO PAGE OF MAINTAINER) with Tailscale as a sidecar container ......

## SERVICE

[SERVICE](LINK TO PAGE OF MAINTAINER) information about service...

## Configuration Overview

In this setup, the `tailscale-SERVICE` service runs Tailscale, which manages secure networking for the SERVICE. The `SERVICE` service uses the Tailscale network stack via Docker's `network_mode: service:` configuration. This setup ensures that SERVICE is only accessible through the Tailscale network (or locally, if preferred), providing an extra layer of security and privacy.

## Files to check

Please check the following files for validity, as some variables need to be defined upfront (see the example below this list).

- `.env` // This file holds the main variables
- `./config/serve.json` // This file requires the service port of the app to be defined
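For example, a filled-in `.env` for a hypothetical Jellyfin deployment might look like this (the image, port, and comments below are illustrative assumptions, not repo defaults):

```env
SERVICE=jellyfin
IMAGE_URL=lscr.io/linuxserver/jellyfin  # assumed image for this example
SERVICEPORT=8096                        # the port the app listens on locally
TS_AUTHKEY=                             # paste an auth key from the Tailscale admin console
DNS_SERVER=9.9.9.9
```

The `Proxy` target in `./config/serve.json` would then point at `http://127.0.0.1:8096` to match `SERVICEPORT`.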
--------------------------------------------------------------------------------
/templates/service-template/config/serve.json:
--------------------------------------------------------------------------------

{
  "TCP": {
    "443": {
      "HTTPS": true
    }
  },
  "Web": {
    "${TS_CERT_DOMAIN}:443": {
      "Handlers": {
        "/": {
          "Proxy": "http://127.0.0.1:80"
        }
      }
    }
  }
}

--------------------------------------------------------------------------------
/templates/service-template/docker-compose.yml:
--------------------------------------------------------------------------------

services:
  # Make sure you have updated/checked the .env file with the correct variables.
  # All the ${ xx } need to be defined there.
  # Tailscale Sidecar Configuration
  tailscale:
    image: tailscale/tailscale:latest # Image to be used
    container_name: tailscale-${SERVICE} # Name for local container management
    hostname: ${SERVICE} # Name used within your Tailscale environment
    environment:
      - TS_AUTHKEY=${TS_AUTHKEY}
      - TS_STATE_DIR=/var/lib/tailscale
      - TS_SERVE_CONFIG=/config/serve.json # Tailscale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required
      - TS_USERSPACE=false
      - TS_ENABLE_HEALTH_CHECK=true # Enable healthcheck endpoint: "/healthz"
      - TS_LOCAL_ADDR_PORT=127.0.0.1:41234 # The <addr>:<port> for the healthz endpoint
      #- TS_ACCEPT_DNS=true # Uncomment when using MagicDNS
    volumes:
      - ${PWD}/config:/config # Config folder used to store Tailscale files - you may need to change the path
      - ${PWD}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path
    devices:
      - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work
    cap_add:
      - net_admin # Tailscale requirement
      - sys_module # Tailscale requirement
    #ports:
    #  - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binds port ${SERVICEPORT} on the local network - may be removed if exposure to your Tailnet only is required
    # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below
    #dns:
    #  - ${DNS_SERVER}
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:41234/healthz"] # Check Tailscale has a Tailnet IP and is operational
      interval: 1m # How often to perform the check
      timeout: 10s # Time to wait for the check to succeed
      retries: 3 # Number of retries before marking as unhealthy
      start_period: 10s # Time to wait before starting health checks
    restart: always

  # ${SERVICE}
  application:
    image: ${IMAGE_URL} # Image to be used
    network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale
    container_name: app-${SERVICE} # Name for local container management
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=Europe/Amsterdam
    volumes:
      - ${PWD}/${SERVICE}-data/app/config:/config
    depends_on:
      tailscale:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if the ${SERVICE} process is running
      interval: 1m # How often to perform the check
      timeout: 10s # Time to wait for the check to succeed
      retries: 3 # Number of retries before marking as unhealthy
      start_period: 30s # Time to wait before starting health checks
    restart: always

--------------------------------------------------------------------------------
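Note that the template's application healthcheck only verifies that a process matching `${SERVICE}` exists. If the app serves HTTP and its image ships `wget`, a stricter check could probe the service port directly; a sketch, assuming the app answers on `/` (adjust the path to a real endpoint of your app):

```yaml
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "http://127.0.0.1:${SERVICEPORT}/"] # Expect an HTTP response from the app itself
      interval: 1m
      timeout: 10s
      retries: 3
      start_period: 30s
```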