├── AGENTS.md ├── GEMINI.md ├── .python-version ├── src └── compose_farm │ ├── py.typed │ ├── console.py │ ├── __init__.py │ ├── cli │ ├── __init__.py │ ├── app.py │ ├── common.py │ ├── management.py │ ├── monitoring.py │ ├── lifecycle.py │ └── config.py │ ├── paths.py │ ├── example-config.yaml │ ├── executor.py │ ├── operations.py │ ├── state.py │ ├── logs.py │ ├── config.py │ ├── compose.py │ └── traefik.py ├── .envrc ├── tests ├── __init__.py ├── test_logs.py ├── test_operations.py ├── test_refresh.py ├── test_config.py ├── test_cli_logs.py ├── test_executor.py ├── test_config_cmd.py ├── test_state.py └── test_traefik.py ├── .github ├── release-drafter.yml ├── workflows │ ├── toc.yaml │ ├── release-drafter.yml │ ├── release.yml │ ├── renovate.json │ ├── ci.yml │ ├── update-readme.yml │ └── docker.yml └── renovate.json ├── examples ├── mealie │ ├── .env │ └── compose.yaml ├── uptime-kuma │ ├── .env │ └── compose.yaml ├── paperless-ngx │ ├── .env │ └── compose.yaml ├── autokuma │ ├── .env │ └── compose.yaml ├── traefik │ ├── .env │ ├── dynamic.d │ │ └── compose-farm.yml │ └── compose.yaml ├── compose-farm-state.yaml ├── compose-farm.yaml └── README.md ├── Dockerfile ├── docker-compose.yml ├── .gitignore ├── .pre-commit-config.yaml ├── compose-farm.example.yaml ├── LICENSE ├── reddit-post.md ├── docs ├── truenas-nfs-root-squash.md ├── dev │ ├── docker-swarm-network.md │ └── future-improvements.md └── truenas-nested-nfs.md ├── CLAUDE.md └── pyproject.toml /AGENTS.md: -------------------------------------------------------------------------------- 1 | CLAUDE.md -------------------------------------------------------------------------------- /GEMINI.md: -------------------------------------------------------------------------------- 1 | CLAUDE.md -------------------------------------------------------------------------------- /.python-version: -------------------------------------------------------------------------------- 1 | 3.14 2 | -------------------------------------------------------------------------------- /src/compose_farm/py.typed: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.envrc: -------------------------------------------------------------------------------- 1 | source .venv/bin/activate 2 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | """Tests for Compose Farm.""" 2 | -------------------------------------------------------------------------------- /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | template: | 2 | ## What’s Changed 3 | 4 | $CHANGES 5 | -------------------------------------------------------------------------------- /examples/mealie/.env: -------------------------------------------------------------------------------- 1 | # Copy to .env and fill in your values 2 | DOMAIN=example.com 3 | -------------------------------------------------------------------------------- /examples/uptime-kuma/.env: -------------------------------------------------------------------------------- 1 | # Copy to .env and fill in your values 2 | DOMAIN=example.com 3 | -------------------------------------------------------------------------------- /examples/paperless-ngx/.env: -------------------------------------------------------------------------------- 1 | # Copy to .env and fill in your values 2 | DOMAIN=example.com 3 |
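# Generate a long random value, e.g.: openssl rand -base64 40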
PAPERLESS_SECRET_KEY=change-me-to-a-random-string 4 | -------------------------------------------------------------------------------- /examples/autokuma/.env: -------------------------------------------------------------------------------- 1 | # Copy to .env and fill in your values 2 | DOMAIN=example.com 3 | UPTIME_KUMA_USERNAME=admin 4 | UPTIME_KUMA_PASSWORD=your-uptime-kuma-password 5 | -------------------------------------------------------------------------------- /examples/traefik/.env: -------------------------------------------------------------------------------- 1 | # Copy to .env and fill in your values 2 | DOMAIN=example.com 3 | ACME_EMAIL=you@example.com 4 | CF_API_EMAIL=you@example.com 5 | CF_API_KEY=your-cloudflare-api-key 6 | -------------------------------------------------------------------------------- /examples/compose-farm-state.yaml: -------------------------------------------------------------------------------- 1 | deployed: 2 | autokuma: 3 | - primary 4 | - secondary 5 | - local 6 | mealie: secondary 7 | paperless-ngx: primary 8 | traefik: primary 9 | uptime-kuma: secondary 10 | -------------------------------------------------------------------------------- /src/compose_farm/console.py: -------------------------------------------------------------------------------- 1 | """Shared console instances for consistent output styling.""" 2 | 3 | from rich.console import Console 4 | 5 | console = Console(highlight=False) 6 | err_console = Console(stderr=True, highlight=False) 7 | -------------------------------------------------------------------------------- /.github/workflows/toc.yaml: -------------------------------------------------------------------------------- 1 | on: push 2 | name: TOC Generator 3 | jobs: 4 | generateTOC: 5 | name: TOC Generator 6 | runs-on: ubuntu-latest 7 | steps: 8 | - uses: technote-space/toc-generator@v4 9 | with: 10 | TOC_TITLE: "" 11 | -------------------------------------------------------------------------------- /.github/workflows/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name: Release Drafter 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | jobs: 9 | update_release_draft: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: release-drafter/release-drafter@v6 13 | env: 14 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 15 | -------------------------------------------------------------------------------- /src/compose_farm/__init__.py: -------------------------------------------------------------------------------- 1 | """Compose Farm - run docker compose commands across multiple hosts.""" 2 | 3 | try: 4 | from compose_farm._version import __version__, __version_tuple__ 5 | except ImportError: 6 | __version__ = "0.0.0" 7 | __version_tuple__ = (0, 0, 0) 8 | 9 | __all__ = ["__version__", "__version_tuple__"] 10 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | FROM ghcr.io/astral-sh/uv:python3.14-alpine 3 | 4 | # Install SSH client (required for remote host connections) 5 | RUN apk add --no-cache openssh-client 6 | 7 | # Install compose-farm from PyPI 8 | ARG VERSION 9 | RUN uv tool install compose-farm${VERSION:+==$VERSION} 10 | 11 | # Add uv tool bin to PATH 12 | ENV PATH="/root/.local/bin:$PATH" 13 | 14 | # Default entrypoint 15 | ENTRYPOINT ["cf"] 16 | CMD ["--help"] 17 | 
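# Usage sketch (not part of the image): pin a specific PyPI release at build time, e.g.
#   docker build --build-arg VERSION=1.0.0 -t compose-farm .
# Omitting VERSION installs the latest release; 1.0.0 above is a placeholder, not a known version.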
-------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | cf: 3 | image: ghcr.io/basnijholt/compose-farm:latest 4 | volumes: 5 | - ${SSH_AUTH_SOCK}:/ssh-agent:ro 6 | # Compose directory (contains compose files AND compose-farm.yaml config) 7 | - ${CF_COMPOSE_DIR:-/opt/compose}:${CF_COMPOSE_DIR:-/opt/compose} 8 | environment: 9 | - SSH_AUTH_SOCK=/ssh-agent 10 | # Config file path (state stored alongside it) 11 | - CF_CONFIG=${CF_COMPOSE_DIR:-/opt/compose}/compose-farm.yaml 12 | -------------------------------------------------------------------------------- /src/compose_farm/cli/__init__.py: -------------------------------------------------------------------------------- 1 | """CLI interface using Typer.""" 2 | 3 | from __future__ import annotations 4 | 5 | # Import command modules to trigger registration via @app.command() decorators 6 | from compose_farm.cli import ( 7 | config, # noqa: F401 8 | lifecycle, # noqa: F401 9 | management, # noqa: F401 10 | monitoring, # noqa: F401 11 | ) 12 | 13 | # Import the shared app instance 14 | from compose_farm.cli.app import app 15 | 16 | __all__ = ["app"] 17 | 18 | if __name__ == "__main__": 19 | app() 20 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Upload Python Package 2 | 3 | on: 4 | release: 5 | types: [published] 6 | 7 | jobs: 8 | deploy: 9 | runs-on: ubuntu-latest 10 | environment: 11 | name: pypi 12 | url: https://pypi.org/p/compose-farm 13 | permissions: 14 | id-token: write 15 | steps: 16 | - uses: actions/checkout@v6 17 | - name: Install uv 18 | uses: astral-sh/setup-uv@v7 19 | - name: Build 20 | run: uv build 21 | - name: Publish package distributions to PyPI 22 | uses: pypa/gh-action-pypi-publish@release/v1 23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Python 2 | __pycache__/ 3 | src/compose_farm/_version.py 4 | *.py[cod] 5 | *$py.class 6 | *.so 7 | .Python 8 | build/ 9 | develop-eggs/ 10 | dist/ 11 | downloads/ 12 | eggs/ 13 | .eggs/ 14 | lib/ 15 | lib64/ 16 | parts/ 17 | sdist/ 18 | var/ 19 | wheels/ 20 | *.egg-info/ 21 | .installed.cfg 22 | *.egg 23 | 24 | # Virtual environments 25 | .venv/ 26 | venv/ 27 | ENV/ 28 | 29 | # IDE 30 | .idea/ 31 | .vscode/ 32 | *.swp 33 | *.swo 34 | .DS_Store 35 | 36 | # Testing 37 | .coverage 38 | .pytest_cache/ 39 | htmlcov/ 40 | 41 | # Local config (don't commit real configs) 42 | compose-farm.yaml 43 | !examples/compose-farm.yaml 44 | coverage.xml 45 | -------------------------------------------------------------------------------- /src/compose_farm/paths.py: -------------------------------------------------------------------------------- 1 | """Path utilities - lightweight module with no heavy dependencies.""" 2 | 3 | from __future__ import annotations 4 | 5 | import os 6 | from pathlib import Path 7 | 8 | 9 | def xdg_config_home() -> Path: 10 | """Get XDG config directory, respecting XDG_CONFIG_HOME env var.""" 11 | return Path(os.environ.get("XDG_CONFIG_HOME", Path.home() / ".config")) 12 | 13 | 14 | def default_config_path() -> Path: 15 | """Get the default user config path.""" 16 | return xdg_config_home() / "compose-farm" / "compose-farm.yaml" 17 |
18 | 19 | def config_search_paths() -> list[Path]: 20 | """Get search paths for config files.""" 21 | return [Path("compose-farm.yaml"), default_config_path()] 22 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v5.0.0 4 | hooks: 5 | - id: trailing-whitespace 6 | - id: end-of-file-fixer 7 | - id: check-yaml 8 | - id: check-added-large-files 9 | - id: check-merge-conflict 10 | - id: debug-statements 11 | 12 | - repo: https://github.com/astral-sh/ruff-pre-commit 13 | rev: v0.14.9 14 | hooks: 15 | - id: ruff 16 | args: [--fix] 17 | - id: ruff-format 18 | 19 | - repo: https://github.com/pre-commit/mirrors-mypy 20 | rev: v1.14.0 21 | hooks: 22 | - id: mypy 23 | additional_dependencies: 24 | - pydantic>=2.0.0 25 | - typer>=0.9.0 26 | - asyncssh>=2.14.0 27 | - types-PyYAML 28 | -------------------------------------------------------------------------------- /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "rebaseWhen": "behind-base-branch", 4 | "dependencyDashboard": true, 5 | "labels": [ 6 | "dependencies", 7 | "no-stale" 8 | ], 9 | "commitMessagePrefix": "⬆️", 10 | "commitMessageTopic": "{{depName}}", 11 | "prBodyDefinitions": { 12 | "Release": "yes" 13 | }, 14 | "packageRules": [ 15 | { 16 | "matchManagers": [ 17 | "github-actions" 18 | ], 19 | "addLabels": [ 20 | "github_actions" 21 | ], 22 | "rangeStrategy": "pin" 23 | }, 24 | { 25 | "matchManagers": [ 26 | "github-actions" 27 | ], 28 | "matchUpdateTypes": [ 29 | "minor", 30 | "patch" 31 | ], 32 | "automerge": true 33 | } 34 | ] 35 | } 36 | -------------------------------------------------------------------------------- /.github/workflows/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "rebaseWhen": "behind-base-branch", 4 | "dependencyDashboard": true, 5 | "labels": [ 6 | "dependencies", 7 | "no-stale" 8 | ], 9 | "commitMessagePrefix": "⬆️", 10 | "commitMessageTopic": "{{depName}}", 11 | "prBodyDefinitions": { 12 | "Release": "yes" 13 | }, 14 | "packageRules": [ 15 | { 16 | "matchManagers": [ 17 | "github-actions" 18 | ], 19 | "addLabels": [ 20 | "github_actions" 21 | ], 22 | "rangeStrategy": "pin" 23 | }, 24 | { 25 | "matchManagers": [ 26 | "github-actions" 27 | ], 28 | "matchUpdateTypes": [ 29 | "minor", 30 | "patch" 31 | ], 32 | "automerge": true 33 | } 34 | ] 35 | } 36 | -------------------------------------------------------------------------------- /compose-farm.example.yaml: -------------------------------------------------------------------------------- 1 | # Example Compose Farm configuration 2 | # Copy to ~/.config/compose-farm/compose-farm.yaml or ./compose-farm.yaml 3 | 4 | compose_dir: /opt/compose 5 | 6 | # Optional: Auto-regenerate Traefik file-provider config after up/down/restart/update 7 | traefik_file: /opt/traefik/dynamic.d/compose-farm.yml 8 | traefik_service: traefik # Skip services on same host (docker provider handles them) 9 | 10 | hosts: 11 | # Full form with all options 12 | server-1: 13 | address: 192.168.1.10 14 | user: docker 15 | port: 22 16 | 17 | # Short form (just address, user defaults to current user) 18 | server-2: 192.168.1.11 19 | 
20 | # Local execution (no SSH) 21 | local: localhost 22 | 23 | services: 24 | # Map service names to hosts 25 | # Compose file expected at: {compose_dir}/{service}/compose.yaml 26 | traefik: server-1 # Traefik runs here 27 | plex: server-2 # Services on other hosts get file-provider entries 28 | jellyfin: server-2 29 | sonarr: server-1 30 | radarr: local 31 | -------------------------------------------------------------------------------- /src/compose_farm/cli/app.py: -------------------------------------------------------------------------------- 1 | """Shared Typer app instance.""" 2 | 3 | from __future__ import annotations 4 | 5 | from typing import Annotated 6 | 7 | import typer 8 | 9 | from compose_farm import __version__ 10 | 11 | __all__ = ["app"] 12 | 13 | 14 | def _version_callback(value: bool) -> None: 15 | """Print version and exit.""" 16 | if value: 17 | typer.echo(f"compose-farm {__version__}") 18 | raise typer.Exit 19 | 20 | 21 | app = typer.Typer( 22 | name="compose-farm", 23 | help="Compose Farm - run docker compose commands across multiple hosts", 24 | no_args_is_help=True, 25 | context_settings={"help_option_names": ["-h", "--help"]}, 26 | ) 27 | 28 | 29 | @app.callback() 30 | def main( 31 | version: Annotated[ 32 | bool, 33 | typer.Option( 34 | "--version", 35 | "-v", 36 | help="Show version and exit", 37 | callback=_version_callback, 38 | is_eager=True, 39 | ), 40 | ] = False, 41 | ) -> None: 42 | """Compose Farm - run docker compose commands across multiple hosts.""" 43 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Bas Nijholt 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /examples/traefik/dynamic.d/compose-farm.yml: -------------------------------------------------------------------------------- 1 | # Auto-generated by compose-farm 2 | # https://github.com/basnijholt/compose-farm 3 | # 4 | # This file routes traffic to services running on hosts other than Traefik's host. 5 | # Services on Traefik's host use the Docker provider directly. 6 | # 7 | # Regenerate with: compose-farm traefik-file --all -o 8 | # Or configure traefik_file in compose-farm.yaml for automatic updates. 
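# Note: treat this file as generated output; manual edits here are likely to be overwritten on the next regeneration.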
9 | 10 | http: 11 | routers: 12 | mealie: 13 | rule: Host(`mealie.example.com`) 14 | entrypoints: 15 | - websecure 16 | service: mealie 17 | mealie-local: 18 | rule: Host(`mealie.local`) 19 | entrypoints: 20 | - web 21 | service: mealie 22 | uptime: 23 | rule: Host(`uptime.example.com`) 24 | entrypoints: 25 | - websecure 26 | service: uptime 27 | uptime-local: 28 | rule: Host(`uptime.local`) 29 | entrypoints: 30 | - web 31 | service: uptime 32 | services: 33 | mealie: 34 | loadbalancer: 35 | servers: 36 | - url: http://192.168.1.11:9925 37 | uptime: 38 | loadbalancer: 39 | servers: 40 | - url: http://192.168.1.11:3001 41 | -------------------------------------------------------------------------------- /examples/autokuma/compose.yaml: -------------------------------------------------------------------------------- 1 | # AutoKuma - Automatic Uptime Kuma monitor creation from Docker labels 2 | # Demonstrates: Multi-host service (runs on ALL hosts) 3 | # 4 | # This service monitors Docker containers on each host and automatically 5 | # creates Uptime Kuma monitors based on container labels. 6 | # 7 | # In compose-farm.yaml, configure as: 8 | # autokuma: all 9 | # 10 | # This runs the same container on every host, so each host's local 11 | # Docker socket is monitored. 12 | name: autokuma 13 | services: 14 | autokuma: 15 | image: ghcr.io/bigboot/autokuma:latest 16 | container_name: autokuma 17 | restart: unless-stopped 18 | environment: 19 | # Connect to your Uptime Kuma instance 20 | AUTOKUMA__KUMA__URL: https://uptime.${DOMAIN} 21 | AUTOKUMA__KUMA__USERNAME: ${UPTIME_KUMA_USERNAME} 22 | AUTOKUMA__KUMA__PASSWORD: ${UPTIME_KUMA_PASSWORD} 23 | # Tag for auto-created monitors 24 | AUTOKUMA__TAG__NAME: autokuma 25 | AUTOKUMA__TAG__COLOR: "#10B981" 26 | volumes: 27 | # Access local Docker socket to discover containers 28 | - /var/run/docker.sock:/var/run/docker.sock:ro 29 | # Custom DNS for resolving internal domains 30 | dns: 31 | - 192.168.1.1 # Your local DNS server 32 | -------------------------------------------------------------------------------- /examples/compose-farm.yaml: -------------------------------------------------------------------------------- 1 | # Example Compose Farm configuration 2 | # Demonstrates a multi-host setup with NFS shared storage 3 | # 4 | # To test locally: Update the host addresses and run from the examples directory 5 | 6 | compose_dir: /opt/stacks/compose-farm/examples 7 | 8 | # Auto-regenerate Traefik file-provider config after up/down/restart/update 9 | traefik_file: /opt/stacks/compose-farm/examples/traefik/dynamic.d/compose-farm.yml 10 | traefik_service: traefik # Skip Traefik's host in file-provider (docker provider handles it) 11 | 12 | hosts: 13 | # Primary server - runs Traefik and most services 14 | # Full form with all options 15 | primary: 16 | address: 192.168.1.10 17 | user: deploy 18 | port: 22 19 | 20 | # Secondary server - runs some services for load distribution 21 | # Short form (user defaults to current user, port defaults to 22) 22 | secondary: 192.168.1.11 23 | 24 | # Local execution (no SSH) - for testing or when running on the host itself 25 | local: localhost 26 | 27 | services: 28 | # Infrastructure (runs on primary where Traefik is) 29 | traefik: primary 30 | 31 | # Multi-host services (runs on ALL hosts) 32 | # AutoKuma monitors Docker containers on each host 33 | autokuma: all 34 | 35 | # Primary server services 36 | paperless-ngx: primary 37 | 38 | # Secondary server services (distributed for performance) 39 | mealie: secondary 40 | 
uptime-kuma: secondary 41 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | branches: [main] 8 | 9 | jobs: 10 | test: 11 | runs-on: ${{ matrix.os }} 12 | strategy: 13 | fail-fast: false 14 | matrix: 15 | os: [ubuntu-latest, macos-latest, windows-latest] 16 | python-version: ["3.11", "3.12", "3.13"] 17 | 18 | steps: 19 | - uses: actions/checkout@v6 20 | 21 | - name: Install uv 22 | uses: astral-sh/setup-uv@v7 23 | 24 | - name: Set up Python ${{ matrix.python-version }} 25 | run: uv python install ${{ matrix.python-version }} 26 | 27 | - name: Install dependencies 28 | run: uv sync --all-extras --dev 29 | 30 | - name: Run tests 31 | run: uv run pytest 32 | 33 | - name: Upload coverage reports to Codecov 34 | if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.13' 35 | uses: codecov/codecov-action@v5 36 | env: 37 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} 38 | 39 | lint: 40 | runs-on: ubuntu-latest 41 | steps: 42 | - uses: actions/checkout@v6 43 | 44 | - name: Install uv 45 | uses: astral-sh/setup-uv@v7 46 | 47 | - name: Set up Python 48 | run: uv python install 3.12 49 | 50 | - name: Install dependencies 51 | run: uv sync --all-extras --dev 52 | 53 | - name: Run pre-commit (via prek) 54 | uses: j178/prek-action@v1 55 | -------------------------------------------------------------------------------- /examples/uptime-kuma/compose.yaml: -------------------------------------------------------------------------------- 1 | # Uptime Kuma - Monitoring dashboard 2 | # 3 | # Demonstrates: 4 | # - HTTPS route: uptime.${DOMAIN} (e.g., uptime.example.com) with Let's Encrypt 5 | # - HTTP route: uptime.local for LAN access without TLS 6 | # - Docker socket access, user mapping for NFS, custom DNS 7 | name: uptime-kuma 8 | services: 9 | uptime-kuma: 10 | image: louislam/uptime-kuma:2 11 | container_name: uptime-kuma 12 | restart: unless-stopped 13 | # Run as non-root user (important for NFS volumes) 14 | user: "1000:1000" 15 | networks: 16 | - mynetwork 17 | ports: 18 | - "3001:3001" 19 | volumes: 20 | - /var/run/docker.sock:/var/run/docker.sock:ro 21 | - /mnt/data/uptime-kuma:/app/data 22 | environment: 23 | PUID: 1000 24 | PGID: 1000 25 | # Custom DNS for internal domain resolution 26 | dns: 27 | - 192.168.1.1 # Your local DNS server 28 | labels: 29 | # HTTPS route: uptime.example.com (requires DOMAIN in .env) 30 | - traefik.enable=true 31 | - traefik.http.routers.uptime.rule=Host(`uptime.${DOMAIN}`) 32 | - traefik.http.routers.uptime.entrypoints=websecure 33 | - traefik.http.services.uptime.loadbalancer.server.port=3001 34 | # HTTP route: uptime.local (for LAN access, no TLS) 35 | - traefik.http.routers.uptime-local.rule=Host(`uptime.local`) 36 | - traefik.http.routers.uptime-local.entrypoints=web 37 | # AutoKuma: automatically create Uptime Kuma monitor 38 | - kuma.uptime.http.name=Uptime Kuma 39 | - kuma.uptime.http.url=https://uptime.${DOMAIN} 40 | 41 | networks: 42 | mynetwork: 43 | external: true 44 | -------------------------------------------------------------------------------- /examples/mealie/compose.yaml: -------------------------------------------------------------------------------- 1 | # Mealie - Recipe manager 2 | # Simple single-container service with Traefik labels 3 | # 4 | # Demonstrates: 5 | # - HTTPS route: mealie.${DOMAIN} (e.g., mealie.example.com) 
with Let's Encrypt 6 | # - HTTP route: mealie.local for LAN access without TLS 7 | # - External network, resource limits, environment variables 8 | name: mealie 9 | services: 10 | mealie: 11 | image: ghcr.io/mealie-recipes/mealie:latest 12 | container_name: mealie 13 | restart: unless-stopped 14 | networks: 15 | - mynetwork 16 | ports: 17 | - "9925:9000" 18 | deploy: 19 | resources: 20 | limits: 21 | memory: 1000M 22 | volumes: 23 | - /mnt/data/mealie:/app/data 24 | environment: 25 | ALLOW_SIGNUP: "false" 26 | PUID: 1000 27 | PGID: 1000 28 | TZ: America/Los_Angeles 29 | MAX_WORKERS: 1 30 | WEB_CONCURRENCY: 1 31 | BASE_URL: https://mealie.${DOMAIN} 32 | labels: 33 | # HTTPS route: mealie.example.com (requires DOMAIN in .env) 34 | - traefik.enable=true 35 | - traefik.http.routers.mealie.rule=Host(`mealie.${DOMAIN}`) 36 | - traefik.http.routers.mealie.entrypoints=websecure 37 | - traefik.http.services.mealie.loadbalancer.server.port=9000 38 | # HTTP route: mealie.local (for LAN access, no TLS) 39 | - traefik.http.routers.mealie-local.rule=Host(`mealie.local`) 40 | - traefik.http.routers.mealie-local.entrypoints=web 41 | # AutoKuma: automatically create Uptime Kuma monitor 42 | - kuma.mealie.http.name=Mealie 43 | - kuma.mealie.http.url=https://mealie.${DOMAIN} 44 | 45 | networks: 46 | mynetwork: 47 | external: true 48 | -------------------------------------------------------------------------------- /.github/workflows/update-readme.yml: -------------------------------------------------------------------------------- 1 | name: Update README.md 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | 9 | jobs: 10 | update_readme: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Check out repository 14 | uses: actions/checkout@v6 15 | with: 16 | persist-credentials: false 17 | fetch-depth: 0 18 | 19 | - name: Set up Python 20 | uses: actions/setup-python@v6 21 | 22 | - name: Install uv 23 | uses: astral-sh/setup-uv@v7 24 | 25 | - name: Run markdown-code-runner 26 | env: 27 | TERM: dumb 28 | NO_COLOR: 1 29 | TERMINAL_WIDTH: 90 30 | run: | 31 | uvx --with . markdown-code-runner README.md 32 | sed -i 's/[[:space:]]*$//' README.md 33 | 34 | - name: Commit updated README.md 35 | id: commit 36 | run: | 37 | git add README.md 38 | git config --local user.email "github-actions[bot]@users.noreply.github.com" 39 | git config --local user.name "github-actions[bot]" 40 | if git diff --quiet && git diff --staged --quiet; then 41 | echo "No changes in README.md, skipping commit." 42 | echo "commit_status=skipped" >> $GITHUB_ENV 43 | else 44 | git commit -m "Update README.md" 45 | echo "commit_status=committed" >> $GITHUB_ENV 46 | fi 47 | 48 | - name: Push changes 49 | if: env.commit_status == 'committed' 50 | uses: ad-m/github-push-action@master 51 | with: 52 | github_token: ${{ secrets.GITHUB_TOKEN }} 53 | branch: ${{ github.head_ref || github.ref_name }} 54 | -------------------------------------------------------------------------------- /examples/paperless-ngx/compose.yaml: -------------------------------------------------------------------------------- 1 | # Paperless-ngx - Document management system 2 | # 3 | # Demonstrates: 4 | # - HTTPS route: paperless.${DOMAIN} (e.g., paperless.example.com) with Let's Encrypt 5 | # - HTTP route: paperless.local for LAN access without TLS 6 | # - Multi-container stack (Redis + App with SQLite) 7 | # 8 | # NOTE: This example uses SQLite (the default) instead of PostgreSQL. 9 | # PostgreSQL should NOT be used with NFS storage due to fsync/locking issues. 
10 | # If you need PostgreSQL, use local volumes for the database. 11 | name: paperless-ngx 12 | services: 13 | redis: 14 | image: redis:8 15 | container_name: paperless-redis 16 | restart: unless-stopped 17 | networks: 18 | - mynetwork 19 | volumes: 20 | - /mnt/data/paperless/redis:/data 21 | 22 | paperless: 23 | image: ghcr.io/paperless-ngx/paperless-ngx:latest 24 | container_name: paperless 25 | restart: unless-stopped 26 | depends_on: 27 | - redis 28 | networks: 29 | - mynetwork 30 | ports: 31 | - "8000:8000" 32 | volumes: 33 | # SQLite database stored here (safe on NFS for single-writer) 34 | - /mnt/data/paperless/data:/usr/src/paperless/data 35 | - /mnt/data/paperless/media:/usr/src/paperless/media 36 | - /mnt/data/paperless/export:/usr/src/paperless/export 37 | - /mnt/data/paperless/consume:/usr/src/paperless/consume 38 | environment: 39 | PAPERLESS_REDIS: redis://redis:6379 40 | PAPERLESS_URL: https://paperless.${DOMAIN} 41 | PAPERLESS_SECRET_KEY: ${PAPERLESS_SECRET_KEY} 42 | USERMAP_UID: 1000 43 | USERMAP_GID: 1000 44 | labels: 45 | # HTTPS route: paperless.example.com (requires DOMAIN in .env) 46 | - traefik.enable=true 47 | - traefik.http.routers.paperless.rule=Host(`paperless.${DOMAIN}`) 48 | - traefik.http.routers.paperless.entrypoints=websecure 49 | - traefik.http.services.paperless.loadbalancer.server.port=8000 50 | - traefik.docker.network=mynetwork 51 | # HTTP route: paperless.local (for LAN access, no TLS) 52 | - traefik.http.routers.paperless-local.rule=Host(`paperless.local`) 53 | - traefik.http.routers.paperless-local.entrypoints=web 54 | # AutoKuma: automatically create Uptime Kuma monitor 55 | - kuma.paperless.http.name=Paperless 56 | - kuma.paperless.http.url=https://paperless.${DOMAIN} 57 | 58 | networks: 59 | mynetwork: 60 | external: true 61 | -------------------------------------------------------------------------------- /examples/traefik/compose.yaml: -------------------------------------------------------------------------------- 1 | # Traefik reverse proxy with Let's Encrypt and file-provider support 2 | # This is the foundation service - other services route through it 3 | # 4 | # Entrypoints: 5 | # - web (port 80): HTTP for .local domains (no TLS needed on LAN) 6 | # - websecure (port 443): HTTPS with Let's Encrypt for custom domains 7 | name: traefik 8 | services: 9 | traefik: 10 | image: traefik:v3.2 11 | container_name: traefik 12 | command: 13 | - --api.dashboard=true 14 | - --providers.docker=true 15 | - --providers.docker.exposedbydefault=false 16 | - --providers.docker.network=mynetwork 17 | # File provider for routing to services on other hosts 18 | - --providers.file.directory=/dynamic.d 19 | - --providers.file.watch=true 20 | # HTTP entrypoint for .local domains (LAN access, no TLS) 21 | - --entrypoints.web.address=:80 22 | # HTTPS entrypoint for custom domains (with Let's Encrypt TLS) 23 | - --entrypoints.websecure.address=:443 24 | - --entrypoints.websecure.asDefault=true 25 | - --entrypoints.websecure.http.tls.certresolver=letsencrypt 26 | # Let's Encrypt DNS challenge (using Cloudflare as example) 27 | - --certificatesresolvers.letsencrypt.acme.email=${ACME_EMAIL} 28 | - --certificatesresolvers.letsencrypt.acme.storage=/letsencrypt/acme.json 29 | - --certificatesresolvers.letsencrypt.acme.dnschallenge.provider=cloudflare 30 | - --certificatesresolvers.letsencrypt.acme.dnschallenge.resolvers=1.1.1.1:53 31 | environment: 32 | # Cloudflare API token for DNS challenge 33 | CF_API_EMAIL: ${CF_API_EMAIL} 34 | CF_API_KEY: ${CF_API_KEY} 35 | restart: 
unless-stopped 36 | ports: 37 | - "80:80" 38 | - "443:443" 39 | - "8080:8080" # Dashboard 40 | volumes: 41 | - /var/run/docker.sock:/var/run/docker.sock:ro 42 | - /mnt/data/traefik/letsencrypt:/letsencrypt 43 | - ./dynamic.d:/dynamic.d:ro 44 | networks: 45 | - mynetwork 46 | labels: 47 | - traefik.enable=true 48 | # Dashboard accessible at traefik.yourdomain.com 49 | - traefik.http.routers.traefik.rule=Host(`traefik.${DOMAIN}`) 50 | - traefik.http.routers.traefik.entrypoints=websecure 51 | - traefik.http.routers.traefik.service=api@internal 52 | # AutoKuma: automatically create Uptime Kuma monitor 53 | - kuma.traefik.http.name=Traefik 54 | - kuma.traefik.http.url=https://traefik.${DOMAIN} 55 | 56 | networks: 57 | mynetwork: 58 | external: true 59 | -------------------------------------------------------------------------------- /reddit-post.md: -------------------------------------------------------------------------------- 1 | # Title options 2 | 3 | - Multi-host Docker Compose without Kubernetes or file changes 4 | - I built a CLI to run Docker Compose across hosts. Zero changes to your files. 5 | 6 | --- 7 | 8 | # I made a CLI to run Docker Compose across multiple hosts without Kubernetes or Swarm 9 | 10 | I've been running 100+ Docker Compose stacks on a single machine, and it kept running out of memory. I needed to spread services across multiple hosts, but: 11 | 12 | - **Kubernetes** felt like overkill. I don't need pods, ingress controllers, or 10x more YAML. 13 | - **Docker Swarm** is basically in maintenance mode. 14 | - Both require rewriting my compose files. 15 | 16 | So I built **Compose Farm**, a simple CLI that runs `docker compose` commands over SSH. No agents, no cluster setup, no changes to your existing compose files. 17 | 18 | ## How it works 19 | 20 | One YAML file maps services to hosts: 21 | 22 | ```yaml 23 | compose_dir: /opt/stacks 24 | 25 | hosts: 26 | nuc: 192.168.1.10 27 | hp: 192.168.1.11 28 | 29 | services: 30 | plex: nuc 31 | jellyfin: hp 32 | sonarr: nuc 33 | radarr: nuc 34 | ``` 35 | 36 | Then just: 37 | 38 | ```bash 39 | cf up plex # runs on nuc via SSH 40 | cf up --all # starts everything on their assigned hosts 41 | cf logs -f plex # streams logs 42 | cf ps # shows status across all hosts 43 | ``` 44 | 45 | ## Auto-migration 46 | 47 | Change a service's host in the config and run `cf up`. It stops the service on the old host and starts it on the new one. No manual SSH needed. 48 | 49 | ```yaml 50 | # Before 51 | plex: nuc 52 | 53 | # After (just change this) 54 | plex: hp 55 | ``` 56 | 57 | ```bash 58 | cf up plex # migrates automatically 59 | ``` 60 | 61 | ## Requirements 62 | 63 | - SSH key auth to your hosts 64 | - Same paths on all hosts (I use NFS from my NAS) 65 | - That's it. No agents, no daemons. 66 | 67 | ## What it doesn't do 68 | 69 | - No high availability (if a host goes down, services don't auto-migrate) 70 | - No overlay networking (containers on different hosts can't talk via Docker DNS) 71 | - No service discovery 72 | - No health checks or automatic restarts 73 | 74 | It's a convenience wrapper around `docker compose` + SSH. If you need failover or cross-host container networking, you probably do need Swarm or Kubernetes. 75 | 76 | ## Links 77 | 78 | - GitHub: https://github.com/basnijholt/compose-farm 79 | - Install: `uv tool install compose-farm` or `pip install compose-farm` 80 | 81 | Built this in 4 days because I was mass-SSHing into machines like a caveman. Happy to answer questions or take feedback! 
82 | -------------------------------------------------------------------------------- /docs/truenas-nfs-root-squash.md: -------------------------------------------------------------------------------- 1 | # TrueNAS NFS: Disabling Root Squash 2 | 3 | When running Docker containers on NFS-mounted storage, containers that run as root will fail to write files unless root squash is disabled. This document explains the problem and solution. 4 | 5 | ## The Problem 6 | 7 | By default, NFS uses "root squash" which maps the root user (UID 0) on clients to `nobody` on the server. This is a security feature to prevent remote root users from having root access to the NFS server's files. 8 | 9 | However, many Docker containers run as root internally. When these containers try to write to NFS-mounted volumes, the writes fail with "Permission denied" because the NFS server sees them as `nobody`, not `root`. 10 | 11 | Example error in container logs: 12 | ``` 13 | System.UnauthorizedAccessException: Access to the path '/data' is denied. 14 | Error: EACCES: permission denied, mkdir '/app/data' 15 | ``` 16 | 17 | ## The Solution 18 | 19 | In TrueNAS, configure the NFS share to map remote root to local root: 20 | 21 | ### TrueNAS SCALE UI 22 | 23 | 1. Go to **Shares → NFS** 24 | 2. Edit your share 25 | 3. Under **Advanced Options**: 26 | - **Maproot User**: `root` 27 | - **Maproot Group**: `wheel` 28 | 4. Save 29 | 30 | ### Result in /etc/exports 31 | 32 | ``` 33 | "/mnt/pool/data"\ 34 | 192.168.1.25(sec=sys,rw,no_root_squash,no_subtree_check)\ 35 | 192.168.1.26(sec=sys,rw,no_root_squash,no_subtree_check) 36 | ``` 37 | 38 | The `no_root_squash` option means remote root is treated as root on the server. 39 | 40 | ## Why `wheel`? 41 | 42 | On FreeBSD/TrueNAS, the root user's primary group is `wheel` (GID 0), not `root` like on Linux. So `root:wheel` = `0:0`. 43 | 44 | ## Security Considerations 45 | 46 | Disabling root squash means any machine that can mount the NFS share has full root access to those files. This is acceptable when: 47 | 48 | - The NFS clients are on a trusted private network 49 | - Only known hosts (by IP) are allowed to mount the share 50 | - The data isn't security-critical 51 | 52 | For home lab setups with Docker containers, this is typically fine. 53 | 54 | ## Alternative: Run Containers as Non-Root 55 | 56 | If you prefer to keep root squash enabled, you can run containers as a non-root user: 57 | 58 | 1. **LinuxServer.io images**: Set `PUID=1000` and `PGID=1000` environment variables 59 | 2. **Other images**: Add `user: "1000:1000"` to the compose service 60 | 61 | However, not all containers support running as non-root (they may need to bind to privileged ports, create system directories, etc.). 62 | 63 | ## Tested On 64 | 65 | - TrueNAS SCALE 24.10 66 | -------------------------------------------------------------------------------- /docs/dev/docker-swarm-network.md: -------------------------------------------------------------------------------- 1 | # Docker Swarm Overlay Networks with Compose Farm 2 | 3 | Notes from testing Docker Swarm's attachable overlay networks as a way to get cross-host container networking while still using `docker compose`. 4 | 5 | ## The Idea 6 | 7 | Docker Swarm overlay networks can be made "attachable", allowing regular `docker compose` containers (not just swarm services) to join them. 
This would give us: 8 | 9 | - Cross-host Docker DNS (containers find each other by name) 10 | - No need to publish ports for inter-container communication 11 | - Keep using `docker compose up` instead of `docker stack deploy` 12 | 13 | ## Setup Steps 14 | 15 | ```bash 16 | # On manager node 17 | docker swarm init --advertise-addr <manager-ip> 18 | 19 | # On worker nodes (use token from init output) 20 | docker swarm join --token <token> <manager-ip>:2377 21 | 22 | # Create attachable overlay network (on manager) 23 | docker network create --driver overlay --attachable my-network 24 | 25 | # In compose files, add the network 26 | networks: 27 | my-network: 28 | external: true 29 | ``` 30 | 31 | ## Required Ports 32 | 33 | Docker Swarm requires these ports open **bidirectionally** between all nodes: 34 | 35 | | Port | Protocol | Purpose | 36 | |------|----------|---------| 37 | | 2377 | TCP | Cluster management | 38 | | 7946 | TCP + UDP | Node communication | 39 | | 4789 | UDP | Overlay network traffic (VXLAN) | 40 | 41 | ## Test Results (2024-12-13) 42 | 43 | - docker-debian (192.168.1.66) as manager 44 | - dev-lxc (192.168.1.167) as worker 45 | 46 | ### What worked 47 | 48 | - Swarm init and join 49 | - Overlay network creation 50 | - Nodes showed as Ready 51 | 52 | ### What failed 53 | 54 | - Container on dev-lxc couldn't attach to overlay network 55 | - Error: `attaching to network failed... context deadline exceeded` 56 | - Cause: Port 7946 blocked from docker-debian → dev-lxc 57 | 58 | ### Root cause 59 | 60 | Firewall on dev-lxc wasn't configured to allow swarm ports. Opening these ports requires sudo access on each node. 61 | 62 | ## Conclusion 63 | 64 | Docker Swarm overlay networks are **not plug-and-play**. Requirements: 65 | 66 | 1. Swarm init/join on all nodes 67 | 2. Firewall rules on all nodes (needs sudo/root) 68 | 3. All nodes must have bidirectional connectivity on 3 ports 69 | 70 | For a simpler alternative, consider: 71 | 72 | - **Tailscale**: VPN mesh, containers use host's Tailscale IP 73 | - **Host networking + published ports**: What compose-farm does today 74 | - **Keep dependent services together**: Avoid cross-host networking entirely 75 | 76 | ## Future Work 77 | 78 | If we decide to support overlay networks: 79 | 80 | 1. Add a `compose-farm network create` command that: 81 | - Initializes swarm if needed 82 | - Creates attachable overlay network 83 | - Documents required firewall rules 84 | 85 | 2. Add network config to compose-farm.yaml: 86 | ```yaml 87 | overlay_network: compose-farm-net 88 | ``` 89 | 90 | 3.
Auto-inject network into compose files (or document manual setup) 91 | -------------------------------------------------------------------------------- /.github/workflows/docker.yml: -------------------------------------------------------------------------------- 1 | name: Build and Push Docker Image 2 | 3 | on: 4 | workflow_run: 5 | workflows: ["Upload Python Package"] 6 | types: [completed] 7 | workflow_dispatch: 8 | inputs: 9 | version: 10 | description: 'Version to build (leave empty for latest)' 11 | required: false 12 | 13 | env: 14 | REGISTRY: ghcr.io 15 | IMAGE_NAME: ${{ github.repository }} 16 | 17 | jobs: 18 | build-and-push: 19 | runs-on: ubuntu-latest 20 | # Only run if PyPI upload succeeded (or manual dispatch) 21 | if: ${{ github.event_name == 'workflow_dispatch' || github.event.workflow_run.conclusion == 'success' }} 22 | permissions: 23 | contents: read 24 | packages: write 25 | 26 | steps: 27 | - name: Checkout 28 | uses: actions/checkout@v6 29 | 30 | - name: Set up Docker Buildx 31 | uses: docker/setup-buildx-action@v3 32 | 33 | - name: Log in to Container Registry 34 | uses: docker/login-action@v3 35 | with: 36 | registry: ${{ env.REGISTRY }} 37 | username: ${{ github.actor }} 38 | password: ${{ secrets.GITHUB_TOKEN }} 39 | 40 | - name: Extract version 41 | id: version 42 | run: | 43 | if [ "${{ github.event_name }}" = "workflow_run" ]; then 44 | # Get version from the tag that triggered the release 45 | VERSION="${{ github.event.workflow_run.head_branch }}" 46 | # Strip 'v' prefix if present 47 | VERSION="${VERSION#v}" 48 | elif [ -n "${{ github.event.inputs.version }}" ]; then 49 | VERSION="${{ github.event.inputs.version }}" 50 | else 51 | VERSION="" 52 | fi 53 | echo "version=$VERSION" >> $GITHUB_OUTPUT 54 | 55 | - name: Wait for PyPI 56 | if: steps.version.outputs.version != '' 57 | run: | 58 | VERSION="${{ steps.version.outputs.version }}" 59 | echo "Waiting for compose-farm==$VERSION on PyPI..." 60 | for i in {1..30}; do 61 | if curl -sf "https://pypi.org/pypi/compose-farm/$VERSION/json" > /dev/null; then 62 | echo "✓ Version $VERSION available on PyPI" 63 | exit 0 64 | fi 65 | echo "Attempt $i: not yet available, waiting 10s..." 66 | sleep 10 67 | done 68 | echo "✗ Timeout waiting for PyPI" 69 | exit 1 70 | 71 | - name: Extract metadata 72 | id: meta 73 | uses: docker/metadata-action@v5 74 | with: 75 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} 76 | tags: | 77 | type=semver,pattern={{version}},value=v${{ steps.version.outputs.version }} 78 | type=semver,pattern={{major}}.{{minor}},value=v${{ steps.version.outputs.version }} 79 | type=semver,pattern={{major}},value=v${{ steps.version.outputs.version }} 80 | type=raw,value=latest 81 | 82 | - name: Build and push 83 | uses: docker/build-push-action@v6 84 | with: 85 | context: . 
86 | push: true 87 | tags: ${{ steps.meta.outputs.tags }} 88 | labels: ${{ steps.meta.outputs.labels }} 89 | build-args: | 90 | VERSION=${{ steps.version.outputs.version }} 91 | cache-from: type=gha 92 | cache-to: type=gha,mode=max 93 | -------------------------------------------------------------------------------- /tests/test_logs.py: -------------------------------------------------------------------------------- 1 | """Tests for snapshot logging.""" 2 | 3 | import json 4 | import tomllib 5 | from datetime import UTC, datetime 6 | from pathlib import Path 7 | 8 | import pytest 9 | 10 | from compose_farm.config import Config, Host 11 | from compose_farm.executor import CommandResult 12 | from compose_farm.logs import ( 13 | _parse_images_output, 14 | collect_service_entries, 15 | isoformat, 16 | load_existing_entries, 17 | merge_entries, 18 | write_toml, 19 | ) 20 | 21 | 22 | def test_parse_images_output_handles_list_and_lines() -> None: 23 | data = [ 24 | {"Service": "svc", "Image": "redis", "Digest": "sha256:abc"}, 25 | {"Service": "svc", "Image": "db", "Digest": "sha256:def"}, 26 | ] 27 | as_array = _parse_images_output(json.dumps(data)) 28 | assert len(as_array) == 2 29 | 30 | as_lines = _parse_images_output("\n".join(json.dumps(item) for item in data)) 31 | assert len(as_lines) == 2 32 | 33 | 34 | @pytest.mark.asyncio 35 | async def test_snapshot_preserves_first_seen(tmp_path: Path) -> None: 36 | compose_dir = tmp_path / "compose" 37 | compose_dir.mkdir() 38 | service_dir = compose_dir / "svc" 39 | service_dir.mkdir() 40 | (service_dir / "docker-compose.yml").write_text("services: {}\n") 41 | 42 | config = Config( 43 | compose_dir=compose_dir, 44 | hosts={"local": Host(address="localhost")}, 45 | services={"svc": "local"}, 46 | ) 47 | 48 | sample_output = json.dumps([{"Service": "svc", "Image": "redis", "Digest": "sha256:abc"}]) 49 | 50 | async def fake_run_compose( 51 | _cfg: Config, service: str, compose_cmd: str, *, stream: bool = True 52 | ) -> CommandResult: 53 | assert compose_cmd == "images --format json" 54 | assert stream is False or stream is True 55 | return CommandResult( 56 | service=service, 57 | exit_code=0, 58 | success=True, 59 | stdout=sample_output, 60 | stderr="", 61 | ) 62 | 63 | log_path = tmp_path / "dockerfarm-log.toml" 64 | 65 | # First snapshot 66 | first_time = datetime(2025, 1, 1, tzinfo=UTC) 67 | first_entries = await collect_service_entries( 68 | config, "svc", now=first_time, run_compose_fn=fake_run_compose 69 | ) 70 | first_iso = isoformat(first_time) 71 | merged = merge_entries([], first_entries, now_iso=first_iso) 72 | meta = {"generated_at": first_iso, "compose_dir": str(config.compose_dir)} 73 | write_toml(log_path, meta=meta, entries=merged) 74 | 75 | after_first = tomllib.loads(log_path.read_text()) 76 | first_seen = after_first["entries"][0]["first_seen"] 77 | 78 | # Second snapshot 79 | second_time = datetime(2025, 2, 1, tzinfo=UTC) 80 | second_entries = await collect_service_entries( 81 | config, "svc", now=second_time, run_compose_fn=fake_run_compose 82 | ) 83 | second_iso = isoformat(second_time) 84 | existing = load_existing_entries(log_path) 85 | merged = merge_entries(existing, second_entries, now_iso=second_iso) 86 | meta = {"generated_at": second_iso, "compose_dir": str(config.compose_dir)} 87 | write_toml(log_path, meta=meta, entries=merged) 88 | 89 | after_second = tomllib.loads(log_path.read_text()) 90 | entry = after_second["entries"][0] 91 | assert entry["first_seen"] == first_seen 92 | assert 
entry["last_seen"].startswith("2025-02-01") 93 | -------------------------------------------------------------------------------- /src/compose_farm/example-config.yaml: -------------------------------------------------------------------------------- 1 | # Compose Farm configuration 2 | # Documentation: https://github.com/basnijholt/compose-farm 3 | # 4 | # This file configures compose-farm to manage Docker Compose services 5 | # across multiple hosts via SSH. 6 | # 7 | # Place this file at: 8 | # - ./compose-farm.yaml (current directory) 9 | # - ~/.config/compose-farm/compose-farm.yaml 10 | # - Or specify with: cf --config /path/to/config.yaml 11 | # - Or set CF_CONFIG environment variable 12 | 13 | # ------------------------------------------------------------------------------ 14 | # compose_dir: Directory containing service subdirectories with compose files 15 | # ------------------------------------------------------------------------------ 16 | # Each subdirectory should contain a compose.yaml (or docker-compose.yml). 17 | # This path must be the same on all hosts (NFS mount recommended). 18 | # 19 | compose_dir: /opt/compose 20 | 21 | # ------------------------------------------------------------------------------ 22 | # hosts: SSH connection details for each host 23 | # ------------------------------------------------------------------------------ 24 | # Simple form: 25 | # hostname: ip-or-fqdn 26 | # 27 | # Full form: 28 | # hostname: 29 | # address: ip-or-fqdn 30 | # user: ssh-username # default: current user 31 | # port: 22 # default: 22 32 | # 33 | # Note: "all" is a reserved keyword and cannot be used as a host name. 34 | # 35 | hosts: 36 | # Example: simple form (uses current user, port 22) 37 | server1: 192.168.1.10 38 | 39 | # Example: full form with explicit user 40 | server2: 41 | address: 192.168.1.20 42 | user: admin 43 | 44 | # Example: full form with custom port 45 | server3: 46 | address: 192.168.1.30 47 | user: root 48 | port: 2222 49 | 50 | # ------------------------------------------------------------------------------ 51 | # services: Map service names to their target host(s) 52 | # ------------------------------------------------------------------------------ 53 | # Each service name must match a subdirectory in compose_dir. 54 | # 55 | # Single host: 56 | # service-name: hostname 57 | # 58 | # Multiple hosts (explicit list): 59 | # service-name: [host1, host2] 60 | # 61 | # All hosts: 62 | # service-name: all 63 | # 64 | services: 65 | # Example: service runs on a single host 66 | nginx: server1 67 | postgres: server2 68 | 69 | # Example: service runs on multiple specific hosts 70 | # prometheus: [server1, server2] 71 | 72 | # Example: service runs on ALL hosts (e.g., monitoring agents) 73 | # node-exporter: all 74 | 75 | # ------------------------------------------------------------------------------ 76 | # traefik_file: (optional) Auto-generate Traefik file-provider config 77 | # ------------------------------------------------------------------------------ 78 | # When set, compose-farm automatically regenerates this file after 79 | # up/down/restart/update commands. Traefik watches this file for changes. 
80 | # 81 | # traefik_file: /opt/compose/traefik/dynamic.d/compose-farm.yml 82 | 83 | # ------------------------------------------------------------------------------ 84 | # traefik_service: (optional) Service name running Traefik 85 | # ------------------------------------------------------------------------------ 86 | # When generating traefik_file, services on the same host as Traefik are 87 | # skipped (they're handled by Traefik's Docker provider directly). 88 | # 89 | # traefik_service: traefik 90 | -------------------------------------------------------------------------------- /CLAUDE.md: -------------------------------------------------------------------------------- 1 | # Compose Farm Development Guidelines 2 | 3 | ## Core Principles 4 | 5 | - **KISS**: Keep it simple. This is a thin wrapper around `docker compose` over SSH. 6 | - **YAGNI**: Don't add features until they're needed. No orchestration, no service discovery, no health checks. 7 | - **DRY**: Reuse patterns. Common CLI options are defined once, SSH logic is centralized. 8 | 9 | ## Architecture 10 | 11 | ``` 12 | compose_farm/ 13 | ├── cli/ # CLI subpackage 14 | │ ├── __init__.py # Imports modules to trigger command registration 15 | │ ├── app.py # Shared Typer app instance, version callback 16 | │ ├── common.py # Shared helpers, options, progress bar utilities 17 | │ ├── config.py # Config subcommand (init, show, path, validate, edit) 18 | │ ├── lifecycle.py # up, down, pull, restart, update, apply commands 19 | │ ├── management.py # refresh, check, init-network, traefik-file commands 20 | │ └── monitoring.py # logs, ps, stats commands 21 | ├── config.py # Pydantic models, YAML loading 22 | ├── compose.py # Compose file parsing (.env, ports, volumes, networks) 23 | ├── console.py # Shared Rich console instances 24 | ├── executor.py # SSH/local command execution, streaming output 25 | ├── operations.py # Business logic (up, migrate, discover, preflight checks) 26 | ├── state.py # Deployment state tracking (which service on which host) 27 | ├── logs.py # Image digest snapshots (dockerfarm-log.toml) 28 | └── traefik.py # Traefik file-provider config generation from labels 29 | ``` 30 | 31 | ## Key Design Decisions 32 | 33 | 1. **Hybrid SSH approach**: asyncssh for parallel streaming with prefixes; native `ssh -t` for raw mode (progress bars) 34 | 2. **Parallel by default**: Multiple services run concurrently via `asyncio.gather` 35 | 3. **Streaming output**: Real-time stdout/stderr with `[service]` prefix using Rich 36 | 4. **SSH key auth only**: Uses ssh-agent, no password handling (YAGNI) 37 | 5. **NFS assumption**: Compose files at same path on all hosts 38 | 6. **Local IP auto-detection**: Skips SSH when target host matches local machine's IP 39 | 7. **State tracking**: Tracks where services are deployed for auto-migration 40 | 8. **Pre-flight checks**: Verifies NFS mounts and Docker networks exist before starting/migrating 41 | 42 | ## Communication Notes 43 | 44 | - Clarify ambiguous wording (e.g., homophones like "right"/"write", "their"/"there"). 45 | 46 | ## Git Safety 47 | 48 | - Never amend commits. 49 | - **NEVER merge anything into main.** Always commit directly or use fast-forward/rebase. 50 | - Never force push. 51 | 52 | ## Commands Quick Reference 53 | 54 | CLI available as `cf` or `compose-farm`. 55 | 56 | | Command | Description | 57 | |---------|-------------| 58 | | `up` | Start services (`docker compose up -d`), auto-migrates if host changed | 59 | | `down` | Stop services (`docker compose down`). 
Use `--orphaned` to stop services removed from config | 60 | | `pull` | Pull latest images | 61 | | `restart` | `down` + `up -d` | 62 | | `update` | `pull` + `down` + `up -d` | 63 | | `apply` | Make reality match config: migrate services + stop orphans. Use `--dry-run` to preview | 64 | | `logs` | Show service logs | 65 | | `ps` | Show status of all services | 66 | | `stats` | Show overview (hosts, services, pending migrations; `--live` for container counts) | 67 | | `refresh` | Update state from reality: discover running services, capture image digests | 68 | | `check` | Validate config, traefik labels, mounts, networks; show host compatibility | 69 | | `init-network` | Create Docker network on hosts with consistent subnet/gateway | 70 | | `traefik-file` | Generate Traefik file-provider config from compose labels | 71 | | `config` | Manage config files (init, show, path, validate, edit) | 72 | -------------------------------------------------------------------------------- /tests/test_operations.py: -------------------------------------------------------------------------------- 1 | """Tests for operations module.""" 2 | 3 | from __future__ import annotations 4 | 5 | import inspect 6 | from pathlib import Path # noqa: TC003 7 | from unittest.mock import patch 8 | 9 | import pytest 10 | 11 | from compose_farm.cli import lifecycle 12 | from compose_farm.config import Config, Host 13 | from compose_farm.executor import CommandResult 14 | from compose_farm.operations import _migrate_service 15 | 16 | 17 | @pytest.fixture 18 | def basic_config(tmp_path: Path) -> Config: 19 | """Create a basic test config.""" 20 | compose_dir = tmp_path / "compose" 21 | service_dir = compose_dir / "test-service" 22 | service_dir.mkdir(parents=True) 23 | (service_dir / "docker-compose.yml").write_text("services: {}") 24 | return Config( 25 | compose_dir=compose_dir, 26 | hosts={ 27 | "host1": Host(address="localhost"), 28 | "host2": Host(address="localhost"), 29 | }, 30 | services={"test-service": "host2"}, 31 | ) 32 | 33 | 34 | class TestMigrationCommands: 35 | """Tests for migration command sequence.""" 36 | 37 | @pytest.fixture 38 | def config(self, tmp_path: Path) -> Config: 39 | """Create a test config.""" 40 | compose_dir = tmp_path / "compose" 41 | service_dir = compose_dir / "test-service" 42 | service_dir.mkdir(parents=True) 43 | (service_dir / "docker-compose.yml").write_text("services: {}") 44 | return Config( 45 | compose_dir=compose_dir, 46 | hosts={ 47 | "host1": Host(address="localhost"), 48 | "host2": Host(address="localhost"), 49 | }, 50 | services={"test-service": "host2"}, 51 | ) 52 | 53 | async def test_migration_uses_pull_ignore_buildable(self, config: Config) -> None: 54 | """Migration should use 'pull --ignore-buildable' to skip buildable images.""" 55 | commands_called: list[str] = [] 56 | 57 | async def mock_run_compose_step( 58 | cfg: Config, # noqa: ARG001 59 | service: str, 60 | command: str, 61 | *, 62 | raw: bool, # noqa: ARG001 63 | host: str | None = None, # noqa: ARG001 64 | ) -> CommandResult: 65 | commands_called.append(command) 66 | return CommandResult( 67 | service=service, 68 | exit_code=0, 69 | success=True, 70 | ) 71 | 72 | with patch( 73 | "compose_farm.operations._run_compose_step", 74 | side_effect=mock_run_compose_step, 75 | ): 76 | await _migrate_service( 77 | config, 78 | "test-service", 79 | current_host="host1", 80 | target_host="host2", 81 | prefix="[test]", 82 | raw=False, 83 | ) 84 | 85 | # Migration should call pull with --ignore-buildable, then build, then 
down 86 | assert "pull --ignore-buildable" in commands_called 87 | assert "build" in commands_called 88 | assert "down" in commands_called 89 | # pull should come before build 90 | pull_idx = commands_called.index("pull --ignore-buildable") 91 | build_idx = commands_called.index("build") 92 | assert pull_idx < build_idx 93 | 94 | 95 | class TestUpdateCommandSequence: 96 | """Tests for update command sequence.""" 97 | 98 | def test_update_command_sequence_includes_build(self) -> None: 99 | """Update command should use pull --ignore-buildable and build.""" 100 | # This is a static check of the command sequence in lifecycle.py 101 | # The actual command sequence is defined in the update function 102 | 103 | source = inspect.getsource(lifecycle.update) 104 | 105 | # Verify the command sequence includes pull --ignore-buildable 106 | assert "pull --ignore-buildable" in source 107 | # Verify build is included 108 | assert '"build"' in source or "'build'" in source 109 | # Verify the sequence is pull, build, down, up 110 | assert "down" in source 111 | assert "up -d" in source 112 | -------------------------------------------------------------------------------- /docs/dev/future-improvements.md: -------------------------------------------------------------------------------- 1 | # Future Improvements 2 | 3 | Low-priority improvements identified during code review. These are not currently causing issues but could be addressed if they become pain points. 4 | 5 | ## 1. State Module Efficiency (LOW) 6 | 7 | **Current:** Every state operation reads and writes the entire file. 8 | 9 | ```python 10 | def set_service_host(config, service, host): 11 | state = load_state(config) # Read file 12 | state[service] = host 13 | save_state(config, state) # Write file 14 | ``` 15 | 16 | **Impact:** With 87 services, this is fine. With 1000+, it would be slow. 17 | 18 | **Potential fix:** Add batch operations: 19 | ```python 20 | def update_state(config, updates: dict[str, str | None]) -> None: 21 | """Batch update: set services to hosts, None means remove.""" 22 | state = load_state(config) 23 | for service, host in updates.items(): 24 | if host is None: 25 | state.pop(service, None) 26 | else: 27 | state[service] = host 28 | save_state(config, state) 29 | ``` 30 | 31 | **When to do:** Only if state operations become noticeably slow. 32 | 33 | --- 34 | 35 | ## 2. Remote-Aware Compose Path Resolution (LOW) 36 | 37 | **Current:** `config.get_compose_path()` checks if files exist on the local filesystem: 38 | 39 | ```python 40 | def get_compose_path(self, service: str) -> Path: 41 | for filename in ("compose.yaml", "compose.yml", ...): 42 | candidate = service_dir / filename 43 | if candidate.exists(): # Local check! 44 | return candidate 45 | ``` 46 | 47 | **Why this works:** NFS/shared storage means local = remote. 48 | 49 | **Why it could break:** If running compose-farm from a machine without the NFS mount, it returns `compose.yaml` (the default) even if `docker-compose.yml` exists on the remote host. 50 | 51 | **Potential fix:** Query the remote host for file existence, or accept this limitation and document it. 52 | 53 | **When to do:** Only if users need to run compose-farm from non-NFS machines. 54 | 55 | --- 56 | 57 | ## 3. Add Integration Tests for CLI Commands (MEDIUM) 58 | 59 | **Current:** No integration tests for the actual CLI commands. Tests cover the underlying functions but not the Typer commands themselves. 
60 | 61 | **Potential fix:** Add integration tests using `CliRunner` from Typer: 62 | 63 | ```python 64 | from typer.testing import CliRunner 65 | from compose_farm.cli import app 66 | 67 | runner = CliRunner() 68 | 69 | def test_check_command_validates_config(): 70 | result = runner.invoke(app, ["check", "--local"]) 71 | assert result.exit_code == 0 72 | ``` 73 | 74 | **When to do:** When CLI behavior becomes complex enough to warrant dedicated testing. 75 | 76 | --- 77 | 78 | ## 4. Add Tests for operations.py (MEDIUM) 79 | 80 | **Current:** Operations module has 30% coverage. Most logic is tested indirectly through test_refresh.py. 81 | 82 | **Potential fix:** Add dedicated tests for: 83 | - `up_services()` with migration scenarios 84 | - `preflight_check()` 85 | - `check_host_compatibility()` 86 | 87 | **When to do:** When adding new operations or modifying migration logic. 88 | 89 | --- 90 | 91 | ## 5. Consider Structured Logging (LOW) 92 | 93 | **Current:** Operations print directly to console using Rich. This couples the operations module to the Rich library. 94 | 95 | **Potential fix:** Use Python's logging module with a custom Rich handler: 96 | 97 | ```python 98 | import logging 99 | 100 | logger = logging.getLogger(__name__) 101 | 102 | # In operations: 103 | logger.info("Migrating %s from %s to %s", service, old_host, new_host) 104 | 105 | # In the CLI setup - configure Rich handler: 106 | from rich.logging import RichHandler 107 | logging.basicConfig(handlers=[RichHandler()]) 108 | ``` 109 | 110 | **Benefits:** 111 | - Operations become testable without capturing stdout 112 | - Logs can be redirected to files 113 | - Log levels provide filtering 114 | 115 | **When to do:** Only if console output coupling becomes a problem for testing or extensibility. 116 | 117 | --- 118 | 119 | ## Design Decisions to Keep 120 | 121 | These patterns are working well and should be preserved: 122 | 123 | 1. **asyncio + asyncssh** - Solid async foundation 124 | 2. **Pydantic models** - Clean validation 125 | 3. **Rich for output** - Good UX 126 | 4. **Test structure** - Good coverage 127 | 5. **Module separation** - cli/operations/executor/compose pattern 128 | 6. 
**KISS principle** - Don't over-engineer 129 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "compose-farm" 3 | dynamic = ["version"] 4 | description = "Compose Farm - run docker compose commands across multiple hosts" 5 | readme = "README.md" 6 | license = "MIT" 7 | license-files = ["LICENSE"] 8 | authors = [ 9 | { name = "Bas Nijholt", email = "bas@nijho.lt" } 10 | ] 11 | maintainers = [ 12 | { name = "Bas Nijholt", email = "bas@nijho.lt" } 13 | ] 14 | requires-python = ">=3.11" 15 | keywords = [ 16 | "docker", 17 | "docker-compose", 18 | "ssh", 19 | "devops", 20 | "deployment", 21 | "container", 22 | "orchestration", 23 | "multi-host", 24 | "homelab", 25 | "self-hosted", 26 | ] 27 | classifiers = [ 28 | "Development Status :: 4 - Beta", 29 | "Environment :: Console", 30 | "Intended Audience :: Developers", 31 | "Intended Audience :: System Administrators", 32 | "License :: OSI Approved :: MIT License", 33 | "Operating System :: OS Independent", 34 | "Programming Language :: Python :: 3", 35 | "Programming Language :: Python :: 3.11", 36 | "Programming Language :: Python :: 3.12", 37 | "Programming Language :: Python :: 3.13", 38 | "Programming Language :: Python :: 3.14", 39 | "Topic :: System :: Systems Administration", 40 | "Topic :: Utilities", 41 | "Typing :: Typed", 42 | ] 43 | dependencies = [ 44 | "typer>=0.9.0", 45 | "pydantic>=2.0.0", 46 | "asyncssh>=2.14.0", 47 | "pyyaml>=6.0", 48 | "rich>=13.0.0", 49 | ] 50 | 51 | [project.urls] 52 | Homepage = "https://github.com/basnijholt/compose-farm" 53 | Repository = "https://github.com/basnijholt/compose-farm" 54 | Documentation = "https://github.com/basnijholt/compose-farm#readme" 55 | Issues = "https://github.com/basnijholt/compose-farm/issues" 56 | Changelog = "https://github.com/basnijholt/compose-farm/releases" 57 | 58 | [project.scripts] 59 | compose-farm = "compose_farm.cli:app" 60 | cf = "compose_farm.cli:app" 61 | 62 | [build-system] 63 | requires = ["hatchling", "hatch-vcs"] 64 | build-backend = "hatchling.build" 65 | 66 | [tool.hatch.version] 67 | source = "vcs" 68 | 69 | [tool.hatch.build.hooks.vcs] 70 | version-file = "src/compose_farm/_version.py" 71 | 72 | [tool.hatch.build.targets.wheel] 73 | packages = ["src/compose_farm"] 74 | 75 | [tool.ruff] 76 | target-version = "py311" 77 | line-length = 100 78 | 79 | [tool.ruff.lint] 80 | select = ["ALL"] 81 | ignore = [ 82 | "T20", # allow print-style streaming output 83 | "S101", # assertions are fine in tests 84 | "S603", # `subprocess` is constrained and intentional 85 | "ANN002", # allow args without type comments in some call sites 86 | "ANN003", # allow kwargs without type comments in some call sites 87 | "ANN401", # allow `Any` for external library hooks 88 | "D401", # short docstrings are acceptable 89 | "D402", # allow "Return" in first line 90 | "PLW0603", # global statements not used here 91 | "SLF001", # internal attributes may be accessed in helpers 92 | "PLR0913", # typer command signatures naturally take many params 93 | "TD002", # allow TODOs without issue links 94 | "E501", # formatter handles line length 95 | "TRY300", # nested try blocks sometimes clearer for SSH flows 96 | "FBT001", # boolean positional args in CLI are acceptable 97 | "FBT002", # boolean keyword-only args in CLI are acceptable 98 | "BLE001", # broad exceptions acceptable when surfacing remote errors 99 | "COM812", # avoid formatter 
conflicts 100 | "ISC001", # allow implicit string concat on single line when clearer 101 | ] 102 | 103 | [tool.ruff.lint.per-file-ignores] 104 | "tests/*" = ["S101", "PLR2004", "S108", "D102", "D103"] # relaxed docstrings + asserts in tests 105 | 106 | [tool.ruff.lint.mccabe] 107 | max-complexity = 18 108 | 109 | [tool.mypy] 110 | python_version = "3.11" 111 | strict = true 112 | plugins = ["pydantic.mypy"] 113 | 114 | [[tool.mypy.overrides]] 115 | module = "asyncssh.*" 116 | ignore_missing_imports = true 117 | 118 | [[tool.mypy.overrides]] 119 | module = "tests.*" 120 | disallow_untyped_decorators = false 121 | 122 | [tool.pytest.ini_options] 123 | asyncio_mode = "auto" 124 | testpaths = ["tests"] 125 | asyncio_default_fixture_loop_scope = "function" 126 | addopts = [ 127 | "--cov=compose_farm", 128 | "--cov-report=term", 129 | "--cov-report=xml", 130 | "--cov-report=html", 131 | "--no-cov-on-fail", 132 | "-v", 133 | ] 134 | 135 | [tool.coverage.run] 136 | omit = [] 137 | 138 | [tool.coverage.report] 139 | exclude_lines = [ 140 | "pragma: no cover", 141 | "raise NotImplementedError", 142 | "if TYPE_CHECKING:", 143 | 'if __name__ == "__main__":', 144 | ] 145 | 146 | [dependency-groups] 147 | dev = [ 148 | "mypy>=1.19.0", 149 | "pre-commit>=4.5.0", 150 | "pytest>=9.0.2", 151 | "pytest-asyncio>=1.3.0", 152 | "pytest-cov>=6.0.0", 153 | "ruff>=0.14.8", 154 | "types-pyyaml>=6.0.12.20250915", 155 | "markdown-code-runner>=0.7.0", 156 | ] 157 | -------------------------------------------------------------------------------- /docs/truenas-nested-nfs.md: -------------------------------------------------------------------------------- 1 | # TrueNAS NFS: Accessing Child ZFS Datasets 2 | 3 | When NFS-exporting a parent ZFS dataset on TrueNAS, child datasets appear as **empty directories** to NFS clients. This document explains the problem and provides a workaround. 4 | 5 | ## The Problem 6 | 7 | TrueNAS structures storage as ZFS datasets. A common pattern is: 8 | 9 | ``` 10 | tank/data <- parent dataset (NFS exported) 11 | tank/data/app1 <- child dataset 12 | tank/data/app2 <- child dataset 13 | ``` 14 | 15 | When you create an NFS share for `tank/data`, clients mount it and see the `app1/` and `app2/` directories—but they're empty. This happens because each ZFS dataset is a separate filesystem, and NFS doesn't traverse into child filesystems by default. 16 | 17 | ## The Solution: `crossmnt` 18 | 19 | The NFS `crossmnt` export option tells the server to allow clients to traverse into child filesystems. However, TrueNAS doesn't expose this option in the UI. 20 | 21 | ### Workaround Script 22 | 23 | This Python script injects `crossmnt` into `/etc/exports`: 24 | 25 | ```python 26 | #!/usr/bin/env python3 27 | """ 28 | Add crossmnt to TrueNAS NFS exports for child dataset visibility. 29 | 30 | Usage: fix-nfs-crossmnt.py /mnt/pool/dataset 31 | 32 | Setup: 33 | 1. scp fix-nfs-crossmnt.py root@truenas.local:/root/ 34 | 2. chmod +x /root/fix-nfs-crossmnt.py 35 | 3. Test: /root/fix-nfs-crossmnt.py /mnt/pool/dataset 36 | 4. 
Add cron job: TrueNAS UI > System > Advanced > Cron Jobs 37 | Command: /root/fix-nfs-crossmnt.py /mnt/pool/dataset 38 | Schedule: */5 * * * * 39 | """ 40 | 41 | import re 42 | import subprocess 43 | import sys 44 | from pathlib import Path 45 | 46 | EXPORTS_FILE = Path("/etc/exports") 47 | 48 | 49 | def main(): 50 | if len(sys.argv) != 2: 51 | print(f"Usage: {sys.argv[0]} /mnt/pool/dataset", file=sys.stderr) 52 | return 1 53 | 54 | export_path = sys.argv[1] 55 | content = EXPORTS_FILE.read_text() 56 | 57 | if f'"{export_path}"' not in content: 58 | print(f"ERROR: {export_path} not found in {EXPORTS_FILE}", file=sys.stderr) 59 | return 1 60 | 61 | lines = content.splitlines() 62 | result = [] 63 | in_block = False 64 | modified = False 65 | 66 | for line in lines: 67 | if f'"{export_path}"' in line: 68 | in_block = True 69 | elif line.startswith('"'): 70 | in_block = False 71 | 72 | if in_block and line[:1] in (" ", "\t") and "crossmnt" not in line: 73 | line = re.sub(r"\)(\\\s*)?$", r",crossmnt)\1", line) 74 | modified = True 75 | 76 | result.append(line) 77 | 78 | if not modified: 79 | return 0 # Already applied 80 | 81 | EXPORTS_FILE.write_text("\n".join(result) + "\n") 82 | subprocess.run(["exportfs", "-ra"], check=True) 83 | print(f"Added crossmnt to {export_path}") 84 | return 0 85 | 86 | 87 | if __name__ == "__main__": 88 | sys.exit(main()) 89 | ``` 90 | 91 | ## Setup Instructions 92 | 93 | ### 1. Copy the script to TrueNAS 94 | 95 | ```bash 96 | scp fix-nfs-crossmnt.py root@truenas.local:/root/ 97 | ssh root@truenas.local chmod +x /root/fix-nfs-crossmnt.py 98 | ``` 99 | 100 | ### 2. Test manually 101 | 102 | ```bash 103 | ssh root@truenas.local 104 | 105 | # Run the script 106 | /root/fix-nfs-crossmnt.py /mnt/tank/data 107 | 108 | # Verify crossmnt was added 109 | cat /etc/exports 110 | ``` 111 | 112 | You should see `,crossmnt` added to the client options: 113 | 114 | ``` 115 | "/mnt/tank/data"\ 116 | 192.168.1.10(sec=sys,rw,no_subtree_check,crossmnt)\ 117 | 192.168.1.11(sec=sys,rw,no_subtree_check,crossmnt) 118 | ``` 119 | 120 | ### 3. Verify on NFS client 121 | 122 | ```bash 123 | # Before: empty directory 124 | ls /mnt/data/app1/ 125 | # (nothing) 126 | 127 | # After: actual contents visible 128 | ls /mnt/data/app1/ 129 | # config.yaml data/ logs/ 130 | ``` 131 | 132 | ### 4. Make it persistent 133 | 134 | TrueNAS regenerates `/etc/exports` when you modify NFS shares in the UI. To survive this, set up a cron job: 135 | 136 | 1. Go to **TrueNAS UI → System → Advanced → Cron Jobs → Add** 137 | 2. Configure: 138 | - **Description:** Fix NFS crossmnt 139 | - **Command:** `/root/fix-nfs-crossmnt.py /mnt/tank/data` 140 | - **Run As User:** root 141 | - **Schedule:** `*/5 * * * *` (every 5 minutes) 142 | - **Enabled:** checked 143 | 3. Save 144 | 145 | The script is idempotent—it only modifies the file if `crossmnt` is missing, and skips the write entirely if already applied. 146 | 147 | ## How It Works 148 | 149 | 1. Parses `/etc/exports` to find the specified export block 150 | 2. Adds `,crossmnt` before the closing `)` on each client line 151 | 3. Writes the file only if changes were made 152 | 4. Runs `exportfs -ra` to reload the NFS configuration 153 | 154 | ## Why Not Use SMB Instead? 
155 | 156 | SMB handles child datasets seamlessly, but: 157 | 158 | - NFS is simpler for Linux-to-Linux with matching UIDs 159 | - SMB requires more complex permission mapping for Docker volumes 160 | - Many existing setups already use NFS 161 | 162 | ## Related Links 163 | 164 | - [TrueNAS Forum: Add crossmnt option to NFS exports](https://forums.truenas.com/t/add-crossmnt-option-to-nfs-exports/10573) 165 | - [exports(5) man page](https://man7.org/linux/man-pages/man5/exports.5.html) - see `crossmnt` option 166 | 167 | ## Tested On 168 | 169 | - TrueNAS SCALE 24.10 170 | -------------------------------------------------------------------------------- /tests/test_refresh.py: -------------------------------------------------------------------------------- 1 | """Tests for sync command and related functions.""" 2 | 3 | from pathlib import Path 4 | from unittest.mock import AsyncMock, patch 5 | 6 | import pytest 7 | 8 | from compose_farm import executor as executor_module 9 | from compose_farm import state as state_module 10 | from compose_farm.cli import management as cli_management_module 11 | from compose_farm.config import Config, Host 12 | from compose_farm.executor import CommandResult, check_service_running 13 | 14 | 15 | @pytest.fixture 16 | def mock_config(tmp_path: Path) -> Config: 17 | """Create a mock config for testing.""" 18 | compose_dir = tmp_path / "stacks" 19 | compose_dir.mkdir() 20 | 21 | # Create service directories with compose files 22 | for service in ["plex", "jellyfin", "sonarr"]: 23 | svc_dir = compose_dir / service 24 | svc_dir.mkdir() 25 | (svc_dir / "compose.yaml").write_text(f"# {service} compose file\n") 26 | 27 | return Config( 28 | compose_dir=compose_dir, 29 | hosts={ 30 | "nas01": Host(address="192.168.1.10", user="admin", port=22), 31 | "nas02": Host(address="192.168.1.11", user="admin", port=22), 32 | }, 33 | services={ 34 | "plex": "nas01", 35 | "jellyfin": "nas01", 36 | "sonarr": "nas02", 37 | }, 38 | ) 39 | 40 | 41 | @pytest.fixture 42 | def state_dir(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> Path: 43 | """Create a temporary state directory and patch _get_state_path.""" 44 | state_path = tmp_path / ".config" / "compose-farm" 45 | state_path.mkdir(parents=True) 46 | 47 | def mock_get_state_path() -> Path: 48 | return state_path / "state.yaml" 49 | 50 | monkeypatch.setattr(state_module, "_get_state_path", mock_get_state_path) 51 | return state_path 52 | 53 | 54 | class TestCheckServiceRunning: 55 | """Tests for check_service_running function.""" 56 | 57 | @pytest.mark.asyncio 58 | async def test_service_running(self, mock_config: Config) -> None: 59 | """Returns True when service has running containers.""" 60 | with patch.object(executor_module, "run_command", new_callable=AsyncMock) as mock_run: 61 | mock_run.return_value = CommandResult( 62 | service="plex", 63 | exit_code=0, 64 | success=True, 65 | stdout="abc123\ndef456\n", 66 | ) 67 | result = await check_service_running(mock_config, "plex", "nas01") 68 | assert result is True 69 | 70 | @pytest.mark.asyncio 71 | async def test_service_not_running(self, mock_config: Config) -> None: 72 | """Returns False when service has no running containers.""" 73 | with patch.object(executor_module, "run_command", new_callable=AsyncMock) as mock_run: 74 | mock_run.return_value = CommandResult( 75 | service="plex", 76 | exit_code=0, 77 | success=True, 78 | stdout="", 79 | ) 80 | result = await check_service_running(mock_config, "plex", "nas01") 81 | assert result is False 82 | 83 | @pytest.mark.asyncio 84 
| async def test_command_failed(self, mock_config: Config) -> None: 85 | """Returns False when command fails.""" 86 | with patch.object(executor_module, "run_command", new_callable=AsyncMock) as mock_run: 87 | mock_run.return_value = CommandResult( 88 | service="plex", 89 | exit_code=1, 90 | success=False, 91 | ) 92 | result = await check_service_running(mock_config, "plex", "nas01") 93 | assert result is False 94 | 95 | 96 | class TestReportSyncChanges: 97 | """Tests for _report_sync_changes function.""" 98 | 99 | def test_reports_added(self, capsys: pytest.CaptureFixture[str]) -> None: 100 | """Reports newly discovered services.""" 101 | cli_management_module._report_sync_changes( 102 | added=["plex", "jellyfin"], 103 | removed=[], 104 | changed=[], 105 | discovered={"plex": "nas01", "jellyfin": "nas02"}, 106 | current_state={}, 107 | ) 108 | captured = capsys.readouterr() 109 | assert "New services found (2)" in captured.out 110 | assert "+ plex on nas01" in captured.out 111 | assert "+ jellyfin on nas02" in captured.out 112 | 113 | def test_reports_removed(self, capsys: pytest.CaptureFixture[str]) -> None: 114 | """Reports services that are no longer running.""" 115 | cli_management_module._report_sync_changes( 116 | added=[], 117 | removed=["sonarr"], 118 | changed=[], 119 | discovered={}, 120 | current_state={"sonarr": "nas01"}, 121 | ) 122 | captured = capsys.readouterr() 123 | assert "Services no longer running (1)" in captured.out 124 | assert "- sonarr (was on nas01)" in captured.out 125 | 126 | def test_reports_changed(self, capsys: pytest.CaptureFixture[str]) -> None: 127 | """Reports services that moved to a different host.""" 128 | cli_management_module._report_sync_changes( 129 | added=[], 130 | removed=[], 131 | changed=[("plex", "nas01", "nas02")], 132 | discovered={"plex": "nas02"}, 133 | current_state={"plex": "nas01"}, 134 | ) 135 | captured = capsys.readouterr() 136 | assert "Services on different hosts (1)" in captured.out 137 | assert "~ plex: nas01 → nas02" in captured.out 138 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # Compose Farm Examples 2 | 3 | Real-world examples demonstrating compose-farm patterns for multi-host Docker deployments. 4 | 5 | ## Services 6 | 7 | | Service | Type | Demonstrates | 8 | |---------|------|--------------| 9 | | [traefik](traefik/) | Infrastructure | Reverse proxy, Let's Encrypt, file-provider | 10 | | [mealie](mealie/) | Single container | Traefik labels, resource limits, environment vars | 11 | | [uptime-kuma](uptime-kuma/) | Single container | Docker socket, user mapping, custom DNS | 12 | | [paperless-ngx](paperless-ngx/) | Multi-container | Redis + App stack (SQLite) | 13 | | [autokuma](autokuma/) | Multi-host | Demonstrates `all` keyword (runs on every host) | 14 | 15 | ## Key Patterns 16 | 17 | ### External Network 18 | 19 | All services connect to a shared external network for inter-service communication: 20 | 21 | ```yaml 22 | networks: 23 | mynetwork: 24 | external: true 25 | ``` 26 | 27 | Create it on each host with consistent settings: 28 | 29 | ```bash 30 | compose-farm init-network --network mynetwork --subnet 172.20.0.0/16 31 | ``` 32 | 33 | ### Traefik Labels (Dual Routes) 34 | 35 | Services expose two routes for different access patterns: 36 | 37 | 1. **HTTPS route** (`websecure` entrypoint): For your custom domain with Let's Encrypt TLS 38 | 2. 
**HTTP route** (`web` entrypoint): For `.local` domains on your LAN (no TLS needed) 39 | 40 | This pattern allows accessing services via: 41 | - `https://mealie.example.com` - from anywhere, with TLS 42 | - `http://mealie.local` - from your local network, no TLS overhead 43 | 44 | ```yaml 45 | labels: 46 | # HTTPS route for custom domain (e.g., mealie.example.com) 47 | - traefik.enable=true 48 | - traefik.http.routers.myapp.rule=Host(`myapp.${DOMAIN}`) 49 | - traefik.http.routers.myapp.entrypoints=websecure 50 | - traefik.http.services.myapp.loadbalancer.server.port=8080 51 | # HTTP route for .local domain (e.g., myapp.local) 52 | - traefik.http.routers.myapp-local.rule=Host(`myapp.local`) 53 | - traefik.http.routers.myapp-local.entrypoints=web 54 | ``` 55 | 56 | > **Note:** `.local` domains require local DNS (e.g., Pi-hole, Technitium) to resolve to your Traefik host. 57 | 58 | ### Environment Variables 59 | 60 | Each service has a `.env` file for secrets and domain configuration. 61 | Edit these files to set your domain and credentials: 62 | 63 | ```bash 64 | # Example: set your domain 65 | echo "DOMAIN=example.com" > mealie/.env 66 | ``` 67 | 68 | Variables like `${DOMAIN}` are substituted at runtime by Docker Compose. 69 | 70 | ### NFS Volume Mounts 71 | 72 | All data is stored on shared NFS storage at `/mnt/data/`: 73 | 74 | ```yaml 75 | volumes: 76 | - /mnt/data/myapp:/app/data 77 | ``` 78 | 79 | This allows services to migrate between hosts without data loss. 80 | 81 | ### Multi-Host Services 82 | 83 | Services that need to run on every host (e.g., monitoring agents): 84 | 85 | ```yaml 86 | # In compose-farm.yaml 87 | services: 88 | autokuma: all # Runs on every configured host 89 | ``` 90 | 91 | ### Multi-Container Stacks 92 | 93 | Database-backed apps with multiple services: 94 | 95 | ```yaml 96 | services: 97 | redis: 98 | image: redis:7 99 | app: 100 | depends_on: 101 | - redis 102 | ``` 103 | 104 | > **NFS + PostgreSQL Warning:** PostgreSQL should NOT run on NFS storage due to 105 | > fsync and file locking issues. Use SQLite (safe for single-writer on NFS) or 106 | > keep PostgreSQL data on local volumes (non-migratable). 107 | 108 | ### AutoKuma Labels (Optional) 109 | 110 | The autokuma example demonstrates compose-farm's **multi-host feature** - running the same service on all hosts using the `all` keyword. AutoKuma itself is not part of compose-farm; it's just a good example because it needs to run on every host to monitor local Docker containers. 111 | 112 | [AutoKuma](https://github.com/BigBoot/AutoKuma) automatically creates Uptime Kuma monitors from Docker labels: 113 | 114 | ```yaml 115 | labels: 116 | - kuma.myapp.http.name=My App 117 | - kuma.myapp.http.url=https://myapp.${DOMAIN} 118 | ``` 119 | 120 | ## Quick Start 121 | 122 | ```bash 123 | cd examples 124 | 125 | # 1. Create the shared network on all hosts 126 | compose-farm init-network 127 | 128 | # 2. Start Traefik first (the reverse proxy) 129 | compose-farm up traefik 130 | 131 | # 3. Start other services 132 | compose-farm up mealie uptime-kuma 133 | 134 | # 4. Check status 135 | compose-farm ps 136 | 137 | # 5. Generate Traefik file-provider config for cross-host routing 138 | compose-farm traefik-file --all 139 | 140 | # 6. View logs 141 | compose-farm logs mealie 142 | 143 | # 7. 
Stop everything 144 | compose-farm down --all 145 | ``` 146 | 147 | ## Configuration 148 | 149 | The `compose-farm.yaml` shows a multi-host setup: 150 | 151 | - **primary** (192.168.1.10): Runs Traefik and heavy services 152 | - **secondary** (192.168.1.11): Runs lighter services 153 | - **autokuma**: Runs on ALL hosts to monitor local containers 154 | 155 | When Traefik runs on `primary` and a service runs on `secondary`, compose-farm 156 | automatically generates file-provider config so Traefik can route to it. 157 | 158 | ## Traefik File-Provider 159 | 160 | When services run on different hosts than Traefik, use `traefik-file` to generate routing config: 161 | 162 | ```bash 163 | # Generate config for all services 164 | compose-farm traefik-file --all -o traefik/dynamic.d/compose-farm.yml 165 | 166 | # Or configure auto-generation in compose-farm.yaml: 167 | traefik_file: /opt/stacks/traefik/dynamic.d/compose-farm.yml 168 | traefik_service: traefik 169 | ``` 170 | 171 | With `traefik_file` configured, compose-farm automatically regenerates the config after `up`, `down`, `restart`, and `update` commands. 172 | -------------------------------------------------------------------------------- /tests/test_config.py: -------------------------------------------------------------------------------- 1 | """Tests for config module.""" 2 | 3 | from pathlib import Path 4 | 5 | import pytest 6 | import yaml 7 | 8 | from compose_farm.config import Config, Host, load_config 9 | 10 | 11 | class TestHost: 12 | """Tests for Host model.""" 13 | 14 | def test_host_with_all_fields(self) -> None: 15 | host = Host(address="192.168.1.10", user="docker", port=2222) 16 | assert host.address == "192.168.1.10" 17 | assert host.user == "docker" 18 | assert host.port == 2222 19 | 20 | def test_host_defaults(self) -> None: 21 | host = Host(address="192.168.1.10") 22 | assert host.address == "192.168.1.10" 23 | assert host.port == 22 24 | # user defaults to current user, just check it's set 25 | assert host.user 26 | 27 | def test_local_host(self) -> None: 28 | host = Host(address="local") 29 | assert host.address == "local" 30 | 31 | 32 | class TestConfig: 33 | """Tests for Config model.""" 34 | 35 | def test_config_validation(self) -> None: 36 | config = Config( 37 | compose_dir=Path("/opt/compose"), 38 | hosts={"nas01": Host(address="192.168.1.10")}, 39 | services={"plex": "nas01"}, 40 | ) 41 | assert config.compose_dir == Path("/opt/compose") 42 | assert "nas01" in config.hosts 43 | assert config.services["plex"] == "nas01" 44 | 45 | def test_config_invalid_service_host(self) -> None: 46 | with pytest.raises(ValueError, match="unknown host"): 47 | Config( 48 | compose_dir=Path("/opt/compose"), 49 | hosts={"nas01": Host(address="192.168.1.10")}, 50 | services={"plex": "nonexistent"}, 51 | ) 52 | 53 | def test_get_host(self) -> None: 54 | config = Config( 55 | compose_dir=Path("/opt/compose"), 56 | hosts={"nas01": Host(address="192.168.1.10")}, 57 | services={"plex": "nas01"}, 58 | ) 59 | host = config.get_host("plex") 60 | assert host.address == "192.168.1.10" 61 | 62 | def test_get_host_unknown_service(self) -> None: 63 | config = Config( 64 | compose_dir=Path("/opt/compose"), 65 | hosts={"nas01": Host(address="192.168.1.10")}, 66 | services={"plex": "nas01"}, 67 | ) 68 | with pytest.raises(ValueError, match="Unknown service"): 69 | config.get_host("unknown") 70 | 71 | def test_get_compose_path(self) -> None: 72 | config = Config( 73 | compose_dir=Path("/opt/compose"), 74 | hosts={"nas01": 
Host(address="192.168.1.10")}, 75 | services={"plex": "nas01"}, 76 | ) 77 | path = config.get_compose_path("plex") 78 | # Defaults to compose.yaml when no file exists 79 | assert path == Path("/opt/compose/plex/compose.yaml") 80 | 81 | 82 | class TestLoadConfig: 83 | """Tests for load_config function.""" 84 | 85 | def test_load_config_full_host_format(self, tmp_path: Path) -> None: 86 | config_data = { 87 | "compose_dir": "/opt/compose", 88 | "hosts": { 89 | "nas01": {"address": "192.168.1.10", "user": "docker", "port": 2222}, 90 | }, 91 | "services": {"plex": "nas01"}, 92 | } 93 | config_file = tmp_path / "sdc.yaml" 94 | config_file.write_text(yaml.dump(config_data)) 95 | 96 | config = load_config(config_file) 97 | assert config.hosts["nas01"].address == "192.168.1.10" 98 | assert config.hosts["nas01"].user == "docker" 99 | assert config.hosts["nas01"].port == 2222 100 | 101 | def test_load_config_simple_host_format(self, tmp_path: Path) -> None: 102 | config_data = { 103 | "compose_dir": "/opt/compose", 104 | "hosts": {"nas01": "192.168.1.10"}, 105 | "services": {"plex": "nas01"}, 106 | } 107 | config_file = tmp_path / "sdc.yaml" 108 | config_file.write_text(yaml.dump(config_data)) 109 | 110 | config = load_config(config_file) 111 | assert config.hosts["nas01"].address == "192.168.1.10" 112 | 113 | def test_load_config_mixed_host_formats(self, tmp_path: Path) -> None: 114 | config_data = { 115 | "compose_dir": "/opt/compose", 116 | "hosts": { 117 | "nas01": {"address": "192.168.1.10", "user": "docker"}, 118 | "nas02": "192.168.1.11", 119 | }, 120 | "services": {"plex": "nas01", "jellyfin": "nas02"}, 121 | } 122 | config_file = tmp_path / "sdc.yaml" 123 | config_file.write_text(yaml.dump(config_data)) 124 | 125 | config = load_config(config_file) 126 | assert config.hosts["nas01"].user == "docker" 127 | assert config.hosts["nas02"].address == "192.168.1.11" 128 | 129 | def test_load_config_not_found(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: 130 | monkeypatch.chdir(tmp_path) 131 | monkeypatch.delenv("CF_CONFIG", raising=False) 132 | monkeypatch.setenv("XDG_CONFIG_HOME", str(tmp_path / "empty_config")) 133 | with pytest.raises(FileNotFoundError, match="Config file not found"): 134 | load_config() 135 | 136 | def test_load_config_local_host(self, tmp_path: Path) -> None: 137 | config_data = { 138 | "compose_dir": "/opt/compose", 139 | "hosts": {"local": "localhost"}, 140 | "services": {"test": "local"}, 141 | } 142 | config_file = tmp_path / "sdc.yaml" 143 | config_file.write_text(yaml.dump(config_data)) 144 | 145 | config = load_config(config_file) 146 | assert config.hosts["local"].address == "localhost" 147 | -------------------------------------------------------------------------------- /src/compose_farm/state.py: -------------------------------------------------------------------------------- 1 | """State tracking for deployed services.""" 2 | 3 | from __future__ import annotations 4 | 5 | import contextlib 6 | from typing import TYPE_CHECKING, Any 7 | 8 | import yaml 9 | 10 | if TYPE_CHECKING: 11 | from collections.abc import Generator 12 | 13 | from .config import Config 14 | 15 | 16 | def load_state(config: Config) -> dict[str, str | list[str]]: 17 | """Load the current deployment state. 18 | 19 | Returns a dict mapping service names to host name(s). 20 | Multi-host services store a list of hosts. 
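Example with hypothetical host names -- a state file containing

    deployed:
      plex: nas01
      autokuma: [nas01, nas02]

loads as {"plex": "nas01", "autokuma": ["nas01", "nas02"]}.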
21 | """ 22 | state_path = config.get_state_path() 23 | if not state_path.exists(): 24 | return {} 25 | 26 | with state_path.open() as f: 27 | data: dict[str, Any] = yaml.safe_load(f) or {} 28 | 29 | deployed: dict[str, str | list[str]] = data.get("deployed", {}) 30 | return deployed 31 | 32 | 33 | def _sorted_dict(d: dict[str, str | list[str]]) -> dict[str, str | list[str]]: 34 | """Return a dictionary sorted by keys.""" 35 | return dict(sorted(d.items(), key=lambda item: item[0])) 36 | 37 | 38 | def save_state(config: Config, deployed: dict[str, str | list[str]]) -> None: 39 | """Save the deployment state.""" 40 | state_path = config.get_state_path() 41 | with state_path.open("w") as f: 42 | yaml.safe_dump({"deployed": _sorted_dict(deployed)}, f, sort_keys=False) 43 | 44 | 45 | @contextlib.contextmanager 46 | def _modify_state(config: Config) -> Generator[dict[str, str | list[str]], None, None]: 47 | """Context manager to load, modify, and save state.""" 48 | state = load_state(config) 49 | yield state 50 | save_state(config, state) 51 | 52 | 53 | def get_service_host(config: Config, service: str) -> str | None: 54 | """Get the host where a service is currently deployed. 55 | 56 | For multi-host services, returns the first host or None. 57 | """ 58 | state = load_state(config) 59 | value = state.get(service) 60 | if value is None: 61 | return None 62 | if isinstance(value, list): 63 | return value[0] if value else None 64 | return value 65 | 66 | 67 | def set_service_host(config: Config, service: str, host: str) -> None: 68 | """Record that a service is deployed on a host.""" 69 | with _modify_state(config) as state: 70 | state[service] = host 71 | 72 | 73 | def set_multi_host_service(config: Config, service: str, hosts: list[str]) -> None: 74 | """Record that a multi-host service is deployed on multiple hosts.""" 75 | with _modify_state(config) as state: 76 | state[service] = hosts 77 | 78 | 79 | def remove_service(config: Config, service: str) -> None: 80 | """Remove a service from the state (after down).""" 81 | with _modify_state(config) as state: 82 | state.pop(service, None) 83 | 84 | 85 | def add_service_to_host(config: Config, service: str, host: str) -> None: 86 | """Add a specific host to a service's state. 87 | 88 | For multi-host services, adds the host to the list if not present. 89 | For single-host services, sets the host. 90 | """ 91 | with _modify_state(config) as state: 92 | current = state.get(service) 93 | 94 | if config.is_multi_host(service): 95 | # Multi-host: add to list if not present 96 | if isinstance(current, list): 97 | if host not in current: 98 | state[service] = [*current, host] 99 | else: 100 | state[service] = [host] 101 | else: 102 | # Single-host: just set it 103 | state[service] = host 104 | 105 | 106 | def remove_service_from_host(config: Config, service: str, host: str) -> None: 107 | """Remove a specific host from a service's state. 108 | 109 | For multi-host services, removes just that host from the list. 110 | For single-host services, removes the service entirely if host matches. 
111 | """ 112 | with _modify_state(config) as state: 113 | current = state.get(service) 114 | if current is None: 115 | return 116 | 117 | if isinstance(current, list): 118 | # Multi-host: remove this host from list 119 | remaining = [h for h in current if h != host] 120 | if remaining: 121 | state[service] = remaining 122 | else: 123 | state.pop(service, None) 124 | elif current == host: 125 | # Single-host: remove if matches 126 | state.pop(service, None) 127 | 128 | 129 | def get_services_needing_migration(config: Config) -> list[str]: 130 | """Get services where current host differs from configured host. 131 | 132 | Multi-host services are never considered for migration. 133 | """ 134 | needs_migration = [] 135 | for service in config.services: 136 | # Skip multi-host services 137 | if config.is_multi_host(service): 138 | continue 139 | 140 | configured_host = config.get_hosts(service)[0] 141 | current_host = get_service_host(config, service) 142 | if current_host and current_host != configured_host: 143 | needs_migration.append(service) 144 | return needs_migration 145 | 146 | 147 | def get_orphaned_services(config: Config) -> dict[str, str | list[str]]: 148 | """Get services that are in state but not in config. 149 | 150 | These are services that were previously deployed but have been 151 | removed from the config file (e.g., commented out). 152 | 153 | Returns a dict mapping service name to host(s) where it's deployed. 154 | """ 155 | state = load_state(config) 156 | return {service: hosts for service, hosts in state.items() if service not in config.services} 157 | 158 | 159 | def get_services_not_in_state(config: Config) -> list[str]: 160 | """Get services that are in config but not in state. 161 | 162 | These are services that should be running but aren't tracked 163 | (e.g., newly added to config, or previously stopped as orphans). 
164 | """ 165 | state = load_state(config) 166 | return [service for service in config.services if service not in state] 167 | -------------------------------------------------------------------------------- /src/compose_farm/logs.py: -------------------------------------------------------------------------------- 1 | """Snapshot current compose images into a TOML log.""" 2 | 3 | from __future__ import annotations 4 | 5 | import json 6 | import tomllib 7 | from dataclasses import dataclass 8 | from datetime import UTC, datetime 9 | from typing import TYPE_CHECKING, Any 10 | 11 | from .executor import run_compose 12 | from .paths import xdg_config_home 13 | 14 | if TYPE_CHECKING: 15 | from collections.abc import Awaitable, Callable, Iterable 16 | from pathlib import Path 17 | 18 | from .config import Config 19 | from .executor import CommandResult 20 | 21 | 22 | DEFAULT_LOG_PATH = xdg_config_home() / "compose-farm" / "dockerfarm-log.toml" 23 | _DIGEST_HEX_LENGTH = 64 24 | 25 | 26 | @dataclass(frozen=True) 27 | class SnapshotEntry: 28 | """Normalized image snapshot for a single service.""" 29 | 30 | service: str 31 | host: str 32 | compose_file: Path 33 | image: str 34 | digest: str 35 | captured_at: datetime 36 | 37 | def as_dict(self, first_seen: str, last_seen: str) -> dict[str, str]: 38 | """Render snapshot as a TOML-friendly dict.""" 39 | return { 40 | "service": self.service, 41 | "host": self.host, 42 | "compose_file": str(self.compose_file), 43 | "image": self.image, 44 | "digest": self.digest, 45 | "first_seen": first_seen, 46 | "last_seen": last_seen, 47 | } 48 | 49 | 50 | def isoformat(dt: datetime) -> str: 51 | """Format a datetime as an ISO 8601 string with Z suffix for UTC.""" 52 | return dt.astimezone(UTC).replace(microsecond=0).isoformat().replace("+00:00", "Z") 53 | 54 | 55 | def _escape(value: str) -> str: 56 | return value.replace("\\", "\\\\").replace('"', '\\"') 57 | 58 | 59 | def _parse_images_output(raw: str) -> list[dict[str, Any]]: 60 | """Parse `docker compose images --format json` output. 61 | 62 | Handles both a JSON array and newline-separated JSON objects for robustness. 
63 | """ 64 | raw = raw.strip() 65 | if not raw: 66 | return [] 67 | 68 | try: 69 | parsed = json.loads(raw) 70 | except json.JSONDecodeError: 71 | objects = [] 72 | for line in raw.splitlines(): 73 | if not line.strip(): 74 | continue 75 | objects.append(json.loads(line)) 76 | return objects 77 | 78 | if isinstance(parsed, list): 79 | return parsed 80 | if isinstance(parsed, dict): 81 | return [parsed] 82 | return [] 83 | 84 | 85 | def _extract_image_fields(record: dict[str, Any]) -> tuple[str, str]: 86 | """Extract image name and digest with fallbacks.""" 87 | image = record.get("Image") or record.get("Repository") or record.get("Name") or "" 88 | tag = record.get("Tag") or record.get("Version") 89 | if tag and ":" not in image.rsplit("/", 1)[-1]: 90 | image = f"{image}:{tag}" 91 | 92 | digest = ( 93 | record.get("Digest") 94 | or record.get("Image ID") 95 | or record.get("ImageID") 96 | or record.get("ID") 97 | or "" 98 | ) 99 | 100 | if digest and not digest.startswith("sha256:") and len(digest) == _DIGEST_HEX_LENGTH: 101 | digest = f"sha256:{digest}" 102 | 103 | return image, digest 104 | 105 | 106 | async def collect_service_entries( 107 | config: Config, 108 | service: str, 109 | *, 110 | now: datetime, 111 | run_compose_fn: Callable[..., Awaitable[CommandResult]] = run_compose, 112 | ) -> list[SnapshotEntry]: 113 | """Run `docker compose images` for a service and normalize results.""" 114 | result = await run_compose_fn(config, service, "images --format json", stream=False) 115 | if not result.success: 116 | msg = result.stderr or f"compose images exited with {result.exit_code}" 117 | error = f"[{service}] Unable to read images: {msg}" 118 | raise RuntimeError(error) 119 | 120 | records = _parse_images_output(result.stdout) 121 | # Use first host for snapshots (multi-host services use same images on all hosts) 122 | host_name = config.get_hosts(service)[0] 123 | compose_path = config.get_compose_path(service) 124 | 125 | entries: list[SnapshotEntry] = [] 126 | for record in records: 127 | image, digest = _extract_image_fields(record) 128 | if not digest: 129 | continue 130 | entries.append( 131 | SnapshotEntry( 132 | service=service, 133 | host=host_name, 134 | compose_file=compose_path, 135 | image=image, 136 | digest=digest, 137 | captured_at=now, 138 | ) 139 | ) 140 | return entries 141 | 142 | 143 | def load_existing_entries(log_path: Path) -> list[dict[str, str]]: 144 | """Load existing snapshot entries from a TOML log file.""" 145 | if not log_path.exists(): 146 | return [] 147 | data = tomllib.loads(log_path.read_text()) 148 | return list(data.get("entries", [])) 149 | 150 | 151 | def merge_entries( 152 | existing: Iterable[dict[str, str]], 153 | new_entries: Iterable[SnapshotEntry], 154 | *, 155 | now_iso: str, 156 | ) -> list[dict[str, str]]: 157 | """Merge new snapshot entries with existing ones, preserving first_seen timestamps.""" 158 | merged: dict[tuple[str, str, str], dict[str, str]] = { 159 | (e["service"], e["host"], e["digest"]): dict(e) for e in existing 160 | } 161 | 162 | for entry in new_entries: 163 | key = (entry.service, entry.host, entry.digest) 164 | first_seen = merged.get(key, {}).get("first_seen", now_iso) 165 | merged[key] = entry.as_dict(first_seen, now_iso) 166 | 167 | return list(merged.values()) 168 | 169 | 170 | def write_toml(log_path: Path, *, meta: dict[str, str], entries: list[dict[str, str]]) -> None: 171 | """Write snapshot entries to a TOML log file.""" 172 | lines: list[str] = ["[meta]"] 173 | lines.extend(f'{key} = 
"{_escape(meta[key])}"' for key in sorted(meta)) 174 | 175 | if entries: 176 | lines.append("") 177 | 178 | for entry in sorted(entries, key=lambda e: (e["service"], e["host"], e["digest"])): 179 | lines.append("[[entries]]") 180 | for field in [ 181 | "service", 182 | "host", 183 | "compose_file", 184 | "image", 185 | "digest", 186 | "first_seen", 187 | "last_seen", 188 | ]: 189 | value = entry[field] 190 | lines.append(f'{field} = "{_escape(str(value))}"') 191 | lines.append("") 192 | 193 | content = "\n".join(lines).rstrip() + "\n" 194 | log_path.parent.mkdir(parents=True, exist_ok=True) 195 | log_path.write_text(content) 196 | -------------------------------------------------------------------------------- /src/compose_farm/config.py: -------------------------------------------------------------------------------- 1 | """Configuration loading and Pydantic models.""" 2 | 3 | from __future__ import annotations 4 | 5 | import getpass 6 | import os 7 | from pathlib import Path 8 | 9 | import yaml 10 | from pydantic import BaseModel, Field, model_validator 11 | 12 | from .paths import xdg_config_home 13 | 14 | 15 | class Host(BaseModel): 16 | """SSH host configuration.""" 17 | 18 | address: str 19 | user: str = Field(default_factory=getpass.getuser) 20 | port: int = 22 21 | 22 | 23 | class Config(BaseModel): 24 | """Main configuration.""" 25 | 26 | compose_dir: Path = Path("/opt/compose") 27 | hosts: dict[str, Host] 28 | services: dict[str, str | list[str]] # service_name -> host_name or list of hosts 29 | traefik_file: Path | None = None # Auto-regenerate traefik config after up/down 30 | traefik_service: str | None = None # Service name for Traefik (skip its host in file-provider) 31 | config_path: Path = Path() # Set by load_config() 32 | 33 | def get_state_path(self) -> Path: 34 | """Get the state file path (stored alongside config).""" 35 | return self.config_path.parent / "compose-farm-state.yaml" 36 | 37 | @model_validator(mode="after") 38 | def validate_hosts_and_services(self) -> Config: 39 | """Validate host names and service configurations.""" 40 | # "all" is reserved keyword, cannot be used as host name 41 | if "all" in self.hosts: 42 | msg = "'all' is a reserved keyword and cannot be used as a host name" 43 | raise ValueError(msg) 44 | 45 | for service, host_value in self.services.items(): 46 | # Validate list configurations 47 | if isinstance(host_value, list): 48 | if not host_value: 49 | msg = f"Service '{service}' has empty host list" 50 | raise ValueError(msg) 51 | if len(host_value) != len(set(host_value)): 52 | msg = f"Service '{service}' has duplicate hosts in list" 53 | raise ValueError(msg) 54 | 55 | # Validate all referenced hosts exist 56 | host_names = self.get_hosts(service) 57 | for host_name in host_names: 58 | if host_name not in self.hosts: 59 | msg = f"Service '{service}' references unknown host '{host_name}'" 60 | raise ValueError(msg) 61 | return self 62 | 63 | def get_hosts(self, service: str) -> list[str]: 64 | """Get list of host names for a service. 
65 | 66 | Supports: 67 | - Single host: "truenas-debian" -> ["truenas-debian"] 68 | - All hosts: "all" -> list of all configured hosts 69 | - Explicit list: ["host1", "host2"] -> ["host1", "host2"] 70 | """ 71 | if service not in self.services: 72 | msg = f"Unknown service: {service}" 73 | raise ValueError(msg) 74 | host_value = self.services[service] 75 | if isinstance(host_value, list): 76 | return host_value 77 | if host_value == "all": 78 | return list(self.hosts.keys()) 79 | return [host_value] 80 | 81 | def is_multi_host(self, service: str) -> bool: 82 | """Check if a service runs on multiple hosts.""" 83 | return len(self.get_hosts(service)) > 1 84 | 85 | def get_host(self, service: str) -> Host: 86 | """Get host config for a service (first host if multi-host).""" 87 | if service not in self.services: 88 | msg = f"Unknown service: {service}" 89 | raise ValueError(msg) 90 | host_names = self.get_hosts(service) 91 | return self.hosts[host_names[0]] 92 | 93 | def get_compose_path(self, service: str) -> Path: 94 | """Get compose file path for a service. 95 | 96 | Tries compose.yaml first, then docker-compose.yml. 97 | """ 98 | service_dir = self.compose_dir / service 99 | for filename in ( 100 | "compose.yaml", 101 | "compose.yml", 102 | "docker-compose.yml", 103 | "docker-compose.yaml", 104 | ): 105 | candidate = service_dir / filename 106 | if candidate.exists(): 107 | return candidate 108 | # Default to compose.yaml if none exist (will error later) 109 | return service_dir / "compose.yaml" 110 | 111 | def discover_compose_dirs(self) -> set[str]: 112 | """Find all directories in compose_dir that contain a compose file.""" 113 | compose_filenames = { 114 | "compose.yaml", 115 | "compose.yml", 116 | "docker-compose.yml", 117 | "docker-compose.yaml", 118 | } 119 | found: set[str] = set() 120 | if not self.compose_dir.exists(): 121 | return found 122 | for subdir in self.compose_dir.iterdir(): 123 | if subdir.is_dir(): 124 | for filename in compose_filenames: 125 | if (subdir / filename).exists(): 126 | found.add(subdir.name) 127 | break 128 | return found 129 | 130 | 131 | def _parse_hosts(raw_hosts: dict[str, str | dict[str, str | int]]) -> dict[str, Host]: 132 | """Parse hosts from config, handling both simple and full forms.""" 133 | hosts = {} 134 | for name, value in raw_hosts.items(): 135 | if isinstance(value, str): 136 | # Simple form: hostname: address 137 | hosts[name] = Host(address=value) 138 | else: 139 | # Full form: hostname: {address: ..., user: ..., port: ...} 140 | hosts[name] = Host(**value) 141 | return hosts 142 | 143 | 144 | def load_config(path: Path | None = None) -> Config: 145 | """Load configuration from YAML file. 146 | 147 | Search order: 148 | 1. Explicit path if provided via --config 149 | 2. CF_CONFIG environment variable 150 | 3. ./compose-farm.yaml 151 | 4. $XDG_CONFIG_HOME/compose-farm/compose-farm.yaml (defaults to ~/.config) 152 | """ 153 | search_paths = [ 154 | Path("compose-farm.yaml"), 155 | xdg_config_home() / "compose-farm" / "compose-farm.yaml", 156 | ] 157 | 158 | if path: 159 | config_path = path 160 | elif env_path := os.environ.get("CF_CONFIG"): 161 | config_path = Path(env_path) 162 | else: 163 | config_path = None 164 | for p in search_paths: 165 | if p.exists(): 166 | config_path = p 167 | break 168 | 169 | if config_path is None or not config_path.exists(): 170 | msg = f"Config file not found. 
Searched: {', '.join(str(p) for p in search_paths)}" 171 | raise FileNotFoundError(msg) 172 | 173 | if config_path.is_dir(): 174 | msg = ( 175 | f"Config path is a directory, not a file: {config_path}\n" 176 | "This often happens when Docker creates an empty directory for a missing mount." 177 | ) 178 | raise FileNotFoundError(msg) 179 | 180 | with config_path.open() as f: 181 | raw = yaml.safe_load(f) 182 | 183 | # Parse hosts with flexible format support 184 | raw["hosts"] = _parse_hosts(raw.get("hosts", {})) 185 | raw["config_path"] = config_path.resolve() 186 | 187 | return Config(**raw) 188 | -------------------------------------------------------------------------------- /src/compose_farm/cli/common.py: -------------------------------------------------------------------------------- 1 | """Shared CLI helpers, options, and utilities.""" 2 | 3 | from __future__ import annotations 4 | 5 | import asyncio 6 | import contextlib 7 | from pathlib import Path 8 | from typing import TYPE_CHECKING, Annotated, TypeVar 9 | 10 | import typer 11 | from rich.progress import ( 12 | BarColumn, 13 | MofNCompleteColumn, 14 | Progress, 15 | SpinnerColumn, 16 | TaskID, 17 | TextColumn, 18 | TimeElapsedColumn, 19 | ) 20 | 21 | from compose_farm.console import console, err_console 22 | 23 | if TYPE_CHECKING: 24 | from collections.abc import Callable, Coroutine, Generator 25 | 26 | from compose_farm.config import Config 27 | from compose_farm.executor import CommandResult 28 | 29 | _T = TypeVar("_T") 30 | 31 | 32 | # --- Shared CLI Options --- 33 | ServicesArg = Annotated[ 34 | list[str] | None, 35 | typer.Argument(help="Services to operate on"), 36 | ] 37 | AllOption = Annotated[ 38 | bool, 39 | typer.Option("--all", "-a", help="Run on all services"), 40 | ] 41 | ConfigOption = Annotated[ 42 | Path | None, 43 | typer.Option("--config", "-c", help="Path to config file"), 44 | ] 45 | LogPathOption = Annotated[ 46 | Path | None, 47 | typer.Option("--log-path", "-l", help="Path to Dockerfarm TOML log"), 48 | ] 49 | HostOption = Annotated[ 50 | str | None, 51 | typer.Option("--host", "-H", help="Filter to services on this host"), 52 | ] 53 | 54 | # --- Constants (internal) --- 55 | _MISSING_PATH_PREVIEW_LIMIT = 2 56 | _STATS_PREVIEW_LIMIT = 3 # Max number of pending migrations to show by name 57 | 58 | 59 | @contextlib.contextmanager 60 | def progress_bar(label: str, total: int) -> Generator[tuple[Progress, TaskID], None, None]: 61 | """Create a standardized progress bar with consistent styling. 62 | 63 | Yields (progress, task_id). Use progress.update(task_id, advance=1, description=...) 64 | to advance. 
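Minimal usage sketch (illustrative names):

    with progress_bar("Starting", len(services)) as (progress, task_id):
        for service in services:
            do_work(service)
            progress.update(task_id, advance=1, description=service)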
65 | """ 66 | with Progress( 67 | SpinnerColumn(), 68 | TextColumn(f"[bold blue]{label}[/]"), 69 | BarColumn(), 70 | MofNCompleteColumn(), 71 | TextColumn("•"), 72 | TimeElapsedColumn(), 73 | TextColumn("•"), 74 | TextColumn("[progress.description]{task.description}"), 75 | console=console, 76 | transient=True, 77 | ) as progress: 78 | task_id = progress.add_task("", total=total) 79 | yield progress, task_id 80 | 81 | 82 | def load_config_or_exit(config_path: Path | None) -> Config: 83 | """Load config or exit with a friendly error message.""" 84 | # Lazy import: pydantic adds ~50ms to startup, only load when actually needed 85 | from compose_farm.config import load_config # noqa: PLC0415 86 | 87 | try: 88 | return load_config(config_path) 89 | except FileNotFoundError as e: 90 | err_console.print(f"[red]✗[/] {e}") 91 | raise typer.Exit(1) from e 92 | 93 | 94 | def get_services( 95 | services: list[str], 96 | all_services: bool, 97 | config_path: Path | None, 98 | ) -> tuple[list[str], Config]: 99 | """Resolve service list and load config.""" 100 | config = load_config_or_exit(config_path) 101 | 102 | if all_services: 103 | return list(config.services.keys()), config 104 | if not services: 105 | err_console.print("[red]✗[/] Specify services or use --all") 106 | raise typer.Exit(1) 107 | return list(services), config 108 | 109 | 110 | def run_async(coro: Coroutine[None, None, _T]) -> _T: 111 | """Run async coroutine.""" 112 | try: 113 | return asyncio.run(coro) 114 | except KeyboardInterrupt: 115 | console.print("\n[yellow]Interrupted[/]") 116 | raise typer.Exit(130) from None # Standard exit code for SIGINT 117 | 118 | 119 | def report_results(results: list[CommandResult]) -> None: 120 | """Report command results and exit with appropriate code.""" 121 | succeeded = [r for r in results if r.success] 122 | failed = [r for r in results if not r.success] 123 | 124 | # Always print summary when there are multiple results 125 | if len(results) > 1: 126 | console.print() # Blank line before summary 127 | if failed: 128 | for r in failed: 129 | err_console.print( 130 | f"[red]✗[/] [cyan]{r.service}[/] failed with exit code {r.exit_code}" 131 | ) 132 | console.print() 133 | console.print( 134 | f"[green]✓[/] {len(succeeded)}/{len(results)} services succeeded, " 135 | f"[red]✗[/] {len(failed)} failed" 136 | ) 137 | else: 138 | console.print(f"[green]✓[/] All {len(results)} services succeeded") 139 | 140 | elif failed: 141 | # Single service failed 142 | r = failed[0] 143 | err_console.print(f"[red]✗[/] [cyan]{r.service}[/] failed with exit code {r.exit_code}") 144 | 145 | if failed: 146 | raise typer.Exit(1) 147 | 148 | 149 | def maybe_regenerate_traefik( 150 | cfg: Config, 151 | results: list[CommandResult] | None = None, 152 | ) -> None: 153 | """Regenerate traefik config if traefik_file is configured. 154 | 155 | If results are provided, skips regeneration if all services failed. 
156 | """ 157 | if cfg.traefik_file is None: 158 | return 159 | 160 | # Skip if all services failed 161 | if results and not any(r.success for r in results): 162 | return 163 | 164 | # Lazy import: traefik/yaml adds startup time, only load when traefik_file is configured 165 | from compose_farm.traefik import ( # noqa: PLC0415 166 | generate_traefik_config, 167 | render_traefik_config, 168 | ) 169 | 170 | try: 171 | dynamic, warnings = generate_traefik_config(cfg, list(cfg.services.keys())) 172 | new_content = render_traefik_config(dynamic) 173 | 174 | # Check if content changed 175 | old_content = "" 176 | if cfg.traefik_file.exists(): 177 | old_content = cfg.traefik_file.read_text() 178 | 179 | if new_content != old_content: 180 | cfg.traefik_file.parent.mkdir(parents=True, exist_ok=True) 181 | cfg.traefik_file.write_text(new_content) 182 | console.print() # Ensure we're on a new line after streaming output 183 | console.print(f"[green]✓[/] Traefik config updated: {cfg.traefik_file}") 184 | 185 | for warning in warnings: 186 | err_console.print(f"[yellow]![/] {warning}") 187 | except (FileNotFoundError, ValueError) as exc: 188 | err_console.print(f"[yellow]![/] Failed to update traefik config: {exc}") 189 | 190 | 191 | def validate_host_for_service(cfg: Config, service: str, host: str) -> None: 192 | """Validate that a host is valid for a service.""" 193 | if host not in cfg.hosts: 194 | err_console.print(f"[red]✗[/] Host '{host}' not found in config") 195 | raise typer.Exit(1) 196 | allowed_hosts = cfg.get_hosts(service) 197 | if host not in allowed_hosts: 198 | err_console.print( 199 | f"[red]✗[/] Service '{service}' is not configured for host '{host}' " 200 | f"(configured: {', '.join(allowed_hosts)})" 201 | ) 202 | raise typer.Exit(1) 203 | 204 | 205 | def run_host_operation( 206 | cfg: Config, 207 | svc_list: list[str], 208 | host: str, 209 | command: str, 210 | action_verb: str, 211 | state_callback: Callable[[Config, str, str], None], 212 | ) -> None: 213 | """Run an operation on a specific host for multiple services.""" 214 | from compose_farm.executor import run_compose_on_host # noqa: PLC0415 215 | 216 | results: list[CommandResult] = [] 217 | for service in svc_list: 218 | validate_host_for_service(cfg, service, host) 219 | console.print(f"[cyan]\\[{service}][/] {action_verb} on [magenta]{host}[/]...") 220 | result = run_async(run_compose_on_host(cfg, service, host, command, raw=True)) 221 | print() # Newline after raw output 222 | results.append(result) 223 | if result.success: 224 | state_callback(cfg, service, host) 225 | maybe_regenerate_traefik(cfg, results) 226 | report_results(results) 227 | -------------------------------------------------------------------------------- /tests/test_cli_logs.py: -------------------------------------------------------------------------------- 1 | """Tests for CLI logs command.""" 2 | 3 | from collections.abc import Coroutine 4 | from pathlib import Path 5 | from typing import Any 6 | from unittest.mock import patch 7 | 8 | import pytest 9 | import typer 10 | 11 | from compose_farm.cli.monitoring import logs 12 | from compose_farm.config import Config, Host 13 | from compose_farm.executor import CommandResult 14 | 15 | 16 | def _make_config(tmp_path: Path) -> Config: 17 | """Create a minimal config for testing.""" 18 | compose_dir = tmp_path / "compose" 19 | compose_dir.mkdir() 20 | for svc in ("svc1", "svc2", "svc3"): 21 | svc_dir = compose_dir / svc 22 | svc_dir.mkdir() 23 | (svc_dir / "docker-compose.yml").write_text("services: {}\n") 
24 | 25 | return Config( 26 | compose_dir=compose_dir, 27 | hosts={"local": Host(address="localhost"), "remote": Host(address="192.168.1.10")}, 28 | services={"svc1": "local", "svc2": "local", "svc3": "remote"}, 29 | ) 30 | 31 | 32 | def _make_result(service: str) -> CommandResult: 33 | """Create a successful command result.""" 34 | return CommandResult(service=service, exit_code=0, success=True, stdout="", stderr="") 35 | 36 | 37 | def _mock_run_async_factory( 38 | services: list[str], 39 | ) -> tuple[Any, list[CommandResult]]: 40 | """Create a mock run_async that returns results for given services.""" 41 | results = [_make_result(s) for s in services] 42 | 43 | def mock_run_async(_coro: Coroutine[Any, Any, Any]) -> list[CommandResult]: 44 | return results 45 | 46 | return mock_run_async, results 47 | 48 | 49 | class TestLogsContextualDefault: 50 | """Tests for logs --tail contextual default behavior.""" 51 | 52 | def test_logs_all_services_defaults_to_20(self, tmp_path: Path) -> None: 53 | """When --all is specified, default tail should be 20.""" 54 | cfg = _make_config(tmp_path) 55 | mock_run_async, _ = _mock_run_async_factory(["svc1", "svc2", "svc3"]) 56 | 57 | with ( 58 | patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg), 59 | patch("compose_farm.cli.common.load_config_or_exit", return_value=cfg), 60 | patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async), 61 | patch("compose_farm.cli.monitoring.run_on_services") as mock_run, 62 | ): 63 | mock_run.return_value = None 64 | 65 | logs(services=None, all_services=True, host=None, follow=False, tail=None, config=None) 66 | 67 | mock_run.assert_called_once() 68 | call_args = mock_run.call_args 69 | assert call_args[0][2] == "logs --tail 20" 70 | 71 | def test_logs_single_service_defaults_to_100(self, tmp_path: Path) -> None: 72 | """When specific services are specified, default tail should be 100.""" 73 | cfg = _make_config(tmp_path) 74 | mock_run_async, _ = _mock_run_async_factory(["svc1"]) 75 | 76 | with ( 77 | patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg), 78 | patch("compose_farm.cli.common.load_config_or_exit", return_value=cfg), 79 | patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async), 80 | patch("compose_farm.cli.monitoring.run_on_services") as mock_run, 81 | ): 82 | logs( 83 | services=["svc1"], 84 | all_services=False, 85 | host=None, 86 | follow=False, 87 | tail=None, 88 | config=None, 89 | ) 90 | 91 | mock_run.assert_called_once() 92 | call_args = mock_run.call_args 93 | assert call_args[0][2] == "logs --tail 100" 94 | 95 | def test_logs_explicit_tail_overrides_default(self, tmp_path: Path) -> None: 96 | """When --tail is explicitly provided, it should override the default.""" 97 | cfg = _make_config(tmp_path) 98 | mock_run_async, _ = _mock_run_async_factory(["svc1", "svc2", "svc3"]) 99 | 100 | with ( 101 | patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg), 102 | patch("compose_farm.cli.common.load_config_or_exit", return_value=cfg), 103 | patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async), 104 | patch("compose_farm.cli.monitoring.run_on_services") as mock_run, 105 | ): 106 | logs( 107 | services=None, 108 | all_services=True, 109 | host=None, 110 | follow=False, 111 | tail=50, 112 | config=None, 113 | ) 114 | 115 | mock_run.assert_called_once() 116 | call_args = mock_run.call_args 117 | assert call_args[0][2] == "logs --tail 50" 118 | 119 | def test_logs_follow_appends_flag(self, 
tmp_path: Path) -> None: 120 | """When --follow is specified, -f should be appended to command.""" 121 | cfg = _make_config(tmp_path) 122 | mock_run_async, _ = _mock_run_async_factory(["svc1"]) 123 | 124 | with ( 125 | patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg), 126 | patch("compose_farm.cli.common.load_config_or_exit", return_value=cfg), 127 | patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async), 128 | patch("compose_farm.cli.monitoring.run_on_services") as mock_run, 129 | ): 130 | logs( 131 | services=["svc1"], 132 | all_services=False, 133 | host=None, 134 | follow=True, 135 | tail=None, 136 | config=None, 137 | ) 138 | 139 | mock_run.assert_called_once() 140 | call_args = mock_run.call_args 141 | assert call_args[0][2] == "logs --tail 100 -f" 142 | 143 | 144 | class TestLogsHostFilter: 145 | """Tests for logs --host filter behavior.""" 146 | 147 | def test_logs_host_filter_selects_services_on_host(self, tmp_path: Path) -> None: 148 | """When --host is specified, only services on that host are included.""" 149 | cfg = _make_config(tmp_path) 150 | mock_run_async, _ = _mock_run_async_factory(["svc1", "svc2"]) 151 | 152 | with ( 153 | patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg), 154 | patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async), 155 | patch("compose_farm.cli.monitoring.run_on_services") as mock_run, 156 | ): 157 | logs( 158 | services=None, 159 | all_services=False, 160 | host="local", 161 | follow=False, 162 | tail=None, 163 | config=None, 164 | ) 165 | 166 | mock_run.assert_called_once() 167 | call_args = mock_run.call_args 168 | # svc1 and svc2 are on "local", svc3 is on "remote" 169 | assert set(call_args[0][1]) == {"svc1", "svc2"} 170 | 171 | def test_logs_host_filter_defaults_to_20_lines(self, tmp_path: Path) -> None: 172 | """When --host is specified, default tail should be 20 (multiple services).""" 173 | cfg = _make_config(tmp_path) 174 | mock_run_async, _ = _mock_run_async_factory(["svc1", "svc2"]) 175 | 176 | with ( 177 | patch("compose_farm.cli.monitoring.load_config_or_exit", return_value=cfg), 178 | patch("compose_farm.cli.monitoring.run_async", side_effect=mock_run_async), 179 | patch("compose_farm.cli.monitoring.run_on_services") as mock_run, 180 | ): 181 | logs( 182 | services=None, 183 | all_services=False, 184 | host="local", 185 | follow=False, 186 | tail=None, 187 | config=None, 188 | ) 189 | 190 | mock_run.assert_called_once() 191 | call_args = mock_run.call_args 192 | assert call_args[0][2] == "logs --tail 20" 193 | 194 | def test_logs_all_and_host_mutually_exclusive(self) -> None: 195 | """Using --all and --host together should error.""" 196 | # No config mock needed - error is raised before config is loaded 197 | with pytest.raises(typer.Exit) as exc_info: 198 | logs( 199 | services=None, 200 | all_services=True, 201 | host="local", 202 | follow=False, 203 | tail=None, 204 | config=None, 205 | ) 206 | 207 | assert exc_info.value.exit_code == 1 208 | -------------------------------------------------------------------------------- /src/compose_farm/cli/monitoring.py: -------------------------------------------------------------------------------- 1 | """Monitoring commands: logs, ps, stats.""" 2 | 3 | from __future__ import annotations 4 | 5 | import asyncio 6 | import contextlib 7 | from typing import TYPE_CHECKING, Annotated 8 | 9 | import typer 10 | from rich.progress import Progress, TaskID # noqa: TC002 11 | from rich.table import Table 12 | 13 | 
from compose_farm.cli.app import app 14 | from compose_farm.cli.common import ( 15 | _STATS_PREVIEW_LIMIT, 16 | AllOption, 17 | ConfigOption, 18 | HostOption, 19 | ServicesArg, 20 | get_services, 21 | load_config_or_exit, 22 | progress_bar, 23 | report_results, 24 | run_async, 25 | ) 26 | from compose_farm.console import console, err_console 27 | from compose_farm.executor import run_command, run_on_services 28 | from compose_farm.state import get_services_needing_migration, load_state 29 | 30 | if TYPE_CHECKING: 31 | from collections.abc import Mapping 32 | 33 | from compose_farm.config import Config 34 | 35 | 36 | def _group_services_by_host( 37 | services: dict[str, str | list[str]], 38 | hosts: Mapping[str, object], 39 | all_hosts: list[str] | None = None, 40 | ) -> dict[str, list[str]]: 41 | """Group services by their assigned host(s). 42 | 43 | For multi-host services (list or "all"), the service appears in multiple host lists. 44 | """ 45 | by_host: dict[str, list[str]] = {h: [] for h in hosts} 46 | for service, host_value in services.items(): 47 | if isinstance(host_value, list): 48 | # Explicit list of hosts 49 | for host_name in host_value: 50 | if host_name in by_host: 51 | by_host[host_name].append(service) 52 | elif host_value == "all" and all_hosts: 53 | # "all" keyword - add to all hosts 54 | for host_name in all_hosts: 55 | if host_name in by_host: 56 | by_host[host_name].append(service) 57 | elif host_value in by_host: 58 | # Single host 59 | by_host[host_value].append(service) 60 | return by_host 61 | 62 | 63 | def _get_container_counts(cfg: Config) -> dict[str, int]: 64 | """Get container counts from all hosts with a progress bar.""" 65 | 66 | async def get_count(host_name: str) -> tuple[str, int]: 67 | host = cfg.hosts[host_name] 68 | result = await run_command(host, "docker ps -q | wc -l", host_name, stream=False) 69 | count = 0 70 | if result.success: 71 | with contextlib.suppress(ValueError): 72 | count = int(result.stdout.strip()) 73 | return host_name, count 74 | 75 | async def gather_with_progress(progress: Progress, task_id: TaskID) -> dict[str, int]: 76 | hosts = list(cfg.hosts.keys()) 77 | tasks = [asyncio.create_task(get_count(h)) for h in hosts] 78 | results: dict[str, int] = {} 79 | for coro in asyncio.as_completed(tasks): 80 | host_name, count = await coro 81 | results[host_name] = count 82 | progress.update(task_id, advance=1, description=f"[cyan]{host_name}[/]") 83 | return results 84 | 85 | with progress_bar("Querying hosts", len(cfg.hosts)) as (progress, task_id): 86 | return asyncio.run(gather_with_progress(progress, task_id)) 87 | 88 | 89 | def _build_host_table( 90 | cfg: Config, 91 | services_by_host: dict[str, list[str]], 92 | running_by_host: dict[str, list[str]], 93 | container_counts: dict[str, int], 94 | *, 95 | show_containers: bool, 96 | ) -> Table: 97 | """Build the hosts table.""" 98 | table = Table(title="Hosts", show_header=True, header_style="bold cyan") 99 | table.add_column("Host", style="magenta") 100 | table.add_column("Address") 101 | table.add_column("Configured", justify="right") 102 | table.add_column("Running", justify="right") 103 | if show_containers: 104 | table.add_column("Containers", justify="right") 105 | 106 | for host_name in sorted(cfg.hosts.keys()): 107 | host = cfg.hosts[host_name] 108 | configured = len(services_by_host[host_name]) 109 | running = len(running_by_host[host_name]) 110 | 111 | row = [ 112 | host_name, 113 | host.address, 114 | str(configured), 115 | str(running) if running > 0 else "[dim]0[/]", 116 | ] 
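# Optional column: "Containers" is appended only when the caller passed show_containers=True (i.e. stats --live).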
117 | if show_containers: 118 | count = container_counts.get(host_name, 0) 119 | row.append(str(count) if count > 0 else "[dim]0[/]") 120 | 121 | table.add_row(*row) 122 | return table 123 | 124 | 125 | def _build_summary_table( 126 | cfg: Config, state: dict[str, str | list[str]], pending: list[str] 127 | ) -> Table: 128 | """Build the summary table.""" 129 | on_disk = cfg.discover_compose_dirs() 130 | 131 | table = Table(title="Summary", show_header=False) 132 | table.add_column("Label", style="dim") 133 | table.add_column("Value", style="bold") 134 | 135 | table.add_row("Total hosts", str(len(cfg.hosts))) 136 | table.add_row("Services (configured)", str(len(cfg.services))) 137 | table.add_row("Services (tracked)", str(len(state))) 138 | table.add_row("Compose files on disk", str(len(on_disk))) 139 | 140 | if pending: 141 | preview = ", ".join(pending[:_STATS_PREVIEW_LIMIT]) 142 | suffix = "..." if len(pending) > _STATS_PREVIEW_LIMIT else "" 143 | table.add_row("Pending migrations", f"[yellow]{len(pending)}[/] ({preview}{suffix})") 144 | else: 145 | table.add_row("Pending migrations", "[green]0[/]") 146 | 147 | return table 148 | 149 | 150 | # --- Command functions --- 151 | 152 | 153 | @app.command(rich_help_panel="Monitoring") 154 | def logs( 155 | services: ServicesArg = None, 156 | all_services: AllOption = False, 157 | host: HostOption = None, 158 | follow: Annotated[bool, typer.Option("--follow", "-f", help="Follow logs")] = False, 159 | tail: Annotated[ 160 | int | None, 161 | typer.Option("--tail", "-n", help="Number of lines (default: 20 when showing multiple services, 100 for a single service)"), 162 | ] = None, 163 | config: ConfigOption = None, 164 | ) -> None: 165 | """Show service logs.""" 166 | if all_services and host is not None: 167 | err_console.print("[red]✗[/] Cannot use --all and --host together") 168 | raise typer.Exit(1) 169 | 170 | cfg = load_config_or_exit(config) 171 | 172 | # Determine service list based on options 173 | if host is not None: 174 | if host not in cfg.hosts: 175 | err_console.print(f"[red]✗[/] Host '{host}' not found in config") 176 | raise typer.Exit(1) 177 | # Include services where host is in the list of configured hosts 178 | svc_list = [s for s in cfg.services if host in cfg.get_hosts(s)] 179 | if not svc_list: 180 | err_console.print(f"[yellow]![/] No services configured for host '{host}'") 181 | return 182 | else: 183 | svc_list, cfg = get_services(services or [], all_services, config) 184 | 185 | # Default to fewer lines when showing multiple services 186 | many_services = all_services or host is not None or len(svc_list) > 1 187 | effective_tail = tail if tail is not None else (20 if many_services else 100) 188 | cmd = f"logs --tail {effective_tail}" 189 | if follow: 190 | cmd += " -f" 191 | results = run_async(run_on_services(cfg, svc_list, cmd)) 192 | report_results(results) 193 | 194 | 195 | @app.command(rich_help_panel="Monitoring") 196 | def ps( 197 | config: ConfigOption = None, 198 | ) -> None: 199 | """Show status of all services.""" 200 | cfg = load_config_or_exit(config) 201 | results = run_async(run_on_services(cfg, list(cfg.services.keys()), "ps")) 202 | report_results(results) 203 | 204 | 205 | @app.command(rich_help_panel="Monitoring") 206 | def stats( 207 | live: Annotated[ 208 | bool, 209 | typer.Option("--live", "-l", help="Query Docker for live container counts"), 210 | ] = False, 211 | config: ConfigOption = None, 212 | ) -> None: 213 | """Show overview statistics for hosts and services. 
214 | 215 | Without --live: Shows config/state info (hosts, services, pending migrations). 216 | With --live: Also queries Docker on each host for container counts. 217 | """ 218 | cfg = load_config_or_exit(config) 219 | state = load_state(cfg) 220 | pending = get_services_needing_migration(cfg) 221 | 222 | all_hosts = list(cfg.hosts.keys()) 223 | services_by_host = _group_services_by_host(cfg.services, cfg.hosts, all_hosts) 224 | running_by_host = _group_services_by_host(state, cfg.hosts, all_hosts) 225 | 226 | container_counts: dict[str, int] = {} 227 | if live: 228 | container_counts = _get_container_counts(cfg) 229 | 230 | host_table = _build_host_table( 231 | cfg, services_by_host, running_by_host, container_counts, show_containers=live 232 | ) 233 | console.print(host_table) 234 | 235 | console.print() 236 | console.print(_build_summary_table(cfg, state, pending)) 237 | -------------------------------------------------------------------------------- /tests/test_executor.py: -------------------------------------------------------------------------------- 1 | """Tests for executor module.""" 2 | 3 | import sys 4 | from pathlib import Path 5 | 6 | import pytest 7 | 8 | from compose_farm.config import Config, Host 9 | from compose_farm.executor import ( 10 | CommandResult, 11 | _run_local_command, 12 | check_networks_exist, 13 | check_paths_exist, 14 | is_local, 15 | run_command, 16 | run_compose, 17 | run_on_services, 18 | ) 19 | 20 | # These tests run actual shell commands that only work on Linux 21 | linux_only = pytest.mark.skipif(sys.platform != "linux", reason="Linux-only shell commands") 22 | 23 | 24 | class TestIsLocal: 25 | """Tests for is_local function.""" 26 | 27 | @pytest.mark.parametrize( 28 | "address", 29 | ["local", "localhost", "127.0.0.1", "::1", "LOCAL", "LOCALHOST"], 30 | ) 31 | def test_local_addresses(self, address: str) -> None: 32 | host = Host(address=address) 33 | assert is_local(host) is True 34 | 35 | @pytest.mark.parametrize( 36 | "address", 37 | ["192.168.1.10", "nas01.local", "10.0.0.1", "example.com"], 38 | ) 39 | def test_remote_addresses(self, address: str) -> None: 40 | host = Host(address=address) 41 | assert is_local(host) is False 42 | 43 | 44 | class TestRunLocalCommand: 45 | """Tests for local command execution.""" 46 | 47 | async def test_run_local_command_success(self) -> None: 48 | result = await _run_local_command("echo hello", "test-service") 49 | assert result.success is True 50 | assert result.exit_code == 0 51 | assert result.service == "test-service" 52 | 53 | async def test_run_local_command_failure(self) -> None: 54 | result = await _run_local_command("exit 1", "test-service") 55 | assert result.success is False 56 | assert result.exit_code == 1 57 | 58 | async def test_run_local_command_not_found(self) -> None: 59 | result = await _run_local_command("nonexistent_command_xyz", "test-service") 60 | assert result.success is False 61 | assert result.exit_code != 0 62 | 63 | async def test_run_local_command_captures_output(self) -> None: 64 | result = await _run_local_command("echo hello", "test-service", stream=False) 65 | assert "hello" in result.stdout 66 | 67 | 68 | class TestRunCommand: 69 | """Tests for run_command dispatcher.""" 70 | 71 | async def test_run_command_local(self) -> None: 72 | host = Host(address="localhost") 73 | result = await run_command(host, "echo test", "test-service") 74 | assert result.success is True 75 | 76 | async def test_run_command_result_structure(self) -> None: 77 | host = Host(address="local") 78 | 
result = await run_command(host, "true", "my-service") 79 | assert isinstance(result, CommandResult) 80 | assert result.service == "my-service" 81 | assert result.exit_code == 0 82 | assert result.success is True 83 | 84 | 85 | class TestRunCompose: 86 | """Tests for compose command execution.""" 87 | 88 | async def test_run_compose_builds_correct_command(self, tmp_path: Path) -> None: 89 | # Create a minimal compose file 90 | compose_dir = tmp_path / "compose" 91 | service_dir = compose_dir / "test-service" 92 | service_dir.mkdir(parents=True) 93 | compose_file = service_dir / "docker-compose.yml" 94 | compose_file.write_text("services: {}") 95 | 96 | config = Config( 97 | compose_dir=compose_dir, 98 | hosts={"local": Host(address="localhost")}, 99 | services={"test-service": "local"}, 100 | ) 101 | 102 | # This will fail because docker compose isn't running, 103 | # but we can verify the command structure works 104 | result = await run_compose(config, "test-service", "config", stream=False) 105 | # Command may fail due to no docker, but structure is correct 106 | assert result.service == "test-service" 107 | 108 | 109 | class TestRunOnServices: 110 | """Tests for parallel service execution.""" 111 | 112 | async def test_run_on_services_parallel(self) -> None: 113 | config = Config( 114 | compose_dir=Path("/tmp"), 115 | hosts={"local": Host(address="localhost")}, 116 | services={"svc1": "local", "svc2": "local"}, 117 | ) 118 | 119 | # Use a simple command that will work without docker 120 | # We'll test the parallelism structure 121 | results = await run_on_services(config, ["svc1", "svc2"], "version", stream=False) 122 | assert len(results) == 2 123 | assert results[0].service == "svc1" 124 | assert results[1].service == "svc2" 125 | 126 | 127 | @linux_only 128 | class TestCheckPathsExist: 129 | """Tests for check_paths_exist function (uses 'test -e' shell command).""" 130 | 131 | async def test_check_existing_paths(self, tmp_path: Path) -> None: 132 | """Check paths that exist.""" 133 | config = Config( 134 | compose_dir=tmp_path, 135 | hosts={"local": Host(address="localhost")}, 136 | services={}, 137 | ) 138 | # Create test paths 139 | (tmp_path / "dir1").mkdir() 140 | (tmp_path / "file1").touch() 141 | 142 | result = await check_paths_exist( 143 | config, "local", [str(tmp_path / "dir1"), str(tmp_path / "file1")] 144 | ) 145 | 146 | assert result[str(tmp_path / "dir1")] is True 147 | assert result[str(tmp_path / "file1")] is True 148 | 149 | async def test_check_missing_paths(self, tmp_path: Path) -> None: 150 | """Check paths that don't exist.""" 151 | config = Config( 152 | compose_dir=tmp_path, 153 | hosts={"local": Host(address="localhost")}, 154 | services={}, 155 | ) 156 | 157 | result = await check_paths_exist( 158 | config, "local", [str(tmp_path / "missing1"), str(tmp_path / "missing2")] 159 | ) 160 | 161 | assert result[str(tmp_path / "missing1")] is False 162 | assert result[str(tmp_path / "missing2")] is False 163 | 164 | async def test_check_mixed_paths(self, tmp_path: Path) -> None: 165 | """Check mix of existing and missing paths.""" 166 | config = Config( 167 | compose_dir=tmp_path, 168 | hosts={"local": Host(address="localhost")}, 169 | services={}, 170 | ) 171 | (tmp_path / "exists").mkdir() 172 | 173 | result = await check_paths_exist( 174 | config, "local", [str(tmp_path / "exists"), str(tmp_path / "missing")] 175 | ) 176 | 177 | assert result[str(tmp_path / "exists")] is True 178 | assert result[str(tmp_path / "missing")] is False 179 | 180 | async def 
test_check_empty_paths(self, tmp_path: Path) -> None: 181 | """Empty path list returns empty dict.""" 182 | config = Config( 183 | compose_dir=tmp_path, 184 | hosts={"local": Host(address="localhost")}, 185 | services={}, 186 | ) 187 | 188 | result = await check_paths_exist(config, "local", []) 189 | assert result == {} 190 | 191 | 192 | @linux_only 193 | class TestCheckNetworksExist: 194 | """Tests for check_networks_exist function (requires Docker).""" 195 | 196 | async def test_check_bridge_network_exists(self, tmp_path: Path) -> None: 197 | """The 'bridge' network always exists on Docker hosts.""" 198 | config = Config( 199 | compose_dir=tmp_path, 200 | hosts={"local": Host(address="localhost")}, 201 | services={}, 202 | ) 203 | 204 | result = await check_networks_exist(config, "local", ["bridge"]) 205 | assert result["bridge"] is True 206 | 207 | async def test_check_nonexistent_network(self, tmp_path: Path) -> None: 208 | """Check a network that doesn't exist.""" 209 | config = Config( 210 | compose_dir=tmp_path, 211 | hosts={"local": Host(address="localhost")}, 212 | services={}, 213 | ) 214 | 215 | result = await check_networks_exist(config, "local", ["nonexistent_network_xyz_123"]) 216 | assert result["nonexistent_network_xyz_123"] is False 217 | 218 | async def test_check_mixed_networks(self, tmp_path: Path) -> None: 219 | """Check mix of existing and non-existing networks.""" 220 | config = Config( 221 | compose_dir=tmp_path, 222 | hosts={"local": Host(address="localhost")}, 223 | services={}, 224 | ) 225 | 226 | result = await check_networks_exist( 227 | config, "local", ["bridge", "nonexistent_network_xyz_123"] 228 | ) 229 | assert result["bridge"] is True 230 | assert result["nonexistent_network_xyz_123"] is False 231 | 232 | async def test_check_empty_networks(self, tmp_path: Path) -> None: 233 | """Empty network list returns empty dict.""" 234 | config = Config( 235 | compose_dir=tmp_path, 236 | hosts={"local": Host(address="localhost")}, 237 | services={}, 238 | ) 239 | 240 | result = await check_networks_exist(config, "local", []) 241 | assert result == {} 242 | -------------------------------------------------------------------------------- /tests/test_config_cmd.py: -------------------------------------------------------------------------------- 1 | """Tests for config command module.""" 2 | 3 | from pathlib import Path 4 | from typing import Any 5 | 6 | import pytest 7 | import yaml 8 | from typer.testing import CliRunner 9 | 10 | from compose_farm.cli import app 11 | from compose_farm.cli.config import ( 12 | _generate_template, 13 | _get_config_file, 14 | _get_editor, 15 | ) 16 | 17 | 18 | @pytest.fixture 19 | def runner() -> CliRunner: 20 | return CliRunner() 21 | 22 | 23 | @pytest.fixture 24 | def valid_config_data() -> dict[str, Any]: 25 | return { 26 | "compose_dir": "/opt/compose", 27 | "hosts": {"server1": "192.168.1.10"}, 28 | "services": {"nginx": "server1"}, 29 | } 30 | 31 | 32 | class TestGetEditor: 33 | """Tests for _get_editor function.""" 34 | 35 | def test_uses_editor_env(self, monkeypatch: pytest.MonkeyPatch) -> None: 36 | monkeypatch.setenv("EDITOR", "code") 37 | monkeypatch.delenv("VISUAL", raising=False) 38 | assert _get_editor() == "code" 39 | 40 | def test_uses_visual_env(self, monkeypatch: pytest.MonkeyPatch) -> None: 41 | monkeypatch.delenv("EDITOR", raising=False) 42 | monkeypatch.setenv("VISUAL", "subl") 43 | assert _get_editor() == "subl" 44 | 45 | def test_editor_takes_precedence(self, monkeypatch: pytest.MonkeyPatch) -> None: 46 | 
monkeypatch.setenv("EDITOR", "vim") 47 | monkeypatch.setenv("VISUAL", "code") 48 | assert _get_editor() == "vim" 49 | 50 | 51 | class TestGetConfigFile: 52 | """Tests for _get_config_file function.""" 53 | 54 | def test_explicit_path(self, tmp_path: Path) -> None: 55 | config_file = tmp_path / "my-config.yaml" 56 | config_file.touch() 57 | result = _get_config_file(config_file) 58 | assert result == config_file.resolve() 59 | 60 | def test_cf_config_env(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: 61 | config_file = tmp_path / "env-config.yaml" 62 | config_file.touch() 63 | monkeypatch.setenv("CF_CONFIG", str(config_file)) 64 | result = _get_config_file(None) 65 | assert result == config_file.resolve() 66 | 67 | def test_returns_none_when_not_found( 68 | self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch 69 | ) -> None: 70 | monkeypatch.chdir(tmp_path) 71 | monkeypatch.delenv("CF_CONFIG", raising=False) 72 | # Set XDG_CONFIG_HOME to a nonexistent path - config_search_paths() will 73 | # now return paths that don't exist 74 | monkeypatch.setenv("XDG_CONFIG_HOME", str(tmp_path / "nonexistent")) 75 | result = _get_config_file(None) 76 | assert result is None 77 | 78 | 79 | class TestGenerateTemplate: 80 | """Tests for _generate_template function.""" 81 | 82 | def test_generates_valid_yaml(self) -> None: 83 | template = _generate_template() 84 | # Should be valid YAML 85 | data = yaml.safe_load(template) 86 | assert "compose_dir" in data 87 | assert "hosts" in data 88 | assert "services" in data 89 | 90 | def test_has_documentation_comments(self) -> None: 91 | template = _generate_template() 92 | assert "# Compose Farm configuration" in template 93 | assert "hosts:" in template 94 | assert "services:" in template 95 | 96 | 97 | class TestConfigInit: 98 | """Tests for cf config init command.""" 99 | 100 | def test_init_creates_file( 101 | self, runner: CliRunner, tmp_path: Path, monkeypatch: pytest.MonkeyPatch 102 | ) -> None: 103 | monkeypatch.delenv("CF_CONFIG", raising=False) 104 | config_file = tmp_path / "new-config.yaml" 105 | result = runner.invoke(app, ["config", "init", "-p", str(config_file)]) 106 | assert result.exit_code == 0 107 | assert config_file.exists() 108 | assert "Config file created" in result.stdout 109 | 110 | def test_init_force_overwrites( 111 | self, runner: CliRunner, tmp_path: Path, monkeypatch: pytest.MonkeyPatch 112 | ) -> None: 113 | monkeypatch.delenv("CF_CONFIG", raising=False) 114 | config_file = tmp_path / "existing.yaml" 115 | config_file.write_text("old content") 116 | result = runner.invoke(app, ["config", "init", "-p", str(config_file), "-f"]) 117 | assert result.exit_code == 0 118 | content = config_file.read_text() 119 | assert "old content" not in content 120 | assert "compose_dir" in content 121 | 122 | def test_init_prompts_on_existing( 123 | self, runner: CliRunner, tmp_path: Path, monkeypatch: pytest.MonkeyPatch 124 | ) -> None: 125 | monkeypatch.delenv("CF_CONFIG", raising=False) 126 | config_file = tmp_path / "existing.yaml" 127 | config_file.write_text("old content") 128 | result = runner.invoke(app, ["config", "init", "-p", str(config_file)], input="n\n") 129 | assert result.exit_code == 0 130 | assert "Aborted" in result.stdout 131 | assert config_file.read_text() == "old content" 132 | 133 | 134 | class TestConfigPath: 135 | """Tests for cf config path command.""" 136 | 137 | def test_path_shows_config( 138 | self, 139 | runner: CliRunner, 140 | tmp_path: Path, 141 | valid_config_data: dict[str, Any], 142 | monkeypatch: 
pytest.MonkeyPatch, 143 | ) -> None: 144 | monkeypatch.chdir(tmp_path) 145 | monkeypatch.delenv("CF_CONFIG", raising=False) 146 | config_file = tmp_path / "compose-farm.yaml" 147 | config_file.write_text(yaml.dump(valid_config_data)) 148 | result = runner.invoke(app, ["config", "path"]) 149 | assert result.exit_code == 0 150 | assert str(config_file) in result.stdout 151 | 152 | def test_path_with_explicit_path(self, runner: CliRunner, tmp_path: Path) -> None: 153 | # When explicitly provided, path is returned even if file doesn't exist 154 | nonexistent = tmp_path / "nonexistent.yaml" 155 | result = runner.invoke(app, ["config", "path", "-p", str(nonexistent)]) 156 | assert result.exit_code == 0 157 | assert str(nonexistent) in result.stdout 158 | 159 | 160 | class TestConfigShow: 161 | """Tests for cf config show command.""" 162 | 163 | def test_show_displays_content( 164 | self, 165 | runner: CliRunner, 166 | tmp_path: Path, 167 | valid_config_data: dict[str, Any], 168 | monkeypatch: pytest.MonkeyPatch, 169 | ) -> None: 170 | monkeypatch.chdir(tmp_path) 171 | monkeypatch.delenv("CF_CONFIG", raising=False) 172 | config_file = tmp_path / "compose-farm.yaml" 173 | config_file.write_text(yaml.dump(valid_config_data)) 174 | result = runner.invoke(app, ["config", "show"]) 175 | assert result.exit_code == 0 176 | assert "Config file:" in result.stdout 177 | 178 | def test_show_raw_output( 179 | self, 180 | runner: CliRunner, 181 | tmp_path: Path, 182 | valid_config_data: dict[str, Any], 183 | monkeypatch: pytest.MonkeyPatch, 184 | ) -> None: 185 | monkeypatch.chdir(tmp_path) 186 | monkeypatch.delenv("CF_CONFIG", raising=False) 187 | config_file = tmp_path / "compose-farm.yaml" 188 | content = yaml.dump(valid_config_data) 189 | config_file.write_text(content) 190 | result = runner.invoke(app, ["config", "show", "-r"]) 191 | assert result.exit_code == 0 192 | assert content in result.stdout 193 | 194 | 195 | class TestConfigValidate: 196 | """Tests for cf config validate command.""" 197 | 198 | def test_validate_valid_config( 199 | self, 200 | runner: CliRunner, 201 | tmp_path: Path, 202 | valid_config_data: dict[str, Any], 203 | monkeypatch: pytest.MonkeyPatch, 204 | ) -> None: 205 | monkeypatch.chdir(tmp_path) 206 | monkeypatch.delenv("CF_CONFIG", raising=False) 207 | config_file = tmp_path / "compose-farm.yaml" 208 | config_file.write_text(yaml.dump(valid_config_data)) 209 | result = runner.invoke(app, ["config", "validate"]) 210 | assert result.exit_code == 0 211 | assert "Valid config" in result.stdout 212 | assert "Hosts: 1" in result.stdout 213 | assert "Services: 1" in result.stdout 214 | 215 | def test_validate_invalid_config(self, runner: CliRunner, tmp_path: Path) -> None: 216 | config_file = tmp_path / "invalid.yaml" 217 | config_file.write_text("invalid: [yaml: content") 218 | result = runner.invoke(app, ["config", "validate", "-p", str(config_file)]) 219 | assert result.exit_code == 1 220 | # Error goes to stderr (captured in output when using CliRunner) 221 | output = result.stdout + (result.stderr or "") 222 | assert "Invalid config" in output or "✗" in output 223 | 224 | def test_validate_missing_config(self, runner: CliRunner, tmp_path: Path) -> None: 225 | nonexistent = tmp_path / "nonexistent.yaml" 226 | result = runner.invoke(app, ["config", "validate", "-p", str(nonexistent)]) 227 | assert result.exit_code == 1 228 | # Error goes to stderr 229 | output = result.stdout + (result.stderr or "") 230 | assert "Config file not found" in output or "not found" in output.lower() 231 
| -------------------------------------------------------------------------------- /tests/test_state.py: -------------------------------------------------------------------------------- 1 | """Tests for state module.""" 2 | 3 | from pathlib import Path 4 | 5 | import pytest 6 | 7 | from compose_farm.config import Config, Host 8 | from compose_farm.state import ( 9 | get_orphaned_services, 10 | get_service_host, 11 | get_services_not_in_state, 12 | load_state, 13 | remove_service, 14 | save_state, 15 | set_service_host, 16 | ) 17 | 18 | 19 | @pytest.fixture 20 | def config(tmp_path: Path) -> Config: 21 | """Create a config with a temporary config path for state storage.""" 22 | config_path = tmp_path / "compose-farm.yaml" 23 | config_path.write_text("") # Create empty file 24 | return Config( 25 | compose_dir=tmp_path / "compose", 26 | hosts={"nas01": Host(address="192.168.1.10")}, 27 | services={"plex": "nas01"}, 28 | config_path=config_path, 29 | ) 30 | 31 | 32 | class TestLoadState: 33 | """Tests for load_state function.""" 34 | 35 | def test_load_state_empty(self, config: Config) -> None: 36 | """Returns empty dict when state file doesn't exist.""" 37 | result = load_state(config) 38 | assert result == {} 39 | 40 | def test_load_state_with_data(self, config: Config) -> None: 41 | """Loads existing state from file.""" 42 | state_file = config.get_state_path() 43 | state_file.write_text("deployed:\n plex: nas01\n jellyfin: nas02\n") 44 | 45 | result = load_state(config) 46 | assert result == {"plex": "nas01", "jellyfin": "nas02"} 47 | 48 | def test_load_state_empty_file(self, config: Config) -> None: 49 | """Returns empty dict for empty file.""" 50 | state_file = config.get_state_path() 51 | state_file.write_text("") 52 | 53 | result = load_state(config) 54 | assert result == {} 55 | 56 | 57 | class TestSaveState: 58 | """Tests for save_state function.""" 59 | 60 | def test_save_state(self, config: Config) -> None: 61 | """Saves state to file.""" 62 | save_state(config, {"plex": "nas01", "jellyfin": "nas02"}) 63 | 64 | state_file = config.get_state_path() 65 | assert state_file.exists() 66 | content = state_file.read_text() 67 | assert "plex: nas01" in content 68 | assert "jellyfin: nas02" in content 69 | 70 | 71 | class TestGetServiceHost: 72 | """Tests for get_service_host function.""" 73 | 74 | def test_get_existing_service(self, config: Config) -> None: 75 | """Returns host for existing service.""" 76 | state_file = config.get_state_path() 77 | state_file.write_text("deployed:\n plex: nas01\n") 78 | 79 | host = get_service_host(config, "plex") 80 | assert host == "nas01" 81 | 82 | def test_get_nonexistent_service(self, config: Config) -> None: 83 | """Returns None for service not in state.""" 84 | state_file = config.get_state_path() 85 | state_file.write_text("deployed:\n plex: nas01\n") 86 | 87 | host = get_service_host(config, "unknown") 88 | assert host is None 89 | 90 | 91 | class TestSetServiceHost: 92 | """Tests for set_service_host function.""" 93 | 94 | def test_set_new_service(self, config: Config) -> None: 95 | """Adds new service to state.""" 96 | set_service_host(config, "plex", "nas01") 97 | 98 | result = load_state(config) 99 | assert result["plex"] == "nas01" 100 | 101 | def test_update_existing_service(self, config: Config) -> None: 102 | """Updates host for existing service.""" 103 | state_file = config.get_state_path() 104 | state_file.write_text("deployed:\n plex: nas01\n") 105 | 106 | set_service_host(config, "plex", "nas02") 107 | 108 | result = 
load_state(config) 109 | assert result["plex"] == "nas02" 110 | 111 | 112 | class TestRemoveService: 113 | """Tests for remove_service function.""" 114 | 115 | def test_remove_existing_service(self, config: Config) -> None: 116 | """Removes service from state.""" 117 | state_file = config.get_state_path() 118 | state_file.write_text("deployed:\n plex: nas01\n jellyfin: nas02\n") 119 | 120 | remove_service(config, "plex") 121 | 122 | result = load_state(config) 123 | assert "plex" not in result 124 | assert result["jellyfin"] == "nas02" 125 | 126 | def test_remove_nonexistent_service(self, config: Config) -> None: 127 | """Removing nonexistent service doesn't error.""" 128 | state_file = config.get_state_path() 129 | state_file.write_text("deployed:\n plex: nas01\n") 130 | 131 | remove_service(config, "unknown") # Should not raise 132 | 133 | result = load_state(config) 134 | assert result["plex"] == "nas01" 135 | 136 | 137 | class TestGetOrphanedServices: 138 | """Tests for get_orphaned_services function.""" 139 | 140 | def test_no_orphans(self, config: Config) -> None: 141 | """Returns empty dict when all services in state are in config.""" 142 | state_file = config.get_state_path() 143 | state_file.write_text("deployed:\n plex: nas01\n") 144 | 145 | result = get_orphaned_services(config) 146 | assert result == {} 147 | 148 | def test_finds_orphaned_service(self, config: Config) -> None: 149 | """Returns services in state but not in config.""" 150 | state_file = config.get_state_path() 151 | state_file.write_text("deployed:\n plex: nas01\n jellyfin: nas02\n") 152 | 153 | result = get_orphaned_services(config) 154 | # plex is in config, jellyfin is not 155 | assert result == {"jellyfin": "nas02"} 156 | 157 | def test_finds_orphaned_multi_host_service(self, config: Config) -> None: 158 | """Returns multi-host orphaned services with host list.""" 159 | state_file = config.get_state_path() 160 | state_file.write_text("deployed:\n plex: nas01\n dozzle:\n - nas01\n - nas02\n") 161 | 162 | result = get_orphaned_services(config) 163 | assert result == {"dozzle": ["nas01", "nas02"]} 164 | 165 | def test_empty_state(self, config: Config) -> None: 166 | """Returns empty dict when state is empty.""" 167 | result = get_orphaned_services(config) 168 | assert result == {} 169 | 170 | def test_all_orphaned(self, tmp_path: Path) -> None: 171 | """Returns all services when none are in config.""" 172 | config_path = tmp_path / "compose-farm.yaml" 173 | config_path.write_text("") 174 | cfg = Config( 175 | compose_dir=tmp_path / "compose", 176 | hosts={"nas01": Host(address="192.168.1.10")}, 177 | services={}, # No services in config 178 | config_path=config_path, 179 | ) 180 | state_file = cfg.get_state_path() 181 | state_file.write_text("deployed:\n plex: nas01\n jellyfin: nas02\n") 182 | 183 | result = get_orphaned_services(cfg) 184 | assert result == {"plex": "nas01", "jellyfin": "nas02"} 185 | 186 | 187 | class TestGetServicesNotInState: 188 | """Tests for get_services_not_in_state function.""" 189 | 190 | def test_all_in_state(self, config: Config) -> None: 191 | """Returns empty list when all services are in state.""" 192 | state_file = config.get_state_path() 193 | state_file.write_text("deployed:\n plex: nas01\n") 194 | 195 | result = get_services_not_in_state(config) 196 | assert result == [] 197 | 198 | def test_finds_missing_service(self, tmp_path: Path) -> None: 199 | """Returns services in config but not in state.""" 200 | config_path = tmp_path / "compose-farm.yaml" 201 | 
config_path.write_text("") 202 | cfg = Config( 203 | compose_dir=tmp_path / "compose", 204 | hosts={"nas01": Host(address="192.168.1.10")}, 205 | services={"plex": "nas01", "jellyfin": "nas01"}, 206 | config_path=config_path, 207 | ) 208 | state_file = cfg.get_state_path() 209 | state_file.write_text("deployed:\n plex: nas01\n") 210 | 211 | result = get_services_not_in_state(cfg) 212 | assert result == ["jellyfin"] 213 | 214 | def test_empty_state(self, tmp_path: Path) -> None: 215 | """Returns all services when state is empty.""" 216 | config_path = tmp_path / "compose-farm.yaml" 217 | config_path.write_text("") 218 | cfg = Config( 219 | compose_dir=tmp_path / "compose", 220 | hosts={"nas01": Host(address="192.168.1.10")}, 221 | services={"plex": "nas01", "jellyfin": "nas01"}, 222 | config_path=config_path, 223 | ) 224 | 225 | result = get_services_not_in_state(cfg) 226 | assert set(result) == {"plex", "jellyfin"} 227 | 228 | def test_empty_config(self, config: Config) -> None: 229 | """Returns empty list when config has no services.""" 230 | # config fixture has plex: nas01, but we need empty config 231 | config_path = config.config_path 232 | config_path.write_text("") 233 | cfg = Config( 234 | compose_dir=config.compose_dir, 235 | hosts={"nas01": Host(address="192.168.1.10")}, 236 | services={}, 237 | config_path=config_path, 238 | ) 239 | 240 | result = get_services_not_in_state(cfg) 241 | assert result == [] 242 | -------------------------------------------------------------------------------- /src/compose_farm/cli/lifecycle.py: -------------------------------------------------------------------------------- 1 | """Lifecycle commands: up, down, pull, restart, update, apply.""" 2 | 3 | from __future__ import annotations 4 | 5 | from typing import TYPE_CHECKING, Annotated 6 | 7 | import typer 8 | 9 | if TYPE_CHECKING: 10 | from compose_farm.config import Config 11 | 12 | from compose_farm.cli.app import app 13 | from compose_farm.cli.common import ( 14 | AllOption, 15 | ConfigOption, 16 | HostOption, 17 | ServicesArg, 18 | get_services, 19 | load_config_or_exit, 20 | maybe_regenerate_traefik, 21 | report_results, 22 | run_async, 23 | run_host_operation, 24 | ) 25 | from compose_farm.console import console, err_console 26 | from compose_farm.executor import run_on_services, run_sequential_on_services 27 | from compose_farm.operations import stop_orphaned_services, up_services 28 | from compose_farm.state import ( 29 | add_service_to_host, 30 | get_orphaned_services, 31 | get_service_host, 32 | get_services_needing_migration, 33 | get_services_not_in_state, 34 | remove_service, 35 | remove_service_from_host, 36 | ) 37 | 38 | 39 | @app.command(rich_help_panel="Lifecycle") 40 | def up( 41 | services: ServicesArg = None, 42 | all_services: AllOption = False, 43 | host: HostOption = None, 44 | config: ConfigOption = None, 45 | ) -> None: 46 | """Start services (docker compose up -d). 
Auto-migrates if host changed.""" 47 | svc_list, cfg = get_services(services or [], all_services, config) 48 | 49 | # Per-host operation: run on specific host only 50 | if host: 51 | run_host_operation(cfg, svc_list, host, "up -d", "Starting", add_service_to_host) 52 | return 53 | 54 | # Normal operation: use up_services with migration logic 55 | results = run_async(up_services(cfg, svc_list, raw=True)) 56 | maybe_regenerate_traefik(cfg, results) 57 | report_results(results) 58 | 59 | 60 | @app.command(rich_help_panel="Lifecycle") 61 | def down( 62 | services: ServicesArg = None, 63 | all_services: AllOption = False, 64 | orphaned: Annotated[ 65 | bool, 66 | typer.Option( 67 | "--orphaned", help="Stop orphaned services (in state but removed from config)" 68 | ), 69 | ] = False, 70 | host: HostOption = None, 71 | config: ConfigOption = None, 72 | ) -> None: 73 | """Stop services (docker compose down).""" 74 | # Handle --orphaned flag 75 | if orphaned: 76 | if services or all_services or host: 77 | err_console.print("[red]✗[/] Cannot use --orphaned with services, --all, or --host") 78 | raise typer.Exit(1) 79 | 80 | cfg = load_config_or_exit(config) 81 | orphaned_services = get_orphaned_services(cfg) 82 | 83 | if not orphaned_services: 84 | console.print("[green]✓[/] No orphaned services to stop") 85 | return 86 | 87 | console.print( 88 | f"[yellow]Stopping {len(orphaned_services)} orphaned service(s):[/] " 89 | f"{', '.join(orphaned_services.keys())}" 90 | ) 91 | results = run_async(stop_orphaned_services(cfg)) 92 | report_results(results) 93 | return 94 | 95 | svc_list, cfg = get_services(services or [], all_services, config) 96 | 97 | # Per-host operation: run on specific host only 98 | if host: 99 | run_host_operation(cfg, svc_list, host, "down", "Stopping", remove_service_from_host) 100 | return 101 | 102 | # Normal operation 103 | raw = len(svc_list) == 1 104 | results = run_async(run_on_services(cfg, svc_list, "down", raw=raw)) 105 | 106 | # Remove from state on success 107 | # For multi-host services, result.service is "svc@host", extract base name 108 | removed_services: set[str] = set() 109 | for result in results: 110 | if result.success: 111 | base_service = result.service.split("@")[0] 112 | if base_service not in removed_services: 113 | remove_service(cfg, base_service) 114 | removed_services.add(base_service) 115 | 116 | maybe_regenerate_traefik(cfg, results) 117 | report_results(results) 118 | 119 | 120 | @app.command(rich_help_panel="Lifecycle") 121 | def pull( 122 | services: ServicesArg = None, 123 | all_services: AllOption = False, 124 | config: ConfigOption = None, 125 | ) -> None: 126 | """Pull latest images (docker compose pull).""" 127 | svc_list, cfg = get_services(services or [], all_services, config) 128 | raw = len(svc_list) == 1 129 | results = run_async(run_on_services(cfg, svc_list, "pull", raw=raw)) 130 | report_results(results) 131 | 132 | 133 | @app.command(rich_help_panel="Lifecycle") 134 | def restart( 135 | services: ServicesArg = None, 136 | all_services: AllOption = False, 137 | config: ConfigOption = None, 138 | ) -> None: 139 | """Restart services (down + up).""" 140 | svc_list, cfg = get_services(services or [], all_services, config) 141 | raw = len(svc_list) == 1 142 | results = run_async(run_sequential_on_services(cfg, svc_list, ["down", "up -d"], raw=raw)) 143 | maybe_regenerate_traefik(cfg, results) 144 | report_results(results) 145 | 146 | 147 | @app.command(rich_help_panel="Lifecycle") 148 | def update( 149 | services: ServicesArg = None, 150 | 
all_services: AllOption = False, 151 | config: ConfigOption = None, 152 | ) -> None: 153 | """Update services (pull + build + down + up).""" 154 | svc_list, cfg = get_services(services or [], all_services, config) 155 | raw = len(svc_list) == 1 156 | results = run_async( 157 | run_sequential_on_services( 158 | cfg, svc_list, ["pull --ignore-buildable", "build", "down", "up -d"], raw=raw 159 | ) 160 | ) 161 | maybe_regenerate_traefik(cfg, results) 162 | report_results(results) 163 | 164 | 165 | def _format_host(host: str | list[str]) -> str: 166 | """Format a host value for display.""" 167 | if isinstance(host, list): 168 | return ", ".join(host) 169 | return host 170 | 171 | 172 | def _report_pending_migrations(cfg: Config, migrations: list[str]) -> None: 173 | """Report services that need migration.""" 174 | console.print(f"[cyan]Services to migrate ({len(migrations)}):[/]") 175 | for svc in migrations: 176 | current = get_service_host(cfg, svc) 177 | target = cfg.get_hosts(svc)[0] 178 | console.print(f" [cyan]{svc}[/]: [magenta]{current}[/] → [magenta]{target}[/]") 179 | 180 | 181 | def _report_pending_orphans(orphaned: dict[str, str | list[str]]) -> None: 182 | """Report orphaned services that will be stopped.""" 183 | console.print(f"[yellow]Orphaned services to stop ({len(orphaned)}):[/]") 184 | for svc, hosts in orphaned.items(): 185 | console.print(f" [cyan]{svc}[/] on [magenta]{_format_host(hosts)}[/]") 186 | 187 | 188 | def _report_pending_starts(cfg: Config, missing: list[str]) -> None: 189 | """Report services that will be started.""" 190 | console.print(f"[green]Services to start ({len(missing)}):[/]") 191 | for svc in missing: 192 | target = _format_host(cfg.get_hosts(svc)) 193 | console.print(f" [cyan]{svc}[/] on [magenta]{target}[/]") 194 | 195 | 196 | def _report_pending_refresh(cfg: Config, to_refresh: list[str]) -> None: 197 | """Report services that will be refreshed.""" 198 | console.print(f"[blue]Services to refresh ({len(to_refresh)}):[/]") 199 | for svc in to_refresh: 200 | target = _format_host(cfg.get_hosts(svc)) 201 | console.print(f" [cyan]{svc}[/] on [magenta]{target}[/]") 202 | 203 | 204 | @app.command(rich_help_panel="Lifecycle") 205 | def apply( 206 | dry_run: Annotated[ 207 | bool, 208 | typer.Option("--dry-run", "-n", help="Show what would change without executing"), 209 | ] = False, 210 | no_orphans: Annotated[ 211 | bool, 212 | typer.Option("--no-orphans", help="Only migrate, don't stop orphaned services"), 213 | ] = False, 214 | full: Annotated[ 215 | bool, 216 | typer.Option("--full", "-f", help="Also run up on all services to apply config changes"), 217 | ] = False, 218 | config: ConfigOption = None, 219 | ) -> None: 220 | """Make reality match config (start, migrate, stop as needed). 221 | 222 | This is the "reconcile" command that ensures running services match your 223 | config file. It will: 224 | 225 | 1. Stop orphaned services (in state but removed from config) 226 | 2. Migrate services on wrong host (host in state ≠ host in config) 227 | 3. Start missing services (in config but not in state) 228 | 229 | Use --dry-run to preview changes before applying. 230 | Use --no-orphans to only migrate/start without stopping orphaned services. 231 | Use --full to also run 'up' on all services (picks up compose/env changes). 
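Examples (illustrative invocations; flags as defined below):

    cf apply --dry-run    # preview what would change
    cf apply              # reconcile running services with the config
    cf apply --full       # also re-run 'up' to pick up compose/env edits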
232 | """ 233 | cfg = load_config_or_exit(config) 234 | orphaned = get_orphaned_services(cfg) 235 | migrations = get_services_needing_migration(cfg) 236 | missing = get_services_not_in_state(cfg) 237 | 238 | # For --full: refresh all services not already being started/migrated 239 | handled = set(migrations) | set(missing) 240 | to_refresh = [svc for svc in cfg.services if svc not in handled] if full else [] 241 | 242 | has_orphans = bool(orphaned) and not no_orphans 243 | has_migrations = bool(migrations) 244 | has_missing = bool(missing) 245 | has_refresh = bool(to_refresh) 246 | 247 | if not has_orphans and not has_migrations and not has_missing and not has_refresh: 248 | console.print("[green]✓[/] Nothing to apply - reality matches config") 249 | return 250 | 251 | # Report what will be done 252 | if has_orphans: 253 | _report_pending_orphans(orphaned) 254 | if has_migrations: 255 | _report_pending_migrations(cfg, migrations) 256 | if has_missing: 257 | _report_pending_starts(cfg, missing) 258 | if has_refresh: 259 | _report_pending_refresh(cfg, to_refresh) 260 | 261 | if dry_run: 262 | console.print("\n[dim](dry-run: no changes made)[/]") 263 | return 264 | 265 | # Execute changes 266 | console.print() 267 | all_results = [] 268 | 269 | # 1. Stop orphaned services first 270 | if has_orphans: 271 | console.print("[yellow]Stopping orphaned services...[/]") 272 | all_results.extend(run_async(stop_orphaned_services(cfg))) 273 | 274 | # 2. Migrate services on wrong host 275 | if has_migrations: 276 | console.print("[cyan]Migrating services...[/]") 277 | migrate_results = run_async(up_services(cfg, migrations, raw=True)) 278 | all_results.extend(migrate_results) 279 | maybe_regenerate_traefik(cfg, migrate_results) 280 | 281 | # 3. Start missing services (reuse up_services which handles state updates) 282 | if has_missing: 283 | console.print("[green]Starting missing services...[/]") 284 | start_results = run_async(up_services(cfg, missing, raw=True)) 285 | all_results.extend(start_results) 286 | maybe_regenerate_traefik(cfg, start_results) 287 | 288 | # 4. Refresh remaining services (--full: run up to apply config changes) 289 | if has_refresh: 290 | console.print("[blue]Refreshing services...[/]") 291 | refresh_results = run_async(up_services(cfg, to_refresh, raw=True)) 292 | all_results.extend(refresh_results) 293 | maybe_regenerate_traefik(cfg, refresh_results) 294 | 295 | report_results(all_results) 296 | 297 | 298 | # Alias: cf a = cf apply 299 | app.command("a", hidden=True)(apply) 300 | -------------------------------------------------------------------------------- /src/compose_farm/cli/config.py: -------------------------------------------------------------------------------- 1 | """Configuration management commands for compose-farm.""" 2 | 3 | from __future__ import annotations 4 | 5 | import os 6 | import platform 7 | import shlex 8 | import shutil 9 | import subprocess 10 | from importlib import resources 11 | from pathlib import Path 12 | from typing import Annotated 13 | 14 | import typer 15 | 16 | from compose_farm.cli.app import app 17 | from compose_farm.console import console, err_console 18 | from compose_farm.paths import config_search_paths, default_config_path 19 | 20 | config_app = typer.Typer( 21 | name="config", 22 | help="Manage compose-farm configuration files.", 23 | no_args_is_help=True, 24 | ) 25 | 26 | 27 | # --- CLI Options (same pattern as cli.py) --- 28 | _PathOption = Annotated[ 29 | Path | None, 30 | typer.Option("--path", "-p", help="Path to config file. 
Uses auto-detection if not specified."), 31 | ] 32 | _ForceOption = Annotated[ 33 | bool, 34 | typer.Option("--force", "-f", help="Overwrite existing config without confirmation."), 35 | ] 36 | _RawOption = Annotated[ 37 | bool, 38 | typer.Option("--raw", "-r", help="Output raw file contents (for copy-paste)."), 39 | ] 40 | 41 | 42 | def _get_editor() -> str: 43 | """Get the user's preferred editor. 44 | 45 | Checks $EDITOR, then $VISUAL, then falls back to platform defaults. 46 | """ 47 | for env_var in ("EDITOR", "VISUAL"): 48 | editor = os.environ.get(env_var) 49 | if editor: 50 | return editor 51 | 52 | if platform.system() == "Windows": 53 | return "notepad" 54 | 55 | # Try common editors on Unix-like systems 56 | for editor in ("nano", "vim", "vi"): 57 | if shutil.which(editor): 58 | return editor 59 | 60 | return "vi" 61 | 62 | 63 | def _generate_template() -> str: 64 | """Generate a config template with documented schema.""" 65 | try: 66 | template_file = resources.files("compose_farm") / "example-config.yaml" 67 | return template_file.read_text(encoding="utf-8") 68 | except FileNotFoundError as e: 69 | err_console.print("[red]Example config template is missing from the package.[/red]") 70 | err_console.print("Reinstall compose-farm or report this issue.") 71 | raise typer.Exit(1) from e 72 | 73 | 74 | def _get_config_file(path: Path | None) -> Path | None: 75 | """Resolve config path, or auto-detect from standard locations.""" 76 | if path: 77 | return path.expanduser().resolve() 78 | 79 | # Check environment variable 80 | if env_path := os.environ.get("CF_CONFIG"): 81 | p = Path(env_path) 82 | if p.exists(): 83 | return p.resolve() 84 | 85 | # Check standard locations 86 | for p in config_search_paths(): 87 | if p.exists(): 88 | return p.resolve() 89 | 90 | return None 91 | 92 | 93 | @config_app.command("init") 94 | def config_init( 95 | path: _PathOption = None, 96 | force: _ForceOption = False, 97 | ) -> None: 98 | """Create a new config file with documented example. 99 | 100 | The generated config file serves as a template showing all available 101 | options with explanatory comments. 102 | """ 103 | target_path = (path.expanduser().resolve() if path else None) or default_config_path() 104 | 105 | if target_path.exists() and not force: 106 | console.print( 107 | f"[bold yellow]Config file already exists at:[/bold yellow] [cyan]{target_path}[/cyan]", 108 | ) 109 | if not typer.confirm("Overwrite existing config file?"): 110 | console.print("[dim]Aborted.[/dim]") 111 | raise typer.Exit(0) 112 | 113 | # Create parent directories 114 | target_path.parent.mkdir(parents=True, exist_ok=True) 115 | 116 | # Generate and write template 117 | template_content = _generate_template() 118 | target_path.write_text(template_content, encoding="utf-8") 119 | 120 | console.print(f"[green]✓[/] Config file created at: {target_path}") 121 | console.print("\n[dim]Edit the file to customize your settings:[/dim]") 122 | console.print(" [cyan]cf config edit[/cyan]") 123 | 124 | 125 | @config_app.command("edit") 126 | def config_edit( 127 | path: _PathOption = None, 128 | ) -> None: 129 | """Open the config file in your default editor. 130 | 131 | The editor is determined by: $EDITOR > $VISUAL > platform default. 
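Examples (illustrative; any $EDITOR value splittable by shlex works):

    cf config edit                         # open the auto-detected config
    EDITOR="code --wait" cf config edit    # one-off editor override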
132 | """ 133 | config_file = _get_config_file(path) 134 | 135 | if config_file is None: 136 | console.print("[yellow]No config file found.[/yellow]") 137 | console.print("\nRun [bold cyan]cf config init[/bold cyan] to create one.") 138 | console.print("\nSearched locations:") 139 | for p in config_search_paths(): 140 | console.print(f" - {p}") 141 | raise typer.Exit(1) 142 | 143 | if not config_file.exists(): 144 | console.print("[yellow]Config file not found.[/yellow]") 145 | console.print(f"\nProvided path does not exist: [cyan]{config_file}[/cyan]") 146 | console.print("\nRun [bold cyan]cf config init[/bold cyan] to create one.") 147 | raise typer.Exit(1) 148 | 149 | editor = _get_editor() 150 | console.print(f"[dim]Opening {config_file} with {editor}...[/dim]") 151 | 152 | try: 153 | editor_cmd = shlex.split(editor, posix=os.name != "nt") 154 | except ValueError as e: 155 | err_console.print("[red]Invalid editor command. Check $EDITOR/$VISUAL.[/red]") 156 | raise typer.Exit(1) from e 157 | 158 | if not editor_cmd: 159 | err_console.print("[red]Editor command is empty.[/red]") 160 | raise typer.Exit(1) 161 | 162 | try: 163 | subprocess.run([*editor_cmd, str(config_file)], check=True) 164 | except FileNotFoundError: 165 | err_console.print(f"[red]Editor '{editor_cmd[0]}' not found.[/red]") 166 | err_console.print("Set $EDITOR environment variable to your preferred editor.") 167 | raise typer.Exit(1) from None 168 | except subprocess.CalledProcessError as e: 169 | err_console.print(f"[red]Editor exited with error code {e.returncode}[/red]") 170 | raise typer.Exit(e.returncode) from None 171 | 172 | 173 | @config_app.command("show") 174 | def config_show( 175 | path: _PathOption = None, 176 | raw: _RawOption = False, 177 | ) -> None: 178 | """Display the config file location and contents.""" 179 | config_file = _get_config_file(path) 180 | 181 | if config_file is None: 182 | console.print("[yellow]No config file found.[/yellow]") 183 | console.print("\nSearched locations:") 184 | for p in config_search_paths(): 185 | status = "[green]exists[/green]" if p.exists() else "[dim]not found[/dim]" 186 | console.print(f" - {p} ({status})") 187 | console.print("\nRun [bold cyan]cf config init[/bold cyan] to create one.") 188 | raise typer.Exit(0) 189 | 190 | if not config_file.exists(): 191 | console.print("[yellow]Config file not found.[/yellow]") 192 | console.print(f"\nProvided path does not exist: [cyan]{config_file}[/cyan]") 193 | console.print("\nRun [bold cyan]cf config init[/bold cyan] to create one.") 194 | raise typer.Exit(1) 195 | 196 | content = config_file.read_text(encoding="utf-8") 197 | 198 | if raw: 199 | print(content, end="") 200 | return 201 | 202 | from rich.syntax import Syntax # noqa: PLC0415 203 | 204 | console.print(f"[bold green]Config file:[/bold green] [cyan]{config_file}[/cyan]") 205 | console.print() 206 | syntax = Syntax(content, "yaml", theme="monokai", line_numbers=True, word_wrap=True) 207 | console.print(syntax) 208 | console.print() 209 | console.print("[dim]Tip: Use -r for copy-paste friendly output[/dim]") 210 | 211 | 212 | @config_app.command("path") 213 | def config_path( 214 | path: _PathOption = None, 215 | ) -> None: 216 | """Print the config file path (useful for scripting).""" 217 | config_file = _get_config_file(path) 218 | 219 | if config_file is None: 220 | console.print("[yellow]No config file found.[/yellow]") 221 | console.print("\nSearched locations:") 222 | for p in config_search_paths(): 223 | status = "[green]exists[/green]" if p.exists() else 
"[dim]not found[/dim]" 224 | console.print(f" - {p} ({status})") 225 | raise typer.Exit(1) 226 | 227 | # Just print the path for easy piping 228 | print(config_file) 229 | 230 | 231 | @config_app.command("validate") 232 | def config_validate( 233 | path: _PathOption = None, 234 | ) -> None: 235 | """Validate the config file syntax and schema.""" 236 | config_file = _get_config_file(path) 237 | 238 | if config_file is None: 239 | err_console.print("[red]✗[/] No config file found") 240 | raise typer.Exit(1) 241 | 242 | # Lazy import: pydantic adds ~50ms to startup, only load when actually needed 243 | from compose_farm.config import load_config # noqa: PLC0415 244 | 245 | try: 246 | cfg = load_config(config_file) 247 | except FileNotFoundError as e: 248 | err_console.print(f"[red]✗[/] {e}") 249 | raise typer.Exit(1) from e 250 | except Exception as e: 251 | err_console.print(f"[red]✗[/] Invalid config: {e}") 252 | raise typer.Exit(1) from e 253 | 254 | console.print(f"[green]✓[/] Valid config: {config_file}") 255 | console.print(f" Hosts: {len(cfg.hosts)}") 256 | console.print(f" Services: {len(cfg.services)}") 257 | 258 | 259 | @config_app.command("symlink") 260 | def config_symlink( 261 | target: Annotated[ 262 | Path | None, 263 | typer.Argument(help="Config file to link to. Defaults to ./compose-farm.yaml"), 264 | ] = None, 265 | force: _ForceOption = False, 266 | ) -> None: 267 | """Create a symlink from the default config location to a config file. 268 | 269 | This makes a local config file discoverable globally without copying. 270 | Always uses absolute paths to avoid broken symlinks. 271 | 272 | Examples: 273 | cf config symlink # Link to ./compose-farm.yaml 274 | cf config symlink /opt/compose/config.yaml # Link to specific file 275 | 276 | """ 277 | # Default to compose-farm.yaml in current directory 278 | target_path = (target or Path("compose-farm.yaml")).expanduser().resolve() 279 | 280 | if not target_path.exists(): 281 | err_console.print(f"[red]✗[/] Target config file not found: {target_path}") 282 | raise typer.Exit(1) 283 | 284 | if not target_path.is_file(): 285 | err_console.print(f"[red]✗[/] Target is not a file: {target_path}") 286 | raise typer.Exit(1) 287 | 288 | symlink_path = default_config_path() 289 | 290 | # Check if symlink location already exists 291 | if symlink_path.exists() or symlink_path.is_symlink(): 292 | if symlink_path.is_symlink(): 293 | current_target = symlink_path.resolve() if symlink_path.exists() else None 294 | if current_target == target_path: 295 | console.print(f"[green]✓[/] Symlink already points to: {target_path}") 296 | return 297 | # Update existing symlink 298 | if not force: 299 | existing = symlink_path.readlink() 300 | console.print(f"[yellow]Symlink exists:[/] {symlink_path} -> {existing}") 301 | if not typer.confirm(f"Update to point to {target_path}?"): 302 | console.print("[dim]Aborted.[/dim]") 303 | raise typer.Exit(0) 304 | symlink_path.unlink() 305 | else: 306 | # Regular file exists 307 | err_console.print(f"[red]✗[/] A regular file exists at: {symlink_path}") 308 | err_console.print(" Back it up or remove it first, then retry.") 309 | raise typer.Exit(1) 310 | 311 | # Create parent directories 312 | symlink_path.parent.mkdir(parents=True, exist_ok=True) 313 | 314 | # Create symlink with absolute path 315 | symlink_path.symlink_to(target_path) 316 | 317 | console.print("[green]✓[/] Created symlink:") 318 | console.print(f" {symlink_path}") 319 | console.print(f" -> {target_path}") 320 | 321 | 322 | # Register config 
subcommand on the shared app 323 | app.add_typer(config_app, name="config", rich_help_panel="Configuration") 324 | -------------------------------------------------------------------------------- /src/compose_farm/compose.py: -------------------------------------------------------------------------------- 1 | """Compose file parsing utilities. 2 | 3 | Handles .env loading, variable interpolation, port/volume/network extraction. 4 | """ 5 | 6 | from __future__ import annotations 7 | 8 | import os 9 | import re 10 | from dataclasses import dataclass 11 | from typing import TYPE_CHECKING, Any 12 | 13 | import yaml 14 | 15 | if TYPE_CHECKING: 16 | from pathlib import Path 17 | 18 | from .config import Config 19 | 20 | # Port parsing constants 21 | _SINGLE_PART = 1 22 | _PUBLISHED_TARGET_PARTS = 2 23 | _HOST_PUBLISHED_PARTS = 3 24 | _MIN_VOLUME_PARTS = 2 25 | 26 | _VAR_PATTERN = re.compile(r"\$\{([A-Za-z_][A-Za-z0-9_]*)(?::-(.*?))?\}") 27 | 28 | 29 | @dataclass(frozen=True) 30 | class PortMapping: 31 | """Port mapping for a compose service.""" 32 | 33 | target: int 34 | published: int | None 35 | 36 | 37 | def _load_env(compose_path: Path) -> dict[str, str]: 38 | """Load environment variables for compose interpolation. 39 | 40 | Reads from .env file in the same directory as compose file, 41 | then overlays current environment variables. 42 | """ 43 | env: dict[str, str] = {} 44 | env_path = compose_path.parent / ".env" 45 | if env_path.exists(): 46 | for line in env_path.read_text().splitlines(): 47 | stripped = line.strip() 48 | if not stripped or stripped.startswith("#") or "=" not in stripped: 49 | continue 50 | key, value = stripped.split("=", 1) 51 | key = key.strip() 52 | value = value.strip() 53 | if (value.startswith('"') and value.endswith('"')) or ( 54 | value.startswith("'") and value.endswith("'") 55 | ): 56 | value = value[1:-1] 57 | env[key] = value 58 | env.update({k: v for k, v in os.environ.items() if isinstance(v, str)}) 59 | return env 60 | 61 | 62 | def _interpolate(value: str, env: dict[str, str]) -> str: 63 | """Perform ${VAR} and ${VAR:-default} interpolation.""" 64 | 65 | def replace(match: re.Match[str]) -> str: 66 | var = match.group(1) 67 | default = match.group(2) 68 | resolved = env.get(var) 69 | if resolved: 70 | return resolved 71 | return default or "" 72 | 73 | return _VAR_PATTERN.sub(replace, value) 74 | 75 | 76 | def _parse_ports(raw: Any, env: dict[str, str]) -> list[PortMapping]: # noqa: PLR0912 77 | """Parse port specifications from compose file. 78 | 79 | Handles string formats like "8080", "8080:80", "0.0.0.0:8080:80", 80 | and dict formats with target/published keys. 
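
    Illustrative examples (hypothetical values, sketching the parsing
    rules rather than output from a real compose file):

        "8080"            -> PortMapping(target=8080, published=None)
        "8080:80"         -> PortMapping(target=80, published=8080)
        "0.0.0.0:8080:80" -> PortMapping(target=80, published=8080)
        {"target": 80, "published": 8080}
                          -> PortMapping(target=80, published=8080)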
81 | """ 82 | if raw is None: 83 | return [] 84 | mappings: list[PortMapping] = [] 85 | 86 | items = raw if isinstance(raw, list) else [raw] 87 | 88 | for item in items: 89 | if isinstance(item, str): 90 | interpolated = _interpolate(item, env) 91 | port_spec, _, _ = interpolated.partition("/") 92 | parts = port_spec.split(":") 93 | published: int | None = None 94 | target: int | None = None 95 | 96 | if len(parts) == _SINGLE_PART and parts[0].isdigit(): 97 | target = int(parts[0]) 98 | elif ( 99 | len(parts) == _PUBLISHED_TARGET_PARTS and parts[0].isdigit() and parts[1].isdigit() 100 | ): 101 | published = int(parts[0]) 102 | target = int(parts[1]) 103 | elif ( 104 | len(parts) == _HOST_PUBLISHED_PARTS and parts[-2].isdigit() and parts[-1].isdigit() 105 | ): 106 | published = int(parts[-2]) 107 | target = int(parts[-1]) 108 | 109 | if target is not None: 110 | mappings.append(PortMapping(target=target, published=published)) 111 | elif isinstance(item, dict): 112 | target_raw = item.get("target") 113 | if isinstance(target_raw, str): 114 | target_raw = _interpolate(target_raw, env) 115 | if target_raw is None: 116 | continue 117 | try: 118 | target_val = int(str(target_raw)) 119 | except (TypeError, ValueError): 120 | continue 121 | 122 | published_raw = item.get("published") 123 | if isinstance(published_raw, str): 124 | published_raw = _interpolate(published_raw, env) 125 | published_val: int | None 126 | try: 127 | published_val = int(str(published_raw)) if published_raw is not None else None 128 | except (TypeError, ValueError): 129 | published_val = None 130 | mappings.append(PortMapping(target=target_val, published=published_val)) 131 | 132 | return mappings 133 | 134 | 135 | def _resolve_host_path(host_path: str, compose_dir: Path) -> str | None: 136 | """Resolve a host path from volume mount, returning None for named volumes.""" 137 | if host_path.startswith("/"): 138 | return host_path 139 | if host_path.startswith(("./", "../")): 140 | return str((compose_dir / host_path).resolve()) 141 | return None # Named volume 142 | 143 | 144 | def _parse_volume_item( 145 | item: str | dict[str, Any], 146 | env: dict[str, str], 147 | compose_dir: Path, 148 | ) -> str | None: 149 | """Parse a single volume item and return host path if it's a bind mount.""" 150 | if isinstance(item, str): 151 | interpolated = _interpolate(item, env) 152 | parts = interpolated.split(":") 153 | if len(parts) >= _MIN_VOLUME_PARTS: 154 | return _resolve_host_path(parts[0], compose_dir) 155 | elif isinstance(item, dict) and item.get("type") == "bind": 156 | source = item.get("source") 157 | if source: 158 | interpolated = _interpolate(str(source), env) 159 | return _resolve_host_path(interpolated, compose_dir) 160 | return None 161 | 162 | 163 | def parse_host_volumes(config: Config, service: str) -> list[str]: 164 | """Extract host bind mount paths from a service's compose file. 165 | 166 | Returns a list of absolute host paths used as volume mounts. 167 | Skips named volumes and resolves relative paths. 
168 | """ 169 | compose_path = config.get_compose_path(service) 170 | if not compose_path.exists(): 171 | return [] 172 | 173 | env = _load_env(compose_path) 174 | compose_data = yaml.safe_load(compose_path.read_text()) or {} 175 | raw_services = compose_data.get("services", {}) 176 | if not isinstance(raw_services, dict): 177 | return [] 178 | 179 | paths: list[str] = [] 180 | compose_dir = compose_path.parent 181 | 182 | for definition in raw_services.values(): 183 | if not isinstance(definition, dict): 184 | continue 185 | 186 | volumes = definition.get("volumes") 187 | if not volumes: 188 | continue 189 | 190 | items = volumes if isinstance(volumes, list) else [volumes] 191 | for item in items: 192 | host_path = _parse_volume_item(item, env, compose_dir) 193 | if host_path: 194 | paths.append(host_path) 195 | 196 | # Return unique paths, preserving order 197 | seen: set[str] = set() 198 | unique: list[str] = [] 199 | for p in paths: 200 | if p not in seen: 201 | seen.add(p) 202 | unique.append(p) 203 | return unique 204 | 205 | 206 | def parse_devices(config: Config, service: str) -> list[str]: 207 | """Extract host device paths from a service's compose file. 208 | 209 | Returns a list of host device paths (e.g., /dev/dri, /dev/dri/renderD128). 210 | """ 211 | compose_path = config.get_compose_path(service) 212 | if not compose_path.exists(): 213 | return [] 214 | 215 | env = _load_env(compose_path) 216 | compose_data = yaml.safe_load(compose_path.read_text()) or {} 217 | raw_services = compose_data.get("services", {}) 218 | if not isinstance(raw_services, dict): 219 | return [] 220 | 221 | devices: list[str] = [] 222 | for definition in raw_services.values(): 223 | if not isinstance(definition, dict): 224 | continue 225 | 226 | device_list = definition.get("devices") 227 | if not device_list or not isinstance(device_list, list): 228 | continue 229 | 230 | for item in device_list: 231 | if not isinstance(item, str): 232 | continue 233 | interpolated = _interpolate(item, env) 234 | # Format: host_path:container_path[:options] 235 | parts = interpolated.split(":") 236 | if parts: 237 | host_path = parts[0] 238 | if host_path.startswith("/dev/"): 239 | devices.append(host_path) 240 | 241 | # Return unique devices, preserving order 242 | seen: set[str] = set() 243 | unique: list[str] = [] 244 | for d in devices: 245 | if d not in seen: 246 | seen.add(d) 247 | unique.append(d) 248 | return unique 249 | 250 | 251 | def parse_external_networks(config: Config, service: str) -> list[str]: 252 | """Extract external network names from a service's compose file. 253 | 254 | Returns a list of network names marked as external: true. 255 | """ 256 | compose_path = config.get_compose_path(service) 257 | if not compose_path.exists(): 258 | return [] 259 | 260 | compose_data = yaml.safe_load(compose_path.read_text()) or {} 261 | networks = compose_data.get("networks", {}) 262 | if not isinstance(networks, dict): 263 | return [] 264 | 265 | external_networks: list[str] = [] 266 | for name, definition in networks.items(): 267 | if isinstance(definition, dict) and definition.get("external") is True: 268 | external_networks.append(name) 269 | 270 | return external_networks 271 | 272 | 273 | def load_compose_services( 274 | config: Config, 275 | stack: str, 276 | ) -> tuple[dict[str, Any], dict[str, str], str]: 277 | """Load services from a compose file with environment interpolation. 278 | 279 | Returns (services_dict, env_dict, host_address). 
280 | """ 281 | compose_path = config.get_compose_path(stack) 282 | if not compose_path.exists(): 283 | message = f"[{stack}] Compose file not found: {compose_path}" 284 | raise FileNotFoundError(message) 285 | 286 | env = _load_env(compose_path) 287 | compose_data = yaml.safe_load(compose_path.read_text()) or {} 288 | raw_services = compose_data.get("services", {}) 289 | if not isinstance(raw_services, dict): 290 | return {}, env, config.get_host(stack).address 291 | return raw_services, env, config.get_host(stack).address 292 | 293 | 294 | def normalize_labels(raw: Any, env: dict[str, str]) -> dict[str, str]: 295 | """Normalize labels from list or dict format, with interpolation.""" 296 | if raw is None: 297 | return {} 298 | if isinstance(raw, dict): 299 | return { 300 | _interpolate(str(k), env): _interpolate(str(v), env) 301 | for k, v in raw.items() 302 | if k is not None 303 | } 304 | if isinstance(raw, list): 305 | labels: dict[str, str] = {} 306 | for item in raw: 307 | if not isinstance(item, str) or "=" not in item: 308 | continue 309 | key_raw, value_raw = item.split("=", 1) 310 | key = _interpolate(key_raw.strip(), env) 311 | value = _interpolate(value_raw.strip(), env) 312 | labels[key] = value 313 | return labels 314 | return {} 315 | 316 | 317 | def get_ports_for_service( 318 | definition: dict[str, Any], 319 | all_services: dict[str, Any], 320 | env: dict[str, str], 321 | ) -> list[PortMapping]: 322 | """Get ports for a service, following network_mode: service:X if present.""" 323 | network_mode = definition.get("network_mode", "") 324 | if isinstance(network_mode, str) and network_mode.startswith("service:"): 325 | # Service uses another service's network - get ports from that service 326 | ref_service = network_mode[len("service:") :] 327 | if ref_service in all_services: 328 | ref_def = all_services[ref_service] 329 | if isinstance(ref_def, dict): 330 | return _parse_ports(ref_def.get("ports"), env) 331 | return _parse_ports(definition.get("ports"), env) 332 | -------------------------------------------------------------------------------- /tests/test_traefik.py: -------------------------------------------------------------------------------- 1 | """Tests for Traefik config generator.""" 2 | 3 | from pathlib import Path 4 | 5 | import yaml 6 | 7 | from compose_farm.compose import parse_external_networks 8 | from compose_farm.config import Config, Host 9 | from compose_farm.traefik import generate_traefik_config 10 | 11 | 12 | def _write_compose(path: Path, data: dict[str, object]) -> None: 13 | path.parent.mkdir(parents=True, exist_ok=True) 14 | path.write_text(yaml.safe_dump(data, sort_keys=False)) 15 | 16 | 17 | def test_generate_traefik_config_with_published_port(tmp_path: Path) -> None: 18 | cfg = Config( 19 | compose_dir=tmp_path, 20 | hosts={"nas01": Host(address="192.168.1.10")}, 21 | services={"plex": "nas01"}, 22 | ) 23 | compose_path = tmp_path / "plex" / "docker-compose.yml" 24 | _write_compose( 25 | compose_path, 26 | { 27 | "services": { 28 | "plex": { 29 | "ports": ["32400:32400"], 30 | "labels": [ 31 | "traefik.enable=true", 32 | "traefik.http.routers.plex.rule=Host(`plex.lab.mydomain.org`)", 33 | "traefik.http.routers.plex.entrypoints=web,websecure", 34 | "traefik.http.routers.plex.tls.domains[0].main=plex.lab.mydomain.org", 35 | "traefik.http.services.plex.loadbalancer.server.port=32400", 36 | ], 37 | } 38 | } 39 | }, 40 | ) 41 | 42 | dynamic, warnings = generate_traefik_config(cfg, ["plex"]) 43 | 44 | assert warnings == [] 45 | assert 
dynamic["http"]["routers"]["plex"]["rule"] == "Host(`plex.lab.mydomain.org`)" 46 | assert dynamic["http"]["routers"]["plex"]["entrypoints"] == ["web", "websecure"] 47 | assert ( 48 | dynamic["http"]["routers"]["plex"]["tls"]["domains"][0]["main"] == "plex.lab.mydomain.org" 49 | ) 50 | 51 | servers = dynamic["http"]["services"]["plex"]["loadbalancer"]["servers"] 52 | assert servers == [{"url": "http://192.168.1.10:32400"}] 53 | 54 | 55 | def test_generate_traefik_config_without_published_port_warns(tmp_path: Path) -> None: 56 | cfg = Config( 57 | compose_dir=tmp_path, 58 | hosts={"nas01": Host(address="192.168.1.10")}, 59 | services={"app": "nas01"}, 60 | ) 61 | compose_path = tmp_path / "app" / "docker-compose.yml" 62 | _write_compose( 63 | compose_path, 64 | { 65 | "services": { 66 | "app": { 67 | "ports": ["8080"], 68 | "labels": [ 69 | "traefik.http.routers.app.rule=Host(`app.lab.mydomain.org`)", 70 | "traefik.http.services.app.loadbalancer.server.port=8080", 71 | ], 72 | } 73 | } 74 | }, 75 | ) 76 | 77 | dynamic, warnings = generate_traefik_config(cfg, ["app"]) 78 | 79 | assert dynamic["http"]["routers"]["app"]["rule"] == "Host(`app.lab.mydomain.org`)" 80 | assert any("No published port found" in warning for warning in warnings) 81 | 82 | 83 | def test_generate_interpolates_env_and_infers_router_service(tmp_path: Path) -> None: 84 | cfg = Config( 85 | compose_dir=tmp_path, 86 | hosts={"nas01": Host(address="192.168.1.10")}, 87 | services={"wakapi": "nas01"}, 88 | ) 89 | compose_dir = tmp_path / "wakapi" 90 | compose_dir.mkdir(parents=True, exist_ok=True) 91 | (compose_dir / ".env").write_text("DOMAIN=lab.mydomain.org\n") 92 | compose_path = compose_dir / "docker-compose.yml" 93 | _write_compose( 94 | compose_path, 95 | { 96 | "services": { 97 | "wakapi": { 98 | "ports": ["3009:3000"], 99 | "labels": [ 100 | "traefik.enable=true", 101 | "traefik.http.routers.wakapi.rule=Host(`wakapi.${DOMAIN}`)", 102 | "traefik.http.routers.wakapi.entrypoints=websecure", 103 | "traefik.http.routers.wakapi-local.rule=Host(`wakapi.local`)", 104 | "traefik.http.routers.wakapi-local.entrypoints=web", 105 | "traefik.http.services.wakapi.loadbalancer.server.port=3000", 106 | ], 107 | } 108 | } 109 | }, 110 | ) 111 | 112 | dynamic, warnings = generate_traefik_config(cfg, ["wakapi"]) 113 | 114 | assert warnings == [] 115 | routers = dynamic["http"]["routers"] 116 | assert routers["wakapi"]["rule"] == "Host(`wakapi.lab.mydomain.org`)" 117 | assert routers["wakapi"]["entrypoints"] == ["websecure"] 118 | assert routers["wakapi-local"]["entrypoints"] == ["web"] 119 | assert routers["wakapi-local"]["service"] == "wakapi" 120 | 121 | servers = dynamic["http"]["services"]["wakapi"]["loadbalancer"]["servers"] 122 | assert servers == [{"url": "http://192.168.1.10:3009"}] 123 | 124 | 125 | def test_generate_interpolates_label_keys_and_ports(tmp_path: Path) -> None: 126 | cfg = Config( 127 | compose_dir=tmp_path, 128 | hosts={"nas01": Host(address="192.168.1.10")}, 129 | services={"supabase": "nas01"}, 130 | ) 131 | compose_dir = tmp_path / "supabase" 132 | compose_dir.mkdir(parents=True, exist_ok=True) 133 | (compose_dir / ".env").write_text( 134 | "CONTAINER_PREFIX=supa\n" 135 | "SUBDOMAIN=api\n" 136 | "DOMAIN=lab.mydomain.org\n" 137 | "PUBLIC_DOMAIN=public.example.org\n" 138 | "KONG_HTTP_PORT=8000\n" 139 | ) 140 | compose_path = compose_dir / "docker-compose.yml" 141 | _write_compose( 142 | compose_path, 143 | { 144 | "services": { 145 | "kong": { 146 | "ports": ["${KONG_HTTP_PORT}:8000/tcp"], 147 | "labels": [ 148 | 
"traefik.enable=true", 149 | "traefik.http.routers.${CONTAINER_PREFIX}.rule=Host(`${SUBDOMAIN}.${DOMAIN}`) || Host(`${SUBDOMAIN}.${PUBLIC_DOMAIN}`)", 150 | "traefik.http.routers.${CONTAINER_PREFIX}-studio.rule=Host(`studio.${DOMAIN}`)", 151 | "traefik.http.services.${CONTAINER_PREFIX}.loadbalancer.server.port=8000", 152 | ], 153 | } 154 | } 155 | }, 156 | ) 157 | 158 | dynamic, warnings = generate_traefik_config(cfg, ["supabase"]) 159 | 160 | assert warnings == [] 161 | routers = dynamic["http"]["routers"] 162 | assert "supa" in routers 163 | assert "supa-studio" in routers 164 | assert routers["supa"]["service"] == "supa" 165 | assert routers["supa-studio"]["service"] == "supa" 166 | servers = dynamic["http"]["services"]["supa"]["loadbalancer"]["servers"] 167 | assert servers == [{"url": "http://192.168.1.10:8000"}] 168 | 169 | 170 | def test_generate_skips_services_with_enable_false(tmp_path: Path) -> None: 171 | cfg = Config( 172 | compose_dir=tmp_path, 173 | hosts={"nas01": Host(address="192.168.1.10")}, 174 | services={"stack": "nas01"}, 175 | ) 176 | compose_path = tmp_path / "stack" / "docker-compose.yml" 177 | _write_compose( 178 | compose_path, 179 | { 180 | "services": { 181 | "studio": { 182 | "ports": ["3000:3000"], 183 | "labels": [ 184 | "traefik.enable=false", 185 | "traefik.http.routers.studio.rule=Host(`studio.lab.mydomain.org`)", 186 | "traefik.http.services.studio.loadbalancer.server.port=3000", 187 | ], 188 | } 189 | } 190 | }, 191 | ) 192 | 193 | dynamic, warnings = generate_traefik_config(cfg, ["stack"]) 194 | 195 | assert dynamic == {} 196 | assert warnings == [] 197 | 198 | 199 | def test_generate_follows_network_mode_service_for_ports(tmp_path: Path) -> None: 200 | """Services using network_mode: service:X should use ports from service X.""" 201 | cfg = Config( 202 | compose_dir=tmp_path, 203 | hosts={"nas01": Host(address="192.168.1.10")}, 204 | services={"vpn-stack": "nas01"}, 205 | ) 206 | compose_path = tmp_path / "vpn-stack" / "docker-compose.yml" 207 | _write_compose( 208 | compose_path, 209 | { 210 | "services": { 211 | "vpn": { 212 | "image": "gluetun", 213 | "ports": ["5080:5080", "9696:9696"], 214 | }, 215 | "qbittorrent": { 216 | "image": "qbittorrent", 217 | "network_mode": "service:vpn", 218 | "labels": [ 219 | "traefik.enable=true", 220 | "traefik.http.routers.torrent.rule=Host(`torrent.example.com`)", 221 | "traefik.http.services.torrent.loadbalancer.server.port=5080", 222 | ], 223 | }, 224 | "prowlarr": { 225 | "image": "prowlarr", 226 | "network_mode": "service:vpn", 227 | "labels": [ 228 | "traefik.enable=true", 229 | "traefik.http.routers.prowlarr.rule=Host(`prowlarr.example.com`)", 230 | "traefik.http.services.prowlarr.loadbalancer.server.port=9696", 231 | ], 232 | }, 233 | } 234 | }, 235 | ) 236 | 237 | dynamic, warnings = generate_traefik_config(cfg, ["vpn-stack"]) 238 | 239 | assert warnings == [] 240 | # Both services should get their ports from the vpn service 241 | torrent_servers = dynamic["http"]["services"]["torrent"]["loadbalancer"]["servers"] 242 | assert torrent_servers == [{"url": "http://192.168.1.10:5080"}] 243 | prowlarr_servers = dynamic["http"]["services"]["prowlarr"]["loadbalancer"]["servers"] 244 | assert prowlarr_servers == [{"url": "http://192.168.1.10:9696"}] 245 | 246 | 247 | def test_parse_external_networks_single(tmp_path: Path) -> None: 248 | """Extract a single external network from compose file.""" 249 | cfg = Config( 250 | compose_dir=tmp_path, 251 | hosts={"host1": Host(address="192.168.1.10")}, 252 | 
services={"app": "host1"}, 253 | ) 254 | compose_path = tmp_path / "app" / "compose.yaml" 255 | _write_compose( 256 | compose_path, 257 | { 258 | "services": {"app": {"image": "nginx"}}, 259 | "networks": {"mynetwork": {"external": True}}, 260 | }, 261 | ) 262 | 263 | networks = parse_external_networks(cfg, "app") 264 | assert networks == ["mynetwork"] 265 | 266 | 267 | def test_parse_external_networks_multiple(tmp_path: Path) -> None: 268 | """Extract multiple external networks from compose file.""" 269 | cfg = Config( 270 | compose_dir=tmp_path, 271 | hosts={"host1": Host(address="192.168.1.10")}, 272 | services={"app": "host1"}, 273 | ) 274 | compose_path = tmp_path / "app" / "compose.yaml" 275 | _write_compose( 276 | compose_path, 277 | { 278 | "services": {"app": {"image": "nginx"}}, 279 | "networks": { 280 | "frontend": {"external": True}, 281 | "backend": {"external": True}, 282 | "internal": {"driver": "bridge"}, # not external 283 | }, 284 | }, 285 | ) 286 | 287 | networks = parse_external_networks(cfg, "app") 288 | assert set(networks) == {"frontend", "backend"} 289 | 290 | 291 | def test_parse_external_networks_none(tmp_path: Path) -> None: 292 | """No external networks returns empty list.""" 293 | cfg = Config( 294 | compose_dir=tmp_path, 295 | hosts={"host1": Host(address="192.168.1.10")}, 296 | services={"app": "host1"}, 297 | ) 298 | compose_path = tmp_path / "app" / "compose.yaml" 299 | _write_compose( 300 | compose_path, 301 | { 302 | "services": {"app": {"image": "nginx"}}, 303 | "networks": {"internal": {"driver": "bridge"}}, 304 | }, 305 | ) 306 | 307 | networks = parse_external_networks(cfg, "app") 308 | assert networks == [] 309 | 310 | 311 | def test_parse_external_networks_no_networks_section(tmp_path: Path) -> None: 312 | """No networks section returns empty list.""" 313 | cfg = Config( 314 | compose_dir=tmp_path, 315 | hosts={"host1": Host(address="192.168.1.10")}, 316 | services={"app": "host1"}, 317 | ) 318 | compose_path = tmp_path / "app" / "compose.yaml" 319 | _write_compose( 320 | compose_path, 321 | {"services": {"app": {"image": "nginx"}}}, 322 | ) 323 | 324 | networks = parse_external_networks(cfg, "app") 325 | assert networks == [] 326 | 327 | 328 | def test_parse_external_networks_missing_compose(tmp_path: Path) -> None: 329 | """Missing compose file returns empty list.""" 330 | cfg = Config( 331 | compose_dir=tmp_path, 332 | hosts={"host1": Host(address="192.168.1.10")}, 333 | services={"app": "host1"}, 334 | ) 335 | # Don't create compose file 336 | 337 | networks = parse_external_networks(cfg, "app") 338 | assert networks == [] 339 | -------------------------------------------------------------------------------- /src/compose_farm/traefik.py: -------------------------------------------------------------------------------- 1 | """Generate Traefik file-provider config from compose labels. 2 | 3 | Compose Farm keeps compose files as the source of truth for Traefik routing. 4 | This module reads `traefik.*` labels from a stack's docker-compose.yml and 5 | emits an equivalent file-provider fragment with upstream servers rewritten to 6 | use host-published ports for cross-host reachability. 
7 | """ 8 | 9 | from __future__ import annotations 10 | 11 | from dataclasses import dataclass 12 | from typing import TYPE_CHECKING, Any 13 | 14 | import yaml 15 | 16 | from .compose import ( 17 | PortMapping, 18 | get_ports_for_service, 19 | load_compose_services, 20 | normalize_labels, 21 | ) 22 | from .executor import LOCAL_ADDRESSES 23 | 24 | if TYPE_CHECKING: 25 | from .config import Config 26 | 27 | 28 | @dataclass 29 | class _TraefikServiceSource: 30 | """Source information to build an upstream for a Traefik service.""" 31 | 32 | traefik_service: str 33 | stack: str 34 | compose_service: str 35 | host_address: str 36 | ports: list[PortMapping] 37 | container_port: int | None = None 38 | scheme: str | None = None 39 | 40 | 41 | _LIST_VALUE_KEYS = {"entrypoints", "middlewares"} 42 | _MIN_ROUTER_PARTS = 3 43 | _MIN_SERVICE_LABEL_PARTS = 6 44 | 45 | 46 | def _parse_value(key: str, raw_value: str) -> Any: 47 | value = raw_value.strip() 48 | lower = value.lower() 49 | if lower in {"true", "false"}: 50 | return lower == "true" 51 | if value.isdigit(): 52 | return int(value) 53 | last_segment = key.rsplit(".", 1)[-1] 54 | if last_segment in _LIST_VALUE_KEYS: 55 | parts = [v.strip() for v in value.split(",")] if "," in value else [value] 56 | return [part for part in parts if part] 57 | return value 58 | 59 | 60 | def _parse_segment(segment: str) -> tuple[str, int | None]: 61 | if "[" in segment and segment.endswith("]"): 62 | name, index_raw = segment[:-1].split("[", 1) 63 | if index_raw.isdigit(): 64 | return name, int(index_raw) 65 | return segment, None 66 | 67 | 68 | def _insert(root: dict[str, Any], key_path: list[str], value: Any) -> None: # noqa: PLR0912 69 | current: Any = root 70 | for idx, segment in enumerate(key_path): 71 | is_last = idx == len(key_path) - 1 72 | name, list_index = _parse_segment(segment) 73 | 74 | if list_index is None: 75 | if is_last: 76 | if not isinstance(current, dict): 77 | return 78 | current[name] = value 79 | else: 80 | if not isinstance(current, dict): 81 | return 82 | next_container = current.get(name) 83 | if not isinstance(next_container, dict): 84 | next_container = {} 85 | current[name] = next_container 86 | current = next_container 87 | continue 88 | 89 | if not isinstance(current, dict): 90 | return 91 | container_list = current.get(name) 92 | if not isinstance(container_list, list): 93 | container_list = [] 94 | current[name] = container_list 95 | while len(container_list) <= list_index: 96 | container_list.append({}) 97 | if is_last: 98 | container_list[list_index] = value 99 | else: 100 | if not isinstance(container_list[list_index], dict): 101 | container_list[list_index] = {} 102 | current = container_list[list_index] 103 | 104 | 105 | def _resolve_published_port(source: _TraefikServiceSource) -> tuple[int | None, str | None]: 106 | """Resolve host-published port for a Traefik service. 107 | 108 | Returns (published_port, warning_message). 109 | """ 110 | published_ports = [m for m in source.ports if m.published is not None] 111 | if not published_ports: 112 | return None, None 113 | 114 | if source.container_port is not None: 115 | for mapping in published_ports: 116 | if mapping.target == source.container_port: 117 | return mapping.published, None 118 | if len(published_ports) == 1: 119 | port = published_ports[0].published 120 | warn = ( 121 | f"[{source.stack}/{source.compose_service}] " 122 | f"No published port matches container port {source.container_port} " 123 | f"for Traefik service '{source.traefik_service}', using {port}." 
124 | ) 125 | return port, warn 126 | return None, ( 127 | f"[{source.stack}/{source.compose_service}] " 128 | f"No published port matches container port {source.container_port} " 129 | f"for Traefik service '{source.traefik_service}'." 130 | ) 131 | 132 | if len(published_ports) == 1: 133 | return published_ports[0].published, None 134 | return None, ( 135 | f"[{source.stack}/{source.compose_service}] " 136 | f"Multiple published ports found for Traefik service '{source.traefik_service}', " 137 | "but no loadbalancer.server.port label to disambiguate." 138 | ) 139 | 140 | 141 | def _finalize_http_services( 142 | dynamic: dict[str, Any], 143 | sources: dict[str, _TraefikServiceSource], 144 | warnings: list[str], 145 | ) -> None: 146 | for traefik_service, source in sources.items(): 147 | published_port, warn = _resolve_published_port(source) 148 | if warn: 149 | warnings.append(warn) 150 | if published_port is None: 151 | warnings.append( 152 | f"[{source.stack}/{source.compose_service}] " 153 | f"No published port found for Traefik service '{traefik_service}'. " 154 | "Add a ports: mapping (e.g., '8080:8080') for cross-host routing." 155 | ) 156 | continue 157 | 158 | scheme = source.scheme or "http" 159 | upstream_url = f"{scheme}://{source.host_address}:{published_port}" 160 | 161 | http_section = dynamic.setdefault("http", {}) 162 | services_section = http_section.setdefault("services", {}) 163 | service_cfg = services_section.setdefault(traefik_service, {}) 164 | lb_cfg = service_cfg.setdefault("loadbalancer", {}) 165 | if isinstance(lb_cfg, dict): 166 | lb_cfg.pop("server", None) 167 | lb_cfg["servers"] = [{"url": upstream_url}] 168 | 169 | 170 | def _attach_default_services( 171 | stack: str, 172 | compose_service: str, 173 | routers: dict[str, bool], 174 | service_names: set[str], 175 | warnings: list[str], 176 | dynamic: dict[str, Any], 177 | ) -> None: 178 | if not routers: 179 | return 180 | if len(service_names) == 1: 181 | default_service = next(iter(service_names)) 182 | for router_name, explicit in routers.items(): 183 | if explicit: 184 | continue 185 | _insert(dynamic, ["http", "routers", router_name, "service"], default_service) 186 | return 187 | 188 | if len(service_names) == 0: 189 | for router_name, explicit in routers.items(): 190 | if not explicit: 191 | warnings.append( 192 | f"[{stack}/{compose_service}] Router '{router_name}' has no service " 193 | "and no traefik.http.services labels were found." 194 | ) 195 | return 196 | 197 | for router_name, explicit in routers.items(): 198 | if explicit: 199 | continue 200 | warnings.append( 201 | f"[{stack}/{compose_service}] Router '{router_name}' has no explicit service " 202 | "and multiple Traefik services are defined; add " 203 | f"traefik.http.routers.{router_name}.service." 
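            # Ambiguous: several Traefik services are defined in this
            # stack, so no default can be inferred for this router.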
204 | ) 205 | 206 | 207 | def _process_router_label( 208 | key_without_prefix: str, 209 | routers: dict[str, bool], 210 | ) -> None: 211 | if not key_without_prefix.startswith("http.routers."): 212 | return 213 | router_parts = key_without_prefix.split(".") 214 | if len(router_parts) < _MIN_ROUTER_PARTS: 215 | return 216 | router_name = router_parts[2] 217 | router_remainder = router_parts[3:] 218 | explicit = routers.get(router_name, False) 219 | if router_remainder == ["service"]: 220 | explicit = True 221 | routers[router_name] = explicit 222 | 223 | 224 | def _process_service_label( 225 | key_without_prefix: str, 226 | label_value: str, 227 | stack: str, 228 | compose_service: str, 229 | host_address: str, 230 | ports: list[PortMapping], 231 | service_names: set[str], 232 | sources: dict[str, _TraefikServiceSource], 233 | ) -> None: 234 | if not key_without_prefix.startswith("http.services."): 235 | return 236 | parts = key_without_prefix.split(".") 237 | if len(parts) < _MIN_SERVICE_LABEL_PARTS: 238 | return 239 | traefik_service = parts[2] 240 | service_names.add(traefik_service) 241 | remainder = parts[3:] 242 | 243 | source = sources.get(traefik_service) 244 | if source is None: 245 | source = _TraefikServiceSource( 246 | traefik_service=traefik_service, 247 | stack=stack, 248 | compose_service=compose_service, 249 | host_address=host_address, 250 | ports=ports, 251 | ) 252 | sources[traefik_service] = source 253 | 254 | if remainder == ["loadbalancer", "server", "port"]: 255 | parsed = _parse_value(key_without_prefix, label_value) 256 | if isinstance(parsed, int): 257 | source.container_port = parsed 258 | elif remainder == ["loadbalancer", "server", "scheme"]: 259 | source.scheme = str(_parse_value(key_without_prefix, label_value)) 260 | 261 | 262 | def _process_service_labels( 263 | stack: str, 264 | compose_service: str, 265 | definition: dict[str, Any], 266 | all_services: dict[str, Any], 267 | host_address: str, 268 | env: dict[str, str], 269 | dynamic: dict[str, Any], 270 | sources: dict[str, _TraefikServiceSource], 271 | warnings: list[str], 272 | ) -> None: 273 | labels = normalize_labels(definition.get("labels"), env) 274 | if not labels: 275 | return 276 | enable_raw = labels.get("traefik.enable") 277 | if enable_raw is not None and _parse_value("enable", enable_raw) is False: 278 | return 279 | 280 | ports = get_ports_for_service(definition, all_services, env) 281 | routers: dict[str, bool] = {} 282 | service_names: set[str] = set() 283 | 284 | for label_key, label_value in labels.items(): 285 | if not label_key.startswith("traefik."): 286 | continue 287 | if label_key in {"traefik.enable", "traefik.docker.network"}: 288 | continue 289 | 290 | key_without_prefix = label_key[len("traefik.") :] 291 | if not key_without_prefix.startswith(("http.", "tcp.", "udp.")): 292 | continue 293 | 294 | _insert( 295 | dynamic, key_without_prefix.split("."), _parse_value(key_without_prefix, label_value) 296 | ) 297 | _process_router_label(key_without_prefix, routers) 298 | _process_service_label( 299 | key_without_prefix, 300 | label_value, 301 | stack, 302 | compose_service, 303 | host_address, 304 | ports, 305 | service_names, 306 | sources, 307 | ) 308 | 309 | _attach_default_services(stack, compose_service, routers, service_names, warnings, dynamic) 310 | 311 | 312 | def generate_traefik_config( 313 | config: Config, 314 | services: list[str], 315 | *, 316 | check_all: bool = False, 317 | ) -> tuple[dict[str, Any], list[str]]: 318 | """Generate Traefik dynamic config from compose 
labels. 319 | 320 | Args: 321 | config: The compose-farm config. 322 | services: List of service names to process. 323 | check_all: If True, check all services for warnings (ignore host filtering). 324 | Used by the check command to validate all traefik labels. 325 | 326 | Returns (config_dict, warnings). 327 | 328 | """ 329 | dynamic: dict[str, Any] = {} 330 | warnings: list[str] = [] 331 | sources: dict[str, _TraefikServiceSource] = {} 332 | 333 | # Determine Traefik's host from service assignment 334 | traefik_host = None 335 | if config.traefik_service and not check_all: 336 | traefik_host = config.services.get(config.traefik_service) 337 | 338 | for stack in services: 339 | raw_services, env, host_address = load_compose_services(config, stack) 340 | stack_host = config.services.get(stack) 341 | 342 | # Skip services on Traefik's host - docker provider handles them directly 343 | # (unless check_all is True, for validation purposes) 344 | if not check_all: 345 | if host_address.lower() in LOCAL_ADDRESSES: 346 | continue 347 | if traefik_host and stack_host == traefik_host: 348 | continue 349 | 350 | for compose_service, definition in raw_services.items(): 351 | if not isinstance(definition, dict): 352 | continue 353 | _process_service_labels( 354 | stack, 355 | compose_service, 356 | definition, 357 | raw_services, 358 | host_address, 359 | env, 360 | dynamic, 361 | sources, 362 | warnings, 363 | ) 364 | 365 | _finalize_http_services(dynamic, sources, warnings) 366 | return dynamic, warnings 367 | 368 | 369 | _TRAEFIK_CONFIG_HEADER = """\ 370 | # Auto-generated by compose-farm 371 | # https://github.com/basnijholt/compose-farm 372 | # 373 | # This file routes traffic to services running on hosts other than Traefik's host. 374 | # Services on Traefik's host use the Docker provider directly. 375 | # 376 | # Regenerate with: compose-farm traefik-file --all -o 377 | # Or configure traefik_file in compose-farm.yaml for automatic updates. 378 | 379 | """ 380 | 381 | 382 | def render_traefik_config(dynamic: dict[str, Any]) -> str: 383 | """Render Traefik dynamic config as YAML with a header comment.""" 384 | body = yaml.safe_dump(dynamic, sort_keys=False) 385 | return _TRAEFIK_CONFIG_HEADER + body 386 | --------------------------------------------------------------------------------