├── .devcontainer ├── Dockerfile ├── devcontainer.json └── scripts │ ├── cecho.sh │ ├── nb.sh │ └── postCreateCommand.sh ├── .dockerignore ├── .gitattributes ├── .github ├── FUNDING.yml ├── renovate.json5 └── workflows │ ├── bump-version.yml │ ├── docker-image.yml │ ├── docs.yml │ ├── stale_issues_and_prs.yml │ └── test_docker_image.yml ├── .gitignore ├── .pre-commit-config.yaml ├── Dockerfile ├── LICENSE ├── README.md ├── app ├── __init__.py ├── api │ ├── authorize.py │ ├── config.py │ ├── main.py │ ├── nautical_router.py │ ├── static │ │ ├── favicon.ico │ │ ├── index.html │ │ ├── scripts.js │ │ └── styles.css │ └── utils.py ├── backup.py ├── db.py ├── db.sh ├── defaults.env ├── entry.sh ├── env.sh ├── logger.py ├── logger.sh ├── nautical.sh ├── nautical_env.py └── utils.sh ├── dev └── docker-compose.yml ├── docs ├── advanced │ ├── advanced.md │ ├── homepage-dashboard.md │ ├── nfs-share.md │ └── remote-backups.md ├── arguments.md ├── developers │ ├── dev-container.md │ ├── docs.md │ ├── env.md │ └── tests.md ├── docker-compose.yml ├── docker-socket-proxy.md ├── index.md ├── installation.md ├── introduction.md ├── labels.md ├── media │ ├── Logo-large.png │ ├── Logo-transparent.png │ ├── Logo.png │ ├── Logo.psd │ ├── homepage-example.png │ ├── homepage-logo.png │ └── homepage_demo.png ├── q-and-a.md ├── rest-api.md ├── stylesheets │ └── extra.css └── updates.md ├── mkdocs.yml ├── pyproject.toml ├── pytest ├── test_api.py ├── test_backup.py ├── test_db.py ├── test_logger.py ├── test_nautical_env.py ├── test_rsync.py └── test_utils.py ├── requirements-dev.txt ├── requirements.txt ├── s6-overlay └── etc │ └── s6-overlay │ └── s6-rc.d │ ├── ci-test │ ├── dependencies.d │ │ ├── init-env │ │ └── init-nautical │ ├── run.sh │ ├── type │ └── up │ ├── init-backup-on-start │ ├── dependencies.d │ │ ├── init-env │ │ └── init-nautical │ ├── run.sh │ ├── timeout-up │ ├── type │ └── up │ ├── init-env │ ├── type │ └── up │ ├── init-nautical │ ├── dependencies.d │ │ └── init-env │ 
├── type │ └── up │ ├── svc-cron │ ├── dependencies.d │ │ ├── init-env │ │ └── init-nautical │ ├── run │ └── type │ ├── svc-http-api │ ├── dependencies.d │ │ ├── init-env │ │ └── init-nautical │ ├── run │ └── type │ └── user │ └── contents.d │ ├── ci-test │ ├── init-backup-on-start │ ├── init-env │ ├── init-nautical │ ├── svc-cron │ └── svc-http-api ├── snippets ├── docker-compose-example-no-tooltips.yml ├── docker-compose-example.yml ├── docker-compose-semver-example.yml ├── docker-compose-semver-major-example.yml ├── docker-example-tooltips.md ├── docker-run-example-no-tooltips.sh ├── docker-run-example.sh ├── docker-run-semver-example.sh ├── docker-run-semver-major-example.sh ├── docker-socket-proxy.yml └── exec_request_example.md └── tests ├── .simplecov ├── _fix_coverage_paths.sh ├── _integration_tests.sh ├── _validate_dockerfile.sh ├── _validate_rsync.sh ├── docker-compose.yml └── watchtower.yml /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | # https://github.com/devcontainers/images/tree/main/src/python 2 | FROM mcr.microsoft.com/devcontainers/python:3.13 3 | 4 | 5 | RUN echo "Installing packages..." && \ 6 | apt-get update && \ 7 | apt-get install -y --no-install-recommends nano curl jq dos2unix && \ 8 | rm -rf /var/lib/apt/lists/* && \ 9 | echo "Packages installed." 10 | 11 | # These files must be ignored in the docker ignore 12 | COPY requirements.txt /tmp/nautical/requirements.txt 13 | COPY requirements-dev.txt /tmp/nautical/requirements-dev.txt 14 | 15 | RUN echo "Installing python packages (for api)..." && \ 16 | # This will also install everything in requirements.txt file 17 | python3 -m pip --disable-pip-version-check --no-cache-dir install -r /tmp/nautical/requirements-dev.txt && \ 18 | echo "Installation complete." 
19 | 20 | # For development, prevents Nautical from stopping VSCode DevContainer itself 21 | LABEL nautical-backup.enable=false -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/devcontainer.json. 2 | { 3 | "name": "Nautical", 4 | // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile 5 | // "image": "mcr.microsoft.com/devcontainers/base:ubuntu", 6 | "build": { 7 | "dockerfile": "Dockerfile", 8 | "context": ".." 9 | }, 10 | // Features to add to the dev container. More info: https://containers.dev/features. 11 | "features": { 12 | "ghcr.io/devcontainers-contrib/features/zsh-plugins:0": { 13 | "plugins": "copypath git dirhistory extract z colorize command-not-found colored-man-pages sudo zsh-completions zsh-autosuggestions fast-syntax-highlighting", 14 | "omzPlugins": "https://github.com/zdharma-continuum/fast-syntax-highlighting https://github.com/zsh-users/zsh-autosuggestions https://github.com/zsh-users/zsh-completions https://github.com/marlonrichert/zsh-autocomplete" 15 | }, 16 | "ghcr.io/devcontainers/features/docker-outside-of-docker:1": {} 17 | }, 18 | "containerEnv": { 19 | "PYTHONPATH": ".", // Required for python to work 20 | "NAUTICAL_DB_PATH": "/workspaces/nautical-backup/dev/config", 21 | "NAUTICAL_DB_NAME": "nautical-db.json", 22 | "DEST_LOCATION": "/workspaces/nautical-backup/dev/destination", 23 | "SOURCE_LOCATION": "/workspaces/nautical-backup/dev/source", 24 | "LOG_LEVEL": "DEBUG", 25 | "COMPOSE_BAKE": "true" 26 | }, 27 | "remoteEnv": { 28 | "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}" 29 | }, 30 | "customizations": { 31 | "vscode": { 32 | "http.proxyStrictSSL": false, 33 | "extensions.verifySignature": false, 34 | "extensions": [ 35 | "mhutchie.git-graph", 36 | "foxundermoon.shell-format", 37 | 
"mads-hartmann.bash-ide-vscode", 38 | "jeff-hykin.better-shellscript-syntax", 39 | "tetradresearch.vscode-h2o", 40 | "meronz.manpages", 41 | "jeff-hykin.better-dockerfile-syntax", 42 | "streetsidesoftware.code-spell-checker", 43 | "ms-python.python", 44 | "ms-python.black-formatter", 45 | "njpwerner.autodocstring", 46 | "rangav.vscode-thunder-client", 47 | "ms-azuretools.vscode-docker", 48 | "tamasfe.even-better-toml", 49 | "littlefoxteam.vscode-python-test-adapter" 50 | ], 51 | "settings": { 52 | "terminal.integrated.defaultProfile.linux": "zsh", 53 | "terminal.integrated.profiles.linux": { 54 | "bash": { 55 | "path": "bash", 56 | "icon": "terminal-bash" 57 | }, 58 | "zsh": { 59 | "path": "zsh" 60 | } 61 | }, 62 | "files.associations": { 63 | "*.env": "shellscript" 64 | }, 65 | "python.testing.pytestEnabled": true, 66 | "python.testing.unittestEnabled": false, 67 | "python.testing.pytestArgs": [ 68 | "-s" 69 | ], 70 | "python.analysis.inlayHints.pytestParameters": true, 71 | "python.formatting.provider": "none", 72 | "[python]": { 73 | "editor.defaultFormatter": "ms-python.black-formatter" 74 | }, 75 | "black-formatter.args": [ 76 | "--line-length", 77 | "120" 78 | ], 79 | "python.languageServer": "Pylance" 80 | } 81 | } 82 | }, 83 | "forwardPorts": [ 84 | 8069 85 | ], 86 | "portsAttributes": { 87 | "8000": { 88 | "label": "Nautical Docs" 89 | }, 90 | "8069": { 91 | "label": "Nautical Rest API" 92 | } 93 | }, 94 | "remoteUser": "root", 95 | "postCreateCommand": "dos2unix .devcontainer/scripts/postCreateCommand.sh && chmod +x .devcontainer/scripts/postCreateCommand.sh && ./.devcontainer/scripts/postCreateCommand.sh" 96 | } -------------------------------------------------------------------------------- /.devcontainer/scripts/cecho.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cecho() { 4 | local RED="\033[0;31m" 5 | local GREEN="\033[0;32m" # <-- [0 means not bold 6 | local YELLOW="\033[1;33m" # 
<-- [1 means bold 7 | local CYAN="\033[1;36m" 8 | 9 | # ... Add more colors if you like 10 | 11 | NC="\033[0m" # No Color 12 | 13 | printf "${!1}${2} ${NC}\n" 14 | } 15 | 16 | # Call the command immediately 17 | cecho "$@" -------------------------------------------------------------------------------- /.devcontainer/scripts/nb.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Function to display help information 4 | show_help() { 5 | cecho CYAN "Nautical Backup Developer Commands:" 6 | echo " build - Build Nautical container" 7 | echo " run - Run already built Nautical container" 8 | echo " build-run - Build and run Nautical container" 9 | echo "" 10 | echo " unit-test - Run Unit tests locally using mounts" 11 | echo " integration - Build and run integration tests locally" 12 | # echo " build-test - Build and run Nautical Testing container" 13 | echo " test - Run already built test Nautical container" 14 | # echo " build-test-run - Build and run Nautical Testing container" 15 | echo "" 16 | # echo " dev - Run Nautical Development container" 17 | # echo " build-dev - Build and run Nautical Development container" 18 | echo " api - Run the Python API locally" 19 | echo " pytest - Pytest locally and capture coverage" 20 | echo " format - Format all python code with black" 21 | echo "" 22 | echo " docs - Run the Nautical documentation locally" 23 | } 24 | 25 | APP_HOME="/workspaces/nautical-backup" 26 | export APP_HOME 27 | 28 | # Function to execute commands 29 | execute_command() { 30 | case $1 in 31 | build) 32 | clear 33 | cecho CYAN "Building Nautical..." 34 | cd $APP_HOME 35 | docker build -t nautical-backup -t nautical-backup:test --no-cache --progress=plain --build-arg='NAUTICAL_VERSION=testing' . 36 | ;; 37 | build-run) 38 | cd $APP_HOME 39 | nb build 40 | cecho CYAN "Running Nautical..." 41 | cd dev 42 | docker-compose up 43 | ;; 44 | run) 45 | cecho CYAN "Running Nautical..." 
46 | cd $APP_HOME/dev 47 | docker-compose up 48 | ;; 49 | unit-test) 50 | cd $APP_HOME/tests 51 | docker compose run nautical-backup-test4 --exit-code-from nautical-backup-test4 52 | ;; 53 | unit-test-full) 54 | cd $APP_HOME 55 | nb build-test 56 | cd $APP_HOME/tests 57 | docker compose run nautical-backup-test4 --exit-code-from nautical-backup-test4 58 | ;; 59 | # dev) 60 | # cd $APP_HOME/tests 61 | # docker compose run nautical-backup-test5 --exit-code-from nautical-backup-test5 62 | # ;; 63 | # build-test) 64 | # clear 65 | # cecho CYAN "Building Test Nautical container..." 66 | # cd $APP_HOME 67 | # docker build -t minituff/nautical-test --no-cache --progress=plain --build-arg='NAUTICAL_VERSION=testing' --build-arg='TEST_MODE=0' . 68 | # ;; 69 | # build-dev) 70 | # cd $APP_HOME 71 | # nb build-test 72 | # ;; 73 | integration) 74 | cecho CYAN "Running Nautical integration tests..." 75 | cd $APP_HOME 76 | 77 | ./tests/_validate_dockerfile.sh 78 | 79 | cd $APP_HOME/tests 80 | 81 | cecho CYAN "Running integration test #1" 82 | docker compose run nautical-backup-test1 --exit-code-from nautical-backup-test1 83 | 84 | cecho CYAN "Running integration test #2" 85 | docker compose run nautical-backup-test2 --exit-code-from nautical-backup-test2 86 | 87 | # Staging integration test #3 88 | rm -rf source destination config 89 | mkdir -p source/watchtower-test 90 | echo "This is a test file" >> source/watchtower-test/test-file.txt 91 | 92 | cecho CYAN "Running integration test #3" 93 | docker compose -f watchtower.yml up -d 94 | docker compose run nautical-backup-test3 95 | docker compose -f watchtower.yml down 96 | 97 | cecho CYAN "Validating Nautical completed a successful backup..." 98 | 99 | ./_validate_rsync.sh 100 | exit_code=$? 101 | 102 | if [ $exit_code -eq 0 ]; then 103 | cecho GREEN "All tests passed!" 104 | else 105 | cecho RED "One or more tests failed!" 
106 | fi 107 | 108 | rm -rf source destination config 109 | 110 | ;; 111 | # build-test-run) 112 | # nb build-test 113 | # clear 114 | # nb test 115 | # ;; 116 | api) 117 | cd $APP_HOME 118 | cecho CYAN "Running Nautical API locally..." 119 | cecho GREEN "Viewable at http://localhost:8069/docs" 120 | python3 -m uvicorn app.api.main:app --host 0.0.0.0 --port 8069 --lifespan on --use-colors --reload 121 | ;; 122 | pytest) 123 | cd $APP_HOME 124 | clear 125 | cecho CYAN "Running Pytest..." 126 | python3 -m pytest --cov app --cov-report html --cov-report term 127 | ;; 128 | docs) 129 | cd $APP_HOME 130 | clear 131 | cecho CYAN "Running Nautical documentation locally..." 132 | python3 -m mkdocs serve 133 | ;; 134 | format) 135 | cd $APP_HOME 136 | clear 137 | cecho CYAN "Formatting Python code with Black..." 138 | python3 -m black --line-length 120 app tests 139 | ;; 140 | *) 141 | cecho RED "Unknown command: $1" 142 | show_help 143 | # echo "Use 'nb --help' for a list of available commands." 144 | ;; 145 | esac 146 | } 147 | 148 | # Check for --help argument 149 | if [ -z "$1" ] || [ "$1" == "--help" ]; then 150 | show_help 151 | else 152 | execute_command "$1" 153 | fi 154 | -------------------------------------------------------------------------------- /.devcontainer/scripts/postCreateCommand.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | WORKSPACE_DIR="/workspaces/nautical-backup" 4 | 5 | echo "Installing 'cecho' command..." 6 | dos2unix $WORKSPACE_DIR/.devcontainer/scripts/cecho.sh 7 | ln -s $WORKSPACE_DIR/.devcontainer/scripts/cecho.sh /usr/bin/cecho 8 | chmod +x /usr/bin/cecho 9 | 10 | cecho CYAN "Installing 'nb' command..." 11 | dos2unix $WORKSPACE_DIR/.devcontainer/scripts/nb.sh 12 | ln -s $WORKSPACE_DIR/.devcontainer/scripts/nb.sh /usr/bin/nb 13 | chmod +x /usr/bin/nb 14 | 15 | 16 | cecho CYAN "Installing python packages (for api)..." 
17 | python3 -m pip install -r $WORKSPACE_DIR/api/requirements.txt 18 | 19 | cecho CYAN "Installing python packages (for api tests)..." 20 | python3 -m pip install -r $WORKSPACE_DIR/pytest/requirements.txt 21 | 22 | 23 | cecho CYAN "Adding aliases (for convenience)..." 24 | for file in ~/.zshrc ~/.bashrc; do 25 | echo "alias home=\"cd /workspaces/nautical-backup\"" >> "$file" 26 | echo "alias cls=\"clear\"" >> "$file" 27 | done 28 | 29 | echo 'DISABLE_UPDATE_PROMPT=true # Auto update ohmyzsh and dont ask' >> ~/.zshrc 30 | 31 | find $WORKSPACE_DIR/app -type f -print0 | xargs -0 dos2unix 32 | find $WORKSPACE_DIR/s6-overlay/etc/s6-overlay/s6-rc.d -type f -print0 | xargs -0 dos2unix 33 | 34 | # cecho CYAN "Installing python packages (for docs)..." 35 | # python3 -m pip install -r $WORKSPACE_DIR/docs/requirements.txt 36 | 37 | cecho CYAN "Handling locales..." 38 | echo "export LANG=en_US.UTF-8" >> ~/.zshrc; 39 | LC_CTYPE=en_US.UTF-8 40 | echo en_US.UTF-8 UTF-8 > /etc/locale.gen 41 | locale-gen 42 | 43 | # Test commands 44 | nb --help 45 | 46 | cecho CYAN "Installing pre-commit hooks..." 47 | pre-commit install 48 | 49 | cecho "GREEN" "Success!! Nautical Development enviornment ready to go!!" 50 | cecho "GREEN" "Use the command 'nb --help' to get started." 51 | 52 | exit 0 53 | # No need to 'source ~/.zshrc' since the terminal won't be open yet 54 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # This dockerignore is being used as a whitelist. 
2 | 3 | # Ignore everything 4 | * 5 | 6 | # Files/Folders we need 7 | !Dockerfile 8 | !.dockerignore 9 | !app/** 10 | !s6-overlay/** 11 | !requirements-dev.txt 12 | !requirements.txt 13 | 14 | 15 | # Ignore unnecessary files inside allowed directories 16 | # This should go after the allowed directories 17 | **/*~ 18 | **/*.log 19 | **/.DS_Store 20 | **/Thumbs.db 21 | **/__pycache__ -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Optionally, enforce LF line endings for all text files in the repository 2 | * text=auto eol=lf 3 | 4 | # Enforce LF for all shell scripts 5 | *.sh text eol=lf 6 | 7 | # Enforce LF for any file named 'run' 8 | run text eol=lf -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [minituff] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., 4 | # patreon: # Replace with a single Patreon username 5 | # open_collective: # Replace with a single Open Collective username 6 | # ko_fi: # Replace with a single Ko-fi username 7 | # tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | # community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | # liberapay: # Replace with a single Liberapay username 10 | # issuehunt: # Replace with a single IssueHunt username 11 | # otechie: # Replace with a single Otechie username 12 | # lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry 13 | # custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] 14 | -------------------------------------------------------------------------------- /.github/renovate.json5: 
-------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:base", 5 | ":dependencyDashboard", 6 | "docker:enableMajor" 7 | ], 8 | "platform": "github", 9 | "dependencyDashboardTitle": "Renovate Dashboard 🤖", 10 | "prHourlyLimit": 0, 11 | "customManagers": [ 12 | { 13 | "customType": "regex", 14 | "description": "Self Docker image version updates", 15 | "fileMatch": [ 16 | ".*docker-run-example", 17 | ".*docker-run-semver", 18 | "README.md" 19 | ], 20 | "matchStrings": [ 21 | "(?minituff\/nautical-backup):(?.*?)([ |\n]|$)" 22 | ], 23 | "datasourceTemplate": "docker" 24 | }, 25 | { 26 | "customType": "regex", 27 | "fileMatch": ["^Dockerfile$"], 28 | "matchStrings": [ 29 | "#\\s*renovate:\\s*datasource=(?.*?) depName=(?.*?)( versioning=(?.*?))?\\sENV .*?_VERSION=\"(?.*)\"\\s" 30 | ], 31 | "versioningTemplate": "{{#if versioning}}{{{versioning}}}{{else}}semver{{/if}}" 32 | } 33 | ], 34 | "packageRules": [ 35 | { 36 | "description": "Auto merge nautical-backup docker image version updates in documentation", 37 | "groupName": "docs", 38 | "matchFileNames": [ 39 | "docs/**", 40 | "snippets/**", 41 | "README.md" 42 | ], 43 | "automerge": true, 44 | "rangeStrategy": "bump", 45 | "labels": ["documentation"], 46 | "assignees": ["minituff"], 47 | "ignoreTests": true 48 | }, 49 | { 50 | "description": "Dockerfile dependencies", 51 | "matchDatasources": ["docker"], 52 | "matchFileNames": [ 53 | "Dockerfile" 54 | ], 55 | "labels": ["depedency"], 56 | "assignees": ["minituff"], 57 | "automerge": true, 58 | "rangeStrategy": "bump", 59 | "ignoreTests": false, 60 | "schedule": ["at any time"], 61 | "minimumReleaseAge": "2 days", 62 | "commitBody": "[bump version]", 63 | "extends": [ 64 | "default:automergeDigest", 65 | "default:automergeBranchPush" 66 | ] 67 | }, 68 | { 69 | "description": "Python dependencies", 70 | "matchDatasources": ["pypi"], 71 | 
"matchFileNames": [ 72 | "**/requirements.txt", 73 | "**/requirements-dev.txt" 74 | ], 75 | "labels": ["depedency"], 76 | "assignees": ["minituff"], 77 | "automerge": true, 78 | "rangeStrategy": "bump", 79 | "ignoreTests": false, 80 | "schedule": ["at any time"], 81 | "minimumReleaseAge": "2 days", 82 | "commitBody": "[bump version]", 83 | "extends": [ 84 | "default:automergeDigest", 85 | "default:automergeBranchPush" 86 | ] 87 | } 88 | ], 89 | "hostRules": [ 90 | { 91 | "hostType": "repology", 92 | "timeout": 60000 93 | } 94 | ] 95 | } 96 | -------------------------------------------------------------------------------- /.github/workflows/bump-version.yml: -------------------------------------------------------------------------------- 1 | name: Bump version 2 | on: 3 | workflow_dispatch: # Allow manually running 4 | push: 5 | branches: 6 | - main 7 | paths: # Only if any of these files has changed 8 | - 'Dockerfile' 9 | - 'requirements.txt' 10 | 11 | jobs: 12 | build: 13 | # Requires '[bump version]' to be present in the Git commit message or commit body 14 | # This is used so we don't accidentally publish an image on a commit to main.cancel-timeout-minutes: 15 | # Renovate will add '[bump version]' when it changes dependencies within the Dockerfile. 
16 | if: contains(github.event.head_commit.message, '[bump version]') 17 | runs-on: ubuntu-latest 18 | permissions: 19 | contents: write 20 | 21 | steps: 22 | - uses: actions/checkout@v4 23 | 24 | # Bump the `patch` level 25 | - name: Bump version and push tag 26 | id: tag_version 27 | uses: mathieudutour/github-tag-action@v6.2 28 | with: 29 | github_token: ${{ secrets.REPO_GITHUB_SECRET }} 30 | fetch_all_tags: false 31 | default_bump: patch 32 | 33 | # This new Github Release will kickoff the the pipeline at `docker-image.yml` 34 | # A release tag is needed for the pipeline to run correctly 35 | - name: Create a GitHub release 36 | uses: ncipollo/release-action@v1.16.0 37 | with: 38 | # Get the tag name from previous step 39 | tag: ${{ steps.tag_version.outputs.new_tag }} 40 | name: ${{ steps.tag_version.outputs.new_tag }} 41 | generateReleaseNotes: true 42 | makeLatest: true 43 | 44 | -------------------------------------------------------------------------------- /.github/workflows/docker-image.yml: -------------------------------------------------------------------------------- 1 | name: Create and publish a Docker image 2 | 3 | on: 4 | workflow_dispatch: # Allow manually running 5 | push: 6 | # branches: 7 | # - 'main' 8 | # - 'dev' 9 | tags: 10 | - 'v*' # Run whenever a new tag is published 11 | 12 | jobs: 13 | # Run the tests before publishing Docker Image 14 | call_tests: 15 | name: Run Tests 16 | uses: ./.github/workflows/test_docker_image.yml 17 | secrets: inherit 18 | 19 | push_to_registries: 20 | name: Push Docker image 21 | runs-on: ubuntu-latest 22 | permissions: # Sets the permissions granted to the GITHUB_TOKEN for the actions in this job. 
23 | packages: write 24 | contents: read 25 | needs: [call_tests] # Will not run until tests have completed successfully 26 | steps: 27 | - uses: actions/checkout@v4 28 | 29 | - name: Set up QEMU # Allow multi-arch (arm64) builds 30 | uses: docker/setup-qemu-action@v3 31 | 32 | - name: Set up Docker Buildx 33 | uses: docker/setup-buildx-action@v3 34 | 35 | # This step uses docker/metadata-action to extract tags and labels that will be applied to the specified image. https://github.com/docker/metadata-action 36 | # The id "meta" allows the output of this step to be referenced in a subsequent step. The images value provides the base name for the tags and labels. 37 | - name: Extract metadata (tags, labels) for Docker 38 | id: meta 39 | uses: docker/metadata-action@v5.7.0 40 | with: 41 | images: | 42 | ${{ github.repository }} # minituff/nautical-backup 43 | ghcr.io/${{ github.repository }} 44 | # This stage requires being run attached to a tag, otherwise it will use the branch name 45 | tags: | 46 | # set latest tag for default branch 47 | type=raw,value=latest,enable={{is_default_branch}} 48 | # output 0.1.2 49 | type=semver,pattern={{version}} 50 | # output 0.1 51 | type=semver,pattern={{major}}.{{minor}} 52 | # disabled if major zero 53 | type=semver,pattern={{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }} 54 | 55 | - name: Log in to Docker Hub 56 | uses: docker/login-action@v3.4.0 57 | with: 58 | # This is your Docker Hub username and password. 59 | username: ${{ secrets.DOCKER_USERNAME }} 60 | password: ${{ secrets.DOCKER_PASSWORD }} 61 | 62 | - name: Log in to the GitHub Container registry 63 | uses: docker/login-action@v3.4.0 64 | with: 65 | registry: ghcr.io 66 | username: ${{ github.actor }} 67 | password: ${{ secrets.GITHUB_TOKEN }} 68 | 69 | # This step uses the docker/build-push-action action to build the image, based on your repository's Dockerfile. 
70 | # It uses the context parameter to define the build's context as the set of files located in the specified path. 71 | # For more information, see "Usage" in the README of the docker/build-push-action repository. https://github.com/docker/build-push-action#usage 72 | # It uses the tags and labels parameters to tag and label the image with the output from the "meta" step. 73 | - name: Build and push Docker images 74 | uses: docker/build-push-action@v6.18.0 75 | with: 76 | context: . 77 | push: true 78 | platforms: linux/amd64,linux/arm64 79 | tags: ${{ steps.meta.outputs.tags }} 80 | labels: ${{ steps.meta.outputs.labels }} 81 | build-args: | 82 | NAUTICAL_VERSION=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.version'] }} 83 | -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: deploy mkdocs 2 | 3 | on: 4 | workflow_dispatch: # Allow manually running 5 | push: 6 | branches: 7 | - main 8 | paths: # Only redeploy docs when necessary 9 | - 'docs/**' 10 | - 'snippets/**' 11 | - 'mkdocs.yml' 12 | 13 | permissions: 14 | contents: write 15 | 16 | jobs: 17 | deploy-docs: 18 | runs-on: ubuntu-latest 19 | steps: 20 | - uses: actions/checkout@v4 21 | - uses: actions/setup-python@v5 22 | with: 23 | python-version: 3.x 24 | - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV 25 | - uses: actions/cache@v4 26 | with: 27 | key: mkdocs-material-${{ env.cache_id }} 28 | path: .cache 29 | restore-keys: | 30 | mkdocs-material- 31 | - run: pip install mkdocs-material 32 | - run: mkdocs gh-deploy --force 33 | -------------------------------------------------------------------------------- /.github/workflows/stale_issues_and_prs.yml: -------------------------------------------------------------------------------- 1 | name: 'Close stale issues and PRs' 2 | on: 3 | workflow_dispatch: # Allow manually running 4 
| schedule: 5 | - cron: '30 1 * * *' 6 | 7 | jobs: 8 | stale: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/stale@v9 12 | # with: 13 | # stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.' 14 | # days-before-stale: 30 15 | # days-before-close: 5 16 | -------------------------------------------------------------------------------- /.github/workflows/test_docker_image.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | # This file contains 3 jobs that run in parallel 3 | 4 | on: 5 | workflow_dispatch: # Allow manually running 6 | workflow_call: # Allow another workflow to call this one 7 | pull_request: 8 | types: 9 | - opened 10 | - reopened 11 | - edited 12 | - synchronize 13 | branches: # Target 14 | - 'main' 15 | - 'dev' 16 | # push: 17 | # branches: 18 | # - 'group-updates' 19 | # tags: 20 | # - 'v*' 21 | 22 | jobs: 23 | 24 | python_unit_test_and_coverage: 25 | name: Python tests and coverage 26 | # if: contains(github.event.head_commit.message, '[ci]') 27 | runs-on: ubuntu-latest 28 | steps: 29 | - uses: actions/checkout@v4 30 | 31 | - name: Set up Python 3.10 32 | uses: actions/setup-python@v5 33 | with: 34 | python-version: '3.13' 35 | 36 | - name: Install dependencies 37 | run: | 38 | echo "Installing python requirements" 39 | pip install -r requirements-dev.txt 40 | 41 | - name: Run Black format 42 | run: python3 -m black --verbose --check --line-length 120 app 43 | 44 | - name: Prepare enviornment variables 45 | run: | 46 | echo "NAUTICAL_DB_PATH=${GITHUB_WORKSPACE}/dev/config" >> "$GITHUB_ENV" 47 | echo "SOURCE_LOCATION=${GITHUB_WORKSPACE}/dev/source" >> "$GITHUB_ENV" 48 | echo "DEST_LOCATION=${GITHUB_WORKSPACE}/dev/destination" >> "$GITHUB_ENV" 49 | echo "LOG_LEVEL=DEBUG" >> "$GITHUB_ENV" 50 | 51 | - name: Create necessary folders 52 | run: | 53 | mkdir -p $NAUTICAL_DB_PATH 54 | 
mkdir -p $SOURCE_LOCATION 55 | mkdir -p $DEST_LOCATION 56 | 57 | - name: Run tests and collect coverage 58 | run: | 59 | echo "NAUTICAL_DB_PATH: ${NAUTICAL_DB_PATH}" 60 | python3 -m pytest --cov app --cov-report html --cov-report term --cov-report xml 61 | 62 | - name: Upload reports to Codecov 63 | uses: codecov/codecov-action@v5 64 | env: 65 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} # Required for v4 66 | with: 67 | fail_ci_if_error: true 68 | files: coverage.xml 69 | 70 | - name: Archive code coverage results 71 | uses: actions/upload-artifact@v4 72 | with: 73 | name: python-code-coverage 74 | path: htmlcov/ 75 | retention-days: 5 76 | 77 | 78 | test_docker_architecture: 79 | name: Validate Docker Architecture 80 | # if: contains(github.event.head_commit.message, '[ci]') 81 | runs-on: ubuntu-latest 82 | steps: 83 | - uses: actions/checkout@v4 84 | 85 | - name: Validate Dockerfile supports multi-arch 86 | run: | 87 | echo "Checking if the base docker image supports amd64 and arm64..." 88 | bash ./tests/_validate_dockerfile.sh 89 | 90 | 91 | bash_unit_test_and_coverage: 92 | name: Integration tests 93 | # if: contains(github.event.head_commit.message, '[ci]') 94 | runs-on: ubuntu-latest 95 | steps: 96 | - uses: actions/checkout@v4 97 | 98 | - name: Set up Docker Buildx 99 | uses: docker/setup-buildx-action@v3 100 | 101 | - name: Build and export Docker image 102 | uses: docker/build-push-action@v6.18.0 103 | with: 104 | context: . 
105 | load: true # Do not push this image 106 | tags: minituff/nautical-test 107 | build-args: TEST_MODE=0 108 | 109 | - name: Run built image against Docker image 110 | run: | 111 | cd tests 112 | docker compose run nautical-backup-test1 113 | docker compose run nautical-backup-test2 114 | 115 | - name: Run integration tests against Docker image 116 | timeout-minutes: 3 117 | run: | 118 | cd tests 119 | 120 | rm -rf source config 121 | mkdir -p source/watchtower-test 122 | echo "This is a test file" >> source/watchtower-test/test-file.txt 123 | 124 | docker compose -f watchtower.yml up -d 125 | docker compose run nautical-backup-test3 126 | docker compose -f watchtower.yml down 127 | 128 | bash ./_validate_rsync.sh 129 | 130 | # - name: Upload reports to Codecov 131 | # uses: codecov/codecov-action@v4 132 | # env: 133 | # CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} # Required for v4 134 | # with: 135 | # fail_ci_if_error: true 136 | # files: tests/coverage/coverage.xml # The json file will not work here 137 | 138 | # - name: Archive code coverage results 139 | # uses: actions/upload-artifact@v4 140 | # with: 141 | # name: bash-code-coverage 142 | # path: tests/coverage/ 143 | # retention-days: 5 -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /dev/config 2 | /dev/source 3 | /dev/destination 4 | /dev/*.json 5 | coverage 6 | .vscode 7 | __pycache__ 8 | pytest_cache 9 | .coverage 10 | htmlcov 11 | coverage.xml 12 | /tests/destination 13 | fake-path/ 14 | fake-path2/ 15 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | # Using this mirror lets us use mypyc-compiled black, which is about 2x faster 3 | - repo: https://github.com/psf/black-pre-commit-mirror 4 | rev: 24.4.0 5 | hooks: 6 | - 
id: black 7 | # It is recommended to specify the latest version of Python 8 | # supported by your project here, or alternatively use 9 | # pre-commit's default_language_version, see 10 | # https://pre-commit.com/#top_level-default_language_version 11 | language_version: python3.11 -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Use base docker image; it contains the docker commands we need to start and stop containers. 2 | # Use this tool https://github.com/estesp/manifest-tool to get the multiplatform SHA. 3 | # For example: docker run --rm mplatform/mquery docker:cli 4 | FROM docker:24.0.7-cli-alpine3.18@sha256:a2a608408fa15d6694543a7308c2bfd1a7ea90a0e4ca989d0471ca7b8348fabb 5 | 6 | # The platform this image is created for (linux/amd64, linux/arm64) 7 | ARG TARGETPLATFORM 8 | ENV TARGETPLATFORM=${TARGETPLATFORM} 9 | 10 | # Nautical Version (for example "v0.2.1") or "main" if not set 11 | ARG NAUTICAL_VERSION="main" 12 | ENV NAUTICAL_VERSION=${NAUTICAL_VERSION} 13 | 14 | LABEL maintainer="minituff" \ 15 | # Prevent self backups 16 | nautical-backup.enable="false" \ 17 | nautical-backup.stop-before-backup="false" 18 | 19 | ARG TEST_MODE="-1" 20 | 21 | # renovate: datasource=github-releases depName=just-containers/s6-overlay versioning=loose 22 | ENV S6_OVERLAY_VERSION="3.1.6.2" 23 | 24 | # Install S6 Overlay 25 | ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-noarch.tar.xz /tmp 26 | RUN tar -C / -Jxpf /tmp/s6-overlay-noarch.tar.xz 27 | 28 | # This is needed because each arch needs a different S6 build. 
All other S6 files are the same 29 | RUN apk add --no-cache curl --virtual=s6build-dependencies && \ 30 | S6_OVERLAY_ARCH=$(case "${TARGETPLATFORM}" in \ 31 | "linux/amd64") echo "x86_64";; \ 32 | "linux/arm64") echo "aarch64";; \ 33 | *) echo "x86_64";; \ 34 | esac) && \ 35 | echo "Installing S6 Overlay v${S6_OVERLAY_VERSION} -${S6_OVERLAY_ARCH} for ${TARGETPLATFORM}" && \ 36 | curl -sSL "https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-${S6_OVERLAY_ARCH}.tar.xz" -o "/tmp/s6-arch.tar.xz" && \ 37 | tar -C / -Jxpf /tmp/s6-arch.tar.xz && \ 38 | apk del --purge s6build-dependencies 39 | 40 | # Add s6 optional symlinks (helps fix paths) 41 | ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-symlinks-noarch.tar.xz /tmp 42 | RUN tar -C / -Jxpf /tmp/s6-overlay-symlinks-noarch.tar.xz 43 | ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-symlinks-arch.tar.xz /tmp 44 | RUN tar -C / -Jxpf /tmp/s6-overlay-symlinks-arch.tar.xz 45 | 46 | # Copy all necessary files into the container (from /app in the repository to /app in the container) 47 | COPY app app 48 | COPY requirements.txt app/requirements.txt 49 | 50 | # Packages are sourced from https://pkgs.alpinelinux.org/packages?branch=v3.18&repo=main tracked from https://repology.org/projects/?inrepo=alpine_3_18 51 | # Renovate-Bot will update this Dockerfile once and updae is realsed to these packages. The comments are needed to match pkg info. 
52 | 53 | # renovate: datasource=repology depName=alpine_3_18/bash versioning=loose 54 | ENV BASH_VERSION="5.2.15" 55 | # renovate: datasource=repology depName=alpine_3_18/rsync versioning=loose 56 | ENV RSYNC_VERSION="3.2.7" 57 | # renovate: datasource=repology depName=alpine_3_18/tzdata versioning=loose 58 | ENV TZ_DATA_VERSION="2024" 59 | # renovate: datasource=repology depName=alpine_3_18/dos2unix versioning=loose 60 | ENV DOS2UNIX_VERSION="7.4.4" 61 | # renovate: datasource=repology depName=alpine_3_18/jq versioning=loose 62 | ENV JQ_VERSION="1.6" 63 | # renovate: datasource=repology depName=alpine_3_18/curl versioning=loose 64 | ENV CURL_VERSION="8.9.1" 65 | # renovate: datasource=repology depName=alpine_3_18/7zip versioning=loose 66 | ENV SEVENZIP_VERSION="22.01" 67 | # renovate: datasource=repology depName=alpine_3_18/python3 versioning=loose 68 | ENV PYTHON_VERSION="3.11" 69 | # renovate: datasource=repology depName=alpine_3_18/py3-pip versioning=loose 70 | ENV PIP_VERSION="23.1.2" 71 | # renovate: datasource=repology depName=alpine_3_18/ruby-full versioning=loose 72 | ENV RUBY_VERSION="3.2.4" 73 | 74 | # Hide the S6 init logs. 2 = start and stop operations, 1 = warnings and errors, 0 = errors. Default 2: Options 0 (low) -- 5 (high) 75 | ENV S6_VERBOSITY=1 76 | 77 | # Set the maximum time to wait for services to be ready (0=forever). Needed for BACKUP_ON_START since it could take time. 
78 | ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0 79 | 80 | # Install dependencies 81 | RUN \ 82 | echo "**** Install build packages (will be uninstalled later) ****" && \ 83 | apk add --no-cache --virtual=build-dependencies \ 84 | dos2unix=~"${DOS2UNIX_VERSION}" && \ 85 | echo "**** Install runtime packages (required at runtime) ****" && \ 86 | apk add --no-cache \ 87 | bash>="${BASH_VERSION}" \ 88 | rsync>="${RSYNC_VERSION}" \ 89 | tzdata>="${TZ_DATA_VERSION}" \ 90 | jq>="${JQ_VERSION}" \ 91 | curl>="${CURL_VERSION}" \ 92 | 7zip>="${SEVENZIP_VERSION}" \ 93 | python3>="${PYTHON_VERSION}" \ 94 | py3-pip>="${PIP_VERSION}" && \ 95 | echo "**** Making the entire /app folder executable ****" && \ 96 | chmod -R +x /app && \ 97 | echo "**** Making the all files in the /app folder Unix format ****" && \ 98 | find /app -type f -print0 | xargs -0 dos2unix && \ 99 | echo "**** Making all files in ./etc/s6-overlay/s6-rc.d Unix format ****" && \ 100 | find ./etc/s6-overlay/s6-rc.d -type f -print0 | xargs -0 dos2unix && \ 101 | echo "**** Install Python packages ****" && \ 102 | python3 -m pip install --no-cache-dir --upgrade -r /app/requirements.txt && \ 103 | echo "**** Cleanup ****" && \ 104 | apk del --purge build-dependencies 105 | 106 | # Conditionally execute commands based on TESTMODE 107 | RUN if [ "$TEST_MODE" != "-1" ]; then \ 108 | echo "=== TEST MODE ENABLED ===" && \ 109 | echo "**** Installing TEST packages ****" && \ 110 | apk add --no-cache \ 111 | ruby-full=~"${RUBY_VERSION}" && \ 112 | echo "**** Installing ruby packages (for tests) ****" && \ 113 | gem install bashcov simplecov-cobertura simplecov-html; \ 114 | fi 115 | 116 | # Required for Python imports to work 117 | ENV PYTHONPATH="." 
118 | 119 | # Add S6 files 120 | COPY --chmod=755 s6-overlay/ / 121 | 122 | VOLUME [ "/app/source" ] 123 | VOLUME [ "/app/destination" ] 124 | VOLUME [ "/config" ] 125 | 126 | # Only should be exposed when running in test mode 127 | VOLUME [ "/tests" ] 128 | 129 | # Publish this port to enable the HTTP endpoints 130 | EXPOSE 8069 131 | 132 | # Hit the healthcheck endpoint to ensure the container is healthy every 30 seconds 133 | HEALTHCHECK --start-period=10s --interval=20s --timeout=10s CMD curl --fail http://localhost:8069 || exit 1 134 | 135 | # Run the entry script and pass all variables to it 136 | ENTRYPOINT ["/init"] 137 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 | Logo 3 | 4 | 5 | A simple Docker bind mount (volume) backup tool. 6 | 7 | --- 8 | 9 |
10 | 11 | [![Pulls from DockerHub](https://img.shields.io/docker/pulls/minituff/nautical-backup?logo=docker)](https://hub.docker.com/r/minituff/nautical-backup) 12 | [![Docker Image Version (latest semver)](https://img.shields.io/docker/v/minituff/nautical-backup/latest?label=latest%20version)](https://hub.docker.com/r/minituff/nautical-backup) 13 | [![Docker Image Size (tag)](https://img.shields.io/docker/image-size/minituff/nautical-backup/latest?label=size)](https://hub.docker.com/r/minituff/nautical-backup) 14 | [![Code Coverage](https://codecov.io/gh/Minituff/nautical-backup/graph/badge.svg?token=90PUDWN9XU)](https://codecov.io/gh/Minituff/nautical-backup) 15 | 16 | 17 | 18 |
19 | 20 | ### Documentation 21 | Full documentation is available at [https://minituff.github.io/nautical-backup](https://minituff.github.io/nautical-backup) 22 | 23 | ### Quick Start 24 | 25 | Docker Compose 26 | ```yaml 27 | services: 28 | nautical-backup: 29 | image: minituff/nautical-backup:2 30 | container_name: nautical-backup 31 | volumes: 32 | - /var/run/docker.sock:/var/run/docker.sock 33 | - /config:/config 34 | - /source:/app/source:ro 35 | - /destination:/app/destination 36 | environment: # Optional variables 37 | - TZ=America/Los_Angeles 38 | - CRON_SCHEDULE=0 4 * * * 39 | - SKIP_CONTAINERS=example1,example2,example3 40 | ``` 41 | Docker CLI 42 | ```bash 43 | docker run -d \ 44 | --name nautical-backup \ 45 | -v /var/run/docker.sock:/var/run/docker.sock \ 46 | -v /config:/config \ 47 | -v /source:/app/source:ro \ 48 | -v /destination:/app/destination \ 49 | -e TZ="America/Los_Angeles" \ 50 | -e CRON_SCHEDULE="0 4 * * *" \ 51 | -e SKIP_CONTAINERS="example1,example2,example3" \ 52 | minituff/nautical-backup:2 53 | ``` 54 | -------------------------------------------------------------------------------- /app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/app/__init__.py -------------------------------------------------------------------------------- /app/api/authorize.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | from fastapi import HTTPException, Depends, status, Security 3 | from fastapi.security import HTTPBasic, HTTPBasicCredentials 4 | from fastapi.responses import PlainTextResponse, JSONResponse 5 | from typing import Annotated 6 | import secrets 7 | from pydantic_settings import BaseSettings 8 | from functools import lru_cache 9 | 10 | 11 | from app.api.config import Settings 12 | 13 | 14 | @lru_cache 15 | def get_settings() -> 
Settings: 16 | return Settings() 17 | 18 | 19 | security = HTTPBasic() 20 | 21 | 22 | def authorize( 23 | credentials: Annotated[HTTPBasicCredentials, Depends(security)], 24 | settings: Annotated[Settings, Depends(get_settings)], 25 | ): 26 | correct_username_bytes = bytes(settings.HTTP_REST_API_USERNAME, "utf-8") 27 | correct_password_bytes = bytes(settings.HTTP_REST_API_PASSWORD, "utf-8") 28 | 29 | current_username_bytes = credentials.username.encode("utf8") 30 | current_password_bytes = credentials.password.encode("utf8") 31 | 32 | is_correct_username = secrets.compare_digest(current_username_bytes, correct_username_bytes) 33 | is_correct_password = secrets.compare_digest(current_password_bytes, correct_password_bytes) 34 | 35 | if not (is_correct_username and is_correct_password): 36 | raise HTTPException( 37 | status_code=status.HTTP_401_UNAUTHORIZED, 38 | detail="Incorrect username or password", 39 | headers={"WWW-Authenticate": "Basic"}, 40 | ) 41 | return credentials.username 42 | -------------------------------------------------------------------------------- /app/api/config.py: -------------------------------------------------------------------------------- 1 | from pydantic_settings import BaseSettings 2 | 3 | 4 | # Environment Variables 5 | class Settings(BaseSettings): 6 | NAUTICAL_VERSION: str = "0.0.0" 7 | HTTP_REST_API_USERNAME: str = "admin" 8 | HTTP_REST_API_PASSWORD: str = "password" 9 | -------------------------------------------------------------------------------- /app/api/main.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI, Depends 2 | from fastapi.security import HTTPBasic 3 | import uvicorn 4 | import os 5 | from typing import Annotated 6 | from fastapi.responses import FileResponse 7 | from fastapi.staticfiles import StaticFiles 8 | from functools import lru_cache 9 | from contextlib import asynccontextmanager 10 | 11 | from app.api.config import Settings 12 | from 
app.api.authorize import authorize 13 | import app.api.nautical_router as nautical_router 14 | from app.logger import Logger 15 | 16 | 17 | @asynccontextmanager 18 | async def lifespan(app: FastAPI): 19 | logger = Logger() 20 | # Steps that will be performed on startup events only once. 21 | logger.log_this("Starting API on port 8069...", "INFO") 22 | yield 23 | # Steps that will happen on shutdown event 24 | logger = Logger() 25 | logger.log_this("Shutting down API...", "INFO") 26 | 27 | 28 | @lru_cache 29 | def get_settings(): 30 | return Settings() 31 | 32 | 33 | # Mount the directory containing your static files (HTML, CSS, JS) as a static files route. 34 | script_dir = os.path.dirname(__file__) 35 | static_abs_file_path = os.path.join(script_dir, "static/") 36 | 37 | app = FastAPI( 38 | title="Nautical Backup", 39 | summary="A simple Docker volume backup tool 🚀", 40 | version=get_settings().NAUTICAL_VERSION, 41 | lifespan=lifespan, 42 | ) 43 | 44 | security = HTTPBasic() 45 | 46 | # Import other endpoints 47 | app.include_router(nautical_router.router) 48 | 49 | app.mount("/static", StaticFiles(directory=static_abs_file_path, html=True), name="static") 50 | 51 | 52 | @app.get("/") 53 | async def read_index(): 54 | return FileResponse(f"{static_abs_file_path}/index.html") 55 | 56 | 57 | @app.get("/health-check") 58 | async def health_check(): 59 | return {"status": "healthy"} 60 | 61 | 62 | @app.get("/auth") 63 | def auth(username: Annotated[str, Depends(authorize)]): 64 | return {"username": username} 65 | 66 | 67 | if __name__ == "__main__": 68 | uvicorn.run("main:app", host="0.0.0.0", port=8069, reload=True, log_level="debug") 69 | -------------------------------------------------------------------------------- /app/api/nautical_router.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Union, Optional 2 | from fastapi import HTTPException, APIRouter, Depends, Path, status, BackgroundTasks 3 | 
import subprocess 4 | from fastapi.responses import JSONResponse 5 | from fastapi.encoders import jsonable_encoder 6 | from typing import Annotated 7 | 8 | from app.api.authorize import authorize 9 | from app.api.utils import next_cron_occurrences 10 | from app.db import DB 11 | 12 | # All routes in this file start with /nautical 13 | router = APIRouter(prefix="/api/v1/nautical", tags=["nautical"]) 14 | 15 | db = DB() 16 | 17 | 18 | def kickoff_nautical(): 19 | try: 20 | subprocess.run(["nautical"], check=True) 21 | return {"message": f"Nautical Backup completed successfully"} 22 | except subprocess.CalledProcessError as e: 23 | raise HTTPException(status_code=500, detail=str(e.stderr.decode())) 24 | 25 | 26 | @router.get("/dashboard", summary="The most useful information", response_class=JSONResponse) 27 | def dashboard(username: Annotated[str, Depends(authorize)]) -> JSONResponse: 28 | """ 29 | This returns a summary of the Nautical container. Useful for 3rd party applications. 30 | """ 31 | 32 | next_crons = next_cron_occurrences(5) 33 | 34 | d = { 35 | "next_cron": next_crons, 36 | "next_run": next_crons.get("1", [None, None])[1] if next_crons else None, 37 | "last_cron": db.get("last_cron", "None"), 38 | "number_of_containers": db.get("number_of_containers", 0), 39 | "last_backup_seconds_taken": db.get("last_backup_seconds_taken", 0), 40 | "completed": db.get("containers_completed", 0), 41 | "skipped": db.get("containers_skipped", 0), 42 | "errors": db.get("errors", 0), 43 | "backup_running": db.get("containers_skipped", "false"), 44 | } 45 | return JSONResponse(content=jsonable_encoder(d)) 46 | 47 | 48 | @router.get( 49 | "/next_cron/{occurrences}", 50 | summary="Get the next CRON occurrences", 51 | response_class=JSONResponse, 52 | ) 53 | def next_cron( 54 | username: Annotated[str, Depends(authorize)], 55 | occurrences: Annotated[int, Path(title="The ID of the item to get", ge=1, le=100)], 56 | ) -> JSONResponse: 57 | d = next_cron_occurrences(occurrences) 
58 | res = JSONResponse(content=jsonable_encoder(d)) 59 | return res 60 | 61 | 62 | @router.post( 63 | "/start_backup", 64 | summary="Start backup now, will not respond until the backup has been completed.", 65 | response_class=JSONResponse, 66 | ) 67 | def start_backup(username: Annotated[str, Depends(authorize)]): 68 | """ 69 | Start a backup now and respond when completed. This respects all environment and docker labels. 70 | """ 71 | return kickoff_nautical() 72 | 73 | 74 | @router.post( 75 | "/kickoff_backup", 76 | summary="Start backup now, will immediatly respond even though the backup continues in the background", 77 | response_class=JSONResponse, 78 | ) 79 | async def kickoff_backup(username: Annotated[str, Depends(authorize)], background_tasks: BackgroundTasks): 80 | """ 81 | Start a backup now and respond immediately. This respects all environment and docker labels. 82 | """ 83 | # Run the func 'kickoff_nautical' in the background, but return immediately 84 | background_tasks.add_task(kickoff_nautical) 85 | 86 | return {"message": f"Nautical Backup started successfully"} 87 | -------------------------------------------------------------------------------- /app/api/static/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/app/api/static/favicon.ico -------------------------------------------------------------------------------- /app/api/static/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Nautical Backup 7 | 8 | 9 | 10 | 11 |
12 |
13 |

Nautical Backup

14 | View the API 15 |
16 |
17 | 18 | 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /app/api/static/scripts.js: -------------------------------------------------------------------------------- 1 | document.addEventListener('DOMContentLoaded', function() { 2 | 3 | }); 4 | -------------------------------------------------------------------------------- /app/api/static/styles.css: -------------------------------------------------------------------------------- 1 | body, html { 2 | height: 100%; 3 | margin: 0; 4 | } 5 | 6 | body { 7 | font-family: Arial, sans-serif; 8 | background: linear-gradient(to right, #011b3c, #5365c9); 9 | color: white; 10 | } 11 | -------------------------------------------------------------------------------- /app/api/utils.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Optional, Union 2 | import croniter 3 | import pytz 4 | import os 5 | from datetime import datetime 6 | 7 | 8 | def next_cron_occurrences( 9 | occurrences: Optional[int] = 5, now: Optional[datetime] = None 10 | ) -> Optional[dict[str | int, Any]]: 11 | cron_enabled = os.getenv("CRON_SCHEDULE_ENABLED", "true").lower() 12 | if cron_enabled == "false": 13 | return None 14 | 15 | cron_expression = os.getenv("CRON_SCHEDULE", "0 4 * * *") 16 | timezone = os.getenv("TZ", "Etc/UTC") 17 | tz = pytz.timezone(timezone) 18 | 19 | if now == None: 20 | now = datetime.now(tz) 21 | 22 | # Create a cron iterator with the timezone 23 | cron = croniter.croniter(cron_expression, start_time=now) 24 | 25 | response: dict[Union[str, int], Any] = { 26 | "cron": f"{cron_expression}", 27 | "tz": f"{tz}", 28 | } 29 | 30 | if occurrences == None or occurrences <= 0: 31 | occurrences = 1 32 | elif occurrences >= 100: 33 | occurrences = 100 34 | 35 | for i in range(occurrences): 36 | next_occurrence: datetime = cron.get_next(datetime) 37 | response[str(i + 1)] = [ 38 | next_occurrence.strftime("%A, %B %d, %Y at 
%I:%M %p"), 39 | next_occurrence.strftime("%m/%d/%y %H:%M"), 40 | ] 41 | 42 | return response 43 | -------------------------------------------------------------------------------- /app/db.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | from typing import Any, Optional, Union 4 | from pathlib import Path 5 | from app.logger import Logger, LogType, LogLevel 6 | from datetime import datetime 7 | 8 | 9 | class DB: 10 | def __init__(self, db_path: Union[str, Path] = ""): 11 | self.db_path: str = str(db_path) 12 | if self.db_path == "": 13 | NAUTICAL_DB_PATH = os.getenv("NAUTICAL_DB_PATH", "/config") 14 | NAUTICAL_DB_NAME = os.getenv("NAUTICAL_DB_NAME", "nautical-db.json") 15 | self.db_path = f"{NAUTICAL_DB_PATH}/{NAUTICAL_DB_NAME}" 16 | self.logger = Logger() 17 | 18 | if os.path.exists(self.db_path) and not os.path.isfile(self.db_path): 19 | # If db_path is a folder (not a file), just make it a file 20 | self.db_path += "/nautical-db.json" 21 | 22 | self._initialize_db() 23 | self._seed_db() 24 | 25 | def __repr__(self) -> str: 26 | return str({"db_path": self.db_path, "db": dict(self._read_db())}) 27 | 28 | def log_this(self, log_message, log_level=LogLevel.INFO, log_type: LogType = LogType.DEFAULT) -> None: 29 | """Wrapper for log this""" 30 | return self.logger.log_this(log_message, log_level, log_type) # TODO: Fix 31 | 32 | def _initialize_db(self): 33 | """Initialize the database if it doesn't exist.""" 34 | if os.path.isfile(self.db_path): 35 | self.log_this(f"Connected to database at '{self.db_path}'", log_type=LogType.INIT) 36 | else: 37 | self.log_this(f"Initializing database at '{self.db_path}'...", log_type=LogType.INIT) 38 | Path(self.db_path).parent.mkdir(parents=True, exist_ok=True) 39 | 40 | if not os.path.isfile(self.db_path): 41 | self.log_this(f"Creating Database at path: '{self.db_path}'...", log_type=LogType.INIT) 42 | with open(self.db_path, "w") as db_file: 43 | current_date = 
datetime.now().strftime("%Y-%m-%d %H:%M:%S") 44 | json.dump( 45 | { 46 | "created_at": f"{current_date}", 47 | }, 48 | db_file, 49 | ) 50 | 51 | self.log_this(f"Database initialized at '{self.db_path}'...", log_type=LogType.INIT) 52 | 53 | def _seed_db(self): 54 | """Seed the database with default values.""" 55 | with open(self.db_path, "r+") as db_file: 56 | data = json.load(db_file) 57 | 58 | if data.get("backup_running") is None: 59 | data["backup_running"] = False 60 | 61 | if data.get("containers_skipped") is None: 62 | data["containers_skipped"] = 0 63 | 64 | if data.get("containers_completed") is None: 65 | data["containers_completed"] = 0 66 | 67 | if data.get("number_of_containers") is None: 68 | data["number_of_containers"] = 0 69 | 70 | if data.get("errors") is None: 71 | data["errors"] = 0 72 | 73 | db_file.seek(0) 74 | json.dump(data, db_file, indent=4) 75 | db_file.truncate() 76 | 77 | def _read_db(self): 78 | if os.path.exists(self.db_path) and os.path.isfile(self.db_path): 79 | with open(self.db_path, "r") as f: 80 | return json.load(f) 81 | else: 82 | return {} 83 | 84 | def _write_db(self, data): 85 | with open(self.db_path, "w") as f: 86 | json.dump(data, f, indent=4) 87 | 88 | def get(self, key: str, default=None): 89 | data = self._read_db() 90 | return data.get(key, default) 91 | 92 | def put(self, key: str, value): 93 | data = self._read_db() 94 | data[key] = value 95 | self._write_db(data) 96 | 97 | def delete(self, key: str): 98 | data = self._read_db() 99 | if key in data: 100 | del data[key] 101 | self._write_db(data) 102 | 103 | def dump_json(self): 104 | return self._read_db() 105 | 106 | 107 | if __name__ == "__main__": 108 | db = DB() # This will seed and create the database if necessary (run at startup) 109 | -------------------------------------------------------------------------------- /app/db.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | 3 | # Helper 
function to get the database path from parameters 4 | get_db_path() { 5 | local db_path="$NAUTICAL_DB_PATH/$NAUTICAL_DB_NAME" 6 | if [[ "$1" == "--db" ]] && [[ -n "$2" ]]; then 7 | db_path="$2" 8 | # Remove the first two parameters (--db and path) 9 | shift 2 10 | fi 11 | echo "$db_path" 12 | } 13 | 14 | # Function to get value by key 15 | get() { 16 | local db_path=$(get_db_path "$@") 17 | local key="$1" 18 | 19 | jq --raw-output ".$key" "$db_path" 20 | } 21 | 22 | # Function to insert or update a record 23 | # Only works at the root path 24 | put() { 25 | local db_path=$(get_db_path "$@") 26 | local key="$1" 27 | local value="$2" 28 | local tmp_file="$(mktemp)" # Create a unique temporary file using mktemp 29 | 30 | jq --arg key "$key" --arg value "$value" '.[$key] = $value' "$db_path" > "$tmp_file" && mv "$tmp_file" "$db_path" 31 | } 32 | 33 | 34 | # Function to delete a record by key 35 | delete() { 36 | local db_path=$(get_db_path "$@") 37 | local key="$1" 38 | 39 | jq "del(.$key)" "$db_path" > tmp && mv tmp "$db_path" 40 | } 41 | 42 | add_current_datetime() { 43 | local db_path=$(get_db_path "$@") 44 | local key="$1" # The JSON key where the date and time will be added 45 | local tmp_file="$(mktemp)" # Create a unique temporary file using mktemp 46 | 47 | # Format the current date and time 48 | local datetime_format1=$(date +"%A, %B %d, %Y at %I:%M %p") 49 | local datetime_format2=$(date +"%m/%d/%y %H:%M") 50 | 51 | # Update or create the key with the formatted date and time 52 | jq --arg key "$key" --arg datetime1 "$datetime_format1" --arg datetime2 "$datetime_format2" \ 53 | 'if .[$key] then .[$key] |= if type == "array" then . 
+ [$datetime1, $datetime2] else [$datetime1, $datetime2] end else .[$key] = [$datetime1, $datetime2] end' \ 54 | "$db_path" > "$tmp_file" && mv "$tmp_file" "$db_path" 55 | } 56 | 57 | 58 | 59 | 60 | # Function to show help 61 | help() { 62 | echo ' 63 | Usage: db [--db ] [value] 64 | 65 | Commands: 66 | get - Get value of record by key 67 | put - Insert or update record 68 | delete - Delete record by key 69 | 70 | Dev Commands: 71 | add_current_datetime - Insert a date at a key 72 | get_db_path - Get the database path 73 | 74 | Options: 75 | --db - Specify the database path (default is '"$NAUTICAL_DB_PATH/$NAUTICAL_DB_NAME"') 76 | 77 | Example: 78 | db put --db /path/to/db "key" "value" 79 | ' 80 | } 81 | 82 | # Start 83 | case "$1" in 84 | get|put|add_current_datetime|get_db_path|delete) 85 | if [[ "$2" == "--help" ]]; then 86 | help 87 | exit 0 88 | fi 89 | "$@" 90 | ;; 91 | --help) 92 | help 93 | ;; 94 | *) 95 | echo "Unknown method '$1'" 96 | help 97 | exit 1 98 | ;; 99 | esac 100 | -------------------------------------------------------------------------------- /app/defaults.env: -------------------------------------------------------------------------------- 1 | 2 | # Set default timezone 3 | TZ=Etc/UTC 4 | 5 | # Default = Every day at 4am 6 | CRON_SCHEDULE=0 4 * * * 7 | 8 | # Enable Nautical to run on a CRON schedule 9 | CRON_SCHEDULE_ENABLED=true 10 | 11 | # Default enable the report file 12 | REPORT_FILE=true 13 | 14 | # Run the backup immediately on start 15 | BACKUP_ON_START=false 16 | 17 | # Create a new folder on each backup 18 | USE_DEST_DATE_FOLDER=false 19 | 20 | # Options are "container/date" or "date/container" 21 | DEST_DATE_PATH_FORMAT=date/container 22 | 23 | # Python Date format 24 | DEST_DATE_FORMAT=%Y-%m-%d 25 | 26 | # Use the precise date and time for fomatting the destination folder 27 | # Otherwise, use the time Nautical started the backup (not when the container was backed up) 28 | USE_CONTAINER_BACKUP_DATE=true 29 | 30 | # Use the 
default rsync args "-ahq" (archive, human-readable, quiet) 31 | USE_DEFAULT_RSYNC_ARGS=true 32 | 33 | # Require the Docker Label `nautical-backup.enable=true` to be present on each container or it will be skipped 34 | REQUIRE_LABEL=false 35 | 36 | # Label prefix 37 | LABEL_PREFIX=nautical-backup 38 | 39 | # How long to wait for a container to stop before killing it 40 | STOP_TIMEOUT=10 41 | 42 | # Set the default log level to INFO 43 | LOG_LEVEL=INFO 44 | 45 | # Set the default log level for the report file to INFO 46 | REPORT_FILE_LOG_LEVEL=INFO 47 | 48 | # Only write to the report file when backups run, not on initialization 49 | REPORT_FILE_ON_BACKUP_ONLY=true 50 | 51 | # Mirror the source directory name to the destination directory name 52 | KEEP_SRC_DIR_NAME=true 53 | 54 | # Usually combined with BACKUP_ON_START. Essentially, this just exits the container after 1 run. 55 | EXIT_AFTER_INIT=false 56 | 57 | # Log the rsync commands to the console (and/or report file) 58 | LOG_RSYNC_COMMANDS=false 59 | 60 | # Run the backup only once and then exit (whether it is from CRON or BACKUP_ON_START) 61 | RUN_ONCE=false 62 | 63 | # Do not include a trailing slash 64 | SOURCE_LOCATION=/app/source 65 | DEST_LOCATION=/app/destination 66 | 67 | # A value greater than -1 means the container will run in test mode. 68 | TEST_MODE=-1 69 | 70 | HTTP_REST_API_ENABLED=true 71 | HTTP_REST_API_USERNAME=admin 72 | HTTP_REST_API_PASSWORD=password 73 | 74 | # When do backup the additional folders? "before", "after", or "both" the container backups 75 | ADDITIONAL_FOLDERS_WHEN=before 76 | 77 | # Use the destination date folder for the additional folders 78 | ADDITIONAL_FOLDERS_USE_DEST_DATE_FOLDER=false 79 | 80 | # Path to the Nautical database. 81 | NAUTICAL_DB_PATH=/config 82 | NAUTICAL_DB_NAME=nautical-db.json 83 | 84 | # Required for Python to work properly 85 | PYTHONPATH=. 
86 | 87 | # ------ Default Empty Values ------ # 88 | 89 | # Run a curl request before the backup starts 90 | PRE_BACKUP_CURL="" 91 | POST_BACKUP_CURL="" 92 | 93 | # Apply custom rsync args (in addition to the default args) 94 | RSYNC_CUSTOM_ARGS="" 95 | 96 | # Assuming OVERRIDE_SOURCE_DIR is passed as an environment variable in the format "container1:dir1,container2:dir2,..." 97 | OVERRIDE_SOURCE_DIR="" 98 | 99 | # Assuming OVERRIDE_DEST_DIR is passed as an environment variable in the format "container1:dir1,container2:dir2,..." 100 | OVERRIDE_DEST_DIR="" 101 | 102 | # Will be populated with the container ID or Name of Nautical itself. To prevent it from attempting to back itself up. 103 | SELF_CONTAINER_ID="" 104 | 105 | # Taken from the git tab 106 | NAUTICAL_VERSION="" 107 | 108 | # An automatic output from docker/setup-qemu-action@v3. 109 | TARGETPLATFORM="" 110 | 111 | # ------ Comma Seperated Values ------ # 112 | 113 | # Containers to be skipped completely. No backup 114 | SKIP_CONTAINERS="" 115 | 116 | # Containers to still be backed up, but not stopped beforehand. 
117 | SKIP_STOPPING="" 118 | 119 | # Directories to be backed up that are not associated with a container 120 | ADDITIONAL_FOLDERS="" 121 | 122 | # Secondary destination directories 123 | SECONDARY_DEST_DIRS="" -------------------------------------------------------------------------------- /app/entry.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | 3 | source /app/logger.sh # Use the logger script 4 | source /app/utils.sh 5 | 6 | install_cron(){ 7 | # Echo the CRON schedule for logging/debugging 8 | logThis "Installing CRON schedule: $CRON_SCHEDULE in TZ: $TZ" "DEBUG" "init" 9 | 10 | # Dump the current cron jobs to a temporary file 11 | crontab -l >tempcron 12 | 13 | # Remove the existing cron job for your backup script from the file 14 | sed -i '/nautical/d' tempcron 15 | 16 | # Add the new cron job to the file 17 | echo "$CRON_SCHEDULE with-contenv nautical" >>tempcron 18 | 19 | # Install the new cron jobs and remove the tempcron file 20 | crontab tempcron && rm tempcron 21 | } 22 | 23 | if [ "$CRON_SCHEDULE_ENABLED" = "true" ]; then 24 | install_cron 25 | else 26 | logThis "Skipping CRON installation since CRON_SCHEDULE_ENABLED=false" "INFO" "init" 27 | fi 28 | 29 | # Verify the source and destination locations 30 | verify_source_location $SOURCE_LOCATION 31 | verify_destination_location $DEST_LOCATION 32 | 33 | #? 
Old bash methods 34 | # initialize_db "$NAUTICAL_DB_PATH" "$NAUTICAL_DB_NAME" 35 | # seed_db 36 | 37 | # The script must be run from the root directory 38 | cd / 39 | with-contenv python3 /app/db.py 40 | 41 | # Simlinks the nautical command to the backup script (python) 42 | initialize_nautical 43 | 44 | 45 | # :nocov: 46 | if [ "$EXIT_AFTER_INIT" = "true" ]; then 47 | logThis "Exiting since EXIT_AFTER_INIT is true" "INFO" "init" 48 | exit 0 49 | fi 50 | # :nocov: 51 | -------------------------------------------------------------------------------- /app/env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | 3 | if [ ! -z "$TEST_MODE" ] && [ "$TEST_MODE" != "-1" ]; then 4 | if [ ! -z "$TEST_MODE" ]; then 5 | NAUTICAL_VERSION="Test-${TEST_MODE}" 6 | fi 7 | if [ ! -z "$TARGETPLATFORM" ]; then 8 | TARGETPLATFORM=TestPlatform 9 | fi 10 | fi 11 | 12 | source /app/utils.sh # This also loads the logger 13 | 14 | logThis "LOG_LEVEL: ${LOG_LEVEL}" "DEBUG" "INIT" 15 | handle_env() { 16 | # Export and log this env 17 | local var_name="$1" 18 | local var_value="$2" 19 | if [ ! -z "${var_value}" ]; then 20 | logThis "$var_name: $var_value" "DEBUG" "init" 21 | else 22 | logThis "$var_name: $var_value" "TRACE" "init" 23 | fi 24 | export_env "$var_name" "$var_value" 25 | } 26 | 27 | : "${REPORT_FILE:=true}" && handle_env REPORT_FILE "$REPORT_FILE" 28 | 29 | create_new_report_file 30 | logThis "Nautical Backup Version: $NAUTICAL_VERSION" "INFO" "init" 31 | logThis "Built for the platform: $TARGETPLATFORM" "DEBUG" "init" 32 | 33 | logThis "Perparing enviornment variables..." "DEBUG" "init" 34 | 35 | # Path to the defaults file 36 | DEFAULTS_FILE="/app/defaults.env" 37 | 38 | # Check if the defaults file exists 39 | if [ ! 
-f "$DEFAULTS_FILE" ]; then 40 | logThis "Enviornment defaults file not found: $DEFAULTS_FILE" "ERROR" "init" 41 | exit 1 42 | fi 43 | 44 | logThis "Found defaults.env" "DEBUG" "init" 45 | # Read each line in the defaults file 46 | while IFS= read -r line; do 47 | # Skip empty lines and lines starting with # 48 | [[ -z "$line" || "$line" == \#* ]] && continue 49 | 50 | # Extract variable name and value 51 | var="${line%%=*}" 52 | default_value="${line#*=}" 53 | 54 | # Handle empty string default_value 55 | if [ "$default_value" == '""' ]; then 56 | default_value="" 57 | fi 58 | 59 | # Set the variable to default if not already set 60 | if [ -z "${!var}" ]; then 61 | declare "$var=$default_value" 62 | handle_env "$var" "${!var}" 63 | fi 64 | done <"$DEFAULTS_FILE" 65 | 66 | # Get the container ID of the current container (Does not work on arm64) 67 | SELF_CONTAINER_ID=$(cat /proc/self/cgroup | grep 'docker' | sed 's/^.*\///' | tail -n1) 68 | if [ -z "${SELF_CONTAINER_ID}" ]; then 69 | SELF_CONTAINER_ID=$(hostname) # Workaround for arm64 70 | if [ -z "${SELF_CONTAINER_ID}" ]; then 71 | SELF_CONTAINER_ID="nautical-backup" 72 | echo "$SELF_CONTAINER_ID" > /etc/hostname 73 | fi 74 | fi 75 | handle_env SELF_CONTAINER_ID "$SELF_CONTAINER_ID" 76 | -------------------------------------------------------------------------------- /app/logger.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import os 3 | from pathlib import Path 4 | from typing import Optional, Union 5 | from app.nautical_env import NauticalEnv 6 | from enum import Enum 7 | 8 | 9 | class LogLevel(Enum): 10 | TRACE = 0 11 | DEBUG = 1 12 | INFO = 2 13 | WARN = 3 14 | ERROR = 4 15 | 16 | 17 | class LogType(Enum): 18 | INIT = 0 19 | DEFAULT = 1 20 | 21 | 22 | class Logger: 23 | def __init__(self): 24 | self.levels = {LogLevel.TRACE: 0, LogLevel.DEBUG: 1, LogLevel.INFO: 2, LogLevel.WARN: 3, LogLevel.ERROR: 4} 25 | self.env = NauticalEnv() 26 | 27 | # 
Defaults 28 | self.script_logging_level: LogLevel = LogLevel.INFO 29 | self.report_file_logging_level = LogLevel.INFO 30 | self.report_file_on_backup_only: bool = True 31 | 32 | self.script_logging_level = self._parse_log_level(self.env.LOG_LEVEL) or self.script_logging_level 33 | self.report_file_logging_level = ( 34 | self._parse_log_level(self.env.REPORT_FILE_LOG_LEVEL) or self.report_file_logging_level 35 | ) 36 | 37 | if self.env.REPORT_FILE_ON_BACKUP_ONLY.lower() == "true": 38 | self.report_file_on_backup_only = True 39 | elif self.env.REPORT_FILE_ON_BACKUP_ONLY.lower() == "false": 40 | self.report_file_on_backup_only = False 41 | 42 | self.dest_location: Union[str, Path] = os.environ.get("DEST_LOCATION", "") 43 | self.report_file = f"Backup Report - {datetime.datetime.now().strftime('%Y-%m-%d')}.txt" 44 | 45 | @staticmethod 46 | def set_to_string(input: set) -> str: 47 | """Converts a set to a string with comma separated values.""" 48 | return ", ".join(str(i) for i in input) 49 | 50 | @staticmethod 51 | def _parse_log_level(log_level: Union[str, LogLevel]) -> Optional[LogLevel]: 52 | if isinstance(log_level, LogLevel): 53 | return log_level 54 | 55 | # Override the defaults with environment variables if they exist 56 | if log_level.lower().strip() == "trace": 57 | return LogLevel.TRACE 58 | elif log_level.lower().strip() == "debug": 59 | return LogLevel.DEBUG 60 | elif log_level.lower().strip() == "info": 61 | return LogLevel.INFO 62 | elif log_level.lower().strip() == "warn": 63 | return LogLevel.WARN 64 | elif log_level.lower().strip() == "error": 65 | return LogLevel.ERROR 66 | return None 67 | 68 | def _delete_old_report_files(self): 69 | """Only completed on Nautical init""" 70 | if not os.path.exists(self.dest_location): 71 | return 72 | 73 | for file in os.listdir(self.dest_location): 74 | file.strip() 75 | if file.startswith("Backup Report -") and file.endswith(".txt"): 76 | if file != self.report_file: 77 | # Don't delete today's report file 78 | 
os.remove(os.path.join(self.dest_location, file)) 79 | 80 | def _create_new_report_file(self): 81 | """Only completed on Nautical init""" 82 | self._delete_old_report_files() 83 | 84 | if not os.path.exists(self.dest_location): 85 | raise FileNotFoundError(f"Destination location {self.dest_location} does not exist.") 86 | 87 | # Initialize the current report file with a header 88 | with open(os.path.join(self.dest_location, self.report_file), "w+") as f: 89 | f.write(f"Backup Report - {datetime.datetime.now()}\n") 90 | 91 | def _write_to_report_file(self, log_message, log_level: Union[str, LogLevel] = LogLevel.INFO): 92 | level = self._parse_log_level(log_level) 93 | if level not in self.levels: 94 | return # Check if level exists 95 | 96 | # Check if folder exists 97 | if not os.path.exists(self.dest_location): 98 | raise FileNotFoundError(f"Destination location {self.dest_location} does not exist.") 99 | 100 | with open(os.path.join(self.dest_location, self.report_file), "a") as f: 101 | f.write(f"{datetime.datetime.now()} - {str(level)[9:]}: {log_message}\n") 102 | 103 | def log_this(self, log_message, log_level: Union[str, LogLevel] = LogLevel.INFO, log_type=LogType.DEFAULT): 104 | 105 | level = self._parse_log_level(log_level) 106 | if level not in self.levels: 107 | return # Check if level exists 108 | 109 | # Check if level is enough for console logging 110 | if self.levels[level] >= self.levels[self.script_logging_level]: 111 | print(f"{str(level)[9:]}: {log_message}") 112 | 113 | if self.env.REPORT_FILE == False: 114 | return 115 | 116 | # Check if level is enough for report file logging 117 | if self.levels[level] >= self.levels[self.report_file_logging_level]: 118 | if self.report_file_on_backup_only == True: 119 | if log_type != LogType.INIT: 120 | self._write_to_report_file(log_message, log_level) 121 | else: 122 | # Always write to report file 123 | self._write_to_report_file(log_message, log_level) 124 | 
-------------------------------------------------------------------------------- /app/logger.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | 3 | declare -A levels=([TRACE]=0 [DEBUG]=1 [INFO]=2 [WARN]=3 [ERROR]=4) 4 | 5 | # Defaults 6 | script_logging_level="INFO" 7 | report_file_logging_level="INFO" 8 | report_file_on_backup_only="true" 9 | 10 | # Override the defaults 11 | if [ ! -z "$LOG_LEVEL" ]; then 12 | script_logging_level=$LOG_LEVEL 13 | fi 14 | 15 | if [ ! -z "$REPORT_FILE_LOG_LEVEL" ]; then 16 | report_file_logging_level=$REPORT_FILE_LOG_LEVEL 17 | fi 18 | 19 | # Convert logging level variables to uppercase for array lookup 20 | script_logging_level=$(echo "$script_logging_level" | tr '[:lower:]' '[:upper:]') 21 | report_file_logging_level=$(echo "$report_file_logging_level" | tr '[:lower:]' '[:upper:]') 22 | 23 | 24 | if [ ! -z "$REPORT_FILE_ON_BACKUP_ONLY" ]; then 25 | report_file_on_backup_only=$REPORT_FILE_ON_BACKUP_ONLY 26 | fi 27 | 28 | report_file="Backup Report - $(date +'%Y-%m-%d').txt" 29 | 30 | delete_report_file() { 31 | rm -f "$DEST_LOCATION/Backup Report - "*.txt 32 | } 33 | 34 | create_new_report_file() { 35 | if [ "$REPORT_FILE" = "true" ]; then 36 | delete_report_file 37 | # Initialize the current report file with a header 38 | echo "Backup Report - $(date)" >"$DEST_LOCATION/$report_file" 39 | fi 40 | } 41 | 42 | logThis() { 43 | local log_message=$1 44 | local log_priority=${2:-INFO} 45 | local message_type=${3:-"default"} 46 | 47 | # Convert log_priority to uppercase 48 | log_priority=$(echo "$log_priority" | tr '[:lower:]' '[:upper:]') 49 | 50 | # Check if level exists 51 | if [ -z "${levels[$log_priority]}" ]; then 52 | echo "Invalid log level: $log_priority" 53 | return 1 54 | fi 55 | [[ ${levels[$log_priority]} ]] || return 1 56 | 57 | # Check if level is enough for console logging 58 | if ((${levels[$log_priority]} >= ${levels[$script_logging_level]})); then 
59 | echo "${log_priority}: ${log_message}" 60 | fi 61 | 62 | # Check if level is enough for report file logging 63 | if [ "$REPORT_FILE" = "true" ] && ((${levels[$log_priority]} >= ${levels[$report_file_logging_level]})); then 64 | if ! ([ "$message_type" == "init" ] && [ "$report_file_on_backup_only" == "true" ]); then 65 | echo "$(date) - ${log_priority}: ${log_message}" >>"$DEST_LOCATION/$report_file" 66 | fi 67 | fi 68 | } -------------------------------------------------------------------------------- /app/nautical.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | # `with-contenv` is an s6 feature that allows the script to run with the container's environment variables 3 | 4 | # The backup script must be run from the root directory 5 | cd / 6 | 7 | python3 /app/backup.py -------------------------------------------------------------------------------- /app/nautical_env.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | from typing import Dict, List 4 | 5 | 6 | class NauticalEnv: 7 | def __init__(self) -> None: 8 | self.SKIP_CONTAINERS = os.environ.get("SKIP_CONTAINERS", "") 9 | self.SKIP_STOPPING = os.environ.get("SKIP_STOPPING", "") 10 | self.SELF_CONTAINER_ID = os.environ.get("SELF_CONTAINER_ID", "") 11 | 12 | self.LOG_LEVEL = os.environ.get("LOG_LEVEL", "INFO") 13 | self.REPORT_FILE_LOG_LEVEL = os.environ.get("REPORT_FILE_LOG_LEVEL", "") 14 | self.REPORT_FILE_ON_BACKUP_ONLY = os.environ.get("REPORT_FILE_ON_BACKUP_ONLY", "") 15 | 16 | self.DEST_LOCATION = os.environ.get("DEST_LOCATION", "") 17 | self.SOURCE_LOCATION = os.environ.get("SOURCE_LOCATION", "") 18 | 19 | self.KEEP_SRC_DIR_NAME = os.environ.get("KEEP_SRC_DIR_NAME", "") 20 | 21 | self.OVERRIDE_SOURCE_DIR = self._populate_override_dirs("OVERRIDE_SOURCE_DIR") 22 | self.OVERRIDE_DEST_DIR = self._populate_override_dirs("OVERRIDE_DEST_DIR") 23 | 24 | 
self.DEFAULT_RNC_ARGS = "-raq" # Default 25 | self.USE_DEFAULT_RSYNC_ARGS = os.environ.get("USE_DEFAULT_RSYNC_ARGS", "") 26 | self.RSYNC_CUSTOM_ARGS = os.environ.get("RSYNC_CUSTOM_ARGS", "") 27 | 28 | self.REQUIRE_LABEL = False 29 | if os.environ.get("REQUIRE_LABEL", "False").lower() == "true": 30 | self.REQUIRE_LABEL = True 31 | self.LABEL_PREFIX = os.environ.get("LABEL_PREFIX", "nautical-backup") 32 | 33 | self.NAUTICAL_DB_PATH = os.environ.get("NAUTICAL_DB_PATH", "") 34 | 35 | self.USE_DEST_DATE_FOLDER = os.environ.get("USE_DEST_DATE_FOLDER", "") 36 | self.DEST_DATE_FORMAT = os.environ.get("DEST_DATE_FORMAT", "%Y-%m-%d") 37 | self.DEST_DATE_PATH_FORMAT = os.environ.get("DEST_DATE_PATH_FORMAT", "date/container") 38 | if self.DEST_DATE_PATH_FORMAT not in ["date/container", "container/date"]: 39 | self.DEST_DATE_PATH_FORMAT = "date/container" # Set default 40 | 41 | self.USE_CONTAINER_BACKUP_DATE = False 42 | if os.environ.get("USE_CONTAINER_BACKUP_DATE", "false").lower() == "true": 43 | self.USE_CONTAINER_BACKUP_DATE = True 44 | 45 | # Not associated with containers 46 | self.ADDITIONAL_FOLDERS = os.environ.get("ADDITIONAL_FOLDERS", "") 47 | self.ADDITIONAL_FOLDERS_WHEN = os.environ.get("ADDITIONAL_FOLDERS_WHEN", "before") 48 | self.ADDITIONAL_FOLDERS_USE_DEST_DATE_FOLDER = os.environ.get("ADDITIONAL_FOLDERS_USE_DEST_DATE_FOLDER", "") 49 | 50 | self.SECONDARY_DEST_DIRS: List[Path] = [] 51 | for dir in os.environ.get("SECONDARY_DEST_DIRS", "").split(","): 52 | if not dir or dir.strip() == "": 53 | continue 54 | self.SECONDARY_DEST_DIRS.append(Path(dir.strip())) 55 | 56 | self._PRE_BACKUP_CURL = os.environ.get("PRE_BACKUP_CURL", "") 57 | self._POST_BACKUP_CURL = os.environ.get("POST_BACKUP_CURL", "") 58 | 59 | # Temporily use the CURL variable 60 | self.PRE_BACKUP_EXEC = os.environ.get("PRE_BACKUP_EXEC", self._PRE_BACKUP_CURL) 61 | self.POST_BACKUP_EXEC = os.environ.get("POST_BACKUP_EXEC", self._POST_BACKUP_CURL) 62 | 63 | self.RUN_ONCE = False 64 | if 
os.environ.get("RUN_ONCE", "False").lower() == "true": 65 | self.RUN_ONCE = True 66 | 67 | self.REPORT_FILE = True 68 | if os.environ.get("REPORT_FILE", "True").lower() == "false": 69 | self.REPORT_FILE = False 70 | 71 | self.STOP_TIMEOUT = int(os.environ.get("STOP_TIMEOUT", 10)) 72 | 73 | @staticmethod 74 | def _populate_override_dirs(env_name: str) -> Dict[str, str]: 75 | """Translate the Enviornment variable from single string to Python Dict. 76 | 77 | ``` 78 | input="example1:example1-new-source-data,ctr2:ctr2-new-source" 79 | 80 | output = { 81 | "example1": "example1-new-source-data", 82 | "ctr2": "ctr2-new-source" 83 | } 84 | ``` 85 | """ 86 | raw = str(os.environ.get(env_name, "")) 87 | 88 | result = {} 89 | 90 | if not raw: 91 | return result 92 | 93 | for pair in raw.split(","): 94 | split = pair.split(":") 95 | if len(split) < 2: 96 | continue 97 | 98 | container_name = str(split[0]) 99 | new_dir = str(split[1]) 100 | result[container_name] = new_dir 101 | 102 | return result 103 | -------------------------------------------------------------------------------- /app/utils.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | 3 | source /app/logger.sh # Use the logger script 4 | 5 | export_env() { 6 | local var_name="$1" 7 | local var_value="$2" 8 | local env_file="/var/run/s6/container_environment/$var_name" 9 | printf "%s" "$var_value" >"$env_file" 10 | export "${var_name}" 11 | } 12 | 13 | cecho() { 14 | RED="\033[0;31m" 15 | GREEN="\033[0;32m" # <-- [0 means not bold 16 | YELLOW="\033[1;33m" # <-- [1 means bold 17 | CYAN="\033[1;36m" 18 | # ... 
Add more colors if you like 19 | 20 | NC="\033[0m" # No Color 21 | 22 | # printf "${(P)1}${2} ${NC}\n" # <-- zsh 23 | printf "${!1}${2} ${NC}\n" # <-- bash 24 | } 25 | 26 | # Function to populate a list array 27 | process_csv() { 28 | local -n skip_list_ref=$1 # Use nameref to update the array passed as argument 29 | local skip_var=$2 # The environment variable containing the skip list 30 | 31 | if [ ! -z "$skip_var" ]; then 32 | # Remove quotes and leading/trailing whitespaces 33 | local cleaned_skip_var=$(echo "$skip_var" | sed "s/'//g;s/\"//g" | tr -d ' ') 34 | 35 | # Split by commas into an array 36 | IFS=',' read -ra ADDITIONAL_SKIPS <<<"$cleaned_skip_var" 37 | 38 | # Add to the existing skip list 39 | skip_list_ref=("${skip_list_ref[@]}" "${ADDITIONAL_SKIPS[@]}") 40 | fi 41 | } 42 | 43 | initialize_logThis() { 44 | if [ ! -f "/usr/local/bin/logThis" ]; then 45 | logThis "Installing logThis script..." "DEBUG" "init" 46 | # Allows the logThis backup script to be run using `bash logThis` 47 | ln -s /app/logger.sh /usr/local/bin/logThis 48 | chmod +x /usr/local/bin/logThis 49 | fi 50 | } 51 | 52 | # Unused 53 | initialize_nautical_bash() { 54 | if [ ! -f "/usr/local/bin/nautical" ]; then 55 | logThis "Installing nautical backup script..." "DEBUG" "init" 56 | # Allows the nautical backup script to be run using `bash nautical` 57 | ln -s /app/backup.sh /usr/local/bin/nautical 58 | chmod +x /usr/local/bin/nautical 59 | fi 60 | } 61 | 62 | # Unused 63 | initialize_nautical_python() { 64 | if [ ! -f "/usr/local/bin/nautical" ]; then 65 | logThis "Installing nautical backup script..." "DEBUG" "init" 66 | # Allows the nautical backup script to be run using `bash nautical` 67 | ln -s /app/backup.py /usr/local/bin/nautical 68 | chmod +x /usr/local/bin/nautical 69 | logThis "ln -s /app/backup.py /usr/local/bin/nautical" "TRACE" "init" 70 | fi 71 | } 72 | 73 | initialize_nautical() { 74 | if [ !
-f "/usr/local/bin/nautical" ]; then 75 | logThis "Installing nautical backup script..." "DEBUG" "init" 76 | # Allows the nautical backup script to be run using `bash nautical` 77 | 78 | # Check if file exists 79 | if [ ! -f "/app/nautical.sh" ]; then 80 | logThis "Nautical backup script not found: /app/nautical.sh" "ERROR" "init" 81 | exit 1 82 | fi 83 | 84 | ln -s /app/nautical.sh /usr/local/bin/nautical 85 | chmod +x /usr/local/bin/nautical 86 | logThis "ln -s /app/backup.sh /usr/local/bin/nautical" "TRACE" "init" 87 | fi 88 | } 89 | 90 | # Function to initialize the database if it doesn't exist 91 | initialize_db() { 92 | local db_path=$1 93 | local db_name=$2 94 | local db_full_path="$db_path/$db_name" 95 | 96 | if [ -f "$db_full_path" ]; then 97 | logThis "Connected to database at '$db_full_path'..." "DEBUG" "init" 98 | else 99 | logThis "Initializing databse at '$db_full_path'..." "DEBUG" "init" 100 | # Check if directory exists, if not create it 101 | if [ ! -d "$db_path" ]; then 102 | mkdir -p "$db_path" 103 | fi 104 | 105 | # Check if database file exists, if not create it 106 | if [ ! -f "$db_full_path" ]; then 107 | logThis "Database Path: $db_full_path" "DEBUG" "init" 108 | echo "{}" >"$db_full_path" 109 | fi 110 | 111 | fi 112 | 113 | if [ ! -f "/usr/local/bin/db" ]; then 114 | logThis "Installing database script..." 
"DEBUG" "init" 115 | # Allows the database script to be run using `bash db --help` 116 | # cp /app/db.sh /usr/local/bin/db 117 | ln -s /app/db.sh /usr/local/bin/db 118 | chmod +x /usr/local/bin/db 119 | fi 120 | 121 | } 122 | 123 | seed_db() { 124 | if [ "$(db get "backup_running")" == "null" ]; then 125 | db put "backup_running" false 126 | fi 127 | 128 | if [ "$(db get "containers_skipped")" == "null" ]; then 129 | db put "containers_skipped" 0 130 | fi 131 | 132 | if [ "$(db get "containers_completed")" == "null" ]; then 133 | db put "containers_completed" 0 134 | fi 135 | 136 | if [ "$(db get "number_of_containers")" == "null" ]; then 137 | db put "number_of_containers" 0 138 | fi 139 | 140 | if [ "$(db get "errors")" == "null" ]; then 141 | db put "errors" 0 142 | fi 143 | } 144 | 145 | verify_source_location() { 146 | local src_dir=$1 147 | logThis "Verifying source directory '$src_dir'..." "DEBUG" "init" 148 | if [ ! -d "$src_dir" ]; then 149 | logThis "Source directory '$src_dir' does not exist." "ERROR" "init" 150 | exit 1 151 | elif [ ! -r "$src_dir" ]; then 152 | logThis "No read access to source directory '$src_dir'." "ERROR" "init" 153 | exit 1 154 | fi 155 | } 156 | 157 | verify_destination_location() { 158 | local dest_dir=$1 159 | logThis "Verifying destination directory '$dest_dir'..." "DEBUG" "init" 160 | if [ ! -d "$dest_dir" ]; then 161 | logThis "Destination directory '$dest_dir' does not exist." "ERROR" "init" 162 | exit 1 163 | elif [ ! -r "$dest_dir" ]; then 164 | logThis "No read access to destination directory '$dest_dir'." "ERROR" "init" 165 | exit 1 166 | elif [ ! -w "$dest_dir" ]; then 167 | logThis "No write access to destination directory '$dest_dir'." "ERROR" "init" 168 | exit 1 169 | fi 170 | } 171 | -------------------------------------------------------------------------------- /dev/docker-compose.yml: -------------------------------------------------------------------------------- 1 | 2 | # cd .. docker build -t nautical-backup . 
--progress=plain --no-cache 3 | # cd dev && docker compose up 4 | --- 5 | services: 6 | # watchtower: 7 | # image: containrrr/watchtower 8 | # container_name: watchtower 9 | # volumes: 10 | # - /var/run/docker.sock:/var/run/docker.sock 11 | # - watchtower-data:/data/test:ro # Named volume 12 | # - /data/test2 # Anonymous volume 13 | # - ./source/watchtower:/data/watchtower:ro # Bind mount 14 | # ports: 15 | # - 8080:8080 16 | # environment: 17 | # - TZ=America/Los_Angeles 18 | # - WATCHTOWER_SCHEDULE=0 0 4 * * * # Every day at 4am 19 | # labels: 20 | # - "nautical-backup.enable=true" 21 | # # - "nautical-backup.curl.before=echo NB_EXEC_ATTACHED_TO_CONTAINER: $$NB_EXEC_ATTACHED_TO_CONTAINER" 22 | # # - "nautical-backup.curl.before=/config/script.sh" 23 | # # - "nautical-backup.override-source-dir=folder/watchtower" 24 | 25 | # docker_socket_proxy: 26 | # image: tecnativa/docker-socket-proxy 27 | # container_name: docker_socket_proxy 28 | # ports: 29 | # - 2375:2375 30 | # volumes: 31 | # - /var/run/docker.sock:/var/run/docker.sock 32 | # environment: 33 | # # Enable the API access to the following sections of the Docker API 34 | # # https://github.com/Tecnativa/docker-socket-proxy?tab=readme-ov-file#grant-or-revoke-access-to-certain-api-sections 35 | # - CONTAINERS=1 36 | # - IMAGES=1 37 | # - ALLOW_START=1 38 | # - ALLOW_STOP=1 39 | # - EXEC=1 40 | # - VOLUMES=1 41 | 42 | 43 | nautical-backup: 44 | build: ../ 45 | # container_name: nautical-backup-test 46 | develop: 47 | watch: 48 | - action: rebuild 49 | path: ${LOCAL_WORKSPACE_FOLDER-./}/app 50 | volumes: 51 | - ${LOCAL_WORKSPACE_FOLDER-./}/dev/config:/config 52 | - ${LOCAL_WORKSPACE_FOLDER-./}/dev/source:/data/source:ro 53 | - ${LOCAL_WORKSPACE_FOLDER-./}/dev/destination:/data/destination 54 | # - /var/lib/docker/volumes:/data/volumes:ro 55 | # Optional, or you can use the docker_socket_proxy service 56 | - /var/run/docker.sock:/var/run/docker.sock 57 | # - ${LOCAL_WORKSPACE_FOLDER-./}/app:/app # Allows code to 
be edited on the fly 58 | # user: 1000:1000 59 | ports: 60 | - 8069:8069 61 | environment: 62 | - TZ=America/Los_Angeles 63 | - LOG_LEVEL=TRACE 64 | # - BACKUP_ON_START=true 65 | - BACKUP_ON_START=false 66 | - REPORT_FILE=false 67 | - CRON_SCHEDULE=0 4 * * * 68 | # - REQUIRE_LABEL=true 69 | - HTTP_REST_API_ENABLED=true 70 | - EXIT_AFTER_INIT=true 71 | # - RUN_ONCE=trues 72 | # - USE_DEST_DATE_FOLDER=true 73 | # - DEST_DATE_PATH_FORMAT=container/date 74 | # - DEST_DATE_FORMAT=Nautical Backup - %Y-%m-%d 75 | # entrypoint: ["sleep", "infinity"] 76 | 77 | volumes: 78 | watchtower-data: 79 | -------------------------------------------------------------------------------- /docs/advanced/advanced.md: -------------------------------------------------------------------------------- 1 | These examples used Docker Compose syntax. See the [Installation section](../installation.md#docker-compose-example) to fit them into your configuration. 2 | 3 | !!! tip "Remember the folder naming convention" 4 | 1. The `container-name` must match the `source` and `destination` folder names. 5 | 2. You can override this using [Arguments](../arguments.md#override-source-directory). 6 | 7 | ## Alternative Source Directories 8 | Don't have all your container volumes in the same directory? That's okay, we can use Docker volume mappings to help. 9 | 10 | 11 | === "Example 1" 12 | This config allows additional volumes *outside* the traditional `source` directory. 13 | 14 | ```yaml 15 | volumes: 16 | # Standard config 17 | - /var/run/docker.sock:/var/run/docker.sock 18 | - /source:/app/source 19 | - /destination:/app/destination 20 | # Alternative source directories examples 21 | - /opt/pihole:/app/source/pihole #(1)! 22 | - /mnt/docker_volumes/plex:/app/source/plex #(2)! 23 | ``` 24 | 25 | 1. Mounted from a different `source` directory 26 | 2. Mounted from a different `source` directory 27 | 28 | We added 2 additional source volumes: `pihole` and `plex`. 
The end result will have a source directory inside the Nautical container that looks like this: 29 | 30 | ```bash 31 | /app/source: 32 | - container1-data #(1)! 33 | - container2-data #(2)! 34 | - pihole # Mapped from /opt/pihole 35 | - plex # Mapped from /mnt/docker_volumes/plex 36 | ``` 37 | 38 | 1. This is an example container data folder from the mounted `/source` directory 39 | 2. This is an example container data folder from the mounted `/source` directory 40 | 41 | === "Example 2" 42 | This config uses volumes __only__ *outside* the traditional `source` directory. 43 | 44 | ```yaml 45 | volumes: 46 | # Standard config 47 | - /var/run/docker.sock:/var/run/docker.sock 48 | - /destination:/app/destination #(1)! 49 | # Alternative source directories examples 50 | - /opt/pihole:/app/source/pihole 51 | - /opt/trilium:/app/source/trilium 52 | - /mnt/docker_volumes/plex:/app/source/plex 53 | - /var/data/portainer:/app/source/portainer 54 | ``` 55 | 56 | 1. Even though we used alternative `source` directories, the `destination` directories will all be the same: 57 | ```text 58 | /destination/pihole 59 | /destination/trilium 60 | /destination/plex 61 | /destination/portainer 62 | ``` 63 | 64 | This configuration allows us to map as many container data folders as we'd like from any source directory. 65 | ```yaml 66 | /app/source: 67 | - pihole # Mapped from /opt/pihole 68 | - trilium # Mapped from /opt/trilium 69 | - plex # Mapped from /mnt/docker_volumes/plex 70 | - portainer # Mapped from /var/data/portainer 71 | ``` 72 | 73 | ## Alternative Destination Directories 74 | We can also remap the destination directory for any container we'd like. 75 | 76 | 77 | === "Example 1" 78 | ```yaml 79 | volumes: 80 | # Standard config 81 | - /var/run/docker.sock:/var/run/docker.sock 82 | - /source:/app/source #(1)! 
83 | - /destination:/app/destination 84 | # Alternative destination directories examples 85 | - /opt/pihole-backup:/app/destination/pihole 86 | - /mnt/docker_volume-backups/plex:/app/destination/plex 87 | ``` 88 | 89 | 1. Even though we used alternative `destination` directories, the `source` directories are using the standard configuration: 90 | ```text 91 | /source/pihole 92 | /source/plex 93 | ``` 94 | 95 | This config allows the addition of volumes outside the traditional `destination` directory. 96 | 97 | We added 2 additional destination volumes: `pihole` and `plex`. The end result will have a destination directory inside the Nautical container that looks like this: 98 | 99 | ```yaml 100 | /app/destination: 101 | - container1-data #(1)! 102 | - container2-data #(2)! 103 | - pihole # Mapped to /opt/pihole-backup 104 | - plex # Mapped to /mnt/docker_volume-backups/plex 105 | ``` 106 | 107 | 1. This is an example container data folder from the mounted `/source` directory 108 | 2. This is an example container data folder from the mounted `/source` directory 109 | 110 | === "Example 2" 111 | ```yaml 112 | volumes: 113 | # Standard config 114 | - /var/run/docker.sock:/var/run/docker.sock 115 | - /source:/app/source #(1)! 116 | # Alternative destination directories examples 117 | - /opt/pihole:/app/destination/pihole 118 | - /opt/trilium:/app/destination/trilium 119 | - /mnt/docker_volumes/plex:/app/destination/plex 120 | - /var/data/portainer:/app/destination/portainer 121 | ``` 122 | 123 | 1. Even though we used alternative `destination` directories, the `source` directories will all be the same: 124 | ```text 125 | /source/pihole 126 | /source/trilium 127 | /source/plex 128 | /source/portainer 129 | ``` 130 | 131 | This configuration allows us to map as many container data folders as we'd like to any destination directory. 
132 | ```yaml 133 | /app/destination: 134 | - pihole # Mapped to /opt/pihole 135 | - trilium # Mapped to /opt/trilium 136 | - plex # Mapped to /mnt/docker_volumes/plex 137 | - portainer # Mapped to /var/data/portainer 138 | ``` -------------------------------------------------------------------------------- /docs/advanced/homepage-dashboard.md: -------------------------------------------------------------------------------- 1 | ![homepage logo](../media/homepage-logo.png) 2 | 3 | 4 | [Homepage](https://gethomepage.dev) is a modern, fully static, fast, secure fully proxied, highly customizable application dashboard with integrations for over 100 services and translations into multiple languages. Easily configured via YAML files or through docker label discovery. 5 | 6 | ![homepage demo](../media/homepage_demo.png) 7 | 8 | ## Add Nautical to Homepage 9 | 10 | We are going to take advantage of Homepage's [Custom API Widget](https://gethomepage.dev/latest/widgets/services/customapi) to get the following result: 11 | 12 | ![Homepage Example with Nautical](../media/homepage-example.png) 13 | 14 | Our configuration will look something like this: 15 | 16 | ```yaml 17 | - Nautical Backup: 18 | icon: https://raw.githubusercontent.com/Minituff/nautical-backup/main/docs/media/Logo-large.png 19 | description: Docker Volume Backups 20 | widget: 21 | type: customapi 22 | url: http://:8069/api/v1/nautical/dashboard 23 | username: admin 24 | password: password 25 | method: GET 26 | mappings: 27 | - field: number_of_containers 28 | label: Total Containers 29 | 30 | - field: completed 31 | label: Completed 32 | 33 | - field: skipped 34 | label: Skipped 35 | 36 | - field: errors 37 | label: errors 38 | 39 | - field: last_cron 40 | label: Last Run 41 | format: relativeDate # (1)! 42 | 43 | - field: next_cron 44 | label: Next Run 45 | 46 | ``` 47 | 48 | 1. 
Here, you can set an additional property called `format` to one of these options: 49 | * `relativeDate` example: 10 hours ago 50 | * `date` removes the exact time and shows the day only 51 | 52 | You can also add this to the `next_cron` field. 53 | 54 | 55 | It is recommended that you don't enable **all** the fields. Just comment out the fields that you don't need. 56 | 57 | ??? abstract "Field Translation" 58 | ```json 59 | { 60 | "next_cron": { 61 | "1": [ 62 | "Monday, April 22, 2024 at 05:00 AM", 63 | "04/22/24 05:00" 64 | ], 65 | "2": [ 66 | "Tuesday, April 23, 2024 at 05:00 AM", 67 | "04/23/24 05:00" 68 | ], 69 | "3": [ 70 | "Wednesday, April 24, 2024 at 05:00 AM", 71 | "04/24/24 05:00" 72 | ], 73 | "4": [ 74 | "Thursday, April 25, 2024 at 05:00 AM", 75 | "04/25/24 05:00" 76 | ], 77 | "5": [ 78 | "Friday, April 26, 2024 at 05:00 AM", 79 | "04/26/24 05:00" 80 | ], 81 | "cron": "0 5 * * *", 82 | "tz": "America/Los_Angeles" 83 | }, 84 | "last_cron": "04/21/24 05:00", 85 | "next_run": "04/22/24 05:00", 86 | "number_of_containers": 33, 87 | "completed": 25, 88 | "skipped": 8, 89 | "errors": 0, 90 | "backup_running": 8 91 | } 92 | ``` 93 | 94 | -------------------------------------------------------------------------------- /docs/advanced/nfs-share.md: -------------------------------------------------------------------------------- 1 | Nautical itself does not have the ability to map network shares. However, it can use a network share for either the source or destination. 2 | 3 | Commonly, we run containers on our host machine, then use an NFS share as the backup destination location. This page will give a brief overview of how to do that. 4 | 5 | ## Connect to an NFS Share On Container Host (Linux) 6 | 7 | 1. Create the NFS destination directories. 8 | ```bash 9 | # Create mount point (1) 10 | mkdir -p /mnt/nfs/docker_backups 11 | ``` 12 | 13 | 1. The destination directories must exist before a mount can be created 14 | 15 | 16 | 2. 
Setup NFS mount points: 17 | ```bash 18 | nano /etc/fstab 19 | ``` 20 | This will open a file, and here you can insert your NFS configuration: 21 | ```bash title="/etc/fstab" 22 | # | ------------- Source -------------- | ---- Destination ---- | -------- Options ---------- | 23 | 192.168.1.10:/mnt/backups/docker_volumes /mnt/nfs/docker_backups nfs _netdev,auto,rw,async 0 0 24 | ``` 25 | **Tip:** `192.168.1.10` is just an example IP address 26 | 27 | 3. Apply and mount the NFS shares 28 | ```bash 29 | mount -a 30 | ``` 31 | 32 | !!! success "A successful `mount -a` will return *nothing* in the console" 33 | 34 | 4. Verify *read* and *write* access 35 | ```bash 36 | cd /mnt/nfs/docker_backups 37 | touch test.txt && rm test.txt 38 | ``` 39 | 40 | ## Add Nautical Backup 41 | 42 | The above example created a local directory of `/mnt/nfs/docker_backups` which is an NFS share pointing to `192.168.1.10:/mnt/backups/docker_volumes`. 43 | 44 | Here is how we can use this new mount within Nautical: 45 | === "Docker Compose" 46 | ```yaml hl_lines="9" 47 | ------8<------ "docker-compose-example.yml::8" 48 | - /mnt/nfs/docker_backups:/app/destination #(3) <-- NFS Share 49 | 50 | ``` 51 | 52 | ------8<------ "docker-example-tooltips.md" 53 | 54 | === "Docker Run" 55 | ```bash hl_lines="5" 56 | ------8<------ "docker-run-example.sh:0:5" 57 | -v /mnt/nfs/docker_backups:/app/destination \ #(2)! 58 | ------8<------ "docker-run-example.sh:10:" 59 | ``` 60 | 61 | ------8<------ "docker-example-tooltips.md" -------------------------------------------------------------------------------- /docs/advanced/remote-backups.md: -------------------------------------------------------------------------------- 1 | Nautical does not provide connectivity to remote services such as S3, B2, or Google Drive. We believe there are better tools for these jobs and think it is best not to recreate them.
2 | 3 | Nautical ^^*can* backup to an NFS share^^ though, we have detailed steps to do this [here](./nfs-share.md). 4 | 5 | --- 6 | 7 | Here is a list of a few of our favorite remote backup solutions: 8 | 9 | * https://kopia.io 10 | * https://borgbackup.org 11 | * https://restic.net 12 | * https://duplicacy.com 13 | * https://duplicati.com 14 | 15 | Ideally, you would configure Nautical to create a backup at a `destination` folder, then point that folder to a remote backup solution. -------------------------------------------------------------------------------- /docs/developers/dev-container.md: -------------------------------------------------------------------------------- 1 | 2 | ### 1. Verify Requirements 3 | 4 | - [x] [Docker](https://code.visualstudio.com/remote/advancedcontainers/docker-options) 5 | - [x] [Dev Containers VSCode extension](vscode:extension/ms-vscode-remote.remote-containers) 6 | 7 | ### 2. Open DevContainer 8 | 1. Clone to repo, then open it in VSCode. 9 | 1. Press ++ctrl+shift+p++ 10 | 2. Then select `Dev Container: Open Folder in Container` 11 | 3. Wait for the container to build 12 | 13 | ### 3. Check the container 14 | 15 | Once the container is running and you're connected, you should see `Dev Container: Nautical` in the bottom left of the Status bar. 16 | 17 | ## The `nb` command 18 | The `nb` command gets installed as part of the [DevContainer](https://code.visualstudio.com/docs/devcontainers/create-dev-container) creation process. 
19 | 20 | ```{.properties .no-copy} 21 | nb --help 22 | 23 | -- Nautical Backup Developer Commands: 24 | build - Build Nautical container 25 | run - Run already built Nautical container 26 | build-run - Build and run Nautical container 27 | 28 | build-test - Build and run Nautical Testing container 29 | test - Run already built test Nautical container 30 | build-test-run - Build and run Nautical Testing container 31 | 32 | api - Run the Python API locally 33 | pytest - Pytest locally and capture coverage 34 | format - Format all python code with black 35 | 36 | docs - Run the Nautical documentation locally 37 | ``` -------------------------------------------------------------------------------- /docs/developers/docs.md: -------------------------------------------------------------------------------- 1 | # Contributing to the Documentation 2 | 3 | This documentation is built using 2 major components: 4 | 5 | 1. [Material for MkDocs](https://squidfunk.github.io/mkdocs-material/) is a powerful documentation framework on top of [MkDocs](https://www.mkdocs.org), a static site generator for project documentation. 6 | 7 | 1. [MkDocs](https://www.mkdocs.org) is a fast, simple and downright gorgeous static site generator that's geared towards building project documentation. Documentation source files are written in `Markdown`, and configured with a single `YAML` configuration file. 8 | 9 | ## Running the Docs Locally 10 | Spinning up the docs locally is ideal for development thanks to *hot reload*. 11 | 12 | There are three easy ways to get MkDocs up and running locally: 13 | 14 | 1. [VsCode DevContainer](#vscode-devcontainer) (recommended) 15 | 1. [Docker](#docker) 16 | 1. [Python and pip](#python-and-pip) 17 | 18 | !!! warning "Methods 2 and 3 will not work in a VsCode DevContainer — use method 1 instead." 19 | 20 | ### VSCode DevContainer 21 | Spin up the Nautical developer environment using a VsCode DevContainer following [these](./dev-container.md) steps. 
22 | 23 | Then simply run `nb docs` in the terminal. 24 | 25 | ```bash 26 | nb docs 27 | # INFO - Building documentation... 28 | # INFO - Cleaning site directory 29 | # INFO - Documentation built in 1.43 seconds 30 | # INFO - [22:52:06] Watching paths for changes: 'docs', 'mkdocs.yml' 31 | # INFO - [22:52:06] Serving on http://127.0.0.1:8000/nautical-backup/ 32 | ``` 33 | 34 | !!! tip "The `nb` command comes pre-loaded with the DevContainer." 35 | 36 | ### Docker 37 | 38 | If Docker is already installed on your machine, then running the docs locally is extremely easy. 39 | 40 | Verify Requirements 41 | ```bash 42 | docker --version 43 | # Docker version 20.10.22, build 3a2c30b 44 | 45 | docker compose version 46 | # Docker Compose version v2.15.1 47 | ``` 48 | 49 | The `docs/docker-compose.yml` file within the repo already has all the relevant information needed to get the docs up and running. 50 | The official [Docker image](https://hub.docker.com/r/squidfunk/mkdocs-material/) already contains all the requirements 51 | 52 | ???+ abstract "Our `docker-compose.yml`" 53 | 54 | ```yaml 55 | # This file exists at docs/docker-compose.yml 56 | version: "3" 57 | 58 | services: 59 | mkdocs: 60 | image: squidfunk/mkdocs-material:latest 61 | container_name: mkdocs 62 | hostname: mkdocs 63 | command: "" #(1)! 64 | volumes: 65 | - ../:/docs #(2)! 66 | ports: 67 | - 8000:8000 68 | restart: unless-stopped 69 | ``` 70 | 71 | 1. Serve the docs at http://127.0.0.1:8000 72 | 2. This works only if we start the container from `docs` directory. 73 | 74 | To start the container simply run the following command: 75 | ```bash 76 | cd docs #(1)! 77 | docker compose up 78 | # [+] Running 1/0 79 | # - Container mkdocs Created 80 | # mkdocs | INFO - Serving on http://0.0.0.0:8000/ 81 | ``` 82 | 83 | 1. We need to run the `docker compose up` command from the `docs` directory. 
84 | 85 | MKDocs will now be available at: http://127.0.0.1:8000 86 | 87 | ### Python and pip 88 | 89 | While the [Docker](#docker) method is the easiest to use, it's still quite simple to setup MKDocs using Python and pip. 90 | 91 | Material for MkDocs is published as a [Python package](https://pypi.org/project/mkdocs-material/) and can be installed with 92 | `pip`, ideally by using a [virtual environment](https://realpython.com/what-is-pip/#using-pip-in-a-python-virtual-environment). 93 | 94 | 1. Ensure you have **Python 3.8** or greater installed:  95 | ```command 96 | python --version 97 | # Python 3.10.5 98 | ``` 99 | 1. Clone repository to local machine and open in editor (VSCode recommended) 100 | 1. Install project plugins/libraries:  101 | 102 | ```command 103 | pip install mkdocs-material 104 | ``` 105 | 106 | This will automatically install compatible versions of all dependencies: 107 | 108 | [MkDocs](https://www.mkdocs.org/), [Markdown](https://python-markdown.github.io/), [Pygments](https://pygments.org/) and [Python Markdown Extensions](https://facelessuser.github.io/pymdown-extensions/). 109 | 110 | 2. Serve the docs: 111 | 112 | ```bash 113 | mkdocs serve 114 | ``` 115 | MKDocs will now be available at: http://127.0.0.1:8000 116 | 117 |
118 |
-------------------------------------------------------------------------------- /docs/developers/env.md: -------------------------------------------------------------------------------- 1 | These environment variables are not needed for normal installations. 2 | 3 | This documentation is intended for developers only. 4 | 5 | ## Test Mode 6 | For usage of the Nautical scripts outside the container. Useful for running the Unit Tests. 7 | 8 | > **Default**: -1 9 | 10 | ```properties 11 | TEST_MODE=0 12 | ``` 13 | 14 | 15 | ## Exit Nautical after Initialization 16 | This variable will tell Nautical to immediately quit after initialization--whether a backup is performed or not. 17 | 18 | > **Default**: false 19 | 20 | ```properties 21 | EXIT_AFTER_INIT=true 22 | ``` 23 | 24 | -------------------------------------------------------------------------------- /docs/developers/tests.md: -------------------------------------------------------------------------------- 1 | 1. Run the test container 2 | 3 | ```bash 4 | cd tests 5 | docker compose run nautical-backup-test4 6 | ``` 7 | !!! tip "You may need to update the paths here to be absolute paths" 8 | This is a problem with DevContainers 9 | 10 | 1. Shell into the container 11 | 12 | ```bash 13 | docker exec -it nautical-backup-test 14 | ``` 15 | 16 | 1. Run the tests 17 | 18 | ```bash 19 | with-contenv bash _tests.sh 20 | ``` 21 | !!! tip "`with-contenv` preserves environment variables" -------------------------------------------------------------------------------- /docs/docker-compose.yml: -------------------------------------------------------------------------------- 1 | # This file is used to easily run the mkdocs-material docker image to edit the docs locally. 2 | # To use this file, you need to have docker installed on your machine. 
3 | # Run the following command to start the docker container: `docker-compose up -d` 4 | --- 5 | version: "3" 6 | services: 7 | mkdocs: 8 | image: squidfunk/mkdocs-material:latest 9 | container_name: nautical-docs 10 | hostname: nautical-docs 11 | command: "" # Serve the docs at http://127.0.0.1:8000 12 | volumes: 13 | - ../:/docs 14 | ports: 15 | - 8000:8000 16 | restart: unless-stopped 17 | -------------------------------------------------------------------------------- /docs/docker-socket-proxy.md: -------------------------------------------------------------------------------- 1 | ## Why? 2 | The simple configuration is to pass the Docker socket straight into the Nautical container like this: 3 | 4 | * `/var/run/docker.sock:/var/run/docker.sock` 5 | 6 | 7 | However, giving access to your Docker socket could mean giving root access to your host. 8 | While Nautical needs *some* control of your Docker socket to inspect/start/stop/exec your containers, it does not need complete control. 9 | Using the [Docker Socket Proxy](https://github.com/Tecnativa/docker-socket-proxy) allows you to remove permissions from Nautical but still allow what's necessary. 10 | 11 | 12 | ## How? 13 | We can use the [Docker Socket Proxy](https://github.com/Tecnativa/docker-socket-proxy) container to act as a *man-in-the-middle* (AKA Proxy) for the Docker socket. 14 | 15 | Essentially, the [DSP](https://github.com/Tecnativa/docker-socket-proxy) gets full control over the Docker Socket, but it then gives out smaller permissions to the socket to Nautical (or anything else). 16 | 17 | ## Setup 18 | For more information about which Docker Socket Proxy environment variables you must enable, check out [their docs](https://github.com/Tecnativa/docker-socket-proxy?tab=readme-ov-file#grant-or-revoke-access-to-certain-api-sections). 
19 | 20 | ```yaml hl_lines="3 31" 21 | services: 22 | # Establish the docker socket proxy 23 | ------8<------ "docker-socket-proxy.yml" 24 | 25 | ------8<------ "docker-compose-example-no-tooltips.yml:2:5" 26 | # Notice we removed the socket mount 27 | ------8<------ "docker-compose-example-no-tooltips.yml:7:10" 28 | # Enable the Proxy in Nautical 29 | # The name `docker_socket_proxy` must match the name of the service 30 | # And they must be in the same compose, unless you use the absolute URL 31 | - DOCKER_HOST=tcp://docker_socket_proxy:2375 32 | ``` 33 | 34 | !!! tip "[LinuxServer.io](https://linuxserver.io) has their own version available [here](https://github.com/Minituff/nautical-backup/issues/230)." 35 | It is based off the original [Tecnativa/docker-socket-proxy](https://github.com/Tecnativa/docker-socket-proxy), so all the configs and variables will carry over. -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 |

2 | Logo 3 |

4 | 5 |

6 | A simple Docker volume backup tool. 7 |

8 | 9 | Pulls from DockerHub 10 | Docker Image Version (latest semver) 11 | Docker Image Size (tag) 12 | 13 |

14 | 15 | --- 16 |
17 | 18 | 19 | -------------------------------------------------------------------------------- /docs/installation.md: -------------------------------------------------------------------------------- 1 | For a full list of configuration options, see the [Variables](./arguments.md) and [Labels](./labels.md) sections. 2 | 3 | ## Docker Compose Example 4 | 5 | ```yaml 6 | ------8<------ "docker-compose-example.yml" 7 | ``` 8 | 9 | ------8<------ "docker-example-tooltips.md" 10 | 11 | ## Docker CLI Example 12 | 13 | ```bash 14 | ------8<------ "docker-run-example.sh" 15 | ``` 16 | 17 | ------8<------ "docker-example-tooltips.md" 18 | 19 |
20 | -------------------------------------------------------------------------------- /docs/introduction.md: -------------------------------------------------------------------------------- 1 | 2 | Essentially, this is an automated and configurable backup tool built around [rsync](https://en.wikipedia.org/wiki/Rsync). 3 | 4 | ## The Basics 5 | Nautical runs on a `CRON` schedule to: 6 | 7 | 1. Stop the container (if configured) 8 | 2. Run the backup via `rsync` 9 | 3. Restart the container (if stopped) 10 | 11 | ⚗️ **Need more control?** There are many more options available via [variables](./arguments.md) and [labels](./labels.md). 12 | 13 | 14 | ## Sample Configuration 15 | Nautical requires almost no configuration when container volumes are all in a folder matching its `container-name` within the source directory. Of course, we can use [variables](./arguments.md) and [labels](./labels.md) to override these defaults. 16 | 17 | Let's take a look at an example: 18 | 19 | | Container Name | Source Data Directory | Destination Data Directory | 20 | | --------------------------------------------------- | ------------------------------------- | --------------------------------------------- | 21 | | [homepage](https://github.com/gethomepage/homepage) | `/opt/docker-volumes/homepage` | `/mnt/nfs-share/backups/homepage` | 22 | | [trilium](https://github.com/zadam/trilium) | `/opt/docker-volumes/trilium` | `/mnt/nfs-share/backups/trilium` | 23 | | [dozzle](https://github.com/amir20/dozzle) | *N/A* (no data folder) | *N/A* (no backup needed) | 24 | 25 | !!! example "Here is how Nautical fits into the *Sample Configuration*" 26 | === "Docker Compose" 27 | ```yaml 28 | ------8<------ "docker-compose-example.yml:1:7" 29 | - /opt/docker-volumes:/app/source #(2)! 30 | - /mnt/nfs-share/backups:/app/destination #(3)! 
31 | ``` 32 | 33 | ------8<------ "docker-example-tooltips.md" 34 | 35 | === "Docker Cli" 36 | ```bash 37 | ------8<------ "docker-run-example.sh::4" 38 | -v /opt/docker-volumes:/app/source \ #(2)! 39 | -v /mnt/nfs-share/backups:/app/destination \ #(3)! 40 | ------8<------ "docker-run-example.sh:10:" 41 | ``` 42 | 43 | ------8<------ "docker-example-tooltips.md" 44 | -------------------------------------------------------------------------------- /docs/media/Logo-large.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/docs/media/Logo-large.png -------------------------------------------------------------------------------- /docs/media/Logo-transparent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/docs/media/Logo-transparent.png -------------------------------------------------------------------------------- /docs/media/Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/docs/media/Logo.png -------------------------------------------------------------------------------- /docs/media/Logo.psd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/docs/media/Logo.psd -------------------------------------------------------------------------------- /docs/media/homepage-example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/docs/media/homepage-example.png 
-------------------------------------------------------------------------------- /docs/media/homepage-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/docs/media/homepage-logo.png -------------------------------------------------------------------------------- /docs/media/homepage_demo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/docs/media/homepage_demo.png -------------------------------------------------------------------------------- /docs/q-and-a.md: -------------------------------------------------------------------------------- 1 | --- 2 | hide: 3 | - footer 4 | --- 5 | 6 | ## **Why do we need docker volume backups?** 7 | If your Docker Host machine doesn't take snapshots like a ZFS-based machine does, then you aren't protected against faulty configuration or complete deletion of your container data. 8 | 9 | ## **Why do we need to stop the container before a backup?** 10 | This is important for containers that run databases, especially SQL. During database access, the database will be temporarily locked during a write action and then unlocked afterwards. If a container is backed up during a database lock, then your database could become corrupted. 11 | 12 | 13 | Stopping the container guarantees it was given the proper time to gracefully stop all services and unlock the databases before we create a backup. Yes, there will be downtime for this, but it is only a few seconds and you can schedule this to run in off-peak hours. 14 | 15 | ## **Why don't I store the container volumes directly on a NFS share?** 16 | This is a common idea, but SQL databases would constantly go into a locked state about once every few weeks. 
(This happens frequently with apps like [Sonarr](https://github.com/Sonarr/Sonarr), [Radarr](https://github.com/radarr/radarr), [Prowlarr](https://github.com/Prowlarr/Prowlarr), etc.) 17 | Stopping the container first is the only way to guarantee there is no corruption. 18 | 19 | ## **Why don't we backup the entire container itself?** 20 | Containers are meant to be ephemeral, and essentially meaningless. The goal is to have only the data referenced by the container be important--not the container itself. 21 | 22 | If something bad happened to the docker stack, we only need the `docker-compose` files and the data they referenced. This would allow us to be back online in no time! 23 | 24 | If you would like to save data or changes within the docker container, consider making a new image. This would save the modification steps and allow it to be easily replicated. 25 | 26 | ## **Does Nautical support remote backups?** 27 | This question is answered [here](./advanced/remote-backups.md). 28 | 29 | ## **Where do I run Nautical?** 30 | Nautical is only able to access the Docker containers running *on the same machine as the Nautical container itself*. So if you run multiple VMs/LXCs that have unique Docker installations on each of them, then you would need to install Nautical on each one. -------------------------------------------------------------------------------- /docs/rest-api.md: -------------------------------------------------------------------------------- 1 | ## Enable The API 2 | The API is ^^enabled internally by default^^, but you still must open the port for external access. Follow these steps: 3 | 4 | !!! abstract "Why is the REST API on internally?" 5 | The REST API is used internally for Docker [Healthchecks](https://docs.docker.com/reference/dockerfile). 6 | However, if you do not open the port via Docker, then all the endpoints will remain unreachable. 7 | 8 | ### 1. 
Map the port 9 | You need to ensure the port is opened by Docker for the Nautical container. See the ==highlighted== sections of this example Nautical config: 10 | 11 | === "Docker Compose" 12 | ```yaml hl_lines="9 10" 13 | ------8<------ "docker-compose-example-no-tooltips.yml:0:8" 14 | ports: 15 | - "8069:8069/tcp" 16 | ``` 17 | 18 | === "Docker Cli" 19 | 20 | ```bash hl_lines="7 7" 21 | ------8<------ "docker-run-example-no-tooltips.sh::7" 22 | -p 8069:8069/tcp \ 23 | ------8<------ "docker-run-example-no-tooltips.sh:11:" 24 | ``` 25 | 26 | ### 2. Verify it works 27 | To view the API, go to http://localhost:8069/docs in your browser. 28 | 29 | ## Authentication 30 | 31 | The default login is `admin` / `password`. 32 | This can be changed [here](./arguments.md/#api-username-and-password). 33 | 34 | ```bash 35 | curl -X GET \ 36 | 'http://localhost:8069/auth' \ 37 | --header 'Authorization: Basic YWRtaW46cGFzc3dvcmQ=' 38 | ``` 39 | 40 | !!! tip "Use [this](https://mixedanalytics.com/tools/basic-authentication-generator) site to generate a Base64 token." 41 | 42 | ## Dashboard 43 | > GET 44 | > /api/v1/nautical/dashboard 45 | 46 | This endpoint is the quickest way to get a glimpse into everything Nautical has going on. 
47 | 48 | ???+ example "Example response" 49 | ```json 50 | { 51 | "next_cron": { 52 | "1": [ 53 | "Monday, November 20, 2023 at 04:00 AM", 54 | "11/20/23 04:00" 55 | ], 56 | "2": [ 57 | "Tuesday, November 21, 2023 at 04:00 AM", 58 | "11/21/23 04:00" 59 | ], 60 | "3": [ 61 | "Wednesday, November 22, 2023 at 04:00 AM", 62 | "11/22/23 04:00" 63 | ], 64 | "4": [ 65 | "Thursday, November 23, 2023 at 04:00 AM", 66 | "11/23/23 04:00" 67 | ], 68 | "5": [ 69 | "Friday, November 24, 2023 at 04:00 AM", 70 | "11/24/23 04:00" 71 | ], 72 | "cron": "0 4 * * *", 73 | "tz": "America/Los_Angeles" 74 | }, 75 | "next_run": "11/20/23 04:00", 76 | "last_cron": "11/19/23 04:00", 77 | "number_of_containers": "1", 78 | "completed": "0", 79 | "skipped": "1", 80 | "errors": "0", 81 | "last_backup_seconds_taken": "15", 82 | "backup_running": "false" 83 | } 84 | ``` 85 | 86 | ## Next CRON 87 | 88 | > GET 89 | > /api/v1/nautical/next_cron/{occurrences} 90 | > 91 | > {occurrences} = integer between 1 and 100 92 | 93 | Get the next *n* scheduled times Nautical will run. 94 | 95 | ???+ example "Example response" 96 | ```json 97 | { 98 | "1": [ 99 | "Monday, November 20, 2023 at 04:00 AM", 100 | "11/20/23 04:00" 101 | ], 102 | "2": [ 103 | "Tuesday, November 21, 2023 at 04:00 AM", 104 | "11/21/23 04:00" 105 | ], 106 | "cron": "0 4 * * *", 107 | "tz": "America/Los_Angeles" 108 | } 109 | ``` 110 | 111 | ## Start Backup 112 | 113 | > POST 114 | > /api/v1/nautical/start_backup 115 | 116 | Start a backup now. The API will not respond until the backup has completed. 117 | 118 | All the [Variables](./arguments.md) and [Labels](./labels.md) are respected. 119 | 120 | ???+ example "Example response" 121 | ```json 122 | { 123 | "message": "Nautical Backup completed successfully" 124 | } 125 | ``` 126 | 127 | ## Kickoff Backup 128 | 129 | > POST 130 | > /api/v1/nautical/kickoff_backup 131 | 132 | Start a backup now in the background. The API will respond immediately. 
133 | 134 | All the [Variables](./arguments.md) and [Labels](./labels.md) are respected. 135 | 136 | ???+ example "Example response" 137 | ```json 138 | { 139 | "message": "Nautical Backup started successfully" 140 | } 141 | ``` 142 | -------------------------------------------------------------------------------- /docs/stylesheets/extra.css: -------------------------------------------------------------------------------- 1 | /* Override the "DARK" mode theme */ 2 | [data-md-color-scheme="slate"] { 3 | /* --md-hue: 200; */ 4 | --md-default-bg-color: hsla(var(--md-hue),26%,14%,1); 5 | --md-code-hl-color--light: rgba(225, 216, 51, 0.102); 6 | --md-primary-fg-color: #011b3c; 7 | --md-accent-fg-color: #78c9ff; 8 | --md-typeset-a-color: #5365c9; 9 | --md-code-hl-color: #ffff003b; 10 | --md-typeset-mark-color: #ffff003b; 11 | 12 | /* Footer */ 13 | --md-footer-bg-color: hsla(var(--md-hue),26%,14%,1); 14 | --md-footer-bg-color--dark: hsla(var(--md-hue),26%,14%,1); 15 | } 16 | 17 | [data-md-color-scheme="default"] { 18 | --md-footer-bg-color: white; 19 | --md-footer-bg-color--light: hsla(var(--md-hue),15%,14%,1); 20 | --md-footer-bg-color--light: rgb(148, 0, 0); 21 | --md-footer-fg-color--light: #011b3c; 22 | --md-footer-bg-color--dark: white; 23 | --md-footer-fg-color--lighter: white; 24 | 25 | --md-footer-fg-color: #011b3c; 26 | --md-accent-fg-color: #78c9ff; 27 | --md-typeset-a-color: #5365c9; 28 | --md-primary-fg-color: #011b3c; 29 | --md-code-hl-color: #ffff003b; 30 | --md-typeset-mark-color: #ffff003b; 31 | } 32 | 33 | /* Makes the top left logo bigger */ 34 | .md-header__button.md-logo { 35 | scale: 1.6; 36 | } 37 | 38 | .md-search-result mark { 39 | color: #ff9100; 40 | background-color: #ffff003b; 41 | } 42 | 43 | .md-footer-meta__inner { 44 | padding: 0; 45 | } 46 | 47 | .md-nav__source { 48 | background-color: #223c5e; 49 | } -------------------------------------------------------------------------------- /docs/updates.md: 
-------------------------------------------------------------------------------- 1 | --- 2 | hide: 3 | - footer 4 | --- 5 | 6 | Updating Nautical (and any container) is a balance between *convenience*, *reliability* and *security*. 7 | 8 | Updating your container can bring new features, bug fixes and security patches, but can also introduce unintended behavior. 9 | 10 | 11 | ## Understanding Semantic Versioning 12 | [Semver](https://semver.org) is a means to communicate the changes to an application just by looking at the version number. 13 | Nautical uses [Semver](https://semver.org) like this. 14 | 15 | `vMAJOR.MINOR.PATCH`, for example: 16 | Docker Image Version (latest semver) 17 | 18 | * **MAJOR** - A large change that breaks/reworks an existing feature. 19 | * This usually means you will need to change the Nautical configuration. 20 | * **MINOR** - Add functionality in a backward compatible manner. 21 | * Everything *should* continue working without changes to the Nautical configuration. 22 | * **PATCH** - A small change such as updating a dependency, log output, or minor fix. 23 | * From the user perspective, nothing will have changed, but under the hood, small improvements were made. 24 | 25 | ## Manual Updates 26 | To manually update Nautical, simply re-deploy using either of these configs, but specify the latest version of the Nautical. 27 | Currently, the latest version of Nautical is Docker Image Version (latest semver). (do not add the `v`) 28 | 29 | This will need to be done each time a new version is released. 30 | === "Docker Compose" 31 | ```yaml hl_lines="3" 32 | ------8<------ "docker-compose-semver-example.yml::3" 33 | # Rest of config... 
34 | ``` 35 | 36 | === "Docker Cli" 37 | 38 | ```bash hl_lines="6" 39 | ------8<------ "docker-run-example.sh::7" 40 | ------8<------ "docker-run-semver-example.sh" 41 | 42 | # Update the version number in the line above 43 | ``` 44 | 45 | ------8<------ "docker-example-tooltips.md" 46 | 47 | 48 | ## Automatic Updates 49 | [Watchtower](https://github.com/containrrr/watchtower/) is an excellent tool to keep your Docker containers updated. 50 | 51 | While convenient, automatic updates may break things. For this reason we recommend only automatically updating to the latest `PATCH` version. 52 | 53 | === "Patch Updates Only" 54 | !!! note "" 55 | These examples only specify the [Semver](https://semver.org) `vMAJOR.MINOR` numbers, leaving `PATCH` out--this means that Watchtower will update the `PATCH` number if available. 56 | 57 | === "Docker Compose" 58 | ```yaml hl_lines="3" 59 | ------8<------ "docker-compose-example.yml::3" 60 | # Rest of config... 61 | 62 | watchtower: 63 | image: containrrr/watchtower:latest 64 | container_name: watchtower 65 | volumes: 66 | - /var/run/docker.sock:/var/run/docker.sock 67 | command: nautical-backup # (9)! 68 | ``` 69 | 70 | ------8<------ "docker-example-tooltips.md" 71 | 1. Which containers to use. 72 | 73 | Remove this line to update all containers. 74 | 75 | === "Docker Cli" 76 | ```bash hl_lines="7" 77 | ------8<------ "docker-run-example.sh::7" 78 | ------8<------ "docker-run-example.sh:11:" 79 | 80 | docker run -d \ 81 | --name watchtower \ 82 | -v /var/run/docker.sock:/var/run/docker.sock \ 83 | containrrr/watchtower \ 84 | nautical-backup #(9)! 85 | ``` 86 | 87 | ------8<------ "docker-example-tooltips.md" 88 | 1. Which containers to use. 89 | 90 | Remove this line to update all containers. 91 | 92 | === "Minor And Patch Updates" 93 | !!! 
note "" 94 | These examples specify the [Semver](https://semver.org) `vMAJOR` number, leaving `MINOR` `PATCH` out--this means that Watchtower will update `MINOR` and `PATCH` versions if available. 95 | 96 | === "Docker Compose" 97 | ```yaml hl_lines="3" 98 | ------8<------ "docker-compose-semver-major-example.yml::3" 99 | # Rest of config... 100 | 101 | watchtower: 102 | image: containrrr/watchtower:latest 103 | container_name: watchtower 104 | volumes: 105 | - /var/run/docker.sock:/var/run/docker.sock 106 | command: nautical-backup # (9)! 107 | ``` 108 | 109 | ------8<------ "docker-example-tooltips.md" 110 | 1. Which containers to use. 111 | 112 | Remove this line to update all containers. 113 | 114 | === "Docker Cli" 115 | ```bash hl_lines="7" 116 | ------8<------ "docker-run-example.sh::7" 117 | ------8<------ "docker-run-semver-major-example.sh" 118 | 119 | docker run -d \ 120 | --name watchtower \ 121 | -v /var/run/docker.sock:/var/run/docker.sock \ 122 | containrrr/watchtower \ 123 | nautical-backup #(9)! 124 | ``` 125 | 126 | ------8<------ "docker-example-tooltips.md" 127 | 2. Which containers to use. 128 | 129 | Remove this line to update all containers. 130 | 131 | === "Latest Updates (All)" 132 | !!! note "" 133 | If you're really feeling like living on the bleeding edge. You can use the `latest` tag to ensure you are always up to date. 134 | This will get the latest [Semver](https://semver.org) `MAJOR`, `MINOR`, and `PATCH` updates. 135 | 136 | !!! danger "This will most likely break things at some point" 137 | If you go this route, just ensure you aren't using Nautical for anything mission critical, and be prepared to either help troubleshoot or wait for a new version with a bug fix. 138 | 139 | This is an example of using [Watchtower](https://github.com/containrrr/watchtower/) to keep Nautical on the `latest` version. 
140 | 141 | === "Docker Compose" 142 | ```yaml hl_lines="3" 143 | ------8<------ "docker-compose-example.yml::2" 144 | image: minituff/nautical-backup:latest 145 | # Rest of config... 146 | 147 | watchtower: 148 | image: containrrr/watchtower:latest 149 | container_name: watchtower 150 | volumes: 151 | - /var/run/docker.sock:/var/run/docker.sock 152 | command: nautical-backup # (9)! 153 | ``` 154 | 155 | ------8<------ "docker-example-tooltips.md" 156 | 1. Which containers to use. 157 | 158 | Remove this line to update all containers. 159 | 160 | === "Docker Cli" 161 | ```bash hl_lines="7" 162 | ------8<------ "docker-run-example.sh::7" 163 | minituff/nautical-backup:latest 164 | 165 | docker run -d \ 166 | --name watchtower \ 167 | -v /var/run/docker.sock:/var/run/docker.sock \ 168 | containrrr/watchtower \ 169 | nautical-backup #(9)! 170 | ``` 171 | 172 | ------8<------ "docker-example-tooltips.md" 173 | 2. Which containers to use. 174 | 175 | Remove this line to update all containers. 176 | 177 | 178 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: Nautical Backup 2 | site_description: 'A simple Docker volume backup tool.' 
3 | site_author: 'James Tufarelli' 4 | site_url: 'https://minituff.github.io/nautical-backup/' 5 | edit_uri: 'edit/main/docs/' 6 | 7 | # Repository 8 | repo_name: 'GitHub' 9 | repo_url: 'https://github.com/minituff/nautical-backup' 10 | 11 | # copyright: 'Copyright © 2023 James Tufarelli' 12 | 13 | 14 | # Plugins 15 | plugins: 16 | # - site-urls 17 | # git-revision-date-localized: 18 | # enable_creation_date: true 19 | # offline: 20 | - search 21 | # minify: 22 | # minify_html: true 23 | # autolinks: 24 | # git-revision-date-localized: 25 | # type: date 26 | # fallback_to_build_date: true 27 | # macros: 28 | # verbose: true 29 | # include_dir: _includes 30 | # meta-descriptions: # If not provided, auto-generate a description (https://pypi.org/project/mkdocs-meta-descriptions-plugin/) 31 | # meta: 32 | 33 | nav: 34 | - Home: index.md 35 | - Introduction: introduction.md 36 | - Installation: installation.md 37 | - Environment Variables: arguments.md 38 | - Docker Labels: labels.md 39 | - Updating Nautical: updates.md 40 | - Rest API: rest-api.md 41 | - Docker Socket Proxy: docker-socket-proxy.md 42 | - Advanced Usage: 43 | - Source & Destination Mappings: advanced/advanced.md 44 | - NFS Shares: advanced/nfs-share.md 45 | - Remote Backups: advanced/remote-backups.md 46 | - Homepage Dashboard: advanced/homepage-dashboard.md 47 | - Q & A: q-and-a.md 48 | - Developer Documentation: 49 | - Dev Container: developers/dev-container.md 50 | - Tests: developers/tests.md 51 | - Contributing to the Docs: developers/docs.md 52 | - Developer Env Variables: developers/env.md 53 | 54 | watch: 55 | - snippets 56 | - docs 57 | - mkdocs.yml 58 | 59 | extra: 60 | generator: false # "false" will Remove the "Made with Material for MkDocs" branding 61 | # version: 62 | # provider: mike 63 | 64 | theme: 65 | name: material 66 | logo: media/Logo-large.png 67 | features: 68 | # - navigation.tabs 69 | # - navigation.tabs.sticky 70 | # - navigation.sections 71 | - navigation.indexes # Allow 
index pages for section 72 | - navigation.instant 73 | - navigation.top # Back to top button 74 | # - navigation.prune 75 | - navigation.path 76 | - navigation.tracking 77 | # - navigation.expand 78 | - search.suggest 79 | - search.share 80 | - search.highlight 81 | - content.code.annotate 82 | - content.code.copy 83 | - content.code.select 84 | - header.autohide 85 | - announce.dismiss 86 | - toc.follow 87 | # - toc.integrate # Table of contents on the left 88 | - content.action.edit 89 | - content.action.view 90 | - navigation.footer # Add the `prev` and `next` buttons in the footer 91 | # font: 92 | # text: 'Ubuntu' 93 | # code: 'Ubuntu Mono' 94 | palette: 95 | # scheme: slate 96 | 97 | # Toggle light mode 98 | - media: "(prefers-color-scheme: light)" 99 | scheme: default 100 | primary: indigo 101 | accent: orange 102 | toggle: 103 | icon: material/brightness-7 104 | name: Burn my eyes! 105 | 106 | # Toggle dark mode 107 | - media: "(prefers-color-scheme: dark)" 108 | scheme: slate 109 | primary: custom 110 | accent: custom 111 | toggle: 112 | icon: material/brightness-4 113 | name: Love my eyes! 
114 | 115 | extra_css: 116 | - stylesheets/extra.css 117 | 118 | # Extensions 119 | markdown_extensions: 120 | - abbr 121 | - attr_list 122 | - admonition 123 | - def_list 124 | - codehilite: 125 | linenums: true 126 | - toc: 127 | permalink: true # Allow perma-linking the headers 128 | toc_depth: 6 # ignore H4/5/6 129 | - footnotes 130 | - pymdownx.critic 131 | - pymdownx.keys 132 | - pymdownx.arithmatex 133 | - pymdownx.betterem: 134 | smart_enable: all 135 | - pymdownx.caret 136 | - pymdownx.details 137 | - pymdownx.snippets: 138 | check_paths: true 139 | base_path: snippets 140 | - pymdownx.emoji: 141 | emoji_index: !!python/name:material.extensions.emoji.twemoji 142 | emoji_generator: !!python/name:material.extensions.emoji.to_svg 143 | - pymdownx.inlinehilite 144 | - pymdownx.magiclink: 145 | repo_url_shorthand: true 146 | user: funkypenguin 147 | repo: geek-cookbook 148 | - pymdownx.mark 149 | - pymdownx.smartsymbols 150 | - pymdownx.superfences: 151 | custom_fences: 152 | - name: mermaid 153 | class: mermaid 154 | format: !!python/name:pymdownx.superfences.fence_code_format 155 | - pymdownx.tasklist: 156 | custom_checkbox: true 157 | clickable_checkbox: false 158 | - pymdownx.tilde 159 | - pymdownx.tabbed: 160 | alternate_style: true 161 | - meta 162 | - md_in_html -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.black] 2 | line-length = 120 3 | 4 | 5 | 6 | 7 | [tool.pytest.ini_options] 8 | python_files = "test_*.py *_test.py" 9 | addopts = "--ignore-glob=notebooks/* --strict-markers" 10 | required_plugins = "pytest-cov" 11 | pythonpath = ".." 
12 | markers = [ 13 | "it: name a test", 14 | "description: description of a test", 15 | "integration: name an integration test (deselect with '-m \"not integration\"')", 16 | 17 | ] 18 | filterwarnings = [ 19 | 'ignore', 20 | ] 21 | tmpdir_keep=0 22 | 23 | [tool.coverage] 24 | [tool.coverage.run] 25 | omit = [ 26 | # omit anything in a .local directory anywhere 27 | '*/.local/*', 28 | '__init__.py', 29 | 'tests/*', 30 | '*/tests/*', 31 | # omit anything in a .venv directory anywhere 32 | '.venv/*' 33 | ] 34 | 35 | [tool.coverage.report] 36 | skip_empty = true -------------------------------------------------------------------------------- /pytest/test_api.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pytest 3 | from pathlib import Path 4 | from mock import mock, MagicMock, patch 5 | 6 | from fastapi.testclient import TestClient 7 | 8 | from app.db import DB 9 | from app.api.utils import next_cron_occurrences 10 | from app.api.config import Settings 11 | from app.api.main import app 12 | from app.api.authorize import get_settings 13 | 14 | client = TestClient(app) 15 | 16 | 17 | def get_settings_override() -> Settings: 18 | return Settings( 19 | HTTP_REST_API_USERNAME="new-username", 20 | HTTP_REST_API_PASSWORD="new-password", 21 | ) 22 | 23 | 24 | def get_settings_disable() -> Settings: 25 | return Settings( 26 | HTTP_REST_API_USERNAME="", 27 | HTTP_REST_API_PASSWORD="", 28 | ) 29 | 30 | 31 | def reset_settings_override() -> Settings: 32 | return Settings( 33 | HTTP_REST_API_USERNAME="admin", 34 | HTTP_REST_API_PASSWORD="password", 35 | ) 36 | 37 | 38 | class TestAPI: 39 | @classmethod 40 | def setup_class(cls): 41 | """Runs 1 time before all tests in this class""" 42 | app.dependency_overrides[get_settings] = reset_settings_override 43 | 44 | def test_root(self): 45 | response = client.get("/") 46 | assert response.status_code == 200 47 | 48 | def test_login_on_default_settings(self): 49 | response =
client.get("/auth", auth=("admin", "password")) 50 | assert response.status_code == 200 51 | assert response.json() == {"username": "admin"} 52 | 53 | response = client.get("/auth", auth=("admin", "BAD")) 54 | assert response.status_code == 401 55 | 56 | response = client.get("/auth") 57 | assert response.status_code == 401 58 | 59 | def test_login_disable(self, monkeypatch: pytest.MonkeyPatch): 60 | # Apply the environment variable override 61 | app.dependency_overrides[get_settings] = get_settings_disable 62 | 63 | response = client.get("/auth", auth=("", "")) 64 | assert response.status_code == 200 65 | 66 | app.dependency_overrides[get_settings] = reset_settings_override 67 | 68 | def test_login_on_with_env(self, monkeypatch: pytest.MonkeyPatch): 69 | # Apply the environment variable override 70 | app.dependency_overrides[get_settings] = get_settings_override 71 | 72 | response = client.get("/auth", auth=("admin", "password")) 73 | assert response.status_code == 401 74 | 75 | response = client.get("/auth", auth=("new-username", "new-password")) 76 | assert response.status_code == 200 77 | assert response.json() == {"username": "new-username"} 78 | 79 | # Reset the override 80 | app.dependency_overrides[get_settings] = reset_settings_override 81 | 82 | response = client.get("/auth", auth=("admin", "password")) 83 | assert response.status_code == 200 84 | assert response.json() == {"username": "admin"} 85 | 86 | def test_dashboard(self): 87 | db = DB() 88 | response = client.get("/api/v1/nautical/dashboard", auth=("admin", "password")) 89 | assert response.status_code == 200 90 | 91 | next_crons = next_cron_occurrences(5) 92 | 93 | assert response.json()["backup_running"] == db.get("backup_running", False) 94 | assert response.json()["errors"] == db.get("errors", 0) 95 | assert response.json()["skipped"] == db.get("containers_skipped", 0) 96 | assert response.json()["completed"] == db.get("containers_completed", 0) 97 | assert 
response.json()["number_of_containers"] == db.get("number_of_containers", 0) 98 | assert response.json()["last_cron"] == db.get("last_cron", "None") 99 | assert response.json()["last_backup_seconds_taken"] == db.get("last_backup_seconds_taken", 0) 100 | assert response.json()["next_run"] == next_crons.get("1", [None, None])[1] if next_crons else None 101 | assert len(response.json()["next_cron"]) == 7 102 | assert set(response.json()["next_cron"]) == set(next_crons) if next_crons else None 103 | 104 | @patch("subprocess.run") 105 | def test_start_backup(self, patched_subprocess_run): 106 | response = client.post("/api/v1/nautical/start_backup", auth=("admin", "password")) 107 | assert response.status_code == 200 108 | 109 | def test_next_cron(self): 110 | response = client.get("/api/v1/nautical/next_cron/1", auth=("admin", "password")) 111 | assert response.status_code == 200 112 | -------------------------------------------------------------------------------- /pytest/test_db.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | from app.db import DB 4 | import pytest 5 | from pathlib import Path 6 | 7 | 8 | class TestADB: 9 | @classmethod 10 | def setup_class(cls): 11 | """ 12 | Runs 1 time before all tests in this class 13 | """ 14 | pass 15 | 16 | @classmethod 17 | def teardown_class(cls): 18 | """ 19 | Runs 1 time after all tests in this class 20 | """ 21 | # Remove the folders created by `tmp_path` 22 | shutil.rmtree("fake-path", ignore_errors=True) 23 | shutil.rmtree("fake-path2", ignore_errors=True) 24 | 25 | def test_read_invalid_db(self, tmp_path: Path): 26 | db = DB(tmp_path) # This is a folder, so it is invalid 27 | assert db.get("test") == None 28 | assert db.get("test", {}) == {} 29 | 30 | def test_db_paths(self, tmp_path: Path): 31 | db = DB(os.path.join(tmp_path, "test-db.json")) 32 | assert db.db_path.endswith("test-db.json") 33 | 34 | db = DB(tmp_path) 35 | assert 
db.db_path.endswith("nautical-db.json") 36 | 37 | def test_db_get(self, tmp_path): 38 | db = DB(tmp_path) 39 | db.put("test", "testVal") 40 | assert db.get("test") == "testVal" 41 | 42 | def test_db_get_override(self, tmp_path): 43 | db = DB(tmp_path) 44 | 45 | val = {"value": 1, "value2": True, "value3": "value3"} 46 | db.put("test", val) 47 | 48 | assert db.get("test") == val 49 | 50 | db.put("test", "override") 51 | assert db.get("test") == "override" 52 | 53 | def test_db_delete(self, tmp_path): 54 | db = DB(tmp_path) 55 | db.put("test", "testVal") 56 | assert db.get("test") == "testVal" 57 | 58 | db.put("test", "") 59 | assert db.get("test") == "" 60 | 61 | db.delete("test") 62 | assert db.get("test") == None 63 | 64 | @pytest.fixture(scope="function", autouse=True) 65 | def test_db_env(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch): 66 | monkeypatch.setenv("NAUTICAL_DB_PATH", "fake-path") 67 | monkeypatch.setenv("NAUTICAL_DB_NAME", "test-db.json") 68 | 69 | db = DB() 70 | assert db.db_path == "fake-path/test-db.json" 71 | 72 | monkeypatch.setenv("NAUTICAL_DB_PATH", "fake-path2") 73 | monkeypatch.setenv("NAUTICAL_DB_NAME", "test-db2.json") 74 | 75 | db = DB() 76 | assert db.db_path == "fake-path2/test-db2.json" 77 | 78 | monkeypatch.setenv("NAUTICAL_DB_PATH", "fake-path3") 79 | # Should not be used since we only pass a folder. 
the default name should be used 80 | monkeypatch.setenv("NAUTICAL_DB_NAME", "test-db3.json") 81 | db = DB(tmp_path) 82 | assert db.db_path.endswith("nautical-db.json") 83 | -------------------------------------------------------------------------------- /pytest/test_logger.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | import os 3 | import pytest 4 | from pathlib import Path 5 | from mock import mock, MagicMock, patch 6 | from app.logger import LogType, Logger, LogLevel 7 | import pytest 8 | from datetime import datetime 9 | 10 | 11 | class TestLogger: 12 | @classmethod 13 | def setup_class(cls): 14 | """Runs 1 time before all tests in this class""" 15 | pass 16 | 17 | def test_init_(self, monkeypatch: pytest.MonkeyPatch): 18 | monkeypatch.setenv("LOG_LEVEL", "INFO") 19 | monkeypatch.setenv("REPORT_FILE_LOG_LEVEL", "INFO") 20 | monkeypatch.setenv("REPORT_FILE_ON_BACKUP_ONLY", "TRUE") 21 | monkeypatch.setenv("DEST_LOCATION", "/app/destination") 22 | 23 | logger = Logger() 24 | assert logger.script_logging_level is LogLevel.INFO 25 | assert logger.report_file_logging_level is LogLevel.INFO 26 | assert logger.report_file_on_backup_only is True 27 | assert logger.dest_location == "/app/destination" 28 | 29 | monkeypatch.setenv("LOG_LEVEL", "DEBUG") 30 | monkeypatch.setenv("REPORT_FILE_LOG_LEVEL", "DEBUG") 31 | monkeypatch.setenv("REPORT_FILE_ON_BACKUP_ONLY", "FALSE") 32 | 33 | logger = Logger() 34 | assert logger.script_logging_level is LogLevel.DEBUG 35 | assert logger.report_file_logging_level is LogLevel.DEBUG 36 | assert logger.report_file_on_backup_only is False 37 | 38 | monkeypatch.setenv("LOG_LEVEL", "") 39 | monkeypatch.setenv("REPORT_FILE_LOG_LEVEL", "") 40 | monkeypatch.setenv("REPORT_FILE_ON_BACKUP_ONLY", "") 41 | 42 | logger = Logger() 43 | assert logger.script_logging_level is LogLevel.INFO 44 | assert logger.report_file_logging_level is LogLevel.INFO 45 | assert 
logger.report_file_on_backup_only is True 46 | 47 | rf = f"Backup Report - {datetime.now().strftime('%Y-%m-%d')}.txt" 48 | assert logger.report_file == rf 49 | 50 | @patch("builtins.open", new_callable=MagicMock) 51 | def test_create_report_file(self, mock_open: MagicMock, tmp_path: Path): 52 | logger = Logger() 53 | logger.dest_location = tmp_path 54 | logger.report_file = "mock_report_file.txt" 55 | 56 | mock_path = tmp_path / logger.report_file 57 | 58 | # Call log_this 59 | logger.log_this("mock_message", LogLevel.WARN) 60 | 61 | # Check that the message was written to the report file 62 | mock_open.assert_called_once_with(str(mock_path), "a") 63 | 64 | def test_delete_report_files(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch): 65 | monkeypatch.setenv("LOG_LEVEL", "TRACE") 66 | monkeypatch.setenv("REPORT_FILE_LOG_LEVEL", "TRACE") 67 | 68 | report_file = f"Backup Report - {datetime.now().strftime('%Y-%m-%d')}.txt" 69 | # Create fake files in tmp_path 70 | (tmp_path / "Backup Report - 2024-01-01.txt").touch() 71 | (tmp_path / "Backup Report - 2025-01-01.txt").touch() 72 | (tmp_path / f"{report_file}").touch() 73 | 74 | logger = Logger() 75 | logger.dest_location = tmp_path 76 | logger.report_file_on_backup_only = False 77 | 78 | logger._delete_old_report_files() 79 | 80 | # List files in tmp_path 81 | files = [f.name for f in tmp_path.iterdir()] 82 | assert files[0] == report_file 83 | assert len(files) == 1 84 | 85 | def test_create_new_report_file(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch): 86 | monkeypatch.setenv("LOG_LEVEL", "TRACE") 87 | monkeypatch.setenv("REPORT_FILE_LOG_LEVEL", "TRACE") 88 | 89 | report_file = f"Backup Report - {datetime.now().strftime('%Y-%m-%d')}.txt" 90 | logger = Logger() 91 | logger.dest_location = tmp_path 92 | logger.report_file_on_backup_only = False 93 | 94 | logger._create_new_report_file() 95 | 96 | # List files in tmp_path 97 | files = [f.name for f in tmp_path.iterdir()] 98 | assert files[0] == 
report_file 99 | assert len(files) == 1 100 | 101 | logger._create_new_report_file() 102 | assert len(files) == 1 103 | 104 | @patch("builtins.open", new_callable=MagicMock) 105 | @patch("builtins.print") 106 | def test_log_level_trace( 107 | self, mock_print: MagicMock, mock_open: MagicMock, tmp_path: Path, monkeypatch: pytest.MonkeyPatch 108 | ): 109 | monkeypatch.setenv("LOG_LEVEL", "TRACE") 110 | monkeypatch.setenv("REPORT_FILE_LOG_LEVEL", "TRACE") 111 | 112 | logger = Logger() 113 | logger.dest_location = tmp_path 114 | logger.report_file = "mock_report_file.txt" 115 | logger.report_file_on_backup_only = False 116 | 117 | # Call log_this 118 | logger.log_this("mock_trace", LogLevel.TRACE, LogType.DEFAULT) 119 | logger.log_this("mock_debug", LogLevel.DEBUG, LogType.DEFAULT) 120 | logger.log_this("mock_info", LogLevel.INFO, LogType.DEFAULT) 121 | logger.log_this("mock_warn", LogLevel.WARN, LogType.DEFAULT) 122 | logger.log_this("mock_error", LogLevel.ERROR, LogType.DEFAULT) 123 | 124 | assert mock_open.call_count == 5 125 | assert mock_print.call_count == 5 126 | 127 | assert mock_print.call_args_list[0][0][0] == "TRACE: mock_trace" 128 | assert mock_print.call_args_list[1][0][0] == "DEBUG: mock_debug" 129 | assert mock_print.call_args_list[2][0][0] == "INFO: mock_info" 130 | assert mock_print.call_args_list[3][0][0] == "WARN: mock_warn" 131 | assert mock_print.call_args_list[4][0][0] == "ERROR: mock_error" 132 | 133 | @patch("builtins.open", new_callable=MagicMock) 134 | @patch("builtins.print") 135 | def test_log_level_debug( 136 | self, mock_print: MagicMock, mock_open: MagicMock, tmp_path: Path, monkeypatch: pytest.MonkeyPatch 137 | ): 138 | monkeypatch.setenv("LOG_LEVEL", "DEBUG") 139 | monkeypatch.setenv("REPORT_FILE_LOG_LEVEL", "DEBUG") 140 | 141 | logger = Logger() 142 | logger.dest_location = tmp_path 143 | logger.report_file = "mock_report_file.txt" 144 | 145 | # Call log_this 146 | logger.log_this("mock_trace", LogLevel.TRACE) 147 | 
logger.log_this("mock_debug", LogLevel.DEBUG) 148 | logger.log_this("mock_info", LogLevel.INFO) 149 | logger.log_this("mock_warn", LogLevel.WARN) 150 | logger.log_this("mock_error", LogLevel.ERROR) 151 | 152 | assert mock_print.call_count == 4 153 | assert mock_open.call_count == 4 154 | 155 | assert mock_print.call_args_list[0][0][0] == "DEBUG: mock_debug" 156 | assert mock_print.call_args_list[1][0][0] == "INFO: mock_info" 157 | assert mock_print.call_args_list[2][0][0] == "WARN: mock_warn" 158 | assert mock_print.call_args_list[3][0][0] == "ERROR: mock_error" 159 | 160 | @patch("builtins.open", new_callable=MagicMock) 161 | @patch("builtins.print") 162 | def test_log_level_info( 163 | self, mock_print: MagicMock, mock_open: MagicMock, tmp_path: Path, monkeypatch: pytest.MonkeyPatch 164 | ): 165 | monkeypatch.setenv("LOG_LEVEL", "INFO") 166 | monkeypatch.setenv("REPORT_FILE_LOG_LEVEL", "INFO") 167 | 168 | logger = Logger() 169 | logger.dest_location = tmp_path 170 | logger.report_file = "mock_report_file.txt" 171 | 172 | # Call log_this 173 | logger.log_this("mock_trace", LogLevel.TRACE) 174 | logger.log_this("mock_debug", LogLevel.DEBUG) 175 | logger.log_this("mock_info", LogLevel.INFO) 176 | logger.log_this("mock_warn", LogLevel.WARN) 177 | logger.log_this("mock_error", LogLevel.ERROR) 178 | 179 | assert mock_print.call_count == 3 180 | assert mock_open.call_count == 3 181 | 182 | assert mock_print.call_args_list[0][0][0] == "INFO: mock_info" 183 | assert mock_print.call_args_list[1][0][0] == "WARN: mock_warn" 184 | assert mock_print.call_args_list[2][0][0] == "ERROR: mock_error" 185 | 186 | @patch("builtins.open", new_callable=MagicMock) 187 | @patch("builtins.print") 188 | def test_log_level_warn( 189 | self, mock_print: MagicMock, mock_open: MagicMock, tmp_path: Path, monkeypatch: pytest.MonkeyPatch 190 | ): 191 | monkeypatch.setenv("LOG_LEVEL", "WARN") 192 | monkeypatch.setenv("REPORT_FILE_LOG_LEVEL", "WARN") 193 | 194 | logger = Logger() 195 | 
logger.dest_location = tmp_path 196 | logger.report_file = "mock_report_file.txt" 197 | 198 | # Call log_this 199 | logger.log_this("mock_trace", LogLevel.TRACE) 200 | logger.log_this("mock_debug", LogLevel.DEBUG) 201 | logger.log_this("mock_info", LogLevel.INFO) 202 | logger.log_this("mock_warn", LogLevel.WARN) 203 | logger.log_this("mock_error", LogLevel.ERROR) 204 | 205 | assert mock_print.call_count == 2 206 | assert mock_open.call_count == 2 207 | 208 | assert mock_print.call_args_list[0][0][0] == "WARN: mock_warn" 209 | assert mock_print.call_args_list[1][0][0] == "ERROR: mock_error" 210 | 211 | @patch("builtins.open", new_callable=MagicMock) 212 | @patch("builtins.print") 213 | def test_log_level_error( 214 | self, mock_print: MagicMock, mock_open: MagicMock, tmp_path: Path, monkeypatch: pytest.MonkeyPatch 215 | ): 216 | monkeypatch.setenv("LOG_LEVEL", "ERROR") 217 | monkeypatch.setenv("REPORT_FILE_LOG_LEVEL", "ERROR") 218 | 219 | logger = Logger() 220 | logger.dest_location = tmp_path 221 | logger.report_file = "mock_report_file.txt" 222 | 223 | # Call log_this 224 | logger.log_this("mock_trace", LogLevel.TRACE) 225 | logger.log_this("mock_debug", LogLevel.DEBUG) 226 | logger.log_this("mock_info", LogLevel.INFO) 227 | logger.log_this("mock_warn", LogLevel.WARN) 228 | logger.log_this("mock_error", LogLevel.ERROR) 229 | 230 | assert mock_print.call_count == 1 231 | assert mock_open.call_count == 1 232 | 233 | assert mock_print.call_args_list[0][0][0] == "ERROR: mock_error" 234 | 235 | @patch("builtins.open", new_callable=MagicMock) 236 | @patch("builtins.print") 237 | def test_differnt_log_levels( 238 | self, mock_print: MagicMock, mock_open: MagicMock, tmp_path: Path, monkeypatch: pytest.MonkeyPatch 239 | ): 240 | monkeypatch.setenv("LOG_LEVEL", "DEBUG") 241 | monkeypatch.setenv("REPORT_FILE_LOG_LEVEL", "ERROR") 242 | 243 | logger = Logger() 244 | logger.dest_location = tmp_path 245 | logger.report_file = "mock_report_file.txt" 246 | 247 | # Call log_this 
248 | logger.log_this("mock_trace", LogLevel.TRACE) 249 | logger.log_this("mock_debug", LogLevel.DEBUG) 250 | logger.log_this("mock_info", LogLevel.INFO) 251 | logger.log_this("mock_warn", LogLevel.WARN) 252 | logger.log_this("mock_error", LogLevel.ERROR) 253 | 254 | assert mock_print.call_count == 4 255 | assert mock_open.call_count == 1 256 | 257 | @patch("builtins.open", new_callable=MagicMock) 258 | @patch("builtins.print") 259 | def test_differnt_log_levels2( 260 | self, mock_print: MagicMock, mock_open: MagicMock, tmp_path: Path, monkeypatch: pytest.MonkeyPatch 261 | ): 262 | monkeypatch.setenv("LOG_LEVEL", "WARN") 263 | monkeypatch.setenv("REPORT_FILE_LOG_LEVEL", "INFO") 264 | 265 | logger = Logger() 266 | logger.dest_location = tmp_path 267 | logger.report_file = "mock_report_file.txt" 268 | 269 | # Call log_this 270 | logger.log_this("mock_trace", LogLevel.TRACE) 271 | logger.log_this("mock_debug", LogLevel.DEBUG) 272 | logger.log_this("mock_info", LogLevel.INFO) 273 | logger.log_this("mock_warn", LogLevel.WARN) 274 | logger.log_this("mock_error", LogLevel.ERROR) 275 | 276 | assert mock_print.call_count == 2 277 | assert mock_open.call_count == 3 278 | 279 | @patch("builtins.print") 280 | def test_print(self, mock_print: MagicMock, tmp_path: Path): 281 | logger = Logger() 282 | logger.dest_location = tmp_path 283 | logger.report_file = "mock_report_file.txt" 284 | 285 | # Call log_this 286 | logger.log_this("mock_message", LogLevel.WARN) 287 | 288 | assert mock_print.call_args[0][0] == "WARN: mock_message" 289 | -------------------------------------------------------------------------------- /pytest/test_nautical_env.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pytest 3 | from pathlib import Path 4 | from mock import PropertyMock, mock, MagicMock, patch 5 | from pathlib import Path 6 | import docker 7 | from docker.models.containers import Container 8 | from itertools import cycle 9 | 10 | 
from app.nautical_env import NauticalEnv 11 | from app.backup import NauticalBackup 12 | 13 | 14 | class TestNauticalEnv: 15 | @classmethod 16 | def setup_class(cls): 17 | """Runs 1 time before all tests in this class""" 18 | pass 19 | 20 | def test_populate_override_dirs(self, monkeypatch: pytest.MonkeyPatch): 21 | monkeypatch.setenv("OVERRIDE_SOURCE_DIR", "example1:example1-new-source-data,ctr2:ctr2-new-source") 22 | monkeypatch.setenv("OVERRIDE_DEST_DIR", "example3:example3-new-deste-data,ctr4:ctr4-new-dest") 23 | nautical_env = NauticalEnv() 24 | 25 | assert nautical_env.OVERRIDE_SOURCE_DIR == {"example1": "example1-new-source-data", "ctr2": "ctr2-new-source"} 26 | 27 | assert "example1" in nautical_env.OVERRIDE_SOURCE_DIR 28 | assert "fake" not in nautical_env.OVERRIDE_SOURCE_DIR 29 | 30 | assert nautical_env.OVERRIDE_DEST_DIR == {"example3": "example3-new-deste-data", "ctr4": "ctr4-new-dest"} 31 | 32 | assert "example3" in nautical_env.OVERRIDE_DEST_DIR 33 | assert "fake" not in nautical_env.OVERRIDE_DEST_DIR 34 | -------------------------------------------------------------------------------- /pytest/test_rsync.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from pathlib import Path 3 | from mock import PropertyMock, mock, MagicMock, patch 4 | from pathlib import Path 5 | 6 | from app.nautical_env import NauticalEnv 7 | 8 | 9 | # class TestRsync: 10 | # def test_rsync_commands(self, monkeypatch: pytest.MonkeyPatch): 11 | 12 | # # Define the source location 13 | # monkeypatch.setenv("DEST_LOCATION", "./tests/destination") 14 | # monkeypatch.setenv("SOURCE_LOCATION", "./tests/source") 15 | 16 | # env = NauticalEnv() 17 | # SOURCE_LOCATION = env.SOURCE_LOCATION 18 | 19 | # # Create directories and files 20 | # Path(SOURCE_LOCATION, "container1").mkdir(parents=True, exist_ok=True) 21 | # Path(SOURCE_LOCATION, "container1", "test.txt").touch() 22 | 23 | # Path(SOURCE_LOCATION, 
"container2").mkdir(parents=True, exist_ok=True) 24 | # Path(SOURCE_LOCATION, "container1", "test.txt").touch() 25 | -------------------------------------------------------------------------------- /pytest/test_utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pytest 3 | from pathlib import Path 4 | from mock import mock, MagicMock, patch 5 | import datetime 6 | 7 | from app.api.utils import next_cron_occurrences 8 | 9 | 10 | class TestUtils: 11 | @classmethod 12 | def setup_class(cls): 13 | """ 14 | Runs 1 time before all tests in this class 15 | """ 16 | pass 17 | 18 | def test_next_cron_occurrences( 19 | self, 20 | monkeypatch: pytest.MonkeyPatch, 21 | ): 22 | faked_now = datetime.datetime(2022, 1, 1, 14, 0, 0) 23 | monkeypatch.setenv("TZ", "Etc/UTC") 24 | assert next_cron_occurrences(1, faked_now) == { 25 | "cron": "0 4 * * *", 26 | "tz": "Etc/UTC", 27 | "1": ["Sunday, January 02, 2022 at 04:00 AM", "01/02/22 04:00"], 28 | } 29 | 30 | monkeypatch.setenv("CRON_SCHEDULE", "0 4 * * *") 31 | monkeypatch.setenv("TZ", "Etc/UTC") 32 | assert next_cron_occurrences(1, faked_now) == { 33 | "cron": "0 4 * * *", 34 | "tz": "Etc/UTC", 35 | "1": ["Sunday, January 02, 2022 at 04:00 AM", "01/02/22 04:00"], 36 | } 37 | 38 | faked_now = datetime.datetime(2023, 11, 1, 14, 0, 0) 39 | monkeypatch.setenv("CRON_SCHEDULE", "0 8 * * *") 40 | monkeypatch.setenv("TZ", "America/Phoenix") 41 | assert next_cron_occurrences(2, faked_now) == { 42 | "cron": "0 8 * * *", 43 | "tz": "America/Phoenix", 44 | "1": ["Thursday, November 02, 2023 at 08:00 AM", "11/02/23 08:00"], 45 | "2": ["Friday, November 03, 2023 at 08:00 AM", "11/03/23 08:00"], 46 | } 47 | 48 | assert next_cron_occurrences(-10, faked_now) == { 49 | "cron": "0 8 * * *", 50 | "tz": "America/Phoenix", 51 | "1": ["Thursday, November 02, 2023 at 08:00 AM", "11/02/23 08:00"], 52 | } 53 | 54 | def test_next_cron_occurrences_disabled( 55 | self, 56 | monkeypatch: 
pytest.MonkeyPatch, 57 | ): 58 | monkeypatch.setenv("CRON_SCHEDULE_ENABLED", "false") 59 | faked_now = datetime.datetime(2022, 1, 1, 14, 0, 0) 60 | assert next_cron_occurrences(1, faked_now) == None 61 | 62 | def test_next_cron_occurrences_with_bad_value( 63 | self, 64 | monkeypatch: pytest.MonkeyPatch, 65 | ): 66 | monkeypatch.setenv("CRON_SCHEDULE_ENABLED", "oogabooga") 67 | faked_now = datetime.datetime(2022, 1, 1, 14, 0, 0) 68 | res = next_cron_occurrences(1, faked_now) 69 | assert res and res["cron"] == "0 4 * * *" 70 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | # Install everything in the requirements.txt file 2 | -r requirements.txt 3 | 4 | # Tests 5 | pytest==8.3.5 6 | pytest-cov==6.1.1 7 | black==25.1.0 8 | httpx==0.28.1 9 | pytest-testdox==3.1.0 10 | mock==5.2.0 11 | pre-commit==4.2.0 12 | 13 | # Docs 14 | mkdocs-material==9.6.14 -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | fastapi==0.115.12 2 | pydantic==2.11.5 3 | uvicorn==0.34.2 4 | croniter==6.0.0 5 | pytz==2025.2 6 | docker==7.1.0 7 | pydantic-settings==2.9.1 8 | -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/ci-test/dependencies.d/init-env: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/s6-overlay/etc/s6-overlay/s6-rc.d/ci-test/dependencies.d/init-env -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/ci-test/dependencies.d/init-nautical: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/s6-overlay/etc/s6-overlay/s6-rc.d/ci-test/dependencies.d/init-nautical -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/ci-test/run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | # shellcheck shell=bash 3 | 4 | # This should only be used for Unit tests 5 | if [ "$TEST_MODE" != "-1" ]; then 6 | echo "--- RUNNING IN TEST MODE ("$TEST_MODE") ---" 7 | 8 | # Set exit code to 0, will be overwritten for each test. Tests are only run 1 at at time 9 | exit_code=0 10 | 11 | # Run the tests and capture their exit code 12 | if [ "$TEST_MODE" == "1" ]; then 13 | with-contenv bash /tests/_integration_tests.sh test1 14 | exit_code=$? 15 | elif [ "$TEST_MODE" == "2" ]; then 16 | with-contenv bash /tests/_integration_tests.sh test2 17 | exit_code=$? 18 | elif [ "$TEST_MODE" == "3" ]; then 19 | with-contenv bash /tests/_integration_tests.sh test3 20 | exit_code=$? 21 | 22 | bash /tests/_fix_coverage_paths.sh 23 | elif [ "$TEST_MODE" == "4" ]; then 24 | cd /tests # The .simplecov must be detected in the directory from where the bashcov command is run from 25 | rm -rf /coverage/* # Remove the coverage (if it exists) 26 | # with-contenv bashcov /tests/_tests.sh 27 | # TODO: Add python integration tests 28 | exit_code=$? 29 | else 30 | echo "UNKNOWN TEST MODE: ${TEST_MODE}" 31 | fi 32 | 33 | # Tell S6 which exit code to use when the container exits 34 | echo "$exit_code" >/run/s6-linux-init-container-results/exitcode 35 | 36 | echo "Shutting down container since tests completed. 
EXIT CODE: ${exit_code}" 37 | 38 | kill -SIGTERM 1 # Quit the container 39 | 40 | else 41 | if [ "$LOG_LEVEL" == "TRACE" ]; then 42 | echo "TRACE: TEST_MODE: ${TEST_MODE}" 43 | fi 44 | fi 45 | -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/ci-test/type: -------------------------------------------------------------------------------- 1 | oneshot -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/ci-test/up: -------------------------------------------------------------------------------- 1 | with-contenv bash /etc/s6-overlay/s6-rc.d/ci-test/run.sh -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/init-backup-on-start/dependencies.d/init-env: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/s6-overlay/etc/s6-overlay/s6-rc.d/init-backup-on-start/dependencies.d/init-env -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/init-backup-on-start/dependencies.d/init-nautical: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/s6-overlay/etc/s6-overlay/s6-rc.d/init-backup-on-start/dependencies.d/init-nautical -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/init-backup-on-start/run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | 3 | source /app/logger.sh # Use the logger script 4 | 5 | if [ "$BACKUP_ON_START" = "true" ]; then 6 | logThis "Starting backup since BACKUP_ON_START is true" "INFO" "init" 7 | logThis "Note - 
BACKUP_ON_START logs are not available until all containers are processed, however the report file updates in real-time." "INFO" "init" 8 | # The backup script must be run from the root directory 9 | cd / 10 | # python3 /app/backup.py 11 | nautical 12 | fi 13 | 14 | # S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0 must be set in the container environment since the backup could take over 5 seconds -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/init-backup-on-start/timeout-up: -------------------------------------------------------------------------------- 1 | 0 -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/init-backup-on-start/type: -------------------------------------------------------------------------------- 1 | oneshot -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/init-backup-on-start/up: -------------------------------------------------------------------------------- 1 | with-contenv bash /etc/s6-overlay/s6-rc.d/init-backup-on-start/run.sh -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/init-env/type: -------------------------------------------------------------------------------- 1 | oneshot -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/init-env/up: -------------------------------------------------------------------------------- 1 | with-contenv bash /app/env.sh -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/init-nautical/dependencies.d/init-env: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/s6-overlay/etc/s6-overlay/s6-rc.d/init-nautical/dependencies.d/init-env -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/init-nautical/type: -------------------------------------------------------------------------------- 1 | oneshot -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/init-nautical/up: -------------------------------------------------------------------------------- 1 | with-contenv bash /app/entry.sh -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/svc-cron/dependencies.d/init-env: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/s6-overlay/etc/s6-overlay/s6-rc.d/svc-cron/dependencies.d/init-env -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/svc-cron/dependencies.d/init-nautical: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/s6-overlay/etc/s6-overlay/s6-rc.d/svc-cron/dependencies.d/init-nautical -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/svc-cron/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | # shellcheck shell=bash 3 | 4 | source /app/logger.sh # Use the logger script 5 | 6 | logThis "Initialization complete. 
Awaiting CRON schedule: $CRON_SCHEDULE" "INFO" "init" 7 | 8 | # Start cron and keep container running 9 | exec /usr/sbin/crond -f -l 8 -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/svc-cron/type: -------------------------------------------------------------------------------- 1 | longrun -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/svc-http-api/dependencies.d/init-env: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/s6-overlay/etc/s6-overlay/s6-rc.d/svc-http-api/dependencies.d/init-env -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/svc-http-api/dependencies.d/init-nautical: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/s6-overlay/etc/s6-overlay/s6-rc.d/svc-http-api/dependencies.d/init-nautical -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/svc-http-api/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | # shellcheck shell=bash 3 | 4 | source /app/logger.sh # Use the logger script 5 | 6 | 7 | if [ "$HTTP_REST_API_ENABLED" == "true" ]; then 8 | logThis "API listening on port 8069..." "INFO" "init" 9 | fi 10 | 11 | 12 | logThis "Running: exec with-contenv python3 -m uvicorn app.api.main:app --host 0.0.0.0 --port 8069 --lifespan on --use-colors --log-level 'warning'" "TRACE" "init" 13 | 14 | cd / # Do not leave this out. 
This next line must be run from the root directory 15 | exec with-contenv python3 -m uvicorn app.api.main:app --host 0.0.0.0 --port 8069 --lifespan on --use-colors --log-level 'warning' 16 | #-log-level Options: 'critical', 'error', 'warning', 'info', 'debug', 'trace'. Default: 'info'. 17 | -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/svc-http-api/type: -------------------------------------------------------------------------------- 1 | longrun -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/user/contents.d/ci-test: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/s6-overlay/etc/s6-overlay/s6-rc.d/user/contents.d/ci-test -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/user/contents.d/init-backup-on-start: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/s6-overlay/etc/s6-overlay/s6-rc.d/user/contents.d/init-backup-on-start -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/user/contents.d/init-env: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/s6-overlay/etc/s6-overlay/s6-rc.d/user/contents.d/init-env -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/user/contents.d/init-nautical: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/s6-overlay/etc/s6-overlay/s6-rc.d/user/contents.d/init-nautical -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/user/contents.d/svc-cron: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/s6-overlay/etc/s6-overlay/s6-rc.d/user/contents.d/svc-cron -------------------------------------------------------------------------------- /s6-overlay/etc/s6-overlay/s6-rc.d/user/contents.d/svc-http-api: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Minituff/nautical-backup/eb7e0d152be967b2695256cddee30a38bafe52e5/s6-overlay/etc/s6-overlay/s6-rc.d/user/contents.d/svc-http-api -------------------------------------------------------------------------------- /snippets/docker-compose-example-no-tooltips.yml: -------------------------------------------------------------------------------- 1 | services: 2 | nautical-backup: 3 | image: minituff/nautical-backup:2.13 4 | container_name: nautical-backup 5 | volumes: 6 | - /var/run/docker.sock:/var/run/docker.sock 7 | - /config:/config 8 | - /source:/app/source 9 | - /destination:/app/destination 10 | environment: 11 | - TZ=America/Los_Angeles 12 | - CRON_SCHEDULE=0 4 * * * 13 | - SKIP_CONTAINERS=example1,example2,example3 -------------------------------------------------------------------------------- /snippets/docker-compose-example.yml: -------------------------------------------------------------------------------- 1 | services: 2 | nautical-backup: 3 | image: minituff/nautical-backup:2.13 #(7)! 4 | container_name: nautical-backup 5 | volumes: 6 | - /var/run/docker.sock:/var/run/docker.sock #(1)! 7 | - /config:/config #(9)! 8 | - /source:/app/source #(2)! 
9 | - /destination:/app/destination #(3)! 10 | environment: # Optional variables (4) 11 | - TZ=America/Los_Angeles #(8)! 12 | - CRON_SCHEDULE=0 4 * * * #(5)! 13 | - SKIP_CONTAINERS=example1,example2,example3 #(6)! -------------------------------------------------------------------------------- /snippets/docker-compose-semver-example.yml: -------------------------------------------------------------------------------- 1 | services: 2 | nautical-backup: 3 | image: minituff/nautical-backup:2.13.1 # Change this to the latest version 4 | -------------------------------------------------------------------------------- /snippets/docker-compose-semver-major-example.yml: -------------------------------------------------------------------------------- 1 | services: 2 | nautical-backup: 3 | image: minituff/nautical-backup:2 -------------------------------------------------------------------------------- /snippets/docker-example-tooltips.md: -------------------------------------------------------------------------------- 1 | 1. Mount the docker socket. Used to start and stop containers. See the [Docker Socket Proxy page](https://minituff.github.io/nautical-backup/docker-socket-proxy) for more information. 2 | 2. Mount the `source` directory. 3 | 3. Mount the `destination` directory. 4 | 4. *TIP*: Avoid using "quotes" in the environment variables. 5 | 5. Scheduled time to run backups. Use [this website](https://crontab.guru) to help pick a CRON schedule. 6 | * Default = `0 4 * * *` - Every day at 4am. 7 | 6. Containers to skip for backup. A comma separated list. 8 | 7. It is recommended to avoid using the `latest` tag. 9 | * This project is under active development, using an exact tag can help avoid updates breaking things. 10 | 8. Set the time-zone. See this [Wikipedia page](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) for a list of available time-zones. 11 | 9. Configuration folder.
This directory will hold Nautical's internal database which stores metrics and history. -------------------------------------------------------------------------------- /snippets/docker-run-example-no-tooltips.sh: -------------------------------------------------------------------------------- 1 | 2 | docker run -d \ 3 | --name nautical-backup \ 4 | -v /var/run/docker.sock:/var/run/docker.sock \ 5 | -v /config:/config \ 6 | -v /source:/app/source \ 7 | -v /destination:/app/destination \ 8 | -e TZ="America/Los_Angeles" \ 9 | -e CRON_SCHEDULE="0 4 * * *" \ 10 | -e SKIP_CONTAINERS="example1,example2,example3" \ 11 | minituff/nautical-backup:2.13 -------------------------------------------------------------------------------- /snippets/docker-run-example.sh: -------------------------------------------------------------------------------- 1 | 2 | docker run -d \ 3 | --name nautical-backup \ 4 | -v /var/run/docker.sock:/var/run/docker.sock \ #(1)! 5 | -v /config:/config \ #(9)! 6 | -v /source:/app/source \ #(2)! 7 | -v /destination:/app/destination \ #(3)! 8 | -e TZ="America/Los_Angeles" \ #(8)! 9 | -e CRON_SCHEDULE="0 4 * * *" \ #(5)! 10 | -e SKIP_CONTAINERS="example1,example2,example3" \ #(6)! 11 | minituff/nautical-backup:2.13 #(7)!
-------------------------------------------------------------------------------- /snippets/docker-run-semver-example.sh: -------------------------------------------------------------------------------- 1 | minituff/nautical-backup:2.13.1 -------------------------------------------------------------------------------- /snippets/docker-run-semver-major-example.sh: -------------------------------------------------------------------------------- 1 | minituff/nautical-backup:2 -------------------------------------------------------------------------------- /snippets/docker-socket-proxy.yml: -------------------------------------------------------------------------------- 1 | docker_socket_proxy: 2 | image: tecnativa/docker-socket-proxy 3 | container_name: docker_socket_proxy 4 | ports: 5 | - 2375:2375 6 | volumes: 7 | - /var/run/docker.sock:/var/run/docker.sock 8 | environment: 9 | # Enable the API access to the following sections of the Docker API 10 | - CONTAINERS=1 11 | - IMAGES=1 12 | - ALLOW_START=1 13 | - ALLOW_STOP=1 14 | - EXEC=1 15 | - VOLUMES=1 -------------------------------------------------------------------------------- /snippets/exec_request_example.md: -------------------------------------------------------------------------------- 1 | ??? example "Test your `exec`" 2 | Before setting the variable/label, it is a good idea to ensure it works first. Here is an example. 3 | 4 | Ensure Nautical is running first, then run: 5 | ```bash 6 | docker exec -it nautical-backup \ 7 | curl -X GET 'google.com' 8 | ``` 9 | **Note:** You can only have 1 *before* and 1 *after* Curl Request. This applies to Nautical itself, not to each container. 10 | 11 | ??? 
quote "Available Environment Variables" 12 | 13 | | Method | Description | 14 | |:-------------------------------------|:----------------------------------------------------------------------------------------| 15 | | `NB_EXEC_CONTAINER_NAME` | The container name* | 16 | | `NB_EXEC_CONTAINER_ID` | The container ID* | 17 | | `NB_EXEC_BEFORE_DURING_OR_AFTER` | When this command is being run. [Options](./arguments.md#when-to-backup-additional-folders) | 18 | | `NB_EXEC_COMMAND` | The exact command executed | 19 | | `NB_EXEC_ATTACHED_TO_CONTAINER` | Is this exec command attached to a container | 20 | | | | 21 | | `NB_EXEC_TOTAL_ERRORS` | The total errors on the last run+ | 22 | | `NB_EXEC_TOTAL_CONTAINERS_COMPLETED` | The amount of containers processed successfully+ | 23 | | `NB_EXEC_TOTAL_CONTAINERS_SKIPPED` | The amount of containers skipped (for any reason)+ | 24 | | `NB_EXEC_TOTAL_NUMBER_OF_CONTAINERS` | The amount of containers Nautical looked at+ | 25 | 26 | * Require access to a container. Eg. When `NB_EXEC_ATTACHED_TO_CONTAINER=true` 27 | 28 | + Must be used `AFTER` so there are values to fill. Eg. When `nautical-backup.exec.after` 29 | 30 | 💰 **Tip:** To use the environment variables in a docker-compose file, you will need to escape them with a double `$`: 31 | ```yaml 32 | labels: 33 | - "nautical-backup.exec.before=echo name: $$NB_EXEC_CONTAINER_NAME" # (1)! 34 | ``` 35 | 36 | 1. Notice the double `$$` 37 | 38 | 🛎️ Want any additional environment variables? Submit an [issue](https://github.com/Minituff/nautical-backup/issues/new). 39 | 40 | 41 | ??? abstract "Executing a script" 42 | If you need to run more than a simple one-liner, we can run an entire script instead. 43 | Here is a basic example: 44 | 45 | Create a file (we will name it `script.sh`) and place it in the mounted `/config` directory. 46 | 47 | **Remember:** We mounted the `/config` folder as part of the [Installation](./installation.md).
48 | 49 | ```bash 50 | #!/usr/bin/env bash 51 | 52 | echo "Hello from script.sh" 53 | 54 | # Variable usage example 55 | echo "NB_EXEC_CONTAINER_NAME: $NB_EXEC_CONTAINER_NAME" 56 | echo "NB_EXEC_CONTAINER_ID: $NB_EXEC_CONTAINER_ID" 57 | ``` 58 | 59 | Give the file execution permission: `chmod +x /config/script.sh` 60 | 61 | **Test the script** 62 | 63 | Ensure Nautical is running first, then run: 64 | ```bash 65 | docker exec -it nautical-backup \ 66 | /bin/bash /config/script.sh 67 | ``` 68 | 69 | -------------------------------------------------------------------------------- /tests/.simplecov: -------------------------------------------------------------------------------- 1 | require 'simplecov' 2 | require 'simplecov-cobertura' 3 | require "simplecov-html" 4 | 5 | # frozen_string_literal: true 6 | 7 | # SimpleCov.formatter = SimpleCov::Formatter::CoberturaFormatter # Converts the `.resultset.json` to `coverage.xml` 8 | 9 | # SimpleCov::Formatter::JSONFormatter, # This formatter breaks the build 10 | 11 | # Use multiple formatters 12 | SimpleCov.formatters = SimpleCov::Formatter::MultiFormatter.new([ 13 | SimpleCov::Formatter::SimpleFormatter, 14 | SimpleCov::Formatter::CoberturaFormatter, 15 | SimpleCov::Formatter::HTMLFormatter, 16 | ]) 17 | 18 | 19 | # .simplecov 20 | SimpleCov.profiles.define 'bashcov' do 21 | filters.clear # This will remove the :root_filter and :bundler_filter that come via simplecov's defaults 22 | load_profile 'rails' 23 | command_name 'Unit Tests' 24 | enable_coverage :branch 25 | primary_coverage :branch 26 | 27 | # Remove any .sh files that start with "_" 28 | add_filter %r{^/_.*.sh} 29 | 30 | # These are not run from the unit test suite 31 | add_filter %r{^/.*utils.sh} 32 | 33 | # simplecov 0.22.0+ 34 | enable_coverage_for_eval if respond_to? :enable_coverage_for_eval 35 | end 36 | 37 | # Conditional loading of profiles 38 | if ENV.key? 'SKIP_PROFILE' 39 | puts "Skipping bashcov profile..." 
40 | else 41 | puts "--- Loading bashcov configurations ---" 42 | SimpleCov.load_profile 'bashcov' 43 | end -------------------------------------------------------------------------------- /tests/_fix_coverage_paths.sh: -------------------------------------------------------------------------------- 1 | # This file is necessary since the bashcov will not run at the root of the container. 2 | # The paths in the coverage.xml file must match the git paths otherwise it will be invalid. 3 | 4 | echo "Fixing coverage paths..." 5 | 6 | # Define the path to your coverage.xml file 7 | COVERAGE_FILE="$PWD/coverage/coverage.xml" 8 | 9 | # Define the original and desired path prefixes 10 | ORIGINAL_PATH_PREFIX="../app/" 11 | DESIRED_PATH_PREFIX="pkg/" 12 | 13 | cat "$COVERAGE_FILE" | grep filename 14 | 15 | # Use sed to replace the path in the filename attribute 16 | sed -i "s|filename=\"$ORIGINAL_PATH_PREFIX|filename=\"$DESIRED_PATH_PREFIX|g" "$COVERAGE_FILE" 17 | 18 | echo "Fixed coverage paths!" 19 | cat "$COVERAGE_FILE" | grep filename 20 | -------------------------------------------------------------------------------- /tests/_integration_tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | 3 | # These are run if TEST_MODE=1 and TEST_MODE=2 4 | 5 | # Change this as alpine updates 6 | ALPINE_VERSION="3.18" 7 | 8 | test_docker() { 9 | EXPECTED_OUTPUT="/usr/local/bin/docker" 10 | ACTUAL_OUTPUT=$(which docker) 11 | 12 | # Compare the actual output to the expected output 13 | if [ "$ACTUAL_OUTPUT" == "$EXPECTED_OUTPUT" ]; then 14 | echo "PASS: 'which docker' returns $EXPECTED_OUTPUT" 15 | else 16 | echo "FAIL: Output does not match expected output." 17 | echo "Expected: $EXPECTED_OUTPUT" 18 | echo "Got: $ACTUAL_OUTPUT" 19 | exit 1 20 | fi 21 | 22 | # Use 'docker --version' to check if it returns something 23 | if [[ $(docker --version) ]]; then 24 | echo "PASS: 'docker --version' returns a value." 
25 | else 26 | echo "FAIL: 'docker --version' did not return a value." 27 | exit 1 28 | fi 29 | 30 | # Use 'docker ps' to check if it returns something 31 | if [[ $(docker ps) ]]; then 32 | echo "PASS: 'docker ps' returns a value." 33 | else 34 | echo "FAIL: 'docker ps' did not return a value." 35 | exit 1 36 | fi 37 | } 38 | 39 | test_cron() { 40 | # Expected output 41 | EXPECTED_OUTPUT="$CRON_SCHEDULE with-contenv nautical" 42 | 43 | # Run the command and capture its output 44 | ACTUAL_OUTPUT=$(crontab -l | grep contenv) 45 | 46 | if [ "$ACTUAL_OUTPUT" != "$EXPECTED_OUTPUT" ]; then 47 | echo "FAIL: CRON output does not match expected output." 48 | echo "Expected: $EXPECTED_OUTPUT" 49 | echo "Got: $ACTUAL_OUTPUT" 50 | exit 1 51 | fi 52 | 53 | # Compare the actual output to the expected output 54 | if [ "$ACTUAL_OUTPUT" != "$EXPECTED_OUTPUT" ]; then 55 | echo "FAIL: CRON output does not match expected output." 56 | echo "Expected: $EXPECTED_OUTPUT" 57 | echo "Got: $ACTUAL_OUTPUT" 58 | exit 1 59 | fi 60 | 61 | echo "PASS: 'crontab -l | grep' bash returns $EXPECTED_OUTPUT" 62 | } 63 | 64 | test_bash() { 65 | EXPECTED_OUTPUT="/bin/bash" 66 | ACTUAL_OUTPUT=$(which bash) 67 | 68 | # Compare the actual output to the expected output 69 | if [ "$ACTUAL_OUTPUT" == "$EXPECTED_OUTPUT" ]; then 70 | echo "PASS: 'which bash' returns $EXPECTED_OUTPUT" 71 | else 72 | echo "FAIL: Bash does not match expected output." 73 | echo "Expected: $EXPECTED_OUTPUT" 74 | echo "Got: $ACTUAL_OUTPUT" 75 | exit 1 76 | fi 77 | 78 | # Use 'bash --version' to check if it returns something 79 | if [[ $(bash --version) ]]; then 80 | echo "PASS: 'bash --version' returns a value." 81 | else 82 | echo "FAIL: 'bash --version' did not return a value." 
83 | exit 1 84 | fi 85 | } 86 | 87 | test_rsync() { 88 | EXPECTED_OUTPUT="/usr/bin/rsync" 89 | ACTUAL_OUTPUT=$(which rsync) 90 | 91 | # Compare the actual output to the expected output 92 | if [ "$ACTUAL_OUTPUT" == "$EXPECTED_OUTPUT" ]; then 93 | echo "PASS: 'which rsync' returns $EXPECTED_OUTPUT" 94 | else 95 | echo "FAIL: Rsync does not match expected output." 96 | echo "Expected: $EXPECTED_OUTPUT" 97 | echo "Got: $ACTUAL_OUTPUT" 98 | exit 1 99 | fi 100 | 101 | # Use 'rsync --version' to check if it returns something 102 | if [[ $(rsync --version) ]]; then 103 | echo "PASS: 'rsync --version' returns a value." 104 | else 105 | echo "FAIL: 'rsync --version' did not return a value." 106 | exit 1 107 | fi 108 | } 109 | 110 | test_jq() { 111 | EXPECTED_OUTPUT="/usr/bin/jq" 112 | ACTUAL_OUTPUT=$(which jq) 113 | 114 | # Compare the actual output to the expected output 115 | if [ "$ACTUAL_OUTPUT" == "$EXPECTED_OUTPUT" ]; then 116 | echo "PASS: 'which jq' returns $EXPECTED_OUTPUT" 117 | else 118 | echo "FAIL: Jq does not match expected output." 119 | echo "Expected: $EXPECTED_OUTPUT" 120 | echo "Got: $ACTUAL_OUTPUT" 121 | exit 1 122 | fi 123 | 124 | # Use 'jq --help' to check if it returns something 125 | if [[ $(jq --help) ]]; then 126 | echo "PASS: 'jq --help' returns a value." 127 | else 128 | echo "FAIL: 'jq --help' did not return a value." 129 | exit 1 130 | fi 131 | } 132 | 133 | test_curl() { 134 | EXPECTED_OUTPUT="/usr/bin/curl" 135 | ACTUAL_OUTPUT=$(which curl) 136 | 137 | # Compare the actual output to the expected output 138 | if [ "$ACTUAL_OUTPUT" == "$EXPECTED_OUTPUT" ]; then 139 | echo "PASS: 'which curl' returns $EXPECTED_OUTPUT" 140 | else 141 | echo "FAIL: Curl does not match expected output." 142 | echo "Expected: $EXPECTED_OUTPUT" 143 | echo "Got: $ACTUAL_OUTPUT" 144 | exit 1 145 | fi 146 | 147 | # Use 'curl --version' to check if it returns something 148 | if [[ $(curl --version) ]]; then 149 | echo "PASS: 'curl --version' returns a value." 
150 | else 151 | echo "FAIL: 'curl --version' did not return a value." 152 | exit 1 153 | fi 154 | } 155 | 156 | test_timeout() { 157 | EXPECTED_OUTPUT="/usr/bin/timeout" 158 | ACTUAL_OUTPUT=$(which timeout) 159 | 160 | # Compare the actual output to the expected output 161 | if [ "$ACTUAL_OUTPUT" == "$EXPECTED_OUTPUT" ]; then 162 | echo "PASS: 'which timeout' returns $EXPECTED_OUTPUT" 163 | else 164 | echo "FAIL: Timeout does not match expected output." 165 | echo "Expected: $EXPECTED_OUTPUT" 166 | echo "Got: $ACTUAL_OUTPUT" 167 | exit 1 168 | fi 169 | 170 | # Use 'timeout 5s echo "hello"' to check if it returns something 171 | if [[ $(timeout 5s echo "hello") ]]; then 172 | echo "PASS: 'timeout 5s echo "hello"' returns a value." 173 | else 174 | echo "FAIL: 'timeout 5s echo "hello"' did not return a value." 175 | exit 1 176 | fi 177 | } 178 | 179 | test_tz() { 180 | EXPECTED_OUTPUT="America/Phoenix" 181 | ACTUAL_OUTPUT=$(echo $TZ) 182 | 183 | # Compare the actual output to the expected output 184 | if [ "$ACTUAL_OUTPUT" == "$EXPECTED_OUTPUT" ]; then 185 | echo "PASS: 'echo \$TZ' returns $EXPECTED_OUTPUT" 186 | else 187 | echo "FAIL: TimzeZone does not match expected output." 188 | echo "Expected: $EXPECTED_OUTPUT" 189 | echo "Got: $ACTUAL_OUTPUT" 190 | exit 1 191 | fi 192 | 193 | ACTUAL_OUTPUT=$(date | grep MST) 194 | # Use 'date | grep MST' to check if it returns something 195 | if [[ $ACTUAL_OUTPUT ]]; then 196 | echo "PASS: 'date | grep MST' returns the correct TZ." 197 | else 198 | echo "FAIL: 'date | grep MST' did notthe correct TZ." 199 | echo "Got: $date" 200 | exit 1 201 | fi 202 | } 203 | 204 | test_alpine_release() { 205 | # Capture the output of the command 206 | local output=$(cat /etc/alpine-release) 207 | 208 | # Check if the output starts with "3.18" 209 | if [[ $output == $ALPINE_VERSION* ]]; then 210 | echo "PASS: Alpine release is correct." 211 | else 212 | echo "FAIL: Alpine release." 
213 | echo "Expected:" 214 | echo "$ALPINE_VERSION*" 215 | echo "Actual" 216 | echo "$output" 217 | exit 1 218 | fi 219 | } 220 | 221 | test_python() { 222 | EXPECTED_OUTPUT="/usr/bin/python3" 223 | ACTUAL_OUTPUT=$(which python3) 224 | 225 | # Compare the actual output to the expected output 226 | if [ "$ACTUAL_OUTPUT" == "$EXPECTED_OUTPUT" ]; then 227 | echo "PASS: 'which python' returns $EXPECTED_OUTPUT" 228 | else 229 | echo "FAIL: Python does not match expected output." 230 | echo "Expected: $EXPECTED_OUTPUT" 231 | echo "Got: $ACTUAL_OUTPUT" 232 | exit 1 233 | fi 234 | 235 | # Use 'python --version' to check if it returns something 236 | if [[ $(python3 --version) ]]; then 237 | echo "PASS: 'python3 --version' returns a value." 238 | else 239 | echo "FAIL: 'python3 --version' did not return a value." 240 | exit 1 241 | fi 242 | } 243 | 244 | 245 | test_self_container_id() { 246 | if [[ $(echo $SELF_CONTAINER_ID) ]]; then 247 | echo "PASS: 'SELF_CONTAINER_ID' returns a value." 248 | else 249 | echo "FAIL: 'SELF_CONTAINER_ID' did not return a value." 250 | exit 1 251 | fi 252 | } 253 | 254 | # Function to test if environment variables have expected values 255 | test_env_vars() { 256 | local -n env_vars_to_test=$1 # Use nameref to pass associative array by reference 257 | local test_passed=true 258 | 259 | for var in "${!env_vars_to_test[@]}"; do 260 | if [ "${!var}" != "${env_vars_to_test[$var]}" ]; then 261 | echo "FAIL: '$var' expected value '${env_vars_to_test[$var]}', got '${!var}'." 262 | test_passed=false 263 | else 264 | echo "PASS: '$var' has expected value '${env_vars_to_test[$var]}'." 265 | fi 266 | done 267 | 268 | if [ "$test_passed" = true ]; then 269 | echo "All environment variables have expected values." 270 | else 271 | echo "Some environment variables do not have expected values." 
272 | exit 1 273 | fi 274 | } 275 | 276 | # Declare an associative array with environment variable names and expected values 277 | declare -A expected_env_vars=( 278 | ["TZ"]="Etc/UTC" 279 | ["TEST_MODE"]="2" # This not actually the default, but the mode that checks this value is #2 280 | ["CRON_SCHEDULE"]="0 4 * * *" 281 | ["CRON_SCHEDULE_ENABLED"]="true" 282 | ["REPORT_FILE"]="true" 283 | ["BACKUP_ON_START"]="false" 284 | ["USE_DEFAULT_RSYNC_ARGS"]="true" 285 | ["REQUIRE_LABEL"]="false" 286 | ["LOG_LEVEL"]="INFO" 287 | ["REPORT_FILE_LOG_LEVEL"]="INFO" 288 | ["REPORT_FILE_ON_BACKUP_ONLY"]="true" 289 | ["KEEP_SRC_DIR_NAME"]="true" 290 | ["EXIT_AFTER_INIT"]="false" 291 | ["LOG_RSYNC_COMMANDS"]="false" 292 | ["RUN_ONCE"]="false" 293 | ["SOURCE_LOCATION"]="/app/source" 294 | ["DEST_LOCATION"]="/app/destination" 295 | ["SKIP_CONTAINERS"]="" 296 | ["SKIP_STOPPING"]="" 297 | ["RSYNC_CUSTOM_ARGS"]="" 298 | ["OVERRIDE_SOURCE_DIR"]="" 299 | ["DEFAULT_OVERRIDE_DEST_DIR"]="" 300 | ["ADDITIONAL_FOLDERS"]="" 301 | ["ADDITIONAL_FOLDERS_WHEN"]="before" 302 | ["PRE_BACKUP_CURL"]="" 303 | ["POST_BACKUP_CURL"]="" 304 | ["HTTP_REST_API_ENABLED"]="true" 305 | ["HTTP_REST_API_USERNAME"]="admin" 306 | ["HTTP_REST_API_PASSWORD"]="password" 307 | 308 | ) 309 | 310 | if [ "$1" == "test1" ]; then 311 | bash /app/entry.sh 312 | 313 | echo "Running integation tests..." 314 | 315 | test_docker 316 | test_cron 317 | test_tz 318 | test_bash 319 | test_rsync 320 | test_jq 321 | test_curl 322 | test_timeout 323 | test_alpine_release 324 | test_python 325 | test_self_container_id 326 | 327 | echo "All tests passed!" 328 | elif [ "$1" == "test2" ]; then 329 | source /app/env.sh 330 | source /app/entry.sh 331 | echo "Testing default enviornment variables..." 332 | test_env_vars expected_env_vars 333 | 334 | elif [ "$1" == "test3" ]; then 335 | # Complete integration tests 336 | exit 0 337 | else 338 | echo "Invalid argument. Use either 'test1' or 'test2'." 
339 | exit 1 340 | fi 341 | -------------------------------------------------------------------------------- /tests/_validate_dockerfile.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | echo "Validating Dockerfile supports both amd64 and arm64 architectures..." 4 | 5 | # Read the Dockerfile and find the FROM line 6 | from_line=$(grep '^FROM ' Dockerfile) 7 | 8 | # Extract the image name and SHA hash 9 | if [[ $from_line =~ FROM[[:space:]]+([^@]+)@([^[:space:]]+) ]]; then 10 | image_name="${BASH_REMATCH[1]}" 11 | sha_from_dockerfile="${BASH_REMATCH[2]}" 12 | full_image_name="$image_name@$sha_from_dockerfile" 13 | 14 | echo "Image Name: '$image_name'" 15 | echo "Full Image Name: '$full_image_name'" 16 | echo "SHA from image: '$sha_from_dockerfile'" 17 | else 18 | echo "FAIL: 'FROM' line with SHA not found in Dockerfile" 19 | fi 20 | 21 | # Run the docker command to get the SHA from mquery 22 | # We use $full_image_name since it will fail on just $image_name if an update is pushed but the dockerfile uses an older SHA 23 | mquery_output=$(docker run --rm mplatform/mquery:latest@sha256:938c26673f9b81f1c574e5911b79c4a9accf6aa918af575c94c9d488121db63c $full_image_name --platforms linux/amd64,linux/arm64) 24 | sha_from_mquery=$(echo "$mquery_output" | grep -oP '(?<=digest: )[^\)]+') 25 | 26 | echo "SHA from mquery: '$sha_from_mquery'" 27 | 28 | # Compare the two SHAs 29 | if [[ "$sha_from_dockerfile" == "$sha_from_mquery" ]]; then 30 | echo "PASS: SHAs match. All is well." 31 | else 32 | echo "FAIL: SHAs do not match. Check your Dockerfile." 33 | exit 1 34 | fi 35 | 36 | # Check for the presence of linux/amd64 and linux/arm64 in the output 37 | if echo "$mquery_output" | grep -q "linux/amd64" && echo "$mquery_output" | grep -q "linux/arm64"; then 38 | echo "PASS: Both linux/amd64 and linux/arm64 are supported." 39 | else 40 | echo "FAIL: One of linux/amd64 or linux/arm64 is not supported." 
41 | exit 1 42 | fi -------------------------------------------------------------------------------- /tests/_validate_rsync.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | test_watchtower_backup_file() { 4 | 5 | local expected="This is a test file" 6 | local report_file="test-file.txt" 7 | local dest_location="destination/watchtower-test" 8 | 9 | local file_path="$dest_location/$report_file" 10 | 11 | 12 | if [ ! -f "$file_path" ]; then 13 | echo "'$file_path' not found. Exiting..." 14 | exit 1 15 | fi 16 | 17 | actual=$(tail -n 1 "$file_path") 18 | if [[ ! "$actual" =~ "$expected" ]]; then 19 | echo "Test Failed: Expected message not found in report file." 20 | echo "Actual:" 21 | echo "$actual" 22 | echo "Expected:" 23 | echo "$expected" 24 | exit 1 25 | fi 26 | 27 | echo "PASS: $report_file found in $dest_location" 28 | } 29 | 30 | 31 | test_config_json_file() { 32 | 33 | local report_file="nautical-db.json" 34 | local dest_location="config" 35 | 36 | local file_path="$dest_location/$report_file" 37 | 38 | 39 | if [ ! -f "$file_path" ]; then 40 | echo "'$file_path' not found. Exiting..." 41 | exit 1 42 | fi 43 | 44 | echo "PASS: $report_file found in $dest_location" 45 | } 46 | 47 | test_watchtower_backup_file 48 | test_config_json_file -------------------------------------------------------------------------------- /tests/docker-compose.yml: -------------------------------------------------------------------------------- 1 | # How to run these tests (if not using the 'nb' command) 2 | 3 | # cd tests 4 | # docker compose run nautical-backup-test1 --exit-code-from nautical-backup-test1 5 | # docker compose run nautical-backup-test2 --exit-code-from nautical-backup-test2 6 | # docker compose run nautical-backup-test3 --exit-code-from nautical-backup-test3 7 | 8 | # TODO: The $PWD mounts do not work on Windows. Only in Github runners. Need to find a solution for this. 
9 | 10 | services: 11 | # The file that determines which tests to run is located at 's6-overlay/etc/s6-overlay/s6-rc.d/ci-test/run.sh' 12 | nautical-backup-test1: 13 | image: minituff/nautical-test # Use the local image 14 | container_name: nautical-backup-test1 15 | hostname: nautical-backup-test1 16 | volumes: 17 | - /var/run/docker.sock:/var/run/docker.sock 18 | # - ./source:/app/source 19 | # - ./destination:/app/destination 20 | - $PWD:/tests # This file will be run from the /tests directory, therefore it will be /tests:/tests 21 | environment: 22 | - TZ=America/Phoenix # Must not observe DST 23 | - BACKUP_ON_START=true 24 | - EXIT_AFTER_INIT=true 25 | - CRON_SCHEDULE=0 8 * * * 26 | - REPORT_FILE=false 27 | - TEST_MODE=1 28 | - S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0 # Required since the tests take so long 29 | - S6_KILL_GRACETIME=100 # How long until S6 kills 30 | labels: 31 | - "nautical-backup.enable=false" 32 | 33 | # The file that determines which tests to run is located at 's6-overlay/etc/s6-overlay/s6-rc.d/ci-test/run.sh' 34 | nautical-backup-test2: 35 | image: minituff/nautical-test # Use the local image 36 | container_name: nautical-backup-test2 37 | hostname: nautical-backup-test2 38 | volumes: 39 | - /var/run/docker.sock:/var/run/docker.sock 40 | # - ./source:/app/source 41 | # - ./destination:/app/destination 42 | - $PWD:/tests # This file will be run from the /tests directory, therefore it will be /tests:/tests 43 | environment: 44 | - TEST_MODE=2 45 | - S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0 # Required since the tests take so long 46 | - REPORT_FILE=true 47 | - S6_KILL_GRACETIME=100 # How long until S6 kills 48 | labels: 49 | - "nautical-backup.enable=false" 50 | 51 | nautical-backup-test3: 52 | image: minituff/nautical-test # Use the local image 53 | container_name: nautical-backup-test3 54 | hostname: nautical-backup-test3 55 | volumes: 56 | - /var/run/docker.sock:/var/run/docker.sock 57 | - $PWD/source:/app/source 58 | - 
$PWD/destination:/app/destination 59 | - $PWD/config:/config 60 | environment: 61 | - S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0 # Required since the tests take so long 62 | - S6_KILL_GRACETIME=100 # How long until S6 kills 63 | - BACKUP_ON_START=true 64 | - RUN_ONCE=true 65 | - LOG_LEVEL=TRACE 66 | - REPORT_FILE=false 67 | labels: 68 | - "nautical-backup.enable=false" 69 | 70 | 71 | watchtower: 72 | image: containrrr/watchtower 73 | container_name: watchtower 74 | volumes: 75 | - /var/run/docker.sock:/var/run/docker.sock 76 | environment: 77 | - WATCHTOWER_SCHEDULE=0 0 4 * * * # Every day at 4am 78 | labels: 79 | - "nautical-backup.enable=true" 80 | 81 | # NOT RUN IN CI 82 | nautical-backup-test4: 83 | image: minituff/nautical-test # Use the local image 84 | container_name: nautical-backup-test4 85 | hostname: nautical-backup-test4 86 | volumes: 87 | - /var/run/docker.sock:/var/run/docker.sock 88 | - ${LOCAL_WORKSPACE_FOLDER-./}\tests\source:/app/source 89 | - ${LOCAL_WORKSPACE_FOLDER-./}\tests\destination:/app/destination 90 | - ${LOCAL_WORKSPACE_FOLDER-./}\pkg:/app 91 | - ${LOCAL_WORKSPACE_FOLDER-./}\tests:/tests 92 | ports: 93 | - "8069:8069/tcp" 94 | environment: 95 | - TEST_MODE=4 # Do not run coverage, but run all unit tests 96 | - S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0 # Required since the tests take so long 97 | - S6_KILL_GRACETIME=100 # How long until S6 kills 98 | 99 | # NOT RUN IN CI 100 | nautical-backup-test5: 101 | image: minituff/nautical-test # Use the local image 102 | container_name: nautical-backup-test5 103 | volumes: 104 | - /var/run/docker.sock:/var/run/docker.sock 105 | - ${LOCAL_WORKSPACE_FOLDER-./}\dev/source:/app/source 106 | - ${LOCAL_WORKSPACE_FOLDER-./}\dev/destination:/app/destination 107 | - ${LOCAL_WORKSPACE_FOLDER-./}\app:/app 108 | - ${LOCAL_WORKSPACE_FOLDER-./}\dev/config:/config 109 | - ${LOCAL_WORKSPACE_FOLDER-./}\api:/api 110 | - ${LOCAL_WORKSPACE_FOLDER-./}\tests:/tests 111 | ports: 112 | - "8069:8069/tcp" 113 | environment: 114 | # - 
TEST_MODE=-1 # Do not run coverage, but run all unit tests 115 | - BACKUP_ON_START=true 116 | - S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0 # Required since the tests take so long 117 | - S6_KILL_GRACETIME=100 # How long until S6 kills 118 | - OVERRIDE_SOURCE_DIR=example1:example1-new-source-data,ctr2:ctr2-new-source 119 | - OVERRIDE_DEST_DIR=Pi.Alert:pialert 120 | # entrypoint: ["sleep", "infinity"] 121 | # entrypoint: ["python3", "/app/backup.py"] -------------------------------------------------------------------------------- /tests/watchtower.yml: -------------------------------------------------------------------------------- 1 | # docker compose -f watchtower.yml up -d 2 | --- 3 | services: 4 | watchtower-test: 5 | image: containrrr/watchtower 6 | container_name: watchtower-test 7 | volumes: 8 | - /var/run/docker.sock:/var/run/docker.sock 9 | ports: 10 | - 8080:8080 11 | environment: 12 | - TZ=America/Los_Angeles 13 | - WATCHTOWER_SCHEDULE=0 0 4 * * * # Every day at 4am 14 | labels: 15 | - "nautical-backup.enable=true" --------------------------------------------------------------------------------