├── .devcontainer
│   └── devcontainer.json
├── .dockerignore
├── .env.example
├── .gitattributes
├── .github
│   ├── .release-please-manifest.json
│   ├── CODEOWNERS
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   └── feature_request.md
│   ├── dependabot.yml
│   ├── release-please-config.json
│   └── workflows
│       ├── clear_cache.yml
│       ├── conventional-commits.yml
│       ├── docker-image.yml
│       ├── dockerhub-description.yml
│       ├── fetch-latest-tags.yml
│       ├── release-please.yaml
│       └── test-discord.yml
├── .gitignore
├── CHANGELOG.md
├── COPYING
├── Dockerfile
├── LICENSE
├── README.md
├── api
│   ├── api_service.py
│   ├── api_state.py
│   ├── connection_manager.py
│   └── routers
│       ├── config.py
│       ├── health.py
│       ├── logs.py
│       ├── process.py
│       └── websocket_logs.py
├── docker-compose.yml
├── healthcheck.py
├── main.py
├── poetry.lock
├── pyproject.toml
└── utils
    ├── __init__.py
    ├── auto_update.py
    ├── config_loader.py
    ├── dependencies.py
    ├── dmb_config.json
    ├── dmb_config_schema.json
    ├── download.py
    ├── duplicate_cleanup.py
    ├── global_logger.py
    ├── logger.py
    ├── plex_refresh.py
    ├── postgres.py
    ├── processes.py
    ├── riven_settings.py
    ├── setup.py
    ├── user_management.py
    └── versions.py
/.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "DMB Dev Container", 3 | "image": "iampuid0/dmb:latest", 4 | "workspaceFolder": "/workspace", 5 | "customizations": { 6 | "vscode": { 7 | "extensions": [ 8 | "ms-python.python", 9 | "ms-python.black-formatter" 10 | ], 11 | "settings": { 12 | "terminal.integrated.defaultProfile.linux": "sh", 13 | "python.formatting.provider": "black", 14 | "editor.formatOnSave": true, 15 | "python.defaultInterpreterPath": "/venv/bin/python", 16 | "git.path": "/usr/bin/git" 17 | } 18 | } 19 | }, 20 | "forwardPorts": [ 21 | 3005, 22 | 8000, 23 | 3000, 24 | 9090, 25 | 5050, 26 | 8182 27 | ], 28 | "mounts": [ 29 | "source=${localWorkspaceFolder},target=/workspace,type=bind", 30 | "source=${localWorkspaceFolder}/main.py,target=/main.py,type=bind", 31 | "source=${localWorkspaceFolder}/config,target=/config,type=bind", 32 | "source=${localWorkspaceFolder}/utils,target=/utils,type=bind", 33 | "source=${localWorkspaceFolder}/zurg/RD,target=/zurg/RD,type=bind", 34 | "source=${localWorkspaceFolder}/riven/backend/data,target=/riven/backend/data,type=bind", 35 | "source=${localWorkspaceFolder}/zilean/app/data,target=/zilean/app/data,type=bind", 36 | "source=${localWorkspaceFolder}/log,target=/log,type=bind", 37 | "source=${localWorkspaceFolder}/mnt/debrid,target=/mnt/debrid,type=bind,bind-propagation=rshared", 38 | "source=${localWorkspaceFolder}/postgres_data,target=/postgres_data,type=bind", 39 | "source=${localWorkspaceFolder}/pgadmin/data,target=/pgadmin/data,type=bind", 40 | "source=${localWorkspaceFolder}/plex_debrid/config,target=/plex_debrid/config,type=bind", 41 | "source=${localWorkspaceFolder}/cli_debrid/data,target=/cli_debrid/data,type=bind", 42 | "source=${localWorkspaceFolder}/phalanx_db/data,target=/phalanx_db/data,type=bind" 43 | ], 44 | "runArgs": [ 45 | "--name=dmb_dev", 46 | "--hostname=dmb_dev", 47 | "--dns=8.8.8.8", 48 | "--dns=8.8.4.4", 49 | "--device=/dev/fuse:/dev/fuse:rwm", 50 | "--cap-add=SYS_ADMIN", 51 | "--security-opt=apparmor:unconfined", 52 | "--security-opt=no-new-privileges", 53 | "--shm-size=128m", 54 | "--pull=always" 55 | ], 56 | "postCreateCommand": "apt update && apt install -y gcc python3.11-dev libpq-dev && curl -sSL https://install.python-poetry.org | python - && export PATH=\"$HOME/.local/bin:$PATH\" &&
poetry config virtualenvs.create false && poetry install --no-root --with dev && git config --global --add safe.directory /workspace" 57 | } -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | **/*.gitattributes 2 | **/*.gitignore 3 | **/*.github 4 | *Ubuntu* 5 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | #------------------------------------- 2 | # Global Variables 3 | #------------------------------------- 4 | 5 | PUID=1000 6 | PGID=1000 7 | TZ= 8 | 9 | #------------------------------------- 10 | # DMB Variables 11 | #------------------------------------- 12 | 13 | DMB_LOG_LEVEL=INFO 14 | DMB_LOG_NAME=DMB 15 | DMB_LOG_DIR=/log 16 | DMB_LOG_COUNT=2 17 | DMB_LOG_SIZE=10M 18 | DMB_COLOR_LOG=true 19 | DMB_PLEX_TOKEN= 20 | DMB_PLEX_ADDRESS= 21 | DMB_GITHUB_TOKEN= 22 | 23 | #------------------------------------- 24 | # DMB API Variables 25 | #------------------------------------- 26 | 27 | DMB_API_SERVICE_ENABLED=true 28 | DMB_API_SERVICE_PROCESS_NAME=DMB API 29 | DMB_API_SERVICE_LOG_LEVEL=INFO 30 | DMB_API_SERVICE_HOST=127.0.0.1 31 | DMB_API_SERVICE_PORT=8000 32 | 33 | #------------------------------------- 34 | # DMB Frontend Variables 35 | #------------------------------------- 36 | 37 | DMB_FRONTEND_ENABLED=true 38 | DMB_FRONTEND_PROCESS_NAME="DMB Frontend" 39 | DMB_FRONTEND_REPO_OWNER=I-am-PUID-0 40 | DMB_FRONTEND_REPO_NAME=dmbdb 41 | DMB_FRONTEND_RELEASE_VERSION_ENABLED=false 42 | DMB_FRONTEND_RELEASE_VERSION=1.1.0 43 | DMB_FRONTEND_BRANCH_ENABLED=false 44 | DMB_FRONTEND_BRANCH=main 45 | DMB_FRONTEND_SUPPRESS_LOGGING=false 46 | DMB_FRONTEND_LOG_LEVEL=INFO 47 | DMB_FRONTEND_ORIGINS=http://0.0.0.0:3005 48 | DMB_FRONTEND_HOST=0.0.0.0 49 | DMB_FRONTEND_PORT=3005 50 | DMB_FRONTEND_AUTO_UPDATE=false 51 | DMB_FRONTEND_AUTO_UPDATE_INTERVAL=24 52 | DMB_FRONTEND_CLEAR_ON_UPDATE=true 53 | DMB_FRONTEND_EXCLUDE_DIRS= 54 | DMB_FRONTEND_PLATFORMS=pnpm 55 | DMB_FRONTEND_COMMAND=node .output/server/index.mjs 56 | DMB_FRONTEND_CONFIG_DIR=/dmb/frontend 57 | 58 | #------------------------------------- 59 | # PostgreSQL Variables 60 | #------------------------------------- 61 | 62 | POSTGRES_ENABLED=true 63 | POSTGRES_PROCESS_NAME=PostgreSQL 64 | POSTGRES_SUPPRESS_LOGGING=false 65 | POSTGRES_LOG_LEVEL=INFO 66 | POSTGRES_HOST=127.0.0.1 67 | POSTGRES_PORT=5432 68 | POSTGRES_CONFIG_DIR=/postgres_data 69 | POSTGRES_CONFIG_FILE=/postgres_data/postgresql.conf 70 | POSTGRES_INITDB_ARGS=--data-checksums 71 | POSTGRES_USER=DMB 72 | POSTGRES_PASSWORD=postgres 73 | POSTGRES_SHARED_BUFFERS=128MB 74 | POSTGRES_MAX_CONNECTIONS=100 75 | POSTGRES_RUN_DIRECTORY=/run/postgresql 76 | POSTGRES_COMMAND=postgres -D {postgres_config_dir} -c config_file={postgres_config_file} 77 | 78 | #------------------------------------- 79 | # pgAdmin Variables 80 | #------------------------------------- 81 | 82 | PGADMIN_ENABLED=true 83 | PGADMIN_PROCESS_NAME=pgAdmin4 84 | PGADMIN_CONFIG_DIR=/pgadmin/data 85 | PGADMIN_CONFIG_FILE=/pgadmin/data/config_local.py 86 | PGADMIN_LOG_FILE=/pgadmin/data/pgadmin4.log 87 | PGADMIN_PORT=5050 88 | PGADMIN_DEFAULT_SERVER=0.0.0.0 89 | PGADMIN_SETUP_EMAIL=DMB@DMB.DMB 90 | PGADMIN_SETUP_PASSWORD=postgres 91 | 92 | #------------------------------------- 93 | # Rclone Variables 94 | #------------------------------------- 95 | 96 | 
RCLONE_INSTANCES_REALDEBRID_ENABLED=true 97 | RCLONE_INSTANCES_REALDEBRID_PROCESS_NAME="rclone w/ RealDebrid" 98 | RCLONE_INSTANCES_REALDEBRID_SUPPRESS_LOGGING=false 99 | RCLONE_INSTANCES_REALDEBRID_LOG_LEVEL=INFO 100 | RCLONE_INSTANCES_REALDEBRID_KEY_TYPE=RealDebrid 101 | RCLONE_INSTANCES_REALDEBRID_ZURG_ENABLED=true 102 | RCLONE_INSTANCES_REALDEBRID_MOUNT_DIR=/data 103 | RCLONE_INSTANCES_REALDEBRID_MOUNT_NAME=rclone_RD 104 | RCLONE_INSTANCES_REALDEBRID_CACHE_DIR=/cache 105 | RCLONE_INSTANCES_REALDEBRID_CONFIG_DIR=/config 106 | RCLONE_INSTANCES_REALDEBRID_CONFIG_FILE=/config/rclone.config 107 | RCLONE_INSTANCES_REALDEBRID_ZURG_CONFIG_FILE=/zurg/RD/config.yml 108 | RCLONE_INSTANCES_REALDEBRID_COMMAND= 109 | RCLONE_INSTANCES_REALDEBRID_API_KEY= 110 | 111 | #------------------------------------- 112 | # Riven Variables 113 | #------------------------------------- 114 | 115 | RIVEN_BACKEND_ENABLED=true 116 | RIVEN_BACKEND_PROCESS_NAME="Riven Backend" 117 | RIVEN_BACKEND_REPO_OWNER=rivenmedia 118 | RIVEN_BACKEND_REPO_NAME=riven 119 | RIVEN_BACKEND_RELEASE_VERSION_ENABLED=false 120 | RIVEN_BACKEND_RELEASE_VERSION=v0.20.1 121 | RIVEN_BACKEND_BRANCH_ENABLED=false 122 | RIVEN_BACKEND_BRANCH=release-please--branches--main 123 | RIVEN_BACKEND_SUPPRESS_LOGGING=false 124 | RIVEN_BACKEND_LOG_LEVEL=INFO 125 | RIVEN_BACKEND_HOST=127.0.0.1 126 | RIVEN_BACKEND_PORT=8080 127 | RIVEN_BACKEND_AUTO_UPDATE=false 128 | RIVEN_BACKEND_AUTO_UPDATE_INTERVAL=24 129 | RIVEN_BACKEND_SYMLINK_LIBRARY_PATH=/mnt 130 | RIVEN_BACKEND_CLEAR_ON_UPDATE=true 131 | RIVEN_BACKEND_EXCLUDE_DIRS=/riven/backend/data 132 | RIVEN_BACKEND_ENV_COPY_SOURCE=/riven/backend/data/.env 133 | RIVEN_BACKEND_ENV_COPY_DESTINATION=/riven/backend/src/.env 134 | RIVEN_BACKEND_PLATFORMS=python 135 | RIVEN_BACKEND_COMMAND=/riven/backend/venv/bin/python src/main.py 136 | RIVEN_BACKEND_CONFIG_DIR=/riven/backend 137 | RIVEN_BACKEND_CONFIG_FILE=/riven/backend/data/settings.json 138 | RIVEN_BACKEND_WAIT_FOR_DIR=/data/rclone_RD/__all__ 139 | 140 | #------------------------------------- 141 | # Riven Frontend Variables 142 | #------------------------------------- 143 | 144 | RIVEN_FRONTEND_ENABLED=true 145 | RIVEN_FRONTEND_PROCESS_NAME="Riven Frontend" 146 | RIVEN_FRONTEND_REPO_OWNER=rivenmedia 147 | RIVEN_FRONTEND_REPO_NAME=riven-frontend 148 | RIVEN_FRONTEND_RELEASE_VERSION_ENABLED=false 149 | RIVEN_FRONTEND_RELEASE_VERSION=v0.17.0 150 | RIVEN_FRONTEND_BRANCH_ENABLED=false 151 | RIVEN_FRONTEND_BRANCH=release-please--branches--main 152 | RIVEN_FRONTEND_SUPPRESS_LOGGING=false 153 | RIVEN_FRONTEND_LOG_LEVEL=INFO 154 | RIVEN_FRONTEND_HOST=127.0.0.1 155 | RIVEN_FRONTEND_PORT=3000 156 | RIVEN_FRONTEND_AUTO_UPDATE=false 157 | RIVEN_FRONTEND_AUTO_UPDATE_INTERVAL=24 158 | RIVEN_FRONTEND_CLEAR_ON_UPDATE=true 159 | RIVEN_FRONTEND_EXCLUDE_DIRS= 160 | RIVEN_FRONTEND_PLATFORMS=pnpm 161 | RIVEN_FRONTEND_COMMAND=node build 162 | RIVEN_FRONTEND_CONFIG_DIR=/riven/frontend 163 | RIVEN_FRONTEND_ENV_DIALECT=postgres 164 | 165 | #------------------------------------- 166 | # Zilean Variables 167 | #------------------------------------- 168 | 169 | ZILEAN_ENABLED=true 170 | ZILEAN_PROCESS_NAME=Zilean 171 | ZILEAN_REPO_OWNER=iPromKnight 172 | ZILEAN_REPO_NAME=zilean 173 | ZILEAN_RELEASE_VERSION_ENABLED=false 174 | ZILEAN_RELEASE_VERSION=v3.3.0 175 | ZILEAN_BRANCH_ENABLED=false 176 | ZILEAN_BRANCH=main 177 | ZILEAN_SUPPRESS_LOGGING=false 178 | ZILEAN_LOG_LEVEL=INFO 179 | ZILEAN_HOST=127.0.0.1 180 | ZILEAN_PORT=8182 181 | ZILEAN_AUTO_UPDATE=false 182 | ZILEAN_AUTO_UPDATE_INTERVAL=24 
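# Note (assumption): the *_AUTO_UPDATE_INTERVAL values appear to be expressed in hours
# (24 here and for the frontends, 1 for the Zurg instance further down); the unit is
# inferred from these defaults rather than documented in this file.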
183 | ZILEAN_CLEAR_ON_UPDATE=true 184 | ZILEAN_EXCLUDE_DIRS=/zilean/app/data 185 | ZILEAN_ENV_COPY_SOURCE=/zilean/app/data/.env 186 | ZILEAN_ENV_COPY_DESTINATION=/zilean/app/src/.env 187 | ZILEAN_PLATFORMS=python dotnet 188 | ZILEAN_COMMAND=/zilean/app/zilean-api 189 | ZILEAN_CONFIG_DIR=/zilean 190 | ZILEAN_CONFIG_FILE=/zilean/app/data/settings.json 191 | 192 | #------------------------------------- 193 | # Zurg Variables 194 | #------------------------------------- 195 | 196 | ZURG_INSTANCES_REALDEBRID_ENABLED=true 197 | ZURG_INSTANCES_REALDEBRID_PROCESS_NAME="Zurg w/ RealDebrid" 198 | ZURG_INSTANCES_REALDEBRID_REPO_OWNER=debridmediamanager 199 | ZURG_INSTANCES_REALDEBRID_REPO_NAME=zurg-testing 200 | ZURG_INSTANCES_REALDEBRID_RELEASE_VERSION_ENABLED=false 201 | ZURG_INSTANCES_REALDEBRID_RELEASE_VERSION=v0.9.3-final 202 | ZURG_INSTANCES_REALDEBRID_SUPPRESS_LOGGING=false 203 | ZURG_INSTANCES_REALDEBRID_LOG_LEVEL=INFO 204 | ZURG_INSTANCES_REALDEBRID_AUTO_UPDATE=false 205 | ZURG_INSTANCES_REALDEBRID_AUTO_UPDATE_INTERVAL=1 206 | ZURG_INSTANCES_REALDEBRID_CLEAR_ON_UPDATE=false 207 | ZURG_INSTANCES_REALDEBRID_EXCLUDE_DIRS= 208 | ZURG_INSTANCES_REALDEBRID_KEY_TYPE=RealDebrid 209 | ZURG_INSTANCES_REALDEBRID_CONFIG_DIR=/zurg/RD 210 | ZURG_INSTANCES_REALDEBRID_CONFIG_FILE=/zurg/RD/config.yml 211 | ZURG_INSTANCES_REALDEBRID_COMMAND=/zurg/RD/zurg 212 | ZURG_INSTANCES_REALDEBRID_PORT= 213 | ZURG_INSTANCES_REALDEBRID_USER= 214 | ZURG_INSTANCES_REALDEBRID_PASSWORD= 215 | ZURG_INSTANCES_REALDEBRID_API_KEY= 216 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | ############################################################################### 2 | # Set default behavior to automatically normalize line endings. 3 | ############################################################################### 4 | * text=auto 5 | 6 | ############################################################################### 7 | # Set default behavior for command prompt diff. 8 | # 9 | # This is need for earlier builds of msysgit that does not have it on by 10 | # default for csharp files. 11 | # Note: This is only used by command line 12 | ############################################################################### 13 | #*.cs diff=csharp 14 | 15 | ############################################################################### 16 | # Set the merge driver for project and solution files 17 | # 18 | # Merging from the command prompt will add diff markers to the files if there 19 | # are conflicts (Merging from VS is not affected by the settings below, in VS 20 | # the diff markers are never inserted). Diff markers may cause the following 21 | # file extensions to fail to load in VS. An alternative would be to treat 22 | # these files as binary and thus will always conflict and require user 23 | # intervention with every merge. 
To do so, just uncomment the entries below 24 | ############################################################################### 25 | #*.sln merge=binary 26 | #*.csproj merge=binary 27 | #*.vbproj merge=binary 28 | #*.vcxproj merge=binary 29 | #*.vcproj merge=binary 30 | #*.dbproj merge=binary 31 | #*.fsproj merge=binary 32 | #*.lsproj merge=binary 33 | #*.wixproj merge=binary 34 | #*.modelproj merge=binary 35 | #*.sqlproj merge=binary 36 | #*.wwaproj merge=binary 37 | 38 | ############################################################################### 39 | # behavior for image files 40 | # 41 | # image files are treated as binary by default. 42 | ############################################################################### 43 | #*.jpg binary 44 | #*.png binary 45 | #*.gif binary 46 | 47 | ############################################################################### 48 | # diff behavior for common document formats 49 | # 50 | # Convert binary document formats to text before diffing them. This feature 51 | # is only available from the command line. Turn it on by uncommenting the 52 | # entries below. 53 | ############################################################################### 54 | #*.doc diff=astextplain 55 | #*.DOC diff=astextplain 56 | #*.docx diff=astextplain 57 | #*.DOCX diff=astextplain 58 | #*.dot diff=astextplain 59 | #*.DOT diff=astextplain 60 | #*.pdf diff=astextplain 61 | #*.PDF diff=astextplain 62 | #*.rtf diff=astextplain 63 | #*.RTF diff=astextplain 64 | -------------------------------------------------------------------------------- /.github/.release-please-manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | ".": "6.12.1" 3 | } 4 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @I-am-PUID-0 -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: I-am-PUID-0 -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | 12 | A clear and concise description of what the bug is. 13 | 14 | 15 | **To Reproduce** 16 | 17 | Steps to reproduce the behavior. 18 | 19 | 20 | **Expected behavior** 21 | 22 | A clear and concise description of what you expected to happen. 23 | 24 | 25 | **Screenshots** 26 | 27 | If applicable, add screenshots to help explain your problem. 28 | 29 | 30 | **Please complete the following information:** 31 | - OS: [e.g., Ubuntu, Windows] 32 | - Version: [e.g., 22.04 LTS, WIN 11] 33 | - Supporting Applications: [e.g., Docker, Docker Desktop, WSL2] 34 | 35 | 36 | **Additional context** 37 | 38 | Add any other context about the problem here. 
39 | 40 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | 12 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 13 | 14 | 15 | 16 | **Describe the solution you'd like** 17 | 18 | A clear and concise description of what you want to happen. 19 | 20 | 21 | 22 | **Describe alternatives you've considered** 23 | 24 | A clear and concise description of any alternative solutions or features you've considered. 25 | 26 | 27 | 28 | **Additional context** 29 | 30 | Add any other context or screenshots about the feature request here. 31 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "pip" 9 | directory: "/" 10 | schedule: 11 | interval: "daily" 12 | reviewers: 13 | - "I-am-PUID-0" 14 | 15 | - package-ecosystem: "github-actions" 16 | directory: "/" 17 | schedule: 18 | interval: "daily" 19 | reviewers: 20 | - "I-am-PUID-0" 21 | 22 | - package-ecosystem: "docker" 23 | directory: "/" 24 | schedule: 25 | interval: "daily" 26 | reviewers: 27 | - "I-am-PUID-0" 28 | 29 | - package-ecosystem: "devcontainers" 30 | directory: "/" 31 | schedule: 32 | interval: "weekly" 33 | -------------------------------------------------------------------------------- /.github/release-please-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json", 3 | "release-type": "python", 4 | "include-v-in-tag": false, 5 | "skip-github-release": true, 6 | "changelog-path": "CHANGELOG.md", 7 | "changelog-sections": [ 8 | { 9 | "type": "breaking", 10 | "section": "💥 Breaking Changes", 11 | "hidden": false 12 | }, 13 | { 14 | "type": "feat", 15 | "section": "✨ Features", 16 | "hidden": false 17 | }, 18 | { 19 | "type": "fix", 20 | "section": "🐛 Bug Fixes", 21 | "hidden": false 22 | }, 23 | { 24 | "type": "perf", 25 | "section": "⚡ Performance Improvements", 26 | "hidden": false 27 | }, 28 | { 29 | "type": "chore", 30 | "section": "🤡 Other Changes", 31 | "hidden": false 32 | }, 33 | { 34 | "type": "docs", 35 | "section": "📖 Documentation", 36 | "hidden": false 37 | }, 38 | { 39 | "type": "ci", 40 | "section": "🚀 CI/CD Pipeline", 41 | "hidden": false 42 | }, 43 | { 44 | "type": "tests", 45 | "section": "🔧 Testing", 46 | "hidden": false 47 | }, 48 | { 49 | "type": "refactor", 50 | "section": "🛠️ Refactors", 51 | "hidden": false 52 | }, 53 | { 54 | "type": "style", 55 | "section": "🎨 Styles", 56 | "hidden": false 57 | }, 58 | { 59 | "type": "revert", 60 | "section": "⏪ Reverts", 61 | "hidden": false 62 | }, 63 | { 64 | "type": "build", 65 | 
"section": "🛠️ Build System", 66 | "hidden": false 67 | } 68 | ], 69 | "packages": { 70 | ".": {} 71 | } 72 | } -------------------------------------------------------------------------------- /.github/workflows/clear_cache.yml: -------------------------------------------------------------------------------- 1 | name: Clear All Caches 2 | 3 | on: 4 | workflow_dispatch: 5 | 6 | jobs: 7 | clear-caches: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Install jq (JSON Processor) 11 | run: sudo apt-get update && sudo apt-get install -y jq 12 | 13 | - name: Get Caches 14 | id: get-caches 15 | run: | 16 | echo "Listing caches..." 17 | curl -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ 18 | -H "Accept: application/vnd.github+json" \ 19 | https://api.github.com/repos/${{ github.repository }}/actions/caches > caches.json 20 | echo "Caches retrieved:" 21 | cat caches.json 22 | 23 | - name: Verify Caches Exist 24 | id: verify-caches 25 | run: | 26 | cache_count=$(jq '.total_count' caches.json) 27 | echo "Number of caches found: $cache_count" 28 | if [ "$cache_count" -eq 0 ]; then 29 | echo "No caches found. Exiting..." 30 | exit 0 31 | fi 32 | 33 | - name: Delete Caches 34 | if: success() 35 | run: | 36 | echo "Deleting caches..." 37 | for cache_id in $(jq -r '.actions_caches[].id' caches.json); do 38 | echo "Attempting to delete cache with ID: $cache_id" 39 | response=$(curl -s -o /dev/null -w "%{http_code}" -X DELETE \ 40 | -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ 41 | -H "Accept: application/vnd.github+json" \ 42 | https://api.github.com/repos/${{ github.repository }}/actions/caches/$cache_id) 43 | if [ "$response" -eq 204 ]; then 44 | echo "Successfully deleted cache with ID: $cache_id" 45 | else 46 | echo "Failed to delete cache with ID: $cache_id (HTTP $response)" 47 | fi 48 | sleep 1 49 | done 50 | echo "Cache deletion completed." 51 | 52 | - name: Retry Remaining Cache Deletions 53 | if: success() 54 | run: | 55 | max_attempts=5 56 | attempt=1 57 | 58 | while [ $attempt -le $max_attempts ]; do 59 | echo "Checking for remaining caches (Attempt $attempt)..." 60 | curl -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ 61 | -H "Accept: application/vnd.github+json" \ 62 | https://api.github.com/repos/${{ github.repository }}/actions/caches > remaining_caches.json 63 | remaining_count=$(jq '.total_count' remaining_caches.json) 64 | echo "Number of remaining caches: $remaining_count" 65 | 66 | if [ "$remaining_count" -eq 0 ]; then 67 | echo "All caches successfully deleted." 68 | exit 0 69 | fi 70 | 71 | echo "Retrying deletion for $remaining_count remaining caches..." 72 | for cache_id in $(jq -r '.actions_caches[].id' remaining_caches.json); do 73 | echo "Retrying deletion for cache ID: $cache_id" 74 | response=$(curl -s -o /dev/null -w "%{http_code}" -X DELETE \ 75 | -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ 76 | -H "Accept: application/vnd.github+json" \ 77 | https://api.github.com/repos/${{ github.repository }}/actions/caches/$cache_id) 78 | if [ "$response" -eq 204 ]; then 79 | echo "Successfully deleted cache with ID: $cache_id" 80 | else 81 | echo "Failed to delete cache with ID: $cache_id (HTTP $response)" 82 | fi 83 | sleep 1 84 | done 85 | 86 | attempt=$((attempt + 1)) 87 | echo "Retry completed. Attempt $attempt of $max_attempts." 88 | done 89 | 90 | echo "Some caches could not be deleted after $max_attempts attempts." 
91 | exit 1 92 | 93 | - name: Verify Cache Deletion 94 | if: success() 95 | run: | 96 | echo "Verifying caches have been deleted..." 97 | curl -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ 98 | -H "Accept: application/vnd.github+json" \ 99 | https://api.github.com/repos/${{ github.repository }}/actions/caches > caches_post_delete.json 100 | echo "Remaining caches:" 101 | cat caches_post_delete.json 102 | cache_count=$(jq '.total_count' caches_post_delete.json) 103 | echo "Number of remaining caches: $cache_count" 104 | if [ "$cache_count" -eq 0 ]; then 105 | echo "All caches successfully deleted." 106 | else 107 | echo "Some caches could not be deleted. Remaining: $cache_count" 108 | exit 1 109 | fi 110 | -------------------------------------------------------------------------------- /.github/workflows/conventional-commits.yml: -------------------------------------------------------------------------------- 1 | name: Conventional Commits 2 | 3 | on: 4 | pull_request: 5 | branches: [ master ] 6 | 7 | jobs: 8 | build: 9 | name: Conventional Commits 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | - uses: webiny/action-conventional-commits@v1.3.0 -------------------------------------------------------------------------------- /.github/workflows/docker-image.yml: -------------------------------------------------------------------------------- 1 | name: Docker Image CI 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | types: 7 | - closed 8 | paths-ignore: 9 | - '**/README.md' 10 | - '.github/**' 11 | - '.gitattributes' 12 | - '.gitignore' 13 | 14 | jobs: 15 | fetch-latest-tags: 16 | if: > 17 | github.event_name == 'workflow_dispatch' || 18 | (github.event.pull_request.merged == true && 19 | startsWith(github.event.pull_request.title, 'chore(master): release ')) 20 | runs-on: ubuntu-latest 21 | outputs: 22 | PGAGENT_TAG: ${{ env.PGAGENT_TAG }} 23 | SYS_STATS_TAG: ${{ env.SYS_STATS_TAG }} 24 | ZILEAN_TAG: ${{ env.ZILEAN_TAG }} 25 | RIVEN_TAG: ${{ env.RIVEN_TAG }} 26 | RIVEN_FRONTEND_TAG: ${{ env.RIVEN_FRONTEND_TAG }} 27 | DMB_FRONTEND_TAG: ${{ env.DMB_FRONTEND_TAG }} 28 | PLEX_DEBRID_TAG: ${{ env.PLEX_DEBRID_TAG }} 29 | CLI_DEBRID_TAG: ${{ env.CLI_DEBRID_TAG }} 30 | steps: 31 | - name: Fetch latest pgAgent release tag 32 | run: | 33 | PGAGENT_TAG=$(curl -s https://api.github.com/repos/pgadmin-org/pgagent/releases/latest | jq -r .tag_name) 34 | echo "PGAGENT_TAG=$PGAGENT_TAG" >> $GITHUB_ENV 35 | 36 | - name: Fetch latest system_stats release tag 37 | run: | 38 | SYS_STATS_TAG=$(curl -s https://api.github.com/repos/EnterpriseDB/system_stats/releases/latest | jq -r .tag_name) 39 | echo "SYS_STATS_TAG=$SYS_STATS_TAG" >> $GITHUB_ENV 40 | 41 | - name: Fetch latest zilean release tag 42 | run: | 43 | ZILEAN_TAG=$(curl -s https://api.github.com/repos/iPromKnight/zilean/releases/latest | jq -r .tag_name) 44 | echo "ZILEAN_TAG=$ZILEAN_TAG" >> $GITHUB_ENV 45 | 46 | - name: Fetch latest riven release tag 47 | run: | 48 | RIVEN_TAG=$(curl -s https://api.github.com/repos/rivenmedia/riven/releases/latest | jq -r .tag_name) 49 | echo "RIVEN_TAG=$RIVEN_TAG" >> $GITHUB_ENV 50 | 51 | - name: Fetch latest riven-frontend release tag 52 | run: | 53 | RIVEN_FRONTEND_TAG=$(curl -s https://api.github.com/repos/rivenmedia/riven-frontend/releases/latest | jq -r .tag_name) 54 | echo "RIVEN_FRONTEND_TAG=$RIVEN_FRONTEND_TAG" >> $GITHUB_ENV 55 | 56 | - name: Fetch latest dmbdb release tag 57 | run: | 58 | DMB_FRONTEND_TAG=$(curl -s 
https://api.github.com/repos/nicocapalbo/dmbdb/releases/latest | jq -r .tag_name) 59 | echo "DMB_FRONTEND_TAG=$DMB_FRONTEND_TAG" >> $GITHUB_ENV 60 | 61 | - name: Fetch latest plex_debrid version from source 62 | run: | 63 | PLEX_DEBRID_TAG=$(curl -s https://raw.githubusercontent.com/elfhosted/plex_debrid/main/ui/ui_settings.py | \ 64 | grep '^version\s*=' | \ 65 | sed -E "s/.*=\s*\[\s*'([^']+)'.*/\1/") 66 | echo "PLEX_DEBRID_TAG=$PLEX_DEBRID_TAG" >> $GITHUB_ENV 67 | 68 | - name: Fetch latest cli_debrid release tag 69 | run: | 70 | CLI_DEBRID_TAG=$(curl -s https://api.github.com/repos/godver3/cli_debrid/releases/latest | jq -r .tag_name) 71 | echo "CLI_DEBRID_TAG=$CLI_DEBRID_TAG" >> $GITHUB_ENV 72 | 73 | build-and-push: 74 | needs: fetch-latest-tags 75 | runs-on: self-hosted 76 | outputs: 77 | version: ${{ env.VERSION }} 78 | repo_owner_lower: ${{ env.REPO_OWNER_LOWER }} 79 | repo_name: ${{ env.REPO_NAME }} 80 | steps: 81 | - name: Checkout 82 | uses: actions/checkout@v4 83 | 84 | - name: Set up QEMU 85 | uses: docker/setup-qemu-action@v3 86 | with: 87 | platforms: 'linux/amd64,linux/arm64,linux/arm/v7' 88 | cache-image: false 89 | 90 | - name: Set up Docker Buildx 91 | uses: docker/setup-buildx-action@v3 92 | 93 | - name: Clean up unused Docker images and containers 94 | run: docker system prune --all --force --volumes 95 | 96 | - name: Pre-Build Disk Space 97 | run: | 98 | echo "Disk space before build:" 99 | df -h 100 | 101 | - name: Extract version and set environment variables 102 | id: setup_env_vars 103 | run: | 104 | VERSION=$(grep -E '^version *= *' pyproject.toml | head -n 1 | cut -d '"' -f2) 105 | REPO_OWNER_LOWER=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]') 106 | REPO_NAME=$(basename "${{ github.repository }}" | tr '[:upper:]' '[:lower:]') 107 | BRANCH_NAME=$(echo "${{ github.ref_name }}" | tr '/' '-') 108 | echo "VERSION=$VERSION" >> $GITHUB_ENV 109 | echo "REPO_OWNER_LOWER=$REPO_OWNER_LOWER" >> $GITHUB_ENV 110 | echo "REPO_NAME=$REPO_NAME" >> $GITHUB_ENV 111 | echo "BRANCH_NAME=$BRANCH_NAME" >> $GITHUB_ENV 112 | 113 | - name: Determine Docker tags 114 | id: determine_tags 115 | run: | 116 | if [ "${{ github.ref_name }}" == "master" ]; then 117 | echo "DOCKER_TAGS=${{ secrets.DOCKER_USERNAME }}/${{ env.REPO_NAME }}:${{ env.VERSION }},${{ secrets.DOCKER_USERNAME }}/${{ env.REPO_NAME }}:latest,ghcr.io/${{ env.REPO_OWNER_LOWER }}/${{ env.REPO_NAME }}:${{ env.VERSION }},ghcr.io/${{ env.REPO_OWNER_LOWER }}/${{ env.REPO_NAME }}:latest" >> $GITHUB_ENV 118 | else 119 | echo "DOCKER_TAGS=${{ secrets.DOCKER_USERNAME }}/${{ env.REPO_NAME }}:${{ env.BRANCH_NAME }},ghcr.io/${{ env.REPO_OWNER_LOWER }}/${{ env.REPO_NAME }}:${{ env.BRANCH_NAME }}" >> $GITHUB_ENV 120 | fi 121 | 122 | - name: Login to Docker Hub 123 | uses: docker/login-action@v3 124 | with: 125 | username: ${{ secrets.DOCKER_USERNAME }} 126 | password: ${{ secrets.DOCKER_PASSWORD }} 127 | 128 | - name: Login to GitHub Container Registry 129 | uses: docker/login-action@v3 130 | with: 131 | registry: ghcr.io 132 | username: ${{ github.actor }} 133 | password: ${{ secrets.GITHUB_TOKEN }} 134 | 135 | - name: Build and Push Docker image 136 | id: build_push 137 | uses: docker/build-push-action@v6 138 | env: 139 | DOCKER_BUILD_RECORD_UPLOAD: false 140 | with: 141 | context: . 
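          # The build-args below forward the tags fetched by the fetch-latest-tags job to the
          # matching ARG declarations in the Dockerfile (e.g. ARG ZILEAN_TAG, ARG SYS_STATS_TAG).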
142 | file: ./Dockerfile 143 | platforms: linux/amd64,linux/arm64 #,linux/arm/v7 144 | cache-from: type=local,src=/home/docker/buildx-cache/${{ github.repository }} 145 | cache-to: type=local,dest=/home/docker/buildx-cache/${{ github.repository }},mode=max 146 | tags: ${{ env.DOCKER_TAGS }} 147 | build-args: | 148 | PGAGENT_TAG=${{ needs.fetch-latest-tags.outputs.PGAGENT_TAG }} 149 | SYS_STATS_TAG=${{ needs.fetch-latest-tags.outputs.SYS_STATS_TAG }} 150 | ZILEAN_TAG=${{ needs.fetch-latest-tags.outputs.ZILEAN_TAG }} 151 | RIVEN_TAG=${{ needs.fetch-latest-tags.outputs.RIVEN_TAG }} 152 | RIVEN_FRONTEND_TAG=${{ needs.fetch-latest-tags.outputs.RIVEN_FRONTEND_TAG }} 153 | DMB_FRONTEND_TAG=${{ needs.fetch-latest-tags.outputs.DMB_FRONTEND_TAG }} 154 | PLEX_DEBRID_TAG=${{ needs.fetch-latest-tags.outputs.PLEX_DEBRID_TAG }} 155 | CLI_DEBRID_TAG=${{ needs.fetch-latest-tags.outputs.CLI_DEBRID_TAG }} 156 | provenance: true 157 | sbom: true 158 | push: true 159 | 160 | - name: Build and Load Single-Platform Image for Version Extraction 161 | id: build_load_single 162 | uses: docker/build-push-action@v6 163 | env: 164 | DOCKER_BUILD_RECORD_UPLOAD: false 165 | with: 166 | context: . 167 | file: ./Dockerfile 168 | platforms: linux/amd64 169 | cache-from: type=local,src=/home/docker/buildx-cache/${{ github.repository }} 170 | tags: temp-local-image 171 | build-args: | 172 | PGAGENT_TAG=${{ needs.fetch-latest-tags.outputs.PGAGENT_TAG }} 173 | SYS_STATS_TAG=${{ needs.fetch-latest-tags.outputs.SYS_STATS_TAG }} 174 | ZILEAN_TAG=${{ needs.fetch-latest-tags.outputs.ZILEAN_TAG }} 175 | RIVEN_TAG=${{ needs.fetch-latest-tags.outputs.RIVEN_TAG }} 176 | RIVEN_FRONTEND_TAG=${{ needs.fetch-latest-tags.outputs.RIVEN_FRONTEND_TAG }} 177 | DMB_FRONTEND_TAG=${{ needs.fetch-latest-tags.outputs.DMB_FRONTEND_TAG }} 178 | PLEX_DEBRID_TAG=${{ needs.fetch-latest-tags.outputs.PLEX_DEBRID_TAG }} 179 | CLI_DEBRID_TAG=${{ needs.fetch-latest-tags.outputs.CLI_DEBRID_TAG }} 180 | load: true 181 | 182 | - name: Extract Versions from Built Image 183 | run: | 184 | IMAGE_TAG="temp-local-image" 185 | echo "Using IMAGE_TAG: $IMAGE_TAG" 186 | CONTAINER_ID=$(docker run -d --rm --entrypoint /bin/sh $IMAGE_TAG -c "sleep 60") 187 | PSQL_VERSION=$(docker exec $CONTAINER_ID psql --version | awk '{print $3}') 188 | PGADMIN_VERSION=$(docker exec $CONTAINER_ID /pgadmin/venv/bin/python -c "import importlib.metadata; print(importlib.metadata.version('pgadmin4'))" 2>/dev/null || echo "Not Installed") 189 | NODE_VERSION=$(docker exec $CONTAINER_ID node -v 2>/dev/null || echo "Not Installed") 190 | PNPM_VERSION=$(docker exec $CONTAINER_ID pnpm -v 2>/dev/null || echo "Not Installed") 191 | RCLONE_VERSION=$(docker exec $CONTAINER_ID rclone version | head -n 1 | grep -oP 'v[0-9]+\.[0-9]+\.[0-9]+' 2>/dev/null || echo "Not Installed") 192 | echo "PSQL_VERSION=$PSQL_VERSION" >> $GITHUB_ENV 193 | echo "PGADMIN_VERSION=$PGADMIN_VERSION" >> $GITHUB_ENV 194 | echo "NODE_VERSION=$NODE_VERSION" >> $GITHUB_ENV 195 | echo "PNPM_VERSION=$PNPM_VERSION" >> $GITHUB_ENV 196 | echo "RCLONE_VERSION=$RCLONE_VERSION" >> $GITHUB_ENV 197 | docker stop $CONTAINER_ID 198 | docker rmi -f temp-local-image 199 | docker image prune -f 200 | 201 | - name: Post-Build Disk Space 202 | run: | 203 | echo "Disk space after build:" 204 | df -h 205 | 206 | - name: Add Job Summary for Build 207 | run: | 208 | echo "## Build Summary" >> $GITHUB_STEP_SUMMARY 209 | echo "**Build Version:** \`${{ env.VERSION }}\`" >> $GITHUB_STEP_SUMMARY 210 | echo "**Git Branch:** \`${{ github.ref_name }}\`" >> 
$GITHUB_STEP_SUMMARY 211 | echo "**Docker Tags:**" >> $GITHUB_STEP_SUMMARY 212 | echo "\`${{ env.DOCKER_TAGS }}\`" >> $GITHUB_STEP_SUMMARY 213 | echo "**Dependency Versions:**" >> $GITHUB_STEP_SUMMARY 214 | echo "- DMB Frontend: \`${{ needs.fetch-latest-tags.outputs.DMB_FRONTEND_TAG }}\`" >> $GITHUB_STEP_SUMMARY 215 | echo "- Riven: \`${{ needs.fetch-latest-tags.outputs.RIVEN_TAG }}\`" >> $GITHUB_STEP_SUMMARY 216 | echo "- Riven Frontend: \`${{ needs.fetch-latest-tags.outputs.RIVEN_FRONTEND_TAG }}\`" >> $GITHUB_STEP_SUMMARY 217 | echo "- Zilean: \`${{ needs.fetch-latest-tags.outputs.ZILEAN_TAG }}\`" >> $GITHUB_STEP_SUMMARY 218 | echo "- Plex Debrid: \`${{ needs.fetch-latest-tags.outputs.PLEX_DEBRID_TAG }}\`" >> $GITHUB_STEP_SUMMARY 219 | echo "- CLI Debrid: \`${{ needs.fetch-latest-tags.outputs.CLI_DEBRID_TAG }}\`" >> $GITHUB_STEP_SUMMARY 220 | echo "- PostgreSQL: \`${{ env.PSQL_VERSION }}\`" >> $GITHUB_STEP_SUMMARY 221 | echo "- pgAgent: \`${{ needs.fetch-latest-tags.outputs.PGAGENT_TAG }}\`" >> $GITHUB_STEP_SUMMARY 222 | echo "- pgAdmin4: \`${{ env.PGADMIN_VERSION }}\`" >> $GITHUB_STEP_SUMMARY 223 | echo "- System Stats: \`${{ needs.fetch-latest-tags.outputs.SYS_STATS_TAG }}\`" >> $GITHUB_STEP_SUMMARY 224 | echo "- Node.js: \`${{ env.NODE_VERSION }}\`" >> $GITHUB_STEP_SUMMARY 225 | echo "- PNPM: \`${{ env.PNPM_VERSION }}\`" >> $GITHUB_STEP_SUMMARY 226 | echo "- rclone: \`${{ env.RCLONE_VERSION }}\`" >> $GITHUB_STEP_SUMMARY 227 | echo "**Build and Push Status:** ✅ Successful" >> $GITHUB_STEP_SUMMARY 228 | 229 | release: 230 | needs: build-and-push 231 | if: github.ref_name == 'master' 232 | runs-on: ubuntu-latest 233 | outputs: 234 | release_exists: ${{ steps.check_release.outputs.release_exists }} 235 | env: 236 | VERSION: ${{ needs.build-and-push.outputs.version }} 237 | REPO_OWNER_LOWER: ${{ needs.build-and-push.outputs.repo_owner_lower }} 238 | REPO_NAME: ${{ needs.build-and-push.outputs.repo_name }} 239 | steps: 240 | - name: Checkout 241 | uses: actions/checkout@v4 242 | 243 | - name: Check if Release Exists 244 | id: check_release 245 | env: 246 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 247 | VERSION: ${{ env.VERSION }} 248 | run: | 249 | if gh release view "${{ env.VERSION }}" --repo ${{ github.repository }}; then 250 | echo "Release already exists for version ${{ needs.build-and-push.outputs.version }}" 251 | echo "release_exists=true" >> $GITHUB_ENV 252 | echo "release_exists=true" >> $GITHUB_OUTPUT 253 | else 254 | echo "Release does not exist for version ${{ needs.build-and-push.outputs.version }}" 255 | echo "release_exists=false" >> $GITHUB_ENV 256 | echo "release_exists=false" >> $GITHUB_OUTPUT 257 | fi 258 | - name: Create Release with CHANGELOG Notes 259 | if: env.release_exists == 'false' 260 | id: create_release 261 | env: 262 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 263 | run: | 264 | RELEASE_NOTES=$(sed -n '/^## \[[0-9]\+\.[0-9]\+\.[0-9]\+\](/,$p' CHANGELOG.md | sed -n '1!{/^## \[/q;p}') 265 | gh release create ${{ env.VERSION }} \ 266 | --repo ${{ github.repository }} \ 267 | --title "Release ${{ env.VERSION }}" \ 268 | --notes "$RELEASE_NOTES" \ 269 | --draft=false \ 270 | --prerelease=false 271 | 272 | - name: Add Job Summary for Release 273 | run: | 274 | echo "## Release Summary" >> $GITHUB_STEP_SUMMARY 275 | echo "**Release Version:** \`${{ env.VERSION }}\`" >> $GITHUB_STEP_SUMMARY 276 | echo "**Release Status:** ${{ env.release_exists == 'false' && '✅ Created' || '⚠️ Skipped (Already Exists)' }}" >> $GITHUB_STEP_SUMMARY 277 | if [ "${{ env.release_exists 
}}" == "false" ]; then 278 | echo "**Release Notes:**" >> $GITHUB_STEP_SUMMARY 279 | sed -n '/^## \[[0-9]\+\.[0-9]\+\.[0-9]\+\](/,$p' CHANGELOG.md | sed -n '1!{/^## \[/q;p}' >> $GITHUB_STEP_SUMMARY 280 | fi 281 | 282 | announce: 283 | needs: [release, build-and-push] 284 | if: needs.release.outputs.release_exists == 'false' && github.ref_name == 'master' 285 | runs-on: ubuntu-latest 286 | steps: 287 | - name: Checkout code 288 | uses: actions/checkout@v4 289 | - name: Post announcement to Discord 290 | env: 291 | DISCORD_WEBHOOK_URL: ${{ secrets.DISCORD_WEBHOOK_URL }} 292 | VERSION: ${{ needs.build-and-push.outputs.version }} 293 | run: | 294 | RELEASE_NOTES=$(sed -n '/^## \[[0-9]\+\.[0-9]\+\.[0-9]\+\](/,$p' CHANGELOG.md | sed -n '1!{/^## \[/q;p}') 295 | ANNOUNCEMENT_BODY="<@&1360241608649605240> 🚀 **New Release: Version [${{ env.VERSION }}]**${RELEASE_NOTES}" 296 | ESCAPED_BODY=$(echo "$ANNOUNCEMENT_BODY" | jq -Rsa .) 297 | curl -H "Content-Type: application/json" \ 298 | -d "{\"content\": $ESCAPED_BODY, \"flags\": 4}" \ 299 | $DISCORD_WEBHOOK_URL 300 | 301 | update-pr-label: 302 | needs: release 303 | if: needs.release.outputs.release_exists == 'false' && github.ref_name == 'master' 304 | runs-on: ubuntu-latest 305 | steps: 306 | - name: Checkout repository 307 | uses: actions/checkout@v4 308 | 309 | - name: "Remove 'autorelease: pending' label from all merged PRs" 310 | env: 311 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 312 | run: | 313 | PR_NUMBERS=$(gh pr list --state merged --base master --json number,labels --jq '[.[] | select(.labels[].name == "autorelease: pending") | .number] | @sh') 314 | 315 | if [[ -n "$PR_NUMBERS" ]]; then 316 | for PR_NUMBER in $PR_NUMBERS; do 317 | PR_NUMBER=$(echo $PR_NUMBER | tr -d "'") # Remove quotes from jq output 318 | echo "Updating PR #$PR_NUMBER..." 319 | gh pr edit $PR_NUMBER --remove-label "autorelease: pending" 320 | gh pr edit $PR_NUMBER --add-label "autorelease: tagged" 321 | echo "Updated PR #$PR_NUMBER with 'autorelease: tagged'" 322 | done 323 | else 324 | echo "No merged PRs found with 'autorelease: pending' label." 
325 | fi 326 | -------------------------------------------------------------------------------- /.github/workflows/dockerhub-description.yml: -------------------------------------------------------------------------------- 1 | name: Update Docker Hub Description 2 | on: 3 | workflow_dispatch: 4 | push: 5 | branches: 6 | - master 7 | paths: 8 | - README.md 9 | - .github/workflows/dockerhub-description.yml 10 | jobs: 11 | dockerHubDescription: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Checkout Repository 15 | uses: actions/checkout@v4 16 | 17 | - name: Convert Repository Name to Lowercase 18 | run: | 19 | REPO_NAME=$(basename "${{ github.repository }}" | tr '[:upper:]' '[:lower:]') 20 | echo "REPO_NAME=$REPO_NAME" >> $GITHUB_ENV 21 | 22 | - name: Update Docker Hub Description 23 | uses: peter-evans/dockerhub-description@v4 24 | with: 25 | username: ${{ secrets.DOCKER_USERNAME }} 26 | password: ${{ secrets.DOCKER_PASSWORD }} 27 | repository: ${{ secrets.DOCKER_USERNAME }}/${{ env.REPO_NAME }} -------------------------------------------------------------------------------- /.github/workflows/fetch-latest-tags.yml: -------------------------------------------------------------------------------- 1 | name: Check for New Release Tags 2 | 3 | on: 4 | schedule: 5 | - cron: "0 */3 * * *" 6 | workflow_dispatch: 7 | 8 | jobs: 9 | check-latest-tags: 10 | runs-on: ubuntu-latest 11 | outputs: 12 | update_found: ${{ steps.compare_tags.outputs.update_found }} 13 | steps: 14 | - name: Checkout repository 15 | uses: actions/checkout@v4 16 | 17 | - name: Restore previous tags from cache 18 | id: cache-latest-tags 19 | uses: actions/cache@v4 20 | with: 21 | path: latest-tags.txt 22 | key: latest-tags-${{ github.ref_name }}-${{ github.run_id }} 23 | restore-keys: | 24 | latest-tags-${{ github.ref_name }} 25 | 26 | - name: Fetch latest release tags 27 | run: | 28 | echo "Fetching latest release tags..." 29 | ZILEAN_TAG=$(curl -s https://api.github.com/repos/iPromKnight/zilean/releases/latest | jq -r .tag_name) 30 | RIVEN_TAG=$(curl -s https://api.github.com/repos/rivenmedia/riven/releases/latest | jq -r .tag_name) 31 | RIVEN_FRONTEND_TAG=$(curl -s https://api.github.com/repos/rivenmedia/riven-frontend/releases/latest | jq -r .tag_name) 32 | DMB_FRONTEND_TAG=$(curl -s https://api.github.com/repos/nicocapalbo/dmbdb/releases/latest | jq -r .tag_name) 33 | CLI_DEBRID_TAG=$(curl -s https://api.github.com/repos/godver3/cli_debrid/releases/latest | jq -r .tag_name) 34 | PLEX_DEBRID_TAG=$(curl -s https://raw.githubusercontent.com/elfhosted/plex_debrid/main/ui/ui_settings.py | \ 35 | grep '^version\s*=' | \ 36 | sed -E "s/.*=\s*\[\s*'([^']+)'.*/\1/") 37 | 38 | echo "CLI_DEBRID_TAG=$CLI_DEBRID_TAG" >> $GITHUB_ENV 39 | echo "PLEX_DEBRID_TAG=$PLEX_DEBRID_TAG" >> $GITHUB_ENV 40 | echo "ZILEAN_TAG=$ZILEAN_TAG" >> $GITHUB_ENV 41 | echo "RIVEN_TAG=$RIVEN_TAG" >> $GITHUB_ENV 42 | echo "RIVEN_FRONTEND_TAG=$RIVEN_FRONTEND_TAG" >> $GITHUB_ENV 43 | echo "DMB_FRONTEND_TAG=$DMB_FRONTEND_TAG" >> $GITHUB_ENV 44 | 45 | - name: Load previous tags 46 | id: load_previous 47 | run: | 48 | if [ -f latest-tags.txt ]; then 49 | echo "Previous tags found:" 50 | cat latest-tags.txt 51 | echo "PREV_TAGS_FOUND=true" >> $GITHUB_ENV 52 | else 53 | echo "No previous tags found." 
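            # No cached tag list from an earlier run; the compare step below treats this as a
            # first run and sets update_found=true so a build is still triggered.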
54 | echo "PREV_TAGS_FOUND=false" >> $GITHUB_ENV 55 | fi 56 | 57 | - name: Compare tags and detect changes 58 | id: compare_tags 59 | run: | 60 | if [ "$PREV_TAGS_FOUND" == "true" ]; then 61 | DIFF=$(diff latest-tags.txt <(echo -e "$ZILEAN_TAG\n$RIVEN_TAG\n$RIVEN_FRONTEND_TAG\n$DMB_FRONTEND_TAG\n$PLEX_DEBRID_TAG\n$CLI_DEBRID_TAG") || true) 62 | if [ -n "$DIFF" ]; then 63 | echo "New versions found!" 64 | echo "update_found=true" >> $GITHUB_ENV 65 | echo "update_found=true" >> $GITHUB_OUTPUT 66 | else 67 | echo "No updates found." 68 | echo "update_found=false" >> $GITHUB_ENV 69 | fi 70 | else 71 | echo "First run - proceeding with build." 72 | echo "update_found=true" >> $GITHUB_ENV 73 | echo "update_found=true" >> $GITHUB_OUTPUT 74 | fi 75 | 76 | - name: Store latest tags in cache 77 | run: echo -e "$ZILEAN_TAG\n$RIVEN_TAG\n$RIVEN_FRONTEND_TAG\n$DMB_FRONTEND_TAG\n$PLEX_DEBRID_TAG\n$CLI_DEBRID_TAG" > latest-tags.txt 78 | 79 | - name: Cleanup old caches 80 | continue-on-error: true 81 | run: | 82 | echo "Fetching list of cache keys..." 83 | cacheKeys=$(gh cache list --ref $BRANCH --limit 100 --json key,id --jq '.[] | select(.key | startswith("latest-tags-${{ github.ref_name }}")) | .id') 84 | 85 | echo "Deleting caches matching latest-tags-${{ github.ref_name }}..." 86 | for cacheKey in $cacheKeys; do 87 | gh cache delete $cacheKey || true 88 | done 89 | echo "Cleanup complete." 90 | env: 91 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 92 | GH_REPO: ${{ github.repository }} 93 | BRANCH: ${{ github.ref }} 94 | 95 | trigger-docker-ci: 96 | needs: check-latest-tags 97 | runs-on: ubuntu-latest 98 | if: ${{ needs.check-latest-tags.outputs.update_found == 'true' }} 99 | steps: 100 | - name: Trigger Docker CI if new versions found 101 | uses: benc-uk/workflow-dispatch@v1 102 | with: 103 | workflow: Docker Image CI 104 | token: ${{ secrets.GITHUB_TOKEN }} 105 | -------------------------------------------------------------------------------- /.github/workflows/release-please.yaml: -------------------------------------------------------------------------------- 1 | name: "Release Please" 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | workflow_dispatch: 8 | 9 | permissions: 10 | contents: write 11 | pull-requests: write 12 | 13 | jobs: 14 | wait-for-previous-release: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Checkout 18 | uses: actions/checkout@v4 19 | 20 | - name: Wait for Previous Release PR to be Tagged 21 | env: 22 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 23 | run: | 24 | echo "Checking for pending release PR..." 25 | while true; do 26 | PENDING_PR=$(gh pr list --repo ${{ github.repository }} --state closed --base master --json number,labels --jq '.[] | select(.labels | any(.name == "autorelease: pending")) | .number') 27 | 28 | if [[ -z "$PENDING_PR" ]]; then 29 | echo "No pending release PR found. Proceeding..." 30 | break 31 | fi 32 | 33 | echo "Previous release PR #$PENDING_PR is still pending. Waiting for it to be tagged..." 
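              # The "autorelease: pending" label is swapped to "autorelease: tagged" by the
              # update-pr-label job in docker-image.yml once the release is published, ending this wait.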
34 | sleep 300 # Wait 5 minutes before checking again 35 | done 36 | 37 | release-please: 38 | needs: wait-for-previous-release 39 | runs-on: ubuntu-latest 40 | steps: 41 | - uses: googleapis/release-please-action@v4 42 | id: release 43 | with: 44 | token: ${{ secrets.RELEASE_PLEASE_TOKEN }} 45 | config-file: .github/release-please-config.json 46 | manifest-file: .github/.release-please-manifest.json 47 | skip-github-release: true -------------------------------------------------------------------------------- /.github/workflows/test-discord.yml: -------------------------------------------------------------------------------- 1 | name: Test Discord Webhook 2 | 3 | on: 4 | workflow_dispatch: 5 | 6 | jobs: 7 | test-discord-webhook: 8 | runs-on: ubuntu-latest 9 | 10 | steps: 11 | - name: Checkout code 12 | uses: actions/checkout@v4 13 | 14 | - name: Post test announcement to Discord 15 | env: 16 | DISCORD_WEBHOOK_URL: ${{ secrets.DISCORD_WEBHOOK_URL }} 17 | VERSION: "100.100.100" 18 | run: | 19 | RELEASE_NOTES=$(awk '/^## Version \[${{ env.VERSION }}\]/ {flag=1; next} /^## Version \[/ {flag=0} flag' CHANGELOG.md) 20 | ANNOUNCEMENT_BODY="<@&1360241608649605240> 🚀 **New Release: Version [${{ env.VERSION }}]**${RELEASE_NOTES}" 21 | ESCAPED_BODY=$(echo "$ANNOUNCEMENT_BODY" | jq -Rsa .) 22 | curl -H "Content-Type: application/json" \ 23 | -d "{\"content\": $ESCAPED_BODY, \"flags\": 4}" \ 24 | $DISCORD_WEBHOOK_URL -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ## Ignore Visual Studio temporary files, build results, and 2 | ## files generated by popular Visual Studio add-ons. 3 | ## 4 | ## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore 5 | 6 | # User-specific files 7 | *.rsuser 8 | *.suo 9 | *.user 10 | *.userosscache 11 | *.sln.docstates 12 | 13 | # User-specific files (MonoDevelop/Xamarin Studio) 14 | *.userprefs 15 | 16 | # Mono auto generated files 17 | mono_crash.* 18 | 19 | # Build results 20 | [Dd]ebug/ 21 | [Dd]ebugPublic/ 22 | [Rr]elease/ 23 | #[Rr]eleases/ 24 | x64/ 25 | x86/ 26 | [Ww][Ii][Nn]32/ 27 | [Aa][Rr][Mm]/ 28 | [Aa][Rr][Mm]64/ 29 | bld/ 30 | [Bb]in/ 31 | [Oo]bj/ 32 | [Oo]ut/ 33 | [Ll]og/ 34 | [Ll]ogs/ 35 | 36 | # Visual Studio 2015/2017 cache/options directory 37 | .vs/ 38 | # Uncomment if you have tasks that create the project's static files in wwwroot 39 | #wwwroot/ 40 | 41 | # Visual Studio 2017 auto generated files 42 | Generated\ Files/ 43 | 44 | # MSTest test Results 45 | [Tt]est[Rr]esult*/ 46 | [Bb]uild[Ll]og.* 47 | 48 | # NUnit 49 | *.VisualState.xml 50 | TestResult.xml 51 | nunit-*.xml 52 | 53 | # Build Results of an ATL Project 54 | [Dd]ebugPS/ 55 | [Rr]eleasePS/ 56 | dlldata.c 57 | 58 | # Benchmark Results 59 | BenchmarkDotNet.Artifacts/ 60 | 61 | # .NET Core 62 | project.lock.json 63 | project.fragment.lock.json 64 | artifacts/ 65 | 66 | # ASP.NET Scaffolding 67 | ScaffoldingReadMe.txt 68 | 69 | # StyleCop 70 | StyleCopReport.xml 71 | 72 | # Files built by Visual Studio 73 | *_i.c 74 | *_p.c 75 | *_h.h 76 | *.ilk 77 | *.meta 78 | *.obj 79 | *.iobj 80 | *.pch 81 | *.pdb 82 | *.ipdb 83 | *.pgc 84 | *.pgd 85 | *.rsp 86 | *.sbr 87 | *.tlb 88 | *.tli 89 | *.tlh 90 | *.tmp 91 | *.tmp_proj 92 | *_wpftmp.csproj 93 | *.log 94 | *.vspscc 95 | *.vssscc 96 | .builds 97 | *.pidb 98 | *.svclog 99 | *.scc 100 | 101 | # Chutzpah Test files 102 | _Chutzpah* 103 | 104 | # Visual C++ cache files 105 | ipch/ 106 | *.aps 
107 | *.ncb 108 | *.opendb 109 | *.opensdf 110 | *.sdf 111 | *.cachefile 112 | *.VC.db 113 | *.VC.VC.opendb 114 | 115 | # Visual Studio profiler 116 | *.psess 117 | *.vsp 118 | *.vspx 119 | *.sap 120 | 121 | # Visual Studio Trace Files 122 | *.e2e 123 | 124 | # TFS 2012 Local Workspace 125 | $tf/ 126 | 127 | # Guidance Automation Toolkit 128 | *.gpState 129 | 130 | # ReSharper is a .NET coding add-in 131 | _ReSharper*/ 132 | *.[Rr]e[Ss]harper 133 | *.DotSettings.user 134 | 135 | # TeamCity is a build add-in 136 | _TeamCity* 137 | 138 | # DotCover is a Code Coverage Tool 139 | *.dotCover 140 | 141 | # AxoCover is a Code Coverage Tool 142 | .axoCover/* 143 | !.axoCover/settings.json 144 | 145 | # Coverlet is a free, cross platform Code Coverage Tool 146 | coverage*.json 147 | coverage*.xml 148 | coverage*.info 149 | 150 | # Visual Studio code coverage results 151 | *.coverage 152 | *.coveragexml 153 | 154 | # NCrunch 155 | _NCrunch_* 156 | .*crunch*.local.xml 157 | nCrunchTemp_* 158 | 159 | # MightyMoose 160 | *.mm.* 161 | AutoTest.Net/ 162 | 163 | # Web workbench (sass) 164 | .sass-cache/ 165 | 166 | # Installshield output folder 167 | [Ee]xpress/ 168 | 169 | # DocProject is a documentation generator add-in 170 | DocProject/buildhelp/ 171 | DocProject/Help/*.HxT 172 | DocProject/Help/*.HxC 173 | DocProject/Help/*.hhc 174 | DocProject/Help/*.hhk 175 | DocProject/Help/*.hhp 176 | DocProject/Help/Html2 177 | DocProject/Help/html 178 | 179 | # Click-Once directory 180 | publish/ 181 | 182 | # Publish Web Output 183 | *.[Pp]ublish.xml 184 | *.azurePubxml 185 | # Note: Comment the next line if you want to checkin your web deploy settings, 186 | # but database connection strings (with potential passwords) will be unencrypted 187 | *.pubxml 188 | *.publishproj 189 | 190 | # Microsoft Azure Web App publish settings. Comment the next line if you want to 191 | # checkin your Azure Web App publish settings, but sensitive information contained 192 | # in these scripts will be unencrypted 193 | PublishScripts/ 194 | 195 | # NuGet Packages 196 | *.nupkg 197 | # NuGet Symbol Packages 198 | *.snupkg 199 | # The packages folder can be ignored because of Package Restore 200 | **/[Pp]ackages/* 201 | # except build/, which is used as an MSBuild target. 
202 | !**/[Pp]ackages/build/ 203 | # Uncomment if necessary however generally it will be regenerated when needed 204 | #!**/[Pp]ackages/repositories.config 205 | # NuGet v3's project.json files produces more ignorable files 206 | *.nuget.props 207 | *.nuget.targets 208 | 209 | # Microsoft Azure Build Output 210 | csx/ 211 | *.build.csdef 212 | 213 | # Microsoft Azure Emulator 214 | ecf/ 215 | rcf/ 216 | 217 | # Windows Store app package directories and files 218 | AppPackages/ 219 | BundleArtifacts/ 220 | Package.StoreAssociation.xml 221 | _pkginfo.txt 222 | *.appx 223 | *.appxbundle 224 | *.appxupload 225 | 226 | # Visual Studio cache files 227 | # files ending in .cache can be ignored 228 | *.[Cc]ache 229 | # but keep track of directories ending in .cache 230 | !?*.[Cc]ache/ 231 | 232 | # Others 233 | ClientBin/ 234 | ~$* 235 | *~ 236 | *.dbmdl 237 | *.dbproj.schemaview 238 | *.jfm 239 | *.pfx 240 | *.publishsettings 241 | orleans.codegen.cs 242 | 243 | # Including strong name files can present a security risk 244 | # (https://github.com/github/gitignore/pull/2483#issue-259490424) 245 | #*.snk 246 | 247 | # Since there are multiple workflows, uncomment next line to ignore bower_components 248 | # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622) 249 | #bower_components/ 250 | 251 | # RIA/Silverlight projects 252 | Generated_Code/ 253 | 254 | # Backup & report files from converting an old project file 255 | # to a newer Visual Studio version. Backup files are not needed, 256 | # because we have git ;-) 257 | _UpgradeReport_Files/ 258 | Backup*/ 259 | UpgradeLog*.XML 260 | UpgradeLog*.htm 261 | ServiceFabricBackup/ 262 | *.rptproj.bak 263 | 264 | # SQL Server files 265 | *.mdf 266 | *.ldf 267 | *.ndf 268 | 269 | # Business Intelligence projects 270 | *.rdl.data 271 | *.bim.layout 272 | *.bim_*.settings 273 | *.rptproj.rsuser 274 | *- [Bb]ackup.rdl 275 | *- [Bb]ackup ([0-9]).rdl 276 | *- [Bb]ackup ([0-9][0-9]).rdl 277 | 278 | # Microsoft Fakes 279 | FakesAssemblies/ 280 | 281 | # GhostDoc plugin setting file 282 | *.GhostDoc.xml 283 | 284 | # Node.js Tools for Visual Studio 285 | .ntvs_analysis.dat 286 | node_modules/ 287 | 288 | # Visual Studio 6 build log 289 | *.plg 290 | 291 | # Visual Studio 6 workspace options file 292 | *.opt 293 | 294 | # Visual Studio 6 auto-generated workspace file (contains which files were open etc.) 
295 | *.vbw 296 | 297 | # Visual Studio LightSwitch build output 298 | **/*.HTMLClient/GeneratedArtifacts 299 | **/*.DesktopClient/GeneratedArtifacts 300 | **/*.DesktopClient/ModelManifest.xml 301 | **/*.Server/GeneratedArtifacts 302 | **/*.Server/ModelManifest.xml 303 | _Pvt_Extensions 304 | 305 | # Paket dependency manager 306 | .paket/paket.exe 307 | paket-files/ 308 | 309 | # FAKE - F# Make 310 | .fake/ 311 | 312 | # CodeRush personal settings 313 | .cr/personal 314 | 315 | # Python Tools for Visual Studio (PTVS) 316 | __pycache__/ 317 | *.pyc 318 | 319 | # Cake - Uncomment if you are using it 320 | # tools/** 321 | # !tools/packages.config 322 | 323 | # Tabs Studio 324 | *.tss 325 | 326 | # Telerik's JustMock configuration file 327 | *.jmconfig 328 | 329 | # BizTalk build output 330 | *.btp.cs 331 | *.btm.cs 332 | *.odx.cs 333 | *.xsd.cs 334 | 335 | # OpenCover UI analysis results 336 | OpenCover/ 337 | 338 | # Azure Stream Analytics local run output 339 | ASALocalRun/ 340 | 341 | # MSBuild Binary and Structured Log 342 | *.binlog 343 | 344 | # NVidia Nsight GPU debugger configuration file 345 | *.nvuser 346 | 347 | # MFractors (Xamarin productivity tool) working folder 348 | .mfractor/ 349 | 350 | # Local History for Visual Studio 351 | .localhistory/ 352 | 353 | # BeatPulse healthcheck temp database 354 | healthchecksdb 355 | 356 | # Backup folder for Package Reference Convert tool in Visual Studio 2017 357 | MigrationBackup/ 358 | 359 | # Ionide (cross platform F# VS Code tools) working folder 360 | .ionide/ 361 | 362 | # Fody - auto-generated XML schema 363 | FodyWeavers.xsd 364 | 365 | # debug 366 | **/data/ 367 | **/log/ 368 | *.env 369 | **/config 370 | **/zurg 371 | /postgres_data* 372 | *.pyproj* 373 | *.db* 374 | *Zone* 375 | /dmb 376 | *.VS* 377 | *.vs* 378 | *.pnpm-store* 379 | settings.json -------------------------------------------------------------------------------- /COPYING: -------------------------------------------------------------------------------- 1 | Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/ 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | THE SOFTWARE. 
20 | 21 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | #################################################################################################################################################### 2 | # Stage 1: pgadmin-builder (Ubuntu 24.04) 3 | #################################################################################################################################################### 4 | FROM ubuntu:24.04 AS pgadmin-builder 5 | ENV DEBIAN_FRONTEND=noninteractive 6 | RUN apt-get update && apt-get install -y software-properties-common && \ 7 | add-apt-repository ppa:deadsnakes/ppa -y && apt-get update && \ 8 | apt-get install -y \ 9 | python3.11 \ 10 | python3.11-venv \ 11 | python3-pip && \ 12 | rm -rf /var/lib/apt/lists/* 13 | RUN python3.11 -m venv /pgadmin/venv && \ 14 | /pgadmin/venv/bin/python -m pip install --upgrade pip && \ 15 | /pgadmin/venv/bin/python -m pip install pgadmin4 16 | 17 | #################################################################################################################################################### 18 | # Stage 2: systemstats-builder (Ubuntu 24.04) 19 | #################################################################################################################################################### 20 | FROM ubuntu:24.04 AS systemstats-builder 21 | ARG SYS_STATS_TAG 22 | ENV DEBIAN_FRONTEND=noninteractive 23 | ENV PATH="/usr/lib/postgresql16/bin:$PATH" 24 | RUN apt-get update && apt-get install -y software-properties-common wget gnupg2 lsb-release && \ 25 | wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - && \ 26 | echo "deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list && \ 27 | apt-get update && \ 28 | apt-get install -y build-essential postgresql-server-dev-16 curl unzip jq && \ 29 | rm -rf /var/lib/apt/lists/* 30 | RUN find /usr -name pg_config && \ 31 | /usr/bin/pg_config --version && \ 32 | /usr/bin/pg_config --includedir && \ 33 | /usr/bin/pg_config --libdir && \ 34 | /usr/bin/pg_config --sharedir && \ 35 | curl -L https://github.com/EnterpriseDB/system_stats/archive/refs/tags/${SYS_STATS_TAG}.zip -o system_stats-latest.zip && \ 36 | unzip system_stats-latest.zip && \ 37 | mv system_stats-*/ system_stats && \ 38 | cd system_stats && export PATH="/usr/bin:$PATH" && \ 39 | make USE_PGXS=1 && make install USE_PGXS=1 && \ 40 | mkdir -p /usr/share/postgresql16/extension && \ 41 | cp system_stats.control /usr/share/postgresql16/extension/ && \ 42 | cp system_stats--*.sql /usr/share/postgresql16/extension/ && \ 43 | cd .. 
&& rm -rf system_stats system_stats-latest.zip 44 | 45 | #################################################################################################################################################### 46 | # Stage 3: zilean-builder (Ubuntu 24.04 .NET SDK) 47 | #################################################################################################################################################### 48 | FROM ubuntu:24.04 AS zilean-builder 49 | ARG TARGETARCH 50 | ARG ZILEAN_TAG 51 | ENV DEBIAN_FRONTEND=noninteractive 52 | RUN apt-get update && apt-get install -y software-properties-common wget gnupg2 lsb-release && \ 53 | add-apt-repository ppa:deadsnakes/ppa -y && add-apt-repository ppa:dotnet/backports -y && apt-get update && \ 54 | apt-get install -y python3.11 python3.11-venv python3.11-dev curl jq unzip dotnet-sdk-9.0 && \ 55 | rm -rf /var/lib/apt/lists/* 56 | RUN curl -L https://github.com/iPromKnight/zilean/archive/refs/tags/${ZILEAN_TAG}.zip -o zilean-latest.zip && \ 57 | unzip zilean-latest.zip && \ 58 | mv zilean-*/ /zilean && \ 59 | echo $ZILEAN_TAG > /zilean/version.txt && \ 60 | cd /zilean && \ 61 | dotnet restore -a $TARGETARCH && \ 62 | cd /zilean/src/Zilean.ApiService && \ 63 | dotnet publish -c Release --no-restore -a $TARGETARCH -o /zilean/app/ && \ 64 | cd /zilean/src/Zilean.Scraper && \ 65 | dotnet publish -c Release --no-restore -a $TARGETARCH -o /zilean/app/ && \ 66 | cd /zilean && \ 67 | python3.11 -m venv /zilean/venv && \ 68 | . /zilean/venv/bin/activate && \ 69 | pip install -r /zilean/requirements.txt 70 | 71 | #################################################################################################################################################### 72 | # Stage 4: riven-frontend-builder (Ubuntu 24.04 with Node.js) 73 | #################################################################################################################################################### 74 | FROM ubuntu:24.04 AS riven-frontend-builder 75 | ARG RIVEN_FRONTEND_TAG 76 | ENV DEBIAN_FRONTEND=noninteractive 77 | RUN apt-get update && apt-get install -y curl unzip gnupg2 lsb-release && \ 78 | curl -fsSL https://deb.nodesource.com/setup_22.x | bash - && \ 79 | apt-get install -y nodejs && \ 80 | node -v && npm install -g npm@10 && npm -v && \ 81 | npm install -g pnpm@latest-10 && pnpm -v && \ 82 | rm -rf /var/lib/apt/lists/* 83 | RUN curl -L https://github.com/rivenmedia/riven-frontend/archive/refs/tags/${RIVEN_FRONTEND_TAG}.zip -o riven-frontend.zip && \ 84 | unzip riven-frontend.zip && \ 85 | mkdir -p /riven/frontend && \ 86 | mv riven-frontend-*/* /riven/frontend && rm riven-frontend.zip && \ 87 | cd /riven/frontend && \ 88 | sed -i '/export default defineConfig({/a\ build: {\n minify: false\n },' vite.config.ts && \ 89 | sed -i "s#/riven/version.txt#/riven/frontend/version.txt#g" src/routes/settings/about/+page.server.ts && \ 90 | sed -i "s/export const prerender = true;/export const prerender = false;/g" src/routes/settings/about/+page.server.ts && \ 91 | echo "store-dir=./.pnpm-store" > /riven/frontend/.npmrc && \ 92 | pnpm install && \ 93 | pnpm run build && \ 94 | pnpm prune --prod 95 | 96 | #################################################################################################################################################### 97 | # Stage 5: riven-backend-builder (Ubuntu 24.04 with Python 3.11) 98 | #################################################################################################################################################### 99 | 
FROM ubuntu:24.04 AS riven-backend-builder 100 | ARG RIVEN_TAG 101 | ENV DEBIAN_FRONTEND=noninteractive 102 | RUN apt-get update && apt-get install -y software-properties-common wget gnupg2 lsb-release unzip && \ 103 | add-apt-repository ppa:deadsnakes/ppa -y && apt-get update && \ 104 | apt-get install -y python3.11 python3.11-venv python3.11-dev \ 105 | curl gcc build-essential libxml2-utils linux-headers-generic && \ 106 | rm -rf /var/lib/apt/lists/* 107 | RUN curl -L https://github.com/rivenmedia/riven/archive/refs/tags/${RIVEN_TAG}.zip -o riven.zip && \ 108 | unzip riven.zip && \ 109 | mkdir -p /riven/backend && \ 110 | mv riven-*/* /riven/backend && rm riven.zip && \ 111 | cd /riven/backend && \ 112 | python3.11 -m venv /riven/backend/venv && \ 113 | . /riven/backend/venv/bin/activate && \ 114 | pip install --upgrade pip && \ 115 | pip install poetry && \ 116 | poetry config virtualenvs.create false && \ 117 | poetry install --no-root --without dev 118 | 119 | #################################################################################################################################################### 120 | # Stage 6: dmb-frontend-builder (Ubuntu 24.04 with Node.js) 121 | #################################################################################################################################################### 122 | FROM ubuntu:24.04 AS dmb-frontend-builder 123 | ARG DMB_FRONTEND_TAG 124 | ENV DEBIAN_FRONTEND=noninteractive 125 | RUN apt-get update && apt-get install -y curl unzip build-essential gnupg2 lsb-release && \ 126 | curl -fsSL https://deb.nodesource.com/setup_22.x | bash - && \ 127 | apt-get install -y nodejs && \ 128 | node -v && npm install -g npm@10 && npm -v && \ 129 | npm install -g pnpm@latest-10 && pnpm -v && \ 130 | rm -rf /var/lib/apt/lists/* 131 | RUN curl -L https://github.com/nicocapalbo/dmbdb/archive/refs/tags/${DMB_FRONTEND_TAG}.zip -o dmb-frontend.zip && \ 132 | unzip dmb-frontend.zip && \ 133 | mkdir -p dmb/frontend && \ 134 | mv dmbdb*/* /dmb/frontend && rm dmb-frontend.zip && \ 135 | cd dmb/frontend && \ 136 | echo "store-dir=./.pnpm-store" > /dmb/frontend/.npmrc && \ 137 | pnpm install --reporter=verbose && \ 138 | pnpm run build --log-level verbose 139 | 140 | #################################################################################################################################################### 141 | # Stage 7: plex_debrid-builder (Ubuntu 24.04 with Python 3.11) 142 | #################################################################################################################################################### 143 | FROM ubuntu:24.04 AS plex_debrid-builder 144 | ARG PLEX_DEBRID_TAG 145 | ENV DEBIAN_FRONTEND=noninteractive 146 | RUN apt-get update && apt-get install -y software-properties-common curl unzip && \ 147 | add-apt-repository ppa:deadsnakes/ppa -y && apt-get update && \ 148 | apt-get install -y \ 149 | python3.11 \ 150 | python3.11-venv \ 151 | python3-pip && \ 152 | rm -rf /var/lib/apt/lists/* 153 | RUN curl -L https://github.com/elfhosted/plex_debrid/archive/refs/heads/main.zip -o plex_debrid.zip && \ 154 | unzip plex_debrid.zip && \ 155 | mkdir -p /plex_debrid && \ 156 | mv plex_debrid-main/* /plex_debrid && \ 157 | rm -rf plex_debrid.zip plex_debrid-main 158 | ADD https://raw.githubusercontent.com/I-am-PUID-0/pd_zurg/master/plex_debrid_/settings-default.json /plex_debrid/settings-default.json 159 | RUN python3.11 -m venv /plex_debrid/venv && \ 160 | /plex_debrid/venv/bin/python -m pip install --upgrade pip && \ 
161 | /plex_debrid/venv/bin/python -m pip install -r /plex_debrid/requirements.txt 162 | 163 | 164 | #################################################################################################################################################### 165 | # Stage 8: cli_debrid-builder (Ubuntu 24.04 with Python 3.11) 166 | #################################################################################################################################################### 167 | FROM ubuntu:24.04 AS cli_debrid-builder 168 | ARG CLI_DEBRID_TAG 169 | ENV DEBIAN_FRONTEND=noninteractive 170 | RUN apt-get update && apt-get install -y software-properties-common curl unzip && \ 171 | add-apt-repository ppa:deadsnakes/ppa -y && apt-get update && \ 172 | apt-get install -y \ 173 | python3.11 \ 174 | python3.11-venv \ 175 | python3-pip && \ 176 | rm -rf /var/lib/apt/lists/* 177 | RUN curl -L https://github.com/godver3/cli_debrid/archive/refs/tags/${CLI_DEBRID_TAG}.zip -o cli_debrid.zip && \ 178 | unzip cli_debrid.zip && \ 179 | mkdir -p /cli_debrid && \ 180 | mv cli_debrid-*/* /cli_debrid && \ 181 | rm -rf cli_debrid.zip cli_debrid-*/* 182 | RUN python3.11 -m venv /cli_debrid/venv && \ 183 | /cli_debrid/venv/bin/python -m pip install --upgrade pip && \ 184 | /cli_debrid/venv/bin/python -m pip install -r /cli_debrid/requirements-linux.txt 185 | 186 | 187 | #################################################################################################################################################### 188 | # Stage 9: requirements-builder (Ubuntu 24.04 with Python 3.11) 189 | #################################################################################################################################################### 190 | FROM ubuntu:24.04 AS requirements-builder 191 | ENV DEBIAN_FRONTEND=noninteractive 192 | COPY pyproject.toml poetry.lock ./ 193 | RUN apt-get update && apt-get install -y software-properties-common wget gnupg2 lsb-release && \ 194 | add-apt-repository ppa:deadsnakes/ppa -y && apt-get update && \ 195 | apt-get install -y python3.11 python3.11-venv python3.11-dev curl gcc build-essential libxml2-utils linux-headers-generic libpq-dev pkg-config && \ 196 | rm -rf /var/lib/apt/lists/* && \ 197 | python3.11 -m venv /venv && \ 198 | . 
/venv/bin/activate && \ 199 | pip install --upgrade pip && \ 200 | pip install poetry && \ 201 | poetry config virtualenvs.create false && \ 202 | poetry install --no-root 203 | 204 | #################################################################################################################################################### 205 | # Stage 10: final-stage (Ubuntu 24.04 with Python 3.11, .NET SDK, PostgreSQL, pgAdmin4, Node.js, Rclone, Zilean, SystemStats, Riven, Plex Debrid, & DMB) 206 | #################################################################################################################################################### 207 | FROM ubuntu:24.04 AS final-stage 208 | ARG TARGETARCH 209 | ENV DEBIAN_FRONTEND=noninteractive 210 | ENV PATH="/usr/lib/postgresql/16/bin:$PATH" 211 | LABEL name="DMB" \ 212 | description="Debrid Media Bridge" \ 213 | url="https://github.com/I-am-PUID-0/DMB" \ 214 | maintainer="I-am-PUID-0" 215 | 216 | RUN apt-get update && \ 217 | apt-get install -y software-properties-common && \ 218 | add-apt-repository ppa:dotnet/backports -y && \ 219 | add-apt-repository ppa:deadsnakes/ppa -y && \ 220 | apt-get update && \ 221 | apt-get install -y \ 222 | curl tzdata nano ca-certificates wget fuse3 \ 223 | build-essential linux-headers-generic libpython3.11 python3.11 python3.11-venv python3-pip python3-dev \ 224 | libxml2-utils git htop pkg-config libffi-dev libboost-filesystem-dev libboost-thread-dev \ 225 | ffmpeg jq openssl bash unzip gnupg2 lsb-release dotnet-sdk-9.0 locales && \ 226 | locale-gen en_US.UTF-8 && \ 227 | update-alternatives --install /usr/bin/python python /usr/bin/python3.11 1 && \ 228 | update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \ 229 | ln -sf /usr/lib/$(uname -m)-linux-gnu/libpython3.11.so.1 /usr/local/lib/libpython3.11.so.1 && \ 230 | ln -sf /usr/lib/$(uname -m)-linux-gnu/libpython3.11.so.1.0 /usr/local/lib/libpython3.11.so.1.0 && \ 231 | wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - && \ 232 | echo "deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list && \ 233 | apt-get update && \ 234 | apt-get install -y \ 235 | postgresql-client-16 postgresql-16 postgresql-contrib-16 pgagent && \ 236 | rm -rf /var/lib/apt/lists/* && \ 237 | curl -fsSL https://deb.nodesource.com/setup_22.x | bash - && \ 238 | apt-get install -y nodejs && \ 239 | node -v && npm install -g npm@10 && npm -v && \ 240 | npm install -g pnpm@latest-10 && pnpm -v 241 | 242 | WORKDIR / 243 | 244 | RUN echo "export PATH=/usr/lib/postgresql/16/bin:$PATH" >> /etc/profile.d/postgresql.sh 245 | RUN echo "export PATH=/usr/lib/postgresql/16/bin:$PATH" >> /root/.bashrc 246 | 247 | COPY --from=requirements-builder /venv /venv 248 | COPY --from=pgadmin-builder /pgadmin/venv /pgadmin/venv 249 | COPY --from=systemstats-builder /usr/share/postgresql/16/extension/system_stats* /usr/share/postgresql/16/extension/ 250 | COPY --from=systemstats-builder /usr/lib/postgresql/16/lib/system_stats.so /usr/lib/postgresql/16/lib/ 251 | COPY --from=zilean-builder /zilean /zilean 252 | COPY --from=riven-frontend-builder /riven/frontend /riven/frontend 253 | COPY --from=riven-backend-builder /riven/backend /riven/backend 254 | COPY --from=dmb-frontend-builder /dmb/frontend /dmb/frontend 255 | COPY --from=plex_debrid-builder /plex_debrid /plex_debrid 256 | COPY --from=cli_debrid-builder /cli_debrid /cli_debrid 257 | COPY --from=rclone/rclone:latest /usr/local/bin/rclone 
/usr/local/bin/rclone 258 | ADD https://raw.githubusercontent.com/debridmediamanager/zurg-testing/main/config.yml /zurg/ 259 | ADD https://raw.githubusercontent.com/debridmediamanager/zurg-testing/main/scripts/plex_update.sh /zurg/ 260 | RUN sed -i 's/^on_library_update: sh plex_update.sh.*$/# &/' /zurg/config.yml 261 | 262 | COPY . /./ 263 | 264 | ENV XDG_CONFIG_HOME=/config \ 265 | TERM=xterm 266 | 267 | HEALTHCHECK --interval=60s --timeout=10s \ 268 | CMD ["/bin/bash", "-c", ". /venv/bin/activate && python /healthcheck.py"] 269 | 270 | ENTRYPOINT ["/bin/bash", "-c", ". /venv/bin/activate && python /main.py"] 271 | -------------------------------------------------------------------------------- /api/api_service.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI 2 | from fastapi.middleware.cors import CORSMiddleware 3 | from fastapi.routing import APIRoute, APIWebSocketRoute 4 | from scalar_fastapi import get_scalar_api_reference 5 | from fastapi.responses import HTMLResponse 6 | from contextlib import asynccontextmanager 7 | from uvicorn.config import Config 8 | from uvicorn.server import Server 9 | from utils.dependencies import ( 10 | get_api_state, 11 | get_process_handler, 12 | get_logger, 13 | get_websocket_manager, 14 | ) 15 | from api.routers.process import process_router 16 | from api.routers.config import config_router 17 | from api.routers.health import health_router 18 | from api.routers.logs import logs_router 19 | from api.routers.websocket_logs import websocket_router 20 | from utils.config_loader import CONFIG_MANAGER 21 | import threading, tomllib 22 | 23 | 24 | @asynccontextmanager 25 | async def lifespan(app: FastAPI): 26 | yield 27 | websocket_manager = get_websocket_manager() 28 | logger = get_logger() 29 | logger.info("Shutting down WebSocket manager...") 30 | await websocket_manager.shutdown() 31 | logger.info("WebSocket manager shutdown complete.") 32 | 33 | 34 | def get_version_from_pyproject(path="pyproject.toml") -> str: 35 | try: 36 | with open(path, "rb") as f: 37 | data = tomllib.load(f) 38 | return data["project"]["version"] 39 | except Exception: 40 | return "0.0.0" 41 | 42 | 43 | def create_app() -> FastAPI: 44 | app = FastAPI( 45 | title="Debrid Media Bridge", 46 | version=get_version_from_pyproject(), 47 | redoc_url=None, 48 | lifespan=lifespan, 49 | ) 50 | 51 | app.dependency_overrides[get_process_handler] = get_process_handler 52 | app.dependency_overrides[get_logger] = get_logger 53 | app.dependency_overrides[get_api_state] = get_api_state 54 | app.dependency_overrides[get_websocket_manager] = get_websocket_manager 55 | 56 | app.include_router(process_router, prefix="/process", tags=["Process Management"]) 57 | app.include_router(config_router, prefix="/config", tags=["Configuration"]) 58 | app.include_router(health_router, prefix="/health", tags=["Health"]) 59 | app.include_router(logs_router, prefix="/logs", tags=["Logs"]) 60 | app.include_router(websocket_router, prefix="/ws", tags=["WebSocket Logs"]) 61 | 62 | @app.get("/scalar", include_in_schema=False) 63 | async def scalar_docs(): 64 | return get_scalar_api_reference( 65 | openapi_url=app.openapi_url, 66 | title=app.title, 67 | ) 68 | 69 | logger = get_logger() 70 | for route in app.routes: 71 | if isinstance(route, APIRoute): 72 | logger.debug(f"Route: {route.path} | Methods: {route.methods}") 73 | elif isinstance(route, APIWebSocketRoute): 74 | logger.debug(f"WebSocket Route: {route.path}") 75 | 76 | origin_from_config = ( 77 | 
CONFIG_MANAGER.config.get("dmb", {}).get("frontend", {}).get("origins", None) 78 | ) 79 | origins = ( 80 | [origin_from_config] 81 | if origin_from_config 82 | else ["http://localhost", "http://localhost:8000"] 83 | ) 84 | logger.info(f"Allowed CORS origins set to: {origins}") 85 | 86 | app.add_middleware( 87 | CORSMiddleware, 88 | allow_origins=["*"], 89 | allow_credentials=True, 90 | allow_methods=["*"], 91 | allow_headers=["*"], 92 | ) 93 | 94 | return app 95 | 96 | 97 | def start_fastapi_process(): 98 | app = create_app() 99 | 100 | host = ( 101 | CONFIG_MANAGER.config.get("dmb", {}) 102 | .get("api_service", {}) 103 | .get("host", "0.0.0.0") 104 | ) 105 | port = CONFIG_MANAGER.config.get("dmb", {}).get("api_service", {}).get("port", 8000) 106 | log_level = ( 107 | CONFIG_MANAGER.config.get("dmb", {}) 108 | .get("api_service", {}) 109 | .get("log_level", "info") 110 | .lower() 111 | ) 112 | logger = get_logger() 113 | 114 | def run_server(): 115 | config = Config( 116 | app=app, 117 | host=host, 118 | port=port, 119 | log_config=None, 120 | log_level=log_level, 121 | ) 122 | server = Server(config) 123 | server.run() 124 | 125 | uvicorn_thread = threading.Thread(target=run_server, daemon=True) 126 | uvicorn_thread.start() 127 | logger.info(f"Started FastAPI server at {host}:{port}") 128 | -------------------------------------------------------------------------------- /api/api_state.py: -------------------------------------------------------------------------------- 1 | import os 2 | from json import load 3 | 4 | 5 | class APIState: 6 | def __init__(self, process_handler, logger): 7 | self.logger = logger 8 | self.process_handler = process_handler 9 | self.status_file_path = "/healthcheck/running_processes.json" 10 | os.makedirs(os.path.dirname(self.status_file_path), exist_ok=True) 11 | self.service_status = self._load_status_from_file() 12 | self.shutdown_in_progress = set() 13 | 14 | def _load_status_from_file(self): 15 | try: 16 | with open(self.status_file_path, "r") as f: 17 | data = load(f) 18 | return data 19 | except FileNotFoundError: 20 | self.logger.debug( 21 | f"Status file {self.status_file_path} not found. Initializing empty status." 
22 | ) 23 | return {} 24 | except Exception as e: 25 | self.logger.error(f"Error loading status file: {e}") 26 | return {} 27 | 28 | def get_status(self, process_name): 29 | running_processes = self._load_status_from_file() 30 | 31 | def normalize(name): 32 | return name.replace(" ", "").replace("/ ", "/").strip().lower() 33 | 34 | normalized_input = normalize(process_name) 35 | if normalized_input == "dmbapi": 36 | return "running" 37 | for stored_name in running_processes: 38 | normalized_stored_name = normalize(stored_name) 39 | if normalized_input == normalized_stored_name: 40 | return "running" 41 | return "stopped" 42 | 43 | def debug_state(self): 44 | self.logger.info(f"Current APIState: {self.service_status}") 45 | -------------------------------------------------------------------------------- /api/connection_manager.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from typing import List 3 | from fastapi import WebSocket 4 | 5 | 6 | class ConnectionManager: 7 | def __init__(self): 8 | self.active_connections: List[WebSocket] = [] 9 | self.lock = asyncio.Lock() 10 | 11 | async def connect(self, websocket: WebSocket): 12 | await websocket.accept() 13 | if websocket not in self.active_connections: 14 | self.active_connections.append(websocket) 15 | 16 | async def disconnect(self, websocket: WebSocket): 17 | async with self.lock: 18 | if websocket in self.active_connections: 19 | self.active_connections.remove(websocket) 20 | 21 | async def broadcast(self, message: str): 22 | tasks = [ 23 | asyncio.create_task(connection.send_text(message)) 24 | for connection in self.active_connections 25 | ] 26 | await asyncio.gather(*tasks, return_exceptions=True) 27 | 28 | async def shutdown(self): 29 | self.active_connections.clear() 30 | -------------------------------------------------------------------------------- /api/routers/health.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter, HTTPException 2 | import subprocess 3 | 4 | health_router = APIRouter() 5 | 6 | 7 | @health_router.get("") 8 | async def health_check(): 9 | try: 10 | result = subprocess.run( 11 | ["bash", "-c", "source /venv/bin/activate && python /healthcheck.py"], 12 | capture_output=True, 13 | text=True, 14 | ) 15 | if result.returncode != 0: 16 | return {"status": "unhealthy", "details": result.stderr.strip()} 17 | return {"status": "healthy"} 18 | except Exception as e: 19 | raise HTTPException(status_code=500, detail=f"Failed to run health check: {e}") 20 | -------------------------------------------------------------------------------- /api/routers/logs.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter, Depends, Query 2 | from pathlib import Path 3 | from utils.dependencies import get_logger 4 | from utils.config_loader import CONFIG_MANAGER 5 | import os, re, asyncio 6 | 7 | logs_router = APIRouter() 8 | 9 | 10 | def find_log_file(process_name: str, logger): 11 | logger.debug(f"Looking up process: {process_name}") 12 | 13 | if "dmb" in process_name.lower(): 14 | log_dir = Path("/log") 15 | if log_dir.exists(): 16 | log_files = sorted( 17 | log_dir.glob("DMB-*.log"), key=os.path.getmtime, reverse=True 18 | ) 19 | return log_files[0] if log_files else None 20 | 21 | key, instance_name = CONFIG_MANAGER.find_key_for_process(process_name) 22 | logger.debug(f"Found key: {key}, instance: {instance_name}") 23 | if not key: 24 | 
logger.debug(f"No log file found for {process_name}") 25 | return None 26 | 27 | service_config = CONFIG_MANAGER.get_instance(instance_name, key) 28 | if not service_config: 29 | logger.debug(f"No service config found for {process_name}") 30 | return None 31 | 32 | if "log_file" in service_config: 33 | return Path(service_config["log_file"]) 34 | 35 | if "config_file" in service_config: 36 | log_dir = Path(service_config["config_file"]).parent / "logs" 37 | if log_dir.exists(): 38 | log_files = sorted( 39 | log_dir.glob("*.log"), key=os.path.getmtime, reverse=True 40 | ) 41 | return log_files[0] if log_files else None 42 | 43 | if "config_dir" in service_config: 44 | log_dir = Path(service_config["config_dir"]) / "logs" 45 | if log_dir.exists(): 46 | log_files = sorted( 47 | log_dir.glob("*.log"), key=os.path.getmtime, reverse=True 48 | ) 49 | return log_files[0] if log_files else None 50 | 51 | if "zurg" in process_name.lower() and "config_dir" in service_config: 52 | log_path = Path(service_config["config_dir"]) / "logs" / "zurg.log" 53 | if log_path.exists(): 54 | return log_path 55 | 56 | logger.debug(f"No log file found for {process_name}") 57 | return None 58 | 59 | 60 | def filter_dmb_log(log_path, logger): 61 | logger.debug(f"Filtering DMB log for latest startup from {log_path}") 62 | try: 63 | with open(log_path, "r") as log_file: 64 | lines = log_file.readlines() 65 | 66 | for i in range(len(lines) - 1, -1, -1): 67 | if i + 2 < len(lines): 68 | try: 69 | if re.match(r"^.* - INFO - ", lines[i]) and re.match( 70 | r"^\s*DDDDDDDDDDDDD", lines[i + 2] 71 | ): 72 | logger.debug(f"Found latest DMB startup banner at line {i}") 73 | return "".join(lines[i:]) 74 | except Exception as e: 75 | logger.warning(f"Error matching log lines at index {i}: {e}") 76 | 77 | logger.warning("No DMB startup banner found; returning full log") 78 | return "".join(lines) 79 | 80 | except Exception as e: 81 | logger.error(f"Error filtering DMB log file: {e}") 82 | return "" 83 | 84 | 85 | def _read_log_for_process(process_name: str, logger): 86 | log_path = find_log_file(process_name, logger) 87 | logger.debug(f"Resolved log path: {log_path}") 88 | if not log_path or not log_path.exists(): 89 | return "" 90 | 91 | try: 92 | if "dmb" in process_name.lower(): 93 | return filter_dmb_log(log_path, logger) 94 | else: 95 | with open(log_path, "r") as log_file: 96 | return log_file.read() 97 | except Exception as e: 98 | logger.error(f"Error reading log file for {process_name}: {e}") 99 | return "" 100 | 101 | 102 | @logs_router.get("") 103 | async def get_log_file( 104 | process_name: str = Query(..., description="The process name"), 105 | logger=Depends(get_logger), 106 | ): 107 | loop = asyncio.get_running_loop() 108 | log_content = await loop.run_in_executor( 109 | None, lambda: _read_log_for_process(process_name, logger) 110 | ) 111 | 112 | return { 113 | "process_name": process_name, 114 | "log": log_content, 115 | } 116 | -------------------------------------------------------------------------------- /api/routers/process.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter, HTTPException, Depends, Query 2 | from pydantic import BaseModel 3 | from utils.dependencies import get_process_handler, get_logger, get_api_state 4 | from utils.config_loader import CONFIG_MANAGER, find_service_config 5 | from utils.setup import setup_project 6 | from utils.versions import Versions 7 | 8 | 9 | class ServiceRequest(BaseModel): 10 | process_name: str 11 | 
12 | 13 | process_router = APIRouter() 14 | versions = Versions() 15 | 16 | STATIC_URLS_BY_KEY = { 17 | "rclone": "https://rclone.org", 18 | "pgadmin": "https://www.pgadmin.org/", 19 | "postgres": "https://www.postgresql.org/", 20 | "dmb_api_service": "https://github.com/I-am-PUID-0/DMB", 21 | "cli_battery": "https://github.com/godver3/cli_debrid/tree/main/cli_battery", 22 | } 23 | 24 | 25 | @process_router.get("/") 26 | async def fetch_process(process_name: str = Query(...), logger=Depends(get_logger)): 27 | try: 28 | if not process_name: 29 | raise HTTPException(status_code=400, detail="process_name is required") 30 | 31 | config = find_service_config(CONFIG_MANAGER.config, process_name) 32 | if not config: 33 | raise HTTPException(status_code=404, detail="Process not found") 34 | 35 | config_key, instance_name = CONFIG_MANAGER.find_key_for_process(process_name) 36 | version, _ = versions.version_check( 37 | process_name=config.get("process_name"), 38 | instance_name=instance_name, 39 | key=config_key, 40 | ) 41 | 42 | return { 43 | "process_name": process_name, 44 | "config": config, 45 | "version": version, 46 | "config_key": config_key, 47 | } 48 | except Exception as e: 49 | logger.error(f"Failed to load process: {e}") 50 | raise HTTPException(status_code=500, detail="Failed to load process") 51 | 52 | 53 | @process_router.get("/processes") 54 | async def fetch_processes(logger=Depends(get_logger)): 55 | """Return all configured processes with their enabled state, version, and repo metadata.""" 56 | try: 57 | processes = [] 58 | config = CONFIG_MANAGER.config 59 | 60 | def find_processes(data, parent_key=""): 61 | if isinstance(data, dict): 62 | for key, value in data.items(): 63 | if isinstance(value, dict) and "process_name" in value: 64 | process_name = value.get("process_name") 65 | enabled = value.get("enabled", False) 66 | display_name = f"{parent_key} {key}".strip() 67 | config_key, instance_name = CONFIG_MANAGER.find_key_for_process( 68 | process_name 69 | ) 70 | version, _ = versions.version_check( 71 | process_name=value.get("process_name"), 72 | instance_name=instance_name, 73 | key=config_key, 74 | ) 75 | repo_owner = value.get("repo_owner") 76 | repo_name = value.get("repo_name") 77 | if repo_owner and repo_name: 78 | repo_url = f"https://github.com/{repo_owner}/{repo_name}" 79 | else: 80 | repo_url = STATIC_URLS_BY_KEY.get(config_key) 81 | processes.append( 82 | { 83 | "name": display_name, 84 | "process_name": process_name, 85 | "enabled": enabled, 86 | "config": value, 87 | "version": version, 88 | "key": key, 89 | "config_key": config_key, 90 | "repo_url": repo_url, 91 | } 92 | ) 93 | elif isinstance(value, dict): 94 | find_processes(value, parent_key=f"{parent_key} {key}".strip()) 95 | 96 | find_processes(config) 97 | return {"processes": processes} 98 | except Exception as e: 99 | logger.error(f"Failed to load processes: {e}") 100 | raise HTTPException(status_code=500, detail="Failed to load processes") 101 | 102 | 103 | @process_router.post("/start-service") 104 | async def start_service( 105 | request: ServiceRequest, 106 | process_handler=Depends(get_process_handler), 107 | logger=Depends(get_logger), 108 | ): 109 | process_name = request.process_name 110 | service_config = find_service_config(CONFIG_MANAGER.config, process_name) 111 | 112 | if not service_config: 113 | raise HTTPException(status_code=404, detail="Service not enabled or found") 114 | 115 | if process_name in process_handler.setup_tracker: 116 | process_handler.setup_tracker.remove(process_name) 117 | success, error = setup_project(process_handler, process_name) 118 | if not
success: 119 | raise HTTPException( 120 | status_code=500, detail=f"Failed to setup project: {error}" 121 | ) 122 | 123 | service_config["enabled"] = True 124 | command = service_config.get("command") 125 | if any("{" in c for c in command): 126 | success, error = setup_project(process_handler, process_name) 127 | if not success: 128 | raise HTTPException( 129 | status_code=500, detail=f"Failed to setup project: {error}" 130 | ) 131 | command = service_config.get("command") 132 | 133 | env = service_config.get("env") 134 | if env is not None: 135 | logger.debug(f"Checking for variables in service config. {env}") 136 | if any("{" in c for c in env): 137 | success, error = setup_project(process_handler, process_name) 138 | if not success: 139 | raise HTTPException( 140 | status_code=500, detail=f"Failed to setup project: {error}" 141 | ) 142 | env = service_config.get("env") 143 | 144 | config_dir = service_config.get("config_dir") 145 | suppress_logging = service_config.get("suppress_logging", False) 146 | logger.info(f"Starting {process_name} with command: {command}") 147 | 148 | try: 149 | process, error = process_handler.start_process( 150 | process_name=process_name, 151 | config_dir=config_dir, 152 | command=command, 153 | instance_name=None, 154 | suppress_logging=suppress_logging, 155 | env=env, 156 | ) 157 | if not process: 158 | raise Exception(f"Error starting {process_name}: {error}") 159 | elif process: 160 | logger.info(f"{process_name} started successfully.") 161 | return { 162 | "status": "Service started successfully", 163 | "process_name": process_name, 164 | } 165 | except Exception as e: 166 | detailed_error = f"Service '{process_name}' could not be started due to an internal error: {str(e)}" 167 | logger.error(detailed_error) 168 | raise HTTPException( 169 | status_code=500, 170 | detail=f"Unable to start the service '{process_name}'. 
Please check the logs for more details.", 171 | ) 172 | 173 | 174 | @process_router.post("/stop-service") 175 | async def stop_service( 176 | request: ServiceRequest, 177 | process_handler=Depends(get_process_handler), 178 | logger=Depends(get_logger), 179 | api_state=Depends(get_api_state), 180 | ): 181 | process_name = request.process_name 182 | logger.info(f"Received request to stop {process_name}") 183 | 184 | # Check if the service exists and is enabled 185 | # service_config = CONFIG_MANAGER.config.get(process_name) 186 | # logger.debug(f"Service config: {service_config}") 187 | # if not service_config or not service_config.get("enabled", False): 188 | # raise HTTPException(status_code=404, detail="Service not enabled or found") 189 | 190 | if process_name in api_state.shutdown_in_progress: 191 | return { 192 | "status": "Shutdown already in progress", 193 | "process_name": process_name, 194 | } 195 | 196 | try: 197 | api_state.shutdown_in_progress.add(process_name) 198 | logger.debug(f"Shutdown in progress: {api_state.shutdown_in_progress}") 199 | process_handler.stop_process(process_name) 200 | logger.info(f"{process_name} stopped successfully.") 201 | return { 202 | "status": "Service stopped successfully", 203 | "process_name": process_name, 204 | } 205 | except Exception as e: 206 | logger.error(f"Failed to stop {process_name}: {e}") 207 | raise HTTPException( 208 | status_code=500, detail=f"Failed to stop {process_name}: {str(e)}" 209 | ) 210 | finally: 211 | api_state.shutdown_in_progress.remove(process_name) 212 | 213 | 214 | @process_router.post("/restart-service") 215 | async def restart_service( 216 | request: ServiceRequest, 217 | process_handler=Depends(get_process_handler), 218 | logger=Depends(get_logger), 219 | api_state=Depends(get_api_state), 220 | ): 221 | process_name = request.process_name 222 | logger.info(f"Received request to restart {process_name}") 223 | 224 | try: 225 | process_handler.stop_process(process_name) 226 | logger.info(f"{process_name} stopped successfully.") 227 | 228 | service_config = find_service_config(CONFIG_MANAGER.config, process_name) 229 | if not service_config: 230 | raise HTTPException( 231 | status_code=404, detail="Service configuration not found." 232 | ) 233 | 234 | if process_name in process_handler.setup_tracker: 235 | process_handler.setup_tracker.remove(process_name) 236 | success, error = setup_project(process_handler, process_name) 237 | if not success: 238 | raise HTTPException( 239 | status_code=500, detail=f"Failed to setup project: {error}" 240 | ) 241 | 242 | process_handler.start_process( 243 | process_name=process_name, 244 | config_dir=service_config.get("config_dir"), 245 | command=service_config.get("command"), 246 | suppress_logging=service_config.get("suppress_logging", False), 247 | env=service_config.get("env"), 248 | ) 249 | logger.info(f"{process_name} started successfully.") 250 | 251 | status = api_state.get_status(process_name) 252 | if status != "running": 253 | raise HTTPException( 254 | status_code=500, 255 | detail=f"Service did not restart successfully. 
Current status: {status}", 256 | ) 257 | 258 | return { 259 | "status": "Service restarted successfully", 260 | "process_name": process_name, 261 | } 262 | except Exception as e: 263 | logger.error(f"Failed to restart {process_name}: {e}") 264 | raise HTTPException( 265 | status_code=500, detail=f"Failed to restart {process_name}: {str(e)}" 266 | ) 267 | 268 | 269 | @process_router.get("/service-status") 270 | async def service_status( 271 | process_name: str = Query(..., description="The name of the process to check"), 272 | api_state=Depends(get_api_state), 273 | ): 274 | status = api_state.get_status(process_name) 275 | return {"process_name": process_name, "status": status} 276 | -------------------------------------------------------------------------------- /api/routers/websocket_logs.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter, WebSocket, WebSocketDisconnect, Depends 2 | from utils.dependencies import get_websocket_manager 3 | 4 | websocket_router = APIRouter() 5 | 6 | 7 | @websocket_router.websocket("/logs") 8 | async def websocket_logs( 9 | websocket: WebSocket, websocket_manager=Depends(get_websocket_manager) 10 | ): 11 | await websocket_manager.connect(websocket) 12 | try: 13 | while True: 14 | await websocket.receive_text() 15 | except WebSocketDisconnect: 16 | await websocket_manager.disconnect(websocket) 17 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | DMB: 3 | container_name: DMB 4 | image: iampuid0/dmb:latest ## Optionally, specify a specific version of DMB w/ image: iampuid0/dmb:2.0.0 5 | stop_grace_period: 30s ## Adjust as need to allow for graceful shutdown of the container 6 | shm_size: 128mb ## Increased for PostgreSQL 7 | stdin_open: true ## docker run -i 8 | tty: true ## docker run -t 9 | volumes: 10 | - /home/username/docker/DMB/config:/config ## Location of configuration files. If a Zurg config.yml and/or Zurg app is placed here, it will be used to override the default configuration and/or app used at startup. 
11 | - /home/username/docker/DMB/log:/log ## Location for logs 12 | - /home/username/docker/DMB/Zurg/RD:/zurg/RD ## Location for Zurg RealDebrid active configuration 13 | - /home/username/docker/DMB/Zurg/mnt:/data:shared ## Location for rclone mount to host 14 | - /home/username/docker/DMB/Riven/data:/riven/backend/data ## Location for Riven backend data 15 | - /home/username/docker/DMB/Riven/mnt:/mnt ## Location for Riven symlinks 16 | - /home/username/docker/DMB/PostgreSQL/data:/postgres_data ## Location for PostgreSQL database 17 | - /home/username/docker/DMB/pgAdmin4/data:/pgadmin/data ## Location for pgAdmin 4 data 18 | - /home/username/docker/DMB/Zilean/data:/zilean/app/data ## Location for Zilean data 19 | - /home/username/docker/DMB/plex_debrid:/plex_debrid/config ## Location for plex_debrid data 20 | - /home/username/docker/DMB/cli_debrid:/cli_debrid/data ## Location for cli_debrid data 21 | - /home/username/docker/DMB/phalanx_db:/phalanx_db/data ## Location for phalanx_db data 22 | environment: 23 | - TZ= 24 | - PUID= 25 | - PGID= 26 | - DMB_LOG_LEVEL=INFO 27 | - ZURG_INSTANCES_REALDEBRID_API_KEY= 28 | - RIVEN_FRONTEND_ENV_ORIGIN=http://0.0.0.0:3000 ## See Riven documentation for more details 29 | # network_mode: container:gluetun ## Example to attach to gluetun vpn container if realdebrid blocks IP address 30 | ports: 31 | - "3005:3005" ## DMB Frontend 32 | - "3000:3000" ## Riven Frontend 33 | - "5050:5050" ## pgAdmin 4 34 | - "5000:5000" ## CLI Debrid Frontend 35 | devices: 36 | - /dev/fuse:/dev/fuse:rwm 37 | cap_add: 38 | - SYS_ADMIN 39 | security_opt: 40 | - apparmor:unconfined 41 | - no-new-privileges -------------------------------------------------------------------------------- /healthcheck.py: -------------------------------------------------------------------------------- 1 | import json 2 | import psutil 3 | import sys 4 | 5 | 6 | def load_running_processes(file_path="/healthcheck/running_processes.json"): 7 | try: 8 | with open(file_path, "r") as f: 9 | return json.load(f) 10 | except FileNotFoundError: 11 | print(f"Error: Running processes file not found: {file_path}", file=sys.stderr) 12 | sys.exit(1) 13 | except json.JSONDecodeError: 14 | print(f"Error: Failed to decode JSON in {file_path}", file=sys.stderr) 15 | sys.exit(1) 16 | 17 | 18 | def verify_processes(running_processes): 19 | error_messages = [] 20 | for process_name, pid in running_processes.items(): 21 | if not psutil.pid_exists(pid): 22 | error_messages.append( 23 | f"The process {process_name} (PID: {pid}) is not running." 
24 | ) 25 | return error_messages 26 | 27 | 28 | def main(): 29 | file_path = "/healthcheck/running_processes.json" 30 | running_processes = load_running_processes(file_path) 31 | errors = verify_processes(running_processes) 32 | 33 | if errors: 34 | print(" | ".join(errors), file=sys.stderr) 35 | sys.exit(1) 36 | else: 37 | print("All processes are healthy.") 38 | sys.exit(0) 39 | 40 | 41 | if __name__ == "__main__": 42 | main() 43 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | from utils.config_loader import CONFIG_MANAGER as config 2 | from utils.global_logger import logger, websocket_manager 3 | from utils import duplicate_cleanup, user_management 4 | from api.api_service import start_fastapi_process 5 | from utils.processes import ProcessHandler 6 | from utils.auto_update import Update 7 | from utils.dependencies import initialize_dependencies 8 | import subprocess, threading, time, tomllib 9 | from time import sleep 10 | 11 | from utils.setup import phalanx_setup 12 | 13 | 14 | def main(): 15 | 16 | with open("pyproject.toml", "rb") as file: 17 | pyproject = tomllib.load(file) 18 | version = pyproject["tool"]["poetry"]["version"] 19 | 20 | ascii_art = f""" 21 | 22 | DDDDDDDDDDDDD MMMMMMMM MMMMMMMMBBBBBBBBBBBBBBBBB 23 | D::::::::::::DDD M:::::::M M:::::::MB::::::::::::::::B 24 | D:::::::::::::::DD M::::::::M M::::::::MB::::::BBBBBB:::::B 25 | DDD:::::DDDDD:::::D M:::::::::M M:::::::::MBB:::::B B:::::B 26 | D:::::D D:::::D M::::::::::M M::::::::::M B::::B B:::::B 27 | D:::::D D:::::DM:::::::::::M M:::::::::::M B::::B B:::::B 28 | D:::::D D:::::DM:::::::M::::M M::::M:::::::M B::::BBBBBB:::::B 29 | D:::::D D:::::DM::::::M M::::M M::::M M::::::M B:::::::::::::BB 30 | D:::::D D:::::DM::::::M M::::M::::M M::::::M B::::BBBBBB:::::B 31 | D:::::D D:::::DM::::::M M:::::::M M::::::M B::::B B:::::B 32 | D:::::D D:::::DM::::::M M:::::M M::::::M B::::B B:::::B 33 | D:::::D D:::::D M::::::M MMMMM M::::::M B::::B B:::::B 34 | DDD:::::DDDDD:::::D M::::::M M::::::MBB:::::BBBBBB::::::B 35 | D:::::::::::::::DD M::::::M M::::::MB:::::::::::::::::B 36 | D::::::::::::DDD M::::::M M::::::MB::::::::::::::::B 37 | DDDDDDDDDDDDD MMMMMMMM MMMMMMMMBBBBBBBBBBBBBBBBB 38 | 39 | Version: {version} 40 | """ 41 | 42 | logger.info(ascii_art.format(version=version) + "\n" + "\n") 43 | 44 | process_handler = ProcessHandler(logger) 45 | updater = Update(process_handler) 46 | initialize_dependencies( 47 | process_handler=process_handler, 48 | updater=updater, 49 | websocket_manager=websocket_manager, 50 | logger=logger, 51 | ) 52 | 53 | if config.get("dmb", {}).get("api_service", {}).get("enabled"): 54 | start_fastapi_process() 55 | 56 | try: 57 | user_management.create_system_user() 58 | except Exception as e: 59 | logger.error(f"An error occurred while creating system user: {e}") 60 | process_handler.shutdown(exit_code=1) 61 | 62 | try: 63 | dmb_config = config.get("dmb", {}) 64 | frontend_config = dmb_config.get("frontend", {}) 65 | process_name = frontend_config.get("process_name") 66 | api_config = dmb_config.get("api_service", {}) 67 | if frontend_config.get("enabled") and api_config.get("enabled"): 68 | if frontend_config.get("auto_update", False): 69 | updater.auto_update(process_name, True) 70 | else: 71 | updater.auto_update(process_name, False) 72 | else: 73 | logger.info(f"{process_name} is disabled. 
Skipping process start.") 74 | except Exception as e: 75 | logger.error(f"An error occurred in the DMB Frontend setup: {e}") 76 | process_handler.shutdown(exit_code=1) 77 | 78 | try: 79 | key = "zurg" 80 | zurg_instances = config.get(key, {}).get("instances", {}) 81 | enabled_zurg_instances = [ 82 | name for name, instance in zurg_instances.items() if instance.get("enabled") 83 | ] 84 | 85 | if not enabled_zurg_instances: 86 | logger.info("No Zurg instances are enabled. Skipping Zurg setup.") 87 | else: 88 | for instance_name in enabled_zurg_instances: 89 | instance = zurg_instances[instance_name] 90 | process_name = instance.get("process_name") 91 | if instance.get("auto_update"): 92 | updater.auto_update(process_name, True) 93 | else: 94 | updater.auto_update(process_name, False) 95 | except Exception as e: 96 | logger.error(e) 97 | process_handler.shutdown(exit_code=1) 98 | 99 | try: 100 | key = "rclone" 101 | duplicate_cleanup_enabled = config.get("dmb", {}).get("duplicate_cleanup") 102 | rclone_instances = config.get(key, {}).get("instances", {}) 103 | enabled_rclone_instances = [ 104 | name 105 | for name, instance in rclone_instances.items() 106 | if instance.get("enabled") 107 | ] 108 | 109 | if not enabled_rclone_instances: 110 | logger.info("No rclone instances are enabled. Skipping rclone setup.") 111 | else: 112 | for instance_name in enabled_rclone_instances: 113 | instance_config = rclone_instances[instance_name] 114 | 115 | if mount_name := instance_config.get("mount_name"): 116 | logger.info( 117 | f"Configuring rclone for instance: {instance_name} with mount name: {mount_name}" 118 | ) 119 | try: 120 | # if duplicate_cleanup_enabled: 121 | # logger.info( 122 | # f"Duplicate cleanup is enabled for instance: {instance_name}" 123 | # ) 124 | # duplicate_cleanup.setup() 125 | 126 | process_name = instance_config.get("process_name") 127 | updater.auto_update(process_name, False) 128 | except Exception as e: 129 | logger.error( 130 | f"Error during rclone setup for instance {instance_name}: {e}" 131 | ) 132 | raise 133 | else: 134 | raise ValueError( 135 | f"No mount name found for rclone instance: {instance_name}" 136 | ) 137 | except Exception as e: 138 | logger.error(e) 139 | process_handler.shutdown(exit_code=1) 140 | 141 | try: 142 | plex_debrid_config = config.get("plex_debrid") or {} 143 | cli_debrid_config = config.get("cli_debrid") or {} 144 | cli_battery_config = config.get("cli_battery") or {} 145 | phalanx_db_config = config.get("phalanx_db") or {} 146 | postgres_config = config.get("postgres", {}) 147 | pgadmin_config = config.get("pgadmin", {}) 148 | riven_backend_config = config.get("riven_backend", {}) 149 | riven_frontend_config = config.get("riven_frontend", {}) 150 | zilean_config = config.get("zilean", {}) 151 | 152 | if postgres_config.get("enabled"): 153 | try: 154 | process_name = postgres_config.get("process_name") 155 | updater.auto_update(process_name, False) 156 | except Exception as e: 157 | logger.error(e) 158 | process_handler.shutdown(exit_code=1) 159 | 160 | if pgadmin_config.get("enabled"): 161 | try: 162 | process_name = pgadmin_config.get("process_name") 163 | updater.auto_update(process_name, False) 164 | except Exception as e: 165 | logger.error(e) 166 | process_handler.shutdown(exit_code=1) 167 | 168 | if zilean_config.get("enabled"): 169 | try: 170 | process_name = zilean_config.get("process_name") 171 | if zilean_config.get("auto_update", False): 172 | updater.auto_update(process_name, True) 173 | else: 174 | 
updater.auto_update(process_name, False) 175 | except Exception as e: 176 | logger.error(e) 177 | process_handler.shutdown(exit_code=1) 178 | 179 | if plex_debrid_config.get("enabled"): 180 | try: 181 | process_name = plex_debrid_config.get("process_name") 182 | if plex_debrid_config.get("auto_update", False): 183 | updater.auto_update(process_name, True) 184 | else: 185 | updater.auto_update(process_name, False) 186 | except Exception as e: 187 | logger.error(e) 188 | process_handler.shutdown(exit_code=1) 189 | 190 | if phalanx_db_config.get("enabled"): 191 | try: 192 | process_name = phalanx_db_config.get("process_name") 193 | if phalanx_db_config.get("auto_update", False): 194 | updater.auto_update(process_name, True) 195 | else: 196 | updater.auto_update(process_name, False) 197 | except Exception as e: 198 | logger.error(e) 199 | process_handler.shutdown(exit_code=1) 200 | 201 | if cli_battery_config.get("enabled"): 202 | try: 203 | process_name = cli_battery_config.get("process_name") 204 | updater.auto_update(process_name, False) 205 | except Exception as e: 206 | logger.error(e) 207 | process_handler.shutdown(exit_code=1) 208 | 209 | if cli_debrid_config.get("enabled"): 210 | try: 211 | process_name = cli_debrid_config.get("process_name") 212 | if cli_debrid_config.get("auto_update", False): 213 | updater.auto_update(process_name, True) 214 | else: 215 | updater.auto_update(process_name, False) 216 | except Exception as e: 217 | logger.error(e) 218 | process_handler.shutdown(exit_code=1) 219 | 220 | sleep(10) 221 | 222 | if riven_backend_config.get("enabled"): 223 | try: 224 | process_name = riven_backend_config.get("process_name") 225 | if riven_backend_config.get("auto_update", False): 226 | updater.auto_update(process_name, True) 227 | else: 228 | updater.auto_update(process_name, False) 229 | except Exception as e: 230 | logger.error(e) 231 | process_handler.shutdown(exit_code=1) 232 | 233 | if riven_frontend_config.get("enabled"): 234 | try: 235 | process_name = riven_frontend_config.get("process_name") 236 | if riven_frontend_config.get("auto_update", False): 237 | updater.auto_update(process_name, True) 238 | else: 239 | updater.auto_update(process_name, False) 240 | except Exception as e: 241 | logger.error(e) 242 | process_handler.shutdown(exit_code=1) 243 | 244 | except Exception as e: 245 | logger.error(e) 246 | process_handler.shutdown(exit_code=1) 247 | 248 | def healthcheck(): 249 | time.sleep(60) 250 | while True: 251 | time.sleep(10) 252 | try: 253 | result = subprocess.run( 254 | ["python", "healthcheck.py"], capture_output=True, text=True 255 | ) 256 | if result.stderr: 257 | logger.error(result.stderr.strip()) 258 | except Exception as e: 259 | logger.error("Error running healthcheck.py: %s", e) 260 | time.sleep(50) 261 | 262 | thread = threading.Thread(target=healthcheck, daemon=True) 263 | thread.start() 264 | 265 | def perpetual_wait(): 266 | stop_event = threading.Event() 267 | stop_event.wait() 268 | 269 | perpetual_wait() 270 | 271 | 272 | if __name__ == "__main__": 273 | main() 274 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "DMB" 3 | version = "6.12.1" 4 | description = "Debrid Media Bridge (DMB) is an all-in-one, containerized platform for managing and automating media workflows using premium debrid services like Real-Debrid." 
5 | authors = ["I-am-PUID-0 <36779668+I-am-PUID-0@users.noreply.github.com>"] 6 | readme = "README.md" 7 | 8 | [tool.poetry.dependencies] 9 | python = ">=3.11" 10 | python-dotenv = ">=1.1.0,<2.0.0" 11 | regex = ">=2024.11.6,<2025.0.0" 12 | schedule = ">=1.2.2,<2.0.0" 13 | psutil = ">=7.0.0,<8.0.0" 14 | plexapi = ">=4.16.1,<5.0.0" 15 | requests = ">=2.32.3,<3.0.0" 16 | packaging = ">=24.2,<26.0" 17 | ruamel-yaml = ">=0.18.10,<0.19.0" 18 | colorlog = ">=6.9.0,<7.0.0" 19 | pydantic = ">=2.11.2,<3.0.0" 20 | fastapi = ">=0.115.12,<0.116.0" 21 | uvicorn = ">=0.34.0,<0.35.0" 22 | websockets = ">=15.0.1,<16.0.0" 23 | jsonschema = ">=4.23.0,<5.0.0" 24 | scalar-fastapi = ">=1.0.3,<2.0.0" 25 | psycopg2 = "==2.9.10" 26 | pyyaml = ">=6.0.2,<7.0.0" 27 | 28 | [tool.poetry.group.dev.dependencies] 29 | black = ">=25.1.0,<26.0.0" 30 | 31 | [build-system] 32 | requires = ["poetry-core>=2.0.0,<3.0.0"] 33 | build-backend = "poetry.core.masonry.api" 34 | -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /utils/auto_update.py: -------------------------------------------------------------------------------- 1 | from utils.global_logger import logger 2 | from utils.logger import format_time 3 | from utils.versions import Versions 4 | from utils.setup import setup_project, setup_release_version 5 | from utils.config_loader import CONFIG_MANAGER 6 | import threading, time, os, schedule, requests 7 | 8 | 9 | class Update: 10 | _scheduler_initialized = False 11 | _jobs = {} 12 | 13 | def __init__(self, process_handler): 14 | self.process_handler = process_handler 15 | self.logger = process_handler.logger 16 | self.updating = threading.Lock() 17 | 18 | if not Update._scheduler_initialized: 19 | self.scheduler = schedule.Scheduler() 20 | Update._scheduler_initialized = True 21 | else: 22 | self.scheduler = schedule.default_scheduler 23 | 24 | def update_schedule(self, process_name, config, key, instance_name): 25 | interval_minutes = int(self.auto_update_interval(process_name, config) * 60) 26 | self.logger.debug( 27 | f"Scheduling automatic update check every {interval_minutes} minutes for {process_name}" 28 | ) 29 | 30 | if process_name not in Update._jobs: 31 | self.scheduler.every(interval_minutes).minutes.do( 32 | self.scheduled_update_check, process_name, config, key, instance_name 33 | ) 34 | Update._jobs[process_name] = True 35 | self.logger.debug( 36 | f"Scheduled automatic update check for {process_name}, w/ key: {key}, and job ID: {id(self.scheduler.jobs[-1])}" 37 | ) 38 | 39 | while not self.process_handler.shutting_down: 40 | self.scheduler.run_pending() 41 | time.sleep(1) 42 | 43 | def auto_update_interval(self, process_name, config): 44 | default_interval = 24 45 | try: 46 | interval = config.get("auto_update_interval", default_interval) 47 | except Exception as e: 48 | self.logger.error( 49 | f"Failed to retrieve auto_update_interval for {process_name}: {e}" 50 | ) 51 | interval = default_interval 52 | 53 | return interval 54 | 55 | def auto_update(self, process_name, enable_update): 56 | key, instance_name = CONFIG_MANAGER.find_key_for_process(process_name) 57 | config = CONFIG_MANAGER.get_instance(instance_name, key) 58 | if not config: 59 | raise ValueError(f"Configuration for {process_name} not found.") 60 | if enable_update: 61 | self.logger.info( 62 | f"Automatic updates set to 
{format_time(self.auto_update_interval(process_name, config))} for {process_name}" 63 | ) 64 | self.schedule_thread = threading.Thread( 65 | target=self.update_schedule, 66 | args=(process_name, config, key, instance_name), 67 | ) 68 | self.schedule_thread.start() 69 | self.initial_update_check(process_name, config, key, instance_name) 70 | else: 71 | self.logger.info(f"Automatic update disabled for {process_name}") 72 | success, error = setup_project(self.process_handler, process_name) 73 | if not success: 74 | raise RuntimeError(error) 75 | self.start_process(process_name, config, key, instance_name) 76 | 77 | def initial_update_check(self, process_name, config, key, instance_name): 78 | with self.updating: 79 | self.logger.info(f"Performing initial update check for {process_name}") 80 | success, error = self.update_check(process_name, config, key, instance_name) 81 | if not success: 82 | if "No updates available" in error: 83 | self.logger.info(error) 84 | from utils.setup import setup_project 85 | 86 | success, error = setup_project(self.process_handler, process_name) 87 | if not success: 88 | return False, f"Failed to set up {process_name}: {error}" 89 | 90 | self.start_process(process_name, config, key, instance_name) 91 | else: 92 | raise RuntimeError(error) 93 | 94 | def scheduled_update_check(self, process_name, config, key, instance_name): 95 | with self.updating: 96 | self.logger.info(f"Performing scheduled update check for {process_name}") 97 | success, error = self.update_check(process_name, config, key, instance_name) 98 | if not success: 99 | if "No updates available" in error: 100 | self.logger.info(error) 101 | # self.start_process(process_name, config, key, instance_name) 102 | else: 103 | raise RuntimeError(error) 104 | 105 | def update_check(self, process_name, config, key, instance_name): 106 | if "nightly" in config["release_version"].lower(): 107 | nightly = True 108 | prerelease = False 109 | self.logger.info(f"Checking for nightly updates for {process_name}.") 110 | elif "prerelease" in config["release_version"].lower(): 111 | nightly = False 112 | prerelease = True 113 | self.logger.info(f"Checking for prerelease updates for {process_name}.") 114 | else: 115 | nightly = False 116 | prerelease = False 117 | self.logger.info(f"Checking for stable updates for {process_name}.") 118 | 119 | versions = Versions() 120 | try: 121 | repo_owner = config["repo_owner"] 122 | repo_name = config["repo_name"] 123 | update_needed, update_info = versions.compare_versions( 124 | process_name, 125 | repo_owner, 126 | repo_name, 127 | instance_name, 128 | key, 129 | nightly=nightly, 130 | prerelease=prerelease, 131 | ) 132 | 133 | if not update_needed: 134 | return False, f"{update_info.get('message')} for {process_name}." 135 | 136 | self.logger.info( 137 | f"Updating {process_name} from {update_info.get('current_version')} to {update_info.get('latest_version')}." 138 | ) 139 | if process_name in self.process_handler.process_names: 140 | self.stop_process(process_name) 141 | if process_name in self.process_handler.setup_tracker: 142 | self.process_handler.setup_tracker.remove(process_name) 143 | release_version = f"{update_info.get('latest_version')}" 144 | if not prerelease and not nightly: 145 | config["release_version"] = release_version 146 | self.logger.info( 147 | f"Updating {process_name} config to {release_version}." 
148 | ) 149 | success, error = setup_release_version( 150 | self.process_handler, config, process_name, key 151 | ) 152 | if not success: 153 | return ( 154 | False, 155 | f"Failed to update {process_name} to {release_version}: {error}", 156 | ) 157 | success, error = setup_project(self.process_handler, process_name) 158 | if not success: 159 | return ( 160 | False, 161 | f"Failed to update {process_name} to {release_version}: {error}", 162 | ) 163 | self.start_process(process_name, config, key, instance_name) 164 | return True, f"Updated {process_name} to {release_version}." 165 | 166 | except Exception as e: 167 | return False, f"Update check failed for {process_name}: {e}" 168 | 169 | def stop_process(self, process_name): 170 | self.process_handler.stop_process(process_name) 171 | 172 | def start_process(self, process_name, config, key, instance_name): 173 | if config.get("wait_for_dir", False): 174 | while not os.path.exists(wait_dir := config["wait_for_dir"]): 175 | self.logger.info( 176 | f"Waiting for directory {wait_dir} to become available before starting {process_name}" 177 | ) 178 | time.sleep(10) 179 | 180 | if config.get("wait_for_url", False): 181 | wait_for_urls = config["wait_for_url"] 182 | time.sleep(5) 183 | start_time = time.time() 184 | 185 | for wait_entry in wait_for_urls: 186 | wait_url = wait_entry["url"] 187 | auth = wait_entry.get("auth", None) 188 | 189 | logger.info( 190 | f"Waiting to start {process_name} until {wait_url} is accessible." 191 | ) 192 | 193 | while time.time() - start_time < 600: 194 | try: 195 | if auth: 196 | response = requests.get( 197 | wait_url, auth=(auth["user"], auth["password"]) 198 | ) 199 | # logger.debug( 200 | # f"Authenticating to {wait_url} with {auth['user']}:{auth['password']}" 201 | # ) 202 | else: 203 | response = requests.get(wait_url) 204 | 205 | if 200 <= response.status_code < 300: 206 | logger.info( 207 | f"{wait_url} is accessible with {response.status_code}." 208 | ) 209 | break 210 | else: 211 | logger.debug( 212 | f"Received status code {response.status_code} while waiting for {wait_url} to be accessible." 213 | ) 214 | except requests.RequestException as e: 215 | logger.debug(f"Waiting for {wait_url}: {e}") 216 | time.sleep(5) 217 | else: 218 | raise RuntimeError( 219 | f"Timeout: {wait_url} is not accessible after 600 seconds." 
220 | ) 221 | 222 | command = config["command"] 223 | config_dir = config["config_dir"] 224 | 225 | if config.get("suppress_logging", False): 226 | self.logger.info(f"Suppressing {process_name} logging") 227 | suppress_logging = True 228 | else: 229 | suppress_logging = False 230 | 231 | if key == "riven_backend": 232 | if not os.path.exists(os.path.join(config_dir, "data", "settings.json")): 233 | from utils.riven_settings import set_env_variables 234 | 235 | logger.info("Riven initial setup for first run") 236 | threading.Thread(target=set_env_variables).start() 237 | 238 | env = os.environ.copy() 239 | env.update(config.get("env", {})) 240 | 241 | self.process_handler.start_process( 242 | process_name, 243 | config_dir, 244 | command, 245 | instance_name, 246 | suppress_logging=suppress_logging, 247 | env=env, 248 | ) 249 | if key == "riven_backend": 250 | from utils.riven_settings import load_settings 251 | 252 | time.sleep(10) 253 | load_settings() 254 | -------------------------------------------------------------------------------- /utils/config_loader.py: -------------------------------------------------------------------------------- 1 | import os, shutil, copy 2 | from json import load, dump, JSONDecodeError 3 | from jsonschema import validate, ValidationError 4 | from dotenv import load_dotenv, find_dotenv 5 | from collections import OrderedDict 6 | 7 | 8 | class ConfigManager: 9 | def __init__( 10 | self, 11 | file_path="/config/dmb_config.json", 12 | schema_path="/utils/dmb_config_schema.json", 13 | ): 14 | if not os.path.exists(file_path): 15 | shutil.copyfile("/utils/dmb_config.json", file_path) 16 | 17 | load_dotenv(find_dotenv("/config/.env")) 18 | 19 | self.file_path = os.path.abspath(file_path) 20 | self.schema_path = os.path.abspath(schema_path) 21 | 22 | if not os.path.exists(self.file_path): 23 | raise FileNotFoundError(f"Config file not found: {self.file_path}") 24 | if not os.path.exists(self.schema_path): 25 | raise FileNotFoundError(f"Schema file not found: {self.schema_path}") 26 | 27 | self.update_config_with_defaults() 28 | # self.update_config_with_top_level_defaults() 29 | self.schema = self._load_schema() 30 | self.config = self._load_and_validate_config() 31 | 32 | def _load_schema(self): 33 | with open(self.schema_path, "r") as schema_file: 34 | return load(schema_file) 35 | 36 | def _load_config(self): 37 | try: 38 | with open(self.file_path, "r") as config_file: 39 | return load(config_file) 40 | except JSONDecodeError as e: 41 | raise ValueError( 42 | f"JSON syntax error in {self.file_path}: {e.msg} at line {e.lineno}, column {e.colno}" 43 | ) 44 | 45 | def update_config_with_top_level_defaults(self): 46 | try: 47 | with open("/utils/dmb_config.json", "r") as default_file: 48 | default_config = load(default_file, object_pairs_hook=OrderedDict) 49 | 50 | existing_config = self._load_config() 51 | updated_config = OrderedDict() 52 | updated = False 53 | 54 | for key in default_config: 55 | if key in existing_config and existing_config[key]: 56 | updated_config[key] = existing_config[key] 57 | else: 58 | updated_config[key] = default_config[key] 59 | updated = True 60 | 61 | for key in existing_config: 62 | if key not in updated_config: 63 | updated_config[key] = existing_config[key] 64 | 65 | if updated: 66 | with open(self.file_path, "w") as config_file: 67 | dump(updated_config, config_file, indent=4) 68 | except Exception as e: 69 | raise ValueError(f"Error during update_config_with_top_level_defaults: {e}") 70 | 71 | def 
update_config_with_defaults(self): 72 | try: 73 | with open("/utils/dmb_config.json", "r") as default_file: 74 | default_config = load(default_file, object_pairs_hook=OrderedDict) 75 | 76 | existing_config = self._load_config() 77 | 78 | merged_config = self._merge_configs( 79 | copy.deepcopy(existing_config), default_config 80 | ) 81 | 82 | if merged_config != existing_config: 83 | backup_path = self.file_path + ".bak" 84 | shutil.copyfile(self.file_path, backup_path) 85 | with open(self.file_path, "w") as config_file: 86 | dump(merged_config, config_file, indent=4) 87 | except Exception as e: 88 | raise ValueError(f"Error during update_config_with_defaults: {e}") 89 | 90 | def _merge_configs(self, existing, default): 91 | if not isinstance(existing, dict) or not isinstance(default, dict): 92 | return existing 93 | 94 | merged = OrderedDict() 95 | 96 | for key, default_value in default.items(): 97 | if key == "instances" and isinstance(default_value, dict): 98 | existing_instances = existing.get("instances", {}) 99 | merged_instances = OrderedDict() 100 | default_template = next(iter(default_value.values())) 101 | 102 | if existing_instances: 103 | for instance_name, instance_value in existing_instances.items(): 104 | merged_instances[instance_name] = self._merge_configs( 105 | instance_value, default_template 106 | ) 107 | else: 108 | merged_instances = default_value 109 | 110 | merged["instances"] = merged_instances 111 | continue 112 | 113 | if key in existing: 114 | existing_value = existing[key] 115 | if isinstance(existing_value, dict) and isinstance(default_value, dict): 116 | merged[key] = self._merge_configs(existing_value, default_value) 117 | else: 118 | merged[key] = existing_value 119 | else: 120 | merged[key] = default_value 121 | 122 | for key, existing_value in existing.items(): 123 | if key not in merged: 124 | merged[key] = existing_value 125 | 126 | return merged 127 | 128 | def _load_and_validate_config(self): 129 | config = self._load_config() 130 | try: 131 | validate(instance=config, schema=self.schema) 132 | except ValidationError as e: 133 | error_path = ( 134 | " -> ".join(map(str, e.absolute_path)) if e.absolute_path else "root" 135 | ) 136 | raise ValueError( 137 | f"Configuration validation error at '{error_path}': {e.message}" 138 | ) 139 | return self._merge_with_env(config) 140 | 141 | def _load_and_merge_config(self): 142 | config = self._load_config() 143 | return self._merge_with_env(config) 144 | 145 | def _merge_with_env(self, config, prefix=None): 146 | for key, value in config.items(): 147 | current_keys = prefix + [key] if prefix else [key] 148 | 149 | if isinstance(value, dict): 150 | config[key] = self._merge_with_env(value, current_keys) 151 | else: 152 | # print(f"settings.items: Key: {key}, Default: {value}") 153 | env_value = self._get_env_var(current_keys) 154 | # print(f"Key: {key}, Value: {env_value}") 155 | normalized_value = self._normalize_value(key.lower(), env_value, value) 156 | # print(f"Normalized Value: {normalized_value}") 157 | config[key] = self._validate_value(key.lower(), normalized_value) 158 | 159 | return config 160 | 161 | def _get_env_var(self, keys): 162 | env_var = "_".join([str(k).upper() for k in keys]) 163 | secret_file = f"/run/secrets/{env_var}" 164 | 165 | try: 166 | with open(secret_file, "r") as file: 167 | return file.read().strip() 168 | except IOError: 169 | pass 170 | 171 | value = os.getenv(env_var) 172 | # print(f"_get_env_var Value for {env_var}: {value}") 173 | 174 | return value if value and 
value.strip() != "" else None 175 | 176 | def _normalize_value(self, key, value, default): 177 | if value is None: 178 | return default 179 | 180 | if key in ["log_level", "loglevel"]: 181 | return value.strip().upper() 182 | 183 | if isinstance(default, bool): 184 | return value.lower() in ["true", "1", "yes"] 185 | 186 | if isinstance(default, str): 187 | return value.strip() 188 | 189 | return self._cast_value(value, default) 190 | 191 | def _validate_value(self, key, value): 192 | if key == "log_level": 193 | valid_levels = {"DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"} 194 | if value not in valid_levels: 195 | raise ValueError(f"Invalid log level: {value}") 196 | return value 197 | 198 | def _cast_value(self, value, default): 199 | if value is None: 200 | return default 201 | 202 | try: 203 | if isinstance(default, bool): 204 | return value.lower() in ["true", "1", "yes"] 205 | elif isinstance(default, int): 206 | return int(value) 207 | elif isinstance(default, float): 208 | return float(value) 209 | return value 210 | except ValueError: 211 | return default 212 | 213 | def save_config(self, process_name=None): 214 | if process_name: 215 | section_config = find_service_config(self.config, process_name) 216 | if not section_config: 217 | raise ValueError(f"Process {process_name} does not exist in config.") 218 | 219 | with open(self.file_path, "r") as config_file: 220 | full_config = load(config_file) 221 | 222 | def update_nested_config(data, target_name, updated_config): 223 | if isinstance(data, dict): 224 | for key, value in data.items(): 225 | if ( 226 | isinstance(value, dict) 227 | and value.get("process_name") == target_name 228 | ): 229 | data[key] = updated_config 230 | return True 231 | if update_nested_config(value, target_name, updated_config): 232 | return True 233 | return False 234 | 235 | if not update_nested_config(full_config, process_name, section_config): 236 | raise ValueError(f"Failed to locate process {process_name} in file.") 237 | 238 | with open(self.file_path, "w") as config_file: 239 | dump(full_config, config_file, indent=4) 240 | else: 241 | with open(self.file_path, "w") as config_file: 242 | dump(self.config, config_file, indent=4) 243 | 244 | def get(self, key, section=None, normalize_case=False): 245 | value = ( 246 | self.config.get(section, {}).get(key) if section else self.config.get(key) 247 | ) 248 | 249 | if normalize_case and isinstance(value, str): 250 | return value.lower() 251 | 252 | return value 253 | 254 | def get_instance(self, instance_name=None, key=None): 255 | if instance_name: 256 | config = self.get(key).get("instances").get(instance_name) 257 | elif key and key == "dmb_frontend" or key == "dmb_api_service": 258 | section, key = key.split("_") 259 | config = self.get(key, section) 260 | else: 261 | config = self.get(key) 262 | return config 263 | 264 | def set(self, key, value, section=None): 265 | if section: 266 | if section not in self.config: 267 | self.config[section] = {} 268 | self.config[section][key] = value 269 | else: 270 | self.config[key] = value 271 | 272 | def reload(self): 273 | self.config = self._load_and_merge_config() 274 | 275 | def find_key_for_process(self, process_name): 276 | for key, value in self.config.items(): 277 | if isinstance(value, dict) and value.get("process_name") == process_name: 278 | return key, None 279 | 280 | if isinstance(value, dict) and "instances" in value: 281 | for instance_name, instance_config in value["instances"].items(): 282 | if instance_config.get("process_name") == 
process_name: 283 | return key, instance_name 284 | 285 | if key == "dmb" and isinstance(value, dict): 286 | for subkey, subvalue in value.items(): 287 | if ( 288 | isinstance(subvalue, dict) 289 | and subvalue.get("process_name") == process_name 290 | ): 291 | return key + "_" + subkey, None 292 | 293 | return None, None 294 | 295 | 296 | def find_service_config(config, process_name): 297 | if isinstance(config, dict): 298 | for key, value in config.items(): 299 | if isinstance(value, dict) and value.get("process_name") == process_name: 300 | return value 301 | found = find_service_config(value, process_name) 302 | if found: 303 | return found 304 | return None 305 | 306 | 307 | CONFIG_MANAGER = ConfigManager() 308 | -------------------------------------------------------------------------------- /utils/dependencies.py: -------------------------------------------------------------------------------- 1 | from api.api_state import APIState 2 | from utils.processes import ProcessHandler 3 | from logging import Logger 4 | from api.connection_manager import ConnectionManager 5 | 6 | _shared_instances = {} 7 | 8 | 9 | def initialize_dependencies(process_handler, updater, websocket_manager, logger): 10 | _shared_instances["process_handler"] = process_handler 11 | _shared_instances["updater"] = updater 12 | _shared_instances["websocket_manager"] = websocket_manager 13 | _shared_instances["logger"] = logger 14 | _shared_instances["api_state"] = APIState( 15 | process_handler=process_handler, logger=logger 16 | ) 17 | 18 | 19 | def get_process_handler() -> ProcessHandler: 20 | return _shared_instances["process_handler"] 21 | 22 | 23 | def get_updater() -> object: 24 | return _shared_instances["updater"] 25 | 26 | 27 | def get_websocket_manager() -> ConnectionManager: 28 | return _shared_instances["websocket_manager"] 29 | 30 | 31 | def get_logger() -> Logger: 32 | return _shared_instances["logger"] 33 | 34 | 35 | def get_api_state() -> APIState: 36 | return _shared_instances["api_state"] 37 | -------------------------------------------------------------------------------- /utils/dmb_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "puid": 1000, 3 | "pgid": 1000, 4 | "tz": "", 5 | "dmb": { 6 | "log_level": "INFO", 7 | "log_name": "DMB", 8 | "log_dir": "/log", 9 | "log_count": 2, 10 | "log_size": "10M", 11 | "color_log": true, 12 | "plex_token": "", 13 | "plex_address": "", 14 | "github_token": "", 15 | "github_username": "", 16 | "api_service": { 17 | "enabled": true, 18 | "process_name": "DMB API", 19 | "log_level": "INFO", 20 | "host": "127.0.0.1", 21 | "port": 8000 22 | }, 23 | "frontend": { 24 | "enabled": true, 25 | "process_name": "DMB Frontend", 26 | "repo_owner": "nicocapalbo", 27 | "repo_name": "dmbdb", 28 | "release_version_enabled": false, 29 | "release_version": "v1.2.0", 30 | "branch_enabled": false, 31 | "branch": "main", 32 | "suppress_logging": false, 33 | "log_level": "INFO", 34 | "origins": [ 35 | "http://0.0.0.0:3005" 36 | ], 37 | "host": "0.0.0.0", 38 | "port": 3005, 39 | "auto_update": false, 40 | "auto_update_interval": 24, 41 | "clear_on_update": true, 42 | "exclude_dirs": [], 43 | "platforms": [ 44 | "pnpm" 45 | ], 46 | "command": [ 47 | "node", 48 | ".output/server/index.mjs" 49 | ], 50 | "config_dir": "/dmb/frontend", 51 | "env": {} 52 | } 53 | }, 54 | "cli_debrid": { 55 | "enabled": false, 56 | "process_name": "CLI Debrid", 57 | "repo_owner": "godver3", 58 | "repo_name": "cli_debrid", 59 | "release_version_enabled": 
false, 60 | "release_version": "v0.6.07", 61 | "branch_enabled": false, 62 | "branch": "main", 63 | "suppress_logging": false, 64 | "log_level": "INFO", 65 | "port": 5000, 66 | "auto_update": false, 67 | "auto_update_interval": 24, 68 | "clear_on_update": true, 69 | "exclude_dirs": [ 70 | "/cli_debrid/data" 71 | ], 72 | "platforms": [ 73 | "python" 74 | ], 75 | "command": [ 76 | "/cli_debrid/venv/bin/python", 77 | "main.py" 78 | ], 79 | "config_dir": "/cli_debrid", 80 | "config_file": "/cli_debrid/data/config/config.json", 81 | "log_file": "/cli_debrid/data/logs/debug.log", 82 | "env": { 83 | "USER_CONFIG": "/cli_debrid/data/config/", 84 | "USER_LOGS": "/cli_debrid/data/logs/", 85 | "USER_DB_CONTENT": "/cli_debrid/data/db_content/", 86 | "CLI_DEBRID_PORT": "{port}" 87 | } 88 | }, 89 | "cli_battery": { 90 | "enabled": false, 91 | "process_name": "CLI Battery", 92 | "suppress_logging": false, 93 | "log_level": "INFO", 94 | "port": 5001, 95 | "platforms": [ 96 | "python" 97 | ], 98 | "command": [ 99 | "/cli_debrid/venv/bin/python", 100 | "cli_battery/main.py" 101 | ], 102 | "config_dir": "/cli_debrid", 103 | "config_file": "/cli_debrid/data/config/settings.json", 104 | "log_file": "/cli_debrid/data/logs/battery_debug.log", 105 | "env": { 106 | "PYTHONPATH": "/cli_debrid", 107 | "USER_CONFIG": "/cli_debrid/data/config/", 108 | "USER_LOGS": "/cli_debrid/data/logs/", 109 | "USER_DB_CONTENT": "/cli_debrid/data/db_content/", 110 | "CLI_DEBRID_BATTERY_PORT": "{port}" 111 | } 112 | }, 113 | "phalanx_db": { 114 | "enabled": false, 115 | "process_name": "Phalanx DB", 116 | "repo_owner": "godver3", 117 | "repo_name": "phalanx_db_hyperswarm", 118 | "release_version_enabled": false, 119 | "release_version": "v0.50", 120 | "branch_enabled": false, 121 | "branch": "main", 122 | "suppress_logging": false, 123 | "log_level": "INFO", 124 | "port": 8888, 125 | "auto_update": false, 126 | "auto_update_interval": 24, 127 | "clear_on_update": true, 128 | "exclude_dirs": [ 129 | "/phalanx_db/data" 130 | ], 131 | "platforms": [ 132 | "pnpm" 133 | ], 134 | "command": [ 135 | "node", 136 | "phalanx_db_rest.js" 137 | ], 138 | "config_dir": "/phalanx_db", 139 | "env": {} 140 | }, 141 | "plex_debrid": { 142 | "enabled": false, 143 | "process_name": "Plex Debrid", 144 | "repo_owner": "elfhosted", 145 | "repo_name": "plex_debrid", 146 | "branch_enabled": false, 147 | "branch": "main", 148 | "suppress_logging": false, 149 | "log_level": "INFO", 150 | "auto_update": false, 151 | "auto_update_interval": 24, 152 | "clear_on_update": true, 153 | "exclude_dirs": [ 154 | "/plex_debrid/config" 155 | ], 156 | "platforms": [ 157 | "python" 158 | ], 159 | "command": [ 160 | "/plex_debrid/venv/bin/python", 161 | "main.py", 162 | "-service", 163 | "--config-dir", 164 | "./config" 165 | ], 166 | "config_dir": "/plex_debrid", 167 | "config_file": "/plex_debrid/config/settings.json", 168 | "log_file": "/plex_debrid/config/plex_debrid.log", 169 | "env": {} 170 | }, 171 | "postgres": { 172 | "enabled": true, 173 | "process_name": "PostgreSQL", 174 | "suppress_logging": false, 175 | "log_level": "INFO", 176 | "host": "127.0.0.1", 177 | "port": 5432, 178 | "databases": [ 179 | { 180 | "name": "postgres", 181 | "enabled": true 182 | }, 183 | { 184 | "name": "pgadmin", 185 | "enabled": true 186 | }, 187 | { 188 | "name": "zilean", 189 | "enabled": true 190 | }, 191 | { 192 | "name": "riven", 193 | "enabled": true 194 | } 195 | ], 196 | "config_dir": "/postgres_data", 197 | "config_file": "/postgres_data/postgresql.conf", 198 | "initdb_args": 
"--data-checksums", 199 | "user": "DMB", 200 | "password": "postgres", 201 | "shared_buffers": "128MB", 202 | "max_connections": 100, 203 | "run_directory": "/run/postgresql", 204 | "command": "postgres -D {postgres_config_dir} -c config_file={postgres_config_file}", 205 | "env": {} 206 | }, 207 | "pgadmin": { 208 | "enabled": true, 209 | "process_name": "pgAdmin4", 210 | "config_dir": "/pgadmin/data", 211 | "config_file": "/pgadmin/data/config_local.py", 212 | "log_file": "/pgadmin/data/pgadmin4.log", 213 | "port": 5050, 214 | "default_server": "0.0.0.0", 215 | "setup_email": "DMB@DMB.DMB", 216 | "setup_password": "postgres", 217 | "command": [] 218 | }, 219 | "rclone": { 220 | "instances": { 221 | "RealDebrid": { 222 | "enabled": true, 223 | "process_name": "rclone w/ RealDebrid", 224 | "suppress_logging": false, 225 | "log_level": "INFO", 226 | "key_type": "RealDebrid", 227 | "zurg_enabled": true, 228 | "mount_dir": "/data", 229 | "mount_name": "rclone_RD", 230 | "cache_dir": "/cache", 231 | "config_dir": "/config", 232 | "config_file": "/config/rclone.config", 233 | "log_file": "/log/rclone_w_realdebrid.log", 234 | "zurg_config_file": "/zurg/RD/config.yml", 235 | "command": [], 236 | "api_key": "" 237 | } 238 | } 239 | }, 240 | "riven_backend": { 241 | "enabled": true, 242 | "process_name": "Riven Backend", 243 | "repo_owner": "rivenmedia", 244 | "repo_name": "riven", 245 | "release_version_enabled": false, 246 | "release_version": "v0.20.1", 247 | "branch_enabled": false, 248 | "branch": "release-please--branches--main", 249 | "suppress_logging": false, 250 | "log_level": "INFO", 251 | "host": "127.0.0.1", 252 | "port": 8080, 253 | "auto_update": false, 254 | "auto_update_interval": 24, 255 | "symlink_library_path": "/mnt", 256 | "clear_on_update": true, 257 | "exclude_dirs": [ 258 | "/riven/backend/data" 259 | ], 260 | "env_copy": { 261 | "source": "/riven/backend/data/.env", 262 | "destination": "/riven/backend/src/.env" 263 | }, 264 | "platforms": [ 265 | "python" 266 | ], 267 | "command": [ 268 | "/riven/backend/venv/bin/python", 269 | "src/main.py", 270 | "-p", 271 | "{port}" 272 | ], 273 | "config_dir": "/riven/backend", 274 | "config_file": "/riven/backend/data/settings.json", 275 | "env": {}, 276 | "wait_for_dir": "/data/rclone_RD/__all__" 277 | }, 278 | "riven_frontend": { 279 | "enabled": true, 280 | "process_name": "Riven Frontend", 281 | "repo_owner": "rivenmedia", 282 | "repo_name": "riven-frontend", 283 | "release_version_enabled": false, 284 | "release_version": "v0.17.0", 285 | "branch_enabled": false, 286 | "branch": "release-please--branches--main", 287 | "suppress_logging": false, 288 | "log_level": "INFO", 289 | "host": "0.0.0.0", 290 | "port": 3000, 291 | "auto_update": false, 292 | "auto_update_interval": 24, 293 | "clear_on_update": true, 294 | "exclude_dirs": [], 295 | "platforms": [ 296 | "pnpm" 297 | ], 298 | "command": [ 299 | "node", 300 | "build" 301 | ], 302 | "config_dir": "/riven/frontend", 303 | "env": { 304 | "ORIGIN": "http://0.0.0.0:{port}", 305 | "PORT": "{port}", 306 | "HOST": "{host}" 307 | } 308 | }, 309 | "zilean": { 310 | "enabled": true, 311 | "process_name": "Zilean", 312 | "repo_owner": "iPromKnight", 313 | "repo_name": "zilean", 314 | "release_version_enabled": false, 315 | "release_version": "v3.3.0", 316 | "branch_enabled": false, 317 | "branch": "main", 318 | "suppress_logging": false, 319 | "log_level": "INFO", 320 | "host": "127.0.0.1", 321 | "port": 8182, 322 | "auto_update": false, 323 | "auto_update_interval": 24, 324 | 
"clear_on_update": true, 325 | "exclude_dirs": [ 326 | "/zilean/app/data" 327 | ], 328 | "env_copy": { 329 | "source": "/zilean/app/data/.env", 330 | "destination": "/zilean/app/src/.env" 331 | }, 332 | "platforms": [ 333 | "python", 334 | "dotnet" 335 | ], 336 | "command": [ 337 | "/zilean/app/zilean-api" 338 | ], 339 | "config_dir": "/zilean", 340 | "config_file": "/zilean/app/data/settings.json", 341 | "env": { 342 | "DOTNET_RUNNING_IN_CONTAINER": "true", 343 | "DOTNET_gcServer": "1", 344 | "DOTNET_GCDynamicAdaptationMode": "1", 345 | "DOTNET_SYSTEM_GLOBALIZATION_INVARIANT": "false", 346 | "PYTHONUNBUFFERED": "1", 347 | "ASPNETCORE_URLS": "http://+:{port}", 348 | "PYTHONPATH": "/zilean/venv/lib/python3.11/site-packages", 349 | "PATH": "/zilean/venv/bin:${PATH}", 350 | "ZILEAN_PYTHON_PYLIB": "/usr/local/lib/libpython3.11.so.1.0", 351 | "Zilean__Database__ConnectionString": "Host={postgres_host};Port={postgres_port};Database=zilean;Username={postgres_user};Password={postgres_password};Timeout=300;CommandTimeout=3600;" 352 | } 353 | }, 354 | "zurg": { 355 | "instances": { 356 | "RealDebrid": { 357 | "enabled": true, 358 | "process_name": "Zurg w/ RealDebrid", 359 | "repo_owner": "debridmediamanager", 360 | "repo_name": "zurg-testing", 361 | "release_version_enabled": false, 362 | "release_version": "v0.9.3-final", 363 | "suppress_logging": false, 364 | "log_level": "INFO", 365 | "host": "127.0.0.1", 366 | "port": 9090, 367 | "auto_update": false, 368 | "auto_update_interval": 1, 369 | "clear_on_update": false, 370 | "exclude_dirs": [ 371 | "/zurg/RD" 372 | ], 373 | "key_type": "RealDebrid", 374 | "config_dir": "/zurg/RD", 375 | "config_file": "/zurg/RD/config.yml", 376 | "command": "/zurg/RD/zurg", 377 | "user": "", 378 | "password": "", 379 | "api_key": "" 380 | } 381 | } 382 | } 383 | } -------------------------------------------------------------------------------- /utils/duplicate_cleanup.py: -------------------------------------------------------------------------------- 1 | from utils.global_logger import logger 2 | from utils.logger import format_time, get_start_time, time_to_complete 3 | from plexapi.server import PlexServer 4 | from plexapi import exceptions as plexapi_exceptions 5 | from requests.exceptions import HTTPError 6 | import requests, re, schedule, threading, time 7 | 8 | 9 | max_retry_attempts = 5 10 | retry_interval = 10 11 | 12 | 13 | def delete_media_with_retry(media): 14 | # logger = get_logger(log_name='duplicate_cleanup') 15 | retry_attempt = 0 16 | continue_execution = True 17 | 18 | while retry_attempt < max_retry_attempts: 19 | try: 20 | media.delete() 21 | break 22 | except requests.exceptions.ReadTimeout: 23 | retry_attempt += 1 24 | logger.warning( 25 | f"Read timeout occurred. Retrying delete operation (Attempt {retry_attempt})..." 26 | ) 27 | time.sleep(retry_interval) 28 | except plexapi_exceptions.NotFound as e: 29 | logger.warning( 30 | f"404 Not Found error occurred. Skipping delete operation for media ID: {media.id}" 31 | ) 32 | continue_execution = False 33 | break 34 | else: 35 | logger.error( 36 | f"Max retry attempts reached. 
Unable to delete media ID: {media.id}" 37 | ) 38 | 39 | return continue_execution 40 | 41 | 42 | def process_tv_shows(): 43 | # logger = get_logger(log_name='duplicate_cleanup') 44 | try: 45 | plex_server = PlexServer(PLEXADD, PLEXTOKEN) 46 | tv_section = None 47 | for section in plex_server.library.sections(): 48 | if section.type == "show": 49 | tv_section = section 50 | break 51 | 52 | if tv_section is not None: 53 | logger.info(f"TV show library section: {tv_section.title}") 54 | duplicate_episodes = tv_section.search(duplicate=True, libtype="episode") 55 | episodes_to_delete = [] 56 | 57 | for episode in duplicate_episodes: 58 | has_RCLONEMN = False 59 | has_other_directory = False 60 | media_id = "" 61 | for media in episode.media: 62 | for part in media.parts: 63 | if re.search(f"/{RCLONEMN}[0-9a-zA-Z_]*?/", part.file): 64 | has_RCLONEMN = True 65 | media_id = media.id 66 | else: 67 | has_other_directory = True 68 | if has_RCLONEMN and has_other_directory: 69 | for part in media.parts: 70 | logger.info( 71 | f"Duplicate TV show episode found: Show: {episode.show().title} - Episode: {episode.title} (Media ID: {media_id})" 72 | ) 73 | episodes_to_delete.append((episode, media_id)) 74 | 75 | if len(episodes_to_delete) > 0: 76 | logger.info( 77 | f"Number of TV show episodes to delete: {len(episodes_to_delete)}" 78 | ) 79 | else: 80 | logger.info("No duplicate TV show episodes found.") 81 | 82 | for episode, media_id in episodes_to_delete: 83 | for media in episode.media: 84 | if media.id == media_id: 85 | for part in media.parts: 86 | logger.info( 87 | f"Deleting TV show episode from Rclone directory: {episode.show().title} - {episode.title} (Media ID: {media_id})" 88 | ) 89 | continue_execution = delete_media_with_retry(media) 90 | if not continue_execution: 91 | break 92 | if not continue_execution: 93 | break 94 | else: 95 | logger.error("TV show library section not found.") 96 | except requests.exceptions.ConnectionError as e: 97 | logger.error( 98 | f"Connection error occurred while processing TV show library section: {str(e)}" 99 | ) 100 | except Exception as e: 101 | logger.error( 102 | f"Error occurred while processing TV show library section: {str(e)}" 103 | ) 104 | 105 | 106 | def process_movies(): 107 | # logger = get_logger(log_name='duplicate_cleanup') 108 | try: 109 | plex_server = PlexServer(PLEXADD, PLEXTOKEN) 110 | movie_section = None 111 | for section in plex_server.library.sections(): 112 | if section.type == "movie": 113 | movie_section = section 114 | break 115 | 116 | if movie_section is not None: 117 | logger.info(f"Movie library section: {movie_section.title}") 118 | duplicate_movies = movie_section.search(duplicate=True, libtype="movie") 119 | movies_to_delete = [] 120 | encountered_404_error = False 121 | 122 | for movie in duplicate_movies: 123 | if encountered_404_error: 124 | logger.warning( 125 | "Skipping remaining episodes due to previous 404 error." 
126 | ) 127 | break 128 | has_RCLONEMN = False 129 | has_other_directory = False 130 | media_id = "" 131 | for media in movie.media: 132 | for part in media.parts: 133 | if re.search(f"/{RCLONEMN}[0-9a-zA-Z_]*?/", part.file): 134 | has_RCLONEMN = True 135 | media_id = media.id 136 | else: 137 | has_other_directory = True 138 | if has_RCLONEMN and has_other_directory: 139 | for part in media.parts: 140 | logger.info( 141 | f"Duplicate movie found: {movie.title} (Media ID: {media_id})" 142 | ) 143 | movies_to_delete.append((movie, media_id)) 144 | 145 | if len(movies_to_delete) > 0: 146 | logger.info(f"Number of movies to delete: {len(movies_to_delete)}") 147 | else: 148 | logger.info("No duplicate movies found.") 149 | 150 | for movie, media_id in movies_to_delete: 151 | for media in movie.media: 152 | if media.id == media_id: 153 | for part in media.parts: 154 | logger.info( 155 | f"Deleting movie from Rclone directory: {movie.title} (Media ID: {media_id})" 156 | ) 157 | continue_execution = delete_media_with_retry(media) 158 | if not continue_execution: 159 | break 160 | if not continue_execution: 161 | break 162 | else: 163 | logger.error("Movie library section not found.") 164 | except requests.exceptions.ConnectionError as e: 165 | logger.error( 166 | f"Connection error occurred while processing movie library section: {str(e)}" 167 | ) 168 | except Exception as e: 169 | logger.error(f"Error occurred while processing movie library section: {str(e)}") 170 | 171 | 172 | def setup(): 173 | try: 174 | app_env_variables = { 175 | "PLEX_ADDRESS": PLEXADD, 176 | "PLEX_TOKEN": PLEXTOKEN, 177 | "RCLONE_MOUNT_NAME": RCLONEMN, 178 | } 179 | 180 | logger.info("Checking required duplicate cleanup environment variables.") 181 | for var_name, value in app_env_variables.items(): 182 | if value is None: 183 | logger.error( 184 | f"Application environment variable '{var_name}' is not set." 
185 | ) 186 | else: 187 | logger.debug(f"Application environment variable '{var_name}' is set.") 188 | 189 | if all(app_env_variables.values()): 190 | if DUPECLEAN is not None and cleanup_interval() == 24: 191 | logger.info("Duplicate cleanup interval missing") 192 | logger.info("Defaulting to " + format_time(cleanup_interval())) 193 | cleanup_thread() 194 | elif DUPECLEAN is not None: 195 | logger.info( 196 | "Duplicate cleanup interval set to " 197 | + format_time(cleanup_interval()) 198 | ) 199 | cleanup_thread() 200 | except Exception as e: 201 | logger.error(e) 202 | 203 | 204 | def cleanup_interval(): 205 | if CLEANUPINT is None: 206 | interval = 24 207 | else: 208 | interval = float(CLEANUPINT) 209 | return interval 210 | 211 | 212 | def cleanup_schedule(): 213 | time.sleep(60) 214 | interval = cleanup_interval() 215 | interval_minutes = int(interval * 60) 216 | schedule.every(interval_minutes).minutes.do(start_cleanup) 217 | while True: 218 | schedule.run_pending() 219 | time.sleep(1) 220 | 221 | 222 | def start_cleanup(): 223 | logger.info("Starting duplicate cleanup") 224 | start_time = get_start_time() 225 | process_tv_shows() 226 | process_movies() 227 | total_time = time_to_complete(start_time) 228 | logger.info("Duplicate cleanup complete.") 229 | logger.info(f"Total time required: {total_time}") 230 | 231 | 232 | def cleanup_thread(): 233 | thread = threading.Thread(target=cleanup_schedule) 234 | thread.daemon = True 235 | thread.start() 236 | -------------------------------------------------------------------------------- /utils/global_logger.py: -------------------------------------------------------------------------------- 1 | from utils.logger import get_logger 2 | 3 | logger = None 4 | _logger_initialized = False 5 | websocket_manager = None 6 | 7 | 8 | def initialize_logger(): 9 | global logger, _logger_initialized, websocket_manager 10 | if not _logger_initialized: 11 | from api.connection_manager import ConnectionManager 12 | 13 | websocket_manager = ConnectionManager() 14 | logger = get_logger( 15 | log_name=None, log_dir=None, websocket_manager=websocket_manager 16 | ) 17 | # logger.debug(f"Logger initialized with websocket manager: {websocket_manager}") 18 | _logger_initialized = True 19 | else: 20 | logger.debug("Logger already initialized.") 21 | 22 | 23 | initialize_logger() 24 | -------------------------------------------------------------------------------- /utils/plex_refresh.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import time 4 | from plexapi.server import PlexServer 5 | from pathlib import Path 6 | 7 | plex_refresh_path = Path(__file__).resolve() 8 | project_path = plex_refresh_path.parent.parent.parent 9 | sys.path.append(str(project_path)) 10 | from base import PLEXTOKEN, PLEXADD, PLEXMOUNT, RCLONEMN, RCLONEDIR 11 | 12 | 13 | # Configuration 14 | plex_url = PLEXADD.replace("'", "").replace('"', "") 15 | token = PLEXTOKEN.replace("'", "").replace('"', "") 16 | plex_mount = PLEXMOUNT.replace("'", "").replace('"', "") 17 | zurg_mount = f"{RCLONEDIR}/{RCLONEMN}" 18 | zurg_timeout = 300 # 5 minutes in seconds for Zurg file availability 19 | plex_timeout = 60 # Maximum time to wait for Plex to process the refresh 20 | wait_increment = 1 # Time increment for each wait step 21 | max_retries = 20 # Maximum number of retries for Plex refresh 22 | 23 | ### Do not alter below ### 24 | plex = PlexServer(plex_url, token) 25 | 26 | 27 | def refresh_sections(section_ids, filepath): 28 | for 
section_id in section_ids: 29 | section = plex.library.sectionByID(section_id) 30 | try: 31 | section.update(path=filepath) 32 | print(f"Refreshed section ID {section_id} with path {filepath}") 33 | except Exception as e: 34 | print(f"Error refreshing section ID {section_id}: {e}") 35 | 36 | 37 | def check_path_in_plex(path, section_ids): 38 | try: 39 | for section_id in section_ids: 40 | section = plex.library.sectionByID(section_id) 41 | recent_items = section.recentlyAdded(maxresults=50) 42 | for item in recent_items: 43 | if section.TYPE == "movie": 44 | for media in item.media: 45 | for part in media.parts: 46 | if path in part.file: 47 | return True 48 | elif section.TYPE == "show": 49 | for episode in item.episodes(): 50 | for media in episode.media: 51 | for part in media.parts: 52 | if path in part.file: 53 | return True 54 | return False 55 | except Exception as e: 56 | print(f"Error during search: {e}") 57 | return False 58 | 59 | 60 | def main(): 61 | section_ids = [section.key for section in plex.library.sections()] 62 | valid_directories = {} 63 | 64 | for arg in sys.argv[1:]: 65 | print(f"Starting Plex Update for {arg}") 66 | directory_name = arg.split("/")[0] 67 | directory_path = os.path.join(plex_mount, directory_name) 68 | directory_exists = any( 69 | directory_path in section.locations for section in plex.library.sections() 70 | ) 71 | 72 | if directory_exists: 73 | valid_directories[arg] = directory_path 74 | print( 75 | f"Directory path {directory_path} exists in Plex. Will process {arg}." 76 | ) 77 | else: 78 | print( 79 | f"Directory path {directory_path} does not exist in Plex. Skipping {arg}." 80 | ) 81 | 82 | for arg, path in valid_directories.items(): 83 | zurg_arg = os.path.join(zurg_mount, arg) 84 | elapsed_time = 0 85 | while not os.path.exists(zurg_arg) and elapsed_time < zurg_timeout: 86 | print( 87 | f"Waiting for {arg} to be available at {zurg_mount}... (Timeout in {zurg_timeout - elapsed_time} seconds)" 88 | ) 89 | time.sleep(10) 90 | elapsed_time += 10 91 | 92 | if not os.path.exists(zurg_arg): 93 | print(f"{arg} not found in {zurg_mount}. Skipping.") 94 | continue 95 | 96 | plex_arg = os.path.join(plex_mount, arg) 97 | retry_count = 0 98 | verified = False 99 | while not verified and retry_count < max_retries: 100 | refresh_sections(section_ids, plex_arg) 101 | total_wait_time = 0 102 | 103 | while total_wait_time < plex_timeout: 104 | print( 105 | f"Waiting for {wait_increment} seconds for Plex to process the refresh (Total waited: {total_wait_time} seconds)..." 106 | ) 107 | time.sleep(wait_increment) 108 | total_wait_time += wait_increment 109 | 110 | if check_path_in_plex(plex_arg, section_ids): 111 | print(f"Verification successful: {plex_arg} found in Plex library.") 112 | verified = True 113 | break 114 | 115 | if not verified: 116 | print( 117 | f"Verification unsuccessful for {plex_arg}. Retrying ({retry_count + 1}/{max_retries})..." 118 | ) 119 | retry_count += 1 120 | 121 | if not verified: 122 | print("All retries failed. 
Verification unsuccessful for", plex_arg) 123 | 124 | 125 | if __name__ == "__main__": 126 | main() 127 | -------------------------------------------------------------------------------- /utils/processes.py: -------------------------------------------------------------------------------- 1 | from utils.logger import SubprocessLogger 2 | from utils.config_loader import CONFIG_MANAGER 3 | from concurrent.futures import ThreadPoolExecutor, as_completed 4 | import shlex, os, time, signal, threading, subprocess, sys, uvicorn 5 | from json import dump 6 | 7 | 8 | class ProcessHandler: 9 | _instance = None 10 | 11 | def __new__(cls, *args, **kwargs): 12 | if cls._instance is None: 13 | cls._instance = super(ProcessHandler, cls).__new__(cls) 14 | cls._instance.init_attributes(*args, **kwargs) 15 | signal.signal(signal.SIGTERM, cls._instance.shutdown) 16 | signal.signal(signal.SIGINT, cls._instance.shutdown) 17 | signal.signal(signal.SIGCHLD, cls._instance.reap_zombies) 18 | return cls._instance 19 | 20 | def init_attributes(self, logger): 21 | self.logger = logger 22 | self.processes = {} 23 | self.process_names = {} 24 | self.subprocess_loggers = {} 25 | self.stdout = "" 26 | self.stderr = "" 27 | self.returncode = None 28 | self.shutting_down = False 29 | self.setup_tracker = set() 30 | 31 | def _update_running_processes_file(self): 32 | running_processes = { 33 | process_info["name"]: pid for pid, process_info in self.processes.items() 34 | } 35 | file_path = "/healthcheck/running_processes.json" 36 | directory = os.path.dirname(file_path) 37 | 38 | try: 39 | os.makedirs(directory, exist_ok=True) 40 | with open(file_path, "w") as f: 41 | dump(running_processes, f) 42 | except Exception as e: 43 | self.logger.error(f"Failed to write running processes file: {e}") 44 | 45 | def start_process( 46 | self, 47 | process_name, 48 | config_dir=None, 49 | command=None, 50 | instance_name=None, 51 | suppress_logging=False, 52 | env=None, 53 | ): 54 | skip_setup = {"pgAgent"} 55 | key = None 56 | 57 | if process_name in skip_setup: 58 | self.logger.info( 59 | f"{process_name} does not require setup. Skipping setup..." 60 | ) 61 | else: 62 | key, instance_name = CONFIG_MANAGER.find_key_for_process(process_name) 63 | if not key: 64 | self.logger.debug( 65 | f"Failed to locate key for {process_name}. Assuming no setup required." 66 | ) 67 | else: 68 | if process_name not in self.setup_tracker: 69 | self.logger.debug(f"Pre Setup tracker: {self.setup_tracker}") 70 | self.logger.info(f"{process_name} needs setup. Running setup...") 71 | from utils.setup import setup_project 72 | 73 | success, error = setup_project(self, process_name) 74 | if not success: 75 | return False, f"Failed to set up {process_name}: {error}" 76 | 77 | try: 78 | if process_name in self.process_names: 79 | self.logger.info(f"{process_name} is already running. Skipping...") 80 | return True, None 81 | 82 | group_id = CONFIG_MANAGER.get("pgid") 83 | user_id = CONFIG_MANAGER.get("puid") 84 | 85 | if not config_dir or not command or len(command) == 0: 86 | self.logger.debug( 87 | f"Configuration directory or command not provided for {process_name}. Attempting to load from config..." 
88 | ) 89 | key, instance_name = CONFIG_MANAGER.find_key_for_process(process_name) 90 | config = CONFIG_MANAGER.get_instance(instance_name, key) 91 | command = config.get("command", command) 92 | self.logger.debug(f"Command for {process_name}: {command}") 93 | config_dir = config.get("config_dir", config_dir) 94 | suppress_logging = config.get("suppress_logging", suppress_logging) 95 | env = env or {} 96 | env.update(config.get("env", {})) 97 | if config.get("wait_for_dir"): 98 | dependency_dir = config["wait_for_dir"] 99 | while not os.path.exists(dependency_dir): 100 | self.logger.info( 101 | f"Waiting for directory {dependency_dir} to become available..." 102 | ) 103 | time.sleep(10) 104 | 105 | def preexec_fn(): 106 | os.setgid(group_id) 107 | os.setuid(user_id) 108 | 109 | process_description = process_name 110 | self.logger.info(f"Starting {process_description} process") 111 | 112 | if isinstance(command, str): 113 | command = shlex.split(command) 114 | 115 | if key or instance_name: 116 | config = CONFIG_MANAGER.get_instance(instance_name, key) 117 | if key == "zurg": 118 | config.get("log_level", "INFO") 119 | env = config.get("env", None) 120 | if env is None: 121 | env = {} 122 | env["LOG_LEVEL"] = config.get("log_level", "INFO") 123 | else: 124 | env = config.get("env", None) 125 | 126 | process_env = os.environ.copy() 127 | if env is not None: 128 | process_env.update(env) 129 | 130 | rclone_instances = CONFIG_MANAGER.get("rclone", {}).get("instances", {}) 131 | enabled_rclone_processes = [ 132 | config.get("process_name") 133 | for config in rclone_instances.values() 134 | if config.get("enabled", False) 135 | ] 136 | 137 | process_static_list = [ 138 | "poetry_install", 139 | "install_poetry", 140 | "poetry_env_setup", 141 | "PostgreSQL_init", 142 | "pnpm_install", 143 | "pnpm_build", 144 | "python_env_setup", 145 | "install_requirements", 146 | "setup_env_and_install", 147 | "dotnet_env_restore", 148 | "dotnet_publish", 149 | ] 150 | 151 | if enabled_rclone_processes: 152 | process_static_list.extend(enabled_rclone_processes) 153 | 154 | skip_preexec = process_name in process_static_list 155 | 156 | process = subprocess.Popen( 157 | command, 158 | stdout=subprocess.PIPE, 159 | stderr=subprocess.PIPE, 160 | start_new_session=True, 161 | cwd=config_dir, 162 | universal_newlines=True, 163 | bufsize=1, 164 | preexec_fn=(preexec_fn if not skip_preexec else None), 165 | env=process_env, 166 | ) 167 | 168 | if not suppress_logging: 169 | subprocess_logger = SubprocessLogger( 170 | self.logger, f"{process_description}" 171 | ) 172 | subprocess_logger.start_logging_stdout(process) 173 | subprocess_logger.start_monitoring_stderr( 174 | process, instance_name, process_name 175 | ) 176 | self.subprocess_loggers[process_name] = subprocess_logger 177 | 178 | # success, error = self._check_immediate_exit_and_log(process, process_name) 179 | # if not success: 180 | # return False, error 181 | 182 | self.logger.info(f"{process_name} process started with PID: {process.pid}") 183 | self.processes[process.pid] = { 184 | "name": process_name, 185 | "description": process_description, 186 | "process_obj": process, 187 | } 188 | self.process_names[process_name] = process 189 | 190 | if process: 191 | self._update_running_processes_file() 192 | return True, None 193 | 194 | except Exception as e: 195 | return False, f"Error running subprocess for {process_name}: {e}" 196 | 197 | def _check_immediate_exit_and_log(self, process, process_name): 198 | time.sleep(0.5) 199 | if process.poll() is not 
None: 200 | stdout_output = process.stdout.read().strip() 201 | stderr_output = process.stderr.read().strip() 202 | 203 | self.logger.error( 204 | f"{process_name} exited immediately with return code {process.returncode}" 205 | ) 206 | if stdout_output: 207 | self.logger.error(f"{process_name} stdout:\n{stdout_output}") 208 | if stderr_output: 209 | self.logger.error(f"{process_name} stderr:\n{stderr_output}") 210 | return False, f"{process_name} failed to start. See logs for details." 211 | 212 | return True, None 213 | 214 | def reap_zombies(self, signum, frame): 215 | while True: 216 | try: 217 | pid, _ = os.waitpid(-1, os.WNOHANG) 218 | if pid == 0: 219 | break 220 | process_info = self.processes.pop(pid, {"description": "Unknown"}) 221 | process_name = process_info.get("name") 222 | if process_name in self.process_names: 223 | del self.process_names[process_name] 224 | self.logger.debug( 225 | f"Reaped zombie process with PID: {pid}, " 226 | f"Description: {process_info.get('description', 'Unknown')}" 227 | ) 228 | except ChildProcessError: 229 | break 230 | 231 | def wait(self, process_name): 232 | if self.shutting_down: 233 | self.logger.debug(f"Skipping wait for {process_name} due to shutdown mode.") 234 | return 235 | 236 | process = self.process_names.get(process_name) 237 | 238 | if not process: 239 | self.logger.warning( 240 | f"Process {process_name} is not running or has already exited." 241 | ) 242 | return 243 | 244 | try: 245 | process.wait() 246 | self.returncode = process.returncode 247 | if process.stdout: 248 | self.stdout = process.stdout.read().strip() 249 | if process.stderr: 250 | self.stderr = process.stderr.read().strip() 251 | except Exception as e: 252 | self.logger.error(f"Error while waiting for process {process_name}: {e}") 253 | finally: 254 | if process_name in self.subprocess_loggers: 255 | self.subprocess_loggers[process_name].stop_logging_stdout() 256 | self.subprocess_loggers[process_name].stop_monitoring_stderr() 257 | del self.subprocess_loggers[process_name] 258 | 259 | if process.pid in self.processes: 260 | del self.processes[process.pid] 261 | 262 | if process_name in self.process_names: 263 | del self.process_names[process_name] 264 | 265 | def stop_process(self, process_name): 266 | try: 267 | process_description = process_name 268 | self.logger.info(f"Initiating shutdown for {process_description}") 269 | 270 | process = self.process_names.get(process_name) 271 | if process: 272 | self.logger.debug(f"Process {process_name} found: {process}") 273 | process.terminate() 274 | max_attempts = 1 if process_name == "riven_backend" else 3 275 | attempt = 0 276 | while attempt < max_attempts: 277 | self.logger.debug( 278 | f"Waiting for {process_description} to terminate (attempt {attempt + 1})..." 279 | ) 280 | try: 281 | process.wait(timeout=10) 282 | if process.poll() is not None: 283 | self.logger.info( 284 | f"{process_description} process terminated gracefully." 285 | ) 286 | break 287 | except subprocess.TimeoutExpired: 288 | self.logger.warning( 289 | f"{process_description} process did not terminate within 10 seconds on attempt {attempt + 1}." 290 | ) 291 | attempt += 1 292 | time.sleep(5) 293 | if process.poll() is None: 294 | self.logger.warning( 295 | f"{process_description} process did not terminate, forcing shutdown." 296 | ) 297 | process.kill() 298 | process.wait() 299 | self.logger.info( 300 | f"{process_description} process forcefully terminated." 
301 | ) 302 | if self.subprocess_loggers.get(process_name): 303 | self.subprocess_loggers[process_name].stop_logging_stdout() 304 | self.subprocess_loggers[process_name].stop_monitoring_stderr() 305 | del self.subprocess_loggers[process_name] 306 | self.logger.debug(f"Stopped logging for {process_description}") 307 | self.process_names.pop(process_name, None) 308 | process_info = self.processes.pop(process.pid, None) 309 | if process_info: 310 | self.logger.debug( 311 | f"Removed {process_description} with PID {process.pid} from tracking." 312 | ) 313 | self.logger.info(f"{process_description} shutdown completed.") 314 | self._update_running_processes_file() 315 | else: 316 | self.logger.warning( 317 | f"{process_description} was not found or has already been stopped." 318 | ) 319 | except Exception as e: 320 | self.logger.error( 321 | f"Error occurred while stopping {process_description}: {e}" 322 | ) 323 | 324 | def shutdown_threads(self, *args, **kwargs): 325 | self.logger.debug( 326 | f"shutdown_threads called with args: {args}, kwargs: {kwargs}" 327 | ) 328 | for thread in threading.enumerate(): 329 | if thread.is_alive() and thread is not threading.main_thread(): 330 | self.logger.info(f"Joining thread: {thread.name}") 331 | thread.join(timeout=5) 332 | if thread.is_alive(): 333 | self.logger.warning( 334 | f"Thread {thread.name} did not terminate in time." 335 | ) 336 | 337 | def shutdown(self, signum=None, frame=None, exit_code=0): 338 | self.shutting_down = True 339 | self.logger.info("Shutdown signal received. Cleaning up...") 340 | processes_to_stop = list(self.process_names.keys()) 341 | self.logger.info(f"Processes to stop: {', '.join(processes_to_stop)}") 342 | 343 | with ThreadPoolExecutor() as executor: 344 | futures = { 345 | executor.submit(self.stop_process, process_name): process_name 346 | for process_name in processes_to_stop 347 | if process_name in self.process_names 348 | } 349 | 350 | for future in as_completed(futures): 351 | process_name = futures[future] 352 | try: 353 | future.result() 354 | self.logger.info(f"{process_name} has been stopped successfully.") 355 | except Exception as e: 356 | self.logger.error(f"Error stopping {process_name}: {e}") 357 | self._update_running_processes_file() 358 | self.shutdown_threads() 359 | time.sleep(5) 360 | self.unmount_all() 361 | uvicorn.Server.should_exit = True 362 | self.logger.info("Shutdown complete.") 363 | sys.exit(exit_code) 364 | 365 | def unmount_all(self): 366 | rclone_instances = CONFIG_MANAGER.get("rclone", {}).get("instances", {}) 367 | for instance_name, instance_config in rclone_instances.items(): 368 | if instance_config.get("enabled", False): 369 | rclone_dir = instance_config.get("mount_dir") 370 | rclone_mount_name = instance_config.get("mount_name") 371 | rclone_mount_path = os.path.join(rclone_dir, rclone_mount_name) 372 | if os.path.ismount(rclone_mount_path): 373 | self.logger.info( 374 | f"Unmounting rclone mount for instance {instance_name} at {rclone_mount_path}..." 
375 | ) 376 | umount = subprocess.run( 377 | ["umount", rclone_mount_path], capture_output=True, text=True 378 | ) 379 | if umount.returncode == 0: 380 | self.logger.info( 381 | f"Successfully unmounted rclone mount for instance {instance_name}: {rclone_mount_path}" 382 | ) 383 | else: 384 | self.logger.error( 385 | f"Failed to unmount rclone mount for instance {instance_name}: {rclone_mount_path}: {umount.stderr.strip()}" 386 | ) 387 | -------------------------------------------------------------------------------- /utils/riven_settings.py: -------------------------------------------------------------------------------- 1 | from utils.global_logger import logger 2 | from utils.config_loader import CONFIG_MANAGER 3 | from json import load, dump, JSONDecodeError 4 | import os, time, re, requests 5 | 6 | 7 | dmb_config = CONFIG_MANAGER.config.get("dmb") 8 | riven_backend_config = CONFIG_MANAGER.config.get("riven_backend") 9 | riven_frontend_config = CONFIG_MANAGER.config.get("riven_frontend") 10 | postgres_config = CONFIG_MANAGER.config.get("postgres") 11 | zilean_config = CONFIG_MANAGER.config.get("zilean") 12 | 13 | 14 | SENSITIVE_KEY_PATTERN = re.compile( 15 | r"API|TOKEN|URL|HOST|PASSWORD|KEY|SECRET|USERNAME", re.IGNORECASE 16 | ) 17 | 18 | 19 | def parse_config_keys(config): 20 | config_keys = { 21 | "DOWNLOADERS_REAL_DEBRID_API_KEY": None, 22 | "DOWNLOADERS_ALL_DEBRID_API_KEY": None, 23 | "DOWNLOADERS_TORBOX_API_KEY": None, 24 | "SYMLINK_RCLONE_PATH": None, 25 | } 26 | 27 | rclone_instances = config.get("rclone", {}).get("instances", {}) 28 | zurg_instances = config.get("zurg", {}).get("instances", {}) 29 | 30 | key_map = { 31 | "RealDebrid": "DOWNLOADERS_REAL_DEBRID_API_KEY", 32 | "AllDebrid": "DOWNLOADERS_ALL_DEBRID_API_KEY", 33 | "TorBox": "DOWNLOADERS_TORBOX_API_KEY", 34 | } 35 | 36 | enabled_rclone_instances = [] 37 | for instance_name, rclone_instance in rclone_instances.items(): 38 | if not rclone_instance.get("enabled", False): 39 | continue 40 | 41 | enabled_rclone_instances.append(rclone_instance) 42 | zurg_instance = zurg_instances.get(instance_name, {}) 43 | if rclone_instance.get("zurg_enabled", False) and zurg_instance.get( 44 | "enabled", False 45 | ): 46 | api_key = zurg_instance.get("api_key") 47 | else: 48 | api_key = rclone_instance.get("api_key") 49 | 50 | if instance_name in key_map: 51 | config_keys[key_map[instance_name]] = api_key 52 | 53 | if len(enabled_rclone_instances) == 1: 54 | rclone_instance = enabled_rclone_instances[0] 55 | mount_dir = rclone_instance.get("mount_dir", "") 56 | mount_name = rclone_instance.get("mount_name", "") 57 | symlink_path = f"{mount_dir}/{mount_name}/__all__" 58 | config_keys["SYMLINK_RCLONE_PATH"] = symlink_path 59 | elif len(enabled_rclone_instances) > 1: 60 | first_instance = enabled_rclone_instances[0] 61 | mount_dir = first_instance.get("mount_dir", "") 62 | mount_name = first_instance.get("mount_name", "") 63 | config_keys["SYMLINK_RCLONE_PATH"] = f"{mount_dir}/{mount_name}/__all__" 64 | 65 | CONFIG_MANAGER.config["riven_backend"]["wait_for_dir"] = config_keys.get( 66 | "SYMLINK_RCLONE_PATH" 67 | ) 68 | return config_keys 69 | 70 | 71 | def obfuscate_value(key, value, visible_chars=4): 72 | if isinstance(value, (int, float)): 73 | return value 74 | if isinstance(value, bool): 75 | return str(value) 76 | if not value or not SENSITIVE_KEY_PATTERN.search(key) or value is None: 77 | return value 78 | return value[:visible_chars] + "*" * (len(value) - visible_chars) 79 | 80 | 81 | def save_server_config(backend_url, api_key, 
config_dir="/config"): 82 | server_config = {"backendUrl": backend_url, "apiKey": api_key} 83 | 84 | os.makedirs(config_dir, exist_ok=True) 85 | 86 | config_file_path = os.path.join(config_dir, "server.json") 87 | 88 | try: 89 | with open(config_file_path, "w") as config_file: 90 | dump(server_config, config_file, indent=4) 91 | logger.info(f"Server config saved to {config_file_path}") 92 | except IOError as e: 93 | logger.error(f"Error saving server config: {e}") 94 | 95 | 96 | def load_api_key_from_file( 97 | settings_file_path="/riven/backend/data/settings.json", timeout=60 98 | ): 99 | start_time = time.time() 100 | while True: 101 | try: 102 | with open(settings_file_path, "r") as file: 103 | settings = load(file) 104 | api_key = settings.get("api_key") 105 | backend_url = f"http://{riven_backend_config.get('host')}:{riven_backend_config.get('port')}" 106 | 107 | if api_key: 108 | logger.info( 109 | f"API key loaded successfully from {settings_file_path}" 110 | ) 111 | 112 | save_server_config(backend_url, api_key) 113 | 114 | return api_key 115 | else: 116 | logger.warning( 117 | f"API key not found in {settings_file_path}, retrying..." 118 | ) 119 | except FileNotFoundError: 120 | logger.error(f"Settings file {settings_file_path} not found, retrying...") 121 | except JSONDecodeError as e: 122 | logger.error(f"Error parsing {settings_file_path}: {e}, retrying...") 123 | 124 | if time.time() - start_time > timeout: 125 | logger.error( 126 | f"Timeout exceeded ({timeout} seconds) while loading API key from {settings_file_path}" 127 | ) 128 | return None 129 | time.sleep(5) 130 | 131 | 132 | def backend_api_key(): 133 | api_key = os.getenv("BACKEND_API_KEY") 134 | if not api_key: 135 | logger.debug( 136 | "BACKEND_API_KEY not set in environment, attempting to load from settings.json" 137 | ) 138 | api_key = load_api_key_from_file() 139 | if api_key: 140 | os.environ["BACKEND_API_KEY"] = api_key 141 | else: 142 | logger.debug("BACKEND_API_KEY not set") 143 | return api_key 144 | 145 | 146 | def set_env_variables(): 147 | keys = parse_config_keys(CONFIG_MANAGER.config) 148 | real_debrid_api_key = keys.get("DOWNLOADERS_REAL_DEBRID_API_KEY") 149 | all_debrid_api_key = keys.get("DOWNLOADERS_ALL_DEBRID_API_KEY") 150 | torbox_api_key = keys.get("DOWNLOADERS_TORBOX_API_KEY") 151 | symlink_rclone_path = keys.get("SYMLINK_RCLONE_PATH") 152 | 153 | def set_env(key, value, default=None): 154 | try: 155 | if value is not None: 156 | os.environ[key] = value 157 | elif default is not None and key not in os.environ: 158 | os.environ[key] = default 159 | # if key in os.environ: 160 | # obfuscated_value = obfuscate_value(key, os.environ[key]) 161 | # logger.debug(f"Successfully set {key} to {obfuscated_value}") 162 | # else: 163 | # logger.debug(f"{key} not set because no value or default was provided") 164 | except Exception as e: 165 | logger.error(f"Error setting {key}: {e}") 166 | 167 | if zilean_config.get("enabled"): 168 | set_env( 169 | "RIVEN_SCRAPING_ZILEAN_URL", 170 | f"http://{zilean_config.get('host')}:{zilean_config.get('port')}", 171 | ) 172 | 173 | env_vars = { 174 | "RIVEN_DOWNLOADERS_REAL_DEBRID_API_KEY": real_debrid_api_key, 175 | "RIVEN_DOWNLOADERS_ALL_DEBRID_API_KEY": all_debrid_api_key, 176 | "RIVEN_DOWNLOADERS_TORBOX_API_KEY": torbox_api_key, 177 | "RIVEN_UPDATERS_PLEX_URL": ( 178 | None 179 | if not dmb_config.get("plex_address") 180 | else dmb_config.get("plex_address") 181 | ), 182 | "RIVEN_UPDATERS_PLEX_TOKEN": ( 183 | None if not dmb_config.get("plex_token") else 
dmb_config.get("plex_token") 184 | ), 185 | "RIVEN_SYMLINK_RCLONE_PATH": symlink_rclone_path, 186 | "RIVEN_SYMLINK_LIBRARY_PATH": riven_backend_config.get("symlink_library_path"), 187 | "BACKEND_URL": f"http://{riven_backend_config.get('host')}:{riven_backend_config.get('port')}", 188 | "RIVEN_DATABASE_URL": f"postgres://{postgres_config.get('user')}:{postgres_config.get('password')}@{postgres_config.get('host')}/riven", 189 | "RIVEN_DATABASE_HOST": f"postgresql+psycopg2://{postgres_config.get('user')}:{postgres_config.get('password')}@{postgres_config.get('host')}/riven", 190 | } 191 | 192 | default_env_vars = {} 193 | 194 | for key, value in env_vars.items(): 195 | set_env(key, value, default_env_vars.get(key)) 196 | 197 | 198 | def get_api_headers(): 199 | api_key = os.getenv("BACKEND_API_KEY") 200 | 201 | if not api_key: 202 | return {} 203 | 204 | headers = {"X-API-KEY": api_key, "Content-Type": "application/json"} 205 | return headers 206 | 207 | 208 | def get_backend_urls(): 209 | base_url = os.getenv( 210 | "BACKEND_URL", 211 | f"http://{riven_backend_config.get('host')}:{riven_backend_config.get('port')}", 212 | ) 213 | api_key = os.getenv("BACKEND_API_KEY") 214 | 215 | if api_key: 216 | settings_url = f"{base_url}/api/v1/settings" 217 | else: 218 | settings_url = f"{base_url}/settings" 219 | 220 | return { 221 | "settings_url": settings_url, 222 | "set_url": f"{settings_url}/set", 223 | "save_url": f"{settings_url}/save", 224 | "get_all": f"{settings_url}/get/all", 225 | } 226 | 227 | 228 | def fetch_settings(url, headers, max_retries=10, delay=5): 229 | for attempt in range(max_retries): 230 | try: 231 | logger.info( 232 | f"Attempt {attempt + 1}/{max_retries} to fetch settings from {url}" 233 | ) 234 | response = requests.get(url, headers=headers) 235 | if response.status_code == 200: 236 | try: 237 | data = response.json() 238 | if isinstance(data, dict): 239 | logger.info( 240 | f"Successfully fetched settings on attempt {attempt + 1}" 241 | ) 242 | return data, None 243 | else: 244 | return None, f"Unexpected JSON format: {data}" 245 | except ValueError as e: 246 | return None, f"Error parsing JSON: {e}" 247 | elif response.status_code == 404: 248 | logger.error(f"Endpoint not found: {url}. Response: {response.text}") 249 | return None, f"404 Not Found: {response.text}" 250 | else: 251 | logger.error( 252 | f"Failed to fetch settings: Status code {response.status_code}, Response: {response.text}" 253 | ) 254 | except requests.ConnectionError as e: 255 | logger.error(f"Error fetching settings: {e}") 256 | 257 | if attempt < max_retries - 1: 258 | logger.info(f"Retrying in {delay} seconds... ({attempt + 1}/{max_retries})") 259 | time.sleep(delay) 260 | else: 261 | logger.error(f"Max retries reached. 
Failed to fetch settings from {url}") 262 | return None, "Max retries reached" 263 | 264 | 265 | def get_env_value(key, default=None): 266 | env_key = f"RIVEN_{key.replace('.', '_').upper()}" 267 | value = os.getenv(env_key, default) 268 | obfuscated_value = obfuscate_value(key, value) 269 | # logger.debug( 270 | # f"Checking environment variable for key '{env_key}': '{obfuscated_value}'" 271 | # ) 272 | return value 273 | 274 | 275 | def update_settings(current_settings, updated_settings, payload, prefix=""): 276 | if not isinstance(current_settings, dict): 277 | logger.error( 278 | f"Expected a dictionary for settings, but got {type(current_settings)}: {current_settings}" 279 | ) 280 | return 281 | for key, value in current_settings.items(): 282 | full_key = f"{prefix}.{key}" if prefix else key 283 | # logger.debug(f"Processing key '{full_key}' with value Type: {type(value)}") 284 | if key == "debug": 285 | if riven_backend_config.get("log_level").upper() == "DEBUG": 286 | updated_settings["debug"] = True 287 | payload.append({"key": full_key, "value": True}) 288 | logger.info(f"LOG_LEVEL is DEBUG, setting 'debug' to True") 289 | else: 290 | updated_settings["debug"] = False 291 | payload.append({"key": full_key, "value": False}) 292 | logger.info(f"LOG_LEVEL is not DEBUG, setting 'debug' to False") 293 | continue 294 | env_value = get_env_value(full_key) 295 | if isinstance(value, dict): 296 | nested_updated = {} 297 | update_settings(value, nested_updated, payload, full_key) 298 | if nested_updated: 299 | updated_settings[key] = nested_updated 300 | meaningful_change = any(k in nested_updated for k in value) 301 | if meaningful_change: 302 | if "enabled" in value: 303 | updated_settings[key]["enabled"] = True 304 | payload.append({"key": f"{full_key}.enabled", "value": True}) 305 | # logger.debug( 306 | # f"'{full_key}.enabled' set to True due to updates in nested settings" 307 | # ) 308 | elif "enable" in value: 309 | updated_settings[key]["enable"] = True 310 | payload.append({"key": f"{full_key}.enable", "value": True}) 311 | # logger.debug( 312 | # f"'{full_key}.enable' set to True due to updates in nested settings" 313 | # ) 314 | elif env_value is not None: 315 | try: 316 | if env_value.lower() in ["true", "false"]: 317 | updated_settings[key] = env_value.lower() == "true" 318 | elif env_value.isdigit(): 319 | updated_settings[key] = int(env_value) 320 | else: 321 | try: 322 | updated_settings[key] = float(env_value) 323 | except ValueError: 324 | updated_settings[key] = env_value 325 | payload.append({"key": full_key, "value": updated_settings[key]}) 326 | obfuscated_value = obfuscate_value(full_key, updated_settings[key]) 327 | # logger.debug( 328 | # f"Setting '{full_key}' updated to '{obfuscated_value}' from environment variable" 329 | # ) 330 | except ValueError: 331 | logger.error(f"ValueError converting environment variable '{full_key}'") 332 | # else: 333 | # logger.debug( 334 | # f"No environment variable found for '{full_key}', keeping original value." 
335 | # ) 336 | # logger.debug(f"Processed setting for '{key}'") 337 | 338 | 339 | def load_settings(): 340 | time.sleep(10) 341 | logger.info("Loading Riven settings") 342 | set_env_variables() 343 | backend_api_key() 344 | urls = get_backend_urls() 345 | get_all = urls["get_all"] 346 | headers = get_api_headers() 347 | try: 348 | 349 | response, error = fetch_settings(get_all, headers) 350 | 351 | if error: 352 | logger.error(f"Failed to load settings: {error}") 353 | return 354 | 355 | if not isinstance(response, dict): 356 | logger.error(f"Unexpected type for settings response: {type(response)}") 357 | return 358 | 359 | current_settings = response 360 | if not isinstance(current_settings, dict): 361 | logger.error( 362 | f"Unexpected type for current settings: {type(current_settings)}" 363 | ) 364 | return 365 | updated_settings = {} 366 | payload = [] 367 | if current_settings: 368 | update_settings(current_settings, updated_settings, payload) 369 | else: 370 | logger.error("No current settings data to update") 371 | 372 | logger.debug(f"Updated settings payload") 373 | 374 | if not payload: 375 | logger.info("No settings to update.") 376 | return 377 | 378 | set_url = urls["set_url"] 379 | save_url = urls["save_url"] 380 | max_retries = 10 381 | for attempt in range(max_retries): 382 | try: 383 | response = requests.post(set_url, json=payload, headers=headers) 384 | if response.status_code == 200: 385 | save_response = requests.post(save_url, headers=headers) 386 | if save_response.status_code == 200: 387 | logger.info("Settings saved successfully.") 388 | else: 389 | logger.error(f"Failed to save settings: {save_response.text}") 390 | break 391 | else: 392 | logger.error(f"Failed to set settings: {response.text}") 393 | except requests.ConnectionError as e: 394 | logger.error(f"Error loading Riven settings: {e}") 395 | if attempt < max_retries - 1: 396 | logger.info( 397 | f"Retrying in 5 seconds... 
({attempt + 1}/{max_retries})" 398 | ) 399 | time.sleep(5) 400 | else: 401 | raise 402 | except Exception as e: 403 | logger.error(f"Error loading Riven settings: {e}") 404 | raise 405 | -------------------------------------------------------------------------------- /utils/user_management.py: -------------------------------------------------------------------------------- 1 | from utils.config_loader import CONFIG_MANAGER as config 2 | from utils.global_logger import logger 3 | from concurrent.futures import ThreadPoolExecutor 4 | import multiprocessing, os, time, grp, pwd, subprocess 5 | 6 | 7 | user_id = config.get("puid") 8 | group_id = config.get("pgid") 9 | 10 | 11 | def chown_single(path, user_id, group_id): 12 | try: 13 | stat_info = os.stat(path) 14 | if stat_info.st_uid == user_id and stat_info.st_gid == group_id: 15 | return 16 | os.chown(path, user_id, group_id) 17 | except FileNotFoundError: 18 | pass 19 | except Exception as e: 20 | logger.error(f"Error changing ownership of '{path}': {e}") 21 | 22 | 23 | def log_directory_size(directory): 24 | try: 25 | num_files = sum([len(files) for r, d, files in os.walk(directory)]) 26 | logger.debug(f"Directory '{directory}' contains {num_files} files.") 27 | except Exception as e: 28 | logger.error(f"Error calculating size of directory '{directory}': {e}") 29 | 30 | 31 | def get_dynamic_workers(): 32 | return multiprocessing.cpu_count() 33 | 34 | 35 | def chown_recursive(directory, user_id, group_id): 36 | try: 37 | max_workers = get_dynamic_workers() 38 | start_time = time.time() 39 | log_directory_size(directory) 40 | logger.debug(f"Using {max_workers} workers for chown operation") 41 | with ThreadPoolExecutor(max_workers=max_workers) as executor: 42 | for root, dirs, files in os.walk(directory): 43 | for dir_name in dirs: 44 | executor.submit( 45 | chown_single, os.path.join(root, dir_name), user_id, group_id 46 | ) 47 | for file_name in files: 48 | executor.submit( 49 | chown_single, os.path.join(root, file_name), user_id, group_id 50 | ) 51 | executor.submit(chown_single, directory, user_id, group_id) 52 | end_time = time.time() 53 | logger.debug( 54 | f"chown_recursive for {directory} took {end_time - start_time:.2f} seconds" 55 | ) 56 | return True, None 57 | except Exception as e: 58 | return False, f"Error changing ownership of '{directory}': {e}" 59 | 60 | 61 | def create_system_user(username="DMB"): 62 | try: 63 | start_time = time.time() 64 | group_check_start = time.time() 65 | try: 66 | grp.getgrgid(group_id) 67 | logger.debug(f"Group with GID {group_id} already exists.") 68 | except KeyError: 69 | logger.info(f"Group with GID {group_id} does not exist. Creating group...") 70 | with open("/etc/group", "a") as group_file: 71 | group_file.write(f"{username}:x:{group_id}:\n") 72 | group_check_end = time.time() 73 | logger.debug( 74 | f"Group check/creation took {group_check_end - group_check_start:.2f} seconds" 75 | ) 76 | 77 | user_check_start = time.time() 78 | try: 79 | pwd.getpwnam(username) 80 | logger.debug(f"User '{username}' with UID {user_id} already exists.") 81 | return 82 | except KeyError: 83 | logger.info(f"User '{username}' does not exist. 
Creating user...") 84 | user_check_end = time.time() 85 | logger.debug(f"User check took {user_check_end - user_check_start:.2f} seconds") 86 | 87 | home_dir = f"/home/{username}" 88 | if not os.path.exists(home_dir): 89 | os.makedirs(home_dir) 90 | 91 | passwd_write_start = time.time() 92 | with open("/etc/passwd", "a") as passwd_file: 93 | passwd_file.write( 94 | f"{username}:x:{user_id}:{group_id}::/home/{username}:/bin/bash\n" 95 | ) 96 | passwd_write_end = time.time() 97 | logger.debug( 98 | f"Writing to /etc/passwd took {passwd_write_end - passwd_write_start:.2f} seconds" 99 | ) 100 | 101 | user_password = ( 102 | subprocess.check_output("openssl rand -base64 12", shell=True) 103 | .decode() 104 | .strip() 105 | ) 106 | hashed_password = ( 107 | subprocess.check_output(f"openssl passwd -6 {user_password}", shell=True) 108 | .decode() 109 | .strip() 110 | ) 111 | subprocess.run( 112 | f"usermod -p '{hashed_password}' {username}", shell=True, check=True 113 | ) 114 | logger.info(f"Password set for user '{username}'. Stored securely in memory.") 115 | 116 | zurg_dir = "/zurg" 117 | # mnt_dir = config.get("riven_backend").get("symlink_library_path") 118 | log_dir = config.get("dmb").get("log_dir") 119 | config_dir = "/config" 120 | riven_dir = "/riven/backend/data" 121 | zilean_dir = "/zilean/app/data" 122 | plex_debrid_dir = "/plex_debrid/config" 123 | cli_debrid_dir = "/cli_debrid/data" 124 | 125 | rclone_instances = config.get("rclone", {}).get("instances", {}) 126 | 127 | chown_start = time.time() 128 | # chown_recursive(zurg_dir, user_id, group_id) 129 | # os.chown(mnt_dir, user_id, group_id) 130 | os.chown(zurg_dir, user_id, group_id) 131 | chown_recursive(log_dir, user_id, group_id) 132 | chown_recursive(config_dir, user_id, group_id) 133 | chown_recursive(riven_dir, user_id, group_id) 134 | chown_recursive(home_dir, user_id, group_id) 135 | chown_recursive(zilean_dir, user_id, group_id) 136 | chown_recursive(plex_debrid_dir, user_id, group_id) 137 | chown_recursive(cli_debrid_dir, user_id, group_id) 138 | 139 | for instance_name, instance_config in rclone_instances.items(): 140 | if instance_config.get("enabled", False): 141 | rclone_dir = instance_config.get("mount_dir") 142 | if rclone_dir and os.path.exists(rclone_dir): 143 | stat_info = os.stat(rclone_dir) 144 | if stat_info.st_uid == user_id and stat_info.st_gid == group_id: 145 | logger.debug( 146 | f"Directory {rclone_dir} is already owned by {user_id}:{group_id}" 147 | ) 148 | else: 149 | logger.debug( 150 | f"Directory {rclone_dir} is not owned by {user_id}:{group_id}, changing ownership" 151 | ) 152 | logger.debug( 153 | f"Changing ownership of {rclone_dir} for {instance_name}" 154 | ) 155 | chown_recursive(rclone_dir, user_id, group_id) 156 | else: 157 | logger.warning( 158 | f"Mount directory for {instance_name} does not exist or is not set: {rclone_dir}" 159 | ) 160 | 161 | chown_end = time.time() 162 | logger.debug(f"Chown operations took {chown_end - chown_start:.2f} seconds") 163 | end_time = time.time() 164 | logger.info( 165 | f"Total time to create system user '{username}' was {end_time - start_time:.2f} seconds" 166 | ) 167 | 168 | except Exception as e: 169 | logger.error(f"Error creating system user '{username}': {e}") 170 | raise 171 | -------------------------------------------------------------------------------- /utils/versions.py: -------------------------------------------------------------------------------- 1 | from utils.global_logger import logger 2 | from utils.download import Downloader 3 | from 
utils.config_loader import CONFIG_MANAGER
4 | import subprocess, json, re
5 | 
6 | 
7 | class Versions:
8 |     def __init__(self):
9 |         self.logger = logger
10 |         self.downloader = Downloader()
11 | 
12 |     def version_check(
13 |         self, process_name=None, instance_name=None, key=None, version_path=None
14 |     ):
15 |         try:
16 |             is_file = False  # default so is_file is always defined for non-file keys
17 |             if key == "dmb_api_service":
18 |                 version_path, is_file = "/pyproject.toml", True
19 |             elif key == "dmb_frontend":
20 |                 version_path = "/dmb/frontend/package.json"
21 |                 is_file = True
22 |             elif key == "riven_frontend":
23 |                 version_path = "/riven/frontend/version.txt"
24 |                 is_file = True
25 |             elif key == "cli_debrid":
26 |                 version_path = "/cli_debrid/version.txt"
27 |                 is_file = True
28 |             elif key == "cli_battery":
29 |                 version_path = "/cli_debrid/cli_battery/version.txt"
30 |                 is_file = True
31 |             elif key == "phalanx_db":
32 |                 version_path = "/phalanx_db/version.txt"
33 |                 is_file = True
34 |             elif key == "riven_backend":
35 |                 version_path = "/riven/backend/pyproject.toml"
36 |                 is_file = True
37 |             elif key == "zilean":
38 |                 version_path = "/zilean/version.txt"
39 |                 is_file = True
40 |             elif key == "zurg":
41 |                 config = CONFIG_MANAGER.get_instance(instance_name, key)
42 |                 if not config:
43 |                     raise ValueError(f"Configuration for {process_name} not found.")
44 |                 version_path = config.get("config_dir") + "/zurg"
45 |                 is_file = False
46 |             elif key == "postgres":
47 |                 try:
48 |                     result = subprocess.run(
49 |                         ["psql", "--version"], capture_output=True, text=True
50 |                     )
51 |                     if result.returncode == 0:
52 |                         version = result.stdout.strip().split()[-1]
53 |                         return version, None
54 |                     return None, "psql not found or failed"
55 |                 except FileNotFoundError:
56 |                     return None, "psql binary not found"
57 |             elif key == "pgadmin":
58 |                 try:
59 |                     import glob
60 | 
61 |                     version_files = glob.glob(
62 |                         "/pgadmin/venv/lib/python*/site-packages/pgadmin4/version.py"
63 |                     )
64 |                     if version_files:
65 |                         version_globals = {}
66 |                         with open(version_files[0], "r") as f:
67 |                             code = f.read()
68 |                             exec(code, version_globals)
69 |                         release = version_globals.get("APP_RELEASE")
70 |                         revision = version_globals.get("APP_REVISION")
71 |                         suffix = version_globals.get("APP_SUFFIX", "")
72 |                         if release is not None and revision is not None:
73 |                             version = f"{release}.{revision}"
74 |                             if suffix:
75 |                                 version += f"-{suffix}"
76 |                             return version, None
77 |                     return None, "pgAdmin version info not found"
78 |                 except Exception as e:
79 |                     return None, f"Error extracting pgAdmin version: {e}"
80 |             elif key == "rclone":
81 |                 try:
82 |                     result = subprocess.run(
83 |                         ["rclone", "--version"], capture_output=True, text=True
84 |                     )
85 |                     if result.returncode == 0:
86 |                         version_line = result.stdout.strip().splitlines()[0]
87 |                         version = version_line.split()[1]
88 |                         return version, None
89 |                     else:
90 |                         return None, "rclone --version failed"
91 |                 except FileNotFoundError:
92 |                     return None, "rclone binary not found"
93 |                 except Exception as e:
94 |                     return None, f"Error reading rclone version: {e}"
95 |             elif key == "plex_debrid":
96 |                 version_path = "/plex_debrid/ui/ui_settings.py"
97 |                 is_file = True
98 | 
99 |             if is_file:
100 |                 try:
101 |                     with open(version_path, "r") as f:
102 |                         if key == "dmb_frontend":
103 |                             try:
104 |                                 data = json.load(f)
105 |                                 version = f'v{data["version"]}'
106 |                             except (json.JSONDecodeError, KeyError) as e:
107 |                                 version = None
108 |                         elif (
109 |                             key == "riven_frontend"
110 |                             or key == "cli_debrid"
111 |                             or key == "cli_battery"
112 |                             or key == "phalanx_db"
113 |                         ):
114 |                             version = f"v{f.read().strip()}"
115 |                         elif (
116 |                             key
== "riven_backend" 117 | or key == "dmb_api_service" 118 | or key == "plex_debrid" 119 | ): 120 | for line in f: 121 | if line.startswith("version = "): 122 | version_raw = ( 123 | line.split("=")[1].strip().strip('"').strip("'") 124 | ) 125 | match = re.search(r"v?\d+(\.\d+)*", version_raw) 126 | version = match.group(0) if match else "" 127 | if key == "riven_backend": 128 | version = f"v{version}" 129 | break 130 | else: 131 | version = None 132 | elif key == "zilean": 133 | version = f.read().strip() 134 | if version: 135 | return version, None 136 | else: 137 | return None, "Version not found" 138 | except FileNotFoundError: 139 | return None, f"Version file not found: {version_path}" 140 | if not is_file: 141 | try: 142 | result = subprocess.run( 143 | [version_path, "version"], capture_output=True, text=True 144 | ) 145 | if result.returncode == 0: 146 | version_info = result.stdout.strip() 147 | version = version_info.split("\n")[-1].split(": ")[-1] 148 | return version, None 149 | else: 150 | return None, "Version not found" 151 | except FileNotFoundError: 152 | return None, f"Version file not found: {version_path}" 153 | except Exception as e: 154 | self.logger.error( 155 | f"Error reading current version for {process_name} from {version_path}: {e}" 156 | ) 157 | return None, str(e) 158 | 159 | def version_write(self, process_name, key=None, version_path=None, version=None): 160 | try: 161 | if key == "dmb_frontend": 162 | version_path = "/dmb/frontend/package.json" 163 | elif key == "riven_frontend": 164 | version_path = "/riven/frontend/version.txt" 165 | elif key == "riven_backend": 166 | version_path = "/riven/backend/pyproject.toml" 167 | elif key == "zilean": 168 | version_path = "/zilean/version.txt" 169 | if key == "dmb_frontend": 170 | try: 171 | with open(version_path, "r", encoding="utf-8") as f: 172 | data = json.load(f) 173 | 174 | data["version"] = version.lstrip("v") 175 | 176 | with open(version_path, "w", encoding="utf-8") as f: 177 | json.dump(data, f, indent=2) 178 | f.write("\n") 179 | except (json.JSONDecodeError, KeyError) as e: 180 | return False, str(e) 181 | elif not key == "riven_backend": 182 | with open(version_path, "w") as f: 183 | f.write(version) 184 | elif key == "riven_backend": 185 | with open(version_path, "r") as file: 186 | lines = file.readlines() 187 | with open(version_path, "w") as f: 188 | for line in lines: 189 | if line.startswith("version = "): 190 | f.write(f'version = "{version}"\n') 191 | else: 192 | f.write(line) 193 | return True, None 194 | except FileNotFoundError: 195 | self.logger.error(f"Version file not found: {version_path}") 196 | return False, f"Version file not found: {version_path}" 197 | except Exception as e: 198 | self.logger.error( 199 | f"Error writing current version for {process_name} to {version_path}: {e}" 200 | ) 201 | return False, str(e) 202 | 203 | def compare_versions( 204 | self, 205 | process_name, 206 | repo_owner, 207 | repo_name, 208 | instance_name, 209 | key, 210 | nightly=False, 211 | prerelease=False, 212 | ): 213 | try: 214 | latest_release_version, error = self.downloader.get_latest_release( 215 | repo_owner, repo_name, nightly=nightly, prerelease=prerelease 216 | ) 217 | if not latest_release_version: 218 | self.logger.error( 219 | f"Failed to get the latest release for {process_name}: {error}" 220 | ) 221 | raise Exception(error) 222 | current_version, error = self.version_check( 223 | process_name, instance_name, key 224 | ) 225 | if not current_version: 226 | self.logger.error( 227 | 
f"Failed to get the current version for {process_name}: {error}" 228 | ) 229 | current_version = "0.0.0" 230 | self.logger.error( 231 | f"Setting current version to 0.0.0 for {process_name}" 232 | ) 233 | # raise Exception(error) 234 | if nightly: 235 | current_date = ".".join(current_version.split(".")[0:3]) 236 | latest_date = ".".join(latest_release_version.split(".")[0:3]) 237 | if current_date == latest_date: 238 | return False, { 239 | "message": "No updates available (same nightly date)", 240 | "current_version": current_version, 241 | } 242 | if current_version == latest_release_version: 243 | return False, { 244 | "message": "No updates available", 245 | "current_version": current_version, 246 | } 247 | else: 248 | return True, { 249 | "message": "Update available", 250 | "current_version": current_version, 251 | "latest_version": latest_release_version, 252 | } 253 | except Exception as e: 254 | self.logger.error( 255 | f"Exception during version comparison {process_name}: {e}" 256 | ) 257 | return False, str(e) 258 | --------------------------------------------------------------------------------