├── .dockerignore ├── .env.template ├── .github └── workflows │ └── docker-build-push.yml ├── .gitignore ├── Dockerfile.blackhole ├── Dockerfile.plex_authentication ├── Dockerfile.plex_request ├── Dockerfile.plex_request_nginx ├── Dockerfile.scripts ├── Dockerfile.watchlist ├── README.md ├── __init__.py ├── add_next_episode.py ├── blackhole.py ├── blackhole_watcher.py ├── cache └── .gitignore ├── clean_logs.py ├── collect_server_tokens.py ├── delete_non_linked_folders.py ├── docker-compose.yml ├── find_broken_links.py ├── fix_rclonelink.py ├── import_torrent_folder.py ├── logs └── blackhole.log ├── monitor_ram.py ├── move_media_to_directory.py ├── plex_authentication.py ├── plex_authentication_wsgi.py ├── plex_refresh.py ├── plex_refresh.sh ├── plex_request.py ├── plex_request_nginx.conf ├── plex_request_nginx_default.conf ├── plex_request_nginx_entrypoint.sh ├── plex_request_nginx_variables.conf ├── plex_request_wsgi.py ├── reclaim_space.py ├── repair.py ├── requirements.txt ├── row_count.sh ├── shared ├── __init__.py ├── arr.py ├── debrid.py ├── discord.py ├── overseerr.py ├── plex.py ├── requests.py ├── shared.py └── tokens.json ├── sockets └── .gitignore ├── test_ram.py ├── watchlist.py ├── watchlist_runner.py └── zurg_symlink_update.py /.dockerignore: -------------------------------------------------------------------------------- 1 | .env 2 | .env.template 3 | .gitignore 4 | docker-compose.yml 5 | Dockerfile.* 6 | README.md 7 | shared/tokens.json 8 | sockets/ -------------------------------------------------------------------------------- /.env.template: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------# 2 | # ███████╗ ██████╗██████╗ ██╗██████╗ ████████╗███████╗ # 3 | # ██╔════╝██╔════╝██╔══██╗██║██╔══██╗╚══██╔══╝██╔════╝ # 4 | # ███████╗██║ ██████╔╝██║██████╔╝ ██║ ███████╗ # 5 | # ╚════██║██║ ██╔══██╗██║██╔═══╝ ██║ ╚════██║ # 6 | # ███████║╚██████╗██║ ██║██║██║ ██║ ███████║ # 7 | # ╚══════╝ ╚═════╝╚═╝ ╚═╝╚═╝╚═╝ ╚═╝ ╚══════╝ # 8 | #------------------------------------------------------# 9 | 10 | #--------# 11 | # SERVER # 12 | #--------# 13 | 14 | SERVER_DOMAIN= 15 | 16 | #-------------------------------------------------------------------# 17 | # PLEX - WATCHLIST, PLEX AUTHENTICATION, PLEX REQUEST, PLEX REFRESH # 18 | #-------------------------------------------------------------------# 19 | 20 | PLEX_HOST="https://plex.tv/" 21 | PLEX_METADATA_HOST="https://metadata.provider.plex.tv/" 22 | PLEX_SERVER_HOST= 23 | PLEX_SERVER_MACHINE_ID= 24 | PLEX_SERVER_API_KEY= 25 | PLEX_SERVER_MOVIE_LIBRARY_ID= 26 | PLEX_SERVER_TV_SHOW_LIBRARY_ID= 27 | PLEX_SERVER_PATH= 28 | 29 | #-------------------------------------------------------------------------# 30 | # OVERSEERR - WATCHLIST, PLEX AUTHENTICATION, PLEX REQUEST, RECLAIM SPACE # 31 | #-------------------------------------------------------------------------# 32 | 33 | OVERSEERR_HOST= 34 | OVERSEERR_API_KEY= 35 | 36 | #------------------------------------------------------------------------------------# 37 | # SONARR - BLACKHOLE, REPAIR, IMPORT TORRENT FOLDER, RECLAIM SPACE, ADD NEXT EPISODE # 38 | #------------------------------------------------------------------------------------# 39 | 40 | SONARR_HOST= 41 | SONARR_API_KEY= 42 | SONARR_ROOT_FOLDER= 43 | 44 | SONARR_HOST_4K= 45 | SONARR_API_KEY_4K= 46 | SONARR_ROOT_FOLDER_4K= 47 | 48 | SONARR_HOST_ANIME= 49 | SONARR_API_KEY_ANIME= 50 | SONARR_ROOT_FOLDER_ANIME= 51 | 52 | SONARR_HOST_MUX= 53 | 
SONARR_API_KEY_MUX= 54 | SONARR_ROOT_FOLDER_MUX= 55 | 56 | #------------------------------------------------------------------# 57 | # RADARR - BLACKHOLE, REPAIR, IMPORT TORRENT FOLDER, RECLAIM SPACE # 58 | #------------------------------------------------------------------# 59 | 60 | RADARR_HOST= 61 | RADARR_API_KEY= 62 | RADARR_ROOT_FOLDER= 63 | 64 | RADARR_HOST_4K= 65 | RADARR_API_KEY_4K= 66 | RADARR_ROOT_FOLDER_4K= 67 | 68 | RADARR_HOST_ANIME= 69 | RADARR_API_KEY_ANIME= 70 | RADARR_ROOT_FOLDER_ANIME= 71 | 72 | RADARR_HOST_MUX= 73 | RADARR_API_KEY_MUX= 74 | RADARR_ROOT_FOLDER_MUX= 75 | 76 | #--------------------------# 77 | # TAUTULLI - RECLAIM SPACE # 78 | #--------------------------# 79 | 80 | TAUTULLI_HOST= 81 | TAUTULLI_API_KEY= 82 | 83 | #-------------------------------# 84 | # REALDEBRID - BLACKHOLE, REPAIR # 85 | #-------------------------------# 86 | 87 | REALDEBRID_ENABLED=true 88 | REALDEBRID_HOST="https://api.real-debrid.com/rest/1.0/" 89 | REALDEBRID_API_KEY= 90 | REALDEBRID_MOUNT_TORRENTS_PATH= 91 | 92 | #---------------------------# 93 | # TORBOX - BLACKHOLE, REPAIR # 94 | #---------------------------# 95 | 96 | TORBOX_ENABLED=false 97 | TORBOX_HOST="https://api.torbox.app/v1/api/" 98 | TORBOX_API_KEY= 99 | TORBOX_MOUNT_TORRENTS_PATH= 100 | 101 | #-----------------------# 102 | # TRAKT - RECLAIM SPACE # 103 | #-----------------------# 104 | 105 | TRAKT_API_KEY= 106 | 107 | #-------------------------------------# 108 | # WATCHLIST - WATCHLIST, PLEX REQUEST # 109 | #-------------------------------------# 110 | 111 | WATCHLIST_PLEX_PRODUCT="Plex Request Authentication" 112 | WATCHLIST_PLEX_VERSION="1.0.0" 113 | WATCHLIST_PLEX_CLIENT_IDENTIFIER="576101fc-b425-4685-91cb-5d3c1671fd2b" 114 | 115 | #-----------------------# 116 | # BLACKHOLE - BLACKHOLE # 117 | #-----------------------# 118 | 119 | BLACKHOLE_BASE_WATCH_PATH="./blackhole" 120 | BLACKHOLE_RADARR_PATH="Movies" 121 | BLACKHOLE_SONARR_PATH="TV Shows" 122 | BLACKHOLE_FAIL_IF_NOT_CACHED=true 123 | BLACKHOLE_RD_MOUNT_REFRESH_SECONDS=200 124 | BLACKHOLE_WAIT_FOR_TORRENT_TIMEOUT=60 125 | BLACKHOLE_HISTORY_PAGE_SIZE=500 126 | 127 | #-----------------------------------------------------------------------------------------------# 128 | # DISCORD - BLACKHOLE, WATCHLIST, PLEX AUTHENTICATION, PLEX REQUEST, MONITOR RAM, RECLAIM SPACE # 129 | #-----------------------------------------------------------------------------------------------# 130 | 131 | DISCORD_ENABLED=false 132 | DISCORD_UPDATE_ENABLED=false 133 | DISCORD_WEBHOOK_URL= 134 | 135 | #-----------------# 136 | # REPAIR - REPAIR # 137 | #-----------------# 138 | 139 | REPAIR_REPAIR_INTERVAL="10m" 140 | REPAIR_RUN_INTERVAL="1d" 141 | 142 | #-----------------------# 143 | # GENERAL CONFIGURATION # 144 | #-----------------------# 145 | 146 | PYTHONUNBUFFERED=TRUE 147 | PUID= 148 | PGID= 149 | UMASK=002 150 | DOCKER_NETWORK="scripts_default" 151 | DOCKER_NETWORK_EXTERNAL=false 152 | -------------------------------------------------------------------------------- /.github/workflows/docker-build-push.yml: -------------------------------------------------------------------------------- 1 | name: Build and Push Docker images 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | branches: 7 | - 'main' 8 | tags: 9 | - 'v*' 10 | pull_request: 11 | branches: 12 | - 'main' 13 | 14 | jobs: 15 | build-and-push: 16 | runs-on: ubuntu-latest 17 | strategy: 18 | matrix: 19 | include: 20 | - dockerfile: ./Dockerfile.blackhole 21 | image: ghcr.io/${{ github.repository }}/blackhole 22 | - 
dockerfile: ./Dockerfile.watchlist 23 | image: ghcr.io/${{ github.repository }}/watchlist 24 | - dockerfile: ./Dockerfile.plex_authentication 25 | image: ghcr.io/${{ github.repository }}/plex_authentication 26 | - dockerfile: ./Dockerfile.plex_request 27 | image: ghcr.io/${{ github.repository }}/plex_request 28 | - dockerfile: ./Dockerfile.scripts 29 | image: ghcr.io/${{ github.repository }}/scripts 30 | - dockerfile: ./Dockerfile.plex_request_nginx 31 | image: ghcr.io/${{ github.repository }}/plex_request_nginx 32 | steps: 33 | - name: Checkout code 34 | uses: actions/checkout@v4 35 | 36 | - name: Docker meta 37 | id: meta 38 | uses: docker/metadata-action@v5 39 | with: 40 | images: ${{ matrix.image }} 41 | 42 | - name: Set up QEMU 43 | uses: docker/setup-qemu-action@v3 44 | 45 | - name: Set up Docker Buildx 46 | uses: docker/setup-buildx-action@v3 47 | 48 | - name: Dump GitHub context 49 | env: 50 | GITHUB_CONTEXT: ${{ toJson(github) }} 51 | run: echo "$GITHUB_CONTEXT" 52 | 53 | - name: Log in to GitHub Container Registry 54 | uses: docker/login-action@v3 55 | with: 56 | registry: ghcr.io 57 | username: ${{ github.actor }} 58 | password: ${{ secrets.GITHUB_TOKEN }} 59 | 60 | - name: Build and push docker image 61 | uses: docker/build-push-action@v6 62 | with: 63 | context: . 64 | file: ${{ matrix.dockerfile }} 65 | platforms: linux/amd64,linux/arm,linux/arm64 66 | push: true 67 | tags: ${{ steps.meta.outputs.tags }} 68 | labels: ${{ steps.meta.outputs.labels }} 69 | 70 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | # *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | #.idea/ 161 | 162 | # Logs 163 | logs/blackhole.log 164 | 165 | # Torrents 166 | blackhole/ 167 | 168 | # Tokens 169 | shared/tokens.json -------------------------------------------------------------------------------- /Dockerfile.blackhole: -------------------------------------------------------------------------------- 1 | FROM python:3.9-slim 2 | 3 | # Metadata labels 4 | LABEL org.opencontainers.image.source="https://github.com/westsurname/scripts" 5 | LABEL org.opencontainers.image.description="Docker image for the blackhole service" 6 | 7 | ARG SERVICE_NAME=blackhole 8 | 9 | # Set working directory 10 | WORKDIR /app 11 | 12 | # Copy only the files needed for pip install to maximize cache utilization 13 | COPY requirements.txt ./ 14 | 15 | # Install Python dependencies 16 | RUN grep -E "#.*($SERVICE_NAME|all)" requirements.txt | awk '{print $0}' > service_requirements.txt && \ 17 | pip install --no-cache-dir -r service_requirements.txt 18 | 19 | # Copy the rest of the application 20 | COPY . . 
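# The watcher started below monitors the Radarr/Sonarr blackhole folders for new
# .torrent/.magnet files and hands them off to blackhole.py for processing.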
21 | 22 | CMD ["python", "blackhole_watcher.py"] 23 | -------------------------------------------------------------------------------- /Dockerfile.plex_authentication: -------------------------------------------------------------------------------- 1 | FROM python:3.9-slim 2 | 3 | # Metadata labels 4 | LABEL org.opencontainers.image.source="https://github.com/westsurname/scripts" 5 | LABEL org.opencontainers.image.description="Docker image for the plex_authentication service" 6 | 7 | ARG SERVICE_NAME=plex_authentication 8 | 9 | # Set working directory 10 | WORKDIR /app 11 | 12 | # Copy only the files needed for pip install to maximize cache utilization 13 | COPY requirements.txt ./ 14 | 15 | # Install Python dependencies 16 | RUN grep -E "#.*($SERVICE_NAME|all)" requirements.txt | awk '{print $0}' > service_requirements.txt && \ 17 | pip install --no-cache-dir -r service_requirements.txt 18 | 19 | # Copy the rest of the application 20 | COPY . . 21 | 22 | # Run gunicorn using Unix socket 23 | CMD ["gunicorn", "--bind", "unix:/app/sockets/plex_authentication.sock", "plex_authentication_wsgi:app"] -------------------------------------------------------------------------------- /Dockerfile.plex_request: -------------------------------------------------------------------------------- 1 | FROM python:3.9-slim 2 | 3 | # Metadata labels 4 | LABEL org.opencontainers.image.source="https://github.com/westsurname/scripts" 5 | LABEL org.opencontainers.image.description="Docker image for the plex_request service" 6 | 7 | ARG SERVICE_NAME=plex_request 8 | 9 | # Set working directory 10 | WORKDIR /app 11 | 12 | # Copy only the files needed for pip install to maximize cache utilization 13 | COPY requirements.txt ./ 14 | 15 | # Install Python dependencies 16 | RUN grep -E "#.*($SERVICE_NAME|all)" requirements.txt | awk '{print $0}' > service_requirements.txt && \ 17 | pip install --no-cache-dir -r service_requirements.txt 18 | 19 | # Copy the rest of the application 20 | COPY . . 
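# Note: gunicorn (below) binds a Unix socket under /app/sockets rather than a TCP
# port; a fronting proxy (presumably the plex_request_nginx service) is expected
# to forward requests to that socket.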
21 | 22 | # Run gunicorn using Unix socket 23 | CMD ["gunicorn", "--bind", "unix:/app/sockets/plex_request.sock", "plex_request_wsgi:app"] -------------------------------------------------------------------------------- /Dockerfile.plex_request_nginx: -------------------------------------------------------------------------------- 1 | FROM nginx:alpine 2 | 3 | # Metadata labels 4 | LABEL org.opencontainers.image.source="https://github.com/westsurname/scripts" 5 | LABEL org.opencontainers.image.description="Docker image for the plex_request_nginx service" 6 | 7 | # Install required packages 8 | RUN apk add --no-cache \ 9 | openssl \ 10 | inotify-tools 11 | 12 | # Create SSL directory 13 | RUN mkdir -p /ssl && \ 14 | chown nginx:nginx /ssl && \ 15 | chmod 700 /ssl 16 | 17 | COPY plex_request_nginx_variables.conf /etc/nginx/templates/10-variables.conf.template 18 | COPY plex_request_nginx_default.conf /etc/nginx/conf.d/default.conf 19 | COPY plex_request_nginx.conf /etc/nginx/nginx.conf 20 | COPY plex_request_nginx_entrypoint.sh /docker-entrypoint.d/40-decrypt-plex-cert.sh 21 | 22 | # Make the entrypoint script executable 23 | RUN chmod +x /docker-entrypoint.d/40-decrypt-plex-cert.sh 24 | 25 | # Expose port 8000 to the outside world 26 | EXPOSE 8000 27 | 28 | CMD ["nginx", "-g", "daemon off;"] 29 | -------------------------------------------------------------------------------- /Dockerfile.scripts: -------------------------------------------------------------------------------- 1 | FROM python:3.9-slim 2 | 3 | # Metadata labels 4 | LABEL org.opencontainers.image.source="https://github.com/westsurname/scripts" 5 | LABEL org.opencontainers.image.description="Docker image for the scripts service" 6 | 7 | ARG SERVICE_NAME=scripts 8 | 9 | # Set working directory 10 | WORKDIR /app 11 | 12 | # Copy only the files needed for pip install to maximize cache utilization 13 | COPY requirements.txt ./ 14 | 15 | # Install Python dependencies 16 | RUN grep -E "#.*($SERVICE_NAME|all)" requirements.txt | awk '{print $0}' > service_requirements.txt && \ 17 | pip install --no-cache-dir -r service_requirements.txt 18 | 19 | # Copy the rest of the application 20 | COPY . . -------------------------------------------------------------------------------- /Dockerfile.watchlist: -------------------------------------------------------------------------------- 1 | FROM python:3.9-slim 2 | 3 | # Metadata labels 4 | LABEL org.opencontainers.image.source="https://github.com/westsurname/scripts" 5 | LABEL org.opencontainers.image.description="Docker image for the watchlist service" 6 | 7 | ARG SERVICE_NAME=watchlist 8 | 9 | # Set working directory 10 | WORKDIR /app 11 | 12 | # Copy only the files needed for pip install to maximize cache utilization 13 | COPY requirements.txt ./ 14 | 15 | # Install Python dependencies 16 | RUN grep -E "#.*($SERVICE_NAME|all)" requirements.txt | awk '{print $0}' > service_requirements.txt && \ 17 | pip install --no-cache-dir -r service_requirements.txt 18 | 19 | # Copy the rest of the application 20 | COPY . . 21 | 22 | CMD ["python", "watchlist_runner.py"] 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Scripts 2 | 3 | ## Installation 4 | 5 | ### Prerequisites 6 | - Python 3.x installed. 7 | - Pip package manager. 8 | 9 | ### Steps 10 | 1. 
Clone the repository (preferably into the home directory):
11 | 
12 | ```bash
13 | git clone https://github.com/westsurname/scripts.git
14 | ```
15 | 
16 | 2. Navigate to the project directory:
17 | 
18 | ```bash
19 | cd scripts
20 | ```
21 | 
22 | 3. Install the required packages:
23 | 
24 | ```bash
25 | pip install -r requirements.txt
26 | ```
27 | 4. Copy `.env.template` to `.env` and populate the (applicable) variables:
28 | 
29 | - **Server**:
30 |   - `SERVER_DOMAIN`: The domain name of your server.
31 | 
32 | - **Plex** - Watchlist, Plex Authentication, Plex Request, Plex Refresh:
33 |   - `PLEX_HOST`: The URL to general Plex services.
34 |   - `PLEX_METADATA_HOST`: The URL to the Plex metadata service.
35 |   - `PLEX_SERVER_HOST`: The host address of your Plex server.
36 |   - `PLEX_SERVER_MACHINE_ID`: The processed unique machine identifier for your Plex server.
37 |   - `PLEX_SERVER_API_KEY`: Your Plex server's API key for authentication.
38 |   - `PLEX_SERVER_MOVIE_LIBRARY_ID`: The library ID for movies on your Plex server.
39 |   - `PLEX_SERVER_TV_SHOW_LIBRARY_ID`: The library ID for TV shows on your Plex server.
40 |   - `PLEX_SERVER_PATH`: The path to your 'Plex Media Server' folder containing Cache/cert-v2.p12 (e.g., /var/lib/plexmediaserver/Library/Application Support/Plex Media Server).
41 | 
42 | - **Overseerr** - Watchlist, Plex Authentication, Plex Request, Reclaim Space:
43 |   - `OVERSEERR_HOST`: The host address of your Overseerr instance.
44 |   - `OVERSEERR_API_KEY`: The API key for accessing Overseerr.
45 | 
46 | - **Sonarr** - Blackhole, Repair, Move Media to Directory, Reclaim Space, Add Next Episode:
47 |   - `SONARR_HOST`: The host address of your Sonarr instance.
48 |   - `SONARR_API_KEY`: The API key for accessing Sonarr.
49 |   - `SONARR_ROOT_FOLDER`: The root folder path for Sonarr media files. (Required for the repair compose service only)
50 | 
51 | - **Radarr** - Blackhole, Repair, Move Media to Directory, Reclaim Space:
52 |   - `RADARR_HOST`: The host address of your Radarr instance.
53 |   - `RADARR_API_KEY`: The API key for accessing Radarr.
54 |   - `RADARR_ROOT_FOLDER`: The root folder path for Radarr media files. (Required for the repair compose service only)
55 | 
56 | - **Tautulli** - Reclaim Space:
57 |   - `TAUTULLI_HOST`: The host address of your Tautulli instance.
58 |   - `TAUTULLI_API_KEY`: The API key for accessing Tautulli.
59 | 
60 | - **RealDebrid** - Blackhole, Repair:
61 |   - `REALDEBRID_ENABLED`: Set to `true` to enable RealDebrid services.
62 |   - `REALDEBRID_HOST`: The host address for the RealDebrid API.
63 |   - `REALDEBRID_API_KEY`: The API key for accessing RealDebrid services.
64 |   - `REALDEBRID_MOUNT_TORRENTS_PATH`: The path to the RealDebrid mount torrents folder.
65 | 
66 | - **TorBox** - Blackhole, Repair:
67 |   - `TORBOX_ENABLED`: Set to `true` to enable TorBox services.
68 |   - `TORBOX_HOST`: The host address for the TorBox API.
69 |   - `TORBOX_API_KEY`: The API key for accessing TorBox services.
70 |   - `TORBOX_MOUNT_TORRENTS_PATH`: The path to the TorBox mount torrents folder.
71 | 
72 | - **Trakt** - Reclaim Space:
73 |   - `TRAKT_API_KEY`: The API key for integrating with Trakt.
74 | 
75 | - **Watchlist** - Watchlist, Plex Request:
76 |   - `WATCHLIST_PLEX_PRODUCT`: Identifier for the Plex product used in watchlists.
77 |   - `WATCHLIST_PLEX_VERSION`: The version of the Plex product used.
78 |   - `WATCHLIST_PLEX_CLIENT_IDENTIFIER`: A unique identifier for the Plex client.
79 | 
80 | - **Blackhole** - Blackhole:
81 |   - `BLACKHOLE_BASE_WATCH_PATH`: The base path for the folders watched by the blackhole mechanism. Can be relative or absolute.
82 |   - `BLACKHOLE_RADARR_PATH`: The path where torrent files will be dropped into by Radarr, relative to the base path.
83 |   - `BLACKHOLE_SONARR_PATH`: The path where torrent files will be dropped into by Sonarr, relative to the base path.
84 |   - `BLACKHOLE_FAIL_IF_NOT_CACHED`: Whether to fail operations if content is not cached.
85 |   - `BLACKHOLE_RD_MOUNT_REFRESH_SECONDS`: How long, in seconds, to wait for the RealDebrid mount to refresh.
86 |   - `BLACKHOLE_WAIT_FOR_TORRENT_TIMEOUT`: The timeout in seconds to wait for a torrent to be successful before failing.
87 |   - `BLACKHOLE_HISTORY_PAGE_SIZE`: The number of history items to pull at once when attempting to mark a download as failed.
88 | 
89 | - **Discord** - Blackhole, Watchlist, Plex Authentication, Plex Request, Monitor Ram, Reclaim Space:
90 |   - `DISCORD_ENABLED`: Set to `true` to enable Discord error notifications.
91 |   - `DISCORD_UPDATE_ENABLED`: Set to `true` to also enable update notifications on Discord.
92 |   - `DISCORD_WEBHOOK_URL`: The Discord webhook URL for sending notifications.
93 | 
94 | - **Repair** - Repair:
95 |   - `REPAIR_REPAIR_INTERVAL`: The interval in smart format (e.g., '1h2m3s') to wait between repairing each media file.
96 |   - `REPAIR_RUN_INTERVAL`: The interval in smart format (e.g., '1w2d3h4m5s') at which to run the repair process.
97 | 
98 | - **General Configuration**:
99 |   - `PYTHONUNBUFFERED`: Set to `TRUE` to ensure Python output is displayed in the logs in real time.
100 |   - `PUID`: Set this to the user ID that the service should run as.
101 |   - `PGID`: Set this to the group ID that the service should run as.
102 |   - `UMASK`: Set this to control the default file creation permissions.
103 |   - `DOCKER_NETWORK`: Set this to the name of the Docker network to be used by the services.
104 |   - `DOCKER_NETWORK_EXTERNAL`: Set this to `true` if specifying an external Docker network above, otherwise set to `false`.
105 | 
106 | ## Blackhole
107 | 
108 | ### Setup
109 | 
110 | 1. Within the arrs, navigate to `Settings > Download Clients` and add a `Torrent Blackhole` client.
111 | 
112 | 2. Configure the torrent blackhole download client as follows:
113 |    - **Name**: `blackhole`
114 |    - **Enable**: Yes
115 |    - **Torrent Folder**: Set to `[BLACKHOLE_BASE_WATCH_PATH]/[BLACKHOLE_RADARR_PATH]` for Radarr or `[BLACKHOLE_BASE_WATCH_PATH]/[BLACKHOLE_SONARR_PATH]` for Sonarr
116 |    - **Watch Folder**: Set to `[Torrent Folder]/completed`
117 |    - **Save Magnet Files**: Yes, with the extension `.magnet`
118 |    - **Read Only**: No
119 |    - **Client Priority**: Prioritize as you please
120 |    - **Tags**: Tag as you please
121 |    - **Completed Download Handling**: Remove Completed
122 | 
123 | 3. Run the `blackhole_watcher.py` script to start monitoring the blackhole:
124 | 
125 | ```bash
126 | python3 blackhole_watcher.py
127 | ```
128 | 
129 | ## Plex Request
130 | 
131 | ### Setup
132 | 
133 | 1. Ensure your Plex Media Server is properly configured and running.
134 | 
135 | 2. In your `.env` file, make sure the following variables are set:
136 |    - `PLEX_SERVER_PATH`: Points to your 'Plex Media Server' directory containing the certificate
137 |    - `PLEX_SERVER_MACHINE_ID`: Your Plex server's machine identifier
138 |    - Other required variables as listed in the configuration section
139 | 
140 | 3. Start the Plex Request services:
141 | ```bash
142 | docker-compose --profile plex_request up -d
143 | ```
144 | 
145 | ### SSL Configuration
146 | The service automatically handles SSL by:
147 | - Using your Plex Media Server's certificate (cert-v2.p12) from the Cache directory
148 | - Converting it to the required format for nginx
149 | - Automatically detecting and handling certificate updates
150 | 
151 | No manual SSL configuration is needed as long as your Plex Media Server is properly configured.
152 | 
153 | ## Repair
154 | 
155 | ### Usage
156 | 
157 | The repair script can be run with the following command:
158 | 
159 | ```bash
160 | python3 repair.py
161 | ```
162 | The script accepts the following arguments:
163 | 
164 | - `--dry-run`: Perform a dry run without making any changes.
165 | - `--no-confirm`: Execute without confirmation prompts.
166 | - `--repair-interval`: Optional interval in smart format (e.g., '1h2m3s') to wait between repairing each media file.
167 | - `--run-interval`: Optional interval in smart format (e.g., '1w2d3h4m5s') at which to run the repair process. (See the short parsing sketch after the Import Torrent Folder section below for how the smart format breaks down.)
168 | - `--mode`: Choose the repair mode: `symlink` repairs broken symlinks, `file` repairs missing files (default: `symlink`).
169 | - `--season-packs`: Upgrade to season packs when a non-season-pack is found. Only applicable in symlink mode.
170 | - `--include-unmonitored`: Include unmonitored media in the repair process.
171 | 
172 | ### Warning
173 | This script can potentially delete and re-download a large number of files. It is recommended to use the `--dry-run` flag first to see what actions the script will take.
174 | 
175 | ### Example
176 | 
177 | Here's an example of how you might use this script:
178 | 
179 | ```bash
180 | python3 repair.py --mode file --repair-interval 30m --run-interval 1d --dry-run
181 | ```
182 | 
183 | In this example, the script will run in 'file' mode, waiting 30 minutes between each repair and running once a day. It will perform a dry run, printing actions without executing them.
184 | 
185 | 
186 | ## Import Torrent Folder
187 | 
188 | ### Usage
189 | 
190 | The script can be run with the following command:
191 | 
192 | ```bash
193 | python3 import_torrent_folder.py
194 | ```
195 | 
196 | The script accepts the following arguments:
197 | 
198 | - `--directory`: Specifies a single directory to process.
199 | - `--custom-regex`: Allows you to specify a custom multi-season regex.
200 | - `--dry-run`: If this flag is set, the script will print actions without executing them.
201 | - `--no-confirm`: If this flag is set, the script will execute without asking for confirmation.
202 | - `--radarr`: If this flag is set, the script will use the Radarr symlink directory.
203 | - `--sonarr`: If this flag is set, the script will use the Sonarr symlink directory.
204 | - `--symlink-directory`: Allows you to specify a custom symlink directory.
205 | 
206 | ### Example
207 | Here is an example of how you might use this script:
208 | 
209 | ```bash
210 | python3 import_torrent_folder.py --directory "Some Movie (2024)" --radarr --dry-run
211 | ```
212 | 
213 | In this example, the script will process the "Some Movie (2024)" directory using the Radarr symlink directory. It will print the actions it would take without actually executing them, because the `--dry-run` flag is set.
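The smart interval format used by `--repair-interval`, `--run-interval`, and the `REPAIR_*` variables is simply a run of `<number><unit>` pairs, where the unit is one of `w`, `d`, `h`, `m`, or `s`. As a rough illustration (the `parse_interval` helper below is hypothetical, not code from this repo), such a value can be turned into a `timedelta` like so:

```python
import re
from datetime import timedelta

# Hypothetical sketch: the repo's actual parser may differ.
UNITS = {'w': 'weeks', 'd': 'days', 'h': 'hours', 'm': 'minutes', 's': 'seconds'}

def parse_interval(spec: str) -> timedelta:
    """Parse a smart-format interval such as '1w2d3h4m5s' into a timedelta."""
    pairs = re.findall(r'(\d+)([wdhms])', spec)
    # Reject inputs with leftover characters, e.g. '1x' or plain '5'.
    if not pairs or ''.join(n + u for n, u in pairs) != spec:
        raise ValueError(f"Invalid interval: {spec!r}")
    return timedelta(**{UNITS[u]: int(n) for n, u in pairs})

print(parse_interval('30m'))         # 0:30:00
print(parse_interval('1w2d3h4m5s'))  # 9 days, 3:04:05
```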
214 | 
215 | 
216 | ## Delete Non-Linked Folders
217 | 
218 | ### Usage
219 | 
220 | The script can be run with the following command:
221 | 
222 | ```bash
223 | python3 delete_non_linked_folders.py
224 | ```
225 | 
226 | The script accepts the following arguments:
227 | 
228 | - `dst_folder`: The destination folder to check for non-linked files. This folder should encompass all folders where symbolic links may exist.
229 | - `--src-folder`: The source folder to check for non-linked files. This is the folder that the symbolic links in the destination folder should point to.
230 | - `--dry-run`: If this flag is provided, the script will only print the non-linked file directories without deleting them.
231 | - `--no-confirm`: If this flag is provided, the script will delete non-linked file directories without asking for confirmation.
232 | - `--only-delete-files`: If this flag is provided, the script will delete only the files in the non-linked directories, not the directories themselves. This is useful for Zurg, where directories are automatically removed once they contain no content.
233 | 
234 | ### Warning
235 | 
236 | This script can potentially delete a large number of files and directories. It is recommended to use the `--dry-run` flag first to see what the script will delete. Also, make sure to provide the correct source and destination folders, as the script will delete any directories in the source folder that are not pointed to by symbolic links in the destination folder.
237 | 
238 | ### Example
239 | 
240 | Here is an example of how you might use this script:
241 | 
242 | ```bash
243 | python3 delete_non_linked_folders.py /path/to/destination/folder --src-folder /path/to/source/folder --dry-run
244 | ```
245 | 
246 | In this example, the script will check the "/path/to/source/folder" directory against the symbolic links found under "/path/to/destination/folder" to find directories that are not linked to. It will print the directories that would be deleted without actually deleting them, because the `--dry-run` flag is set.
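As a reference for what "non-linked" means here, the core check in `delete_non_linked_folders.py` boils down to the following: collect the resolved target of every symlink under the destination folder, then flag any source directory whose files are never among those targets. A condensed sketch of that logic (simplified from the actual script; confirmation prompts and deletion are omitted):

```python
import os

def find_unused_dirs(src_folder: str, dst_folder: str):
    # Resolve the real target of every symlink under the destination folder.
    targets = set()
    for root, _dirs, files in os.walk(dst_folder, followlinks=True):
        for name in files:
            path = os.path.join(root, name)
            if os.path.islink(path):
                targets.add(os.path.realpath(path))

    # A source directory is unused if it contains files but none are linked to.
    for root, _dirs, files in os.walk(src_folder, followlinks=True):
        if files and all(
            os.path.realpath(os.path.join(root, name)) not in targets
            for name in files
        ):
            yield os.path.relpath(root, src_folder)
```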
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/westsurname/scripts/e600dbf8251021141738db44d37b21392436a222/__init__.py
--------------------------------------------------------------------------------
/add_next_episode.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import sys
3 | from shared.shared import sonarr
4 | 
5 | 
6 | sonarrHost = sonarr['host']
7 | sonarrAPIkey = sonarr['apiKey']
8 | 
9 | show_tvdbId = sys.argv[1]
10 | ep_num = int(sys.argv[2])
11 | season_num = int(sys.argv[3])
12 | 
13 | s = requests.get(f"{sonarrHost}/api/v3/series/lookup?apiKey={sonarrAPIkey}&term=tvdb:{show_tvdbId}")
14 | sonarrShow = s.json()
15 | 
16 | s = requests.get(f"{sonarrHost}/api/v3/episode?apiKey={sonarrAPIkey}&seriesId={sonarrShow[0]['id']}")
17 | sonarrEpisodes = s.json()
18 | 
19 | sonarrEpisode = next(filter(lambda episode: episode['seasonNumber'] == season_num and ep_num + 1 == episode['episodeNumber'], sonarrEpisodes), None)  # default avoids StopIteration
20 | 
21 | if (sonarrEpisode and
22 |     not sonarrEpisode['hasFile']):
23 |     monitorJson = {'episodeIds': [sonarrEpisode['id']], 'monitored': True}
24 |     s = requests.put(f"{sonarrHost}/api/v3/episode/monitor?apiKey={sonarrAPIkey}", json=monitorJson)
25 | 
26 |     searchJson = {'name': 'EpisodeSearch', 'episodeIds': [sonarrEpisode['id']]}
27 |     s = requests.post(f"{sonarrHost}/api/v3/command?apiKey={sonarrAPIkey}", json=searchJson)
28 | 
--------------------------------------------------------------------------------
/blackhole.py:
--------------------------------------------------------------------------------
1 | import shutil
2 | import time
3 | import traceback
4 | import os
5 | import sys
6 | import re
7 | import requests
8 | import asyncio
9 | import uuid
10 | from datetime import datetime
11 | # import urllib
12 | from shared.discord import discordError, discordUpdate
13 | from shared.shared import realdebrid, torbox, blackhole, plex, checkRequiredEnvs
14 | from shared.arr import Arr, Radarr, Sonarr
15 | from shared.debrid import TorrentBase, RealDebridTorrent, RealDebridMagnet, TorboxTorrent, TorboxMagnet
16 | 
17 | _print = print
18 | 
19 | def print(*values: object):
20 |     _print(f"[{datetime.now()}]", *values)
21 | 
22 | requiredEnvs = {
23 |     'Blackhole base watch path': (blackhole['baseWatchPath'],),
24 |     'Blackhole Radarr path': (blackhole['radarrPath'],),
25 |     'Blackhole Sonarr path': (blackhole['sonarrPath'],),
26 |     'Blackhole fail if not cached': (blackhole['failIfNotCached'],),
27 |     'Blackhole RD mount refresh seconds': (blackhole['rdMountRefreshSeconds'],),
28 |     'Blackhole wait for torrent timeout': (blackhole['waitForTorrentTimeout'],),
29 |     'Blackhole history page size': (blackhole['historyPageSize'],)
30 | }
31 | 
32 | checkRequiredEnvs(requiredEnvs)
33 | 
34 | class TorrentFileInfo():
35 |     class FileInfo():
36 |         def __init__(self, filename, filenameWithoutExt, filePath, filePathProcessing, folderPathCompleted) -> None:
37 |             self.filename = filename
38 |             self.filenameWithoutExt = filenameWithoutExt
39 |             self.filePath = filePath
40 |             self.filePathProcessing = filePathProcessing
41 |             self.folderPathCompleted = folderPathCompleted
42 | 
43 |     class TorrentInfo():
44 |         def __init__(self, isTorrentOrMagnet, isDotTorrentFile) -> None:
45 |             self.isTorrentOrMagnet = isTorrentOrMagnet
46 |             self.isDotTorrentFile = isDotTorrentFile
47 | 
48 |     def __init__(self, 
filename, isRadarr) -> None: 49 | print('filename:', filename) 50 | baseBath = getPath(isRadarr) 51 | uniqueId = str(uuid.uuid4())[:8] 52 | isDotTorrentFile = filename.casefold().endswith('.torrent') 53 | isTorrentOrMagnet = isDotTorrentFile or filename.casefold().endswith('.magnet') 54 | filenameWithoutExt, ext = os.path.splitext(filename) 55 | filePath = os.path.join(baseBath, filename) 56 | 57 | # Get the maximum filename length for the target directory 58 | try: 59 | maxNameBytes = os.pathconf(baseBath, 'PC_NAME_MAX') 60 | except (AttributeError, ValueError, OSError): 61 | maxNameBytes = 255 62 | 63 | # Calculate space needed for uniqueId, separator, and extension 64 | extraBytes = len(f"_{uniqueId}{ext}".encode()) 65 | 66 | # Truncate the filename if needed 67 | if len(filenameWithoutExt.encode()) > maxNameBytes - extraBytes: 68 | processingName = truncateBytes(filenameWithoutExt, maxNameBytes - extraBytes) 69 | print(f"Truncated filename from {len(filenameWithoutExt.encode())} to {len(processingName.encode())} bytes") 70 | else: 71 | processingName = filenameWithoutExt 72 | 73 | filePathProcessing = os.path.join(baseBath, 'processing', f"{processingName}_{uniqueId}{ext}") 74 | folderPathCompleted = os.path.join(baseBath, 'completed', filenameWithoutExt) 75 | 76 | self.fileInfo = self.FileInfo(filename, filenameWithoutExt, filePath, filePathProcessing, folderPathCompleted) 77 | self.torrentInfo = self.TorrentInfo(isTorrentOrMagnet, isDotTorrentFile) 78 | 79 | def getPath(isRadarr, create=False): 80 | baseWatchPath = blackhole['baseWatchPath'] 81 | absoluteBaseWatchPath = baseWatchPath if os.path.isabs(baseWatchPath) else os.path.abspath(baseWatchPath) 82 | finalPath = os.path.join(absoluteBaseWatchPath, blackhole['radarrPath'] if isRadarr else blackhole['sonarrPath']) 83 | 84 | if create: 85 | for sub_path in ['', 'processing', 'completed']: 86 | path_to_check = os.path.join(finalPath, sub_path) 87 | if not os.path.exists(path_to_check): 88 | os.makedirs(path_to_check) 89 | 90 | return finalPath 91 | 92 | # From Radarr Radarr/src/NzbDrone.Core/Organizer/FileNameBuilder.cs 93 | def cleanFileName(name): 94 | result = name 95 | badCharacters = ["\\", "/", "<", ">", "?", "*", ":", "|", "\""] 96 | goodCharacters = ["+", "+", "", "", "!", "-", "", "", ""] 97 | 98 | for i, char in enumerate(badCharacters): 99 | result = result.replace(char, goodCharacters[i]) 100 | 101 | return result.strip() 102 | 103 | refreshingTask = None 104 | 105 | def truncateBytes(text: str, maxBytes: int) -> str: 106 | """Truncate a string to a maximum number of bytes in UTF-8 encoding.""" 107 | encoded = text.encode() 108 | return encoded[:maxBytes].decode(errors='ignore') 109 | 110 | async def refreshArr(arr: Arr, count=60): 111 | # TODO: Change to refresh until found/imported 112 | async def refresh(): 113 | for _ in range(count): 114 | arr.refreshMonitoredDownloads() 115 | await asyncio.sleep(1) 116 | 117 | global refreshingTask 118 | if refreshingTask and not refreshingTask.done(): 119 | print("Refresh already in progress, restarting...") 120 | refreshingTask.cancel() 121 | 122 | refreshingTask = asyncio.create_task(refresh()) 123 | try: 124 | await refreshingTask 125 | except asyncio.CancelledError: 126 | pass 127 | 128 | def copyFiles(file: TorrentFileInfo, folderPathMountTorrent, arr: Arr): 129 | # Consider removing this and always streaming 130 | try: 131 | _print = globals()['print'] 132 | 133 | def print(*values: object): 134 | _print(f"[{file.fileInfo.filenameWithoutExt}]", *values) 135 | 136 | count = 0 
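        # The loop below polls once per second, waiting for the arr to delete the
        # completed folder, then copies the torrent's files into place and triggers
        # a re-scan; if that still hasn't happened after 180 attempts, an alert is
        # sent via Discord.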
137 |         print('Waiting for arr to delete folders...')
138 |         while True:
139 |             count += 1
140 |             if not os.path.exists(file.fileInfo.folderPathCompleted):
141 |                 print('Deleted')
142 |                 print('Copying actual files to arr folder...')
143 |                 shutil.copytree(folderPathMountTorrent, file.fileInfo.folderPathCompleted)
144 |                 arr.refreshMonitoredDownloads()
145 |                 print('Copied')
146 |                 break
147 |             time.sleep(1)
148 |             if count == 180:
149 |                 print('copyCount > 180')
150 |                 discordError(f"{file.fileInfo.filenameWithoutExt} copy attempt count > 180", "Shortcut has not finished importing yet")
151 | 
152 |     except:
153 |         e = traceback.format_exc()
154 | 
155 |         print(f"Error copying files for {file.fileInfo.filenameWithoutExt}")
156 |         print(e)
157 | 
158 |         discordError(f"Error copying files for {file.fileInfo.filenameWithoutExt}", e)
159 | 
160 | import signal
161 | 
162 | async def processTorrent(torrent: TorrentBase, file: TorrentFileInfo, arr: Arr) -> bool:
163 |     _print = globals()['print']
164 | 
165 |     def print(*values: object):
166 |         _print(f"[{torrent.__class__.__name__}] [{file.fileInfo.filenameWithoutExt}]", *values)
167 | 
168 |     if not torrent.submitTorrent():
169 |         return False
170 | 
171 |     count = 0
172 |     while True:
173 |         count += 1
174 |         info = await torrent.getInfo(refresh=True)
175 |         if not info:
176 |             return False
177 | 
178 |         status = info['status']
179 | 
180 |         print('status:', status)
181 | 
182 |         if status == torrent.STATUS_WAITING_FILES_SELECTION:
183 |             if not await torrent.selectFiles():
184 |                 torrent.delete()
185 |                 return False
186 |         elif status == torrent.STATUS_DOWNLOADING:
187 |             # Send progress to arr
188 |             progress = info['progress']
189 |             print(f"Progress: {progress:.2f}%")
190 |             if torrent.skipAvailabilityCheck and torrent.failIfNotCached:
191 |                 torrent.delete()
192 |                 return False
193 |             await asyncio.sleep(1)
194 |         elif status == torrent.STATUS_ERROR:
195 |             return False
196 |         elif status == torrent.STATUS_COMPLETED:
197 |             existsCount = 0
198 |             print('Waiting for folders to refresh...')
199 | 
200 |             while True:
201 |                 existsCount += 1
202 | 
203 |                 folderPathMountTorrent = await torrent.getTorrentPath()
204 |                 if folderPathMountTorrent:
205 |                     multiSeasonRegex1 = r'(?<=[\W_][Ss]eason[\W_])[\d][\W_][\d]{1,2}(?=[\W_])'
206 |                     multiSeasonRegex2 = r'(?<=[\W_][Ss])[\d]{2}[\W_][Ss]?[\d]{2}(?=[\W_])'
207 |                     multiSeasonRegexCombined = f'{multiSeasonRegex1}|{multiSeasonRegex2}'
208 | 
209 |                     multiSeasonMatch = re.search(multiSeasonRegexCombined, file.fileInfo.filenameWithoutExt)
210 | 
211 |                     for root, dirs, files in os.walk(folderPathMountTorrent):
212 |                         relRoot = os.path.relpath(root, folderPathMountTorrent)
213 |                         for filename in files:
214 |                             # Check if the file is accessible
215 |                             # if not await is_accessible(os.path.join(root, filename)):
216 |                             #     print(f"Timeout reached when accessing file: {filename}")
217 |                             #     discordError(f"Timeout reached when accessing file", filename)
218 |                             # Uncomment the following line to fail the entire torrent if the timeout on any of its files is reached
219 |                             #     fail(torrent)
220 |                             #     return
221 | 
222 |                             if multiSeasonMatch:
223 |                                 seasonMatch = re.search(r'S([\d]{2})E[\d]{2}', filename)
224 | 
225 |                                 if seasonMatch:
226 |                                     season = seasonMatch.group(1)
227 |                                     seasonShort = season[1:] if season[0] == '0' else season
228 | 
229 |                                     seasonFolderPathCompleted = re.sub(multiSeasonRegex1, seasonShort, file.fileInfo.folderPathCompleted)
230 |                                     seasonFolderPathCompleted = re.sub(multiSeasonRegex2, season, seasonFolderPathCompleted)
231 | 
232 |                                     os.makedirs(os.path.join(seasonFolderPathCompleted, relRoot), exist_ok=True)
233 |                                     os.symlink(os.path.join(root, filename), os.path.join(seasonFolderPathCompleted, relRoot, filename))
234 |                                     print('Season Recursive:', f"{os.path.join(seasonFolderPathCompleted, relRoot, filename)} -> {os.path.join(root, filename)}")
235 |                                     # refreshEndpoint = f"{plex['serverHost']}/library/sections/{plex['serverMovieLibraryId'] if isRadarr else plex['serverTvShowLibraryId']}/refresh?path={urllib.parse.quote_plus(os.path.join(seasonFolderPathCompleted, relRoot))}&X-Plex-Token={plex['serverApiKey']}"
236 |                                     # cancelRefreshRequest = requests.delete(refreshEndpoint, headers={'Accept': 'application/json'})
237 |                                     # refreshRequest = requests.get(refreshEndpoint, headers={'Accept': 'application/json'})
238 | 
239 |                                     continue
240 | 
241 | 
242 |                             os.makedirs(os.path.join(file.fileInfo.folderPathCompleted, relRoot), exist_ok=True)
243 |                             os.symlink(os.path.join(root, filename), os.path.join(file.fileInfo.folderPathCompleted, relRoot, filename))
244 |                             print('Recursive:', f"{os.path.join(file.fileInfo.folderPathCompleted, relRoot, filename)} -> {os.path.join(root, filename)}")
245 |                             # refreshEndpoint = f"{plex['serverHost']}/library/sections/{plex['serverMovieLibraryId'] if isRadarr else plex['serverTvShowLibraryId']}/refresh?path={urllib.parse.quote_plus(os.path.join(file.fileInfo.folderPathCompleted, relRoot))}&X-Plex-Token={plex['serverApiKey']}"
246 |                             # cancelRefreshRequest = requests.delete(refreshEndpoint, headers={'Accept': 'application/json'})
247 |                             # refreshRequest = requests.get(refreshEndpoint, headers={'Accept': 'application/json'})
248 | 
249 |                     print('Refreshed')
250 |                     discordUpdate(f"Successfully processed {file.fileInfo.filenameWithoutExt}", f"Now available for immediate consumption! existsCount: {existsCount}")
251 | 
252 |                     # refreshEndpoint = f"{plex['serverHost']}/library/sections/{plex['serverMovieLibraryId'] if isRadarr else plex['serverTvShowLibraryId']}/refresh?X-Plex-Token={plex['serverApiKey']}"
253 |                     # cancelRefreshRequest = requests.delete(refreshEndpoint, headers={'Accept': 'application/json'})
254 |                     # refreshRequest = requests.get(refreshEndpoint, headers={'Accept': 'application/json'})
255 |                     await refreshArr(arr)
256 | 
257 |                     # await asyncio.get_running_loop().run_in_executor(None, copyFiles, file, folderPathMountTorrent, arr)
258 |                     return True
259 | 
260 |                 if existsCount >= blackhole['rdMountRefreshSeconds'] + 1:
261 |                     print(f"Torrent folder not found in filesystem: {file.fileInfo.filenameWithoutExt}")
262 |                     discordError("Torrent folder not found in filesystem", file.fileInfo.filenameWithoutExt)
263 | 
264 |                     return False
265 | 
266 |                 await asyncio.sleep(1)
267 | 
268 |         if torrent.failIfNotCached and count >= blackhole['waitForTorrentTimeout']:
269 |             print(f"Torrent timeout: {file.fileInfo.filenameWithoutExt} - {status}")
270 |             discordError("Torrent timeout", f"{file.fileInfo.filenameWithoutExt} - {status}")
271 | 
272 |             return False
273 | 
274 | async def processFile(file: TorrentFileInfo, arr: Arr, isRadarr):
275 |     try:
276 |         _print = globals()['print']
277 | 
278 |         def print(*values: object):
279 |             _print(f"[{file.fileInfo.filenameWithoutExt}]", *values)
280 | 
281 |         from concurrent.futures import ThreadPoolExecutor
282 | 
283 |         def read_file(path):
284 |             with open(path, 'r', encoding='utf-8', errors='ignore') as f:
285 |                 f.read(1)
286 | 
287 |         async def is_accessible(path, timeout=10):
288 |             with ThreadPoolExecutor() as executor:
289 |                 loop = asyncio.get_event_loop()
290 |                 try:
291 |                     await asyncio.wait_for(loop.run_in_executor(executor, read_file, path), 
timeout=timeout) 292 | discordUpdate('good') 293 | return True 294 | except Exception as e: 295 | discordError('error', e) 296 | return False 297 | finally: 298 | executor.shutdown(wait=False) 299 | 300 | time.sleep(.1) # Wait before processing the file in case it isn't fully written yet. 301 | os.renames(file.fileInfo.filePath, file.fileInfo.filePathProcessing) 302 | 303 | with open(file.fileInfo.filePathProcessing, 'rb' if file.torrentInfo.isDotTorrentFile else 'r') as f: 304 | fileData = f.read() 305 | f.seek(0) 306 | 307 | torrentConstructors = [] 308 | if realdebrid['enabled']: 309 | torrentConstructors.append(RealDebridTorrent if file.torrentInfo.isDotTorrentFile else RealDebridMagnet) 310 | if torbox['enabled']: 311 | torrentConstructors.append(TorboxTorrent if file.torrentInfo.isDotTorrentFile else TorboxMagnet) 312 | 313 | onlyLargestFile = isRadarr or bool(re.search(r'S[\d]{2}E[\d]{2}(?![\W_][\d]{2}[\W_])', file.fileInfo.filename)) 314 | if not blackhole['failIfNotCached']: 315 | torrents = [constructor(f, fileData, file, blackhole['failIfNotCached'], onlyLargestFile) for constructor in torrentConstructors] 316 | results = await asyncio.gather(*(processTorrent(torrent, file, arr) for torrent in torrents)) 317 | 318 | if not any(results): 319 | await asyncio.gather(*(fail(torrent, arr, isRadarr) for torrent in torrents)) 320 | else: 321 | for i, constructor in enumerate(torrentConstructors): 322 | isLast = (i == len(torrentConstructors) - 1) 323 | torrent = constructor(f, fileData, file, blackhole['failIfNotCached'], onlyLargestFile) 324 | 325 | if await processTorrent(torrent, file, arr): 326 | break 327 | elif isLast: 328 | await fail(torrent, arr, isRadarr) 329 | 330 | os.remove(file.fileInfo.filePathProcessing) 331 | except: 332 | e = traceback.format_exc() 333 | 334 | print(f"Error processing {file.fileInfo.filenameWithoutExt}") 335 | print(e) 336 | 337 | discordError(f"Error processing {file.fileInfo.filenameWithoutExt}", e) 338 | 339 | async def fail(torrent: TorrentBase, arr: Arr, isRadarr): 340 | _print = globals()['print'] 341 | 342 | def print(*values: object): 343 | _print(f"[{torrent.__class__.__name__}] [{torrent.file.fileInfo.filenameWithoutExt}]", *values) 344 | 345 | print(f"Failing") 346 | 347 | torrentHash = torrent.getHash() 348 | history = await asyncio.to_thread(arr.getHistory, blackhole['historyPageSize'], includeGrandchildDetails=True) 349 | items = [item for item in history if (item.torrentInfoHash and item.torrentInfoHash.casefold() == torrentHash.casefold()) or cleanFileName(item.sourceTitle.casefold()) == torrent.file.fileInfo.filenameWithoutExt.casefold()] 350 | 351 | if not items: 352 | message = "No history items found to mark as failed. Arr will not attempt to grab an alternative." 
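        # Reached when neither the torrent's infohash nor its cleaned source title
        # matches any recent history item, e.g. if the grab has already fallen
        # outside the BLACKHOLE_HISTORY_PAGE_SIZE most recent entries.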
353 | print(message) 354 | discordError(message, torrent.file.fileInfo.filenameWithoutExt) 355 | else: 356 | firstItem = items[0] 357 | isSeasonPack = firstItem.releaseType == 'SeasonPack' 358 | 359 | # For season packs, we only need to fail one episode and trigger one search 360 | items = [firstItem] if isSeasonPack else items 361 | 362 | # Mark items as failed 363 | failTasks = [asyncio.to_thread(arr.failHistoryItem, item.id) for item in items] 364 | await asyncio.gather(*failTasks) 365 | 366 | # For season packs, trigger a new search 367 | if isSeasonPack: 368 | for item in items: 369 | series = await asyncio.to_thread(arr.get, item.grandparentId) 370 | await asyncio.to_thread(arr.automaticSearch, series, item.parentId) 371 | 372 | print(f"Failed") 373 | 374 | def getFiles(isRadarr): 375 | print('getFiles') 376 | files = (TorrentFileInfo(filename, isRadarr) for filename in os.listdir(getPath(isRadarr)) if filename not in ['processing', 'completed']) 377 | return [file for file in files if file.torrentInfo.isTorrentOrMagnet] 378 | 379 | async def on_created(isRadarr): 380 | print("Enter 'on_created'") 381 | try: 382 | print('radarr/sonarr:', 'radarr' if isRadarr else 'sonarr') 383 | 384 | if isRadarr: 385 | arr = Radarr() 386 | else: 387 | arr = Sonarr() 388 | 389 | futures: list[asyncio.Future] = [] 390 | firstGo = True 391 | 392 | # Consider switching to a queue 393 | while firstGo or not all(future.done() for future in futures): 394 | files = getFiles(isRadarr) 395 | if files: 396 | futures.append(asyncio.gather(*(processFile(file, arr, isRadarr) for file in files))) 397 | elif firstGo: 398 | print('No torrent files found') 399 | firstGo = False 400 | await asyncio.sleep(1) 401 | 402 | await asyncio.gather(*futures) 403 | except: 404 | e = traceback.format_exc() 405 | 406 | print(f"Error processing") 407 | print(e) 408 | 409 | discordError(f"Error processing", e) 410 | print("Exit 'on_created'") 411 | 412 | if __name__ == "__main__": 413 | asyncio.run(on_created(isRadarr=sys.argv[1] == 'radarr')) 414 | -------------------------------------------------------------------------------- /blackhole_watcher.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from watchdog.observers import Observer 3 | from watchdog.events import FileSystemEventHandler 4 | from blackhole import on_created, getPath 5 | 6 | class BlackholeHandler(FileSystemEventHandler): 7 | def __init__(self, is_radarr): 8 | super().__init__() 9 | self.is_radarr = is_radarr 10 | self.path_name = getPath(is_radarr, create=True) 11 | 12 | def on_created(self, event): 13 | if not event.is_directory and event.src_path.lower().endswith((".torrent", ".magnet")): 14 | asyncio.run(on_created(self.is_radarr)) 15 | 16 | async def on_run(self): 17 | await on_created(self.is_radarr) 18 | 19 | async def main(): 20 | print("Watching blackhole") 21 | 22 | radarr_handler = BlackholeHandler(is_radarr=True) 23 | sonarr_handler = BlackholeHandler(is_radarr=False) 24 | 25 | radarr_observer = Observer() 26 | radarr_observer.schedule(radarr_handler, radarr_handler.path_name) 27 | 28 | sonarr_observer = Observer() 29 | sonarr_observer.schedule(sonarr_handler, sonarr_handler.path_name) 30 | 31 | try: 32 | radarr_observer.start() 33 | sonarr_observer.start() 34 | 35 | await asyncio.gather( 36 | radarr_handler.on_run(), 37 | sonarr_handler.on_run() 38 | ) 39 | except KeyboardInterrupt: 40 | radarr_observer.stop() 41 | sonarr_observer.stop() 42 | 43 | radarr_observer.join() 44 | 
sonarr_observer.join() 45 | 46 | 47 | if __name__ == "__main__": 48 | asyncio.run(main()) -------------------------------------------------------------------------------- /cache/.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore everything in this directory 2 | * 3 | # Except this file 4 | !.gitignore -------------------------------------------------------------------------------- /clean_logs.py: -------------------------------------------------------------------------------- 1 | import os 2 | import datetime 3 | from shared.shared import pathToScript 4 | 5 | folder = os.path.join(pathToScript, "../logs") 6 | max_lines = 1000 7 | max_size = 1048576 8 | 9 | for file in os.listdir(folder): 10 | with open(os.path.join(folder, file), "r") as f: 11 | line_count = sum(1 for _ in f) 12 | 13 | if line_count > max_lines: 14 | f.seek(0, os.SEEK_END) 15 | fsize = f.tell() 16 | f.seek(max(fsize-max_size, 0), 0) 17 | lines = f.readlines()[-max_lines:] 18 | 19 | with open(os.path.join(folder, file), "w") as f: 20 | f.writelines(lines) 21 | 22 | print(f"[{datetime.datetime.now()}] Cleaned {file}") 23 | -------------------------------------------------------------------------------- /collect_server_tokens.py: -------------------------------------------------------------------------------- 1 | import json 2 | from shared.shared import tokensFilename 3 | from shared.plex import getServerToken 4 | 5 | with open(tokensFilename, 'r+') as tokensFile: 6 | tokens = json.load(tokensFile) 7 | for id, token in tokens.items(): 8 | token['serverToken'] = getServerToken(token['token']) 9 | tokensFile.seek(0) 10 | json.dump(tokens, tokensFile) 11 | tokensFile.truncate() -------------------------------------------------------------------------------- /delete_non_linked_folders.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import shutil 4 | import traceback 5 | from shared.shared import realdebrid 6 | 7 | def find_non_linked_files(src_folder, dst_folder, dry_run=False, no_confirm=False, only_delete_files=False): 8 | # Get the list of links in the dst_folder 9 | dst_links = set() 10 | for root, dirs, files in os.walk(dst_folder, followlinks=True): 11 | for file in files: 12 | dst_path = os.path.join(root, file) 13 | if os.path.islink(dst_path): 14 | dst_links.add(os.path.realpath(dst_path)) 15 | 16 | # Check for non-linked files in the src_folder 17 | for root, dirs, files in os.walk(src_folder, followlinks=True): 18 | # Get the subdirectory of the current root, relative to the src_folder 19 | subdirectory = os.path.relpath(root, src_folder) 20 | subdirectory_any_linked_files = False 21 | for file in files: 22 | src_file = os.path.realpath(os.path.join(root, file)) 23 | 24 | if src_file in dst_links: 25 | subdirectory_any_linked_files = True 26 | # else: 27 | # print(f"File {src_file} is not used!") 28 | 29 | if any(files) and not subdirectory_any_linked_files: 30 | print(f"Directory {subdirectory} is not used!") 31 | if not dry_run: 32 | response = input("Do you want to delete this directory? 
(y/n): ") if not no_confirm else 'y' 33 | if response.lower() == 'y': 34 | if only_delete_files: 35 | try: 36 | for file in files: 37 | os.remove(os.path.realpath(os.path.join(root, file))) 38 | print(f"Files in directory {subdirectory} deleted!") 39 | except Exception as e: 40 | print(f"Error during file deletion!") 41 | print(traceback.format_exc()) 42 | else: 43 | try: 44 | shutil.rmtree(os.path.realpath(root)) 45 | print(f"Directory {subdirectory} deleted!") 46 | except Exception as e: 47 | print(f"Directory {subdirectory} error during deletion!") 48 | print(traceback.format_exc()) 49 | else: 50 | print(f"Directory {subdirectory} not deleted!") 51 | 52 | if __name__ == '__main__': 53 | parser = argparse.ArgumentParser(description='Find and delete non-linked file directories.') 54 | parser.add_argument('dst_folder', type=str, help='Destination folder to check for non-linked files. WARNING: This folder must encompass ALL folders where symlinks may live otherwise folders will unintentionally be deleted') 55 | parser.add_argument('--src-folder', type=str, default=realdebrid['mountTorrentsPath'], help='Source folder to check for non-linked files') 56 | parser.add_argument('--dry-run', action='store_true', help='print non-linked file directories without deleting') 57 | parser.add_argument('--no-confirm', action='store_true', help='delete non-linked file directories without confirmation') 58 | parser.add_argument('--only-delete-files', action='store_true', help='delete only the files in the non-linked directories') 59 | args = parser.parse_args() 60 | find_non_linked_files(args.src_folder, args.dst_folder, dry_run=args.dry_run, no_confirm=args.no_confirm, only_delete_files=args.only_delete_files) 61 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | x-blackhole: &blackhole 2 | build: 3 | context: . 4 | dockerfile: Dockerfile.blackhole 5 | image: ghcr.io/westsurname/scripts/blackhole:latest 6 | pull_policy: always 7 | user: "${PUID:-}${PGID:+:${PGID}}" 8 | env_file: 9 | - .env 10 | restart: unless-stopped 11 | 12 | x-repair: &repair 13 | build: 14 | context: . 
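    # x-repair is a YAML anchor: each repair_* service below pulls in this whole
    # block via "<<: *repair" and then overrides just its environment and volumes.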
15 | dockerfile: Dockerfile.scripts 16 | image: ghcr.io/westsurname/scripts/scripts:latest 17 | pull_policy: always 18 | command: python repair.py --no-confirm 19 | env_file: 20 | - .env 21 | restart: unless-stopped 22 | 23 | services: 24 | blackhole: 25 | <<: *blackhole 26 | container_name: blackhole_service 27 | environment: 28 | - BLACKHOLE_BASE_WATCH_PATH=/${BLACKHOLE_BASE_WATCH_PATH} 29 | volumes: 30 | - ${REALDEBRID_MOUNT_TORRENTS_PATH:-${BLACKHOLE_RD_MOUNT_TORRENTS_PATH:-/dev/null}}/../../:${REALDEBRID_MOUNT_TORRENTS_PATH:-${BLACKHOLE_RD_MOUNT_TORRENTS_PATH:-/dev/null}}/../../:rslave 31 | - ${TORBOX_MOUNT_TORRENTS_PATH:-/dev/null}:${TORBOX_MOUNT_TORRENTS_PATH:-/dev/null}/../../:rslave 32 | - ${BLACKHOLE_BASE_WATCH_PATH}/${BLACKHOLE_SONARR_PATH}:/${BLACKHOLE_BASE_WATCH_PATH}/${BLACKHOLE_SONARR_PATH} 33 | - ${BLACKHOLE_BASE_WATCH_PATH}/${BLACKHOLE_RADARR_PATH}:/${BLACKHOLE_BASE_WATCH_PATH}/${BLACKHOLE_RADARR_PATH} 34 | profiles: [blackhole, blackhole_all, all] 35 | 36 | blackhole_4k: 37 | <<: *blackhole 38 | container_name: blackhole_4k_service 39 | environment: 40 | - SONARR_HOST=${SONARR_HOST_4K} 41 | - SONARR_API_KEY=${SONARR_API_KEY_4K} 42 | - RADARR_HOST=${RADARR_HOST_4K} 43 | - RADARR_API_KEY=${RADARR_API_KEY_4K} 44 | - BLACKHOLE_BASE_WATCH_PATH=/${BLACKHOLE_BASE_WATCH_PATH} 45 | volumes: 46 | - ${REALDEBRID_MOUNT_TORRENTS_PATH:-${BLACKHOLE_RD_MOUNT_TORRENTS_PATH:-/dev/null}}/../../:${REALDEBRID_MOUNT_TORRENTS_PATH:-${BLACKHOLE_RD_MOUNT_TORRENTS_PATH:-/dev/null}}/../../:rslave 47 | - ${TORBOX_MOUNT_TORRENTS_PATH:-/dev/null}:${TORBOX_MOUNT_TORRENTS_PATH:-/dev/null}/../../:rslave 48 | - ${BLACKHOLE_BASE_WATCH_PATH}/${BLACKHOLE_SONARR_PATH} 4k:/${BLACKHOLE_BASE_WATCH_PATH}/${BLACKHOLE_SONARR_PATH} 49 | - ${BLACKHOLE_BASE_WATCH_PATH}/${BLACKHOLE_RADARR_PATH} 4k:/${BLACKHOLE_BASE_WATCH_PATH}/${BLACKHOLE_RADARR_PATH} 50 | profiles: [blackhole_4k, blackhole_all, all] 51 | 52 | blackhole_anime: 53 | <<: *blackhole 54 | container_name: blackhole_anime_service 55 | environment: 56 | - SONARR_HOST=${SONARR_HOST_ANIME} 57 | - SONARR_API_KEY=${SONARR_API_KEY_ANIME} 58 | - RADARR_HOST=${RADARR_HOST_ANIME} 59 | - RADARR_API_KEY=${RADARR_API_KEY_ANIME} 60 | - BLACKHOLE_BASE_WATCH_PATH=/${BLACKHOLE_BASE_WATCH_PATH} 61 | volumes: 62 | - ${REALDEBRID_MOUNT_TORRENTS_PATH:-${BLACKHOLE_RD_MOUNT_TORRENTS_PATH:-/dev/null}}/../../:${REALDEBRID_MOUNT_TORRENTS_PATH:-${BLACKHOLE_RD_MOUNT_TORRENTS_PATH:-/dev/null}}/../../:rslave 63 | - ${TORBOX_MOUNT_TORRENTS_PATH:-/dev/null}:${TORBOX_MOUNT_TORRENTS_PATH:-/dev/null}/../../:rslave 64 | - ${BLACKHOLE_BASE_WATCH_PATH}/${BLACKHOLE_SONARR_PATH} anime:/${BLACKHOLE_BASE_WATCH_PATH}/${BLACKHOLE_SONARR_PATH} 65 | - ${BLACKHOLE_BASE_WATCH_PATH}/${BLACKHOLE_RADARR_PATH} anime:/${BLACKHOLE_BASE_WATCH_PATH}/${BLACKHOLE_RADARR_PATH} 66 | profiles: [blackhole_anime, blackhole_all, all] 67 | 68 | blackhole_mux: 69 | <<: *blackhole 70 | container_name: blackhole_mux_service 71 | environment: 72 | - SONARR_HOST=${SONARR_HOST_MUX} 73 | - SONARR_API_KEY=${SONARR_API_KEY_MUX} 74 | - RADARR_HOST=${RADARR_HOST_MUX} 75 | - RADARR_API_KEY=${RADARR_API_KEY_MUX} 76 | - BLACKHOLE_BASE_WATCH_PATH=/${BLACKHOLE_BASE_WATCH_PATH} 77 | volumes: 78 | - ${REALDEBRID_MOUNT_TORRENTS_PATH:-${BLACKHOLE_RD_MOUNT_TORRENTS_PATH:-/dev/null}}/../../:${REALDEBRID_MOUNT_TORRENTS_PATH:-${BLACKHOLE_RD_MOUNT_TORRENTS_PATH:-/dev/null}}/../../:rslave 79 | - ${TORBOX_MOUNT_TORRENTS_PATH:-/dev/null}:${TORBOX_MOUNT_TORRENTS_PATH:-/dev/null}/../../:rslave 80 | - 
${BLACKHOLE_BASE_WATCH_PATH}/${BLACKHOLE_SONARR_PATH} mux:/${BLACKHOLE_BASE_WATCH_PATH}/${BLACKHOLE_SONARR_PATH} 81 | - ${BLACKHOLE_BASE_WATCH_PATH}/${BLACKHOLE_RADARR_PATH} mux:/${BLACKHOLE_BASE_WATCH_PATH}/${BLACKHOLE_RADARR_PATH} 82 | profiles: [blackhole_mux, blackhole_all, all] 83 | 84 | repair_service: 85 | <<: *repair 86 | container_name: repair_service 87 | volumes: 88 | - ${REALDEBRID_MOUNT_TORRENTS_PATH:-${BLACKHOLE_RD_MOUNT_TORRENTS_PATH:-/dev/null}}/../../:${REALDEBRID_MOUNT_TORRENTS_PATH:-${BLACKHOLE_RD_MOUNT_TORRENTS_PATH:-/dev/null}}/../../:rslave 89 | - ${TORBOX_MOUNT_TORRENTS_PATH:-/dev/null}:${TORBOX_MOUNT_TORRENTS_PATH:-/dev/null}/../../:rslave 90 | - ${SONARR_ROOT_FOLDER}:${SONARR_ROOT_FOLDER} 91 | - ${RADARR_ROOT_FOLDER}:${RADARR_ROOT_FOLDER} 92 | profiles: [repair, repair_all, all] 93 | 94 | repair_4k: 95 | <<: *repair 96 | container_name: repair_4k_service 97 | environment: 98 | - SONARR_HOST=${SONARR_HOST_4K} 99 | - SONARR_API_KEY=${SONARR_API_KEY_4K} 100 | - RADARR_HOST=${RADARR_HOST_4K} 101 | - RADARR_API_KEY=${RADARR_API_KEY_4K} 102 | volumes: 103 | - ${REALDEBRID_MOUNT_TORRENTS_PATH:-${BLACKHOLE_RD_MOUNT_TORRENTS_PATH:-/dev/null}}/../../:${REALDEBRID_MOUNT_TORRENTS_PATH:-${BLACKHOLE_RD_MOUNT_TORRENTS_PATH:-/dev/null}}/../../:rslave 104 | - ${TORBOX_MOUNT_TORRENTS_PATH:-/dev/null}:${TORBOX_MOUNT_TORRENTS_PATH:-/dev/null}/../../:rslave 105 | - ${SONARR_ROOT_FOLDER_4K}:${SONARR_ROOT_FOLDER_4K} 106 | - ${RADARR_ROOT_FOLDER_4K}:${RADARR_ROOT_FOLDER_4K} 107 | profiles: [repair_4k, repair_all, all] 108 | 109 | repair_anime: 110 | <<: *repair 111 | container_name: repair_anime_service 112 | environment: 113 | - SONARR_HOST=${SONARR_HOST_ANIME} 114 | - SONARR_API_KEY=${SONARR_API_KEY_ANIME} 115 | - RADARR_HOST=${RADARR_HOST_ANIME} 116 | - RADARR_API_KEY=${RADARR_API_KEY_ANIME} 117 | volumes: 118 | - ${REALDEBRID_MOUNT_TORRENTS_PATH:-${BLACKHOLE_RD_MOUNT_TORRENTS_PATH:-/dev/null}}/../../:${REALDEBRID_MOUNT_TORRENTS_PATH:-${BLACKHOLE_RD_MOUNT_TORRENTS_PATH:-/dev/null}}/../../:rslave 119 | - ${TORBOX_MOUNT_TORRENTS_PATH:-/dev/null}:${TORBOX_MOUNT_TORRENTS_PATH:-/dev/null}/../../:rslave 120 | - ${SONARR_ROOT_FOLDER_ANIME}:${SONARR_ROOT_FOLDER_ANIME} 121 | - ${RADARR_ROOT_FOLDER_ANIME}:${RADARR_ROOT_FOLDER_ANIME} 122 | profiles: [repair_anime, repair_all, all] 123 | 124 | repair_mux: 125 | <<: *repair 126 | container_name: repair_mux_service 127 | environment: 128 | - SONARR_HOST=${SONARR_HOST_MUX} 129 | - SONARR_API_KEY=${SONARR_API_KEY_MUX} 130 | - RADARR_HOST=${RADARR_HOST_MUX} 131 | - RADARR_API_KEY=${RADARR_API_KEY_MUX} 132 | volumes: 133 | - ${REALDEBRID_MOUNT_TORRENTS_PATH:-${BLACKHOLE_RD_MOUNT_TORRENTS_PATH:-/dev/null}}/../../:${REALDEBRID_MOUNT_TORRENTS_PATH:-${BLACKHOLE_RD_MOUNT_TORRENTS_PATH:-/dev/null}}/../../:rslave 134 | - ${TORBOX_MOUNT_TORRENTS_PATH:-/dev/null}:${TORBOX_MOUNT_TORRENTS_PATH:-/dev/null}/../../:rslave 135 | - ${SONARR_ROOT_FOLDER_MUX}:${SONARR_ROOT_FOLDER_MUX} 136 | - ${RADARR_ROOT_FOLDER_MUX}:${RADARR_ROOT_FOLDER_MUX} 137 | profiles: [repair_mux, repair_all, all] 138 | 139 | watchlist: 140 | build: 141 | context: . 142 | dockerfile: Dockerfile.watchlist 143 | container_name: watchlist_service 144 | image: ghcr.io/westsurname/scripts/watchlist:latest 145 | pull_policy: always 146 | volumes: 147 | - ./shared/tokens.json:/app/shared/tokens.json 148 | env_file: 149 | - .env 150 | restart: unless-stopped 151 | profiles: [watchlist, all] 152 | 153 | plex_authentication: 154 | build: 155 | context: . 
156 | dockerfile: Dockerfile.plex_authentication 157 | container_name: plex_authentication_service 158 | image: ghcr.io/westsurname/scripts/plex_authentication:latest 159 | pull_policy: always 160 | volumes: 161 | - ./shared/tokens.json:/app/shared/tokens.json 162 | - ./sockets:/app/sockets 163 | env_file: 164 | - .env 165 | restart: unless-stopped 166 | profiles: [plex_authentication, watchlist, plex_request, all] 167 | 168 | plex_request: 169 | build: 170 | context: . 171 | dockerfile: Dockerfile.plex_request 172 | container_name: plex_request_service 173 | image: ghcr.io/westsurname/scripts/plex_request:latest 174 | pull_policy: always 175 | volumes: 176 | - ./shared/tokens.json:/app/shared/tokens.json 177 | - ./sockets:/app/sockets 178 | env_file: 179 | - .env 180 | restart: unless-stopped 181 | profiles: [plex_request, all] 182 | 183 | plex_request_nginx: 184 | build: 185 | context: . 186 | dockerfile: Dockerfile.plex_request_nginx 187 | container_name: plex_request_nginx_service 188 | image: ghcr.io/westsurname/scripts/plex_request_nginx:latest 189 | pull_policy: always 190 | volumes: 191 | - ${PLEX_SERVER_PATH}:/plex:ro 192 | - ./sockets:/app/sockets 193 | ports: 194 | - 8012:8000 195 | env_file: 196 | - .env 197 | restart: unless-stopped 198 | profiles: [plex_request, all] 199 | depends_on: 200 | - plex_request 201 | - plex_authentication 202 | 203 | networks: 204 | default: 205 | name: ${DOCKER_NETWORK:-scripts_default} 206 | external: ${DOCKER_NETWORK_EXTERNAL:-false} 207 | -------------------------------------------------------------------------------- /find_broken_links.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | def find_broken_links(root): 4 | count = 0 5 | for dirpath, dirnames, filenames in os.walk(root): 6 | for linkname in filenames + dirnames: 7 | linkpath = os.path.join(dirpath, linkname) 8 | if os.path.islink(linkpath) and not os.path.exists(os.readlink(linkpath)): 9 | print(f"{linkpath} is a broken link!") 10 | count += 1 11 | print(count) 12 | 13 | root_dir = "/path/to/symlinks/dir" 14 | find_broken_links(root_dir) -------------------------------------------------------------------------------- /fix_rclonelink.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import shutil 4 | 5 | mount_path = '/path/to/mount' 6 | files = glob.glob(f'{mount_path}/**/*.rclonelink', recursive=True) 7 | print(files) 8 | 9 | for file in files: 10 | with open(file, 'r') as f: 11 | src = f.read() 12 | dest = file.replace('.rclonelink', '') 13 | print(f"{dest} -> {src}") 14 | os.remove(file) 15 | shutil.copyfile(src, dest) -------------------------------------------------------------------------------- /import_torrent_folder.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | import argparse 4 | from shared.shared import blackhole, realdebrid 5 | 6 | parentDirectory = realdebrid['mountTorrentsPath'] 7 | 8 | def get_completed_parent_directory(args): 9 | if args.symlink_directory: 10 | return args.symlink_directory 11 | elif args.radarr: 12 | return f"{blackhole['baseWatchPath']}/{blackhole['radarrPath']}/completed" 13 | elif args.sonarr: 14 | return f"{blackhole['baseWatchPath']}/{blackhole['sonarrPath']}/completed" 15 | else: 16 | return 17 | 18 | def process_directory(directory, completedParentDirectory, custom_regex=None, dry_run=False): 19 | fullDirectory = os.path.join(parentDirectory, 
directory) 20 | completedFullDirectory = os.path.join(completedParentDirectory, directory) 21 | 22 | multiSeasonRegex1 = r'(?<=[\W_][Ss]eason[\W_])[\d][\W_][\d]{1,2}(?=[\W_])' 23 | multiSeasonRegex2 = r'(?<=[\W_][Ss])[\d]{2}[\W_][Ss]?[\d]{2}(?=[\W_])' 24 | multiSeasonRegexCombined = f'{multiSeasonRegex1}|{multiSeasonRegex2}' 25 | if custom_regex: 26 | multiSeasonRegexCombined += f'|{custom_regex}' 27 | 28 | multiSeasonMatch = re.search(multiSeasonRegexCombined, directory) 29 | 30 | for root, dirs, files in os.walk(fullDirectory): 31 | relRoot = os.path.relpath(root, fullDirectory) 32 | for filename in files: 33 | if multiSeasonMatch: 34 | seasonMatch = re.search(r'S([\d]{2})E[\d]{2}', filename) 35 | 36 | if seasonMatch: 37 | season = seasonMatch.group(1) 38 | seasonShort = season[1:] if season[0] == '0' else season 39 | 40 | seasonDirectory = re.sub(multiSeasonRegex1, seasonShort, directory) 41 | seasonDirectory = re.sub(multiSeasonRegex2, season, seasonDirectory) 42 | if custom_regex: 43 | seasonDirectory = re.sub(custom_regex, f' Season {seasonShort} S{season} ', seasonDirectory) 44 | 45 | completedSeasonFullDirectory = os.path.join(completedParentDirectory, seasonDirectory) 46 | 47 | if not dry_run: 48 | os.makedirs(os.path.join(completedSeasonFullDirectory, relRoot), exist_ok=True) 49 | os.symlink(os.path.join(root, filename), os.path.join(completedSeasonFullDirectory, relRoot, filename)) 50 | print('Season Recursive:', f"{os.path.join(completedSeasonFullDirectory, relRoot, filename)} -> {os.path.join(root, filename)}") 51 | 52 | continue 53 | 54 | if not dry_run: 55 | os.makedirs(os.path.join(completedFullDirectory, relRoot), exist_ok=True) 56 | os.symlink(os.path.join(root, filename), os.path.join(completedFullDirectory, relRoot, filename)) 57 | print('Recursive:', f"{os.path.join(completedFullDirectory, relRoot, filename)} -> {os.path.join(root, filename)}") 58 | 59 | def process(directory, completedParentDirectory, custom_regex, dry_run=False, no_confirm=False): 60 | if directory: 61 | process_directory(directory, completedParentDirectory, custom_regex, dry_run) 62 | else: 63 | for directory in os.listdir(parentDirectory): 64 | fullDirectory = os.path.join(parentDirectory, directory) 65 | if os.path.isdir(fullDirectory): 66 | if dry_run: 67 | print(f"Would process {directory}") 68 | else: 69 | print(f"Processing {directory}...") 70 | response = input("Do you want to process this directory? 
(y/n): ") if not no_confirm and not dry_run else 'y' 71 | if response.lower() == 'y': 72 | process_directory(directory, completedParentDirectory, custom_regex, dry_run) 73 | else: 74 | print(f"Skipping processing of {directory}") 75 | 76 | if __name__ == '__main__': 77 | parser = argparse.ArgumentParser(description='Process directories for torrent imports.') 78 | parser.add_argument('--directory', type=str, help='Specific directory to process') 79 | parser.add_argument('--custom-regex', type=str, help='Custom multi-season regex') 80 | parser.add_argument('--dry-run', action='store_true', help='Print actions without executing') 81 | parser.add_argument('--no-confirm', action='store_true', help='Execute without confirmation') 82 | parser.add_argument('--radarr', action='store_true', help='Use the Radarr symlink directory') 83 | parser.add_argument('--sonarr', action='store_true', help='Use the Sonarr symlink directory') 84 | parser.add_argument('--symlink-directory', type=str, help='Custom symlink directory') 85 | args = parser.parse_args() 86 | 87 | completedParentDirectory = get_completed_parent_directory(args) 88 | if not completedParentDirectory: 89 | parser.error("One of --radarr, --sonarr, or --symlink-directory is required.") 90 | 91 | process(args.directory, completedParentDirectory, args.custom_regex, args.dry_run, args.no_confirm) 92 | 93 | -------------------------------------------------------------------------------- /logs/blackhole.log: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/westsurname/scripts/e600dbf8251021141738db44d37b21392436a222/logs/blackhole.log -------------------------------------------------------------------------------- /monitor_ram.py: -------------------------------------------------------------------------------- 1 | import psutil 2 | import time 3 | from shared.discord import discordError, discordUpdate 4 | # Set the threshold in gigabytes 5 | warning_threshold = 50 6 | error_threshold = 10 7 | last_warning_time = 0 8 | 9 | 10 | while True: 11 | # Get available RAM in gigabytes 12 | available = psutil.virtual_memory().available / (1024 ** 3) 13 | 14 | # Compare with the threshold 15 | if available <= error_threshold: 16 | # Send a error 17 | discordError( 18 | "Very Low RAM Warning", 19 | f"Very Low RAM! Available RAM: {available:.2f} GB" 20 | ) 21 | time.sleep(5*60) 22 | elif available <= warning_threshold and time.time() - last_warning_time > 5*60: 23 | # Send a warning only if the last warning was more than 5 minutes ago 24 | discordUpdate( 25 | "Low RAM Warning", 26 | f"Low RAM! 
Available RAM: {available:.2f} GB" 27 | ) 28 | last_warning_time = time.time() 29 | time.sleep(5) 30 | else: 31 | # Sleep for some time before checking again 32 | time.sleep(5) # Adjust the sleep duration (in seconds) as needed -------------------------------------------------------------------------------- /move_media_to_directory.py: -------------------------------------------------------------------------------- 1 | from shared.arr import Arr, Sonarr, Radarr 2 | 3 | src_dir = '/path/to/src/' 4 | dst_dir = '/path/to/dst/' 5 | 6 | def moveMedia(arr: Arr): 7 | items = arr.getAll() 8 | 9 | for item in items: 10 | if item.path.startswith(dst_dir): 11 | continue 12 | 13 | print(f"Moving {item.title} - {item.size/1073741824}GB") 14 | item.path = item.path.replace(src_dir, dst_dir) 15 | arr.put(item) 16 | break 17 | 18 | 19 | moveMedia(Sonarr()) 20 | moveMedia(Radarr()) -------------------------------------------------------------------------------- /plex_authentication.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import urllib.parse 4 | from flask import Flask, jsonify, redirect, url_for, request 5 | from shared.shared import watchlist, plexHeaders, tokensFilename 6 | from shared.overseerr import getUserForPlexToken 7 | from shared.plex import getServerToken 8 | from werkzeug.middleware.proxy_fix import ProxyFix 9 | from werkzeug.middleware.dispatcher import DispatcherMiddleware 10 | 11 | # instantiate the app 12 | app = Flask(__name__) 13 | app.config.from_object(__name__) 14 | app.url_map.strict_slashes = False 15 | 16 | 17 | @app.route('/', methods=['GET']) 18 | def setup(): 19 | pinRequest = requests.post('https://plex.tv/api/v2/pins', headers=plexHeaders, json={'strong': True, 'X-Plex-Version': watchlist['plexVersion'], 'X-Plex-Product': watchlist['plexProduct'], 'X-Plex-Client-Identifier': watchlist['plexClientIdentifier']}) 20 | pin = pinRequest.json() 21 | 22 | return redirect('https://app.plex.tv/auth#?' 
+ urllib.parse.urlencode({ 23 | 'clientID': watchlist['plexClientIdentifier'], 24 | 'code': pin['code'], 25 | 'forwardUrl': url_for('setupComplete', pin=pin['id'], _external=True), 26 | 'context[device][product]': watchlist['plexProduct'] 27 | })) 28 | 29 | @app.route('/complete/<pin>', methods=['GET']) 30 | def setupComplete(pin): 31 | pinRequest = requests.get(f"https://plex.tv/api/v2/pins/{pin}", headers=plexHeaders, json={'X-Plex-Client-Identifier': watchlist['plexClientIdentifier']}) 32 | 33 | if pinRequest.status_code == 200: 34 | authToken = pinRequest.json()['authToken'] 35 | 36 | if authToken: 37 | return handleToken(authToken) 38 | 39 | return jsonify('There was an error, please try again.') 40 | 41 | @app.route('/token', methods=['POST']) 42 | def receiveToken(): 43 | token = request.json.get('token') 44 | if token: 45 | return handleToken(token) 46 | else: 47 | return createResponse({'error': 'No token provided'}, 400) 48 | 49 | def handleToken(token): 50 | user = getUserForPlexToken(token) 51 | serverToken = getServerToken(token) 52 | userId = str(user['id'])  # JSON object keys are strings, so store the id as a string for consistent lookups 53 | 54 | updateTokensFile(userId, token, serverToken) 55 | 56 | return createResponse({'message': 'Token received and stored successfully'}, 201) 57 | 58 | def updateTokensFile(userId, token, serverToken): 59 | with open(tokensFilename, 'r+') as tokensFile: 60 | tokens = json.load(tokensFile) 61 | tokenEntry = tokens.get(userId, {'etag': ''}) 62 | tokenEntry['token'] = token 63 | tokenEntry['serverToken'] = serverToken 64 | tokens[userId] = tokenEntry 65 | tokensFile.seek(0) 66 | json.dump(tokens, tokensFile) 67 | tokensFile.truncate() 68 | 69 | def createResponse(data, statusCode): 70 | response = jsonify(data), statusCode 71 | return response 72 | 73 | app.wsgi_app = ProxyFix(app.wsgi_app, x_for=1, x_proto=1, x_host=1, x_port=1, x_prefix=1) 74 | app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {'/auth': app.wsgi_app}) 75 | 76 | if __name__ == '__main__': 77 | app.run('127.0.0.1', 12598) -------------------------------------------------------------------------------- /plex_authentication_wsgi.py: -------------------------------------------------------------------------------- 1 | from plex_authentication import app 2 | 3 | if __name__ == '__main__': 4 | app.run() -------------------------------------------------------------------------------- /plex_refresh.py: -------------------------------------------------------------------------------- 1 | import os 2 | import requests 3 | from shared.shared import plex 4 | 5 | isRadarr = bool(os.getenv('radarr_eventtype')) 6 | 7 | refreshEndpoint = f"{plex['serverHost']}/library/sections/{plex['serverMovieLibraryId'] if isRadarr else plex['serverTvShowLibraryId']}/refresh?X-Plex-Token={plex['serverApiKey']}" 8 | cancelRefreshRequest = requests.delete(refreshEndpoint, headers={'Accept': 'application/json'})  # cancel any in-progress library scan first 9 | refreshRequest = requests.get(refreshEndpoint, headers={'Accept': 'application/json'})  # then trigger a fresh one 10 | 11 | exit(0) -------------------------------------------------------------------------------- /plex_refresh.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd "$(dirname "$0")" 4 | 5 | exec &>> logs/plex_refresh.log 6 | 7 | set -a 8 | . 
.env 9 | set +a 10 | 11 | isRadarr=false 12 | if [ -n "$radarr_eventtype" ]; then 13 | isRadarr=true 14 | fi 15 | 16 | if $isRadarr; then 17 | libraryId=$PLEX_SERVER_MOVIE_LIBRARY_ID 18 | else 19 | libraryId=$PLEX_SERVER_TV_SHOW_LIBRARY_ID 20 | fi 21 | 22 | refreshEndpoint="$PLEX_SERVER_HOST/library/sections/$libraryId/refresh?X-Plex-Token=$PLEX_SERVER_API_KEY" 23 | 24 | cancelRefreshRequest=$(curl -X DELETE "$refreshEndpoint" -H 'Accept: application/json') 25 | refreshRequest=$(curl -X GET "$refreshEndpoint" -H 'Accept: application/json') -------------------------------------------------------------------------------- /plex_request.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | import os 3 | import traceback 4 | import re 5 | import requests 6 | import declxml as xml 7 | from flask import Flask, jsonify, request, Response 8 | from flask_caching import Cache 9 | from shared.discord import discordError, discordUpdate 10 | from shared.shared import plex, plexHeaders, pathToScript 11 | from shared.overseerr import requestItem, getUserForPlexServerToken 12 | from werkzeug.routing import BaseConverter, ValidationError 13 | 14 | mediaTypeNums = { 15 | "movie": "1", 16 | "show": "2", 17 | "season": "3", 18 | "episode": "4" 19 | } 20 | 21 | class MetadataRatingKeyConverter(BaseConverter): 22 | regex = '[0-9a-fA-F]{24}' 23 | 24 | def to_python(self, value): 25 | if re.match(self.regex, value): 26 | return value 27 | raise ValidationError() 28 | 29 | def to_url(self, value): 30 | return value 31 | 32 | # Instantiate the app 33 | app = Flask(__name__) 34 | app.config.from_object(__name__) 35 | app.url_map.strict_slashes = False 36 | app.url_map.converters['metadataRatingKey'] = MetadataRatingKeyConverter 37 | 38 | # Set up caching 39 | cacheDir = os.path.join(pathToScript, "../cache") 40 | for file in os.listdir(cacheDir): 41 | filePath = os.path.join(cacheDir, file) 42 | try: 43 | if os.path.isfile(filePath): 44 | os.unlink(filePath) 45 | except Exception as e: 46 | print(e) 47 | 48 | cache = Cache(app, config={'CACHE_TYPE': 'filesystem', 'CACHE_DEFAULT_TIMEOUT': 300, 'CACHE_DIR': cacheDir}) 49 | 50 | _print = print 51 | 52 | def print(*values: object): 53 | _print(f"[{datetime.now()}]", *values, flush=True) 54 | 55 | def processDict(key, value): 56 | return xml.dictionary(key, [*traverseDict(value, processDict, processList, processElse)], required=False) 57 | 58 | def processList(key, value): 59 | if any(value): 60 | alias = 'Metadata' if key == 'Metadata' else None 61 | key = 'Video' if key == 'Metadata' else key 62 | return xml.array(processDict(key, value[0]), alias) 63 | else: 64 | return xml.array(xml.dictionary(key, [])) 65 | 66 | def processElse(key, _): 67 | return xml.string('.', attribute=key, required=False) 68 | 69 | def traverseDict(thisDict, processDict, processList, processElse): 70 | return (traverse(key, value, processDict, processList, processElse) for key, value in thisDict.items()) 71 | 72 | def traverseList(thisList, key, processDict, processList, processElse): 73 | return (traverse(key, value, processDict, processList, processElse) for value in thisList) 74 | 75 | def traverse(key, value, processDict, processList, processElse): 76 | if processDict and isinstance(value, dict): 77 | return processDict(key, value) 78 | elif processList and isinstance(value, list): 79 | return processList(key, value) 80 | else: 81 | return processElse(key, value) 82 | 83 | @app.route('/library/request/<mediaType>/<mediaTypeNum>/<metadataRatingKey:ratingKey>', methods=['GET']) 
84 | @app.route('/library/request/<mediaType>/<mediaTypeNum>/<metadataRatingKey:ratingKey>/<children>', methods=['GET']) 85 | @app.route('/library/request/<mediaType>/<mediaTypeNum>/<metadataRatingKey:ratingKey>/season/<season>', methods=['GET']) 86 | @app.route('/library/request/<mediaType>/<mediaTypeNum>/<metadataRatingKey:ratingKey>/season/<season>/<children>', methods=['GET']) 87 | def libraryRequest(mediaType, mediaTypeNum, ratingKey, season=None, children=None): 88 | token = request.headers.get('X-Plex-Token', None) or request.args.get('X-Plex-Token', None) 89 | originalRatingKey = ratingKey 90 | 91 | try: 92 | if not mediaTypeNum or mediaTypeNum not in mediaTypeNums.values(): 93 | print(f"Unknown mediaTypeNum: {mediaTypeNum}") 94 | discordError(f"Unknown mediaTypeNum: {mediaTypeNum}") 95 | 96 | if mediaTypeNum == mediaTypeNums['show'] or children: 97 | skipRequest = True 98 | 99 | headers = { 100 | **plexHeaders, 101 | 'X-Plex-Token': token 102 | } 103 | 104 | guid = f"plex://{mediaType}/{ratingKey}" 105 | params = {**request.args} 106 | 107 | if mediaTypeNum != '0': 108 | params['type'] = mediaTypeNum 109 | 110 | if mediaType == 'show': 111 | params['show.guid'] = guid 112 | if mediaTypeNum == mediaTypeNums['season']: 113 | params['season.index'] = season 114 | else: 115 | params['guid'] = guid 116 | 117 | allRequest = requests.get(f"{plex['serverHost']}/library/all", headers=headers, params=params) 118 | all = allRequest.json() 119 | 120 | mediaContainer = all['MediaContainer'] 121 | 122 | # If you try and get a season that doesn't exist you'll get the show instead, if it does exist. 123 | if ('Metadata' in mediaContainer and 124 | ((mediaContainer['Metadata'][0]['type'] == 'season' and mediaTypeNum == mediaTypeNums['season']) 125 | or (mediaContainer['Metadata'][0]['type'] == 'movie' and mediaTypeNum == mediaTypeNums['movie']) 126 | or (mediaContainer['Metadata'][0]['type'] == 'show' and mediaTypeNum == mediaTypeNums['show']))): 127 | 128 | skipRequest = True 129 | 130 | if not children: 131 | if mediaTypeNum == mediaTypeNums['season']: 132 | mediaContainer['Metadata'][0]['key'] = f"/library/request/{mediaType}/{mediaTypeNum}/{ratingKey}/season/{season}/children" 133 | 134 | response = jsonify(all) 135 | response.headers.add('Access-Control-Allow-Origin', 'https://app.plex.tv') 136 | 137 | return response 138 | 139 | if mediaTypeNum == mediaTypeNums['show']: 140 | return children(mediaContainer['Metadata'][0]['ratingKey']) 141 | 142 | metadataUrl = f"{plex['serverHost']}{mediaContainer['Metadata'][0]['key']}" 143 | metadataRequest = requests.get(metadataUrl, headers=headers, params=request.args) 144 | metadata = metadataRequest.json() 145 | 146 | response = jsonify(metadata) 147 | response.headers.add('Access-Control-Allow-Origin', 'https://app.plex.tv') 148 | return response 149 | else: 150 | metadataHeaders = {**plexHeaders, 'X-Plex-Token': plex['serverApiKey']} 151 | args = {k: v for k, v in request.args.items() if k != 'X-Plex-Token'} 152 | 153 | if not children and mediaTypeNum == mediaTypeNums['season']: 154 | metadataSeasonsRequest = requests.get(f"{plex['metadataHost']}library/metadata/{ratingKey}/children", headers=metadataHeaders, params=args) 155 | metadataSeasons = metadataSeasonsRequest.json() 156 | 157 | metadataSeason = next((s for s in metadataSeasons['MediaContainer']['Metadata'] if s['index'] == int(season))) 158 | ratingKey = metadataSeason['ratingKey'] 159 | 160 | urlSuffix = "/children" if children else "" 161 | metadataMetadataRequest = requests.get(f"{plex['metadataHost']}library/metadata/{ratingKey}{urlSuffix}", headers=metadataHeaders, params=args) 162 | metadata = metadataMetadataRequest.json() 163 | 164 | if 'MediaContainer' in metadata and 
'Metadata' in metadata['MediaContainer']: 165 | if children: 166 | if mediaTypeNum == mediaTypeNums['season']: 167 | metadata['MediaContainer']['Metadata'] = [] 168 | elif mediaTypeNum == mediaTypeNums['show']: 169 | seasons = metadata['MediaContainer']['Metadata'] 170 | metadata['MediaContainer']['Metadata'] = [] 171 | metadata['MediaContainer'] = addRequestableSeasons(metadata['MediaContainer'], seasons, originalRatingKey) 172 | else: 173 | item = metadata['MediaContainer']['Metadata'][0] 174 | 175 | parentTitle = item.get('parentTitle', '') 176 | title = item.get('title', '') 177 | title = f"{parentTitle}: {title}" if parentTitle else title 178 | 179 | if mediaTypeNum == mediaTypeNums['show']: 180 | item['title'] = f"Request - {title}" 181 | else: 182 | item['title'] = f"{title} - Requesting..." 183 | 184 | if mediaTypeNum == mediaTypeNums['season']: 185 | item['key'] = f"/library/request/{mediaType}/{mediaTypeNum}/{originalRatingKey}/season/{season}/children" 186 | else: 187 | item['key'] = f"/library/request/{mediaType}/{mediaTypeNum}/{originalRatingKey}/children" 188 | 189 | response = jsonify(metadata) 190 | response.headers.add('Access-Control-Allow-Origin', 'https://app.plex.tv') 191 | return response 192 | 193 | except: 194 | e = traceback.format_exc() 195 | 196 | print(f"Error in /library/request") 197 | print(e) 198 | 199 | discordError(f"Error in /library/request", e) 200 | return 'Server Error', 500 201 | finally: 202 | if not locals().get('skipRequest', False): 203 | title = locals().get('title', 'Untitled') 204 | requestMedia(token, originalRatingKey, mediaType, season, title) 205 | 206 | 207 | def requestMedia(token, ratingKey, mediaType, season, title): 208 | try: 209 | cacheKey = ratingKey if mediaType == 'movie' else f"{ratingKey}_{season}" 210 | recentlyRequested = cache.get(cacheKey) or [] 211 | if token not in recentlyRequested: 212 | user = getUserForPlexServerToken(token) 213 | metadataHeaders = {**plexHeaders, 'X-Plex-Token': plex['serverApiKey']} 214 | 215 | requestItem(user, ratingKey, datetime.now().timestamp(), metadataHeaders, getSeason=lambda: [int(season)]) 216 | 217 | recentlyRequested.append(token) 218 | cache.set(cacheKey, recentlyRequested) 219 | 220 | print(f"{title} - Requested by {user['displayName']} via Plex Request") 221 | discordUpdate(f"{title} - Requested by {user['displayName']} via Plex Request", f"User Id: {user['id']}, Media Type: {mediaType}, {f'Season: {season},' if season else ''} Rating Key: {ratingKey}") 222 | except: 223 | e = traceback.format_exc() 224 | print(f"Error in request") 225 | print(e) 226 | 227 | discordError(f"Error in request", e) 228 | 229 | @app.route('/library/all', methods=['GET']) 230 | def all(): 231 | try: 232 | headers = { 233 | **request.headers, 234 | 'Accept': 'application/json', 235 | } 236 | 237 | allRequest = requests.get(f"{plex['serverHost']}/library/all", headers=headers, params=request.args) 238 | 239 | if allRequest.status_code != 200: 240 | return allRequest.text, allRequest.status_code 241 | 242 | all = allRequest.json() 243 | 244 | mediaContainer = all['MediaContainer'] 245 | 246 | if not 'Metadata' in mediaContainer: 247 | fullGuid = (request.args.get('guid', None) or request.args.get('show.guid')) 248 | guidMatch = re.match('plex:\/\/(.+?)\/(.+?)(?:\/|$)', fullGuid) 249 | 250 | mediaType, guid = guidMatch.group(1, 2) 251 | season = request.args.get('season.index') 252 | mediaTypeNum = request.args.get('type', mediaTypeNums['movie'] if mediaType == 'movie' else mediaTypeNums['season'] if season 
else mediaTypeNums['show']) 253 | 254 | if mediaType != 'episode' and (mediaTypeNum != mediaTypeNums['season'] or season != '0'): 255 | metadataHeaders = {**plexHeaders, 'X-Plex-Token': plex['serverApiKey']} 256 | args = {k: v for k, v in request.args.items() if k != 'X-Plex-Token'} 257 | 258 | urlSuffix = "/children" if mediaTypeNum == mediaTypeNums['season'] else "" 259 | metadataAllRequest = requests.get(f"{plex['metadataHost']}library/metadata/{guid}{urlSuffix}", headers=metadataHeaders, params=args) 260 | if metadataAllRequest.status_code == 200: 261 | libraryId = plex['serverMovieLibraryId'] if mediaType == 'movie' else plex['serverTvShowLibraryId'] 262 | 263 | additionalMetadata = metadataAllRequest.json()['MediaContainer']['Metadata'][0] 264 | if mediaTypeNum == mediaTypeNums['season'] or mediaTypeNum == mediaTypeNums['episode']: 265 | additionalMetadata['key'] = f"/library/request/{mediaType}/{mediaTypeNum}/{guid}/season/{season}" 266 | else: 267 | additionalMetadata['key'] = f"/library/request/{mediaType}/{mediaTypeNum}/{guid}" 268 | 269 | additionalMetadata['ratingKey'] = "12065" 270 | additionalMetadata['librarySectionTitle'] = "Request Season :" if mediaTypeNum == mediaTypeNums['episode'] else "Request :" 271 | additionalMetadata['librarySectionID'] = libraryId 272 | additionalMetadata['librarySectionKey'] = f"/library/sections/{libraryId}" 273 | additionalMetadata['Media'] = [{ 274 | "videoResolution": "Request Season :" if mediaTypeNum == mediaTypeNums['episode'] else "Request :" 275 | }] 276 | additionalMetadata['childCount'] = 0 277 | mediaContainer['size'] = 1 278 | mediaContainer['Metadata'] = [additionalMetadata] 279 | 280 | if request.accept_mimetypes.best_match(['application/xml', 'application/json']) == 'application/json': 281 | response = jsonify(all) 282 | cors = allRequest.headers.get('Access-Control-Allow-Origin', None) 283 | if cors: 284 | response.headers.add('Access-Control-Allow-Origin', cors) 285 | else: 286 | processor = processDict('MediaContainer', mediaContainer) 287 | xmlString = xml.serialize_to_string(processor, mediaContainer, ' ') 288 | response = Response(xmlString, mimetype='application/xml') 289 | 290 | if 'fullGuid' in locals(): 291 | print('Request in xml') 292 | 293 | return response 294 | except: 295 | e = traceback.format_exc() 296 | 297 | print(f"Error in /library/all") 298 | print(e) 299 | 300 | discordError(f"Error in /library/all", e) 301 | return 'Server Error', 500 302 | 303 | @app.route('/library/metadata/<metadataRatingKey:id>/children', methods=['GET']) 304 | def metadataChildren(id): 305 | metadataHeaders = {**plexHeaders, 'X-Plex-Token': plex['serverApiKey']} 306 | args = {k: v for k, v in request.args.items() if k != 'X-Plex-Token'} 307 | 308 | metadataChildrenRequest = requests.get(f"{plex['metadataHost']}library/metadata/{id}/children", headers=metadataHeaders, params=args) 309 | if metadataChildrenRequest.status_code != 200: 310 | return metadataChildrenRequest.text, metadataChildrenRequest.status_code 311 | 312 | children = metadataChildrenRequest.json() 313 | mediaContainer = children['MediaContainer'] 314 | 315 | if request.accept_mimetypes.best_match(['application/xml', 'application/json']) == 'application/json': 316 | response = jsonify(children) 317 | cors = metadataChildrenRequest.headers.get('Access-Control-Allow-Origin', None) 318 | if cors: 319 | response.headers.add('Access-Control-Allow-Origin', cors) 320 | else: 321 | processor = processDict('MediaContainer', mediaContainer) 322 | xmlString = xml.serialize_to_string(processor, 
mediaContainer, ' ') 323 | response = Response(xmlString, mimetype='application/xml') 324 | 325 | if 'fullGuid' in locals(): 326 | print('Request in xml') 327 | 328 | return response 329 | 330 | @app.route('/library/metadata/<id>/children', methods=['GET']) 331 | def children(id): 332 | try: 333 | headers = { 334 | **request.headers, 335 | 'Accept': 'application/json', 336 | } 337 | 338 | childrenRequest = requests.get(f"{plex['serverHost']}/library/metadata/{id}/children", headers=headers, params=request.args) 339 | 340 | if childrenRequest.status_code != 200: 341 | return childrenRequest.text, childrenRequest.status_code 342 | 343 | children = childrenRequest.json() 344 | 345 | mediaContainer = children['MediaContainer'] 346 | 347 | if 'viewGroup' in mediaContainer and mediaContainer['viewGroup'] == "season" and 'Metadata' in mediaContainer and mediaContainer['Metadata']: 348 | fullGuid = mediaContainer['Metadata'][0]['parentGuid'] 349 | guidMatch = re.match('plex:\/\/(.+?)\/(.+?)(?:\/|$)', fullGuid) 350 | 351 | _, guid = guidMatch.group(1, 2) 352 | 353 | metadataHeaders = {**plexHeaders, 'X-Plex-Token': plex['serverApiKey']} 354 | args = {k: v for k, v in request.args.items() if k != 'X-Plex-Token'} 355 | 356 | metadataChildrenRequest = requests.get(f"{plex['metadataHost']}library/metadata/{guid}/children", headers=metadataHeaders, params=args) 357 | if metadataChildrenRequest.status_code == 200: 358 | seasons = metadataChildrenRequest.json().get('MediaContainer', {}).get('Metadata', []) 359 | mediaContainer = addRequestableSeasons(mediaContainer, seasons, guid) 360 | 361 | if request.accept_mimetypes.best_match(['application/xml', 'application/json']) == 'application/json': 362 | response = jsonify(children) 363 | cors = childrenRequest.headers.get('Access-Control-Allow-Origin', None) 364 | if cors: 365 | response.headers.add('Access-Control-Allow-Origin', cors) 366 | else: 367 | processor = processDict('MediaContainer', mediaContainer) 368 | xmlString = xml.serialize_to_string(processor, mediaContainer, ' ') 369 | response = Response(xmlString, mimetype='application/xml') 370 | 371 | if 'fullGuid' in locals(): 372 | print('Request in xml') 373 | 374 | return response 375 | except: 376 | e = traceback.format_exc() 377 | 378 | print(f"Error in /library/metadata/{id}/children") 379 | print(e) 380 | 381 | discordError(f"Error in /library/metadata/{id}/children", e) 382 | return 'Server Error', 500 383 | 384 | def addRequestableSeasons(mediaContainer, seasons, ratingKey): 385 | allSeasons = [item for item in seasons if item['index'] != 0] 386 | metadata = mediaContainer.get('Metadata', []) 387 | existingMetadataIndices = {item['index']: item for item in metadata} 388 | 389 | for item in allSeasons: 390 | if item['index'] not in existingMetadataIndices: 391 | item['title'] = f"Request - {item.get('title', '')}" 392 | item['key'] = f"/library/request/show/{mediaTypeNums['season']}/{ratingKey}/season/{item['index']}" 393 | item['ratingKey'] = "12065" 394 | item.pop('Guid', None) 395 | item.pop('Image', None) 396 | item.pop('Role', None) 397 | item.pop('banner', None) 398 | item.pop('contentRating', None) 399 | item.pop('hasGenericTitle', None) 400 | item.pop('originallyAvailableAt', None) 401 | item.pop('parentArt', None) 402 | item.pop('parentType', None) 403 | item.pop('publicPagesURL', None) 404 | item.pop('userState', None) 405 | item.pop('year', None) 406 | item.pop('parentKey', None) 407 | metadata.append(item) 408 | metadata.sort(key=lambda x: x['index']) 409 | mediaContainer['size'] = 
len(metadata) 410 | mediaContainer['totalSize'] = len(metadata) 411 | 412 | return mediaContainer 413 | 414 | if __name__ == '__main__': 415 | app.run('127.0.0.1', 12599, debug=True) -------------------------------------------------------------------------------- /plex_request_nginx.conf: -------------------------------------------------------------------------------- 1 | user nginx; 2 | worker_processes auto; 3 | 4 | error_log /var/log/nginx/error.log notice; 5 | pid /var/run/nginx.pid; 6 | 7 | 8 | events { 9 | worker_connections 1024; 10 | } 11 | 12 | stream { 13 | upstream http { 14 | server localhost:8001; 15 | } 16 | 17 | upstream https { 18 | server localhost:8002; 19 | } 20 | 21 | map $ssl_preread_protocol $upstream { 22 | default https; 23 | "" http; 24 | } 25 | 26 | server { 27 | listen 8000; 28 | listen [::]:8000; 29 | proxy_pass $upstream; 30 | ssl_preread on; 31 | } 32 | } 33 | 34 | http { 35 | include /etc/nginx/mime.types; 36 | default_type application/octet-stream; 37 | 38 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 39 | '$status $body_bytes_sent "$http_referer" ' 40 | '"$http_user_agent" "$http_x_forwarded_for"'; 41 | 42 | access_log /var/log/nginx/access.log main; 43 | 44 | sendfile on; 45 | #tcp_nopush on; 46 | 47 | keepalive_timeout 65; 48 | 49 | #gzip on; 50 | 51 | include /etc/nginx/conf.d/*.conf; 52 | } -------------------------------------------------------------------------------- /plex_request_nginx_default.conf: -------------------------------------------------------------------------------- 1 | # https://github.com/toomuchio/plex-nginx-reverseproxy/blob/master/nginx.conf 2 | 3 | ssl_session_cache shared:SSL:10m; 4 | ssl_session_timeout 10m; 5 | 6 | server { 7 | send_timeout 100m; #Some players don't reopen a socket, so playback stops entirely instead of resuming after an extended pause (e.g. Chrome) 8 | 9 | #Faster resolving, improves stapling time. Timeout and nameservers may need to be adjusted for your location; Google's are used here. 10 | resolver 8.8.4.4 8.8.8.8 valid=300s; 11 | resolver_timeout 10s; 12 | 13 | listen 8001; 14 | listen [::]:8001; 15 | listen 8002 ssl; 16 | listen [::]:8002 ssl; 17 | 18 | http2 on; 19 | 20 | # ssl 21 | # ssl on; 22 | ssl_certificate /ssl/fullchain.pem; 23 | ssl_certificate_key /ssl/key.pem; 24 | 25 | ssl_protocols TLSv1 TLSv1.1 TLSv1.2; 26 | ssl_prefer_server_ciphers on; 27 | #Intentionally not hardened: the permissive cipher list keeps older players working, and encrypting video streams already has a lot of overhead with something like AES-256-GCM-SHA384. 
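#A stricter baseline for modern clients would be the two commented directives below (an editorial suggestion under the assumption of a modern nginx/OpenSSL build, not part of the original config; it drops the TLSv1/1.1-only players the permissive list above supports):
#ssl_protocols TLSv1.2 TLSv1.3;
#ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305';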
28 | ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:ECDHE-RSA-DES-CBC3-SHA:ECDHE-ECDSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA'; 29 | 30 | #Why this is important: https://blog.cloudflare.com/ocsp-stapling-how-cloudflare-just-made-ssl-30/ 31 | ssl_stapling on; 32 | ssl_stapling_verify on; 33 | #For letsencrypt.org you can get your chain like this: https://esham.io/2016/01/ocsp-stapling 34 | # ssl_trusted_certificate /ssl/chain.pem; 35 | 36 | #Reuse ssl sessions, avoids unnecessary handshakes 37 | #Turning this on will increase performance, but at the cost of security. Read below before making a choice. 38 | #https://github.com/mozilla/server-side-tls/issues/135 39 | #https://wiki.mozilla.org/Security/Server_Side_TLS#TLS_tickets_.28RFC_5077.29 40 | #ssl_session_tickets on; 41 | ssl_session_tickets off; 42 | 43 | #Use: openssl dhparam -out dhparam.pem 2048 - 4096 is better but for overhead reasons 2048 is enough for Plex. 44 | # ssl_dhparam /ssl/dhparam.pem; 45 | ssl_ecdh_curve secp384r1; 46 | 47 | #Plex has A LOT of javascript, xml and html. This helps a lot, but if it causes playback issues with devices turn it off. (Haven't encountered any yet) 48 | gzip on; 49 | gzip_vary on; 50 | gzip_min_length 1000; 51 | gzip_proxied any; 52 | gzip_types text/plain text/css text/xml application/xml text/javascript application/x-javascript image/svg+xml; 53 | gzip_disable "MSIE [1-6]\."; 54 | 55 | #Nginx default client_max_body_size is 1MB, which breaks Camera Upload feature from the phones. 56 | #Increasing the limit fixes the issue. 
Anyhow, if 4K videos are expected to be uploaded, the size might need to be increased even more 57 | client_max_body_size 100M; 58 | 59 | #Forward real ip and host to Plex 60 | proxy_set_header Host $host; 61 | proxy_set_header X-Real-IP $remote_addr; 62 | #When using ngx_http_realip_module change $proxy_add_x_forwarded_for to '$http_x_forwarded_for,$realip_remote_addr' 63 | proxy_set_header Sec-WebSocket-Extensions $http_sec_websocket_extensions; 64 | proxy_set_header Sec-WebSocket-Key $http_sec_websocket_key; 65 | proxy_set_header Sec-WebSocket-Version $http_sec_websocket_version; 66 | # Plex Headers 67 | proxy_set_header X-Plex-Client-Identifier $http_x_plex_client_identifier; 68 | proxy_set_header X-Plex-Container-Size $http_x_plex_container_size; 69 | proxy_set_header X-Plex-Container-Start $http_x_plex_container_start; 70 | proxy_set_header X-Plex-Device $http_x_plex_device; 71 | proxy_set_header X-Plex-Device-Name $http_x_plex_device_name; 72 | proxy_set_header X-Plex-Platform $http_x_plex_platform; 73 | proxy_set_header X-Plex-Platform-Version $http_x_plex_platform_version; 74 | proxy_set_header X-Plex-Product $http_x_plex_product; 75 | proxy_set_header X-Plex-Token $http_x_plex_token; 76 | proxy_set_header X-Plex-Version $http_x_plex_version; 77 | proxy_set_header X-Plex-Nocache $http_x_plex_nocache; 78 | proxy_set_header X-Plex-Provides $http_x_plex_provides; 79 | proxy_set_header X-Plex-Device-Vendor $http_x_plex_device_vendor; 80 | proxy_set_header X-Plex-Model $http_x_plex_model; 81 | proxy_set_header Host $host; 82 | proxy_set_header X-Real-IP $http_x_forwarded_for; 83 | proxy_set_header X-Forwarded-For $remote_addr; 84 | proxy_set_header X-Forwarded-Proto $scheme; 85 | 86 | #Websockets 87 | proxy_http_version 1.1; 88 | proxy_set_header Upgrade $http_upgrade; 89 | proxy_set_header Connection "Upgrade"; 90 | 91 | #Buffering off: send to the client as soon as the data is received from Plex. 
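#For context (general nginx behavior, not specific to this config): with proxy_buffering off, nginx relays each chunk from the upstream to the client as it arrives instead of spooling the response to memory or disk first, which keeps stream start latency low at the cost of tying up the upstream connection for the whole transfer.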
92 | proxy_redirect off; 93 | proxy_buffering off; 94 | 95 | location / { 96 | proxy_pass ${plex_server_host}; 97 | proxy_read_timeout 86400; 98 | 99 | proxy_hide_header 'Access-Control-Allow-Origin'; 100 | proxy_hide_header 'Access-Control-Allow-Methods'; 101 | proxy_hide_header 'Access-Control-Allow-Headers'; 102 | 103 | add_header 'Access-Control-Allow-Origin' '*'; 104 | add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS, PUT, DELETE, PATCH'; 105 | add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'; 106 | 107 | if ($request_method = 'OPTIONS') { 108 | add_header 'Access-Control-Allow-Origin' '*'; 109 | add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS, PUT, DELETE, PATCH'; 110 | add_header 'Access-Control-Allow-Headers' 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization'; 111 | add_header 'Access-Control-Max-Age' 1728000; 112 | add_header 'Content-Type' 'text/plain; charset=utf-8'; 113 | add_header 'Content-Length' 0; 114 | return 204; 115 | } 116 | 117 | # return 302 https://$request_uri; 118 | 119 | # access_log logs/plex.access.log; 120 | 121 | # # enable the next two lines for http auth 122 | # auth_basic "Restricted"; 123 | # auth_basic_user_file /config/nginx/.htpasswd; 124 | 125 | # enable the next two lines for ldap auth 126 | # auth_request /auth; 127 | # error_page 401 =200 /ldaplogin; 128 | } 129 | 130 | location /auth { 131 | proxy_pass http://unix:/app/sockets/plex_authentication.sock; 132 | } 133 | 134 | location /web/index.html { 135 | proxy_pass ${plex_server_host}/web/index.html; 136 | 137 | # access_log logs/plex.access.log; 138 | 139 | sub_filter '' ''; 179 | sub_filter_once on; 180 | } 181 | 182 | location = /library/all { 183 | proxy_pass http://unix:/app/sockets/plex_request.sock; 184 | 185 | # access_log logs/plex.access.log; 186 | } 187 | 188 | location ~ ^/library/metadata/[^/]+/children$ { 189 | proxy_pass http://unix:/app/sockets/plex_request.sock; 190 | 191 | # access_log logs/plex.access.log; 192 | } 193 | 194 | location /library/request/ { 195 | proxy_pass http://unix:/app/sockets/plex_request.sock; 196 | 197 | # access_log logs/plex.access.log; 198 | } 199 | } 200 | -------------------------------------------------------------------------------- /plex_request_nginx_entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Function to find the Plex certificate 4 | find_plex_cert() { 5 | find /plex/Cache -name "cert-v2.p12" 2>/dev/null | head -n 1 6 | } 7 | 8 | # Function to process the certificate 9 | process_certificate() { 10 | echo "Starting certificate processing..." 11 | mkdir -p /ssl 12 | 13 | # Find the certificate path 14 | PLEX_CERT=$(find_plex_cert) 15 | echo "Found certificate at: ${PLEX_CERT}" 16 | 17 | if [ -z "$PLEX_CERT" ]; then 18 | echo "Error: Could not find cert-v2.p12 in the Plex Media Server/Cache directory" 19 | return 1 20 | fi 21 | 22 | # Generate the password from the machine ID 23 | echo "Generating certificate password..." 24 | CERT_PASS=$(echo -n "plex${PLEX_SERVER_MACHINE_ID}" | openssl dgst -sha512 | cut -d' ' -f2) 25 | 26 | # Extract private key 27 | echo "Extracting private key..." 28 | if ! 
openssl pkcs12 -in "${PLEX_CERT}" -nodes -passin "pass:${CERT_PASS}" -out "/ssl/key.pem" -nocerts 2>&1; then 29 | echo "Error extracting private key" 30 | return 1 31 | fi 32 | 33 | # Extract certificate chain 34 | echo "Extracting certificate chain..." 35 | if ! openssl pkcs12 -in "${PLEX_CERT}" -passin "pass:${CERT_PASS}" -out "/ssl/fullchain.pem" -nokeys 2>&1; then 36 | echo "Error extracting certificate chain" 37 | return 1 38 | fi 39 | 40 | # Set proper permissions 41 | echo "Setting file permissions..." 42 | chmod 600 "/ssl/key.pem" 43 | chmod 644 "/ssl/fullchain.pem" 44 | 45 | # Verify the certificate files exist and have content 46 | if [ -s "/ssl/key.pem" ] && [ -s "/ssl/fullchain.pem" ]; then 47 | echo "Certificate files generated successfully:" 48 | ls -l /ssl/key.pem /ssl/fullchain.pem 49 | else 50 | echo "Error: Certificate files are empty or missing" 51 | return 1 52 | fi 53 | 54 | echo "Certificate processing completed successfully" 55 | } 56 | 57 | # Initial certificate processing 58 | process_certificate || exit 1 59 | 60 | # Get the initial certificate path for monitoring 61 | PLEX_CERT=$(find_plex_cert) 62 | 63 | # Function to watch for certificate changes 64 | watch_certificates() { 65 | while inotifywait -e modify,create,move "$(dirname "${PLEX_CERT}")"; do 66 | echo "Changes detected in certificate directory, checking certificate..." 67 | NEW_CERT=$(find_plex_cert) 68 | if [ "$NEW_CERT" != "$PLEX_CERT" ]; then 69 | echo "Certificate path changed from ${PLEX_CERT} to ${NEW_CERT}" 70 | PLEX_CERT=$NEW_CERT 71 | fi 72 | process_certificate 73 | # Reload nginx to pick up the new certificates 74 | nginx -s reload 75 | done 76 | } 77 | 78 | # Start the certificate monitoring in the background (its output goes to the main process's stdout) 79 | watch_certificates & 80 | WATCH_PID=$! 
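# Illustrative usage of the watcher PID (an assumed workflow, not part of the original script):
#   kill "$WATCH_PID"                       # stop the watcher from this shell
#   kill "$(cat /var/run/cert-watch.pid)"   # or later, via the PID file written below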
81 | 82 | # Save the PID to a file for management 83 | echo $WATCH_PID > /var/run/cert-watch.pid -------------------------------------------------------------------------------- /plex_request_nginx_variables.conf: -------------------------------------------------------------------------------- 1 | map $host $plex_server_host { 2 | default "$PLEX_SERVER_HOST"; 3 | } -------------------------------------------------------------------------------- /plex_request_wsgi.py: -------------------------------------------------------------------------------- 1 | from plex_request import app 2 | 3 | if __name__ == '__main__': 4 | app.run() -------------------------------------------------------------------------------- /reclaim_space.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import requests 4 | from datetime import datetime 5 | from shared.shared import sonarr, radarr, overseerr, tautulli, trakt 6 | from shared.discord import discordUpdate, discordError 7 | import FsQuota 8 | 9 | tautulliHost = tautulli['host'] 10 | tautulliAPIkey = tautulli['apiKey'] 11 | 12 | radarrHost = radarr['host'] 13 | radarrAPIkey = radarr['apiKey'] 14 | 15 | sonarrHost = sonarr['host'] 16 | sonarrAPIkey = sonarr['apiKey'] 17 | 18 | overseerrHost = overseerr['host'] 19 | overseerrAPIkey = overseerr['apiKey'] 20 | 21 | traktAPIkey = trakt['apiKey'] 22 | 23 | # This is the section ID for movies in your Tautulli config 24 | tautulliMovieSectionID = "1" 25 | 26 | # This is the section ID for shows in your Tautulli config 27 | tautulliShowSectionID = "2" 28 | 29 | # The number of rows you want to return from Tautulli's media_info table 30 | tautulliNumRows = "2000" 31 | 32 | # Number of days since last watch to delete 33 | daysSinceLastWatch = 20 34 | 35 | # Number of days since last added and nobody has watched 36 | daysWithoutWatch = 10 37 | 38 | # Radarr tag to ignore 39 | radarrTagID = 2 40 | 41 | # Sonarr tag to ignore 42 | sonarrTagID = 2 43 | 44 | # Number of days to ignore above tags 45 | daysToIgnoreTags = 30 46 | 47 | # Amount to delete in GB 48 | deleteSize = 200 49 | 50 | # Minimum available space in GB allowed 51 | minSpace = 200 52 | 53 | # Dry-run 54 | dryRun = False 55 | 56 | ## END USER VARIABLES 57 | 58 | print(datetime.now().isoformat()) 59 | 60 | def purgeMovie(movie, movieTautulli): 61 | deletesize = 0 62 | 63 | f = requests.get(f"{radarrHost}/api/v3/movie?apiKey={radarrAPIkey}") 64 | try: 65 | guids = movieTautulli['guids'] 66 | tmdbId = next(guid[len('tmdb://'):] for guid in guids if guid.startswith('tmdb://')) 67 | 68 | r = requests.get(f"{radarrHost}/api/v3/movie/lookup?apiKey={radarrAPIkey}&term=tmdb:{tmdbId}") 69 | radarr = r.json()[0] 70 | 71 | if radarrTagID in radarr['tags'] and round((today - int(movie['added_at']))/86400) <= daysToIgnoreTags: 72 | # print("SKIPPED: " + movie['title'] + " | Added at: " + datetime.fromtimestamp(int(movie['added_at'])).isoformat() + " | Radarr ID: " + str(radarr['id']) + " | TMDB ID: " + str(radarr['tmdbId'])) 73 | pass 74 | else: 75 | if not dryRun: 76 | response = requests.delete(f"{radarrHost}/api/v3/movie/" + str(radarr['id']) + f"?apiKey={radarrAPIkey}&deleteFiles=true") 77 | 78 | headers = {"X-Api-Key": f"{overseerrAPIkey}"} 79 | o = requests.get(f"{overseerrHost}/api/v1/movie/" + str(radarr['tmdbId']), headers=headers) 80 | overseerr = json.loads(o.text) 81 | if overseerr.get('mediaInfo', False): 82 | o = requests.delete(f"{overseerrHost}/api/v1/media/" + str(overseerr['mediaInfo']['id']), 
headers=headers) 83 | 84 | print("DELETED: " + movie['title'] + " | Radarr ID: " + str(radarr['id']) + " | TMDB ID: " + str(radarr['tmdbId'])) 85 | deletesize = (int(movie['file_size'])/1073741824) 86 | except Exception as e: 87 | print("ERROR: " + movie['title'] + ": " + repr(e)) 88 | 89 | return deletesize 90 | 91 | def purgeSeason(season, tautulliShow): 92 | deletesize = 0 93 | 94 | # Remove the below? 95 | f = requests.get(f"{sonarrHost}/api/v3/series?apiKey={sonarrAPIkey}") 96 | try: 97 | guids = tautulliShow['guids'] 98 | tvdbId = next(guid[len('tvdb://'):] for guid in guids if guid.startswith('tvdb://')) 99 | 100 | s = requests.get(f"{sonarrHost}/api/v3/series/lookup?apiKey={sonarrAPIkey}&term=tvdb:{tvdbId}") 101 | show = s.json()[0] 102 | 103 | headers = { 104 | "trakt-api-key": f"{traktAPIkey}", 105 | "trakt-api-version": "2" 106 | } 107 | t = requests.get(f"https://api.trakt.tv/search/tvdb/{tvdbId}?type=show", headers=headers) 108 | trakt = json.loads(t.text) 109 | 110 | f = requests.get(f"{sonarrHost}/api/v3/episode?apiKey={sonarrAPIkey}&seriesId={show['id']}") 111 | episodes = f.json() 112 | 113 | for episode in episodes: 114 | if str(episode['seasonNumber']) != season['media_index'] or not episode['episodeFileId']: 115 | # print("SKIPPED: " + season['parent_title'] + " - " + episode['title'] + " | Sonarr ID: " + str(episode['id']) + " | TVDB ID: " + str(episode['tvdbId'])) 116 | continue 117 | if episode['seasonNumber'] == 1 and episode['episodeNumber'] == 1 and sonarrTagID in show['tags'] and round((today - int(season['added_at']))/86400) <= daysToIgnoreTags: 118 | # print("SKIPPED: " + season['parent_title'] + " - " + episode['title'] + " | Added at: " + datetime.fromtimestamp(int(season['added_at'])).isoformat() + " | Sonarr ID: " + str(episode['id']) + " | TVDB ID: " + str(episode['tvdbId'])) 119 | continue 120 | 121 | episodeFile = requests.get(f"{sonarrHost}/api/v3/episodefile/{episode['episodeFileId']}?apiKey={sonarrAPIkey}").json() 122 | if not dryRun: 123 | response = requests.delete(f"{sonarrHost}/api/v3/episodefile/{episode['episodeFileId']}?apiKey={sonarrAPIkey}") 124 | 125 | print("DELETED: " + season['parent_title'] + " - " + episode['title'] + " | Sonarr ID: " + str(episode['id']) + " | TVDB ID: " + str(episode['tvdbId'])) 126 | 127 | deletesize += (int(episodeFile['size'])/1073741824) 128 | 129 | seasonInfo = next(seasonInfo for seasonInfo in show['seasons'] if str(seasonInfo['seasonNumber']) == season['media_index']) 130 | seasonInfo['monitored'] = False 131 | 132 | response = requests.put(f"{sonarrHost}/api/v3/series/{show['id']}?apiKey={sonarrAPIkey}", json=show) 133 | 134 | headers = {"X-Api-Key": f"{overseerrAPIkey}"} 135 | 136 | o = requests.get(f"{overseerrHost}/api/v1/tv/" + str(trakt[0]['show']['ids']['tmdb']), headers=headers) 137 | overseerr = json.loads(o.text) 138 | if overseerr.get('mediaInfo', False): 139 | # Delete the entire show until we figure out how to delete individual seasons 140 | o = requests.delete(f"{overseerrHost}/api/v1/media/" + str(overseerr['mediaInfo']['id']), headers=headers) 141 | 142 | except Exception as e: 143 | print("ERROR: " + season['parent_title'] + ": " + repr(e)) 144 | 145 | return deletesize 146 | 147 | def getRemaining(): 148 | quota = FsQuota.Quota('../').query(os.getuid()) 149 | remaining = (quota.bhard - quota.bcount)/1000000 #1048576 150 | return remaining 151 | 152 | today = round(datetime.now().timestamp()) 153 | 154 | remaining = getRemaining() 155 | if (remaining > minSpace): 156 | print(f"Cancelling. 
147 | def getRemaining():
148 | quota = FsQuota.Quota('../').query(os.getuid())
149 | remaining = (quota.bhard - quota.bcount)/1000000 #1048576
150 | return remaining
151 | 
152 | today = round(datetime.now().timestamp())
153 | 
154 | remaining = getRemaining()
155 | if (remaining > minSpace):
156 | print(f"Cancelling. Remaining space: {remaining}GB. Minimum allowed space: {minSpace}GB")
157 | else:
158 | print(f"Running. Remaining space: {remaining}GB. Minimum allowed space: {minSpace}GB")
159 | totalsize = 0
160 | 
161 | r = requests.get(f"{tautulliHost}/api/v2/?apikey={tautulliAPIkey}&cmd=get_library_media_info&section_id={tautulliShowSectionID}&length={tautulliNumRows}&refresh=true&order_column=added_at&order_dir=asc")
162 | shows = json.loads(r.text)
163 | 
164 | 
165 | for show in shows['response']['data']['data']:
166 | r = requests.get(f"{tautulliHost}/api/v2/?apikey={tautulliAPIkey}&cmd=get_metadata&section_id={tautulliShowSectionID}&rating_key={show['rating_key']}&media_type=show")
167 | show = json.loads(r.text)['response']['data']
168 | 
169 | r = requests.get(f"{tautulliHost}/api/v2/?apikey={tautulliAPIkey}&cmd=get_children_metadata&section_id={tautulliShowSectionID}&rating_key={show['rating_key']}&media_type=show")
170 | seasons = json.loads(r.text)
171 | 
172 | for season in seasons['response']['data']['children_list']:
173 | r = requests.get(f"{tautulliHost}/api/v2/?apikey={tautulliAPIkey}&cmd=get_history&section_id={tautulliShowSectionID}&parent_rating_key={season['rating_key']}&media_type=episode&length=1000")
174 | episodePlays = json.loads(r.text)['response']['data']['data']
175 | lastPlays = [episodePlay['stopped'] for episodePlay in episodePlays]
176 | 
177 | if any(lastPlays):
178 | lp = round((today - int(max(lastPlays)))/86400)
179 | if lp > daysSinceLastWatch:
180 | totalsize = totalsize + purgeSeason(season, show)
181 | else:
182 | if season['added_at']:
183 | aa = round((today - int(season['added_at']))/86400)
184 | if aa > daysWithoutWatch:
185 | totalsize = totalsize + purgeSeason(season, show)
186 | 
187 | if totalsize >= deleteSize: break
188 | 
189 | if totalsize >= deleteSize: break
190 | 
191 | 
192 | 
193 | r = requests.get(f"{tautulliHost}/api/v2/?apikey={tautulliAPIkey}&cmd=get_library_media_info&section_id={tautulliMovieSectionID}&length={tautulliNumRows}&refresh=true&order_column=added_at&order_dir=asc")
194 | movies = json.loads(r.text)
195 | 
196 | 
197 | for movie in movies['response']['data']['data']:
198 | r = requests.get(f"{tautulliHost}/api/v2/?apikey={tautulliAPIkey}&cmd=get_metadata&section_id={tautulliMovieSectionID}&rating_key={movie['rating_key']}&media_type=movie")
199 | movieMeta = json.loads(r.text)['response']['data']
200 | 
201 | if movie['last_played']:
202 | lp = round((today - int(movie['last_played']))/86400)
203 | if lp > daysSinceLastWatch:
204 | totalsize = totalsize + purgeMovie(movie, movieMeta)
205 | else:
206 | if movie['added_at']:
207 | aa = round((today - int(movie['added_at']))/86400)
208 | if aa > daysWithoutWatch:
209 | totalsize = totalsize + purgeMovie(movie, movieMeta)
210 | 
211 | if totalsize >= deleteSize * 2: break
212 | 
213 | 
214 | print("Total space reclaimed: " + str("{:.2f}".format(totalsize)) + "GB")
215 | 
216 | try:
217 | remaining = getRemaining()
218 | if (remaining < minSpace):
219 | # Consider running again with stricter requirements and/or pausing downloads in sabnzbd
220 | discordError("Running low on space", f"Remaining space: {remaining}GB.")
221 | except Exception as e:
222 | print("ERROR: " + repr(e))
223 | 
--------------------------------------------------------------------------------
/repair.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | import time
4 | import traceback
5 | from shared.debrid import validateRealdebridMountTorrentsPath, validateTorboxMountTorrentsPath
6 | from shared.arr import Sonarr, Radarr
7 | from shared.discord import discordUpdate, discordError
8 | from shared.shared import repair, realdebrid, torbox, intersperse, ensureTuple
9 | from datetime import datetime
10 | 
11 | def parseInterval(intervalStr):
12 | """Parse a smart interval string (e.g., '1w2d3h4m5s') into seconds."""
13 | if not intervalStr:
14 | return 0
15 | totalSeconds = 0
16 | timeDict = {'w': 604800, 'd': 86400, 'h': 3600, 'm': 60, 's': 1}
17 | currentNumber = ''
18 | for char in intervalStr:
19 | if char.isdigit():
20 | currentNumber += char
21 | elif char in timeDict and currentNumber:
22 | totalSeconds += int(currentNumber) * timeDict[char]
23 | currentNumber = ''
24 | return totalSeconds
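# Example: parseInterval('1h2m3s') == 3600 + 120 + 3 == 3723, and
# parseInterval('1d') == 86400; an empty or unset interval yields 0, and
# characters other than digits and w/d/h/m/s are ignored.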
25 | # Parse arguments for dry run, no confirm options, and optional intervals
26 | parser = argparse.ArgumentParser(description='Repair broken symlinks or missing files.')
27 | parser.add_argument('--dry-run', action='store_true', help='Perform a dry run without making any changes.')
28 | parser.add_argument('--no-confirm', action='store_true', help='Execute without confirmation prompts.')
29 | parser.add_argument('--repair-interval', type=str, default=repair['repairInterval'], help='Optional interval in smart format (e.g. 1h2m3s) to wait between repairing each media file.')
30 | parser.add_argument('--run-interval', type=str, default=repair['runInterval'], help='Optional interval in smart format (e.g. 1w2d3h4m5s) to run the repair process.')
31 | parser.add_argument('--mode', type=str, choices=['symlink', 'file'], default='symlink', help='Choose repair mode: `symlink` or `file`. `symlink` to repair broken symlinks and `file` to repair missing files.')
32 | parser.add_argument('--season-packs', action='store_true', help='Upgrade to season-packs when a non-season-pack is found. Only applicable in symlink mode.')
33 | parser.add_argument('--include-unmonitored', action='store_true', help='Include unmonitored media in the repair process')
34 | args = parser.parse_args()
35 | 
36 | _print = print
37 | 
38 | def print(*values: object):
39 | _print(f"[{datetime.now()}] [{args.mode}]", *values)
40 | 
41 | if not args.repair_interval and not args.run_interval:
42 | print("Running repair once")
43 | else:
44 | print(f"Running repair{' once every ' + args.run_interval if args.run_interval else ''}{', and waiting ' + args.repair_interval + ' between each repair.' if args.repair_interval else '.'}")
45 | 
46 | try:
47 | repairIntervalSeconds = parseInterval(args.repair_interval)
48 | except Exception:
49 | print(f"Invalid interval format for repair interval: {args.repair_interval}")
50 | exit(1)
51 | 
52 | try:
53 | runIntervalSeconds = parseInterval(args.run_interval)
54 | except Exception:
55 | print(f"Invalid interval format for run interval: {args.run_interval}")
56 | exit(1)
57 | 
58 | def main():
59 | if unsafe():
60 | print("One or both debrid services are not working properly. Skipping repair.")
61 | discordError(f"[{args.mode}] One or both debrid services are not working properly. 
Skipping repair.") 62 | return 63 | 64 | print("Collecting media...") 65 | sonarr = Sonarr() 66 | radarr = Radarr() 67 | sonarrMedia = [(sonarr, media) for media in sonarr.getAll() if args.include_unmonitored or media.anyMonitoredChildren] 68 | radarrMedia = [(radarr, media) for media in radarr.getAll() if args.include_unmonitored or media.anyMonitoredChildren] 69 | print("Finished collecting media.") 70 | 71 | for arr, media in intersperse(sonarrMedia, radarrMedia): 72 | try: 73 | if unsafe(): 74 | print("One or both debrid services are not working properly. Skipping repair.") 75 | discordError(f"[{args.mode}] One or both debrid services are not working properly. Skipping repair.") 76 | return 77 | 78 | getItems = lambda media, childId: arr.getFiles(media=media, childId=childId) if args.mode == 'symlink' else arr.getHistory(media=media, childId=childId, includeGrandchildDetails=True) 79 | childrenIds = media.childrenIds if args.include_unmonitored else media.monitoredChildrenIds 80 | 81 | for childId in childrenIds: 82 | brokenItems = [] 83 | childItems = list(getItems(media=media, childId=childId)) 84 | 85 | for item in childItems: 86 | if args.mode == 'symlink': 87 | fullPath = item.path 88 | if os.path.islink(fullPath): 89 | destinationPath = os.readlink(fullPath) 90 | if ((realdebrid['enabled'] and destinationPath.startswith(realdebrid['mountTorrentsPath']) and not os.path.exists(destinationPath)) or 91 | (torbox['enabled'] and destinationPath.startswith(torbox['mountTorrentsPath']) and not os.path.exists(os.path.realpath(fullPath)))): 92 | brokenItems.append(os.path.realpath(fullPath)) 93 | else: # file mode 94 | if item.reason == 'MissingFromDisk' and item.parentId not in media.fullyAvailableChildrenIds: 95 | brokenItems.append(item.sourceTitle) 96 | 97 | if brokenItems: 98 | print("Title:", media.title) 99 | print("Movie ID/Season Number:", childId) 100 | print("Broken items:") 101 | [print(item) for item in brokenItems] 102 | print() 103 | if args.dry_run or args.no_confirm or input("Do you want to delete and re-grab? 
(y/n): ").lower() == 'y': 104 | if not args.dry_run: 105 | discordUpdate(f"[{args.mode}] Repairing {media.title}: {childId}") 106 | if args.mode == 'symlink': 107 | print("Deleting files:") 108 | [print(item.path) for item in childItems] 109 | results = arr.deleteFiles(childItems) 110 | print("Re-monitoring") 111 | media = arr.get(media.id) 112 | media.setChildMonitored(childId, False) 113 | arr.put(media) 114 | media.setChildMonitored(childId, True) 115 | arr.put(media) 116 | print("Searching for new files") 117 | results = arr.automaticSearch(media, childId) 118 | print(results) 119 | 120 | if repairIntervalSeconds > 0: 121 | time.sleep(repairIntervalSeconds) 122 | else: 123 | print("Skipping") 124 | print() 125 | elif args.mode == 'symlink': 126 | realPaths = [os.path.realpath(item.path) for item in childItems] 127 | parentFolders = set(os.path.dirname(path) for path in realPaths) 128 | if childId in media.fullyAvailableChildrenIds and len(parentFolders) > 1: 129 | print("Title:", media.title) 130 | print("Movie ID/Season Number:", childId) 131 | print("Non-season-pack folders:") 132 | [print(parentFolder) for parentFolder in parentFolders] 133 | print() 134 | if args.season_packs: 135 | print("Searching for season-pack") 136 | results = arr.automaticSearch(media, childId) 137 | print(results) 138 | 139 | if repairIntervalSeconds > 0: 140 | time.sleep(repairIntervalSeconds) 141 | 142 | except Exception: 143 | e = traceback.format_exc() 144 | 145 | print(f"An error occurred while processing {media.title}: {e}") 146 | discordError(f"[{args.mode}] An error occurred while processing {media.title}", e) 147 | 148 | print("Repair complete") 149 | discordUpdate(f"[{args.mode}] Repair complete") 150 | 151 | def unsafe(): 152 | return (args.mode == 'symlink' and 153 | ((realdebrid['enabled'] and not ensureTuple(validateRealdebridMountTorrentsPath())[0]) or 154 | (torbox['enabled'] and not ensureTuple(validateTorboxMountTorrentsPath())[0]))) 155 | 156 | if runIntervalSeconds > 0: 157 | while True: 158 | try: 159 | main() 160 | time.sleep(runIntervalSeconds) 161 | except Exception: 162 | e = traceback.format_exc() 163 | 164 | print(f"An error occurred in the main loop: {e}") 165 | discordError(f"[{args.mode}] An error occurred in the main loop", e) 166 | time.sleep(runIntervalSeconds) # Still wait before retrying 167 | else: 168 | main() 169 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | environs==9.5.0 #all 2 | discord_webhook==1.3.0 #all 3 | requests==2.28.1 #all 4 | 5 | bencode3==0.1.0 #blackhole 6 | watchdog==4.0.0 #blackhole 7 | 8 | Flask-Caching==2.1.0 #plex_request 9 | declxml==1.1.3 #plex_request 10 | 11 | Werkzeug==3.0.1 #plex_authentication, blackhole 12 | flask==3.0.2 #plex_authentication, plex_request 13 | gunicorn==22.0.0 #plex_authentication, plex_request -------------------------------------------------------------------------------- /row_count.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | db="/path/to/your.db" 4 | 5 | # Temporary file to hold table names and counts 6 | temp_file=$(mktemp) 7 | 8 | # Ensure temporary file gets deleted on script exit 9 | trap "rm -f $temp_file" EXIT 10 | 11 | # Fetch each table name from the database 12 | table_names=$(sqlite3 "$db" "SELECT name FROM sqlite_master WHERE type='table';") 13 | 14 | # Iterate over each table name, count its rows, and write 
to the temp file 15 | for table in $table_names; do 16 | count=$(sqlite3 "$db" "SELECT COUNT(*) FROM \"$table\";") 17 | echo "$count|$table" >> "$temp_file" 18 | done 19 | 20 | # Sort the temporary file numerically by counts and then print 21 | sort -n "$temp_file" | while IFS='|' read -r count name; do 22 | echo "$name = $count" 23 | done 24 | 25 | # Clean up the temporary file 26 | rm -f "$temp_file" -------------------------------------------------------------------------------- /shared/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/westsurname/scripts/e600dbf8251021141738db44d37b21392436a222/shared/__init__.py -------------------------------------------------------------------------------- /shared/arr.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Type, List 3 | import requests 4 | from shared.shared import sonarr, radarr, checkRequiredEnvs 5 | from shared.requests import retryRequest 6 | 7 | def validateSonarrHost(): 8 | url = f"{sonarr['host']}/login" 9 | try: 10 | response = requests.get(url) 11 | return response.status_code == 200 12 | except Exception as e: 13 | return False 14 | 15 | def validateSonarrApiKey(): 16 | url = f"{sonarr['host']}/api/v3/system/status?apikey={sonarr['apiKey']}" 17 | try: 18 | response = requests.get(url) 19 | if response.status_code == 401: 20 | return False, "Invalid or expired API key." 21 | except Exception as e: 22 | return False 23 | 24 | return True 25 | 26 | def validateRadarrHost(): 27 | url = f"{radarr['host']}/login" 28 | try: 29 | response = requests.get(url) 30 | return response.status_code == 200 31 | except Exception as e: 32 | return False 33 | 34 | def validateRadarrApiKey(): 35 | url = f"{radarr['host']}/api/v3/system/status?apikey={radarr['apiKey']}" 36 | try: 37 | response = requests.get(url) 38 | if response.status_code == 401: 39 | return False, "Invalid or expired API key." 
40 | except Exception as e: 41 | return False 42 | 43 | return True 44 | 45 | requiredEnvs = { 46 | 'Sonarr host': (sonarr['host'], validateSonarrHost), 47 | 'Sonarr API key': (sonarr['apiKey'], validateSonarrApiKey, True), 48 | 'Radarr host': (radarr['host'], validateRadarrHost), 49 | 'Radarr API key': (radarr['apiKey'], validateRadarrApiKey, True) 50 | } 51 | 52 | checkRequiredEnvs(requiredEnvs) 53 | 54 | class Media(ABC): 55 | def __init__(self, json) -> None: 56 | super().__init__() 57 | self.json = json 58 | 59 | @property 60 | @abstractmethod 61 | def size(self): 62 | pass 63 | 64 | @property 65 | def id(self): 66 | return self.json['id'] 67 | 68 | @property 69 | def title(self): 70 | return self.json['title'] 71 | 72 | @property 73 | def path(self): 74 | return self.json['path'] 75 | 76 | @path.setter 77 | def path(self, path): 78 | self.json['path'] = path 79 | 80 | @property 81 | def anyMonitoredChildren(self): 82 | return bool(self.monitoredChildrenIds) 83 | 84 | @property 85 | def anyFullyAvailableChildren(self): 86 | return bool(self.fullyAvailableChildrenIds) 87 | 88 | @property 89 | def childrenIds(self): 90 | pass 91 | 92 | @property 93 | @abstractmethod 94 | def monitoredChildrenIds(self): 95 | pass 96 | 97 | @property 98 | @abstractmethod 99 | def fullyAvailableChildrenIds(self): 100 | pass 101 | 102 | @abstractmethod 103 | def setChildMonitored(self, childId: int, monitored: bool): 104 | pass 105 | 106 | class Movie(Media): 107 | @property 108 | def size(self): 109 | return self.json['sizeOnDisk'] 110 | 111 | @property 112 | def childrenIds(self): 113 | return [self.id] 114 | 115 | @property 116 | def monitoredChildrenIds(self): 117 | return [self.id] if self.json['monitored'] else [] 118 | 119 | @property 120 | def fullyAvailableChildrenIds(self): 121 | return [self.id] if self.json['hasFile'] else [] 122 | 123 | def setChildMonitored(self, childId: int, monitored: bool): 124 | self.json["monitored"] = monitored 125 | 126 | class Show(Media): 127 | @property 128 | def size(self): 129 | return self.json['statistics']['sizeOnDisk'] 130 | 131 | @property 132 | def childrenIds(self): 133 | return [season['seasonNumber'] for season in self.json['seasons']] 134 | 135 | @property 136 | def monitoredChildrenIds(self): 137 | return [season['seasonNumber'] for season in self.json['seasons'] if season['monitored']] 138 | 139 | @property 140 | def fullyAvailableChildrenIds(self): 141 | return [season['seasonNumber'] for season in self.json['seasons'] if season['statistics']['percentOfEpisodes'] == 100] 142 | 143 | def setChildMonitored(self, childId: int, monitored: bool): 144 | for season in self.json['seasons']: 145 | if season['seasonNumber'] == childId: 146 | season['monitored'] = monitored 147 | break 148 | 149 | class MediaFile(ABC): 150 | def __init__(self, json) -> None: 151 | super().__init__() 152 | self.json = json 153 | 154 | @property 155 | def id(self): 156 | return self.json['id'] 157 | 158 | @property 159 | def path(self): 160 | return self.json['path'] 161 | 162 | @property 163 | def quality(self): 164 | return self.json['quality']['quality']['name'] 165 | 166 | @property 167 | def size(self): 168 | return self.json['size'] 169 | 170 | @property 171 | @abstractmethod 172 | def parentId(self): 173 | pass 174 | 175 | class EpisodeFile(MediaFile): 176 | @property 177 | def parentId(self): 178 | return self.json['seasonNumber'] 179 | 180 | class MovieFile(MediaFile): 181 | @property 182 | def parentId(self): 183 | return self.json['movieId'] 184 | 185 | 186 | class 
MediaHistory(ABC): 187 | def __init__(self, json) -> None: 188 | super().__init__() 189 | self.json = json 190 | 191 | @property 192 | def eventType(self): 193 | return self.json['eventType'] 194 | 195 | @property 196 | def reason(self): 197 | return self.json['data'].get('reason') 198 | 199 | @property 200 | def quality(self): 201 | return self.json['quality']['quality']['name'] 202 | 203 | @property 204 | def id(self): 205 | return self.json['id'] 206 | 207 | @property 208 | def sourceTitle(self): 209 | return self.json['sourceTitle'] 210 | 211 | @property 212 | def torrentInfoHash(self): 213 | return self.json['data'].get('torrentInfoHash') 214 | 215 | @property 216 | def releaseType(self): 217 | """Get the release type from the history item data.""" 218 | return self.json['data'].get('releaseType') 219 | 220 | @property 221 | @abstractmethod 222 | def parentId(self): 223 | pass 224 | 225 | @property 226 | @abstractmethod 227 | def grandparentId(self): 228 | """Get the top-level ID (series ID for episodes, same as parentId for movies).""" 229 | pass 230 | 231 | @property 232 | @abstractmethod 233 | def isFileDeletedEvent(self): 234 | pass 235 | 236 | class MovieHistory(MediaHistory): 237 | @property 238 | def parentId(self): 239 | return self.json['movieId'] 240 | 241 | @property 242 | def grandparentId(self): 243 | """For movies, grandparent ID is the same as parent ID.""" 244 | return self.parentId 245 | 246 | @property 247 | def isFileDeletedEvent(self): 248 | return self.eventType == 'movieFileDeleted' 249 | 250 | class EpisodeHistory(MediaHistory): 251 | @property 252 | # Requires includeGrandchildDetails to be true 253 | def parentId(self): 254 | return self.json['episode']['seasonNumber'] 255 | 256 | @property 257 | # Requires includeGrandchildDetails to be true 258 | def grandparentId(self): 259 | """Get the series ID from the history item.""" 260 | return self.json['episode']['seriesId'] 261 | 262 | @property 263 | def isFileDeletedEvent(self): 264 | return self.eventType == 'episodeFileDeleted' 265 | 266 | class Arr(ABC): 267 | def __init__(self, host: str, apiKey: str, endpoint: str, fileEndpoint: str, childIdName: str, childName: str, grandchildName: str, constructor: Type[Media], fileConstructor: Type[MediaFile], historyConstructor: Type[MediaHistory]) -> None: 268 | self.host = host 269 | self.apiKey = apiKey 270 | self.endpoint = endpoint 271 | self.fileEndpoint = fileEndpoint 272 | self.childIdName = childIdName 273 | self.childName = childName 274 | self.grandchildName = grandchildName 275 | self.constructor = constructor 276 | self.fileConstructor = fileConstructor 277 | self.historyConstructor = historyConstructor 278 | 279 | def get(self, id: int): 280 | response = retryRequest(lambda: requests.get(f"{self.host}/api/v3/{self.endpoint}/{id}?apiKey={self.apiKey}")) 281 | return self.constructor(response.json()) 282 | 283 | def getAll(self): 284 | response = retryRequest(lambda: requests.get(f"{self.host}/api/v3/{self.endpoint}?apiKey={self.apiKey}")) 285 | return map(self.constructor, response.json()) 286 | 287 | def put(self, media: Media): 288 | retryRequest(lambda: requests.put(f"{self.host}/api/v3/{self.endpoint}/{media.id}?apiKey={self.apiKey}&moveFiles=true", json=media.json)) 289 | 290 | def getFiles(self, media: Media, childId: int=None): 291 | response = retryRequest(lambda: requests.get(f"{self.host}/api/v3/{self.fileEndpoint}?apiKey={self.apiKey}&{self.endpoint}Id={media.id}")) 292 | 293 | files = map(self.fileConstructor, response.json()) 294 | 295 | if 
childId != None and childId != media.id: 296 | files = filter(lambda file: file.parentId == childId, files) 297 | 298 | return files 299 | 300 | def deleteFiles(self, files: List[MediaFile]): 301 | fileIds = [file.id for file in files] 302 | response = retryRequest(lambda: requests.delete(f"{self.host}/api/v3/{self.fileEndpoint}/bulk?apiKey={self.apiKey}", json={f"{self.fileEndpoint}ids": fileIds})) 303 | 304 | return response.json() 305 | 306 | def getHistory(self, pageSize: int=None, includeGrandchildDetails: bool=False, media: Media=None, childId: int=None): 307 | endpoint = f"/{self.endpoint}" if media else '' 308 | pageSizeParam = f"pageSize={pageSize}&" if pageSize else '' 309 | includeGrandchildDetailsParam = f"include{self.grandchildName}=true&" if includeGrandchildDetails else '' 310 | idParam = f"{self.endpoint}Id={media.id}&" if media else '' 311 | childIdParam = f"{self.childIdName}={childId}&" if media and childId != None and childId != media.id else '' 312 | response = retryRequest(lambda: requests.get(f"{self.host}/api/v3/history{endpoint}?{pageSizeParam}{includeGrandchildDetailsParam}{idParam}{childIdParam}apiKey={self.apiKey}")) 313 | 314 | history = response.json() 315 | 316 | return map(self.historyConstructor, history['records'] if isinstance(history, dict) else history) 317 | 318 | def failHistoryItem(self, historyId: int): 319 | retryRequest(lambda: requests.post(f"{self.host}/api/v3/history/failed/{historyId}?apiKey={self.apiKey}")) 320 | 321 | def refreshMonitoredDownloads(self): 322 | retryRequest(lambda: requests.post(f"{self.host}/api/v3/command?apiKey={self.apiKey}", json={'name': 'RefreshMonitoredDownloads'}, headers={'Content-Type': 'application/json'})) 323 | 324 | def interactiveSearch(self, media: Media, childId: int): 325 | response = retryRequest(lambda: requests.get(f"{self.host}/api/v3/release?apiKey={self.apiKey}&{self.endpoint}Id={media.id}{f'&{self.childIdName}={childId}' if childId != media.id else ''}")) 326 | return response.json() 327 | 328 | def automaticSearch(self, media: Media, childId: int): 329 | response = retryRequest(lambda: requests.post( 330 | f"{self.host}/api/v3/command?apiKey={self.apiKey}", 331 | json=self._automaticSearchJson(media, childId), 332 | )) 333 | return response.json() 334 | 335 | def _automaticSearchJson(self, media: Media, childId: int): 336 | pass 337 | 338 | class Sonarr(Arr): 339 | host = sonarr['host'] 340 | apiKey = sonarr['apiKey'] 341 | endpoint = 'series' 342 | fileEndpoint = 'episodefile' 343 | childIdName = 'seasonNumber' 344 | childName = 'Season' 345 | grandchildName = 'Episode' 346 | 347 | def __init__(self) -> None: 348 | super().__init__(Sonarr.host, Sonarr.apiKey, Sonarr.endpoint, Sonarr.fileEndpoint, Sonarr.childIdName, Sonarr.childName, Sonarr.grandchildName, Show, EpisodeFile, EpisodeHistory) 349 | 350 | def _automaticSearchJson(self, media: Media, childId: int): 351 | return {"name": f"{self.childName}Search", f"{self.endpoint}Id": media.id, self.childIdName: childId} 352 | 353 | class Radarr(Arr): 354 | host = radarr['host'] 355 | apiKey = radarr['apiKey'] 356 | endpoint = 'movie' 357 | fileEndpoint = 'moviefile' 358 | childIdName = None 359 | childName = 'Movies' 360 | grandchildName = 'Movie' 361 | 362 | def __init__(self) -> None: 363 | super().__init__(Radarr.host, Radarr.apiKey, Radarr.endpoint, Radarr.fileEndpoint, Radarr.childIdName, Radarr.childName, Radarr.grandchildName, Movie, MovieFile, MovieHistory) 364 | 365 | def _automaticSearchJson(self, media: Media, childId: int): 366 | return 
{"name": f"{self.childName}Search", f"{self.endpoint}Ids": [media.id]} 367 | -------------------------------------------------------------------------------- /shared/debrid.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import os 3 | import re 4 | import hashlib 5 | import requests 6 | from abc import ABC, abstractmethod 7 | from urllib.parse import urljoin 8 | from datetime import datetime 9 | from shared.discord import discordUpdate 10 | from shared.requests import retryRequest 11 | from shared.shared import realdebrid, torbox, mediaExtensions, checkRequiredEnvs 12 | 13 | def validateDebridEnabled(): 14 | if not realdebrid['enabled'] and not torbox['enabled']: 15 | return False, "At least one of RealDebrid or Torbox must be enabled." 16 | return True 17 | 18 | def validateRealdebridHost(): 19 | url = urljoin(realdebrid['host'], "time") 20 | try: 21 | response = requests.get(url) 22 | return response.status_code == 200 23 | except Exception as e: 24 | return False 25 | 26 | def validateRealdebridApiKey(): 27 | url = urljoin(realdebrid['host'], "user") 28 | headers = {'Authorization': f'Bearer {realdebrid["apiKey"]}'} 29 | try: 30 | response = requests.get(url, headers=headers) 31 | 32 | if response.status_code == 401: 33 | return False, "Invalid or expired API key." 34 | elif response.status_code == 403: 35 | return False, "Permission denied, account locked." 36 | except Exception as e: 37 | return False 38 | 39 | return True 40 | 41 | def validateRealdebridMountTorrentsPath(): 42 | path = realdebrid['mountTorrentsPath'] 43 | if os.path.exists(path) and any(os.path.isdir(os.path.join(path, child)) for child in os.listdir(path)): 44 | return True 45 | else: 46 | return False, "Path does not exist or has no children." 47 | 48 | def validateTorboxHost(): 49 | url = urljoin(torbox['host'], "stats") 50 | try: 51 | response = requests.get(url) 52 | return response.status_code == 200 53 | except Exception as e: 54 | return False 55 | 56 | def validateTorboxApiKey(): 57 | url = urljoin(torbox['host'], "user/me") 58 | headers = {'Authorization': f'Bearer {torbox["apiKey"]}'} 59 | try: 60 | response = requests.get(url, headers=headers) 61 | 62 | if response.status_code == 401: 63 | return False, "Invalid or expired API key." 64 | elif response.status_code == 403: 65 | return False, "Permission denied, account locked." 66 | except Exception as e: 67 | return False 68 | 69 | return True 70 | 71 | def validateTorboxMountTorrentsPath(): 72 | path = torbox['mountTorrentsPath'] 73 | if os.path.exists(path) and any(os.path.isdir(os.path.join(path, child)) for child in os.listdir(path)): 74 | return True 75 | else: 76 | return False, "Path does not exist or has no children." 
77 | 78 | requiredEnvs = { 79 | 'RealDebrid/TorBox enabled': (True, validateDebridEnabled), 80 | } 81 | 82 | if realdebrid['enabled']: 83 | requiredEnvs.update({ 84 | 'RealDebrid host': (realdebrid['host'], validateRealdebridHost), 85 | 'RealDebrid API key': (realdebrid['apiKey'], validateRealdebridApiKey, True), 86 | 'RealDebrid mount torrents path': (realdebrid['mountTorrentsPath'], validateRealdebridMountTorrentsPath) 87 | }) 88 | 89 | if torbox['enabled']: 90 | requiredEnvs.update({ 91 | 'Torbox host': (torbox['host'], validateTorboxHost), 92 | 'Torbox API key': (torbox['apiKey'], validateTorboxApiKey, True), 93 | 'Torbox mount torrents path': (torbox['mountTorrentsPath'], validateTorboxMountTorrentsPath) 94 | }) 95 | 96 | checkRequiredEnvs(requiredEnvs) 97 | 98 | class TorrentBase(ABC): 99 | STATUS_WAITING_FILES_SELECTION = 'waiting_files_selection' 100 | STATUS_DOWNLOADING = 'downloading' 101 | STATUS_COMPLETED = 'completed' 102 | STATUS_ERROR = 'error' 103 | 104 | def __init__(self, f, fileData, file, failIfNotCached, onlyLargestFile) -> None: 105 | super().__init__() 106 | self.f = f 107 | self.fileData = fileData 108 | self.file = file 109 | self.failIfNotCached = failIfNotCached 110 | self.onlyLargestFile = onlyLargestFile 111 | self.skipAvailabilityCheck = False 112 | self.id = None 113 | self._info = None 114 | self._hash = None 115 | self._instantAvailability = None 116 | 117 | def print(self, *values: object): 118 | print(f"[{datetime.now()}] [{self.__class__.__name__}] [{self.file.fileInfo.filenameWithoutExt}]", *values) 119 | 120 | @abstractmethod 121 | def submitTorrent(self): 122 | pass 123 | 124 | @abstractmethod 125 | def getHash(self): 126 | pass 127 | 128 | @abstractmethod 129 | def addTorrent(self): 130 | pass 131 | 132 | @abstractmethod 133 | async def getInfo(self, refresh=False): 134 | pass 135 | 136 | @abstractmethod 137 | async def selectFiles(self): 138 | pass 139 | 140 | @abstractmethod 141 | def delete(self): 142 | pass 143 | 144 | @abstractmethod 145 | async def getTorrentPath(self): 146 | pass 147 | 148 | @abstractmethod 149 | def _addTorrentFile(self): 150 | pass 151 | 152 | @abstractmethod 153 | def _addMagnetFile(self): 154 | pass 155 | 156 | def _enforceId(self): 157 | if not self.id: 158 | raise Exception("Id is required. 
Must be acquired via successfully running submitTorrent() first.") 159 | 160 | class RealDebrid(TorrentBase): 161 | def __init__(self, f, fileData, file, failIfNotCached, onlyLargestFile) -> None: 162 | super().__init__(f, fileData, file, failIfNotCached, onlyLargestFile) 163 | self.headers = {'Authorization': f'Bearer {realdebrid["apiKey"]}'} 164 | self.mountTorrentsPath = realdebrid["mountTorrentsPath"] 165 | 166 | def submitTorrent(self): 167 | if self.failIfNotCached: 168 | instantAvailability = self._getInstantAvailability() 169 | self.print('instantAvailability:', not not instantAvailability) 170 | if not instantAvailability: 171 | return False 172 | 173 | return not not self.addTorrent() 174 | 175 | def _getInstantAvailability(self, refresh=False): 176 | torrentHash = self.getHash() 177 | self.print('hash:', torrentHash) 178 | self.skipAvailabilityCheck = True 179 | 180 | return True 181 | 182 | def _getAvailableHost(self): 183 | availableHostsRequest = retryRequest( 184 | lambda: requests.get(urljoin(realdebrid['host'], "torrents/availableHosts"), headers=self.headers), 185 | print=self.print 186 | ) 187 | if availableHostsRequest is None: 188 | return None 189 | 190 | availableHosts = availableHostsRequest.json() 191 | return availableHosts[0]['host'] 192 | 193 | async def getInfo(self, refresh=False): 194 | self._enforceId() 195 | 196 | if refresh or not self._info: 197 | infoRequest = retryRequest( 198 | lambda: requests.get(urljoin(realdebrid['host'], f"torrents/info/{self.id}"), headers=self.headers), 199 | print=self.print 200 | ) 201 | if infoRequest is None: 202 | self._info = None 203 | else: 204 | info = infoRequest.json() 205 | info['status'] = self._normalize_status(info['status']) 206 | self._info = info 207 | 208 | return self._info 209 | 210 | async def selectFiles(self): 211 | self._enforceId() 212 | 213 | info = await self.getInfo() 214 | if info is None: 215 | return False 216 | 217 | self.print('files:', info['files']) 218 | mediaFiles = [file for file in info['files'] if os.path.splitext(file['path'])[1].lower() in mediaExtensions] 219 | 220 | if not mediaFiles: 221 | self.print('no media files found') 222 | return False 223 | 224 | mediaFileIds = {str(file['id']) for file in mediaFiles} 225 | self.print('required fileIds:', mediaFileIds) 226 | 227 | largestMediaFile = max(mediaFiles, key=lambda file: file['bytes']) 228 | largestMediaFileId = str(largestMediaFile['id']) 229 | self.print('only largest file:', self.onlyLargestFile) 230 | self.print('largest file:', largestMediaFile) 231 | 232 | if self.onlyLargestFile and len(mediaFiles) > 1: 233 | discordUpdate('largest file:', largestMediaFile['path']) 234 | 235 | files = {'files': [largestMediaFileId] if self.onlyLargestFile else ','.join(mediaFileIds)} 236 | selectFilesRequest = retryRequest( 237 | lambda: requests.post(urljoin(realdebrid['host'], f"torrents/selectFiles/{self.id}"), headers=self.headers, data=files), 238 | print=self.print 239 | ) 240 | if selectFilesRequest is None: 241 | return False 242 | 243 | return True 244 | 245 | def delete(self): 246 | self._enforceId() 247 | 248 | deleteRequest = retryRequest( 249 | lambda: requests.delete(urljoin(realdebrid['host'], f"torrents/delete/{self.id}"), headers=self.headers), 250 | print=self.print 251 | ) 252 | return not not deleteRequest 253 | 254 | 255 | async def getTorrentPath(self): 256 | filename = (await self.getInfo())['filename'] 257 | originalFilename = (await self.getInfo())['original_filename'] 258 | 259 | folderPathMountFilenameTorrent = 
os.path.join(self.mountTorrentsPath, filename)
260 | folderPathMountOriginalFilenameTorrent = os.path.join(self.mountTorrentsPath, originalFilename)
261 | folderPathMountOriginalFilenameWithoutExtTorrent = os.path.join(self.mountTorrentsPath, os.path.splitext(originalFilename)[0])
262 | 
263 | if os.path.exists(folderPathMountFilenameTorrent) and os.listdir(folderPathMountFilenameTorrent):
264 | folderPathMountTorrent = folderPathMountFilenameTorrent
265 | elif os.path.exists(folderPathMountOriginalFilenameTorrent) and os.listdir(folderPathMountOriginalFilenameTorrent):
266 | folderPathMountTorrent = folderPathMountOriginalFilenameTorrent
267 | elif (originalFilename.endswith(('.mkv', '.mp4')) and
268 | os.path.exists(folderPathMountOriginalFilenameWithoutExtTorrent) and os.listdir(folderPathMountOriginalFilenameWithoutExtTorrent)):
269 | folderPathMountTorrent = folderPathMountOriginalFilenameWithoutExtTorrent
270 | else:
271 | folderPathMountTorrent = None
272 | 
273 | return folderPathMountTorrent
274 | 
275 | def _addFile(self, request, endpoint, data):
276 | host = self._getAvailableHost()
277 | if host is None:
278 | return None
279 | 
280 | request = retryRequest(
281 | lambda: request(urljoin(realdebrid['host'], endpoint), params={'host': host}, headers=self.headers, data=data),
282 | print=self.print
283 | )
284 | if request is None:
285 | return None
286 | 
287 | response = request.json()
288 | self.print('response info:', response)
289 | self.id = response['id']
290 | 
291 | return self.id
292 | 
293 | def _addTorrentFile(self):
294 | return self._addFile(requests.put, "torrents/addTorrent", self.f)
295 | 
296 | def _addMagnetFile(self):
297 | return self._addFile(requests.post, "torrents/addMagnet", {'magnet': self.fileData})
298 | 
299 | def _normalize_status(self, status):
300 | if status in ['waiting_files_selection']:
301 | return self.STATUS_WAITING_FILES_SELECTION
302 | elif status in ['magnet_conversion', 'queued', 'downloading', 'compressing', 'uploading']:
303 | return self.STATUS_DOWNLOADING
304 | elif status == 'downloaded':
305 | return self.STATUS_COMPLETED
306 | elif status in ['magnet_error', 'error', 'dead', 'virus']:
307 | return self.STATUS_ERROR
308 | return status
309 | 
310 | class Torbox(TorrentBase):
311 | def __init__(self, f, fileData, file, failIfNotCached, onlyLargestFile) -> None:
312 | super().__init__(f, fileData, file, failIfNotCached, onlyLargestFile)
313 | self.headers = {'Authorization': f'Bearer {torbox["apiKey"]}'}
314 | self.mountTorrentsPath = torbox["mountTorrentsPath"]
315 | self.submittedTime = None
316 | self.lastInactiveCheck = None
self.authId = None  # default so getInfo() can bail out cleanly if the user-info request below fails
317 | 
318 | userInfoRequest = retryRequest(
319 | lambda: requests.get(urljoin(torbox['host'], "user/me"), headers=self.headers),
320 | print=self.print
321 | )
322 | if userInfoRequest is not None:
323 | userInfo = userInfoRequest.json()
324 | self.authId = userInfo['data']['auth_id']
325 | 
326 | def submitTorrent(self):
327 | if self.failIfNotCached:
328 | instantAvailability = self._getInstantAvailability()
329 | self.print('instantAvailability:', not not instantAvailability)
330 | if not instantAvailability:
331 | return False
332 | 
333 | if self.addTorrent():
334 | self.submittedTime = datetime.now()
335 | return True
336 | return False
337 | 
338 | def _getInstantAvailability(self, refresh=False):
339 | if refresh or not self._instantAvailability:
340 | torrentHash = self.getHash()
341 | self.print('hash:', torrentHash)
342 | 
343 | instantAvailabilityRequest = retryRequest(
344 | lambda: requests.get( 
urljoin(torbox['host'], "torrents/checkcached"), 346 | headers=self.headers, 347 | params={'hash': torrentHash, 'format': 'object'} 348 | ), 349 | print=self.print 350 | ) 351 | if instantAvailabilityRequest is None: 352 | return None 353 | 354 | instantAvailabilities = instantAvailabilityRequest.json() 355 | self.print('instantAvailabilities:', instantAvailabilities) 356 | 357 | # Check if 'data' exists and is not None or False 358 | if instantAvailabilities and 'data' in instantAvailabilities and instantAvailabilities['data']: 359 | self._instantAvailability = instantAvailabilities['data'] 360 | else: 361 | self._instantAvailability = None 362 | 363 | return self._instantAvailability 364 | 365 | async def getInfo(self, refresh=False): 366 | self._enforceId() 367 | 368 | if refresh or not self._info: 369 | if not self.authId: 370 | return None 371 | 372 | currentTime = datetime.now() 373 | if (currentTime - self.submittedTime).total_seconds() < 300: 374 | if not self.lastInactiveCheck or (currentTime - self.lastInactiveCheck).total_seconds() > 5: 375 | inactiveCheckUrl = f"https://relay.torbox.app/v1/inactivecheck/torrent/{self.authId}/{self.id}" 376 | retryRequest( 377 | lambda: requests.get(inactiveCheckUrl), 378 | print=self.print 379 | ) 380 | self.lastInactiveCheck = currentTime 381 | for _ in range(60): 382 | infoRequest = retryRequest( 383 | lambda: requests.get(urljoin(torbox['host'], "torrents/mylist"), headers=self.headers), 384 | print=self.print 385 | ) 386 | if infoRequest is None: 387 | return None 388 | 389 | torrents = infoRequest.json()['data'] 390 | 391 | for torrent in torrents: 392 | if torrent['id'] == self.id: 393 | torrent['status'] = self._normalize_status(torrent['download_state'], torrent['download_finished']) 394 | self._info = torrent 395 | return self._info 396 | 397 | await asyncio.sleep(1) 398 | return self._info 399 | 400 | async def selectFiles(self): 401 | pass 402 | 403 | def delete(self): 404 | self._enforceId() 405 | 406 | deleteRequest = retryRequest( 407 | lambda: requests.delete(urljoin(torbox['host'], "torrents/controltorrent"), headers=self.headers, data={'torrent_id': self.id, 'operation': "Delete"}), 408 | print=self.print 409 | ) 410 | return not not deleteRequest 411 | 412 | async def getTorrentPath(self): 413 | filename = (await self.getInfo())['files'][0]['name'].split("/")[0] 414 | 415 | folderPathMountFilenameTorrent = os.path.join(self.mountTorrentsPath, filename) 416 | 417 | if os.path.exists(folderPathMountFilenameTorrent) and os.listdir(folderPathMountFilenameTorrent): 418 | folderPathMountTorrent = folderPathMountFilenameTorrent 419 | else: 420 | folderPathMountTorrent = None 421 | 422 | return folderPathMountTorrent 423 | 424 | def _addFile(self, data=None, files=None): 425 | request = retryRequest( 426 | lambda: requests.post(urljoin(torbox['host'], "torrents/createtorrent"), headers=self.headers, data=data, files=files), 427 | print=self.print 428 | ) 429 | if request is None: 430 | return None 431 | 432 | response = request.json() 433 | self.print('response info:', response) 434 | 435 | if response.get('detail') == 'queued': 436 | return None 437 | 438 | self.id = response['data']['torrent_id'] 439 | 440 | return self.id 441 | 442 | def _addTorrentFile(self): 443 | nametorrent = self.f.name.split('/')[-1] 444 | files = {'file': (nametorrent, self.f, 'application/x-bittorrent')} 445 | return self._addFile(files=files) 446 | 447 | def _addMagnetFile(self): 448 | return self._addFile(data={'magnet': self.fileData}) 449 | 450 | def 
_normalize_status(self, status, download_finished): 451 | if download_finished: 452 | return self.STATUS_COMPLETED 453 | elif status in [ 454 | 'completed', 'cached', 'paused', 'downloading', 'uploading', 455 | 'checkingResumeData', 'metaDL', 'pausedUP', 'queuedUP', 'checkingUP', 456 | 'forcedUP', 'allocating', 'downloading', 'metaDL', 'pausedDL', 457 | 'queuedDL', 'checkingDL', 'forcedDL', 'checkingResumeData', 'moving' 458 | ]: 459 | return self.STATUS_DOWNLOADING 460 | elif status in ['error', 'stalledUP', 'stalledDL', 'stalled (no seeds)', 'missingFiles', 'failed']: 461 | return self.STATUS_ERROR 462 | return status 463 | 464 | class Torrent(TorrentBase): 465 | def getHash(self): 466 | 467 | if not self._hash: 468 | import bencode3 469 | self._hash = hashlib.sha1(bencode3.bencode(bencode3.bdecode(self.fileData)['info'])).hexdigest() 470 | 471 | return self._hash 472 | 473 | def addTorrent(self): 474 | return self._addTorrentFile() 475 | 476 | class Magnet(TorrentBase): 477 | def getHash(self): 478 | 479 | if not self._hash: 480 | # Consider changing when I'm more familiar with hashes 481 | self._hash = re.search('xt=urn:btih:(.+?)(?:&|$)', self.fileData).group(1) 482 | 483 | return self._hash 484 | 485 | def addTorrent(self): 486 | return self._addMagnetFile() 487 | 488 | 489 | class RealDebridTorrent(RealDebrid, Torrent): 490 | pass 491 | 492 | class RealDebridMagnet(RealDebrid, Magnet): 493 | pass 494 | 495 | class TorboxTorrent(Torbox, Torrent): 496 | pass 497 | 498 | class TorboxMagnet(Torbox, Magnet): 499 | pass 500 | -------------------------------------------------------------------------------- /shared/discord.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from discord_webhook import DiscordWebhook, DiscordEmbed 3 | from shared.shared import discord, checkRequiredEnvs 4 | 5 | def validateDiscordWebhookUrl(): 6 | url = discord['webhookUrl'] 7 | try: 8 | response = requests.get(url) 9 | return response.status_code == 200 10 | except Exception as e: 11 | return False 12 | 13 | 14 | requiredEnvs = { 15 | 'Discord webhook URL': (discord['webhookUrl'], validateDiscordWebhookUrl) 16 | } 17 | 18 | if discord['enabled'] or discord['updateEnabled']: 19 | checkRequiredEnvs(requiredEnvs) 20 | 21 | def discordError(title, message=None): 22 | if discord['enabled']: 23 | embed = DiscordEmbed(title, f"```{message}```", color=15548997) 24 | webhook = DiscordWebhook( 25 | url=discord['webhookUrl'], 26 | rate_limit_retry=True, 27 | username='Error Bot', 28 | embeds=[embed] 29 | ) 30 | response = webhook.execute() 31 | 32 | def discordUpdate(title, message=None): 33 | if discord['updateEnabled']: 34 | embed = DiscordEmbed(title, message, color=3066993) 35 | webhook = DiscordWebhook( 36 | url=discord['webhookUrl'], 37 | rate_limit_retry=True, 38 | username='Update Bot', 39 | embeds=[embed] 40 | ) 41 | response = webhook.execute() 42 | -------------------------------------------------------------------------------- /shared/overseerr.py: -------------------------------------------------------------------------------- 1 | import json 2 | import traceback 3 | import requests 4 | import datetime 5 | from shared.discord import discordError, discordUpdate 6 | from shared.shared import plex, overseerr, overseerrHeaders, tokensFilename 7 | 8 | host = plex['host'] 9 | metadataHost = plex['metadataHost'] 10 | 11 | 12 | def getUserForPlexToken(token): 13 | userRequest = requests.post(f"{overseerr['host']}/api/v1/auth/plex", json={'authToken': 
token}, headers=overseerrHeaders) 14 | user = userRequest.json() 15 | 16 | return user 17 | 18 | def getUserForPlexServerToken(serverToken): 19 | with open(tokensFilename, 'r') as tokensFile: 20 | tokens = json.load(tokensFile).values() 21 | token = next((token['token'] for token in tokens if token['serverToken'] == serverToken), plex['serverApiKey']) 22 | 23 | return getUserForPlexToken(token) 24 | 25 | def requestItem(user, ratingKey, watchlistedAtTimestamp, metadataHeaders, getSeason): 26 | try: 27 | userId = user['id'] 28 | username = user['displayName'] 29 | 30 | watchlistedAt = datetime.datetime.fromtimestamp(watchlistedAtTimestamp) 31 | 32 | metadataRequest = requests.get(f"{metadataHost}library/metadata/{ratingKey}", headers=metadataHeaders) 33 | metadata = next(iter(metadataRequest.json()['MediaContainer']['Metadata']), None) 34 | 35 | if not metadata: 36 | print(f"No metadata found for ratingKey {ratingKey}") 37 | return 38 | 39 | now = datetime.datetime.now() 40 | timespan = now - watchlistedAt 41 | 42 | tmdbId = next(guid[len('tmdb://'):] for guid in (item['id'] for item in metadata['Guid']) if 43 | guid.startswith('tmdb://')) 44 | 45 | data = { 46 | 'mediaType': 'movie' if metadata['type'] == 'movie' else 'tv', 47 | 'userId': userId, 48 | 'mediaId': int(tmdbId), 49 | } 50 | 51 | if metadata['type'] == 'show': 52 | data['seasons'] = getSeason() 53 | 54 | requestRequest = requests.post(f"{overseerr['host']}/api/v1/request", json=data, headers=overseerrHeaders) 55 | print(f"{metadata['title']} - {str(timespan)} - Requested") 56 | 57 | except: 58 | e = traceback.format_exc() 59 | 60 | print(f"Error processing request {ratingKey} for userId {userId} - {username}") 61 | print(e) 62 | 63 | discordError(f"Error processing request {ratingKey} for userId {userId} - {username}", e) -------------------------------------------------------------------------------- /shared/plex.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from shared.shared import plexHeaders, plex 3 | 4 | def getServerToken(token): 5 | response = requests.get(f"{plex['host']}/api/v2/resources?X-Plex-Token={token}", headers=plexHeaders) 6 | resources = response.json() 7 | server_token = next(resource['accessToken'] for resource in resources if resource['clientIdentifier'] == plex['serverMachineId']) 8 | return server_token -------------------------------------------------------------------------------- /shared/requests.py: -------------------------------------------------------------------------------- 1 | import time 2 | import requests 3 | from typing import Callable, Optional 4 | from shared.discord import discordError, discordUpdate 5 | 6 | 7 | def retryRequest( 8 | requestFunc: Callable[[], requests.Response], 9 | print: Callable[..., None] = print, 10 | retries: int = 1, 11 | delay: int = 1 12 | ) -> Optional[requests.Response]: 13 | """ 14 | Retry a request if the response status code is not in the 200 range. 15 | 16 | :param requestFunc: A callable that returns an HTTP response. 17 | :param print: Optional print function for logging. 18 | :param retries: The number of times to retry the request after the initial attempt. 19 | :param delay: The delay between retries in seconds. 20 | :return: The response object or None if all attempts fail. 
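Example (illustrative; any URL would do):
resp = retryRequest(lambda: requests.get("https://example.com/api"), retries=2, delay=5)
if resp is not None: data = resp.json()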
21 | """ 22 | attempts = retries + 1 # Total attempts including the initial one 23 | for attempt in range(attempts): 24 | try: 25 | response = requestFunc() 26 | if 200 <= response.status_code < 300: 27 | return response 28 | else: 29 | message = [ 30 | f"URL: {response.url}", 31 | f"Status code: {response.status_code}", 32 | f"Message: {response.reason}", 33 | f"Response: {response.content}", 34 | f"Attempt {attempt + 1} failed" 35 | ] 36 | for line in message: 37 | print(line) 38 | if attempt == retries: 39 | discordError("Request Failed", "\n".join(message)) 40 | else: 41 | update_message = message + [f"Retrying in {delay} seconds..."] 42 | discordUpdate("Retrying Request", "\n".join(update_message)) 43 | print(f"Retrying in {delay} seconds...") 44 | time.sleep(delay) 45 | except requests.RequestException as e: 46 | message = [ 47 | f"URL: {response.url if 'response' in locals() else 'unknown'}", 48 | f"Attempt {attempt + 1} encountered an error: {e}" 49 | ] 50 | for line in message: 51 | print(line) 52 | if attempt == retries: 53 | discordError("Request Exception", "\n".join(message)) 54 | else: 55 | update_message = message + [f"Retrying in {delay} seconds..."] 56 | discordUpdate("Retrying Request", "\n".join(update_message)) 57 | print(f"Retrying in {delay} seconds...") 58 | time.sleep(delay) 59 | 60 | return None -------------------------------------------------------------------------------- /shared/shared.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | from environs import Env 4 | 5 | env = Env() 6 | env.read_env() 7 | 8 | default_pattern = r"<[a-z0-9_]+>" 9 | 10 | def commonEnvParser(value, convert=None): 11 | if value is not None and re.match(default_pattern, value): 12 | return None 13 | return convert(value) if convert else value 14 | 15 | @env.parser_for("integer") 16 | def integerEnvParser(value): 17 | return commonEnvParser(value, int) 18 | 19 | @env.parser_for("string") 20 | def stringEnvParser(value): 21 | return commonEnvParser(value) 22 | 23 | watchlist = { 24 | 'plexProduct': env.string('WATCHLIST_PLEX_PRODUCT', default=None), 25 | 'plexVersion': env.string('WATCHLIST_PLEX_VERSION', default=None), 26 | 'plexClientIdentifier': env.string('WATCHLIST_PLEX_CLIENT_IDENTIFIER', default=None) 27 | } 28 | 29 | blackhole = { 30 | 'baseWatchPath': env.string('BLACKHOLE_BASE_WATCH_PATH', default=None), 31 | 'radarrPath': env.string('BLACKHOLE_RADARR_PATH', default=None), 32 | 'sonarrPath': env.string('BLACKHOLE_SONARR_PATH', default=None), 33 | 'failIfNotCached': env.bool('BLACKHOLE_FAIL_IF_NOT_CACHED', default=None), 34 | 'rdMountRefreshSeconds': env.integer('BLACKHOLE_RD_MOUNT_REFRESH_SECONDS', default=None), 35 | 'waitForTorrentTimeout': env.integer('BLACKHOLE_WAIT_FOR_TORRENT_TIMEOUT', default=None), 36 | 'historyPageSize': env.integer('BLACKHOLE_HISTORY_PAGE_SIZE', default=None), 37 | } 38 | 39 | server = { 40 | 'host': env.string('SERVER_DOMAIN', default=None) 41 | } 42 | 43 | plex = { 44 | 'host': env.string('PLEX_HOST', default=None), 45 | 'metadataHost': env.string('PLEX_METADATA_HOST', default=None), 46 | 'serverHost': env.string('PLEX_SERVER_HOST', default=None), 47 | 'serverMachineId': env.string('PLEX_SERVER_MACHINE_ID', default=None), 48 | 'serverApiKey': env.string('PLEX_SERVER_API_KEY', default=None), 49 | 'serverMovieLibraryId': env.integer('PLEX_SERVER_MOVIE_LIBRARY_ID', default=None), 50 | 'serverTvShowLibraryId': env.integer('PLEX_SERVER_TV_SHOW_LIBRARY_ID', default=None), 51 | 'serverPath': 
env.string('PLEX_SERVER_PATH', default=None), 52 | } 53 | 54 | overseerr = { 55 | 'host': env.string('OVERSEERR_HOST', default=None), 56 | 'apiKey': env.string('OVERSEERR_API_KEY', default=None) 57 | } 58 | 59 | sonarr = { 60 | 'host': env.string('SONARR_HOST', default=None), 61 | 'apiKey': env.string('SONARR_API_KEY', default=None) 62 | } 63 | 64 | radarr = { 65 | 'host': env.string('RADARR_HOST', default=None), 66 | 'apiKey': env.string('RADARR_API_KEY', default=None) 67 | } 68 | 69 | tautulli = { 70 | 'host': env.string('TAUTULLI_HOST', default=None), 71 | 'apiKey': env.string('TAUTULLI_API_KEY', default=None) 72 | } 73 | 74 | realdebrid = { 75 | 'enabled': env.bool('REALDEBRID_ENABLED', default=True), 76 | 'host': env.string('REALDEBRID_HOST', default=None), 77 | 'apiKey': env.string('REALDEBRID_API_KEY', default=None), 78 | 'mountTorrentsPath': env.string('REALDEBRID_MOUNT_TORRENTS_PATH', env.string('BLACKHOLE_RD_MOUNT_TORRENTS_PATH', default=None)) 79 | } 80 | 81 | torbox = { 82 | 'enabled': env.bool('TORBOX_ENABLED', default=None), 83 | 'host': env.string('TORBOX_HOST', default=None), 84 | 'apiKey': env.string('TORBOX_API_KEY', default=None), 85 | 'mountTorrentsPath': env.string('TORBOX_MOUNT_TORRENTS_PATH', default=None) 86 | } 87 | 88 | trakt = { 89 | 'apiKey': env.string('TRAKT_API_KEY', default=None) 90 | } 91 | 92 | discord = { 93 | 'enabled': env.bool('DISCORD_ENABLED', default=None), 94 | 'updateEnabled': env.bool('DISCORD_UPDATE_ENABLED', default=None), 95 | 'webhookUrl': env.string('DISCORD_WEBHOOK_URL', default=None) 96 | } 97 | 98 | repair = { 99 | 'repairInterval': env.string('REPAIR_REPAIR_INTERVAL', default=None), 100 | 'runInterval': env.string('REPAIR_RUN_INTERVAL', default=None) 101 | } 102 | 103 | plexHeaders = { 104 | 'Accept': 'application/json', 105 | 'X-Plex-Product': watchlist['plexProduct'], 106 | 'X-Plex-Version': watchlist['plexVersion'], 107 | 'X-Plex-Client-Identifier': watchlist['plexClientIdentifier'] 108 | } 109 | 110 | overseerrHeaders = {"X-Api-Key": f"{overseerr['apiKey']}"} 111 | 112 | pathToScript = os.path.dirname(os.path.abspath(__file__)) 113 | tokensFilename = os.path.join(pathToScript, 'tokens.json') 114 | 115 | # From Radarr Radarr/src/NzbDrone.Core/MediaFiles/MediaFileExtensions.cs 116 | mediaExtensions = [ 117 | ".m4v", 118 | ".3gp", 119 | ".nsv", 120 | ".ty", 121 | ".strm", 122 | ".rm", 123 | ".rmvb", 124 | ".m3u", 125 | ".ifo", 126 | ".mov", 127 | ".qt", 128 | ".divx", 129 | ".xvid", 130 | ".bivx", 131 | ".nrg", 132 | ".pva", 133 | ".wmv", 134 | ".asf", 135 | ".asx", 136 | ".ogm", 137 | ".ogv", 138 | ".m2v", 139 | ".avi", 140 | ".bin", 141 | ".dat", 142 | ".dvr-ms", 143 | ".mpg", 144 | ".mpeg", 145 | ".mp4", 146 | ".avc", 147 | ".vp3", 148 | ".svq3", 149 | ".nuv", 150 | ".viv", 151 | ".dv", 152 | ".fli", 153 | ".flv", 154 | ".wpl", 155 | ".img", 156 | ".iso", 157 | ".vob", 158 | ".mkv", 159 | ".mk3d", 160 | ".ts", 161 | ".wtv", 162 | ".m2ts", 163 | ".webm" 164 | ] 165 | 166 | def intersperse(arr1, arr2): 167 | i, j = 0, 0 168 | while i < len(arr1) and j < len(arr2): 169 | yield arr1[i] 170 | yield arr2[j] 171 | i += 1 172 | j += 1 173 | 174 | while i < len(arr1): 175 | yield arr1[i] 176 | i += 1 177 | 178 | while j < len(arr2): 179 | yield arr2[j] 180 | j += 1 181 | 182 | def ensureTuple(result): 183 | return result if isinstance(result, tuple) else (result, None) 184 | 185 | def unpackEnvProps(envProps): 186 | envValue = envProps[0] 187 | validate = envProps[1] if len(envProps) > 1 else None 188 | requiresPreviousSuccess = envProps[2] 
if len(envProps) > 2 else False 189 | return envValue, validate, requiresPreviousSuccess 190 | 191 | def checkRequiredEnvs(requiredEnvs): 192 | previousSuccess = True 193 | for envName, envProps in requiredEnvs.items(): 194 | envValue, validate, requiresPreviousSuccess = unpackEnvProps(envProps) 195 | 196 | if envValue is None or envValue == "": 197 | print(f"Error: {envName} is missing. Please check your .env file.") 198 | previousSuccess = False 199 | elif (previousSuccess or not requiresPreviousSuccess) and validate: 200 | success, message = ensureTuple(validate()) 201 | if not success: 202 | print(f"Error: {envName} is invalid. {message or 'Please check your .env file.'}") 203 | previousSuccess = False 204 | else: 205 | previousSuccess = True 206 | else: 207 | previousSuccess = True -------------------------------------------------------------------------------- /shared/tokens.json: -------------------------------------------------------------------------------- 1 | {} -------------------------------------------------------------------------------- /sockets/.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore everything in this directory 2 | * 3 | # Except this file 4 | !.gitignore -------------------------------------------------------------------------------- /test_ram.py: -------------------------------------------------------------------------------- 1 | import psutil 2 | import time 3 | 4 | def test_ram_allocation(): 5 | for i in range(50): 6 | size = (i + 1) * 100 * 1024 * 1024 # 100 MB increments 7 | print(f"Allocating {size / (1024 * 1024)} MB of RAM") 8 | data = " " * size # Allocate memory by using a string 9 | time.sleep(1) # Sleep for 1 second to observe memory usage 10 | ram_info = psutil.virtual_memory() 11 | print(f"Available RAM: {ram_info.available / (1024 * 1024 * 1024)} GB, Used RAM: {ram_info.used / (1024 * 1024 * 1024)} GB") 12 | del data # Free the memory 13 | 14 | test_ram_allocation() -------------------------------------------------------------------------------- /watchlist.py: -------------------------------------------------------------------------------- 1 | import traceback 2 | import requests 3 | import json 4 | import datetime 5 | from typing import List 6 | from shared.discord import discordError, discordUpdate 7 | from shared.shared import plex, plexHeaders, tokensFilename 8 | from shared.overseerr import requestItem, getUserForPlexToken 9 | import xml.etree.ElementTree as ET 10 | 11 | host = plex['host'] 12 | metadataHost = plex['metadataHost'] 13 | serverHost = plex['serverHost'] 14 | serverMachineId = plex['serverMachineId'] 15 | 16 | 17 | class SeasonMetadata: 18 | def __init__(self, json) -> None: 19 | self.viewedLeafCount = json['viewedLeafCount'] 20 | self.leafCount = json['leafCount'] 21 | self.index = json['index'] 22 | 23 | 24 | def getSeasonsMetadata(ratingKey, headers) -> List[SeasonMetadata]: 25 | # excludeAllLeaves=1? 
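# The metadata children endpoint lists a show's seasons; includeUserState=1
# asks Plex to include per-user watch state (viewedLeafCount/leafCount),
# which SeasonMetadata consumes below.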
--------------------------------------------------------------------------------
/sockets/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory
2 | *
3 | # Except this file
4 | !.gitignore
--------------------------------------------------------------------------------
/test_ram.py:
--------------------------------------------------------------------------------
1 | import psutil
2 | import time
3 |
4 | def test_ram_allocation():
5 |     for i in range(50):
6 |         size = (i + 1) * 100 * 1024 * 1024  # 100 MB increments
7 |         print(f"Allocating {size / (1024 * 1024)} MB of RAM")
8 |         data = " " * size  # Allocate memory by using a string
9 |         time.sleep(1)  # Sleep for 1 second to observe memory usage
10 |         ram_info = psutil.virtual_memory()
11 |         print(f"Available RAM: {ram_info.available / (1024 * 1024 * 1024)} GB, Used RAM: {ram_info.used / (1024 * 1024 * 1024)} GB")
12 |         del data  # Free the memory
13 |
14 | test_ram_allocation()
--------------------------------------------------------------------------------
/watchlist.py:
--------------------------------------------------------------------------------
1 | import traceback
2 | import requests
3 | import json
4 | import datetime
5 | from typing import List, Optional
6 | from shared.discord import discordError, discordUpdate
7 | from shared.shared import plex, plexHeaders, tokensFilename
8 | from shared.overseerr import requestItem, getUserForPlexToken
9 | import xml.etree.ElementTree as ET
10 |
11 | host = plex['host']
12 | metadataHost = plex['metadataHost']
13 | serverHost = plex['serverHost']
14 | serverMachineId = plex['serverMachineId']
15 |
16 |
17 | class SeasonMetadata:
18 |     def __init__(self, json) -> None:
19 |         self.viewedLeafCount = json['viewedLeafCount']
20 |         self.leafCount = json['leafCount']
21 |         self.index = json['index']
22 |
23 |
24 | def getSeasonsMetadata(ratingKey, headers) -> List[SeasonMetadata]:
25 |     # excludeAllLeaves=1?
26 |     seasonsMetadataRequest = requests.get(f"{metadataHost}library/metadata/{ratingKey}/children?excludeAllLeaves=1&includeUserState=1", headers=headers)
27 |     seasonsMetadata = seasonsMetadataRequest.json()['MediaContainer']['Metadata']
28 |
29 |     return list(map(SeasonMetadata, seasonsMetadata))
30 |
31 |
32 | def getServerSeasonsMetadata(ratingKey, headers, owner) -> Optional[List[SeasonMetadata]]:
33 |     headers = getServerHeaders(headers, owner)
34 |
35 |     serverMetadataInfoRequest = requests.get(f"{serverHost}/library/all?type=2&guid=plex%3A%2F%2Fshow%2F{ratingKey}", headers=headers)
36 |     serverMetadataInfo = serverMetadataInfoRequest.json()['MediaContainer']
37 |
38 |     if 'Metadata' in serverMetadataInfo:
39 |         serverMetadata = serverMetadataInfo['Metadata']
40 |         showServerMetadata = next(iter(serverMetadata), None)
41 |
42 |         if showServerMetadata:
43 |             serverRatingKey = showServerMetadata['ratingKey']
44 |             serverSeasonsMetadataRequest = requests.get(f"{serverHost}/library/metadata/{serverRatingKey}/children", headers=headers)
45 |             serverSeasonsMetadata = serverSeasonsMetadataRequest.json()['MediaContainer']['Metadata']
46 |
47 |             return list(map(SeasonMetadata, serverSeasonsMetadata))
48 |
49 |     return None
50 |
51 |
52 | def getCombinedSeasonsMetadata(ratingKey, headers, owner) -> List[SeasonMetadata]:
53 |     seasonsMetadata = getSeasonsMetadata(ratingKey, headers)
54 |     serverSeasonsMetadata = getServerSeasonsMetadata(ratingKey, headers, owner)
55 |
56 |     if not serverSeasonsMetadata: return seasonsMetadata
57 |
58 |     combinedSeasonsMetadata = []
59 |
60 |     for seasonMetadata in seasonsMetadata:
61 |         serverSeasonMetadata = next(iter(serverSeasonMetadata for serverSeasonMetadata in serverSeasonsMetadata if serverSeasonMetadata.index == seasonMetadata.index), None)
62 |
63 |         combinedSeasonMetadata = combineSeasonMetadata(seasonMetadata, serverSeasonMetadata)
64 |         combinedSeasonsMetadata.append(combinedSeasonMetadata)
65 |
66 |     return combinedSeasonsMetadata
67 |
68 |
69 | def combineSeasonMetadata(seasonMetadata: SeasonMetadata, serverSeasonMetadata: SeasonMetadata) -> SeasonMetadata:
70 |     if serverSeasonMetadata and serverSeasonMetadata.viewedLeafCount > seasonMetadata.viewedLeafCount:
71 |         seasonMetadata.viewedLeafCount = serverSeasonMetadata.viewedLeafCount
72 |
73 |     return seasonMetadata
74 |
75 |
76 | def getServerHeaders(headers, owner):
77 |     if owner: return headers
78 |
79 |     usersRequest = requests.get(f"{host}api/users", headers=headers)
80 |     users = ET.fromstring(usersRequest.content)
81 |
82 |     servers = (server.attrib for user in users for server in user)
83 |     serverId = next(server['id'] for server in servers if server['machineIdentifier'] == serverMachineId)
84 |
85 |     serverRequest = requests.get(f"{host}api/servers/{serverMachineId}/shared_servers/{serverId}", headers=headers)
86 |     serverToken = ET.fromstring(serverRequest.content)[0].attrib['accessToken']
87 |
88 |     return {
89 |         **headers,
90 |         'X-Plex-Token': serverToken
91 |     }
92 |
93 |
94 | def buildRecentItem(item):
95 |     return f"{item['ratingKey']}:{item['watchlistedAt']}"
96 |
97 |
98 | def getCurrentSeason(ratingKey, headers, token):
99 |     season = [1]
100 |
101 |     seasonsMetadata = getCombinedSeasonsMetadata(ratingKey, headers, token.get('owner', False))
102 |
103 |     # Consider logic for choosing the season
104 |     for seasonMetadata in reversed(seasonsMetadata):
105 |         totalCount = seasonMetadata.leafCount
106 |         remainingCount = totalCount - seasonMetadata.viewedLeafCount
107 |         if remainingCount <= 0 and totalCount != 0 and seasonMetadata != seasonsMetadata[-1]:
108 |             season = [seasonMetadata.index + 1]
109 |             break
110 |         elif remainingCount < totalCount:
111 |             season = [seasonMetadata.index]
112 |             break
113 |
114 |     return season
115 |
116 |
117 | def getWatchlistedAt(ratingKey, headers):
118 |     request = requests.get(f"{metadataHost}library/metadata/{ratingKey}/userState", headers=headers)
119 |
120 |     if request.status_code != 200: return
121 |
122 |     watchlistedAt = request.json()['MediaContainer']['UserState']['watchlistedAt']
123 |
124 |     return watchlistedAt
125 |
126 | def run():
127 |     print()
128 |     print(datetime.datetime.now())
129 |     print('Running Watchlist')
130 |
131 |     with open(tokensFilename, 'r') as tokensFile:
132 |         tokens = json.load(tokensFile)
133 |
134 |     for userId, token in tokens.items():
135 |         try:
136 |             headers = {
137 |                 **plexHeaders,
138 |                 'X-Plex-Token': token['token']
139 |             }
140 |
141 |             def requestWatchlist(tryAgain=True):
142 |                 try:
143 |                     return requests.get(
144 |                         f"{metadataHost}library/sections/watchlist/all?includeFields=ratingKey%2CwatchlistedAt&sort=watchlistedAt%3Adesc",
145 |                         headers={
146 |                             **headers,
147 |                             'If-None-Match': token.get('etag', ''),  # tolerate entries that have no stored etag yet
148 |                         })
149 |                 except Exception:
150 |                     if tryAgain:
151 |                         return requestWatchlist(tryAgain=False)
152 |                     else:
153 |                         raise
154 |
155 |             watchlistRequest = requestWatchlist()
156 |
157 |             if watchlistRequest.status_code == 401:
158 |                 print(f"UserId {userId} no longer authenticated")
159 |                 discordError(f"UserId {userId} no longer authenticated")
160 |                 continue
161 |
162 |             if watchlistRequest.status_code == 304:
163 |                 print(f"No changes for userId {userId}")
164 |                 continue
165 |
166 |             if watchlistRequest.status_code != 200:
167 |                 print(watchlistRequest)
168 |                 print(watchlistRequest.url)
169 |                 continue
170 |
171 |             etag = watchlistRequest.headers['etag']
172 |
173 |             now = datetime.datetime.now()
174 |             recentlyProcessedItems = token.get('recentlyProcessedItems', [])
175 |
176 |             watchlist = watchlistRequest.json()['MediaContainer']
177 |
178 |             if 'Metadata' not in watchlist:
179 |                 continue
180 |
181 |             watchlistItems = watchlist['Metadata']
182 |
183 |             recentWatchlist = []
184 |             newRecentlyProcessedItems = []
185 |
186 |             for item in watchlistItems:
187 |                 try:
188 |                     watchlistedAt = datetime.datetime.fromtimestamp(item['watchlistedAt'])
189 |                     discordUpdate('Watchlist has resumed functioning')  # Plex is including watchlistedAt again
190 |                 except Exception:
191 |                     ratingKey = item['ratingKey']
192 |                     watchlistedAtTimestamp = getWatchlistedAt(ratingKey, headers)
193 |
194 |                     if not watchlistedAtTimestamp:
195 |                         print(f"No watchlisted timestamp for RatingKey {ratingKey} and UserId {userId}")
196 |                         discordError(f"No watchlisted timestamp for RatingKey {ratingKey} and UserId {userId}")
197 |                         continue
198 |
199 |                     watchlistedAt = datetime.datetime.fromtimestamp(watchlistedAtTimestamp)
200 |                     item['watchlistedAt'] = watchlistedAtTimestamp
201 |
202 |                 if now - watchlistedAt < datetime.timedelta(hours=1):
203 |                     recentItem = buildRecentItem(item)
204 |                     newRecentlyProcessedItems.append(recentItem)
205 |
206 |                     if recentItem not in recentlyProcessedItems:
207 |                         recentWatchlist.append(item)
208 |                 else:
209 |                     break
210 |
211 |             with open(tokensFilename, 'r+') as tokensFile:
212 |                 tokens = json.load(tokensFile)
213 |                 token = tokens[userId]
214 |                 token['etag'] = etag
215 |                 token['recentlyProcessedItems'] = newRecentlyProcessedItems
216 |                 tokensFile.seek(0)
217 |                 json.dump(tokens, tokensFile)
218 |                 tokensFile.truncate()
219 |
220 |             user = getUserForPlexToken(token['token'])
221 |             userId = user['id']
222 |             username = user['displayName']
223 |
224 |             print(f"Requesting new items for userId {userId} - {username}")
225 |
226 |             if not recentWatchlist:
227 |                 print("No new items were found")
228 |
229 |             for item in recentWatchlist:
230 |                 ratingKey = item['ratingKey']
231 |                 watchlistedAt = item['watchlistedAt']
232 |                 requestItem(user, ratingKey, watchlistedAt, headers, getSeason=lambda: getCurrentSeason(ratingKey, headers, token))
233 |
234 |         except Exception:
235 |             e = traceback.format_exc()
236 |
237 |             print(f"Error processing requests for userId {userId}")
238 |             print(e)
239 |
240 |             discordError(f"Error processing requests for userId {userId}", e)
241 |
242 | if __name__ == "__main__":
243 |     run()
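For context on `getServerHeaders` above: when the token does not belong to the server owner, it walks the plex.tv `api/users` XML to find the shared-server id for this machine, then fetches that shared server's access token. The users response it parses is roughly of this shape (attributes trimmed and values illustrative; the real payload carries more):

<MediaContainer>
  <User id="12345678" title="friend">
    <Server id="87654321" machineIdentifier="abc123def456" name="Home Server" />
  </User>
</MediaContainer>

The `shared_servers` lookup then reads `accessToken` off the first child element of its response.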
print(f"Requesting new items for userId {userId} - {username}") 225 | 226 | if not recentWatchlist: 227 | print("No new items were found") 228 | 229 | for item in recentWatchlist: 230 | ratingKey = item['ratingKey'] 231 | watchlistedAt = item['watchlistedAt'] 232 | requestItem(user, ratingKey, watchlistedAt, headers, getSeason=lambda: getCurrentSeason(ratingKey, headers, token)) 233 | 234 | except: 235 | e = traceback.format_exc() 236 | 237 | print(f"Error processing requests for userId {userId}") 238 | print(e) 239 | 240 | discordError(f"Error processing requests for userId {userId}", e) 241 | 242 | if __name__ == "__main__": 243 | run() -------------------------------------------------------------------------------- /watchlist_runner.py: -------------------------------------------------------------------------------- 1 | import time 2 | from watchlist import run 3 | 4 | if __name__ == "__main__": 5 | while True: 6 | try: 7 | run() 8 | except Exception as e: 9 | print(f"An error occurred: {e}") 10 | time.sleep(60) -------------------------------------------------------------------------------- /zurg_symlink_update.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import argparse 4 | 5 | data_directory = "/path/to/zurg/data" 6 | symlink_directory = "/path/to/symlinks" 7 | switch_to_retain = True 8 | 9 | def update_symlink(src_path, new_name, dry_run): 10 | target_link_path = os.readlink(src_path) 11 | path_parts = os.path.split(os.path.dirname(target_link_path)) 12 | new_target_path = os.path.join(os.path.join(*path_parts[:-1]), new_name, os.path.basename(target_link_path)) 13 | if not dry_run: 14 | os.unlink(src_path) 15 | os.symlink(new_target_path, src_path) 16 | print(f"Updated symlink: {os.path.basename(src_path)} -> {new_name}") 17 | 18 | def main(dry_run, no_confirm): 19 | print("Loading symlinks") 20 | # Load all symlinks from the symlink directory into memory 21 | symlink_map = {} 22 | for root, dirs, files in os.walk(symlink_directory): 23 | for file in files: 24 | full_path = os.path.join(root, file) 25 | if os.path.islink(full_path): 26 | symlink_map[full_path] = os.readlink(full_path) 27 | 28 | print("Loading symlinks complete") 29 | 30 | for filename in os.listdir(data_directory): 31 | file_path = os.path.join(data_directory, filename) 32 | with open(file_path, 'r') as file: 33 | data = json.load(file) 34 | original_name = data.get('OriginalName') 35 | current_name = data.get('Name') 36 | 37 | if not original_name or not current_name: 38 | print(f"Skipping {original_name or current_name}") 39 | continue 40 | 41 | original_name_no_ext = os.path.splitext(original_name)[0] 42 | # Check all symlinks and update if they point to a relevant path 43 | for symlink_path, target_path in symlink_map.items(): 44 | target_dir_name = os.path.basename(os.path.dirname(target_path)) 45 | if switch_to_retain: 46 | if (target_dir_name == original_name or target_dir_name == original_name_no_ext) and target_dir_name != current_name: 47 | if dry_run or no_confirm or input(f"Update symlink for {os.path.basename(symlink_path)} from {original_name} to {current_name}? (y/n): ").lower() == 'y': 48 | update_symlink(symlink_path, current_name, dry_run) 49 | else: 50 | print(f"Skipping {target_dir_name}") 51 | else: 52 | if target_dir_name == current_name and target_dir_name != original_name: 53 | if dry_run or no_confirm or input(f"Revert symlink for {os.path.basename(symlink_path)} from {current_name} to {original_name}? 
(y/n): ").lower() == 'y': 54 | update_symlink(symlink_path, original_name, dry_run) 55 | else: 56 | print(f"Skipping {target_dir_name}") 57 | 58 | if __name__ == '__main__': 59 | parser = argparse.ArgumentParser(description='Update symlinks using Zurg data folder.') 60 | parser.add_argument('--dry-run', action='store_true', help='Print actions without executing') 61 | parser.add_argument('--no-confirm', action='store_true', help='Execute without confirmation') 62 | args = parser.parse_args() 63 | 64 | main(args.dry_run, args.no_confirm) 65 | 66 | 67 | --------------------------------------------------------------------------------