├── .gitattributes ├── .github ├── FUNDING.yml └── workflows │ └── docker-image.yml ├── .gitignore ├── Dockerfile ├── README.md ├── api_tracker.py ├── branch_id ├── cli_battery ├── README.md ├── app │ ├── __init__.py │ ├── background_jobs.py │ ├── database.py │ ├── direct_api.py │ ├── limiter.py │ ├── logger_config.py │ ├── metadata_manager.py │ ├── routes │ │ ├── api_routes.py │ │ ├── settings_routes.py │ │ ├── site_routes.py │ │ └── trakt_routes.py │ ├── settings.py │ ├── static │ │ ├── android-chrome-192x192.png │ │ ├── android-chrome-512x512.png │ │ ├── apple-touch-icon.png │ │ ├── cmd-terminal-icon.png │ │ ├── css │ │ │ └── style.css │ │ ├── favicon-128x128.png │ │ ├── favicon-16x16.png │ │ ├── favicon-192x192.png │ │ ├── favicon-32x32.png │ │ ├── favicon-48x48.png │ │ ├── favicon-512x512.png │ │ ├── favicon.ico │ │ ├── icon-32x32.png │ │ ├── js │ │ │ └── menu.js │ │ ├── loadingimage.gif │ │ ├── site.webmanifest │ │ ├── white-icon-16x16.png │ │ └── white-icon-32x32.png │ ├── templates │ │ ├── base.html │ │ ├── debug.html │ │ ├── home.html │ │ ├── providers.html │ │ ├── settings.html │ │ └── settings_tabs │ │ │ ├── advanced.html │ │ │ ├── general.html │ │ │ └── trakt.html │ ├── trakt_auth.py │ └── trakt_metadata.py ├── main.py ├── requirements.txt └── version.txt ├── cli_debrid.spec ├── config_manager.py ├── content_checkers ├── __init__.py ├── collected.py ├── content_cache_management.py ├── content_source_detail.py ├── mdb_list.py ├── overseerr.py ├── plex_rss_watchlist.py ├── plex_token_manager.py ├── plex_watchlist.py └── trakt.py ├── database.py ├── database ├── __init__.py ├── blacklist.py ├── collected_items.py ├── core.py ├── database_reading.py ├── database_writing.py ├── duplicate_item_helper.py ├── maintenance.py ├── migrations.py ├── poster_management.py ├── schema_management.py ├── statistics.py ├── symlink_verification.py ├── torrent_tracking.py └── wanted_items.py ├── db_test.py ├── debrid ├── __init__.py ├── base.py ├── common │ ├── __init__.py │ ├── api.py │ ├── c7d9e45f.py │ ├── cache.py │ ├── torrent.py │ └── utils.py ├── generate_encrypted_values.py ├── real_debrid │ ├── __init__.py │ ├── api.py │ ├── client.py │ ├── exceptions.py │ └── torrent.py └── status.py ├── debug_episode.py ├── debug_metadata.py ├── docker-compose.yml ├── docs └── real_debrid.md ├── extensions.py ├── find_imports.py ├── hooks ├── hook-PTT.py ├── hook-aiohttp.py ├── hook-babelfish.py ├── hook-beautifulsoup4.py ├── hook-bencode.py ├── hook-colorlog.py ├── hook-database.py ├── hook-flask.py ├── hook-grpcio.py ├── hook-guessit.py ├── hook-multiprocessing.py ├── hook-nyaapy.py ├── hook-parsedatetime.py ├── hook-pillow.py ├── hook-plexapi.py ├── hook-pykakasi.py ├── hook-pytrakt.py ├── hook-requests.py ├── hook-socket.py ├── hook-sqlalchemy.py ├── hook-supervisor.py ├── hook-threading.py ├── hook-urllib3.py ├── hook-urwid.py └── hook-werkzeug.py ├── initialization.py ├── logging_config.py ├── main.py ├── manual_blacklist.py ├── metadata └── metadata.py ├── not_wanted_magnets.py ├── notifications.py ├── optional_default_versions.json ├── performance_monitor.py ├── poster_cache.py ├── queue_manager.py ├── queue_utils.py ├── queues ├── adding_queue.py ├── anime_matcher.py ├── blacklisted_queue.py ├── checking_queue.py ├── media_matcher.py ├── mock_queue_manager.py ├── pending_uncached_queue.py ├── scraping_queue.py ├── sleeping_queue.py ├── torrent_processor.py ├── unreleased_queue.py ├── upgrading_queue.py └── wanted_queue.py ├── rclone └── config │ └── rclone.conf ├── requirements-linux.txt ├── 
requirements.txt ├── reset_admin.sh ├── reverse_parser.py ├── routes ├── __init__.py ├── __init__.py.bak ├── api_summary_routes.py ├── auth_routes.py ├── base_routes.py ├── content_requestor_routes.py ├── cors_testing_routes.py ├── database_routes.py ├── debug_routes.py ├── library_management_routes.py ├── log_viewer_routes.py ├── magnet_routes.py ├── models.py ├── onboarding_routes.py ├── performance_routes.py ├── program_operation_routes.py ├── queues_routes.py ├── scraper_routes.py ├── settings_routes.py ├── settings_validation_routes.py ├── statistics_routes.py ├── torrent_status_routes.py ├── trakt_friends_routes.py ├── trakt_routes.py ├── user_management_routes.py ├── utils.py ├── video_routes.py └── webhook_routes.py ├── run_program.py ├── sample.env ├── scraper ├── .alias_disabled ├── __init__.py ├── functions │ ├── __init__.py │ ├── adult_terms.py │ ├── common.py │ ├── deduplicate_results.py │ ├── file_processing.py │ ├── filter_results.py │ ├── logging.py │ ├── other_functions.py │ ├── ptt_parser.py │ ├── rank_results.py │ └── similarity_checks.py ├── jackett.py ├── knightcrawler.py ├── mediafusion.py ├── nyaa.py ├── old_nyaa.py ├── old_scraper.py ├── prowlarr.py ├── scraper.py ├── scraper_manager.py ├── torrentio.py └── zilean.py ├── scraper_tester.py ├── scripts └── analyze_memory.py ├── settings.py ├── settings_schema.py ├── setup.py ├── static ├── android-chrome-192x192.png ├── android-chrome-512x512.png ├── apple-touch-icon.png ├── cmd-terminal-icon.png ├── css │ ├── base.css │ ├── content_requestor.css │ ├── database.css │ ├── debug_functions.css │ ├── magnet_assign.css │ ├── manual_blacklist.css │ ├── onboarding.css │ ├── performance.css │ ├── plyr.css │ ├── queues.css │ ├── rate_limits.css │ ├── reverse_parser.css │ ├── scraper-mobile.css │ ├── scraper.css │ ├── scraper_tester.css │ ├── scraper_trending.css │ ├── settings.css │ ├── statistics.css │ └── vidstack.css ├── favicon-128x128.png ├── favicon-16x16.png ├── favicon-192x192.png ├── favicon-256x256-white.png ├── favicon-32x32.png ├── favicon-48x48.png ├── favicon-512x512-white.png ├── favicon-512x512.png ├── favicon.ico ├── icon-16x16.png ├── icon-32x32.png ├── image │ ├── placeholder-horizontal.png │ └── placeholder.png ├── images │ ├── imdb.png │ ├── placeholder.png │ ├── tmdb.png │ └── trakt.png ├── js │ ├── base.js │ ├── content_requestor.js │ ├── loading.js │ ├── notifications.js │ ├── plyr.js │ ├── program_controls.js │ ├── scraper.js │ ├── scraper_tester.js │ ├── settings.js │ ├── task_monitor.js │ └── tooltips.js ├── loadingimage.gif ├── site.webmanifest ├── white-icon-16x16.png ├── white-icon-32x32.ico └── white-icon-32x32.png ├── supervisord.conf ├── template_utils.py ├── templates ├── api_call_summary.html ├── base.html ├── browse.html ├── content_requestor.html ├── cors_test.html ├── database.html ├── database_pagination.html ├── debug_functions.html ├── debug_not_wanted.html ├── error.html ├── library_management.html ├── login.html ├── logs.html ├── magnet_assign.html ├── manage_users.html ├── manual_blacklist.html ├── onboarding.html ├── onboarding_navigation.html ├── onboarding_step_1.html ├── onboarding_step_2.html ├── onboarding_step_3.html ├── onboarding_step_3a.html ├── onboarding_step_4.html ├── onboarding_step_5.html ├── onboarding_step_6.html ├── over_usage.html ├── performance │ └── dashboard.html ├── play.html ├── plex_auth_callback.html ├── queues.html ├── realtime_api_calls.html ├── reverse_parser.html ├── scraper.html ├── scraper_tester.html ├── settings_base.html ├── settings_tabs │ ├── 
additional.html │ ├── content_sources.html │ ├── debug.html │ ├── notifications.html │ ├── required.html │ ├── reverse_parser.html │ ├── scrapers.html │ ├── scraping.html │ └── true_debug.html ├── setup_admin.html ├── statistics.html ├── task_timings.html ├── torrent_status.html ├── torrent_tracking.html ├── trakt_friends.html └── watch_history.html ├── test_torrent_status.py ├── tests ├── __init__.py ├── test_local_library_scan.py ├── test_plex_removal_cache.py ├── test_plex_watchlist.py ├── test_session_handling.py └── test_upgrading_queue.py ├── tooltip_schema.json ├── unset_env_variables.sh ├── utilities ├── __init__.py ├── anidb_functions.py ├── config │ ├── __init__.py │ └── downsub_config.py ├── downsub.py ├── emby_functions.py ├── file_lock.py ├── local_library_scan.py ├── log_analyzer.py ├── manual_scrape.py ├── plex_db_functions.py ├── plex_functions.py ├── plex_matching_functions.py ├── plex_removal_cache.py ├── plex_test.py ├── plex_verification.py ├── plex_watch_history_functions.py ├── post_processing.py ├── result_viewer.py ├── test_anidb.py ├── testing_plex.py └── zurg_utilities.py ├── version.txt ├── wake_count_manager.py ├── web_scraper.py ├── web_server.py ├── windows_build.spec ├── windows_wrapper.py ├── zurg └── config │ └── zurg_update.sh └── zurg_update.sh /.gitattributes: -------------------------------------------------------------------------------- 1 | branch_id merge=ours -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [godver3] 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | 5 | # C extensions 6 | *.so 7 | 8 | # Distribution / packaging 9 | bin/ 10 | build/ 11 | develop-eggs/ 12 | dist/ 13 | eggs/ 14 | lib/ 15 | lib64/ 16 | parts/ 17 | sdist/ 18 | var/ 19 | *.egg-info/ 20 | .installed.cfg 21 | *.egg 22 | 23 | # Installer logs 24 | pip-log.txt 25 | pip-delete-this-directory.txt 26 | 27 | # Unit test / coverage reports 28 | .tox/ 29 | .coverage 30 | .cache 31 | nosetests.xml 32 | coverage.xml 33 | 34 | # Translations 35 | *.mo 36 | 37 | # Mr Developer 38 | .mr.developer.cfg 39 | .project 40 | .pydevproject 41 | 42 | # Rope 43 | .ropeproject 44 | 45 | # Django stuff: 46 | *.log 47 | *.pot 48 | 49 | # Sphinx documentation 50 | docs/_build/ 51 | 52 | **/__pycache__/ 53 | logs/*.log* 54 | archives 55 | *.pkl 56 | *.db 57 | config.ini 58 | commit.sh 59 | logs 60 | update.sh 61 | db_content 62 | content_checkers/*backup* 63 | scraper/debug_scraper.py 64 | dangling.sh 65 | imdb_to_tmdb.py 66 | old_functions.txt 67 | scraper_framework.py 68 | upgrading_simulation.py 69 | sampling.sh 70 | bk* 71 | *bk 72 | dev-update.sh 73 | helper* 74 | sampling.sh 75 | test.py 76 | utilities/nohup.out 77 | flask_session 78 | *.lock 79 | /config/*.json 80 | .aider* 81 | build.sh 82 | dist 83 | build 84 | venv 85 | user 86 | 87 | # PyInstaller 88 | # *.spec 89 | build_venv/ 90 | build_env/ 91 | 92 | # IDE specific files 93 | .vscode/ 94 | .idea/ 95 | *.swp 96 | *.swo 97 | 98 | # Windows specific 99 | *.exe 100 | *.pyd 101 | *.dll 102 | 103 | # Environment 104 | .env 105 | .venv 106 | env/ 107 | ENV/ 108 | 109 | # Logs and databases 110 | *.log 111 | flask_session/ 112 | *.db 113 | 
*.sqlite 114 | *.sqlite3 115 | 116 | # Cache 117 | __pycache__/ 118 | .pytest_cache/ 119 | .mypy_cache/ 120 | 121 | # Distribution / packaging 122 | dist/ 123 | build/ 124 | *.egg-info/ 125 | *.egg 126 | 127 | # Project specific 128 | test_output.mp4 129 | user/ 130 | /config/*.json 131 | .aider* 132 | 133 | # Virtual Environment 134 | venv/ 135 | build_venv/ 136 | build_env/ 137 | 138 | nohup.out 139 | run.sh 140 | 141 | ~ 142 | /config/config.json.backup 143 | /config/secret_key 144 | /config 145 | auto_blacklist -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Use Python 3 as the base image 2 | FROM python:3.11-slim 3 | 4 | # Set the working directory in the container 5 | WORKDIR /app 6 | 7 | # Install build dependencies and supervisor 8 | RUN apt-get update && apt-get install -y gcc supervisor gosu && \ 9 | rm -rf /var/lib/apt/lists/* 10 | 11 | # Set default environment variables for PUID/PGID 12 | ENV PUID=0 13 | ENV PGID=0 14 | 15 | # Copy only the requirements file first to leverage Docker cache 16 | COPY requirements-linux.txt . 17 | 18 | # Install the requirements 19 | RUN pip install --no-cache-dir -r requirements-linux.txt 20 | 21 | # Copy the current directory contents into the container at /app 22 | COPY . . 23 | 24 | # Create necessary directories and files with proper permissions 25 | RUN mkdir -p /user/db_content /user/config /user/logs && \ 26 | touch /user/logs/debug.log && \ 27 | chmod -R 755 /user 28 | 29 | # Set the TERM environment variable for proper terminal attachment 30 | ENV TERM=xterm 31 | 32 | # Comment out unwanted commands in shell initialization files 33 | RUN sed -i 's/^export LC_ALL=C.UTF-8/# export LC_ALL=C.UTF-8/' /etc/profile && \ 34 | sed -i 's/^clear/# clear/' /etc/profile 35 | 36 | # Expose ports for both Flask apps 37 | EXPOSE 5000 5001 38 | 39 | # Copy supervisord configuration 40 | COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf 41 | 42 | # Create an entrypoint script 43 | RUN echo '#!/bin/bash\n\ 44 | \n\ 45 | # Function to set permissions\n\ 46 | set_permissions() {\n\ 47 | echo "Setting permissions for /app and /user directories..."\n\ 48 | find /user -type d -exec chmod 755 {} \;\n\ 49 | find /user -type f -exec chmod 644 {} \;\n\ 50 | chown -R $PUID:$PGID /app /user\n\ 51 | echo "Permissions set successfully"\n\ 52 | }\n\ 53 | \n\ 54 | # Create user with specified PUID/PGID or use root\n\ 55 | if [ $PUID != 0 ] || [ $PGID != 0 ]; then\n\ 56 | echo "Starting with custom user - PUID: $PUID, PGID: $PGID"\n\ 57 | groupadd -g $PGID appuser\n\ 58 | useradd -u $PUID -g $PGID -d /app appuser\n\ 59 | set_permissions\n\ 60 | echo "Created user appuser with UID: $PUID and GID: $PGID"\n\ 61 | # Update supervisord config to use the new user\n\ 62 | sed -i "s/user=root/user=appuser/" /etc/supervisor/conf.d/supervisord.conf\n\ 63 | echo "Updated supervisord configuration to use appuser"\n\ 64 | else\n\ 65 | echo "Starting with root user (PUID=0, PGID=0)"\n\ 66 | set_permissions\n\ 67 | fi\n\ 68 | \n\ 69 | # Start supervisord and tail logs\n\ 70 | if [ $PUID != 0 ] || [ $PGID != 0 ]; then\n\ 71 | echo "Starting supervisord as appuser"\n\ 72 | gosu appuser supervisord -n -c /etc/supervisor/conf.d/supervisord.conf & \n\ 73 | else\n\ 74 | echo "Starting supervisord as root"\n\ 75 | supervisord -n -c /etc/supervisor/conf.d/supervisord.conf & \n\ 76 | fi\n\ 77 | \n\ 78 | sleep 2\n\ 79 | exec tail -F 
/user/logs/debug.log' > /app/entrypoint.sh && \ 80 | chmod +x /app/entrypoint.sh 81 | 82 | # Use the entrypoint script 83 | CMD ["/app/entrypoint.sh"] 84 | -------------------------------------------------------------------------------- /branch_id: -------------------------------------------------------------------------------- 1 | main 2 | -------------------------------------------------------------------------------- /cli_battery/README.md: -------------------------------------------------------------------------------- 1 | ![Python Tests](https://github.com/godver3/cli_battery/actions/workflows/python-tests.yml/badge.svg) 2 | 3 | # CLI Battery 4 | 5 | CLI Battery is a Flask-based web application for managing metadata for movies and TV shows. It integrates with Trakt for fetching and updating metadata. 6 | 7 | ## Features 8 | 9 | - Dashboard with statistics about items and metadata 10 | - Debug view for all items in the database 11 | - Metadata management for movies and TV shows 12 | - Integration with Trakt API for fetching metadata 13 | - Provider management (enable/disable metadata providers) 14 | - Settings management 15 | - Poster image retrieval 16 | 17 | ## API Endpoints 18 | 19 | - `/`: Home page with dashboard statistics 20 | - `/debug`: Debug view of all items 21 | - `/metadata`: View all metadata 22 | - `/providers`: Manage metadata providers 23 | - `/settings`: Application settings 24 | - `/api/metadata/`: Fetch metadata for a specific item 25 | - `/api/seasons/`: Fetch seasons data for a TV show 26 | - `/authorize_trakt`: Initiate Trakt authorization 27 | - `/trakt_callback`: Handle Trakt authorization callback 28 | 29 | ## Setup 30 | 31 | 1. Clone the repository 32 | 2. Install dependencies: `pip install -r requirements.txt` 33 | 3. Set up your Trakt API credentials in the settings 34 | 4. Run the application: `python app.py` 35 | 36 | ## Testing 37 | 38 | Run the tests using: 39 | ```python -m unittest discover tests``` 40 | 41 | ## Contributing 42 | 43 | Contributions are welcome! Please feel free to submit a Pull Request. 
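## Example API request

A minimal sketch of querying the metadata endpoint with `requests` (already pinned in `requirements.txt`). The port and the IMDb ID path segment are assumptions — the endpoint list above lost its path parameters — so adjust both to your deployment:

```python
import requests

# Assumed: the battery listens on port 5001 and /api/metadata/ takes an
# IMDb ID as its final path segment.
resp = requests.get("http://localhost:5001/api/metadata/tt0903747")
resp.raise_for_status()
print(resp.json())
```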
44 | -------------------------------------------------------------------------------- /cli_battery/app/__init__.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | from .database import init_db 3 | import os 4 | from .limiter import init_limiter 5 | 6 | def create_app(): 7 | app = Flask(__name__) 8 | 9 | # Get db_content directory from environment variable with fallback 10 | db_directory = os.environ.get('USER_DB_CONTENT', '/user/db_content') 11 | os.makedirs(db_directory, exist_ok=True) 12 | 13 | db_path = os.path.join(db_directory, 'cli_battery.db') 14 | app.config['SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{db_path}' 15 | app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False 16 | 17 | with app.app_context(): 18 | init_db() 19 | init_limiter(app) # Initialize the rate limiter 20 | 21 | # Import and register blueprints 22 | from app.routes.site_routes import main_bp 23 | from app.routes.api_routes import api_bp 24 | from app.routes.trakt_routes import trakt_bp 25 | from app.routes.settings_routes import settings_bp 26 | #from app.routes.queues_routes import queues_bp 27 | app.register_blueprint(main_bp) 28 | app.register_blueprint(api_bp) 29 | app.register_blueprint(trakt_bp) 30 | app.register_blueprint(settings_bp) 31 | #app.register_blueprint(queues_bp) 32 | 33 | return app -------------------------------------------------------------------------------- /cli_battery/app/direct_api.py: -------------------------------------------------------------------------------- 1 | from .metadata_manager import MetadataManager 2 | from typing import Dict, Any, Tuple, Optional 3 | from .logger_config import logger 4 | from .database import init_db, Session as DbSession 5 | 6 | class DirectAPI: 7 | def __init__(self): 8 | # Initialize database engine and configure session 9 | engine = init_db() 10 | DbSession.configure(bind=engine) 11 | 12 | @staticmethod 13 | def get_movie_metadata(imdb_id: str) -> Tuple[Dict[str, Any], str]: 14 | metadata, source = MetadataManager.get_movie_metadata(imdb_id) 15 | return metadata, source 16 | 17 | @staticmethod 18 | def get_movie_release_dates(imdb_id: str): 19 | release_dates, source = MetadataManager.get_release_dates(imdb_id) 20 | return release_dates, source 21 | 22 | @staticmethod 23 | def get_episode_metadata(imdb_id): 24 | metadata, source = MetadataManager.get_metadata_by_episode_imdb(imdb_id) 25 | return metadata, source 26 | 27 | @staticmethod 28 | def get_show_metadata(imdb_id): 29 | import logging 30 | logging.info(f"DirectAPI.get_show_metadata called for {imdb_id}") 31 | metadata, source = MetadataManager.get_show_metadata(imdb_id) 32 | if metadata and 'seasons' in metadata: 33 | logging.info(f"DirectAPI got {len(metadata['seasons'])} seasons") 34 | #for season_num in metadata['seasons'].keys(): 35 | #logging.info(f"Season {season_num} has {len(metadata['seasons'][season_num].get('episodes', {}))} episodes") 36 | return metadata, source 37 | 38 | @staticmethod 39 | def get_show_seasons(imdb_id: str) -> Tuple[Dict[str, Any], str]: 40 | seasons, source = MetadataManager.get_seasons(imdb_id) 41 | return seasons, source 42 | 43 | @staticmethod 44 | def tmdb_to_imdb(tmdb_id: str, media_type: str = None) -> Tuple[Optional[str], str]: 45 | """ 46 | Convert TMDB ID to IMDB ID 47 | Args: 48 | tmdb_id: The TMDB ID to convert 49 | media_type: Either 'movie' or 'show' to specify what type of content to look for 50 | """ 51 | imdb_id, source = MetadataManager.tmdb_to_imdb(tmdb_id, media_type=media_type) 52 | return
imdb_id, source 53 | 54 | @staticmethod 55 | def get_show_aliases(imdb_id: str): 56 | """Get all aliases for a show by IMDb ID""" 57 | aliases, source = MetadataManager.get_show_aliases(imdb_id) 58 | return aliases, source 59 | 60 | @staticmethod 61 | def get_movie_aliases(imdb_id: str): 62 | """Get all aliases for a movie by IMDb ID""" 63 | aliases, source = MetadataManager.get_movie_aliases(imdb_id) 64 | return aliases, source -------------------------------------------------------------------------------- /cli_battery/app/limiter.py: -------------------------------------------------------------------------------- 1 | from flask_limiter import Limiter 2 | from flask_limiter.util import get_remote_address 3 | 4 | limiter = Limiter( 5 | key_func=get_remote_address, 6 | storage_uri="memory://" 7 | ) 8 | 9 | def init_limiter(app): 10 | """Initialize the rate limiter with the Flask app""" 11 | limiter.init_app(app) -------------------------------------------------------------------------------- /cli_battery/app/logger_config.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import colorlog 3 | from logging.handlers import RotatingFileHandler 4 | import os 5 | 6 | class ImmediateRotatingFileHandler(RotatingFileHandler): 7 | """A RotatingFileHandler that flushes immediately after each write""" 8 | def emit(self, record): 9 | super().emit(record) 10 | self.flush() # Force immediate flush 11 | 12 | # Create a filter to exclude logs from specific files 13 | class ExcludeFilter(logging.Filter): 14 | def filter(self, record): 15 | return not (record.filename == 'rules.py' or record.filename == 'rebulk.py' or record.filename == 'processors.py') 16 | 17 | def setup_logger(): 18 | # Get log directory from environment variable with fallback 19 | log_dir = os.environ.get('USER_LOGS', '/user/logs') 20 | os.makedirs(log_dir, exist_ok=True) 21 | 22 | # Create logger 23 | logger = colorlog.getLogger('cli_battery') 24 | logger.setLevel(logging.DEBUG) # Ensure logger itself allows DEBUG 25 | 26 | # Clear any existing handlers 27 | logger.handlers.clear() 28 | 29 | # Create console handler with color formatting 30 | console_handler = colorlog.StreamHandler() 31 | console_handler.setLevel(logging.INFO) # Keep INFO for console 32 | 33 | formatter = colorlog.ColoredFormatter( 34 | '%(log_color)s%(asctime)s - %(filename)s:%(funcName)s - %(levelname)s - %(message)s', 35 | log_colors={ 36 | 'DEBUG': 'cyan', 37 | 'INFO': 'green', 38 | 'WARNING': 'yellow', 39 | 'ERROR': 'red', 40 | 'CRITICAL': 'red,bg_white', 41 | } 42 | ) 43 | 44 | console_handler.setFormatter(formatter) 45 | logger.addHandler(console_handler) 46 | 47 | # Add file handler with immediate flushing for debug logs only 48 | log_file = os.path.join(log_dir, 'battery_debug.log') 49 | file_handler = ImmediateRotatingFileHandler( 50 | log_file, 51 | maxBytes=10*1024*1024, # 10MB - reduced from 50MB 52 | backupCount=2, # Keep 2 backup files for important history 53 | encoding='utf-8', 54 | errors='replace' 55 | ) 56 | file_handler.setLevel(logging.DEBUG) 57 | 58 | # Add filters to exclude unwanted messages 59 | file_handler.addFilter(lambda record: not record.name.startswith(('urllib3', 'requests', 'charset_normalizer'))) 60 | file_handler.addFilter(ExcludeFilter()) 61 | 62 | # Use a simpler formatter for file logs to reduce overhead 63 | file_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')) 64 | 65 | logger.addHandler(file_handler) 66 | 67 | # Prevent propagation to 
avoid duplicate logs 68 | logger.propagate = False 69 | 70 | # Configure root logger to allow DEBUG 71 | root_logger = logging.getLogger() 72 | root_logger.setLevel(logging.DEBUG) 73 | 74 | return logger 75 | 76 | # Create and configure the logger 77 | logger = setup_logger() -------------------------------------------------------------------------------- /cli_battery/app/routes/site_routes.py: -------------------------------------------------------------------------------- 1 | from flask import render_template, request, jsonify, send_file, redirect, url_for, Blueprint 2 | from app.settings import Settings 3 | from app.metadata_manager import MetadataManager 4 | import io 5 | from app.trakt_auth import TraktAuth 6 | from flask import flash 7 | from sqlalchemy import inspect 8 | from app.database import Session, Item, Metadata, Season, Poster # Add this line 9 | from app.trakt_metadata import TraktMetadata # Add this import at the top of the file 10 | import json 11 | import time 12 | import os 13 | from ..logger_config import logger 14 | 15 | 16 | settings = Settings() 17 | 18 | main_bp = Blueprint('main', __name__) 19 | 20 | @main_bp.route('/') 21 | def home(): 22 | db_stats = MetadataManager.get_stats() 23 | logger.debug(f"Current staleness_threshold: {settings.staleness_threshold}") 24 | stats = { 25 | 'total_providers': len(settings.providers), 26 | 'active_providers': sum(1 for provider in settings.providers if provider['enabled']), 27 | 'total_items': db_stats['total_items'], 28 | 'total_metadata': db_stats['total_metadata'], 29 | 'last_update': db_stats['last_update'].strftime('%Y-%m-%d %H:%M:%S') if db_stats['last_update'] else 'N/A', 30 | 'staleness_threshold': f"{settings.staleness_threshold} days" 31 | } 32 | logger.debug(f"Stats: {stats}") 33 | return render_template('home.html', stats=stats) 34 | 35 | @main_bp.route('/debug') 36 | def debug(): 37 | items = MetadataManager.get_all_items() 38 | for item in items: 39 | # Find the year from metadata 40 | year_metadata = next((m.value for m in item.item_metadata if m.key == 'year'), None) 41 | 42 | # Use the metadata year if available, otherwise use the item's year 43 | item.display_year = year_metadata or item.year 44 | 45 | return render_template('debug.html', items=items) 46 | 47 | @main_bp.route('/debug/delete_item/<imdb_id>', methods=['POST']) 48 | def delete_item(imdb_id): 49 | success = MetadataManager.delete_item(imdb_id) 50 | return jsonify({"success": success}) 51 | 52 | @main_bp.route('/settings') 53 | def settings_page(): 54 | return render_template('settings.html', settings=settings.get_all()) 55 | 56 | @main_bp.route('/debug/schema') 57 | def debug_schema(): 58 | with Session() as session: 59 | inspector = inspect(session.bind) 60 | tables = inspector.get_table_names() 61 | schema = {} 62 | for table in tables: 63 | columns = inspector.get_columns(table) 64 | schema[table] = [{"name": column['name'], "type": str(column['type'])} for column in columns] 65 | return jsonify(schema) 66 | 67 | @main_bp.route('/debug/item/<imdb_id>') 68 | def debug_item(imdb_id): 69 | settings = Settings() 70 | if not any(provider['enabled'] for provider in settings.providers): 71 | return jsonify({"error": "No active metadata provider"}), 400 72 | 73 | with Session() as session: 74 | item = session.query(Item).filter_by(imdb_id=imdb_id).first() 75 | if not item: 76 | return jsonify({"error": f"No item found for IMDB ID: {imdb_id}"}), 404 77 | 78 | metadata = {m.key: m.value for m in item.item_metadata} 79 | seasons = [{'season': s.season_number,
'episode_count': s.episode_count} for s in item.seasons] 80 | 81 | return jsonify({ 82 | "item": { 83 | "id": item.id, 84 | "imdb_id": item.imdb_id, 85 | "title": item.title, 86 | "type": item.type, 87 | "year": item.year 88 | }, 89 | "metadata": metadata, 90 | "seasons": seasons 91 | }) 92 | 93 | @main_bp.context_processor 94 | def inject_stats(): 95 | stats = MetadataManager.get_stats() 96 | stats['staleness_threshold'] = f"{settings.staleness_threshold} days" 97 | logger.debug(f"Injected stats: {stats}") 98 | return dict(stats=stats) -------------------------------------------------------------------------------- /cli_battery/app/static/android-chrome-192x192.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/cli_battery/app/static/android-chrome-192x192.png -------------------------------------------------------------------------------- /cli_battery/app/static/android-chrome-512x512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/cli_battery/app/static/android-chrome-512x512.png -------------------------------------------------------------------------------- /cli_battery/app/static/apple-touch-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/cli_battery/app/static/apple-touch-icon.png -------------------------------------------------------------------------------- /cli_battery/app/static/cmd-terminal-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/cli_battery/app/static/cmd-terminal-icon.png -------------------------------------------------------------------------------- /cli_battery/app/static/favicon-128x128.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/cli_battery/app/static/favicon-128x128.png -------------------------------------------------------------------------------- /cli_battery/app/static/favicon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/cli_battery/app/static/favicon-16x16.png -------------------------------------------------------------------------------- /cli_battery/app/static/favicon-192x192.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/cli_battery/app/static/favicon-192x192.png -------------------------------------------------------------------------------- /cli_battery/app/static/favicon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/cli_battery/app/static/favicon-32x32.png -------------------------------------------------------------------------------- /cli_battery/app/static/favicon-48x48.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/cli_battery/app/static/favicon-48x48.png -------------------------------------------------------------------------------- /cli_battery/app/static/favicon-512x512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/cli_battery/app/static/favicon-512x512.png -------------------------------------------------------------------------------- /cli_battery/app/static/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/cli_battery/app/static/favicon.ico -------------------------------------------------------------------------------- /cli_battery/app/static/icon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/cli_battery/app/static/icon-32x32.png -------------------------------------------------------------------------------- /cli_battery/app/static/js/menu.js: -------------------------------------------------------------------------------- 1 | document.addEventListener('DOMContentLoaded', function() { 2 | const hamburger = document.querySelector('.hamburger'); 3 | const nav = document.querySelector('nav'); 4 | 5 | hamburger.addEventListener('click', function() { 6 | hamburger.classList.toggle('active'); 7 | nav.classList.toggle('active'); 8 | }); 9 | 10 | // Close menu when a link is clicked 11 | document.querySelectorAll('nav a').forEach(link => { 12 | link.addEventListener('click', () => { 13 | hamburger.classList.remove('active'); 14 | nav.classList.remove('active'); 15 | }); 16 | }); 17 | }); -------------------------------------------------------------------------------- /cli_battery/app/static/loadingimage.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/cli_battery/app/static/loadingimage.gif -------------------------------------------------------------------------------- /cli_battery/app/static/site.webmanifest: -------------------------------------------------------------------------------- 1 | { 2 | "name": "cli_debrid", 3 | "short_name": "cli_debrid", 4 | "icons": [ 5 | { 6 | "src": "/static/android-chrome-192x192.png", 7 | "sizes": "192x192", 8 | "type": "image/png" 9 | }, 10 | { 11 | "src": "/static/android-chrome-512x512.png", 12 | "sizes": "512x512", 13 | "type": "image/png" 14 | } 15 | ], 16 | "theme_color": "#007bff", 17 | "background_color": "#ffffff", 18 | "display": "standalone" 19 | } -------------------------------------------------------------------------------- /cli_battery/app/static/white-icon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/cli_battery/app/static/white-icon-16x16.png -------------------------------------------------------------------------------- /cli_battery/app/static/white-icon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/cli_battery/app/static/white-icon-32x32.png 
-------------------------------------------------------------------------------- /cli_battery/app/templates/base.html: --------------------------------------------------------------------------------
[HTML markup lost in extraction; recoverable content only]
Title: cli_battery - {% block title %}{% endblock %}
Head hook: {% block extra_css %}{% endblock %}
Sidebar: "CLI Battery icon" logo, "cli_battery" brand, version label "v0.1.0"
Stats bar: Items: {{ stats.total_items }} | Metadata: {{ stats.total_metadata }} | Last Update: {{ stats.last_update }}
Body hooks: {% block content %}{% endblock %} and {% block extra_js %}{% endblock %}
-------------------------------------------------------------------------------- /cli_battery/app/templates/home.html: --------------------------------------------------------------------------------
{% extends "base.html" %}
{% block title %}Home{% endblock %}
{% block content %}
[HTML markup lost in extraction; recoverable content only]
Heading: CLI Battery Dashboard
Stat cards: "Total Items" ({{ stats.total_items }}), "Total Metadata" ({{ stats.total_metadata }}), "Staleness Threshold" ({{ stats.staleness_threshold }})
{% endblock %}
-------------------------------------------------------------------------------- /cli_battery/app/templates/settings_tabs/advanced.html: --------------------------------------------------------------------------------
[HTML markup lost in extraction; recoverable content only]
Heading: Advanced Settings
Logging-level field with help text: "Set the logging level for the application."
-------------------------------------------------------------------------------- /cli_battery/app/templates/settings_tabs/general.html: --------------------------------------------------------------------------------
[HTML markup lost in extraction; recoverable content only]
Heading: General Settings
Staleness-threshold field with help text: "Days after which metadata is considered stale and should be updated on a new request."
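The help text above is all that survives of this tab, but it describes the staleness rule that drives metadata refreshes. A minimal sketch of such a check follows — the helper name and signature are assumptions, not the repo's actual implementation (which lives in `MetadataManager` and is not excerpted here); `staleness_threshold` mirrors the setting name used in `site_routes.py`:

```python
from datetime import datetime, timedelta

def is_stale(last_updated: datetime, staleness_threshold: int) -> bool:
    # Hypothetical helper: metadata older than the configured number of
    # days is considered stale and should be refreshed on the next request.
    return datetime.now() - last_updated > timedelta(days=staleness_threshold)
```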
-------------------------------------------------------------------------------- /cli_battery/requirements.txt: -------------------------------------------------------------------------------- 1 | colorlog==6.8.2 2 | Flask==3.0.3 3 | iso8601==2.1.0 4 | Pillow==10.4.0 5 | pytrakt==3.4.32 6 | Requests==2.32.3 7 | SQLAlchemy==2.0.31 8 | psycopg2-binary==2.9.9 9 | grpcio==1.66.1 10 | grpcio-tools==1.66.1 11 | protobuf==5.28.1 12 | APScheduler==3.10.4 13 | -------------------------------------------------------------------------------- /cli_battery/version.txt: -------------------------------------------------------------------------------- 1 | 0.1.6 2 | -------------------------------------------------------------------------------- /content_checkers/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Content checkers package for handling various content sources like Trakt, Plex, etc. 3 | """ 4 | -------------------------------------------------------------------------------- /content_checkers/collected.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import List, Dict, Any, Tuple 3 | from database import get_all_media_items 4 | from settings import get_all_settings, get_setting 5 | import os 6 | import pickle 7 | from datetime import datetime, timedelta 8 | 9 | # Get db_content directory from environment variable with fallback 10 | DB_CONTENT_DIR = os.environ.get('USER_DB_CONTENT', '/user/db_content') 11 | COLLECTED_CACHE_FILE = os.path.join(DB_CONTENT_DIR, 'collected_cache.pkl') 12 | CACHE_EXPIRY_DAYS = 7 13 | 14 | def load_collected_cache(): 15 | try: 16 | if os.path.exists(COLLECTED_CACHE_FILE): 17 | with open(COLLECTED_CACHE_FILE, 'rb') as f: 18 | return pickle.load(f) 19 | except (EOFError, pickle.UnpicklingError, FileNotFoundError) as e: 20 | logging.warning(f"Error loading Collected cache: {e}. 
Creating a new cache.") 21 | return {} 22 | 23 | def save_collected_cache(cache): 24 | try: 25 | os.makedirs(os.path.dirname(COLLECTED_CACHE_FILE), exist_ok=True) 26 | with open(COLLECTED_CACHE_FILE, 'wb') as f: 27 | pickle.dump(cache, f) 28 | except Exception as e: 29 | logging.error(f"Error saving Collected cache: {e}") 30 | 31 | def get_wanted_from_collected() -> List[Tuple[List[Dict[str, Any]], Dict[str, bool]]]: 32 | content_sources = get_all_settings().get('Content Sources', {}) 33 | collected_sources = [data for source, data in content_sources.items() if source.startswith('Collected') and data.get('enabled', False)] 34 | 35 | if not collected_sources: 36 | logging.info("No enabled Collected sources found in settings.") 37 | return [] 38 | 39 | disable_caching = True # Hardcoded to True 40 | all_wanted_items = [] 41 | cache = {} if disable_caching else load_collected_cache() 42 | current_time = datetime.now() 43 | 44 | for source in collected_sources: 45 | versions = source.get('versions', {}) 46 | 47 | wanted_items = get_all_media_items(state="Wanted", media_type="episode") 48 | collected_items = get_all_media_items(state="Collected", media_type="episode") 49 | 50 | all_items = wanted_items + collected_items 51 | consolidated_items = {} 52 | cache_skipped = 0 53 | 54 | for item in all_items: 55 | imdb_id = item['imdb_id'] 56 | if not imdb_id: # Skip items with no IMDB ID 57 | logging.warning(f"Skipping item with missing IMDB ID: {item}") 58 | continue 59 | 60 | if imdb_id not in consolidated_items: 61 | if not disable_caching: 62 | # Check cache for this item 63 | cache_key = f"{imdb_id}_tv" # All collected items are TV shows 64 | cache_item = cache.get(cache_key) 65 | 66 | if cache_item: 67 | last_processed = cache_item['timestamp'] 68 | if current_time - last_processed < timedelta(days=CACHE_EXPIRY_DAYS): 69 | cache_skipped += 1 70 | continue 71 | 72 | # Add or update cache entry 73 | cache[cache_key] = { 74 | 'timestamp': current_time, 75 | 'data': { 76 | 'imdb_id': imdb_id, 77 | 'media_type': 'tv' 78 | } 79 | } 80 | 81 | consolidated_items[imdb_id] = { 82 | 'imdb_id': imdb_id, 83 | 'media_type': 'tv' 84 | } 85 | 86 | result = list(consolidated_items.values()) 87 | logging.info(f"Found {len(result)} unique TV shows from local database") 88 | 89 | all_wanted_items.append((result, versions)) 90 | 91 | # Save updated cache only if caching is enabled 92 | if not disable_caching: 93 | save_collected_cache(cache) 94 | return all_wanted_items -------------------------------------------------------------------------------- /content_checkers/content_source_detail.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Dict, Any, Optional 3 | 4 | def append_content_source_detail(item: Dict[str, Any], source_type: Optional[str] = None) -> Dict[str, Any]: 5 | """ 6 | Takes an item dictionary and appends appropriate content_source_detail based on the content_source. 7 | For now, returns None for all sources until we implement specific detail logic. 8 | 9 | Args: 10 | item (Dict[str, Any]): The item dictionary containing at minimum a content_source field 11 | source_type (Optional[str]): The type of source (e.g., 'Overseerr', 'Trakt Watchlist', etc.) 
12 | If not provided, will attempt to extract from content_source 13 | 14 | Returns: 15 | Dict[str, Any]: The same item dictionary with content_source_detail added 16 | """ 17 | try: 18 | content_source = item.get('content_source') 19 | if not content_source: 20 | logging.warning("No content_source found in item, cannot append detail") 21 | item['content_source_detail'] = None 22 | return item 23 | 24 | # If source_type not provided, try to extract from content_source 25 | if not source_type and content_source: 26 | source_type = content_source.split('_')[0] 27 | 28 | # Get the detail based on source type 29 | detail = None 30 | if source_type == 'My Plex Watchlist': 31 | detail = item.get('content_source_detail', 'Unknown User') 32 | elif source_type == 'Other Plex Watchlist': 33 | detail = item.get('content_source_detail', 'Unknown User') 34 | elif source_type == 'Overseerr': 35 | detail = item.get('content_source_detail') 36 | elif source_type == 'Trakt': 37 | detail = item.get('content_source_detail') 38 | elif source_type == 'MDBList': 39 | detail = item.get('content_source_detail') 40 | elif source_type == 'Magnet_Assigner': 41 | detail = item.get('content_source_detail') 42 | 43 | item['content_source_detail'] = detail 44 | return item 45 | except Exception as e: 46 | logging.error(f"Error appending content source detail: {str(e)}") 47 | item['content_source_detail'] = None 48 | return item -------------------------------------------------------------------------------- /content_checkers/plex_token_manager.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import logging 4 | from datetime import datetime 5 | from config_manager import CONFIG_DIR 6 | 7 | TOKEN_STATUS_FILE = os.path.join(CONFIG_DIR, 'plex_token_status.json') 8 | 9 | def load_token_status(): 10 | """Load the token status from the JSON file.""" 11 | try: 12 | if os.path.exists(TOKEN_STATUS_FILE): 13 | with open(TOKEN_STATUS_FILE, 'r') as f: 14 | return json.load(f) 15 | except Exception as e: 16 | logging.error(f"Error loading token status: {e}") 17 | return {} 18 | 19 | def save_token_status(status): 20 | """Save the token status to the JSON file.""" 21 | try: 22 | with open(TOKEN_STATUS_FILE, 'w') as f: 23 | json.dump(status, f, indent=4, default=str) 24 | except Exception as e: 25 | logging.error(f"Error saving token status: {e}") 26 | 27 | def update_token_status(username, valid, expires_at=None, plex_username=None): 28 | """Update the status for a specific token.""" 29 | status = load_token_status() 30 | status[username] = { 31 | 'valid': valid, 32 | 'last_checked': datetime.now().isoformat(), 33 | 'expires_at': expires_at.isoformat() if expires_at else None, 34 | 'username': plex_username 35 | } 36 | save_token_status(status) 37 | 38 | def get_token_status(): 39 | """Get the current status of all tokens.""" 40 | return load_token_status() 41 | -------------------------------------------------------------------------------- /database.py: -------------------------------------------------------------------------------- 1 | # Import all submodules 2 | from . import core 3 | from . import collected_items 4 | from . import blacklist 5 | from . import schema_management 6 | from . import poster_management 7 | from . import statistics 8 | from . import wanted_items 9 | from . import database_reading 10 | from . 
import database_writing 11 | 12 | # Import all contents from each submodule 13 | from database.core import * 14 | from database.collected_items import * 15 | from database.blacklist import * 16 | from database.schema_management import * 17 | from database.poster_management import * 18 | from database.statistics import * 19 | from database.wanted_items import * 20 | from database.database_reading import * 21 | from database.database_writing import * 22 | 23 | # Use __all__ to specify everything to be exported 24 | __all__ = ( 25 | core.__all__ + 26 | collected_items.__all__ + 27 | blacklist.__all__ + 28 | schema_management.__all__ + 29 | poster_management.__all__ + 30 | statistics.__all__ + 31 | wanted_items.__all__ + 32 | database_reading.__all__ + 33 | database_writing.__all__ 34 | ) -------------------------------------------------------------------------------- /database/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import inspect 3 | 4 | # List of all submodules 5 | submodules = [ 6 | 'core', 7 | 'collected_items', 8 | 'blacklist', 9 | 'schema_management', 10 | 'poster_management', 11 | 'statistics', 12 | 'wanted_items', 13 | 'database_reading', 14 | 'database_writing', 15 | 'maintenance' 16 | ] 17 | 18 | # Import all submodules 19 | for submodule in submodules: 20 | globals()[submodule] = importlib.import_module(f'.{submodule}', package=__name__) 21 | 22 | # Function to get all public names from a module 23 | def get_public_names(module): 24 | return [name for name, obj in inspect.getmembers(module) 25 | if not name.startswith('_') and not inspect.ismodule(obj)] 26 | 27 | # Generate __all__ for each submodule and the main package 28 | __all__ = [] 29 | for submodule in submodules: 30 | module = globals()[submodule] 31 | module.__all__ = get_public_names(module) 32 | __all__.extend(module.__all__) 33 | 34 | # Import all contents from each submodule 35 | for submodule in submodules: 36 | exec(f'from .{submodule} import *') -------------------------------------------------------------------------------- /database/blacklist.py: -------------------------------------------------------------------------------- 1 | from .core import get_db_connection 2 | import logging 3 | from typing import List 4 | from datetime import datetime 5 | 6 | def get_blacklisted_items(): 7 | conn = get_db_connection() 8 | try: 9 | cursor = conn.execute('SELECT * FROM media_items WHERE state = "Blacklisted"') 10 | items = cursor.fetchall() 11 | return [dict(item) for item in items] 12 | except Exception as e: 13 | logging.error(f"Error retrieving blacklisted items: {str(e)}") 14 | return [] 15 | finally: 16 | conn.close() 17 | 18 | def remove_from_blacklist(item_ids: List[int]): 19 | conn = get_db_connection() 20 | try: 21 | for item_id in item_ids: 22 | conn.execute(''' 23 | UPDATE media_items 24 | SET state = 'Wanted', last_updated = ?, sleep_cycles = 0 25 | WHERE id = ? 
AND state = 'Blacklisted' 26 | ''', (datetime.now(), item_id)) 27 | conn.commit() 28 | logging.info(f"Removed {len(item_ids)} items from blacklist") 29 | except Exception as e: 30 | logging.error(f"Error removing items from blacklist: {str(e)}") 31 | conn.rollback() 32 | finally: 33 | conn.close() -------------------------------------------------------------------------------- /database/core.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sqlite3 3 | import unicodedata 4 | from typing import Any, Dict 5 | from sqlite3 import Row 6 | from functools import wraps 7 | import logging 8 | import time 9 | import random 10 | 11 | def get_db_connection(db_path=None): 12 | if db_path is None: 13 | # Get db_content directory from environment variable with fallback 14 | db_content_dir = os.environ.get('USER_DB_CONTENT', '/user/db_content') 15 | db_path = os.path.join(db_content_dir, 'media_items.db') 16 | os.makedirs(os.path.dirname(db_path), exist_ok=True) 17 | conn = sqlite3.connect(db_path) 18 | conn.execute('PRAGMA journal_mode=WAL') # Enable WAL mode 19 | conn.row_factory = sqlite3.Row 20 | return conn 21 | 22 | def normalize_string(input_str): 23 | return ''.join( 24 | c for c in unicodedata.normalize('NFKD', input_str) 25 | if unicodedata.category(c) != 'Mn' 26 | ) 27 | 28 | def row_to_dict(row: Row) -> Dict[str, Any]: 29 | return {key: row[key] for key in row.keys()} 30 | 31 | def retry_on_db_lock(max_attempts=5, initial_wait=0.1, backoff_factor=2): 32 | def decorator(func): 33 | @wraps(func) 34 | def wrapper(*args, **kwargs): 35 | attempt = 0 36 | while attempt < max_attempts: 37 | try: 38 | return func(*args, **kwargs) 39 | except sqlite3.OperationalError as e: 40 | if "database is locked" in str(e) and attempt < max_attempts - 1: 41 | attempt += 1 42 | wait_time = initial_wait * (backoff_factor ** attempt) + random.uniform(0, 0.1) 43 | logging.warning(f"Database locked. Retrying in {wait_time:.2f} seconds... (Attempt {attempt + 1}/{max_attempts})") 44 | time.sleep(wait_time) 45 | else: 46 | raise 47 | raise Exception(f"Failed to execute {func.__name__} after {max_attempts} attempts due to database locks") 48 | return wrapper 49 | return decorator 50 | 51 | def get_existing_airtime(conn, imdb_id): 52 | cursor = conn.execute(''' 53 | SELECT airtime FROM media_items 54 | WHERE imdb_id = ? AND type = 'episode' AND airtime IS NOT NULL 55 | LIMIT 1 56 | ''', (imdb_id,)) 57 | result = cursor.fetchone() 58 | return result[0] if result else None 59 | 60 | @retry_on_db_lock() 61 | def reset_item_to_upgrading(imdb_id: str, original_file: str, original_version: str): 62 | conn = get_db_connection() 63 | try: 64 | cursor = conn.cursor() 65 | cursor.execute(''' 66 | UPDATE media_items 67 | SET state = 'Upgrading', 68 | filled_by_file = ?, 69 | filled_by_title = ?, 70 | version = ?, 71 | upgrading_from = NULL 72 | WHERE imdb_id = ? 
73 | ''', (original_file, original_file, original_version, imdb_id)) 74 | conn.commit() 75 | logging.info(f"Reset item with IMDB ID {imdb_id} to Upgrading state with original file: {original_file}") 76 | except sqlite3.Error as e: 77 | logging.error(f"Database error: {e}") 78 | conn.rollback() 79 | finally: 80 | conn.close() -------------------------------------------------------------------------------- /database/migrations.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from .core import get_db_connection 3 | 4 | def add_statistics_indexes(): 5 | """Add indexes to optimize statistics queries""" 6 | conn = get_db_connection() 7 | try: 8 | cursor = conn.cursor() 9 | 10 | # Index for recently added items 11 | cursor.execute(""" 12 | CREATE INDEX IF NOT EXISTS idx_media_items_collected 13 | ON media_items ( 14 | type, 15 | state, 16 | collected_at DESC 17 | ) 18 | WHERE collected_at IS NOT NULL 19 | """) 20 | 21 | # Index for recently upgraded items 22 | cursor.execute(""" 23 | CREATE INDEX IF NOT EXISTS idx_media_items_upgraded 24 | ON media_items ( 25 | upgraded, 26 | last_updated DESC 27 | ) 28 | WHERE upgraded = 1 AND last_updated IS NOT NULL 29 | """) 30 | 31 | # Index for collection counts 32 | cursor.execute(""" 33 | CREATE INDEX IF NOT EXISTS idx_media_items_collected_counts 34 | ON media_items ( 35 | type, 36 | state, 37 | imdb_id 38 | ) 39 | WHERE state = 'Collected' 40 | """) 41 | 42 | conn.commit() 43 | #logging.info("Successfully added statistics indexes") 44 | 45 | except Exception as e: 46 | logging.error(f"Error adding statistics indexes: {str(e)}") 47 | conn.rollback() 48 | finally: 49 | conn.close() 50 | 51 | def remove_statistics_indexes(): 52 | """Remove statistics indexes if needed""" 53 | conn = get_db_connection() 54 | try: 55 | cursor = conn.cursor() 56 | 57 | indexes = [ 58 | 'idx_media_items_collected', 59 | 'idx_media_items_upgraded', 60 | 'idx_media_items_collected_counts' 61 | ] 62 | 63 | for index in indexes: 64 | cursor.execute(f"DROP INDEX IF EXISTS {index}") 65 | 66 | conn.commit() 67 | #logging.info("Successfully removed statistics indexes") 68 | 69 | except Exception as e: 70 | logging.error(f"Error removing statistics indexes: {str(e)}") 71 | conn.rollback() 72 | finally: 73 | conn.close() -------------------------------------------------------------------------------- /database/poster_management.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from settings import get_setting 3 | import asyncio 4 | from aiohttp import ClientConnectorError, ServerTimeoutError, ClientResponseError 5 | 6 | async def get_poster_url(session, tmdb_id, media_type): 7 | from poster_cache import get_cached_poster_url, cache_poster_url, cache_unavailable_poster, UNAVAILABLE_POSTER 8 | 9 | # Log incoming parameters 10 | #logging.info(f"get_poster_url called with tmdb_id: {tmdb_id}, original media_type: {media_type}") 11 | 12 | # Normalize media type early 13 | normalized_type = 'tv' if media_type.lower() in ['tv', 'show', 'series'] else 'movie' 14 | #logging.info(f"Normalized media_type from '{media_type}' to '{normalized_type}'") 15 | 16 | # First check the cache using normalized type 17 | cached_url = get_cached_poster_url(tmdb_id, normalized_type) 18 | if cached_url: 19 | #logging.info(f"Cache hit for {tmdb_id}_{normalized_type}: {cached_url}") 20 | return cached_url 21 | 22 | if not tmdb_id: 23 | logging.warning("No TMDB ID provided") 24 | 
cache_unavailable_poster(tmdb_id, normalized_type) 25 | return UNAVAILABLE_POSTER 26 | 27 | tmdb_api_key = get_setting('TMDB', 'api_key', '') 28 | 29 | if not tmdb_api_key: 30 | logging.warning("TMDB API key is missing") 31 | cache_unavailable_poster(tmdb_id, normalized_type) 32 | return UNAVAILABLE_POSTER 33 | 34 | url = f"https://api.themoviedb.org/3/{normalized_type}/{tmdb_id}/images?api_key={tmdb_api_key}" 35 | logging.info(f"Fetching poster from TMDB API for {tmdb_id} as type '{normalized_type}'") 36 | 37 | try: 38 | async with session.get(url, timeout=10) as response: 39 | logging.info(f"TMDB API response status: {response.status} for {tmdb_id}_{normalized_type}") 40 | if response.status == 200: 41 | data = await response.json() 42 | posters = data.get('posters', []) 43 | 44 | if posters: 45 | # First try English posters 46 | english_posters = [p for p in posters if p.get('iso_639_1') == 'en'] 47 | poster = english_posters[0] if english_posters else posters[0] 48 | poster_url = f"https://image.tmdb.org/t/p/w300{poster['file_path']}" 49 | logging.info(f"Found poster for {tmdb_id}_{normalized_type}: {poster_url}") 50 | cache_poster_url(tmdb_id, normalized_type, poster_url) 51 | return poster_url 52 | 53 | logging.warning(f"No posters found for {normalized_type} with TMDB ID {tmdb_id}") 54 | cache_unavailable_poster(tmdb_id, normalized_type) 55 | return UNAVAILABLE_POSTER 56 | 57 | logging.error(f"TMDB API returned status {response.status} for {normalized_type} with TMDB ID {tmdb_id}") 58 | 59 | except Exception as e: 60 | logging.error(f"Error fetching poster URL for {normalized_type} with TMDB ID {tmdb_id}: {e}") 61 | 62 | cache_unavailable_poster(tmdb_id, normalized_type) 63 | return UNAVAILABLE_POSTER -------------------------------------------------------------------------------- /db_test.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | import logging 4 | from datetime import datetime 5 | 6 | # Add the parent directory to the Python path 7 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 8 | 9 | # Import the function we want to test 10 | from database.collected_items import add_collected_items 11 | 12 | # Set up logging 13 | logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s') 14 | 15 | def create_sample_media_items(): 16 | return [ 17 | { 18 | 'title': 'Test Movie 1', 19 | 'imdb_id': 'tt1234567', 20 | 'tmdb_id': '1234', 21 | 'year': 2023, 22 | 'release_date': '2023-01-01', 23 | 'genres': ['Action', 'Sci-Fi'], 24 | 'location': '/path/to/test_movie_1.mp4', 25 | 'type': 'movie' 26 | }, 27 | { 28 | 'title': 'Test TV Show', 29 | 'imdb_id': 'tt7654321', 30 | 'tmdb_id': '5678', 31 | 'year': 2023, 32 | 'release_date': '2023-02-01', 33 | 'genres': ['Drama', 'Mystery'], 34 | 'season_number': 1, 35 | 'episode_number': 1, 36 | 'episode_title': 'Pilot', 37 | 'location': '/path/to/test_tv_show_s01e01.mp4', 38 | 'type': 'episode' 39 | }, 40 | { 41 | 'title': 'TMDB Only Movie', 42 | 'imdb_id': None, 43 | 'tmdb_id': '9876', 44 | 'year': 2023, 45 | 'release_date': '2023-03-01', 46 | 'genres': ['Comedy'], 47 | 'location': '/path/to/tmdb_only_movie.mp4', 48 | 'type': 'movie' 49 | } 50 | ] 51 | 52 | def test_add_collected_items(): 53 | logging.info("Starting test of add_collected_items function") 54 | 55 | # Create sample media items 56 | media_items = create_sample_media_items() 57 | 58 | try: 59 | # Call the function we're testing 60 | 
add_collected_items(media_items) 61 | logging.info("add_collected_items function completed successfully") 62 | except Exception as e: 63 | logging.error(f"Error occurred while testing add_collected_items: {str(e)}", exc_info=True) 64 | 65 | logging.info("Test completed") 66 | 67 | if __name__ == "__main__": 68 | test_add_collected_items() 69 | -------------------------------------------------------------------------------- /debrid/__init__.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | from settings import get_setting, ensure_settings_file 3 | from .base import DebridProvider, TooManyDownloadsError, ProviderUnavailableError 4 | from .real_debrid import RealDebridProvider 5 | from .common import ( 6 | extract_hash_from_magnet, 7 | download_and_extract_hash, 8 | timed_lru_cache, 9 | torrent_to_magnet, 10 | is_video_file, 11 | is_unwanted_file 12 | ) 13 | 14 | _provider_instance: Optional[DebridProvider] = None 15 | 16 | def get_debrid_provider() -> DebridProvider: 17 | """ 18 | Factory function that returns the configured debrid provider instance. 19 | Uses singleton pattern to maintain one instance per provider. 20 | """ 21 | global _provider_instance 22 | 23 | if _provider_instance is not None: 24 | return _provider_instance 25 | 26 | # Ensure settings file exists and is properly initialized 27 | ensure_settings_file() 28 | 29 | provider_name = get_setting("Debrid Provider", "provider", "").lower() 30 | 31 | if provider_name == 'realdebrid': 32 | _provider_instance = RealDebridProvider() 33 | else: 34 | raise ValueError(f"Unknown debrid provider: {provider_name}") 35 | 36 | return _provider_instance 37 | 38 | def reset_provider() -> None: 39 | """Reset the debrid provider instance, forcing it to be reinitialized on next use.""" 40 | global _provider_instance 41 | _provider_instance = None 42 | 43 | # Export public interface 44 | __all__ = [ 45 | 'get_debrid_provider', 46 | 'reset_provider', 47 | 'DebridProvider', 48 | 'TooManyDownloadsError', 49 | 'ProviderUnavailableError', 50 | 'RealDebridProvider', 51 | 'extract_hash_from_magnet', 52 | 'download_and_extract_hash', 53 | 'timed_lru_cache', 54 | 'torrent_to_magnet', 55 | 'is_video_file', 56 | 'is_unwanted_file' 57 | ] 58 | -------------------------------------------------------------------------------- /debrid/common/__init__.py: -------------------------------------------------------------------------------- 1 | from .torrent import ( 2 | torrent_to_magnet, 3 | download_and_extract_hash, 4 | download_and_convert_to_magnet, 5 | extract_hash_from_file 6 | ) 7 | from .utils import ( 8 | extract_hash_from_magnet, 9 | is_video_file, 10 | is_unwanted_file 11 | ) 12 | from .cache import timed_lru_cache 13 | from .api import RateLimiter 14 | 15 | __all__ = [ 16 | 'torrent_to_magnet', 17 | 'download_and_extract_hash', 18 | 'download_and_convert_to_magnet', 19 | 'extract_hash_from_magnet', 20 | 'extract_hash_from_file', 21 | 'RateLimiter', 22 | 'timed_lru_cache', 23 | 'is_video_file', 24 | 'is_unwanted_file' 25 | ] 26 | -------------------------------------------------------------------------------- /debrid/common/api.py: -------------------------------------------------------------------------------- 1 | import time 2 | from functools import wraps 3 | from typing import Callable 4 | 5 | class RateLimiter: 6 | """Rate limiter for API calls""" 7 | def __init__(self, calls_per_second: float = 1): 8 | self.calls_per_second = calls_per_second 9 | self.last_call = 0 10 | 11 | def 
wait(self): 12 | current_time = time.time() 13 | time_since_last_call = current_time - self.last_call 14 | if time_since_last_call < 1 / self.calls_per_second: 15 | time.sleep((1 / self.calls_per_second) - time_since_last_call) 16 | self.last_call = time.time() 17 | 18 | def rate_limited_request(func: Callable) -> Callable: 19 | """Decorator to rate limit API requests""" 20 | rate_limiter = RateLimiter(calls_per_second=0.5) 21 | 22 | @wraps(func) 23 | def wrapper(*args, **kwargs): 24 | rate_limiter.wait() 25 | return func(*args, **kwargs) 26 | return wrapper 27 | -------------------------------------------------------------------------------- /debrid/common/c7d9e45f.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | import base64 3 | from cryptography.fernet import Fernet 4 | 5 | class _c: 6 | """Internal descriptor for capability protection""" 7 | 8 | def __init__(self, n: str): 9 | self.n = n 10 | self.c = f"_{hashlib.sha256(n.encode()).hexdigest()[:8]}" 11 | 12 | def __get__(self, i, o=None): 13 | if i is None: 14 | return self 15 | if not hasattr(i, self.c): 16 | v = i._get_capability_value(self.n) 17 | i.__dict__[self.c] = v 18 | return i.__dict__[self.c] 19 | 20 | def __set__(self, i, v): 21 | raise AttributeError("x") 22 | 23 | # Encrypted capability values 24 | _v = { 25 | 'RealDebridProvider': { 26 | 'direct_cache': b'gAAAAABnfA_Klql2nrRmlghouiZSvCczXQj2icYQtzF9MkIsyZtJnKzIwB-LUz8kOFMD1VwmKvTgLrkt6fZMujlqg1ahbzBjKQ==', 27 | 'bulk_cache': b'gAAAAABnfA_K-1UH1Ca0rAeyoJqM-QN4HVMoxSQcl1oRYbXQ6H8g-IpXVyPW6EJpyJaoeW6-igDVetjl32gncwXJupsR7PJRgA==', 28 | 'supports_uncached': b'gAAAAABnfA_Kt_BI1l3hWU4xwCXwuY3owcWYzkHHQVo5cs6QcC7e3Q3T4H8aqwDFDzlckIsddvJaQdbHG2G8mfPZQC0YFIS8NA==' 29 | } 30 | } 31 | 32 | # Property descriptors 33 | _p1 = _c('direct_cache') 34 | _p2 = _c('bulk_cache') 35 | _p3 = _c('supports_uncached') -------------------------------------------------------------------------------- /debrid/common/cache.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime, timedelta 2 | from functools import wraps 3 | from typing import Any, Callable 4 | import logging 5 | 6 | def timed_lru_cache(seconds: int, maxsize: int = 128): 7 | """ 8 | Decorator that provides a timed LRU cache. 9 | Cache entries expire after the specified number of seconds. 10 | """ 11 | def wrapper_cache(func: Callable) -> Callable: 12 | cache = {} 13 | expiration_times = {} 14 | lifetime = timedelta(seconds=seconds) 15 | 16 | @wraps(func) 17 | def wrapped_func(*args, **kwargs) -> Any: 18 | key = str(args) + str(kwargs) 19 | now = datetime.utcnow() 20 | 21 | # Check if entry exists and is still valid 22 | if key in cache: 23 | time_until_expiry = expiration_times[key] - now 24 | if now < expiration_times[key]: 25 | #logging.debug(f"Cache hit for {func.__name__} with key {key}. Expires in {time_until_expiry.total_seconds():.1f} seconds") 26 | return cache[key] 27 | 28 | result = func(*args, **kwargs) 29 | cache[key] = result 30 | expiration_times[key] = now + lifetime 31 | #logging.debug(f"Cached new value for {func.__name__} with key {key}. Will expire in {seconds} seconds") 32 | 33 | # Implement LRU by removing oldest entries if we exceed maxsize 34 | if len(cache) > maxsize: 35 | oldest_key = min(expiration_times, key=expiration_times.get) 36 | #logging.debug(f"Cache size exceeded {maxsize}. 
Removing oldest entry with key {oldest_key}") 37 | del cache[oldest_key] 38 | del expiration_times[oldest_key] 39 | 40 | return result 41 | 42 | return wrapped_func 43 | 44 | return wrapper_cache 45 | -------------------------------------------------------------------------------- /debrid/common/utils.py: -------------------------------------------------------------------------------- 1 | import re 2 | import logging 3 | from typing import List, Union, Dict, Tuple 4 | 5 | # Common video file extensions 6 | VIDEO_EXTENSIONS = [ 7 | 'mp4', 'mkv', 'avi', 'mov', 'wmv', 'flv', 'm4v', 'webm', 'mpg', 'mpeg', 'm2ts', 'ts' 8 | ] 9 | 10 | def is_video_file(filename: str) -> bool: 11 | """Check if a file is a video file based on its extension""" 12 | result = any(filename.lower().endswith(f'.{ext}') for ext in VIDEO_EXTENSIONS) 13 | #logging.info(f"is_video_file check for {filename}: {result}") 14 | return result 15 | 16 | def is_unwanted_file(filename: str) -> bool: 17 | """Check if a file is unwanted (e.g., sample files)""" 18 | result = 'sample' in filename.lower() 19 | #logging.info(f"is_unwanted_file check for {filename}: {result}") 20 | return result 21 | 22 | def extract_hash_from_magnet(magnet_link: str) -> str: 23 | """Extract hash from magnet link or download and extract from HTTP link.""" 24 | try: 25 | # If it's an HTTP link, download and extract hash 26 | if magnet_link.startswith('http'): 27 | from debrid.common import download_and_extract_hash 28 | return download_and_extract_hash(magnet_link) 29 | 30 | # For magnet links, extract hash directly 31 | if not magnet_link.startswith('magnet:'): 32 | raise ValueError("Invalid magnet link format") 33 | 34 | # Extract hash from magnet link 35 | hash_match = re.search(r'btih:([a-fA-F0-9]{40})', magnet_link) 36 | if not hash_match: 37 | raise ValueError("Could not find valid hash in magnet link") 38 | 39 | return hash_match.group(1).lower() 40 | except Exception as e: 41 | logging.error(f"Error extracting hash: {str(e)}") 42 | raise ValueError("Invalid magnet link format") 43 | 44 | def is_valid_hash(hash_string: str) -> bool: 45 | """Check if a string is a valid hash""" 46 | return bool(re.match(r'^[a-fA-F0-9]{40}$', hash_string)) 47 | 48 | def process_hashes(hashes: Union[str, List[str]], batch_size: int = 100) -> List[str]: 49 | """Process and validate a list of hashes""" 50 | if isinstance(hashes, str): 51 | hashes = [hashes] 52 | 53 | # Remove duplicates and invalid hashes 54 | return list(set(h.lower() for h in hashes if is_valid_hash(h))) 55 | 56 | def format_torrent_status(active_torrents: List[Dict], download_stats: Tuple[int, int]) -> str: 57 | """ 58 | Format torrent status information into a human-readable string. 59 | Shows both active downloads and recently completed downloads. 
60 | 61 | Args: 62 | active_torrents: List of dictionaries containing torrent information 63 | download_stats: Tuple of (active_count, max_downloads) 64 | 65 | Returns: 66 | Formatted string containing torrent status information 67 | """ 68 | active_count, max_downloads = download_stats 69 | status_lines = [f"Active Downloads: {active_count}/{max_downloads}"] 70 | 71 | # Split torrents into active and completed 72 | downloading_torrents = [] 73 | completed_torrents = [] 74 | 75 | for torrent in active_torrents: 76 | if torrent.get('progress', 0) == 100 and torrent.get('status', '').lower() == 'downloaded': 77 | completed_torrents.append(torrent) 78 | else: 79 | downloading_torrents.append(torrent) 80 | 81 | # Show active downloads 82 | if not downloading_torrents: 83 | status_lines.append("\nNo active downloads") 84 | else: 85 | status_lines.append("\nActive Downloads:") 86 | for torrent in downloading_torrents: 87 | filename = torrent.get('filename', 'Unknown') 88 | progress = torrent.get('progress', 0) 89 | status = torrent.get('status', 'unknown') 90 | status_lines.append(f"- {filename}") 91 | status_lines.append(f" Progress: {progress}%, Status: {status}") 92 | 93 | # Show completed downloads 94 | if completed_torrents: 95 | status_lines.append("\nRecently Completed:") 96 | for torrent in completed_torrents: 97 | filename = torrent.get('filename', 'Unknown') 98 | status_lines.append(f"- {filename}") 99 | 100 | return "\n".join(status_lines) 101 | -------------------------------------------------------------------------------- /debrid/generate_encrypted_values.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import hashlib 3 | from cryptography.fernet import Fernet 4 | 5 | def generate_encrypted_values(): 6 | providers = { 7 | 'RealDebridProvider': { 8 | 'direct_cache': False, 9 | 'bulk_cache': False, 10 | 'supports_uncached': True 11 | } 12 | } 13 | 14 | for provider_name, capabilities in providers.items(): 15 | print(f"\n{provider_name}:") 16 | # Generate key from provider name 17 | key_base = provider_name.encode() + b'debrid_capabilities_key' 18 | key = base64.urlsafe_b64encode(hashlib.sha256(key_base).digest()[:32]) 19 | cipher = Fernet(key) 20 | 21 | for capability, value in capabilities.items(): 22 | encrypted = cipher.encrypt(str(value).encode()) 23 | print(f"'{capability}': {encrypted},") 24 | 25 | if __name__ == '__main__': 26 | generate_encrypted_values() -------------------------------------------------------------------------------- /debrid/real_debrid/__init__.py: -------------------------------------------------------------------------------- 1 | from .client import RealDebridProvider 2 | 3 | __all__ = ['RealDebridProvider'] 4 | -------------------------------------------------------------------------------- /debrid/real_debrid/exceptions.py: -------------------------------------------------------------------------------- 1 | from ..base import DebridProviderError 2 | 3 | class RealDebridError(DebridProviderError): 4 | """Base exception class for Real-Debrid specific errors""" 5 | pass 6 | 7 | class RealDebridAPIError(RealDebridError): 8 | """Exception raised when the Real-Debrid API returns an error""" 9 | pass 10 | 11 | class RealDebridAuthError(RealDebridError): 12 | """Exception raised when there are authentication issues""" 13 | pass 14 | -------------------------------------------------------------------------------- /debrid/real_debrid/torrent.py: 
--------------------------------------------------------------------------------
 1 | import logging
 2 | from datetime import datetime, timedelta
 3 | from typing import Dict, List, Optional, Union, Tuple
 4 | import bencodepy
 5 | import hashlib
 6 | import tempfile
 7 | import os
 8 | from ..common.utils import extract_hash_from_magnet, is_valid_hash
 9 | from .api import make_request
10 | from ..status import TorrentStatus, get_status_flags
11 | 
12 | def process_hashes(hashes: Union[str, List[str]], batch_size: int = 100) -> List[str]:
13 |     """Process and validate a list of hashes (local copy of common.utils.process_hashes)"""
14 |     if isinstance(hashes, str):
15 |         hashes = [hashes]
16 | 
17 |     # Remove duplicates and invalid hashes
18 |     return list(set(h.lower() for h in hashes if is_valid_hash(h)))
19 | 
20 | def get_torrent_info(api_key: str, torrent_id: str) -> Dict:
21 |     """Get detailed information about a torrent"""
22 |     return make_request('GET', f'/torrents/info/{torrent_id}', api_key)
23 | 
24 | def add_torrent(api_key: str, magnet_link: str, temp_file_path: Optional[str] = None) -> Dict:
25 |     """Add a torrent to Real-Debrid and return the full response"""
26 |     if magnet_link.startswith('magnet:'):
27 |         # Add magnet link
28 |         data = {'magnet': magnet_link}
29 |         result = make_request('POST', '/torrents/addMagnet', api_key, data=data)
30 |     else:
31 |         # Add torrent file
32 |         if not temp_file_path:
33 |             raise ValueError("Temp file path required for torrent file upload")
34 | 
35 |         with open(temp_file_path, 'rb') as f:
36 |             files = {'file': f}
37 |             result = make_request('PUT', '/torrents/addTorrent', api_key, files=files)
38 | 
39 |     return result
40 | 
41 | def select_files(api_key: str, torrent_id: str, file_ids: List[int]) -> None:
42 |     """Select specific files from a torrent"""
43 |     data = {'files': ','.join(map(str, file_ids))}
44 |     make_request('POST', f'/torrents/selectFiles/{torrent_id}', api_key, data=data)
45 | 
46 | def get_torrent_files(api_key: str, hash_value: str) -> List[Dict]:
47 |     """Get list of files in a torrent"""
48 |     try:
49 |         availability = make_request('GET', f'/torrents/instantAvailability/{hash_value}', api_key)
50 |         if not availability or hash_value not in availability:
51 |             return []
52 | 
53 |         rd_data = availability[hash_value].get('rd', [])
54 |         if not rd_data:
55 |             return []
56 | 
57 |         files = []
58 |         for data in rd_data:
59 |             if not data:
60 |                 continue
61 |             for file_id, file_info in data.items():
62 |                 files.append({
63 |                     'id': file_id,
64 |                     'filename': file_info.get('filename', ''),
65 |                     'size': file_info.get('filesize', 0)
66 |                 })
67 | 
68 |         return files
69 |     except Exception as e:
70 |         logging.error(f"Error getting torrent files for hash {hash_value}: {str(e)}")
71 |         return []
72 | 
73 | def remove_torrent(api_key: str, torrent_id: str) -> None:
74 |     """Remove a torrent from Real-Debrid"""
75 |     logging.error(f"Removing torrent {torrent_id} - THIS FUNCTION IS DEPRECATED AND SHOULD NOT BE CALLED")
76 |     make_request('DELETE', f'/torrents/delete/{torrent_id}', api_key)
77 | 
78 | def list_active_torrents(api_key: str) -> List[Dict]:
79 |     """List all active torrents"""
80 |     return make_request('GET', '/torrents', api_key)
81 | 
82 | def cleanup_stale_torrents(api_key: str) -> None:
83 |     """Remove stale torrents that are older than 24 hours"""
84 |     try:
85 |         torrents = list_active_torrents(api_key)
86 |         for torrent in torrents:
87 |             added_date = datetime.fromtimestamp(torrent['added'])
88 |             if datetime.now() - added_date > timedelta(hours=24):
89 |                 try:
90 |                     remove_torrent(api_key, torrent['id'])
91 | 
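# Note: remove_torrent() warns that it is deprecated, yet this cleanup path still calls it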
logging.info(f"Removed stale torrent {torrent['id']}") 92 | except Exception as e: 93 | logging.error(f"Error removing stale torrent {torrent['id']}: {str(e)}") 94 | except Exception as e: 95 | logging.error(f"Error during stale torrent cleanup: {str(e)}") 96 | -------------------------------------------------------------------------------- /debrid/status.py: -------------------------------------------------------------------------------- 1 | """Common status flags and utilities for debrid providers""" 2 | 3 | from enum import Enum 4 | from typing import Dict 5 | 6 | class TorrentStatus(Enum): 7 | QUEUED = 'queued' 8 | DOWNLOADING = 'downloading' 9 | DOWNLOADED = 'downloaded' # Cached 10 | ERROR = 'error' 11 | UNKNOWN = 'unknown' 12 | SELECTING = 'selecting' 13 | REMOVED = 'removed' 14 | ADDED = 'added' 15 | CACHED = 'cached' 16 | NOT_CACHED = 'not_cached' 17 | 18 | def get_status_flags(status: str) -> Dict[str, bool]: 19 | """Convert a status string to standardized status flags""" 20 | is_cached = status == TorrentStatus.DOWNLOADED.value 21 | is_queued = status in [TorrentStatus.QUEUED.value, TorrentStatus.DOWNLOADING.value] 22 | is_error = status == TorrentStatus.ERROR.value 23 | 24 | return { 25 | 'is_cached': is_cached, 26 | 'is_queued': is_queued, 27 | 'is_error': is_error 28 | } 29 | -------------------------------------------------------------------------------- /debug_episode.py: -------------------------------------------------------------------------------- 1 | from cli_battery.app.direct_api import DirectAPI 2 | import logging 3 | 4 | logging.basicConfig(level=logging.DEBUG) 5 | 6 | show_imdb = "tt5594440" # People Magazine Investigates 7 | season = 8 8 | episode = 10 9 | 10 | # Try to get episode metadata directly 11 | print("\nTrying to get show metadata to find episode IMDb ID:") 12 | show_metadata, source = DirectAPI.get_show_metadata(show_imdb) 13 | if show_metadata: 14 | print(f"Source: {source}") 15 | print(f"Show metadata: {show_metadata}") 16 | 17 | # Look for season 8 episode 10 18 | seasons = show_metadata.get('seasons', {}) 19 | if str(season) in seasons: 20 | season_data = seasons[str(season)] 21 | print(f"\nSeason {season} data:") 22 | for ep in season_data.get('episodes', {}).values(): 23 | if ep.get('episode_number') == episode: 24 | print(f"Found episode {episode}:") 25 | print(ep) 26 | if 'imdb_id' in ep: 27 | print(f"\nTrying to get episode metadata for {ep['imdb_id']}:") 28 | episode_metadata, source = DirectAPI.get_episode_metadata(ep['imdb_id']) 29 | print(f"Source: {source}") 30 | print(f"Episode metadata: {episode_metadata}") 31 | break 32 | -------------------------------------------------------------------------------- /debug_metadata.py: -------------------------------------------------------------------------------- 1 | from cli_battery.app.trakt_metadata import TraktMetadata 2 | import logging 3 | 4 | logging.basicConfig(level=logging.INFO) 5 | 6 | # Initialize Trakt 7 | trakt = TraktMetadata() 8 | 9 | # Get fresh metadata 10 | imdb_id = "tt9253284" 11 | metadata = trakt.get_show_metadata(imdb_id) 12 | print(f"\nFetched metadata:") 13 | print(metadata) 14 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | cli_debrid: 3 | image: godver3/cli_debrid:dev 4 | pull_policy: always 5 | container_name: cli_debrid 6 | ports: 7 | - "5000:5000" 8 | - "5001:5001" 9 | volumes: 10 | - 
/host/location/db_content:/user/db_content 11 | - /host/location/config:/user/config 12 | - /host/location/logs:/user/logs 13 | - /media/mount:/media/mount # optional - used for symlinking - must match the mount point that Plex uses 14 | environment: 15 | - TZ=America/Edmonton 16 | restart: unless-stopped 17 | tty: true 18 | stdin_open: true 19 | -------------------------------------------------------------------------------- /docs/real_debrid.md: -------------------------------------------------------------------------------- 1 | # Real-Debrid Module Documentation 2 | 3 | This document provides documentation for the Real-Debrid module functionality and its API integration. 4 | 5 | ## Utility Functions 6 | 7 | ### `get_api_key()` 8 | - **Description**: Retrieves the Real-Debrid API key from settings 9 | - **Returns**: String containing the API key 10 | - **Raises**: ValueError if API key is not found in settings 11 | 12 | ### `timed_lru_cache(seconds: int, maxsize: int = 128)` 13 | - **Description**: Decorator that provides a time-based LRU cache 14 | - **Parameters**: 15 | - `seconds`: Cache lifetime in seconds 16 | - `maxsize`: Maximum size of the cache (default: 128) 17 | - **Returns**: Cached function result if within time limit 18 | 19 | ### `is_video_file(filename)` 20 | - **Description**: Checks if a file is a video based on its extension 21 | - **Parameters**: `filename` - Name of the file to check 22 | - **Returns**: Boolean indicating if file is a video 23 | 24 | ### `is_unwanted_file(filename)` 25 | - **Description**: Checks if a file is unwanted (e.g., sample files) 26 | - **Parameters**: `filename` - Name of the file to check 27 | - **Returns**: Boolean indicating if file is unwanted 28 | 29 | ### `extract_hash_from_magnet(magnet_link)` 30 | - **Description**: Extracts hash from a magnet link 31 | - **Parameters**: `magnet_link` - Magnet URL string 32 | - **Returns**: Hash string from magnet link 33 | 34 | ### `is_cached_on_rd(hashes)` 35 | - **Description**: Checks if hash(es) are cached on Real-Debrid 36 | - **Parameters**: `hashes` - Single hash string or list of hashes 37 | - **Returns**: Dictionary mapping hashes to their cache status 38 | 39 | ### `get_cached_files(hash_)` 40 | - **Description**: Retrieves cached files for a specific hash 41 | - **Parameters**: `hash_` - Hash string to check 42 | - **Returns**: List of cached files 43 | 44 | ### `get_active_downloads(check=False)` 45 | - **Description**: Gets number of active downloads and concurrent download limit 46 | - **Parameters**: `check` - Whether to check against limits 47 | - **Returns**: Tuple of (active downloads, download limit) 48 | - **Raises**: RealDebridTooManyDownloadsError if limits exceeded 49 | 50 | ### `get_user_limits()` 51 | - **Description**: Retrieves user account limits from Real-Debrid API 52 | - **Returns**: Dictionary containing user limits 53 | 54 | ### `check_daily_usage()` 55 | - **Description**: Checks daily API usage statistics 56 | - **Returns**: Dictionary containing usage statistics 57 | 58 | ### `get_user_traffic()` 59 | - **Description**: Retrieves user traffic information 60 | - **Returns**: Dictionary containing traffic data 61 | 62 | ## Main Class: RealDebridProvider 63 | 64 | ### Methods 65 | 66 | #### `__init__(self)` 67 | - **Description**: Initializes RealDebrid provider with rate limiting 68 | - **Fields**: 69 | - `API_BASE_URL`: Base URL for Real-Debrid API 70 | - `MAX_DOWNLOADS`: Maximum concurrent downloads (25) 71 | 72 | #### `add_torrent(self, magnet_link, 
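temp_file_path=None)`
73 | - **Description**: Adds a torrent/magnet to Real-Debrid
74 | - **Parameters**:
75 |   - `magnet_link`: Magnet URL or hash
76 |   - `temp_file_path`: Optional path to torrent file
77 | - **Returns**: Torrent ID if successful, None if failed
78 | 

Example usage (an illustrative sketch, not part of the module; it assumes a provider and API key are already configured in settings, and uses a placeholder magnet link):

```python
from debrid import get_debrid_provider

# Factory returns the configured provider instance (RealDebridProvider here)
provider = get_debrid_provider()

# add_torrent() returns a torrent ID on success, None on failure
torrent_id = provider.add_torrent("magnet:?xt=urn:btih:<40-character-hash>")
if torrent_id:
    info = provider.get_torrent_info(torrent_id)
    print(info.get("status"))
```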
79 | #### `list_torrents(self)`
80 | - **Description**: Lists all torrents in Real-Debrid account
81 | - **Returns**: List of torrent information
82 | 
83 | #### `get_torrent_info(self, torrent_id: str)`
84 | - **Description**: Gets detailed information about a specific torrent
85 | - **Parameters**: `torrent_id` - ID of the torrent
86 | - **Returns**: Dictionary containing torrent information
87 | 
88 | #### `remove_torrent(self, torrent_id: str)`
89 | - **Description**: Removes a torrent from Real-Debrid
90 | - **Parameters**: `torrent_id` - ID of the torrent to remove
91 | - **Returns**: Boolean indicating success
92 | 
93 | ## Error Classes
94 | 
95 | ### `RealDebridUnavailableError`
96 | - Raised when Real-Debrid service is unavailable
97 | 
98 | ### `RealDebridTooManyDownloadsError`
99 | - Raised when download limits are exceeded
100 | 
101 | ## Rate Limiting
102 | 
103 | The module implements rate limiting to prevent API abuse:
104 | - Default rate: 0.5 calls per second
105 | - Automatic retry mechanism for failed requests
106 | - Exponential backoff for retries
107 | 
108 | ## Caching
109 | 
110 | The module implements several caching mechanisms:
111 | - LRU cache with time expiration for API responses
112 | - In-memory cache for frequently accessed data
113 | - Cached responses for hash checks and file information
114 | 
-------------------------------------------------------------------------------- /find_imports.py: --------------------------------------------------------------------------------
 1 | import os
 2 | import ast
 3 | import importlib.util
 4 | 
 5 | def get_python_files(directory):
 6 |     for root, dirs, files in os.walk(directory):
 7 |         for file in files:
 8 |             if file.endswith('.py'):
 9 |                 yield os.path.join(root, file)
10 | 
11 | def get_imports(file_path):
12 |     with open(file_path, 'r', encoding='utf-8') as file:
13 |         try:
14 |             tree = ast.parse(file.read())
15 |         except SyntaxError:
16 |             print(f"Syntax error in {file_path}")
17 |             return []
18 | 
19 |     imports = set()
20 |     for node in ast.walk(tree):
21 |         if isinstance(node, ast.Import):
22 |             for alias in node.names:
23 |                 imports.add(alias.name)
24 |         elif isinstance(node, ast.ImportFrom):
25 |             module = node.module if node.module else ''
26 |             for alias in node.names:
27 |                 imports.add(f"{module}.{alias.name}")
28 | 
29 |     return imports
30 | 
31 | def is_local_module(module_name, base_path):
32 |     parts = module_name.split('.')
33 |     current_path = base_path
34 |     for part in parts:
35 |         current_path = os.path.join(current_path, part)
36 |         if os.path.isfile(current_path + '.py') or os.path.isdir(current_path):
37 |             continue
38 |         return False
39 |     return True
40 | 
41 | def main():
42 |     base_path = os.path.dirname(os.path.abspath(__file__))  # Assumes this script is in the project root
43 |     all_imports = set()
44 | 
45 |     for file_path in get_python_files(base_path):
46 |         all_imports.update(get_imports(file_path))
47 | 
48 |     local_imports = {imp for imp in all_imports if is_local_module(imp, base_path)}
49 | 
50 |     print("Potential hidden imports:")
51 |     for imp in sorted(local_imports):
52 |         print(f"--hidden-import={imp}")
53 | 
54 | if __name__ == "__main__":
55 |     main()
-------------------------------------------------------------------------------- /hooks/hook-PTT.py: 
-------------------------------------------------------------------------------- 1 | """PyInstaller hook for PTT package.""" 2 | from PyInstaller.utils.hooks import collect_data_files 3 | 4 | # Collect all data files from PTT package 5 | datas = collect_data_files('PTT', include_py_files=False) 6 | -------------------------------------------------------------------------------- /hooks/hook-aiohttp.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_submodules 2 | 3 | hiddenimports = collect_submodules('aiohttp') -------------------------------------------------------------------------------- /hooks/hook-babelfish.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_submodules, collect_data_files 2 | 3 | hiddenimports = collect_submodules('babelfish') 4 | datas = collect_data_files('babelfish') -------------------------------------------------------------------------------- /hooks/hook-beautifulsoup4.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_submodules 2 | 3 | hiddenimports = collect_submodules('bs4') -------------------------------------------------------------------------------- /hooks/hook-bencode.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_submodules 2 | 3 | hiddenimports = collect_submodules('bencode') -------------------------------------------------------------------------------- /hooks/hook-colorlog.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_submodules 2 | 3 | hiddenimports = collect_submodules('colorlog') -------------------------------------------------------------------------------- /hooks/hook-database.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_submodules, collect_data_files 2 | 3 | # Collect all submodules 4 | hiddenimports = collect_submodules('database') 5 | 6 | # Collect all data files (if any) 7 | datas = collect_data_files('database') -------------------------------------------------------------------------------- /hooks/hook-flask.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_submodules, collect_data_files 2 | 3 | hiddenimports = collect_submodules('flask') 4 | hiddenimports += [ 5 | 'flask.json', 6 | 'flask.json.tag', 7 | 'flask.cli', 8 | 'flask.helpers', 9 | 'flask.app', 10 | 'flask.blueprints', 11 | ] 12 | datas = collect_data_files('flask') -------------------------------------------------------------------------------- /hooks/hook-grpcio.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_submodules 2 | 3 | hiddenimports = collect_submodules('grpc') -------------------------------------------------------------------------------- /hooks/hook-guessit.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_submodules, collect_data_files 2 | 3 | hiddenimports = collect_submodules('guessit') 4 | datas = collect_data_files('guessit') -------------------------------------------------------------------------------- 
/hooks/hook-multiprocessing.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_submodules, collect_data_files 2 | 3 | # Collect all submodules of multiprocessing 4 | hiddenimports = collect_submodules('multiprocessing') 5 | 6 | # Add Windows-specific modules 7 | hiddenimports += [ 8 | 'multiprocessing.popen_spawn_win32', 9 | 'multiprocessing.synchronize', 10 | 'multiprocessing.heap', 11 | 'multiprocessing.queues', 12 | 'multiprocessing.connection', 13 | 'multiprocessing.context', 14 | 'multiprocessing.reduction', 15 | 'multiprocessing.resource_tracker', 16 | 'multiprocessing.spawn', 17 | 'multiprocessing.util', 18 | 'multiprocessing.forkserver', 19 | 'multiprocessing.process', 20 | 'multiprocessing.shared_memory', 21 | 'multiprocessing.dummy', 22 | ] 23 | 24 | # Collect data files 25 | datas = collect_data_files('multiprocessing') -------------------------------------------------------------------------------- /hooks/hook-nyaapy.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_all, collect_submodules 2 | 3 | datas, binaries, hiddenimports = collect_all('nyaapy') 4 | hiddenimports += collect_submodules('nyaapy') 5 | hiddenimports += ['lxml', 'requests'] # Add nyaapy's dependencies -------------------------------------------------------------------------------- /hooks/hook-parsedatetime.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_submodules 2 | 3 | hiddenimports = collect_submodules('parsedatetime') -------------------------------------------------------------------------------- /hooks/hook-pillow.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_submodules 2 | 3 | hiddenimports = collect_submodules('PIL') -------------------------------------------------------------------------------- /hooks/hook-plexapi.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_submodules 2 | 3 | hiddenimports = collect_submodules('plexapi') -------------------------------------------------------------------------------- /hooks/hook-pykakasi.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_submodules, collect_data_files 2 | 3 | hiddenimports = collect_submodules('pykakasi') 4 | datas = collect_data_files('pykakasi') -------------------------------------------------------------------------------- /hooks/hook-pytrakt.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_submodules 2 | 3 | hiddenimports = collect_submodules('trakt') -------------------------------------------------------------------------------- /hooks/hook-requests.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_submodules, collect_data_files 2 | 3 | hiddenimports = collect_submodules('requests') 4 | datas = collect_data_files('requests') -------------------------------------------------------------------------------- /hooks/hook-socket.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_submodules 2 | 3 | # 
Collect all submodules of socket 4 | hiddenimports = collect_submodules('socket') 5 | 6 | # Add related modules 7 | hiddenimports += [ 8 | 'select', 9 | 'selectors', 10 | 'ssl', 11 | 'encodings.idna', 12 | 'encodings.utf_8', 13 | 'encodings.ascii', 14 | 'encodings.latin_1', 15 | ] -------------------------------------------------------------------------------- /hooks/hook-sqlalchemy.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_submodules 2 | 3 | hiddenimports = collect_submodules('sqlalchemy') -------------------------------------------------------------------------------- /hooks/hook-supervisor.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_submodules 2 | 3 | hiddenimports = collect_submodules('supervisor') -------------------------------------------------------------------------------- /hooks/hook-threading.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_submodules 2 | 3 | # Collect all submodules of threading 4 | hiddenimports = collect_submodules('threading') 5 | 6 | # Add related modules 7 | hiddenimports += [ 8 | '_thread', 9 | 'queue', 10 | 'concurrent.futures', 11 | 'concurrent.futures.thread', 12 | 'concurrent.futures.process', 13 | ] -------------------------------------------------------------------------------- /hooks/hook-urllib3.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_submodules, collect_data_files 2 | 3 | hiddenimports = collect_submodules('urllib3') 4 | datas = collect_data_files('urllib3') -------------------------------------------------------------------------------- /hooks/hook-urwid.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_submodules 2 | 3 | hiddenimports = collect_submodules('urwid') -------------------------------------------------------------------------------- /hooks/hook-werkzeug.py: -------------------------------------------------------------------------------- 1 | from PyInstaller.utils.hooks import collect_submodules, collect_data_files 2 | 3 | hiddenimports = collect_submodules('werkzeug') 4 | datas = collect_data_files('werkzeug') -------------------------------------------------------------------------------- /manual_blacklist.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import logging 4 | from typing import Dict 5 | 6 | # Get db_content directory from environment variable with fallback 7 | DB_CONTENT_DIR = os.environ.get('USER_DB_CONTENT', '/user/db_content') 8 | 9 | # Update the path to use the environment variable 10 | BLACKLIST_FILE = os.path.join(DB_CONTENT_DIR, 'manual_blacklist.json') 11 | 12 | def load_manual_blacklist(): 13 | os.makedirs(os.path.dirname(BLACKLIST_FILE), exist_ok=True) 14 | 15 | if not os.path.exists(BLACKLIST_FILE): 16 | return {} 17 | try: 18 | with open(BLACKLIST_FILE, 'r') as f: 19 | return json.load(f) 20 | except json.JSONDecodeError: 21 | logging.error(f"Error decoding {BLACKLIST_FILE}. 
Starting with empty blacklist.")
22 |         return {}
23 | 
24 | def save_manual_blacklist(blacklist):
25 |     with open(BLACKLIST_FILE, 'w') as f:
26 |         json.dump(blacklist, f)
27 | 
28 | def add_to_manual_blacklist(imdb_id: str, media_type: str, title: str, year: str, season: int = None):
29 |     blacklist = get_manual_blacklist()
30 | 
31 |     if season is not None and media_type == 'tv':
32 |         # If this is the first season for this show
33 |         if imdb_id not in blacklist:
34 |             blacklist[imdb_id] = {
35 |                 'media_type': 'tv',
36 |                 'title': title,
37 |                 'year': year,
38 |                 'seasons': [season]
39 |             }
40 |         else:
41 |             # Add season if not already blacklisted
42 |             if 'seasons' not in blacklist[imdb_id]:
43 |                 blacklist[imdb_id]['seasons'] = []
44 |             if season not in blacklist[imdb_id]['seasons']:
45 |                 blacklist[imdb_id]['seasons'].append(season)
46 |                 blacklist[imdb_id]['seasons'].sort()
47 |     else:
48 |         # Regular blacklisting for movies or entire shows
49 |         blacklist[imdb_id] = {
50 |             'media_type': media_type,
51 |             'title': title,
52 |             'year': year
53 |         }
54 |         if media_type == 'tv':
55 |             blacklist[imdb_id]['seasons'] = []  # Empty list means all seasons
56 | 
57 |     save_manual_blacklist(blacklist)
58 |     if season is not None:
59 |         logging.info(f"Added {imdb_id}: {title} ({year}) Season {season} to manual blacklist")
60 |     else:
61 |         logging.info(f"Added {imdb_id}: {title} ({year}) to manual blacklist as {media_type}")
62 | 
63 | def remove_from_manual_blacklist(imdb_id):
64 |     blacklist = get_manual_blacklist()
65 |     if imdb_id in blacklist:
66 |         item = blacklist.pop(imdb_id)
67 |         save_manual_blacklist(blacklist)
68 |         logging.info(f"Removed {imdb_id}: {item['title']} ({item['year']}) from manual blacklist.")
69 |     else:
70 |         logging.warning(f"{imdb_id} not found in manual blacklist.")
71 | 
72 | def is_blacklisted(imdb_id, season: int = None):
73 |     blacklist = get_manual_blacklist()
74 |     if imdb_id not in blacklist:
75 |         return False
76 | 
77 |     item = blacklist[imdb_id]
78 |     if item['media_type'] != 'tv':
79 |         return True
80 | 
81 |     # If seasons is empty or doesn't exist, the entire show is blacklisted
82 |     if 'seasons' not in item or not item['seasons']:
83 |         return True
84 | 
85 |     # If season is None, we're checking the whole show
86 |     if season is None:
87 |         return False  # Show isn't fully blacklisted if specific seasons are listed
88 | 
89 |     return season in item['seasons']
90 | 
91 | def get_manual_blacklist() -> Dict[str, Dict[str, str]]:
92 |     """Return the blacklist, treating a missing or corrupt file as empty."""
93 |     return load_manual_blacklist()
-------------------------------------------------------------------------------- /queue_utils.py: --------------------------------------------------------------------------------
 1 | from queue_manager import QueueManager
 2 | import logging
 3 | 
 4 | queue_manager = QueueManager()
 5 | 
 6 | def safe_process_queue(queue_name):
 7 |     try:
 8 |         getattr(queue_manager, f'process_{queue_name.lower()}')()
 9 |         # Update stats if needed
10 |     except Exception as e:
11 |         logging.error(f"Error processing {queue_name} queue: {str(e)}")
12 |         # Update stats if needed
-------------------------------------------------------------------------------- /queues/anime_matcher.py: --------------------------------------------------------------------------------
 1 | import logging
 2 | from guessit import guessit
 3 | from typing import List, Dict, Any, Tuple
 4 | 
 5 | class AnimeMatcher:
 6 |     def __init__(self, calculate_absolute_episode_func):
 7 |         # Keep the resolver so matching helpers can translate
 8 |         # season/episode pairs into absolute numbering
 9 |         self.calculate_absolute_episode = calculate_absolute_episode_func
10 |         logging.info("AnimeMatcher initialized")
11 | 
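12 | 
13 |     # Illustrative sketch (not in the original file): one way a matching
14 |     # helper could combine guessit with the injected resolver; the callback
15 |     # signature (imdb_id, season, episode) -> int is an assumption here.
16 |     def episode_matches(self, filename: str, imdb_id: str, season: int, episode: int) -> bool:
17 |         """Check whether a parsed filename carries the expected episode number."""
18 |         parsed = guessit(filename)
19 |         found = parsed.get('episode')  # int for single episodes, list for multi-episode releases
20 |         candidates = {episode, self.calculate_absolute_episode(imdb_id, season, episode)}
21 |         if isinstance(found, list):
22 |             return any(ep in candidates for ep in found)
23 |         return found in candidates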
-------------------------------------------------------------------------------- /queues/mock_queue_manager.py: -------------------------------------------------------------------------------- 1 | """Mock queue manager for testing""" 2 | 3 | import logging 4 | from typing import Dict, Any, List 5 | from database import ( 6 | update_media_item_state, 7 | get_all_media_items 8 | ) 9 | 10 | class MockQueueManager: 11 | """Minimal queue manager for testing""" 12 | 13 | def move_to_checking(self, item: Dict[str, Any], from_queue: str, title: str, link: str, filled_by_file: str, torrent_id: str = None): 14 | """Move an item to checking state""" 15 | item_id = item['id'] 16 | logging.info(f"Moving item {item_id} to checking state") 17 | update_media_item_state( 18 | item_id, 19 | 'Checking', 20 | filled_by_title=title, 21 | filled_by_magnet=link, 22 | filled_by_file=filled_by_file, 23 | filled_by_torrent_id=torrent_id 24 | ) 25 | 26 | def get_scraping_items(self) -> List[Dict]: 27 | """Get all items in Scraping state""" 28 | return [dict(row) for row in get_all_media_items(state="Scraping")] 29 | -------------------------------------------------------------------------------- /rclone/config/rclone.conf: -------------------------------------------------------------------------------- 1 | [zurg] 2 | type = webdav 3 | url = http://debrid_zurg:9999/dav/ 4 | vendor = other 5 | pacer_min_sleep = 0 6 | -------------------------------------------------------------------------------- /requirements-linux.txt: -------------------------------------------------------------------------------- 1 | aiohttp>=3.9,<4.0 2 | babelfish==0.6.1 3 | beautifulsoup4==4.12.3 4 | bencode.py==4.0.0 5 | bencodepy==0.9.5 6 | dogpile.cache>=0.9.2 7 | flask_session==0.8.0 8 | Flask==3.0.3 9 | Flask_Cors==5.0.0 10 | Flask_Login==0.6.3 11 | flask_sqlalchemy==3.1.1 12 | Flask-Limiter==3.5.0 13 | fuzzywuzzy==0.18.0 14 | grpcio==1.66.1 15 | guessit==3.8.0 16 | MarkupSafe==2.1.5 17 | protobuf==5.28.1 18 | pykakasi==2.3.0 19 | requests==2.32.3 20 | SQLAlchemy==2.0.31 21 | subliminal>=2.1.0 22 | tenacity==9.0.0 23 | urwid==2.6.15 24 | Werkzeug==3.0.4 25 | pytrakt==3.4.32 26 | plexapi==4.15.15 27 | colorlog==6.8.2 28 | iso8601==2.1.0 29 | Pillow==10.4.0 30 | supervisor==4.2.5 31 | ntplib==0.4.0 32 | parsett==1.5.6 33 | pytz==2024.1 34 | psutil==6.0.0 35 | urllib3==1.26.15 36 | idna==3.3 37 | charset-normalizer==2.0.4 38 | certifi==2023.5.7 39 | python-Levenshtein 40 | appdirs==1.4.4 41 | pystray==0.19.5 42 | tzlocal==5.2 43 | APScheduler==3.10.4 44 | PyGithub==2.1.1 45 | nyaapy==0.7 46 | tld>=0.13 47 | feedparser==6.0.11 -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | aiohttp>=3.9,<4.0 2 | babelfish==0.6.1 3 | beautifulsoup4==4.12.3 4 | bencode.py==4.0.0 5 | bencodepy==0.9.5 6 | dogpile.cache>=0.9.2 7 | flask_session==0.8.0 8 | Flask==3.0.3 9 | Flask_Cors==5.0.0 10 | Flask_Login==0.6.3 11 | flask_sqlalchemy==3.1.1 12 | Flask-Limiter==3.5.0 13 | fuzzywuzzy==0.18.0 14 | grpcio==1.66.1 15 | guessit==3.8.0 16 | MarkupSafe==2.1.5 17 | protobuf==5.28.1 18 | pykakasi==2.3.0 19 | requests==2.32.3 20 | SQLAlchemy==2.0.31 21 | subliminal>=2.1.0 22 | tenacity==9.0.0 23 | urwid==2.6.15 24 | Werkzeug==3.0.4 25 | pytrakt==3.4.32 26 | plexapi==4.15.15 27 | colorlog==6.8.2 28 | iso8601==2.1.0 29 | Pillow==10.4.0 30 | supervisor==4.2.5 31 | ntplib==0.4.0 32 | parsett==1.5.6 33 | pytz==2024.1 34 | psutil==6.0.0 35 | 
urllib3==1.26.15 36 | idna==3.3 37 | charset-normalizer==2.0.4 38 | certifi==2023.5.7 39 | python-Levenshtein 40 | appdirs==1.4.4 41 | pystray==0.19.5 42 | tzlocal==5.2 43 | pywin32==306 44 | APScheduler==3.10.4 45 | PyGithub==2.1.1 46 | nyaapy==0.7 47 | tld>=0.13 48 | feedparser==6.0.11 49 | -------------------------------------------------------------------------------- /reset_admin.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Navigate to the project directory 4 | cd "$(dirname "$0")" 5 | 6 | # Run a Python script to reset the admin account 7 | python3 << END 8 | from web_server import app, db 9 | from routes.auth_routes import User 10 | from werkzeug.security import generate_password_hash 11 | 12 | with app.app_context(): 13 | # Delete all existing users 14 | User.query.delete() 15 | db.session.commit() 16 | 17 | # Create new admin user 18 | hashed_password = generate_password_hash('admin') 19 | new_admin = User(username='admin', password=hashed_password, role='admin', is_default=True) 20 | db.session.add(new_admin) 21 | db.session.commit() 22 | print("Admin account reset successfully.") 23 | END 24 | -------------------------------------------------------------------------------- /routes/cors_testing_routes.py: -------------------------------------------------------------------------------- 1 | from flask import Blueprint, jsonify, request, render_template, current_app 2 | import logging 3 | import time 4 | from flask_login import current_user 5 | 6 | cors_testing_bp = Blueprint('cors_testing', __name__, url_prefix='/cors_testing') 7 | 8 | @cors_testing_bp.route('/') 9 | def cors_test_page(): 10 | """Serve the CORS test page""" 11 | return render_template('cors_test.html') 12 | 13 | @cors_testing_bp.route('/cookie-check') 14 | def cookie_check(): 15 | """Check cookies and request details""" 16 | cookies = request.cookies 17 | cookie_list = [] 18 | 19 | for name, value in cookies.items(): 20 | cookie_list.append({ 21 | 'name': name, 22 | 'value': value[:20] + '...' 
if len(value) > 20 else value, 23 | 'secure': current_app.config['SESSION_COOKIE_SECURE'] if name == 'session' else None, 24 | 'httpOnly': current_app.config['SESSION_COOKIE_HTTPONLY'] if name == 'session' else None, 25 | 'sameSite': current_app.config['SESSION_COOKIE_SAMESITE'] if name == 'session' else None 26 | }) 27 | 28 | # Log cookie information 29 | logging.debug("\n=== Cookie Check ===") 30 | logging.debug(f"Cookies received: {len(cookie_list)}") 31 | for cookie in cookie_list: 32 | logging.debug(f" {cookie['name']}: {cookie['value']}") 33 | logging.debug(f" Secure: {cookie['secure']}") 34 | logging.debug(f" HttpOnly: {cookie['httpOnly']}") 35 | logging.debug(f" SameSite: {cookie['sameSite']}") 36 | 37 | # Get protocol information 38 | protocol = request.headers.get('X-Forwarded-Proto', request.scheme) 39 | 40 | # Send response with enhanced information 41 | response = { 42 | 'cookies_present': bool(cookie_list), 43 | 'cookie_count': len(cookie_list), 44 | 'cookies': cookie_list, 45 | 'request': { 46 | 'host': request.host, 47 | 'origin': request.headers.get('Origin'), 48 | 'protocol': protocol, 49 | 'user_agent': request.headers.get('User-Agent'), 50 | 'x_forwarded_proto': request.headers.get('X-Forwarded-Proto'), 51 | 'x_forwarded_for': request.headers.get('X-Forwarded-For') 52 | }, 53 | 'server': { 54 | 'time': time.strftime('%Y-%m-%d %H:%M:%S'), 55 | 'session_config': { 56 | 'cookie_secure': current_app.config['SESSION_COOKIE_SECURE'], 57 | 'cookie_httponly': current_app.config['SESSION_COOKIE_HTTPONLY'], 58 | 'cookie_samesite': current_app.config['SESSION_COOKIE_SAMESITE'], 59 | 'cookie_domain': current_app.config.get('SESSION_COOKIE_DOMAIN'), 60 | 'permanent': current_app.config['SESSION_PERMANENT'] 61 | }, 62 | 'user_authenticated': current_user.is_authenticated if not current_user.is_anonymous else False 63 | } 64 | } 65 | 66 | return jsonify(response) 67 | 68 | @cors_testing_bp.route('/test-post', methods=['POST']) 69 | def test_post(): 70 | """Test POST request handling""" 71 | response = { 72 | 'status': 'success', 73 | 'method': request.method, 74 | 'headers': dict(request.headers), 75 | 'form_data': dict(request.form), 76 | 'user_authenticated': current_user.is_authenticated if not current_user.is_anonymous else False 77 | } 78 | return jsonify(response) 79 | 80 | @cors_testing_bp.route('/test-options', methods=['OPTIONS']) 81 | def test_options(): 82 | """Test OPTIONS request handling""" 83 | response = jsonify({ 84 | 'status': 'success', 85 | 'allowed_methods': ['GET', 'POST', 'OPTIONS'], 86 | 'cors_enabled': True 87 | }) 88 | return response -------------------------------------------------------------------------------- /routes/library_management_routes.py: -------------------------------------------------------------------------------- 1 | from flask import Blueprint, render_template, jsonify, request 2 | from flask_login import login_required, current_user 3 | from functools import wraps 4 | import os 5 | from pathlib import Path 6 | from utilities.local_library_scan import scan_for_broken_symlinks, repair_broken_symlink 7 | 8 | library_management = Blueprint('library_management', __name__) 9 | 10 | def admin_required(f): 11 | @wraps(f) 12 | def decorated_function(*args, **kwargs): 13 | if not current_user.is_authenticated or current_user.role != 'admin': 14 | return jsonify({'error': 'Admin privileges required'}), 403 15 | return f(*args, **kwargs) 16 | return decorated_function 17 | 18 | @library_management.route('/library-management') 19 | @login_required 20 | 
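# Library-management pages and APIs below are restricted to admins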
@admin_required
21 | def manage_libraries():
22 |     return render_template('library_management.html')
23 | 
24 | @library_management.route('/api/libraries', methods=['GET'])
25 | @login_required
26 | @admin_required
27 | def get_libraries():
28 |     # TODO: Implement logic to get all configured libraries
29 |     libraries = []  # This will be populated with actual library data
30 |     return jsonify(libraries)
31 | 
32 | @library_management.route('/api/libraries', methods=['POST'])
33 | @login_required
34 | @admin_required
35 | def create_library():
36 |     data = request.get_json()
37 |     # TODO: Implement library creation logic
38 |     return jsonify({'message': 'Library created successfully'})
39 | 
40 | @library_management.route('/api/libraries/<int:library_id>', methods=['PUT'])
41 | @login_required
42 | @admin_required
43 | def update_library(library_id):
44 |     data = request.get_json()
45 |     # TODO: Implement library update logic
46 |     return jsonify({'message': 'Library updated successfully'})
47 | 
48 | @library_management.route('/api/libraries/<int:library_id>', methods=['DELETE'])
49 | @login_required
50 | @admin_required
51 | def delete_library(library_id):
52 |     # TODO: Implement library deletion logic
53 |     return jsonify({'message': 'Library deleted successfully'})
54 | 
55 | @library_management.route('/api/libraries/verify', methods=['POST'])
56 | @login_required
57 | @admin_required
58 | def verify_library_path():
59 |     data = request.get_json()
60 |     path = data.get('path')
61 | 
62 |     if not path:
63 |         return jsonify({'error': 'Path is required'}), 400
64 | 
65 |     path_obj = Path(path)
66 |     exists = path_obj.exists()
67 |     is_dir = path_obj.is_dir() if exists else False
68 |     is_symlink = path_obj.is_symlink() if exists else False
69 | 
70 |     return jsonify({
71 |         'exists': exists,
72 |         'is_directory': is_dir,
73 |         'is_symlink': is_symlink,
74 |         'valid': exists and is_dir
75 |     })
76 | 
77 | @library_management.route('/api/libraries/scan-broken', methods=['POST'])
78 | @login_required
79 | @admin_required
80 | def scan_broken_symlinks():
81 |     """Scan for broken symlinks in the library."""
82 |     data = request.get_json()
83 |     library_path = data.get('path') if data else None
84 | 
85 |     results = scan_for_broken_symlinks(library_path)
86 |     return jsonify(results)
87 | 
88 | @library_management.route('/api/libraries/repair-symlink', methods=['POST'])
89 | @login_required
90 | @admin_required
91 | def repair_symlink():
92 |     """Attempt to repair a broken symlink."""
93 |     data = request.get_json()
94 | 
95 |     if not data or 'symlink_path' not in data:
96 |         return jsonify({'error': 'Symlink path is required'}), 400
97 | 
98 |     symlink_path = data.get('symlink_path')
99 |     new_target_path = data.get('new_target_path')  # Optional
100 | 
101 |     result = repair_broken_symlink(symlink_path, new_target_path)
102 |     return jsonify(result)
-------------------------------------------------------------------------------- /routes/models.py: --------------------------------------------------------------------------------
 1 | from flask import redirect, url_for
 2 | from functools import wraps, lru_cache
 3 | from flask_login import current_user, login_required
 4 | from .utils import is_user_system_enabled
 5 | import logging
 6 | import time
 7 | 
 8 | # Cache the logging calls for 5 minutes
 9 | @lru_cache(maxsize=128)
10 | def _cached_log_debug(message, timestamp):
11 |     # timestamp is used to ensure we get a new cache entry every 5 minutes
12 |     logging.debug(message)
13 | 
14 | def _rate_limited_log_debug(message):
15 |     # Create a new timestamp every 5 minutes
16 |     timestamp = 
int(time.time() / 300) # 300 seconds = 5 minutes 17 | _cached_log_debug(message, timestamp) 18 | 19 | def admin_required(f): 20 | @wraps(f) 21 | def decorated_function(*args, **kwargs): 22 | if not is_user_system_enabled(): 23 | return f(*args, **kwargs) 24 | if not current_user.is_authenticated or current_user.role != 'admin': 25 | return redirect(url_for('auth.unauthorized')) 26 | return f(*args, **kwargs) 27 | return decorated_function 28 | 29 | def user_required(f): 30 | @wraps(f) 31 | def decorated_function(*args, **kwargs): 32 | if not is_user_system_enabled(): 33 | _rate_limited_log_debug("User system disabled, allowing access") 34 | return f(*args, **kwargs) 35 | if not current_user.is_authenticated: 36 | _rate_limited_log_debug("User not authenticated, redirecting to login") 37 | return redirect(url_for('auth.login')) 38 | return f(*args, **kwargs) 39 | return decorated_function 40 | 41 | def onboarding_required(f): 42 | @wraps(f) 43 | def decorated_function(*args, **kwargs): 44 | if not is_user_system_enabled(): 45 | _rate_limited_log_debug("User system disabled, allowing access") 46 | return f(*args, **kwargs) 47 | if not current_user.is_authenticated: 48 | _rate_limited_log_debug("User not authenticated, redirecting to login") 49 | return redirect(url_for('auth.login')) 50 | if not current_user.onboarding_complete: 51 | _rate_limited_log_debug("User onboarding not complete, redirecting to onboarding") 52 | return redirect(url_for('onboarding.onboarding_step', step=1)) 53 | return f(*args, **kwargs) 54 | return decorated_function 55 | 56 | def scraper_permission_required(f): 57 | @wraps(f) 58 | def decorated_function(*args, **kwargs): 59 | if not is_user_system_enabled(): 60 | _rate_limited_log_debug("User system disabled, allowing access") 61 | return f(*args, **kwargs) 62 | if not current_user.is_authenticated: 63 | _rate_limited_log_debug("User not authenticated, redirecting to login") 64 | return redirect(url_for('auth.login')) 65 | if current_user.role == 'requester': 66 | _rate_limited_log_debug("User is a requester, not allowed to use scraper") 67 | return redirect(url_for('auth.unauthorized')) 68 | return f(*args, **kwargs) 69 | return decorated_function 70 | 71 | def scraper_view_access_required(f): 72 | @wraps(f) 73 | def decorated_function(*args, **kwargs): 74 | if not is_user_system_enabled(): 75 | _rate_limited_log_debug("User system disabled, allowing access") 76 | return f(*args, **kwargs) 77 | if not current_user.is_authenticated: 78 | _rate_limited_log_debug("User not authenticated, redirecting to login") 79 | return redirect(url_for('auth.login')) 80 | # Allow all authenticated users, including requesters, to view 81 | return f(*args, **kwargs) 82 | return decorated_function -------------------------------------------------------------------------------- /routes/torrent_status_routes.py: -------------------------------------------------------------------------------- 1 | from flask import Blueprint, render_template, jsonify 2 | from debrid.common.utils import format_torrent_status 3 | from debrid import get_debrid_provider 4 | from settings import get_setting 5 | 6 | # Create Blueprint 7 | torrent_status_bp = Blueprint('torrent_status', __name__, url_prefix='/torrent_status') 8 | 9 | @torrent_status_bp.route('/') 10 | def torrent_status(): 11 | """Display the torrent status page""" 12 | return render_template('torrent_status.html') 13 | 14 | @torrent_status_bp.route('/api/torrent-status') 15 | def get_torrent_status(): 16 | """API endpoint to get current 
torrent status""" 17 | try: 18 | 19 | # Get the configured debrid provider instance 20 | provider = get_debrid_provider() 21 | 22 | # Get active torrents and stats using the provider 23 | active_torrents, download_stats = provider.get_torrent_status() 24 | 25 | # Format the status 26 | status_text = format_torrent_status(active_torrents, download_stats) 27 | 28 | # Split the status text into sections for better frontend rendering 29 | sections = {} 30 | current_section = None 31 | current_content = [] 32 | 33 | for line in status_text.split('\n'): 34 | if not line.strip(): 35 | continue 36 | 37 | if line.endswith(':'): # This is a section header 38 | if current_section: 39 | sections[current_section] = current_content 40 | current_section = line.strip(':') 41 | current_content = [] 42 | else: 43 | current_content.append(line) 44 | 45 | # Add the last section 46 | if current_section: 47 | sections[current_section] = current_content 48 | 49 | return jsonify({ 50 | 'success': True, 51 | 'sections': sections, 52 | 'raw_status': status_text 53 | }) 54 | 55 | except Exception as e: 56 | return jsonify({ 57 | 'success': False, 58 | 'error': str(e) 59 | }), 500 60 | -------------------------------------------------------------------------------- /routes/utils.py: -------------------------------------------------------------------------------- 1 | from flask import current_app 2 | import logging 3 | from settings import get_setting 4 | 5 | def is_user_system_enabled(): 6 | enable_user_system = get_setting('UI Settings', 'enable_user_system', False) 7 | return enable_user_system -------------------------------------------------------------------------------- /sample.env: -------------------------------------------------------------------------------- 1 | # Traditional environment variables for auth settings 2 | DEFAULT_ADMIN_USER=admin 3 | DEFAULT_ADMIN_PASSWORD=your_secure_password 4 | DISABLE_ONBOARDING=false 5 | 6 | # Configuration can be specified as a JSON structure between CONFIG_JSON_START and CONFIG_JSON_END markers 7 | CONFIG_JSON_START 8 | { 9 | "UI Settings": { 10 | "enable_user_system": true, 11 | "use_24hour_format": true, 12 | "compact_view": false 13 | }, 14 | "Plex": { 15 | "url": "", 16 | "token": "", 17 | "movie_libraries": "", 18 | "shows_libraries": "" 19 | }, 20 | "File Management": { 21 | "file_collection_management": "Plex", 22 | "original_files_path": "/mnt/zurg/__all__", 23 | "symlinked_files_path": "/mnt/symlinked", 24 | "symlink_organize_by_type": true, 25 | "plex_url_for_symlink": "", 26 | "plex_token_for_symlink": "" 27 | }, 28 | "Debrid Provider": { 29 | "provider": "RealDebrid", 30 | "api_key": "demo_key" 31 | }, 32 | "TMDB": { 33 | "api_key": "" 34 | }, 35 | "Staleness Threshold": { 36 | "staleness_threshold": 7 37 | }, 38 | "Sync Deletions": { 39 | "sync_deletions": false 40 | }, 41 | "Metadata Battery": { 42 | "url": "http://localhost:5001" 43 | }, 44 | "Queue": { 45 | "wake_limit": "24", 46 | "movie_airtime_offset": "0", 47 | "episode_airtime_offset": "0", 48 | "blacklist_duration": "30" 49 | }, 50 | "Scraping": { 51 | "uncached_content_handling": "None", 52 | "upgrade_similarity_threshold": 0.95, 53 | "hybrid_mode": false, 54 | "jackett_seeders_only": false, 55 | "ultimate_sort_order": "None", 56 | "soft_max_size_gb": false, 57 | "enable_upgrading": false, 58 | "enable_upgrading_cleanup": false, 59 | "disable_adult": true, 60 | "trakt_early_releases": false, 61 | "versions": { 62 | "Default": { 63 | "enable_hdr": false, 64 | "max_resolution": "1080p", 65 | 
"resolution_wanted": "<=", 66 | "resolution_weight": "3", 67 | "hdr_weight": "3", 68 | "similarity_weight": "3", 69 | "similarity_threshold": "0.8", 70 | "similarity_threshold_anime": "0.35", 71 | "size_weight": "3", 72 | "bitrate_weight": "3", 73 | "preferred_filter_in": "", 74 | "preferred_filter_out": "", 75 | "filter_in": "", 76 | "filter_out": "", 77 | "min_size_gb": "0.01", 78 | "max_size_gb": "" 79 | } 80 | } 81 | }, 82 | "Trakt": { 83 | "client_id": "", 84 | "client_secret": "" 85 | }, 86 | "Debug": { 87 | "skip_initial_plex_update": false, 88 | "auto_run_program": false, 89 | "disable_initialization": false, 90 | "sort_by_uncached_status": false, 91 | "content_source_check_period": {}, 92 | "checking_queue_period": 3600, 93 | "rescrape_missing_files": false, 94 | "enable_reverse_order_scraping": false, 95 | "disable_not_wanted_check": false, 96 | "plex_watchlist_removal": false, 97 | "plex_watchlist_keep_series": false, 98 | "trakt_watchlist_removal": false, 99 | "trakt_watchlist_keep_series": false, 100 | "symlink_movie_template": "{title} ({year})/{title} ({year}) - {imdb_id} - {version} - ({original_filename})", 101 | "symlink_episode_template": "{title} ({year})/Season {season_number:02d}/{title} ({year}) - S{season_number:02d}E{episode_number:02d} - {episode_title} - {imdb_id} - {version} - ({original_filename})", 102 | "allow_partial_overseerr_requests": false, 103 | "timezone_override": "", 104 | "filename_filter_out_list": "", 105 | "anime_renaming_using_anidb": false, 106 | "check_for_updates": true 107 | }, 108 | "Scrapers": { 109 | "Torrentio_1": { 110 | "type": "Torrentio", 111 | "enabled": true, 112 | "opts": "" 113 | } 114 | }, 115 | "Content Sources": { 116 | "My Plex Watchlist_1": { 117 | "enabled": true, 118 | "versions": [ 119 | "1080p", 120 | "2160p" 121 | ], 122 | "display_name": "", 123 | "type": "My Plex Watchlist" 124 | } 125 | }, 126 | "Notifications": {}, 127 | "Reverse Parser": { 128 | "version_terms": {}, 129 | "default_version": "" 130 | } 131 | } 132 | CONFIG_JSON_END 133 | 134 | # The JSON structure can be formatted across multiple lines for better readability 135 | # The code will look for CONFIG_JSON_START and CONFIG_JSON_END markers 136 | # For backward compatibility, it also supports the single-line CONFIG_JSON=... format 137 | -------------------------------------------------------------------------------- /scraper/.alias_disabled: -------------------------------------------------------------------------------- 1 | disabled 2 | -------------------------------------------------------------------------------- /scraper/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/scraper/__init__.py -------------------------------------------------------------------------------- /scraper/functions/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Functions module for scraper package. 3 | Contains various utility functions for scraping, filtering, and processing results. 
4 | """ 5 | 6 | from .common import * 7 | from .adult_terms import * 8 | from .deduplicate_results import * 9 | from .file_processing import * 10 | from .filter_results import * 11 | from .logging import * 12 | from .other_functions import * 13 | from .rank_results import * 14 | from .similarity_checks import * 15 | -------------------------------------------------------------------------------- /scraper/functions/adult_terms.py: -------------------------------------------------------------------------------- 1 | # Add our own adult terms list for additional filtering 2 | adult_terms = [ 3 | 'xxx', 'porn', 'hentai', 'brazzers', 'playboy', 'penthouse', 4 | 'bangbros', 'naughtyamerica', 'blacked', 'tushy', 'vixen', 'pornhub', 5 | 'xvideos', 'xhamster', 'redtube', 'youporn', 'eporner', 'xnxx', 'spankbang', 6 | 'pornhd', 'xmovies', 'beeg', 'porntrex', 'chaturbate', 'myfreecams', 7 | 'cam4', 'camsoda', 'livejasmin', 'streamate', 'stripchat', 'bongacams', 8 | 'adultfriendfinder', 'fling', 'ashleymadison', 'seekingarrangement', 9 | 'onlyfans', 'manyvids', 'clips4sale', 'pornhubpremium', 'brazzersnetwork', 10 | 'realitykings', 'mofos', 'digitalplayground', 'twistys', 'evilangel', 'kink', 11 | 'gfrevenge', 'puba', 'fakehub', 'naughtyamerica', 'teamskeet', 'bangbros', 12 | 'blacked', 'blackedraw', 'tushy', 'tushyraw', 'vixen', 'sweetsinner', 13 | 'sweetheartvideo', 'girlsway', 'whiteghetto', 'devilsfilm', 'peternorth', 14 | 'roccocontent', 'julesjordan', 'manuelferrara', 'legalpornonetwork', 'vivid', 15 | 'wickedpictures', 'hustler', 'penthouse', 'playboy', 'playboyplus', 'adulttime', 16 | 'brazzers', 'realitykings', 'mofos', 'digitalplayground', 'fakehub' 17 | ] -------------------------------------------------------------------------------- /scraper/functions/common.py: -------------------------------------------------------------------------------- 1 | """Common utility functions shared across the scraper module.""" 2 | import re 3 | import logging 4 | from typing import Dict, Any, Union, List, Optional 5 | from guessit import guessit 6 | 7 | def trim_magnet(magnet: str): 8 | """Remove unnecessary parts from magnet link.""" 9 | if '&' in magnet: 10 | magnet = magnet.split('&')[0] 11 | return magnet.split('&tr=')[0] 12 | 13 | def round_size(size: str): 14 | """Round file size to 2 decimal places.""" 15 | try: 16 | return round(float(size), 2) 17 | except (ValueError, TypeError): 18 | return 0.0 19 | 20 | def detect_season_episode_info(parsed_info: Union[Dict[str, Any], str]) -> Dict[str, Any]: 21 | """ 22 | Detect season and episode information from parsed torrent info. 23 | Returns a dictionary containing season pack info, multi-episode flag, and lists of seasons/episodes. 
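To make the contract described above concrete, here are two illustrative calls and the dictionaries the implementation below should return (the input dicts are made-up parsed results):

from scraper.functions.common import detect_season_episode_info

# Single episode (guessit-style keys): not a pack, keep the given season
detect_season_episode_info({'title': 'show', 'season': 2, 'episode': 5})
# -> {'season_pack': 'N/A', 'multi_episode': False, 'seasons': [2], 'episodes': [5]}

# Multi-season pack (PTT-style 'seasons' list, no episode numbers)
detect_season_episode_info({'title': 'show', 'seasons': [1, 2, 3]})
# -> {'season_pack': '1,2,3', 'multi_episode': False, 'seasons': [1, 2, 3], 'episodes': []}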
24 | """ 25 | result = { 26 | 'season_pack': 'Unknown', 27 | 'multi_episode': False, 28 | 'seasons': [], 29 | 'episodes': [] 30 | } 31 | 32 | if isinstance(parsed_info, str): 33 | try: 34 | parsed_info = guessit(parsed_info) 35 | except Exception as e: 36 | logging.error(f"Error parsing title with guessit: {str(e)}") 37 | return result 38 | 39 | # Check for complete series indicators 40 | title = parsed_info.get('title', '').lower() 41 | if any(indicator in title for indicator in ['complete', 'collection', 'all.seasons']): 42 | result['season_pack'] = 'Complete' 43 | return result 44 | 45 | # Get season info - PTT uses 'seasons', guessit uses 'season' 46 | season_info = parsed_info.get('seasons') or parsed_info.get('season') 47 | episode_info = parsed_info.get('episodes') or parsed_info.get('episode') 48 | 49 | # Handle season information 50 | if season_info is not None: 51 | if isinstance(season_info, list): 52 | # If we have multiple seasons, it's a season pack 53 | result['season_pack'] = ','.join(str(s) for s in sorted(set(season_info))) 54 | result['seasons'] = sorted(set(season_info)) 55 | else: 56 | # Single season - check if it's a pack or single episode 57 | if episode_info is None: 58 | # No episode number means it's a season pack 59 | result['season_pack'] = str(season_info) 60 | else: 61 | result['season_pack'] = 'N/A' # Single episode 62 | result['seasons'] = [season_info] 63 | else: 64 | # No season info 65 | if episode_info is not None: 66 | # Has episode but no season - assume season 1 67 | result['season_pack'] = 'N/A' 68 | result['seasons'] = [1] 69 | else: 70 | # No season or episode info - might be a complete pack 71 | if any(word in title.lower() for word in ['season', 'complete', 'collection']): 72 | result['season_pack'] = 'Complete' 73 | else: 74 | result['season_pack'] = 'Unknown' 75 | 76 | # Handle episode information 77 | if episode_info is not None: 78 | if isinstance(episode_info, list): 79 | result['multi_episode'] = True 80 | result['episodes'] = sorted(set(episode_info)) 81 | else: 82 | result['episodes'] = [episode_info] 83 | if not result['seasons']: 84 | result['seasons'] = [1] 85 | 86 | #logging.debug(f"Season/episode detection for title '{title}': {result}") 87 | return result 88 | -------------------------------------------------------------------------------- /scraper/functions/deduplicate_results.py: -------------------------------------------------------------------------------- 1 | from typing import List, Dict, Any 2 | from scraper.functions.common import round_size, trim_magnet 3 | 4 | def deduplicate_results(results: List[Dict[str, Any]]) -> List[Dict[str, Any]]: 5 | unique_results = {} 6 | title_size_map = {} 7 | 8 | for index, result in enumerate(results): 9 | magnet = result.get('magnet', '') 10 | title = result.get('title', '').lower() # Convert to lowercase for case-insensitive comparison 11 | size = result.get('size', '') 12 | rounded_size = round_size(size) 13 | 14 | # First check: Use magnet link 15 | if magnet: 16 | trimmed_magnet = trim_magnet(magnet) 17 | unique_id = trimmed_magnet 18 | else: 19 | unique_id = f"{title}_{rounded_size}" 20 | 21 | is_duplicate = False 22 | 23 | # Check for duplicates using magnet or title_size 24 | if unique_id in unique_results: 25 | is_duplicate = True 26 | existing_result = unique_results[unique_id] 27 | elif f"{title}_{rounded_size}" in title_size_map: 28 | is_duplicate = True 29 | existing_result = title_size_map[f"{title}_{rounded_size}"] 30 | 31 | if is_duplicate: 32 | #logging.debug(f"Existing: 
'{existing_result.get('title')}', New: '{title}'") 33 | if len(result) > len(existing_result): 34 | unique_results[unique_id] = result 35 | title_size_map[f"{title}_{rounded_size}"] = result 36 | elif len(result) == len(existing_result) and result.get('seeders', 0) > existing_result.get('seeders', 0): 37 | unique_results[unique_id] = result 38 | title_size_map[f"{title}_{rounded_size}"] = result 39 | else: 40 | unique_results[unique_id] = result 41 | title_size_map[f"{title}_{rounded_size}"] = result 42 | 43 | return list(unique_results.values()) -------------------------------------------------------------------------------- /scraper/functions/logging.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from scraper.functions import * 3 | from logging_config import * 4 | 5 | def setup_scraper_logger(): 6 | 7 | scraper_logger = logging.getLogger('scraper_logger') 8 | scraper_logger.addHandler(logging.NullHandler()) 9 | scraper_logger.propagate = False 10 | 11 | return scraper_logger 12 | 13 | scraper_logger = setup_scraper_logger() 14 | 15 | def log_filter_result(title: str, resolution: str, filter_reason: str = None): 16 | if filter_reason: 17 | logging.debug(f"Release: '{title}' (Resolution: {resolution}) - Filtered out: {filter_reason}") 18 | else: 19 | logging.debug(f"Release: '{title}' (Resolution: {resolution}) - Passed filters") -------------------------------------------------------------------------------- /scraper/functions/ptt_parser.py: -------------------------------------------------------------------------------- 1 | """ 2 | Shared PTT parsing functionality for consistent parsing across the application. 3 | """ 4 | import logging 5 | from typing import Dict, Any 6 | from functools import lru_cache 7 | from PTT import parse_title 8 | 9 | @lru_cache(maxsize=1024) 10 | def parse_with_ptt(title: str) -> Dict[str, Any]: 11 | """ 12 | Parse a title using PTT with caching. 13 | Returns a standardized format that can be used across the application. 
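For orientation, this is the kind of standardized dictionary the parser below is expected to produce for a typical release name (the field values here are hypothetical; the exact output depends on PTT's parse_title):

from scraper.functions.ptt_parser import parse_with_ptt

parse_with_ptt('Show.Name.S01E03.1080p.WEB.H264-GROUP')
# Expected shape:
# {
#     'title': 'Show Name',
#     'original_title': 'Show.Name.S01E03.1080p.WEB.H264-GROUP',
#     'type': 'episode',
#     'resolution': '1080p',
#     'seasons': [1], 'episodes': [3],
#     'season': 1, 'episode': 3,
#     ...  # plus source/audio/codec/group fields when PTT detects them
# }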
14 | """ 15 | try: 16 | result = parse_title(title) 17 | logging.debug(f"PTT parsed '{title}' into: {result}") 18 | 19 | # Convert to our standard format 20 | processed = { 21 | 'title': result.get('title', title), 22 | 'original_title': title, 23 | 'type': 'movie' if not result.get('seasons') and not result.get('episodes') else 'episode', 24 | 'year': result.get('year'), 25 | 'resolution': result.get('resolution', 'Unknown'), 26 | 'source': result.get('source'), 27 | 'audio': result.get('audio'), 28 | 'codec': result.get('codec'), 29 | 'group': result.get('group'), 30 | 'seasons': result.get('seasons', []), 31 | 'episodes': result.get('episodes', []) 32 | } 33 | logging.debug(f"PTT Parser - Resolution set to: {processed['resolution']} for title: {title}") 34 | 35 | # Handle single season/episode for compatibility 36 | if len(processed['seasons']) == 1: 37 | processed['season'] = processed['seasons'][0] 38 | if len(processed['episodes']) == 1: 39 | processed['episode'] = processed['episodes'][0] 40 | 41 | return processed 42 | except Exception as e: 43 | logging.error(f"Error parsing title with PTT: {str(e)}") 44 | return { 45 | 'title': title, 46 | 'original_title': title, 47 | 'parsing_error': True 48 | } 49 | -------------------------------------------------------------------------------- /scraper/knightcrawler.py: -------------------------------------------------------------------------------- 1 | from api_tracker import api 2 | import logging 3 | import re 4 | from typing import List, Dict, Any, Tuple 5 | from settings import get_setting 6 | 7 | KNIGHTCRAWLER_URL = get_setting('Knightcrawler', 'url') 8 | 9 | def scrape_knightcrawler(imdb_id: str, title: str, year: int, content_type: str, season: int = None, episode: int = None, multi: bool = False) -> Tuple[str, List[Dict[str, Any]]]: 10 | try: 11 | url = construct_url(imdb_id, content_type, season, episode) 12 | response = fetch_data(url) 13 | 14 | if not response: 15 | logging.warning(f"No response received for IMDb ID: {imdb_id}") 16 | return url, [] 17 | 18 | if 'streams' not in response: 19 | logging.warning(f"No 'streams' key in response for IMDb ID: {imdb_id}") 20 | return url, [] 21 | 22 | parsed_results = parse_results(response['streams']) 23 | 24 | return url, parsed_results 25 | except Exception as e: 26 | logging.error(f"Error in scrape_knightcrawler: {str(e)}", exc_info=True) 27 | return "", [] 28 | 29 | def construct_url(imdb_id: str, content_type: str, season: int = None, episode: int = None) -> str: 30 | opts = "sort=qualitysize|qualityfilter=480p,scr,cam" 31 | if content_type == "movie": 32 | return f"{KNIGHTCRAWLER_URL}/{opts}/stream/movie/{imdb_id}.json" 33 | elif content_type == "episode" and season is not None and episode is not None: 34 | return f"{KNIGHTCRAWLER_URL}/{opts}/stream/series/{imdb_id}:{season}:{episode}.json" 35 | elif content_type == "episode": 36 | return f"{KNIGHTCRAWLER_URL}/{opts}/stream/series/{imdb_id}.json" 37 | else: 38 | return "" 39 | 40 | def fetch_data(url: str) -> Dict: 41 | try: 42 | response = api.get(url) 43 | if response.status_code == 200: 44 | return response.json() 45 | except Exception as e: 46 | logging.error(f"Error fetching data: {str(e)}", exc_info=True) 47 | return {} 48 | 49 | def parse_seeds(title: str) -> int: 50 | seeds_match = re.search(r'👤\s*(\d+)', title) 51 | return int(seeds_match.group(1)) if seeds_match else 0 52 | 53 | def parse_source(name: str) -> str: 54 | return name.split('\n')[0].strip() if name else "unknown" 55 | 56 | def parse_results(streams: 
List[Dict[str, Any]]) -> List[Dict[str, Any]]: 57 | results = [] 58 | for stream in streams: 59 | try: 60 | title = stream.get('title', '') 61 | title_parts = title.split('\n') 62 | 63 | name = title_parts[1].strip() if len(title_parts) > 1 else '' 64 | size_info = title_parts[2].strip() if len(title_parts) > 2 else '' 65 | 66 | size = parse_size(size_info) 67 | #quality = parse_quality(stream.get('name', '')) 68 | 69 | info_hash = stream.get("infoHash", "") 70 | magnet_link = f'magnet:?xt=urn:btih:{info_hash}' 71 | if stream.get('fileIdx') is not None: 72 | magnet_link += f'&dn={name}&so={stream["fileIdx"]}' 73 | 74 | results.append({ 75 | 'title': name, 76 | 'size': size, 77 | 'source': 'Knightcrawler', 78 | 'magnet': magnet_link 79 | }) 80 | except Exception as e: 81 | logging.error(f"Error parsing result: {str(e)}") 82 | continue 83 | return results 84 | 85 | def parse_size(size_info: str) -> float: 86 | size_match = re.search(r'([\d.]+)\s*(\w+)', size_info) 87 | if size_match: 88 | size, unit = size_match.groups() 89 | size = float(size) 90 | if unit.lower() == 'gb': 91 | return size 92 | elif unit.lower() == 'mb': 93 | return size / 1024 94 | return 0 95 | 96 | def parse_quality(name: str) -> str: 97 | quality_match = re.search(r'\n(.+)$', name) 98 | return quality_match.group(1).strip() if quality_match else "unknown" 99 | -------------------------------------------------------------------------------- /scraper/prowlarr.py: -------------------------------------------------------------------------------- 1 | from api_tracker import api 2 | import logging 3 | from typing import List, Dict, Any, Tuple 4 | from settings import load_config 5 | from urllib.parse import quote 6 | 7 | def scrape_prowlarr_instances(instances: List[Tuple[str, Dict[str, Any]]], imdb_id: str, title: str, year: int, content_type: str, season: int = None, episode: int = None, multi: bool = False) -> List[Dict[str, Any]]: 8 | all_results = [] 9 | 10 | for instance, settings in instances: 11 | 12 | try: 13 | instance_results = scrape_prowlarr_instance(instance, settings, imdb_id, title, year, content_type, season, episode, multi) 14 | all_results.extend(instance_results) 15 | except Exception as e: 16 | logging.error(f"Error scraping Prowlarr instance '{instance}': {str(e)}", exc_info=True) 17 | 18 | return all_results 19 | 20 | def scrape_prowlarr_instance(instance: str, settings: Dict[str, Any], imdb_id: str, title: str, year: int, content_type: str, season: int = None, episode: int = None, multi: bool = False) -> List[Dict[str, Any]]: 21 | prowlarr_url = settings.get('url', '') 22 | prowlarr_api = settings.get('api', '') 23 | 24 | if content_type.lower() == 'movie': 25 | params = f"{title} {year}" 26 | else: 27 | params = f"{title}" 28 | if season is not None: 29 | params = f"{params}.s{season:02d}" 30 | if episode is not None and not multi: 31 | params = f"{params}e{episode:02d}" 32 | 33 | headers = {'X-Api-Key': prowlarr_api, 'accept': 'application/json'} 34 | encoded_params = quote(params) 35 | search_endpoint = f"{prowlarr_url}/api/v1/search?query={encoded_params}&type=search&limit=1000&offset=0" 36 | full_url = f"{search_endpoint}" 37 | 38 | logging.debug(f"Attempting to access Prowlarr API for {instance} with URL: {full_url}") 39 | 40 | try: 41 | response = api.get(full_url, headers=headers, timeout=60) 42 | 43 | logging.debug(f"Prowlarr API status code for {instance}: {response.status_code}") 44 | 45 | if response.status_code == 200: 46 | try: 47 | data = response.json() 48 | return 
parse_prowlarr_results(data[:], instance) 49 | except api.exceptions.JSONDecodeError as json_error: 50 | logging.error(f"Failed to parse JSON response for {instance}: {str(json_error)}") 51 | return [] 52 | else: 53 | logging.error(f"Prowlarr API error for {instance}: Status code {response.status_code}") 54 | return [] 55 | except Exception as e: 56 | logging.error(f"Error in scrape_prowlarr_instance for {instance}: {str(e)}", exc_info=True) 57 | return [] 58 | 59 | def parse_prowlarr_results(data: List[Dict[str, Any]], instance: str) -> List[Dict[str, Any]]: 60 | results = [] 61 | for item in data: 62 | if item.get('indexer') is not None and item.get('size') is not None: 63 | if 'infoHash' in item: 64 | result = { 65 | 'title': item.get('title', 'N/A'), 66 | 'size': item.get('size', 0) / (1024 * 1024 * 1024), # Convert to GB 67 | 'source': f"{instance} - {item.get('indexer', 'N/A')}", 68 | 'magnet': f"magnet:?xt=urn:btih:{item.get('infoHash', '')}", 69 | 'seeders': item.get('seeders', 0) 70 | } 71 | results.append(result) 72 | return results 73 | -------------------------------------------------------------------------------- /scraper/zilean.py: -------------------------------------------------------------------------------- 1 | from api_tracker import api, requests 2 | import logging 3 | from typing import List, Dict, Any, Tuple 4 | from settings import load_config 5 | from urllib.parse import urlencode 6 | 7 | def scrape_zilean_instance(instance: str, settings: Dict[str, Any], imdb_id: str, title: str, year: int, content_type: str, season: int = None, episode: int = None, multi: bool = False) -> List[Dict[str, Any]]: 8 | zilean_url = settings.get('url', '') 9 | if not zilean_url: 10 | logging.warning(f"Zilean URL is not set or invalid for instance {instance}. 
Skipping.") 11 | return [] 12 | 13 | params = {'Query': title} 14 | 15 | if content_type.lower() == 'movie': 16 | params['Year'] = year 17 | else: 18 | if season is not None: 19 | params['Season'] = season 20 | if episode is not None and not multi: 21 | params['Episode'] = episode 22 | 23 | search_endpoint = f"{zilean_url}/dmm/filtered" 24 | encoded_params = urlencode(params) 25 | full_url = f"{search_endpoint}?{encoded_params}" 26 | 27 | try: 28 | response = api.get(full_url, headers={'accept': 'application/json'}) 29 | 30 | if response.status_code == 200: 31 | try: 32 | data = response.json() 33 | return parse_zilean_results(data, instance) 34 | except api.exceptions.JSONDecodeError as json_error: 35 | logging.error(f"Failed to parse JSON response for {instance}: {str(json_error)}") 36 | return [] 37 | else: 38 | logging.error(f"Zilean API error for {instance}: Status code {response.status_code}") 39 | return [] 40 | except api.exceptions.RequestException as e: 41 | logging.error(f"Error in scrape_zilean_instance for {instance}: {str(e)}", exc_info=True) 42 | return [] 43 | 44 | def parse_zilean_results(data: List[Dict[str, Any]], instance: str) -> List[Dict[str, Any]]: 45 | results = [] 46 | for item in data: 47 | size = item.get('size', 0) 48 | # Convert size to float if it's a string, otherwise use as is 49 | size_gb = float(size) / (1024 * 1024 * 1024) if isinstance(size, str) else size / (1024 * 1024 * 1024) 50 | 51 | result = { 52 | 'title': item.get('raw_title', 'N/A'), 53 | 'size': round(size_gb, 2), # Round to 2 decimal places 54 | 'source': f'{instance}', 55 | 'magnet': f"magnet:?xt=urn:btih:{item.get('info_hash', '')}" 56 | } 57 | results.append(result) 58 | return results -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | from cx_Freeze import setup, Executable 4 | 5 | # Read version from version.txt 6 | with open('version.txt', 'r') as f: 7 | version = f.read().strip() 8 | 9 | # Get the base directory 10 | base_dir = os.path.abspath(os.path.dirname(__file__)) 11 | 12 | # Function to create data_files list with proper paths 13 | def get_data_files(directory): 14 | data_files = [] 15 | for root, dirs, files in os.walk(directory): 16 | for file in files: 17 | source = os.path.join(root, file) 18 | # Get the relative path from the base directory 19 | rel_path = os.path.relpath(source, base_dir) 20 | # Get the target directory 21 | target_dir = os.path.dirname(rel_path) 22 | data_files.append((target_dir, [source])) 23 | return data_files 24 | 25 | # Collect all data files 26 | data_dirs = [ 27 | 'templates', 'cli_battery', 'database', 'content_checkers', 28 | 'debrid', 'metadata', 'queues', 'routes', 'scraper', 29 | 'static', 'utilities' 30 | ] 31 | 32 | data_files = [] 33 | for directory in data_dirs: 34 | data_files.extend(get_data_files(directory)) 35 | 36 | # Add individual files 37 | additional_files = [ 38 | 'version.txt', 39 | 'tooltip_schema.json', 40 | os.path.join('static', 'white-icon-32x32.png'), 41 | os.path.join('static', 'white-icon-32x32.ico'), 42 | ] 43 | 44 | for file in additional_files: 45 | if os.path.exists(file): 46 | target_dir = os.path.dirname(file) if os.path.dirname(file) else '.' 
47 | data_files.append((target_dir, [file])) 48 | 49 | # Build options 50 | build_exe_options = { 51 | "packages": [ 52 | "os", "flask", "sqlalchemy", "requests", "aiohttp", "bs4", 53 | "grpc", "guessit", "urwid", "plexapi", "PIL", "supervisor", 54 | "psutil", "api_tracker", "multiprocessing", "bencodepy", "tenacity", 55 | "appdirs", "pytrakt", "tzlocal" 56 | ], 57 | "includes": [ 58 | "database", 59 | "database.core", 60 | "database.collected_items", 61 | "database.blacklist", 62 | "database.schema_management", 63 | "database.poster_management", 64 | "database.statistics", 65 | "database.wanted_items", 66 | "database.database_reading", 67 | "database.database_writing", 68 | "content_checkers.trakt", 69 | "logging_config", 70 | "main", 71 | "metadata.Metadata", 72 | ], 73 | "include_files": data_files, 74 | "excludes": ["tkinter"], 75 | } 76 | 77 | # Executable configuration 78 | target = Executable( 79 | script="windows_wrapper.py", 80 | target_name="cli_debrid.exe", 81 | base=None, # "Win32GUI" if sys.platform == "win32" else None, 82 | icon="static/white-icon-32x32.ico" if os.path.exists("static/white-icon-32x32.ico") else None, 83 | ) 84 | 85 | setup( 86 | name="cli_debrid", 87 | version=version, 88 | description="CLI Debrid Application", 89 | options={"build_exe": build_exe_options}, 90 | executables=[target] 91 | ) -------------------------------------------------------------------------------- /static/android-chrome-192x192.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/android-chrome-192x192.png -------------------------------------------------------------------------------- /static/android-chrome-512x512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/android-chrome-512x512.png -------------------------------------------------------------------------------- /static/apple-touch-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/apple-touch-icon.png -------------------------------------------------------------------------------- /static/cmd-terminal-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/cmd-terminal-icon.png -------------------------------------------------------------------------------- /static/css/manual_blacklist.css: -------------------------------------------------------------------------------- 1 | select#media_type, 2 | input#imdb_id, button.btn.btn-primary, button.btn.btn-danger { 3 | padding: 10px; 4 | background-color: #3a3a3a; 5 | border: 1px solid #555; 6 | color: #fff; 7 | border-radius: 4px; 8 | font-size: 14px; 9 | transition: border-color 0.3s, box-shadow 0.3s; 10 | height: 38px; 11 | } 12 | 13 | .input-group { 14 | display: flex; 15 | align-items: stretch; 16 | gap: 10px; 17 | width: 500px; 18 | } 19 | 20 | .input-group-text { 21 | color: #fff; 22 | padding: 0 15px; 23 | display: flex; 24 | align-items: center; 25 | border-radius: 4px; 26 | height: 38px; 27 | width: 100px; 28 | } 29 | 30 | .input-group .form-control { 31 | flex: 2; 32 | } 33 | 34 | .input-group .btn { 35 | flex: 1; 36 | white-space: 
nowrap; 37 | display: flex; 38 | align-items: center; 39 | justify-content: center; 40 | gap: 5px; 41 | } 42 | 43 | .toggle-seasons { 44 | text-decoration: none; 45 | color: #6c757d; 46 | padding: 0 5px; 47 | transition: transform 0.2s; 48 | } 49 | 50 | .toggle-seasons:hover { 51 | text-decoration: none; 52 | color: #fff; 53 | } 54 | 55 | .toggle-seasons.active i { 56 | transform: rotate(180deg); 57 | } 58 | 59 | .form-check { 60 | margin-bottom: 5px; 61 | } 62 | 63 | .form-check.mb-2 { 64 | display: flex; 65 | align-items: center; 66 | gap: 8px; 67 | margin-bottom: 10px !important; 68 | } 69 | 70 | .form-check-label { 71 | margin-right: 5px; 72 | } 73 | 74 | .season-check { 75 | margin-left: 20px; 76 | } 77 | 78 | .seasons-list { 79 | max-height: 300px; 80 | overflow-y: auto; 81 | padding: 10px; 82 | border: 1px solid #dee2e6; 83 | border-radius: 4px; 84 | margin: 10px 0; 85 | } 86 | 87 | .btn.btn-link.text-danger.p-0 { 88 | background-color: rgba(255, 255, 255, 0.664); 89 | } 90 | 91 | button.btn.btn-danger.btn-sm { 92 | background-color: #dc3545; 93 | } 94 | 95 | button.btn.btn-danger.btn-sm:hover { 96 | background-color: #c82333; 97 | } 98 | 99 | button.btn.btn-primary, button.btn.btn-danger:hover { 100 | background-color: #45a049; 101 | } 102 | -------------------------------------------------------------------------------- /static/css/queues.css: -------------------------------------------------------------------------------- 1 | /* Queue Container Styles */ 2 | .queue-container { 3 | display: flex; 4 | flex-direction: column; 5 | } 6 | 7 | .queue { 8 | margin: 10px 0; 9 | background: #444; 10 | padding: 15px; 11 | border-radius: 5px; 12 | } 13 | 14 | .queue-title { 15 | font-size: 18px; 16 | font-weight: bold; 17 | cursor: pointer; 18 | display: flex; 19 | justify-content: space-between; 20 | align-items: center; 21 | } 22 | 23 | .queue-title:hover { 24 | color: #007bff; 25 | } 26 | 27 | .queue-count { 28 | margin-left: auto; 29 | } 30 | 31 | .queue-items { 32 | display: none; 33 | margin-top: 10px; 34 | } 35 | 36 | .item { 37 | margin-left: 20px; 38 | padding: 5px 0; 39 | word-wrap: break-word; 40 | word-break: break-all; 41 | overflow-wrap: break-word; 42 | max-width: 100%; 43 | } 44 | 45 | .item-content { 46 | display: flex; 47 | flex-direction: column; 48 | gap: 5px; 49 | } 50 | 51 | .item-detail { 52 | display: block; 53 | padding-left: 20px; 54 | } 55 | 56 | .filename-toggle { 57 | cursor: pointer; 58 | margin: 0 5px; 59 | font-size: 1em; 60 | transition: transform 0.3s ease; 61 | } 62 | 63 | .filename-toggle.active i { 64 | transform: rotate(45deg); 65 | } 66 | 67 | .filename-content { 68 | display: none; 69 | } 70 | 71 | .item-progress { 72 | margin-left: 10px; 73 | display: inline-flex; 74 | align-items: center; 75 | min-width: 200px; 76 | } 77 | 78 | .progress { 79 | flex-grow: 1; 80 | height: 20px; 81 | background-color: #444; 82 | border-radius: 4px; 83 | overflow: hidden; 84 | position: relative; 85 | } 86 | 87 | .progress-bar { 88 | height: 100%; 89 | background-color: #4CAF50; 90 | transition: width 0.3s ease; 91 | min-width: 24px; 92 | } 93 | 94 | .progress-text { 95 | position: absolute; 96 | left: 8px; 97 | top: 50%; 98 | transform: translateY(-50%); 99 | color: white; 100 | white-space: nowrap; 101 | z-index: 1; 102 | } 103 | 104 | .group-progress { 105 | margin-top: 10px; 106 | margin-left: 20px; 107 | } 108 | 109 | .checking-group h5 { 110 | color: #aaa; 111 | margin-bottom: 15px; 112 | border-bottom: 1px solid #666; 113 | padding-bottom: 5px; 114 | } 115 | 116 
| #loading-indicator { 117 | position: absolute; 118 | left: 50%; 119 | top: 50%; 120 | transform: translate(-50%, -50%); 121 | text-align: center; 122 | } 123 | 124 | #loading-indicator p { 125 | margin-top: 10px; 126 | margin-bottom: 0; 127 | } 128 | 129 | .container { 130 | position: relative; 131 | min-height: 200px; 132 | } 133 | 134 | .physical-release { 135 | color: #8a8a8a; 136 | font-style: italic; 137 | margin-left: 5px; 138 | } -------------------------------------------------------------------------------- /static/css/reverse_parser.css: -------------------------------------------------------------------------------- 1 | .version-terms-container { 2 | display: flex; 3 | flex-direction: column; 4 | gap: 10px; 5 | } 6 | .version-terms-input { 7 | display: flex; 8 | align-items: center; 9 | background-color: #444; 10 | padding: 10px; 11 | border-radius: 5px; 12 | transition: background-color 0.3s ease; 13 | } 14 | .version-name { 15 | font-weight: bold; 16 | min-width: 100px; 17 | margin-right: 10px; 18 | } 19 | .version-terms { 20 | flex-grow: 1; 21 | margin-right: 10px; 22 | } 23 | .move-btn { 24 | background-color: #555; 25 | color: white; 26 | border: none; 27 | padding: 5px 10px; 28 | margin: 0 2px; 29 | cursor: pointer; 30 | transition: background-color 0.3s ease; 31 | } 32 | .move-btn:hover { 33 | background-color: #777; 34 | } 35 | .move-btn:disabled { 36 | opacity: 0.5; 37 | cursor: not-allowed; 38 | } 39 | .default-version-container { 40 | margin-top: 20px; 41 | } 42 | @keyframes moveUp { 43 | from { transform: translateY(0); } 44 | to { transform: translateY(-100%); } 45 | } 46 | @keyframes moveDown { 47 | from { transform: translateY(0); } 48 | to { transform: translateY(100%); } 49 | } 50 | .moving-up { 51 | animation: moveUp 0.3s ease-in-out; 52 | } 53 | .moving-down { 54 | animation: moveDown 0.3s ease-in-out; 55 | } 56 | 57 | .settings-input { 58 | width: 100%; 59 | padding: 8px; 60 | background-color: #444; 61 | border: 1px solid #666; 62 | color: #fff; 63 | border-radius: 3px; 64 | box-sizing: border-box; 65 | font-family: inherit; 66 | } 67 | 68 | /* If there's a striped table effect, override it for matched rows */ 69 | #items-table tbody tr.matched:nth-child(even), 70 | #items-table tbody tr.matched:nth-child(odd) { 71 | background-color: #20415f !important; 72 | } 73 | 74 | /* If there's a striped table effect, override it for matched rows */ 75 | #items-table tbody tr.matched:nth-child(even):hover, 76 | #items-table tbody tr.matched:nth-child(odd):hover { 77 | background-color: #42678a !important; 78 | } 79 | 80 | /* Default version rows (unmatched) */ 81 | #items-table tbody tr.default-version:nth-child(even), 82 | #items-table tbody tr.default-version:nth-child(odd) { 83 | background-color: #783d3b !important; /* Orange */ 84 | } 85 | 86 | #items-table tbody tr.default-version:nth-child(even):hover, 87 | #items-table tbody tr.default-version:nth-child(odd):hover { 88 | background-color: #aa5151 !important; /* Dark Orange */ 89 | } 90 | 91 | .assigned-version { 92 | font-weight: bold; 93 | } 94 | 95 | .save-button, .filter-button { 96 | margin-top: 10px; 97 | padding: 15px 32px; 98 | font-size: 16px; 99 | background-color: #4CAF50; 100 | border: none; 101 | color: white; 102 | padding: 5px 10px; 103 | text-align: center; 104 | text-decoration: none; 105 | display: inline-block; 106 | font-size: 14px; 107 | margin-bottom: 10px; 108 | cursor: pointer; 109 | border-radius: 4px; 110 | transition: background-color 0.3s; 111 | } 112 | 
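The matched / default-version row classes styled above correspond to the two outcomes of reverse parsing: a filename either hits one of the configured version_terms or falls back to default_version (see the Reverse Parser block in sample.env). A rough, hypothetical sketch of that matching — not the actual reverse_parser.py implementation:

def assign_version(filename, version_terms, default_version):
    """Return (version, matched): the first version whose terms appear in the filename."""
    lowered = filename.lower()
    for version, terms in version_terms.items():
        if any(term.lower() in lowered for term in terms):
            return version, True    # row would get the 'matched' class
    return default_version, False   # row would get the 'default-version' class

# Example: '2160p' matches the hypothetical '4K' version's terms
print(assign_version('Movie.2023.2160p.HDR10.mkv', {'4K': ['2160p']}, 'Default'))
# -> ('4K', True)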
-------------------------------------------------------------------------------- /static/css/scraper_trending.css: -------------------------------------------------------------------------------- 1 | .trending-container { 2 | max-width: 1600px; 3 | margin: 0 auto; 4 | padding-top: 20px; 5 | } 6 | 7 | .trending-movies { 8 | background-color: #1e1e1e; 9 | border-radius: 8px; 10 | padding: 20px; 11 | box-shadow: 0 2px 4px rgba(0, 0, 0, 0.2); 12 | margin-bottom: 30px; 13 | } 14 | 15 | .trending-shows { 16 | background-color: #1e1e1e; 17 | border-radius: 8px; 18 | padding: 20px; 19 | box-shadow: 0 2px 4px rgba(0, 0, 0, 0.2); 20 | } 21 | 22 | .trending-header { 23 | display: flex; 24 | justify-content: space-between; 25 | align-items: center; 26 | margin-bottom: 10px; 27 | } 28 | 29 | .trending-title { 30 | font-size: 24px; 31 | font-weight: bold; 32 | color: #ffffff; 33 | } 34 | 35 | .trending-navigation { 36 | display: flex; 37 | gap: 10px; 38 | } 39 | 40 | .trending-nav-button { 41 | background-color: transparent; 42 | color: white; 43 | border: none; 44 | border-radius: 50%; 45 | width: 40px; 46 | height: 40px; 47 | display: flex; 48 | justify-content: center; 49 | align-items: center; 50 | cursor: pointer; 51 | transition: color 0.3s ease; 52 | } 53 | 54 | .trending-nav-button:hover { 55 | opacity: 0.8; 56 | } 57 | 58 | .trending-nav-button:disabled { 59 | color: #4a4a4a; 60 | cursor: not-allowed; 61 | } 62 | 63 | #trending-rating { 64 | position: absolute; 65 | margin: 5px; 66 | width: 26px; 67 | height: 16px; 68 | color: rgba(255, 255, 255, 0.8); 69 | font-size: 11px; 70 | font-weight: bold; 71 | line-height: 16px; 72 | text-align: center; 73 | border-radius: 11px; 74 | background-color: #ed1c23c0; 75 | z-index: 1; 76 | } 77 | 78 | #trending-watchers { 79 | position: absolute; 80 | right: 0; 81 | margin: 5px; 82 | width: 52px; 83 | height: 16px; 84 | color: rgba(255, 255, 255, 0.8); 85 | font-size: 11px; 86 | font-weight: bold; 87 | line-height: 16px; 88 | text-align: center; 89 | border-radius: 11px; 90 | background-color: rgb(0 0 0 / 72%); 91 | z-index: 1; 92 | } -------------------------------------------------------------------------------- /static/css/settings.css: -------------------------------------------------------------------------------- 1 | /* Common styles */ 2 | .settings-container { 3 | width: 95%; 4 | max-width: 1200px; 5 | margin: 0 auto; 6 | padding: 20px; 7 | } 8 | 9 | /* Description styles */ 10 | .settings-description { 11 | color: #aaa; 12 | font-size: 0.9em; 13 | line-height: 1.4; 14 | margin: 8px 0; 15 | } 16 | 17 | .settings-description p { 18 | margin: 0 0 8px 0; 19 | } 20 | 21 | .settings-description ul { 22 | margin: 4px 0 8px 24px; 23 | padding: 0; 24 | list-style-type: disc; 25 | } 26 | 27 | .settings-description li { 28 | margin: 4px 0; 29 | padding: 0; 30 | display: list-item; 31 | } 32 | 33 | .settings-description a { 34 | color: #dfdfdf; 35 | } 36 | 37 | /* Form group styles */ 38 | .settings-form-group { 39 | margin-bottom: 15px; 40 | padding: 10px; 41 | border-radius: 4px; 42 | } 43 | 44 | /* Checkbox specific styles */ 45 | .settings-form-group:has(input[type="checkbox"]) { 46 | margin-bottom: 0; 47 | padding: 5px 10px; 48 | } 49 | 50 | .settings-form-group:has(input[type="checkbox"]) .settings-input-wrapper { 51 | display: flex; 52 | align-items: center; 53 | gap: 8px; 54 | } 55 | 56 | .settings-form-group:has(input[type="checkbox"]) .settings-description { 57 | margin: 0; 58 | display: inline; 59 | } 60 | 61 | 
.settings-form-group:has(input[type="checkbox"]) .settings-description p { 62 | margin: 0; 63 | display: inline; 64 | } 65 | 66 | .settings-title { 67 | font-weight: 500; 68 | margin-bottom: 5px; 69 | display: block; 70 | color: #fff; 71 | } 72 | 73 | .settings-input { 74 | width: 100%; 75 | padding: 8px; 76 | background-color: #444; 77 | border: 1px solid #666; 78 | color: #fff; 79 | border-radius: 3px; 80 | box-sizing: border-box; 81 | font-family: inherit; 82 | } 83 | 84 | /* Delete button styles */ 85 | .delete-scraper-btn, 86 | .delete-source-btn, 87 | .delete-version-btn { 88 | background-color: #f44336; 89 | color: white; 90 | border: none !important; 91 | padding: 5px 10px; 92 | border-radius: 4px; 93 | cursor: pointer; 94 | transition: all 0.3s ease; 95 | display: inline-flex; 96 | align-items: center; 97 | justify-content: center; 98 | height: 28px; 99 | margin: 0 5px; 100 | } 101 | 102 | .delete-scraper-btn:hover:not(:disabled), 103 | .delete-source-btn:hover:not(:disabled), 104 | .delete-version-btn:hover:not(:disabled) { 105 | background-color: #d32f2f; 106 | } 107 | 108 | /* Disabled delete button styles */ 109 | .settings-section-header button.delete-scraper-btn[disabled], 110 | .settings-section-header button.delete-scraper-btn:disabled, 111 | button.delete-scraper-btn[disabled], 112 | button.delete-scraper-btn:disabled, 113 | .delete-scraper-btn[disabled], 114 | .delete-scraper-btn:disabled { 115 | background-color: #666666 !important; 116 | color: #999999 !important; 117 | cursor: not-allowed !important; 118 | opacity: 0.7 !important; 119 | pointer-events: none !important; 120 | border: none !important; 121 | transition: none !important; 122 | box-shadow: none !important; 123 | } 124 | 125 | /* Environment override icon in delete buttons */ 126 | .delete-scraper-btn .env-override, 127 | .delete-source-btn .env-override, 128 | .delete-version-btn .env-override { 129 | font-size: 0.9em; 130 | margin-left: 5px; 131 | color: #ffa500; 132 | display: inline-flex; 133 | align-items: center; 134 | } 135 | 136 | /* Hide hybrid mode and jackett seeders only checkboxes */ 137 | .settings-form-group.hybrid-mode-group, 138 | .settings-form-group.jackett-seeders-only-group { 139 | display: none !important; 140 | } 141 | 142 | /* Also hide any form group with hybrid_mode or jackett_seeders_only checkbox */ 143 | .settings-form-group:has(#scraping-hybrid_mode), 144 | .settings-form-group:has(input[data-hybrid-mode="true"]), 145 | .settings-form-group:has(#scraping-jackett_seeders_only), 146 | .settings-form-group:has(input[data-jackett-seeders-only="true"]) { 147 | display: none !important; 148 | } 149 | 150 | /* Rest of your existing styles... 
*/ -------------------------------------------------------------------------------- /static/css/vidstack.css: -------------------------------------------------------------------------------- 1 | media-player { 2 | --media-brand: #f5f5f5; 3 | --media-focus-ring: #4f46e5; 4 | --media-focus-ring-offset: 2px; 5 | aspect-ratio: 16/9; 6 | border-radius: 4px; 7 | color-scheme: dark; 8 | } 9 | 10 | media-player[data-view-type='audio'] { 11 | --media-aspect-ratio: 16/9; 12 | } 13 | 14 | .vds-slider-track { 15 | background-color: rgb(255 255 255 / 0.2); 16 | } 17 | 18 | .vds-slider-track-fill { 19 | background-color: rgb(255 255 255 / 0.8); 20 | } 21 | 22 | .vds-slider-thumb { 23 | background-color: rgb(255 255 255); 24 | } 25 | 26 | .vds-time-slider .vds-slider-thumb { 27 | display: none; 28 | } 29 | 30 | .vds-time-slider:hover .vds-slider-thumb { 31 | display: block; 32 | } 33 | -------------------------------------------------------------------------------- /static/favicon-128x128.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/favicon-128x128.png -------------------------------------------------------------------------------- /static/favicon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/favicon-16x16.png -------------------------------------------------------------------------------- /static/favicon-192x192.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/favicon-192x192.png -------------------------------------------------------------------------------- /static/favicon-256x256-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/favicon-256x256-white.png -------------------------------------------------------------------------------- /static/favicon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/favicon-32x32.png -------------------------------------------------------------------------------- /static/favicon-48x48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/favicon-48x48.png -------------------------------------------------------------------------------- /static/favicon-512x512-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/favicon-512x512-white.png -------------------------------------------------------------------------------- /static/favicon-512x512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/favicon-512x512.png -------------------------------------------------------------------------------- /static/favicon.ico: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/favicon.ico -------------------------------------------------------------------------------- /static/icon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/icon-16x16.png -------------------------------------------------------------------------------- /static/icon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/icon-32x32.png -------------------------------------------------------------------------------- /static/image/placeholder-horizontal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/image/placeholder-horizontal.png -------------------------------------------------------------------------------- /static/image/placeholder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/image/placeholder.png -------------------------------------------------------------------------------- /static/images/imdb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/images/imdb.png -------------------------------------------------------------------------------- /static/images/placeholder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/images/placeholder.png -------------------------------------------------------------------------------- /static/images/tmdb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/images/tmdb.png -------------------------------------------------------------------------------- /static/images/trakt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/images/trakt.png -------------------------------------------------------------------------------- /static/js/loading.js: -------------------------------------------------------------------------------- 1 | // Add CSS styles for loading 2 | const loadingStyles = document.createElement('style'); 3 | loadingStyles.textContent = ` 4 | .loading { 5 | display: none; 6 | position: fixed; 7 | top: 0; 8 | left: 0; 9 | width: 100%; 10 | height: 100%; 11 | background-color: rgba(0, 0, 0, 0.5); 12 | z-index: 9999; 13 | justify-content: center; 14 | align-items: center; 15 | } 16 | 17 | .loading-content { 18 | background-color: #333; 19 | padding: 20px; 20 | border-radius: 5px; 21 | text-align: center; 22 | } 23 | 24 | .loading-content p { 25 | color: #f4f4f4; 26 | margin-bottom: 15px; 27 | } 28 | 29 | .spinner { 30 | border: 4px solid #f3f3f3; 31 | border-top: 4px solid #3498db; 32 | 
border-radius: 50%; 33 | width: 40px; 34 | height: 40px; 35 | animation: spin 1s linear infinite; 36 | margin: 0 auto 10px; 37 | } 38 | 39 | @keyframes spin { 40 | 0% { transform: rotate(0deg); } 41 | 100% { transform: rotate(360deg); } 42 | } 43 | 44 | .close-loading { 45 | background-color: #007bff; 46 | color: white; 47 | border: none; 48 | padding: 12px 20px; 49 | text-align: center; 50 | text-decoration: none; 51 | display: inline-block; 52 | font-size: 16px; 53 | margin: 4px 2px; 54 | cursor: pointer; 55 | border-radius: 4px; 56 | transition: background-color 0.3s; 57 | } 58 | 59 | .close-loading:hover { 60 | background-color: #0056b3; 61 | } 62 | `; 63 | document.head.appendChild(loadingStyles); 64 | 65 | // Global loading object 66 | const Loading = { 67 | element: null, 68 | messageElement: null, 69 | 70 | init: function() { 71 | // Create loading element if it doesn't exist 72 | if (!this.element) { 73 | this.element = document.createElement('div'); 74 | this.element.id = 'loading'; 75 | this.element.className = 'loading'; 76 | this.element.innerHTML = ` 77 |
<div class="loading-content">
 78 |                     <div class="spinner"></div>
 79 |                     <div class="loading-message">
 80 |                         <p>Processing command in background...</p>
 81 |                     </div>
 82 |                     <div class="loading-details"></div>
 83 |                     <button class="close-loading">Close</button>
 84 |                 </div>
85 | `; 86 | document.body.appendChild(this.element); 87 | 88 | // Store reference to message elements 89 | this.messageElement = this.element.querySelector('.loading-message p'); 90 | this.detailsElement = this.element.querySelector('.loading-details'); 91 | 92 | // Add click handler for close button 93 | this.element.querySelector('.close-loading').addEventListener('click', () => { 94 | this.hide(); 95 | }); 96 | } 97 | }, 98 | 99 | show: function(message, details) { 100 | this.init(); 101 | if (message && this.messageElement) { 102 | this.messageElement.textContent = message; 103 | } 104 | if (details && this.detailsElement) { 105 | this.detailsElement.textContent = details; 106 | } else if (this.detailsElement) { 107 | this.detailsElement.textContent = ''; 108 | } 109 | this.element.style.display = 'flex'; 110 | }, 111 | 112 | updateMessage: function(message, details) { 113 | if (this.messageElement && message) { 114 | this.messageElement.textContent = message; 115 | } 116 | if (this.detailsElement) { 117 | if (details) { 118 | this.detailsElement.textContent = details; 119 | } else { 120 | this.detailsElement.textContent = ''; 121 | } 122 | } 123 | }, 124 | 125 | hide: function() { 126 | if (this.element) { 127 | this.element.style.display = 'none'; 128 | if (this.detailsElement) { 129 | this.detailsElement.textContent = ''; 130 | } 131 | } 132 | } 133 | }; -------------------------------------------------------------------------------- /static/loadingimage.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/loadingimage.gif -------------------------------------------------------------------------------- /static/site.webmanifest: -------------------------------------------------------------------------------- 1 | { 2 | "name": "cli_debrid", 3 | "short_name": "cli_debrid", 4 | "icons": [ 5 | { 6 | "src": "/static/android-chrome-192x192.png", 7 | "sizes": "192x192", 8 | "type": "image/png" 9 | }, 10 | { 11 | "src": "/static/android-chrome-512x512.png", 12 | "sizes": "512x512", 13 | "type": "image/png" 14 | } 15 | ], 16 | "theme_color": "#007bff", 17 | "background_color": "#ffffff", 18 | "display": "standalone" 19 | } -------------------------------------------------------------------------------- /static/white-icon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/white-icon-16x16.png -------------------------------------------------------------------------------- /static/white-icon-32x32.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/white-icon-32x32.ico -------------------------------------------------------------------------------- /static/white-icon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/static/white-icon-32x32.png -------------------------------------------------------------------------------- /supervisord.conf: -------------------------------------------------------------------------------- 1 | [supervisord] 2 | nodaemon=true 3 | logfile=/user/logs/supervisord.log 4 | user=root 5 | 6 | [program:primary_app] 7 | command=python 
/app/main.py 8 | directory=/app 9 | autostart=true 10 | autorestart=true 11 | startretries=3 12 | stderr_logfile=/user/logs/primary_app_err.log 13 | stdout_logfile=/user/logs/primary_app_out.log 14 | stdout_logfile_maxbytes=50MB 15 | stdout_logfile_backups=0 16 | stderr_logfile_maxbytes=50MB 17 | stderr_logfile_backups=0 18 | 19 | [program:secondary_app] 20 | command=python /app/cli_battery/main.py 21 | directory=/app/cli_battery 22 | autostart=true 23 | autorestart=true 24 | startretries=3 25 | stderr_logfile=/user/logs/secondary_app_err.log 26 | stdout_logfile=/user/logs/secondary_app_out.log 27 | stdout_logfile_maxbytes=50MB 28 | stdout_logfile_backups=0 29 | stderr_logfile_maxbytes=50MB 30 | stderr_logfile_backups=0 -------------------------------------------------------------------------------- /template_utils.py: -------------------------------------------------------------------------------- 1 | from markupsafe import Markup 2 | 3 | def render_settings(settings, section): 4 | html = f"
<h3>{section}</h3>" 5 |     for key, value in settings.items(): 6 |         if isinstance(value, dict): 7 |             html += render_settings(value, key) 8 |         else: 9 |             html += f""" 10 |             <div>
 11 |                 <label for="{key}">{key}:</label>
 12 |                 <input type="text" id="{key}" name="{key}" value="{value}">
 13 |             </div>
 14 |             """ 15 |     return Markup(html) 16 | 17 | def render_content_sources(settings, parent_key): 18 |     html = f"<h3>{parent_key}</h3>" 19 |     for key, value in settings.items(): 20 |         html += f"<h4>{key}</h4>" 21 |         for sub_key, sub_value in value.items(): 22 |             if sub_key == 'enabled': 23 |                 html += f""" 24 |                 <div>
 25 |                     <label for="{key}-enabled">Enabled</label>
 26 |                     <input type="checkbox"
 27 |                            id="{key}-enabled"
 28 |                            name="{key}-enabled"
 29 |                            {'checked' if sub_value else ''}>
 30 |                 </div>
 31 |                 """ 32 |             elif sub_key == 'versions': 33 |                 html += f"<h5>Versions</h5>" 34 |                 for version, version_enabled in sub_value.items(): 35 |                     html += f""" 36 |                     <div>
 37 |                         <label for="{key}-{version}">{version}</label>
 38 |                         <input type="checkbox"
 39 |                                id="{key}-{version}"
 40 |                                name="{key}-{version}"
 41 |                                {'checked' if version_enabled else ''}>
 42 |                     </div>
 43 |                     """ 44 |             else: 45 |                 html += f""" 46 |                 <div>
 47 |                     <label for="{key}-{sub_key}">{sub_key}:</label>
 48 |                     <input type="text" id="{key}-{sub_key}" name="{key}-{sub_key}" value="{sub_value}">
 49 |                 </div>
 50 |                 """ 51 |     return Markup(html) -------------------------------------------------------------------------------- /templates/api_call_summary.html: -------------------------------------------------------------------------------- 1 | <!DOCTYPE html> 2 | <html lang="en"> 3 | <head> 4 |     <meta charset="UTF-8"> 5 |     <meta name="viewport" content="width=device-width, initial-scale=1.0"> 6 |     <title>API Call Summary</title> 7 |     <style> /* inline styles (original lines 8-88) lost in extraction */ 89 |     </style> 90 | </head> 91 | <body>

API Call Summary ({{ time_frame }})

92 |
93 | 98 | 99 |
100 | 101 | 102 |
103 | 104 |
105 | 106 | 107 | 108 | 109 | {% for domain in all_domains %} 110 | 111 | {% endfor %} 112 | 113 | 114 | {% for period, domains in summary.items() %} 115 | 116 | 117 | {% for domain in all_domains %} 118 | 119 | {% endfor %} 120 | 121 | 122 | {% endfor %} 123 |
Time Period{{ domain }}Total
{{ period }}{{ domains.get(domain, 0) }}{{ domains.values() | sum }}
124 | 125 | -------------------------------------------------------------------------------- /templates/content_requestor.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | {% block title %}Request Content{% endblock %} 3 | {% block content %} 4 | 5 | 6 | 7 | 8 |
9 |
10 | 11 | 12 |
13 | 14 | 15 | 27 | 28 |
29 | 30 |
31 |
32 | 33 | 34 | 39 | 40 | 44 | 45 | {% endblock %} -------------------------------------------------------------------------------- /templates/database_pagination.html: -------------------------------------------------------------------------------- 1 | {% if alphabet %} 2 | # 3 | {% for letter in alphabet %} 4 | {{ letter }} 5 | {% endfor %} 6 | {% else %} 7 |

No pagination data available

8 | {% endif %} -------------------------------------------------------------------------------- /templates/debug_not_wanted.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | {% block content %} 4 |
5 |

Not Wanted Items

6 | 7 |
8 |
9 |

Magnets ({{ magnets|length }})

10 |
11 |
12 | {% if magnets %} 13 |
14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | {% for magnet in magnets %} 23 | 24 | 25 | 31 | 32 | {% endfor %} 33 | 34 |
HashActions
{{ magnet }} 26 |
27 | 28 | 29 |
30 |
35 |
36 | {% else %} 37 |

No magnets in not wanted list.

38 | {% endif %} 39 |
40 |
41 | 42 |
43 |
44 |

URLs ({{ urls|length }})

45 |
46 |
47 | {% if urls %} 48 |
49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | {% for url in urls %} 58 | 59 | 60 | 66 | 67 | {% endfor %} 68 | 69 |
URLActions
{{ url }} 61 |
62 | 63 | 64 |
65 |
70 |
71 | {% else %} 72 |

No URLs in not wanted list.

73 | {% endif %} 74 |
75 |
76 | 77 |
78 |
79 | 80 |
81 |
82 |
83 | {% endblock %} 84 | -------------------------------------------------------------------------------- /templates/error.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | {% block title %}Error{% endblock %} 3 | {% block content %} 4 |
5 |

Error

6 |

{{ error_message }}

7 |
8 | {% endblock %} -------------------------------------------------------------------------------- /templates/onboarding.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | {% block title %}Onboarding{% endblock %} 3 | {% block content %} 4 |
5 |

Welcome to the Onboarding Process

6 |

Let's get you set up with the basics to start using the application.

7 | Start Onboarding 8 |
9 | {% endblock %} -------------------------------------------------------------------------------- /templates/onboarding_step_6.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | {% block title %}Onboarding - Step 6{% endblock %} 3 | {% block content %} 4 |
5 |

Step 6: Confirmation

6 |

You have completed setup. You're now ready to start using cli_debrid!

7 |

For detailed usage information, please refer to our wiki.

8 |
9 | 10 | {% include 'onboarding_navigation.html' %} 11 | {% endblock %} 12 | -------------------------------------------------------------------------------- /templates/over_usage.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | {% block title %}Rate Limit Exceeded{% endblock %} 4 | 5 | {% block content %} 6 | 61 |
62 |

Rate Limit Exceeded

63 |
64 |

The application has exceeded its API rate limits and has been temporarily halted to prevent further issues.

65 |
66 | 67 |

Blocked Domains

68 |
    69 | {% for domain in blocked_domains %} 70 |
  • {{ domain }}
71 | {% else %} 72 |
  • No specific domains are currently blocked.
73 | {% endfor %} 74 |
75 | 76 |

What does this mean?

77 |

To protect our services and comply with API usage policies, we've implemented strict rate limiting. When these limits are exceeded, the application automatically stops to prevent potential abuse or overuse of external APIs.

78 | 79 |

What should I do?

80 |
    81 |
  • Wait for a short period (usually about an hour) before attempting to use the application again.
82 |
  • If you're an administrator, review the application logs to identify any unusual activity or potential issues causing excessive API calls.
83 |
  • Consider optimizing your usage patterns to reduce the frequency of API calls if this occurs regularly.
84 |
85 | 86 |

Current usage limits:

87 |
    88 |
  • Hourly limit: {{ hourly_limit }}
89 |
  • Five-minute limit: {{ five_minute_limit }}
90 |
91 | 92 | 95 |
96 | {% endblock %} 97 | 98 | {% block scripts %} 99 | 100 | {% endblock %} -------------------------------------------------------------------------------- /templates/setup_admin.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | {% block title %}Setup Admin Account{% endblock %} 3 | {% block content %} 4 |
5 |

Setup Admin Account

6 |
7 |
8 | 9 | 10 |
11 |
12 | 13 | 14 |
15 |
16 | 17 | 18 |
19 |
20 | 21 |
22 |
23 |
24 | {% endblock %}
-------------------------------------------------------------------------------- /test_torrent_status.py: --------------------------------------------------------------------------------
 1 | #!/usr/bin/env python3
 2 | import logging
 3 | from debrid.common.utils import format_torrent_status
 4 | from debrid.real_debrid.client import RealDebridProvider
 5 | from api_tracker import setup_api_logging, api
 6 | 
 7 | setup_api_logging()
 8 | 
 9 | def main():
10 |     # Configure logging
11 |     logging.basicConfig(level=logging.INFO)
12 | 
13 |     print("Testing Torrent Status...")
14 |     print("-" * 50)
15 | 
16 |     try:
17 |         # Initialize api_tracker before using RealDebridProvider
18 |         api.rate_limiter.reset_limits()
19 | 
20 |         provider = RealDebridProvider()
21 |         torrents, stats = provider.get_torrent_status()
22 |         status_text = format_torrent_status(torrents, stats)
23 |         print(status_text)
24 |     except Exception as e:
25 |         print(f"Error: {str(e)}")
26 | 
27 | if __name__ == "__main__":
28 |     main()
29 | 
-------------------------------------------------------------------------------- /tests/__init__.py: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/godver3/cli_debrid/743decac17c24bd15c63fa2c2624e2aefa163a5b/tests/__init__.py
-------------------------------------------------------------------------------- /tooltip_schema.json: --------------------------------------------------------------------------------
 1 | {
 2 |     "global": {
 3 |         "logout_button": "Logout of the current user session.",
 4 |         "programControlButton": "Start or stop the program.",
 5 |         "releaseNotesButton": "View recent changes and updates.",
 6 |         "updateAvailableButton": "New version available."
 7 |     },
 8 |     "home": {
 9 |         "toggle_container": "Control panel for adjusting time format, view mode, and displaying active downloads.",
10 |         "collection_stats": "Overview of your media collection, including total movies, TV shows, episodes, and system uptime.",
11 |         "recently_aired": "Shows episodes that have aired recently.",
12 |         "airing_soon": "Upcoming episodes scheduled to air in the near future.",
13 |         "upcoming_releases": "New movies or shows expected to be released soon.",
14 |         "recently_added_movies": "Latest movies added to your collection.",
15 |         "recently_added_shows": "Latest TV shows added to your collection.",
16 |         "recently_upgraded": "Media items that have been upgraded to better quality versions."
17 |     },
18 |     "queues": {
19 |     },
20 |     "settings": {
21 |     },
22 |     "logs": {
23 |         "hidden-div": "What could this be...?"
24 |     }
25 | }
-------------------------------------------------------------------------------- /unset_env_variables.sh: --------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | 
 3 | # System environment variables
 4 | unset USER_CONFIG
 5 | unset USER_LOGS
 6 | unset USER_DB_CONTENT
 7 | unset CLI_DEBRID_PORT
 8 | unset CLI_DEBRID_BATTERY_PORT
 9 | 
10 | # UI Settings
11 | unset DEBRID_ENABLE_USER_SYSTEM
12 | 
13 | # Plex configuration
14 | unset DEBRID_PLEX_URL
15 | unset DEBRID_PLEX_TOKEN
16 | unset DEBRID_PLEX_MOVIE_LIBRARIES
17 | unset DEBRID_PLEX_SHOWS_LIBRARIES
18 | 
19 | # File Management
20 | unset DEBRID_FILE_MANAGEMENT_TYPE
21 | unset DEBRID_ORIGINAL_FILES_PATH
22 | unset DEBRID_SYMLINKED_FILES_PATH
23 | unset DEBRID_SYMLINK_ORGANIZE_BY_TYPE
24 | unset DEBRID_PLEX_URL_FOR_SYMLINK
25 | unset DEBRID_PLEX_TOKEN_FOR_SYMLINK
26 | 
27 | # Debrid Provider
28 | unset DEBRID_DEBRID_PROVIDER
29 | unset DEBRID_DEBRID_API_KEY
30 | 
31 | # TMDB
32 | unset DEBRID_TMDB_API_KEY
33 | 
34 | # Staleness Threshold
35 | unset DEBRID_STALENESS_THRESHOLD
36 | 
37 | # Sync Deletions
38 | unset DEBRID_SYNC_DELETIONS
39 | 
40 | # Metadata Battery
41 | unset DEBRID_METADATA_BATTERY_URL
42 | 
43 | # Queue Settings
44 | unset DEBRID_QUEUE_WAKE_LIMIT
45 | unset DEBRID_QUEUE_MOVIE_AIRTIME_OFFSET
46 | unset DEBRID_QUEUE_EPISODE_AIRTIME_OFFSET
47 | unset DEBRID_QUEUE_BLACKLIST_DURATION
48 | 
49 | # Scraping Settings
50 | unset DEBRID_UNCACHED_CONTENT_HANDLING
51 | unset DEBRID_UPGRADE_SIMILARITY_THRESHOLD
52 | unset DEBRID_HYBRID_MODE
53 | unset DEBRID_JACKETT_SEEDERS_ONLY
54 | unset DEBRID_ULTIMATE_SORT_ORDER
55 | unset DEBRID_SOFT_MAX_SIZE_GB
56 | unset DEBRID_ENABLE_UPGRADING
57 | unset DEBRID_ENABLE_UPGRADING_CLEANUP
58 | unset DEBRID_DISABLE_ADULT
59 | unset DEBRID_TRAKT_EARLY_RELEASES
60 | 
61 | # Trakt Settings
62 | unset DEBRID_TRAKT_CLIENT_ID
63 | unset DEBRID_TRAKT_CLIENT_SECRET
64 | 
65 | # Debug Settings
66 | unset DEBRID_CONSOLE_LOGGING_LEVEL
67 | unset DEBRID_SKIP_INITIAL_PLEX_UPDATE
68 | unset DEBRID_AUTO_RUN
69 | unset DEBRID_DISABLE_INIT
70 | unset DEBRID_SORT_BY_UNCACHED_STATUS
71 | unset DEBRID_CHECKING_QUEUE_PERIOD
72 | unset DEBRID_RESCRAPE_MISSING_FILES
73 | unset DEBRID_ENABLE_REVERSE_ORDER_SCRAPING
74 | unset DEBRID_DISABLE_NOT_WANTED_CHECK
75 | unset DEBRID_PLEX_WATCHLIST_REMOVAL
76 | unset DEBRID_PLEX_WATCHLIST_KEEP_SERIES
77 | unset DEBRID_SYMLINK_MOVIE_TEMPLATE
78 | unset DEBRID_SYMLINK_EPISODE_TEMPLATE
79 | 
80 | # Reverse Parser Settings
81 | unset DEBRID_DEFAULT_VERSION
82 | 
83 | # Complex configurations
84 | unset DEBRID_SCRAPING_VERSIONS
85 | unset DEBRID_SCRAPERS
86 | unset DEBRID_CONTENT_SOURCES
87 | unset DEBRID_NOTIFICATIONS
-------------------------------------------------------------------------------- /utilities/__init__.py: --------------------------------------------------------------------------------
 1 | """Utilities package for the application."""
-------------------------------------------------------------------------------- /utilities/config/__init__.py: --------------------------------------------------------------------------------
 1 | """Configuration package for utilities."""
 2 | 
 3 | from . import downsub_config
 4 | 
 5 | __all__ = ['downsub_config']
-------------------------------------------------------------------------------- /utilities/config/downsub_config.py: --------------------------------------------------------------------------------
 1 | import os
 2 | from pathlib import Path
 3 | import sys
 4 | 
 5 | # Add the project root to the Python path so we can import settings
 6 | sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
 7 | from settings import get_setting
 8 | 
 9 | # Check if subtitle downloading is enabled
10 | SUBTITLES_ENABLED = get_setting('Subtitle Settings', 'enable_subtitles', False)
11 | 
12 | # Check if we should only process the current file
13 | ONLY_CURRENT_FILE = get_setting('Subtitle Settings', 'only_current_file', False)
14 | 
15 | # Get paths from environment variables with fallbacks
16 | USER_CONFIG = os.environ.get('USER_CONFIG', '/user/config')
17 | USER_LOGS = os.environ.get('USER_LOGS', '/user/logs')
18 | USER_DB_CONTENT = os.environ.get('USER_DB_CONTENT', '/user/db_content')
19 | 
20 | # Cache directory within db_content
21 | CACHE_DIR = os.path.join(USER_DB_CONTENT, 'subtitle_cache')
22 | 
23 | # Construct video folder paths
24 | SYMLINKED_PATH = get_setting('File Management', 'symlinked_files_path')
25 | MOVIES_FOLDER = get_setting('Debug', 'movies_folder_name')
26 | TV_SHOWS_FOLDER = get_setting('Debug', 'tv_shows_folder_name')
27 | ENABLE_ANIME = get_setting('Debug', 'enable_separate_anime_folders', False)
28 | ANIME_MOVIES_FOLDER = get_setting('Debug', 'anime_movies_folder_name') if ENABLE_ANIME else None
29 | ANIME_TV_SHOWS_FOLDER = get_setting('Debug', 'anime_tv_shows_folder_name') if ENABLE_ANIME else None
30 | 
31 | # Get folder application settings
32 | APPLY_TO_MOVIES = get_setting('Subtitle Settings', 'apply_to_movies', True)
33 | APPLY_TO_TV_SHOWS = get_setting('Subtitle Settings', 'apply_to_tv_shows', True)
34 | APPLY_TO_ANIME_MOVIES = get_setting('Subtitle Settings', 'apply_to_anime_movies', True)
35 | APPLY_TO_ANIME_TV_SHOWS = get_setting('Subtitle Settings', 'apply_to_anime_tv_shows', True)
36 | 
37 | # Initialize VIDEO_FOLDERS as an empty list
38 | VIDEO_FOLDERS = []
39 | 
40 | # Only add paths if SYMLINKED_PATH is not None
41 | if SYMLINKED_PATH is not None:
42 |     if MOVIES_FOLDER and APPLY_TO_MOVIES:
43 |         VIDEO_FOLDERS.append(os.path.join(SYMLINKED_PATH, MOVIES_FOLDER))
44 | 
45 |     if TV_SHOWS_FOLDER and APPLY_TO_TV_SHOWS:
46 |         VIDEO_FOLDERS.append(os.path.join(SYMLINKED_PATH, TV_SHOWS_FOLDER))
47 | 
48 |     if ENABLE_ANIME and ANIME_MOVIES_FOLDER and APPLY_TO_ANIME_MOVIES:
49 |         VIDEO_FOLDERS.append(os.path.join(SYMLINKED_PATH, ANIME_MOVIES_FOLDER))
50 | 
51 |     if ENABLE_ANIME and ANIME_TV_SHOWS_FOLDER and APPLY_TO_ANIME_TV_SHOWS:
52 |         VIDEO_FOLDERS.append(os.path.join(SYMLINKED_PATH, ANIME_TV_SHOWS_FOLDER))
53 | 
54 | # Ensure cache directory exists
55 | os.makedirs(CACHE_DIR, exist_ok=True)
56 | 
57 | # Cache files
58 | SCAN_CACHE_FILE = os.path.join(CACHE_DIR, 'scan_cache.json')
59 | DIR_CACHE_FILE = os.path.join(CACHE_DIR, 'dir_cache.json')
60 | 
61 | # Logging
62 | LOG_FILE = os.path.join(USER_LOGS, 'subtitle_downloader.log')
63 | LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
64 | LOG_LEVEL = "INFO"
65 | 
66 | # Get settings from settings schema
67 | OPENSUBTITLES_USERNAME = get_setting('Subtitle Settings', 'opensubtitles_username')
68 | OPENSUBTITLES_PASSWORD = get_setting('Subtitle Settings', 'opensubtitles_password')
69 | 
70 | # Parse subtitle languages from comma-separated string
71 | SUBTITLE_LANGUAGES = [
72 |     lang.strip()
73 |     for lang in get_setting('Subtitle Settings', 'subtitle_languages', 'eng,zho').split(',')
74 |     if lang.strip()
75 | ]
76 | 
77 | # Get subtitle providers from settings
78 | SUBTITLE_PROVIDERS = get_setting('Subtitle Settings', 'subtitle_providers', [
79 |     "opensubtitles",
80 |     "opensubtitlescom",
81 |     "podnapisi",
82 |     "tvsubtitles"
83 | ])
84 | 
85 | # User agent for API requests
86 | SUBLIMINAL_USER_AGENT = get_setting('Subtitle Settings', 'user_agent',
87 |     'SubDownloader/1.0 (your-email@example.com)')
88 | 
89 | # Supported video extensions
90 | VIDEO_EXTENSIONS = (".mp4", ".mkv", ".avi", ".mov")
-------------------------------------------------------------------------------- /utilities/file_lock.py: --------------------------------------------------------------------------------
 1 | import os
 2 | import sys
 3 | import time
 4 | 
 5 | class FileLock:
 6 |     def __init__(self, file_obj):
 7 |         self.file_obj = file_obj
 8 |         self.is_windows = sys.platform.startswith('win')
 9 | 
10 |     def acquire(self):
11 |         if self.is_windows:
12 |             # Windows implementation using msvcrt
13 |             while True:
14 |                 try:
15 |                     import msvcrt
16 |                     self.file_obj.seek(0)  # lock byte 0, so release() unlocks the same byte
17 |                     msvcrt.locking(self.file_obj.fileno(), msvcrt.LK_NBLCK, 1)
18 |                     break
19 |                 except IOError:
20 |                     # If file is locked, wait and retry
21 |                     time.sleep(0.1)
22 |         else:
23 |             # Unix implementation using fcntl
24 |             import fcntl
25 |             fcntl.flock(self.file_obj, fcntl.LOCK_EX)
26 | 
27 |     def release(self):
28 |         if self.is_windows:
29 |             import msvcrt
30 |             # Release the lock
31 |             try:
32 |                 self.file_obj.seek(0)
33 |                 msvcrt.locking(self.file_obj.fileno(), msvcrt.LK_UNLCK, 1)
34 |             except IOError:
35 |                 # If for some reason we can't unlock, just pass
36 |                 pass
37 |         else:
38 |             import fcntl
39 |             fcntl.flock(self.file_obj, fcntl.LOCK_UN)
40 | 
41 |     def __enter__(self):
42 |         self.acquire()
43 |         return self
44 | 
45 |     def __exit__(self, exc_type, exc_val, exc_tb):
46 |         self.release()
-------------------------------------------------------------------------------- /utilities/log_analyzer.py: --------------------------------------------------------------------------------
 1 | import os
 2 | import re
 3 | from collections import defaultdict
 4 | from typing import Dict, List, Tuple
 5 | import logging
 6 | from pathlib import Path
 7 | 
 8 | class LogAnalyzer:
 9 |     def __init__(self, log_dir: str = '/user/logs'):
10 |         self.log_dir = Path(log_dir)
11 |         self.log_pattern = re.compile(r'(\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2},\d{3})\s-\s([^:]+):([^:]+):(\d+)\s-\s(\w+)\s-\s(.+)')
12 |         self.function_stats = defaultdict(lambda: {'count': 0, 'bytes': 0, 'levels': defaultdict(int)})
13 |         self.total_lines = 0
14 |         self.total_bytes = 0
15 |         self.skipped_lines = 0
16 | 
17 |     def process_log_file(self, log_file: Path) -> None:
18 |         """Process a single log file and update statistics."""
19 |         try:
20 |             with open(log_file, 'r', encoding='utf-8') as f:
21 |                 for line in f:
22 |                     self.total_lines += 1
23 |                     self.total_bytes += len(line.encode('utf-8'))
24 | 
25 |                     match = self.log_pattern.match(line)
26 |                     if match:
27 |                         _, filename, funcname, _, level, message = match.groups()
28 |                         key = f"{filename}:{funcname}"
29 | 
30 |                         stats = self.function_stats[key]
31 |                         stats['count'] += 1
32 |                         stats['bytes'] += len(line.encode('utf-8'))
33 |                         stats['levels'][level.lower()] += 1
34 |                     else:
35 |                         self.skipped_lines += 1
36 |         except Exception as e:
37 |             logging.error(f"Error processing {log_file}: {str(e)}")
38 | 
39 |     def analyze_logs(self) -> None:
40 |         """Process all debug log files in the directory."""
41 |         for i in range(6):  # Process debug.log through debug.log.5
42 |             suffix = '' if i == 0 else f'.{i}'
43 |             log_file = self.log_dir / f'debug.log{suffix}'
44 |             if log_file.exists():
45 |                 self.process_log_file(log_file)
46 | 
47 |     def get_top_functions(self, limit: int = 20) -> List[Tuple[str, Dict]]:
48 |         """Get the top N functions by log entry count."""
49 |         sorted_funcs = sorted(
50 |             self.function_stats.items(),
51 |             key=lambda x: x[1]['count'],
52 |             reverse=True
53 |         )
54 |         return sorted_funcs[:limit]
55 | 
56 |     def print_analysis(self) -> None:
57 |         """Print the analysis results."""
58 |         print("\n=== Log Analysis Report ===")
59 |         print(f"\nTotal lines processed: {self.total_lines}")
60 |         print(f"Total bytes processed: {self.total_bytes:,} bytes")
61 |         print(f"Skipped lines: {self.skipped_lines}")
62 | 
63 |         print("\nTop 20 Functions by Log Entry Count:")
64 |         print("-" * 80)
65 |         print(f"{'Function':<40} {'Count':<10} {'Size':<15} {'% of Total':<12} {'Log Levels'}")
66 |         print("-" * 80)
67 | 
68 |         for func_name, stats in self.get_top_functions(20):
69 |             count = stats['count']
70 |             bytes_size = stats['bytes']
71 |             percentage = (count / self.total_lines) * 100 if self.total_lines > 0 else 0
72 | 
73 |             # Format the log levels string
74 |             levels_str = ', '.join(f"{level}:{count}" for level, count in stats['levels'].items())
75 | 
76 |             print(f"{func_name:<40} {count:<10} {bytes_size:,} bytes {percentage:>6.2f}% {levels_str}")
77 | 
78 | def main():
79 |     # Get log directory from environment variable with fallback
80 |     log_dir = os.environ.get('USER_LOGS', '/user/logs')
81 | 
82 |     analyzer = LogAnalyzer(log_dir)
83 |     analyzer.analyze_logs()
84 |     analyzer.print_analysis()
85 | 
86 | if __name__ == "__main__":
87 |     main()
-------------------------------------------------------------------------------- /utilities/plex_removal_cache.py: --------------------------------------------------------------------------------
  1 | import os
  2 | import pickle
  3 | import time
  4 | from datetime import datetime, timedelta
  5 | import logging
  6 | from typing import Dict, List, Optional, Tuple
  7 | from settings import get_setting
  8 | 
  9 | # Cache file path
 10 | CACHE_FILE = os.path.join(os.environ.get('USER_DB_CONTENT', '/user/db_content'), 'plex_removal_cache.pkl')
 11 | 
 12 | def _load_cache() -> Dict[str, List[Tuple[str, str, Optional[str], float]]]:
 13 |     """Load the removal cache from disk."""
 14 |     try:
 15 |         if os.path.exists(CACHE_FILE):
 16 |             with open(CACHE_FILE, 'rb') as f:
 17 |                 return pickle.load(f)
 18 |     except Exception as e:
 19 |         logging.error(f"Error loading Plex removal cache: {str(e)}")
 20 |     return {}
 21 | 
 22 | def _save_cache(cache: Dict[str, List[Tuple[str, str, Optional[str], float]]]) -> None:
 23 |     """Save the removal cache to disk."""
 24 |     try:
 25 |         os.makedirs(os.path.dirname(CACHE_FILE), exist_ok=True)
 26 |         with open(CACHE_FILE, 'wb') as f:
 27 |             pickle.dump(cache, f)
 28 |     except Exception as e:
 29 |         logging.error(f"Error saving Plex removal cache: {str(e)}")
 30 | 
 31 | def cache_plex_removal(item_title: str, item_path: str, episode_title: Optional[str] = None) -> None:
 32 |     """
 33 |     Cache a Plex removal operation for later execution.
 34 |     If caching is disabled via settings, immediately execute the removal.
 35 | 
 36 |     Args:
 37 |         item_title: Title of the item to remove
 38 |         item_path: Path of the item to remove
 39 |         episode_title: Optional episode title for TV shows
 40 |     """
 41 |     # Check if caching is enabled
 42 |     if not get_setting('Debug', 'enable_plex_removal_caching', default=True):
 43 |         # If caching is disabled, execute removal immediately
 44 |         from utilities.plex_functions import remove_file_from_plex
 45 |         remove_file_from_plex(item_title, item_path, episode_title)
 46 |         return
 47 | 
 48 |     cache = _load_cache()
 49 |     timestamp = time.time()
 50 | 
 51 |     # Create a unique key based on the path to avoid duplicates
 52 |     key = item_path
 53 | 
 54 |     if key not in cache:
 55 |         cache[key] = []
 56 | 
 57 |     # Add the removal request to the cache
 58 |     cache[key].append((item_title, item_path, episode_title, timestamp))
 59 | 
 60 |     _save_cache(cache)
 61 |     logging.info(f"Cached Plex removal request for {item_title} ({item_path})")
 62 | 
 63 | def process_removal_cache(min_age_hours: int = 6) -> None:
 64 |     """
 65 |     Process cached removal operations that are older than the specified age.
 66 | 
 67 |     Args:
 68 |         min_age_hours: Minimum age in hours before processing a cached removal
 69 |     """
 70 |     from utilities.plex_functions import remove_file_from_plex
 71 | 
 72 |     cache = _load_cache()
 73 |     if not cache:
 74 |         return
 75 | 
 76 |     current_time = time.time()
 77 |     min_age_seconds = min_age_hours * 3600
 78 | 
 79 |     for key in list(cache.keys()):
 80 |         remaining = []
 81 |         for entry in cache[key]:
 82 |             item_title, item_path, episode_title, timestamp = entry
 83 | 
 84 |             # Keep entries that are not old enough to process yet
 85 |             if current_time - timestamp < min_age_seconds:
 86 |                 remaining.append(entry)
 87 |                 continue
 88 | 
 89 |             try:
 90 |                 # Actually remove from Plex
 91 |                 remove_file_from_plex(item_title, item_path, episode_title)
 92 |                 logging.info(f"Processed cached Plex removal for {item_title} ({item_path})")
 93 |             except Exception as e:
 94 |                 # Keep the entry so the removal is retried on the next run
 95 |                 remaining.append(entry)
 96 |                 logging.error(f"Error processing cached Plex removal for {item_title}: {str(e)}")
 97 | 
 98 |         # Only drop a key once all of its entries have been processed
 99 |         if remaining:
100 |             cache[key] = remaining
101 |         else:
102 |             cache.pop(key, None)
103 | 
104 |     # Save updated cache
105 |     _save_cache(cache)
-------------------------------------------------------------------------------- /utilities/result_viewer.py: --------------------------------------------------------------------------------
 1 | '''import curses
 2 | from typing import List, Dict
 3 | 
 4 | def truncate_string(string, length):
 5 |     return string[:length - 3] + '...' if len(string) > length else string.ljust(length)
 6 | 
 7 | def display_results(results: List[Dict], filtered_out_results: List[Dict]):
 8 |     def main(stdscr):
 9 |         curses.curs_set(0)  # Hide the cursor
10 |         curses.init_pair(1, curses.COLOR_RED, curses.COLOR_BLACK)  # Initialize color pair for red text
11 |         stdscr.clear()
12 |         height, width = stdscr.getmaxyx()
13 |         table_height = height - 6  # Leave room for header, footer, and filtered out results header
14 |         current_pos = 0
15 |         start_pos = 0
16 |         show_filtered_out = False  # Toggle for showing filtered out results
17 | 
18 |         while True:
19 |             stdscr.clear()
20 |             # Calculate column widths
21 |             name_width = width - 95  # Adjust this value to allocate space for other columns
22 | 
23 |             # Display header
24 |             stdscr.addstr(0, 0, "Name".ljust(name_width) + "Size/File".ljust(15) + "Source".ljust(25) + "Est. Bitrate".ljust(15))
25 |             stdscr.addstr(1, 0, "-" * (width - 1))
26 | 
27 |             # Display results
28 |             display_results = filtered_out_results if show_filtered_out else results
29 |             if display_results:  # Check if display_results is not None and not empty
30 |                 for i in range(start_pos, min(start_pos + table_height, len(display_results))):
31 |                     result = display_results[i]
32 |                     if i == current_pos:
33 |                         stdscr.attron(curses.A_REVERSE)
34 |                     if show_filtered_out:
35 |                         stdscr.attron(curses.color_pair(1))
36 |                     stdscr.addstr(i - start_pos + 2, 0,
37 |                         truncate_string(result.get('title', 'N/A'), name_width) +
38 |                         f"{result.get('size', 0):.2f} GB".ljust(15) +
39 |                         truncate_string(result.get('source', 'N/A'), 25) +
40 |                         f"{result.get('bitrate', 0):.2f} mbps".ljust(15))
41 |                     if show_filtered_out:
42 |                         stdscr.attroff(curses.color_pair(1))
43 |                     if i == current_pos:
44 |                         stdscr.attroff(curses.A_REVERSE)
45 |             else:
46 |                 stdscr.addstr(2, 0, "No results to display.")
47 | 
48 |             # Display footer
49 |             footer = "Use arrow keys to navigate, Enter to select, q to quit, f to toggle filtered results"
50 |             stdscr.addstr(height - 1, 0, footer)
51 | 
52 |             # Handle key presses
53 |             key = stdscr.getch()
54 |             if key == ord('q'):
55 |                 return None
56 |             elif key == ord('f'):
57 |                 show_filtered_out = not show_filtered_out
58 |                 current_pos = 0
59 |                 start_pos = 0
60 |                 # Reset display_results
61 |                 display_results = filtered_out_results if show_filtered_out else results
62 |                 if not display_results:
63 |                     stdscr.addstr(height - 2, 0, "No results to display in this category.")
64 |                     stdscr.refresh()
65 |                     #stdscr.getch()  # Wait for user input before continuing
66 |             elif key == curses.KEY_UP and current_pos > 0:
67 |                 current_pos -= 1
68 |                 if current_pos < start_pos:
69 |                     start_pos = current_pos
70 |             elif key == curses.KEY_DOWN and current_pos < len(display_results) - 1:
71 |                 current_pos += 1
72 |                 if current_pos >= start_pos + table_height:
73 |                     start_pos = current_pos - table_height + 1
74 |             elif key == curses.KEY_PPAGE:  # Page Up
75 |                 current_pos = max(0, current_pos - table_height)
76 |                 start_pos = max(0, start_pos - table_height)
77 |             elif key == curses.KEY_NPAGE:  # Page Down
78 |                 current_pos = min(len(display_results) - 1, current_pos + table_height)
79 |                 start_pos = min(len(display_results) - table_height, start_pos + table_height)
80 |             elif key == 10:  # Enter key
81 |                 return display_results[current_pos] if not show_filtered_out else None
82 | 
83 |     return curses.wrapper(main)'''
-------------------------------------------------------------------------------- /utilities/test_anidb.py: --------------------------------------------------------------------------------
  1 | import logging
  2 | import sys
  3 | import os
  4 | import time
  5 | 
  6 | # Add the root directory to the Python path
  7 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
  8 | 
  9 | from typing import Dict, Any
 10 | from utilities.anidb_functions import get_anidb_metadata_for_item, format_filename_with_anidb
 11 | from settings import get_setting
 12 | 
 13 | # Configure logging
 14 | logging.basicConfig(level=logging.DEBUG,
 15 |                     format='%(asctime)s - %(levelname)s - %(message)s')
 16 | 
 17 | # Mock settings for testing
 18 | MOCK_SETTINGS = {
 19 |     'Debug': {
 20 |         'use_anidb_metadata': True,
 21 |         'symlink_preserve_extension': True,
 22 |         'anidb_episode_template': '{title} ({year})/Season {season_number:02d}/{title} ({year}) - S{season_number:02d}E{episode_number:02d} - {episode_title}'
 23 |     }
 24 | }
 25 | 
 26 | # Override get_setting for testing
 27 | def mock_get_setting(section: str, key: str, default: Any = None) -> Any:
 28 |     """Mock get_setting that returns values from MOCK_SETTINGS when present, falling back to the provided default."""
 29 |     if section in MOCK_SETTINGS and key in MOCK_SETTINGS[section]:
 30 |         return MOCK_SETTINGS[section][key]
 31 |     return default  # Return the default if not found in mock settings
 32 | 
 33 | # Replace the actual get_setting with our mock
 34 | sys.modules['settings'].get_setting = mock_get_setting
 35 | 
 36 | def test_anidb_functions():
 37 |     """Test AniDB functions with a sample anime item."""
 38 | 
 39 |     # Sample anime items to test with
 40 |     test_items = [
 41 |         {
 42 |             'id': 12345,
 43 |             'type': 'episode',
 44 |             'is_anime': True,
 45 |             'title': 'Jujutsu Kaisen',
 46 |             'year': '2020',
 47 |             'season_number': 1,
 48 |             'episode_number': 1,
 49 |             'episode_title': 'Ryomen Sukuna',
 50 |             'quality': '1080p',
 51 |             'version': 'HDTV',
 52 |             'filled_by_file': 'Jujutsu.Kaisen.S01E01.1080p.mkv',
 53 |             'filled_by_title': 'Jujutsu Kaisen',
 54 |             'state': 'checking'
 55 |         },
 56 |         {
 57 |             'id': 12346,
 58 |             'type': 'episode',
 59 |             'is_anime': True,
 60 |             'title': 'One Piece',
 61 |             'year': '1999',
 62 |             'season_number': 1,
 63 |             'episode_number': 1,
 64 |             'episode_title': 'I\'m Luffy! The Man Who\'s Gonna Be King of the Pirates!',
 65 |             'quality': '1080p',
 66 |             'version': 'HDTV',
 67 |             'filled_by_file': 'One.Piece.E001.1080p.mkv',
 68 |             'filled_by_title': 'One Piece',
 69 |             'state': 'checking'
 70 |         },
 71 |         {
 72 |             'id': 12347,
 73 |             'type': 'episode',
 74 |             'is_anime': True,
 75 |             'title': 'MF Ghost',
 76 |             'year': '2023',
 77 |             'season_number': 1,
 78 |             'episode_number': 13,  # Testing episode from second cour
 79 |             'episode_title': 'Episode 13',
 80 |             'quality': '1080p',
 81 |             'version': 'HDTV',
 82 |             'filled_by_file': 'MF.Ghost.S01E13.1080p.mkv',
 83 |             'filled_by_title': 'MF Ghost',
 84 |             'state': 'checking'
 85 |         }
 86 |     ]
 87 | 
 88 |     for test_item in test_items:
 89 |         logging.info(f"\nTesting with anime: {test_item['title']}")
 90 | 
 91 |         # Test getting metadata
 92 |         logging.info("Testing get_anidb_metadata_for_item...")
 93 |         metadata = get_anidb_metadata_for_item(test_item)
 94 |         if metadata:
 95 |             logging.info("Successfully retrieved AniDB metadata:")
 96 |             for key, value in metadata.items():
 97 |                 logging.info(f"  {key}: {value}")
 98 |         else:
 99 |             logging.error("Failed to retrieve AniDB metadata")
100 | 
101 |         # Test filename formatting
102 |         logging.info("\nTesting format_filename_with_anidb...")
103 |         filename = format_filename_with_anidb(test_item, '.mkv')
104 |         if filename:
105 |             logging.info(f"Successfully formatted filename: {filename}")
106 |         else:
107 |             logging.error("Failed to format filename")
108 | 
109 |         # Add a delay between tests to respect rate limiting
110 |         time.sleep(2)
111 | 
112 | if __name__ == '__main__':
113 |     test_anidb_functions()
-------------------------------------------------------------------------------- /utilities/zurg_utilities.py: --------------------------------------------------------------------------------
 1 | import requests
 2 | import logging
 3 | 
 4 | async def run_get_collected_from_zurg():
 5 |     pass
 6 | 
 7 | async def run_get_recent_from_zurg():
 8 |     pass
-------------------------------------------------------------------------------- /version.txt: --------------------------------------------------------------------------------
 1 | 0.6.07
 2 | 
-------------------------------------------------------------------------------- /wake_count_manager.py: --------------------------------------------------------------------------------
 1 | import os
 2 | import pickle
 3 | import logging
 4 | 
 5 | class WakeCountManager:
 6 |     def __init__(self):
 7 |         self.wake_counts = {}
 8 |         # Get db_content directory from environment variable with fallback
 9 |         db_content_dir = os.environ.get('USER_DB_CONTENT', '/user/db_content')
10 |         self.file_path = os.path.join(db_content_dir, 'wake_counts.pkl')
11 |         self.load_wake_counts()
12 | 
13 |     def load_wake_counts(self):
14 |         os.makedirs(os.path.dirname(self.file_path), exist_ok=True)
15 |         if os.path.exists(self.file_path):
16 |             with open(self.file_path, 'rb') as f:
17 |                 self.wake_counts = pickle.load(f)
18 |             #logging.debug(f"Loaded wake counts from {self.file_path}")
19 |         else:
20 |             logging.debug("No existing wake counts file found. Starting with empty wake counts.")
21 | 
22 |     def save_wake_counts(self):
23 |         os.makedirs(os.path.dirname(self.file_path), exist_ok=True)
24 |         with open(self.file_path, 'wb') as f:
25 |             pickle.dump(self.wake_counts, f)
26 |         #logging.debug(f"Saved wake counts to {self.file_path}")
27 | 
28 |     def get_wake_count(self, item_id):
29 |         count = self.wake_counts.get(item_id, 0)
30 |         #logging.debug(f"Retrieved wake count for item {item_id}: {count}")
31 |         return count
32 | 
33 |     def increment_wake_count(self, item_id):
34 |         old_count = self.wake_counts.get(item_id, 0)
35 |         new_count = old_count + 1
36 |         self.wake_counts[item_id] = new_count
37 |         self.save_wake_counts()
38 |         #logging.debug(f"Incremented wake count for item {item_id}. Old count: {old_count}, New count: {new_count}")
39 |         return new_count
40 | 
41 |     def set_wake_count(self, item_id, count):
42 |         self.wake_counts[item_id] = count
43 |         self.save_wake_counts()
44 |         #logging.debug(f"Set wake count for item {item_id} to {count}")
45 | 
46 | wake_count_manager = WakeCountManager()
-------------------------------------------------------------------------------- /zurg/config/zurg_update.sh: --------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | 
 3 | # This script is used by Zurg to notify cli_debrid when a file has been added to the mount location
 4 | 
 5 | # Configuration
 6 | webhook_url="http://debrid_cli_debrid:5000/webhook/rclone" # Using docker network DNS with project prefix
 7 | 
 8 | # First notify our webhook for each file
 9 | for arg in "$@"
10 | do
11 |     arg_clean=$(echo "$arg" | sed 's/\\//g')
12 |     echo "Notifying webhook for: $arg_clean"
13 |     encoded_webhook_arg=$(echo -n "$arg_clean" | python3 -c "import sys, urllib.parse as ul; print(ul.quote(sys.stdin.read()))")
14 |     curl -s -X GET "$webhook_url?file=$encoded_webhook_arg"
15 | done
16 | 
17 | echo "Updates completed!"
18 | 
-------------------------------------------------------------------------------- /zurg_update.sh: --------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | 
 3 | # This script is used by Zurg to notify cli_debrid when a file has been added to the mount location
 4 | 
 5 | # Configuration
 6 | webhook_url="http://CLIDEBRID_URL:PORT/webhook/rclone" # Replace with your actual webhook URL
 7 | 
 8 | # First notify our webhook for each file
 9 | for arg in "$@"
10 | do
11 |     arg_clean=$(echo "$arg" | sed 's/\\//g')
12 |     echo "Notifying webhook for: $arg_clean"
13 |     encoded_webhook_arg=$(echo -n "$arg_clean" | python3 -c "import sys, urllib.parse as ul; print(ul.quote(sys.stdin.read()))")
14 |     curl -s -X GET "$webhook_url?file=$encoded_webhook_arg"
15 | done
16 | 
17 | echo "Updates completed!"
--------------------------------------------------------------------------------
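A quick usage sketch for the FileLock helper in utilities/file_lock.py. The lock-file path below is hypothetical; FileLock only wraps a file object the caller has already opened:

from utilities.file_lock import FileLock

# Hypothetical lock-file path; any writable, already-open file object works.
with open('/user/db_content/wake_counts.lock', 'a+') as lock_file:
    with FileLock(lock_file):
        # Critical section: reads/writes guarded by the lock go here.
        pass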
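For reference, LogAnalyzer's regex in utilities/log_analyzer.py expects records shaped like the sample below (an invented line, not taken from real logs); the capture groups are timestamp, filename, function, line number, level, and message:

import re

log_pattern = re.compile(r'(\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2},\d{3})\s-\s([^:]+):([^:]+):(\d+)\s-\s(\w+)\s-\s(.+)')
sample = "2024-01-15 12:34:56,789 - queue_manager.py:process_queue:42 - INFO - Woke 3 items"
match = log_pattern.match(sample)
assert match is not None
timestamp, filename, funcname, lineno, level, message = match.groups()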
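A minimal sketch of how the removal cache in utilities/plex_removal_cache.py is meant to be driven. The title, path, and polling interval are illustrative assumptions, not values taken from this repo:

import time
from utilities.plex_removal_cache import cache_plex_removal, process_removal_cache

# Queue a removal; with caching enabled it is only recorded, not executed yet.
cache_plex_removal('Some Movie (2020)', '/mnt/symlinked/Movies/Some Movie (2020).mkv')

# A background task would then periodically flush entries older than the threshold.
while True:
    process_removal_cache(min_age_hours=6)  # only entries older than six hours are acted on
    time.sleep(3600)  # illustrative polling interval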
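Usage sketch for the module-level wake_count_manager singleton in wake_count_manager.py. The item id and wake threshold below are hypothetical:

from wake_count_manager import wake_count_manager

count = wake_count_manager.increment_wake_count('movie-12345')  # persisted to wake_counts.pkl
if count >= 3:  # hypothetical wake limit
    wake_count_manager.set_wake_count('movie-12345', 0)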
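Both zurg_update.sh scripts issue GET /webhook/rclone?file=<url-encoded path> once per changed file. The real handler lives in routes/webhook_routes.py and will differ; this is only a minimal Flask sketch of a receiver compatible with what the scripts send, assuming nothing beyond that:

from flask import Flask, request

app = Flask(__name__)

@app.route('/webhook/rclone', methods=['GET'])
def rclone_webhook():
    # zurg_update.sh URL-encodes each changed path into the 'file' query parameter;
    # Flask decodes query strings, so request.args already holds the plain path.
    file_path = request.args.get('file', '')
    if not file_path:
        return 'missing file parameter', 400
    # ...hand file_path to the library/checking logic here...
    return 'OK', 200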