├── robofuse
│   ├── api
│   │   ├── __init__.py
│   │   ├── downloads.py
│   │   ├── unrestrict.py
│   │   ├── torrents.py
│   │   └── client.py
│   ├── cli
│   │   ├── __init__.py
│   │   └── commands.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── concurrency.py
│   │   ├── logging.py
│   │   └── parser.py
│   ├── core
│   │   ├── __init__.py
│   │   ├── strm.py
│   │   └── processor.py
│   ├── __init__.py
│   ├── __main__.py
│   └── config.py
├── assets
│   └── logo.png
├── requirements.txt
├── config.json
├── docker-compose.yml
├── setup.py
├── .dockerignore
├── Dockerfile
├── LICENSE
├── .gitignore
└── README.md
/robofuse/api/__init__.py:
--------------------------------------------------------------------------------
1 | """API client for the Real-Debrid API."""
--------------------------------------------------------------------------------
/robofuse/cli/__init__.py:
--------------------------------------------------------------------------------
1 | """Command-line interface for robofuse."""
--------------------------------------------------------------------------------
/robofuse/utils/__init__.py:
--------------------------------------------------------------------------------
1 | """Utilities for the robofuse package."""
--------------------------------------------------------------------------------
/robofuse/core/__init__.py:
--------------------------------------------------------------------------------
1 | """Core functionality for the robofuse package."""
--------------------------------------------------------------------------------
/assets/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/itsrenoria/robofuse/HEAD/assets/logo.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | requests==2.31.0
2 | click==8.1.7
3 | python-dateutil==2.8.2
4 | tqdm==4.66.1
5 | colorama==0.4.6
--------------------------------------------------------------------------------
/robofuse/__init__.py:
--------------------------------------------------------------------------------
1 | """robofuse: A service for interacting with Real-Debrid and generating .strm files."""
2 |
3 | __version__ = "0.3.0"
--------------------------------------------------------------------------------
/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "token": "YOUR_RD_API_TOKEN",
3 | "output_dir": "./Library",
4 | "cache_dir": "./cache",
5 | "concurrent_requests": 32,
6 | "general_rate_limit": 60,
7 | "torrents_rate_limit": 25,
8 | "watch_mode_interval": 60,
9 | "repair_torrents": true,
10 | "use_ptt_parser": true
11 | }
--------------------------------------------------------------------------------
/robofuse/__main__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """Main entry point for the robofuse application."""
3 |
4 | import sys
5 | from robofuse.cli.commands import cli
6 |
7 |
8 | def main():
9 | """Entry point for the application."""
10 |     # Pass an empty dict as the initial context object to the CLI
11 | cli(obj={})
12 |
13 |
14 | if __name__ == "__main__":
15 | sys.exit(main())
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | robofuse:
3 | build:
4 | context: .
5 | dockerfile: Dockerfile
6 | container_name: robofuse
7 | restart: unless-stopped
8 | volumes:
9 | - ./config.json:/app/config.json
10 | - ./cache:/app/cache
11 | - ./Library:/app/Library
12 | environment:
13 | - TZ=UTC # Set your timezone here, e.g., America/New_York
14 | # Uncomment to set custom command or parameters
15 | # command: python -m robofuse watch --interval 600 --verbosity info
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
3 | setup(
4 | name="robofuse",
5 | version="0.3.0",
6 | packages=find_packages(),
7 | install_requires=[
8 | "requests",
9 | "click",
10 | "python-dateutil",
11 | "tqdm",
12 | "colorama",
13 | "parsett",
14 | ],
15 | entry_points={
16 | "console_scripts": [
17 | "robofuse=robofuse.__main__:main",
18 | ],
19 | },
20 | author="robofuse Team",
21 | description="A service that interacts with Real-Debrid API to generate .strm files",
22 | keywords="real-debrid, strm, torrent",
23 | python_requires=">=3.7",
24 | )
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | # Git
2 | .git
3 | .gitignore
4 |
5 | # Python
6 | __pycache__/
7 | *.py[cod]
8 | *$py.class
9 | *.so
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 |
27 | # Virtual environment
28 | venv/
29 | env/
30 | ENV/
31 |
32 | # Libraries and cache directories
33 | Library/
34 | cache/
35 |
36 | # IDE files
37 | .idea/
38 | .vscode/
39 | *.swp
40 | *.swo
41 |
42 | # OS specific files
43 | .DS_Store
44 | Thumbs.db
45 |
46 | # Additional cache and temporary files
47 | .pytest_cache/
48 | .mypy_cache/
49 | .tox/
50 | .cache/
51 | *.tmp
52 | *.temp
53 | *.pid
54 | *.lock
55 | *.log
56 | logs/
57 |
58 | # Docker files (not needed in the build context)
59 | docker-compose.yml
60 | .dockerignore
61 |
62 | # Test files
63 | test_*
64 | tests/
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.11-slim
2 |
3 | LABEL maintainer="robofuse Team"
4 | LABEL description="robofuse - A service that interacts with Real-Debrid API to generate .strm files"
5 |
6 | # Set working directory
7 | WORKDIR /app
8 |
9 | # Install system dependencies including git
10 | RUN apt-get update && \
11 | apt-get install -y --no-install-recommends git && \
12 | apt-get clean && \
13 | rm -rf /var/lib/apt/lists/*
14 |
15 | # Copy requirements first for better caching
16 | COPY requirements.txt .
17 |
18 | # Install dependencies
19 | RUN pip install --no-cache-dir -r requirements.txt && \
20 | pip install --no-cache-dir git+https://github.com/dreulavelle/PTT.git
21 |
22 | # Copy the application code
23 | COPY . .
24 |
25 | # Install the package in development mode
26 | RUN pip install -e .
27 |
28 | # Create directories
29 | RUN mkdir -p /app/Library /app/cache
30 |
31 | # Set environment variables
32 | ENV PYTHONUNBUFFERED=1
33 |
34 | # Set default command
35 | CMD ["python", "-m", "robofuse", "watch"]
36 |
37 | # Note: robofuse runs no network service, so no ports need to be exposed
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 robofuse
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/robofuse/utils/concurrency.py:
--------------------------------------------------------------------------------
1 | import concurrent.futures
2 | from typing import List, Callable, TypeVar, Any, Dict, Optional
3 | from tqdm import tqdm
4 |
5 | from robofuse.utils.logging import logger
6 |
7 | T = TypeVar('T')
8 | R = TypeVar('R')
9 |
10 |
11 | def parallel_process(
12 | items: List[T],
13 | process_func: Callable[[T], R],
14 | max_workers: int = 32,
15 | desc: str = "Processing",
16 | show_progress: bool = True
17 | ) -> List[R]:
18 | """
19 | Process a list of items in parallel using a ThreadPoolExecutor.
20 |
21 | Args:
22 | items: List of items to process
23 | process_func: Function to apply to each item
24 | max_workers: Maximum number of worker threads
25 | desc: Description for the progress bar
26 | show_progress: Whether to show a progress bar
27 |
28 | Returns:
29 |         List of results in the original item order; failed items are omitted
30 | """
31 | if not items:
32 | logger.info(f"No items to process for {desc}")
33 | return []
34 |
35 |     n_items = len(items)
36 |
37 |     # Adjust max_workers if we have fewer items than workers
38 |     actual_workers = min(max_workers, n_items)
39 |
40 |     logger.info(f"Processing {n_items} items with {actual_workers} workers ({desc})")
41 |
42 |     with concurrent.futures.ThreadPoolExecutor(max_workers=actual_workers) as executor:
43 |         # Create a dictionary mapping futures to their original indices
44 |         future_to_index = {
45 |             executor.submit(process_func, item): i
46 |             for i, item in enumerate(items)
47 |         }
48 |
49 |         # Create progress bar if requested
50 |         if show_progress:
51 |             futures_iter = tqdm(
52 |                 concurrent.futures.as_completed(future_to_index),
53 |                 total=n_items,
54 |                 desc=desc
55 |             )
56 |         else:
57 |             futures_iter = concurrent.futures.as_completed(future_to_index)
58 |
59 |         # Drain futures as they complete, logging any errors
60 |         for future in futures_iter:
61 |             try:
62 |                 future.result()
63 |             except Exception as e:
64 |                 logger.error(f"Error processing item: {str(e)}")
65 |
66 |     # Collect results in their original submission order; future.result()
67 |     # is cached, so calling it a second time does not re-run the task
68 |     sorted_results = [None] * n_items
69 |     for future, index in future_to_index.items():
70 |         try:
71 |             sorted_results[index] = future.result()
72 |         except Exception:
73 |             # Error was already logged above
74 |             pass
75 |
76 |     # Drop results for items that failed
77 |     filtered_results = [r for r in sorted_results if r is not None]
78 |
79 |     if len(filtered_results) != n_items:
80 |         logger.warning(f"Failed to process {n_items - len(filtered_results)} items")
81 |
82 |     return filtered_results
--------------------------------------------------------------------------------
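Usage sketch for `parallel_process` (not part of the repository; the `square` worker is a hypothetical stand-in for a per-item API call):

```python
from robofuse.utils.concurrency import parallel_process

def square(n: int) -> int:
    """Hypothetical worker standing in for a per-item API call."""
    return n * n

# Results come back in the original item order; failed items are dropped
results = parallel_process(
    items=list(range(10)),
    process_func=square,
    max_workers=4,
    desc="Squaring",
    show_progress=False,
)
print(results)  # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
```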
/robofuse/api/downloads.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, List, Any, Optional
2 |
3 | from robofuse.api.client import RealDebridClient
4 | from robofuse.utils.logging import logger
5 |
6 |
7 | class DownloadsAPI:
8 | """API client for Real-Debrid downloads endpoints."""
9 |
10 | def __init__(self, client: RealDebridClient):
11 | self.client = client
12 |
13 | def get_downloads(self, page: int = 1, limit: int = 100) -> List[Dict[str, Any]]:
14 | """Get a list of downloads from Real-Debrid."""
15 | logger.info(f"Fetching downloads (page {page}, limit {limit})")
16 | return self.client.get("downloads", params={"page": page, "limit": limit})
17 |
18 | def get_all_downloads(self) -> List[Dict[str, Any]]:
19 | """Get all downloads using pagination."""
20 | logger.info("Fetching all downloads (this may take a while)")
21 | return self.client.get_paginated("downloads", limit_per_page=100)
22 |
23 | def get_download_info(self, download_id: str) -> Dict[str, Any]:
24 | """Get information about a specific download."""
25 | logger.verbose(f"Fetching info for download {download_id}")
26 | return self.client.get(f"downloads/info/{download_id}")
27 |
28 | def delete_download(self, download_id: str) -> Dict[str, Any]:
29 | """Delete a download from Real-Debrid."""
30 | logger.info(f"Deleting download {download_id}")
31 | return self.client.delete(f"downloads/delete/{download_id}")
32 |
33 | def filter_streamable_downloads(self, downloads: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
34 | """Filter downloads to only include streamable ones."""
35 | streamable_downloads = [
36 | download for download in downloads
37 | if download.get("streamable") == 1
38 | ]
39 |
40 | logger.info(f"Filtered {len(streamable_downloads)} streamable downloads out of {len(downloads)} total")
41 | return streamable_downloads
42 |
43 | def filter_unique_downloads(self, downloads: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
44 | """Filter downloads to remove duplicates based on link."""
45 | # Group downloads by link
46 | downloads_by_link = {}
47 |
48 | for download in downloads:
49 | link = download.get("link")
50 | if not link:
51 | continue
52 |
53 | # If we already have this link, only keep the newer one
54 | if link in downloads_by_link:
55 | existing_generated = downloads_by_link[link].get("generated", "")
56 | current_generated = download.get("generated", "")
57 |
58 | if current_generated > existing_generated:
59 | downloads_by_link[link] = download
60 | else:
61 | downloads_by_link[link] = download
62 |
63 | unique_downloads = list(downloads_by_link.values())
64 |
65 | logger.info(f"Filtered {len(unique_downloads)} unique downloads out of {len(downloads)} total")
66 | return unique_downloads
--------------------------------------------------------------------------------
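A sketch of how these filters chain together (assumes a valid Real-Debrid token; `YOUR_RD_API_TOKEN` is a placeholder):

```python
from robofuse.api.client import RealDebridClient
from robofuse.api.downloads import DownloadsAPI

client = RealDebridClient(token="YOUR_RD_API_TOKEN")  # placeholder token
downloads_api = DownloadsAPI(client)

# Fetch everything, then narrow to streamable, de-duplicated entries
downloads = downloads_api.get_all_downloads()
streamable = downloads_api.filter_streamable_downloads(downloads)
unique = downloads_api.filter_unique_downloads(streamable)
print(f"{len(unique)} downloads ready for .strm generation")
```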
/robofuse/api/unrestrict.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, List, Any, Optional
2 |
3 | from robofuse.api.client import RealDebridClient, APIError
4 | from robofuse.utils.logging import logger
5 |
6 |
7 | class UnrestrictAPI:
8 | """API client for Real-Debrid unrestrict endpoints."""
9 |
10 | def __init__(self, client: RealDebridClient):
11 | self.client = client
12 |
13 | def unrestrict_link(self, link: str, password: Optional[str] = None, remote: int = 0) -> Dict[str, Any]:
14 | """Unrestrict a link to get download info."""
15 |         logger.info("Unrestricting link")
16 | logger.verbose(f"Link: {link}")
17 |
18 | data = {"link": link}
19 |
20 | if password:
21 | data["password"] = password
22 |
23 | if remote == 1:
24 | data["remote"] = 1
25 |
26 | try:
27 | result = self.client.post("unrestrict/link", data=data)
28 |             logger.success("Successfully unrestricted link")
29 | return result
30 | except APIError as e:
31 | logger.error(f"Failed to unrestrict link: {e.message}")
32 | raise
33 |
34 | def check_link(self, link: str) -> Dict[str, Any]:
35 | """Check if a link is supported by Real-Debrid."""
36 | logger.verbose(f"Checking link: {link}")
37 | return self.client.post("unrestrict/check", data={"link": link})
38 |
39 | def batch_unrestrict_links(self, links: List[str], max_retries: int = 3) -> List[Dict[str, Any]]:
40 | """Unrestrict multiple links with retries."""
41 | logger.info(f"Batch unrestricting {len(links)} links")
42 |
43 | results = []
44 | failed_links = []
45 |
46 | for link in links:
47 | try:
48 | result = self.unrestrict_link(link)
49 | results.append(result)
50 | except Exception as e:
51 | logger.warning(f"Failed to unrestrict link on first attempt: {str(e)}")
52 | failed_links.append(link)
53 |
54 | # Retry failed links
55 | if failed_links:
56 | logger.info(f"Retrying {len(failed_links)} failed links")
57 |
58 | retry_count = 0
59 | while failed_links and retry_count < max_retries:
60 | retry_count += 1
61 | logger.info(f"Retry attempt {retry_count}/{max_retries}")
62 |
63 | still_failed = []
64 | for link in failed_links:
65 | try:
66 | result = self.unrestrict_link(link)
67 | results.append(result)
68 | logger.success(f"Successfully unrestricted link on retry {retry_count}")
69 | except Exception as e:
70 | logger.warning(f"Failed on retry {retry_count}: {str(e)}")
71 | still_failed.append(link)
72 |
73 | failed_links = still_failed
74 |
75 | # Final report
76 | if failed_links:
77 | logger.warning(f"Failed to unrestrict {len(failed_links)} links after {max_retries} retries")
78 |
79 | logger.info(f"Successfully unrestricted {len(results)} links")
80 | return results
--------------------------------------------------------------------------------
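A sketch of `batch_unrestrict_links` (the link below is a hypothetical placeholder and a valid token is required):

```python
from robofuse.api.client import RealDebridClient
from robofuse.api.unrestrict import UnrestrictAPI

client = RealDebridClient(token="YOUR_RD_API_TOKEN")  # placeholder token
unrestrict = UnrestrictAPI(client)

links = ["https://real-debrid.com/d/EXAMPLE"]  # hypothetical restricted link
results = unrestrict.batch_unrestrict_links(links, max_retries=2)
for result in results:
    # The unrestrict/link response carries the direct URL in "download"
    print(result.get("download"))
```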
/robofuse/config.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from pathlib import Path
4 | from typing import Dict, Any
5 |
6 |
7 | DEFAULT_CONFIG = {
8 | "token": "YOUR_RD_API_TOKEN",
9 | "output_dir": "./Library",
10 | "cache_dir": "./cache",
11 | "concurrent_requests": 32,
12 | "general_rate_limit": 60,
13 | "torrents_rate_limit": 25,
14 | "watch_mode_interval": 60,
15 | "repair_torrents": True,
16 | "use_ptt_parser": True,
17 | }
18 |
19 |
20 | class Config:
21 | def __init__(self, config_path: str = None):
22 | self.config_path = config_path or "config.json"
23 | self.config = self._load_config()
24 | self._validate_config()
25 | self._setup_directories()
26 |
27 | def _load_config(self) -> Dict[str, Any]:
28 | """Load configuration from file or create default if not found."""
29 | config = DEFAULT_CONFIG.copy()
30 |
31 | if os.path.exists(self.config_path):
32 | try:
33 | with open(self.config_path, "r") as f:
34 | user_config = json.load(f)
35 | config.update(user_config)
36 | except json.JSONDecodeError:
37 | print(f"Error parsing config file {self.config_path}. Using defaults.")
38 | except Exception as e:
39 | print(f"Error loading config file: {e}. Using defaults.")
40 | else:
41 | print(f"Config file {self.config_path} not found. Using defaults.")
42 | self._save_default_config()
43 |
44 | return config
45 |
46 | def _save_default_config(self):
47 | """Save default configuration to file."""
48 | try:
49 | with open(self.config_path, "w") as f:
50 | json.dump(DEFAULT_CONFIG, f, indent=4)
51 | print(f"Default configuration saved to {self.config_path}")
52 | except Exception as e:
53 | print(f"Error saving default config: {e}")
54 |
55 | def _validate_config(self):
56 | """Validate configuration values."""
57 | if self.config["token"] == DEFAULT_CONFIG["token"]:
58 | print("WARNING: You are using the default API token. Please update your config.json with your Real-Debrid API token.")
59 |
60 | # Convert paths to absolute
61 | self.config["output_dir"] = os.path.abspath(os.path.expanduser(self.config["output_dir"]))
62 | self.config["cache_dir"] = os.path.abspath(os.path.expanduser(self.config["cache_dir"]))
63 |
64 | # Ensure numeric values are reasonable
65 | self.config["concurrent_requests"] = max(1, min(int(self.config["concurrent_requests"]), 64))
66 | self.config["general_rate_limit"] = max(1, int(self.config["general_rate_limit"]))
67 | self.config["torrents_rate_limit"] = max(1, int(self.config["torrents_rate_limit"]))
68 | self.config["watch_mode_interval"] = max(30, int(self.config["watch_mode_interval"]))
69 |
70 | def _setup_directories(self):
71 | """Create output and cache directories if they don't exist."""
72 | for dir_name in ["output_dir", "cache_dir"]:
73 | directory = Path(self.config[dir_name])
74 | if not directory.exists():
75 | try:
76 | directory.mkdir(parents=True)
77 | print(f"Created directory: {directory}")
78 | except Exception as e:
79 | print(f"Error creating directory {directory}: {e}")
80 |
81 | def get(self, key: str, default: Any = None) -> Any:
82 | """Get configuration value by key."""
83 | return self.config.get(key, default)
84 |
85 | def override(self, overrides: Dict[str, Any]):
86 | """Override configuration with provided values."""
87 | self.config.update(overrides)
88 | self._validate_config()
89 |
90 | def __getitem__(self, key: str) -> Any:
91 | """Allow dictionary-like access to config values."""
92 | return self.config[key]
--------------------------------------------------------------------------------
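A sketch of loading and overriding configuration. Note that instantiating `Config` has side effects: it writes a default `config.json` if none exists and creates the output and cache directories:

```python
from robofuse.config import Config

# Loads ./config.json, writing the defaults there first if it is missing
config = Config()

# CLI-style override; values are re-validated, so out-of-range numbers are clamped
config.override({"concurrent_requests": 128})
print(config["concurrent_requests"])      # 64 (clamped to the 1-64 range)
print(config.get("watch_mode_interval"))  # 60 unless overridden
```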
/robofuse/utils/logging.py:
--------------------------------------------------------------------------------
1 | """Logging configuration for robofuse."""
2 |
3 | import logging
4 | import sys
5 | from enum import Enum
6 | from typing import Optional
7 |
8 | import colorama
9 | from colorama import Fore, Style
10 |
11 |
12 | # Initialize colorama
13 | colorama.init()
14 |
15 | # Define custom log levels
16 | VERBOSE = 15
17 | logging.addLevelName(VERBOSE, "VERBOSE")
18 |
19 |
20 | class LogLevel(Enum):
21 | ERROR = 1
22 | WARNING = 2
23 | INFO = 3
24 | VERBOSE = 4
25 | DEBUG = 5
26 |
27 |
28 | class CustomLogger(logging.Logger):
29 | """Custom logger with additional verbosity levels."""
30 |
31 |     def __init__(self, name: str, level: LogLevel = LogLevel.INFO):
32 |         super().__init__(name)
33 |         self.set_level(level)  # sets the numeric stdlib level from the enum
34 |         self._setup_handlers()
35 |
36 | def _setup_handlers(self):
37 | """Setup console handler with colored output."""
38 | console_handler = logging.StreamHandler(sys.stdout)
39 | console_handler.setFormatter(logging.Formatter('%(message)s'))
40 | self.addHandler(console_handler)
41 |
42 | def set_level(self, level: LogLevel):
43 | """Set the logging level."""
44 |         self.verbosity = level  # store the enum separately so self.level stays numeric
45 | if level == LogLevel.DEBUG:
46 | super().setLevel(logging.DEBUG)
47 | elif level == LogLevel.VERBOSE:
48 | super().setLevel(VERBOSE)
49 | elif level == LogLevel.INFO:
50 | super().setLevel(logging.INFO)
51 | elif level == LogLevel.WARNING:
52 | super().setLevel(logging.WARNING)
53 | elif level == LogLevel.ERROR:
54 | super().setLevel(logging.ERROR)
55 |
56 | def verbose(self, msg, *args, **kwargs):
57 | """Log a verbose message."""
58 | if self.isEnabledFor(VERBOSE):
59 | self._log(VERBOSE, f"{Fore.CYAN}[VERBOSE] {msg}{Style.RESET_ALL}", args, **kwargs)
60 |
61 | def debug(self, msg, *args, **kwargs):
62 | """Log a debug message."""
63 | if self.isEnabledFor(logging.DEBUG):
64 | self._log(logging.DEBUG, f"{Fore.MAGENTA}[DEBUG] {msg}{Style.RESET_ALL}", args, **kwargs)
65 |
66 | def info(self, msg, *args, **kwargs):
67 | """Log an info message."""
68 | if self.isEnabledFor(logging.INFO):
69 | self._log(logging.INFO, f"{Fore.GREEN}[INFO] {msg}{Style.RESET_ALL}", args, **kwargs)
70 |
71 | def warning(self, msg, *args, **kwargs):
72 | """Log a warning message."""
73 | if self.isEnabledFor(logging.WARNING):
74 | self._log(logging.WARNING, f"{Fore.YELLOW}[WARNING] {msg}{Style.RESET_ALL}", args, **kwargs)
75 |
76 | def error(self, msg, *args, **kwargs):
77 | """Log an error message."""
78 | if self.isEnabledFor(logging.ERROR):
79 | self._log(logging.ERROR, f"{Fore.RED}[ERROR] {msg}{Style.RESET_ALL}", args, **kwargs)
80 |
81 | def success(self, msg, *args, **kwargs):
82 | """Log a success message."""
83 | if self.isEnabledFor(logging.INFO):
84 | self._log(logging.INFO, f"{Fore.GREEN}[SUCCESS] {msg}{Style.RESET_ALL}", args, **kwargs)
85 |
86 | def progress(self, msg, *args, **kwargs):
87 | """Log a progress message without newline."""
88 | if self.isEnabledFor(logging.INFO):
89 | print(f"{Fore.BLUE}[PROGRESS] {msg}{Style.RESET_ALL}", end="\r", flush=True)
90 |
91 |
92 | # Global logger instance
93 | logger = CustomLogger("robofuse")
94 |
95 | def setup_logging(verbosity: Optional[str] = None) -> None:
96 | """Setup logging configuration.
97 |
98 | Args:
99 | verbosity: Logging level ('debug', 'verbose', 'info', 'warning', 'error')
100 | """
101 | if verbosity:
102 | verbosity = verbosity.lower()
103 | if verbosity == 'debug':
104 | logger.set_level(LogLevel.DEBUG)
105 | elif verbosity == 'verbose':
106 | logger.set_level(LogLevel.VERBOSE)
107 | elif verbosity == 'info':
108 | logger.set_level(LogLevel.INFO)
109 | elif verbosity == 'warning':
110 | logger.set_level(LogLevel.WARNING)
111 | elif verbosity == 'error':
112 | logger.set_level(LogLevel.ERROR)
113 | else:
114 | logger.set_level(LogLevel.INFO) # Default to INFO level
--------------------------------------------------------------------------------
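A minimal sketch of the logger in use:

```python
from robofuse.utils.logging import logger, setup_logging

setup_logging("verbose")  # enables VERBOSE (level 15) and above
logger.info("Starting up")
logger.verbose("Shown only at verbose or debug level")
logger.success("Done")    # emitted at INFO level with a [SUCCESS] prefix
```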
/robofuse/cli/commands.py:
--------------------------------------------------------------------------------
1 | """Command-line interface for robofuse."""
2 |
3 | import click
4 | import os
5 | import sys
6 | from typing import Optional
7 |
8 | from robofuse.config import Config
9 | from robofuse.core.processor import RoboFuseProcessor
10 | from robofuse.utils.logging import logger, LogLevel, setup_logging
11 |
12 |
13 | class Context:
14 | """Context object to hold shared state."""
15 | def __init__(self):
16 | self.config = None
17 | self.verbosity = "info"
18 |
19 |
20 | pass_context = click.make_pass_decorator(Context, ensure=True)
21 |
22 |
23 | @click.group()
24 | @click.version_option(version="0.3.0")
25 | @click.option(
26 | "--config", "-c",
27 | type=click.Path(exists=False),
28 | default="config.json",
29 | help="Path to config file"
30 | )
31 | @click.option(
32 | "--verbosity", "-v",
33 | type=click.Choice(["error", "warning", "info", "verbose", "debug"], case_sensitive=False),
34 | default="info",
35 | help="Verbosity level"
36 | )
37 | @click.option("--debug", is_flag=True, help="Enable debug logging")
38 | @click.option("--verbose", is_flag=True, help="Enable verbose logging")
39 | @click.option("--info", is_flag=True, help="Enable info logging")
40 | @click.option("--warning", is_flag=True, help="Enable warning logging")
41 | @click.option("--error", is_flag=True, help="Enable error logging")
42 | @pass_context
43 | def cli(ctx, config, verbosity, debug, verbose, info, warning, error):
44 | """robofuse: A service for interacting with Real-Debrid and generating .strm files."""
45 |
46 | # Determine verbosity level from flags
47 | if debug:
48 | verbosity = "debug"
49 | elif verbose:
50 | verbosity = "verbose"
51 | elif info:
52 | verbosity = "info"
53 | elif warning:
54 | verbosity = "warning"
55 | elif error:
56 | verbosity = "error"
57 |
58 | # Set up logging
59 | setup_logging(verbosity)
60 |
61 | # Initialize config
62 | try:
63 | ctx.config = Config(config_path=config)
64 | ctx.verbosity = verbosity
65 | except Exception as e:
66 | logger.error(f"Failed to initialize configuration: {str(e)}")
67 | sys.exit(1)
68 |
69 |
70 | @cli.command()
71 | @pass_context
72 | def run(ctx):
73 | """Run robofuse once."""
74 | config = ctx.config
75 |
76 | # Check if token is set
77 | if config["token"] == "YOUR_RD_API_TOKEN":
78 | logger.error("API token not set. Please update your config.json with your Real-Debrid API token.")
79 | sys.exit(1)
80 |
81 | # Run the processor once
82 | try:
83 | processor = RoboFuseProcessor(config)
84 | processor.run()
85 | except Exception as e:
86 | logger.error(f"Error running robofuse: {str(e)}")
87 | sys.exit(1)
88 |
89 |
90 | @cli.command()
91 | @click.option(
92 | "--interval", "-i",
93 | type=int,
94 | default=None,
95 | help="Interval in seconds between processing cycles (defaults to config value)"
96 | )
97 | @pass_context
98 | def watch(ctx, interval):
99 | """Run robofuse in watch mode."""
100 | config = ctx.config
101 |
102 | # Check if token is set
103 | if config["token"] == "YOUR_RD_API_TOKEN":
104 | logger.error("API token not set. Please update your config.json with your Real-Debrid API token.")
105 | sys.exit(1)
106 |
107 | # Run the processor in watch mode
108 | try:
109 | processor = RoboFuseProcessor(config)
110 | processor.watch(interval=interval)
111 | except Exception as e:
112 | logger.error(f"Error in watch mode: {str(e)}")
113 | sys.exit(1)
114 |
115 |
116 | @cli.command()
117 | @pass_context
118 | def dry_run(ctx):
119 | """Run robofuse in dry-run mode (no changes made)."""
120 | config = ctx.config
121 |
122 | # Check if token is set
123 | if config["token"] == "YOUR_RD_API_TOKEN":
124 | logger.error("API token not set. Please update your config.json with your Real-Debrid API token.")
125 | sys.exit(1)
126 |
127 | # Run the processor in dry-run mode
128 | try:
129 | processor = RoboFuseProcessor(config, dry_run=True)
130 | processor.run()
131 | except Exception as e:
132 | logger.error(f"Error in dry-run mode: {str(e)}")
133 | sys.exit(1)
--------------------------------------------------------------------------------
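For quick experiments without installing the console script, the CLI can be exercised in-process with Click's test runner (a sketch, not part of the repository):

```python
from click.testing import CliRunner
from robofuse.cli.commands import cli

runner = CliRunner()
result = runner.invoke(cli, ["--help"], obj={})
print(result.output)  # top-level help text, including run/watch/dry-run
```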
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/#use-with-ide
110 | .pdm.toml
111 |
112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113 | __pypackages__/
114 |
115 | # Celery stuff
116 | celerybeat-schedule
117 | celerybeat.pid
118 |
119 | # SageMath parsed files
120 | *.sage.py
121 |
122 | # Environments
123 | .env
124 | .venv
125 | env/
126 | venv/
127 | ENV/
128 | env.bak/
129 | venv.bak/
130 | *-env/
131 |
132 | # Spyder project settings
133 | .spyderproject
134 | .spyproject
135 |
136 | # Rope project settings
137 | .ropeproject
138 |
139 | # mkdocs documentation
140 | /site
141 |
142 | # mypy
143 | .mypy_cache/
144 | .dmypy.json
145 | dmypy.json
146 |
147 | # Pyre type checker
148 | .pyre/
149 |
150 | # pytype static type analyzer
151 | .pytype/
152 |
153 | # Cython debug symbols
154 | cython_debug/
155 |
156 | # PyCharm
157 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
158 | # be added to the global gitignore or merged into this project gitignore. For a PyCharm
159 | # project, it is recommended to include the following files/folders in the gitignore
160 | # file if they are created by the IDE
161 | .idea/
162 |
163 | # VS Code
164 | .vscode/
165 |
166 | # macOS
167 | .DS_Store
168 | .AppleDouble
169 | .LSOverride
170 |
171 | # Thumbnails
172 | ._*
173 |
174 | # Files that might appear in the root of a volume
175 | .DocumentRevisions-V100
176 | .fseventsd
177 | .Spotlight-V100
178 | .TemporaryItems
179 | .Trashes
180 | .VolumeIcon.icns
181 | .com.apple.timemachine.donotpresent
182 |
183 | # Directories potentially created on remote AFP share
184 | .AppleDB
185 | .AppleDesktop
186 | Network Trash Folder
187 | Temporary Items
188 | .apdisk
189 |
190 | # Windows
191 | Thumbs.db
192 | Thumbs.db:encryptable
193 | ehthumbs.db
194 | ehthumbs_vista.db
195 | *.stackdump
196 | [Dd]esktop.ini
197 | $RECYCLE.BIN/
198 | *.cab
199 | *.msi
200 | *.msix
201 | *.msm
202 | *.msp
203 | *.lnk
204 |
205 | # Linux
206 | *~
207 |
208 | # Temporary files
209 | *.tmp
210 | *.temp
211 | *.swp
212 | *.swo
213 | *~
214 |
215 | # Log files
216 | *.log
217 | logs/
218 |
219 | # Additional common patterns
220 | .pytest_cache/
221 | .mypy_cache/
222 | .coverage
223 | .tox/
224 | .cache/
225 | *.pid
226 | *.lock
227 |
228 | # robofuse specific
229 | Library/
230 | cache/
231 | *.strm
232 |
233 | # Build artifacts
234 | robofuse.wiki/
--------------------------------------------------------------------------------
/robofuse/api/torrents.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, List, Any, Optional, Union
2 | import os
3 |
4 | from robofuse.api.client import RealDebridClient
5 | from robofuse.utils.logging import logger
6 |
7 |
8 | class TorrentsAPI:
9 | """API client for Real-Debrid torrents endpoints."""
10 |
11 | def __init__(self, client: RealDebridClient):
12 | self.client = client
13 |
14 | def get_torrents(self, page: int = 1, limit: int = 100) -> List[Dict[str, Any]]:
15 | """Get a list of torrents from Real-Debrid."""
16 | logger.info(f"Fetching torrents (page {page}, limit {limit})")
17 | return self.client.get("torrents", params={"page": page, "limit": limit})
18 |
19 | def get_all_torrents(self) -> List[Dict[str, Any]]:
20 | """Get all torrents using pagination."""
21 | logger.info("Fetching all torrents (this may take a while)")
22 | return self.client.get_paginated("torrents", limit_per_page=100)
23 |
24 | def get_torrent_info(self, torrent_id: str) -> Dict[str, Any]:
25 | """Get information about a specific torrent."""
26 | logger.verbose(f"Fetching info for torrent {torrent_id}")
27 | return self.client.get(f"torrents/info/{torrent_id}")
28 |
29 | def add_magnet(self, magnet_link: str) -> Dict[str, Any]:
30 | """Add a magnet link to Real-Debrid."""
31 |         logger.info("Adding magnet link")
32 | logger.verbose(f"Magnet: {magnet_link}")
33 | return self.client.post("torrents/addMagnet", data={"magnet": magnet_link})
34 |
35 | def add_torrent_file(self, file_path: str) -> Dict[str, Any]:
36 | """Upload a torrent file to Real-Debrid."""
37 | logger.info(f"Uploading torrent file: {os.path.basename(file_path)}")
38 |
39 | with open(file_path, "rb") as f:
40 | files = {"file": (os.path.basename(file_path), f, "application/x-bittorrent")}
41 | return self.client.post("torrents/addTorrent", files=files)
42 |
43 | def select_files(self, torrent_id: str, file_ids: Union[List[int], str] = "all") -> Dict[str, Any]:
44 | """Select which files to download from the torrent."""
45 | logger.info(f"Selecting files for torrent {torrent_id}")
46 |
47 | if file_ids == "all":
48 | data = {"files": "all"}
49 | else:
50 | # Convert list of IDs to comma-separated string
51 | files_str = ",".join(str(file_id) for file_id in file_ids)
52 | data = {"files": files_str}
53 |
54 | logger.verbose(f"Selected files: {data['files']}")
55 | return self.client.post(f"torrents/selectFiles/{torrent_id}", data=data)
56 |
57 | def delete_torrent(self, torrent_id: str) -> Dict[str, Any]:
58 | """Delete a torrent from Real-Debrid."""
59 | logger.info(f"Deleting torrent {torrent_id}")
60 | return self.client.delete(f"torrents/delete/{torrent_id}")
61 |
62 | def get_torrent_files(self, torrent_id: str) -> List[Dict[str, Any]]:
63 | """Get a list of files in a torrent."""
64 | logger.verbose(f"Fetching files for torrent {torrent_id}")
65 | torrent_info = self.get_torrent_info(torrent_id)
66 | return torrent_info.get("files", [])
67 |
68 | def select_video_files(self, torrent_id: str) -> Dict[str, Any]:
69 | """Select only video files from a torrent (mp4 and mkv)."""
70 | logger.info(f"Selecting video files for torrent {torrent_id}")
71 |
72 | # Get torrent files
73 | files = self.get_torrent_files(torrent_id)
74 |
75 | # Filter for video files
76 | video_file_ids = []
77 |         for file in files:
78 |             file_name = file.get("path", "").lower()
79 |             if file_name.endswith((".mkv", ".mp4")):
80 |                 video_file_ids.append(file["id"])  # use the file ID reported by the API
81 |
82 | if not video_file_ids:
83 | logger.warning(f"No video files found in torrent {torrent_id}")
84 | return {"status": "error", "message": "No video files found"}
85 |
86 | logger.verbose(f"Selected {len(video_file_ids)} video files")
87 | return self.select_files(torrent_id, video_file_ids)
88 |
89 | def reinsert_torrent(self, hash_value: str) -> Dict[str, Any]:
90 | """Reinsert a torrent using its hash."""
91 | logger.info(f"Reinserting torrent with hash: {hash_value}")
92 |
93 | # Create magnet link from hash
94 | magnet_link = f"magnet:?xt=urn:btih:{hash_value}"
95 |
96 | # Add magnet
97 | add_result = self.add_magnet(magnet_link)
98 |
99 | if "id" not in add_result:
100 | logger.error(f"Failed to add magnet: {add_result}")
101 | return {"status": "error", "message": "Failed to add magnet", "details": add_result}
102 |
103 | torrent_id = add_result["id"]
104 | logger.success(f"Successfully added magnet. Torrent ID: {torrent_id}")
105 |
106 | # Select video files
107 | select_result = self.select_video_files(torrent_id)
108 |
109 | return {
110 | "status": "success",
111 | "torrent_id": torrent_id,
112 | "add_result": add_result,
113 | "select_result": select_result
114 | }
--------------------------------------------------------------------------------
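A sketch of repairing a dead torrent via `reinsert_torrent` (the info-hash is a made-up placeholder; a valid token is required):

```python
from robofuse.api.client import RealDebridClient
from robofuse.api.torrents import TorrentsAPI

client = RealDebridClient(token="YOUR_RD_API_TOKEN")  # placeholder token
torrents = TorrentsAPI(client)

# Re-adds the magnet for this (hypothetical) hash and selects its video files
result = torrents.reinsert_torrent("0123456789abcdef0123456789abcdef01234567")
if result["status"] == "success":
    print(f"New torrent ID: {result['torrent_id']}")
```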
/README.md:
--------------------------------------------------------------------------------
  1 | ![robofuse logo](assets/logo.png)
  2 |
  3 | A Python service that interacts with the Real-Debrid API to generate .strm files for media players like Infuse, Jellyfin, and Emby.
  4 |
  5 |
16 | ---
17 |
18 | ## Table of Contents
19 |
20 | - [Overview](#overview)
21 | - [Features](#features)
22 | - [Quick Start](#quick-start)
23 | - [Documentation](#documentation)
24 | - [Contributing](#contributing)
25 | - [License](#license)
26 |
27 | ---
28 |
29 | ## Overview
30 |
31 | robofuse connects to your Real-Debrid account and efficiently manages your media files by:
32 |
33 | 1. Retrieving your torrents and downloads
34 | 2. Repairing dead torrents when needed
35 | 3. Unrestricting links automatically
36 | 4. Generating .strm files for streamable content
37 | 5. Maintaining your library by updating or removing stale entries
38 | 6. Intelligently organizing media files based on parsed metadata
39 |
40 | ## Features
41 |
42 | - **Efficient API Integration**: Smart rate limiting to prevent API throttling
43 | - **Parallel Processing**: Fast operations with concurrent API requests
44 | - **Smart Organization**: Automatic categorization of media into appropriate folders
45 | - **Metadata Parsing**: Intelligent filename parsing for proper media organization
46 | - **Watch Mode**: Continuous monitoring for new content
47 | - **Caching System**: Reduces redundant API calls
48 | - **Link Management**: Handles expired links and refreshes them automatically
49 | - **Health Checks**: Ensures content integrity
50 | - **Clean UI**: Colorful terminal interface with progress bars
51 | - **Docker Support**: Run in containers for easy deployment
52 | - **Background Services**: Deploy with systemd, launchd, or Docker
53 | - **Log Rotation**: Built-in log management for continuous operation
54 | - **Anime Detection**: Automatically identifies and categorizes anime content
55 |
56 | ## Quick Start
57 |
58 | 1. **Install robofuse**:
59 | ```bash
60 | git clone https://github.com/Renoria/robofuse.git
61 | cd robofuse
62 | pip install -e .
63 | ```
64 |
65 | 2. **Configure your Real-Debrid API token** in the existing `config.json` file
66 |
67 | 3. **Run robofuse**:
68 | ```bash
69 | # Show help
70 | robofuse --help
71 |
72 | # Test with dry run first
73 | robofuse --debug dry-run
74 |
75 | # Run once to process all content
76 | robofuse run
77 |
78 | # Start watch mode for continuous monitoring
79 | robofuse watch
80 |
81 | # Watch mode with custom interval
82 | robofuse watch --interval 300
83 | ```
84 |
85 | 4. **Deploy for continuous operation** (optional):
86 | - See our [Deployment Guide](https://github.com/Renoria/robofuse/wiki/Deployment) for systemd, launchd, or Docker setup
87 |
88 | ## Documentation
89 |
90 | 📚 **Complete documentation is available in our [GitHub Wiki](https://github.com/Renoria/robofuse/wiki)**
91 |
92 | ### Quick Links:
93 | - **[🏠 Home](https://github.com/Renoria/robofuse/wiki/Home)** - Documentation overview and navigation
94 | - **[📦 Installation](https://github.com/Renoria/robofuse/wiki/Installation)** - Complete installation instructions and setup
95 | - **[⚙️ Configuration](https://github.com/Renoria/robofuse/wiki/Configuration)** - API setup, settings, and metadata parsing
96 | - **[🚀 Usage](https://github.com/Renoria/robofuse/wiki/Usage)** - Command reference and usage patterns
97 | - **[🔧 Deployment](https://github.com/Renoria/robofuse/wiki/Deployment)** - Background services, Docker, and production deployment
98 | - **[🛠️ Troubleshooting](https://github.com/Renoria/robofuse/wiki/Troubleshooting)** - Common issues and debugging solutions
99 |
100 | > 💡 **Need help?** Start with the [Troubleshooting Guide](https://github.com/Renoria/robofuse/wiki/Troubleshooting) or [open an issue](https://github.com/Renoria/robofuse/issues) if you encounter problems.
101 |
102 | ## Contributing
103 |
104 | Contributions to robofuse are welcome! Here's how you can help:
105 |
106 | 1. **Bug Reports**: Open an issue describing the bug with steps to reproduce
107 | 2. **Feature Requests**: Open an issue describing the new feature and why it would be useful
108 | 3. **Code Contributions**: Submit a pull request with your improvements
109 | - Fork the repository
110 | - Create a feature branch (`git checkout -b feature/amazing-feature`)
111 | - Commit your changes (`git commit -m 'Add some amazing feature'`)
112 | - Push to the branch (`git push origin feature/amazing-feature`)
113 | - Open a Pull Request
114 |
115 | Please ensure your code follows the existing style and includes appropriate tests.
116 |
117 | ## License
118 |
119 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
--------------------------------------------------------------------------------
/robofuse/api/client.py:
--------------------------------------------------------------------------------
1 | import time
2 | from typing import Optional, Dict, Any, List, Union, Tuple
3 | import json
4 | from urllib.parse import urljoin
5 |
6 | import requests
7 | from requests.exceptions import RequestException
8 |
9 | from robofuse.utils.logging import logger
10 |
11 |
12 | class APIError(Exception):
13 | """Exception raised for API errors."""
14 | def __init__(self, message: str, code: Optional[int] = None, response: Optional[requests.Response] = None):
15 | self.message = message
16 | self.code = code
17 | self.response = response
18 | super().__init__(message)
19 |
20 |
21 | class RealDebridClient:
22 | """Client for the Real-Debrid API."""
23 |
24 | BASE_URL = "https://api.real-debrid.com/rest/1.0/"
25 |
26 | def __init__(self, token: str, general_rate_limit: int = 60, torrents_rate_limit: int = 25):
27 | self.token = token
28 | self.general_rate_limit = general_rate_limit
29 | self.torrents_rate_limit = torrents_rate_limit
30 |
31 | # Track requests for rate limiting
32 | self.last_general_request_time = 0
33 | self.last_torrents_request_time = 0
34 |
35 | # Set up session
36 | self.session = requests.Session()
37 |         self.session.headers.update({
38 |             "Authorization": f"Bearer {token}",
39 |             "User-Agent": "robofuse/0.3.0",
40 |             # No default Content-Type: requests sets it per call (form data vs multipart)
41 |         })
42 |
43 | def _rate_limit(self, endpoint: str):
44 | """Apply rate limiting based on the endpoint."""
45 | current_time = time.time()
46 |
47 | # Torrents API has stricter rate limiting
48 | if "/torrents" in endpoint:
49 | time_since_last = current_time - self.last_torrents_request_time
50 | wait_time = (1 / self.torrents_rate_limit) - time_since_last
51 |
52 | if wait_time > 0:
53 | logger.debug(f"Rate limiting for torrents API: sleeping for {wait_time:.2f}s")
54 | time.sleep(wait_time)
55 |
56 | self.last_torrents_request_time = time.time()
57 | else:
58 | time_since_last = current_time - self.last_general_request_time
59 | wait_time = (1 / self.general_rate_limit) - time_since_last
60 |
61 | if wait_time > 0:
62 | logger.debug(f"Rate limiting for general API: sleeping for {wait_time:.2f}s")
63 | time.sleep(wait_time)
64 |
65 | self.last_general_request_time = time.time()
66 |
67 | def _handle_response(self, response: requests.Response) -> Dict[str, Any]:
68 | """Handle the API response and raise appropriate exceptions."""
69 | try:
70 | # Real-Debrid can return empty response for some successful calls
71 | if not response.text:
72 | return {}
73 |
74 | data = response.json()
75 |
76 | if response.status_code >= 400:
77 | error_code = data.get("error_code", 0)
78 | error_message = data.get("error", f"API Error: {response.status_code}")
79 | logger.error(f"API Error ({error_code}): {error_message}")
80 | raise APIError(error_message, error_code, response)
81 |
82 | return data
83 | except json.JSONDecodeError:
84 | # Handle case where response isn't JSON
85 | if response.status_code >= 400:
86 | logger.error(f"API Error ({response.status_code}): {response.text}")
87 | raise APIError(f"API Error: {response.status_code}", response.status_code, response)
88 | return {"text": response.text}
89 |
90 | def request(
91 | self,
92 | method: str,
93 | endpoint: str,
94 | params: Optional[Dict[str, Any]] = None,
95 | data: Optional[Union[Dict[str, Any], str]] = None,
96 | files: Optional[Dict[str, Any]] = None,
97 | retry_count: int = 3
98 | ) -> Dict[str, Any]:
99 | """Make a request to the API with retries and rate limiting."""
100 | url = urljoin(self.BASE_URL, endpoint)
101 | logger.debug(f"Making {method} request to {url}")
102 |
103 | attempts = 0
104 | while attempts < retry_count:
105 | try:
106 | self._rate_limit(endpoint)
107 |
108 | response = self.session.request(
109 | method=method,
110 | url=url,
111 | params=params,
112 | data=data,
113 | files=files,
114 | timeout=30, # 30 second timeout
115 | )
116 |
117 | return self._handle_response(response)
118 |
119 | except (RequestException, APIError) as e:
120 | attempts += 1
121 | retry_wait = min(2 ** attempts, 60) # Exponential backoff, max 60s
122 |
123 | # Certain errors should not be retried
124 | if isinstance(e, APIError) and e.code in [400, 401, 403, 404]:
125 | raise
126 |
127 | if attempts < retry_count:
128 | logger.warning(f"Request failed: {str(e)}. Retrying in {retry_wait}s (attempt {attempts}/{retry_count})")
129 | time.sleep(retry_wait)
130 | else:
131 | logger.error(f"Request failed after {retry_count} attempts: {str(e)}")
132 | raise
133 |
134 | # This should never happen but just in case
135 | raise APIError("Maximum retries exceeded")
136 |
137 | def get(self, endpoint: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
138 | """Make a GET request to the API."""
139 | return self.request("GET", endpoint, params=params)
140 |
141 | def post(
142 | self,
143 | endpoint: str,
144 | params: Optional[Dict[str, Any]] = None,
145 | data: Optional[Union[Dict[str, Any], str]] = None,
146 | files: Optional[Dict[str, Any]] = None
147 | ) -> Dict[str, Any]:
148 | """Make a POST request to the API."""
149 | return self.request("POST", endpoint, params=params, data=data, files=files)
150 |
151 | def delete(self, endpoint: str) -> Dict[str, Any]:
152 | """Make a DELETE request to the API."""
153 | return self.request("DELETE", endpoint)
154 |
155 | def put(
156 | self,
157 | endpoint: str,
158 | params: Optional[Dict[str, Any]] = None,
159 | data: Optional[Dict[str, Any]] = None
160 | ) -> Dict[str, Any]:
161 | """Make a PUT request to the API."""
162 | return self.request("PUT", endpoint, params=params, data=data)
163 |
164 | def get_paginated(
165 | self,
166 | endpoint: str,
167 | params: Optional[Dict[str, Any]] = None,
168 | limit_per_page: int = 100,
169 | max_pages: Optional[int] = None
170 | ) -> List[Dict[str, Any]]:
171 | """Get all pages of results for a paginated endpoint."""
172 | if params is None:
173 | params = {}
174 |
175 | all_results = []
176 | page = 1
177 |
178 | while True:
179 | # Copy params to avoid modifying the original
180 | page_params = params.copy()
181 | page_params.update({
182 | "page": page,
183 | "limit": limit_per_page
184 | })
185 |
186 | logger.verbose(f"Fetching page {page} from {endpoint}")
187 | results = self.get(endpoint, params=page_params)
188 |
189 | # If we get an empty list or dict, we've reached the end
190 | if not results or (isinstance(results, list) and len(results) == 0):
191 | break
192 |
193 | # Add results to our collection
194 | if isinstance(results, list):
195 | all_results.extend(results)
196 |
197 | # If we got fewer results than requested, we've reached the end
198 | if len(results) < limit_per_page:
199 | break
200 | else:
201 | # Handle case where the API doesn't return a list
202 | all_results.append(results)
203 | break
204 |
205 | # Check if we've reached the maximum number of pages
206 | if max_pages and page >= max_pages:
207 | break
208 |
209 | page += 1
210 |
211 | return all_results
--------------------------------------------------------------------------------
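A minimal sketch of the client against `GET /user`, a simple authenticated endpoint (assumes a valid token):

```python
from robofuse.api.client import RealDebridClient, APIError

client = RealDebridClient(token="YOUR_RD_API_TOKEN")  # placeholder token

try:
    user = client.get("user")
    print(user.get("username"))
except APIError as e:
    print(f"API call failed ({e.code}): {e.message}")
```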
/robofuse/core/strm.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 | from pathlib import Path
4 | from typing import Dict, List, Set, Optional, Any, Tuple
5 |
6 | from robofuse.utils.logging import logger
7 | from robofuse.utils.parser import MetadataParser
8 |
9 |
10 | class StrmFile:
11 | """Class for handling .strm files."""
12 |
13 | def __init__(self, output_dir: str, use_ptt_parser: bool = True):
14 | self.output_dir = Path(output_dir)
15 | self._ensure_output_dir()
16 | self.use_ptt_parser = use_ptt_parser
17 | self.metadata_parser = MetadataParser(enabled=use_ptt_parser)
18 |
19 | def _ensure_output_dir(self):
20 | """Ensure the output directory exists."""
21 | if not self.output_dir.exists():
22 | logger.info(f"Creating output directory: {self.output_dir}")
23 | self.output_dir.mkdir(parents=True, exist_ok=True)
24 |
25 | def _sanitize_filename(self, filename: str) -> str:
26 | """Sanitize a filename to be safe for the filesystem."""
27 | # Replace illegal characters
28 | sanitized = re.sub(r'[<>:"/\\|?*]', "_", filename)
29 | # Replace multiple spaces with a single space
30 | sanitized = re.sub(r'\s+', ' ', sanitized)
31 | # Trim leading/trailing spaces
32 | sanitized = sanitized.strip()
33 | # Ensure filename isn't too long
34 | if len(sanitized) > 240:
35 | sanitized = sanitized[:240]
36 |
37 | return sanitized
38 |
39 | def create_or_update_strm(
40 | self,
41 | download_url: str,
42 | filename: str,
43 | torrent_name: str,
44 | dry_run: bool = False,
45 | download_id: Optional[str] = None
46 | ) -> Dict[str, Any]:
47 | """
48 | Create or update a .strm file.
49 |
50 | Args:
51 | download_url: The URL to include in the .strm file
52 | filename: The filename for the .strm file (without extension)
53 | torrent_name: The name of the torrent (used for the folder)
54 | dry_run: If True, don't actually create/update the file
55 | download_id: Optional download ID to append to the filename
56 |
57 | Returns:
58 | Dictionary with status and details
59 | """
60 | if self.use_ptt_parser:
61 | # Parse filename to extract metadata
62 | metadata = self.metadata_parser.parse(filename)
63 | logger.verbose(f"Metadata for {filename}: {metadata}")
64 |
65 | # Generate folder structure based on metadata
66 | folder_parts = self.metadata_parser.generate_folder_structure(metadata)
67 | # Create the full path
68 | folder_path = self.output_dir
69 | for part in folder_parts:
70 | folder_path = folder_path / self._sanitize_filename(part)
71 |
72 | # Generate filename based on metadata and download ID
73 | base_filename = self.metadata_parser.generate_filename(metadata, download_id)
74 | safe_filename = self._sanitize_filename(base_filename)
75 | else:
76 | # Fallback to using torrent name as the folder
77 | folder_path = self.output_dir / self._sanitize_filename(torrent_name)
78 |
79 | # Fallback to original filename without adding download_id
80 | safe_filename = self._sanitize_filename(filename)
81 |
82 | # Add .strm extension if missing
83 | if not safe_filename.lower().endswith('.strm'):
84 | strm_filename = f"{safe_filename}.strm"
85 | else:
86 | strm_filename = safe_filename
87 |
88 | # Full path to the .strm file
89 | strm_path = folder_path / strm_filename
90 |
91 | # Check if this is an update or new file
92 | is_update = strm_path.exists()
93 |
94 | # Get current content if file exists
95 | current_url = None
96 | if is_update:
97 | try:
98 | with open(strm_path, 'r') as f:
99 | current_url = f.read().strip()
100 | except Exception as e:
101 | logger.warning(f"Failed to read existing STRM file: {str(e)}")
102 |
103 | # Determine action to take
104 | if is_update and current_url == download_url:
105 | logger.verbose(f"STRM file already exists with current URL: {strm_path}")
106 | return {
107 | "status": "skipped",
108 | "path": str(strm_path),
109 | "reason": "file exists with same URL",
110 | "is_update": False
111 | }
112 |
113 | if dry_run:
114 | action = "Would update" if is_update else "Would create"
115 | logger.info(f"{action} STRM file: {strm_path}")
116 | return {
117 | "status": "dry_run",
118 | "path": str(strm_path),
119 | "action": "update" if is_update else "create",
120 | "is_update": is_update
121 | }
122 |
123 | # Create directory if it doesn't exist
124 | if not folder_path.exists():
125 | folder_path.mkdir(parents=True, exist_ok=True)
126 |
127 | # Write the .strm file
128 | try:
129 | with open(strm_path, 'w') as f:
130 | f.write(download_url)
131 |
132 | action = "Updated" if is_update else "Created"
133 | logger.success(f"{action} STRM file: {strm_path}")
134 |
135 | return {
136 | "status": "success",
137 | "path": str(strm_path),
138 | "action": "update" if is_update else "create",
139 | "is_update": is_update
140 | }
141 | except Exception as e:
142 | logger.error(f"Failed to write STRM file: {str(e)}")
143 | return {
144 | "status": "error",
145 | "path": str(strm_path),
146 | "error": str(e)
147 | }
148 |
149 | def delete_strm(self, strm_path: str) -> Dict[str, Any]:
150 | """Delete a .strm file."""
151 | path = Path(strm_path)
152 |
153 | if not path.exists():
154 | logger.warning(f"STRM file does not exist: {path}")
155 | return {
156 | "status": "error",
157 | "path": str(path),
158 | "error": "File does not exist"
159 | }
160 |
161 | try:
162 | path.unlink()
163 | logger.success(f"Deleted STRM file: {path}")
164 |
165 | # Remove empty parent directory if it's now empty
166 | parent = path.parent
167 | if parent.exists() and not any(parent.iterdir()):
168 | parent.rmdir()
169 | logger.info(f"Removed empty directory: {parent}")
170 |
171 | return {
172 | "status": "success",
173 | "path": str(path)
174 | }
175 | except Exception as e:
176 | logger.error(f"Failed to delete STRM file: {str(e)}")
177 | return {
178 | "status": "error",
179 | "path": str(path),
180 | "error": str(e)
181 | }
182 |
183 | def find_existing_strm_files(self) -> List[Dict[str, Any]]:
184 | """Find all existing .strm files in the output directory."""
185 | logger.info(f"Scanning for existing STRM files in {self.output_dir}")
186 |
187 | strm_files = []
188 |
189 | # Walk through the output directory
190 | for root, _, files in os.walk(self.output_dir):
191 | for file in files:
192 | if file.lower().endswith('.strm'):
193 | strm_path = os.path.join(root, file)
194 |
195 | # Read the URL from the STRM file
196 | try:
197 |                         with open(strm_path, 'r', encoding='utf-8') as f:
198 | url = f.read().strip()
199 |
200 | # Extract relative path from output_dir
201 | rel_path = os.path.relpath(strm_path, self.output_dir)
202 |
203 | # Get parts of the path for organized content
204 | path_parts = Path(rel_path).parts
205 |
206 | file_info = {
207 | "path": strm_path,
208 | "url": url,
209 | "filename": os.path.basename(strm_path)
210 | }
211 |
212 | # Add path parts info (useful for organized content)
213 | if len(path_parts) >= 2:
214 | file_info["parent_folder"] = path_parts[0]
215 | if len(path_parts) >= 3 and "season" in path_parts[1].lower():
216 | file_info["season_folder"] = path_parts[1]
217 |
218 | # Add metadata from the filename
219 | if self.use_ptt_parser:
220 | try:
221 | file_metadata = self.metadata_parser.parse(os.path.basename(strm_path))
222 | file_info["metadata"] = file_metadata
223 | except Exception as e:
224 | logger.debug(f"Failed to parse metadata for {strm_path}: {str(e)}")
225 |
226 | strm_files.append(file_info)
227 | except Exception as e:
228 | logger.warning(f"Failed to read STRM file {strm_path}: {str(e)}")
229 |
230 | logger.info(f"Found {len(strm_files)} existing STRM files")
231 | return strm_files
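232 | 
233 | 
234 | # Usage sketch (illustrative only; the handler is normally constructed by
235 | # RoboFuseProcessor, and the URL below is a placeholder rather than a real
236 | # Real-Debrid link):
237 | #
238 | #     handler = StrmFile("./Library", use_ptt_parser=True)
239 | #     result = handler.create_or_update_strm(
240 | #         download_url="https://example.com/stream/abc123",
241 | #         filename="Show.S01E01.1080p.WEB.mkv",
242 | #         torrent_name="Show.S01E01.1080p.WEB",
243 | #         dry_run=True,
244 | #     )
245 | #     # result["status"] is "dry_run" here; a real run returns "success" on
246 | #     # the first write and "skipped" when the file already holds the URL.
247 | #
248 | #     for info in handler.find_existing_strm_files():
249 | #         print(info["path"], info["url"])   # "metadata" present when PTT is enabled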
--------------------------------------------------------------------------------
/robofuse/utils/parser.py:
--------------------------------------------------------------------------------
1 | """Filename parsing utilities using PTT (Parsett)."""
2 |
3 | from typing import Dict, Any, List, Optional, Tuple
4 | import os
5 |
6 | try:
7 | from PTT import Parser, add_defaults
8 | from PTT.anime import anime_handler
9 | PTT_AVAILABLE = True
10 | except ImportError:
11 | PTT_AVAILABLE = False
12 |
13 | from robofuse.utils.logging import logger
14 |
15 |
16 | # Common anime titles to help with detection
17 | COMMON_ANIME_TITLES = [
18 | "one piece", "dragon ball", "naruto", "attack on titan", "demon slayer",
19 | "my hero academia", "jujutsu kaisen", "bleach", "hunter x hunter",
20 | "evangelion", "fullmetal", "gintama", "death note", "sword art online",
21 | "cowboy bebop", "fairy tail", "jojo", "pokemon", "yu-gi-oh", "sailor moon",
22 | "boku no hero", "shingeki no kyojin", "kimetsu no yaiba", "berserk", "gundam"
23 | ]
24 |
25 |
26 | class MetadataParser:
27 | """Parse filenames to extract metadata using PTT."""
28 |
29 | def __init__(self, enabled: bool = True):
30 | """Initialize the parser.
31 |
32 | Args:
33 | enabled: Whether to use PTT for parsing.
34 | """
35 | self.enabled = enabled and PTT_AVAILABLE
36 |
37 | if self.enabled:
38 | logger.info("PTT parser enabled for metadata extraction")
39 | # Create a parser instance
40 | self.parser = Parser()
41 | # Add default handlers
42 | add_defaults(self.parser)
43 | # Add anime handlers
44 | anime_handler(self.parser)
45 |         elif enabled and not PTT_AVAILABLE:
46 |             # self.enabled is already False from the expression above
47 |             logger.warning("PTT library not available. Install with 'pip install git+https://github.com/dreulavelle/PTT.git' for better filename parsing.")
48 |         else:
49 |             logger.info("PTT parser disabled")
50 |
51 | def parse(self, filename: str) -> Dict[str, Any]:
52 | """Parse a filename to extract metadata.
53 |
54 | Args:
55 | filename: The filename to parse
56 |
57 | Returns:
58 | Dictionary with extracted metadata
59 | """
60 | if not self.enabled or not hasattr(self, 'parser'):
61 | logger.warning(f"PTT parser not available for {filename}. File will be placed in Others folder.")
62 | return {"title": os.path.splitext(filename)[0], "type": "unknown"}
63 |
64 | try:
65 | # Use PTT's built-in parsing
66 | metadata = self.parser.parse(filename)
67 | logger.verbose(f"Parsed metadata: {metadata}")
68 |
69 | # Ensure there's a valid title
70 | if not metadata.get("title"):
71 |                 # Fall back to the bare filename when PTT finds no title
72 |                 return {"title": os.path.splitext(filename)[0], "type": "unknown"}
73 |
74 | # Determine content type and formatted title
75 | media_type, formatted_title = self._determine_media_type(metadata)
76 | metadata["type"] = media_type.lower()
77 | metadata["formatted_title"] = formatted_title
78 |
79 | return metadata
80 | except Exception as e:
81 | logger.warning(f"Error parsing filename with PTT: {str(e)}. File will be placed in Others folder.")
82 | return {"title": os.path.splitext(filename)[0], "type": "unknown"}
83 |
84 | def _determine_media_type(self, metadata: Dict[str, Any]) -> Tuple[str, str]:
85 | """Determine media type and create formatted title based on PTT parsed data.
86 |
87 | Args:
88 | metadata: The parsed metadata from PTT
89 |
90 | Returns:
91 | Tuple of (media_type, formatted_title)
92 | """
93 | title = metadata.get('title', 'Unknown')
94 | resolution = metadata.get('resolution', '')
95 | quality = metadata.get('quality', '')
96 |
97 | # Create a format suffix if quality information is available
98 | format_suffix = ""
99 | if resolution or quality:
100 | format_parts = []
101 | if resolution:
102 | format_parts.append(resolution)
103 | if quality:
104 | format_parts.append(quality)
105 |             # format_parts is non-empty because resolution or quality is set
106 |             format_suffix = f" [{', '.join(format_parts)}]"
107 |
108 | # Check if it's explicitly marked as anime
109 | if metadata.get('anime', False):
110 | return self._format_anime_title(metadata, title, format_suffix)
111 |
112 | # Check for common anime release groups
113 | if metadata.get('group', '').lower() in ['subsplease', 'erai-raws', 'horrible', 'anime time', 'horriblesubs']:
114 | return self._format_anime_title(metadata, title, format_suffix)
115 |
116 | # Check for common anime titles
117 | if any(anime_title in title.lower() for anime_title in COMMON_ANIME_TITLES):
118 | # If it has episodes but no seasons, it's likely anime
119 | if metadata.get('episodes') and not metadata.get('seasons'):
120 | return self._format_anime_title(metadata, title, format_suffix)
121 |
122 | # If it has seasons and episodes, format as TV Show
123 | if metadata.get('seasons') and metadata.get('episodes'):
124 | # Even with seasons/episodes, it might still be anime if the title matches
125 | if any(anime_title in title.lower() for anime_title in COMMON_ANIME_TITLES):
126 | # This is a special case - anime formatted with TV show season/episode
127 | # We'll keep the TV Show formatting but categorize as anime
128 | season = metadata['seasons'][0]
129 | episode = metadata['episodes'][0]
130 | return "Anime", f"{title} S{season:02d}E{episode:02d}{format_suffix}"
131 |
132 | # Regular TV show
133 | media_type = "TV Show"
134 | season = metadata['seasons'][0]
135 | episode = metadata['episodes'][0]
136 | return media_type, f"{title} S{season:02d}E{episode:02d}{format_suffix}"
137 |
138 | # Files with just episodes but no seasons could be anime
139 | elif metadata.get('episodes') and not metadata.get('seasons'):
140 | # Default to TV Show with E## format if not identified as anime
141 | media_type = "TV Show"
142 | episode = metadata['episodes'][0]
143 | return media_type, f"{title} E{episode:02d}{format_suffix}"
144 |
145 | # Default to movie if no season/episode info
146 | year_str = f" ({metadata['year']})" if metadata.get('year') else ""
147 | return "Movie", f"{title}{year_str}{format_suffix}"
148 |
149 | def _format_anime_title(self, metadata: Dict[str, Any], title: str, format_suffix: str) -> Tuple[str, str]:
150 | """Format anime title consistently.
151 |
152 | Args:
153 | metadata: The parsed metadata
154 | title: The title of the anime
155 | format_suffix: Format suffix with resolution/quality
156 |
157 | Returns:
158 | Tuple of (media_type, formatted_title)
159 | """
160 | media_type = "Anime"
161 | episodes = metadata.get('episodes', [])
162 |
163 | if episodes and metadata.get('seasons'):
164 | # Anime with seasons and episodes
165 | season = metadata['seasons'][0]
166 | episode = episodes[0]
167 | return media_type, f"{title} S{season:02d}E{episode:02d}{format_suffix}"
168 | elif episodes:
169 | # Anime with just episode numbers, no seasons
170 | episode = episodes[0]
171 | return media_type, f"{title} - {episode:03d}{format_suffix}"
172 |
173 | # Anime with no episode info
174 | return media_type, f"{title}{format_suffix}"
175 |
176 | def generate_folder_structure(self, metadata: Dict[str, Any]) -> List[str]:
177 | """Generate a folder structure based on metadata.
178 |
179 | Args:
180 | metadata: The parsed metadata
181 |
182 | Returns:
183 | List of folder names for the path
184 | """
185 | folder_structure = []
186 | media_type = metadata.get("type", "unknown").lower()
187 |
188 | if media_type == "unknown":
189 | folder_structure.append("Others")
190 | elif media_type == "tv show":
191 | folder_structure.append("TV Shows")
192 | folder_structure.append(metadata["title"])
193 |             if metadata.get("seasons"):
194 | season_num = metadata["seasons"][0]
195 | folder_structure.append(f"Season {season_num:02d}")
196 | elif media_type == "movie":
197 | folder_structure.append("Movies")
198 | movie_folder = metadata["title"]
199 | if metadata.get("year"):
200 | movie_folder += f" ({metadata['year']})"
201 | folder_structure.append(movie_folder)
202 | elif media_type == "anime":
203 | folder_structure.append("Anime")
204 | folder_structure.append(metadata["title"])
205 |             if metadata.get("seasons"):
206 | season_num = metadata["seasons"][0]
207 | folder_structure.append(f"Season {season_num:02d}")
208 | else:
209 | folder_structure.append("Others")
210 | if metadata.get("title"):
211 | folder_structure.append(metadata["title"])
212 |
213 | return folder_structure
214 |
215 | def generate_filename(self, metadata: Dict[str, Any], download_id: Optional[str] = None) -> str:
216 | """Generate a clean filename based on metadata.
217 |
218 | Args:
219 | metadata: The parsed metadata
220 | download_id: Optional download ID to append to the filename
221 |
222 | Returns:
223 | A clean filename (without extension)
224 | """
225 | filename = metadata.get("formatted_title", metadata.get("title", "Unknown"))
226 |
227 | # Append download ID if provided
228 | if download_id:
229 | filename = f"{filename} [{download_id}]"
230 |
231 | return filename
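232 | 
233 | 
234 | # Usage sketch (illustrative only; the exact fields depend on what PTT
235 | # extracts from the name, so the parsed values below are plausible rather
236 | # than guaranteed):
237 | #
238 | #     parser = MetadataParser(enabled=True)
239 | #     meta = parser.parse("Show.Name.S02E05.1080p.WEB-DL.mkv")
240 | #     # e.g. {"title": "Show Name", "seasons": [2], "episodes": [5],
241 | #     #       "resolution": "1080p", "type": "tv show",
242 | #     #       "formatted_title": "Show Name S02E05 [1080p]"}
243 | #     parser.generate_folder_structure(meta)   # ["TV Shows", "Show Name", "Season 02"]
244 | #     parser.generate_filename(meta, download_id="DL123")
245 | #     # "Show Name S02E05 [1080p] [DL123]"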
--------------------------------------------------------------------------------
/robofuse/core/processor.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | from datetime import datetime
4 | from typing import Dict, List, Any, Optional, Tuple
5 | import json
6 |
7 | from robofuse.api.client import RealDebridClient
8 | from robofuse.api.torrents import TorrentsAPI
9 | from robofuse.api.downloads import DownloadsAPI
10 | from robofuse.api.unrestrict import UnrestrictAPI
11 | from robofuse.core.strm import StrmFile
12 | from robofuse.utils.logging import logger
13 | from robofuse.utils.concurrency import parallel_process
14 | from robofuse.config import Config
15 |
16 |
17 | class RoboFuseProcessor:
18 | """Main processor for robofuse service."""
19 |
20 | def __init__(self, config: Config, dry_run: bool = False):
21 | self.config = config
22 | self.dry_run = dry_run
23 |
24 | # Initialize API client
25 | self.client = RealDebridClient(
26 | token=config["token"],
27 | general_rate_limit=config["general_rate_limit"],
28 | torrents_rate_limit=config["torrents_rate_limit"]
29 | )
30 |
31 | # Initialize API modules
32 | self.torrents_api = TorrentsAPI(self.client)
33 | self.downloads_api = DownloadsAPI(self.client)
34 | self.unrestrict_api = UnrestrictAPI(self.client)
35 |
36 | # Initialize STRM handler with PTT parser option
37 | self.strm_handler = StrmFile(
38 | config["output_dir"],
39 | use_ptt_parser=config.get("use_ptt_parser", True)
40 | )
41 |
42 | # Cache file for state
43 | self.cache_dir = config["cache_dir"]
44 | os.makedirs(self.cache_dir, exist_ok=True)
45 | self.cache_file = os.path.join(self.cache_dir, "state_cache.json")
46 |
47 | def _load_cache(self) -> Dict[str, Any]:
48 | """Load cached state if available."""
49 | if os.path.exists(self.cache_file):
50 | try:
51 | with open(self.cache_file, 'r') as f:
52 | return json.load(f)
53 | except Exception as e:
54 | logger.warning(f"Failed to load cache: {str(e)}")
55 |
56 | return {
57 | "last_run": None,
58 | "torrent_link_map": {},
59 | "processed_torrents": [],
60 | "processed_downloads": []
61 | }
62 |
63 | def _save_cache(self, cache_data: Dict[str, Any]):
64 | """Save state to cache."""
65 | try:
66 | with open(self.cache_file, 'w') as f:
67 | json.dump(cache_data, f, indent=2)
68 | logger.verbose(f"Saved cache to {self.cache_file}")
69 | except Exception as e:
70 | logger.warning(f"Failed to save cache: {str(e)}")
71 |
72 | def run(self):
73 | """Run the main processing pipeline."""
74 | if self.dry_run:
75 | logger.info("Running in DRY RUN mode - no files will be created or modified")
76 |
77 | # Start timing
78 | start_time = time.time()
79 |
80 | # Load cache
81 | cache = self._load_cache()
82 |
83 | # Step 1: Get and filter torrents
84 | torrents = self._get_and_filter_torrents()
85 |
86 | # Step 2: If repair_torrents is enabled, reinsert dead torrents
87 | if self.config["repair_torrents"]:
88 | self._reinsert_dead_torrents(torrents["dead"])
89 |
90 | # Step 3: Get and filter downloads
91 | downloads = self._get_and_filter_downloads()
92 |
93 | # Step 4: Find torrent links without downloads
94 | pending_links, link_to_torrent = self._find_pending_links(torrents["active"], downloads["filtered"])
95 |
96 | # Step 5: Unrestrict pending links
97 | if pending_links:
98 | logger.info(f"Found {len(pending_links)} links without corresponding downloads")
99 | unrestricted = self._unrestrict_links(pending_links)
100 | if unrestricted:
101 | # Refresh downloads list with new unrestricted links
102 | downloads = self._get_and_filter_downloads()
103 | else:
104 | logger.info("All torrent links already have corresponding downloads")
105 |
106 | # Step 6: Generate release candidates
107 | candidates = self._generate_release_candidates(torrents["active"], downloads["filtered"], link_to_torrent)
108 |
109 | # Step 7: Process STRM files (create/update/delete)
110 | self._process_strm_files(candidates)
111 |
112 | # Update cache with latest run info
113 | cache["last_run"] = datetime.now().isoformat()
114 | self._save_cache(cache)
115 |
116 | # Display summary
117 | elapsed_time = time.time() - start_time
118 | logger.info(f"Processing completed in {elapsed_time:.2f} seconds")
119 |
120 | return {
121 | "torrents_processed": len(torrents["active"]),
122 | "downloads_processed": len(downloads["filtered"]),
123 | "pending_links": len(pending_links),
124 | "candidates": len(candidates),
125 | "elapsed_time": elapsed_time
126 | }
127 |
128 | def _get_and_filter_torrents(self) -> Dict[str, List[Dict[str, Any]]]:
129 | """Get and filter torrents."""
130 | logger.info("Retrieving torrents from Real-Debrid")
131 |
132 | # Get all torrents
133 | all_torrents = self.torrents_api.get_all_torrents()
134 | logger.info(f"Retrieved {len(all_torrents)} torrents")
135 |
136 |         # Collect dead torrents (repaired separately when enabled)
137 | dead_torrents = [t for t in all_torrents if t.get("status") == "dead"]
138 | if dead_torrents:
139 | logger.warning(f"Found {len(dead_torrents)} dead torrents")
140 |
141 | # Filter for downloaded torrents
142 | active_torrents = [t for t in all_torrents if t.get("status") == "downloaded"]
143 | logger.info(f"Filtered {len(active_torrents)} active (downloaded) torrents")
144 |
145 | return {
146 | "all": all_torrents,
147 | "active": active_torrents,
148 | "dead": dead_torrents
149 | }
150 |
151 | def _reinsert_dead_torrents(self, dead_torrents: List[Dict[str, Any]]):
152 | """Reinsert dead torrents."""
153 | if not dead_torrents:
154 | logger.info("No dead torrents to reinsert")
155 | return
156 |
157 | logger.info(f"Reinserting {len(dead_torrents)} dead torrents")
158 |
159 | results = []
160 | for torrent in dead_torrents:
161 | torrent_hash = torrent.get("hash")
162 | if not torrent_hash:
163 | logger.warning(f"Torrent {torrent.get('id')} has no hash, skipping")
164 | continue
165 |
166 | logger.info(f"Reinserting torrent: {torrent.get('filename', 'Unknown')}")
167 |
168 | try:
169 | # Reinsert the torrent
170 | result = self.torrents_api.reinsert_torrent(torrent_hash)
171 |
172 | if result.get("status") == "success":
173 | # Delete the original dead torrent
174 | try:
175 | self.torrents_api.delete_torrent(torrent["id"])
176 | logger.success(f"Deleted original dead torrent {torrent['id']}")
177 | except Exception as e:
178 | logger.error(f"Failed to delete original dead torrent: {str(e)}")
179 |
180 | results.append({
181 | "torrent_id": torrent.get("id"),
182 | "hash": torrent_hash,
183 | "result": result
184 | })
185 | except Exception as e:
186 | logger.error(f"Failed to reinsert torrent {torrent.get('id')}: {str(e)}")
187 |
188 | logger.info(f"Reinserted {sum(1 for r in results if r['result'].get('status') == 'success')} torrents")
189 |
190 | def _get_and_filter_downloads(self) -> Dict[str, List[Dict[str, Any]]]:
191 | """Get and filter downloads."""
192 | logger.info("Retrieving downloads from Real-Debrid")
193 |
194 | # Get all downloads
195 | all_downloads = self.downloads_api.get_all_downloads()
196 | logger.info(f"Retrieved {len(all_downloads)} downloads")
197 |
198 | # Filter for streamable downloads
199 | streamable_downloads = self.downloads_api.filter_streamable_downloads(all_downloads)
200 |
201 | # Filter for unique downloads
202 | unique_downloads = self.downloads_api.filter_unique_downloads(streamable_downloads)
203 |
204 | return {
205 | "all": all_downloads,
206 | "streamable": streamable_downloads,
207 | "filtered": unique_downloads
208 | }
209 |
210 | def _find_pending_links(
211 | self,
212 | torrents: List[Dict[str, Any]],
213 | downloads: List[Dict[str, Any]]
214 | ) -> Tuple[List[str], Dict[str, Dict[str, Any]]]:
215 | """Find torrent links without corresponding downloads."""
216 | # Create a set of download links for faster lookup
217 | download_links = {d.get("link", "") for d in downloads if d.get("link")}
218 |
219 | # Build a mapping from link to torrent for easier reference later
220 | link_to_torrent = {}
221 |
222 | # Find links without corresponding downloads
223 | pending_links = []
224 |
225 | for torrent in torrents:
226 |             # Map each of this torrent's links and collect those lacking downloads
227 |             links = torrent.get("links", [])
228 |
229 | for link in links:
230 | # Add to the mapping
231 | link_to_torrent[link] = torrent
232 |
233 | # Check if this link has a download
234 | if link not in download_links:
235 | pending_links.append(link)
236 |
237 | return pending_links, link_to_torrent
238 |
239 | def _unrestrict_links(self, links: List[str]) -> List[Dict[str, Any]]:
240 | """Unrestrict pending links."""
241 | if self.dry_run:
242 | logger.info(f"[DRY RUN] Would unrestrict {len(links)} links")
243 | return []
244 |
245 | logger.info(f"Unrestricting {len(links)} links")
246 |
247 | # Define single link processor function for parallel processing
248 | def unrestrict_single_link(link):
249 | try:
250 | return self.unrestrict_api.unrestrict_link(link)
251 | except Exception as e:
252 | logger.error(f"Failed to unrestrict link: {str(e)}")
253 | return None
254 |
255 | # Process in parallel
256 | results = parallel_process(
257 | links,
258 | unrestrict_single_link,
259 | max_workers=self.config["concurrent_requests"],
260 | desc="Unrestricting links",
261 | show_progress=True
262 | )
263 |
264 | # Filter out None results (failed unrestrictions)
265 | successful_results = [r for r in results if r is not None]
266 |
267 | logger.info(f"Successfully unrestricted {len(successful_results)} out of {len(links)} links")
268 | return successful_results
269 |
270 | def _generate_release_candidates(
271 | self,
272 | torrents: List[Dict[str, Any]],
273 | downloads: List[Dict[str, Any]],
274 | link_to_torrent: Dict[str, Dict[str, Any]]
275 | ) -> List[Dict[str, Any]]:
276 | """Generate release candidates for STRM files."""
277 | logger.info("Generating STRM file candidates")
278 |
279 | candidates = []
280 |
281 | for download in downloads:
282 | link = download.get("link", "")
283 | if not link:
284 | continue
285 |
286 | # Get the corresponding torrent
287 | torrent = link_to_torrent.get(link)
288 | if not torrent:
289 | logger.warning(f"No torrent found for link: {link}")
290 | continue
291 |
292 | download_url = download.get("download", "")
293 | if not download_url:
294 | logger.warning(f"No download URL found for download: {download.get('id', '')}")
295 | continue
296 |
297 | candidates.append({
298 | "download_url": download_url,
299 | "filename": download.get("filename", ""),
300 | "torrent_name": torrent.get("filename", ""),
301 | "download_id": download.get("id", ""),
302 | "torrent_id": torrent.get("id", ""),
303 | "download": download,
304 | "torrent": torrent
305 | })
306 |
307 | logger.info(f"Generated {len(candidates)} STRM file candidates")
308 | return candidates
309 |
310 | def _process_strm_files(self, candidates: List[Dict[str, Any]]):
311 | """Process STRM files for the given candidates."""
312 | logger.info(f"Processing {len(candidates)} STRM files")
313 |
314 | for candidate in candidates:
315 | try:
316 | # Create or update STRM file
317 | result = self.strm_handler.create_or_update_strm(
318 | download_url=candidate["download_url"],
319 | filename=candidate["filename"],
320 | torrent_name=candidate["torrent_name"],
321 | dry_run=self.dry_run,
322 | download_id=candidate.get("download_id")
323 | )
324 |
325 | if result["status"] == "error":
326 | logger.error(f"Failed to process STRM file: {result['error']}")
327 | elif result["status"] == "dry_run":
328 | logger.info(f"[DRY RUN] {result['action']} STRM file: {result['path']}")
329 | elif result["status"] == "success":
330 | logger.success(f"{result['action']} STRM file: {result['path']}")
331 |
332 | except Exception as e:
333 | logger.error(f"Error processing STRM file for {candidate['filename']}: {str(e)}")
334 |
335 | def watch(self, interval: Optional[int] = None):
336 | """Run the service in watch mode."""
337 | watch_interval = interval or self.config["watch_mode_interval"]
338 |
339 | logger.info(f"Starting watch mode (interval: {watch_interval} seconds)")
340 |
341 | try:
342 | while True:
343 | logger.info(f"Running processing cycle at {datetime.now().isoformat()}")
344 | self.run()
345 |
346 | logger.info(f"Sleeping for {watch_interval} seconds until next cycle")
347 | time.sleep(watch_interval)
348 | except KeyboardInterrupt:
349 | logger.info("Watch mode interrupted by user")
350 | except Exception as e:
351 | logger.error(f"Error in watch mode: {str(e)}")
352 | raise
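353 | 
354 | 
355 | # Usage sketch (illustrative only; Config construction is defined in
356 | # robofuse/config.py, so loading it straight from config.json here is an
357 | # assumption):
358 | #
359 | #     config = Config("config.json")              # hypothetical constructor
360 | #     processor = RoboFuseProcessor(config, dry_run=True)
361 | #     summary = processor.run()
362 | #     # summary keys: torrents_processed, downloads_processed, pending_links,
363 | #     # candidates, elapsed_time
364 | #
365 | #     # Or run continuously, sleeping watch_mode_interval seconds between cycles:
366 | #     processor.watch()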
--------------------------------------------------------------------------------