├── .gitattributes ├── .github ├── FUNDING.yml ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── workflows │ └── build-and-push.yml ├── .gitignore ├── README.md ├── deployment ├── docker-compose-traefik.yml ├── docker-compose.yml ├── install.sh ├── jackett │ └── docker-compose.yml ├── migrate.sh ├── traefik │ ├── docker-compose.yml │ └── traefik.yml └── update.sh ├── source ├── .dockerignore ├── .run │ └── Start Dev.run.xml ├── .vscode │ └── launch.json ├── Dockerfile ├── constants.py ├── debrid │ ├── alldebrid.py │ ├── base_debrid.py │ ├── get_debrid_service.py │ ├── premiumize.py │ ├── realdebrid.py │ └── torbox.py ├── jackett │ ├── jackett_indexer.py │ ├── jackett_result.py │ └── jackett_service.py ├── main.py ├── metdata │ ├── cinemeta.py │ ├── metadata_provider_base.py │ └── tmdb.py ├── models │ ├── media.py │ ├── movie.py │ └── series.py ├── requirements.txt ├── templates │ ├── config.js │ └── index.html ├── torrent │ ├── torrent_item.py │ ├── torrent_service.py │ └── torrent_smart_container.py ├── update-dev.sh ├── utils │ ├── cache.py │ ├── detection.py │ ├── filter │ │ ├── base_filter.py │ │ ├── language_filter.py │ │ ├── max_size_filter.py │ │ ├── quality_exclusion_filter.py │ │ ├── results_per_quality_filter.py │ │ └── title_exclusion_filter.py │ ├── filter_results.py │ ├── general.py │ ├── logger.py │ ├── parse_config.py │ ├── stremio_parser.py │ └── string_encoding.py └── videos │ └── nocache.mp4 └── stremio-jackett.iml /.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto 2 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | ko_fi: aymene 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "[Bug]" 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 16 | 17 | **Expected behavior** 18 | A clear and concise description of what you expected to happen. 19 | 20 | **Screenshots** 21 | If applicable, add screenshots to help explain your problem. 22 | 23 | **Your system info (please complete the following information):** 24 | - OS: [e.g. Ubuntu] 25 | - Browser [e.g. chrome, safari] 26 | - Version [e.g. 22] 27 | - Docker or Node ? 28 | 29 | **Additional context** 30 | Add any other context about the problem here. 31 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: "[Feature]" 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/workflows/build-and-push.yml: -------------------------------------------------------------------------------- 1 | name: Build and push Docker image to Docker Hub 2 | 3 | on: 4 | release: 5 | types: [created] 6 | workflow_dispatch: 7 | 8 | jobs: 9 | build-and-push: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: 🏗 Checkout code 13 | uses: actions/checkout@v4 14 | 15 | - name: 🏗 Set up QEMU 16 | uses: docker/setup-qemu-action@v3 17 | 18 | - name: 🏗 Set up Docker Buildx 19 | uses: docker/setup-buildx-action@v3 20 | 21 | - name: 🏗 Setup Python 22 | uses: actions/setup-python@v5 23 | with: 24 | python-version: '3.12.2' 25 | 26 | - name: 🏗 Installing dependencies 27 | run: python3 -m pip install -r source/requirements.txt 28 | 29 | - name: 🔒 Login to Docker Hub 30 | uses: docker/login-action@v3 31 | with: 32 | username: ${{ secrets.DOCKER_USERNAME }} 33 | password: ${{ secrets.DOCKER_PASSWORD }} 34 | 35 | - name: 🚀 Build and push for latest tag 36 | uses: docker/build-push-action@v5 37 | with: 38 | context: source/. 39 | platforms: linux/amd64,linux/arm64 40 | push: true 41 | tags: belmeg/stremio-addon-jackett:latest 42 | 43 | - name: 🚀 Build and push for version tag 44 | uses: docker/build-push-action@v5 45 | with: 46 | context: source/. 47 | platforms: linux/amd64,linux/arm64 48 | push: true 49 | tags: belmeg/stremio-addon-jackett:${{ github.event.release.tag_name }} 50 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | .idea/ 3 | .DS_Store 4 | .env 5 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![ko-fi](https://ko-fi.com/img/githubbutton_sm.svg)](https://ko-fi.com/P5P2TUSN3) 2 | 3 | 🇵🇸 4 | # Stremio Jackett Addon 5 | 6 | Elevate your Stremio experience with seamless access to Jackett torrent links, effortlessly fetching torrents for your 7 | selected movies within the Stremio interface. 8 | 9 | # Disclaimer 10 | 11 | I am not responsible for any content downloaded through this addon. 12 | 13 | # Support on Discord [here](https://discord.gg/7yZ5PzaPYb) 14 | 15 | # Prerequisites 16 | 17 | - A [Jackett](https://github.com/Jackett/Jackett) server running and reachable pubicly. 18 | - Python 3.11+ and pip. 19 | - *(optionnal)* A Real-Debrid, All-Debrid, Premiumize or TorBox account. 20 | 21 | # Installation 22 | 23 | ## If you are a newbie, check the [wiki](https://github.com/aymene69/stremio-jackett/wiki) 24 | 25 | ## Without Docker 26 | 27 | - On your computer, clone the repository 28 | ```sh 29 | git clone https://github.com/aymene69/stremio-jackett 30 | ``` 31 | - Once done, install dependencies 32 | ```sh 33 | pip install -r requirements.txt 34 | ```` 35 | - Now just run your addon, access the link and add it to your Stremio app! 36 | ```sh 37 | python3 -m uvicorn main:app --reload --port YOUR_PORT 38 | ```` 39 | And access it via `your_ip:YOUR_PORT` 40 | 41 | ## With Docker 42 | 43 | - Simply run the Docker image 44 | ```sh 45 | docker run -p 3000:3000 -d belmeg/stremio-addon-jackett 46 | ``` 47 | And access it via `your_ip:3000` 48 | **WARNING** This will only work locally. If you want your addon to be reachable at any time, simply deploy it 49 | on [Beamup](https://github.com/Stremio/stremio-beamup-cli). 50 | Click [here](https://github.com/Stremio/stremio-beamup-cli) to visit their repository and see how you can deploy it. 51 | 52 | - To update your container 53 | 54 | - Find your existing container name 55 | ```sh 56 | docker ps 57 | ``` 58 | 59 | - Stop your existing container 60 | ```sh 61 | docker stop 62 | ``` 63 | 64 | - Remove your existing container 65 | ```sh 66 | docker rm 67 | ``` 68 | 69 | - Pull the latest version from docker hub 70 | ```sh 71 | docker pull belmeg/stremio-addon-jackett:latest 72 | ``` 73 | 74 | - Finally, rerun your docker run command found in step one 75 | 76 | ## With Docker compose (Recommended) (includes Jackett and Flaresolverr) 77 | 78 | - Use the docker-compose.yml from the repo 79 | 80 | - To start the stack 81 | 82 | ```sh 83 | docker compose up -d 84 | ``` 85 | 86 | - To stop the stack 87 | 88 | ```sh 89 | docker compose down 90 | ``` 91 | 92 | - To pull the latest image. 93 | 94 | ```sh 95 | docker pull belmeg/stremio-addon-jackett:latest 96 | ``` 97 | 98 | And access it via `your_ip:3000` 99 | 100 | I recommend also deploying Nginx Proxy Manager and securing your network with SSL. 101 | If you need to have a different root path, you can pass the `ROOT_PATH` environment variable to the app. 102 | 103 | ## Optional Configuration with environment variables 104 | 105 | | Variable | Description | Default | 106 | |-------------------------------|------------------------------------------------|---------| 107 | | `ROOT_PATH` | The root path of your addon | `/` | 108 | | `DISABLE_JACKETT_IMDB_SEARCH` | If you want to disable the Jackett IMDB search | `False` | 109 | 110 | ## Thanks to [elfhosted.com](https://elfhosted.com) for hosting the cache server! 111 | -------------------------------------------------------------------------------- /deployment/docker-compose-traefik.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | addon: 5 | image: belmeg/stremio-addon-jackett:latest 6 | container_name: addon 7 | networks: 8 | - traefik_default 9 | labels: 10 | - "traefik.enable=true" 11 | - "traefik.docker.network=traefik_default" 12 | - "traefik.http.routers.addon.entrypoints=web,websecure" 13 | - "traefik.http.routers.addon.rule=Host(`YOURADDON.COM`)" 14 | - "traefik.http.services.addon.loadbalancer.server.port=3000" 15 | - "traefik.http.routers.addon.tls=true" 16 | - "traefik.http.routers.addon.tls.certresolver=production" 17 | restart: unless-stopped 18 | networks: 19 | traefik_default: 20 | external: true 21 | -------------------------------------------------------------------------------- /deployment/docker-compose.yml: -------------------------------------------------------------------------------- 1 | name: stremio_jacket 2 | services: 3 | flaresolverr: 4 | image: ghcr.io/flaresolverr/flaresolverr:latest 5 | container_name: flaresolverr 6 | environment: 7 | - LOG_LEVEL=${LOG_LEVEL:-info} 8 | - LOG_HTML=${LOG_HTML:-false} 9 | - CAPTCHA_SOLVER=${CAPTCHA_SOLVER:-none} 10 | - PUID=$(id -u) 11 | - PGID=$(id -g) 12 | - TZ=America/Los_Angeles 13 | # networks: 14 | # - stremio_jacket_network 15 | ports: 16 | - "${PORT:-8191}:8191" 17 | restart: unless-stopped 18 | jackett: 19 | image: lscr.io/linuxserver/jackett:latest 20 | container_name: jackett 21 | environment: 22 | - PUID=$(id -u) 23 | - PGID=$(id -g) 24 | - TZ=America/Los_Angeles 25 | - AUTO_UPDATE=true #optional 26 | - RUN_OPTS= #optional 27 | depends_on: 28 | - flaresolverr 29 | # networks: 30 | # - stremio_jacket_network 31 | volumes: 32 | - ./data:/config 33 | - /path/to/blackhole:/downloads 34 | ports: 35 | - 9117:9117 36 | restart: unless-stopped 37 | jackett-stremio: 38 | image: belmeg/stremio-addon-jackett 39 | container_name: jackett-stremio 40 | environment: 41 | - PUID=$(id -u) 42 | - PGID=$(id -g) 43 | - TZ=America/Los_Angeles 44 | - ADDON_NAME=stremio_jacket 45 | depends_on: 46 | - jackett 47 | # networks: 48 | # - stremio_jacket_network 49 | ports: 50 | - 3000:3000 51 | restart: unless-stopped 52 | 53 | #networks: 54 | # stremio_jacket_network: 55 | # external: true 56 | # name: stremio_jacket_network 57 | -------------------------------------------------------------------------------- /deployment/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sudo apt-get update -y 4 | sudo apt-get install -y ca-certificates curl gnupg 5 | 6 | sudo install -m 0755 -d /etc/apt/keyrings 7 | sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc 8 | 9 | echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ 10 | $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \ 11 | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null 12 | 13 | sudo apt-get update -y 14 | 15 | sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin 16 | clear 17 | echo "Please enter your email: " 18 | read userMail 19 | 20 | echo "Please enter your domain name: " 21 | read domainName 22 | 23 | sudo mkdir /etc/traefik 24 | sudo mkdir /etc/traefik/certs 25 | 26 | sudo curl -fsSL https://raw.githubusercontent.com/aymene69/stremio-jackett/main/deployment/traefik/traefik.yml -o /etc/traefik/traefik.yml 27 | 28 | sudo sed -i "s/youremail@domain.com/$userMail/g" /etc/traefik/traefik.yml 29 | 30 | sudo mkdir traefik jackett addon 31 | sudo curl -fsSL https://raw.githubusercontent.com/aymene69/stremio-jackett/main/deployment/traefik/docker-compose.yml -o ./traefik/docker-compose.yml 32 | 33 | sudo iptables -A INPUT -p tcp --dport 80 -j ACCEPT 34 | sudo iptables -A INPUT -p tcp --dport 443 -j ACCEPT 35 | sudo netfilter-persistent save 36 | cd traefik 37 | sudo docker compose up -d 38 | cd ../jackett 39 | sudo mkdir data blackhole 40 | sudo curl -fsSL https://raw.githubusercontent.com/aymene69/stremio-jackett/main/deployment/jackett/docker-compose.yml -o ./docker-compose.yml 41 | sudo sed -i "s/YOURADDON.COM/$domainName/g" ./docker-compose.yml 42 | sudo docker compose up -d 43 | 44 | cd ../addon 45 | sudo curl -fsSL https://raw.githubusercontent.com/aymene69/stremio-jackett/main/deployment/docker-compose-traefik.yml -o ./docker-compose.yml 46 | sudo sed -i "s/YOURADDON.COM/$domainName/g" ./docker-compose.yml 47 | sudo docker compose up -d 48 | cd ../traefik 49 | sudo docker compose down 50 | sudo docker compose up -d 51 | clear 52 | 53 | echo "Your addon is accessible at https://$domainName/" 54 | echo "Jackett is accessible at http://$(curl -4 -s ifconfig.me):9117" 55 | echo "FlareSolverr is accessible at http://$(curl -4 -s ifconfig.me):8191" -------------------------------------------------------------------------------- /deployment/jackett/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | jackett: 5 | image: lscr.io/linuxserver/jackett:latest 6 | container_name: jackett 7 | environment: 8 | - PUID=1000 9 | - PGID=1000 10 | - TZ=Etc/UTC 11 | - AUTO_UPDATE=true 12 | volumes: 13 | - ./data:/config 14 | - ./blackhole:/downloads 15 | ports: 16 | - 9117:9117 17 | restart: unless-stopped 18 | 19 | 20 | flaresolverr: 21 | image: flaresolverr/flaresolverr:latest 22 | container_name: flaresolverr 23 | environment: 24 | - TZ=Europe/London 25 | ports: 26 | - 8191:8191 27 | restart: unless-stopped -------------------------------------------------------------------------------- /deployment/migrate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Please enter your domain name: " 4 | read domainName 5 | cd jackett 6 | sudo rm -rf data 7 | sudo docker compose down 8 | cd ../addon 9 | sudo rm -rf data 10 | sudo docker compose down 11 | cd ../traefik 12 | sudo docker compose down 13 | cd .. 14 | 15 | sudo iptables -A INPUT -p tcp --dport 80 -j ACCEPT 16 | sudo iptables -A INPUT -p tcp --dport 443 -j ACCEPT 17 | sudo iptables -A INPUT -p tcp --dport 9117 -j ACCEPT 18 | sudo iptables -A INPUT -p tcp --dport 8191 -j ACCEPT 19 | 20 | sudo netfilter-persistent save 21 | cd traefik 22 | sudo docker compose up -d 23 | cd ../jackett 24 | sudo mkdir data blackhole 25 | sudo curl -fsSL https://raw.githubusercontent.com/aymene69/stremio-jackett/main/deployment/jackett/docker-compose.yml -o ./docker-compose.yml 26 | sudo sed -i "s/YOURADDON.COM/$domainName/g" ./docker-compose.yml 27 | sudo docker compose up -d 28 | 29 | cd ../addon 30 | sudo curl -fsSL https://raw.githubusercontent.com/aymene69/stremio-jackett/main/deployment/docker-compose-traefik.yml -o ./docker-compose.yml 31 | sudo sed -i "s/YOURADDON.COM/$domainName/g" ./docker-compose.yml 32 | sudo docker compose pull 33 | sudo docker compose up -d 34 | cd ../traefik 35 | sudo docker compose up -d 36 | clear 37 | IP=$(curl -4 -s ifconfig.me) 38 | echo "Your addon is accessible at https://$domainName/" 39 | echo "Jackett is accessible at http://${IP}:9117" 40 | echo "FlareSolverr is accessible at http://${IP}:8191" -------------------------------------------------------------------------------- /deployment/traefik/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | traefik: 5 | image: "traefik:v2.5" 6 | container_name: "traefik" 7 | ports: 8 | - "80:80" 9 | - "443:443" 10 | - "8080:8080" 11 | volumes: 12 | - /etc/traefik:/etc/traefik 13 | - /var/run/docker.sock:/var/run/docker.sock:ro 14 | restart: unless-stopped -------------------------------------------------------------------------------- /deployment/traefik/traefik.yml: -------------------------------------------------------------------------------- 1 | global: 2 | checkNewVersion: true 3 | sendAnonymousUsage: false 4 | 5 | api: 6 | dashboard: true 7 | 8 | entryPoints: 9 | web: 10 | address: :80 11 | http: 12 | redirections: 13 | entryPoint: 14 | to: websecure 15 | scheme: https 16 | websecure: 17 | address: :443 18 | 19 | certificatesResolvers: 20 | staging: 21 | acme: 22 | email: youremail@domain.com 23 | storage: /etc/traefik/certs/acme.json 24 | caServer: "https://acme-staging-v02.api.letsencrypt.org/directory" 25 | httpChallenge: 26 | entryPoint: web 27 | 28 | production: 29 | acme: 30 | email: youremail@domain.com 31 | storage: /etc/traefik/certs/acme.json 32 | caServer: "https://acme-v02.api.letsencrypt.org/directory" 33 | httpChallenge: 34 | entryPoint: web 35 | 36 | providers: 37 | docker: 38 | exposedByDefault: true 39 | file: 40 | directory: /etc/traefik 41 | watch: true -------------------------------------------------------------------------------- /deployment/update.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd addon 4 | sudo docker compose down 5 | sudo docker compose pull 6 | sudo docker compose up -d 7 | clear 8 | IP=$(curl -4 -s ifconfig.me) 9 | echo "Update completed!" 10 | echo "Your addon is accessible at https://$domainName/" -------------------------------------------------------------------------------- /source/.dockerignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | venv -------------------------------------------------------------------------------- /source/.run/Start Dev.run.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 27 | -------------------------------------------------------------------------------- /source/.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "name": "Python Debugger: Current File with Arguments", 9 | "type": "debugpy", 10 | "request": "launch", 11 | "module": "uvicorn", 12 | "console": "integratedTerminal", 13 | "args": [ 14 | "main:app", 15 | "--reload", 16 | "--port", 17 | "1337" 18 | ] 19 | } 20 | ] 21 | } 22 | -------------------------------------------------------------------------------- /source/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.2 2 | 3 | WORKDIR /app 4 | 5 | # This is to prevent Python from buffering stdout and stderr 6 | ENV PYTHONUNBUFFERED 1 7 | 8 | COPY requirements.txt . 9 | 10 | RUN pip install -r requirements.txt 11 | 12 | COPY . . 13 | 14 | EXPOSE 3000 15 | 16 | CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "3000", "--log-level", "critical", "--reload"] -------------------------------------------------------------------------------- /source/constants.py: -------------------------------------------------------------------------------- 1 | NO_CONFIG = {'streams': [{'url': "#", 'title': "No configuration found"}]} 2 | JACKETT_ERROR = {'streams': [{'url': "#", 'title': "An error occured"}]} 3 | 4 | CACHER_URL = "https://stremio-jackett-cacher.elfhosted.com/" 5 | 6 | NO_CACHE_VIDEO_URL = "https://github.com/aymene69/stremio-jackett/raw/main/source/videos/nocache.mp4" 7 | 8 | EXCLUDED_TRACKERS = ['0day.kiev', '1ptbar', '2 Fast 4 You', '2xFree', '3ChangTrai', '3D Torrents', '3Wmg', '4thD', 9 | '52PT', '720pier', 'Abnormal', 'ABtorrents', 'Acid-Lounge', 'Across The Tasman', 'Aftershock', 10 | 'AGSVPT', 'Aidoru!Online', 'Aither (API)', 'AlphaRatio', 'Amigos Share Club', 'AniDUB', 11 | 'Anime-Free', 'AnimeBytes', 'AnimeLayer', 'AnimeTorrents', 'AnimeTorrents.ro', 'AnimeWorld (API)', 12 | 'AniToons', 'Anthelion (API)', 'ArabaFenice', 'ArabP2P', 'ArabTorrents', 'ArenaBG', 'AsianCinema', 13 | 'AsianDVDClub', 'Audiences', 'AudioNews', 'Aussierul.es', 'AvistaZ', 'Azusa', 'Back-ups', 'BakaBT', 14 | 'BeiTai', 'Beload', 'Best-Core', 'Beyond-HD (API)', 'Bibliotik', 'Bit-Bázis', 'BIT-HDTV', 'Bitded', 15 | 'Bithorlo', 'BitHUmen', 'BitPorn', 'Bitspyder', 'Bittorrentfiles', 'BiTTuRK', 'BJ-Share', 16 | 'BlueBird', 'Blutopia (API)', 'BookTracker', 'BootyTape', 'Borgzelle', 'Boxing Torrents', 17 | 'BrasilTracker', 'BroadcasTheNet', 'BroadCity', 'BrokenStones', 'BrSociety (API)', 'BTArg', 18 | 'BTNext', 'BTSCHOOL', 'BwTorrents', 'BYRBT', 'Carp-Hunter', 'Carpathians', 'CarPT', 'CartoonChaos', 19 | 'Cathode-Ray.Tube', 'Catorrent', 'Central Torrent', 'CeskeForum', 'CGPeers', 'CHDBits', 'cheggit', 20 | 'ChileBT', 'Cinemageddon', 'CinemaMovieS_ZT', 'Cinematik', 'CinemaZ', 'Classix', 'Coastal-Crew', 21 | 'Concertos', 'CrazySpirits', 'CrnaBerza', 'CRT2FA', 'Dajiao', 'DanishBytes (API)', 'Dark-Shadow', 22 | 'DataScene (API)', 'Deildu', 'Demonoid', 'DesiTorrents (API)', 'Devil-Torrents', 'Diablo Torrent', 23 | 'DICMusic', 'DigitalCore', 'DimeADozen', 'DiscFan', 'DivTeam', 'DocsPedia', 'Dream Tracker', 24 | 'DreamingTree', 'Drugari', 'DXP', 'Ebooks-Shares', 'Electro-Torrent', 'Empornium', 'Empornium2FA', 25 | 'EniaHD', 'Enthralled', 'Enthralled2FA', 'Erai-Raws', 'eShareNet', 'eStone', 'Ex-torrenty', 26 | 'exitorrent.org', 'ExKinoRay', 'ExoticaZ', 'ExtremeBits', 'ExtremlymTorrents', 27 | 'Falkon Vision Team', 'FANO.IN', 'Fantastiko', 'Fappaizuri', 'FastScene', 'FearNoPeer', 28 | 'Femdomcult', 'File-Tracker', 'FileList', 'FinElite', 'FinVip', 'Flux-Zone', 'Free Farm', 'FSM', 29 | 'FunFile', 'FunkyTorrents', 'FutureTorrent', 'Fuzer', 'Gamera', 'Gay-Torrents.net', 30 | 'gay-torrents.org', 'GAYtorrent.ru', 'GazelleGames', 'GazelleGames (API)', 'Generation-Free (API)', 31 | 'Genesis-Movement', 'GigaTorrents', 'GimmePeers', 'Girotorrent', 'GreatPosterWall', 'GreekDiamond', 32 | 'HaiDan', 'Haitang', 'HappyFappy', 'Hares Club', 'hawke-uno', 'HD-CLUB', 'HD-CzTorrent', 33 | 'HD-Forever', 'HD-Olimpo (API)', 'HD-Only', 'HD-Space', 'HD-Torrents', 'HD-UNiT3D (API)', 34 | 'HD4FANS', 'HDArea', 'HDAtmos', 'HDBits (API)', 'HDC', 'HDDolby', 'HDFans', 'HDFun', 'HDGalaKtik', 35 | 'HDHome', 'HDMaYi', 'HDPT', 'HDRoute', 'HDSky', 'HDtime', 'HDTorrents.it', 'HDTurk', 'HDU', 36 | 'hdvbits', 'HDVIDEO', 'Hebits', 'HellasHut', 'HellTorrents', 'HHanClub', 'HomePornTorrents', 37 | 'House of Devil', 'HQMusic', 'HunTorrent', 'iAnon', 'ICC2022', 'Il Corsaro Blu', 'ilDraGoNeRo', 38 | 'ImmortalSeed', 'Immortuos', 'Indietorrents', 'Infire', 'Insane Tracker', 'IPTorrents', 39 | 'ItaTorrents', 'JME-REUNIT3D (API)', 'JoyHD', 'JPopsuki', 'JPTV (API)', 'KamePT', 'Karagarga', 40 | 'Keep Friends', 'KIMOJI', 'Kinorun', 'Kinozal', 'Kinozal (M)', 'Korsar', 'KrazyZone', 'Kufei', 41 | 'Kufirc', 'LaidBackManor (API)', 'Last Digital Underground', 'LastFiles', 'Lat-Team (API)', 42 | 'Le-Cinephile', 'LearnBits', 'LearnFlakes', 'leech24', 'Les-Cinephiles', 'LeSaloon', 'Lesbians4u', 43 | 'Libble', 'LibraNet', 'LinkoManija', 'Locadora', 'LosslessClub', 'LostFilm.tv', 'LST', 44 | 'M-Team - TP', 'M-Team - TP (2FA)', 'MaDs Revolution', 'Magnetico (Local DHT)', 'Majomparádé', 45 | 'Making Off', 'Marine Tracker', 'Masters-TB', 'Mazepa', 'MDAN', 'MegamixTracker', 46 | 'Mendigos da WEB', 'MeseVilág', 'Metal Tracker', 'MetalGuru', 'Milkie', 'MIRCrew', 'MMA-torrents', 47 | 'MNV', 'MOJBLiNK', 'MonikaDesign (API)', 'MoreThanTV (API)', 'MouseBits', 'Movie-Torrentz', 48 | 'MovieWorld', 'MuseBootlegs', 'MVGroup Forum', 'MVGroup Main', 'MyAnonamouse', 'MySpleen', 'nCore', 49 | 'NebulanceAPI', 'NetHD', 'NewStudioL', 'NicePT', 'NoNaMe ClubL', 'NorBits', 'NORDiCHD', 50 | 'Ntelogo (API)', 'OKPT', 'Old Toons World', 'OnlyEncodes (API)', 'OpenCD', 'Orpheus', 'OshenPT', 51 | 'Ostwiki', 'OurBits', 'P2PBG', 'Panda', 'Party-Tracker', 'PassThePopcorn', 'Peeratiko', 'Peers.FM', 52 | 'PigNetwork', 'PixelCove', 'PixelCove2FA', 'PiXELHD', 'PolishSource', 'PolishTracker (API)', 53 | 'Pornbay', 'PornoLab', 'Portugas (API)', 'PotUK', 'PreToMe', 'PrivateHD', 'ProAudioTorrents', 54 | 'PTCafe', 'PTChina', 'PTerClub', 'PTFiles', 'PThome', 'PTLSP', 'PTSBAO', 'PTTime', 'PT分享站', 55 | "Punk's Horror Tracker", 'PuntoTorrent', 'PussyTorrents', 'PuTao', 'PWTorrents', 'R3V WTF!', 56 | 'Racing4Everyone (API)', 'RacingForMe', 'Rainbow Tracker', 'RareShare2 (API)', 'Red Leaves', 57 | 'Red Star Torrent', 'Redacted', 'RedBits (API)', 'ReelFLiX (API)', 'Resurrect The Net', 58 | 'RetroFlix', 'RevolutionTT', 'RGFootball', 'RinTor', 'RiperAM', 'RM-HD', 'RockBox', 59 | 'Romanian Metal Torrents', 'Rousi', 'RPTScene', 'RUDUB', 'Rustorka', 'RuTracker', 'SATClubbing', 60 | 'SceneHD', 'SceneLinks', 'SceneRush', 'SceneTime', 'Secret Cinema', 'SeedFile', 'seleZen', 61 | 'Shadowflow', 'Shareisland (API)', 'Sharewood', 'Sharewood API', 'SharkPT', 'Shazbat', 'SiamBIT', 62 | 'SkipTheCommercials (API)', 'SkipTheTrailers', 'SkTorrent', 'SkTorrent-org', 'slosoul', 'SnowPT', 63 | 'SoulVoice', 'Speed.cd', 'SpeedApp', 'Speedmaster HD', 'SpeedTorrent Reloaded', 64 | 'Spirit of Revolution', 'SportsCult', 'SpringSunday', 'SugoiMusic', 'Superbits', 'Swarmazon (API)', 65 | 'Tapochek', 'Tasmanit', 'Team CT Game', 'TeamHD', 'TeamOS', 'TEKNO3D', 'teracod', 'The Crazy Ones', 66 | 'The Empire', 'The Falling Angels', 'The Geeks', 'The New Retro', 'The Occult', 67 | 'The Old School (API)', 'The Place', 'The Shinning (API)', 'The Show', 'The Vault', 'The-New-Fun', 68 | 'TheLeachZone', 'themixingbowl', 'TheRebels (API)', 'TheScenePlace', "Thor's Land", 'TJUPT', 69 | 'TLFBits', 'TmGHuB', 'Toca Share', 'Toloka.to', 'ToonsForMe', 'Tornado', 'Torrent Heaven', 70 | 'Torrent Network', 'Torrent Sector Crew', 'Torrent Trader', 'Torrent-Explosiv', 'Torrent-Syndikat', 71 | 'TOrrent-tuRK', 'Torrent.LT', 'TorrentBD', 'TorrentBytes', 'TorrentCCF', 'TorrentDay', 'TorrentDD', 72 | 'Torrenteros (API)', 'TorrentHeaven', 'TorrentHR', 'Torrenting', 'Torrentland', 73 | 'Torrentland (API)', 'TorrentLeech', 'Torrentleech.pl', 'TorrentMasters', 'Torrents-Local', 74 | 'TorrentSeeds (API)', 'TotallyKids', 'ToTheGlory', 'ToTheGloryCookie', 'TrackerMK', 75 | 'TranceTraffic', 'Trellas', 'TreZzoR', 'TreZzoRCookie', 'TribalMixes', 'TurkTorrent', 'TV Store', 76 | 'TVChaosUK', 'TvRoad', 'Twisted-Music', 'U2', 'UBits', 'UHDBits', 'UltraHD', 'Union Fansub', 77 | 'UnionGang', 'UniOtaku', 'Universal-Torrents', 'Unlimitz', 'upload.cx', 'UTOPIA', 'WDT', 78 | 'White Angel', 'WinterSakura', 'World-In-HD', 'World-of-Tomorrow', 'Wukong', 'x-ite.me', 79 | 'XbytesV2', 'Xider-Torrent', 'XSpeeds', 'Xthor (API)', 'xTorrenty', 'Xtreme Bytes', 'XWT-Classics', 80 | 'XWtorrents', 'YDYPT', 'YGGcookie', 'YGGtorrent', 'Zamunda.net', 'Zelka.org', 'ZmPT (织梦)', 81 | 'ZOMB', 'ZonaQ', 'Ztracker'] 82 | -------------------------------------------------------------------------------- /source/debrid/alldebrid.py: -------------------------------------------------------------------------------- 1 | # alldebrid.py 2 | import json 3 | import uuid 4 | from urllib.parse import unquote 5 | 6 | from constants import NO_CACHE_VIDEO_URL 7 | from debrid.base_debrid import BaseDebrid 8 | from utils.general import season_episode_in_filename 9 | from utils.logger import setup_logger 10 | 11 | logger = setup_logger(__name__) 12 | 13 | 14 | class AllDebrid(BaseDebrid): 15 | def __init__(self, config): 16 | super().__init__(config) 17 | self.base_url = "https://api.alldebrid.com/v4.1/" 18 | 19 | def add_magnet(self, magnet, ip): 20 | url = f"{self.base_url}magnet/upload?agent=jackett&apikey={self.config['debridKey']}&magnet={magnet}&ip={ip}" 21 | return self.get_json_response(url) 22 | 23 | def add_torrent(self, torrent_file, ip): 24 | url = f"{self.base_url}magnet/upload/file?agent=jackett&apikey={self.config['debridKey']}&ip={ip}" 25 | files = {"files[0]": (str(uuid.uuid4()) + ".torrent", torrent_file, 'application/x-bittorrent')} 26 | return self.get_json_response(url, method='post', files=files) 27 | 28 | def check_magnet_status(self, id, ip): 29 | url = f"{self.base_url}magnet/status?agent=jackett&apikey={self.config['debridKey']}&id={id}&ip={ip}" 30 | return self.get_json_response(url) 31 | 32 | def unrestrict_link(self, link, ip): 33 | url = f"{self.base_url}link/unlock?agent=jackett&apikey={self.config['debridKey']}&link={link}&ip={ip}" 34 | return self.get_json_response(url) 35 | 36 | def get_stream_link(self, query_string, ip): 37 | query = json.loads(query_string) 38 | 39 | magnet = query['magnet'] 40 | stream_type = query['type'] 41 | torrent_download = unquote(query["torrent_download"]) if query["torrent_download"] is not None else None 42 | 43 | torrent_id = self.__add_magnet_or_torrent(magnet, torrent_download, ip) 44 | logger.info(f"Torrent ID: {torrent_id}") 45 | 46 | if not self.wait_for_ready_status( 47 | lambda: self.check_magnet_status(torrent_id, ip)["data"]["magnets"]["status"] == "Ready"): 48 | logger.error("Torrent not ready, caching in progress.") 49 | return NO_CACHE_VIDEO_URL 50 | logger.info("Torrent is ready.") 51 | 52 | logger.info(f"Getting data for torrent id: {torrent_id}") 53 | data = self.check_magnet_status(torrent_id, ip)["data"] 54 | logger.info(f"Retrieved data for torrent id") 55 | 56 | link = NO_CACHE_VIDEO_URL 57 | if stream_type == "movie": 58 | logger.info("Getting link for movie") 59 | link = data["magnets"]["files"][0]['l'] 60 | elif stream_type == "series": 61 | season = query['season'] 62 | episode = query['episode'] 63 | logger.info(f"Getting link for series {season}, {episode}") 64 | matching_files = [] 65 | rank = 0 66 | if 'e' in data["magnets"]["files"][0].keys(): 67 | for file in data["magnets"]["files"][0]["e"]: 68 | if season_episode_in_filename(file["n"], season, episode): 69 | matching_files.append(file) 70 | rank += 1 71 | else: 72 | for file in data["magnets"]["files"]: 73 | if season_episode_in_filename(file["n"], season, episode): 74 | matching_files.append(file) 75 | rank += 1 76 | 77 | if len(matching_files) == 0: 78 | logger.error(f"No matching files for {season} {episode} in torrent.") 79 | return f"Error: No matching files for {season} {episode} in torrent." 80 | 81 | link = max(matching_files, key=lambda x: x["s"])["l"] 82 | else: 83 | logger.error("Unsupported stream type.") 84 | return "Error: Unsupported stream type." 85 | 86 | if link == NO_CACHE_VIDEO_URL: 87 | return link 88 | 89 | logger.info(f"Alldebrid link: {link}") 90 | 91 | unlocked_link_data = self.unrestrict_link(link, ip) 92 | 93 | if not unlocked_link_data: 94 | logger.error("Failed to unlock link.") 95 | return "Error: Failed to unlock link." 96 | 97 | logger.info(f"Unrestricted link: {unlocked_link_data['data']['link']}") 98 | 99 | return unlocked_link_data["data"]["link"] 100 | 101 | def get_availability_bulk(self, hashes_or_magnets, ip=None): 102 | torrents = f"{self.base_url}magnet/status?agent=jackett&apikey={self.config['debridKey']}&ip={ip}" 103 | ids = [] 104 | for element in self.get_json_response(torrents)["data"]["magnets"]: 105 | if element["hash"] in hashes_or_magnets: 106 | ids.append(element["id"]) 107 | 108 | # if len(hashes_or_magnets) == 0: 109 | # logger.info("No hashes to be sent to All-Debrid.") 110 | # return dict() 111 | # 112 | # url = f"{self.base_url}magnet/instant?agent=jackett&apikey={self.config['debridKey']}&magnets[]={'&magnets[]='.join(hashes_or_magnets)}&ip={ip}" 113 | # print(url) 114 | # return self.get_json_response(url) 115 | 116 | 117 | def __add_magnet_or_torrent(self, magnet, torrent_download=None, ip=None): 118 | torrent_id = "" 119 | if torrent_download is None: 120 | logger.info(f"Adding magnet to AllDebrid") 121 | magnet_response = self.add_magnet(magnet, ip) 122 | logger.info(f"AllDebrid add magnet response: {magnet_response}") 123 | 124 | if not magnet_response or "status" not in magnet_response or magnet_response["status"] != "success": 125 | return "Error: Failed to add magnet." 126 | 127 | torrent_id = magnet_response["data"]["magnets"][0]["id"] 128 | else: 129 | logger.info(f"Downloading torrent file from Jackett") 130 | torrent_file = self.donwload_torrent_file(torrent_download) 131 | logger.info(f"Torrent file downloaded from Jackett") 132 | 133 | logger.info(f"Adding torrent file to AllDebrid") 134 | upload_response = self.add_torrent(torrent_file, ip) 135 | logger.info(f"AllDebrid add torrent file response: {upload_response}") 136 | 137 | if not upload_response or "status" not in upload_response or upload_response["status"] != "success": 138 | return "Error: Failed to add torrent file." 139 | 140 | torrent_id = upload_response["data"]["files"][0]["id"] 141 | 142 | logger.info(f"New torrent ID: {torrent_id}") 143 | return torrent_id 144 | -------------------------------------------------------------------------------- /source/debrid/base_debrid.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import requests 4 | 5 | from utils.logger import setup_logger 6 | 7 | 8 | class BaseDebrid: 9 | def __init__(self, config): 10 | self.config = config 11 | self.logger = setup_logger(__name__) 12 | self.__session = requests.Session() 13 | 14 | def get_json_response(self, url, method='get', data=None, headers=None, files=None): 15 | if method == 'get': 16 | response = self.__session.get(url, headers=headers) 17 | elif method == 'post': 18 | response = self.__session.post(url, data=data, headers=headers, files=files) 19 | elif method == 'put': 20 | response = self.__session.put(url, data=data, headers=headers) 21 | elif method == 'delete': 22 | response = self.__session.delete(url, headers=headers) 23 | else: 24 | raise ValueError(f"Unsupported HTTP method: {method}") 25 | 26 | # Check if the request was successful 27 | if response.ok: 28 | try: 29 | return response.json() 30 | except ValueError: 31 | self.logger.error(f"Failed to parse response as JSON: {response.text}") 32 | return None 33 | else: 34 | self.logger.error(f"Request failed with status code {response.status_code}") 35 | return None 36 | 37 | def wait_for_ready_status(self, check_status_func, timeout=30, interval=5): 38 | self.logger.info(f"Waiting for {timeout} seconds to cache.") 39 | start_time = time.time() 40 | while time.time() - start_time < timeout: 41 | if check_status_func(): 42 | self.logger.info("File is ready!") 43 | return True 44 | time.sleep(interval) 45 | self.logger.info(f"Waiting timed out.") 46 | return False 47 | 48 | def donwload_torrent_file(self, download_url): 49 | response = requests.get(download_url) 50 | response.raise_for_status() 51 | 52 | return response.content 53 | 54 | def get_stream_link(self, query, ip=None): 55 | raise NotImplementedError 56 | 57 | def add_magnet(self, magnet, ip=None): 58 | raise NotImplementedError 59 | 60 | def get_availability_bulk(self, hashes_or_magnets, ip=None): 61 | raise NotImplementedError 62 | -------------------------------------------------------------------------------- /source/debrid/get_debrid_service.py: -------------------------------------------------------------------------------- 1 | from fastapi.exceptions import HTTPException 2 | 3 | from debrid.alldebrid import AllDebrid 4 | from debrid.premiumize import Premiumize 5 | from debrid.realdebrid import RealDebrid 6 | from debrid.torbox import TorBox 7 | 8 | 9 | def get_debrid_service(config): 10 | service_name = config['service'] 11 | if service_name == "realdebrid": 12 | debrid_service = RealDebrid(config) 13 | elif service_name == "alldebrid": 14 | debrid_service = AllDebrid(config) 15 | elif service_name == "premiumize": 16 | debrid_service = Premiumize(config) 17 | elif service_name == "torbox": 18 | debrid_service = TorBox(config) 19 | else: 20 | raise HTTPException(status_code=500, detail="Invalid service configuration.") 21 | 22 | return debrid_service -------------------------------------------------------------------------------- /source/debrid/premiumize.py: -------------------------------------------------------------------------------- 1 | # Assuming the BaseDebrid class and necessary imports are already defined as shown previously 2 | import json 3 | 4 | from constants import NO_CACHE_VIDEO_URL 5 | from debrid.base_debrid import BaseDebrid 6 | from utils.general import get_info_hash_from_magnet, season_episode_in_filename 7 | from utils.logger import setup_logger 8 | 9 | logger = setup_logger(__name__) 10 | 11 | 12 | class Premiumize(BaseDebrid): 13 | def __init__(self, config): 14 | super().__init__(config) 15 | self.base_url = "https://www.premiumize.me/api" 16 | 17 | def add_magnet(self, magnet, ip=None): 18 | url = f"{self.base_url}/transfer/create?apikey={self.config['debridKey']}" 19 | form = {'src': magnet} 20 | return self.get_json_response(url, method='post', data=form) 21 | 22 | # Doesn't work for the time being. Premiumize does not support torrent file torrents 23 | def add_torrent(self, torrent_file): 24 | url = f"{self.base_url}/transfer/create?apikey={self.config['debridKey']}" 25 | form = {'file': torrent_file} 26 | return self.get_json_response(url, method='post', data=form) 27 | 28 | def list_transfers(self): 29 | url = f"{self.base_url}/transfer/list?apikey={self.config['debridKey']}" 30 | return self.get_json_response(url) 31 | 32 | def get_folder_or_file_details(self, item_id, is_folder=True): 33 | if is_folder: 34 | logger.info(f"Getting folder details with id: {item_id}") 35 | url = f"{self.base_url}/folder/list?id={item_id}&apikey={self.config['debridKey']}" 36 | else: 37 | logger.info(f"Getting file details with id: {item_id}") 38 | url = f"{self.base_url}/item/details?id={item_id}&apikey={self.config['debridKey']}" 39 | return self.get_json_response(url) 40 | 41 | def get_availability(self, hash): 42 | url = f"{self.base_url}/cache/check?apikey={self.config['debridKey']}&items[]={hash}" 43 | return self.get_json_response(url) 44 | 45 | def get_availability_bulk(self, hashes_or_magnets, ip=None): 46 | url = f"{self.base_url}/cache/check?apikey={self.config['debridKey']}&items[]=" + "&items[]=".join( 47 | hashes_or_magnets) 48 | return self.get_json_response(url) 49 | 50 | def get_stream_link(self, query, ip=None): 51 | query = json.loads(query) 52 | magnet = query['magnet'] 53 | logger.info(f"Received query for magnet: {magnet}") 54 | info_hash = get_info_hash_from_magnet(magnet) 55 | logger.info(f"Info hash extracted: {info_hash}") 56 | stream_type = query['type'] 57 | logger.info(f"Stream type: {stream_type}") 58 | 59 | transfer_data = self.add_magnet(magnet) 60 | if not transfer_data or 'id' not in transfer_data: 61 | logger.error("Failed to create transfer.") 62 | return "Error: Failed to create transfer." 63 | transfer_id = transfer_data['id'] 64 | logger.info(f"Transfer created with ID: {transfer_id}") 65 | 66 | if not self.wait_for_ready_status(lambda: self.get_availability(info_hash)["transcoded"][0] is True): 67 | logger.info("Torrent not ready, caching in progress") 68 | return NO_CACHE_VIDEO_URL 69 | 70 | logger.info("Torrent is ready.") 71 | 72 | # Assuming the transfer is complete, we need to find whether it's a file or a folder 73 | transfers = self.list_transfers() 74 | item_id, is_folder = None, False 75 | for item in transfers.get('transfers', []): 76 | if item['id'] == transfer_id: 77 | if item.get('folder_id'): 78 | item_id = item['folder_id'] 79 | is_folder = True 80 | else: 81 | item_id = item['file_id'] 82 | break 83 | 84 | if not item_id: 85 | logger.error("Transfer completed but no item ID found.") 86 | return "Error: Transfer completed but no item ID found." 87 | 88 | details = self.get_folder_or_file_details(item_id, is_folder) 89 | logger.info(f"Got details") 90 | 91 | if stream_type == "movie": 92 | logger.info("Getting link for movie") 93 | # For movies, we pick the largest file in the folder or the file itself 94 | if is_folder: 95 | link = max(details.get("content", []), key=lambda x: x["size"])["link"] 96 | else: 97 | link = details.get('link') 98 | elif stream_type == "series": 99 | logger.info("Getting link for series") 100 | if is_folder: 101 | season = query["season"] 102 | episode = query["episode"] 103 | files = details.get("content", []) 104 | matching_files = [] 105 | 106 | for file in files: 107 | if season_episode_in_filename(file["name"], season, episode): 108 | matching_files.append(file) 109 | 110 | if len(matching_files) == 0: 111 | logger.error(f"No matching files for {season} {episode} in torrent.") 112 | return f"Error: No matching files for {season} {episode} in torrent." 113 | 114 | link = max(matching_files, key=lambda x: x["size"])["link"] 115 | else: 116 | link = details.get('link') 117 | else: 118 | logger.error("Unsupported stream type.") 119 | return "Error: Unsupported stream type." 120 | 121 | logger.info(f"Link generated: {link}") 122 | return link 123 | -------------------------------------------------------------------------------- /source/debrid/realdebrid.py: -------------------------------------------------------------------------------- 1 | import json 2 | import time 3 | from urllib.parse import unquote 4 | 5 | import requests 6 | 7 | from constants import NO_CACHE_VIDEO_URL 8 | from debrid.base_debrid import BaseDebrid 9 | from utils.general import get_info_hash_from_magnet 10 | from utils.general import is_video_file 11 | from utils.general import season_episode_in_filename 12 | from utils.logger import setup_logger 13 | 14 | logger = setup_logger(__name__) 15 | 16 | 17 | class RealDebrid(BaseDebrid): 18 | def __init__(self, config): 19 | super().__init__(config) 20 | self.base_url = "https://api.real-debrid.com" 21 | self.headers = {"Authorization": f"Bearer {self.config['debridKey']}"} 22 | 23 | def add_magnet(self, magnet, ip=None): 24 | url = f"{self.base_url}/rest/1.0/torrents/addMagnet" 25 | data = {"magnet": magnet} 26 | return self.get_json_response(url, method='post', headers=self.headers, data=data) 27 | 28 | def add_torrent(self, torrent_file): 29 | url = f"{self.base_url}/rest/1.0/torrents/addTorrent" 30 | return self.get_json_response(url, method='put', headers=self.headers, data=torrent_file) 31 | 32 | def delete_torrent(self, id): 33 | url = f"{self.base_url}/rest/1.0/torrents/delete/{id}" 34 | return self.get_json_response(url, method='delete', headers=self.headers) 35 | 36 | def get_torrent_info(self, torrent_id): 37 | logger.info(f"Getting torrent info for: {torrent_id}") 38 | url = f"{self.base_url}/rest/1.0/torrents/info/{torrent_id}" 39 | torrent_info = self.get_json_response(url, headers=self.headers) 40 | 41 | if not torrent_info or 'files' not in torrent_info: 42 | return None 43 | 44 | return torrent_info 45 | 46 | def select_files(self, torrent_id, file_id): 47 | logger.info(f"Selecting file(s): {file_id}") 48 | url = f"{self.base_url}/rest/1.0/torrents/selectFiles/{torrent_id}" 49 | data = {"files": str(file_id)} 50 | requests.post(url, headers=self.headers, data=data) 51 | 52 | def unrestrict_link(self, link): 53 | url = f"{self.base_url}/rest/1.0/unrestrict/link" 54 | data = {"link": link} 55 | return self.get_json_response(url, method='post', headers=self.headers, data=data) 56 | 57 | def is_already_added(self, magnet): 58 | hash = magnet.split("urn:btih:")[1].split("&")[0].lower() 59 | url = f"{self.base_url}/rest/1.0/torrents" 60 | torrents = self.get_json_response(url, headers=self.headers) 61 | for torrent in torrents: 62 | if torrent['hash'].lower() == hash: 63 | return torrent['id'] 64 | return False 65 | 66 | def wait_for_link(self, torrent_id, timeout=30, interval=5): 67 | start_time = time.time() 68 | while time.time() - start_time < timeout: 69 | torrent_info = self.get_torrent_info(torrent_id) 70 | if torrent_info and 'links' in torrent_info and len(torrent_info['links']) > 0: 71 | return torrent_info['links'] 72 | time.sleep(interval) 73 | 74 | return None 75 | 76 | def get_availability_bulk(self, hashes_or_magnets, ip=None): 77 | if len(hashes_or_magnets) == 0: 78 | logger.info("No hashes to be sent to Real-Debrid.") 79 | return dict() 80 | 81 | url = f"{self.base_url}/torrents/" 82 | ids = [] 83 | for element in self.get_json_response(url)["data"]["hash"]: 84 | if element["hash"] in hashes_or_magnets: 85 | ids.append(element["id"]) 86 | return self.get_json_response(url, headers=self.headers) 87 | 88 | def get_stream_link(self, query_string, ip=None): 89 | query = json.loads(query_string) 90 | 91 | magnet = query['magnet'] 92 | stream_type = query['type'] 93 | file_index = int(query['file_index']) if query['file_index'] is not None else None 94 | season = query['season'] 95 | episode = query['episode'] 96 | torrent_download = unquote(query["torrent_download"]) if query["torrent_download"] is not None else None 97 | info_hash = get_info_hash_from_magnet(magnet) 98 | logger.info(f"RealDebrid get stream link for {stream_type} with hash: {info_hash}") 99 | 100 | cached_torrent_ids = self.__get_cached_torrent_ids(info_hash) 101 | logger.info(f"Found {len(cached_torrent_ids)} cached torrents with hash: {info_hash}") 102 | 103 | torrent_info = None 104 | if len(cached_torrent_ids) > 0: 105 | if stream_type == "movie": 106 | torrent_info = self.get_torrent_info(cached_torrent_ids[0]) 107 | elif stream_type == "series": 108 | torrent_info = self.__get_cached_torrent_info(cached_torrent_ids, file_index, season, episode) 109 | else: 110 | return "Error: Unsupported stream type." 111 | 112 | # The torrent is not yet added 113 | if torrent_info is None: 114 | torrent_info = self.__add_magnet_or_torrent(magnet, torrent_download) 115 | if not torrent_info or 'files' not in torrent_info: 116 | return "Error: Failed to get torrent info." 117 | 118 | logger.info("Selecting file") 119 | self.__select_file(torrent_info, stream_type, file_index, season, episode) 120 | 121 | # == operator, to avoid adding the season pack twice and setting 5 as season pack treshold 122 | if len(cached_torrent_ids) == 0 and stream_type == "series" and len(torrent_info["files"]) > 5: 123 | logger.info("Prefetching season pack") 124 | prefetched_torrent_info = self.__prefetch_season_pack(magnet, torrent_download) 125 | if len(prefetched_torrent_info["links"]) > 0: 126 | self.delete_torrent(torrent_info["id"]) 127 | torrent_info = prefetched_torrent_info 128 | 129 | torrent_id = torrent_info["id"] 130 | logger.info(f"Waiting for the link(s) to be ready for torrent ID: {torrent_id}") 131 | # Waiting for the link(s) to be ready 132 | links = self.wait_for_link(torrent_id) 133 | if links is None: 134 | return NO_CACHE_VIDEO_URL 135 | 136 | if len(links) > 1: 137 | logger.info("Finding appropiate link") 138 | download_link = self.__find_appropiate_link(torrent_info, links, file_index, season, episode) 139 | else: 140 | download_link = links[0] 141 | 142 | logger.info(f"Unrestricting the download link: {download_link}") 143 | # Unrestricting the download link 144 | unrestrict_response = self.unrestrict_link(download_link) 145 | if not unrestrict_response or 'download' not in unrestrict_response: 146 | return "Error: Failed to unrestrict link." 147 | 148 | logger.info(f"Got download link: {unrestrict_response['download']}") 149 | return unrestrict_response['download'] 150 | 151 | def __get_cached_torrent_ids(self, info_hash): 152 | url = f"{self.base_url}/rest/1.0/torrents" 153 | torrents = self.get_json_response(url, headers=self.headers) 154 | 155 | logger.info(f"Searching users real-debrid downloads for {info_hash}") 156 | torrent_ids = [] 157 | for torrent in torrents: 158 | if torrent['hash'].lower() == info_hash: 159 | torrent_ids.append(torrent['id']) 160 | 161 | return torrent_ids 162 | 163 | def __get_cached_torrent_info(self, cached_ids, file_index, season, episode): 164 | cached_torrents = [] 165 | for cached_torrent_id in cached_ids: 166 | cached_torrent_info = self.get_torrent_info(cached_torrent_id) 167 | if self.__torrent_contains_file(cached_torrent_info, file_index, season, episode): 168 | if len(cached_torrent_info["links"]) > 0: # If the links are ready 169 | return cached_torrent_info 170 | 171 | cached_torrents.append(cached_torrent_info) 172 | 173 | if len(cached_torrents) == 0: 174 | return None 175 | 176 | return max(cached_torrents, key=lambda x: x['progress']) 177 | 178 | def __torrent_contains_file(self, torrent_info, file_index, season, episode): 179 | if not torrent_info or "files" not in torrent_info: 180 | return False 181 | 182 | if file_index is None: 183 | for file in torrent_info["files"]: 184 | if file["selected"] and season_episode_in_filename(file['path'], season, episode): 185 | return True 186 | else: 187 | for file in torrent_info["files"]: 188 | if file['id'] == file_index: 189 | return file["selected"] == 1 190 | 191 | return False 192 | 193 | def __add_magnet_or_torrent(self, magnet, torrent_download=None): 194 | torrent_id = "" 195 | if torrent_download is None: 196 | logger.info(f"Adding magnet to RealDebrid") 197 | magnet_response = self.add_magnet(magnet) 198 | logger.info(f"RealDebrid add magnet response: {magnet_response}") 199 | 200 | if not magnet_response or 'id' not in magnet_response: 201 | return "Error: Failed to add magnet." 202 | 203 | torrent_id = magnet_response['id'] 204 | else: 205 | logger.info(f"Downloading torrent file from Jackett") 206 | torrent_file = self.donwload_torrent_file(torrent_download) 207 | logger.info(f"Torrent file downloaded from Jackett") 208 | 209 | logger.info(f"Adding torrent file to RealDebrid") 210 | upload_response = self.add_torrent(torrent_file) 211 | logger.info(f"RealDebrid add torrent file response: {upload_response}") 212 | 213 | if not upload_response or 'id' not in upload_response: 214 | return "Error: Failed to add torrent file." 215 | 216 | torrent_id = upload_response['id'] 217 | 218 | logger.info(f"New torrent ID: {torrent_id}") 219 | return self.get_torrent_info(torrent_id) 220 | 221 | def __prefetch_season_pack(self, magnet, torrent_download): 222 | torrent_info = self.__add_magnet_or_torrent(magnet, torrent_download) 223 | video_file_indexes = [] 224 | 225 | for file in torrent_info["files"]: 226 | if is_video_file(file["path"]): 227 | video_file_indexes.append(str(file["id"])) 228 | 229 | self.select_files(torrent_info["id"], ",".join(video_file_indexes)) 230 | time.sleep(10) 231 | return self.get_torrent_info(torrent_info["id"]) 232 | 233 | def __select_file(self, torrent_info, stream_type, file_index, season, episode): 234 | torrent_id = torrent_info["id"] 235 | if file_index is not None: 236 | logger.info(f"Selecting file_index: {file_index}") 237 | self.select_files(torrent_id, file_index) 238 | return 239 | 240 | files = torrent_info["files"] 241 | if stream_type == "movie": 242 | largest_file_id = max(files, key=lambda x: x['bytes'])['id'] 243 | logger.info(f"Selecting file_index: {largest_file_id}") 244 | self.select_files(torrent_id, largest_file_id) 245 | elif stream_type == "series": 246 | strict_matching_files = [] 247 | matching_files = [] 248 | for file in files: 249 | if season_episode_in_filename(file["path"], season, episode, strict=True): 250 | strict_matching_files.append(file) 251 | elif season_episode_in_filename(file["path"], season, episode, strict=False): 252 | matching_files.append(file) 253 | 254 | if len(strict_matching_files) > 0: 255 | matching_files = strict_matching_files 256 | 257 | largest_file_id = max(matching_files, key=lambda x: x['bytes'])['id'] 258 | logger.info(f"Selecting file_index: {largest_file_id}") 259 | self.select_files(torrent_id, largest_file_id) 260 | 261 | def __find_appropiate_link(self, torrent_info, links, file_index, season, episode): 262 | selected_files = list(filter(lambda file: file["selected"] == 1, torrent_info["files"])) 263 | 264 | index = 0 265 | if file_index is not None: 266 | for file in selected_files: 267 | if file["id"] == file_index: 268 | break 269 | index += 1 270 | else: 271 | matching_indexes = [] 272 | strict_matching_indexes = [] 273 | for file in selected_files: 274 | if season_episode_in_filename(file["path"], season, episode, strict=True): 275 | strict_matching_indexes.append({"index": index, "file": file}) 276 | elif season_episode_in_filename(file["path"], season, episode, strict=False): 277 | matching_indexes.append({"index": index, "file": file}) 278 | index += 1 279 | 280 | if len(strict_matching_indexes) > 0: 281 | matching_indexes = strict_matching_indexes 282 | 283 | index = max(matching_indexes, lambda x: x["file"]["bytes"])["index"] 284 | 285 | if len(links) - 1 < index: 286 | logger.debug(f"From selected files {selected_files}, index: {index} is out of range for {links}.") 287 | return NO_CACHE_VIDEO_URL 288 | 289 | return links[index] -------------------------------------------------------------------------------- /source/debrid/torbox.py: -------------------------------------------------------------------------------- 1 | import json 2 | import requests 3 | import time 4 | from urllib.parse import unquote 5 | 6 | from constants import NO_CACHE_VIDEO_URL 7 | from debrid.base_debrid import BaseDebrid 8 | from utils.general import season_episode_in_filename 9 | from utils.logger import setup_logger 10 | 11 | logger = setup_logger(__name__) 12 | 13 | 14 | class TorBox(BaseDebrid): 15 | def __init__(self, config): 16 | super().__init__(config) 17 | self.base_url = "https://api.torbox.app/v1/api/" 18 | self.headers = { 19 | "Authorization": f"Bearer {self.config['debridKey']}", 20 | } 21 | 22 | def wait_for_files(self, torrent_hash, timeout=30, interval=5): 23 | start_time = time.time() 24 | while time.time() - start_time < timeout: 25 | status = self.check_magnet_status(torrent_hash) 26 | if status: 27 | logger.info(f"Torrent status: {status}") 28 | if isinstance(status, list) and len(status) > 0 and "files" in status[0]: 29 | files = status[0]["files"] 30 | if files: 31 | logger.info(f"Files are ready: {files}") 32 | return files 33 | logger.info("Files not ready yet, retrying...") 34 | time.sleep(interval) 35 | logger.error("Timeout while waiting for torrent files.") 36 | return None 37 | 38 | def add_magnet(self, magnet): 39 | url = f"{self.base_url}torrents/createtorrent" 40 | data = { 41 | "magnet": magnet, 42 | "seed": 2 43 | } 44 | logger.info(f"URL: {url}") 45 | logger.info(f"Headers: {self.headers}") 46 | logger.info(f"Form Data: {data}") 47 | 48 | response = self.get_json_response(url, method="post", data=data) 49 | if response and response.get("success", False): 50 | data = response.get("data", {}) 51 | if "torrent_id" not in data: 52 | logger.error(f"Missing 'torrent_id' in response: {response}") 53 | return None 54 | cached = "Found Cached Torrent" in response.get("detail", "") 55 | return { 56 | "torrent_id": data["torrent_id"], 57 | "hash": data["hash"], 58 | "is_cached": cached 59 | } 60 | else: 61 | logger.error(f"Failed to add magnet: {response}") 62 | return None 63 | 64 | def check_magnet_status(self, torrent_hash): 65 | url = f"{self.base_url}torrents/checkcached?hash={torrent_hash}&format=object&list_files=true" 66 | response = self.get_json_response(url) 67 | logger.info(f"Response from check_magnet_status: {response}") 68 | if response and response.get("success", False): 69 | return response["data"] if response["data"] else [] 70 | else: 71 | logger.error(f"Failed to check status for hash {torrent_hash}: {response}") 72 | return None 73 | 74 | def get_file_download_link(self, torrent_id, file_name): 75 | url = f"{self.base_url}torrents/requestdl?token={self.config['debridKey']}&torrent_id={torrent_id}&file_id={file_name}&zip_link=false&torrent_file=false" 76 | response = self.get_json_response(url, method='get') 77 | if response and response.get("success", False): 78 | return response["data"] 79 | else: 80 | logger.error(f"Failed to get download link for torrent_id {torrent_id} and file {file_name}: {response}") 81 | return None 82 | 83 | def __add_magnet_or_torrent(self, magnet, torrent_download=None): 84 | torrent_id = None 85 | if magnet: 86 | logger.info(f"Adding magnet to TorBox") 87 | torrent_id = self.add_magnet(magnet) 88 | logger.info(f"TorBox add magnet response: {torrent_id}") 89 | else: 90 | logger.error("Only magnet links are supported for TorBox.") 91 | return torrent_id 92 | 93 | def get_stream_link(self, query_string, ip): 94 | query = json.loads(query_string) 95 | magnet = query["magnet"] 96 | stream_type = query["type"] 97 | season = query.get("season") 98 | episode = query.get("episode") 99 | 100 | magnet_data = self.add_magnet(magnet) 101 | if not magnet_data: 102 | logger.error("Failed to add magnet or retrieve torrent_id.") 103 | return NO_CACHE_VIDEO_URL 104 | 105 | torrent_id = magnet_data.get("torrent_id") 106 | if not torrent_id: 107 | logger.error("Missing torrent_id in magnet_data.") 108 | return NO_CACHE_VIDEO_URL 109 | 110 | is_cached = magnet_data["is_cached"] 111 | 112 | if is_cached: 113 | logger.info("Magnet is already cached. Files are ready.") 114 | files = self.check_magnet_status(magnet_data["hash"])[magnet_data["hash"]] 115 | if not files or "files" not in files: 116 | logger.error("Files not found in cached torrent.") 117 | return NO_CACHE_VIDEO_URL 118 | else: 119 | files = self.wait_for_files(magnet_data["hash"]) 120 | if not files: 121 | logger.error(f"No files found for magnet {magnet}.") 122 | return NO_CACHE_VIDEO_URL 123 | 124 | if stream_type == "movie": 125 | largest_file_index, largest_file = max( 126 | enumerate(files["files"]), key=lambda x: x[1]["size"] 127 | ) 128 | return self.get_file_download_link(torrent_id, largest_file_index) 129 | elif stream_type == "series": 130 | files = files["files"] 131 | matching_files = [ 132 | (index, file) for index, file in enumerate(files) 133 | if season_episode_in_filename(file["name"], season, episode) 134 | ] 135 | if matching_files: 136 | selected_index, selected_file = max( 137 | matching_files, key=lambda x: x[1]["size"] 138 | ) 139 | return self.get_file_download_link(torrent_id, selected_index) 140 | else: 141 | logger.error(f"No matching files found for {season}x{episode}.") 142 | return NO_CACHE_VIDEO_URL 143 | else: 144 | logger.error("Unsupported stream type.") 145 | return "Error: Unsupported stream type." 146 | 147 | def get_json_response(self, url, method='get', **kwargs): 148 | try: 149 | if method == 'get': 150 | response = requests.get(url, headers=self.headers, **kwargs) 151 | elif method == 'post': 152 | response = requests.post(url, headers=self.headers, **kwargs) 153 | else: 154 | raise ValueError(f"Unsupported HTTP method: {method}") 155 | 156 | response.raise_for_status() 157 | return response.json() 158 | except requests.exceptions.RequestException as e: 159 | logger.error(f"HTTP request failed: {e}") 160 | return None 161 | 162 | def get_availability_bulk(self, hashes_or_magnets, ip=None): 163 | 164 | available_torrents = {} 165 | 166 | for torrent_hash in hashes_or_magnets: 167 | url = f"{self.base_url}torrents/checkcached?hash={torrent_hash}&format=list&list_files=true" 168 | try: 169 | response = self.get_json_response(url) 170 | if response.get("success") and response.get("data"): 171 | torrent_data = response["data"][0] 172 | available_torrents[torrent_hash] = { 173 | "name": torrent_data["name"], 174 | "size": torrent_data["size"], 175 | "files": torrent_data["files"] 176 | } 177 | else: 178 | self.logger.warning(f"Torrent {torrent_hash} is not cached or invalid response: {response}") 179 | 180 | except Exception as e: 181 | self.logger.error(f"Error while checking availability for hash {torrent_hash}: {e}") 182 | continue 183 | 184 | return available_torrents 185 | -------------------------------------------------------------------------------- /source/jackett/jackett_indexer.py: -------------------------------------------------------------------------------- 1 | class JackettIndexer: 2 | def __init__(self): 3 | self.title = None 4 | self.id = None 5 | self.link = None 6 | self.type = None 7 | self.language = None 8 | self.tv_search_capatabilities = None 9 | self.movie_search_capatabilities = None 10 | -------------------------------------------------------------------------------- /source/jackett/jackett_result.py: -------------------------------------------------------------------------------- 1 | from RTN import parse 2 | 3 | from models.series import Series 4 | from torrent.torrent_item import TorrentItem 5 | from utils.logger import setup_logger 6 | 7 | logger = setup_logger(__name__) 8 | 9 | class JackettResult: 10 | def __init__(self): 11 | self.raw_title = None # Raw title of the torrent 12 | self.size = None # Size of the torrent 13 | self.link = None # Download link for the torrent file or magnet url 14 | self.indexer = None # Indexer 15 | self.seeders = None # Seeders count 16 | self.magnet = None # Magnet url 17 | self.info_hash = None # infoHash by Jackett 18 | self.privacy = None # public or private 19 | 20 | # Extra processed details for further filtering 21 | self.languages = None # Language of the torrent 22 | self.type = None # series or movie 23 | 24 | self.parsed_data = None # Ranked result 25 | 26 | def convert_to_torrent_item(self): 27 | return TorrentItem( 28 | self.raw_title, 29 | self.size, 30 | self.magnet, 31 | self.info_hash.lower() if self.info_hash is not None else None, 32 | self.link, 33 | self.seeders, 34 | self.languages, 35 | self.indexer, 36 | self.privacy, 37 | self.type, 38 | self.parsed_data 39 | ) 40 | 41 | def from_cached_item(self, cached_item, media): 42 | if type(cached_item) is not dict: 43 | logger.error(cached_item) 44 | 45 | parsed_result = parse(cached_item['title']) 46 | 47 | self.raw_title = cached_item['title'] 48 | self.indexer = "Cache" # Cache doesn't return an indexer sadly (It stores it tho) 49 | self.magnet = cached_item['magnet'] 50 | self.link = cached_item['magnet'] 51 | self.info_hash = cached_item['hash'] 52 | self.languages = cached_item['language'].split(";") if cached_item['language'] is not None else [] 53 | self.seeders = cached_item['seeders'] 54 | self.size = cached_item['size'] 55 | self.type = media.type 56 | self.parsed_data = parsed_result 57 | 58 | return self 59 | -------------------------------------------------------------------------------- /source/jackett/jackett_service.py: -------------------------------------------------------------------------------- 1 | import os 2 | import queue 3 | import threading 4 | import time 5 | import xml.etree.ElementTree as ET 6 | 7 | import requests 8 | from RTN import parse 9 | 10 | from jackett.jackett_indexer import JackettIndexer 11 | from jackett.jackett_result import JackettResult 12 | from models.movie import Movie 13 | from models.series import Series 14 | from utils.detection import detect_languages 15 | from utils.logger import setup_logger 16 | 17 | 18 | class JackettService: 19 | def __init__(self, config): 20 | self.logger = setup_logger(__name__) 21 | self._indexers = None 22 | self.__api_key = config['jackettApiKey'] 23 | self.__base_url = f"{config['jackettHost']}/api/v2.0" 24 | self.__session = requests.Session() 25 | 26 | def search(self, media): 27 | self.logger.info("Started Jackett search for " + media.type + " " + media.titles[0]) 28 | 29 | indexers = self.get_indexers() 30 | threads = [] 31 | results_queue = queue.Queue() # Create a Queue instance to hold the results 32 | 33 | # Define a wrapper function that calls the actual target function and stores its return value in the queue 34 | def thread_target(media, indexer): 35 | self.logger.info(f"Searching on {indexer.title}") 36 | start_time = time.time() 37 | 38 | # Call the actual function 39 | if isinstance(media, Movie): 40 | result = self.__search_movie_indexer(media, indexer) 41 | elif isinstance(media, Series): 42 | result = self.__search_series_indexer(media, indexer) 43 | else: 44 | raise TypeError("Only Movie and Series is allowed as media!") 45 | 46 | self.logger.info( 47 | f"Search on {indexer.title} took {time.time() - start_time} seconds and found {len([result for sublist in result for result in sublist])} results") 48 | 49 | results_queue.put(result) # Put the result in the queue 50 | 51 | for indexer in indexers: 52 | # Pass the wrapper function as the target to Thread, with necessary arguments 53 | threads.append(threading.Thread(target=thread_target, args=(media, indexer))) 54 | 55 | for thread in threads: 56 | thread.start() 57 | 58 | for thread in threads: 59 | thread.join() 60 | 61 | results = [] 62 | 63 | # Retrieve results from the queue and append them to the results list 64 | while not results_queue.empty(): 65 | results.extend(results_queue.get()) 66 | 67 | flatten_results = [result for sublist in results for result in sublist] 68 | 69 | return self.__post_process_results(flatten_results, media) 70 | 71 | def __search_movie_indexer(self, movie, indexer): 72 | 73 | # url = f"{self.__base_url}/indexers/all/results/torznab/api?apikey={self.__api_key}&t=movie&cat=2000&q={movie.title}&year={movie.year}" 74 | 75 | has_imdb_search_capability = (os.getenv( 76 | "DISABLE_JACKETT_IMDB_SEARCH") != "true" and indexer.movie_search_capatabilities is not None and 'imdbid' in indexer.movie_search_capatabilities) 77 | 78 | if has_imdb_search_capability: 79 | languages = ['en'] 80 | index_of_language = [index for index, lang in enumerate(movie.languages) if lang == 'en'][0] 81 | titles = [movie.titles[index_of_language]] 82 | elif indexer.language == "en": 83 | languages = movie.languages 84 | titles = movie.titles 85 | else: 86 | index_of_language = [index for index, lang in enumerate(movie.languages) if 87 | lang == indexer.language or lang == 'en'] 88 | languages = [movie.languages[index] for index in index_of_language] 89 | titles = [movie.titles[index] for index in index_of_language] 90 | 91 | results = [] 92 | 93 | for index, lang in enumerate(languages): 94 | params = { 95 | 'apikey': self.__api_key, 96 | 't': 'movie', 97 | 'cat': '2000', 98 | 'q': titles[index], 99 | 'year': movie.year, 100 | } 101 | 102 | if has_imdb_search_capability: 103 | params['imdbid'] = movie.id 104 | 105 | url = f"{self.__base_url}/indexers/{indexer.id}/results/torznab/api" 106 | url += '?' + '&'.join([f'{k}={v}' for k, v in params.items()]) 107 | 108 | try: 109 | response = self.__session.get(url) 110 | response.raise_for_status() 111 | results.append(self.__get_torrent_links_from_xml(response.text)) 112 | except Exception: 113 | self.logger.exception( 114 | f"An exception occured while searching for a movie on Jackett with indexer {indexer.title} and " 115 | f"language {lang}.") 116 | 117 | return results 118 | 119 | def __search_series_indexer(self, series, indexer): 120 | season = str(int(series.season.replace('S', ''))) 121 | episode = str(int(series.episode.replace('E', ''))) 122 | 123 | has_imdb_search_capability = (os.getenv("DISABLE_JACKETT_IMDB_SEARCH") != "true" 124 | and indexer.tv_search_capatabilities is not None 125 | and 'imdbid' in indexer.tv_search_capatabilities) 126 | if has_imdb_search_capability: 127 | languages = ['en'] 128 | index_of_language = [index for index, lang in enumerate(series.languages) if lang == 'en'][0] 129 | titles = [series.titles[index_of_language]] 130 | elif indexer.language == "en": 131 | languages = series.languages 132 | titles = series.titles 133 | else: 134 | index_of_language = [index for index, lang in enumerate(series.languages) if 135 | lang == indexer.language or lang == 'en'] 136 | languages = [series.languages[index] for index in index_of_language] 137 | titles = [series.titles[index] for index in index_of_language] 138 | 139 | results = [] 140 | 141 | for index, lang in enumerate(languages): 142 | params = { 143 | 'apikey': self.__api_key, 144 | 't': 'tvsearch', 145 | 'cat': '5000', 146 | 'q': titles[index], 147 | } 148 | 149 | if has_imdb_search_capability: 150 | params['imdbid'] = series.id 151 | 152 | url_title = f"{self.__base_url}/indexers/{indexer.id}/results/torznab/api" 153 | url_title += '?' + '&'.join([f'{k}={v}' for k, v in params.items()]) 154 | 155 | url_season = f"{self.__base_url}/indexers/{indexer.id}/results/torznab/api" 156 | params['season'] = season 157 | url_season += '?' + '&'.join([f'{k}={v}' for k, v in params.items()]) 158 | 159 | url_ep = f"{self.__base_url}/indexers/{indexer.id}/results/torznab/api" 160 | params['ep'] = episode 161 | url_ep += '?' + '&'.join([f'{k}={v}' for k, v in params.items()]) 162 | 163 | try: 164 | # Current functionality is that it returns if the season, episode search was successful. This is subject to change 165 | # TODO: what should we prioritize? season, episode or title? 166 | response_ep = self.__session.get(url_ep) 167 | response_ep.raise_for_status() 168 | 169 | response_season = self.__session.get(url_season) 170 | response_season.raise_for_status() 171 | 172 | data_ep = self.__get_torrent_links_from_xml(response_ep.text) 173 | data_season = self.__get_torrent_links_from_xml(response_season.text) 174 | 175 | if data_ep: 176 | results.append(data_ep) 177 | if data_season: 178 | results.append(data_season) 179 | 180 | if not data_ep and not data_season: 181 | response_title = self.__session.get(url_title) 182 | response_title.raise_for_status() 183 | data_title = self.__get_torrent_links_from_xml(response_title.text) 184 | if data_title: 185 | results.append(data_title) 186 | except Exception: 187 | self.logger.exception( 188 | f"An exception occured while searching for a series on Jackett with indexer {indexer.title} and language {lang}.") 189 | 190 | return results 191 | 192 | def get_indexers(self): 193 | if not self._indexers: 194 | self.logger.info(f"Indexer cache miss. Requesting API...") 195 | url = f"{self.__base_url}/indexers/all/results/torznab/api?apikey={self.__api_key}&t=indexers&configured=true" 196 | 197 | try: 198 | response = self.__session.get(url) 199 | response.raise_for_status() 200 | self._indexers = self.__get_indexer_from_xml(response.text) 201 | self.logger.info(f"Successfully retrieved {len(self._indexers)} indexers from Jackett. Storing in cache...") 202 | except Exception: 203 | self.logger.exception("An exception occured while getting indexers from Jackett.") 204 | return [] 205 | return self._indexers 206 | 207 | def __get_indexer_from_xml(self, xml_content): 208 | xml_root = ET.fromstring(xml_content) 209 | 210 | indexer_list = [] 211 | for item in xml_root.findall('.//indexer'): 212 | indexer = JackettIndexer() 213 | 214 | indexer.title = item.find('title').text 215 | indexer.id = item.attrib['id'] 216 | indexer.link = item.find('link').text 217 | indexer.type = item.find('type').text 218 | if item.find('language').text.split('-')[0] in ['pt']: 219 | indexer.language = item.find('language').text # Add support for localizations (e.g., pt-BR) 220 | else: 221 | indexer.language = item.find('language').text.split('-')[0] 222 | 223 | self.logger.info(f"Indexer: {indexer.title} - {indexer.link} - {indexer.type}") 224 | 225 | movie_search = item.find('.//searching/movie-search[@available="yes"]') 226 | tv_search = item.find('.//searching/tv-search[@available="yes"]') 227 | 228 | if movie_search is not None: 229 | indexer.movie_search_capatabilities = movie_search.attrib['supportedParams'].split(',') 230 | else: 231 | self.logger.info(f"Movie search not available for {indexer.title}") 232 | 233 | if tv_search is not None: 234 | indexer.tv_search_capatabilities = tv_search.attrib['supportedParams'].split(',') 235 | else: 236 | self.logger.info(f"TV search not available for {indexer.title}") 237 | 238 | indexer_list.append(indexer) 239 | 240 | return indexer_list 241 | 242 | def __get_torrent_links_from_xml(self, xml_content): 243 | xml_root = ET.fromstring(xml_content) 244 | 245 | result_list = [] 246 | for item in xml_root.findall('.//item'): 247 | result = JackettResult() 248 | 249 | result.seeders = item.find('.//torznab:attr[@name="seeders"]', 250 | namespaces={'torznab': 'http://torznab.com/schemas/2015/feed'}).attrib['value'] 251 | if int(result.seeders) <= 0: 252 | continue 253 | 254 | result.raw_title = item.find('title').text 255 | result.size = item.find('size').text 256 | result.link = item.find('link').text 257 | result.indexer = item.find('jackettindexer').text 258 | result.privacy = item.find('type').text 259 | 260 | # TODO: I haven't seen this in the Jackett XML response. Is this still relevant? 261 | # Or which indexers provide this? 262 | magnet = item.find('.//torznab:attr[@name="magneturl"]', 263 | namespaces={'torznab': 'http://torznab.com/schemas/2015/feed'}) 264 | result.magnet = magnet.attrib['value'] if magnet is not None else None 265 | 266 | infoHash = item.find('.//torznab:attr[@name="infohash"]', 267 | namespaces={'torznab': 'http://torznab.com/schemas/2015/feed'}) 268 | result.info_hash = infoHash.attrib['value'] if infoHash is not None else None 269 | 270 | result_list.append(result) 271 | 272 | return result_list 273 | 274 | def __post_process_results(self, results, media): 275 | for result in results: 276 | 277 | # self.logger.info(result.title) 278 | # self.logger.info(parse(result.title)) 279 | 280 | parsed_result = parse(result.raw_title) 281 | # result.languages = [languages.get(name=language).alpha2 for language in parsed_result.language] 282 | result.parsed_data = parsed_result 283 | # TODO: replace with parsed_result.lang_codes when RTN is updated 284 | result.languages = detect_languages(result.raw_title) 285 | result.type = media.type 286 | 287 | return results 288 | -------------------------------------------------------------------------------- /source/main.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import os 4 | import re 5 | import shutil 6 | import time 7 | import zipfile 8 | 9 | import requests 10 | import starlette.status as status 11 | from aiocron import crontab 12 | from dotenv import load_dotenv 13 | from fastapi import FastAPI, Request, HTTPException 14 | from fastapi.middleware.cors import CORSMiddleware 15 | from fastapi.responses import RedirectResponse 16 | from fastapi.templating import Jinja2Templates 17 | from starlette.responses import FileResponse 18 | 19 | from debrid.get_debrid_service import get_debrid_service 20 | from jackett.jackett_result import JackettResult 21 | from jackett.jackett_service import JackettService 22 | from metdata.cinemeta import Cinemeta 23 | from metdata.tmdb import TMDB 24 | from torrent.torrent_service import TorrentService 25 | from torrent.torrent_smart_container import TorrentSmartContainer 26 | from utils.cache import search_cache 27 | from utils.filter_results import filter_items, sort_items 28 | from utils.logger import setup_logger 29 | from utils.parse_config import parse_config 30 | from utils.stremio_parser import parse_to_stremio_streams 31 | from utils.string_encoding import decodeb64 32 | 33 | load_dotenv() 34 | 35 | root_path = os.environ.get("ROOT_PATH", None) 36 | if root_path and not root_path.startswith("/"): 37 | root_path = "/" + root_path 38 | app = FastAPI(root_path=root_path) 39 | 40 | VERSION = "4.2.6" 41 | isDev = os.getenv("NODE_ENV") == "development" 42 | COMMUNITY_VERSION = True if os.getenv("IS_COMMUNITY_VERSION") == "true" else False 43 | SPONSOR_MESSAGE = os.getenv("SPONSOR_MESSAGE") 44 | ADDON_ID = os.getenv("ADDON_ID") if os.getenv("ADDON_ID") is not None else "community.aymene69.jackett" 45 | 46 | 47 | class LogFilterMiddleware: 48 | def __init__(self, app): 49 | self.app = app 50 | 51 | async def __call__(self, scope, receive, send): 52 | request = Request(scope, receive) 53 | path = request.url.path 54 | sensible_path = re.sub(r'/ey.*?/', '//', path) 55 | logger.info(f"{request.method} - {sensible_path}") 56 | return await self.app(scope, receive, send) 57 | 58 | 59 | app.add_middleware( 60 | CORSMiddleware, 61 | allow_origins=["*"], 62 | allow_credentials=True, 63 | allow_methods=["*"], 64 | allow_headers=["*"], 65 | ) 66 | 67 | if not isDev: 68 | app.add_middleware(LogFilterMiddleware) 69 | 70 | templates = Jinja2Templates(directory="templates") 71 | 72 | logger = setup_logger(__name__) 73 | 74 | 75 | @app.get("/") 76 | async def root(): 77 | return RedirectResponse(url="/configure") 78 | 79 | 80 | @app.get("/configure") 81 | @app.get("/{config}/configure") 82 | async def configure(request: Request): 83 | return templates.TemplateResponse( 84 | "index.html", 85 | {"request": request, "isCommunityVersion": COMMUNITY_VERSION, "sponsorMessage": SPONSOR_MESSAGE, "version": VERSION} 86 | ) 87 | 88 | 89 | @app.get("/static/{file_path:path}") 90 | async def function(file_path: str): 91 | response = FileResponse(f"templates/{file_path}") 92 | return response 93 | 94 | 95 | @app.get("/manifest.json") 96 | @app.get("/{params}/manifest.json") 97 | async def get_manifest(): 98 | return { 99 | "id": ADDON_ID, 100 | "icon": "https://i.imgur.com/tVjqEJP.png", 101 | "version": VERSION, 102 | "catalogs": [], 103 | "resources": ["stream"], 104 | "types": ["movie", "series"], 105 | "name": "Jackett" + (" Community" if COMMUNITY_VERSION else "") + (" (Dev)" if isDev else ""), 106 | "description": "Elevate your Stremio experience with seamless access to Jackett torrent links, effortlessly " 107 | "fetching torrents for your selected movies within the Stremio interface.", 108 | "behaviorHints": { 109 | "configurable": True, 110 | # "configurationRequired": True 111 | } 112 | } 113 | 114 | 115 | formatter = logging.Formatter('[%(asctime)s] p%(process)s {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s', 116 | '%m-%d %H:%M:%S') 117 | 118 | logger.info("Started Jackett Addon") 119 | 120 | 121 | @app.get("/{config}/stream/{stream_type}/{stream_id}") 122 | async def get_results(config: str, stream_type: str, stream_id: str, request: Request): 123 | jackett_service = None 124 | start = time.time() 125 | stream_id = stream_id.replace(".json", "") 126 | 127 | config = parse_config(config) 128 | logger.info(stream_type + " request") 129 | 130 | if config['metadataProvider'] == "tmdb" and config['tmdbApi']: 131 | metadata_provider = TMDB(config) 132 | if not COMMUNITY_VERSION and config['jackett']: 133 | logger.info(f"Getting indexers' languages from Jackett for setting up TMDB") 134 | jackett_service = JackettService(config) 135 | metadata_provider.indexers = jackett_service.get_indexers() 136 | else: 137 | metadata_provider = Cinemeta(config) 138 | logger.info(f"Getting media from {config['metadataProvider']}") 139 | media = metadata_provider.get_metadata(stream_id, stream_type) 140 | logger.info("Got media and properties: " + str(media.titles)) 141 | 142 | debrid_service = get_debrid_service(config) 143 | 144 | search_results = [] 145 | if COMMUNITY_VERSION or config['cache']: 146 | logger.info("Getting cached results") 147 | cached_results = search_cache(media) 148 | cached_results = [JackettResult().from_cached_item(torrent, media) for torrent in cached_results] 149 | logger.info("Got " + str(len(cached_results)) + " cached results") 150 | 151 | if len(cached_results) > 0: 152 | logger.info("Filtering cached results") 153 | search_results = filter_items(cached_results, media, config=config) 154 | logger.info("Filtered cached results") 155 | 156 | # TODO: if we have results per quality set, most of the time we will not have enough cached results AFTER filtering them 157 | # because we will have less results than the maxResults, so we will always have to search for new results 158 | 159 | if not COMMUNITY_VERSION and config['jackett'] and len(search_results) < int(config['maxResults']): 160 | if len(search_results) > 0 and config['cache']: 161 | logger.info("Not enough cached results found (results: " + str(len(search_results)) + ")") 162 | elif config['cache']: 163 | logger.info("No cached results found") 164 | 165 | logger.info("Searching for results on Jackett") 166 | jackett_service = jackett_service if jackett_service else JackettService(config) 167 | jackett_search_results = jackett_service.search(media) 168 | logger.info("Got " + str(len(jackett_search_results)) + " results from Jackett") 169 | 170 | logger.info("Filtering Jackett results") 171 | filtered_jackett_search_results = filter_items(jackett_search_results, media, config=config) 172 | logger.info("Filtered Jackett results") 173 | 174 | search_results.extend(filtered_jackett_search_results) 175 | 176 | logger.debug("Converting result to TorrentItems (results: " + str(len(search_results)) + ")") 177 | torrent_service = TorrentService() 178 | torrent_results = torrent_service.convert_and_process(search_results) 179 | logger.debug("Converted result to TorrentItems (results: " + str(len(torrent_results)) + ")") 180 | 181 | torrent_smart_container = TorrentSmartContainer(torrent_results, media) 182 | 183 | if config['debrid']: 184 | if config['service'] in ["torbox", "premiumize"]: 185 | logger.debug("Checking availability") 186 | hashes = torrent_smart_container.get_hashes() 187 | ip = request.client.host 188 | result = debrid_service.get_availability_bulk(hashes, ip) 189 | torrent_smart_container.update_availability(result, type(debrid_service), media) 190 | logger.debug("Checked availability (results: " + str(len(result.items())) + ")") 191 | 192 | # TODO: Maybe add an if to only save to cache if caching is enabled? 193 | torrent_smart_container.cache_container_items() 194 | 195 | logger.debug("Getting best matching results") 196 | best_matching_results = torrent_smart_container.get_best_matching() 197 | best_matching_results = sort_items(best_matching_results, config) 198 | logger.debug("Got best matching results (results: " + str(len(best_matching_results)) + ")") 199 | 200 | logger.info("Processing results") 201 | stream_list = parse_to_stremio_streams(best_matching_results, config, media) 202 | logger.info("Processed results (results: " + str(len(stream_list)) + ")") 203 | 204 | logger.info("Total time: " + str(time.time() - start) + "s") 205 | 206 | return {"streams": stream_list} 207 | 208 | 209 | # @app.head("/playback/{config}/{query}") 210 | @app.get("/playback/{config}/{query}") 211 | async def get_playback(config: str, query: str, request: Request): 212 | try: 213 | if not query: 214 | raise HTTPException(status_code=400, detail="Query required.") 215 | config = parse_config(config) 216 | logger.info("Decoding query") 217 | query = decodeb64(query) 218 | logger.info(query) 219 | logger.info("Decoded query") 220 | ip = request.client.host 221 | debrid_service = get_debrid_service(config) 222 | link = debrid_service.get_stream_link(query, ip) 223 | 224 | logger.info("Got link: " + link) 225 | return RedirectResponse(url=link, status_code=status.HTTP_301_MOVED_PERMANENTLY) 226 | 227 | except Exception as e: 228 | logger.error(f"An error occurred: {e}") 229 | raise HTTPException(status_code=500, detail="An error occurred while processing the request.") 230 | 231 | @app.head("/playback/{config}/{query}") 232 | async def get_playback(config: str, query: str, request: Request): 233 | try: 234 | if not query: 235 | raise HTTPException(status_code=400, detail="Query required.") 236 | config = parse_config(config) 237 | logger.info("Decoding query") 238 | query = decodeb64(query) 239 | logger.info(query) 240 | logger.info("Decoded query") 241 | ip = request.client.host 242 | debrid_service = get_debrid_service(config) 243 | link = debrid_service.get_stream_link(query, ip) 244 | 245 | logger.info("Got link: " + link) 246 | return RedirectResponse(url=link, status_code=status.HTTP_301_MOVED_PERMANENTLY) 247 | 248 | except Exception as e: 249 | logger.error(f"An error occurred: {e}") 250 | raise HTTPException(status_code=500, detail="An error occurred while processing the request.") 251 | 252 | 253 | async def update_app(): 254 | try: 255 | current_version = "v" + VERSION 256 | url = "https://api.github.com/repos/aymene69/stremio-jackett/releases/latest" 257 | response = requests.get(url) 258 | data = response.json() 259 | latest_version = data['tag_name'] 260 | if latest_version != current_version: 261 | logger.info("New version available: " + latest_version) 262 | logger.info("Updating...") 263 | logger.info("Getting update zip...") 264 | update_zip = requests.get(data['zipball_url']) 265 | with open("update.zip", "wb") as file: 266 | file.write(update_zip.content) 267 | logger.info("Update zip downloaded") 268 | logger.info("Extracting update...") 269 | with zipfile.ZipFile("update.zip", 'r') as zip_ref: 270 | zip_ref.extractall("update") 271 | logger.info("Update extracted") 272 | 273 | extracted_folder = os.listdir("update")[0] 274 | extracted_folder_path = os.path.join("update", extracted_folder) 275 | for item in os.listdir(extracted_folder_path): 276 | s = os.path.join(extracted_folder_path, item) 277 | d = os.path.join(".", item) 278 | if os.path.isdir(s): 279 | shutil.copytree(s, d, dirs_exist_ok=True) 280 | else: 281 | shutil.copy2(s, d) 282 | logger.info("Files copied") 283 | 284 | logger.info("Cleaning up...") 285 | shutil.rmtree("update") 286 | os.remove("update.zip") 287 | logger.info("Cleaned up") 288 | logger.info("Updated !") 289 | except Exception as e: 290 | logger.error(f"Error during update: {e}") 291 | 292 | 293 | @crontab("* * * * *", start=not isDev) 294 | async def schedule_task(): 295 | await update_app() 296 | 297 | 298 | async def main(): 299 | await asyncio.gather( 300 | schedule_task() 301 | ) 302 | 303 | 304 | if __name__ == "__main__": 305 | asyncio.run(main()) 306 | -------------------------------------------------------------------------------- /source/metdata/cinemeta.py: -------------------------------------------------------------------------------- 1 | import re 2 | import requests 3 | from time import sleep 4 | from metdata.metadata_provider_base import MetadataProvider 5 | from models.movie import Movie 6 | from models.series import Series 7 | 8 | class Cinemeta(MetadataProvider): 9 | def get_metadata(self, id, type): 10 | self.logger.info("Getting metadata for " + type + " with id " + id) 11 | full_id = id.split(":") 12 | url = f"https://v3-cinemeta.strem.io/meta/{type}/{full_id[0]}.json" 13 | 14 | max_retries = 3 15 | retry_count = 0 16 | 17 | while retry_count < max_retries: 18 | try: 19 | response = requests.get(url) 20 | data = response.json() 21 | 22 | # Check if data or data["meta"] is empty 23 | if not data or not data.get("meta"): 24 | retry_count += 1 25 | if retry_count == max_retries: 26 | raise ValueError(f"Empty response after {max_retries} retries for {id}") 27 | sleep(1) # Wait 1 second before retrying 28 | continue 29 | 30 | if type == "movie": 31 | 32 | year = data["meta"].get("year") 33 | if not year: 34 | release_info = data["meta"].get("releaseInfo") 35 | if re.search(r"\d{4}", release_info): 36 | year = re.search(r"\d{4}", release_info).group() 37 | 38 | result = Movie( 39 | id=id, 40 | titles=[self.replace_weird_characters(data["meta"]["name"])], 41 | year=year, 42 | languages=["en"] 43 | ) 44 | else: 45 | result = Series( 46 | id=id, 47 | titles=[self.replace_weird_characters(data["meta"]["name"])], 48 | season="S{:02d}".format(int(full_id[1])), 49 | episode="E{:02d}".format(int(full_id[2])), 50 | languages=["en"] 51 | ) 52 | 53 | self.logger.info("Got metadata for " + type + " with id " + id) 54 | return result 55 | 56 | except Exception as e: 57 | retry_count += 1 58 | if retry_count == max_retries: 59 | raise Exception(f"Failed to get metadata after {max_retries} retries: {str(e)}") 60 | sleep(1) # Wait 1 second before retrying -------------------------------------------------------------------------------- /source/metdata/metadata_provider_base.py: -------------------------------------------------------------------------------- 1 | from utils.logger import setup_logger 2 | 3 | 4 | class MetadataProvider: 5 | 6 | def __init__(self, config): 7 | self.config = config 8 | self.logger = setup_logger(__name__) 9 | 10 | def replace_weird_characters(self, string): 11 | corresp = { 12 | 'ā': 'a', 'ă': 'a', 'ą': 'a', 'ć': 'c', 'č': 'c', 'ç': 'c', 13 | 'ĉ': 'c', 'ċ': 'c', 'ď': 'd', 'đ': 'd', 'è': 'e', 'é': 'e', 14 | 'ê': 'e', 'ë': 'e', 'ē': 'e', 'ĕ': 'e', 'ę': 'e', 'ě': 'e', 15 | 'ĝ': 'g', 'ğ': 'g', 'ġ': 'g', 'ģ': 'g', 'ĥ': 'h', 'î': 'i', 16 | 'ï': 'i', 'ì': 'i', 'í': 'i', 'ī': 'i', 'ĩ': 'i', 'ĭ': 'i', 17 | 'ı': 'i', 'ĵ': 'j', 'ķ': 'k', 'ĺ': 'l', 'ļ': 'l', 'ł': 'l', 18 | 'ń': 'n', 'ň': 'n', 'ñ': 'n', 'ņ': 'n', 'ʼn': 'n', 'ó': 'o', 19 | 'ô': 'o', 'õ': 'o', 'ö': 'o', 'ø': 'o', 'ō': 'o', 'ő': 'o', 20 | 'œ': 'oe', 'ŕ': 'r', 'ř': 'r', 'ŗ': 'r', 'š': 's', 'ş': 's', 21 | 'ś': 's', 'ș': 's', 'ß': 'ss', 'ť': 't', 'ţ': 't', 'ū': 'u', 22 | 'ŭ': 'u', 'ũ': 'u', 'û': 'u', 'ü': 'u', 'ù': 'u', 'ú': 'u', 23 | 'ų': 'u', 'ű': 'u', 'ŵ': 'w', 'ý': 'y', 'ÿ': 'y', 'ŷ': 'y', 24 | 'ž': 'z', 'ż': 'z', 'ź': 'z', 'æ': 'ae', 'ǎ': 'a', 'ǧ': 'g', 25 | 'ə': 'e', 'ƒ': 'f', 'ǐ': 'i', 'ǒ': 'o', 'ǔ': 'u', 'ǚ': 'u', 26 | 'ǜ': 'u', 'ǹ': 'n', 'ǻ': 'a', 'ǽ': 'ae', 'ǿ': 'o', 27 | } 28 | 29 | for weird_char in corresp: 30 | string = string.replace(weird_char, corresp[weird_char]) 31 | 32 | return string 33 | 34 | def get_metadata(self, id, type): 35 | raise NotImplementedError 36 | -------------------------------------------------------------------------------- /source/metdata/tmdb.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | from metdata.metadata_provider_base import MetadataProvider 4 | from models.media import Media 5 | from models.movie import Movie 6 | from models.series import Series 7 | from utils.logger import setup_logger 8 | 9 | class TMDB(MetadataProvider): 10 | def __init__(self, config): 11 | super().__init__(config) 12 | self._indexers = None 13 | 14 | @property 15 | def indexers(self): 16 | return self._indexers 17 | @indexers.setter 18 | def indexers(self, indexers_): 19 | self._indexers = indexers_ 20 | 21 | def get_metadata(self, id, type): 22 | self.logger.info("Getting metadata for " + type + " with id " + id) 23 | 24 | full_id = id.split(":") 25 | 26 | result = None 27 | if self.config.get('getAllLanguages', None) and self._indexers and len(self._indexers) > 0: 28 | languages = list({indexer.language for indexer in self._indexers}) # Use set to remove duplicated languages 29 | else: 30 | languages = self.config['languages'] 31 | for lang in languages: 32 | url = f"https://api.themoviedb.org/3/find/{full_id[0]}?api_key={self.config['tmdbApi']}&external_source=imdb_id&language={lang}" 33 | response = requests.get(url) 34 | data = response.json() 35 | 36 | if lang == languages[0]: 37 | if type == "movie": 38 | result = Movie( 39 | id=id, 40 | titles=[self.replace_weird_characters(data["movie_results"][0]["title"])], 41 | year=data["movie_results"][0]["release_date"][:4], 42 | languages=languages 43 | ) 44 | else: 45 | result = Series( 46 | id=id, 47 | titles=[self.replace_weird_characters(data["tv_results"][0]["name"])], 48 | season="S{:02d}".format(int(full_id[1])), 49 | episode="E{:02d}".format(int(full_id[2])), 50 | languages=languages 51 | ) 52 | else: 53 | if type == "movie": 54 | result.titles.append(self.replace_weird_characters(data["movie_results"][0]["title"])) 55 | else: 56 | result.titles.append(self.replace_weird_characters(data["tv_results"][0]["name"])) 57 | 58 | self.logger.info("Got metadata for " + type + " with id " + id) 59 | return result 60 | -------------------------------------------------------------------------------- /source/models/media.py: -------------------------------------------------------------------------------- 1 | class Media: 2 | def __init__(self, id, titles, languages, type): 3 | self.id = id 4 | self.titles = titles 5 | self.languages = languages 6 | self.type = type 7 | -------------------------------------------------------------------------------- /source/models/movie.py: -------------------------------------------------------------------------------- 1 | from models.media import Media 2 | 3 | 4 | class Movie(Media): 5 | def __init__(self, id, titles, year, languages): 6 | super().__init__(id, titles, languages, "movie") 7 | self.year = year 8 | -------------------------------------------------------------------------------- /source/models/series.py: -------------------------------------------------------------------------------- 1 | from models.media import Media 2 | 3 | 4 | class Series(Media): 5 | def __init__(self, id, titles, season, episode, languages): 6 | super().__init__(id, titles, languages, "series") 7 | self.season = season 8 | self.episode = episode 9 | self.seasonfile = None 10 | -------------------------------------------------------------------------------- /source/requirements.txt: -------------------------------------------------------------------------------- 1 | fastapi 2 | uvicorn 3 | starlette 4 | requests 5 | bencode.py 6 | jinja2 7 | aiocron 8 | python-dotenv 9 | rank-torrent-name >= 1.0.0 -------------------------------------------------------------------------------- /source/templates/config.js: -------------------------------------------------------------------------------- 1 | const sorts = ['quality', 'seedsdesc', 'sizedesc', 'sizeasc', 'qualitythensize']; 2 | const qualityExclusions = ['4k', '1080p', '720p', '480p', 'rips', 'cam', 'unknown']; 3 | const languages = ['en', 'fr', 'es', 'de', 'it', 'pt', 'ru', 'in', 'nl', 'hu', 'la', 'multi']; 4 | 5 | document.addEventListener('DOMContentLoaded', function () { 6 | updateProviderFields(); 7 | }); 8 | 9 | function setElementDisplay(elementId, displayStatus) { 10 | const element = document.getElementById(elementId); 11 | if (!element) { 12 | return; 13 | } 14 | element.style.display = displayStatus; 15 | } 16 | 17 | function updateProviderFields(isChangeEvent = false) { 18 | if (document.getElementById('debrid').checked) { 19 | setElementDisplay('debrid-fields', 'block'); 20 | } else { 21 | setElementDisplay('debrid-fields', 'none'); 22 | } 23 | if (document.getElementById('jackett')?.checked) { 24 | setElementDisplay('jackett-fields', 'block'); 25 | } else { 26 | setElementDisplay('jackett-fields', 'none'); 27 | } 28 | if (document.getElementById('tmdb')?.checked) { 29 | setElementDisplay('tmdb-fields', 'block'); 30 | } else { 31 | setElementDisplay('tmdb-fields', 'none'); 32 | } 33 | if (!document.getElementById('get-all-languages')?.checked) { 34 | setElementDisplay('languages-fields', 'block'); 35 | } else { 36 | setElementDisplay('languages-fields', 'none'); 37 | } 38 | // if (!isChangeEvent) { 39 | // if (document.getElementById('jackett-fields')) { 40 | // document.getElementById('jackett-host').value = ''; 41 | // document.getElementById('jackett-api').value = ''; 42 | // } 43 | // document.getElementById('debrid-api').value = ''; 44 | // } 45 | } 46 | 47 | function loadData() { 48 | const currentUrl = window.location.href; 49 | let data = currentUrl.match(/\/([^\/]+)\/configure$/); 50 | if (data && data[1].startsWith("ey")) { 51 | data = atob(data[1]); 52 | data = JSON.parse(data); 53 | if (document.getElementById('jackett-fields')) { 54 | document.getElementById('jackett-host').value = data.jackettHost; 55 | document.getElementById('jackett-api').value = data.jackettApiKey; 56 | } 57 | document.getElementById('debrid-api').value = data.debridKey; 58 | document.getElementById('tmdb-api').value = data.tmdbApi; 59 | document.getElementById('service').value = data.service; 60 | document.getElementById('exclusion-keywords').value = (data.exclusionKeywords || []).join(', '); 61 | document.getElementById('maxSize').value = data.maxSize; 62 | document.getElementById('resultsPerQuality').value = data.resultsPerQuality; 63 | document.getElementById('maxResults').value = data.maxResults; 64 | if (document.getElementById('jackett')) { 65 | document.getElementById('jackett').checked = data.jackett; 66 | } 67 | if (document.getElementById('cache')) { 68 | document.getElementById('cache').checked = data.cache; 69 | } 70 | document.getElementById('torrenting').checked = data.torrenting; 71 | document.getElementById('debrid').checked = data.debrid; 72 | document.getElementById('tmdb').checked = data.metadataProvider === 'tmdb'; 73 | document.getElementById('cinemeta').checked = data.metadataProvider === 'cinemeta'; 74 | 75 | sorts.forEach(sort => { 76 | if (data.sort === sort) { 77 | document.getElementById(sort).checked = true; 78 | } 79 | }); 80 | 81 | qualityExclusions.forEach(quality => { 82 | if (data.exclusion.includes(quality)) { 83 | document.getElementById(quality).checked = true; 84 | } 85 | }) 86 | 87 | languages.forEach(language => { 88 | if (data.languages.includes(language)) { 89 | document.getElementById(language).checked = true; 90 | } 91 | }); 92 | 93 | document.getElementById('get-all-languages').checked = data.getAllLanguages; 94 | } 95 | } 96 | 97 | let showLanguageCheckBoxes = true; 98 | 99 | function showCheckboxes() { 100 | let checkboxes = document.getElementById("languageCheckBoxes"); 101 | 102 | if (showLanguageCheckBoxes) { 103 | checkboxes.style.display = "block"; 104 | showLanguageCheckBoxes = false; 105 | } else { 106 | checkboxes.style.display = "none"; 107 | showLanguageCheckBoxes = true; 108 | } 109 | } 110 | 111 | loadData(); 112 | 113 | function getLink(method) { 114 | const addonHost = new URL(window.location.href).protocol.replace(':', '') + "://" + new URL(window.location.href).host 115 | const jackettHost = document.getElementById('jackett-host')?.value; 116 | const jackettApi = document.getElementById('jackett-api')?.value; 117 | const debridApi = document.getElementById('debrid-api').value; 118 | const tmdbApi = document.getElementById('tmdb-api').value; 119 | const service = document.getElementById('service').value; 120 | const exclusionKeywords = document.getElementById('exclusion-keywords').value.split(',').map(keyword => keyword.trim()).filter(keyword => keyword !== ''); 121 | let maxSize = document.getElementById('maxSize').value; 122 | let resultsPerQuality = document.getElementById('resultsPerQuality').value; 123 | let maxResults = document.getElementById('maxResults').value; 124 | const jackett = document.getElementById('jackett')?.checked; 125 | const cache = document.getElementById('cache')?.checked; 126 | const torrenting = document.getElementById('torrenting').checked; 127 | const debrid = document.getElementById('debrid').checked; 128 | const metadataProvider = document.getElementById('tmdb').checked ? 'tmdb' : 'cinemeta'; 129 | const selectedQualityExclusion = []; 130 | 131 | console.log('Test'); 132 | 133 | qualityExclusions.forEach(quality => { 134 | console.log(quality, document.getElementById(quality).checked); 135 | if (document.getElementById(quality).checked) { 136 | selectedQualityExclusion.push(quality); 137 | } 138 | }); 139 | 140 | const selectedLanguages = []; 141 | languages.forEach(language => { 142 | if (document.getElementById(language).checked) { 143 | selectedLanguages.push(language); 144 | } 145 | }); 146 | 147 | const getAllLanguages = document.getElementById('get-all-languages').checked; 148 | 149 | let filter; 150 | sorts.forEach(sort => { 151 | if (document.getElementById(sort).checked) { 152 | filter = sort; 153 | } 154 | }); 155 | 156 | if (maxSize === '' || isNaN(maxSize)) { 157 | maxSize = 0; 158 | } 159 | if (maxResults === '' || isNaN(maxResults)) { 160 | maxResults = 5; 161 | } 162 | if (resultsPerQuality === '' || isNaN(resultsPerQuality)) { 163 | resultsPerQuality = 1; 164 | } 165 | let data = { 166 | addonHost, 167 | jackettHost, 168 | 'jackettApiKey': jackettApi, 169 | service, 170 | 'debridKey': debridApi, 171 | maxSize, 172 | exclusionKeywords, 173 | 'languages': selectedLanguages, 174 | getAllLanguages, 175 | 'sort': filter, 176 | resultsPerQuality, 177 | maxResults, 178 | 'exclusion': selectedQualityExclusion, 179 | tmdbApi, 180 | jackett, 181 | cache, 182 | torrenting, 183 | debrid, 184 | metadataProvider 185 | }; 186 | if ((jackett && (jackettHost === '' || jackettApi === '')) || (debrid && debridApi === '') || (metadataProvider === 'tmdb' && tmdbApi === '') || languages.length === 0) { 187 | alert('Please fill all required fields'); 188 | return false; 189 | } 190 | let stremio_link = `${window.location.host}/${btoa(JSON.stringify(data))}/manifest.json`; 191 | 192 | if (method === 'link') { 193 | window.open(`stremio://${stremio_link}`, "_blank"); 194 | } else if (method === 'copy') { 195 | const link = window.location.protocol + '//' + stremio_link; 196 | 197 | if (!navigator.clipboard) { 198 | alert('Your browser does not support clipboard'); 199 | console.log(link); 200 | return; 201 | } 202 | 203 | navigator.clipboard.writeText(link).then(() => { 204 | alert('Link copied to clipboard'); 205 | }, () => { 206 | alert('Error copying link to clipboard'); 207 | }); 208 | } 209 | } -------------------------------------------------------------------------------- /source/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Stremio-Jackett {% if isCommunityVersion %}Community{% endif %} 7 | 50 | 51 | 52 |
53 |
54 | logo 56 |
57 |

Stremio-Jackett{% if isCommunityVersion %} Community{% endif %} 58 | v{{ version }}

59 |

{% if sponsorMessage %}{{ sponsorMessage | safe }}{% endif %}

60 |

62 | Addon configuration

63 | 64 |
65 |

Streaming

66 |

You will enter information 67 | about streaming here

68 |
69 |
70 |
71 | 74 |
75 |
76 | 78 |

Enable debrid service for faster streaming

79 |
80 |
81 |
82 |
83 | 86 |
87 |
88 | 90 |

(ATTENTION: this is illegal in some 92 | countries)

93 |
94 |
95 |
96 | 97 | {% if not isCommunityVersion %} 98 | 99 |
100 |

Torrent Providers

101 |

You will enter information about 102 | your torrent providers here

103 |
104 |
105 |
106 | 109 |
110 |
111 | 113 |

Enable Jackett for more results

114 |
115 |
116 |
117 |
118 | 121 |
122 |
123 | 124 |

Enable caching for faster results

125 |
126 |
127 |
128 | 129 |
130 |

Jackett Information

131 |

You will enter information about your 132 | Jackett server here

133 | 134 |
135 |
136 | 138 |
139 | 143 |
144 |
145 | 146 |
147 | 149 |
150 | 153 |
154 |
155 |
156 |
157 | 158 | {% endif %} 159 | 160 |
161 |

Debrid Information

162 |

You will enter your debrid provider 163 | and its API key here

164 | 165 |
166 |
167 | 169 |
170 | 177 |
178 |
179 |
180 | 182 |
183 | 186 |
187 |
188 |
189 |
190 | 191 |
192 |

Metadata Provider

193 |

You will enter information about 194 | your metadata provider here

195 |
196 |
197 |
198 | 201 |
202 |
203 | 204 |

Use TMDB for more accurate metadata in different 205 | languages

206 |
207 |
208 |
209 |
210 | 213 |
214 |
215 | 216 |

Use Cinemeta for simple metadata

217 |
218 |
219 |
220 | 221 |
222 |

TMDB Informations

223 |

You will enter information 224 | about TMDB here. You can 225 | find your API key in "Settings" after registering here

227 | 228 |
229 |
230 | 232 |
233 | 236 |
237 |
238 |
239 |
240 | 241 |
242 |

Filtering

243 |

Set-up here your filtering 244 | parameters. Suiting results 245 | is a must :)

246 | 247 |
248 |
249 | Sorting 250 |

Choose the sorting that suits 251 | you the best.

252 | 253 |
254 |
255 |
256 | 259 |
260 |
261 | 263 |

Get the best quality on top

264 |
265 |
266 |
267 |
268 | 270 |
271 |
272 | 274 |

Filter results by seeders descending

275 |
276 |
277 |
278 |
279 | 281 |
282 |
283 | 285 |

Filter results by size descending

286 |
287 |
288 |
289 |
290 | 292 |
293 |
294 | 296 |

Filter results by size ascending

297 |
298 |
299 |
300 |
301 | 304 |
305 |
306 | 309 |

Filter results by quality then size 310 | descending

311 |
312 |
313 |
314 |
315 | 316 |
317 | Quality 318 | Exclusion 319 | 320 |

Check qualities that you want 321 | to exclude

322 |
323 |
324 | 326 | 327 |
328 | 329 |
330 | 332 | 334 |
335 | 336 |
337 | 339 | 340 |
341 | 342 |
343 | 345 | 346 |
347 | 348 |
349 | 351 | 353 |
354 | 355 |
356 | 358 | 360 |
361 | 362 |
363 | 365 | 367 |
368 | 369 |
370 | 373 |
374 | 378 |
379 |
380 | 381 |
382 |
383 |
384 |
385 |
386 | 387 |

Choose your languages

388 | 389 |
390 |
391 | 394 |
395 |
396 | 397 |
399 | 403 | 404 | 408 | 412 | 416 | 420 | 424 | 428 | 432 | 436 | 440 | 444 | 448 | 452 |
453 |
454 |
455 |
456 |
457 | 460 |
461 |
462 | 464 |
465 |
466 |
467 |
468 | 470 |

Set a maximum size for 471 | your results in 472 | GB.

473 | 474 |
475 | 478 |
479 |
480 | 481 |
482 | 483 |
484 |
485 | 487 |

Set the amount of maximum 488 | results you want 489 | per 490 | quality.

491 | 492 |
493 | 497 |
498 |
499 | 500 |
501 | 502 |
503 |
504 | 506 |

Set the amount of maximum 507 | results you 508 | want.

509 | 510 |
511 | 515 |
516 |
517 | 518 |
519 |
520 |
521 | 522 |
523 | Install 525 | Copy 527 |
528 |
529 |
530 |
531 | 532 | 533 | 534 | -------------------------------------------------------------------------------- /source/torrent/torrent_item.py: -------------------------------------------------------------------------------- 1 | from urllib.parse import quote 2 | 3 | from models.media import Media 4 | from models.series import Series 5 | from utils.logger import setup_logger 6 | 7 | 8 | class TorrentItem: 9 | def __init__(self, raw_title, size, magnet, info_hash, link, seeders, languages, indexer, 10 | privacy, type=None, parsed_data=None): 11 | self.logger = setup_logger(__name__) 12 | 13 | self.raw_title = raw_title # Raw title of the torrent 14 | self.size = size # Size of the video file inside the torrent - it may be updated during __process_torrent() 15 | self.magnet = magnet # Magnet to torrent 16 | self.info_hash = info_hash # Hash of the torrent 17 | self.link = link # Link to download torrent file or magnet link 18 | self.seeders = seeders # The number of seeders 19 | self.languages = languages # Language of the torrent 20 | self.indexer = indexer # Indexer of the torrent 21 | self.type = type # "series" or "movie" 22 | self.privacy = privacy # "public" or "private" 23 | 24 | self.file_name = None # it may be updated during __process_torrent() 25 | self.files = None # The files inside of the torrent. If it's None, it means that there is only one file inside of the torrent 26 | self.torrent_download = None # The torrent jackett download url if its None, it means that there is only a magnet link provided by Jackett. It also means, that we cant do series file filtering before debrid. 27 | self.trackers = [] # Trackers of the torrent 28 | self.file_index = None # Index of the file inside of the torrent - it may be updated durring __process_torrent() and update_availability(). If the index is None and torrent is not None, it means that the series episode is not inside of the torrent. 29 | 30 | self.availability = False # If it's instantly available on the debrid service 31 | 32 | self.parsed_data = parsed_data # Ranked result 33 | 34 | def to_debrid_stream_query(self, media: Media) -> dict: 35 | return { 36 | "magnet": self.magnet, 37 | "type": self.type, 38 | "file_index": self.file_index, 39 | "season": media.season if isinstance(media, Series) else None, 40 | "episode": media.episode if isinstance(media, Series) else None, 41 | "torrent_download": quote(self.torrent_download) if self.torrent_download is not None else None 42 | } 43 | -------------------------------------------------------------------------------- /source/torrent/torrent_service.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | import os 3 | import queue 4 | import threading 5 | import time 6 | import urllib.parse 7 | from typing import List 8 | 9 | import bencode 10 | import requests 11 | from RTN import parse 12 | 13 | from jackett.jackett_result import JackettResult 14 | from torrent.torrent_item import TorrentItem 15 | from utils.general import get_info_hash_from_magnet 16 | from utils.general import season_episode_in_filename 17 | from utils.logger import setup_logger 18 | 19 | class TorrentService: 20 | def __init__(self): 21 | self.logger = setup_logger(__name__) 22 | self.__session = requests.Session() 23 | 24 | def convert_and_process(self, results: List[JackettResult]): 25 | threads = [] 26 | torrent_items_queue = queue.Queue() 27 | 28 | def thread_target(result: JackettResult): 29 | torrent_item = result.convert_to_torrent_item() 30 | 31 | if torrent_item.link.startswith("magnet:"): 32 | processed_torrent_item = self.__process_magnet(torrent_item) 33 | else: 34 | processed_torrent_item = self.__process_web_url(torrent_item) 35 | 36 | torrent_items_queue.put(processed_torrent_item) 37 | 38 | for result in results: 39 | threads.append(threading.Thread(target=thread_target, args=(result,))) 40 | 41 | for thread in threads: 42 | thread.start() 43 | 44 | for thread in threads: 45 | thread.join() 46 | 47 | torrent_items_result = [] 48 | 49 | while not torrent_items_queue.empty(): 50 | torrent_items_result.append(torrent_items_queue.get()) 51 | 52 | return torrent_items_result 53 | 54 | def __process_web_url(self, result: TorrentItem): 55 | try: 56 | # TODO: is the default timeout enough? 57 | response = self.__session.get(result.link, allow_redirects=False, timeout=os.environ.get("JACKETT_RESOLVER_TIMEOUT", 2)) 58 | except requests.exceptions.ReadTimeout: 59 | self.logger.error(f"Timeout while processing url (took longer than 2 seconds)") 60 | return result 61 | except requests.exceptions.RequestException: 62 | self.logger.error(f"Error while processing url: {result.link}") 63 | return result 64 | 65 | if response.status_code == 200: 66 | return self.__process_torrent(result, response.content) 67 | elif response.status_code == 302: 68 | result.magnet = response.headers['Location'] 69 | return self.__process_magnet(result) 70 | else: 71 | self.logger.error(f"Error code {response.status_code} while processing url: {result.link}") 72 | 73 | return result 74 | 75 | def __process_torrent(self, result: TorrentItem, torrent_file): 76 | metadata = bencode.bdecode(torrent_file) 77 | 78 | result.torrent_download = result.link 79 | result.trackers = self.__get_trackers_from_torrent(metadata) 80 | result.info_hash = self.__convert_torrent_to_hash(metadata["info"]) 81 | result.magnet = self.__build_magnet(result.info_hash, metadata["info"]["name"], result.trackers) 82 | 83 | if "files" not in metadata["info"]: 84 | result.file_index = 1 85 | return result 86 | 87 | result.files = metadata["info"]["files"] 88 | 89 | if result.type == "series": 90 | file_details = self.__find_episode_file(result.files, result.parsed_data.seasons, result.parsed_data.episodes) 91 | 92 | if file_details is not None: 93 | self.logger.info("File details") 94 | self.logger.info(file_details) 95 | result.file_index = file_details["file_index"] 96 | result.file_name = file_details["title"] 97 | result.size = file_details["size"] 98 | else: 99 | result.file_index = self.__find_movie_file(result.files) 100 | 101 | return result 102 | 103 | def __process_magnet(self, result: TorrentItem): 104 | if result.magnet is None: 105 | result.magnet = result.link 106 | 107 | if result.info_hash is None: 108 | result.info_hash = get_info_hash_from_magnet(result.magnet) 109 | 110 | result.trackers = self.__get_trackers_from_magnet(result.magnet) 111 | 112 | return result 113 | 114 | def __convert_torrent_to_hash(self, torrent_contents): 115 | hashcontents = bencode.bencode(torrent_contents) 116 | hexHash = hashlib.sha1(hashcontents).hexdigest() 117 | return hexHash.lower() 118 | 119 | def __build_magnet(self, hash, display_name, trackers): 120 | magnet_base = "magnet:?xt=urn:btih:" 121 | magnet = f"{magnet_base}{hash}&dn={display_name}" 122 | 123 | if len(trackers) > 0: 124 | magnet = f"{magnet}&tr={'&tr='.join(trackers)}" 125 | 126 | return magnet 127 | 128 | def __get_trackers_from_torrent(self, torrent_metadata): 129 | # Sometimes list, sometimes string 130 | announce = torrent_metadata["announce"] if "announce" in torrent_metadata else [] 131 | # Sometimes 2D array, sometimes 1D array 132 | announce_list = torrent_metadata["announce-list"] if "announce-list" in torrent_metadata else [] 133 | 134 | trackers = set() 135 | if isinstance(announce, str): 136 | trackers.add(announce) 137 | elif isinstance(announce, list): 138 | for tracker in announce: 139 | trackers.add(tracker) 140 | 141 | for announce_list_item in announce_list: 142 | if isinstance(announce_list_item, list): 143 | for tracker in announce_list_item: 144 | trackers.add(tracker) 145 | if isinstance(announce_list_item, str): 146 | trackers.add(announce_list_item) 147 | 148 | return list(trackers) 149 | 150 | def __get_trackers_from_magnet(self, magnet: str): 151 | url_parts = urllib.parse.urlparse(magnet) 152 | query_parts = urllib.parse.parse_qs(url_parts.query) 153 | 154 | trackers = [] 155 | if "tr" in query_parts: 156 | trackers = query_parts["tr"] 157 | 158 | return trackers 159 | 160 | def __find_episode_file(self, file_structure, season, episode): 161 | 162 | if len(season) == 0 or len(episode) == 0: 163 | return None 164 | 165 | file_index = 1 166 | strict_episode_files = [] 167 | episode_files = [] 168 | for files in file_structure: 169 | for file in files["path"]: 170 | 171 | parsed_file = parse(file) 172 | 173 | if season[0] in parsed_file.seasons and episode[0] in parsed_file.episodes: 174 | episode_files.append({ 175 | "file_index": file_index, 176 | "title": file, 177 | "size": files["length"] 178 | }) 179 | 180 | # Doesn't that need to be indented? 181 | file_index += 1 182 | 183 | return max(episode_files, key=lambda file: file["size"]) 184 | 185 | def __find_movie_file(self, file_structure): 186 | max_size = 0 187 | max_file_index = 1 188 | current_file_index = 1 189 | for files in file_structure: 190 | if files["length"] > max_size: 191 | max_file_index = current_file_index 192 | max_size = files["length"] 193 | current_file_index += 1 194 | 195 | return max_file_index 196 | -------------------------------------------------------------------------------- /source/torrent/torrent_smart_container.py: -------------------------------------------------------------------------------- 1 | import threading 2 | from typing import List, Dict 3 | 4 | from RTN import parse 5 | 6 | from debrid.alldebrid import AllDebrid 7 | from debrid.premiumize import Premiumize 8 | from debrid.realdebrid import RealDebrid 9 | from debrid.torbox import TorBox 10 | from torrent.torrent_item import TorrentItem 11 | from utils.cache import cache_results 12 | from utils.general import season_episode_in_filename 13 | from utils.logger import setup_logger 14 | 15 | 16 | class TorrentSmartContainer: 17 | def __init__(self, torrent_items: List[TorrentItem], media): 18 | self.logger = setup_logger(__name__) 19 | self.__itemsDict: Dict[TorrentItem] = self.__build_items_dict_by_infohash(torrent_items) 20 | self.__media = media 21 | 22 | def get_hashes(self): 23 | return list(self.__itemsDict.keys()) 24 | 25 | def get_items(self): 26 | return list(self.__itemsDict.values()) 27 | 28 | def get_direct_torrentable(self): 29 | direct_torrentable_items = [] 30 | for torrent_item in self.__itemsDict.values(): 31 | if torrent_item.privacy == "public" and torrent_item.file_index is not None: 32 | direct_torrentable_items.append(torrent_item) 33 | 34 | def get_best_matching(self): 35 | best_matching = [] 36 | self.logger.debug(f"Amount of items: {len(self.__itemsDict)}") 37 | for torrent_item in self.__itemsDict.values(): 38 | self.logger.debug(f"-------------------") 39 | self.logger.debug(f"Checking {torrent_item.raw_title}") 40 | self.logger.debug(f"Has torrent: {torrent_item.torrent_download is not None}") 41 | if torrent_item.torrent_download is not None: # Torrent download 42 | self.logger.debug(f"Has file index: {torrent_item.file_index is not None}") 43 | if torrent_item.file_index is not None: 44 | # If the season/episode is present inside the torrent filestructure (movies always have a 45 | # file_index) 46 | best_matching.append(torrent_item) 47 | else: # Magnet 48 | best_matching.append(torrent_item) # If it's a movie with a magnet link 49 | 50 | return best_matching 51 | 52 | def cache_container_items(self): 53 | threading.Thread(target=self.__save_to_cache).start() 54 | 55 | def __save_to_cache(self): 56 | public_torrents = list(filter(lambda x: x.privacy == "public", self.get_items())) 57 | cache_results(public_torrents, self.__media) 58 | 59 | def update_availability(self, debrid_response, debrid_type, media): 60 | if debrid_type is RealDebrid: 61 | self.__update_availability_realdebrid(debrid_response, media) 62 | elif debrid_type is AllDebrid: 63 | self.__update_availability_alldebrid(debrid_response, media) 64 | elif debrid_type is Premiumize: 65 | self.__update_availability_premiumize(debrid_response) 66 | elif debrid_type is TorBox: 67 | self.__update_availability_torbox(debrid_response, media) 68 | else: 69 | raise NotImplemented 70 | 71 | def __update_availability_realdebrid(self, response, media): 72 | for info_hash, details in response.items(): 73 | if "rd" not in details: 74 | continue 75 | 76 | torrent_item: TorrentItem = self.__itemsDict[info_hash] 77 | 78 | files = [] 79 | self.logger.info(torrent_item.type) 80 | if torrent_item.type == "series": 81 | for variants in details["rd"]: 82 | for file_index, file in variants.items(): 83 | self.logger.info(file["filename"]) 84 | clean_season = media.season.replace("S", "") 85 | clean_episode = media.episode.replace("E", "") 86 | numeric_season = int(clean_season) 87 | numeric_episode = int(clean_episode) 88 | if season_episode_in_filename(file["filename"], numeric_season, numeric_episode): 89 | self.logger.info("File details 2") 90 | self.logger.info(file["filename"]) 91 | files.append({ 92 | "file_index": file_index, 93 | "title": file["filename"], 94 | "size": file["filesize"] 95 | }) 96 | else: 97 | for variants in details["rd"]: 98 | for file_index, file in variants.items(): 99 | self.logger.info("File details 3") 100 | self.logger.info(file["filename"]) 101 | files.append({ 102 | "file_index": file_index, 103 | "title": file["filename"], 104 | "size": file["filesize"] 105 | }) 106 | 107 | self.__update_file_details(torrent_item, files) 108 | 109 | def __update_availability_alldebrid(self, response, media): 110 | if response["status"] != "success": 111 | self.logger.error(f"Error while updating availability: {response}") 112 | return 113 | 114 | for data in response["data"]["magnets"]: 115 | if data["instant"] == False: 116 | continue 117 | 118 | torrent_item: TorrentItem = self.__itemsDict[data["hash"]] 119 | 120 | files = [] 121 | self.__explore_folders(data["files"], files, 1, torrent_item.type, media.season, 122 | media.episode) 123 | 124 | self.__update_file_details(torrent_item, files) 125 | 126 | def __update_availability_torbox(self, response, media): 127 | for torrent_hash, data in response.items(): 128 | 129 | if not torrent_hash or torrent_hash not in self.__itemsDict: 130 | self.logger.warning(f"Hash {torrent_hash} not found in itemsDict.") 131 | continue 132 | torrent_item: TorrentItem = self.__itemsDict[torrent_hash] 133 | files = [] 134 | 135 | self.__explore_folders( 136 | folder=data.get("files", []), 137 | files=files, 138 | file_index=1, 139 | type=torrent_item.type, 140 | season=media.season, 141 | episode=media.episode 142 | ) 143 | self.__update_file_details(torrent_item, files) 144 | 145 | def __update_availability_premiumize(self, response): 146 | if response["status"] != "success": 147 | self.logger.error(f"Error while updating availability: {response}") 148 | return 149 | 150 | torrent_items = self.get_items() 151 | for i in range(len(response["response"])): 152 | if bool(response["response"][i]): 153 | torrent_items[i].availability = response["transcoded"][i] == True 154 | 155 | 156 | def __update_file_details(self, torrent_item, files): 157 | if len(files) == 0: 158 | return 159 | 160 | file = max(files, key=lambda file: file["size"]) 161 | torrent_item.availability = True 162 | torrent_item.file_index = file["file_index"] 163 | torrent_item.file_name = file["title"] 164 | torrent_item.size = file["size"] 165 | 166 | def __build_items_dict_by_infohash(self, items: List[TorrentItem]): 167 | self.logger.debug(f"Building items dict by infohash ({len(items)} items)") 168 | items_dict = dict() 169 | for item in items: 170 | if item.info_hash is not None: 171 | self.logger.debug(f"Adding {item.info_hash} to items dict") 172 | if item.info_hash in items_dict: 173 | self.logger.debug(f"Duplicate info hash found: {item.info_hash}") 174 | items_dict[item.info_hash] = item 175 | return items_dict 176 | 177 | # Simple recursion to traverse the file structure returned by AllDebrid 178 | def __explore_folders(self, folder, files, file_index, type, season=None, episode=None): 179 | if type == "series": 180 | for file in folder: 181 | if "e" in file or "files" in file: 182 | sub_folder = file.get("e") or file.get("files") 183 | file_index = self.__explore_folders(sub_folder, files, file_index, type, season, 184 | episode) 185 | continue 186 | 187 | file_name = file.get("n") or file.get("name") 188 | file_size = file.get("s") or file.get("size", 0) 189 | if not file_name: 190 | self.logger.warning(f"Filename missing for : {file}") 191 | continue 192 | 193 | if season_episode_in_filename(file_name, season, episode): 194 | files.append({ 195 | "file_index": file_index, 196 | "title": file_name, 197 | "size": file_size 198 | }) 199 | file_index += 1 200 | 201 | elif type == "movie": 202 | file_index = 1 203 | for file in folder: 204 | if "e" in file or "files" in file: 205 | sub_folder = file.get("e") or file.get("files") 206 | file_index = self.__explore_folders(sub_folder, files, file_index, type) 207 | continue 208 | 209 | file_name = file.get("n") or file.get("name") 210 | file_size = file.get("s") or file.get("size", 0) 211 | 212 | if not file_name: 213 | self.logger.warning(f"Filename missing for : {file}") 214 | continue 215 | 216 | files.append({ 217 | "file_index": file_index, 218 | "title": file_name, 219 | "size": file_size 220 | }) 221 | file_index += 1 222 | 223 | return file_index 224 | -------------------------------------------------------------------------------- /source/update-dev.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | name="stremio-jackett-dev" 4 | 5 | docker build -t $name . 6 | docker rm -f $name 7 | docker run -p 3001:3000 --net streaming_net --env NODE_ENV=development --name $name $name -------------------------------------------------------------------------------- /source/utils/cache.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | from typing import List 4 | 5 | import requests 6 | 7 | from constants import CACHER_URL, EXCLUDED_TRACKERS 8 | from torrent.torrent_item import TorrentItem 9 | from utils.logger import setup_logger 10 | 11 | logger = setup_logger(__name__) 12 | 13 | 14 | def search_cache(media): 15 | logger.info("Searching for cached " + media.type + " results") 16 | url = CACHER_URL + "getResult/" + media.type + "/" 17 | # Without that, the cache doesn't return results. Maybe make multiple requests? One for each language, just like jackett? 18 | cache_search = media.__dict__ 19 | cache_search['title'] = cache_search['titles'][0] 20 | cache_search['language'] = cache_search['languages'][0] 21 | # TODO: Wtf, why do we need to use __dict__ here? And also, why is it stuck when we use media directly? 22 | response = requests.get(url, json=cache_search) 23 | if response.status_code == 200: 24 | return response.json() 25 | else: 26 | return [] 27 | 28 | 29 | def cache_results(torrents: List[TorrentItem], media): 30 | if os.getenv("NODE_ENV") == "development": 31 | return 32 | 33 | logger.info("Started caching results") 34 | 35 | cache_items = [] 36 | for torrent in torrents: 37 | if torrent.indexer in EXCLUDED_TRACKERS: 38 | continue 39 | 40 | try: 41 | cache_item = dict() 42 | 43 | cache_item['title'] = torrent.raw_title 44 | cache_item['trackers'] = "tracker:".join(torrent.trackers) 45 | cache_item['magnet'] = torrent.magnet 46 | cache_item['files'] = [] # I guess keep it empty? 47 | cache_item['hash'] = torrent.info_hash 48 | cache_item['indexer'] = torrent.indexer 49 | cache_item['quality'] = torrent.parsed_data.resolution 50 | cache_item['qualitySpec'] = torrent.parsed_data.quality 51 | cache_item['seeders'] = torrent.seeders 52 | cache_item['size'] = torrent.size 53 | cache_item['language'] = ";".join(torrent.languages) 54 | cache_item['type'] = media.type 55 | cache_item['availability'] = torrent.availability 56 | 57 | if media.type == "movie": 58 | cache_item['year'] = media.year 59 | elif media.type == "series": 60 | cache_item['season'] = media.season 61 | cache_item['episode'] = media.episode 62 | cache_item['seasonfile'] = False # I guess keep it false to not mess up results? 63 | 64 | cache_items.append(cache_item) 65 | except: 66 | logger.exception("An exception occured durring cache parsing") 67 | pass 68 | 69 | try: 70 | url = f"{CACHER_URL}pushResult/{media.type}" 71 | cache_data = json.dumps(cache_items, indent=4) 72 | response = requests.post(url, data=cache_data) 73 | response.raise_for_status() 74 | 75 | if response.status_code == 200: 76 | logger.info(f"Cached {str(len(cache_items))} {media.type} results") 77 | else: 78 | logger.error(f"Failed to cache {media.type} results: {str(response)}") 79 | except: 80 | logger.error("Failed to cache results") 81 | pass 82 | -------------------------------------------------------------------------------- /source/utils/detection.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | 4 | def detect_languages(torrent_name): 5 | language_patterns = { 6 | "fr": r'\b(FRENCH|FR|VF|VF2|VFF|TRUEFRENCH|VFQ|FRA)\b', 7 | "en": r'\b(ENGLISH|EN|ENG)\b', 8 | "es": r'\b(SPANISH|ES|ESP)\b', 9 | "de": r'\b(GERMAN|DE|GER)\b', 10 | "it": r'\b(ITALIAN|IT|ITA)\b', 11 | "pt": r'\b(PORTUGUESE|PT|POR)\b', 12 | "ru": r'\b(RUSSIAN|RU|RUS)\b', 13 | "in": r'\b(INDIAN|IN|HINDI|TELUGU|TAMIL|KANNADA|MALAYALAM|PUNJABI|MARATHI|BENGALI|GUJARATI|URDU|ODIA|ASSAMESE|KONKANI|MANIPURI|NEPALI|SANSKRIT|SINHALA|SINDHI|TIBETAN|BHOJPURI|DHIVEHI|KASHMIRI|KURUKH|MAITHILI|NEWARI|RAJASTHANI|SANTALI|SINDHI|TULU)\b', 14 | "nl": r'\b(DUTCH|NL|NLD)\b', 15 | "hu": r'\b(HUNGARIAN|HU|HUN)\b', 16 | "la": r'\b(LATIN|LATINO|LA)\b', 17 | "multi": r"\b(MULTI)\b" 18 | } 19 | 20 | languages = [] 21 | for language, pattern in language_patterns.items(): 22 | if re.search(pattern, torrent_name, re.IGNORECASE): 23 | languages.append(language) 24 | 25 | if len(languages) == 0: 26 | return ["en"] 27 | 28 | return languages 29 | -------------------------------------------------------------------------------- /source/utils/filter/base_filter.py: -------------------------------------------------------------------------------- 1 | class BaseFilter: 2 | def __init__(self, config, additional_config=None): 3 | self.config = config 4 | self.item_type = additional_config 5 | 6 | def filter(self, data): 7 | raise NotImplementedError 8 | 9 | def can_filter(self): 10 | raise NotImplementedError 11 | 12 | def __call__(self, data): 13 | if self.config is not None and self.can_filter(): 14 | return self.filter(data) 15 | return data 16 | -------------------------------------------------------------------------------- /source/utils/filter/language_filter.py: -------------------------------------------------------------------------------- 1 | from utils.filter.base_filter import BaseFilter 2 | from utils.logger import setup_logger 3 | 4 | logger = setup_logger(__name__) 5 | 6 | 7 | class LanguageFilter(BaseFilter): 8 | def __init__(self, config): 9 | super().__init__(config) 10 | 11 | def filter(self, data): 12 | 13 | if self.config.get('getAllLanguages', False): 14 | logger.info(f"Skipping language filtering because of 'getAllLanguages' setting.") 15 | return data 16 | 17 | filtered_data = [] 18 | for torrent in data: 19 | if len(torrent.languages) == 0: 20 | continue 21 | 22 | for language in torrent.languages: 23 | if language in self.config['languages']: 24 | filtered_data.append(torrent) 25 | continue 26 | 27 | if "multi" in torrent.languages: 28 | filtered_data.append(torrent) 29 | return filtered_data 30 | 31 | def can_filter(self): 32 | return self.config['languages'] is not None 33 | -------------------------------------------------------------------------------- /source/utils/filter/max_size_filter.py: -------------------------------------------------------------------------------- 1 | from utils.filter.base_filter import BaseFilter 2 | from utils.logger import setup_logger 3 | 4 | logger = setup_logger(__name__) 5 | 6 | 7 | class MaxSizeFilter(BaseFilter): 8 | def __init__(self, config, additional_config=None): 9 | super().__init__(config, additional_config) 10 | 11 | def filter(self, data): 12 | filtered_data = [] 13 | for torrent in data: 14 | if torrent.size <= self.config['maxSize']: 15 | filtered_data.append(torrent) 16 | return filtered_data 17 | 18 | def can_filter(self): 19 | return int(self.config['maxSize']) > 0 and self.item_type == 'movie' 20 | -------------------------------------------------------------------------------- /source/utils/filter/quality_exclusion_filter.py: -------------------------------------------------------------------------------- 1 | from utils.filter.base_filter import BaseFilter 2 | from utils.logger import setup_logger 3 | 4 | logger = setup_logger(__name__) 5 | 6 | 7 | class QualityExclusionFilter(BaseFilter): 8 | def __init__(self, config): 9 | super().__init__(config) 10 | 11 | RIPS = ["HDRIP", "BRRIP", "BDRIP", "WEBRIP", "TVRIP", "VODRIP", "HDRIP"] 12 | CAMS = ["CAM", "TS", "TC", "R5", "DVDSCR", "HDTV", "PDTV", "DSR", "WORKPRINT", "VHSRIP", "HDCAM"] 13 | 14 | def filter(self, data): 15 | filtered_items = [] 16 | excluded_qualities = [quality.upper() for quality in self.config['exclusion']] 17 | rips = "RIPS" in excluded_qualities 18 | cams = "CAM" in excluded_qualities 19 | 20 | for stream in data: 21 | if stream.parsed_data.quality: 22 | if stream.parsed_data.quality.upper() in excluded_qualities: 23 | break 24 | if rips and stream.parsed_data.quality.upper() in self.RIPS: 25 | break 26 | if cams and stream.parsed_data.quality.upper() in self.CAMS: 27 | break 28 | filtered_items.append(stream) 29 | else: 30 | if "Unknown" not in excluded_qualities: 31 | filtered_items.append(stream) 32 | return filtered_items 33 | 34 | def can_filter(self): 35 | return self.config['exclusion'] is not None and len(self.config['exclusion']) > 0 36 | -------------------------------------------------------------------------------- /source/utils/filter/results_per_quality_filter.py: -------------------------------------------------------------------------------- 1 | from utils.filter.base_filter import BaseFilter 2 | from utils.logger import setup_logger 3 | 4 | logger = setup_logger(__name__) 5 | 6 | 7 | class ResultsPerQualityFilter(BaseFilter): 8 | def __init__(self, config): 9 | super().__init__(config) 10 | 11 | def filter(self, data): 12 | filtered_items = [] 13 | resolution_count = {} 14 | for item in data: 15 | logger.info(f"Filtering by quality: {item.parsed_data.resolution}") 16 | if item.parsed_data.resolution not in resolution_count: 17 | resolution_count[item.parsed_data.resolution] = 1 18 | filtered_items.append(item) 19 | else: 20 | if resolution_count[item.parsed_data.resolution] < int(self.config['resultsPerQuality']): 21 | resolution_count[item.parsed_data.resolution] += 1 22 | filtered_items.append(item) 23 | return filtered_items 24 | 25 | def can_filter(self): 26 | return self.config['resultsPerQuality'] is not None and int(self.config['resultsPerQuality']) > 0 27 | -------------------------------------------------------------------------------- /source/utils/filter/title_exclusion_filter.py: -------------------------------------------------------------------------------- 1 | from utils.filter.base_filter import BaseFilter 2 | from utils.logger import setup_logger 3 | 4 | logger = setup_logger(__name__) 5 | 6 | 7 | class TitleExclusionFilter(BaseFilter): 8 | def __init__(self, config): 9 | super().__init__(config) 10 | 11 | def filter(self, data): 12 | filtered_items = [] 13 | excluded_keywords = [keyword.upper() for keyword in self.config['exclusionKeywords']] 14 | for stream in data: 15 | for keyword in excluded_keywords: 16 | if keyword in stream.title.upper(): 17 | break 18 | else: 19 | filtered_items.append(stream) 20 | return filtered_items 21 | 22 | def can_filter(self): 23 | return self.config['exclusionKeywords'] is not None and len(self.config['exclusionKeywords']) > 0 24 | -------------------------------------------------------------------------------- /source/utils/filter_results.py: -------------------------------------------------------------------------------- 1 | from RTN import title_match, RTN, DefaultRanking, SettingsModel, sort_torrents 2 | from RTN.models import CustomRank 3 | from utils.filter.language_filter import LanguageFilter 4 | from utils.filter.max_size_filter import MaxSizeFilter 5 | from utils.filter.quality_exclusion_filter import QualityExclusionFilter 6 | from utils.filter.results_per_quality_filter import ResultsPerQualityFilter 7 | from utils.filter.title_exclusion_filter import TitleExclusionFilter 8 | from utils.logger import setup_logger 9 | 10 | logger = setup_logger(__name__) 11 | 12 | quality_order = {"4k": 0, "2160p": 0, "1080p": 1, "720p": 2, "480p": 3} 13 | 14 | 15 | def sort_quality(item): 16 | if item.parsed_data.data.resolution == None: 17 | return float('inf'), True 18 | 19 | # TODO: first resolution? 20 | return quality_order.get(item.parsed_data.data.resolution, 21 | float('inf')), item.parsed_data.data.resolution is None 22 | 23 | 24 | def items_sort(items, config): 25 | settings = SettingsModel( 26 | require=[], 27 | exclude=config['exclusionKeywords'] + config['exclusion'], 28 | # custom_ranks={ 29 | # "uhd": CustomRank(enable=True, fetch=True, rank=200), 30 | # "hdr": CustomRank(enable=True, fetch=True, rank=100), 31 | # } 32 | ) 33 | 34 | rtn = RTN(settings=settings, ranking_model=DefaultRanking()) 35 | torrents = [rtn.rank(item.raw_title, item.info_hash) for item in items] 36 | sorted_torrents = sort_torrents(set(torrents)) 37 | for key, value in sorted_torrents.items(): 38 | index = next((i for i, item in enumerate(items) if item.info_hash == key), None) 39 | if index is not None: 40 | items[index].parsed_data = value 41 | 42 | if config['sort'] == "quality": 43 | return sorted(items, key=sort_quality) 44 | if config['sort'] == "sizeasc": 45 | return sorted(items, key=lambda x: int(x.size)) 46 | if config['sort'] == "seedsdesc": 47 | return sorted(items, key=lambda x: int(x.seeders), reverse=True) 48 | if config['sort'] == "sizedesc": 49 | return sorted(items, key=lambda x: int(x.size), reverse=True) 50 | if config['sort'] == "qualitythensize": 51 | return sorted(items, key=lambda x: (sort_quality(x), -int(x.size))) 52 | return items 53 | 54 | 55 | # def filter_season_episode(items, season, episode, config): 56 | # filtered_items = [] 57 | # for item in items: 58 | # if config['language'] == "ru": 59 | # if "S" + str(int(season.replace("S", ""))) + "E" + str( 60 | # int(episode.replace("E", ""))) not in item['title']: 61 | # if re.search(rf'\bS{re.escape(str(int(season.replace("S", ""))))}\b', item['title']) is None: 62 | # continue 63 | # if re.search(rf'\b{season}\s?{episode}\b', item['title']) is None: 64 | # if re.search(rf'\b{season}\b', item['title']) is None: 65 | # continue 66 | 67 | # filtered_items.append(item) 68 | # return filtered_items 69 | 70 | # TODO: not needed anymore because of RTN 71 | def filter_out_non_matching(items, season, episode): 72 | filtered_items = [] 73 | for item in items: 74 | logger.info(season) 75 | logger.info(episode) 76 | logger.info(item.parsed_data) 77 | clean_season = season.replace("S", "") 78 | clean_episode = episode.replace("E", "") 79 | numeric_season = int(clean_season) 80 | numeric_episode = int(clean_episode) 81 | try: 82 | if len(item.parsed_data.seasons) == 0 and len(item.parsed_data.episodes) == 0: 83 | continue 84 | 85 | if len(item.parsed_data.episodes) == 0 and numeric_season in item.parsed_data.seasons: 86 | filtered_items.append(item) 87 | continue 88 | if numeric_season in item.parsed_data.seasons and numeric_episode in item.parsed_data.episodes: 89 | filtered_items.append(item) 90 | continue 91 | except Exception as e: 92 | logger.error(f"Error while filtering out non matching torrents", exc_info=e) 93 | return filtered_items 94 | 95 | 96 | def remove_non_matching_title(items, titles): 97 | logger.info(titles) 98 | filtered_items = [] 99 | for item in items: 100 | for title in titles: 101 | if not title_match(title, item.parsed_data.parsed_title): 102 | continue 103 | 104 | filtered_items.append(item) 105 | break 106 | 107 | return filtered_items 108 | 109 | 110 | def filter_items(items, media, config): 111 | filters = { 112 | "languages": LanguageFilter(config), 113 | "maxSize": MaxSizeFilter(config, media.type), # Max size filtering only happens for movies, so it 114 | "exclusionKeywords": TitleExclusionFilter(config), 115 | "exclusion": QualityExclusionFilter(config), 116 | "resultsPerQuality": ResultsPerQualityFilter(config) 117 | } 118 | 119 | # Filtering out 100% non-matching for series 120 | logger.info(f"Item count before filtering: {len(items)}") 121 | if media.type == "series": 122 | logger.info(f"Filtering out non matching series torrents") 123 | items = filter_out_non_matching(items, media.season, media.episode) 124 | logger.info(f"Item count changed to {len(items)}") 125 | 126 | # TODO: is titles[0] always the correct title? Maybe loop through all titles and get the highest match? 127 | items = remove_non_matching_title(items, media.titles) 128 | 129 | for filter_name, filter_instance in filters.items(): 130 | try: 131 | logger.info(f"Filtering by {filter_name}: " + str(config[filter_name])) 132 | items = filter_instance(items) 133 | logger.info(f"Item count changed to {len(items)}") 134 | except Exception as e: 135 | logger.error(f"Error while filtering by {filter_name}", exc_info=e) 136 | logger.info(f"Item count after filtering: {len(items)}") 137 | logger.info("Finished filtering torrents") 138 | 139 | return items 140 | 141 | 142 | def sort_items(items, config): 143 | if config['sort'] is not None: 144 | return items_sort(items, config) 145 | else: 146 | return items 147 | -------------------------------------------------------------------------------- /source/utils/general.py: -------------------------------------------------------------------------------- 1 | from RTN import parse 2 | 3 | from utils.logger import setup_logger 4 | 5 | logger = setup_logger(__name__) 6 | 7 | video_formats = {".mkv", ".mp4", ".avi", ".mov", ".flv", ".wmv", ".webm", ".mpg", ".mpeg", ".m4v", ".3gp", ".3g2", 8 | ".ogv", 9 | ".ogg", ".drc", ".gif", ".gifv", ".mng", ".avi", ".mov", ".qt", ".wmv", ".yuv", ".rm", ".rmvb", ".asf", 10 | ".amv", ".m4p", ".m4v", ".mpg", ".mp2", ".mpeg", ".mpe", ".mpv", ".mpg", ".mpeg", ".m2v", ".m4v", 11 | ".svi", ".3gp", ".3g2", ".mxf", ".roq", ".nsv", ".flv", ".f4v", ".f4p", ".f4a", ".f4b"} 12 | 13 | 14 | def season_episode_in_filename(filename, season, episode): 15 | if not is_video_file(filename): 16 | return False 17 | parsed_name = parse(filename) 18 | return int(season.replace("S", "")) in parsed_name.seasons and int(episode.replace("E", "")) in parsed_name.episodes 19 | 20 | 21 | def get_info_hash_from_magnet(magnet: str): 22 | exact_topic_index = magnet.find("xt=") 23 | if exact_topic_index == -1: 24 | logger.debug(f"No exact topic in magnet {magnet}") 25 | return None 26 | 27 | exact_topic_substring = magnet[exact_topic_index:] 28 | end_of_exact_topic = exact_topic_substring.find("&") 29 | if end_of_exact_topic != -1: 30 | exact_topic_substring = exact_topic_substring[:end_of_exact_topic] 31 | 32 | info_hash = exact_topic_substring[exact_topic_substring.rfind(":") + 1:] 33 | 34 | return info_hash.lower() 35 | 36 | 37 | def is_video_file(filename): 38 | extension_idx = filename.rfind(".") 39 | if extension_idx == -1: 40 | return False 41 | 42 | return filename[extension_idx:] in video_formats 43 | -------------------------------------------------------------------------------- /source/utils/logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | 4 | class CustomFormatter(logging.Formatter): 5 | """Logging Formatter to add colors and count warning / errors""" 6 | 7 | grey = "\x1b[38;21m" 8 | blue = "\x1b[34;21m" 9 | green = "\x1b[32;21m" 10 | yellow = "\x1b[33;21m" 11 | red = "\x1b[31;21m" 12 | bold_red = "\x1b[31;1m" 13 | reset = "\x1b[0m" 14 | format = '[%(asctime)s] p%(process)s {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s' 15 | 16 | FORMATS = { 17 | logging.DEBUG: grey + format + reset, 18 | logging.INFO: grey + format + reset, 19 | logging.WARNING: yellow + format + reset, 20 | logging.ERROR: red + format + reset, 21 | logging.CRITICAL: bold_red + format + reset 22 | } 23 | 24 | def format(self, record): 25 | log_fmt = self.FORMATS.get(record.levelno) 26 | formatter = logging.Formatter(log_fmt, "%m-%d %H:%M:%S") 27 | return formatter.format(record) 28 | 29 | 30 | def setup_logger(name): 31 | logger = logging.getLogger(name) 32 | logger.setLevel(logging.DEBUG) # Adjust as needed 33 | 34 | if len(logger.handlers) > 0: 35 | return logger 36 | 37 | # Create console handler with a higher log level 38 | ch = logging.StreamHandler() 39 | ch.setLevel(logging.DEBUG) # Adjust as needed 40 | 41 | ch.setFormatter(CustomFormatter()) 42 | logger.addHandler(ch) 43 | return logger 44 | 45 | # Example usage 46 | # logger = setup_logger(__name__) 47 | # logger.debug('This is a debug message') 48 | # logger.info('This is an info message') 49 | # logger.warning('This is a warning message') 50 | # logger.error('This is an error message') 51 | # logger.critical('This is a critical message') 52 | -------------------------------------------------------------------------------- /source/utils/parse_config.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | from utils.string_encoding import decodeb64 4 | 5 | 6 | def parse_config(b64config): 7 | config = json.loads(decodeb64(b64config)) 8 | 9 | # For backwards compatibility 10 | if "languages" not in config: 11 | config["languages"] = [config["language"]] 12 | 13 | return config 14 | -------------------------------------------------------------------------------- /source/utils/stremio_parser.py: -------------------------------------------------------------------------------- 1 | import json 2 | import queue 3 | import threading 4 | from typing import List 5 | 6 | from models.media import Media 7 | from torrent.torrent_item import TorrentItem 8 | from utils.logger import setup_logger 9 | from utils.string_encoding import encodeb64 10 | 11 | logger = setup_logger(__name__) 12 | 13 | INSTANTLY_AVAILABLE = "[⚡]" 14 | DOWNLOAD_REQUIRED = "[⬇️]" 15 | DIRECT_TORRENT = "[🏴‍☠️]" 16 | 17 | 18 | # TODO: Languages 19 | def get_emoji(language): 20 | emoji_dict = { 21 | "fr": "🇫🇷", 22 | "en": "🇬🇧", 23 | "es": "🇪🇸", 24 | "de": "🇩🇪", 25 | "it": "🇮🇹", 26 | "pt": "🇵🇹", 27 | "ru": "🇷🇺", 28 | "in": "🇮🇳", 29 | "nl": "🇳🇱", 30 | "hu": "🇭🇺", 31 | "la": "🇲🇽", 32 | "multi": "🌍" 33 | } 34 | return emoji_dict.get(language, "🇬🇧") 35 | 36 | 37 | def filter_by_availability(item): 38 | if item["name"].startswith(INSTANTLY_AVAILABLE): 39 | return 0 40 | else: 41 | return 1 42 | 43 | 44 | def filter_by_direct_torrnet(item): 45 | if item["name"].startswith(DIRECT_TORRENT): 46 | return 1 47 | else: 48 | return 0 49 | 50 | 51 | def parse_to_debrid_stream(torrent_item: TorrentItem, configb64, host, torrenting, results: queue.Queue, media: Media): 52 | if torrent_item.availability == True: 53 | name = f"{INSTANTLY_AVAILABLE}\n" 54 | else: 55 | name = f"{DOWNLOAD_REQUIRED}\n" 56 | 57 | parsed_data = torrent_item.parsed_data.data 58 | 59 | name += f"{parsed_data.resolution or "Unknown"}" + (f" ({parsed_data.quality})" if parsed_data.quality else "") 60 | 61 | size_in_gb = round(int(torrent_item.size) / 1024 / 1024 / 1024, 2) 62 | 63 | title = f"{torrent_item.raw_title}\n" 64 | 65 | if torrent_item.file_name is not None: 66 | title += f"{torrent_item.file_name}\n" 67 | 68 | title += f"👥 {torrent_item.seeders} 💾 {size_in_gb}GB 🔍 {torrent_item.indexer}\n" 69 | if parsed_data.codec: 70 | title += f"🎥 {parsed_data.codec.upper()} " 71 | if parsed_data.audio: 72 | title += f"🎧 {', '.join(parsed_data.audio)}" 73 | if parsed_data.codec or parsed_data.audio: 74 | title += "\n" 75 | 76 | for language in torrent_item.languages: 77 | title += f"{get_emoji(language)}/" 78 | title = title[:-1] 79 | 80 | queryb64 = encodeb64(json.dumps(torrent_item.to_debrid_stream_query(media))).replace('=', '%3D') 81 | 82 | results.put({ 83 | "name": name, 84 | "description": title, 85 | "url": f"{host}/playback/{configb64}/{queryb64}", 86 | "behaviorHints":{ 87 | "bingeGroup": f"stremio-jackett-{torrent_item.info_hash}", 88 | "filename": torrent_item.file_name if torrent_item.file_name is not None else torrent_item.raw_title # TODO: Use parsed title? 89 | } 90 | }) 91 | 92 | if torrenting and torrent_item.privacy == "public": 93 | name = f"{DIRECT_TORRENT}\n" 94 | if parsed_data.quality and parsed_data.quality != "Unknown" and \ 95 | parsed_data.quality != "": 96 | name += f"({parsed_data.quality})" 97 | results.put({ 98 | "name": name, 99 | "description": title, 100 | "infoHash": torrent_item.info_hash, 101 | "fileIdx": int(torrent_item.file_index) if torrent_item.file_index else None, 102 | "behaviorHints":{ 103 | "bingeGroup": f"stremio-jackett-{torrent_item.info_hash}", 104 | "filename": torrent_item.file_name if torrent_item.file_name is not None else torrent_item.raw_title # TODO: Use parsed title? 105 | } 106 | # "sources": ["tracker:" + tracker for tracker in torrent_item.trackers] 107 | }) 108 | 109 | 110 | def parse_to_stremio_streams(torrent_items: List[TorrentItem], config, media): 111 | stream_list = [] 112 | threads = [] 113 | thread_results_queue = queue.Queue() 114 | 115 | configb64 = encodeb64(json.dumps(config).replace('=', '%3D')) 116 | for torrent_item in torrent_items[:int(config['maxResults'])]: 117 | thread = threading.Thread(target=parse_to_debrid_stream, 118 | args=(torrent_item, configb64, config['addonHost'], config['torrenting'], 119 | thread_results_queue, media), 120 | daemon=True) 121 | thread.start() 122 | threads.append(thread) 123 | 124 | for thread in threads: 125 | thread.join() 126 | 127 | while not thread_results_queue.empty(): 128 | stream_list.append(thread_results_queue.get()) 129 | 130 | if len(stream_list) == 0: 131 | return [] 132 | 133 | if config['debrid']: 134 | stream_list = sorted(stream_list, key=filter_by_availability) 135 | stream_list = sorted(stream_list, key=filter_by_direct_torrnet) 136 | return stream_list 137 | -------------------------------------------------------------------------------- /source/utils/string_encoding.py: -------------------------------------------------------------------------------- 1 | import base64 2 | 3 | 4 | def encodeb64(data): 5 | return base64.b64encode(data.encode('utf-8')).decode('utf-8') 6 | 7 | 8 | def decodeb64(data): 9 | return base64.b64decode(data).decode('utf-8') 10 | -------------------------------------------------------------------------------- /source/videos/nocache.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aymene69/stremio-jackett/777061f5f93bf1c71e5f038a60222cd057514800/source/videos/nocache.mp4 -------------------------------------------------------------------------------- /stremio-jackett.iml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | --------------------------------------------------------------------------------