├── .coveragerc
├── .dockerignore
├── .env.example
├── .github
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── 🐛-bug-report.md
│   │   └── 💡-feature-request.md
│   ├── ISSUE_TEMPLAYE
│   │   ├── ---bug-report.yml
│   │   ├── ---feature-request.yml
│   │   └── config.yml
│   ├── dependabot.yml
│   ├── pull_request_template.md
│   └── workflows
│       ├── backend-battery.yml.disabled
│       ├── conventional-commits.yml
│       ├── docker-build-dev.yml
│       ├── docker-build.yml
│       ├── notify-discord.yml
│       └── release-please.yaml
├── .gitignore
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Dockerfile
├── Dockerfile.slim
├── LICENSE.md
├── README.md
├── assets
│   ├── riven-dark.png
│   └── riven-light.png
├── dev
│   └── attach-memray.sh
├── docker-compose-dev.yml
├── docker-compose-full.yml
├── docker-compose.yml
├── entrypoint.sh
├── makefile
├── poetry.lock
├── pyproject.toml
└── src
    ├── .gitignore
    ├── __init__.py
    ├── alembic.ini
    ├── alembic
    │   ├── env.py
    │   ├── script.py.mako
    │   └── versions
    │       ├── 20241105_1300_c99709e3648f_baseline_schema.py
    │       ├── 20250210_0739_d6c06f357feb_v0_21_0_add_pause_and_retry.py
    │       └── 20250331_2136_834cba7d26b4_add_trakt_id_attribute_to_mediaitem.py
    ├── auth.py
    ├── main.py
    ├── program
    │   ├── __init__.py
    │   ├── apis
    │   │   ├── __init__.py
    │   │   ├── listrr_api.py
    │   │   ├── mdblist_api.py
    │   │   ├── overseerr_api.py
    │   │   ├── plex_api.py
    │   │   ├── trakt_api.py
    │   │   └── tvmaze_api.py
    │   ├── db
    │   │   ├── __init__.py
    │   │   ├── db.py
    │   │   └── db_functions.py
    │   ├── managers
    │   │   ├── event_manager.py
    │   │   ├── sse_manager.py
    │   │   └── websocket_manager.py
    │   ├── media
    │   │   ├── __init__.py
    │   │   ├── item.py
    │   │   ├── state.py
    │   │   ├── stream.py
    │   │   └── subtitle.py
    │   ├── program.py
    │   ├── services
    │   │   ├── content
    │   │   │   ├── __init__.py
    │   │   │   ├── listrr.py
    │   │   │   ├── mdblist.py
    │   │   │   ├── overseerr.py
    │   │   │   ├── plex_watchlist.py
    │   │   │   └── trakt.py
    │   │   ├── downloaders
    │   │   │   ├── __init__.py
    │   │   │   ├── alldebrid.py
    │   │   │   ├── models.py
    │   │   │   ├── realdebrid.py
    │   │   │   ├── shared.py
    │   │   │   └── torbox.py
    │   │   ├── indexers
    │   │   │   ├── __init__.py
    │   │   │   ├── tmdb.py
    │   │   │   └── trakt.py
    │   │   ├── libraries
    │   │   │   ├── __init__.py
    │   │   │   └── symlink.py
    │   │   ├── post_processing
    │   │   │   ├── __init__.py
    │   │   │   └── subliminal.py
    │   │   ├── scrapers
    │   │   │   ├── __init__.py
    │   │   │   ├── comet.py
    │   │   │   ├── jackett.py
    │   │   │   ├── knightcrawler.py
    │   │   │   ├── mediafusion.py
    │   │   │   ├── orionoid.py
    │   │   │   ├── prowlarr.py
    │   │   │   ├── shared.py
    │   │   │   ├── torrentio.py
    │   │   │   └── zilean.py
    │   │   └── updaters
    │   │       ├── __init__.py
    │   │       ├── emby.py
    │   │       ├── jellyfin.py
    │   │       └── plex.py
    │   ├── settings
    │   │   ├── __init__.py
    │   │   ├── manager.py
    │   │   ├── migratable.py
    │   │   ├── models.py
    │   │   └── versions.py
    │   ├── state_transition.py
    │   ├── symlink.py
    │   ├── types.py
    │   └── utils
    │       ├── __init__.py
    │       ├── cli.py
    │       ├── logging.py
    │       ├── notifications.py
    │       ├── request.py
    │       └── useragents.py
    ├── pytest.ini
    ├── routers
    │   ├── __init__.py
    │   ├── models
    │   │   ├── overseerr.py
    │   │   ├── plex.py
    │   │   └── shared.py
    │   └── secure
    │       ├── __init__.py
    │       ├── default.py
    │       ├── items.py
    │       ├── scrape.py
    │       ├── settings.py
    │       ├── stream.py
    │       ├── webhooks.py
    │       └── ws.py
    └── tests
        ├── test_alldebrid_downloader.py
        ├── test_cache.sqlite
        ├── test_container.py
        ├── test_data
        │   ├── alldebrid_magnet_delete.json
        │   ├── alldebrid_magnet_instant.json
        │   ├── alldebrid_magnet_instant_unavailable.json
        │   ├── alldebrid_magnet_status_one_downloading.json
        │   ├── alldebrid_magnet_status_one_ready.json
        │   ├── alldebrid_magnet_upload_not_ready.json
        │   └── alldebrid_magnet_upload_ready.json
        ├── test_db_functions.py
        ├── test_db_item_filters.py
        ├── test_debrid_matching.py
        ├── test_ranking.py
        ├── test_rate_limiting.py
        ├── test_requests.py
        ├── test_settings_migration.py
        ├── test_states_processing.py
        ├── test_symlink_creation.py
        ├── test_symlink_library.py
        └── test_torbox_downloader.py
/.coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | branch = True
3 | source = src
4 |
5 | [report]
6 | omit =
7 | */tests/*
8 | */__init__.py
9 |
10 | # Don't show missing line numbers in report output
11 | show_missing = False
12 |
13 | exclude_lines =
14 | pragma: no cover
15 |
16 | [html]
17 | # Directory where HTML reports will be saved
18 | directory = htmlcov
--------------------------------------------------------------------------------
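
A minimal local invocation that exercises this config (a sketch using the standard `coverage` CLI; the `make coverage` target further down in this dump is the project's canonical entry point):

```bash
# Run the test suite under coverage, using the [run] settings above (branch coverage, source=src)
poetry run coverage run -m pytest src
# Terminal summary, honoring the [report] omit/exclude rules
poetry run coverage report
# HTML report written to htmlcov/ per the [html] section
poetry run coverage html
```
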
/.dockerignore:
--------------------------------------------------------------------------------
1 | # Extras That Shouldn't Be Here
2 | .git/
3 | .gitignore
4 | .dockerignore
5 | docker-compose*
6 | Dockerfile
7 | makefile
8 | htmlcov/
9 | coverage.xml
10 | .coverage*
11 | *.svg
12 | frontend/node_modules/
13 | bin/
14 | *.bin
15 |
16 | .vscode/
17 | .ruff_cache/
18 | *.dat
19 | profile.svg
20 |
21 | # Frontend
22 | .DS_Store
23 | /build
24 | /.svelte-kit
25 | /package
26 | .example*
27 |
28 | # Backend
29 | logs/
30 | settings.json
31 | __pycache__
32 | *.log
33 | data
34 | test*
35 |
36 | # Jupyter Notebooks
37 | .ipynb_checkpoints
38 |
39 | # Environments
40 | .env*
41 | .venv
42 | env/
43 | venv/
44 | ENV/
45 | env.bak/
46 | venv.bak/
47 |
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | github: [dreulavelle]
2 | ko_fi: spoked # Replace with a single Ko-fi username
3 | # patreon: # Replace with a single Patreon username
4 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/🐛-bug-report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "\U0001F41B Bug Report"
3 | about: Create a report to help improve Riven
4 | title: "[Issue]"
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | ## Prerequisites
11 |
12 | - [ ] I have checked the [existing issues](https://github.com/rivenmedia/riven/issues) and confirmed this is not a duplicate
13 | - [ ] I am running the latest version of Riven
14 | - [ ] I have read the [documentation](https://github.com/rivenmedia/riven/blob/main/README.md)
15 | - [ ] I have checked in [Discord channel](https://discord.com/invite/rivenmedia) for a solution
16 |
17 | ## Environment
18 | - **Riven Version**:
19 | - **Deployment Method**:
20 | - **Operating System**:
21 |
22 | ## Current Behavior
23 |
24 |
25 | ## Expected Behavior
26 |
27 |
28 | ## Steps To Reproduce
29 | 1.
30 | 2.
31 | 3.
32 | 4.
33 |
34 | ## Logs
35 |
36 |
37 | Log Output
38 |
39 | ```shell
40 | # Paste your logs here
41 | ```
42 |
43 |
44 | ## Configuration
45 |
46 |
47 | Configuration
48 |
49 | ```json
50 | {
51 | // Your configuration here
52 | }
53 | ```
54 |
55 |
56 | ## Additional Context
57 |
58 |
59 | ## Screenshots
60 |
61 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/💡-feature-request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "\U0001F4A1 Feature Request"
3 | about: Suggest an idea for Riven
4 | title: "[feaure]"
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | ## Prerequisites
11 |
12 | - [ ] I have checked the [existing issues](https://github.com/rivenmedia/riven/issues) and confirmed this feature has not already been requested
13 | - [ ] I have read the [documentation](https://github.com/rivenmedia/riven/blob/main/README.md) and confirmed this feature does not already exist
14 |
15 | ## Feature Category
16 |
17 | - [ ] Media Management
18 | - [ ] Downloading
19 | - [ ] Scraping
20 | - [ ] Integration (Plex, Jellyfin, etc.)
21 | - [ ] User Interface
22 | - [ ] API
23 | - [ ] Performance
24 | - [ ] Security
25 | - [ ] Documentation
26 | - [ ] Other (please specify below)
27 |
28 | ## Problem Statement
29 |
30 |
31 |
32 | ## Proposed Solution
33 |
34 |
35 |
36 | ## Alternative Solutions
37 |
38 |
39 |
40 | ## Implementation Details
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 | Technical Details (if applicable)
50 |
51 | ```
52 | # Add technical details, code examples, or API specifications here
53 | ```
54 |
55 |
56 | ## Additional Context
57 |
58 |
59 |
60 |
61 |
62 |
63 | ## Contribution
64 |
65 | - [ ] I would be willing to help implement this feature
66 | - [ ] I would be willing to test this feature
67 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLAYE/---bug-report.yml:
--------------------------------------------------------------------------------
1 | name: "\U0001F41E Bug Report"
2 | description: "Riven not working the way it is documented?"
3 | title: "[Bug]: "
4 | labels: ["kind/bug", "status/triage"]
5 | assignees:
6 | - dreulavelle
7 |
8 | body:
9 | - type: markdown
10 | attributes:
11 | value: |
12 | Thank you for taking the time to file a complete bug report.
13 |
14 | - type: textarea
15 | attributes:
16 | label: Description
17 | description: |
18 | Please describe what happened, with as much pertinent information as you can. Feel free to use markdown syntax.
19 |
20 | Also, ensure that the issue is not already fixed in the latest release.
21 | validations:
22 | required: true
23 |
24 | - type: textarea
25 | attributes:
26 | label: Workarounds
27 | description: |
28 | Is there a mitigation or workaround that allows users to avoid the issue today?
29 | validations:
30 | required: true
31 |
32 | - type: textarea
33 | attributes:
34 | label: Attach Error Logs
35 | description: |
36 | Please attach logs or error messages that can help in debugging the issue.
37 | render: 'bash session'
38 | validations:
39 | required: true
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLAYE/---feature-request.yml:
--------------------------------------------------------------------------------
1 | name: "\U0001F381 Feature Request"
2 | description: "Did you find bugs, errors, or anything that isn't straightforward in the documentation?"
3 | title: "[Feature]: "
4 | labels: ["kind/feature", "status/triage"]
5 | assignees:
6 | - dreulavelle
7 |
8 | body:
9 | - type: markdown
10 | attributes:
11 | value: |
12 | Thank you for taking the time to file a complete feature request.
13 |
14 | Before submitting your issue, please search [issues](https://github.com/rivenmedia/riven/issues) to ensure this is not a duplicate.
15 |
16 | If the issue is trivial, why not submit a pull request instead?
17 |
18 | - type: dropdown
19 | attributes:
20 | label: Issue Kind
21 | description: |
22 | What best describes this issue?
23 | options:
24 | - "New Feature Request"
25 | - "Change in current behaviour"
26 | - "Other"
27 | validations:
28 | required: true
29 |
30 | - type: textarea
31 | attributes:
32 | label: Description
33 | description: |
34 | Please describe the issue, with as much pertinent information as you can. Feel free to use markdown syntax.
35 |
36 | validations:
37 | required: true
38 |
39 | - type: textarea
40 | attributes:
41 | label: Impact
42 | description: |
43 | Please describe the motivation for this issue. Describe, as best you can, how this improves or impacts the users of Riven and why this is important.
44 | validations:
45 | required: true
46 |
47 | - type: textarea
48 | attributes:
49 | label: Workarounds
50 | description: |
51 | Is there a mitigation, workaround, or another application that allows users to achieve the same functionality today?
52 | validations:
53 | required: true
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLAYE/config.yml:
--------------------------------------------------------------------------------
1 | # Ref: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser
2 | blank_issues_enabled: false
3 | contact_links:
4 | - name: '💬 Discussions'
5 | url: https://github.com/dreulavelle/rank-torrent-name/discussions
6 | about: |
7 | Ask questions about using Riven, features and roadmap, or get support and feedback!
8 | - name: '💬 Discord Server'
9 | url: https://discord.gg/38SFhtN8ph
10 | about: |
11 | Chat with the community and Riven maintainers about both the usage of and development of the project.
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
2 |
3 | version: 2
4 |
5 | updates:
6 |
7 | # Frontend (points to frontend/package.json)
8 | - package-ecosystem: 'npm'
9 | directory: '/frontend'
10 | schedule:
11 | interval: 'weekly'
12 | ignore:
13 | - dependency-name: '*'
14 | update-types: ["version-update:semver-minor"]
15 | commit-message:
16 | prefix: 'chore'
17 | include: 'scope'
18 | assignees:
19 | - 'AyushSehrawat'
20 |
21 | # Backend (points to pyproject.toml in root directory)
22 | - package-ecosystem: 'pip'
23 | directory: '/'
24 | schedule:
25 | interval: 'weekly'
26 | commit-message:
27 | prefix: 'chore'
28 | include: 'scope'
29 | assignees:
30 | - 'dreulavelle'
31 |
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | # Pull Request Check List
2 |
3 | Resolves: #issue-number-here
4 |
5 | - [ ] Added **tests** for changed code.
6 | - [ ] Updated **documentation** for changed code.
7 |
8 | ## Description:
9 |
10 |
--------------------------------------------------------------------------------
/.github/workflows/backend-battery.yml.disabled:
--------------------------------------------------------------------------------
1 | name: Linting and Testing
2 |
3 | on:
4 | pull_request:
5 | branches: [ main ]
6 |
7 | jobs:
8 | battery:
9 | runs-on: ubuntu-latest
10 | strategy:
11 | matrix:
12 | python-version: [3.11]
13 |
14 | steps:
15 | - uses: actions/checkout@v4.1.2
16 |
17 | - name: Set up Python ${{ matrix.python-version }}
18 | uses: actions/setup-python@v5.1.0
19 | with:
20 | python-version: ${{ matrix.python-version }}
21 |
22 | - name: Cache Poetry dependencies
23 | uses: actions/cache@v4.0.2
24 | with:
25 | path: |
26 | ~/.cache/pypoetry
27 | .venv
28 | key: ${{ runner.os }}-poetry-${{ hashFiles('**/poetry.lock') }}
29 | restore-keys: |
30 | ${{ runner.os }}-poetry-
31 |
32 | - name: Install dependencies
33 | run: |
34 | pip install poetry
35 | poetry install --no-root --with dev
36 |
37 | - name: Ruff & Isort Check
38 | run: |
39 | poetry run ruff check ./src
40 | poetry run isort --check-only ./src
41 |
42 | - name: Type check
43 | run: poetry run pyright
44 |
45 | - name: Run Tests & Coverage
46 | run: poetry run pytest --cov=./src --cov-report=xml
47 |
48 | - name: Upload Coverage Report to Codecov
49 | uses: codecov/codecov-action@v4.1.1
50 | with:
51 | token: ${{ secrets.CODECOV_TOKEN }}
52 | file: ./coverage.xml
53 |
--------------------------------------------------------------------------------
/.github/workflows/conventional-commits.yml:
--------------------------------------------------------------------------------
1 | name: Conventional Commits
2 |
3 | on:
4 | pull_request:
5 | branches: [ main ]
6 |
7 | jobs:
8 | build:
9 | name: Conventional Commits
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: actions/checkout@v3
13 | - uses: webiny/action-conventional-commits@v1.3.0
14 | #with:
15 | #GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Optional, for private repositories.
16 | #allowed-commit-types: "feat,fix," # Optional, set if you want a subset of commit types to be allowed.
17 |
--------------------------------------------------------------------------------
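
The action above only checks that commit messages follow the Conventional Commits shape; a few hypothetical message examples (the scopes and descriptions here are made up):

```bash
# type(scope): description — types like feat/fix/chore/docs per conventionalcommits.org
git commit -m "feat(scrapers): add zilean backend"
git commit -m "fix(downloaders): retry stalled transfers"
# a breaking change is flagged with "!" before the colon
git commit -m "feat!: drop python 3.10 support"
```
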
/.github/workflows/docker-build-dev.yml:
--------------------------------------------------------------------------------
1 | name: Docker Build and Push Dev
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | workflow_dispatch:
8 |
9 | jobs:
10 | build-and-push-dev:
11 | runs-on: ubuntu-latest
12 | permissions:
13 | contents: write
14 | packages: write
15 | security-events: write
16 |
17 | steps:
18 | - name: Checkout code
19 | uses: actions/checkout@v4.1.2
20 |
21 | - name: Docker Setup QEMU
22 | uses: docker/setup-qemu-action@v3
23 | id: qemu
24 | with:
25 | platforms: amd64,arm64
26 |
27 | - name: Log into ghcr.io registry
28 | uses: docker/login-action@v3.1.0
29 | with:
30 | registry: ghcr.io
31 | username: ${{ github.repository_owner }}
32 | password: ${{ secrets.GITHUB_TOKEN }}
33 |
34 | - name: Set up Docker Buildx
35 | uses: docker/setup-buildx-action@v3.2.0
36 |
37 | - name: Log in to Docker Hub
38 | uses: docker/login-action@v3.1.0
39 | with:
40 | username: ${{ secrets.DOCKER_HUB_USERNAME }}
41 | password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
42 |
43 | - name: Build Docker Metadata
44 | id: docker-metadata
45 | uses: docker/metadata-action@v5
46 | with:
47 | images: |
48 | ghcr.io/rivenmedia/riven
49 | docker.io/spoked/riven
50 | flavor: |
51 | latest=auto
52 | tags: |
53 | type=ref,event=branch
54 | type=sha,commit=${{ github.sha }}
55 | type=raw,value=dev,enable={{is_default_branch}}
56 |
57 | - name: Push Dev Image to repo
58 | uses: docker/build-push-action@v5
59 | with:
60 | context: .
61 | file: ./Dockerfile
62 | push: true
63 | provenance: mode=max
64 | tags: ${{ steps.docker-metadata.outputs.tags }}
65 | labels: ${{ steps.docker-metadata.outputs.labels }}
66 | platforms: linux/amd64,linux/arm64
67 | cache-from: type=gha,scope=${{ github.workflow }}
68 | cache-to: type=gha,mode=max,scope=${{ github.workflow }}
69 |
--------------------------------------------------------------------------------
/.github/workflows/docker-build.yml:
--------------------------------------------------------------------------------
1 | name: Docker Build and Push
2 |
3 | on:
4 | push:
5 | tags:
6 | - 'v[0-9]+.[0-9]+.[0-9]+'
7 | workflow_dispatch:
8 |
9 | jobs:
10 | build-and-push:
11 | runs-on: ubuntu-latest
12 | permissions:
13 | contents: write
14 | packages: write
15 | security-events: write
16 |
17 | steps:
18 | - name: Checkout code
19 | uses: actions/checkout@v4.1.2
20 |
21 | - name: Docker Setup QEMU
22 | uses: docker/setup-qemu-action@v3
23 | id: qemu
24 | with:
25 | platforms: amd64,arm64
26 |
27 | - name: Log into ghcr.io registry
28 | uses: docker/login-action@v3.1.0
29 | with:
30 | registry: ghcr.io
31 | username: ${{ github.repository_owner }}
32 | password: ${{ secrets.GITHUB_TOKEN }}
33 |
34 | - name: Set up Docker Buildx
35 | uses: docker/setup-buildx-action@v3.2.0
36 |
37 | - name: Log in to Docker Hub
38 | uses: docker/login-action@v3.1.0
39 | with:
40 | username: ${{ secrets.DOCKER_HUB_USERNAME }}
41 | password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
42 |
43 | - name: Build Docker Metadata
44 | id: docker-metadata
45 | uses: docker/metadata-action@v5
46 | with:
47 | images: |
48 | ghcr.io/rivenmedia/riven
49 | docker.io/spoked/riven
50 | flavor: |
51 | latest=auto
52 | tags: |
53 | type=ref,event=tag
54 | type=sha,commit=${{ github.sha }}
55 | type=semver,pattern={{version}}
56 | type=raw,value=latest,enable={{is_default_branch}}
57 |
58 | - name: Push Service Image to repo
59 | uses: docker/build-push-action@v5
60 | with:
61 | context: .
62 | file: ./Dockerfile
63 | push: true
64 | provenance: mode=max
65 | tags: ${{ steps.docker-metadata.outputs.tags }}
66 | labels: ${{ steps.docker-metadata.outputs.labels }}
67 | platforms: linux/amd64,linux/arm64
68 | cache-from: type=gha,scope=${{ github.workflow }}
69 | cache-to: type=gha,mode=max,scope=${{ github.workflow }}
70 |
--------------------------------------------------------------------------------
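
As a rough sketch of what this metadata configuration yields: for a pushed tag such as v0.21.21 (the version currently in pyproject.toml), docker/metadata-action should emit approximately the following tags; the exact set depends on the action version and this is not verified output:

```bash
# Approximate tag set for a v0.21.21 tag push (sketch):
#   ghcr.io/rivenmedia/riven:v0.21.21     <- type=ref,event=tag
#   ghcr.io/rivenmedia/riven:0.21.21      <- type=semver,pattern={{version}}
#   ghcr.io/rivenmedia/riven:sha-<short>  <- type=sha
#   ghcr.io/rivenmedia/riven:latest       <- flavor latest=auto on a non-prerelease semver tag
# ...plus the same four tags on docker.io/spoked/riven
```
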
/.github/workflows/notify-discord.yml:
--------------------------------------------------------------------------------
1 | name: "Notify Discord of Riven Release"
2 |
3 | on:
4 | release:
5 | types: [published]
6 |
7 | jobs:
8 | github-releases-to-discord:
9 | runs-on: ubuntu-latest
10 | steps:
11 | - name: Checkout
12 | uses: actions/checkout@v4
13 | - name: Github Releases To Discord
14 | uses: SethCohen/github-releases-to-discord@v1.13.1
15 | with:
16 | webhook_url: ${{ secrets.DISCORD_WEBHOOK }}
17 | color: "5378091"
18 | username: "Riven Release Changelog"
19 | avatar_url: "https://raw.githubusercontent.com/rivenmedia/riven/main/assets/riven-light.png"
20 | # content: "||@here||"
21 | footer_title: "Riven (Backend) Changelog"
22 | footer_icon_url: "https://raw.githubusercontent.com/rivenmedia/riven/main/assets/riven-light.png"
23 | footer_timestamp: true
24 |
--------------------------------------------------------------------------------
/.github/workflows/release-please.yaml:
--------------------------------------------------------------------------------
1 | name: "Release Please and Notify Discord"
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | workflow_dispatch:
8 |
9 | permissions:
10 | contents: write
11 | pull-requests: write
12 |
13 | jobs:
14 | release-please:
15 | runs-on: ubuntu-latest
16 | steps:
17 | - uses: googleapis/release-please-action@v4
18 | id: release
19 | with:
20 | token: ${{ secrets.RELEASE_PLEASE_TOKEN }}
21 | release-type: python
22 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | data/
2 | logs/
3 | settings.json
4 | ignore.txt
5 | .vscode
6 | .git
7 | makefile
8 | .ruff_cache/
9 | *.dat
10 | profile.svg
11 | *.gz
12 | *.zip
13 | *.lockb
14 | *.pkl
15 | *.bak
16 | bin/
17 | *.bin
18 | .secrets*
19 | event.json
20 | *.patch
21 | config
22 |
23 | # Python bytecode / Byte-compiled / optimized / DLL files
24 | __pycache__/
25 | __pypackages__/
26 | *.pyc
27 | *.pyo
28 | *.pyd
29 | /.venv/
30 | *.py[cod]
31 | *$py.class
32 | .ruff_cache/
33 |
34 | # Local Poetry artifact cache
35 | /.cache/pypoetry/
36 |
37 | # Unit test / coverage reports
38 | htmlcov/
39 | .tox/
40 | .nox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | *.py,cover
48 | .hypothesis/
49 | .pytest_cache/
50 | cover/
51 |
52 | # Environments
53 | *.env
54 | *.venv
55 | env/
56 | venv/
57 | ENV/
58 | env.bak/
59 | venv.bak/
60 |
61 | # Rider IDE
62 | **/.idea/
63 |
64 | # MacOs
65 | **/.DS_Store
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | We as members, contributors, and leaders pledge to make participation in our
6 | community a harassment-free experience for everyone, regardless of age, body
7 | size, visible or invisible disability, ethnicity, sex characteristics, gender
8 | identity and expression, level of experience, education, socio-economic status,
9 | nationality, personal appearance, race, religion, or sexual identity
10 | and orientation.
11 |
12 | We pledge to act and interact in ways that contribute to an open, welcoming,
13 | diverse, inclusive, and healthy community.
14 |
15 | ## Our Standards
16 |
17 | Examples of behavior that contributes to a positive environment for our
18 | community include:
19 |
20 | * Demonstrating empathy and kindness toward other people
21 | * Being respectful of differing opinions, viewpoints, and experiences
22 | * Giving and gracefully accepting constructive feedback
23 | * Accepting responsibility and apologizing to those affected by our mistakes,
24 | and learning from the experience
25 | * Focusing on what is best not just for us as individuals, but for the
26 | overall community
27 |
28 | Examples of unacceptable behavior include:
29 |
30 | * The use of sexualized language or imagery, and sexual attention or
31 | advances of any kind
32 | * Trolling, insulting or derogatory comments, and personal or political attacks
33 | * Public or private harassment
34 | * Publishing others' private information, such as a physical or email
35 | address, without their explicit permission
36 | * Other conduct which could reasonably be considered inappropriate in a
37 | professional setting
38 |
39 | ## Enforcement Responsibilities
40 |
41 | Community leaders are responsible for clarifying and enforcing our standards of
42 | acceptable behavior and will take appropriate and fair corrective action in
43 | response to any behavior that they deem inappropriate, threatening, offensive,
44 | or harmful.
45 |
46 | Community leaders have the right and responsibility to remove, edit, or reject
47 | comments, commits, code, wiki edits, issues, and other contributions that are
48 | not aligned to this Code of Conduct, and will communicate reasons for moderation
49 | decisions when appropriate.
50 |
51 | ## Scope
52 |
53 | This Code of Conduct applies within all community spaces, and also applies when
54 | an individual is officially representing the community in public spaces.
55 | Examples of representing our community include using an official e-mail address,
56 | posting via an official social media account, or acting as an appointed
57 | representative at an online or offline event.
58 |
59 | ## Enforcement
60 |
61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
62 | reported to the community leaders responsible for enforcement at
63 | admin@debrid.wiki.
64 | All complaints will be reviewed and investigated promptly and fairly.
65 |
66 | All community leaders are obligated to respect the privacy and security of the
67 | reporter of any incident.
68 |
69 | ## Enforcement Guidelines
70 |
71 | Community leaders will follow these Community Impact Guidelines in determining
72 | the consequences for any action they deem in violation of this Code of Conduct:
73 |
74 | ### 1. Correction
75 |
76 | **Community Impact**: Use of inappropriate language or other behavior deemed
77 | unprofessional or unwelcome in the community.
78 |
79 | **Consequence**: A private, written warning from community leaders, providing
80 | clarity around the nature of the violation and an explanation of why the
81 | behavior was inappropriate. A public apology may be requested.
82 |
83 | ### 2. Warning
84 |
85 | **Community Impact**: A violation through a single incident or series
86 | of actions.
87 |
88 | **Consequence**: A warning with consequences for continued behavior. No
89 | interaction with the people involved, including unsolicited interaction with
90 | those enforcing the Code of Conduct, for a specified period of time. This
91 | includes avoiding interactions in community spaces as well as external channels
92 | like social media. Violating these terms may lead to a temporary or
93 | permanent ban.
94 |
95 | ### 3. Temporary Ban
96 |
97 | **Community Impact**: A serious violation of community standards, including
98 | sustained inappropriate behavior.
99 |
100 | **Consequence**: A temporary ban from any sort of interaction or public
101 | communication with the community for a specified period of time. No public or
102 | private interaction with the people involved, including unsolicited interaction
103 | with those enforcing the Code of Conduct, is allowed during this period.
104 | Violating these terms may lead to a permanent ban.
105 |
106 | ### 4. Permanent Ban
107 |
108 | **Community Impact**: Demonstrating a pattern of violation of community
109 | standards, including sustained inappropriate behavior, harassment of an
110 | individual, or aggression toward or disparagement of classes of individuals.
111 |
112 | **Consequence**: A permanent ban from any sort of public interaction within
113 | the community.
114 |
115 | ## Attribution
116 |
117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage],
118 | version 2.0, available at
119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
120 |
121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct
122 | enforcement ladder](https://github.com/mozilla/diversity).
123 |
124 | [homepage]: https://www.contributor-covenant.org
125 |
126 | For answers to common questions about this code of conduct, see the FAQ at
127 | https://www.contributor-covenant.org/faq. Translations are available at
128 | https://www.contributor-covenant.org/translations.
129 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | We want to make contributing to this project as easy and transparent as
4 | possible.
5 |
6 | ### Submitting Changes
7 |
8 | 1. **Open an Issue**: For major changes, start by opening an issue to discuss your proposed modifications. This helps us understand your intentions and provide feedback early in the process.
9 | 2. **Pull Requests**: Once your changes are ready, submit a pull request. Ensure your code adheres to our coding standards and passes all tests. Commits should follow [conventional-commits](https://www.conventionalcommits.org/) specification.
10 |
11 | ### Code Formatting
12 |
13 | - **Backend**: We use [Black](https://black.readthedocs.io/en/stable/) for code formatting. Run `black` on your code before submitting.
14 | - **Line Endings**: Use CRLF line endings unless the file is a shell script or another format that requires LF line endings.
15 |
16 | ## License
17 |
18 | By contributing, you agree that your contributions will be licensed
19 | under the LICENSE file in the root directory of this source tree.
20 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Builder Image for Python Dependencies
2 | FROM python:3.11-alpine AS builder
3 |
4 | # Install necessary build dependencies
5 | RUN apk add --no-cache \
6 | gcc \
7 | musl-dev \
8 | libffi-dev \
9 | python3-dev \
10 | build-base \
11 | curl
12 |
13 | # Upgrade pip and install poetry
14 | RUN pip install --upgrade pip && pip install poetry==1.8.3
15 |
16 | ENV POETRY_NO_INTERACTION=1 \
17 | POETRY_VIRTUALENVS_IN_PROJECT=1 \
18 | POETRY_VIRTUALENVS_CREATE=1 \
19 | POETRY_CACHE_DIR=/tmp/poetry_cache
20 |
21 | WORKDIR /app
22 |
23 | COPY pyproject.toml poetry.lock ./
24 | RUN touch README.md
25 | RUN poetry install --without dev --no-root && rm -rf $POETRY_CACHE_DIR
26 |
27 | # Final Image
28 | FROM python:3.11-alpine
29 | LABEL name="Riven" \
30 | description="Riven Media Server" \
31 | url="https://github.com/rivenmedia/riven"
32 |
33 | # Install system dependencies
34 | ENV PYTHONUNBUFFERED=1
35 | RUN apk add --no-cache \
36 | curl \
37 | shadow \
38 | rclone \
39 | unzip \
40 | gcc \
41 | ffmpeg \
42 | musl-dev \
43 | libffi-dev \
44 | python3-dev \
45 | libpq-dev \
46 | libtorrent
47 |
48 | # Install Poetry
49 | RUN pip install poetry==1.8.3
50 |
51 | # Set environment variable to force color output
52 | ENV FORCE_COLOR=1
53 | ENV TERM=xterm-256color
54 |
55 | # Set working directory
56 | WORKDIR /riven
57 |
58 | # Copy the virtual environment from the builder stage
59 | COPY --from=builder /app/.venv /app/.venv
60 | ENV VIRTUAL_ENV=/app/.venv
61 | ENV PATH="/app/.venv/bin:$PATH"
62 |
63 | # Copy the rest of the application code
64 | COPY src/ /riven/src
65 | COPY pyproject.toml poetry.lock /riven/
66 | COPY entrypoint.sh /riven/
67 |
68 | # Ensure entrypoint script is executable
69 | RUN chmod +x /riven/entrypoint.sh
70 |
71 | ENTRYPOINT ["/riven/entrypoint.sh"]
72 |
--------------------------------------------------------------------------------
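
The builder stage creates the Poetry virtualenv at /app/.venv, and the runtime stage copies only that venv, keeping the build toolchain out of the final image. A minimal local build sketch (the riven:local tag is arbitrary):

```bash
# Build the multi-stage image from the repo root
docker build -t riven:local .
# Sanity-check that the builder's venv is on PATH in the runtime image
docker run --rm --entrypoint python riven:local --version
```
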
/Dockerfile.slim:
--------------------------------------------------------------------------------
1 | # Riven src Builder
2 |
3 | FROM python:3.11.9-alpine3.19 AS Base
4 | LABEL name="Riven" \
5 | description="Riven Debrid Downloader" \
6 | url="https://github.com/rivenmedia/riven"
7 |
8 | # Install system dependencies
9 | RUN apk --update add --no-cache curl bash shadow gcc python3-dev musl-dev linux-headers patchelf clang ccache && \
10 | rm -rf /var/cache/apk/*
11 | RUN pip install --upgrade pip && pip install poetry==1.8.3
12 |
13 | ENV POETRY_NO_INTERACTION=1 \
14 | POETRY_VIRTUALENVS_IN_PROJECT=1 \
15 | POETRY_VIRTUALENVS_CREATE=1 \
16 | POETRY_CACHE_DIR=/tmp/poetry_cache
17 |
18 | # Install Poetry globally
19 | ENV POETRY_HOME="/etc/poetry"
20 | ENV PATH="$POETRY_HOME/bin:$PATH"
21 | #RUN curl -sSL https://install.python-poetry.org | python3 - --yes
22 |
23 | # Setup the application directory
24 | WORKDIR /riven
25 |
26 | # Expose ports
27 | EXPOSE 8080
28 |
29 | # Set environment variable to force color output
30 | ENV FORCE_COLOR=1
31 | ENV TERM=xterm-256color
32 |
33 | # Copy the Python project files
34 | COPY pyproject.toml poetry.lock* /riven/
35 |
36 | # Install Python dependencies
37 | RUN poetry install --without dev --no-root && rm -rf $POETRY_CACHE_DIR
38 |
39 | # Copy src code and other necessary files
40 | COPY src/ /riven/src
41 | COPY VERSION entrypoint.sh /riven/
42 |
43 | RUN cd /riven/src && poetry add nuitka && \
44 | poetry run python3 -m nuitka --standalone --onefile --onefile-tempdir-spec=/onefile_%PID%_%TIME% --python-flag=nosite,-O --nofollow-import-to=pytest --clang --warn-implicit-exceptions --warn-unusual-code --prefer-source-code main.py
45 |
46 | FROM scratch
47 |
48 | COPY --from=Base /riven/src/main.bin /main.bin
49 | COPY VERSION /
50 | VOLUME /data
51 | COPY --from=Base /lib/ /lib/
52 |
53 |
54 | ENTRYPOINT ["/main.bin"]
--------------------------------------------------------------------------------
/assets/riven-dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rivenmedia/riven/d91dd254c08fbb410706d4fc6cb97f3691ebc67c/assets/riven-dark.png
--------------------------------------------------------------------------------
/assets/riven-light.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rivenmedia/riven/d91dd254c08fbb410706d4fc6cb97f3691ebc67c/assets/riven-light.png
--------------------------------------------------------------------------------
/dev/attach-memray.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Attach memray to the running main.py process
3 | # Usage: ./attach-memray.sh
4 |
5 | pgrep -f "main.py" | head -n 1 | xargs -I{} memray attach {}
6 |
--------------------------------------------------------------------------------
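
memray is commented out in pyproject.toml's dev group, so it has to be installed into the project environment before the script above can attach. A hypothetical session:

```bash
# Install memray into the dev environment (it is commented out in pyproject.toml)
poetry add --group dev memray
# Attach the live tracker to the running main.py process
./dev/attach-memray.sh
```
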
/docker-compose-dev.yml:
--------------------------------------------------------------------------------
1 | services:
2 | riven:
3 | build:
4 | context: .
5 | dockerfile: Dockerfile
6 | image: riven:dev
7 | container_name: riven
8 | restart: unless-stopped
9 | network_mode: host
10 | tty: true
11 | environment:
12 | - PUID=1000
13 | - PGID=1000
14 | - TZ=UTC
15 | - ORIGIN=${RIVEN_ORIGIN:-http://localhost:8080}
16 | - RIVEN_FORCE_ENV=true
17 | - RIVEN_DATABASE_HOST=sqlite:////riven/data/media.db
18 | volumes:
19 | - ./data:/riven/data
20 | - /mnt:/mnt
--------------------------------------------------------------------------------
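
This dev compose builds the image locally, uses host networking, and points RIVEN_DATABASE_HOST at a SQLite file under ./data. The makefile later in this dump wraps the typical loop:

```bash
make start-dev   # docker compose -f docker-compose-dev.yml up --build -d, then tail logs
make logs-dev    # re-attach to the logs
make stop-dev    # tear the dev stack down
```
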
/docker-compose-full.yml:
--------------------------------------------------------------------------------
1 | # This is a full setup for Riven with Plex, Overseerr, and Zilean.
2 | # This compose assumes you have already set up rclone and zurg. See the notes below!
3 |
4 | ## Notes:
5 |
6 | # Zurg and rclone must be supplied separately, and their mount must be visible to both Riven and Plex.
7 | # Rclone should be mounted to: /mnt/zurg (the directory name is up to you)
8 | # Set the rclone_path in Riven to the `/mnt/zurg/__all__` dir
9 | # so that Riven can see all the torrents from their parent directory.
10 |
11 | services:
12 |
13 | # Riven Frontend (https://github.com/rivenmedia/riven-frontend)
14 | riven-frontend:
15 | image: spoked/riven-frontend:latest
16 | container_name: riven-frontend
17 | restart: unless-stopped
18 | tty: true
19 | environment:
20 | - TZ=Etc/UTC
21 | ports:
22 | - 3000:3000
23 | volumes:
24 | - ./config:/riven/config # You need to mount a config directory here (different from riven)
25 | depends_on:
26 | riven:
27 | condition: service_started
28 |
29 | # Riven (https://github.com/rivenmedia/riven)
30 | riven:
31 | image: spoked/riven:latest
32 | container_name: riven
33 | restart: unless-stopped
34 | ports:
35 | - "8080:8080"
36 | tty: true
37 | environment:
38 | - PUID=1000
39 | - PGID=1000
40 | - TZ=Etc/UTC
41 | - RIVEN_FORCE_ENV=true # forces env vars to always be used!
42 | - RIVEN_SYMLINK_RCLONE_PATH=/mnt/zurg/__all__ # Set this to your rclone's mount `__all__` dir if using Zurg
43 | - RIVEN_SYMLINK_LIBRARY_PATH=/mnt/library # This is the path that symlinks will be placed in
44 | - RIVEN_DATABASE_HOST=postgresql+psycopg2://postgres:postgres@riven-db/riven
45 | - RIVEN_DOWNLOADERS_REAL_DEBRID_ENABLED=true
46 | - RIVEN_DOWNLOADERS_REAL_DEBRID_API_KEY=xxxxx # set your real debrid api key
47 | - RIVEN_UPDATERS_PLEX_ENABLED=true
48 | - RIVEN_UPDATERS_PLEX_URL=http://plex:32400
49 | - RIVEN_UPDATERS_PLEX_TOKEN=xxxxx # set your plex token
50 | - RIVEN_CONTENT_OVERSEERR_ENABLED=true
51 | - RIVEN_CONTENT_OVERSEERR_URL=http://overseerr:5055
52 | - RIVEN_CONTENT_OVERSEERR_API_KEY=xxxxx # set your overseerr token
53 | - RIVEN_SCRAPING_TORRENTIO_ENABLED=true
54 | - RIVEN_SCRAPING_ZILEAN_ENABLED=true
55 | - RIVEN_SCRAPING_ZILEAN_URL=http://zilean:8181
56 | healthcheck:
57 | test: curl -s http://localhost:8080 >/dev/null || exit 1
58 | interval: 30s
59 | timeout: 10s
60 | retries: 10
61 | volumes:
62 | - ./data:/riven/data
63 | - /mnt:/mnt
64 | depends_on:
65 | riven_postgres:
66 | condition: service_healthy
67 |
68 | riven_postgres:
69 | image: postgres:16.3-alpine3.20
70 | container_name: riven-db
71 | restart: unless-stopped
72 | environment:
73 | PGDATA: /var/lib/postgresql/data/pgdata
74 | POSTGRES_USER: postgres
75 | POSTGRES_PASSWORD: postgres
76 | POSTGRES_DB: riven
77 | volumes:
78 | - ./riven-db:/var/lib/postgresql/data/pgdata
79 | healthcheck:
80 | test: ["CMD-SHELL", "pg_isready -U postgres"]
81 | interval: 10s
82 | timeout: 5s
83 | retries: 5
84 |
85 | ## Plex (optional media server) (https://www.plex.tv/)
86 | plex:
87 | image: plexinc/pms-docker:latest
88 | container_name: plex
89 | restart: unless-stopped
90 | ports:
91 | - "32400:32400"
92 | environment:
93 | - PUID=1000
94 | - PGID=1000
95 | - TZ=Etc/UTC
96 | - VERSION=docker
97 | volumes:
98 | - ./config:/config
99 | - /mnt:/mnt
100 | devices:
101 | - "/dev/dri:/dev/dri"
102 |
103 | ## Overseerr (optional content service) (https://overseerr.dev/)
104 | overseerr:
105 | image: lscr.io/linuxserver/overseerr:latest
106 | container_name: overseerr
107 | restart: unless-stopped
108 | environment:
109 | - PUID=1000
110 | - PGID=1000
111 | - TZ=Etc/UTC
112 | volumes:
113 | - ./config:/config
114 | ports:
115 | - 5055:5055
116 |
117 | ## Zilean (optional scraper service) (https://ipromknight.github.io/zilean/getting-started.html)
118 | zilean:
119 | image: ipromknight/zilean:latest
120 | container_name: zilean
121 | restart: unless-stopped
122 | ports:
123 | - "8181:8181"
124 | volumes:
125 | - zilean_data:/app/data
126 | - zilean_tmp:/tmp
127 | environment:
128 | # You may have to create the zilean database manually with the following command:
129 | # docker exec -it riven-db createdb -U postgres -W zilean
130 | Zilean__Database__ConnectionString: "Host=riven-db;Port=5432;Database=zilean;Username=postgres;Password=postgres"
131 | healthcheck:
132 | test: curl --connect-timeout 10 --silent --show-error --fail http://localhost:8181/healthchecks/ping
133 | timeout: 60s
134 | interval: 30s
135 | retries: 10
136 | depends_on:
137 | riven_postgres:
138 | condition: service_healthy
139 |
140 | volumes:
141 | zilean_data:
142 | zilean_tmp:
143 |
--------------------------------------------------------------------------------
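
The notes at the top of this compose assume a zurg-backed rclone mount at /mnt/zurg. Purely as an illustrative sketch (the `zurg:` remote name and the flags are assumptions, not part of this repo), the mount Riven reads from might be created like so:

```bash
# Hypothetical rclone mount backing /mnt/zurg; adapt the remote and flags to your zurg setup
mkdir -p /mnt/zurg
rclone mount zurg: /mnt/zurg --allow-other --dir-cache-time 10s &
# Riven then scans torrents via /mnt/zurg/__all__ (RIVEN_SYMLINK_RCLONE_PATH above)
```
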
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | riven-frontend:
3 | image: spoked/riven-frontend:latest
4 | container_name: riven-frontend
5 | restart: unless-stopped
6 | tty: true
7 | environment:
8 | - TZ=Etc/UTC
9 | ports:
10 | - 3000:3000
11 | volumes:
12 | - ./config:/riven/config # You need to mount a config directory here (different from riven)
13 | depends_on:
14 | riven:
15 | condition: service_started
16 |
17 | riven:
18 | image: spoked/riven:latest
19 | container_name: riven
20 | restart: unless-stopped
21 | ports:
22 | - "8080:8080"
23 | tty: true
24 | environment:
25 | - PUID=1000
26 | - PGID=1000
27 | - TZ=America/New_York
28 | - RIVEN_FORCE_ENV=true
29 | - RIVEN_DATABASE_HOST=postgresql+psycopg2://postgres:postgres@riven-db/riven
30 | healthcheck:
31 | test: curl -s http://localhost:8080 >/dev/null || exit 1
32 | interval: 30s
33 | timeout: 10s
34 | retries: 10
35 | volumes:
36 | - ./data:/riven/data
37 | - /mnt:/mnt
38 | depends_on:
39 | riven_postgres:
40 | condition: service_healthy
41 |
42 | riven_postgres:
43 | image: postgres:17-alpine
44 | container_name: riven-db
45 | environment:
46 | PGDATA: /var/lib/postgresql/data/pgdata
47 | POSTGRES_USER: postgres
48 | POSTGRES_PASSWORD: postgres
49 | POSTGRES_DB: riven
50 | volumes:
51 | - ./riven-db:/var/lib/postgresql/data/pgdata
52 | healthcheck:
53 | test: ["CMD-SHELL", "pg_isready -U postgres"]
54 | interval: 10s
55 | timeout: 5s
56 | retries: 5
57 |
--------------------------------------------------------------------------------
/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | # Default PUID and PGID to 1000 if not set
4 | PUID=${PUID:-1000}
5 | PGID=${PGID:-1000}
6 |
7 | echo "Starting Container with $PUID:$PGID permissions..."
8 |
9 | if [ "$PUID" = "0" ]; then
10 | echo "Running as root user"
11 | USER_HOME="/root"
12 | mkdir -p "$USER_HOME"
13 | else
14 | # Validate PUID and PGID are integers
15 | if ! echo "$PUID" | grep -qE '^[0-9]+$'; then
16 | echo "PUID is not a valid integer. Exiting..."
17 | exit 1
18 | fi
19 |
20 | if ! echo "$PGID" | grep -qE '^[0-9]+$'; then
21 | echo "PGID is not a valid integer. Exiting..."
22 | exit 1
23 | fi
24 |
25 | # Default USERNAME, GROUPNAME, and USER_HOME if not set
26 | USERNAME=${USERNAME:-riven}
27 | GROUPNAME=${GROUPNAME:-riven}; USER_HOME="/home/$USERNAME" # must be set before adduser uses it below
28 |
29 | # Create group if it doesn't exist
30 | if ! getent group "$PGID" > /dev/null; then
31 | addgroup --gid "$PGID" "$GROUPNAME"
32 | if [ $? -ne 0 ]; then
33 | echo "Failed to create group. Exiting..."
34 | exit 1
35 | fi
36 | else
37 | GROUPNAME=$(getent group "$PGID" | cut -d: -f1)
38 | fi
39 |
40 | # Create user if it doesn't exist
41 | if ! getent passwd "$USERNAME" > /dev/null; then
42 | adduser -D -h "$USER_HOME" -u "$PUID" -G "$GROUPNAME" "$USERNAME"
43 | if [ $? -ne 0 ]; then
44 | echo "Failed to create user. Exiting..."
45 | exit 1
46 | fi
47 | else
48 | if [ "$PUID" -ne 0 ]; then
49 | usermod -u "$PUID" -g "$PGID" "$USERNAME"
50 | if [ $? -ne 0 ]; then
51 | echo "Failed to modify user UID/GID. Exiting..."
52 | exit 1
53 | fi
54 | else
55 | echo "Skipping usermod for root user."
56 | fi
57 | fi
58 |
59 | USER_HOME="/home/$USERNAME"
60 | mkdir -p "$USER_HOME"
61 | chown -R "$PUID:$PGID" "$USER_HOME"
62 | chown -R "$PUID:$PGID" /riven/data
63 | fi
64 |
65 | umask 002
66 |
67 | export XDG_CONFIG_HOME="$USER_HOME/.config"
68 | export XDG_DATA_HOME="$USER_HOME/.local/share"
69 | export POETRY_CACHE_DIR="$USER_HOME/.cache/pypoetry"
70 | export HOME="$USER_HOME"
71 |
72 | # Ensure poetry is in the PATH
73 | export PATH="$PATH:/app/.venv/bin"
74 |
75 | echo "Container Initialization complete."
76 |
77 | echo "Starting Riven (Backend)..."
78 | if [ "$PUID" = "0" ]; then
79 | if [ "${DEBUG}" != "" ]; then # any non-empty DEBUG value enables debugpy
80 | cd /riven/src && poetry add debugpy && poetry run python3 -m debugpy --listen 0.0.0.0:5678 main.py
81 | else
82 | cd /riven/src && poetry run python3 main.py
83 | fi
84 | else
85 | if [ "${DEBUG}" != "" ]; then # any non-empty DEBUG value enables debugpy
86 | poetry add debugpy
87 | exec su -m $USERNAME -c "cd /riven/src && poetry run python3 -m debugpy --listen 0.0.0.0:5678 main.py"
88 | else
89 | su -m "$USERNAME" -c "cd /riven/src && poetry run python3 main.py"
90 | fi
91 | fi
--------------------------------------------------------------------------------
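
Because the entrypoint derives the runtime user from PUID/PGID, a standalone run outside compose is a matter of passing the same env vars the compose files use (a sketch):

```bash
# PUID/PGID map the container user to a host user; setting DEBUG would start debugpy on :5678
docker run --rm \
  -e PUID=1000 -e PGID=1000 -e TZ=Etc/UTC \
  -p 8080:8080 \
  -v "$PWD/data:/riven/data" \
  spoked/riven:latest
```
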
/makefile:
--------------------------------------------------------------------------------
1 | .PHONY: help install run start start-dev stop stop-dev restart logs logs-dev shell setup-builder build push push-dev push-branch tidy clean hard_reset update diff format check lint sort test coverage pr-ready
2 |
3 | # Detect operating system
4 | ifeq ($(OS),Windows_NT)
5 | # For Windows
6 | DATA_PATH := $(shell echo %cd%)\data
7 | else
8 | # For Linux
9 | DATA_PATH := $(PWD)/data
10 | endif
11 |
12 | BRANCH_NAME := $(shell git rev-parse --abbrev-ref HEAD | sed 's/[^a-zA-Z0-9]/-/g' | tr '[:upper:]' '[:lower:]')
13 | COMMIT_HASH := $(shell git rev-parse --short HEAD)
14 |
15 | help:
16 | @echo "Riven Local Development Environment"
17 | @echo "-------------------------------------------------------------------------"
18 | @echo "install : Install the required packages"
19 | @echo "run : Run the Riven src"
20 | @echo "start : Build and run the Riven container (requires Docker)"
21 | @echo "start-dev : Build and run the Riven container in development mode (requires Docker)"
22 | @echo "stop : Stop and remove the Riven container (requires Docker)"
23 | @echo "logs : Show the logs of the Riven container (requires Docker)"
24 | @echo "logs-dev : Show the logs of the Riven container in development mode (requires Docker)"
25 | @echo "clean : Remove all the temporary files"
26 | @echo "format : Format the code using isort"
27 | @echo "lint : Lint the code using ruff and isort"
28 | @echo "test : Run the tests using pytest"
29 | @echo "coverage : Run the tests and generate coverage report"
30 | @echo "pr-ready : Run the linter and tests"
31 | @echo "-------------------------------------------------------------------------"
32 | # Docker related commands
33 |
34 | start: stop
35 | @docker compose -f docker-compose.yml up --build -d --force-recreate --remove-orphans
36 | @docker compose -f docker-compose.yml logs -f
37 |
38 | start-dev: stop-dev
39 | @docker compose -f docker-compose-dev.yml up --build -d --force-recreate --remove-orphans
40 | @docker compose -f docker-compose-dev.yml logs -f
41 |
42 | stop:
43 | @docker compose -f docker-compose.yml down
44 |
45 | stop-dev:
46 | @docker compose -f docker-compose-dev.yml down
47 |
48 | restart:
49 | @docker restart riven
50 | @docker logs -f riven
51 |
52 | logs:
53 | @docker logs -f riven
54 |
55 | logs-dev:
56 | @docker compose -f docker-compose-dev.yml logs -f
57 |
58 | shell:
59 | @docker exec -it riven fish
60 |
61 | # Ensure the Buildx builder is set up
62 | setup-builder:
63 | @if ! docker buildx ls | grep -q "mybuilder"; then \
64 | echo "Creating Buildx builder..."; \
65 | docker buildx create --use --name mybuilder --driver docker-container; \
66 | else \
67 | echo "Using existing Buildx builder..."; \
68 | fi
69 |
70 | # Build multi-architecture image (local only, no push)
71 | build: setup-builder
72 | @docker buildx build --platform linux/amd64,linux/arm64 -t riven --load .
73 |
74 | # Build and push multi-architecture release image
75 | push: setup-builder
76 | @echo "Building and pushing release image to Docker Hub..."
77 | @docker buildx build --platform linux/amd64,linux/arm64 -t spoked/riven:latest --push .
78 | @echo "Image 'spoked/riven:latest' pushed to Docker Hub"
79 |
80 | # Build and push multi-architecture dev image
81 | push-dev: setup-builder
82 | @echo "Building and pushing dev image to Docker Hub..."
83 | @docker buildx build --platform linux/amd64,linux/arm64 -t spoked/riven:dev --push .
84 | @echo "Image 'spoked/riven:dev' pushed to Docker Hub"
85 |
86 | push-branch: setup-builder
87 | @echo "Building and pushing branch '${BRANCH_NAME}' image to Docker Hub..."
88 | @docker buildx build --platform linux/amd64,linux/arm64 -t spoked/riven:${BRANCH_NAME} --push .
89 | @echo "Image 'spoked/riven:${BRANCH_NAME}' pushed to Docker Hub"
90 |
91 | tidy:
92 | @docker rmi $$(docker images | awk '$$1 == "<none>" || $$1 == "riven" {print $$3}') -f
93 |
94 |
95 | # Poetry related commands
96 |
97 | clean:
98 | @find . -type f -name '*.pyc' -exec rm -f {} +
99 | @find . -type d -name '__pycache__' -exec rm -rf {} +
100 | @find . -type d -name '.pytest_cache' -exec rm -rf {} +
101 | @find . -type d -name '.ruff_cache' -exec rm -rf {} +
102 |
103 | hard_reset: clean
104 | @poetry run python src/main.py --hard_reset_db
105 |
106 | install:
107 | @poetry install --with dev
108 |
109 | update:
110 | @poetry cache clear PyPI --all
111 | @poetry update
112 |
113 | diff:
114 | @git diff HEAD~1 HEAD
115 |
116 | # Run the application
117 | run:
118 | @poetry run python src/main.py
119 |
120 | # Code quality commands
121 | format:
122 | @poetry run isort src
123 |
124 | check:
125 | @poetry run pyright
126 |
127 | lint:
128 | @poetry run ruff check src
129 | @poetry run isort --check-only src
130 |
131 | sort:
132 | @poetry run isort src
133 |
134 | test:
135 | @poetry run pytest src
136 |
137 | coverage: clean
138 | @poetry run pytest src --cov=src --cov-report=xml --cov-report=term
139 |
140 | # Run the linter and tests
141 | pr-ready: clean lint test
142 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "riven"
3 | version = "0.21.21"
4 | description = "Plex torrent streaming through Real Debrid and 3rd party services like Overseerr, Mdblist, etc."
5 | authors = ["Riven Developers"]
6 | license = "GPL-3.0"
7 | readme = "README.md"
8 | package-mode = false
9 |
10 | [tool.poetry.dependencies]
11 | python = "^3.11"
12 | dill = "^0.3.8"
13 | plexapi = "^4.15.10"
14 | requests = "^2.31.0"
15 | xmltodict = "^0.13.0"
16 | lxml = "^5.1.0"
17 | pydantic = "^2.6.3"
18 | fastapi = "^0.110.0"
19 | uvicorn = {extras = ["standard"], version = "^0.30.6"}
20 | apscheduler = "^3.10.4"
21 | regex = "^2023.12.25"
22 | coverage = "^7.6.8"
23 | cachetools = "^5.3.3"
24 | loguru = "^0.7.2"
25 | rich = "^13.7.1"
26 | opentelemetry-api = "^1.25.0"
27 | opentelemetry-sdk = "^1.25.0"
28 | opentelemetry-exporter-prometheus = "^0.46b0"
29 | prometheus-client = "^0.20.0"
30 | sqlalchemy = "^2.0.31"
31 | sqla-wrapper = "^6.0.0"
32 | alembic = "^1.13.2"
33 | psycopg2-binary = "^2.9.9"
34 | apprise = "^1.8.1"
35 | subliminal = "^2.2.1"
36 | rank-torrent-name = ">=1.5,<2.0"
37 | jsonschema = "^4.23.0"
38 | scalar-fastapi = "^1.0.3"
39 | psutil = "^6.0.0"
40 | python-dotenv = "^1.0.1"
41 | requests-ratelimiter = "^0.7.0"
42 | requests-cache = "^1.2.1"
43 | kink = "^0.8.1"
44 | bencodepy = "^0.9.5"
45 |
46 | [tool.poetry.group.dev.dependencies]
47 | pyright = "^1.1.352"
48 | pyperf = "^2.2.0"
49 | pytest = "^8.3.2"
50 | pytest-mock = "^3.14.0"
51 | responses = "0.25.3"
52 | pyfakefs = "^5.4.1"
53 | ruff = "^0.7.2"
54 | isort = "^5.10.1"
55 | codecov = "^2.1.13"
56 | httpx = "^0.27.0"
57 | # memray = "^1.13.4"
58 | testcontainers = "^4.8.0"
59 | mypy = "^1.11.2"
60 |
61 | [tool.poetry.group.test]
62 | optional = true
63 |
64 | [tool.poetry.group.test.dependencies]
65 | pytest = "^8.3.2"
66 |
67 | [build-system]
68 | requires = ["poetry-core"]
69 | build-backend = "poetry.core.masonry.api"
70 |
71 | [tool.isort]
72 | profile = "black"
73 |
74 | [tool.black]
75 | line-length = 88
76 | include = '\.pyi?$'
77 | exclude = '''
78 | /(
79 | \.git
80 | | \.hg
81 | | \.mypy_cache
82 | | \.tox
83 | | \.venv
84 | | _build
85 | | buck-out
86 | | build
87 | | dist
88 | )/
89 | '''
90 |
91 | [tool.ruff.lint]
92 | # https://docs.astral.sh/ruff/rules/
93 | ignore = [
94 | "PLR0913", # flask8-bugbear: Too many arguments for a method or function
95 | "PLR0911", # flask8-bugbear: Too many return statements
96 | "PLR2004", # flake8-bugbear: Magic value used in comparison
97 | "S104", # flake8-bandit: Possible binding to all interfaces
98 | "S108", # flake8-bandit: Probable insecure usage of temp file/directory
99 | "S311", # flake8-bandit: Standard pseudo-random generators are not suitable for security/cryptographic purposes
100 | "S101", # ruff: Ignore assert warnings on tests
101 | "RET505", #
102 | "RET503", # ruff: Ignore required explicit returns (is this desired?)
103 | "SLF001", # private member accessing from pickle
104 | "B904" # ruff: ignore raising exceptions from except for the API
105 | ]
106 | extend-select = [
107 | "I", # isort
108 | "C90", # mccabe complexity
109 | "B", # flake8-bugbear
110 | "PL", # pycodestyle
111 | "S", # flake8-bandit
112 | "T10", # flake8-debugger
113 | "PIE", # flake8-pie
114 | "T20", # flake8-print
115 | "Q", # flake8-quotes
116 | "RSE", # flake8-raise
117 | "RET", # flake8-return
118 | "SLF", # flake8-self
119 | "SIM", # flake8-simplify
120 | "ARG", # flake8-unused-arguments
121 | ]
--------------------------------------------------------------------------------
/src/.gitignore:
--------------------------------------------------------------------------------
1 | # Created by https://www.toptal.com/developers/gitignore/api/python,pythonvanilla,visualstudiocode,pydev,pycharm
2 | # Edit at https://www.toptal.com/developers/gitignore?templates=python,pythonvanilla,visualstudiocode,pydev,pycharm
3 |
4 | ### PyCharm ###
5 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
6 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
7 |
8 | # Gaisberg PD Project specific stuff
9 | .vscode
10 | __pycache__
11 | settings.json
12 | *.log
13 | data
14 |
15 | # User-specific stuff
16 | .idea/**/workspace.xml
17 | .idea/**/tasks.xml
18 | .idea/**/usage.statistics.xml
19 | .idea/**/dictionaries
20 | .idea/**/shelf
21 |
22 | # AWS User-specific
23 | .idea/**/aws.xml
24 |
25 | # CMake
26 | cmake-build-*/
27 |
28 | # IntelliJ
29 | out/
30 |
31 | # JIRA plugin
32 | atlassian-ide-plugin.xml
33 |
34 |
35 | # Editor-based Rest Client
36 | .idea/httpRequests
37 |
38 | ### Python ###
39 | # Byte-compiled / optimized / DLL files
40 | __pycache__/
41 | *.py[cod]
42 | *$py.class
43 |
44 | # C extensions
45 | *.so
46 |
47 | # Distribution / packaging
48 | .Python
49 | build/
50 | develop-eggs/
51 | dist/
52 | downloads/
53 | eggs/
54 | .eggs/
55 | lib/
56 | lib64/
57 | parts/
58 | sdist/
59 | var/
60 | wheels/
61 | share/python-wheels/
62 | *.egg-info/
63 | .installed.cfg
64 | *.egg
65 | MANIFEST
66 |
67 | # PyInstaller
68 | # Usually these files are written by a python script from a template
69 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
70 | *.manifest
71 | *.spec
72 |
73 | # Installer logs
74 | pip-log.txt
75 | pip-delete-this-directory.txt
76 |
77 | # Unit test / coverage reports
78 | htmlcov/
79 | .tox/
80 | .nox/
81 | .coverage
82 | .coverage.*
83 | .cache
84 | nosetests.xml
85 | coverage.xml
86 | *.cover
87 | *.py,cover
88 | .hypothesis/
89 | .pytest_cache/
90 | cover/
91 |
92 | # Translations
93 | *.mo
94 | *.pot
95 |
96 | # Flask stuff:
97 | instance/
98 | .webassets-cache
99 |
100 | # Scrapy stuff:
101 | .scrapy
102 |
103 | # Sphinx documentation
104 | docs/_build/
105 |
106 | # PyBuilder
107 | .pybuilder/
108 | target/
109 |
110 | # Jupyter Notebook
111 | .ipynb_checkpoints
112 |
113 | # IPython
114 | profile_default/
115 | ipython_config.py
116 |
117 | # pyenv
118 | # For a library or package, you might want to ignore these files since the code is
119 | # intended to run in multiple environments; otherwise, check them in:
120 | # .python-version
121 |
122 | # pipenv
123 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
124 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
125 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
126 | # install all needed dependencies.
127 | #Pipfile.lock
128 |
129 | # poetry
130 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
131 | # This is especially recommended for binary packages to ensure reproducibility, and is more
132 | # commonly ignored for libraries.
133 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
134 | #poetry.lock
135 |
136 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
137 | __pypackages__/
138 |
139 | # Celery stuff
140 | celerybeat-schedule
141 | celerybeat.pid
142 |
143 | # Environments
144 | .env
145 | .venv
146 | env/
147 | venv/
148 | ENV/
149 | env.bak/
150 | venv.bak/
151 |
152 | # PyCharm
153 | # JetBrains specific template is maintainted in a separate JetBrains.gitignore that can
154 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
155 | # and can be added to the global gitignore or merged into this file. For a more nuclear
156 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
157 | #.idea/
158 |
159 | ### PythonVanilla ###
160 | # Byte-compiled / optimized / DLL files
161 |
162 | # C extensions
163 |
164 | # Distribution / packaging
165 |
166 | # Installer logs
167 |
168 | # Unit test / coverage reports
169 |
170 | # Translations
171 |
172 | # pyenv
173 | # For a library or package, you might want to ignore these files since the code is
174 | # intended to run in multiple environments; otherwise, check them in:
175 | # .python-version
176 |
177 | # pipenv
178 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
179 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
180 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
181 | # install all needed dependencies.
182 |
183 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
184 |
185 |
186 | ### VisualStudioCode ###
187 | .vscode/*
188 | !.vscode/settings.json
189 | !.vscode/tasks.json
190 | !.vscode/launch.json
191 | !.vscode/extensions.json
192 | !.vscode/*.code-snippets
193 |
194 | # Local History for Visual Studio Code
195 | .history/
196 |
197 | # Built Visual Studio Code Extensions
198 | *.vsix
199 |
200 | ### VisualStudioCode Patch ###
201 | # Ignore all local history of files
202 | .history
203 | .ionide
204 |
--------------------------------------------------------------------------------
/src/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rivenmedia/riven/d91dd254c08fbb410706d4fc6cb97f3691ebc67c/src/__init__.py
--------------------------------------------------------------------------------
/src/alembic.ini:
--------------------------------------------------------------------------------
1 | # A generic, single database configuration.
2 |
3 | [alembic]
4 |
5 | script_location = %(here)s/alembic
6 | file_template = %%(year)d%%(month).2d%%(day).2d_%%(hour).2d%%(minute).2d_%%(rev)s_%%(slug)s
7 | prepend_sys_path = .
8 | truncate_slug_length = 40
9 | version_locations = %(here)s/alembic/versions
10 | version_path_separator = os
11 | output_encoding = utf-8
12 | sqlalchemy.url = driver://user:pass@localhost/dbname
--------------------------------------------------------------------------------
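A note on the config above: the doubled `%%` in `file_template` escapes configparser interpolation, so Alembic receives an old-style format string and fills it when a revision is generated. A quick sketch of the expansion, using the values from one of the checked-in migrations:

# Sketch: how Alembic expands file_template for a new revision file.
template = "%(year)d%(month).2d%(day).2d_%(hour).2d%(minute).2d_%(rev)s_%(slug)s"
values = {
    "year": 2025, "month": 2, "day": 10, "hour": 7, "minute": 39,
    "rev": "d6c06f357feb", "slug": "v0_21_0_add_pause_and_retry",
}
print(template % values)
# -> 20250210_0739_d6c06f357feb_v0_21_0_add_pause_and_retry
# matching src/alembic/versions/20250210_0739_d6c06f357feb_v0_21_0_add_pause_and_retry.py

--------------------------------------------------------------------------------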
/src/alembic/env.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from loguru import logger
4 | from sqlalchemy import engine_from_config, pool, text
5 | from sqlalchemy.exc import OperationalError, ProgrammingError
6 |
7 | from alembic import context
8 | from program.db.db import db
9 | from program.settings.manager import settings_manager
10 |
11 |
12 | # Loguru handler for alembic logs
13 | class LoguruHandler(logging.Handler):
14 | def emit(self, record):
15 | logger.opt(depth=1, exception=record.exc_info).log("DATABASE", record.getMessage())
16 |
17 | if settings_manager.settings.debug_database:
18 | # Configure only alembic and SQLAlchemy loggers
19 | logging.getLogger("alembic").handlers = [LoguruHandler()]
20 | logging.getLogger("alembic").propagate = False
21 | logging.getLogger("sqlalchemy").handlers = [LoguruHandler()]
22 | logging.getLogger("sqlalchemy").propagate = False
23 |
24 | # Set log levels
25 | logging.getLogger("alembic").setLevel(logging.DEBUG if settings_manager.settings.debug else logging.FATAL)
26 | logging.getLogger("sqlalchemy").setLevel(logging.DEBUG if settings_manager.settings.debug else logging.FATAL)
27 |
28 | # Alembic configuration
29 | config = context.config
30 | config.set_main_option("sqlalchemy.url", settings_manager.settings.database.host)
31 |
32 | # Set MetaData object for autogenerate support
33 | target_metadata = db.Model.metadata
34 |
35 | def reset_database(connection) -> bool:
36 | """Reset database if needed"""
37 | try:
38 | # Drop and recreate schema
39 | if db.engine.name == "postgresql":
40 | connection.execute(text("""
41 | SELECT pg_terminate_backend(pid)
42 | FROM pg_stat_activity
43 | WHERE datname = current_database()
44 | AND pid <> pg_backend_pid()
45 | """))
46 | connection.execute(text("DROP SCHEMA public CASCADE"))
47 | connection.execute(text("CREATE SCHEMA public"))
48 | connection.execute(text("GRANT ALL ON SCHEMA public TO public"))
49 |
50 | logger.log("DATABASE", "Database reset complete")
51 | return True
52 | except Exception as e:
53 | logger.error(f"Database reset failed: {e}")
54 | return False
55 |
56 | def run_migrations_offline() -> None:
57 | """Run migrations in 'offline' mode."""
58 | url = config.get_main_option("sqlalchemy.url")
59 | context.configure(
60 | url=url,
61 | target_metadata=target_metadata,
62 | literal_binds=True,
63 | dialect_opts={"paramstyle": "named"},
64 | )
65 |
66 | with context.begin_transaction():
67 | context.run_migrations()
68 |
69 | def run_migrations_online() -> None:
70 | """Run migrations in 'online' mode."""
71 | connectable = engine_from_config(
72 | config.get_section(config.config_ini_section),
73 | prefix="sqlalchemy.",
74 | poolclass=pool.NullPool,
75 | )
76 |
77 | with connectable.connect() as connection:
78 | connection = connection.execution_options(isolation_level="AUTOCOMMIT")
79 | try:
80 | context.configure(
81 | connection=connection,
82 | target_metadata=target_metadata,
83 | compare_type=True, # Compare column types
84 | compare_server_default=True, # Compare default values
85 | include_schemas=True, # Include schema in migrations
86 | render_as_batch=True, # Enable batch operations
87 | )
88 |
89 | with context.begin_transaction():
90 | logger.debug("Starting migrations...")
91 | context.run_migrations()
92 | logger.debug("Migrations completed successfully")
93 |
94 | except (OperationalError, ProgrammingError) as e:
95 | logger.error(f"Database error during migration: {e}")
96 | logger.warning("Attempting database reset...")
97 |
98 | if reset_database(connection):
99 | # Configure alembic again after reset
100 | context.configure(
101 | connection=connection,
102 | target_metadata=target_metadata,
103 | compare_type=True,
104 | compare_server_default=True,
105 | include_schemas=True,
106 | render_as_batch=True,
107 | )
108 |
109 | # Try migrations again
110 | with context.begin_transaction():
111 | logger.debug("Rerunning migrations after reset...")
112 | context.run_migrations()
113 | logger.debug("Migrations completed successfully")
114 | else:
115 | raise Exception("Migration recovery failed")
116 |
117 | except Exception as e:
118 | logger.error(f"Unexpected error during migration: {e}")
119 | raise
120 |
121 | if context.is_offline_mode():
122 | run_migrations_offline()
123 | else:
124 | run_migrations_online()
--------------------------------------------------------------------------------
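Whether `run_migrations_offline` or `run_migrations_online` executes is decided by how Alembic is invoked. A minimal sketch using Alembic's Python API (the config path here is an assumption; `run_migrations()` in `src/program/db/db.py` resolves it via `root_dir`):

from alembic import command
from alembic.config import Config

cfg = Config("src/alembic.ini")          # path assumed; db.py builds it from root_dir
command.upgrade(cfg, "head")             # online mode: connects and runs migrations
command.upgrade(cfg, "head", sql=True)   # offline mode: emits the SQL instead of executing it

--------------------------------------------------------------------------------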
/src/alembic/script.py.mako:
--------------------------------------------------------------------------------
1 | """${message}
2 |
3 | Revision ID: ${up_revision}
4 | Revises: ${down_revision | comma,n}
5 | Create Date: ${create_date}
6 |
7 | """
8 | from typing import Sequence, Union
9 |
10 | from alembic import op
11 | import sqlalchemy as sa
12 | ${imports if imports else ""}
13 |
14 | # revision identifiers, used by Alembic.
15 | revision: str = ${repr(up_revision)}
16 | down_revision: Union[str, None] = ${repr(down_revision)}
17 | branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
18 | depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
19 |
20 |
21 | def upgrade() -> None:
22 | ${upgrades if upgrades else "pass"}
23 |
24 |
25 | def downgrade() -> None:
26 | ${downgrades if downgrades else "pass"}
27 |
--------------------------------------------------------------------------------
/src/alembic/versions/20250210_0739_d6c06f357feb_v0_21_0_add_pause_and_retry.py:
--------------------------------------------------------------------------------
1 | """add failed attempts
2 |
3 | Revision ID: d6c06f357feb
4 | Revises: c99709e3648f
5 | Create Date: 2025-02-10 07:39:51.600870
6 |
7 | """
8 | from typing import Sequence, Union
9 |
10 | from alembic import op
11 | import sqlalchemy as sa
12 | from sqlalchemy.engine.reflection import Inspector
13 |
14 |
15 | # revision identifiers, used by Alembic.
16 | revision: str = 'd6c06f357feb'
17 | down_revision: Union[str, None] = 'c99709e3648f'
18 | branch_labels: Union[str, Sequence[str], None] = None
19 | depends_on: Union[str, Sequence[str], None] = None
20 |
21 |
22 | def upgrade():
23 | op.execute("ALTER TYPE states ADD VALUE IF NOT EXISTS 'Paused'")
24 |
25 | conn = op.get_bind()
26 | inspector = Inspector.from_engine(conn)
27 | columns = [col['name'] for col in inspector.get_columns('MediaItem')]
28 |
29 | if 'failed_attempts' not in columns:
30 | op.add_column('MediaItem',
31 | sa.Column('failed_attempts',
32 | sa.Integer(),
33 | nullable=True,
34 | server_default='0')
35 | )
36 |
37 |
38 | def downgrade():
39 | conn = op.get_bind()
40 | inspector = Inspector.from_engine(conn)
41 | columns = [col['name'] for col in inspector.get_columns('MediaItem')]
42 |
43 | if 'failed_attempts' in columns:
44 | op.drop_column('MediaItem', 'failed_attempts')
45 |
46 | # Note: PostgreSQL doesn't support removing enum values
47 | # If we need to remove the states, we'd need to:
48 | # 1. Create a new enum without those values
49 | # 2. Update the column to use the new enum
50 | # 3. Drop the old enum
51 | # This is left commented out as it's usually not worth the complexity
52 | """
53 | # Example of how to remove enum values (if needed):
54 | op.execute('''
55 | CREATE TYPE states_new AS ENUM (
56 | 'Unknown', 'Unreleased', 'Ongoing', 'Requested', 'Indexed',
57 | 'Scraped', 'Downloaded', 'Symlinked', 'Completed', 'PartiallyCompleted'
58 | )
59 | ''')
60 | op.execute('ALTER TABLE "MediaItem" ALTER COLUMN last_state TYPE states_new USING last_state::text::states_new')
61 | op.execute('DROP TYPE states')
62 | op.execute('ALTER TYPE states_new RENAME TO states')
63 | """
--------------------------------------------------------------------------------
/src/alembic/versions/20250331_2136_834cba7d26b4_add_trakt_id_attribute_to_mediaitem.py:
--------------------------------------------------------------------------------
1 | """add trakt_id attribute to mediaitem
2 |
3 | Revision ID: 834cba7d26b4
4 | Revises: d6c06f357feb
5 | Create Date: 2025-03-31 21:36:38.574921
6 |
7 | """
8 | from typing import Sequence, Union
9 |
10 | from alembic import op
11 | import sqlalchemy as sa
12 |
13 |
14 | # revision identifiers, used by Alembic.
15 | revision: str = '834cba7d26b4'
16 | down_revision: Union[str, None] = 'd6c06f357feb'
17 | branch_labels: Union[str, Sequence[str], None] = None
18 | depends_on: Union[str, Sequence[str], None] = None
19 |
20 |
21 | def upgrade() -> None:
22 | # ### commands auto generated by Alembic - please adjust! ###
23 | with op.batch_alter_table('MediaItem', schema=None) as batch_op:
24 | batch_op.add_column(sa.Column('trakt_id', sa.String(), nullable=True))
25 | batch_op.alter_column('failed_attempts',
26 | existing_type=sa.INTEGER(),
27 | server_default=None,
28 | existing_nullable=True)
29 |
30 | # ### end Alembic commands ###
31 |
32 |
33 | def downgrade() -> None:
34 | # ### commands auto generated by Alembic - please adjust! ###
35 | with op.batch_alter_table('MediaItem', schema=None) as batch_op:
36 | batch_op.alter_column('failed_attempts',
37 | existing_type=sa.INTEGER(),
38 | server_default=sa.text('0'),
39 | existing_nullable=True)
40 | batch_op.drop_column('trakt_id')
41 |
42 | # ### end Alembic commands ###
43 |
--------------------------------------------------------------------------------
/src/auth.py:
--------------------------------------------------------------------------------
1 | from typing import Optional, Annotated
2 |
3 | from fastapi import HTTPException, Security, status, Query
4 | from fastapi.security import APIKeyHeader, HTTPAuthorizationCredentials, HTTPBearer
5 |
6 | from program.settings.manager import settings_manager
7 |
8 |
9 | def header_auth(header=Security(APIKeyHeader(name="x-api-key", auto_error=False))):
10 | return header == settings_manager.settings.api_key
11 |
12 | def bearer_auth(bearer: HTTPAuthorizationCredentials = Security(HTTPBearer(auto_error=False))):
13 | return bearer and bearer.credentials == settings_manager.settings.api_key
14 |
15 | def resolve_api_key(
16 | header: Optional[str] = Security(header_auth),
17 | bearer: Optional[HTTPAuthorizationCredentials] = Security(bearer_auth)
18 | ):
19 | if not (header or bearer):
20 | raise HTTPException(
21 | status_code=status.HTTP_401_UNAUTHORIZED,
22 | detail="Invalid authentication credentials"
23 | )
24 |
25 | def resolve_ws_api_key(
26 | api_key: Annotated[str | None, Query()] = None
27 | ):
28 | if not (api_key and api_key == settings_manager.settings.api_key):
29 | raise HTTPException(
30 | status_code=status.HTTP_401_UNAUTHORIZED,
31 | detail="Invalid authentication credentials"
32 | )
--------------------------------------------------------------------------------
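A minimal sketch (not part of the repo) of how these resolvers guard routes: `resolve_api_key` accepts either the `x-api-key` header or a Bearer token, while `resolve_ws_api_key` reads an `api_key` query parameter for websocket clients.

from fastapi import APIRouter, Depends, WebSocket

from auth import resolve_api_key, resolve_ws_api_key

router = APIRouter()

@router.get("/protected", dependencies=[Depends(resolve_api_key)])
async def protected():
    return {"ok": True}

@router.websocket("/ws")
async def ws(websocket: WebSocket, _=Depends(resolve_ws_api_key)):
    await websocket.accept()

--------------------------------------------------------------------------------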
/src/main.py:
--------------------------------------------------------------------------------
1 | import contextlib
2 | import signal
3 | import sys
4 | import threading
5 | import time
6 | import traceback
7 |
8 | import uvicorn
9 | from dotenv import load_dotenv
10 | load_dotenv()  # must run before program imports so SETTINGS_FILENAME is available when settings load
11 |
12 | from fastapi import FastAPI
13 | from fastapi.middleware.cors import CORSMiddleware
14 | from loguru import logger
15 | from scalar_fastapi import get_scalar_api_reference
16 | from starlette.middleware.base import BaseHTTPMiddleware
17 | from starlette.requests import Request
18 |
19 | from program import Program
20 | from program.settings.models import get_version
21 | from program.utils.cli import handle_args
22 | from routers import app_router
23 |
24 | class LoguruMiddleware(BaseHTTPMiddleware):
25 | async def dispatch(self, request: Request, call_next):
26 | start_time = time.time()
27 | try:
28 | response = await call_next(request)
29 | except Exception as e:
30 | logger.exception(f"Exception during request processing: {e}")
31 | raise
32 | finally:
33 | process_time = time.time() - start_time
34 | logger.log(
35 | "API",
36 | f"{request.method} {request.url.path} - {response.status_code if 'response' in locals() else '500'} - {process_time:.2f}s",
37 | )
38 | return response
39 |
40 | args = handle_args()
41 |
42 | app = FastAPI(
43 | title="Riven",
44 | summary="A media management system.",
45 | version=get_version(),
46 | redoc_url=None,
47 | license_info={
48 | "name": "GPL-3.0",
49 | "url": "https://www.gnu.org/licenses/gpl-3.0.en.html",
50 | },
51 | )
52 |
53 | @app.get("/scalar", include_in_schema=False)
54 | async def scalar_html():
55 | return get_scalar_api_reference(
56 | openapi_url=app.openapi_url,
57 | title=app.title,
58 | )
59 |
60 | app.program = Program()
61 | app.add_middleware(LoguruMiddleware)
62 | app.add_middleware(
63 | CORSMiddleware,
64 | allow_origins=["*"],
65 | allow_credentials=True,
66 | allow_methods=["*"],
67 | allow_headers=["*"],
68 | )
69 |
70 | app.include_router(app_router)
71 |
72 | class Server(uvicorn.Server):
73 | def install_signal_handlers(self):
74 | pass
75 |
76 | @contextlib.contextmanager
77 | def run_in_thread(self):
78 | thread = threading.Thread(target=self.run, name="Riven")
79 | thread.start()
80 | try:
81 | while not self.started:
82 | time.sleep(1e-3)
83 | yield
84 | except Exception as e:
85 | logger.error(f"Error in server thread: {e}")
86 | logger.exception(traceback.format_exc())
87 | raise e
88 | finally:
89 | self.should_exit = True
90 | sys.exit(0)
91 |
92 | def signal_handler(signum, frame):
93 | logger.log("PROGRAM","Exiting Gracefully.")
94 | app.program.stop()
95 | sys.exit(0)
96 |
97 | signal.signal(signal.SIGINT, signal_handler)
98 | signal.signal(signal.SIGTERM, signal_handler)
99 |
100 | config = uvicorn.Config(app, host="0.0.0.0", port=args.port, log_config=None)
101 | server = Server(config=config)
102 |
103 | with server.run_in_thread():
104 | try:
105 | app.program.start()
106 | app.program.run()
107 | except Exception as e:
108 | logger.error(f"Error in main thread: {e}")
109 | logger.exception(traceback.format_exc())
110 | finally:
111 | logger.critical("Server has been stopped")
112 | sys.exit(0)
113 |
--------------------------------------------------------------------------------
/src/program/__init__.py:
--------------------------------------------------------------------------------
1 | """Program main module"""
2 |
3 | from program.media.item import MediaItem # noqa: F401
4 | from program.program import Event, Program # noqa: F401
5 |
--------------------------------------------------------------------------------
/src/program/apis/__init__.py:
--------------------------------------------------------------------------------
1 | from kink import di
2 |
3 | from program.settings.manager import settings_manager
4 |
5 | from .listrr_api import ListrrAPI, ListrrAPIError
6 | from .mdblist_api import MdblistAPI, MdblistAPIError
7 | from .overseerr_api import OverseerrAPI, OverseerrAPIError
8 | from .plex_api import PlexAPI, PlexAPIError
9 | from .trakt_api import TraktAPI, TraktAPIError
10 |
11 |
12 | def bootstrap_apis():
13 | __setup_trakt()
14 | __setup_plex()
15 | __setup_mdblist()
16 | __setup_overseerr()
17 | __setup_listrr()
18 |
19 | def __setup_trakt():
20 | traktApi = TraktAPI(settings_manager.settings.content.trakt)
21 | di[TraktAPI] = traktApi
22 |
23 | def __setup_plex():
24 | if not settings_manager.settings.updaters.plex.enabled:
25 | return
26 | plexApi = PlexAPI(settings_manager.settings.updaters.plex.token, settings_manager.settings.updaters.plex.url)
27 | di[PlexAPI] = plexApi
28 |
29 | def __setup_overseerr():
30 | if not settings_manager.settings.content.overseerr.enabled:
31 | return
32 | overseerrApi = OverseerrAPI(settings_manager.settings.content.overseerr.api_key, settings_manager.settings.content.overseerr.url)
33 | di[OverseerrAPI] = overseerrApi
34 |
35 | def __setup_mdblist():
36 | if not settings_manager.settings.content.mdblist.enabled:
37 | return
38 | mdblistApi = MdblistAPI(settings_manager.settings.content.mdblist.api_key)
39 | di[MdblistAPI] = mdblistApi
40 |
41 | def __setup_listrr():
42 | if not settings_manager.settings.content.listrr.enabled:
43 | return
44 | listrrApi = ListrrAPI(settings_manager.settings.content.listrr.api_key)
45 | di[ListrrAPI] = listrrApi
46 |
--------------------------------------------------------------------------------
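For context, `kink`'s `di` container is a mapping keyed by type, so the instances registered here are the same objects later resolved elsewhere (for example, `ListrrAPI.__init__` pulls `di[TraktAPI]`). A sketch of the pattern:

from kink import di
from program.apis import bootstrap_apis
from program.apis.trakt_api import TraktAPI

bootstrap_apis()      # registers the enabled API singletons
trakt = di[TraktAPI]  # same instance that __setup_trakt() stored

--------------------------------------------------------------------------------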
/src/program/apis/listrr_api.py:
--------------------------------------------------------------------------------
1 | from kink import di
2 | from loguru import logger
3 | from requests.exceptions import HTTPError
4 |
5 | from program.apis.trakt_api import TraktAPI
6 | from program.media.item import MediaItem
7 | from program.utils.request import (
8 | BaseRequestHandler,
9 | HttpMethod,
10 | ResponseObject,
11 | ResponseType,
12 | Session,
13 | create_service_session,
14 | )
15 |
16 |
17 | class ListrrAPIError(Exception):
18 | """Base exception for ListrrAPI related errors"""
19 |
20 | class ListrrRequestHandler(BaseRequestHandler):
21 | def __init__(self, session: Session, base_url: str, request_logging: bool = False):
22 | super().__init__(session, base_url=base_url, response_type=ResponseType.SIMPLE_NAMESPACE, custom_exception=ListrrAPIError, request_logging=request_logging)
23 |
24 | def execute(self, method: HttpMethod, endpoint: str, **kwargs) -> ResponseObject:
25 | return super()._request(method, endpoint, **kwargs)
26 |
27 | class ListrrAPI:
28 | """Handles Listrr API communication"""
29 |
30 | def __init__(self, api_key: str):
31 | self.BASE_URL = "https://listrr.pro"
32 | self.api_key = api_key
33 | self.headers = {"X-Api-Key": self.api_key}
34 | session = create_service_session()
35 | session.headers.update(self.headers)
36 | self.request_handler = ListrrRequestHandler(session, base_url=self.BASE_URL)
37 | self.trakt_api = di[TraktAPI]
38 |
39 | def validate(self):
40 | return self.request_handler.execute(HttpMethod.GET, "")
41 |
42 |     def get_items_from_Listrr(self, content_type, content_lists) -> list[str]: # noqa: C901, PLR0912
43 | """Fetch unique IMDb IDs from Listrr for a given type and list of content."""
44 | unique_ids: set[str] = set()
45 | if not content_lists:
46 | return list(unique_ids)
47 |
48 | for list_id in content_lists:
49 | if not list_id or len(list_id) != 24:
50 | continue
51 |
52 | page, total_pages = 1, 1
53 | while page <= total_pages:
54 | try:
55 | url = f"api/List/{content_type}/{list_id}/ReleaseDate/Descending/{page}"
56 | response = self.request_handler.execute(HttpMethod.GET, url)
57 | data = response.data
58 | total_pages = data.pages if hasattr(data, "pages") else 1
59 | for item in data.items if hasattr(data, "items") else []:
60 |
61 | try:
62 | imdb_id = item.imDbId or (
63 | self.trakt_api.get_imdbid_from_tmdb(item.tmDbId)
64 | if content_type == "Movies" and item.tmDbId
65 | else None
66 | )
67 |
68 | if not imdb_id:
69 | continue
70 | if imdb_id in unique_ids:
71 | logger.warning(f"Skipping duplicate item {imdb_id}")
72 | continue
73 |
74 | unique_ids.add(imdb_id)
75 | except AttributeError:
76 | logger.warning(f"Skipping item {item} as it does not have an IMDb ID or TMDb ID")
77 | continue
78 | except HTTPError as e:
79 | if e.response.status_code in [400, 404, 429, 500]:
80 | break
81 | except Exception as e:
82 | logger.error(f"An error occurred: {e}")
83 | break
84 | page += 1
85 | return list(unique_ids)
86 |
--------------------------------------------------------------------------------
/src/program/apis/mdblist_api.py:
--------------------------------------------------------------------------------
1 | from program.utils.request import (
2 | BaseRequestHandler,
3 | HttpMethod,
4 | ResponseObject,
5 | ResponseType,
6 | Session,
7 | create_service_session,
8 | get_rate_limit_params,
9 | )
10 |
11 |
12 | class MdblistAPIError(Exception):
13 | """Base exception for MdblistAPI related errors"""
14 |
15 | class MdblistRequestHandler(BaseRequestHandler):
16 | def __init__(self, session: Session, base_url: str, api_key: str, request_logging: bool = False):
17 | self.api_key = api_key
18 | super().__init__(session, base_url=base_url, response_type=ResponseType.SIMPLE_NAMESPACE, custom_exception=MdblistAPIError, request_logging=request_logging)
19 |
20 | def execute(self, method: HttpMethod, endpoint: str, ignore_base_url: bool = False, **kwargs) -> ResponseObject:
21 | return super()._request(method, endpoint, ignore_base_url=ignore_base_url, params={"apikey": self.api_key}, **kwargs)
22 |
23 |
24 | class MdblistAPI:
25 | """Handles Mdblist API communication"""
26 | BASE_URL = "https://mdblist.com"
27 |
28 | def __init__(self, api_key: str):
29 | rate_limit_params = get_rate_limit_params(per_minute=60)
30 | session = create_service_session(rate_limit_params=rate_limit_params)
31 | self.request_handler = MdblistRequestHandler(session, base_url=self.BASE_URL, api_key=api_key)
32 |
33 | def validate(self):
34 | return self.request_handler.execute(HttpMethod.GET, f"api/user")
35 |
36 | def my_limits(self):
37 | """Wrapper for mdblist api method 'My limits'"""
38 | response = self.request_handler.execute(HttpMethod.GET,f"api/user")
39 | return response.data
40 |
41 | def list_items_by_id(self, list_id: int):
42 | """Wrapper for mdblist api method 'List items'"""
43 | response = self.request_handler.execute(HttpMethod.GET,f"api/lists/{str(list_id)}/items")
44 | return response.data
45 |
46 | def list_items_by_url(self, url: str):
47 | url = url if url.endswith("/") else f"{url}/"
48 | url = url if url.endswith("json/") else f"{url}json/"
49 | response = self.request_handler.execute(HttpMethod.GET, url, ignore_base_url=True)
50 | return response.data
--------------------------------------------------------------------------------
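`list_items_by_url` normalizes whatever form of list URL the user configured so the request always hits mdblist's JSON endpoint. A worked sketch of the two suffix checks:

def normalize(url: str) -> str:
    # replicates the suffix handling in list_items_by_url
    url = url if url.endswith("/") else f"{url}/"
    return url if url.endswith("json/") else f"{url}json/"

assert normalize("https://mdblist.com/lists/user/mylist") == "https://mdblist.com/lists/user/mylist/json/"
assert normalize("https://mdblist.com/lists/user/mylist/json/") == "https://mdblist.com/lists/user/mylist/json/"

--------------------------------------------------------------------------------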
/src/program/apis/tvmaze_api.py:
--------------------------------------------------------------------------------
1 | import os
2 | from requests import Session
3 |
4 | from program.settings.models import TraktModel
5 | from program.utils.request import (
6 | BaseRequestHandler,
7 | HttpMethod,
8 | ResponseObject,
9 | ResponseType,
10 | create_service_session,
11 | get_cache_params,
12 | get_rate_limit_params
13 | )
14 |
15 |
16 | class TvmazeAPIError(Exception):
17 | """Base exception for TvmazeApi related errors"""
18 |
19 | class TvmazeRequestHandler(BaseRequestHandler):
20 | def __init__(self, session: Session, response_type=ResponseType.SIMPLE_NAMESPACE, request_logging: bool = False):
21 | super().__init__(session, response_type=response_type, custom_exception=TvmazeAPIError, request_logging=request_logging)
22 |
23 | def execute(self, method: HttpMethod, endpoint: str, **kwargs) -> ResponseObject:
24 | return super()._request(method, endpoint, **kwargs)
25 |
26 |
27 | class TvmazeAPI:
28 | """Handles TVMaze API communication"""
29 | BASE_URL = "https://api.tvmaze.com"
30 |
31 | def __init__(self, settings: TraktModel):
32 | self.settings = settings
33 | rate_limit_params = get_rate_limit_params(max_calls=1000, period=300)
34 | tvmaze_cache = get_cache_params("tvmaze", 86400)
35 | use_cache = os.environ.get("SKIP_TVMAZE_CACHE", "false").lower() == "true"
36 | session = create_service_session(rate_limit_params=rate_limit_params, use_cache=use_cache, cache_params=tvmaze_cache)
37 | session.headers.update({"Content-type": "application/json"})
38 | self.request_handler = TvmazeRequestHandler(session)
39 |
40 | def validate(self):
41 | return self.request_handler.execute(HttpMethod.GET, f"{self.BASE_URL}/lists/2")
42 |
43 | def get_show(self, tvdb_id: str = None, imdb_id: str = None) -> dict:
44 | """Wrapper for tvdb.com API show method."""
45 | if not tvdb_id and not imdb_id:
46 | return {}
47 |
48 | tvmaze_id = None
49 | lookup_params = {"thetvdb": tvdb_id, "imdb": imdb_id}
50 | lookup_param = next((key for key, value in lookup_params.items() if value), None)
51 |
52 | if lookup_param:
53 | url = f"{self.BASE_URL}/lookup/shows?{lookup_param}={lookup_params[lookup_param]}"
54 | response = self.request_handler.execute(HttpMethod.GET, url, timeout=30)
55 | if response.is_ok and response.data:
56 | tvmaze_id = response.data[0].id
57 |
58 | if tvmaze_id:
59 | url = f"{self.BASE_URL}/shows/{tvmaze_id}/episodes"
60 | response = self.request_handler.execute(HttpMethod.GET, url, timeout=30)
61 | return response.data if response.is_ok and response.data else {}
62 |
63 | return {}
64 |
--------------------------------------------------------------------------------
/src/program/db/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rivenmedia/riven/d91dd254c08fbb410706d4fc6cb97f3691ebc67c/src/program/db/__init__.py
--------------------------------------------------------------------------------
/src/program/db/db.py:
--------------------------------------------------------------------------------
1 | from loguru import logger
2 | from sqla_wrapper import SQLAlchemy
3 | from sqlalchemy import text
4 |
5 | from alembic import command
6 | from alembic.config import Config
7 | from program.settings.manager import settings_manager
8 | from program.utils import root_dir
9 |
10 | engine_options = {
11 | "pool_size": 25, # Prom: Set to 1 when debugging sql queries
12 | "max_overflow": 25, # Prom: Set to 0 when debugging sql queries
13 | "pool_pre_ping": True, # Prom: Set to False when debugging sql queries
14 | "pool_recycle": 1800, # Prom: Set to -1 when debugging sql queries
15 | "echo": False, # Prom: Set to true when debugging sql queries
16 | }
17 |
18 | # Prom: This is a good place to set the statement timeout for the database when debugging.
19 | # @event.listens_for(Engine, "connect")
20 | # def set_statement_timeout(dbapi_connection, connection_record):
21 | # cursor = dbapi_connection.cursor()
22 | # cursor.execute("SET statement_timeout = 300000")
23 | # cursor.close()
24 |
25 | db_host = settings_manager.settings.database.host
26 | db = SQLAlchemy(db_host, engine_options=engine_options)
27 |
28 | def get_db():
29 | _db = db.Session()
30 | try:
31 | yield _db
32 | finally:
33 | _db.close()
34 |
35 | def create_database_if_not_exists():
36 | """Create the database if it doesn't exist."""
37 | db_name = db_host.split("/")[-1]
38 | db_base_host = "/".join(db_host.split("/")[:-1])
39 | try:
40 | temp_db = SQLAlchemy(db_base_host, engine_options=engine_options)
41 | with temp_db.engine.connect() as connection:
42 | connection.execution_options(isolation_level="AUTOCOMMIT").execute(text(f"CREATE DATABASE {db_name}"))
43 | return True
44 | except Exception as e:
45 | logger.error(f"Failed to create database {db_name}: {e}")
46 | return False
47 |
48 | def vacuum_and_analyze_index_maintenance() -> None:
49 | # PROM: Use the raw connection to execute VACUUM outside a transaction
50 | try:
51 | with db.engine.connect() as connection:
52 | connection = connection.execution_options(isolation_level="AUTOCOMMIT")
53 | connection.execute(text("VACUUM;"))
54 | connection.execute(text("ANALYZE;"))
55 | logger.log("DATABASE","VACUUM and ANALYZE completed successfully.")
56 | except Exception as e:
57 | logger.error(f"Error during VACUUM and ANALYZE: {e}")
58 |
59 | def run_migrations():
60 | """Run any pending migrations on startup"""
61 | try:
62 | alembic_cfg = Config(root_dir / "src" / "alembic.ini")
63 | command.upgrade(alembic_cfg, "head")
64 | except Exception as e:
65 | logger.error(f"Migration failed: {e}")
66 | raise
--------------------------------------------------------------------------------
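`get_db` is a generator dependency in the usual FastAPI style: one session per request, closed in the `finally` no matter how the request ends. A sketch (not in the repo) of wiring it into a route:

from fastapi import APIRouter, Depends
from sqlalchemy import text
from sqlalchemy.orm import Session

from program.db.db import get_db

router = APIRouter()

@router.get("/items/count")
def count_items(session: Session = Depends(get_db)):
    # table name taken from the migrations above
    return {"count": session.execute(text('SELECT COUNT(*) FROM "MediaItem"')).scalar()}

--------------------------------------------------------------------------------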
/src/program/managers/sse_manager.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from typing import Any, Dict
3 |
4 |
5 | class ServerSentEventManager:
6 | def __init__(self):
7 | self.event_queues: Dict[str, asyncio.Queue] = {}
8 |
9 | def publish_event(self, event_type: str, data: Any):
10 | if not data:
11 | return
12 | if event_type not in self.event_queues:
13 | self.event_queues[event_type] = asyncio.Queue()
14 | self.event_queues[event_type].put_nowait(data)
15 |
16 | async def subscribe(self, event_type: str):
17 | if event_type not in self.event_queues:
18 | self.event_queues[event_type] = asyncio.Queue()
19 |
20 | while True:
21 | try:
22 | data = await asyncio.wait_for(self.event_queues[event_type].get(), timeout=1.0)
23 | yield f"{data}\n"
24 | except asyncio.TimeoutError:
25 | pass
26 |
27 | sse_manager = ServerSentEventManager()
--------------------------------------------------------------------------------
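`subscribe` is an async generator, so it can back a `StreamingResponse` directly; the one-second `wait_for` timeout simply re-arms the queue read in short slices. A sketch (not in the repo) of an SSE endpoint built on it:

from fastapi import APIRouter
from fastapi.responses import StreamingResponse

from program.managers.sse_manager import sse_manager

router = APIRouter()

@router.get("/events/{event_type}")
async def events(event_type: str):
    return StreamingResponse(sse_manager.subscribe(event_type), media_type="text/event-stream")

--------------------------------------------------------------------------------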
/src/program/managers/websocket_manager.py:
--------------------------------------------------------------------------------
1 | from fastapi import WebSocket, WebSocketDisconnect
2 | from typing import Dict, List, Any
3 | import asyncio
4 | from loguru import logger
5 | from datetime import datetime
6 |
7 | class ConnectionManager:
8 | def __init__(self):
9 | # Store active connections by topic
10 | self.active_connections: Dict[str, List[WebSocket]] = {}
11 | # Message queue for each topic
12 | self.message_queues: Dict[str, asyncio.Queue] = {}
13 | # Background tasks
14 | self.background_tasks: List[asyncio.Task] = []
15 |
16 | async def connect(self, websocket: WebSocket, topic: str):
17 | await websocket.accept()
18 | if topic not in self.active_connections:
19 | self.active_connections[topic] = []
20 | self.active_connections[topic].append(websocket)
21 |
22 | if topic not in self.message_queues:
23 | self.message_queues[topic] = asyncio.Queue()
24 | # Start broadcast task for this topic
25 |             task = asyncio.create_task(self._broadcast_messages(topic), name=f"broadcast_{topic}")  # named so disconnect() can find and cancel it
26 | self.background_tasks.append(task)
27 |
28 | async def disconnect(self, websocket: WebSocket, topic: str):
29 | if topic in self.active_connections:
30 | if websocket in self.active_connections[topic]:
31 | self.active_connections[topic].remove(websocket)
32 |
33 | # Clean up if no more connections for this topic
34 | if not self.active_connections[topic]:
35 | del self.active_connections[topic]
36 | # Cancel broadcast task for this topic
37 | for task in self.background_tasks:
38 | if task.get_name() == f"broadcast_{topic}":
39 | task.cancel()
40 | self.background_tasks.remove(task)
41 | break
42 |
43 | def publish(self, topic: str, message: Any):
44 | """Publish a message to a specific topic"""
45 | if topic not in self.message_queues:
46 | return # There are no connections for this topic
47 | #self.message_queues[topic] = asyncio.Queue()
48 |
49 | # Format the message with timestamp
50 | formatted_message = {
51 | "timestamp": datetime.utcnow().isoformat(),
52 | "data": message
53 | }
54 |
55 | try:
56 | self.message_queues[topic].put_nowait(formatted_message)
57 | except asyncio.QueueFull:
58 | print(f"Message queue full for topic {topic}")
59 |
60 | async def _broadcast_messages(self, topic: str):
61 | """Background task to broadcast messages for a specific topic"""
62 | try:
63 | while True:
64 | if topic in self.message_queues:
65 | message = await self.message_queues[topic].get()
66 |
67 | if topic in self.active_connections:
68 | dead_connections = []
69 | for connection in self.active_connections[topic]:
70 | try:
71 | await connection.send_json(message)
72 | except WebSocketDisconnect:
73 | dead_connections.append(connection)
74 | except Exception as e:
75 | print(f"Error sending message: {e}")
76 | dead_connections.append(connection)
77 |
78 | # Clean up dead connections
79 | for dead in dead_connections:
80 | await self.disconnect(dead, topic)
81 | except asyncio.CancelledError:
82 | # Handle task cancellation
83 | pass
84 | except Exception as e:
85 | print(f"Broadcast task error for topic {topic}: {e}")
86 |
87 | # Create a global instance
88 | manager = ConnectionManager()
--------------------------------------------------------------------------------
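A sketch (not in the repo) of an endpoint wired to the manager: `connect` registers the socket and lazily starts the topic's broadcast task, after which synchronous code anywhere in the program can call `manager.publish(topic, ...)`.

from fastapi import APIRouter, WebSocket, WebSocketDisconnect

from program.managers.websocket_manager import manager

router = APIRouter()

@router.websocket("/ws/{topic}")
async def ws_topic(websocket: WebSocket, topic: str):
    await manager.connect(websocket, topic)
    try:
        while True:
            await websocket.receive_text()  # keep the socket open; inbound messages are ignored
    except WebSocketDisconnect:
        await manager.disconnect(websocket, topic)

--------------------------------------------------------------------------------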
/src/program/media/__init__.py:
--------------------------------------------------------------------------------
1 | from .item import Episode, MediaItem, Movie, Season, Show # noqa
2 | from .state import States # noqa
3 |
--------------------------------------------------------------------------------
/src/program/media/state.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 |
3 |
4 | class States(Enum):
5 | Unknown = "Unknown"
6 | Unreleased = "Unreleased"
7 | Ongoing = "Ongoing"
8 | Requested = "Requested"
9 | Indexed = "Indexed"
10 | Scraped = "Scraped"
11 | Downloaded = "Downloaded"
12 | Symlinked = "Symlinked"
13 | Completed = "Completed"
14 | PartiallyCompleted = "PartiallyCompleted"
15 | Failed = "Failed"
16 | Paused = "Paused"
17 |
--------------------------------------------------------------------------------
/src/program/media/stream.py:
--------------------------------------------------------------------------------
1 | from typing import TYPE_CHECKING
2 |
3 | import sqlalchemy
4 | from RTN import Torrent
5 | from sqlalchemy import Index
6 | from sqlalchemy.orm import Mapped, mapped_column, relationship
7 |
8 | from program.db.db import db
9 |
10 | if TYPE_CHECKING:
11 | from program.media.item import MediaItem
12 |
13 |
14 | class StreamRelation(db.Model):
15 | __tablename__ = "StreamRelation"
16 |
17 | id: Mapped[int] = mapped_column(sqlalchemy.Integer, primary_key=True)
18 | parent_id: Mapped[str] = mapped_column(sqlalchemy.ForeignKey("MediaItem.id", ondelete="CASCADE"))
19 | child_id: Mapped[int] = mapped_column(sqlalchemy.ForeignKey("Stream.id", ondelete="CASCADE"))
20 |
21 | __table_args__ = (
22 | Index("ix_streamrelation_parent_id", "parent_id"),
23 | Index("ix_streamrelation_child_id", "child_id"),
24 | )
25 |
26 | class StreamBlacklistRelation(db.Model):
27 | __tablename__ = "StreamBlacklistRelation"
28 |
29 | id: Mapped[int] = mapped_column(sqlalchemy.Integer, primary_key=True)
30 | media_item_id: Mapped[str] = mapped_column(sqlalchemy.ForeignKey("MediaItem.id", ondelete="CASCADE"))
31 | stream_id: Mapped[int] = mapped_column(sqlalchemy.ForeignKey("Stream.id", ondelete="CASCADE"))
32 |
33 | __table_args__ = (
34 | Index("ix_streamblacklistrelation_media_item_id", "media_item_id"),
35 | Index("ix_streamblacklistrelation_stream_id", "stream_id"),
36 | )
37 |
38 | class Stream(db.Model):
39 | __tablename__ = "Stream"
40 |
41 | id: Mapped[int] = mapped_column(sqlalchemy.Integer, primary_key=True)
42 | infohash: Mapped[str] = mapped_column(sqlalchemy.String, nullable=False)
43 | raw_title: Mapped[str] = mapped_column(sqlalchemy.String, nullable=False)
44 | parsed_title: Mapped[str] = mapped_column(sqlalchemy.String, nullable=False)
45 | rank: Mapped[int] = mapped_column(sqlalchemy.Integer, nullable=False)
46 | lev_ratio: Mapped[float] = mapped_column(sqlalchemy.Float, nullable=False)
47 |
48 | parents: Mapped[list["MediaItem"]] = relationship(secondary="StreamRelation", back_populates="streams", lazy="selectin")
49 | blacklisted_parents: Mapped[list["MediaItem"]] = relationship(secondary="StreamBlacklistRelation", back_populates="blacklisted_streams", lazy="selectin")
50 |
51 | __table_args__ = (
52 | Index("ix_stream_infohash", "infohash"),
53 | Index("ix_stream_raw_title", "raw_title"),
54 | Index("ix_stream_parsed_title", "parsed_title"),
55 | Index("ix_stream_rank", "rank"),
56 | )
57 |
58 | def __init__(self, torrent: Torrent):
59 | self.raw_title = torrent.raw_title
60 | self.infohash = torrent.infohash
61 | self.parsed_title = torrent.data.parsed_title
62 | self.parsed_data = torrent.data
63 | self.rank = torrent.rank
64 | self.lev_ratio = torrent.lev_ratio
65 |
66 |     def __hash__(self):
67 |         return hash(self.infohash)  # __hash__ must return an int, not the raw infohash string
68 |
69 | def __eq__(self, other):
70 | return isinstance(other, Stream) and self.infohash == other.infohash
--------------------------------------------------------------------------------
/src/program/media/subtitle.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from typing import TYPE_CHECKING
3 |
4 | from sqlalchemy import ForeignKey, Index, Integer, String
5 | from sqlalchemy.orm import Mapped, mapped_column, relationship
6 |
7 | from program.db.db import db
8 |
9 | if TYPE_CHECKING:
10 | from program.media.item import MediaItem
11 |
12 |
13 | class Subtitle(db.Model):
14 | __tablename__ = "Subtitle"
15 |
16 | id: Mapped[int] = mapped_column(Integer, primary_key=True)
17 | language: Mapped[str] = mapped_column(String)
18 | file: Mapped[str] = mapped_column(String, nullable=True)
19 |
20 | parent_id: Mapped[str] = mapped_column(ForeignKey("MediaItem.id", ondelete="CASCADE"))
21 | parent: Mapped["MediaItem"] = relationship("MediaItem", back_populates="subtitles")
22 |
23 | __table_args__ = (
24 | Index("ix_subtitle_language", "language"),
25 | Index("ix_subtitle_file", "file"),
26 | Index("ix_subtitle_parent_id", "parent_id"),
27 | )
28 |
29 |     def __init__(self, optional=None):
30 |         for key, value in (optional or {}).items():
31 |             self.language = key
32 |             self.file = value
33 |
34 | def remove(self):
35 | if self.file and Path(self.file).exists():
36 | Path(self.file).unlink()
37 | self.file = None
38 | return self
39 |
40 | def to_dict(self):
41 | return {
42 | "id": str(self.id),
43 | "language": self.language,
44 | "file": self.file,
45 | "parent_id": self.parent_id
46 | }
--------------------------------------------------------------------------------
/src/program/services/content/__init__.py:
--------------------------------------------------------------------------------
1 | # from typing import Generator
2 | # from program.media.item import MediaItem
3 |
4 | from .listrr import Listrr
5 | from .mdblist import Mdblist
6 | from .overseerr import Overseerr
7 | from .plex_watchlist import PlexWatchlist
8 | from .trakt import TraktContent
9 |
10 | __all__ = ["Listrr", "Mdblist", "Overseerr", "PlexWatchlist", "TraktContent"]
11 |
12 | # class Requester:
13 | # def __init__(self):
14 | # self.key = "content"
15 | # self.initialized = False
16 | # self.services = {
17 | # Listrr: Listrr(),
18 | # Mdblist: Mdblist(),
19 | # Overseerr: Overseerr(),
20 | # PlexWatchlist: PlexWatchlist(),
21 | # TraktContent: TraktContent()
22 | # }
23 | # self.initialized = self.validate()
24 | # if not self.initialized:
25 | # return
26 |
27 | # def validate(self):
28 | # return any(service.initialized for service in self.services.values())
29 |
30 | # def run(self, item: MediaItem) -> Generator[MediaItem, None, None]:
31 | # """Index newly requested items."""
32 | # yield item
33 |
--------------------------------------------------------------------------------
/src/program/services/content/listrr.py:
--------------------------------------------------------------------------------
1 | """Listrr content module"""
2 | from typing import Generator
3 |
4 | from kink import di
5 |
6 | from program.apis.listrr_api import ListrrAPI
7 | from program.media.item import MediaItem
8 | from program.settings.manager import settings_manager
9 | from program.utils.request import logger
10 |
11 |
12 | class Listrr:
13 | """Content class for Listrr"""
14 |
15 | def __init__(self):
16 | self.key = "listrr"
17 | self.settings = settings_manager.settings.content.listrr
18 | self.api = None
19 | self.initialized = self.validate()
20 | if not self.initialized:
21 | return
22 | logger.success("Listrr initialized!")
23 |
24 | def validate(self) -> bool:
25 | """Validate Listrr settings."""
26 | if not self.settings.enabled:
27 | return False
28 | if self.settings.api_key == "" or len(self.settings.api_key) != 64:
29 | logger.error("Listrr api key is not set or invalid.")
30 | return False
31 | valid_list_found = False
32 | for _, content_list in [
33 | ("movie_lists", self.settings.movie_lists),
34 | ("show_lists", self.settings.show_lists),
35 | ]:
36 | if content_list is None or not any(content_list):
37 | continue
38 | for item in content_list:
39 | if item == "" or len(item) != 24:
40 | return False
41 | valid_list_found = True
42 | if not valid_list_found:
43 | logger.error("Both Movie and Show lists are empty or not set.")
44 | return False
45 | try:
46 | self.api = di[ListrrAPI]
47 | response = self.api.validate()
48 | if not response.is_ok:
49 | logger.error(
50 | f"Listrr ping failed - Status Code: {response.status_code}, Reason: {response.response.reason}",
51 | )
52 | return response.is_ok
53 | except Exception as e:
54 | logger.error(f"Listrr ping exception: {e}")
55 | return False
56 |
57 | def run(self) -> Generator[MediaItem, None, None]:
58 | """Fetch new media from `Listrr`"""
59 | try:
60 | movie_items = self.api.get_items_from_Listrr("Movies", self.settings.movie_lists)
61 | show_items = self.api.get_items_from_Listrr("Shows", self.settings.show_lists)
62 | except Exception as e:
63 | logger.error(f"Failed to fetch items from Listrr: {e}")
64 | return
65 |
66 | imdb_ids = movie_items + show_items
67 | listrr_items = [MediaItem({"imdb_id": imdb_id, "requested_by": self.key}) for imdb_id in imdb_ids if imdb_id.startswith("tt")]
68 | logger.info(f"Fetched {len(listrr_items)} items from Listrr")
69 | yield listrr_items
--------------------------------------------------------------------------------
/src/program/services/content/mdblist.py:
--------------------------------------------------------------------------------
1 | """Mdblist content module"""
2 |
3 | from typing import Generator
4 |
5 | from kink import di
6 | from loguru import logger
7 |
8 | from program.apis.mdblist_api import MdblistAPI
9 | from program.media.item import MediaItem
10 | from program.settings.manager import settings_manager
11 | from program.utils.request import RateLimitExceeded
12 |
13 |
14 | class Mdblist:
15 | """Content class for mdblist"""
16 | def __init__(self):
17 | self.key = "mdblist"
18 | self.settings = settings_manager.settings.content.mdblist
19 | self.api = None
20 | self.initialized = self.validate()
21 | if not self.initialized:
22 | return
23 | self.requests_per_2_minutes = self._calculate_request_time()
24 | logger.success("mdblist initialized")
25 |
26 | def validate(self):
27 | if not self.settings.enabled:
28 | return False
29 | if self.settings.api_key == "" or len(self.settings.api_key) != 25:
30 | logger.error("Mdblist api key is not set.")
31 | return False
32 | if not self.settings.lists:
33 | logger.error("Mdblist is enabled, but list is empty.")
34 | return False
35 | self.api = di[MdblistAPI]
36 | response = self.api.validate()
37 | if "Invalid API key!" in response.response.text:
38 | logger.error("Mdblist api key is invalid.")
39 | return False
40 | return True
41 |
42 | def run(self) -> Generator[MediaItem, None, None]:
43 | """Fetch media from mdblist and add them to media_items attribute
44 | if they are not already there"""
45 | items_to_yield = []
46 | try:
47 |             for media_list in self.settings.lists:
48 |                 if not media_list:
49 |                     continue
50 |
51 |                 if isinstance(media_list, int):
52 |                     items = self.api.list_items_by_id(media_list)
53 |                 else:
54 |                     items = self.api.list_items_by_url(media_list)
55 | for item in items:
56 | if hasattr(item, "error") or not item or item.imdb_id is None:
57 | continue
58 | if item.imdb_id.startswith("tt"):
59 | items_to_yield.append(MediaItem(
60 | {"imdb_id": item.imdb_id, "requested_by": self.key}
61 | ))
62 | except RateLimitExceeded:
63 | pass
64 |
65 | logger.info(f"Fetched {len(items_to_yield)} items from mdblist.com")
66 | yield items_to_yield
67 |
68 | def _calculate_request_time(self):
69 | limits = self.api.my_limits().limits
70 | daily_requests = limits.api_requests
71 | return daily_requests / 24 / 60 * 2
--------------------------------------------------------------------------------
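`_calculate_request_time` turns the account's daily request allowance into a per-two-minute budget. A worked example:

# daily allowance -> requests per 2-minute window
daily_requests = 1440
per_two_minutes = daily_requests / 24 / 60 * 2  # 1440/day = 1/minute = 2.0 per 2 minutes

--------------------------------------------------------------------------------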
/src/program/services/content/overseerr.py:
--------------------------------------------------------------------------------
1 | """Overseerr content module"""
2 |
3 | from kink import di
4 | from loguru import logger
5 | from requests.exceptions import ConnectionError, RetryError
6 | from urllib3.exceptions import MaxRetryError, NewConnectionError
7 |
8 | from program.apis.overseerr_api import OverseerrAPI
9 | from program.media.item import MediaItem
10 | from program.settings.manager import settings_manager
11 |
12 |
13 | class Overseerr:
14 | """Content class for overseerr"""
15 |
16 | def __init__(self):
17 | self.key = "overseerr"
18 | self.settings = settings_manager.settings.content.overseerr
19 | self.api = None
20 | self.initialized = self.validate()
21 | self.run_once = False
22 | if not self.initialized:
23 | return
24 | logger.success("Overseerr initialized!")
25 |
26 | def validate(self) -> bool:
27 | if not self.settings.enabled:
28 | return False
29 | if self.settings.api_key == "" or len(self.settings.api_key) != 68:
30 | logger.error("Overseerr api key is not set.")
31 | return False
32 | try:
33 | self.api = di[OverseerrAPI]
34 | response = self.api.validate()
35 | if response.status_code >= 201:
36 | logger.error(
37 | f"Overseerr ping failed - Status Code: {response.status_code}, Reason: {response.response.reason}"
38 | )
39 | return False
40 | return response.is_ok
41 | except (ConnectionError, RetryError, MaxRetryError, NewConnectionError):
42 | logger.error("Overseerr URL is not reachable, or it timed out")
43 | return False
44 | except Exception as e:
45 | logger.error(f"Unexpected error during Overseerr validation: {str(e)}")
46 | return False
47 |
48 | def run(self):
49 | """Fetch new media from `Overseerr`"""
50 | if self.settings.use_webhook and self.run_once:
51 | return
52 |
53 | overseerr_items: list[MediaItem] = self.api.get_media_requests(self.key)
54 |
55 | if self.settings.use_webhook:
56 | logger.debug("Webhook is enabled. Running Overseerr once before switching to webhook only mode")
57 | self.run_once = True
58 |
59 | logger.info(f"Fetched {len(overseerr_items)} items from overseerr")
60 |
61 | yield overseerr_items
--------------------------------------------------------------------------------
/src/program/services/content/plex_watchlist.py:
--------------------------------------------------------------------------------
1 | """Plex Watchlist Module"""
2 | from typing import Generator
3 |
4 | from kink import di
5 | from loguru import logger
6 | from requests import HTTPError
7 |
8 | from program.apis.plex_api import PlexAPI
9 | from program.media.item import MediaItem
10 | from program.settings.manager import settings_manager
11 |
12 |
13 | class PlexWatchlist:
14 | """Class for managing Plex Watchlists"""
15 |
16 | def __init__(self):
17 | self.key = "plex_watchlist"
18 | self.settings = settings_manager.settings.content.plex_watchlist
19 | self.api = None
20 | self.initialized = self.validate()
21 | if not self.initialized:
22 | return
23 | logger.success("Plex Watchlist initialized!")
24 |
25 | def validate(self):
26 | if not self.settings.enabled:
27 | return False
28 | if not settings_manager.settings.updaters.plex.token:
29 | logger.error("Plex token is not set!")
30 | return False
31 | try:
32 | self.api = di[PlexAPI]
33 | self.api.validate_account()
34 | except Exception as e:
35 | logger.error(f"Unable to authenticate Plex account: {e}")
36 | return False
37 | if self.settings.rss:
38 | self.api.set_rss_urls(self.settings.rss)
39 | for rss_url in self.settings.rss:
40 | try:
41 | response = self.api.validate_rss(rss_url)
42 | response.response.raise_for_status()
43 | self.api.rss_enabled = True
44 | except HTTPError as e:
45 | if e.response.status_code == 404:
46 | logger.warning(f"Plex RSS URL {rss_url} is Not Found. Please check your RSS URL in settings.")
47 | return False
48 | else:
49 | logger.warning(
50 | f"Plex RSS URL {rss_url} is not reachable (HTTP status code: {e.response.status_code})."
51 | )
52 | return False
53 | except Exception as e:
54 | logger.error(f"Failed to validate Plex RSS URL {rss_url}: {e}", exc_info=True)
55 | return False
56 | return True
57 |
58 | def run(self) -> Generator[MediaItem, None, None]:
59 | """Fetch new media from `Plex Watchlist` and RSS feed if enabled."""
60 | try:
61 | watchlist_items: list[str] = self.api.get_items_from_watchlist()
62 | rss_items: list[str] = self.api.get_items_from_rss() if self.api.rss_enabled else []
63 | except Exception as e:
64 | logger.warning(f"Error fetching items: {e}")
65 | return
66 |
67 | plex_items: set[str] = set(watchlist_items) | set(rss_items)
68 | items_to_yield: list[MediaItem] = [MediaItem({"imdb_id": imdb_id, "requested_by": self.key}) for imdb_id in plex_items if imdb_id and imdb_id.startswith("tt")]
69 |
70 | logger.info(f"Fetched {len(items_to_yield)} items from plex watchlist")
71 | yield items_to_yield
--------------------------------------------------------------------------------
/src/program/services/downloaders/shared.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from datetime import datetime
3 | from typing import Optional, Union
4 |
5 | from RTN import ParsedData, parse
6 |
7 | from program.services.downloaders.models import (
8 | ParsedFileData,
9 | TorrentContainer,
10 | TorrentInfo,
11 | )
12 | from program.settings.manager import settings_manager
13 |
14 |
15 | class DownloaderBase(ABC):
16 | """The abstract base class for all Downloader implementations."""
17 | PROXY_URL: str = settings_manager.settings.downloaders.proxy_url
18 |
19 | @abstractmethod
20 | def validate(self) -> bool:
21 | """
22 | Validate the downloader configuration and premium status
23 |
24 | Returns:
25 | ValidationResult: Contains validation status and any error messages
26 | """
27 | pass
28 |
29 | @abstractmethod
30 | def get_instant_availability(self, infohash: str, item_type: str) -> Optional[TorrentContainer]:
31 | """
32 | Get instant availability for a single infohash
33 |
34 | Args:
35 | infohash: The hash of the torrent to check
36 | item_type: The type of media item being checked
37 |
38 | Returns:
39 | Optional[TorrentContainer]: Cached status and available files for the hash, or None if not available
40 | """
41 | pass
42 |
43 | @abstractmethod
44 | def add_torrent(self, infohash: str) -> Union[int, str]:
45 | """
46 | Add a torrent and return its information
47 |
48 | Args:
49 | infohash: The hash of the torrent to add
50 |
51 | Returns:
52 | Union[int, str]: The ID of the added torrent
53 |
54 | Notes:
55 | The return type changes depending on the downloader
56 | """
57 | pass
58 |
59 | @abstractmethod
60 | def select_files(self, torrent_id: Union[int, str], file_ids: list[int]) -> None:
61 | """
62 | Select which files to download from the torrent
63 |
64 | Args:
65 | torrent_id: ID of the torrent to select files for
66 | file_ids: IDs of the files to select
67 | """
68 | pass
69 |
70 | @abstractmethod
71 | def get_torrent_info(self, torrent_id: Union[int, str]) -> TorrentInfo:
72 | """
73 | Get information about a specific torrent using its ID
74 |
75 | Args:
76 | torrent_id: ID of the torrent to get info for
77 |
78 | Returns:
79 | TorrentInfo: Current information about the torrent
80 | """
81 | pass
82 |
83 | @abstractmethod
84 | def delete_torrent(self, torrent_id: Union[int, str]) -> None:
85 | """
86 | Delete a torrent from the service
87 |
88 | Args:
89 | torrent_id: ID of the torrent to delete
90 | """
91 | pass
92 |
93 |
94 | def parse_filename(filename: str) -> ParsedFileData:
95 | """Parse a filename into a ParsedFileData object"""
96 | parsed_data: ParsedData = parse(filename)
97 | season: int | None = parsed_data.seasons[0] if parsed_data.seasons else None
98 | return ParsedFileData(item_type=parsed_data.type, season=season, episodes=parsed_data.episodes)
99 |
100 |
101 | def premium_days_left(expiration: datetime) -> str:
102 | """Convert an expiration date into a message showing days remaining on the user's premium account"""
103 | time_left = expiration - datetime.utcnow()
104 | days_left = time_left.days
105 |     hours_left, minutes_left = divmod(time_left.seconds // 60, 60)  # total minutes -> (hours, minutes)
106 | expiration_message = ""
107 |
108 | if days_left > 0:
109 | expiration_message = f"Your account expires in {days_left} days."
110 | elif hours_left > 0:
111 | expiration_message = (
112 | f"Your account expires in {hours_left} hours and {minutes_left} minutes."
113 | )
114 | else:
115 | expiration_message = "Your account expires soon."
116 | return expiration_message
117 |
--------------------------------------------------------------------------------
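A sketch (not in the repo) of the two helpers in use; the exact values `parse` returns for this filename are illustrative rather than guaranteed:

from datetime import datetime, timedelta

from program.services.downloaders.shared import parse_filename, premium_days_left

data = parse_filename("Some.Show.S02E05.1080p.WEB.mkv")
print(data.item_type, data.season, data.episodes)  # e.g. show 2 [5]

print(premium_days_left(datetime.utcnow() + timedelta(days=3, hours=1)))
# -> "Your account expires in 3 days."

--------------------------------------------------------------------------------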
/src/program/services/indexers/__init__.py:
--------------------------------------------------------------------------------
1 | from .trakt import TraktIndexer # noqa
2 |
--------------------------------------------------------------------------------
/src/program/services/indexers/trakt.py:
--------------------------------------------------------------------------------
1 | """Trakt updater module"""
2 |
3 | from datetime import datetime, timedelta
4 | from typing import Generator, Union
5 |
6 | from kink import di
7 | from loguru import logger
8 |
9 | from program.apis.trakt_api import TraktAPI
10 | from program.media.item import Episode, MediaItem, Movie, Season, Show
11 | from program.settings.manager import settings_manager
12 |
13 |
14 | class TraktIndexer:
15 | """Trakt updater class"""
16 | key = "TraktIndexer"
17 |
18 | def __init__(self):
19 | self.key = "traktindexer"
20 | self.ids = []
21 | self.initialized = True
22 | self.settings = settings_manager.settings.indexer
23 | self.failed_ids = set()
24 | self.api = di[TraktAPI]
25 |
26 | @staticmethod
27 | def copy_attributes(source, target):
28 | """Copy attributes from source to target."""
29 | attributes = ["file", "folder", "update_folder", "symlinked", "is_anime", "symlink_path", "subtitles", "requested_by", "requested_at", "overseerr_id", "active_stream", "requested_id", "streams"]
30 | for attr in attributes:
31 | target.set(attr, getattr(source, attr, None))
32 |
33 | def copy_items(self, itema: MediaItem, itemb: MediaItem):
34 | """Copy attributes from itema to itemb recursively."""
35 | is_anime = itema.is_anime or itemb.is_anime
36 | if itema.type == "mediaitem" and itemb.type == "show":
37 | itema.seasons = itemb.seasons
38 | if itemb.type == "show" and itema.type != "movie":
39 | for seasona in itema.seasons:
40 | for seasonb in itemb.seasons:
41 | if seasona.number == seasonb.number: # Check if seasons match
42 | for episodea in seasona.episodes:
43 | for episodeb in seasonb.episodes:
44 | if episodea.number == episodeb.number: # Check if episodes match
45 | self.copy_attributes(episodea, episodeb)
46 | episodeb.set("is_anime", is_anime)
47 | seasonb.set("is_anime", is_anime)
48 | itemb.set("is_anime", is_anime)
49 | elif itemb.type == "movie":
50 | self.copy_attributes(itema, itemb)
51 | itemb.set("is_anime", is_anime)
52 | else:
53 |             logger.error(f"Item types {itema.type} and {itemb.type} do not match, cannot copy metadata")
54 | return itemb
55 |
56 | def run(self, in_item: MediaItem, log_msg: bool = True) -> Generator[Union[Movie, Show, Season, Episode], None, None]:
57 | """Run the Trakt indexer for the given item."""
58 | if not in_item:
59 | logger.error("Item is None")
60 | return
61 | if not (imdb_id := in_item.imdb_id):
62 | logger.error(f"Item {in_item.log_string} does not have an imdb_id, cannot index it")
63 | return
64 |
65 | if in_item.imdb_id in self.failed_ids:
66 | return
67 |
68 | item_type = in_item.type if in_item.type != "mediaitem" else None
69 | item = self.api.create_item_from_imdb_id(imdb_id, item_type)
70 |
71 | if item:
72 | if item.type == "show":
73 | self._add_seasons_to_show(item, imdb_id)
74 | elif item.type == "movie":
75 | pass
76 | else:
77 | logger.error(f"Indexed IMDb Id {item.imdb_id} returned the wrong item type: {item.type}")
78 | self.failed_ids.add(in_item.imdb_id)
79 | return
80 | else:
81 | logger.error(f"Failed to index item with imdb_id: {in_item.imdb_id}")
82 | self.failed_ids.add(in_item.imdb_id)
83 | return
84 |
85 | item = self.copy_items(in_item, item)
86 | item.indexed_at = datetime.now()
87 |
88 | if log_msg: # used for mapping symlinks to database, need to hide this log message
89 | logger.info(f"Indexed IMDb id ({in_item.imdb_id}) as {item.type.title()}: {item.log_string}")
90 | yield item
91 |
92 | @staticmethod
93 | def should_submit(item: MediaItem) -> bool:
94 | if not item.indexed_at or not item.title:
95 | return True
96 |
97 | settings = settings_manager.settings.indexer
98 |
99 | try:
100 | interval = timedelta(seconds=settings.update_interval)
101 | return datetime.now() - item.indexed_at > interval
102 | except Exception:
103 |             logger.error(f"Failed to compare indexed_at ({item.indexed_at}) against update_interval ({settings.update_interval})")
104 | return False
105 |
106 |
107 | def _add_seasons_to_show(self, show: Show, imdb_id: str):
108 | """Add seasons to the given show using Trakt API."""
109 | if not imdb_id or not imdb_id.startswith("tt"):
110 | logger.error(f"Item {show.log_string} does not have an imdb_id, cannot index it")
111 | return
112 |
113 | seasons = self.api.get_show(imdb_id)
114 | for season in seasons:
115 | if season.number == 0:
116 | continue
117 | season_item = self.api.map_item_from_data(season, "season", show.genres)
118 | if season_item:
119 | for episode in season.episodes:
120 | episode_item = self.api.map_item_from_data(episode, "episode", show.genres)
121 | if episode_item:
122 | season_item.add_episode(episode_item)
123 | show.add_season(season_item)
124 |
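The season/episode matching in copy_items scans every source/target pair, which is quadratic in the number of episodes. A sketch of an equivalent lookup-based approach (the helper name _copy_show_attributes is hypothetical, not part of the module):

    def _copy_show_attributes(self, itema, itemb, is_anime: bool):
        # Index target seasons/episodes by number, then match in linear time.
        seasons_by_number = {s.number: s for s in itemb.seasons}
        for seasona in itema.seasons:
            seasonb = seasons_by_number.get(seasona.number)
            if seasonb is None:
                continue
            episodes_by_number = {e.number: e for e in seasonb.episodes}
            for episodea in seasona.episodes:
                episodeb = episodes_by_number.get(episodea.number)
                if episodeb is not None:
                    self.copy_attributes(episodea, episodeb)
                    episodeb.set("is_anime", is_anime)
            seasonb.set("is_anime", is_anime)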
--------------------------------------------------------------------------------
/src/program/services/libraries/__init__.py:
--------------------------------------------------------------------------------
1 | from .symlink import SymlinkLibrary # noqa: F401
2 |
--------------------------------------------------------------------------------
/src/program/services/post_processing/__init__.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 |
3 | from loguru import logger
4 |
5 | from program.db.db import db
6 | from program.db.db_functions import clear_streams
7 | from program.media.item import MediaItem
8 | from program.media.state import States
9 | from program.services.post_processing.subliminal import Subliminal
10 | from program.settings.manager import settings_manager
11 | from program.utils.notifications import notify_on_complete
12 |
13 |
14 | class PostProcessing:
15 | def __init__(self):
16 | self.key = "post_processing"
17 | self.initialized = False
18 | self.settings = settings_manager.settings.post_processing
19 | self.services = {
20 | Subliminal: Subliminal()
21 | }
22 | self.initialized = True
23 |
24 | def run(self, item: MediaItem):
25 | if Subliminal.should_submit(item):
26 | self.services[Subliminal].run(item)
27 | if item.last_state == States.Completed:
28 | clear_streams(item)
29 | yield item
30 |
31 |
32 | def notify(item: MediaItem):
33 | show = None
34 | if item.type in ["show", "movie"]:
35 | _notify(item)
36 | elif item.type == "episode":
37 | show = item.parent.parent
38 | elif item.type == "season":
39 | show = item.parent
40 | if show:
41 | with db.Session() as session:
42 | show = session.merge(show)
43 | show.store_state()
44 | if show.last_state == States.Completed:
45 | _notify(show)
46 | session.commit()
47 |
48 | def _notify(item: MediaItem):
49 | duration = round((datetime.now() - item.requested_at).total_seconds())
50 | logger.success(f"{item.log_string} has been completed in {duration} seconds.")
51 | if settings_manager.settings.notifications.enabled:
52 | notify_on_complete(item)
53 |
--------------------------------------------------------------------------------
/src/program/services/scrapers/comet.py:
--------------------------------------------------------------------------------
1 | """ Comet scraper module """
2 | import base64
3 | import json
4 | from typing import Dict
5 |
6 | import regex
7 | from loguru import logger
8 | from requests import ConnectTimeout, ReadTimeout
9 | from requests.exceptions import RequestException
10 |
11 | from program.media.item import MediaItem, Show
12 | from program.services.scrapers.shared import (
13 | ScraperRequestHandler,
14 | _get_stremio_identifier,
15 | )
16 | from program.settings.manager import settings_manager
17 | from program.utils.request import (
18 | HttpMethod,
19 | RateLimitExceeded,
20 | create_service_session,
21 | get_rate_limit_params,
22 | )
23 |
24 |
25 | class Comet:
26 | """Scraper for `Comet`"""
27 |
28 | def __init__(self):
29 | self.key = "comet"
30 | self.settings = settings_manager.settings.scraping.comet
31 | self.timeout = self.settings.timeout or 15
32 | self.encoded_string = base64.b64encode(json.dumps({
33 | "maxResultsPerResolution": 0,
34 | "maxSize": 0,
35 | "cachedOnly": False,
36 | "removeTrash": True,
37 | "resultFormat": [
38 | "title",
39 | "metadata",
40 | "size",
41 | "languages"
42 | ],
43 | "debridService": "torrent",
44 | "debridApiKey": "",
45 | "debridStreamProxyPassword": "",
46 | "languages": {
47 | "required": [],
48 | "exclude": [],
49 | "preferred": []
50 | },
51 | "resolutions": {},
52 | "options": {}
53 | }).encode("utf-8")).decode("utf-8")
54 | rate_limit_params = get_rate_limit_params(per_hour=300) if self.settings.ratelimit else None
55 | session = create_service_session(rate_limit_params=rate_limit_params)
56 | self.request_handler = ScraperRequestHandler(session)
57 | self.initialized = self.validate()
58 | if not self.initialized:
59 | return
60 | logger.success("Comet initialized!")
61 |
62 | def validate(self) -> bool:
63 | """Validate the Comet settings."""
64 | if not self.settings.enabled:
65 | return False
66 | if not self.settings.url:
67 | logger.error("Comet URL is not configured and will not be used.")
68 | return False
69 | if "elfhosted" in self.settings.url.lower():
70 | logger.warning("Elfhosted Comet instance is no longer supported. Please use a different instance.")
71 | return False
72 | if not isinstance(self.settings.ratelimit, bool):
73 | logger.error("Comet ratelimit must be a valid boolean.")
74 | return False
75 | try:
76 | url = f"{self.settings.url}/manifest.json"
77 | response = self.request_handler.execute(HttpMethod.GET, url, timeout=self.timeout)
78 | if response.is_ok:
79 | return True
80 | except Exception as e:
81 |             logger.error(f"Comet failed to initialize: {e}")
82 | return False
83 |
84 | def run(self, item: MediaItem) -> Dict[str, str]:
85 |         """Scrape Comet for the given media item
86 |         and return the scraped streams"""
87 | try:
88 | return self.scrape(item)
89 | except RateLimitExceeded:
90 | logger.debug(f"Comet ratelimit exceeded for item: {item.log_string}")
91 | except ConnectTimeout:
92 | logger.warning(f"Comet connection timeout for item: {item.log_string}")
93 | except ReadTimeout:
94 | logger.warning(f"Comet read timeout for item: {item.log_string}")
95 | except RequestException as e:
96 | logger.error(f"Comet request exception: {str(e)}")
97 | except Exception as e:
98 | logger.error(f"Comet exception thrown: {str(e)}")
99 | return {}
100 |
101 |     def scrape(self, item: MediaItem) -> Dict[str, str]:
102 | """Wrapper for `Comet` scrape method"""
103 | identifier, scrape_type, imdb_id = _get_stremio_identifier(item)
104 | url = f"{self.settings.url}/{self.encoded_string}/stream/{scrape_type}/{imdb_id}{identifier or ''}.json"
105 |
106 | response = self.request_handler.execute(HttpMethod.GET, url, timeout=self.timeout)
107 | if not response.is_ok or not getattr(response.data, "streams", None):
108 | logger.log("NOT_FOUND", f"No streams found for {item.log_string}")
109 | return {}
110 |
111 | torrents = {
112 | stream.infoHash: stream.description.split("\n")[0]
113 | for stream in response.data.streams if hasattr(stream, "infoHash")
114 | and stream.infoHash
115 | }
116 |
117 | if torrents:
118 | logger.log("SCRAPER", f"Found {len(torrents)} streams for {item.log_string}")
119 | else:
120 | logger.log("NOT_FOUND", f"No streams found for {item.log_string}")
121 |
122 | return torrents
123 |
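The encoded_string above is just the JSON settings blob, base64-encoded into the request path. Decoding it back is an easy way to verify what the addon actually receives (a sketch, assuming an initialized Comet instance):

    import base64
    import json

    comet = Comet()
    decoded = json.loads(base64.b64decode(comet.encoded_string))
    print(decoded["resultFormat"])   # ['title', 'metadata', 'size', 'languages']
    print(decoded["debridService"])  # 'torrent'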
--------------------------------------------------------------------------------
/src/program/services/scrapers/knightcrawler.py:
--------------------------------------------------------------------------------
1 | """ Knightcrawler scraper module """
2 | from typing import Dict
3 |
4 | from loguru import logger
5 | from requests import ConnectTimeout, ReadTimeout
6 | from requests.exceptions import RequestException
7 |
8 | from program.media.item import MediaItem
9 | from program.services.scrapers.shared import (
10 | ScraperRequestHandler,
11 | _get_stremio_identifier,
12 | )
13 | from program.settings.manager import settings_manager
14 | from program.utils.request import (
15 | HttpMethod,
16 | RateLimitExceeded,
17 | create_service_session,
18 | get_rate_limit_params,
19 | )
20 |
21 |
22 | class Knightcrawler:
23 | """Scraper for `Knightcrawler`"""
24 |
25 | def __init__(self):
26 | self.key = "knightcrawler"
27 | self.settings = settings_manager.settings.scraping.knightcrawler
28 | self.timeout = self.settings.timeout
29 | rate_limit_params = get_rate_limit_params(max_calls=1, period=5) if self.settings.ratelimit else None
30 | session = create_service_session(rate_limit_params=rate_limit_params)
31 | self.request_handler = ScraperRequestHandler(session)
32 | self.initialized = self.validate()
33 | if not self.initialized:
34 | return
35 | logger.success("Knightcrawler initialized!")
36 |
37 | def validate(self) -> bool:
38 | """Validate the Knightcrawler settings."""
39 | if not self.settings.enabled:
40 | return False
41 | if not self.settings.url:
42 | logger.error("Knightcrawler URL is not configured and will not be used.")
43 | return False
44 | if not isinstance(self.timeout, int) or self.timeout <= 0:
45 | logger.error("Knightcrawler timeout is not set or invalid.")
46 | return False
47 | if not isinstance(self.settings.ratelimit, bool):
48 | logger.error("Knightcrawler ratelimit must be a valid boolean.")
49 | return False
50 | try:
51 | url = f"{self.settings.url}/{self.settings.filter}/manifest.json"
52 | response = self.request_handler.execute(HttpMethod.GET, url, timeout=self.timeout)
53 | if response.is_ok:
54 | return True
55 | except Exception as e:
56 |             logger.error(f"Knightcrawler failed to initialize: {e}")
57 | return False
58 |         return False
59 |
60 | def run(self, item: MediaItem) -> Dict[str, str]:
61 |         """Scrape Knightcrawler for the given media item
62 |         and return the scraped streams"""
63 | if not item:
64 | return {}
65 |
66 | try:
67 | return self.scrape(item)
68 | except RateLimitExceeded:
69 | logger.debug(f"Knightcrawler rate limit exceeded for item: {item.log_string}")
70 | except ConnectTimeout:
71 | logger.warning(f"Knightcrawler connection timeout for item: {item.log_string}")
72 | except ReadTimeout:
73 | logger.warning(f"Knightcrawler read timeout for item: {item.log_string}")
74 | except RequestException as e:
75 |             if e.response is not None and e.response.status_code == 429:
76 | logger.warning(f"Knightcrawler ratelimit exceeded for item: {item.log_string}")
77 | else:
78 | logger.error(f"Knightcrawler request exception: {e}")
79 | except Exception as e:
80 | logger.error(f"Knightcrawler exception thrown: {e}")
81 | return {}
82 |
83 | def scrape(self, item: MediaItem) -> Dict[str, str]:
84 | """Wrapper for `Knightcrawler` scrape method"""
85 | identifier, scrape_type, imdb_id = _get_stremio_identifier(item)
86 |
87 | url = f"{self.settings.url}/{self.settings.filter}/stream/{scrape_type}/{imdb_id}"
88 | if identifier:
89 | url += identifier
90 |
91 | response = self.request_handler.execute(HttpMethod.GET, f"{url}.json", timeout=self.timeout)
92 |         if not response.is_ok or not getattr(response.data, "streams", None):
93 | logger.log("NOT_FOUND", f"No streams found for {item.log_string}")
94 | return {}
95 |
96 | torrents = {
97 | stream.infoHash: "\n".join(stream.title.split("\n")[:-1]).split("\n")[0]
98 | for stream in response.data.streams
99 | if stream.infoHash
100 | }
101 |
102 | if torrents:
103 | logger.log("SCRAPER", f"Found {len(torrents)} streams for {item.log_string}")
104 | else:
105 | logger.log("NOT_FOUND", f"No streams found for {item.log_string}")
106 |
107 | return torrents
--------------------------------------------------------------------------------
/src/program/services/scrapers/torrentio.py:
--------------------------------------------------------------------------------
1 | """ Torrentio scraper module """
2 | from typing import Dict
3 |
4 | from loguru import logger
5 |
6 | from program.media.item import MediaItem
7 | from program.services.scrapers.shared import (
8 | ScraperRequestHandler,
9 | _get_stremio_identifier,
10 | )
11 | from program.settings.manager import settings_manager
12 | from program.settings.models import TorrentioConfig
13 | from program.utils.request import (
14 | HttpMethod,
15 | RateLimitExceeded,
16 | create_service_session,
17 | get_rate_limit_params,
18 | )
19 |
20 |
21 | class Torrentio:
22 | """Scraper for `Torrentio`"""
23 |
24 | def __init__(self):
25 | self.key = "torrentio"
26 | self.settings: TorrentioConfig = settings_manager.settings.scraping.torrentio
27 | self.timeout: int = self.settings.timeout
28 | rate_limit_params = get_rate_limit_params(max_calls=1, period=5) if self.settings.ratelimit else None
29 | session = create_service_session(rate_limit_params=rate_limit_params)
30 | self.request_handler = ScraperRequestHandler(session)
31 | self.headers = {"User-Agent": "Mozilla/5.0"}
32 | self.proxies = {"http": self.settings.proxy_url, "https": self.settings.proxy_url} if self.settings.proxy_url else None
33 | self.initialized: bool = self.validate()
34 | if not self.initialized:
35 | return
36 | logger.success("Torrentio initialized!")
37 |
38 | def validate(self) -> bool:
39 | """Validate the Torrentio settings."""
40 | if not self.settings.enabled:
41 | return False
42 | if not self.settings.url:
43 | logger.error("Torrentio URL is not configured and will not be used.")
44 | return False
45 | if not isinstance(self.timeout, int) or self.timeout <= 0:
46 | logger.error("Torrentio timeout is not set or invalid.")
47 | return False
48 | try:
49 | url = f"{self.settings.url}/{self.settings.filter}/manifest.json"
50 | response = self.request_handler.execute(HttpMethod.GET, url, timeout=10, headers=self.headers, proxies=self.proxies)
51 | if response.is_ok:
52 | return True
53 | except Exception as e:
54 |             logger.error(f"Torrentio failed to initialize: {e}")
55 | return False
56 |         return False
57 |
58 | def run(self, item: MediaItem) -> Dict[str, str]:
59 | """Scrape Torrentio with the given media item for streams"""
60 | try:
61 | return self.scrape(item)
62 | except RateLimitExceeded:
63 | logger.debug(f"Torrentio rate limit exceeded for item: {item.log_string}")
64 | except Exception as e:
65 | logger.exception(f"Torrentio exception thrown: {str(e)}")
66 | return {}
67 |
68 | def scrape(self, item: MediaItem) -> Dict[str, str]:
69 | """Wrapper for `Torrentio` scrape method"""
70 | identifier, scrape_type, imdb_id = _get_stremio_identifier(item)
71 | if not imdb_id:
72 | return {}
73 |
74 | url = f"{self.settings.url}/{self.settings.filter}/stream/{scrape_type}/{imdb_id}"
75 | if identifier:
76 | url += identifier
77 |
78 | response = self.request_handler.execute(HttpMethod.GET, f"{url}.json", timeout=self.timeout, headers=self.headers, proxies=self.proxies)
79 | if not response.is_ok or not hasattr(response.data, 'streams') or not response.data.streams:
80 | logger.log("NOT_FOUND", f"No streams found for {item.log_string}")
81 | return {}
82 |
83 | torrents: Dict[str, str] = {}
84 | for stream in response.data.streams:
85 | if not stream.infoHash:
86 | continue
87 |
88 | stream_title = stream.title.split("\n👤")[0]
89 | raw_title = stream_title.split("\n")[0]
90 | torrents[stream.infoHash] = raw_title
91 |
92 | if torrents:
93 | logger.log("SCRAPER", f"Found {len(torrents)} streams for {item.log_string}")
94 | else:
95 | logger.log("NOT_FOUND", f"No streams found for {item.log_string}")
96 |
97 | return torrents
98 |
--------------------------------------------------------------------------------
/src/program/services/scrapers/zilean.py:
--------------------------------------------------------------------------------
1 | """ Zilean scraper module """
2 |
3 | from typing import Dict
4 |
5 | from loguru import logger
6 |
7 | from program.media.item import Episode, MediaItem, Season, Show
8 | from program.services.scrapers.shared import ScraperRequestHandler
9 | from program.settings.manager import settings_manager
10 | from program.utils.request import (
11 | HttpMethod,
12 | RateLimitExceeded,
13 | create_service_session,
14 | get_rate_limit_params,
15 | )
16 |
17 |
18 | class Zilean:
19 | """Scraper for `Zilean`"""
20 |
21 | def __init__(self):
22 | self.key = "zilean"
23 | self.settings = settings_manager.settings.scraping.zilean
24 | self.timeout = self.settings.timeout
25 | rate_limit_params = get_rate_limit_params(max_calls=1, period=2) if self.settings.ratelimit else None
26 | session = create_service_session(rate_limit_params=rate_limit_params)
27 | self.request_handler = ScraperRequestHandler(session)
28 | self.initialized = self.validate()
29 | if not self.initialized:
30 | return
31 | logger.success("Zilean initialized!")
32 |
33 | def validate(self) -> bool:
34 | """Validate the Zilean settings."""
35 | if not self.settings.enabled:
36 | return False
37 | if not self.settings.url:
38 | logger.error("Zilean URL is not configured and will not be used.")
39 | return False
40 | if not isinstance(self.timeout, int) or self.timeout <= 0:
41 | logger.error("Zilean timeout is not set or invalid.")
42 | return False
43 | try:
44 | url = f"{self.settings.url}/healthchecks/ping"
45 | response = self.request_handler.execute(HttpMethod.GET, url, timeout=self.timeout)
46 | return response.is_ok
47 | except Exception as e:
48 | logger.error(f"Zilean failed to initialize: {e}")
49 | return False
50 |
51 | def run(self, item: MediaItem) -> Dict[str, str]:
52 |         """Scrape Zilean for the given media item and return the scraped streams"""
53 | try:
54 | return self.scrape(item)
55 | except RateLimitExceeded:
56 | logger.debug(f"Zilean rate limit exceeded for item: {item.log_string}")
57 | except Exception as e:
58 | logger.exception(f"Zilean exception thrown: {e}")
59 | return {}
60 |
61 | def _build_query_params(self, item: MediaItem) -> Dict[str, str]:
62 | """Build the query params for the Zilean API"""
63 | params = {"Query": item.get_top_title()}
64 | if isinstance(item, Show):
65 | params["Season"] = 1
66 | elif isinstance(item, Season):
67 | params["Season"] = item.number
68 | elif isinstance(item, Episode):
69 | params["Season"] = item.parent.number
70 | params["Episode"] = item.number
71 | return params
72 |
73 | def scrape(self, item: MediaItem) -> Dict[str, str]:
74 | """Wrapper for `Zilean` scrape method"""
75 | url = f"{self.settings.url}/dmm/filtered"
76 | params = self._build_query_params(item)
77 |
78 | response = self.request_handler.execute(HttpMethod.GET, url, params=params, timeout=self.timeout)
79 | if not response.is_ok or not response.data:
80 | logger.log("NOT_FOUND", f"No streams found for {item.log_string}")
81 | return {}
82 |
83 | torrents: Dict[str, str] = {}
84 | for result in response.data:
85 | if not result.raw_title or not result.info_hash:
86 | continue
87 | torrents[result.info_hash] = result.raw_title
88 |
89 | if torrents:
90 | logger.log("SCRAPER", f"Found {len(torrents)} streams for {item.log_string}")
91 | else:
92 | logger.log("NOT_FOUND", f"No streams found for {item.log_string}")
93 |
94 | return torrents
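For example, episode 5 of season 2 of a show titled "Some Show" (a hypothetical item) would produce:

    params = {"Query": "Some Show", "Season": 2, "Episode": 5}
    # i.e. GET {settings.url}/dmm/filtered?Query=Some+Show&Season=2&Episode=5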
--------------------------------------------------------------------------------
/src/program/services/updaters/__init__.py:
--------------------------------------------------------------------------------
1 | """Updater module"""
2 | from loguru import logger
3 |
4 | from program.media.item import MediaItem
5 | from program.services.updaters.emby import EmbyUpdater
6 | from program.services.updaters.jellyfin import JellyfinUpdater
7 | from program.services.updaters.plex import PlexUpdater
8 |
9 |
10 | class Updater:
11 | def __init__(self):
12 | self.key = "updater"
13 | self.services = {
14 | PlexUpdater: PlexUpdater(),
15 | JellyfinUpdater: JellyfinUpdater(),
16 | EmbyUpdater: EmbyUpdater(),
17 | }
18 | self.initialized = True
19 |
20 | def validate(self) -> bool:
21 | """Validate that at least one updater service is initialized."""
22 | initialized_services = [service for service in self.services.values() if service.initialized]
23 | return len(initialized_services) > 0
24 |
25 | def run(self, item: MediaItem):
26 | if not self.initialized:
27 | logger.error("Updater is not initialized properly.")
28 | return
29 |
30 | for service_cls, service in self.services.items():
31 | if service.initialized:
32 | try:
33 | item = next(service.run(item))
34 | except Exception as e:
35 | logger.error(f"{service_cls.__name__} failed to update {item.log_string}: {e}")
36 |
37 |         # Let's update the attributes of the item and its children; we don't care whether the service updated it or not.
38 | for _item in get_items_to_update(item):
39 | _item.set("update_folder", "updated")
40 | yield item
41 |
42 | def get_items_to_update(item: MediaItem) -> list[MediaItem]:
43 | """Get items to update for a given item."""
44 | items_to_update = []
45 | if item.type in ["movie", "episode"]:
46 | items_to_update = [item]
47 |     elif item.type == "show":
48 | items_to_update = [e for s in item.seasons for e in s.episodes if e.symlinked and e.get("update_folder") != "updated"]
49 | elif item.type == "season":
50 | items_to_update = [e for e in item.episodes if e.symlinked and e.update_folder != "updated"]
51 | return items_to_update
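Note that Updater.run is a generator: nothing executes until it is iterated. A minimal consumption sketch (the MediaItem `item` is assumed to exist):

    updater = Updater()
    if updater.validate():
        for updated_item in updater.run(item):
            # the item's eligible children now carry update_folder == "updated"
            pass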
--------------------------------------------------------------------------------
/src/program/services/updaters/emby.py:
--------------------------------------------------------------------------------
1 | """Emby Updater module"""
2 | from types import SimpleNamespace
3 | from typing import Generator, Optional, Type
4 |
5 | from loguru import logger
6 |
7 | from program.media.item import MediaItem
8 | from program.settings.manager import settings_manager
9 | from program.utils.request import (
10 | BaseRequestHandler,
11 | HttpMethod,
12 | ResponseObject,
13 | ResponseType,
14 | Session,
15 | create_service_session,
16 | )
17 |
18 |
19 | class EmbyRequestHandler(BaseRequestHandler):
20 | def __init__(self, session: Session, response_type=ResponseType.SIMPLE_NAMESPACE, custom_exception: Optional[Type[Exception]] = None, request_logging: bool = False):
21 | super().__init__(session, response_type=response_type, custom_exception=custom_exception, request_logging=request_logging)
22 |
23 | def execute(self, method: HttpMethod, endpoint: str, **kwargs) -> ResponseObject:
24 | return super()._request(method, endpoint, **kwargs)
25 |
26 | class EmbyUpdater:
27 | def __init__(self):
28 | self.key = "emby"
29 | self.initialized = False
30 | self.settings = settings_manager.settings.updaters.emby
31 | session = create_service_session()
32 | self.request_handler = EmbyRequestHandler(session)
33 | self.initialized = self.validate()
34 | if not self.initialized:
35 | return
36 | logger.success("Emby Updater initialized!")
37 |
38 | def validate(self) -> bool:
39 | """Validate Emby library"""
40 | if not self.settings.enabled:
41 | return False
42 | if not self.settings.api_key:
43 | logger.error("Emby API key is not set!")
44 | return False
45 | if not self.settings.url:
46 | logger.error("Emby URL is not set!")
47 | return False
48 | try:
49 | response = self.request_handler.execute(HttpMethod.GET, f"{self.settings.url}/Users?api_key={self.settings.api_key}")
50 | if response.is_ok:
51 | self.initialized = True
52 | return True
53 | except Exception as e:
54 | logger.exception(f"Emby exception thrown: {e}")
55 | return False
56 |
57 | def run(self, item: MediaItem) -> Generator[MediaItem, None, None]:
58 | """Update Emby library for a single item or a season with its episodes"""
59 | items_to_update = []
60 |
61 | if item.type in ["movie", "episode"]:
62 | items_to_update = [item]
63 | elif item.type == "show":
64 | for season in item.seasons:
65 | items_to_update += [e for e in season.episodes if e.symlinked and e.update_folder != "updated"]
66 | elif item.type == "season":
67 | items_to_update = [e for e in item.episodes if e.symlinked and e.update_folder != "updated"]
68 |
69 | if not items_to_update:
70 | logger.debug(f"No items to update for {item.log_string}")
71 | return
72 |
73 | updated = False
74 | updated_episodes = []
75 |
76 | for item_to_update in items_to_update:
77 | if self.update_item(item_to_update):
78 | updated_episodes.append(item_to_update)
79 | updated = True
80 |
81 | if updated:
82 | if item.type in ["show", "season"]:
83 | if len(updated_episodes) == len(items_to_update):
84 | logger.log("EMBY", f"Updated all episodes for {item.log_string}")
85 | else:
86 | updated_episodes_log = ", ".join([str(ep.number) for ep in updated_episodes])
87 | logger.log("EMBY", f"Updated episodes {updated_episodes_log} in {item.log_string}")
88 | else:
89 | logger.log("EMBY", f"Updated {item.log_string}")
90 |
91 | yield item
92 |
93 |
94 | def update_item(self, item: MediaItem) -> bool:
95 | """Update the Emby item"""
96 | if item.symlinked and item.update_folder != "updated" and item.symlink_path:
97 | try:
98 | response = self.request_handler.execute(HttpMethod.POST,
99 | f"{self.settings.url}/Library/Media/Updated",
100 | json={"Updates": [{"Path": item.symlink_path, "UpdateType": "Created"}]},
101 | params={"api_key": self.settings.api_key},
102 | )
103 | if response.is_ok:
104 | return True
105 | except Exception as e:
106 | logger.error(f"Failed to update Emby item: {e}")
107 | return False
108 |
109 | # not needed to update, but maybe useful in the future?
110 | def get_libraries(self) -> list[SimpleNamespace]:
111 | """Get the libraries from Emby"""
112 | try:
113 | response = self.request_handler.execute(HttpMethod.GET,
114 | f"{self.settings.url}/Library/VirtualFolders",
115 | params={"api_key": self.settings.api_key},
116 | )
117 | if response.is_ok and response.data:
118 | return response.data
119 | except Exception as e:
120 | logger.error(f"Failed to get Emby libraries: {e}")
121 | return []
122 |
--------------------------------------------------------------------------------
/src/program/services/updaters/jellyfin.py:
--------------------------------------------------------------------------------
1 | """Jellyfin Updater module"""
2 | from types import SimpleNamespace
3 | from typing import Generator, Optional, Type
4 |
5 | from loguru import logger
6 |
7 | from program.media.item import MediaItem
8 | from program.settings.manager import settings_manager
9 | from program.utils.request import (
10 | BaseRequestHandler,
11 | HttpMethod,
12 | ResponseObject,
13 | ResponseType,
14 | Session,
15 | create_service_session,
16 | )
17 |
18 |
19 | class JellyfinRequestHandler(BaseRequestHandler):
20 | def __init__(self, session: Session, response_type=ResponseType.SIMPLE_NAMESPACE, custom_exception: Optional[Type[Exception]] = None, request_logging: bool = False):
21 | super().__init__(session, response_type=response_type, custom_exception=custom_exception, request_logging=request_logging)
22 |
23 | def execute(self, method: HttpMethod, endpoint: str, **kwargs) -> ResponseObject:
24 | return super()._request(method, endpoint, **kwargs)
25 |
26 | class JellyfinUpdater:
27 | def __init__(self):
28 | self.key = "jellyfin"
29 | self.initialized = False
30 | self.settings = settings_manager.settings.updaters.jellyfin
31 | session = create_service_session()
32 | self.request_handler = JellyfinRequestHandler(session)
33 | self.initialized = self.validate()
34 | if not self.initialized:
35 | return
36 | logger.success("Jellyfin Updater initialized!")
37 |
38 | def validate(self) -> bool:
39 | """Validate Jellyfin library"""
40 | if not self.settings.enabled:
41 | return False
42 | if not self.settings.api_key:
43 | logger.error("Jellyfin API key is not set!")
44 | return False
45 | if not self.settings.url:
46 | logger.error("Jellyfin URL is not set!")
47 | return False
48 |
49 | try:
50 | response = self.request_handler.execute(HttpMethod.GET, f"{self.settings.url}/Users", params={"api_key": self.settings.api_key})
51 | if response.is_ok:
52 | self.initialized = True
53 | return True
54 | except Exception as e:
55 | logger.exception(f"Jellyfin exception thrown: {e}")
56 | return False
57 |
58 | def run(self, item: MediaItem) -> Generator[MediaItem, None, None]:
59 |         """Trigger a Jellyfin library refresh for the given item"""
60 |         if self.update_item():
61 |             logger.log("JELLYFIN", f"Updated {item.log_string}")
62 | yield item
63 |
64 |
65 | def update_item(self) -> bool:
66 | """Update the Jellyfin item"""
67 | try:
68 | response = self.request_handler.execute(HttpMethod.POST,
69 | f"{self.settings.url}/Library/Refresh",
70 | params={"api_key": self.settings.api_key},
71 | )
72 | if response.is_ok:
73 | return True
74 | except Exception as e:
75 | logger.error(f"Failed to update Jellyfin item: {e}")
76 | return False
77 |
78 | # not needed to update, but maybe useful in the future?
79 | def get_libraries(self) -> list[SimpleNamespace]:
80 | """Get the libraries from Jellyfin"""
81 | try:
82 | response = self.request_handler.execute(HttpMethod.GET,
83 | f"{self.settings.url}/Library/VirtualFolders",
84 | params={"api_key": self.settings.api_key},
85 | )
86 | if response.is_ok and response.data:
87 | return response.data
88 | except Exception as e:
89 | logger.error(f"Failed to get Jellyfin libraries: {e}")
90 | return []
91 |
--------------------------------------------------------------------------------
/src/program/services/updaters/plex.py:
--------------------------------------------------------------------------------
1 | """Plex Updater module"""
2 | import os
3 | from typing import Dict, Generator, List, Union
4 |
5 | from kink import di
6 | from loguru import logger
7 | from plexapi.exceptions import BadRequest, Unauthorized
8 | from plexapi.library import LibrarySection
9 | from requests.exceptions import ConnectionError as RequestsConnectionError
10 | from urllib3.exceptions import MaxRetryError, NewConnectionError, RequestError
11 |
12 | from program.apis.plex_api import PlexAPI
13 | from program.media.item import Episode, Movie, Season, Show
14 | from program.settings.manager import settings_manager
15 |
16 |
17 | class PlexUpdater:
18 | def __init__(self):
19 | self.key = "plexupdater"
20 | self.initialized = False
21 | self.library_path = os.path.abspath(
22 | os.path.dirname(settings_manager.settings.symlink.library_path)
23 | )
24 | self.settings = settings_manager.settings.updaters.plex
25 | self.api = None
26 | self.sections: Dict[LibrarySection, List[str]] = {}
27 | self.initialized = self.validate()
28 | if not self.initialized:
29 | return
30 | logger.success("Plex Updater initialized!")
31 |
32 | def validate(self) -> bool: # noqa: C901
33 | """Validate Plex library"""
34 | if not self.settings.enabled:
35 | return False
36 | if not self.settings.token:
37 | logger.error("Plex token is not set!")
38 | return False
39 | if not self.settings.url:
40 | logger.error("Plex URL is not set!")
41 | return False
42 | if not self.library_path or not os.path.exists(self.library_path):
43 | logger.error("Library path is not set or does not exist!")
44 | return False
45 |
46 | try:
47 | self.api = di[PlexAPI]
48 | self.api.validate_server()
49 | self.sections = self.api.map_sections_with_paths()
50 | self.initialized = True
51 | return True
52 | except Unauthorized as e:
53 | logger.error(f"Plex is not authorized!: {e}")
54 | except TimeoutError as e:
55 | logger.exception(f"Plex timeout error: {e}")
56 | except BadRequest as e:
57 | logger.exception(f"Plex is not configured correctly!: {e}")
58 | except MaxRetryError as e:
59 | logger.exception(f"Plex max retries exceeded: {e}")
60 | except NewConnectionError as e:
61 | logger.exception(f"Plex new connection error: {e}")
62 | except RequestsConnectionError as e:
63 | logger.exception(f"Plex requests connection error: {e}")
64 | except RequestError as e:
65 | logger.exception(f"Plex request error: {e}")
66 | except Exception as e:
67 | logger.exception(f"Plex exception thrown: {e}")
68 | return False
69 |
70 | def run(self, item: Union[Movie, Show, Season, Episode]) -> Generator[Union[Movie, Show, Season, Episode], None, None]:
71 | """Update Plex library section for a single item or a season with its episodes"""
72 |
73 | item_type = "movie" if isinstance(item, Movie) else "show"
74 | updated = False
75 | updated_episodes = []
76 | items_to_update = []
77 |
78 | if isinstance(item, (Movie, Episode)):
79 | items_to_update = [item]
80 | elif isinstance(item, Show):
81 | for season in item.seasons:
82 |                 items_to_update += [e for e in season.episodes if e.symlinked and e.get("update_folder") != "updated"]
83 | elif isinstance(item, Season):
84 | items_to_update = [e for e in item.episodes if e.symlinked and e.update_folder != "updated"]
85 |
86 | if not items_to_update:
87 | logger.debug(f"No items to update for {item.log_string}")
88 | return
89 |
90 | section_name = None
91 | # any failures are usually because we are updating Plex too fast
92 | for section, paths in self.sections.items():
93 | if section.type == item_type:
94 | for path in paths:
95 | if isinstance(item, (Show, Season)):
96 | for episode in items_to_update:
97 | if episode.update_folder and str(path) in str(episode.update_folder):
98 | if self.api.update_section(section, episode):
99 | updated_episodes.append(episode)
100 | section_name = section.title
101 | updated = True
102 | elif isinstance(item, (Movie, Episode)):
103 | if item.update_folder and str(path) in str(item.update_folder):
104 | if self.api.update_section(section, item):
105 | section_name = section.title
106 | updated = True
107 |
108 | if updated:
109 | if isinstance(item, (Show, Season)):
110 | if len(updated_episodes) == len(items_to_update):
111 | logger.log("PLEX", f"Updated section {section_name} for {item.log_string}")
112 | else:
113 | updated_episodes_log = ", ".join([str(ep.number) for ep in updated_episodes])
114 | logger.log("PLEX", f"Updated section {section_name} for episodes {updated_episodes_log} in {item.log_string}")
115 | else:
116 | logger.log("PLEX", f"Updated section {section_name} for {item.log_string}")
117 |
118 | yield item
119 |
--------------------------------------------------------------------------------
/src/program/settings/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rivenmedia/riven/d91dd254c08fbb410706d4fc6cb97f3691ebc67c/src/program/settings/__init__.py
--------------------------------------------------------------------------------
/src/program/settings/manager.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 |
4 | from loguru import logger
5 | from pydantic import ValidationError
6 |
7 | from program.settings.models import AppModel, Observable
8 | from program.utils import data_dir_path
9 |
10 |
11 | class SettingsManager:
12 | """Class that handles settings, ensuring they are validated against a Pydantic schema."""
13 |
14 | def __init__(self):
15 | self.observers = []
16 | self.filename = os.environ.get("SETTINGS_FILENAME", "settings.json")
17 | self.settings_file = data_dir_path / self.filename
18 |
19 | Observable.set_notify_observers(self.notify_observers)
20 |
21 | if not self.settings_file.exists():
22 | logger.info(f"Settings filename: {self.filename}")
23 | self.settings = AppModel()
24 | self.settings = AppModel.model_validate(
25 | self.check_environment(json.loads(self.settings.model_dump_json()), "RIVEN")
26 | )
27 | self.notify_observers()
28 | else:
29 | self.load()
30 |
31 | def register_observer(self, observer):
32 | self.observers.append(observer)
33 |
34 | def notify_observers(self):
35 | for observer in self.observers:
36 | observer()
37 |
38 |     def check_environment(self, settings, prefix="", separator="_"):
39 | checked_settings = {}
40 | for key, value in settings.items():
41 | if isinstance(value, dict):
42 |                 sub_checked_settings = self.check_environment(value, f"{prefix}{separator}{key}")
43 |                 checked_settings[key] = sub_checked_settings
44 | else:
45 | environment_variable = f"{prefix}_{key}".upper()
46 | if os.getenv(environment_variable, None):
47 | new_value = os.getenv(environment_variable)
48 | if isinstance(value, bool):
49 | checked_settings[key] = new_value.lower() == "true" or new_value == "1"
50 | elif isinstance(value, int):
51 | checked_settings[key] = int(new_value)
52 | elif isinstance(value, float):
53 | checked_settings[key] = float(new_value)
54 | elif isinstance(value, list):
55 | checked_settings[key] = json.loads(new_value)
56 | else:
57 | checked_settings[key] = new_value
58 | else:
59 | checked_settings[key] = value
60 | return checked_settings
61 |
62 | def load(self, settings_dict: dict | None = None):
63 | """Load settings from file, validating against the AppModel schema."""
64 | try:
65 | if not settings_dict:
66 | with open(self.settings_file, "r", encoding="utf-8") as file:
67 | settings_dict = json.loads(file.read())
68 | if os.environ.get("RIVEN_FORCE_ENV", "false").lower() == "true":
69 | settings_dict = self.check_environment(settings_dict, "RIVEN")
70 | self.settings = AppModel.model_validate(settings_dict)
71 | self.save()
72 | except ValidationError as e:
73 | formatted_error = format_validation_error(e)
74 | logger.error(f"Settings validation failed:\n{formatted_error}")
75 | raise
76 | except json.JSONDecodeError as e:
77 | logger.error(f"Error parsing settings file: {e}")
78 | raise
79 | except FileNotFoundError:
80 | logger.warning(f"Error loading settings: {self.settings_file} does not exist")
81 | raise
82 | self.notify_observers()
83 |
84 | def save(self):
85 | """Save settings to file, using Pydantic model for JSON serialization."""
86 | with open(self.settings_file, "w", encoding="utf-8") as file:
87 | file.write(self.settings.model_dump_json(indent=4))
88 |
89 |
90 | def format_validation_error(e: ValidationError) -> str:
91 | """Format validation errors in a user-friendly way"""
92 | messages = []
93 | for error in e.errors():
94 | field = ".".join(str(x) for x in error["loc"])
95 | message = error.get("msg")
96 | messages.append(f"• {field}: {message}")
97 | return "\n".join(messages)
98 |
99 |
100 | settings_manager = SettingsManager()
101 |
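check_environment walks the nested settings dict and, for each leaf, looks for an upper-cased, underscore-joined variable with the given prefix, coercing the string value to the leaf's existing type. A sketch of the mapping (variable names derived from the prefixing logic above, not an exhaustive list):

    # settings["scraping"]["torrentio"]["enabled"]  <-  RIVEN_SCRAPING_TORRENTIO_ENABLED
    # settings["debug"]                             <-  RIVEN_DEBUG
    import os
    os.environ["RIVEN_DEBUG"] = "true"   # parsed as a boolean ("true" or "1" -> True)

    # Overrides apply on first run (no settings file yet) or when RIVEN_FORCE_ENV=true.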
--------------------------------------------------------------------------------
/src/program/settings/migratable.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseModel
2 |
3 |
4 | class MigratableBaseModel(BaseModel):
5 | def __init__(self, **data):
6 | for field_name, field in self.model_fields.items():
7 | if field_name not in data:
8 | default_value = field.default if field.default is not None else None
9 | data[field_name] = default_value
10 | super().__init__(**data)
11 |
12 |
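The override exists so that a settings file written by an older release, which lacks newly added fields, still validates: missing keys are pre-filled with the field defaults before Pydantic sees the payload. A toy illustration (ExampleSettings is hypothetical):

    class ExampleSettings(MigratableBaseModel):
        old_field: str = "kept"
        new_field: int = 5  # introduced in a later release

    loaded = ExampleSettings(**{"old_field": "from-disk"})  # old file, no new_field
    assert loaded.new_field == 5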
--------------------------------------------------------------------------------
/src/program/settings/versions.py:
--------------------------------------------------------------------------------
1 | from loguru import logger
2 | from RTN.models import BaseRankingModel, BestRanking, DefaultRanking
3 |
4 |
5 | class RankModels:
6 | """
7 | The `RankModels` class represents a collection of ranking models for different categories.
8 | Each ranking model is a subclass of the `BaseRankingModel` class.
9 |
10 | Attributes:
11 | `default` (DefaultRanking): The default ranking model for getting best results for non-transcoded releases.
12 | `custom` (BaseRankingModel): Uses a base ranking model for all categories with all ranks set to 0.
13 | `best` (BestRanking): The best ranking model for getting the highest quality releases.
14 |
15 | Methods:
16 | `get(name: str)` -> `BaseRankingModel`: Returns a ranking model based on the given name.
17 |
18 | Note:
19 | If the name is not found, use the `custom` model which uses a base ranking model for all categories with all ranks set to 0.
20 | """
21 |
22 | custom: BaseRankingModel = BaseRankingModel() # All ranks set to 0 by default
23 | default: DefaultRanking = DefaultRanking() # Good for 720p/1080p releases
24 | best: BestRanking = BestRanking() # Good for 4K HDR REMUX releases
25 |
26 | @classmethod
27 | def get(cls, name: str) -> BaseRankingModel:
28 | """Get a ranking model by name."""
29 | model = getattr(cls, name, None)
30 | if model is None:
31 | logger.warning(f"Ranking model '{name}' not found. Setting to custom model.")
32 | return cls.custom
33 | return model
34 |
35 |
36 | models = RankModels()
37 |
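Usage is a plain attribute lookup with a safe fallback:

    model = RankModels.get("best")     # BestRanking instance
    model = RankModels.get("unknown")  # warns, returns the zeroed custom model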
--------------------------------------------------------------------------------
/src/program/state_transition.py:
--------------------------------------------------------------------------------
1 | from loguru import logger
2 |
3 | from program.media import MediaItem, States
4 | from program.services.downloaders import Downloader
5 | from program.services.indexers.trakt import TraktIndexer
6 | from program.services.post_processing import PostProcessing, notify
7 | from program.services.post_processing.subliminal import Subliminal
8 | from program.services.scrapers import Scraping
9 | from program.services.updaters import Updater
10 | from program.settings.manager import settings_manager
11 | from program.symlink import Symlinker
12 | from program.types import ProcessedEvent, Service
13 |
14 |
15 | def process_event(emitted_by: Service, existing_item: MediaItem | None = None, content_item: MediaItem | None = None) -> ProcessedEvent:
16 | """Process an event and return the updated item, next service and items to submit."""
17 | next_service: Service = None
18 | no_further_processing: ProcessedEvent = (None, [])
19 | items_to_submit = []
20 |
21 | if existing_item and existing_item.last_state in [States.Paused, States.Failed]:
22 | # logger.debug(f"Skipping {existing_item.log_string}: Item is {existing_item.last_state.name}. Manual intervention required.")
23 | return no_further_processing
24 |
25 | #TODO - Reindex non-released badly indexed items here
26 | if content_item or (existing_item is not None and existing_item.last_state == States.Requested):
27 | next_service = TraktIndexer
28 | logger.debug(f"Submitting {content_item.imdb_id if content_item else existing_item.log_string} to Trakt indexer")
29 | return next_service, [content_item or existing_item]
30 |
31 | elif existing_item is not None and existing_item.last_state in [States.PartiallyCompleted, States.Ongoing]:
32 | if existing_item.type == "show":
33 | for season in existing_item.seasons:
34 | if season.last_state not in [States.Completed, States.Unreleased]:
35 | _, sub_items = process_event(emitted_by, season, None)
36 | items_to_submit += sub_items
37 | elif existing_item.type == "season":
38 | for episode in existing_item.episodes:
39 | if episode.last_state != States.Completed:
40 | _, sub_items = process_event(emitted_by, episode, None)
41 | items_to_submit += sub_items
42 |
43 | elif existing_item is not None and existing_item.last_state == States.Indexed:
44 | next_service = Scraping
45 | if emitted_by != Scraping and Scraping.should_submit(existing_item):
46 | items_to_submit = [existing_item]
47 | elif existing_item.type == "show":
48 | items_to_submit = [s for s in existing_item.seasons if s.last_state in [States.Indexed, States.PartiallyCompleted, States.Unknown] and Scraping.should_submit(s)]
49 | elif existing_item.type == "season":
50 | items_to_submit = [e for e in existing_item.episodes if e.last_state in [States.Indexed, States.Unknown] and Scraping.should_submit(e)]
51 |
52 | elif existing_item is not None and existing_item.last_state == States.Scraped:
53 | next_service = Downloader
54 | items_to_submit = [existing_item]
55 |
56 | elif existing_item is not None and existing_item.last_state == States.Downloaded:
57 | next_service = Symlinker
58 | items_to_submit = [existing_item]
59 |
60 | elif existing_item is not None and existing_item.last_state == States.Symlinked:
61 | next_service = Updater
62 | items_to_submit = [existing_item]
63 |
64 | elif existing_item is not None and existing_item.last_state == States.Completed:
65 | # If a user manually retries an item, lets not notify them again
66 | if emitted_by not in ["RetryItem", PostProcessing]:
67 | notify(existing_item)
68 | # Avoid multiple post-processing runs
69 | if emitted_by != PostProcessing:
70 | if settings_manager.settings.post_processing.subliminal.enabled:
71 | next_service = PostProcessing
72 | if existing_item.type in ["movie", "episode"] and Subliminal.should_submit(existing_item):
73 | items_to_submit = [existing_item]
74 | elif existing_item.type == "show":
75 | items_to_submit = [e for s in existing_item.seasons for e in s.episodes if e.last_state == States.Completed and Subliminal.should_submit(e)]
76 | elif existing_item.type == "season":
77 | items_to_submit = [e for e in existing_item.episodes if e.last_state == States.Completed and Subliminal.should_submit(e)]
78 | if not items_to_submit:
79 | return no_further_processing
80 | else:
81 | return no_further_processing
82 |
83 | # if items_to_submit and next_service:
84 | # for item in items_to_submit:
85 | # logger.debug(f"Submitting {item.log_string} ({item.id}) to {next_service if isinstance(next_service, str) else next_service.__name__}")
86 |
87 | return next_service, items_to_submit
88 |
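process_event is a pure routing function: given the emitting service and the item's last_state, it answers which service runs next and with which items. A hedged sketch of how a dispatcher might consume it (the real loop lives in the event manager, not shown here):

    next_service, items_to_submit = process_event(emitted_by=None, existing_item=item)
    if next_service:
        for sub_item in items_to_submit:
            # hand sub_item to next_service via the event queue
            ...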
--------------------------------------------------------------------------------
/src/program/types.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass, field
2 | from datetime import datetime
3 | from typing import Generator, Optional, Union
4 |
5 | from program.media.item import MediaItem
6 | from program.services.content import (
7 | Listrr,
8 | Mdblist,
9 | Overseerr,
10 | PlexWatchlist,
11 | TraktContent,
12 | )
13 | from program.services.downloaders import (
14 | RealDebridDownloader,
15 | AllDebridDownloader,
16 | TorBoxDownloader,
17 | )
18 |
19 | from program.services.libraries import SymlinkLibrary
20 | from program.services.scrapers import (
21 | Comet,
22 | Jackett,
23 | Knightcrawler,
24 | Mediafusion,
25 | Orionoid,
26 | Scraping,
27 | Torrentio,
28 | Zilean,
29 | )
30 | from program.services.updaters import Updater
31 | from program.symlink import Symlinker
32 |
33 | # Typehint classes
34 | Scraper = Union[Scraping, Torrentio, Knightcrawler, Mediafusion, Orionoid, Jackett, Zilean, Comet]
35 | Content = Union[Overseerr, PlexWatchlist, Listrr, Mdblist, TraktContent]
36 | Downloader = Union[
37 | RealDebridDownloader,
38 | AllDebridDownloader,
39 | TorBoxDownloader,
40 | ]
41 |
42 | Service = Union[Content, SymlinkLibrary, Scraper, Downloader, Symlinker, Updater]
43 | MediaItemGenerator = Generator[MediaItem, None, MediaItem | None]
44 |
45 | class ProcessedEvent:
46 | service: Service
47 | related_media_items: list[MediaItem]
48 |
49 | @dataclass
50 | class Event:
51 | emitted_by: Service
52 | item_id: Optional[str] = None
53 | content_item: Optional[MediaItem] = None
54 |     run_at: datetime = field(default_factory=datetime.now)
55 |
56 | @property
57 | def log_message(self):
58 | return f"Item ID {self.item_id}" if self.item_id else f"External ID {self.content_item.imdb_id}"
--------------------------------------------------------------------------------
/src/program/utils/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 | import secrets
4 | import string
5 | from pathlib import Path
6 |
7 | from loguru import logger
8 |
9 | root_dir = Path(__file__).resolve().parents[3]
10 |
11 | data_dir_path = root_dir / "data"
12 | alembic_dir = data_dir_path / "alembic"
13 |
14 | def get_version() -> str:
15 | with open(root_dir / "pyproject.toml") as file:
16 | pyproject_toml = file.read()
17 |
18 | match = re.search(r'version = "(.+)"', pyproject_toml)
19 | if match:
20 | version = match.group(1)
21 | else:
22 | raise ValueError("Could not find version in pyproject.toml")
23 | return version
24 |
25 | def generate_api_key():
26 |     """Return the API_KEY environment variable if it is a 32-character key; otherwise generate a new one."""
27 | API_KEY = os.getenv("API_KEY", "")
28 | if len(API_KEY) != 32:
29 | logger.warning("env.API_KEY is not 32 characters long, generating a new one...")
30 | characters = string.ascii_letters + string.digits
31 |
32 | # Generate the API key
33 | api_key = "".join(secrets.choice(characters) for _ in range(32))
34 | logger.warning(f"New api key: {api_key}")
35 | else:
36 | api_key = API_KEY
37 |
38 | return api_key
--------------------------------------------------------------------------------
/src/program/utils/cli.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | from program.db.db_functions import (
4 | hard_reset_database,
5 | hard_reset_database_pre_migration,
6 | )
7 | from program.services.libraries.symlink import fix_broken_symlinks
8 | from program.settings.manager import settings_manager
9 | from program.utils.logging import log_cleaner, logger
10 |
11 |
12 | def handle_args():
13 | parser = argparse.ArgumentParser()
14 | parser.add_argument(
15 | "--ignore_cache",
16 | action="store_true",
17 | help="Ignore the cached metadata, create new data from scratch.",
18 | )
19 | parser.add_argument(
20 | "--hard_reset_db",
21 | action="store_true",
22 | help="Hard reset the database.",
23 | )
24 | parser.add_argument(
25 | "--hard_reset_db_pre_migration",
26 | action="store_true",
27 |         help="Hard reset the database (pre-migration method).",
28 | )
29 | parser.add_argument(
30 | "--clean_logs",
31 | action="store_true",
32 | help="Clean old logs.",
33 | )
34 | parser.add_argument(
35 | "--fix_symlinks",
36 | action="store_true",
37 | help="Fix broken symlinks.",
38 | )
39 | parser.add_argument(
40 | "-p", "--port",
41 | type=int,
42 | default=8080,
43 |         help="Port to run the server on (default: 8080)"
44 | )
45 |
46 | args = parser.parse_args()
47 |
48 | if args.hard_reset_db:
49 | hard_reset_database()
50 | logger.info("Hard reset the database")
51 | exit(0)
52 |
53 | if args.hard_reset_db_pre_migration:
54 | hard_reset_database_pre_migration()
55 | logger.info("Hard reset the database")
56 | exit(0)
57 |
58 | if args.clean_logs:
59 | log_cleaner()
60 | logger.info("Cleaned old logs.")
61 | exit(0)
62 |
63 | if args.fix_symlinks:
64 | fix_broken_symlinks(settings_manager.settings.symlink.library_path, settings_manager.settings.symlink.rclone_path)
65 | exit(0)
66 |
67 | return args
68 |
--------------------------------------------------------------------------------
/src/program/utils/logging.py:
--------------------------------------------------------------------------------
1 | """Logging utils"""
2 |
3 | import os
4 | import sys
5 | from datetime import datetime
6 |
7 | from loguru import logger
8 | from rich.console import Console
9 | from rich.progress import (
10 | BarColumn,
11 | Progress,
12 | SpinnerColumn,
13 | TextColumn,
14 | TimeRemainingColumn,
15 | )
16 |
17 | from program.settings.manager import settings_manager
18 | from program.utils import data_dir_path
19 |
20 | LOG_ENABLED: bool = settings_manager.settings.log
21 |
22 | def setup_logger(level):
23 | """Setup the logger"""
24 | logs_dir_path = data_dir_path / "logs"
25 | os.makedirs(logs_dir_path, exist_ok=True)
26 | timestamp = datetime.now().strftime("%Y%m%d-%H%M")
27 | log_filename = logs_dir_path / f"riven-{timestamp}.log"
28 |
29 | # Helper function to get log settings from environment or use default
30 | def get_log_settings(name, default_color, default_icon):
31 | color = os.getenv(f"RIVEN_LOGGER_{name}_FG", default_color)
32 | icon = os.getenv(f"RIVEN_LOGGER_{name}_ICON", default_icon)
33 |         return f"<fg #{color}>", icon
34 |
35 | # Define log levels and their default settings
36 | log_levels = {
37 | "PROGRAM": (36, "cc6600", "🤖"),
38 | "DATABASE": (37, "d834eb", "🛢️"),
39 | "DEBRID": (38, "cc3333", "🔗"),
40 | "SYMLINKER": (39, "F9E79F", "🔗"),
41 | "SCRAPER": (40, "3D5A80", "👻"),
42 | "COMPLETED": (41, "FFFFFF", "🟢"),
43 | "CACHE": (42, "527826", "📜"),
44 | "NOT_FOUND": (43, "818589", "🤷"),
45 | "NEW": (44, "e63946", "✨"),
46 | "FILES": (45, "FFFFE0", "🗃️ "),
47 | "ITEM": (46, "92a1cf", "🗃️ "),
48 | "DISCOVERY": (47, "e56c49", "🔍"),
49 | "API": (10, "006989", "👾"),
50 | "PLEX": (47, "DAD3BE", "📽️ "),
51 | "LOCAL": (48, "DAD3BE", "📽️ "),
52 | "JELLYFIN": (48, "DAD3BE", "📽️ "),
53 | "EMBY": (48, "DAD3BE", "📽️ "),
54 | "TRAKT": (48, "1DB954", "🎵"),
55 | }
56 |
57 | # Set log levels
58 | for name, (no, default_color, default_icon) in log_levels.items():
59 | color, icon = get_log_settings(name, default_color, default_icon)
60 | logger.level(name, no=no, color=color, icon=icon)
61 |
62 | # Default log levels
63 | debug_color, debug_icon = get_log_settings("DEBUG", "98C1D9", "🐞")
64 | info_color, info_icon = get_log_settings("INFO", "818589", "📰")
65 | warning_color, warning_icon = get_log_settings("WARNING", "ffcc00", "⚠️ ")
66 | critical_color, critical_icon = get_log_settings("CRITICAL", "ff0000", "")
67 | success_color, success_icon = get_log_settings("SUCCESS", "00ff00", "✔️ ")
68 |
69 | logger.level("DEBUG", color=debug_color, icon=debug_icon)
70 | logger.level("INFO", color=info_color, icon=info_icon)
71 | logger.level("WARNING", color=warning_color, icon=warning_icon)
72 | logger.level("CRITICAL", color=critical_color, icon=critical_icon)
73 | logger.level("SUCCESS", color=success_color, icon=success_icon)
74 |
75 | # Log format to match the old log format, but with color
76 | log_format = (
77 | "{time:YY-MM-DD} {time:HH:mm:ss} | "
78 | "{level.icon} {level: <9} | "
79 | "{module}.{function} - {message}"
80 | )
81 |
82 | logger.configure(handlers=[
83 | {
84 | "sink": sys.stderr,
85 | "level": level.upper() or "INFO",
86 | "format": log_format,
87 | "backtrace": False,
88 | "diagnose": False,
89 | "enqueue": True,
90 | },
91 | {
92 | "sink": log_filename,
93 | "level": level.upper(),
94 | "format": log_format,
95 | "rotation": "25 MB",
96 | "retention": "24 hours",
97 | "compression": None,
98 | "backtrace": False,
99 | "diagnose": True,
100 | "enqueue": True,
101 | }
102 | ])
103 |
104 | def log_cleaner():
105 | """Remove old log files based on retention settings, leaving the most recent one."""
106 | cleaned = False
107 | try:
108 | logs_dir_path = data_dir_path / "logs"
109 | log_files = sorted(logs_dir_path.glob("riven-*.log"), key=lambda x: x.stat().st_mtime)
110 | for log_file in log_files[:-1]:
111 | # remove files older than 8 hours
112 | if (datetime.now() - datetime.fromtimestamp(log_file.stat().st_mtime)).total_seconds() / 3600 > 8:
113 | log_file.unlink()
114 | cleaned = True
115 | if cleaned:
116 | logger.debug("Cleaned up old logs that were older than 8 hours.")
117 | except Exception as e:
118 | logger.error(f"Failed to clean old logs: {e}")
119 |
120 | def create_progress_bar(total_items: int) -> tuple[Progress, Console]:
121 | console = Console()
122 | progress = Progress(
123 | SpinnerColumn(),
124 | TextColumn("[progress.description]{task.description}"),
125 | BarColumn(),
126 | TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
127 | TimeRemainingColumn(),
128 | TextColumn("[progress.completed]{task.completed}/{task.total}", justify="right"),
129 | TextColumn("[progress.log]{task.fields[log]}", justify="right"),
130 | console=console,
131 | transient=True
132 | )
133 | return progress, console
134 |
135 |
136 | console = Console()
137 | log_level = "DEBUG" if settings_manager.settings.debug else "INFO"
138 | setup_logger(log_level)
--------------------------------------------------------------------------------
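Note on the custom levels above: get_log_settings() reads per-level overrides from the environment, so colors and icons can be restyled without touching this module. A minimal sketch, assuming the color variable follows the same RIVEN_LOGGER_<NAME>_FG pattern as the _ICON lookup shown above (values here are illustrative):

import os

# Must be set before program.utils.logging is imported, since the module
# registers its levels (and calls setup_logger) at import time.
os.environ["RIVEN_LOGGER_SCRAPER_FG"] = "ff8800"  # hex color, no leading '#'
os.environ["RIVEN_LOGGER_SCRAPER_ICON"] = "🕷️"

from program.utils.logging import logger

logger.log("SCRAPER", "scraping with a custom color and icon")
--------------------------------------------------------------------------------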
/src/program/utils/notifications.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | from apprise import Apprise
4 | from loguru import logger
5 |
6 | from program.media.item import MediaItem
7 | from program.settings.manager import settings_manager
8 | from program.settings.models import NotificationsModel
9 |
10 | ntfy = Apprise()
11 | settings: NotificationsModel = settings_manager.settings.notifications
12 | on_item_type: List[str] = settings.on_item_type
13 |
14 |
15 | for service_url in settings.service_urls:
16 | try:
17 | if "discord" in service_url:
18 | service_url = f"{service_url}?format=markdown"
19 | ntfy.add(service_url)
20 | except Exception as e:
21 | logger.debug(f"Failed to add service URL {service_url}: {e}")
22 | continue
23 |
24 |
25 | def notify(title: str, body: str) -> None:
26 | """Send notifications to all services in settings."""
27 | try:
28 | ntfy.notify(title=title, body=body)
29 | except Exception as e:
30 | logger.debug(f"Failed to send notification: {e}")
31 |
32 | def notify_on_complete(item: MediaItem) -> None:
33 | """Send notifications to all services in settings."""
34 | if item.type not in on_item_type:
35 | return
36 |
37 | title = f"Riven completed a {item.type.title()}!"
38 | body = f"**{item.log_string}** ({item.aired_at.year})"
39 | notify(title, body)
40 |
--------------------------------------------------------------------------------
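Because the Apprise object and service URLs are wired up at import time, sending a notification is a single call. A minimal sketch, assuming at least one valid service URL is present in settings.notifications.service_urls:

from program.utils.notifications import notify

# Fans out to every registered Apprise service; delivery failures are
# logged at debug level rather than raised.
notify(title="Riven", body="**Example Movie** (2024)")
--------------------------------------------------------------------------------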
/src/program/utils/useragents.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 |
4 | class UserAgentFactory:
5 | def __init__(self, user_agents: list):
6 | self.user_agents = user_agents
7 |
8 | def get_random_user_agent(self):
9 | return random.choice(self.user_agents)
10 |
11 |
12 | # Sample user agents pool
13 | user_agents_pool = [
14 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3",
15 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3",
16 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0",
17 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15",
18 | "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:89.0) Gecko/20100101 Firefox/89.0",
19 | "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
20 | "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0",
21 | "Mozilla/5.0 (Windows NT 5.1; rv:40.0) Gecko/20100101 Firefox/40.0",
22 | "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.104 Safari/537.36",
23 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36",
24 | "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36",
25 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36",
26 | "Mozilla/5.0 (Windows NT 6.2; WOW64; rv:28.0) Gecko/20100101 Firefox/28.0",
27 | "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:28.0) Gecko/20100101 Firefox/28.0",
28 | "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.110 Safari/537.36",
29 | "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:27.0) Gecko/20100101 Firefox/27.0",
30 | "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.110 Safari/537.36",
31 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36",
32 | "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36",
33 | "Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; AS; rv:11.0) like Gecko",
34 | "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36",
35 | "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.120 Safari/537.36",
36 | "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko",
37 | "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:30.0) Gecko/20100101 Firefox/30.0",
38 | "Mozilla/5.0 (X11; Linux x86_64; rv:31.0) Gecko/20100101 Firefox/31.0",
39 | "Mozilla/5.0 (X11; Linux i686; rv:31.0) Gecko/20100101 Firefox/31.0",
40 | "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:33.0) Gecko/20100101 Firefox/33.0",
41 | "Mozilla/5.0 (X11; Linux x86_64; rv:32.0) Gecko/20100101 Firefox/32.0",
42 | "Mozilla/5.0 (Windows NT 6.1; rv:29.0) Gecko/20100101 Firefox/29.0",
43 | "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:31.0) Gecko/20100101 Firefox/31.0",
44 | "curl/7.64.1",
45 | "curl/7.58.0",
46 | "curl/7.61.1",
47 | "curl/7.55.1",
48 | "curl/7.54.0",
49 | "curl/7.65.3",
50 | "curl/7.50.3",
51 | "curl/7.67.0",
52 | "curl/7.63.0",
53 | ]
54 | user_agent_factory = UserAgentFactory(user_agents_pool)
--------------------------------------------------------------------------------
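A minimal sketch of rotating user agents on outbound requests (the target URL is illustrative):

import requests

from program.utils.useragents import user_agent_factory

# Draw a fresh user agent per request to vary the client fingerprint.
headers = {"User-Agent": user_agent_factory.get_random_user_agent()}
response = requests.get("https://example.com", headers=headers, timeout=10)
print(response.status_code, headers["User-Agent"])
--------------------------------------------------------------------------------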
/src/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | minversion = 7.0
3 | filterwarnings = ignore::DeprecationWarning
4 | addopts = -vv
5 | pythonpath = .
6 | testpaths = tests
--------------------------------------------------------------------------------
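Since pythonpath = . and testpaths = tests, the suite is meant to be launched from src/. A programmatic equivalent (a sketch; plain `pytest` from src/ does the same, with the ini supplying -vv and the test path):

import sys

import pytest

# An empty argument list defers entirely to pytest.ini for options and paths.
sys.exit(pytest.main([]))
--------------------------------------------------------------------------------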
/src/routers/__init__.py:
--------------------------------------------------------------------------------
1 | from fastapi import Depends, Request
2 | from fastapi.routing import APIRouter
3 |
4 | from auth import resolve_api_key, resolve_ws_api_key
5 | from program.settings.manager import settings_manager
6 | from routers.models.shared import RootResponse
7 | from routers.secure.default import router as default_router
8 | from routers.secure.items import router as items_router
9 | from routers.secure.scrape import router as scrape_router
10 | from routers.secure.settings import router as settings_router
11 | from routers.secure.stream import router as stream_router
12 | from routers.secure.webhooks import router as webhooks_router
13 | from routers.secure.ws import router as ws_router
14 |
15 | API_VERSION = "v1"
16 |
17 | app_router = APIRouter(prefix=f"/api/{API_VERSION}")
18 | @app_router.get("/", operation_id="root")
19 | async def root(_: Request) -> RootResponse:
20 | return {
21 | "message": "Riven is running!",
22 | "version": settings_manager.settings.version,
23 | }
24 |
25 | app_router.include_router(default_router, dependencies=[Depends(resolve_api_key)])
26 | app_router.include_router(items_router, dependencies=[Depends(resolve_api_key)])
27 | app_router.include_router(scrape_router, dependencies=[Depends(resolve_api_key)])
28 | app_router.include_router(settings_router, dependencies=[Depends(resolve_api_key)])
29 | app_router.include_router(webhooks_router, dependencies=[Depends(resolve_api_key)])
30 | app_router.include_router(ws_router, dependencies=[Depends(resolve_ws_api_key)])
31 | app_router.include_router(stream_router, dependencies=[Depends(resolve_api_key)])
--------------------------------------------------------------------------------
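A minimal sketch of serving this router; main.py owns the real application wiring, so the host, port, and title below are illustrative only:

import uvicorn
from fastapi import FastAPI

from routers import app_router

app = FastAPI(title="Riven")
app.include_router(app_router)  # everything is exposed under /api/v1

if __name__ == "__main__":
    uvicorn.run(app, host="127.0.0.1", port=8080)
--------------------------------------------------------------------------------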
/src/routers/models/overseerr.py:
--------------------------------------------------------------------------------
1 | from typing import Any, List, Literal, Optional
2 |
3 | from pydantic import BaseModel, field_validator
4 |
5 | MediaType = Literal["movie", "tv"]
6 |
7 |
8 | class Media(BaseModel):
9 | media_type: MediaType
10 | status: str
11 | imdbId: str | None = None
12 | tmdbId: int
13 | tvdbId: int | None = None
14 |
15 | @field_validator("imdbId", mode="after")
16 | @classmethod
17 | def stringify_imdb_id(cls, value: Any):
18 | if value and isinstance(value, int):
19 | return f"tt{int(value):07d}"
20 | return None
21 |
22 | @field_validator("tvdbId", "tmdbId", mode="before")
23 | @classmethod
24 | def validate_ids(cls, value: Any):
25 | if value and isinstance(value, str) and value != "":
26 | return int(value)
27 | return None
28 |
29 |
30 | class RequestInfo(BaseModel):
31 | request_id: str
32 | requestedBy_email: str
33 | requestedBy_username: str
34 | requestedBy_avatar: Optional[str]
35 |
36 | class IssueInfo(BaseModel):
37 | issue_id: str
38 | issue_type: str
39 | issue_status: str
40 | reportedBy_email: str
41 | reportedBy_username: str
42 | reportedBy_avatar: Optional[str]
43 |
44 | class CommentInfo(BaseModel):
45 | comment_message: str
46 | commentedBy_email: str
47 | commentedBy_username: str
48 | commentedBy_avatar: Optional[str]
49 |
50 | class OverseerrWebhook(BaseModel):
51 | notification_type: str
52 | event: str
53 | subject: str
54 | message: Optional[str] = None
55 | image: Optional[str] = None
56 | media: Media
57 | request: Optional[RequestInfo] = None
58 | issue: Optional[IssueInfo] = None
59 | comment: Optional[CommentInfo] = None
60 | extra: List[dict[str, Any]] = []
61 |
62 | @property
63 | def requested_seasons(self) -> Optional[List[int]]:
64 | for extra in self.extra:
65 | if extra["name"] == "Requested Seasons":
66 | return [int(x) for x in extra["value"].split(",")]
67 | return None
68 |
--------------------------------------------------------------------------------
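A minimal sketch of validating an incoming payload with these models (field values are illustrative; note how the validators coerce the id types):

from routers.models.overseerr import OverseerrWebhook

payload = {
    "notification_type": "MEDIA_APPROVED",
    "event": "media.approved",
    "subject": "Example Show",
    "media": {"media_type": "tv", "status": "PENDING", "tmdbId": 1399, "tvdbId": "121361"},
    "extra": [{"name": "Requested Seasons", "value": "1,2"}],
}

req = OverseerrWebhook.model_validate(payload)
print(req.media.tvdbId)       # 121361 -- coerced from the string form
print(req.requested_seasons)  # [1, 2]
--------------------------------------------------------------------------------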
/src/routers/models/plex.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 |
3 | from pydantic import BaseModel, Field
4 |
5 |
6 | class Account(BaseModel):
7 | id: int
8 | thumb: str
9 | title: str
10 |
11 | class Server(BaseModel):
12 | title: str
13 | uuid: str
14 |
15 | class Player(BaseModel):
16 | local: bool
17 | publicAddress: str
18 | title: str
19 | uuid: str
20 |
21 | class Metadata(BaseModel):
22 | librarySectionType: str
23 | ratingKey: str
24 | key: str
25 | guid: str
26 | type: str
27 | title: str
28 | librarySectionTitle: str
29 | librarySectionID: int
30 | librarySectionKey: str
31 | contentRating: str
32 | summary: str
33 | rating: Optional[float] = Field(None, description="Rating of the media")
34 | audienceRating: Optional[float] = Field(None, description="Audience rating of the media")
35 | year: int
36 | tagline: Optional[str] = Field(None, description="Tagline of the media")
37 | thumb: str
38 |
39 | class PlexPayload(BaseModel):
40 | event: str
41 | user: bool
42 | owner: bool
43 | Account: Account
44 | Server: Server
45 | Player: Player
46 | Metadata: Metadata
47 |
--------------------------------------------------------------------------------
/src/routers/models/shared.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseModel
2 |
3 |
4 | class MessageResponse(BaseModel):
5 | message: str
6 |
7 | class RootResponse(MessageResponse):
8 | version: str
--------------------------------------------------------------------------------
/src/routers/secure/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rivenmedia/riven/d91dd254c08fbb410706d4fc6cb97f3691ebc67c/src/routers/secure/__init__.py
--------------------------------------------------------------------------------
/src/routers/secure/settings.py:
--------------------------------------------------------------------------------
1 | from copy import copy
2 | from typing import Any, Dict, List
3 |
4 | from fastapi import APIRouter, HTTPException
5 | from pydantic import BaseModel, ValidationError
6 |
7 | from program.settings.manager import settings_manager
8 | from program.settings.models import AppModel
9 |
10 | from ..models.shared import MessageResponse
11 |
12 |
13 | class SetSettings(BaseModel):
14 | key: str
15 | value: Any
16 |
17 |
18 | router = APIRouter(
19 | prefix="/settings",
20 | tags=["settings"],
21 | responses={404: {"description": "Not found"}},
22 | )
23 |
24 |
25 | @router.get("/schema", operation_id="get_settings_schema")
26 | async def get_settings_schema() -> dict[str, Any]:
27 | """
28 | Get the JSON schema for the settings.
29 | """
30 | return settings_manager.settings.model_json_schema()
31 |
32 | @router.get("/load", operation_id="load_settings")
33 | async def load_settings() -> MessageResponse:
34 | settings_manager.load()
35 | return {
36 | "message": "Settings loaded!",
37 | }
38 |
39 | @router.post("/save", operation_id="save_settings")
40 | async def save_settings() -> MessageResponse:
41 | settings_manager.save()
42 | return {
43 | "message": "Settings saved!",
44 | }
45 |
46 |
47 | @router.get("/get/all", operation_id="get_all_settings")
48 | async def get_all_settings() -> AppModel:
49 | return copy(settings_manager.settings)
50 |
51 |
52 | @router.get("/get/{paths}", operation_id="get_settings")
53 | async def get_settings(paths: str) -> dict[str, Any]:
54 | current_settings = settings_manager.settings.model_dump()
55 | data = {}
56 | for path in paths.split(","):
57 | keys = path.split(".")
58 | current_obj = current_settings
59 |
60 | for k in keys:
61 | if k not in current_obj:
62 |                 raise HTTPException(status_code=404, detail=f"Path '{path}' does not exist.")
63 | current_obj = current_obj[k]
64 |
65 | data[path] = current_obj
66 | return data
67 |
68 |
69 | @router.post("/set/all", operation_id="set_all_settings")
70 | async def set_all_settings(new_settings: Dict[str, Any]) -> MessageResponse:
71 | current_settings = settings_manager.settings.model_dump()
72 |
73 | def update_settings(current_obj, new_obj):
74 | for key, value in new_obj.items():
75 | if isinstance(value, dict) and key in current_obj:
76 | update_settings(current_obj[key], value)
77 | else:
78 | current_obj[key] = value
79 |
80 | update_settings(current_settings, new_settings)
81 |
82 | # Validate and save the updated settings
83 | try:
84 | updated_settings = settings_manager.settings.model_validate(current_settings)
85 | settings_manager.load(settings_dict=updated_settings.model_dump())
86 | settings_manager.save() # Ensure the changes are persisted
87 | except Exception as e:
88 | raise HTTPException(status_code=400, detail=str(e))
89 |
90 | return {
91 | "message": "All settings updated successfully!",
92 | }
93 |
94 | @router.post("/set", operation_id="set_settings")
95 | async def set_settings(settings: List[SetSettings]) -> MessageResponse:
96 | current_settings = settings_manager.settings.model_dump()
97 |
98 | for setting in settings:
99 | keys = setting.key.split(".")
100 | current_obj = current_settings
101 |
102 | # Navigate to the last key's parent object, ensuring all keys exist.
103 | for k in keys[:-1]:
104 | if k not in current_obj:
105 | raise HTTPException(
106 | status_code=400,
107 | detail=f"Path '{'.'.join(keys[:-1])}' does not exist.",
108 | )
109 | current_obj = current_obj[k]
110 |
111 | # Ensure the final key exists before setting the value.
112 | if keys[-1] in current_obj:
113 | current_obj[keys[-1]] = setting.value
114 | else:
115 | raise HTTPException(
116 | status_code=400,
117 | detail=f"Key '{keys[-1]}' does not exist in path '{'.'.join(keys[:-1])}'.",
118 | )
119 |
120 | # Validate and apply the updated settings to the AppModel instance
121 | try:
122 | updated_settings = settings_manager.settings.__class__(**current_settings)
123 | settings_manager.load(settings_dict=updated_settings.model_dump())
124 | settings_manager.save() # Ensure the changes are persisted
125 | except ValidationError as e:
126 |         raise HTTPException(
127 |             status_code=400,
128 |             detail=f"Failed to update settings: {str(e)}",
129 |         ) from e
130 |
131 | return {"message": "Settings updated successfully."}
132 |
--------------------------------------------------------------------------------
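A minimal sketch of driving the dotted-path /settings/set endpoint through FastAPI's test client. The "x-api-key" header name is an assumption here; auth.py defines the actual key resolution:

from fastapi import FastAPI
from fastapi.testclient import TestClient

from routers import app_router

app = FastAPI()
app.include_router(app_router)
client = TestClient(app)

resp = client.post(
    "/api/v1/settings/set",
    headers={"x-api-key": "changeme"},  # assumed header name; see auth.py
    json=[{"key": "downloaders.real_debrid.enabled", "value": True}],
)
print(resp.status_code, resp.json())
--------------------------------------------------------------------------------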
/src/routers/secure/stream.py:
--------------------------------------------------------------------------------
1 | import json
2 | import logging
3 | from datetime import datetime
4 |
5 | from fastapi import APIRouter, Request
6 | from fastapi.responses import StreamingResponse
7 | from loguru import logger
8 | from pydantic import BaseModel
9 |
10 | from program.managers.sse_manager import sse_manager
11 |
12 | router = APIRouter(
13 | responses={404: {"description": "Not found"}},
14 | prefix="/stream",
15 | tags=["stream"],
16 | )
17 |
18 | class EventResponse(BaseModel):
19 | data: dict
20 |
21 | class SSELogHandler(logging.Handler):
22 | def emit(self, record: logging.LogRecord):
23 | log_entry = {
24 | "time": datetime.fromtimestamp(record.created).isoformat(),
25 | "level": record.levelname,
26 | "message": record.msg
27 | }
28 | sse_manager.publish_event("logging", json.dumps(log_entry))
29 |
30 | logger.add(SSELogHandler())
31 |
32 | @router.get("/event_types")
33 | async def get_event_types():
34 | return {"message": list(sse_manager.event_queues.keys())}
35 |
36 | @router.get("/{event_type}")
37 | async def stream_events(_: Request, event_type: str) -> StreamingResponse:
38 | return StreamingResponse(sse_manager.subscribe(event_type), media_type="text/event-stream")
--------------------------------------------------------------------------------
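A minimal sketch of consuming the "logging" event stream with httpx (the base URL and API-key header name are assumptions):

import httpx

# Reads text/event-stream frames published via sse_manager; timeout=None
# keeps the long-lived stream open.
with httpx.stream(
    "GET",
    "http://localhost:8080/api/v1/stream/logging",
    headers={"x-api-key": "changeme"},
    timeout=None,
) as response:
    for line in response.iter_lines():
        if line:
            print(line)
--------------------------------------------------------------------------------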
/src/routers/secure/webhooks.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict
2 |
3 | import pydantic
4 | from fastapi import APIRouter, Request
5 | from kink import di
6 | from loguru import logger
7 | from requests import RequestException
8 |
9 | from program.apis.trakt_api import TraktAPI
10 | from program.media.item import MediaItem
11 | from program.services.content.overseerr import Overseerr
12 |
13 | from ..models.overseerr import OverseerrWebhook
14 |
15 | router = APIRouter(
16 | prefix="/webhook",
17 | responses={404: {"description": "Not found"}},
18 | )
19 |
20 |
21 | @router.post("/overseerr")
22 | async def overseerr(request: Request) -> Dict[str, Any]:
23 | """Webhook for Overseerr"""
24 | try:
25 | response = await request.json()
26 | if response.get("subject") == "Test Notification":
27 | logger.log("API", "Received test notification, Overseerr configured properly")
28 | return {"success": True}
29 | req = OverseerrWebhook.model_validate(response)
30 |     except Exception as e:  # includes pydantic.ValidationError
31 | logger.error(f"Failed to process request: {e}")
32 | return {"success": False, "message": str(e)}
33 |
34 | imdb_id = get_imdbid_from_overseerr(req)
35 | if not imdb_id:
36 | logger.error(f"Failed to get imdb_id from Overseerr: {req.media.tmdbId}")
37 | return {"success": False, "message": "Failed to get imdb_id from Overseerr"}
38 |
39 | overseerr: Overseerr = request.app.program.all_services[Overseerr]
40 | if not overseerr.initialized:
41 | logger.error("Overseerr not initialized")
42 | return {"success": False, "message": "Overseerr not initialized"}
43 |
44 |     new_item = MediaItem({"imdb_id": imdb_id, "requested_by": "overseerr", "overseerr_id": req.request.request_id if req.request else None})
45 | request.app.program.em.add_item(new_item, service="Overseerr")
46 | return {"success": True}
47 |
48 |
49 | def get_imdbid_from_overseerr(req: OverseerrWebhook) -> str | None:
50 | """Get the imdb_id from the Overseerr webhook"""
51 | imdb_id = req.media.imdbId
52 | trakt_api = di[TraktAPI]
53 | if not imdb_id:
54 | try:
55 | _type = req.media.media_type
56 | if _type == "tv":
57 | _type = "show"
58 | imdb_id = trakt_api.get_imdbid_from_tmdb(str(req.media.tmdbId), type=_type)
59 | if not imdb_id or not imdb_id.startswith("tt"):
60 | imdb_id = trakt_api.get_imdbid_from_tvdb(str(req.media.tvdbId), type=_type)
61 | except RequestException:
62 | pass
63 | return imdb_id
--------------------------------------------------------------------------------
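A minimal sketch that exercises the route the same way Overseerr's "test notification" button does; the handler short-circuits on the subject before any model validation (base URL and header name are assumptions):

import httpx

resp = httpx.post(
    "http://localhost:8080/api/v1/webhook/overseerr",
    headers={"x-api-key": "changeme"},  # assumed header name; see auth.py
    json={"notification_type": "TEST_NOTIFICATION", "subject": "Test Notification"},
)
print(resp.json())  # {"success": True} once the webhook is reachable
--------------------------------------------------------------------------------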
/src/routers/secure/ws.py:
--------------------------------------------------------------------------------
1 | import json
2 | import logging
3 | from datetime import datetime
4 | 
5 | from fastapi import APIRouter, WebSocket, WebSocketDisconnect
6 | from loguru import logger
7 | from program.managers.websocket_manager import manager
8 | class WebSocketLogHandler(logging.Handler):
9 | def emit(self, record: logging.LogRecord):
10 | log_entry = {
11 | "time": datetime.fromtimestamp(record.created).isoformat(),
12 | "level": record.levelname,
13 | "message": record.msg
14 | }
15 | manager.publish("logging", json.dumps(log_entry))
16 |
17 | logger.add(WebSocketLogHandler())
18 |
19 | router = APIRouter(
20 | prefix="/ws",
21 | responses={404: {"description": "Not found"}},
22 | )
23 |
24 |
25 | @router.websocket("/{topic}")
26 | async def websocket_endpoint(websocket: WebSocket, topic: str):
27 | await manager.connect(websocket, topic)
28 | logger.info(f"Client connected to topic: {topic}")
29 | try:
30 | while True:
31 | data = await websocket.receive_text()
32 | try:
33 | parsed_data = json.loads(data)
34 | logger.debug(parsed_data)
35 | except json.JSONDecodeError:
36 | logger.error(f"Invalid JSON data received: {data}")
37 | continue
38 |
39 | except WebSocketDisconnect:
40 | logger.info(f"Client disconnected from topic: {topic}")
41 | await manager.disconnect(websocket, topic)
--------------------------------------------------------------------------------
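A minimal sketch of subscribing to a topic with the websockets client library (the URL is illustrative, and the resolve_ws_api_key auth step is omitted here):

import asyncio
import json

import websockets

async def main():
    # "logging" mirrors the topic that WebSocketLogHandler publishes to.
    async with websockets.connect("ws://localhost:8080/api/v1/ws/logging") as ws:
        await ws.send(json.dumps({"hello": "riven"}))  # server logs parsed JSON
        while True:
            print(await ws.recv())

asyncio.run(main())
--------------------------------------------------------------------------------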
/src/tests/test_cache.sqlite:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rivenmedia/riven/d91dd254c08fbb410706d4fc6cb97f3691ebc67c/src/tests/test_cache.sqlite
--------------------------------------------------------------------------------
/src/tests/test_container.py:
--------------------------------------------------------------------------------
1 | # from queue import Queue
2 | # from unittest.mock import MagicMock
3 | #
4 | # import pytest
5 | # from program import Program
6 | # from program.media.item import Episode, Movie, Season, Show, States
7 | #
8 | #
9 | # @pytest.fixture
10 | # def test_show():
11 | # # Setup Show with a Season and an Episode
12 | # show = Show({"imdb_id": "tt1405406", "requested_by": "user", "title": "Test Show"})
13 | # season = Season({"number": 1})
14 | # episode1 = Episode({"number": 1})
15 | # episode2 = Episode({"number": 2})
16 | # season.add_episode(episode1)
17 | # season.add_episode(episode2)
18 | # show.add_season(season)
19 | # return show
20 | #
21 | # @pytest.fixture
22 | # def test_movie():
23 | # return Movie({"imdb_id": "tt1375666", "requested_by": "user", "title": "Inception"})
24 | #
25 | # @pytest.fixture
26 | # def program():
27 | # args = MagicMock()
28 | # program = Program(args)
29 | # program.event_queue = Queue() # Reset the event queue
30 | # program.media_items = MediaItemContainer()
31 | # return program
32 | #
33 | # def test_incomplete_items_retrieval(program, test_show):
34 | # program.media_items.upsert(test_show)
35 | # incomplete_items = program.media_items.get_incomplete_items()
36 | # assert len(incomplete_items) == len(program.media_items)
37 | # assert incomplete_items[next(iter(incomplete_items))].state == States.Unknown
38 | #
39 | # def test_upsert_show_with_season_and_episodes():
40 | # container = MediaItemContainer()
41 | # show = Show({"imdb_id": "tt1405406", "requested_by": "user", "title": "Test Show"})
42 | # season = Season({"number": 1})
43 | # episode1 = Episode({"number": 1})
44 | # episode2 = Episode({"number": 2})
45 | # season.add_episode(episode1)
46 | # season.add_episode(episode2)
47 | # show.add_season(season)
48 | #
49 | # container.upsert(show)
50 | #
51 | # assert len(container._shows) == 1
52 | # assert len(container._seasons) == 1
53 | # assert len(container._episodes) == 2
54 | # assert len(container._items) == 4
55 | #
56 | # def test_remove_show_with_season_and_episodes():
57 | # container = MediaItemContainer()
58 | # show = Show({"imdb_id": "tt1405406", "requested_by": "user", "title": "Test Show"})
59 | # season = Season({"number": 1})
60 | # episode1 = Episode({"number": 1})
61 | # episode2 = Episode({"number": 2})
62 | # season.add_episode(episode1)
63 | # season.add_episode(episode2)
64 | # show.add_season(season)
65 | #
66 | # container.upsert(show)
67 | # container.remove(show)
68 | #
69 | # assert len(container._shows) == 1
70 | # assert len(container._seasons) == 1
71 | # assert len(container._episodes) == 2
72 | # assert len(container._items) == 1
73 | #
74 | # def test_merge_items():
75 | # container = MediaItemContainer()
76 | # show = Show({"imdb_id": "tt1405406", "requested_by": "user", "title": "Test Show"})
77 | # season = Season({"number": 1})
78 | # episode1 = Episode({"number": 1})
79 | # episode2 = Episode({"number": 2})
80 | # season.add_episode(episode1)
81 | # season.add_episode(episode2)
82 | # show.add_season(season)
83 | # container.upsert(show)
84 | #
85 | # new_show = Show({"imdb_id": "tt1405406", "requested_by": "user", "title": "Test Show"})
86 | # new_season = Season({"number": 1})
87 | # new_episode = Episode({"number": 3})
88 | # new_season.add_episode(new_episode)
89 | # new_show.add_season(new_season)
90 | # container.upsert(new_show)
91 | #
92 | # assert len(container._items) == 5, "Items should be merged"
93 | # assert len(container._shows) == 1, "Shows should be merged"
94 | # assert len(container._seasons) == 1, "Seasons should be merged"
95 | #
96 | # def test_upsert_movie():
97 | # container = MediaItemContainer()
98 | # movie = Movie({"imdb_id": "tt1375666", "requested_by": "user", "title": "Inception"})
99 | # container.upsert(movie)
100 | #
101 | # assert len(container._movies) == 1
102 | # assert len(container._items) == 1
103 | #
104 | # def test_save_and_load_container(tmpdir):
105 | # container = MediaItemContainer()
106 | # show = Show({"imdb_id": "tt1405406", "requested_by": "user", "title": "Test Show"})
107 | # season = Season({"number": 1})
108 | # episode1 = Episode({"number": 1})
109 | # episode2 = Episode({"number": 2})
110 | # season.add_episode(episode1)
111 | # season.add_episode(episode2)
112 | # show.add_season(season)
113 | # container.upsert(show)
114 | #
115 | # filepath = tmpdir.join("container.pkl")
116 | # container.save(str(filepath))
117 | #
118 | # new_container = MediaItemContainer()
119 | # new_container.load(str(filepath))
120 | #
121 | # assert len(new_container._shows) == 1
122 | # assert len(new_container._seasons) == 1
123 | # assert len(new_container._episodes) == 2
124 | # assert len(new_container._items) == 4
125 | #
126 | # def test_get_missing_items():
127 | # container = MediaItemContainer()
128 | # show = Show({"imdb_id": "tt1405406", "requested_by": "user", "title": "Test Show"})
129 | # season = Season({"number": 1})
130 | # episode1 = Episode({"number": 1})
131 | # episode2 = Episode({"number": 2})
132 | # season.add_episode(episode1)
133 | # season.add_episode(episode2)
134 | # show.add_season(season)
135 | # container.upsert(show)
136 | #
137 | # missing_items = container.get_incomplete_items()
138 | #
139 | # assert len(missing_items) == 4
140 | # assert missing_items[next(iter(missing_items))].state == States.Unknown
141 | # assert missing_items[next(iter(missing_items))].imdb_id == "tt1405406"
142 | # assert missing_items[next(iter(missing_items))].title == "Test Show"
--------------------------------------------------------------------------------
/src/tests/test_data/alldebrid_magnet_delete.json:
--------------------------------------------------------------------------------
1 | {
2 | "status": "success",
3 | "data": {
4 | "message": "Magnet was successfully deleted"
5 | }
6 | }
7 |
--------------------------------------------------------------------------------
/src/tests/test_data/alldebrid_magnet_instant.json:
--------------------------------------------------------------------------------
1 | {
2 | "status": "success",
3 | "data": {
4 | "magnets": [
5 | {
6 | "magnet": "3648baf850d5930510c1f172b534200ebb5496e6",
7 | "hash": "3648baf850d5930510c1f172b534200ebb5496e6",
8 | "instant": true,
9 | "files": [
10 | {"n": "ubuntu-24.04-desktop-amd64.iso", "s": 6114656256},
11 | {"n": "ubuntu-24.04-live-server-amd64.iso", "s": 2754981888}
12 | ]
13 | }
14 | ]
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/src/tests/test_data/alldebrid_magnet_instant_unavailable.json:
--------------------------------------------------------------------------------
1 | {
2 | "status": "success",
3 | "data": {
4 | "magnets": [
5 | {
6 | "magnet": "3648baf850d5930510c1f172b534200ebb5496e6",
7 | "hash": "3648baf850d5930510c1f172b534200ebb5496e6",
8 | "instant": false
9 | }
10 | ]
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/src/tests/test_data/alldebrid_magnet_status_one_downloading.json:
--------------------------------------------------------------------------------
1 | {
2 | "status": "success",
3 | "data": {
4 | "magnets": {
5 | "id": 251993753,
6 | "filename": "Ubuntu 24.04",
7 | "size": 8869638144,
8 | "hash": "3648baf850d5930510c1f172b534200ebb5496e6",
9 | "status": "Downloading",
10 | "statusCode": 1,
11 | "downloaded": 165063971,
12 | "uploaded": 0,
13 | "seeders": 6,
14 | "downloadSpeed": 4782727,
15 | "processingPerc": 0,
16 | "uploadSpeed": 0,
17 | "uploadDate": 1727454272,
18 | "completionDate": 0,
19 | "links": [],
20 | "type": "m",
21 | "notified": false,
22 | "version": 2
23 | }
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/src/tests/test_data/alldebrid_magnet_status_one_ready.json:
--------------------------------------------------------------------------------
1 | {
2 | "status": "success",
3 | "data": {
4 | "magnets": {
5 | "id": 251993753,
6 | "filename": "Ubuntu 24.04",
7 | "size": 8869638144,
8 | "hash": "3648baf850d5930510c1f172b534200ebb5496e6",
9 | "status": "Ready",
10 | "statusCode": 4,
11 | "downloaded": 8869638144,
12 | "uploaded": 0,
13 | "seeders": 0,
14 | "downloadSpeed": 0,
15 | "processingPerc": 0,
16 | "uploadSpeed": 0,
17 | "uploadDate": 1727454272,
18 | "completionDate": 1727454803,
19 | "links": [
20 | {
21 | "filename": "ubuntu-24.04-desktop-amd64.iso",
22 | "size": 6114656256,
23 | "files": [{"n": "ubuntu-24.04-desktop-amd64.iso", "s": 6114656256}],
24 | "link": "https://alldebrid.com/f/REDACTED"
25 | },
26 | {
27 | "filename": "ubuntu-24.04-live-server-amd64.iso",
28 | "size": 2754981888,
29 | "files": [
30 | {"n": "ubuntu-24.04-live-server-amd64.iso", "s": 2754981888}
31 | ],
32 | "link": "https://alldebrid.com/f/REDACTED"
33 | }
34 | ],
35 | "type": "m",
36 | "notified": false,
37 | "version": 2
38 | }
39 | }
40 | }
41 |
--------------------------------------------------------------------------------
/src/tests/test_data/alldebrid_magnet_upload_not_ready.json:
--------------------------------------------------------------------------------
1 | {
2 | "status": "success",
3 | "data": {
4 | "magnets": [
5 | {
6 | "magnet": "magnet:?xt=urn:btih:3648baf850d5930510c1f172b534200ebb5496e6",
7 | "hash": "3648baf850d5930510c1f172b534200ebb5496e6",
8 | "name": "noname",
9 | "filename_original": "",
10 | "size": 0,
11 | "ready": false,
12 | "id": 251993753
13 | }
14 | ]
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/src/tests/test_data/alldebrid_magnet_upload_ready.json:
--------------------------------------------------------------------------------
1 | {
2 | "status": "success",
3 | "data": {
4 | "magnets": [
5 | {
6 | "magnet": "magnet:?xt=urn:btih:3648baf850d5930510c1f172b534200ebb5496e6",
7 | "hash": "3648baf850d5930510c1f172b534200ebb5496e6",
8 | "name": "Ubuntu 24.04",
9 | "filename_original": "",
10 | "size": 8869638144,
11 | "ready": true,
12 | "id": 251993753
13 | }
14 | ]
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/src/tests/test_debrid_matching.py:
--------------------------------------------------------------------------------
1 | from types import SimpleNamespace
2 |
3 | from program.media.item import Episode, Movie, Season, Show
4 | from program.services.downloaders.realdebrid import RealDebridDownloader
5 |
6 | realdebrid_downloader = RealDebridDownloader()
7 |
8 | def test_matches_item_movie():
9 | torrent_info = SimpleNamespace(
10 | files=[
11 | SimpleNamespace(path="Inception.mkv", selected=1, bytes=2_000_000_000),
12 | ]
13 | )
14 | item = Movie({"imdb_id": "tt1375666", "requested_by": "user", "title": "Inception"})
15 | assert realdebrid_downloader._matches_item(torrent_info, item) is True
16 |
17 | def test_matches_item_episode():
18 | torrent_info = SimpleNamespace(
19 | files=[
20 | SimpleNamespace(path="The Vampire Diaries s01e01.mkv", selected=1, bytes=800_000_000),
21 | ]
22 | )
23 | parent_show = Show({"imdb_id": "tt1405406", "requested_by": "user", "title": "The Vampire Diaries"})
24 | parent_season = Season({"number": 1})
25 | episode = Episode({"number": 1})
26 | parent_season.add_episode(episode)
27 | parent_show.add_season(parent_season)
28 | episode.parent = parent_season
29 | parent_season.parent = parent_show
30 |
31 | assert realdebrid_downloader._matches_item(torrent_info, episode) is True
32 |
33 | def test_matches_item_season():
34 | torrent_info = SimpleNamespace(
35 | files=[
36 | SimpleNamespace(path="The Vampire Diaries s01e01.mkv", selected=1, bytes=800_000_000),
37 | SimpleNamespace(path="The Vampire Diaries s01e02.mkv", selected=1, bytes=800_000_000),
38 | ]
39 | )
40 | show = Show({"imdb_id": "tt1405406", "requested_by": "user", "title": "The Vampire Diaries"})
41 | season = Season({"number": 1})
42 | episode1 = Episode({"number": 1})
43 | episode2 = Episode({"number": 2})
44 | season.add_episode(episode1)
45 | season.add_episode(episode2)
46 | show.add_season(season)
47 |
48 | assert realdebrid_downloader._matches_item(torrent_info, season) is True
49 |
50 | def test_matches_item_partial_season():
51 | torrent_info = SimpleNamespace(
52 | files=[
53 | SimpleNamespace(path="show_s01e01.mkv", selected=1, bytes=800_000_000),
54 | ]
55 | )
56 | show = Show({"imdb_id": "tt1405406", "requested_by": "user", "title": "Test Show"})
57 | season = Season({"number": 1})
58 | episode1 = Episode({"number": 1})
59 | episode2 = Episode({"number": 2})
60 | season.add_episode(episode1)
61 | season.add_episode(episode2)
62 | show.add_season(season)
63 |
64 | assert realdebrid_downloader._matches_item(torrent_info, season) is False
65 |
66 | def test_matches_item_no_files():
67 | torrent_info = SimpleNamespace()
68 | item = Movie({"imdb_id": "tt1375666", "requested_by": "user", "title": "Inception"})
69 | assert realdebrid_downloader._matches_item(torrent_info, item) is False
70 |
71 | def test_matches_item_no_selected_files():
72 | torrent_info = SimpleNamespace(
73 | files=[
74 | SimpleNamespace(path="movie.mp4", selected=0, bytes=2_000_000_000),
75 | ]
76 | )
77 | item = Movie({"imdb_id": "tt1375666", "requested_by": "user", "title": "Inception"})
78 | assert realdebrid_downloader._matches_item(torrent_info, item) is False
--------------------------------------------------------------------------------
/src/tests/test_ranking.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from RTN import RTN, DefaultRanking, SettingsModel, Torrent
3 |
4 |
5 | @pytest.fixture
6 | def settings_model():
7 | return SettingsModel()
8 |
9 | @pytest.fixture
10 | def ranking_model():
11 | return DefaultRanking()
12 |
13 | # basic implementation for testing ranking
14 | def test_manual_fetch_check_from_user(settings_model, ranking_model):
15 | rtn = RTN(settings_model, ranking_model, lev_threshold=0.9)
16 |
17 | item: Torrent = rtn.rank(
18 | "Swamp People Serpent Invasion S03E05 720p WEB h264-KOGi[eztv re] mkv",
19 | "c08a9ee8ce3a5c2c08865e2b05406273cabc97e7",
20 | correct_title="Swamp People",
21 | remove_trash=False,
22 | threshold=0.9
23 | )
24 |
25 | assert item.fetch is True, "Fetch should be True"
26 | assert item.lev_ratio > 0, "Levenshtein ratio should be greater than 0"
--------------------------------------------------------------------------------
/src/tests/test_settings_migration.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from pathlib import Path
4 |
5 | from program.settings.manager import SettingsManager
6 |
7 | TEST_VERSION = "9.9.9"
8 | DATA_PATH = Path(os.curdir) / "data"
9 |
10 | # Sample old settings data
11 | old_settings_data = {
12 | "version": "0.7.5",
13 | "debug": True,
14 | "log": True,
15 | "force_refresh": False,
16 | "map_metadata": True,
17 | "tracemalloc": False,
18 | "downloaders": {
19 | # "movie_filesize_min": 200,
20 | # "movie_filesize_max": -1,
21 | # "episode_filesize_min": 40,
22 | # "episode_filesize_max": -1,
23 | "real_debrid": {
24 | "enabled": False,
25 | "api_key": "",
26 | "proxy_enabled": False,
27 | "proxy_url": ""
28 | },
29 | "all_debrid": {
30 | "enabled": True,
31 | "api_key": "12345678",
32 | "proxy_enabled": False,
33 | "proxy_url": "https://no_proxy.com"
34 | },
35 | },
36 | }
37 |
38 |
39 | def test_load_and_migrate_settings():
40 |     temp_settings_file = DATA_PATH / "settings.json"
41 |     version_file = DATA_PATH / "VERSION"
42 |
43 | try:
44 | temp_settings_file.write_text(json.dumps(old_settings_data))
45 |         version_file.write_text(TEST_VERSION)
46 |
47 | import program.settings.models
48 | program.settings.manager.data_dir_path = DATA_PATH
49 | program.settings.models.version_file_path = version_file
50 | settings_manager = SettingsManager()
51 |
52 | assert settings_manager.settings.debug is True
53 | assert settings_manager.settings.log is True
54 | assert settings_manager.settings.force_refresh is False
55 | assert settings_manager.settings.map_metadata is True
56 | assert settings_manager.settings.tracemalloc is False
57 | # assert settings_manager.settings.downloaders.movie_filesize_min == 200
58 | assert settings_manager.settings.downloaders.real_debrid.enabled is False
59 | assert settings_manager.settings.downloaders.all_debrid.enabled is True
60 | assert settings_manager.settings.downloaders.all_debrid.api_key == "12345678"
61 | assert settings_manager.settings.downloaders.all_debrid.proxy_url == "https://no_proxy.com"
62 | assert settings_manager.settings.database.host == "postgresql+psycopg2://postgres:postgres@localhost/riven"
63 | assert settings_manager.settings.version == TEST_VERSION
64 | finally:
65 |         temp_settings_file.unlink(missing_ok=True)
66 |         version_file.unlink(missing_ok=True)
--------------------------------------------------------------------------------
/src/tests/test_symlink_library.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import pytest
4 | from pyfakefs.fake_filesystem_unittest import Patcher
5 |
6 | from program.media.item import Episode, Movie, Season, Show
7 | from program.media.state import States
8 | from program.services.libraries.symlink import SymlinkLibrary
9 | from program.settings.manager import settings_manager
10 |
11 |
12 | class MockSettings:
13 | def __init__(self, library_path):
14 | self.force_refresh = False
15 | self.symlink = type("symlink", (), {
16 | "library_path": Path(library_path),
17 | "separate_anime_dirs": True,
18 | })
19 |
20 | @pytest.fixture
21 | def symlink_library(fs):
22 | library_path = "/fake/library"
23 | fs.create_dir(f"{library_path}/movies")
24 | fs.create_dir(f"{library_path}/shows")
25 | fs.create_dir(f"{library_path}/anime_movies")
26 | fs.create_dir(f"{library_path}/anime_shows")
27 | settings_manager.settings = MockSettings(library_path)
28 | return SymlinkLibrary()
29 |
30 |
31 | def test_valid_library_structure(symlink_library):
32 | assert symlink_library.initialized, "Library should be initialized successfully."
33 |
34 |
35 | def test_invalid_library_structure(fs):
36 | incorrect_path = "/invalid/library"
37 | fs.create_dir(incorrect_path)
38 | settings_manager.settings = MockSettings(incorrect_path)
39 | library = SymlinkLibrary()
40 | assert not library.initialized, "Library should fail initialization with incorrect structure."
41 |
42 |
43 | def test_movie_detection(symlink_library):
44 | with Patcher() as patcher:
45 | fs = patcher.fs
46 | movie_path = "/fake/library/movies"
47 | fs.create_file(f"{movie_path}/Top Gun (1986) tt0092099.mkv")
48 | fs.create_file(f"{movie_path}/The Matrix (1999) tt0133093.mkv")
49 | fs.create_file(f"{movie_path}/The Matrix Reloaded (2003) tt0234215.mkv")
50 |
51 | movies = list(symlink_library.run())
52 | assert len(movies) == 3, "Should detect 3 movies."
53 | assert all(isinstance(movie, Movie) for movie in movies), "Detected objects should be of type Movie."
54 | assert all(movie.state == States.Completed for movie in movies), "Detected objects should be in the Completed state."
55 |
56 |
57 | def test_show_detection(symlink_library, fs):
58 | shows_path = "/fake/library/shows"
59 | fs.create_dir(f"{shows_path}/Vikings (2013) tt2306299/Season 01")
60 | fs.create_file(f"{shows_path}/Vikings (2013) tt2306299/Season 01/Vikings (2013) - s01e01 - Rites of Passage.mkv")
61 | fs.create_file(f"{shows_path}/Vikings (2013) tt2306299/Season 01/Vikings (2013) - s01e02 - Wrath of the Northmen.mkv")
62 | fs.create_dir(f"{shows_path}/The Mandalorian (2019) tt8111088/Season 01")
63 | fs.create_file(f"{shows_path}/The Mandalorian (2019) tt8111088/Season 01/The Mandalorian (2019) - s01e01 - Chapter 1.mkv")
64 | fs.create_file(f"{shows_path}/The Mandalorian (2019) tt8111088/Season 01/The Mandalorian (2019) - s01e02 - Chapter 2.mkv")
65 |
66 | shows = list(symlink_library.run())
67 | assert len(shows) == 2, "Should detect 2 shows."
68 | assert all(isinstance(show, Show) for show in shows), "Detected objects should be of type Show."
69 | assert all(season.state == States.Completed for show in shows for season in show.seasons), "Detected seasons should be in the Completed state."
70 |
71 |
72 | def test_season_detection(symlink_library, fs):
73 | shows_path = "/fake/library/shows"
74 | fs.create_dir(f"{shows_path}/Vikings (2013) tt2306299/Season 01")
75 | fs.create_file(f"{shows_path}/Vikings (2013) tt2306299/Season 01/Vikings (2013) - s01e01 - Rites of Passage.mkv")
76 |
77 | shows = list(symlink_library.run())
78 | assert len(shows[0].seasons) == 1, "Should detect one season."
79 | assert all(isinstance(season, Season) for season in shows[0].seasons), "Detected objects should be of type Season."
80 | assert all(season.state == States.Completed for season in shows[0].seasons), "Detected objects should be in the Completed state."
81 |
82 |
83 | def test_episode_detection(symlink_library, fs):
84 | shows_path = "/fake/library/shows"
85 | fs.create_dir(f"{shows_path}/Vikings (2013) tt2306299/Season 01")
86 | fs.create_file(f"{shows_path}/Vikings (2013) tt2306299/Season 01/Vikings (2013) - s01e01 - Rites of Passage.mkv")
87 |
88 | shows = list(symlink_library.run())
89 | assert len(shows[0].seasons[0].episodes) == 1, "Should detect one episode."
90 | assert all(isinstance(episode, Episode) for episode in shows[0].seasons[0].episodes), "Detected objects should be of type Episode."
91 | assert all(episode.state == States.Completed for episode in shows[0].seasons[0].episodes), "Detected objects should be in the Completed state."
92 |
93 |
94 | def test_media_item_creation(symlink_library, fs):
95 | fs.create_file("/fake/library/movies/Top Gun (1986) tt0092099.mkv")
96 | items = list(symlink_library.run())
97 | assert len(items) == 1, "Should create one media item."
98 | assert items[0].imdb_id == "tt0092099", "Media item should have the correct IMDb ID."
99 | assert isinstance(items[0], Movie), "The created item should be a Movie."
100 | assert items[0].state == States.Completed, "The created item should be in the Completed state."
101 |
--------------------------------------------------------------------------------