├── .github ├── FUNDING.yml └── workflows │ ├── push_to_docker.yml │ └── pytest.yml ├── .gitignore ├── Dockerfile ├── LICENSE ├── README.md ├── __init__.py ├── config.yaml.example ├── docker-compose.yml ├── main.py ├── plugins ├── README.md ├── __init__.py ├── arr.py ├── bfi.py ├── criterion.py ├── imdb_chart.py ├── imdb_list.py ├── jellyfin_api.py ├── letterboxd.py ├── listmania.py ├── mdblist.py ├── popular_movies.py ├── trakt.py └── tspdt.py ├── requirements.txt ├── tests └── test_plugins │ └── test_letterboxd.py └── utils ├── __init__.py ├── base_plugin.py ├── jellyfin.py ├── jellyseerr.py └── poster_generation.py /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: ghomasHudson 2 | -------------------------------------------------------------------------------- /.github/workflows/push_to_docker.yml: -------------------------------------------------------------------------------- 1 | name: Publish Docker image 2 | on: 3 | push: 4 | branches: [master] 5 | jobs: 6 | multi_push_to_registry: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - name: Checkout 10 | uses: actions/checkout@v2 11 | - name: Set up QEMU 12 | uses: docker/setup-qemu-action@v1 13 | - name: Set up Docker Buildx 14 | uses: docker/setup-buildx-action@v1 15 | - name: Login to GitHub Container Registry 16 | uses: docker/login-action@v1 17 | with: 18 | registry: ghcr.io 19 | username: ${{ github.repository_owner }} 20 | password: ${{ secrets.GITHUB_TOKEN }} 21 | - name: set lower case owner name 22 | run: | 23 | echo "OWNER_LC=${OWNER,,}" >>${GITHUB_ENV} 24 | env: 25 | OWNER: '${{ github.repository_owner }}' 26 | - name: Build and push 27 | uses: docker/build-push-action@v2 28 | with: 29 | context: . 
30 | file: ./Dockerfile 31 | platforms: linux/amd64, linux/arm64 32 | push: true 33 | tags: | 34 | ghcr.io/${{ env.OWNER_LC }}/jellyfin-auto-collections:latest 35 | ghcr.io/${{ env.OWNER_LC }}/jellyfin-auto-collections:${{ github.sha }} 36 | -------------------------------------------------------------------------------- /.github/workflows/pytest.yml: -------------------------------------------------------------------------------- 1 | name: Run Pytest 2 | on: 3 | push: 4 | branches: [master] 5 | pull_request: 6 | branches: [master] 7 | jobs: 8 | build: 9 | runs-on: ubuntu-latest 10 | strategy: 11 | matrix: 12 | python-version: ["3.12"] 13 | steps: 14 | - uses: actions/checkout@v3 15 | - name: Set up Python ${{ matrix.python-version }} 16 | uses: actions/setup-python@v4 17 | with: 18 | python-version: ${{ matrix.python-version }} 19 | - name: Install dependencies 20 | run: | 21 | python -m pip install --upgrade pip 22 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi; 23 | pip install pytest pytest-cov 24 | - name: Lint with Ruff 25 | run: | 26 | pip install ruff 27 | ruff check --output-format=github --target-version=py310 . 
28 | continue-on-error: true 29 | - name: Test with pytest 30 | run: | 31 | coverage run -m pytest -v -s 32 | - name: Generate Coverage Report 33 | run: | 34 | coverage report -m 35 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | config.yaml 3 | __pycache__ 4 | .trakt_access_token 5 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.10-alpine as base 2 | 3 | LABEL org.opencontainers.image.source https://github.com/ghomasHudson/jellyfin-auto-collections 4 | 5 | ENV RUNNING_IN_DOCKER true 6 | 7 | RUN apk update 8 | FROM base as build 9 | 10 | WORKDIR /app 11 | COPY . . 12 | RUN pip install -r requirements.txt 13 | 14 | FROM build as final 15 | 16 | WORKDIR /app 17 | COPY --from=build /app /app 18 | 19 | VOLUME [ "/app/config" ] 20 | 21 | ENTRYPOINT [ "python3.10", "-u", "main.py", "--config", "/app/config/config.yaml" ] 22 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Thomas Hudson 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Jellyfin Auto Collections 2 | 3 | A tool to automatically make and update [jellyfin](https://jellyfin.org) collections based on internet lists such as IMDb and letterboxd. Also syncs with [Overseerr](https://overseerr.dev/)/[Jellyseerr](https://github.com/Fallenbagel/jellyseerr). 4 | 5 | ``` 6 | Getting collections list... 7 | 8 | found /r/TrueFilm Canon (1000 films) 015dee24c79dacfa80300afb7577fc37 9 | ************************************************ 10 | 11 | Can't find A Trip to the Moon 12 | Can't find The Birth of a Nation 13 | Can't find Intolerance: Love's Struggle Throughout the Ages 14 | Can't find A Man There Was 15 | Can't find The Cabinet of Dr. Caligari 16 | Added Big Buck Bunny cc561c8b1d5da3a080cdb61ebe44d1a7 17 | Added Big Buck Bunny 2 0515533b716e8fe76d3b630f9b9b6d51 18 | Can't find Nosferatu 19 | Can't find Dr. Mabuse, the Gambler 20 | Can't find Häxan 21 | Added Big Buck Bunny 3 9a6b8002ef8f12a0611e92f5104d8b8e 22 | Can't find Sherlock, Jr. 23 | Can't find Greed 24 | Can't find The Last Laugh 25 | Can't find Battleship Potemkin 26 | Added Big Buck Bunny 5 98690cc73413b12593988687ee737a27 27 | Can't find Ménilmontant 28 | ... 
29 | ``` 30 | 31 | ![pic-selected-220609-1405-13](https://user-images.githubusercontent.com/13795113/172853971-8b5ab33b-58a9-4073-8a28-c471e9710cdc.png) 32 | 33 | ## Supported List Sources 34 | 35 | - IMDB Charts - e.g. [Top 250 Movies](https://imdb.com/chart/top), [Top Box Office](https://imdb.com/chart/boxoffice) 36 | - IMDB Lists - e.g. [Top 100 Greatest Movie of All time](https://imdb.com/list/ls055592025) 37 | - Letterboxd - e.g. [Movies everyone should watch at least once...](https://letterboxd.com/fcbarcelona/list/movies-everyone-should-watch-at-least-once) 38 | - mdblist - e.g. [Top Movies of the week](https://mdblist.com/lists/garycrawfordgc/top-movies-of-the-week) 39 | - They Shoot Pictures, Don't They - [The 1,000 Greatest Films](https://www.theyshootpictures.com/gf1000_all1000films_table.php) 40 | - Trakt - e.g. [Popular Movies](https://trakt.tv/movies/popular). See the [Wiki](https://github.com/ghomasHudson/Jellyfin-Auto-Collections/wiki/Plugin-%E2%80%90-Trakt) for instructions. 41 | - [Steven Lu Popular movies](https://github.com/sjlu/popular-movies) 42 | - [The Criterion Channel](https://www.criterionchannel.com/new-collections) 43 | - [Listmania](https://www.listmania.org) 44 | - [BFI](https://www.bfi.org.uk/articles/type/lists) 45 | - Jellyfin API Queries - Make lists which match a particular filter from the [Jellyfin API](https://api.jellyfin.org/). See the [Wiki](https://github.com/ghomasHudson/Jellyfin-Auto-Collections/wiki/Plugin-%E2%80%90-Jellyfin-API) for some usage examples. 46 | - Radarr/Sonarr - Make collections from your *arr tags. 47 | 48 | Please feel free to send pull requests with more! 49 | 50 | ## Usage 51 | 52 | First, copy `config.yaml.example` to `config.yaml` and change the values for your specific jellyfin instance. 53 | 54 | ### Bare Metal 55 | 56 | Make sure you have Python 3 and pip installed. 57 | 58 | Install the requirements with `pip install -r requirements.txt`. 59 | 60 | Then run `python main.py`. 
61 | 62 | ### Docker 63 | 64 | The easiest way to get going is to use the provided `docker-compose.yml` configuration. Whatever directory you end up mapping to the `/app/config` directory needs to contain your updated `config.yaml` file: 65 | 66 | ```yaml 67 | services: 68 | jellyfin-auto-collections: 69 | image: ghcr.io/ghomashudson/jellyfin-auto-collections:latest 70 | container_name: jellyfin-auto-collections 71 | environment: 72 | - CRONTAB=0 0 * * * 73 | - TZ=America/New_York 74 | - JELLYFIN_SERVER_URL=https://www.jellyfin.example.com 75 | - JELLYFIN_API_KEY=1a1111aa1a1a1aaaa11a11aa111aaa11 76 | - JELLYFIN_USER_ID=2b2222bb2b2b2bbbb22b22bb222bbb22 77 | volumes: 78 | - ${CONFIG_DIR}/jellyfin-auto-collections/config:/app/config 79 | ``` 80 | 81 | 82 | #### Configuration Options 83 | 84 | | Environment Variable | Description | 85 | | ------------------------------ | ------------------------------------------------------------------------------------------------------------ | 86 | | JELLYFIN_SERVER_URL | The URL of your Jellyfin instance | 87 | | JELLYFIN_API_KEY | Generated API Key | 88 | | JELLYFIN_USER_ID | UserID from the URL of your Profile in Jellyfin | 89 | | CRONTAB | The interval the scripts will be run on in crontab syntax. Blank to disable scheduling (make sure you're not using the docker [restart policy](https://docs.docker.com/engine/containers/start-containers-automatically/)). | 90 | | TZ | Timezone the interval will be run in. No effect if scheduling is disabled. 
| 91 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ghomasHudson/Jellyfin-Auto-Collections/2d3face2a44a4381cd71e18ab420f09be74c3e7c/__init__.py -------------------------------------------------------------------------------- /config.yaml.example: -------------------------------------------------------------------------------- 1 | # Sample Config file 2 | # 3 | # Note: the !ENV ${VAR:default} syntax is some syntactic sugar used by https://pypi.org/project/pyaml-env/ 4 | # This allows variables to be passed through either in this config file or as environment variables 5 | # 6 | crontab: !ENV ${CRONTAB} # If set, this runs the script on a schedule. Should be in crontab format e.g. `0 0 5 * *` 7 | timezone: !ENV ${TZ} # Timezone the crontab operates on. 8 | jellyfin: 9 | server_url: !ENV ${JELLYFIN_SERVER_URL:https://www.jellyfin.example.com} 10 | api_key: !ENV ${JELLYFIN_API_KEY:1a1111aa1a1a1aaaa11a11aa111aaa11} # Create an API key by going to: Admin>Dashboard>Advanced>API Keys 11 | user_id: !ENV ${JELLYFIN_USER_ID:111111111111aaaaaaaa11111111111a} #ID of your jellyfin user. Found in the URL when you navigate to your user in the Dashboard. 12 | 13 | # jellyseerr: 14 | # server_url: https://jellyseerr.example.com 15 | # email: collections@example.com 16 | # password: mypassword 17 | # user_type: local 18 | 19 | plugins: 20 | imdb_chart: 21 | enabled: true 22 | list_ids: 23 | - top 24 | - boxoffice 25 | - moviemeter 26 | - tvmeter 27 | clear_collection: true # If set, this empties out the collection before re-adding. Useful for lists which change often. 28 | imdb_list: 29 | enabled: true 30 | list_ids: 31 | - ls055592025 32 | - ls068305490 33 | - ls087301829 34 | letterboxd: 35 | enabled: true 36 | imdb_id_filter: true # Uses the imdb id for better matching. This does slow the script down though! 
37 | list_ids: 38 | - fcbarcelona/list/movies-everyone-should-watch-at-least-once 39 | - dave/list/official-top-250-narrative-feature-films 40 | mdblist: 41 | enabled: false 42 | list_ids: 43 | - hdlists/crazy-plot-twists 44 | tspdt: 45 | enabled: true 46 | list_ids: 47 | - 1000-greatest-films 48 | trakt: 49 | enabled: false 50 | list_ids: 51 | - "movies/boxoffice" 52 | - "shows/popular" 53 | - walt-disney-animated-feature-films 54 | - "20124699" # Custom list ID for a user's list. Peek at the HTML source of the list page to find 55 | client_id: aaaaaaa111111111 # Trakt API client ID. Create an app at https://trakt.tv/oauth/applications/new and copy the client ID 56 | client_secret: aaaaaaa111111111 # Trakt API client secret. Create an app at https://trakt.tv/oauth/applications/new and copy the client secret 57 | arr: 58 | enabled: false 59 | server_configs: 60 | - base_url: https://radarr.example.com 61 | api_key: aaaaaaaaaaaaaaaa1111111111111111 62 | - base_url: https://sonarr.example.com 63 | api_key: aaaaaaaaaaaaaaaa1111111111111111 64 | list_ids: 65 | - my_tag 66 | jellyfin_api: 67 | enabled: false 68 | list_ids: 69 | - minCriticRating: ["9"] 70 | limit: ["3"] 71 | sortBy: ["Random"] 72 | includeItemTypes: ["Movie"] 73 | list_name: "Random Highly-Rated Movies" # Optional human-readable collection name 74 | list_desc: "A random selection of highly popular movies" 75 | - genres: ["Mystery"] 76 | popular_movies: # popular-movies-data.stevenlu.com 77 | enabled: false 78 | list_ids: 79 | - movies 80 | criterion_channel: 81 | enabled: false 82 | list_ids: 83 | - hong-kong-hits 84 | - british-new-wave 85 | listmania: 86 | enabled: false 87 | list_ids: 88 | - wonderful-movies-you-might-have-missed 89 | - 40-must-have-mafiacrime-movies 90 | bfi: 91 | enabled: true 92 | list_ids: 93 | - 10-great-films-featuring-dual-performances 94 | - 5-things-watch-this-weekend-24-26-january-2025 95 | -------------------------------------------------------------------------------- 
/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | jellyfin-auto-collections: 3 | #build: . 4 | image: ghcr.io/ghomashudson/jellyfin-auto-collections:latest 5 | container_name: jellyfin-auto-collections 6 | environment: 7 | - CRONTAB=0 0 * * * 8 | - TZ=America/New_York 9 | - JELLYFIN_SERVER_URL=https://www.jellyfin.example.com 10 | - JELLYFIN_API_KEY=1a1111aa1a1a1aaaa11a11aa111aaa11 11 | - JELLYFIN_USER_ID=2b2222bb2b2b2bbbb22b22bb222bbb22 12 | volumes: 13 | - ${CONFIG_DIR}/jellyfin-auto-collections/config:/app/config 14 | restart: unless-stopped 15 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | from typing import cast 2 | from utils.jellyfin import JellyfinClient 3 | from utils.jellyseerr import JellyseerrClient 4 | import pluginlib 5 | from loguru import logger 6 | from pyaml_env import parse_config 7 | import os 8 | import sys 9 | 10 | from apscheduler.schedulers.blocking import BlockingScheduler 11 | from apscheduler.triggers.cron import CronTrigger 12 | 13 | import argparse 14 | parser = argparse.ArgumentParser(description='Jellyfin List Scraper') 15 | parser.add_argument('--config', type=str, help='Path to config file', default='config.yaml') 16 | args = parser.parse_args() 17 | 18 | # Set logging level 19 | log_level = os.getenv("LOG_LEVEL", "INFO").upper() 20 | # Configure Loguru logger 21 | logger.remove() # Remove default configuration 22 | logger.add(sys.stderr, level=log_level) 23 | 24 | # Load config 25 | if not os.path.exists(args.config): 26 | logger.error(f"{args.config} does not exist.") 27 | logger.error(f"Copy config.yaml.example to {args.config} and add your jellyfin config.") 28 | raise Exception("No config file found.") 29 | config = parse_config(args.config, default_value=None) 30 | 31 | def main(config): 32 | # Setup jellyfin connection 33 | 
jf_client = JellyfinClient( 34 | server_url=config['jellyfin']['server_url'], 35 | api_key=config['jellyfin']['api_key'], 36 | user_id=config['jellyfin']['user_id'] 37 | ) 38 | 39 | if "jellyseerr" in config: 40 | js_client = JellyseerrClient( 41 | server_url=config['jellyseerr']['server_url'], 42 | api_key=config['jellyseerr'].get('api_key', None), 43 | email=config['jellyseerr'].get('email', None), 44 | password=str(config['jellyseerr'].get('password', None)), 45 | user_type=str(config['jellyseerr'].get('user_type', "local")) 46 | ) 47 | else: 48 | js_client = None 49 | 50 | # Load plugins 51 | loader = pluginlib.PluginLoader(modules=['plugins']) 52 | plugins = loader.plugins['list_scraper'] 53 | 54 | # If Jellyfin_api plugin is enabled - pass the jellyfin creds to it 55 | if "jellyfin_api" in config["plugins"] and config["plugins"]["jellyfin_api"].get("enabled", False): 56 | config["plugins"]["jellyfin_api"]["server_url"] = config["jellyfin"]["server_url"] 57 | config["plugins"]["jellyfin_api"]["user_id"] = config["jellyfin"]["user_id"] 58 | config["plugins"]["jellyfin_api"]["api_key"] = config["jellyfin"]["api_key"] 59 | 60 | # Update jellyfin with lists 61 | for plugin_name in config['plugins']: 62 | if config['plugins'][plugin_name]["enabled"] and plugin_name in plugins: 63 | for list_entry in config['plugins'][plugin_name]["list_ids"]: 64 | if isinstance(list_entry, dict): 65 | if "list_id" in list_entry: 66 | list_id = list_entry["list_id"] 67 | else: 68 | list_id = list_entry 69 | list_name = list_entry.get("list_name", None) 70 | else: 71 | list_id = list_entry 72 | list_name = None 73 | 74 | logger.info(f"") 75 | logger.info(f"") 76 | logger.info(f"Getting list info for plugin: {plugin_name}, list id: {list_id}") 77 | 78 | # Match list items to jellyfin items 79 | list_info = plugins[plugin_name].get_list(list_id, config['plugins'][plugin_name]) 80 | 81 | # Find jellyfin collection or create it 82 | collection_id = 
jf_client.find_collection_with_name_or_create( 83 | list_name or list_info['name'], 84 | list_id, 85 | list_info.get("description", None), 86 | plugin_name 87 | ) 88 | 89 | if config["plugins"][plugin_name].get("clear_collection", False): 90 | # Optionally clear everything from the collection first 91 | jf_client.clear_collection(collection_id) 92 | 93 | # Add items to the collection 94 | for item in list_info['items']: 95 | matched = jf_client.add_item_to_collection( 96 | collection_id, 97 | item, 98 | year_filter=config["plugins"][plugin_name].get("year_filter", True), 99 | jellyfin_query_parameters=config["jellyfin"].get("query_parameters", {}) 100 | ) 101 | if not matched and js_client is not None: 102 | js_client.make_request(item) 103 | 104 | # Add a poster image if collection doesn't have one 105 | if not jf_client.has_poster(collection_id): 106 | logger.info("Collection has no poster - generating one") 107 | jf_client.make_poster(collection_id, list_info["name"]) 108 | 109 | 110 | 111 | if __name__ == "__main__": 112 | logger.info("Starting up") 113 | logger.info("Starting initial run") 114 | main(config) 115 | 116 | # Setup scheduler 117 | if "crontab" in config and config["crontab"] != "": 118 | scheduler = BlockingScheduler() 119 | scheduler.add_job(main, CronTrigger.from_crontab(config['crontab']), args=[config], timezone=config.get("timezone", "UTC")) 120 | logger.info("Starting scheduler using crontab: " + config["crontab"]) 121 | scheduler.start() 122 | -------------------------------------------------------------------------------- /plugins/README.md: -------------------------------------------------------------------------------- 1 | # List Scraper Plugins 2 | 3 | Each of these files implements the [base_plugin.py](https://github.com/ghomasHudson/Jellyfin-Auto-Collections/blob/master/utils/base_plugin.py) base class: 4 | 5 | ```python 6 | import pluginlib 7 | 8 | @pluginlib.Parent('list_scraper') 9 | class ListScraper(object): 10 | 11 | 
@pluginlib.abstractmethod 12 | def get_list(list_id, config=None): 13 | pass 14 | ``` 15 | 16 | `get_list` should return a dictionary with the format: 17 | 18 | ``` 19 | { 20 | "name": "Ultimate top 100 list", 21 | "description": "100 of my fav films", 22 | "items": [ 23 | {"title": "My Movie", "release_year": "2021", "media_type": "movie"}, 24 | ... 25 | ] 26 | } 27 | ``` 28 | -------------------------------------------------------------------------------- /plugins/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ghomasHudson/Jellyfin-Auto-Collections/2d3face2a44a4381cd71e18ab420f09be74c3e7c/plugins/__init__.py -------------------------------------------------------------------------------- /plugins/arr.py: -------------------------------------------------------------------------------- 1 | import bs4 2 | import requests 3 | import json 4 | from utils.base_plugin import ListScraper 5 | from loguru import logger 6 | 7 | #from arrapi import SonarrAPI, RadarrAPI 8 | 9 | class Arr(ListScraper): 10 | '''Generate collections based on Jellyfin API queries''' 11 | 12 | _alias_ = 'arr' 13 | 14 | def get_list(list_id, config=None): 15 | '''Call arr API''' 16 | 17 | items = [] 18 | for server_config in config["server_configs"]: 19 | server_params = {"apikey": server_config["api_key"]} 20 | 21 | # Get tag id 22 | r = requests.get(server_config["base_url"] + "/api/v3/tag", params=server_params) 23 | tag_id = None 24 | for tag in r.json(): 25 | if tag["label"] == list_id: 26 | tag_id = tag["id"] 27 | break 28 | if tag_id is None: 29 | continue 30 | 31 | # Get tag details 32 | r = requests.get(server_config["base_url"] + f"/api/v3/tag/detail/{tag_id}", params=server_params) 33 | 34 | # Get item details 35 | for item_id in r.json().get("movieIds", []): 36 | item_r = requests.get(server_config["base_url"] + f"/api/v3/movie/{item_id}", params=server_params) 37 | item_r = item_r.json() 38 | 
logger.debug(f"Response from Arr server: {item_r}") 39 | items.append({ 40 | "title": item_r["title"], 41 | "release_year": item_r["year"], 42 | "media_type": "movie", 43 | "imdb_id": item_r.get("imdbId", None) 44 | }) 45 | 46 | for item_id in r.json().get("seriesIds", []): 47 | item_r = requests.get(server_config["base_url"] + f"/api/v3/series/{item_id}", params=server_params) 48 | item_r = item_r.json() 49 | logger.debug(f"Response from Arr server: {item_r}") 50 | items.append({ 51 | "title": item_r["title"], 52 | "release_year": item_r["year"], 53 | "media_type": "show", 54 | "imdb_id": item_r.get("imdbId", None) 55 | }) 56 | 57 | 58 | return { 59 | "name": list_id.replace("_", " ").title(), 60 | "description": f"{list_id} tag from arr server", 61 | "items": items 62 | } 63 | -------------------------------------------------------------------------------- /plugins/bfi.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | import re 3 | from utils.base_plugin import ListScraper 4 | import bs4 5 | import requests 6 | from loguru import logger 7 | 8 | class BFI(ListScraper): 9 | 10 | _alias_ = 'bfi' 11 | 12 | def get_list(list_id, config=None): 13 | r = requests.get(f"https://www.bfi.org.uk/lists/{list_id}") 14 | soup = bs4.BeautifulSoup(r.text, 'html.parser') 15 | 16 | # Find the JSON-LD script tag 17 | json_ld_tag = soup.find("script", type="application/ld+json") 18 | if not json_ld_tag: 19 | raise ValueError("No JSON-LD metadata found on the page") 20 | json_ld = yaml.load(json_ld_tag.string, Loader=yaml.SafeLoader) 21 | 22 | # Basic metadata 23 | list_name = json_ld.get("headline", "").strip() 24 | description = json_ld.get("description", "").strip() 25 | 26 | # Movies 27 | items = [] 28 | year_pattern = re.compile(r'\s*\((\d{4})\)\s*$') 29 | for entry in soup.find_all("figcaption"): 30 | title = entry.text 31 | 32 | match = year_pattern.search(title) 33 | if match is not None: 34 | year = int(match.group(1)) 35 
| title = year_pattern.sub('', title) 36 | 37 | items.append({ 38 | "title": title, 39 | "release_year": year, 40 | "media_type": "Movie" 41 | }) 42 | 43 | return { 44 | "name": list_name, 45 | "description": description, 46 | "items": items 47 | } 48 | -------------------------------------------------------------------------------- /plugins/criterion.py: -------------------------------------------------------------------------------- 1 | import json 2 | from utils.base_plugin import ListScraper 3 | import bs4 4 | import requests 5 | from loguru import logger 6 | #from requests_cache import CachedSession, FileCache 7 | 8 | class CriterionChannel(ListScraper): 9 | 10 | _alias_ = 'criterion_channel' 11 | 12 | def get_list(list_id, config=None): 13 | r = requests.get(f"https://www.criterionchannel.com/{list_id}") 14 | soup = bs4.BeautifulSoup(r.text, 'html.parser') 15 | 16 | list_name = soup.find("h1", class_="collection-title").text.strip() 17 | description = soup.find("div", class_="collection-description").text.strip() 18 | 19 | items = [] 20 | for item in soup.find_all("li", class_="js-collection-item"): 21 | title = item.find("strong").text.strip() 22 | year = item.find("p") 23 | if year is not None and "•" in year.text: 24 | year = year.text.split("•")[1].strip() 25 | items.append({ 26 | "title": title, 27 | "release_year": year, 28 | "media_type": "movie" 29 | }) 30 | return {'name': list_name, 'items': items, "description": description} 31 | -------------------------------------------------------------------------------- /plugins/imdb_chart.py: -------------------------------------------------------------------------------- 1 | import bs4 2 | import requests 3 | from utils.base_plugin import ListScraper 4 | import json 5 | 6 | class IMDBChart(ListScraper): 7 | 8 | _alias_ = 'imdb_chart' 9 | 10 | def get_list(list_id, config=None): 11 | res = requests.get(f'https://www.imdb.com/chart/{list_id}', headers={'User-Agent': 'Mozilla/5.0', 'Accept-Language': 'en-US'}) 
12 | soup = bs4.BeautifulSoup(res.text, 'html.parser') 13 | list_name = soup.find('title').text 14 | description = soup.find('meta', property='og:description')['content'] 15 | movies = [] 16 | 17 | data = soup.find('script', id='__NEXT_DATA__') 18 | data = json.loads(data.text) 19 | 20 | for movie in next(iter(data["props"]["pageProps"]["pageData"].values()))["edges"]: 21 | movie = movie["node"] 22 | if "titleText" not in movie: 23 | # Get item details 24 | res = requests.get(f'https://www.imdb.com/title/{movie["release"]["titles"][0]["id"]}', headers={'User-Agent': 'Mozilla/5.0'}) 25 | soup = bs4.BeautifulSoup(res.text, 'html.parser') 26 | item_data = json.loads(soup.find('script', id='__NEXT_DATA__').text) 27 | movie = item_data["props"]["pageProps"]["aboveTheFoldData"] 28 | 29 | title = movie["titleText"]["text"] 30 | 31 | release_year = None 32 | if movie["releaseYear"] is not None: 33 | release_year = movie["releaseYear"]["year"] 34 | 35 | media_type = movie["titleType"]["id"] 36 | imdb_id = movie["id"] 37 | 38 | movies.append({'title': title, 'release_year': release_year, "media_type": media_type, "imdb_id": imdb_id}) 39 | return {'name': list_name, 'items': movies, "description": description} 40 | -------------------------------------------------------------------------------- /plugins/imdb_list.py: -------------------------------------------------------------------------------- 1 | import bs4 2 | import requests 3 | import json 4 | from utils.base_plugin import ListScraper 5 | 6 | class IMDBList(ListScraper): 7 | 8 | _alias_ = 'imdb_list' 9 | 10 | def get_list(list_id, config=None): 11 | r = requests.get(f'https://www.imdb.com/list/{list_id}', headers={'Accept-Language': 'en-US', 'User-Agent': 'Mozilla/5.0', 'Accept-Language': 'en-US'}) 12 | soup = bs4.BeautifulSoup(r.text, 'html.parser') 13 | list_name = soup.find('h1').text 14 | description = soup.find("div", {"class": "list-description"}).text 15 | 16 | ld_json = soup.find("script", {"type": 
"application/ld+json"}).text 17 | ld_json = json.loads(ld_json) 18 | movies = [] 19 | for row in ld_json["itemListElement"]: 20 | url_parts = row["item"]["url"].split("/") 21 | url_parts = [p for p in url_parts if p!=""] 22 | 23 | release_year = None 24 | if config.get("add_release_year", False): 25 | # Get release_date 26 | r = requests.get(row["item"]["url"], headers={'Accept-Language': 'en-US', 'User-Agent': 'Mozilla/5.0', 'Accept-Language': 'en-US'}) 27 | soup = bs4.BeautifulSoup(r.text, 'html.parser') 28 | movie_json = soup.find("script", {"type": "application/ld+json"}).text 29 | release_year = json.loads(movie_json)["datePublished"].split("-")[0] 30 | 31 | movies.append({ 32 | "title": row["item"]["name"], 33 | "release_year": release_year, 34 | "media_type": row["item"]["@type"], 35 | "imdb_id": url_parts[-1] 36 | }) 37 | 38 | return {'name': list_name, 'items': movies, "description": description} 39 | -------------------------------------------------------------------------------- /plugins/jellyfin_api.py: -------------------------------------------------------------------------------- 1 | import bs4 2 | import requests 3 | import json 4 | from utils.base_plugin import ListScraper 5 | 6 | class JellyfinAPI(ListScraper): 7 | '''Generate collections based on Jellyfin API queries''' 8 | 9 | _alias_ = 'jellyfin_api' 10 | 11 | def get_list(list_id, config=None): 12 | '''Call jellyfin API 13 | list_id should be a dict to pass to https://api.jellyfin.org/#tag/Items/operation/GetItems 14 | ''' 15 | 16 | # If list name/desc have been manually specified - grab them 17 | list_name = f"{list_id}" 18 | list_desc = f"Movies which match the jellyfin API query: {list_id}" 19 | if "list_name" in list_id: 20 | list_name = list_id["list_name"] 21 | del list_id["list_name"] 22 | if "list_desc" in list_id: 23 | list_desc = list_id["list_desc"] 24 | del list_id["list_desc"] 25 | 26 | params = { 27 | "enableTotalRecordCount": "false", 28 | "enableImages": "false", 29 | 
"Recursive": "true", 30 | "fields": ["ProviderIds", "ProductionYear"] 31 | } 32 | params = {**params, **list_id} 33 | 34 | res = requests.get(f'{config["server_url"]}/Users/{config["user_id"]}/Items',headers={"X-Emby-Token": config["api_key"]}, params=params) 35 | 36 | items = [] 37 | for item in res.json()["Items"]: 38 | items.append({ 39 | "title": item["Name"], 40 | "release_year": item.get("ProductionYear", None), 41 | "media_type": item["Type"], 42 | "imdb_id": item["ProviderIds"].get("Imdb", None) 43 | }) 44 | 45 | return { 46 | "name": list_name, 47 | "description": list_desc, 48 | "items": items 49 | } 50 | -------------------------------------------------------------------------------- /plugins/letterboxd.py: -------------------------------------------------------------------------------- 1 | import json 2 | from utils.base_plugin import ListScraper 3 | import bs4 4 | import requests 5 | from loguru import logger 6 | from requests_cache import CachedSession, FileCache 7 | 8 | class Letterboxd(ListScraper): 9 | 10 | _alias_ = 'letterboxd' 11 | 12 | def get_list(list_id, config=None): 13 | page_number = 1 14 | list_name = None 15 | description = None 16 | movies = [] 17 | config = config or {} 18 | 19 | # Cache for movie pages - so we don't have to refetch imdb_ids 20 | session = CachedSession(backend='filesystem') 21 | 22 | while True: 23 | logger.info(f"Page number: {page_number}") 24 | watchlist = list_id.endswith("/watchlist") 25 | likeslist = list_id.endswith("/likes/films") 26 | 27 | if watchlist: 28 | list_name = list_id.split("/")[0] + " Watchlist" 29 | description = "Watchlist for " + list_id.split("/")[0] 30 | elif likeslist: 31 | list_name = list_id.split("/")[0] + " Likes" 32 | description = "Likes list for " + list_id.split("/")[0] 33 | 34 | url_format = "https://letterboxd.com/{list_id}{maybe_detail}/by/release-earliest/page/{page_number}/" 35 | maybe_detail = "" if watchlist or likeslist else "/detail" 36 | r = requests.get( 37 | 
url_format.format(list_id=list_id, maybe_detail=maybe_detail, page_number=page_number), 38 | headers={'User-Agent': 'Mozilla/5.0'}, 39 | ) 40 | 41 | soup = bs4.BeautifulSoup(r.text, 'html.parser') 42 | 43 | if list_name is None: 44 | list_name = soup.find('h1', {'class': 'title-1 prettify'}).text 45 | 46 | if description is None: 47 | description = soup.find('div', {'class': 'body-text'}) 48 | if description is not None: 49 | description = "\n".join([p.text for p in description.find_all('p')]) 50 | else: 51 | description = "" 52 | 53 | if watchlist or likeslist: 54 | page = soup.find_all('li', {'class': 'poster-container'}) 55 | else: 56 | page = soup.find_all('article') 57 | 58 | for movie_soup in page: 59 | if watchlist or likeslist: 60 | movie = {"title": movie_soup.find('img').attrs['alt'], "media_type": "movie"} 61 | link = movie_soup.find('div', {'class': 'film-poster'})['data-target-link'] 62 | else: 63 | movie = {"title": movie_soup.find('h2').find('a').text, "media_type": "movie"} 64 | movie_year = movie_soup.find('small', {'class': 'metadata'}) 65 | if movie_year is not None: 66 | movie["release_year"] = movie_year.text 67 | 68 | link = movie_soup.find('a')['href'] 69 | 70 | 71 | if config.get("imdb_id_filter", False) or 'release_year' not in movie: 72 | logger.debug(f"Getting release year and imdb details for: {movie['title']}") 73 | 74 | # Find the imdb id and release year 75 | r = session.get(f"https://letterboxd.com{link}", headers={'User-Agent': 'Mozilla/5.0'}) 76 | movie_soup = bs4.BeautifulSoup(r.text, 'html.parser') 77 | 78 | imdb_id = movie_soup.find("a", {"data-track-action":"IMDb"}) 79 | movie_year = movie_soup\ 80 | .find("div", {"class": "metablock"})\ 81 | .find("div", {"class": "releaseyear"}) 82 | 83 | if imdb_id is not None: 84 | movie["imdb_id"] = imdb_id["href"].split("/title/")[1].split("/")[0] 85 | 86 | if movie_year is not None: 87 | movie["release_year"] = movie_year.text 88 | 89 | # If a movie doesn't have a year, that means that 
the movie is only just announced and we don't even know when it's coming out. We can easily ignore these because movies will have a year of release by the time they come out. 90 | if 'release_year' in movie: 91 | movies.append(movie) 92 | 93 | if soup.find('a', {'class': 'next'}): 94 | page_number += 1 95 | else: 96 | break 97 | 98 | return {'name': list_name, 'items': movies, "description": description} 99 | -------------------------------------------------------------------------------- /plugins/listmania.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | from utils.base_plugin import ListScraper 3 | import bs4 4 | import requests 5 | from loguru import logger 6 | 7 | class ListMania(ListScraper): 8 | 9 | _alias_ = 'listmania' 10 | 11 | def get_list(list_id, config=None): 12 | r = requests.get(f"https://www.listmania.org/list/{list_id}") 13 | soup = bs4.BeautifulSoup(r.text, 'html.parser') 14 | 15 | # Find the JSON-LD script tag 16 | json_ld_tag = soup.find("script", type="application/ld+json") 17 | if not json_ld_tag: 18 | raise ValueError("No JSON-LD metadata found on the page") 19 | json_ld = yaml.load(json_ld_tag.string, Loader=yaml.SafeLoader) 20 | 21 | # Basic metadata 22 | list_name = json_ld.get("name", "").strip() 23 | description = json_ld.get("description", "").strip() 24 | 25 | # Movies 26 | items = [] 27 | item_list = json_ld.get("mainEntity", {}).get("itemListElement", []) 28 | for entry in item_list: 29 | item = entry.get("item", {}) 30 | title = item.get("name", "").strip() 31 | if title == "": 32 | continue 33 | year = item.get("datePublished", "").strip() 34 | imdb_url = item.get("sameAs", "") 35 | imdb_id = imdb_url.split('/')[-2] if "imdb.com" in imdb_url else None 36 | 37 | items.append({ 38 | "title": title, 39 | "release_year": year, 40 | "media_type": item.get("@type", "Movie").lower(), 41 | "imdb_id": imdb_id 42 | }) 43 | 44 | return { 45 | "name": list_name, 46 | "description": 
import json
from utils.base_plugin import ListScraper
import bs4
import requests

class MDBList(ListScraper):
    '''Scrapes mdblist.com lists via the site's JSON endpoint.'''

    _alias_ = 'mdblist'

    def get_list(list_id, config=None):
        '''Return {'name', 'description', 'items'} for an mdblist list id.'''

        list_id = list_id.strip("/")

        # Scrape the list page for its human-readable name and description.
        page = requests.get(f"https://mdblist.com/lists/{list_id}")
        page_soup = bs4.BeautifulSoup(page.text, 'html.parser')
        form = page_soup.find('div', class_='ui form')
        list_name = form.find('h3').text.strip()
        paragraphs = page_soup.find("div", {"class": "ui form"}).find("div", {"class": "fourteen wide field"}).find_all("p")
        description = "\n".join(p.text for p in paragraphs)

        # The items themselves come from the list's JSON endpoint; normalise
        # the site's "mediatype" key to our standard "media_type".
        raw_items = requests.get(f"https://mdblist.com/lists/{list_id}/json").json()
        movies = [dict(entry, media_type=entry["mediatype"]) for entry in raw_items]

        return {'name': list_name, 'items': movies, 'description': description}
| "movies-rottentomatoes-min80" 28 | ]: 29 | return True 30 | 31 | if list_id.startswith("movies-"): 32 | return True 33 | return False 34 | 35 | def get_list(list_id, config=None): 36 | if not PopularMovies._is_valid_list_id(list_id): 37 | raise Exception(f"Invalid list_id \"{list_id}\" for popular-movies") 38 | 39 | # Get the list name 40 | r = requests.get(f"https://popular-movies-data.stevenlu.com/{list_id}.json") 41 | items = [] 42 | for item in r.json(): 43 | items.append({ 44 | "title": item["title"], 45 | "release_year": None, 46 | "imdb_id": item["imdb_id"], 47 | "media_type": "movie" 48 | }) 49 | 50 | description = """Popular Movies uses LLMs to evaluate the popularity of movies that are released and are less than 4 months old. Popular Movies considers a multitude of data points such as ratings, popularity, production companies, actors, and more.""" 51 | 52 | return {'name': "Popular Movies", 'items': items, 'description': description} 53 | -------------------------------------------------------------------------------- /plugins/trakt.py: -------------------------------------------------------------------------------- 1 | import json 2 | from utils.base_plugin import ListScraper 3 | import bs4 4 | import os 5 | import requests 6 | from loguru import logger 7 | import time 8 | 9 | class Trakt(ListScraper): 10 | 11 | _alias_ = 'trakt' 12 | _access_token_file = '.trakt_access_token' 13 | 14 | _chart_types = { 15 | "movies/trending": { 16 | "title": "Trending Movies", 17 | "description": "The most watched movies right now." 18 | }, 19 | "movies/popular": { 20 | "title": "Popular Movies", 21 | "description": "The most popular movies of all time." 22 | }, 23 | "movies/favorited": { 24 | "title": "Most Favorited Movies", 25 | "description": "The most favorited movies for the last week." 26 | }, 27 | "movies/watched": { 28 | "title": "Most Watched Movies", 29 | "description": "The most watched movies for the last week." 
30 | }, 31 | "movies/collected": { 32 | "title": "Most Collected Movies", 33 | "description": "The most collected movies for the last week." 34 | }, 35 | "movies/played": { 36 | "title": "Most Played Movies", 37 | "description": "The most played movies for the last week." 38 | }, 39 | "movies/anticipated": { 40 | "title": "Most Anticipated Movies", 41 | "description": "The most anticipated movies based on the number of lists a movie appears on." 42 | }, 43 | "movies/boxoffice": { 44 | "title": "Box Office Movies", 45 | "description": "The top 10 movies in the US box office last weekend." 46 | }, 47 | "shows/trending": { 48 | "title": "Trending Shows", 49 | "description": "The most watched shows right now." 50 | }, 51 | "shows/popular": { 52 | "title": "Popular Shows", 53 | "description": "The most popular shows of all time. Popularity is calculated using the rating percentage and the number of ratings." 54 | }, 55 | "shows/favorited": { 56 | "title": "Most Favorited Shows", 57 | "description": "The most favorited shows for the last week." 58 | }, 59 | "shows/watched": { 60 | "title": "Most Watched Shows", 61 | "description": "The most watched shows for the last week." 62 | }, 63 | "shows/collected": { 64 | "title": "Most Collected Shows", 65 | "description": "The most collected shows for the last week." 66 | }, 67 | "shows/played": { 68 | "title": "Most Played Shows", 69 | "description": "Returns the most played (a single user can watch multiple episodes multiple times) shows for the last week." 70 | }, 71 | "shows/anticipated": { 72 | "title": "Most Anticipated Shows", 73 | "description": "The most anticipated shows based on the number of lists a show appears on." 
74 | } 75 | } 76 | 77 | 78 | 79 | def _get_auth_token(config): 80 | '''Get the authentication token for the Trakt API''' 81 | 82 | headers = { 83 | "Content-Type": "application/json", 84 | "trakt-api-version": "2", 85 | "trakt-api-key": config["client_id"] 86 | } 87 | 88 | if os.path.exists(Trakt._access_token_file): 89 | # If we have already authenticated, read the access token from the file 90 | with open(Trakt._access_token_file, 'r') as f: 91 | access_token = f.read() 92 | logger.debug("Existing access token found") 93 | else: 94 | # If we have not authenticated, get the access token from the user 95 | r = requests.post("https://api.trakt.tv/oauth/device/code", headers=headers, json={"client_id": config["client_id"]}) 96 | device_code = r.json()["device_code"] 97 | user_code = r.json()["user_code"] 98 | interval = r.json()["interval"] 99 | 100 | logger.info("Authentication with Trakt API required") 101 | logger.info(f"Please visit the following URL to get your access token: {r.json()['verification_url']}") 102 | logger.info("") 103 | logger.info(f"Your device code is: {user_code}") 104 | logger.info("") 105 | 106 | # Poll the API until the user has authenticated 107 | while True: 108 | r = requests.post("https://api.trakt.tv/oauth/device/token", headers=headers, json={ 109 | "client_id": config["client_id"], 110 | "client_secret": config["client_secret"], 111 | "code": device_code 112 | }) 113 | if r.status_code == 200: 114 | break 115 | time.sleep(interval) 116 | 117 | access_token = r.json()["access_token"] 118 | 119 | # Save the access token to a file 120 | with open(Trakt._access_token_file, 'w') as f: 121 | f.write(access_token) 122 | logger.info("Successfully authenticated with Trakt API") 123 | return access_token 124 | 125 | 126 | def get_list(list_id, config=None): 127 | 128 | headers = { 129 | "Content-Type": "application/json", 130 | "trakt-api-version": "2", 131 | "trakt-api-key": config["client_id"] 132 | } 133 | 134 | access_token = 
Trakt._get_auth_token(config) 135 | headers["Authorization"] = f"Bearer {access_token}" 136 | logger.debug("Access token loaded") 137 | 138 | if list_id.startswith("users/"): 139 | logger.debug("Trakt Default User list") 140 | r = requests.get(f"https://api.trakt.tv/{list_id}", headers=headers) 141 | components = list_id.split("/") 142 | list_name = f"{components[1]}'s {components[2]}" 143 | description = f"{components[1]}'s {components[2]}" 144 | items_data = r.json() 145 | elif list_id.startswith("shows/") or list_id.startswith("movies/"): 146 | # Chart 147 | logger.debug("Trakt chart list") 148 | r = requests.get(f"https://api.trakt.tv/{list_id}", headers=headers) 149 | list_name = Trakt._chart_types[list_id]["title"] 150 | description = Trakt._chart_types[list_id]["description"] 151 | 152 | if list_id.startswith("shows/"): 153 | item_types = "show" 154 | else: 155 | item_types = "movie" 156 | 157 | items_data = r.json() 158 | else: 159 | logger.debug("Trakt User list") 160 | r = requests.get(f"https://api.trakt.tv/lists/{list_id}", headers=headers) 161 | list_name = r.json()["name"] 162 | description = r.json()["description"] 163 | r = requests.get(f"https://api.trakt.tv/lists/{list_id}/items", headers=headers) 164 | items_data = r.json() 165 | 166 | 167 | # Process the items 168 | logger.debug("Processing items.") 169 | items = [] 170 | for item_data in items_data: 171 | if "type" in item_data: 172 | item = {"media_type": item_data["type"]} 173 | else: 174 | item = {"media_type": item_types} 175 | 176 | if item["media_type"] == "season": 177 | # Ignore seasons 178 | continue 179 | 180 | if "ids" in item_data: 181 | meta = item_data 182 | else: 183 | meta = item_data[item["media_type"]] 184 | 185 | if "imdb" in meta["ids"]: 186 | item["imdb_id"] = meta["ids"]["imdb"] 187 | try: 188 | item["title"] = meta["title"] 189 | except: 190 | breakpoint() 191 | if "year" in meta: 192 | item["release_year"] = meta["year"] 193 | items.append(item) 194 | 195 | return { 196 
| "name": list_name, 197 | "description": description, 198 | "items": items 199 | } 200 | -------------------------------------------------------------------------------- /plugins/tspdt.py: -------------------------------------------------------------------------------- 1 | import json 2 | from utils.base_plugin import ListScraper 3 | import bs4 4 | import requests 5 | 6 | class TSPDT(ListScraper): 7 | 8 | _alias_ = 'tspdt' 9 | 10 | def get_list(list_id, config=None): 11 | r = requests.get("https://www.theyshootpictures.com/gf1000_all1000films_table.php") 12 | soup = bs4.BeautifulSoup(r.text, 'html.parser') 13 | movies = [] 14 | 15 | 16 | for row in soup.find_all('tr')[1:]: 17 | values = row.find_all('td') 18 | movie_title = values[2].text 19 | for suffix in ["The", "A", "La", "Le", "L'"]: 20 | if movie_title.endswith(", "+suffix): 21 | movie_title = suffix + " " + movie_title[:-len(suffix)-2] 22 | movie_year = values[4].text 23 | movies.append({'title': movie_title, 'release_year': movie_year, 'media_type': 'movie'}) 24 | return {'name': "TSPDT Top 1000 Greatest", 'items': movies, "description": "Compiled from 16,000+ film lists and ballots, The TSPDT 1,000 Greatest Films is quite possibly the most definitive collection of the most critically acclaimed films you will find."} 25 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | APScheduler==3.10.4 2 | beautifulsoup4==4.12.3 3 | bs4==0.0.2 4 | certifi==2024.6.2 5 | charset-normalizer==3.3.2 6 | contourpy==1.3.1 7 | cycler==0.12.1 8 | fonttools==4.57.0 9 | idna==3.7 10 | kiwisolver==1.4.8 11 | loguru==0.7.2 12 | pluginlib==0.9.2 13 | pyaml-env==1.2.1 14 | pytz==2024.1 15 | PyYAML==6.0.1 16 | requests==2.32.3 17 | requests-cache==1.2.1 18 | setuptools==70.0.0 19 | six==1.16.0 20 | soupsieve==2.5 21 | tzlocal==5.2 22 | urllib3==2.2.1 23 | url-normalize==2.2.0 24 | numpy==2.2.4 25 | 
import pytest
from plugins.letterboxd import Letterboxd

# One list per supported letterboxd list kind, all under the test account.
# (Previously this was duplicated: a never-used `test_lists` fixture and an
# identical literal inside @parametrize. A single module constant keeps the
# two from drifting apart.)
TEST_LISTS = [
    "jf_auto_collect/watchlist",
    "jf_auto_collect/likes/films",
    "jf_auto_collect/list/test_list/"
]

@pytest.fixture
def test_list_output():
    """Expected scraped items — identical for every list in TEST_LISTS."""
    return [
        {'title': 'The Godfather', 'media_type': 'movie', 'imdb_id': 'tt0068646', 'release_year': '1972'},
        {'title': 'The Godfather Part II', 'media_type': 'movie', 'imdb_id': 'tt0071562', 'release_year': '1974'}
    ]

# Parametrized test for different lists
@pytest.mark.parametrize("test_list", TEST_LISTS)
def test_get_list(test_list, test_list_output):
    # Letterboxd.get_list returns a dictionary with an "items" key
    result = Letterboxd.get_list(test_list, {"imdb_id_filter": True})

    # Every list kind should scrape to the same expected items
    assert result["items"] == test_list_output
import requests
from loguru import logger
from base64 import b64encode
import json
import concurrent.futures
from .poster_generation import fetch_collection_posters, safe_download, create_mosaic, get_font


class JellyfinClient:
    '''Thin wrapper around the Jellyfin HTTP API used to manage collections.'''

    # Maps IMDb media types to the Jellyfin item types that may hold them.
    imdb_to_jellyfin_type_map = {
        "movie": ["Movie"],
        "short": ["Movie"],
        "tvEpisode": ["TvProgram", "Episode"],
        "tvSeries": ["Program", "Series"],
        "tvShort": ["TvProgram", "Episode", "Program"],
        "tvMiniSeries": ["Program", "Series"],
        "tvMovie": ["Movie", "TvProgram", "Episode"],
        "video": ["Movie", "TvProgram", "Episode", "Series"],
        "show": ["Program", "Series"],
    }

    def __init__(self, server_url: str, api_key: str, user_id: str):
        '''Validate connectivity and credentials up front.

        Raises Exception if the server is unreachable, the API key is
        rejected, or the user id does not exist.
        '''
        self.server_url = server_url
        self.api_key = api_key
        self.user_id = user_id

        # Check if server is reachable
        try:
            requests.get(self.server_url)
        except requests.exceptions.ConnectionError:
            raise Exception("Server is not reachable")

        # Validate API key and user id with a single request.
        # (Previously the exact same GET was issued twice, and any failure —
        # including an unknown user id — was reported as "Invalid API key".)
        res = requests.get(f"{self.server_url}/Users/{self.user_id}", headers={"X-Emby-Token": self.api_key})
        if res.status_code == 401:
            # 401 means the token itself was rejected.
            raise Exception("Invalid API key")
        if res.status_code != 200:
            # Any other failure (e.g. 400/404): the key worked but the user didn't.
            raise Exception("Invalid user id")
"includeItemTypes": "BoxSet", 51 | "fields": ["Name", "Id", "Tags"] 52 | } 53 | logger.info("Getting collections list...") 54 | res = requests.get(f'{self.server_url}/Users/{self.user_id}/Items',headers={"X-Emby-Token": self.api_key}, params=params) 55 | return res.json()["Items"] 56 | 57 | 58 | def find_collection_with_name_or_create(self, list_name: str, list_id: str, description: str, plugin_name: str) -> str: 59 | '''Returns the collection id of the collection with the given name. If it doesn't exist, it creates a new collection and returns the id of the new collection.''' 60 | collection_id = None 61 | collections = self.get_all_collections() 62 | 63 | # Check if list name in tags 64 | for collection in collections: 65 | if json.dumps(list_id) in collection["Tags"]: 66 | collection_id = collection["Id"] 67 | break 68 | 69 | # if no match - Check if list name == collection name 70 | if collection_id is None: 71 | for collection in collections: 72 | if list_name == collection["Name"]: 73 | collection_id = collection["Id"] 74 | break 75 | 76 | if collection_id is not None: 77 | logger.info("found existing collection: " + list_name + " (" + collection_id + ")") 78 | 79 | if collection_id is None: 80 | # Collection doesn't exist -> Make a new one 81 | logger.info("No matching collection found for: " + list_name + ". 
Creating new collection...") 82 | res2 = requests.post(f'{self.server_url}/Collections',headers={"X-Emby-Token": self.api_key}, params={"name": list_name}) 83 | collection_id = res2.json()["Id"] 84 | 85 | # Update collection description and add tags to we can find it later 86 | if collection_id is not None: 87 | collection = requests.get(f'{self.server_url}/Users/{self.user_id}/Items/{collection_id}', headers={"X-Emby-Token": self.api_key}).json() 88 | if collection.get("Overview", "") == "" and description is not None: 89 | collection["Overview"] = description 90 | collection["Tags"] = list(set(collection.get("Tags", []) + ["Jellyfin-Auto-Collections", plugin_name, json.dumps(list_id)])) 91 | r = requests.post(f'{self.server_url}/Items/{collection_id}',headers={"X-Emby-Token": self.api_key}, json=collection) 92 | 93 | return collection_id 94 | 95 | def has_poster(self, collection_id): 96 | '''Check if a collection already has a poster''' 97 | poster_url = f"{self.server_url}/Items/{collection_id}/Images/Primary" 98 | r = requests.get(poster_url, headers={"X-Emby-Token": self.api_key}) 99 | if r.status_code == 404: 100 | return False 101 | return True 102 | 103 | 104 | def make_poster(self, collection_id, collection_name, mosaic_limit=20, google_font_url="https://fonts.googleapis.com/css2?family=Dosis:wght@800&display=swap"): 105 | 106 | # Check if collection poster exists 107 | poster_urls = fetch_collection_posters(self.server_url, self.api_key, self.user_id, collection_id)[:mosaic_limit] 108 | headers={"X-Emby-Token": self.api_key} 109 | 110 | # Use a ThreadPoolExecutor to download images in parallel 111 | with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor: 112 | futures = [executor.submit(safe_download, url, headers) for url in poster_urls] 113 | results = [future.result() for future in concurrent.futures.as_completed(futures)] 114 | 115 | # Filter out any failed downloads (None values) 116 | poster_images = [img for img in results if img is 
not None] 117 | 118 | font_path = get_font(google_font_url) 119 | 120 | if poster_images: 121 | safe_name = collection_name.replace(" ", "_").replace("/", "_") 122 | output_path = f"/tmp/{safe_name}_cover.jpg" 123 | create_mosaic(poster_images, collection_name, output_path, font_path) 124 | else: 125 | logger.warning(f"No posters available for collection '{collection_name}'. Skipping mosaic generation.") 126 | return 127 | 128 | # Upload 129 | 130 | from PIL import Image 131 | img = Image.open(output_path) # or whatever format 132 | img = img.convert("RGB") # Ensures it's safe for JPEG 133 | img.save(output_path, format="JPEG") 134 | 135 | with open(output_path, 'rb') as f: 136 | img_data = f.read() 137 | encoded_data = b64encode(img_data) 138 | 139 | headers["Content-Type"] = "image/jpeg" 140 | r = requests.post(f"{self.server_url}/Items/{collection_id}/Images/Primary", headers=headers, data=encoded_data) 141 | 142 | 143 | def add_item_to_collection(self, collection_id: str, item, year_filter: bool = True, jellyfin_query_parameters={}): 144 | '''Adds an item to a collection based on item name and release year''' 145 | 146 | item["media_type"] = self.imdb_to_jellyfin_type_map.get(item["media_type"], item["media_type"]) 147 | 148 | params = { 149 | "enableTotalRecordCount": "false", 150 | "enableImages": "false", 151 | "Recursive": "true", 152 | "IncludeItemTypes": item["media_type"], 153 | "searchTerm": item["title"], 154 | "fields": ["ProviderIds", "ProductionYear"] 155 | } 156 | 157 | params = {**params, **jellyfin_query_parameters} 158 | 159 | res = requests.get(f'{self.server_url}/Users/{self.user_id}/Items',headers={"X-Emby-Token": self.api_key}, params=params) 160 | 161 | # Check if there's an exact imdb_id match first 162 | match = None 163 | if "imdb_id" in item: 164 | for result in res.json()["Items"]: 165 | if result["ProviderIds"].get("Imdb", None) == item["imdb_id"]: 166 | match = result 167 | break 168 | else: 169 | # Check if there's a year match 170 
| if match is None and year_filter: 171 | for result in res.json()["Items"]: 172 | if str(result.get("ProductionYear", None)) == str(item["release_year"]): 173 | match = result 174 | break 175 | 176 | # Otherwise, just take the first result 177 | if match is None and len(res.json()["Items"]) == 1: 178 | match = res.json()["Items"][0] 179 | 180 | if match is None: 181 | logger.warning(f"Item {item['title']} ({item.get('release_year','N/A')}) {item.get('imdb_id','')} not found in jellyfin") 182 | return False 183 | else: 184 | try: 185 | item_id = match["Id"] 186 | requests.post(f'{self.server_url}/Collections/{collection_id}/Items?ids={item_id}',headers={"X-Emby-Token": self.api_key}) 187 | logger.info(f"Added {item['title']} to collection") 188 | logger.debug(f"\tList item: {item}") 189 | logger.debug(f"\tMatched JF item: {match}") 190 | return True 191 | except json.decoder.JSONDecodeError: 192 | logger.error(f"Error adding {item['title']} to collection - JSONDecodeError") 193 | return False 194 | 195 | 196 | 197 | def clear_collection(self, collection_id: str): 198 | '''Clears a collection by removing all items from it''' 199 | res = requests.get(f'{self.server_url}/Users/{self.user_id}/Items',headers={"X-Emby-Token": self.api_key}, params={"Recursive": "true", "parentId": collection_id}) 200 | all_ids = [item["Id"] for item in res.json()["Items"]] 201 | 202 | # chunk ids into groups of 10 203 | all_ids = [all_ids[i:i + 10] for i in range(0, len(all_ids), 10)] 204 | for ids in all_ids: 205 | requests.delete(f'{self.server_url}/Collections/{collection_id}/Items',headers={"X-Emby-Token": self.api_key}, params={"ids": ",".join(ids)}) 206 | 207 | logger.info(f"Cleared collection {collection_id}") 208 | -------------------------------------------------------------------------------- /utils/jellyseerr.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import urllib.parse 3 | from loguru import logger 4 | 5 | 
class JellyseerrClient:
    '''Minimal client for the Jellyseerr request API (media search + request).'''

    def __init__(self, server_url: str, api_key: str = None, email: str = None, password: str = None, user_type: str = "local"):
        # Fix common url issues
        if server_url.endswith("/"):
            server_url = server_url[:-1]  # Remove trailing slash
        if not server_url.endswith("/api/v1"):
            server_url += "/api/v1"
        self.server_url = server_url

        if user_type not in ["local", "plex", "jellyfin"]:
            raise Exception("Invalid user type. Must be one of: local, plex, jellyfin")

        # Check if server is reachable
        try:
            r = requests.get(self.server_url + "/status")
            if r.status_code != 200:
                raise Exception("Jellyseerr Server is not reachable")
        except requests.exceptions.ConnectionError:
            raise Exception("Jellyseerr Server is not reachable")

        self.session = requests.Session()
        self.api_key = api_key
        if api_key is not None:
            # BUG FIX: Session.headers.update() returns None, so the old
            # `r = ...; if r.status_code != 200` check raised AttributeError.
            # The key is actually validated by the /auth/me check below.
            self.session.headers.update({
                "X-Api-Key": api_key
            })
        if email is not None and password is not None:
            r = self.session.post(f"{self.server_url}/auth/{user_type}", json={
                "email": email,
                "password": password
            })
            if r.status_code != 200:
                raise Exception("Invalid jellyseerr email or password")

        # Check if user is authenticated
        r = self.session.get(f"{self.server_url}/auth/me")
        if r.status_code != 200:
            raise Exception("jellyseerr user is not authenticated")


    def make_request(self, item):
        '''Request item from jellyseerr'''

        # Search for item
        r = self.session.get(f"{self.server_url}/search", params={
            "query": urllib.parse.quote_plus(item["title"])
        })

        # Find matching item
        mediaId = None
        result = None
        for result in r.json().get("results", []):
            # Try IMDB match first
            if "mediaInfo" in result and "ImdbId" in result["mediaInfo"]:
                imdb_id = result["mediaInfo"]["ImdbId"]
                # BUG FIX: item["imdb_id"] raised KeyError for items without
                # an imdb id; .get() returns None and the explicit None guard
                # prevents a spurious None == None match.
                if imdb_id is not None and imdb_id == item.get("imdb_id"):
                    mediaId = result["id"]
                    logger.debug(f"Found exact IMDB match for {item['title']}")
                    break
            elif "releaseDate" in result:
                # Try year match
                release_year = result["releaseDate"].split("-")[0]
                if release_year == str(item["release_year"]):
                    mediaId = result["id"]
                    logger.debug(f"Found year match for {item['title']}")
                    break

        # Request item if not found
        # (`result` is the entry the loop broke on; only read when a match
        # was actually found.)
        if mediaId is not None:
            if "mediaInfo" not in result or result["mediaInfo"]["jellyfinMediaId"] is None:
                # If it's not already in Jellyfin
                # Request item
                r = self.session.post(f"{self.server_url}/request", json={
                    "mediaType": result["mediaType"],
                    "mediaId": mediaId,
                })
                logger.info(f"Requested {item['title']} from Jellyseerr")



if __name__ == "__main__":
    # Ad-hoc manual test harness.
    # NOTE(review): hard-coded developer path; adjust for your machine.
    from pyaml_env import parse_config
    config = parse_config("/home/thomas/Documents/Jellyfin-Auto-Collections/config.yaml", default_value=None)

    client = JellyseerrClient(
        server_url=config["jellyseerr"]["server_url"],
        api_key=config["jellyseerr"]["api_key"]
    )
    client.make_request({
        "title": "The Matrix",
        "imdb_id": "tt0133093",
        "release_year": 1999
    })

# --------------------------------------------------------------------------------
# /utils/poster_generation.py:
# --------------------------------------------------------------------------------
import os
import requests
from loguru import logger
from PIL import Image, ImageDraw, ImageFont, ImageFilter, ImageOps
import math
from io import BytesIO
import concurrent.futures
from pyaml_env import parse_config

# Canvas dimensions and styling
CANVAS_WIDTH = 2000
CANVAS_HEIGHT = 3000
LINE_SPACING = 25
SHADOW_SIZE = 8
BACKGROUND_COLOR = (0, 0, 0)
OVERLAY_PADDING = 50

def get_font(url, font_dir="./fonts"):
    '''Download ttf from google font css'''
| 21 | font_name = url.split("family=")[1].split("&")[0] 22 | font_name = font_name.replace(":", "_") 23 | font_name = font_name.replace("@", "_") 24 | font_name = font_name + ".ttf" 25 | font_path = os.path.join(font_dir, font_name) 26 | 27 | if os.path.exists(font_path): 28 | return font_path 29 | 30 | if not os.path.exists(font_dir): 31 | os.mkdir(font_dir) 32 | 33 | # Download css 34 | r = requests.get(url) 35 | r.raise_for_status() 36 | font_url = r.text.split("url(")[1].split(")")[0] 37 | 38 | # Download font 39 | r = requests.get(font_url) 40 | with open(font_path, 'wb') as f: 41 | f.write(r.content) 42 | r.raise_for_status() 43 | return font_path 44 | 45 | # --- Data Fetching Functions --- 46 | 47 | def fetch_collection_posters(jellyfin_url, api_key, user_id, collection_id): 48 | """ 49 | Fetches the poster URLs for all items in the specified collection. 50 | """ 51 | logger.info(f"Fetching posters for collection ID {collection_id}...") 52 | headers = {'X-Emby-Token': api_key} 53 | url = f"{jellyfin_url}/Users/{user_id}/Items" 54 | params = {'parentId': collection_id} 55 | response = requests.get(url, headers=headers, params=params) 56 | response.raise_for_status() 57 | items = response.json().get('Items', []) 58 | poster_urls = [] 59 | for item in items: 60 | if 'ImageTags' in item and 'Primary' in item['ImageTags']: 61 | poster_url = f"{jellyfin_url}/Items/{item['Id']}/Images/Primary?tag={item['ImageTags']['Primary']}" 62 | poster_urls.append(poster_url) 63 | logger.info(f"Found {len(poster_urls)} poster(s) for collection ID {collection_id}.") 64 | return poster_urls 65 | 66 | def safe_download(url, headers): 67 | """ 68 | Download an image safely; return None if an error occurs. 
69 | """ 70 | try: 71 | return download_image(url, headers) 72 | except Exception as e: 73 | logger.error(f"Error downloading image {url}: {e}") 74 | return None 75 | 76 | def download_image(url, headers): 77 | """ 78 | Downloads an image from a URL and returns a Pillow Image object. 79 | """ 80 | response = requests.get(url, headers=headers) 81 | response.raise_for_status() 82 | image = Image.open(BytesIO(response.content)).convert("RGB") 83 | return image 84 | 85 | 86 | # --- Text and Font Functions --- 87 | 88 | def wrap_text(text, font, draw, max_width): 89 | """ 90 | Wrap text into multiple lines so that each line's width doesn't exceed max_width. 91 | """ 92 | words = text.split() 93 | lines = [] 94 | current_line = "" 95 | for word in words: 96 | test_line = current_line + (" " if current_line else "") + word 97 | bbox = draw.textbbox((0, 0), test_line, font=font) 98 | line_width = bbox[2] - bbox[0] 99 | if line_width <= max_width: 100 | current_line = test_line 101 | else: 102 | if current_line: 103 | lines.append(current_line) 104 | current_line = word 105 | if current_line: 106 | lines.append(current_line) 107 | return lines 108 | 109 | 110 | def get_adjusted_font_and_wrapped_text(text, draw, max_width, max_height, font_file, max_font_size=200, min_font_size=20): 111 | """ 112 | Determines a font size that allows the text to be wrapped within max_width and max_height. 113 | Returns the chosen font, the wrapped lines, and the total text block height. 
114 | """ 115 | for font_size in range(max_font_size, min_font_size - 1, -1): 116 | font = ImageFont.truetype(font_file, font_size) 117 | lines = wrap_text(text, font, draw, max_width) 118 | ascent, descent = font.getmetrics() 119 | line_height = ascent + descent 120 | total_height = line_height * len(lines) + LINE_SPACING * (len(lines) - 1) 121 | # Check if the text block fits within the limits 122 | max_line_width = max(draw.textbbox((0, 0), line, font=font)[2] - draw.textbbox((0, 0), line, font=font)[0] for line in lines) 123 | if max_line_width <= max_width and total_height <= max_height: 124 | return font, lines, total_height 125 | # Fall back to minimum font size 126 | font = ImageFont.truetype(font_file, min_font_size) 127 | lines = wrap_text(text, font, draw, max_width) 128 | ascent, descent = font.getmetrics() 129 | line_height = ascent + descent 130 | total_height = line_height * len(lines) + LINE_SPACING * (len(lines) - 1) 131 | logger.info("Using minimum font size.") 132 | return font, lines, total_height 133 | 134 | 135 | def draw_text_with_shadow(draw, text, position, font, shadow_size, text_color="white", shadow_color="black"): 136 | """ 137 | Draw text with a shadow effect at the specified position. 138 | """ 139 | x, y = position 140 | # Draw shadow offsets 141 | for dx in range(-shadow_size, shadow_size + 1): 142 | for dy in range(-shadow_size, shadow_size + 1): 143 | if dx == 0 and dy == 0: 144 | continue 145 | draw.text((x + dx, y + dy), text, font=font, fill=shadow_color) 146 | # Draw main text 147 | draw.text((x, y), text, font=font, fill=text_color) 148 | 149 | 150 | def draw_text_block(draw, lines, font, total_text_height, overlay_y, overlay_height): 151 | """ 152 | Draw the text block centered within the overlay area. 
153 | """ 154 | ascent, descent = font.getmetrics() 155 | line_height = ascent + descent 156 | # Adjust starting y using the font metrics 157 | current_y = overlay_y + (overlay_height - total_text_height) // 2 158 | for line in lines: 159 | bbox = draw.textbbox((0, 0), line, font=font) 160 | line_width = bbox[2] - bbox[0] 161 | text_x = (CANVAS_WIDTH - line_width) // 2 162 | draw_text_with_shadow(draw, line, (text_x, current_y), font, SHADOW_SIZE) 163 | current_y += line_height + LINE_SPACING 164 | 165 | 166 | # --- Mosaic Creation Functions --- 167 | 168 | def create_mosaic_background(poster_images): 169 | """ 170 | Create the mosaic background from poster images. 171 | Returns a blurred canvas with the images pasted in a grid that fills the entire canvas. 172 | Some parts of the posters may be cut off to ensure a complete fill. 173 | """ 174 | canvas = Image.new('RGB', (CANVAS_WIDTH, CANVAS_HEIGHT), BACKGROUND_COLOR) 175 | num_posters = len(poster_images) 176 | if num_posters == 0: 177 | raise ValueError("No poster images available!") 178 | 179 | # Determine the grid dimensions (number of columns and rows) 180 | grid_cols = math.ceil(math.sqrt(num_posters)) 181 | grid_rows = math.ceil(num_posters / grid_cols) 182 | 183 | # Calculate cell size so that the entire canvas area is used. 184 | cell_width = CANVAS_WIDTH // grid_cols 185 | cell_height = CANVAS_HEIGHT // grid_rows 186 | 187 | # Place each poster image into its respective cell. 188 | # ImageOps.fit will scale and crop the image as necessary to cover the entire cell. 
189 | for idx, img in enumerate(poster_images): 190 | fitted_img = ImageOps.fit(img, (cell_width, cell_height), method=Image.Resampling.LANCZOS) 191 | col = idx % grid_cols 192 | row = idx // grid_cols 193 | x = col * cell_width # No horizontal offset 194 | y = row * cell_height # No vertical offset 195 | canvas.paste(fitted_img, (x, y)) 196 | 197 | return canvas.filter(ImageFilter.GaussianBlur(radius=10)) 198 | 199 | 200 | def apply_text_overlay(image, collection_name, font_file): 201 | """ 202 | Applies a semi-transparent overlay and draws the collection name centered within it. 203 | """ 204 | draw = ImageDraw.Draw(image) 205 | max_text_width = int(CANVAS_WIDTH * 0.8) 206 | max_text_height = int(CANVAS_HEIGHT * 0.3) 207 | 208 | font, lines, total_text_height = get_adjusted_font_and_wrapped_text( 209 | collection_name.upper(), draw, max_text_width, max_text_height, font_file 210 | ) 211 | 212 | overlay_width = max_text_width + OVERLAY_PADDING * 2 213 | overlay_height = total_text_height + OVERLAY_PADDING * 2 214 | overlay_x = (CANVAS_WIDTH - overlay_width) // 2 215 | overlay_y = (CANVAS_HEIGHT - overlay_height) // 2 216 | 217 | # Create a transparent overlay image 218 | overlay = Image.new('RGBA', (overlay_width, overlay_height), (0, 0, 0, 0)) 219 | overlay_draw = ImageDraw.Draw(overlay) 220 | 221 | # Draw a rounded rectangle on the overlay 222 | radius = 30 # Adjust the radius for more or less rounding 223 | overlay_draw.rounded_rectangle([(0, 0), (overlay_width, overlay_height)], radius=radius, fill=(0, 0, 0, 200)) 224 | 225 | # Paste the rounded overlay onto the canvas using its alpha channel as mask 226 | image.paste(overlay, (overlay_x, overlay_y), overlay) 227 | 228 | # Draw text on top of the overlay 229 | draw = ImageDraw.Draw(image) 230 | draw_text_block(draw, lines, font, total_text_height, overlay_y, overlay_height) 231 | 232 | 233 | def create_mosaic(poster_images, collection_name, output_path, font_path): 234 | """ 235 | Creates the complete mosaic 
cover by combining the background and text overlay. 236 | """ 237 | logger.debug("Starting mosaic creation...") 238 | blurred = create_mosaic_background(poster_images) 239 | apply_text_overlay(blurred, collection_name, font_path) 240 | blurred.save(output_path) 241 | logger.debug(f"Cover art saved to {output_path}") 242 | --------------------------------------------------------------------------------