├── .gitignore ├── LICENSE ├── README.md ├── custom_components └── mediarr │ ├── __init__.py │ ├── common │ ├── __init__.py │ ├── const.py │ ├── sensor.py │ └── tmdb_sensor.py │ ├── discovery │ ├── __init__.py │ ├── seer_discovery.py │ ├── tmdb.py │ ├── trakt.py │ └── trakt_user.py_wip │ ├── manager │ ├── __init__.py │ ├── radarr.py │ ├── radarr2.py │ ├── sonarr.py │ └── sonarr2.py │ ├── manifest.json │ ├── sensor.py │ ├── server │ ├── __init__.py │ ├── jellyfin.py │ └── plex.py │ ├── services.yaml │ └── services │ ├── __init__.py │ ├── seer.py │ └── seer_requests.py └── hacs.json /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | *.pyc 3 | .DS_Store 4 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 [Your Name] 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Support This Project If you find this project helpful, please consider supporting it. Your contributions help maintain and improve the project. Any support is greatly appreciated! ❤️ https://buymeacoffee.com/vansmak Thank you for your support! 2 | 3 | 4 | # Mediarr for Home Assistant (inspired by upcoming media card) https://github.com/Vansmak/mediarr-card 5 | ## Services 6 | This integration provides services to interact with Overseerr/Jellyseerr. While the various trending/discover sensors and frontend card are optional, the base seer sensor is required to enable the services. 7 | 8 | ### Minimum Required Configuration just to have overseer\jellyseer request services from ha 9 | To use the seer request services, add this to your configuration.yaml: 10 | 11 | ```yaml 12 | mediarr: 13 | seer: 14 | url: your_seerr_url 15 | api_key: !secret seer_api_key 16 | ``` 17 | and for sensors: 18 | ```yaml 19 | sensor: 20 | - platform: mediarr 21 | seer: 22 | url: your_seerr_url 23 | api_key: your_api_key 24 | ``` 25 | 26 | ## Installation 27 | 28 | ### HACS Installation 29 | 1. Open HACS 30 | 2. Go to "Integrations" 31 | 3. Click the three dots menu and select "Custom repositories" 32 | 4. Add this repository URL and select "Integration" as the category 33 | 5. Click "Add" 34 | 6. Find and install "Mediarr" from HACS 35 | 7. Restart Home Assistant 36 | 37 | 38 | ### Manual Installation 39 | 1. Download the latest before hacs (can break) 40 | 2. 
Copy all contents from `custom_components/mediarr/` to `/config/custom_components/mediarr/` 41 | 42 | 3. Restart Home Assistant 43 | 44 | ## Configuration 45 | 46 | ### Step 1: Configure Sensors 47 | Add one or more of the following sensors to your `configuration.yaml, sensors`: 48 | **Customizable Filters**: Control content discovery with granular options 49 | ** have multiple instances for sonarr and radarr (2 each) 50 | ```yaml 51 | sensor: 52 | - platform: mediarr 53 | seer: 54 | url: localhost 55 | api_key: your_api_key 56 | max_items: 45 #example how many items you want in sensor, you can choose a different amount in card 57 | tmdb_api_key: "your_tmdb_api_key" 58 | trending: true # Optional 59 | discover: true # Optional 60 | popular_movies: false # Optional, recommended to use tmdb for popular 61 | popular_tv: false # Optional, recommended to use tmdb for popular 62 | 63 | plex: # Optional 64 | url: Plex url 65 | token: your_token 66 | max_items: 45 #example how many items you want in sensor, you can choose a different amount in card 67 | tmdb_api_key: "your_tmdb_api_key" 68 | language: en #default 69 | 70 | jellyfin: # Optional 71 | url: jellyfin url 72 | token: your_api_key 73 | max_items: 45 #example how many items you want in sensor, you can choose a different amount in card 74 | tmdb_api_key: "your_tmdb_api_key" 75 | language: en #default 76 | 77 | sonarr: # Optional 78 | url: http://localhost:8989 79 | api_key: your_sonarr_api_key 80 | max_items: 45 #example how many items you want in sensor, you can choose a different amount in card 81 | days_to_check: 60 #example 82 | 83 | sonarr2: # Optional 84 | url: http://localhost:8989 85 | api_key: your_sonarr_api_key 86 | max_items: 45 #example how many items you want in sensor, you can choose a different amount in card 87 | days_to_check: 60 #example 88 | 89 | radarr: # Optional 90 | url: http://localhost:7878 91 | api_key: your_radarr_api_key 92 | max_items: 45 #example how many items you want in sensor, you
can choose a different amount in card 93 | days_to_check: 60 #breaking change 94 | 95 | radarr2: # Optional 96 | url: http://localhost:7878 97 | api_key: your_radarr_api_key 98 | max_items: 45 #example how many items you want in sensor, you can choose a different amount in card 99 | days_to_check: 60 #breaking change 100 | 101 | 102 | trakt: # Optional 103 | client_id: "your_client_id" 104 | client_secret: "your_client_secret" 105 | tmdb_api_key: "your_tmdb_api_key" # Required for posters 106 | trending_type: both # Options: movies, shows, both 107 | max_items: 45 #example how many items you want in sensor, you can choose a different amount in card 108 | 109 | 110 | tmdb: # Optional 111 | tmdb_api_key: "!secret tmdb_api_key" #may need quotes 112 | trending_type: all # Options: movie, tv, all 113 | max_items: 45 #example how many items you want in sensor, you can choose a different amount in card 114 | trending: true # Default endpoint 115 | now_playing: true # Optional 116 | upcoming: true # Optional 117 | on_air: true # Optional 118 | airing_today: false # Optional 119 | popular_movies: true 120 | popular_tv: true 121 | filters: 122 | min_year: 2020 # Only show content from 2020 onwards 123 | exclude_talk_shows: true 124 | exclude_genres: [10763, 10764, 10767] 125 | ``` 126 | # Sensor Configuration 127 | - **max_items**: Number of items to display (default: 45) 128 | - **days_to_check**: Days to look ahead for upcoming content (Sonarr only, default: 90) 129 | - **trending_type**: Content type to display for Trakt and TMDB 130 | - Available Filters 131 | 132 | - hide_existing: Toggle library content filtering (default: true) 133 | - min_year: Minimum release year for content 134 | - exclude_talk_shows: Remove talk shows and similar content 135 | - exclude_non_english: Show only English content 136 | - exclude_genres: List of genre IDs to filter out 137 | 138 | Common Genre IDs to Exclude 139 | 140 | 10763: News 141 | 10764: Reality 142 | 10767: Talk Shows 143 | 35:
Comedy (if you want to filter out comedy) 144 | 99: Documentary (if you prefer scripted content) 145 | 146 | ### Step 3: if you want a front-end, install Mediarr-card from https://github.com/Vansmak/mediarr-card 147 | Add the Card 148 | 149 | ## Getting API Keys 150 | 151 | ### Plex 152 | 1. Get your Plex token from your Plex account settings 153 | 2. More details at [Plex Support](https://support.plex.tv/articles/204059436-finding-an-authentication-token-x-plex-token/) 154 | 155 | ### Sonarr/Radarr 156 | 1. Go to Settings -> General 157 | 2. Copy your API key 158 | 159 | ### Trakt 160 | 1. Create an application at [Trakt API](https://trakt.tv/oauth/applications) 161 | 2. Get your client ID and secret 162 | 163 | ### TMDB 164 | 1. Create an account at [TMDB](https://www.themoviedb.org/) 165 | 2. Request an API key from your account settings 166 | 167 | ### Overseer\Jellyseer 168 | 1. Go to Settings -> General 169 | 2. Copy your API key 170 | 171 | ## Upcoming Features 172 | 173 | ## Contributors 174 | Vansmak aka Vanhacked 175 | FNXPT 176 | 177 | ## License 178 | MIT 179 | -------------------------------------------------------------------------------- /custom_components/mediarr/__init__.py: -------------------------------------------------------------------------------- 1 | # mediarr/__init__.py 2 | """The Mediarr integration.""" 3 | from __future__ import annotations 4 | import logging 5 | from typing import Any 6 | from homeassistant.core import HomeAssistant 7 | from homeassistant.config_entries import ConfigEntry 8 | from homeassistant.const import Platform 9 | from .services.seer_requests import SeerRequestHandler, async_setup_services, async_unload_services 10 | 11 | DOMAIN = "mediarr" 12 | PLATFORMS = [Platform.SENSOR] 13 | 14 | async def async_setup(hass: HomeAssistant, config: dict[str, Any]) -> bool: 15 | """Set up the Mediarr component.""" 16 | if DOMAIN not in config: 17 | return True 18 | 19 | domain_config = config[DOMAIN] 20 | 
hass.data.setdefault(DOMAIN, {}) 21 | 22 | if "seer" in domain_config: 23 | seer_config = domain_config["seer"] 24 | 25 | handler = SeerRequestHandler( 26 | hass, 27 | seer_config["url"], 28 | seer_config["api_key"] 29 | ) 30 | 31 | hass.data[DOMAIN]["seer_request_handler"] = handler 32 | 33 | service_setup = await async_setup_services(hass, DOMAIN) 34 | 35 | hass.bus.async_listen_once( 36 | "homeassistant_stop", 37 | lambda _: async_unload_services(hass, DOMAIN) 38 | ) 39 | 40 | return True 41 | 42 | async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: 43 | """Set up Mediarr from a config entry.""" 44 | # Store the config entry data 45 | hass.data.setdefault(DOMAIN, {}) 46 | hass.data[DOMAIN][entry.entry_id] = entry.data 47 | 48 | # Set up all platforms 49 | await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS) 50 | 51 | # Reload services 52 | if "seer_request_handler" not in hass.data[DOMAIN]: 53 | seer_config = entry.data.get("seer", {}) 54 | if seer_config: 55 | handler = SeerRequestHandler( 56 | hass, 57 | seer_config["url"], 58 | seer_config["api_key"] 59 | ) 60 | hass.data[DOMAIN]["seer_request_handler"] = handler 61 | await async_setup_services(hass, DOMAIN) 62 | 63 | return True 64 | 65 | async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: 66 | """Unload a config entry.""" 67 | # Unload platforms 68 | unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS) 69 | 70 | if unload_ok: 71 | hass.data[DOMAIN].pop(entry.entry_id) 72 | 73 | # If this is the last entry, unload services 74 | if not hass.data[DOMAIN]: 75 | await async_unload_services(hass, DOMAIN) 76 | hass.data.pop(DOMAIN) 77 | 78 | return unload_ok -------------------------------------------------------------------------------- /custom_components/mediarr/common/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Vansmak/mediarr_sensor/5fd94bc3091f39762c32428e5d6fa71f67c044e9/custom_components/mediarr/common/__init__.py -------------------------------------------------------------------------------- /custom_components/mediarr/common/const.py: -------------------------------------------------------------------------------- 1 | # mediarr/common/const.py 2 | from datetime import timedelta 3 | 4 | # Sensor Configuration Constants 5 | CONF_MAX_ITEMS = "max_items" 6 | CONF_DAYS = "days_to_check" 7 | DEFAULT_MAX_ITEMS = 45 8 | DEFAULT_DAYS = 90 9 | 10 | # Scan Interval 11 | SCAN_INTERVAL = timedelta(minutes=10) 12 | 13 | # Server Types 14 | SERVER_TYPES = ["plex", "jellyfin", "emby"] 15 | 16 | # Manager Types 17 | MANAGER_TYPES = ["sonarr", "radarr"] 18 | 19 | # Discovery Types 20 | DISCOVERY_TYPES = ["trakt", "tmdb"] 21 | # Seer Content Types 22 | SEER_CONTENT_TYPES = ["requests", "trending", "popular", "discover"] 23 | 24 | # Search Related Constants 25 | SEARCH_TYPE_MOVIE = "movie" 26 | SEARCH_TYPE_TV = "tv" 27 | SEARCH_TYPES = [SEARCH_TYPE_MOVIE, SEARCH_TYPE_TV] 28 | 29 | # API Endpoints 30 | SEER_SEARCH_ENDPOINT = "/api/v1/search" 31 | SEER_REQUEST_ENDPOINT = "/api/v1/request" 32 | SEER_TV_DETAILS_ENDPOINT = "/api/v1/tv" 33 | 34 | # Service Names and Attributes 35 | SERVICE_MOVIE_REQUEST = "submit_movie_request" 36 | SERVICE_TV_REQUEST = "submit_tv_request" 37 | SERVICE_UPDATE_REQUEST = "update_request" 38 | 39 | ATTR_REQUEST_NAME = "name" 40 | ATTR_REQUEST_TYPE = "type" 41 | ATTR_REQUEST_STATUS = "new_status" 42 | ATTR_REQUEST_ID = "request_id" 43 | ATTR_REQUEST_SEASON = "season" 44 | 45 | # Request Status 46 | REQUEST_STATUS_APPROVED = "approve" 47 | REQUEST_STATUS_DECLINED = "decline" 48 | REQUEST_STATUS_REMOVE = "remove" 49 | 50 | DEFAULT_REQUEST_SEASON = "latest" 51 | 52 | # Endpoints 53 | 54 | TRAKT_ENDPOINTS = [] 55 | -------------------------------------------------------------------------------- 
/custom_components/mediarr/common/sensor.py: -------------------------------------------------------------------------------- 1 | # mediarr/common/sensor.py 2 | from homeassistant.components.sensor import SensorEntity 3 | 4 | class MediarrSensor(SensorEntity): 5 | """Base class for Mediarr sensors.""" 6 | 7 | def __init__(self): 8 | """Initialize the sensor.""" 9 | self._state = None 10 | self._attributes = {} 11 | self._available = True 12 | 13 | @property 14 | def state(self): 15 | """Return the state of the sensor.""" 16 | return self._state 17 | 18 | @property 19 | def available(self): 20 | """Return True if entity is available.""" 21 | return self._available 22 | 23 | @property 24 | def extra_state_attributes(self): 25 | """Return the state attributes.""" 26 | return self._attributes -------------------------------------------------------------------------------- /custom_components/mediarr/common/tmdb_sensor.py: -------------------------------------------------------------------------------- 1 | """TMDB-based media sensor for Mediarr.""" 2 | import logging 3 | from abc import ABC, abstractmethod 4 | import async_timeout 5 | from datetime import datetime 6 | from ..common.sensor import MediarrSensor 7 | 8 | _LOGGER = logging.getLogger(__name__) 9 | TMDB_BASE_URL = "https://api.themoviedb.org/3" 10 | TMDB_IMAGE_BASE_URL = "https://image.tmdb.org/t/p" 11 | 12 | class TMDBMediaSensor(MediarrSensor, ABC): 13 | """Base class for TMDB-based media sensors.""" 14 | 15 | def __init__(self, session, tmdb_api_key, language='en', filters=None): 16 | """Initialize the sensor.""" 17 | super().__init__() 18 | self._session = session 19 | self._tmdb_api_key = tmdb_api_key 20 | self._language = language 21 | self._available = True 22 | self._cache = {} 23 | 24 | # Initialize default filters 25 | self._filters = { 26 | 'language': language, 27 | 'min_year': 0, 28 | 'exclude_talk_shows': True, 29 | 'exclude_genres': [10763, 10764, 10767], # News, Reality, Talk shows 30 | 
'exclude_non_english': True 31 | } 32 | 33 | # Update with user-provided filters 34 | if filters: 35 | self._filters.update(filters) 36 | 37 | 38 | 39 | # In tmdb_sensor.py, update _format_date method 40 | def _format_date(self, date_str): 41 | """Format date string to YYYY-MM-DD format.""" 42 | if not date_str or date_str == 'Unknown': 43 | return 'Unknown' 44 | try: 45 | # Remove any timezone info and clean the string 46 | date_str = str(date_str).split('T')[0].split('.')[0].strip() 47 | try: 48 | date_obj = datetime.strptime(date_str, '%Y-%m-%d') 49 | return date_str 50 | except ValueError: 51 | return 'Unknown' 52 | except Exception: 53 | return 'Unknown' 54 | 55 | def is_talk_show(self, title): 56 | """Check if a show title appears to be a talk show or similar format.""" 57 | if not self._filters.get('exclude_talk_shows', True): 58 | return False 59 | 60 | keywords = [ 61 | 'tonight show', 'late show', 'late night', 'daily show', 62 | 'talk show', 'with seth meyers', 'with james corden', 63 | 'with jimmy', 'with stephen', 'with trevor', 'news', 64 | 'live with', 'watch what happens live', 'the view', 65 | 'good morning', 'today show', 'kimmel', 'colbert', 66 | 'fallon', 'ellen', 'conan', 'graham norton', 'meet the press', 67 | 'face the nation', 'last week tonight', 'real time', 68 | 'kelly and', 'kelly &', 'jeopardy', 'wheel of fortune', 69 | 'daily mail', 'entertainment tonight', 'zeiten', 'schlechte' 70 | ] 71 | 72 | title_lower = title.lower() 73 | return any(keyword in title_lower for keyword in keywords) 74 | 75 | def should_include_item(self, item, media_type): 76 | """Apply filters to determine if an item should be included.""" 77 | # Filter by year 78 | if media_type == 'tv' and 'first_air_date' in item and item['first_air_date']: 79 | try: 80 | year = int(item['first_air_date'].split('-')[0]) 81 | if year < self._filters.get('min_year', 0): 82 | return False 83 | except (ValueError, IndexError): 84 | pass 85 | elif media_type == 'movie' and 
'release_date' in item and item['release_date']: 86 | try: 87 | year = int(item['release_date'].split('-')[0]) 88 | if year < self._filters.get('min_year', 0): 89 | return False 90 | except (ValueError, IndexError): 91 | pass 92 | 93 | # Filter by language 94 | if self._filters.get('exclude_non_english', True) and item.get('original_language') != 'en': 95 | return False 96 | 97 | # Filter by genre 98 | excluded_genres = self._filters.get('exclude_genres', []) 99 | if any(genre_id in excluded_genres for genre_id in item.get('genre_ids', [])): 100 | return False 101 | 102 | # Filter for TV talk shows 103 | if media_type == 'tv': 104 | title = item.get('name', '') 105 | if self.is_talk_show(title): 106 | return False 107 | 108 | return True 109 | 110 | async def _fetch_tmdb_data(self, endpoint, params=None): 111 | """Fetch data from TMDB API.""" 112 | try: 113 | if not self._tmdb_api_key: 114 | _LOGGER.error("No TMDB API key provided") 115 | return None 116 | 117 | headers = { 118 | "Authorization": f"Bearer {self._tmdb_api_key}", 119 | "Accept": "application/json" 120 | } 121 | 122 | # Check if endpoint already contains query parameters 123 | if '?' 
in endpoint: 124 | url = f"{TMDB_BASE_URL}/{endpoint}&api_key={self._tmdb_api_key}" 125 | else: 126 | url = f"{TMDB_BASE_URL}/{endpoint}?api_key={self._tmdb_api_key}" 127 | 128 | if params: 129 | params = {k: str(v) if v is not None else "" for k, v in params.items()} 130 | # Don't add api_key to params as it's already in the URL 131 | async with async_timeout.timeout(10): 132 | async with self._session.get( 133 | url, 134 | params=params, 135 | headers=headers 136 | ) as response: 137 | if response.status == 200: 138 | return await response.json() 139 | elif response.status == 404: 140 | _LOGGER.debug("TMDB resource not found: %s", url) 141 | return None 142 | else: 143 | _LOGGER.error("TMDB API error: %s for URL: %s", response.status, url) 144 | return None 145 | else: 146 | async with async_timeout.timeout(10): 147 | async with self._session.get( 148 | url, 149 | headers=headers 150 | ) as response: 151 | if response.status == 200: 152 | return await response.json() 153 | elif response.status == 404: 154 | _LOGGER.debug("TMDB resource not found: %s", url) 155 | return None 156 | else: 157 | _LOGGER.error("TMDB API error: %s for URL: %s", response.status, url) 158 | return None 159 | except Exception as err: 160 | _LOGGER.error("Error fetching TMDB data: %s", err) 161 | return None 162 | 163 | 164 | 165 | async def _get_tmdb_images(self, tmdb_id, media_type='movie'): 166 | """Get TMDB image URLs without language filtering.""" 167 | if not tmdb_id: 168 | return None, None, None 169 | 170 | cache_key = f"images_{media_type}_{tmdb_id}" 171 | if cache_key in self._cache: 172 | return self._cache[cache_key] 173 | 174 | try: 175 | # Just get all images without language filtering 176 | data = await self._fetch_tmdb_data(f"{media_type}/{tmdb_id}/images") 177 | 178 | poster_url = backdrop_url = main_backdrop_url = None 179 | 180 | if data: 181 | _LOGGER.debug("Image data for %s (%s): Posters: %d, Backdrops: %d", 182 | tmdb_id, media_type, len(data.get('posters', [])), 
len(data.get('backdrops', []))) 183 | 184 | # Get first available poster 185 | posters = data.get('posters', []) 186 | if posters: 187 | poster_path = posters[0].get('file_path') 188 | poster_url = f"{TMDB_IMAGE_BASE_URL}/w500{poster_path}" if poster_path else None 189 | 190 | # Get first and second available backdrops 191 | backdrops = data.get('backdrops', []) 192 | if backdrops: 193 | # Sort by vote count for better quality 194 | backdrops.sort(key=lambda x: x.get('vote_count', 0), reverse=True) 195 | 196 | backdrop_path = backdrops[0].get('file_path') 197 | main_backdrop_path = backdrops[1].get('file_path') if len(backdrops) > 1 else backdrop_path 198 | 199 | backdrop_url = f"{TMDB_IMAGE_BASE_URL}/w780{backdrop_path}" if backdrop_path else None 200 | main_backdrop_url = f"{TMDB_IMAGE_BASE_URL}/original{main_backdrop_path}" if main_backdrop_path else None 201 | 202 | # Use poster as fallback for backdrop if needed 203 | if poster_url and (not backdrop_url or not main_backdrop_url): 204 | if not backdrop_url: 205 | backdrop_url = poster_url 206 | if not main_backdrop_url: 207 | main_backdrop_url = poster_url 208 | _LOGGER.debug("Using poster as backdrop fallback for %s", tmdb_id) 209 | 210 | _LOGGER.debug("Image URLs for %s: poster=%s, backdrop=%s, main_backdrop=%s", 211 | tmdb_id, poster_url is not None, backdrop_url is not None, main_backdrop_url is not None) 212 | 213 | result = (poster_url, backdrop_url, main_backdrop_url) 214 | self._cache[cache_key] = result 215 | return result 216 | 217 | return None, None, None 218 | 219 | except Exception as err: 220 | _LOGGER.error("Error getting TMDB images for %s: %s", tmdb_id, err) 221 | return None, None, None 222 | 223 | async def _search_tmdb(self, title, year=None, media_type='movie'): 224 | """Search for a title on TMDB.""" 225 | if not title: 226 | return None 227 | 228 | try: 229 | cache_key = f"search_{media_type}_{title}_{year}_{self._language}" 230 | if cache_key in self._cache: 231 | return 
self._cache[cache_key] 232 | 233 | params = { 234 | "query": title, 235 | "language": self._language 236 | } 237 | if year: 238 | params["year"] = year 239 | 240 | endpoint = f"search/{media_type}" 241 | results = await self._fetch_tmdb_data(endpoint, params) 242 | 243 | if results and results.get("results"): 244 | tmdb_id = results["results"][0]["id"] 245 | self._cache[cache_key] = tmdb_id 246 | return tmdb_id 247 | 248 | return None 249 | 250 | except Exception as err: 251 | _LOGGER.error("Error searching TMDB for %s: %s", title, err) 252 | return None 253 | 254 | async def _get_tmdb_details(self, tmdb_id, media_type): 255 | """Fetch title and overview from TMDB.""" 256 | try: 257 | if not tmdb_id: 258 | return None 259 | 260 | cache_key = f"details_{media_type}_{tmdb_id}_{self._language}" 261 | if cache_key in self._cache: 262 | return self._cache[cache_key] 263 | 264 | params = {"language": self._language} 265 | endpoint = f"{media_type}/{tmdb_id}" 266 | data = await self._fetch_tmdb_data(endpoint, params) 267 | 268 | if data: 269 | details = { 270 | 'title': data.get('title' if media_type == 'movie' else 'name', 'Unknown'), 271 | 'overview': data.get('overview', 'No description available.'), 272 | 'year': data.get('release_date' if media_type == 'movie' else 'first_air_date', '')[:4] 273 | } 274 | self._cache[cache_key] = details 275 | return details 276 | 277 | return None 278 | except Exception as err: 279 | _LOGGER.error("Error fetching TMDB details for %s: %s", tmdb_id, err) 280 | return None 281 | 282 | @abstractmethod 283 | async def async_update(self): 284 | """Update sensor state.""" 285 | pass -------------------------------------------------------------------------------- /custom_components/mediarr/discovery/__init__.py: -------------------------------------------------------------------------------- 1 | # mediarr/discovery/__init__.py 2 | """The Mediarr Discovery integration.""" 3 | 4 | from homeassistant.const import CONF_CLIENT_ID, 
CONF_CLIENT_SECRET 5 | import homeassistant.helpers.config_validation as cv 6 | import voluptuous as vol 7 | from ..common.const import CONF_MAX_ITEMS, DEFAULT_MAX_ITEMS 8 | 9 | # Trakt schema 10 | TRAKT_SCHEMA = { 11 | vol.Required(CONF_CLIENT_ID): cv.string, 12 | vol.Required(CONF_CLIENT_SECRET): cv.string, 13 | vol.Required('tmdb_api_key'): cv.string, 14 | vol.Optional('trending_type', default="both"): vol.In(["movies", "shows", "both"]), 15 | vol.Optional(CONF_MAX_ITEMS, default=DEFAULT_MAX_ITEMS): cv.positive_int, 16 | } 17 | 18 | # TMDB schema 19 | TMDB_SCHEMA = { 20 | vol.Required('api_key'): cv.string, 21 | vol.Optional('trending_type', default='all'): vol.In(['movie', 'tv', 'all']), 22 | vol.Optional(CONF_MAX_ITEMS, default=DEFAULT_MAX_ITEMS): cv.positive_int, 23 | } 24 | 25 | # Combined platform schema 26 | PLATFORM_SCHEMA = vol.Schema({ 27 | vol.Optional("trakt"): vol.Schema(TRAKT_SCHEMA), 28 | vol.Optional("tmdb"): vol.Schema(TMDB_SCHEMA), 29 | }) 30 | 31 | async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): 32 | """Set up the Mediarr discovery platform.""" 33 | session = hass.helpers.aiohttp_client.async_get_clientsession() 34 | sensors = [] 35 | 36 | if "trakt" in config: 37 | from .trakt import TraktMediarrSensor 38 | sensors.append(TraktMediarrSensor( 39 | session, 40 | config["trakt"][CONF_CLIENT_ID], 41 | config["trakt"][CONF_CLIENT_SECRET], 42 | config["trakt"].get('trending_type', "both"), 43 | config["trakt"].get(CONF_MAX_ITEMS, DEFAULT_MAX_ITEMS), 44 | config["trakt"]['tmdb_api_key'] 45 | )) 46 | 47 | if "tmdb" in config: 48 | from .tmdb import TMDBMediarrSensor 49 | sensors.append(TMDBMediarrSensor( 50 | session, 51 | config["tmdb"]['api_key'], 52 | config["tmdb"].get(CONF_MAX_ITEMS, DEFAULT_MAX_ITEMS) 53 | )) 54 | 55 | if sensors: 56 | async_add_entities(sensors, True) -------------------------------------------------------------------------------- /custom_components/mediarr/discovery/seer_discovery.py: 
-------------------------------------------------------------------------------- 1 | """Jellyseerr/Overseerr discovery features for Mediarr.""" 2 | import logging 3 | import asyncio 4 | from datetime import datetime 5 | from ..common.tmdb_sensor import TMDBMediaSensor 6 | import async_timeout 7 | 8 | _LOGGER = logging.getLogger(__name__) 9 | 10 | # In seer_discovery.py 11 | class SeerDiscoveryMediarrSensor(TMDBMediaSensor): 12 | """Seer sensor for discover/trending/popular.""" 13 | 14 | def __init__(self, session, api_key, url, tmdb_api_key, max_items, content_type, media_type=None, filters=None): 15 | """Initialize the sensor.""" 16 | # Initialize TMDBMediaSensor with tmdb_api_key 17 | super().__init__(session, tmdb_api_key) 18 | 19 | self._seer_api_key = api_key 20 | self._url = url.rstrip('/') 21 | self._max_items = max_items 22 | self._content_type = content_type 23 | self._media_type = media_type 24 | 25 | # Initialize default filters 26 | self._filters = { 27 | 'language': 'en', 28 | 'min_year': 0, 29 | 'exclude_talk_shows': True, 30 | 'exclude_genres': [10763, 10764, 10767], # News, Reality, Talk shows 31 | 'exclude_non_english': True 32 | } 33 | 34 | # Update with user-provided filters 35 | if filters: 36 | self._filters.update(filters) 37 | 38 | # Customize name based on content type and media type 39 | if content_type in ["popular_movies", "popular_tv"]: 40 | self._name = f"Seer Mediarr Popular {'Movies' if media_type == 'movies' else 'TV'}" 41 | else: 42 | self._name = f"Seer Mediarr {content_type.title()}" 43 | 44 | def should_include_item(self, item, media_type): 45 | """Apply filters to determine if an item should be included.""" 46 | # Skip if no item 47 | if not item: 48 | return False 49 | 50 | # Filter by year 51 | year = None 52 | if media_type == 'tv' and item.get('first_air_date'): 53 | try: 54 | year = int(item['first_air_date'].split('-')[0]) 55 | except (ValueError, IndexError, TypeError): 56 | pass 57 | elif media_type == 'movie' and 
# ===== custom_components/mediarr/discovery/seer_discovery.py (class tail) =====
# NOTE(review): this chunk is a concatenated dump; the enclosing Seer discovery
# sensor class header lies before this view, so its methods are reconstructed
# at module level with an explicit `self` parameter. The leading fragment of
# should_include_item (movie year/language/genre/talk-show filters) started
# before this view and is not reproduced here.

def is_talk_show(self, title):
    """Return True when *title* looks like a talk/news/game show.

    Purely keyword-based; honors the 'exclude_talk_shows' filter switch.
    """
    if not self._filters.get('exclude_talk_shows', True) or not title:
        return False

    keywords = [
        'tonight show', 'late show', 'late night', 'daily show',
        'talk show', 'with seth meyers', 'with james corden',
        'with jimmy', 'with stephen', 'with trevor', 'news',
        'live with', 'watch what happens live', 'the view',
        'good morning', 'today show', 'kimmel', 'colbert',
        'fallon', 'ellen', 'conan', 'graham norton', 'meet the press',
        'face the nation', 'last week tonight', 'real time',
        'kelly and', 'kelly &', 'jeopardy', 'wheel of fortune',
        'daily mail', 'entertainment tonight', 'zeiten', 'schlechte'
    ]

    title_lower = title.lower()
    return any(keyword in title_lower for keyword in keywords)

@property
def name(self):
    """Return the name of the sensor."""
    return self._name

@property
def unique_id(self):
    """Return a unique ID.

    FIX: the original special-cased popular_movies/popular_tv but both
    branches returned the exact same string; collapsed to a single return.
    """
    return f"seer_mediarr_{self._content_type}_{self._url}"

async def _fetch_media_list(self, media_type=None):
    """Fetch a discovery list from Overseerr/Jellyseerr.

    Returns the decoded JSON payload, or None on any error.
    """
    try:
        headers = {'X-Api-Key': self._seer_api_key}
        params = {}

        # Build the correct URL and parameters per content type.
        if self._content_type == "trending":
            url = f"{self._url}/api/v1/discover/trending"
        elif self._content_type == "popular_movies":
            url = f"{self._url}/api/v1/discover/movies"
            params["sortBy"] = "popularity.desc"
        elif self._content_type == "popular_tv":
            url = f"{self._url}/api/v1/discover/tv"
            params["sortBy"] = "popularity.desc"
        elif self._content_type == "discover":
            # Use provided media_type or default to movies.
            media_type = media_type or "movies"
            url = f"{self._url}/api/v1/discover/{media_type}"
        else:
            _LOGGER.error("Unknown content type: %s", self._content_type)
            return None

        _LOGGER.debug("Making request to URL: %s with params: %s", url, params)

        async with async_timeout.timeout(10):
            async with self._session.get(url, headers=headers, params=params) as response:
                if response.status == 200:
                    return await response.json()
                _LOGGER.error("Failed request to %s with params %s, status: %s",
                              url, params, response.status)
                raise Exception(f"Failed to fetch {self._content_type}. Status: {response.status}")

    except Exception as err:
        _LOGGER.error("Error fetching %s: %s", self._content_type, err)
        return None

async def _fetch_all_requests(self):
    """Fetch the TMDB ids of all current Overseerr/Jellyseerr requests.

    FIX: the original read only the first page (take=100, skip=0), so media
    beyond the first 100 requests could be re-suggested; now pages through
    all results.
    """
    try:
        url = f"{self._url}/api/v1/request"
        headers = {"X-Api-Key": self._seer_api_key}
        all_requests = set()
        take = 100
        skip = 0

        while True:
            params = {"take": take, "skip": skip}
            async with async_timeout.timeout(10):
                async with self._session.get(url, headers=headers, params=params) as response:
                    if response.status != 200:
                        break
                    data = await response.json()

            results = data.get('results') or []
            for request in results:
                media = request.get('media')
                if media and media.get('tmdbId'):
                    all_requests.add(str(media['tmdbId']))

            # A short page means we've reached the end.
            if len(results) < take:
                break
            skip += take

        return all_requests
    except Exception as err:
        _LOGGER.error("Error fetching all requests: %s", err)
        return set()

async def _process_media_items(self, data, media_type, requested_ids):
    """Filter and enrich discovery results in parallel.

    Returns card-ready dicts for items that pass the filters, are not
    already requested, and have TMDB details available.
    """
    if not data or not data.get('results'):
        _LOGGER.debug("No data or results to process for %s", media_type)
        return []

    filtered_count = 0
    requested_count = 0
    detail_failure_count = 0
    success_count = 0

    async def process_item(item):
        nonlocal filtered_count, requested_count, detail_failure_count, success_count
        tmdb_id = None
        try:
            # FIX: str(None) is the truthy string 'None', so the original
            # missing-id check could never trigger; test before casting.
            raw_id = item.get('id')
            if raw_id is None:
                _LOGGER.debug("Item has no TMDB ID")
                return None
            tmdb_id = str(raw_id)

            if tmdb_id in requested_ids:
                requested_count += 1
                _LOGGER.debug("Item %s already requested, skipping", tmdb_id)
                return None

            # Apply configured filters (year/language/genre/talk-show...).
            if not self.should_include_item(item, media_type):
                filtered_count += 1
                _LOGGER.debug("Item %s filtered out by criteria", tmdb_id)
                return None

            details = await self._get_tmdb_details(tmdb_id, media_type)
            if not details:
                detail_failure_count += 1
                _LOGGER.debug("Failed to get TMDB details for %s", tmdb_id)
                return None

            poster_url, backdrop_url, main_backdrop_url = await self._get_tmdb_images(tmdb_id, media_type)

            success_count += 1
            return {
                'title': details['title'],
                'overview': details['overview'][:100] + '...' if details.get('overview') else 'No overview available',
                'year': details['year'],
                'poster': str(poster_url or ""),
                'fanart': str(main_backdrop_url or backdrop_url or ""),
                'banner': str(backdrop_url or ""),
                'release': details['year'],
                'type': 'Movie' if media_type == 'movie' else 'TV Show',
                'flag': 1,
                'id': tmdb_id
            }
        except Exception as err:
            _LOGGER.error("Error processing item %s: %s", tmdb_id or 'unknown', err)
            return None

    _LOGGER.debug("Processing %d items for %s", len(data['results']), media_type)
    tasks = [process_item(item) for item in data['results']]
    results = await asyncio.gather(*tasks, return_exceptions=True)

    exceptions = [r for r in results if isinstance(r, Exception)]
    if exceptions:
        _LOGGER.error("Got %d exceptions during processing", len(exceptions))
        for exc in exceptions[:3]:  # log only the first few
            _LOGGER.error("Exception: %s", exc)

    processed_results = [item for item in results
                         if item is not None and not isinstance(item, Exception)]

    _LOGGER.debug("Processing summary for %s: %d items total, %d already requested, "
                  "%d filtered out, %d failed to get details, %d successful",
                  media_type, len(data['results']), requested_count, filtered_count,
                  detail_failure_count, success_count)

    return processed_results

async def async_update(self):
    """Refresh the discovery list, excluding already-requested media."""
    try:
        # Known request ids first, so suggestions exclude them.
        requested_ids = await self._fetch_all_requests()
        _LOGGER.debug("Fetched %d requested IDs from Seer", len(requested_ids))

        all_items = []

        if self._content_type == "discover":
            # 'discover' aggregates both movies and TV.
            for media_type in ['movies', 'tv']:
                data = await self._fetch_media_list(media_type)
                if data and 'results' in data:
                    _LOGGER.debug("Received %d %s items from Seer",
                                  len(data['results']), media_type)
                else:
                    _LOGGER.debug("No %s data or no results received from Seer", media_type)

                processed_items = await self._process_media_items(
                    data,
                    'movie' if media_type == 'movies' else 'tv',
                    requested_ids
                )
                _LOGGER.debug("After filtering: %d %s items remaining",
                              len(processed_items), media_type)
                all_items.extend(processed_items)
        else:
            # Single list: trending, popular movies, or popular TV.
            data = await self._fetch_media_list()
            if data and 'results' in data:
                _LOGGER.debug("Received %d items from Seer for %s",
                              len(data['results']), self._content_type)
            else:
                _LOGGER.debug("No data or no results received from Seer for %s",
                              self._content_type)

            # NOTE(review): 'trending' mixes movies and TV but is processed
            # as 'tv' here (as in the original) — confirm intended.
            media_type = 'movie' if self._content_type == 'popular_movies' else 'tv'
            processed_items = await self._process_media_items(data, media_type, requested_ids)
            _LOGGER.debug("After filtering: %d items remaining", len(processed_items))
            all_items.extend(processed_items)

        # Respect the configured item cap.
        all_items = all_items[:self._max_items]
        _LOGGER.debug("Final number of items after max_items limit: %d", len(all_items))

        if not all_items:
            # Card-template fallback so the frontend card still renders.
            _LOGGER.warning("No items passed filters for %s, using fallback",
                            self._content_type)
            all_items.append({
                'title_default': '$title',
                'line1_default': '$type',
                'line2_default': '$overview',
                'line3_default': '$year',
                'icon': 'mdi:movie-search'
            })

        self._state = len(all_items)
        self._attributes = {'data': all_items}
        self._available = True

    except Exception as err:
        _LOGGER.error("Error updating %s sensor: %s", self._content_type, err)
        self._state = 0
        self._attributes = {'data': []}
        self._available = False
through filters", self._content_type) 298 | processed_items = await self._process_media_items(data, media_type, requested_ids) 299 | _LOGGER.debug("After filtering: %d items remaining", len(processed_items)) 300 | all_items.extend(processed_items) 301 | 302 | # Ensure max_items limit is respected 303 | all_items = all_items[:self._max_items] 304 | _LOGGER.debug("Final number of items after max_items limit: %d", len(all_items)) 305 | 306 | if not all_items: 307 | _LOGGER.warning("No items passed filters for %s, using fallback", self._content_type) 308 | all_items.append({ 309 | 'title_default': '$title', 310 | 'line1_default': '$type', 311 | 'line2_default': '$overview', 312 | 'line3_default': '$year', 313 | 'icon': 'mdi:movie-search' 314 | }) 315 | 316 | self._state = len(all_items) 317 | self._attributes = {'data': all_items} 318 | self._available = True 319 | 320 | except Exception as err: 321 | _LOGGER.error("Error updating %s sensor: %s", self._content_type, err) 322 | self._state = 0 323 | self._attributes = {'data': []} 324 | self._available = False 325 | -------------------------------------------------------------------------------- /custom_components/mediarr/discovery/tmdb.py: -------------------------------------------------------------------------------- 1 | # mediarr/discovery/tmdb.py 2 | """TMDB integration for Mediarr.""" 3 | import time 4 | import logging 5 | from ..common.sensor import MediarrSensor 6 | 7 | _LOGGER = logging.getLogger(__name__) 8 | 9 | TMDB_ENDPOINTS = { 10 | 'trending': 'trending/all/week', 11 | 'now_playing': 'movie/now_playing', 12 | 'upcoming': 'movie/upcoming', 13 | 'on_air': 'tv/on_the_air', 14 | 'airing_today': 'tv/airing_today', 15 | 'popular_movies': 'movie/popular', 16 | 'popular_tv': 'tv/popular' 17 | } 18 | 19 | class TMDBMediarrSensor(MediarrSensor): 20 | def __init__(self, session, api_key, max_items, endpoint='trending', filters=None): 21 | super().__init__() 22 | 23 | self._session = session 24 | self._api_key = 
api_key 25 | self._max_items = max_items 26 | self._endpoint = endpoint 27 | self._name = f"TMDB Mediarr {endpoint.replace('_', ' ').title()}" 28 | 29 | self._filters = { 30 | 'language': 'en', 31 | 'min_year': 0, 32 | 'exclude_talk_shows': True, 33 | 'exclude_genres': [10763, 10764, 10767], 34 | 'exclude_non_english': True, 35 | 'hide_existing': True 36 | } 37 | 38 | if filters: 39 | self._filters.update(filters) 40 | 41 | self._library_tmdb_ids = set() 42 | self._library_titles = set() 43 | self._last_library_fetch = 0 44 | 45 | @property 46 | def name(self): 47 | return self._name 48 | 49 | @property 50 | def unique_id(self): 51 | return f"tmdb_mediarr_{self._endpoint}" 52 | 53 | def should_include_item(self, item, media_type): 54 | """Apply filters to determine if an item should be included.""" 55 | # Skip if no item 56 | if not item: 57 | return False 58 | 59 | # Get item basics 60 | item_id = item.get('id') 61 | title = item.get('title') if media_type == 'movie' else item.get('name', '') 62 | 63 | # Check if item exists in any library sensor 64 | if self._filters.get('hide_existing', True): 65 | for entity_id in self.hass.states.async_entity_ids('sensor'): 66 | if any(source in entity_id for source in ['plex_mediarr', 'jellyfin_mediarr', 'sonarr_mediarr', 'radarr_mediarr']): 67 | entity = self.hass.states.get(entity_id) 68 | if entity and entity.attributes.get('data'): 69 | # Check if TMDB ID matches 70 | if str(item_id) in [str(lib_item.get('tmdb_id')) for lib_item in entity.attributes['data']]: 71 | _LOGGER.debug(f"Item {title} (ID: {item_id}) already exists in library") 72 | return False 73 | 74 | # More flexible title matching 75 | for lib_item in entity.attributes['data']: 76 | lib_title = lib_item.get('title') or lib_item.get('name') or '' 77 | # Remove episode details and extra info 78 | clean_lib_title = lib_title.split(' - ')[0].split(' (')[0] 79 | clean_tmdb_title = title.split(' - ')[0].split(' (')[0] 80 | 81 | if clean_lib_title.lower() == 
clean_tmdb_title.lower(): 82 | _LOGGER.debug(f"Item {title} already exists in library (matched as {clean_lib_title})") 83 | return False 84 | 85 | # Existing filtering logic 86 | lang = item.get('original_language', 'unknown') 87 | 88 | # Filter by year 89 | year = None 90 | if media_type == 'tv' and item.get('first_air_date'): 91 | try: 92 | year = int(item['first_air_date'].split('-')[0]) 93 | if year < self._filters.get('min_year', 0): 94 | _LOGGER.debug(f"Item {title} rejected: year {year} < min_year {self._filters.get('min_year', 0)}") 95 | return False 96 | except (ValueError, IndexError, TypeError): 97 | pass 98 | elif media_type == 'movie' and item.get('release_date'): 99 | try: 100 | year = int(item['release_date'].split('-')[0]) 101 | if year < self._filters.get('min_year', 0): 102 | _LOGGER.debug(f"Item {title} rejected: year {year} < min_year {self._filters.get('min_year', 0)}") 103 | return False 104 | except (ValueError, IndexError, TypeError): 105 | pass 106 | 107 | # Filter by language 108 | if self._filters.get('exclude_non_english', True) and lang != 'en': 109 | _LOGGER.debug(f"Item {title} rejected: language {lang} is not English") 110 | return False 111 | 112 | # Filter by genre 113 | excluded_genres = self._filters.get('exclude_genres', []) 114 | if excluded_genres and any(genre_id in excluded_genres for genre_id in item.get('genre_ids', [])): 115 | _LOGGER.debug(f"Item {title} rejected: genre in excluded list {item.get('genre_ids', [])}") 116 | return False 117 | 118 | # Filter for TV talk shows 119 | if media_type == 'tv' and self._filters.get('exclude_talk_shows', True): 120 | if self.is_talk_show(title): 121 | _LOGGER.debug(f"Item {title} rejected: identified as talk show") 122 | return False 123 | 124 | return True 125 | 126 | async def _fetch_media_libraries(self, hass): 127 | tmdb_ids = set() 128 | 129 | try: 130 | for entity_id in hass.states.async_entity_ids('sensor'): 131 | if 'plex_mediarr' in entity_id or 'jellyfin_mediarr' in 
entity_id: 132 | entity = hass.states.get(entity_id) 133 | if entity and entity.attributes.get('data'): 134 | for item in entity.attributes['data']: 135 | if item.get('tmdb_id'): 136 | tmdb_ids.add(str(item['tmdb_id'])) 137 | 138 | elif 'radarr_mediarr' in entity_id: 139 | entity = hass.states.get(entity_id) 140 | if entity and entity.attributes.get('data'): 141 | for item in entity.attributes['data']: 142 | if item.get('tmdb_id'): 143 | tmdb_ids.add(str(item['tmdb_id'])) 144 | 145 | elif 'sonarr_mediarr' in entity_id: 146 | entity = hass.states.get(entity_id) 147 | if entity and entity.attributes.get('data'): 148 | for item in entity.attributes['data']: 149 | if item.get('tmdb_id'): 150 | tmdb_ids.add(str(item['tmdb_id'])) 151 | 152 | _LOGGER.debug(f"Found {len(tmdb_ids)} media items in libraries") 153 | return tmdb_ids 154 | 155 | except Exception as e: 156 | _LOGGER.error(f"Error fetching media libraries: {e}") 157 | return set() 158 | 159 | def is_talk_show(self, title): 160 | if not self._filters.get('exclude_talk_shows', True) or not title: 161 | return False 162 | 163 | keywords = [ 164 | 'tonight show', 'late show', 'late night', 'daily show', 165 | 'talk show', 'with seth meyers', 'with james corden', 166 | 'with jimmy', 'with stephen', 'with trevor', 'news', 167 | 'live with', 'watch what happens live', 'the view', 168 | 'good morning', 'today show', 'kimmel', 'colbert', 169 | 'fallon', 'ellen', 'conan', 'graham norton', 'meet the press', 170 | 'face the nation', 'last week tonight', 'real time', 171 | 'kelly and', 'kelly &', 'jeopardy', 'wheel of fortune', 172 | 'daily mail', 'entertainment tonight', 'zeiten', 'schlechte' 173 | ] 174 | 175 | title_lower = title.lower() 176 | return any(keyword in title_lower for keyword in keywords) 177 | 178 | def _get_media_type(self, item): 179 | if self._endpoint in ['now_playing', 'upcoming', 'popular_movies']: 180 | return 'movie' 181 | elif self._endpoint in ['on_air', 'airing_today', 'popular_tv']: 182 | return 
'tv' 183 | return item.get('media_type', 'movie') 184 | 185 | async def async_update(self): 186 | try: 187 | hass = self.hass if hasattr(self, 'hass') else None 188 | 189 | current_time = time.time() 190 | if hass and hasattr(self, '_last_library_fetch'): 191 | if current_time - self._last_library_fetch > 3600 and self._filters.get('hide_existing', True): 192 | self._library_tmdb_ids = await self._fetch_media_libraries(hass) 193 | self._last_library_fetch = current_time 194 | else: 195 | self._last_library_fetch = 0 196 | if hass and self._filters.get('hide_existing', True): 197 | self._library_tmdb_ids = await self._fetch_media_libraries(hass) 198 | self._last_library_fetch = current_time 199 | 200 | headers = { 201 | 'Authorization': f'Bearer {self._api_key}', 202 | 'accept': 'application/json' 203 | } 204 | 205 | results = [] 206 | 207 | if self._endpoint == 'popular_tv': 208 | endpoints = ['tv/popular', 'trending/tv/week', 'tv/top_rated'] 209 | 210 | for endpoint in endpoints: 211 | for page in range(1, 3): 212 | params = {'language': self._filters.get('language', 'en-US'), 'page': page} 213 | 214 | _LOGGER.debug(f"Fetching TV data from endpoint: {endpoint}, page: {page}") 215 | 216 | async with self._session.get( 217 | f"https://api.themoviedb.org/3/{endpoint}", 218 | headers=headers, 219 | params=params 220 | ) as response: 221 | if response.status == 200: 222 | data = await response.json() 223 | if 'results' in data: 224 | for item in data.get('results', []): 225 | if not self.should_include_item(item, 'tv'): 226 | continue 227 | 228 | title = item.get('name', '') 229 | 230 | results.append({ 231 | 'title': title, 232 | 'type': 'show', 233 | 'year': self._get_year(item, 'tv'), 234 | 'overview': item.get('overview', ''), 235 | 'poster': f"https://image.tmdb.org/t/p/w500{item.get('poster_path')}" if item.get('poster_path') else None, 236 | 'backdrop': f"https://image.tmdb.org/t/p/original{item.get('backdrop_path')}" if item.get('backdrop_path') else None, 237 
| 'tmdb_id': item.get('id'), 238 | 'popularity': item.get('popularity'), 239 | 'vote_average': item.get('vote_average') 240 | }) 241 | else: 242 | params = {'language': self._filters.get('language', 'en-US')} 243 | endpoint_url = TMDB_ENDPOINTS.get(self._endpoint, TMDB_ENDPOINTS['trending']) 244 | 245 | async with self._session.get( 246 | f"https://api.themoviedb.org/3/{endpoint_url}", 247 | headers=headers, 248 | params=params 249 | ) as response: 250 | if response.status == 200: 251 | data = await response.json() 252 | if 'results' in data: 253 | for item in data.get('results', []): 254 | media_type = self._get_media_type(item) 255 | 256 | if media_type not in ['movie', 'tv']: 257 | continue 258 | 259 | if not self.should_include_item(item, media_type): 260 | continue 261 | 262 | title = item.get('title') if media_type == 'movie' else item.get('name') 263 | 264 | results.append({ 265 | 'title': title, 266 | 'type': 'movie' if media_type == 'movie' else 'show', 267 | 'year': self._get_year(item, media_type), 268 | 'overview': item.get('overview', ''), 269 | 'poster': f"https://image.tmdb.org/t/p/w500{item.get('poster_path')}" if item.get('poster_path') else None, 270 | 'backdrop': f"https://image.tmdb.org/t/p/original{item.get('backdrop_path')}" if item.get('backdrop_path') else None, 271 | 'tmdb_id': item.get('id'), 272 | 'popularity': item.get('popularity'), 273 | 'vote_average': item.get('vote_average') 274 | }) 275 | else: 276 | raise Exception(f"Failed to fetch TMDB {self._endpoint}. 
Status: {response.status}") 277 | 278 | unique_results = [] 279 | seen_ids = set() 280 | for item in results: 281 | if item['tmdb_id'] not in seen_ids: 282 | seen_ids.add(item['tmdb_id']) 283 | unique_results.append(item) 284 | 285 | _LOGGER.debug(f"Found {len(unique_results)} items for {self._endpoint} after filtering") 286 | 287 | filtered_results = [item for item in unique_results if item['tmdb_id'] != 137228] 288 | 289 | self._state = len(filtered_results) 290 | self._attributes = {'data': filtered_results[:self._max_items]} 291 | self._available = True 292 | 293 | except Exception as err: 294 | _LOGGER.error(f"Error updating TMDB sensor: {err}") 295 | self._state = 0 296 | self._attributes = {'data': []} 297 | self._available = False 298 | 299 | def _get_year(self, item, media_type): 300 | if media_type == 'movie': 301 | date = item.get('release_date', '') 302 | else: 303 | date = item.get('first_air_date', '') 304 | return date.split('-')[0] if date else '' -------------------------------------------------------------------------------- /custom_components/mediarr/discovery/trakt.py: -------------------------------------------------------------------------------- 1 | # mediarr/discovery/trakt.py 2 | """Trakt integration for Mediarr.""" 3 | 4 | import logging 5 | from ..common.sensor import MediarrSensor 6 | 7 | _LOGGER = logging.getLogger(__name__) 8 | 9 | class TraktMediarrSensor(MediarrSensor): 10 | def __init__(self, session, client_id, client_secret, trending_type, max_items, tmdb_api_key): 11 | super().__init__() 12 | self._session = session 13 | self._client_id = client_id 14 | self._client_secret = client_secret 15 | self._trending_type = trending_type 16 | self._max_items = max_items 17 | self._tmdb_api_key = tmdb_api_key 18 | self._name = "Trakt Mediarr" 19 | self._access_token = None 20 | self._headers = { 21 | 'Content-Type': 'application/json', 22 | 'trakt-api-version': '2', 23 | 'trakt-api-key': client_id 24 | } 25 | 26 | @property 27 | def 
# ===== custom_components/mediarr/discovery/trakt.py (methods) =====
# NOTE(review): reconstructed methods of TraktMediarrSensor, written at module
# level with an explicit `self` parameter (the class header precedes this
# chunk of the dump).

@property
def unique_id(self):
    """Return a unique ID."""
    return f"trakt_mediarr_{self._trending_type}"

async def _get_access_token(self):
    """Obtain a client-credentials token; True on success."""
    try:
        data = {
            'client_id': self._client_id,
            'client_secret': self._client_secret,
            'grant_type': 'client_credentials'
        }

        async with self._session.post(
            'https://api.trakt.tv/oauth/token',
            json=data,
            headers=self._headers
        ) as response:
            if response.status == 200:
                token_data = await response.json()
                self._access_token = token_data.get('access_token')
                if self._access_token:
                    self._headers['Authorization'] = f'Bearer {self._access_token}'
                    self._available = True
                    return True
            return False
    except Exception as err:
        _LOGGER.error("Error getting Trakt access token: %s", err)
        return False

async def _fetch_popular(self, media_type, _retry=True):
    """GET /{movies|shows}/popular; [] on failure.

    FIX: the original recursed unconditionally after a 401/403 re-auth,
    which could loop forever if the server keeps rejecting the new token;
    now retries at most once (``_retry`` is internal, default preserves
    the public call signature).
    """
    try:
        params = {'limit': self._max_items}

        async with self._session.get(
            f"https://api.trakt.tv/{media_type}/popular",
            headers=self._headers,
            params=params
        ) as response:
            if response.status == 200:
                return await response.json()
            if response.status in (401, 403) and _retry:
                if await self._get_access_token():
                    return await self._fetch_popular(media_type, _retry=False)
            return []
    except Exception as err:
        _LOGGER.error("Error fetching Trakt %s: %s", media_type, err)
        return []

async def _fetch_tmdb_data(self, tmdb_id, media_type):
    """Fetch poster/backdrop/overview/genres from TMDB; {} on failure."""
    try:
        endpoint = 'tv' if media_type == 'show' else 'movie'
        headers = {
            'Authorization': f'Bearer {self._tmdb_api_key}',
            'accept': 'application/json'
        }

        async with self._session.get(
            f"https://api.themoviedb.org/3/{endpoint}/{tmdb_id}",
            headers=headers
        ) as response:
            if response.status == 200:
                data = await response.json()
                return {
                    'poster': f"https://image.tmdb.org/t/p/w500{data.get('poster_path')}" if data.get('poster_path') else None,
                    'backdrop': f"https://image.tmdb.org/t/p/original{data.get('backdrop_path')}" if data.get('backdrop_path') else None,
                    'overview': data.get('overview'),
                    'genres': [g['name'] for g in data.get('genres', [])]
                }
            return {}
    except Exception as err:
        _LOGGER.error("Error fetching TMDB data: %s", err)
        return {}

async def _process_item(self, item, media_type):
    """Normalize a Trakt item and enrich with TMDB art when an id exists."""
    try:
        ids = item.get('ids', {})
        base_item = {
            'title': item['title'],
            'year': item.get('year'),
            'type': media_type,
            'ids': ids,
            'slug': ids.get('slug'),
            'tmdb_id': ids.get('tmdb'),
            'imdb_id': ids.get('imdb'),
            'trakt_id': ids.get('trakt')
        }

        if base_item['tmdb_id']:
            base_item.update(await self._fetch_tmdb_data(base_item['tmdb_id'], media_type))

        return base_item
    except Exception as err:
        _LOGGER.error("Error processing Trakt item: %s", err)
        return None

async def async_update(self):
    """Refresh popular movies/shows from Trakt."""
    try:
        if not self._access_token and not await self._get_access_token():
            self._state = None
            self._attributes = {}
            self._available = False
            return

        all_items = []

        if self._trending_type in ['shows', 'both']:
            for item in await self._fetch_popular('shows'):
                processed = await self._process_item(item, 'show')
                if processed:
                    all_items.append(processed)

        if self._trending_type in ['movies', 'both']:
            for item in await self._fetch_popular('movies'):
                processed = await self._process_item(item, 'movie')
                if processed:
                    all_items.append(processed)

        if all_items:
            self._state = len(all_items)
            self._attributes = {'data': all_items}
            self._available = True
        else:
            self._state = 0
            self._attributes = {'data': []}
            self._available = False

    except Exception as err:
        _LOGGER.error("Error updating Trakt sensor: %s", err)
        self._state = None
        self._attributes = {'data': []}
        self._available = False


# ===== custom_components/mediarr/discovery/trakt_user.py_wip (head) =====
"""Trakt user collections integration for Mediarr."""
import logging
from datetime import datetime, timedelta
from ..common.sensor import MediarrSensor

_LOGGER = logging.getLogger(__name__)

# Maps a configured endpoint name to the Trakt user sub-endpoints it queries.
TRAKT_USER_ENDPOINTS = {
    'collection': ['collection/movies', 'collection/shows'],
    'watched': ['watched/movies', 'watched/shows'],
    'watchlist': ['watchlist/movies', 'watchlist/shows'],
    'recommendations': ['recommendations/movies', 'recommendations/shows'],
    'recently_watched': ['history/movies', 'history/shows'],
    'upcoming': ['calendar/movies', 'calendar/my/shows']
}

class TraktUserMediarrSensor(MediarrSensor):
    """Representation of a Trakt user collection sensor."""

    def __init__(self, session, username, client_id, client_secret, endpoint, max_items, tmdb_api_key=None):
        """Initialize the sensor."""
        super().__init__()
        self._session = session
        self._username = username
        self._client_id = client_id
        self._client_secret = client_secret
        self._endpoint = endpoint
        self._max_items = max_items
        self._tmdb_api_key = tmdb_api_key
        self._name = f"Trakt User Mediarr {endpoint.replace('_', ' ').title()}"
        self._access_token = None
        self._headers = {
            'Content-Type': 'application/json',
            'trakt-api-version': '2',
            'trakt-api-key': client_id
        }

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name
# ===== custom_components/mediarr/discovery/trakt_user.py_wip (methods) =====
# NOTE(review): reconstructed methods of TraktUserMediarrSensor, written at
# module level with an explicit `self` parameter (class header precedes view).

@property
def unique_id(self):
    """Return the unique ID of the sensor."""
    return f"trakt_user_mediarr_{self._endpoint}"

async def async_update(self):
    """Refresh the user-collection list from Trakt.

    Queries every sub-endpoint mapped for self._endpoint, normalizes movie
    and show entries, optionally enriches them via TMDB, and stores up to
    self._max_items results under the 'data' attribute.
    """
    try:
        results = []

        for endpoint in TRAKT_USER_ENDPOINTS.get(self._endpoint, []):
            # Endpoint-specific query parameters.
            params = {}
            if self._endpoint == 'upcoming':
                # 30-day calendar window starting today.
                params = {
                    'start_date': datetime.now().strftime('%Y-%m-%d'),
                    'end_date': (datetime.now() + timedelta(days=30)).strftime('%Y-%m-%d'),
                }
            elif self._endpoint == 'recently_watched':
                params = {'limit': self._max_items}

            async with self._session.get(
                f"https://api.trakt.tv/users/{self._username}/{endpoint}",
                headers=self._headers,
                params=params
            ) as response:
                if response.status != 200:
                    continue  # non-200 pages are silently skipped
                data = await response.json()

            for entry in data:
                if 'movie' in entry:
                    media_item, media_type = entry['movie'], 'movie'
                elif 'show' in entry:
                    media_item, media_type = entry['show'], 'show'
                else:
                    continue

                ids = media_item.get('ids', {})
                result = {
                    'title': media_item.get('title'),
                    'year': media_item.get('year'),
                    'type': media_type,
                    'collected_at': entry.get('collected_at'),
                    'last_watched_at': entry.get('last_watched_at'),
                    'updated_at': entry.get('updated_at'),
                    'ids': ids,
                    'tmdb_id': ids.get('tmdb'),
                    'imdb_id': ids.get('imdb'),
                    'trakt_id': ids.get('trakt'),
                }

                # Optional TMDB enrichment (poster/backdrop/overview...).
                if self._tmdb_api_key and result['tmdb_id']:
                    result.update(await self._fetch_tmdb_data(
                        result['tmdb_id'],
                        'movie' if media_type == 'movie' else 'tv',
                    ))

                results.append(result)

        self._state = len(results)
        self._attributes = {'data': results[:self._max_items]}
        self._available = True

    except Exception as err:
        _LOGGER.error("Error updating Trakt user sensor: %s", err)
        self._state = None
        self._attributes = {'data': []}
        self._available = False
err: 108 | _LOGGER.error("Error updating Trakt user sensor: %s", err) 109 | self._state = None 110 | self._attributes = {'data': []} 111 | self._available = False 112 | 113 | async def _fetch_tmdb_data(self, tmdb_id, media_type): 114 | """Fetch additional metadata from TMDB.""" 115 | try: 116 | headers = { 117 | 'Authorization': f'Bearer {self._tmdb_api_key}', 118 | 'accept': 'application/json' 119 | } 120 | 121 | async with self._session.get( 122 | f"https://api.themoviedb.org/3/{media_type}/{tmdb_id}", 123 | headers=headers 124 | ) as response: 125 | if response.status == 200: 126 | data = await response.json() 127 | return { 128 | 'poster': f"https://image.tmdb.org/t/p/w500{data.get('poster_path')}" if data.get('poster_path') else None, 129 | 'backdrop': f"https://image.tmdb.org/t/p/original{data.get('backdrop_path')}" if data.get('backdrop_path') else None, 130 | 'overview': data.get('overview'), 131 | 'vote_average': data.get('vote_average'), 132 | 'popularity': data.get('popularity') 133 | } 134 | return {} 135 | except Exception as err: 136 | _LOGGER.error("Error fetching TMDB data: %s", err) 137 | return {} -------------------------------------------------------------------------------- /custom_components/mediarr/manager/__init__.py: -------------------------------------------------------------------------------- 1 | # mediarr/manager/__init__.py 2 | """The Mediarr Manager integration.""" 3 | 4 | from homeassistant.const import CONF_API_KEY, CONF_URL 5 | import homeassistant.helpers.config_validation as cv 6 | import voluptuous as vol 7 | from ..common.const import CONF_MAX_ITEMS, CONF_DAYS, DEFAULT_MAX_ITEMS, DEFAULT_DAYS 8 | 9 | # Base schema for all managers 10 | ARR_BASE_SCHEMA = { 11 | vol.Required(CONF_API_KEY): cv.string, 12 | vol.Required(CONF_URL): cv.url, 13 | vol.Optional(CONF_MAX_ITEMS, default=DEFAULT_MAX_ITEMS): cv.positive_int, 14 | } 15 | 16 | # Sonarr schema 17 | SONARR_SCHEMA = ARR_BASE_SCHEMA.copy() 18 | SONARR_SCHEMA.update({ 19 | 
vol.Optional(CONF_DAYS, default=DEFAULT_DAYS): cv.positive_int, 20 | }) 21 | 22 | # Radarr schema 23 | RADARR_SCHEMA = ARR_BASE_SCHEMA.copy() 24 | 25 | # Combined platform schema 26 | PLATFORM_SCHEMA = vol.Schema({ 27 | vol.Optional("sonarr"): vol.Schema(SONARR_SCHEMA), 28 | vol.Optional("radarr"): vol.Schema(RADARR_SCHEMA), 29 | }) 30 | 31 | async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): 32 | """Set up the Mediarr manager platform.""" 33 | session = hass.helpers.aiohttp_client.async_get_clientsession() 34 | sensors = [] 35 | 36 | if "sonarr" in config: 37 | from .sonarr import SonarrMediarrSensor 38 | sensors.append(SonarrMediarrSensor( 39 | session, 40 | config["sonarr"][CONF_API_KEY], 41 | config["sonarr"][CONF_URL], 42 | config["sonarr"].get(CONF_MAX_ITEMS, DEFAULT_MAX_ITEMS), 43 | config["sonarr"].get(CONF_DAYS, DEFAULT_DAYS) 44 | )) 45 | 46 | if "radarr" in config: 47 | from .radarr import RadarrMediarrSensor 48 | sensors.append(RadarrMediarrSensor( 49 | session, 50 | config["radarr"][CONF_API_KEY], 51 | config["radarr"][CONF_URL], 52 | config["radarr"].get(CONF_MAX_ITEMS, DEFAULT_MAX_ITEMS) 53 | )) 54 | 55 | if sensors: 56 | async_add_entities(sensors, True) -------------------------------------------------------------------------------- /custom_components/mediarr/manager/radarr.py: -------------------------------------------------------------------------------- 1 | """Radarr integration for Mediarr.""" 2 | from ..common.sensor import MediarrSensor 3 | from datetime import datetime, timedelta 4 | import async_timeout 5 | import logging 6 | 7 | 8 | _LOGGER = logging.getLogger(__name__) 9 | 10 | class RadarrMediarrSensor(MediarrSensor): 11 | def __init__(self, session, api_key, url, max_items, days_to_check): 12 | """Initialize the sensor.""" 13 | self._session = session 14 | self._radarr_api_key = api_key 15 | self._url = url.rstrip('/') 16 | self._max_items = max_items 17 | self._days_to_check = days_to_check 18 | 
class RadarrMediarrSensor(MediarrSensor):
    """Sensor exposing upcoming Radarr movie releases for the Mediarr card."""

    def __init__(self, session, api_key, url, max_items, days_to_check):
        """Initialize the sensor.

        Args:
            session: shared aiohttp client session.
            api_key: Radarr API key.
            url: base URL of the Radarr server (trailing slash stripped).
            max_items: maximum number of entries exposed in attributes.
            days_to_check: look-ahead window, in days, for release dates.
        """
        self._session = session
        self._radarr_api_key = api_key
        self._url = url.rstrip('/')
        self._max_items = max_items
        self._days_to_check = days_to_check
        self._name = "Radarr Mediarr"
        self._state = 0

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def unique_id(self):
        """Return a unique ID."""
        return f"radarr_mediarr_{self._url}"

    async def async_update(self):
        """Fetch the Radarr movie library and publish upcoming releases.

        State is the number of movies with a release date inside the
        look-ahead window; attributes carry the card-formatted entries.
        """
        try:
            headers = {'X-Api-Key': self._radarr_api_key}
            now = datetime.now().astimezone()
            max_date = now + timedelta(days=self._days_to_check)

            async with async_timeout.timeout(10):
                async with self._session.get(
                    f"{self._url}/api/v3/movie",
                    headers=headers
                ) as response:
                    if response.status != 200:
                        raise Exception(f"Failed to connect to Radarr. Status: {response.status}")

                    movies = await response.json()
                    card_json = []
                    upcoming_movies = []

                    for movie in movies:
                        # Collect every known release date within the window,
                        # then keep the earliest one.
                        release_dates = []
                        for date_field, date_type in [
                            ('digitalRelease', 'Digital'),
                            ('physicalRelease', 'Physical'),
                            ('inCinemas', 'Theaters')
                        ]:
                            if movie.get(date_field):
                                try:
                                    release_date = datetime.fromisoformat(
                                        movie[date_field].replace('Z', '+00:00')
                                    )
                                    if not release_date.tzinfo:
                                        release_date = release_date.replace(tzinfo=now.tzinfo)
                                    if now < release_date <= max_date:
                                        release_dates.append((date_type, release_date))
                                except ValueError:
                                    continue

                        if release_dates:
                            release_dates.sort(key=lambda x: x[1])
                            release_type, release_date = release_dates[0]

                            # BUGFIX: tolerate image records missing remoteUrl
                            # (previously raised KeyError and wiped the sensor).
                            images = {
                                img.get('coverType'): img.get('remoteUrl', '')
                                for img in movie.get('images', [])
                            }

                            movie_data = {
                                "title": str(movie["title"]),
                                "release": f"{release_type} - {release_date.strftime('%Y-%m-%d')}",
                                "aired": release_date.strftime("%Y-%m-%d"),
                                "year": str(movie["year"]),
                                "poster": images.get('poster', ''),
                                "fanart": images.get('fanart', ''),
                                "banner": images.get('banner', ''),
                                "genres": ", ".join(str(g) for g in movie.get("genres", [])[:3]),
                                "runtime": str(movie.get("runtime", 0)),
                                "rating": str(movie.get("ratings", {}).get("value", "")),
                                "studio": str(movie.get("studio", "N/A")),
                                "flag": 1
                            }
                            upcoming_movies.append(movie_data)

                    upcoming_movies.sort(key=lambda x: x['aired'])
                    card_json.extend(upcoming_movies[:self._max_items])

                    if not card_json:
                        # Placeholder entry keeps the card template working
                        # when nothing is upcoming.
                        card_json.append({
                            'title_default': '$title',
                            'line1_default': '$release',
                            'line2_default': '$genres',
                            'line3_default': '$rating - $runtime',
                            'line4_default': '$studio',
                            'icon': 'mdi:arrow-down-circle'
                        })

                    self._state = len(upcoming_movies)
                    self._attributes = {'data': card_json}
                    self._available = True

        except Exception as err:
            _LOGGER.error("Error updating Radarr sensor: %s", err)
            self._state = 0
            self._attributes = {'data': []}
            self._available = False
class Radarr2MediarrSensor(MediarrSensor):
    """Sensor exposing upcoming releases from a second Radarr instance."""

    def __init__(self, session, api_key, url, max_items, days_to_check):
        """Initialize the sensor.

        Args:
            session: shared aiohttp client session.
            api_key: Radarr API key.
            url: base URL of the Radarr server (trailing slash stripped).
            max_items: maximum number of entries exposed in attributes.
            days_to_check: look-ahead window, in days, for release dates.
        """
        self._session = session
        self._radarr2_api_key = api_key
        self._url = url.rstrip('/')
        self._max_items = max_items
        self._days_to_check = days_to_check
        self._name = "Radarr2 Mediarr"
        self._state = 0

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def unique_id(self):
        """Return a unique ID."""
        return f"radarr2_mediarr_{self._url}"

    async def async_update(self):
        """Fetch the Radarr movie library and publish upcoming releases."""
        try:
            headers = {'X-Api-Key': self._radarr2_api_key}
            now = datetime.now().astimezone()
            max_date = now + timedelta(days=self._days_to_check)

            async with async_timeout.timeout(10):
                async with self._session.get(
                    f"{self._url}/api/v3/movie",
                    headers=headers
                ) as response:
                    if response.status != 200:
                        raise Exception(f"Failed to connect to Radarr2. Status: {response.status}")

                    movies = await response.json()
                    card_json = []
                    upcoming_movies = []

                    for movie in movies:
                        # Keep the earliest in-window release date per movie.
                        release_dates = []
                        for date_field, date_type in [
                            ('digitalRelease', 'Digital'),
                            ('physicalRelease', 'Physical'),
                            ('inCinemas', 'Theaters')
                        ]:
                            if movie.get(date_field):
                                try:
                                    release_date = datetime.fromisoformat(
                                        movie[date_field].replace('Z', '+00:00')
                                    )
                                    if not release_date.tzinfo:
                                        release_date = release_date.replace(tzinfo=now.tzinfo)
                                    if now < release_date <= max_date:
                                        release_dates.append((date_type, release_date))
                                except ValueError:
                                    continue

                        if release_dates:
                            release_dates.sort(key=lambda x: x[1])
                            release_type, release_date = release_dates[0]

                            # BUGFIX: tolerate image records missing remoteUrl
                            # (previously raised KeyError and wiped the sensor).
                            images = {
                                img.get('coverType'): img.get('remoteUrl', '')
                                for img in movie.get('images', [])
                            }

                            movie_data = {
                                "title": str(movie["title"]),
                                "release": f"{release_type} - {release_date.strftime('%Y-%m-%d')}",
                                "aired": release_date.strftime("%Y-%m-%d"),
                                "year": str(movie["year"]),
                                "poster": images.get('poster', ''),
                                "fanart": images.get('fanart', ''),
                                "banner": images.get('banner', ''),
                                "genres": ", ".join(str(g) for g in movie.get("genres", [])[:3]),
                                "runtime": str(movie.get("runtime", 0)),
                                "rating": str(movie.get("ratings", {}).get("value", "")),
                                "studio": str(movie.get("studio", "N/A")),
                                "flag": 1
                            }
                            upcoming_movies.append(movie_data)

                    upcoming_movies.sort(key=lambda x: x['aired'])
                    card_json.extend(upcoming_movies[:self._max_items])

                    if not card_json:
                        # Placeholder keeps the card template working when empty.
                        card_json.append({
                            'title_default': '$title',
                            'line1_default': '$release',
                            'line2_default': '$genres',
                            'line3_default': '$rating - $runtime',
                            'line4_default': '$studio',
                            'icon': 'mdi:arrow-down-circle'
                        })

                    self._state = len(upcoming_movies)
                    self._attributes = {'data': card_json}
                    self._available = True

        except Exception as err:
            _LOGGER.error("Error updating Radarr2 sensor: %s", err)
            self._state = 0
            self._attributes = {'data': []}
            self._available = False
class SonarrMediarrSensor(MediarrSensor):
    """Sensor exposing upcoming Sonarr episodes for the Mediarr card."""

    def __init__(self, session, api_key, url, max_items, days_to_check):
        """Initialize the sensor.

        Args:
            session: shared aiohttp client session.
            api_key: Sonarr API key.
            url: base URL of the Sonarr server (trailing slash stripped).
            max_items: maximum number of shows exposed in attributes.
            days_to_check: look-ahead window, in days, for the calendar query.
        """
        self._session = session
        self._sonarr_api_key = api_key
        self._url = url.rstrip('/')
        self._max_items = max_items
        self._days_to_check = days_to_check
        self._name = "Sonarr Mediarr"
        self._state = 0

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def unique_id(self):
        """Return a unique ID."""
        return f"sonarr_mediarr_{self._url}"

    def _format_date(self, date_str: str) -> str:
        """Normalize a YYYY-MM-DD string; return 'Unknown' if unparsable.

        BUGFIX: also catches TypeError so a missing/None airDate no longer
        escapes as an unhandled exception.
        """
        try:
            return datetime.strptime(date_str, '%Y-%m-%d').strftime('%Y-%m-%d')
        except (TypeError, ValueError):
            return 'Unknown'

    async def async_update(self):
        """Query the Sonarr calendar and publish the earliest upcoming
        episode per series."""
        try:
            headers = {'X-Api-Key': self._sonarr_api_key}
            now = datetime.now(ZoneInfo('UTC'))
            params = {
                'start': now.strftime('%Y-%m-%d'),
                'end': (now + timedelta(days=self._days_to_check)).strftime('%Y-%m-%d'),
                'includeSeries': 'true'
            }

            async with async_timeout.timeout(10):
                async with self._session.get(
                    f"{self._url}/api/v3/calendar",
                    headers=headers,
                    params=params
                ) as response:
                    if response.status == 200:
                        upcoming_episodes = await response.json()
                        card_json = []
                        shows_dict = {}

                        for episode in upcoming_episodes:
                            series = episode.get('series', {})
                            # BUGFIX: calendar entries without series data
                            # previously raised KeyError on series['id'].
                            series_id = series.get('id')
                            air_date = self._format_date(episode.get('airDate'))

                            if series_id is None or air_date == 'Unknown':
                                continue

                            # Tolerate image records missing remoteUrl.
                            images = {
                                img.get('coverType'): img.get('remoteUrl', '')
                                for img in series.get('images', [])
                            }
                            series_title = series.get('title', 'Unknown')
                            season = episode.get('seasonNumber', 0)
                            number = episode.get('episodeNumber', 0)

                            show_data = {
                                'title': f"{series_title} - {season:02d}x{number:02d}",
                                'episode': str(episode.get('title', 'Unknown')),
                                'release': air_date,
                                'number': f"S{season:02d}E{number:02d}",
                                'runtime': str(series.get('runtime', 0)),
                                'network': str(series.get('network', 'N/A')),
                                'poster': images.get('poster', ''),
                                'fanart': images.get('fanart', ''),
                                'banner': images.get('banner', ''),
                                'season': str(season),
                                'details': f"{series_title}\n{episode.get('title', 'Unknown')}\nS{season:02d}E{number:02d}",
                                'flag': 1
                            }

                            # Keep only the earliest air date per series.
                            if series_id not in shows_dict or air_date < shows_dict[series_id]['release']:
                                shows_dict[series_id] = show_data

                        upcoming_shows = list(shows_dict.values())
                        upcoming_shows.sort(key=lambda x: x['release'])
                        card_json.extend(upcoming_shows[:self._max_items])

                        if not card_json:
                            # Placeholder keeps the card template working.
                            card_json.append({
                                'title_default': '$title',
                                'line1_default': '$episode',
                                'line2_default': '$release',
                                'line3_default': '$number',
                                'line4_default': '$runtime - $network',
                                'icon': 'mdi:arrow-down-circle'
                            })

                        self._state = len(upcoming_shows)
                        self._attributes = {'data': card_json}
                        self._available = True
                    else:
                        raise Exception(f"Failed to connect to Sonarr. Status: {response.status}")

        except Exception as err:
            _LOGGER.error("Error updating Sonarr sensor: %s", err)
            self._state = 0
            self._attributes = {'data': []}
            self._available = False
class Sonarr2MediarrSensor(MediarrSensor):
    """Sensor exposing upcoming episodes from a second Sonarr instance."""

    def __init__(self, session, api_key, url, max_items, days_to_check):
        """Initialize the sensor.

        Args:
            session: shared aiohttp client session.
            api_key: Sonarr API key.
            url: base URL of the Sonarr server (trailing slash stripped).
            max_items: maximum number of shows exposed in attributes.
            days_to_check: look-ahead window, in days, for the calendar query.
        """
        self._session = session
        self._sonarr2_api_key = api_key
        self._url = url.rstrip('/')
        self._max_items = max_items
        self._days_to_check = days_to_check
        self._name = "Sonarr2 Mediarr"
        self._state = 0

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def unique_id(self):
        """Return a unique ID."""
        return f"sonarr2_mediarr_{self._url}"

    def _format_date(self, date_str: str) -> str:
        """Normalize a YYYY-MM-DD string; return 'Unknown' if unparsable.

        BUGFIX: also catches TypeError so a missing/None airDate no longer
        escapes as an unhandled exception.
        """
        try:
            return datetime.strptime(date_str, '%Y-%m-%d').strftime('%Y-%m-%d')
        except (TypeError, ValueError):
            return 'Unknown'

    async def async_update(self):
        """Query the Sonarr calendar and publish the earliest upcoming
        episode per series."""
        try:
            headers = {'X-Api-Key': self._sonarr2_api_key}
            now = datetime.now(ZoneInfo('UTC'))
            params = {
                'start': now.strftime('%Y-%m-%d'),
                'end': (now + timedelta(days=self._days_to_check)).strftime('%Y-%m-%d'),
                'includeSeries': 'true'
            }

            async with async_timeout.timeout(10):
                async with self._session.get(
                    f"{self._url}/api/v3/calendar",
                    headers=headers,
                    params=params
                ) as response:
                    if response.status == 200:
                        upcoming_episodes = await response.json()
                        card_json = []
                        shows_dict = {}

                        for episode in upcoming_episodes:
                            series = episode.get('series', {})
                            # BUGFIX: calendar entries without series data
                            # previously raised KeyError on series['id'].
                            series_id = series.get('id')
                            air_date = self._format_date(episode.get('airDate'))

                            if series_id is None or air_date == 'Unknown':
                                continue

                            # Tolerate image records missing remoteUrl.
                            images = {
                                img.get('coverType'): img.get('remoteUrl', '')
                                for img in series.get('images', [])
                            }
                            series_title = series.get('title', 'Unknown')
                            season = episode.get('seasonNumber', 0)
                            number = episode.get('episodeNumber', 0)

                            show_data = {
                                'title': f"{series_title} - {season:02d}x{number:02d}",
                                'episode': str(episode.get('title', 'Unknown')),
                                'release': air_date,
                                'number': f"S{season:02d}E{number:02d}",
                                'runtime': str(series.get('runtime', 0)),
                                'network': str(series.get('network', 'N/A')),
                                'poster': images.get('poster', ''),
                                'fanart': images.get('fanart', ''),
                                'banner': images.get('banner', ''),
                                'season': str(season),
                                'details': f"{series_title}\n{episode.get('title', 'Unknown')}\nS{season:02d}E{number:02d}",
                                'flag': 1
                            }

                            # Keep only the earliest air date per series.
                            if series_id not in shows_dict or air_date < shows_dict[series_id]['release']:
                                shows_dict[series_id] = show_data

                        upcoming_shows = list(shows_dict.values())
                        upcoming_shows.sort(key=lambda x: x['release'])
                        card_json.extend(upcoming_shows[:self._max_items])

                        if not card_json:
                            # Placeholder keeps the card template working.
                            card_json.append({
                                'title_default': '$title',
                                'line1_default': '$episode',
                                'line2_default': '$release',
                                'line3_default': '$number',
                                'line4_default': '$runtime - $network',
                                'icon': 'mdi:arrow-down-circle'
                            })

                        self._state = len(upcoming_shows)
                        self._attributes = {'data': card_json}
                        self._available = True
                    else:
                        raise Exception(f"Failed to connect to Sonarr2. Status: {response.status}")

        except Exception as err:
            _LOGGER.error("Error updating Sonarr2 sensor: %s", err)
            self._state = 0
            self._attributes = {'data': []}
            self._available = False
from .discovery.tmdb import TMDB_ENDPOINTS
from .common.const import (
    CONF_MAX_ITEMS,
    CONF_DAYS,
    DEFAULT_MAX_ITEMS,
    DEFAULT_DAYS
)


def _build_tmdb_sensors(session, tmdb_config):
    """Build one TMDBMediarrSensor per enabled TMDB endpoint."""
    from .discovery.tmdb import TMDBMediarrSensor
    tmdb_api_key = tmdb_config.get("tmdb_api_key")
    filters = tmdb_config.get("filters", {})
    max_items = tmdb_config.get("max_items", DEFAULT_MAX_ITEMS)
    endpoints = [
        'trending', 'now_playing', 'upcoming', 'on_air', 'airing_today',
        'popular_movies', 'popular_tv',
    ]
    return [
        TMDBMediarrSensor(session, tmdb_api_key, max_items, endpoint, filters)
        for endpoint in endpoints
        if tmdb_config.get(endpoint, False)
    ]


def _build_seer_sensors(session, seer_config):
    """Build the seer request sensor plus any enabled discovery sensors."""
    from .services.seer import SeerMediarrSensor
    from .discovery.seer_discovery import SeerDiscoveryMediarrSensor
    filters = seer_config.get("filters", {})
    max_items = seer_config.get("max_items", DEFAULT_MAX_ITEMS)

    # The base requests sensor is always created.
    sensors = [SeerMediarrSensor(
        session,
        seer_config["api_key"],
        seer_config["url"],
        seer_config.get("tmdb_api_key"),
        max_items
    )]

    # (config flag, media_type passed to the discovery sensor)
    discovery_specs = [
        ("trending", None),
        ("popular_movies", "movies"),
        ("popular_tv", "tv"),
        ("discover", None),
    ]
    for content_type, media_type in discovery_specs:
        if seer_config.get(content_type, False):
            sensors.append(SeerDiscoveryMediarrSensor(
                session,
                seer_config["api_key"],
                seer_config["url"],
                seer_config.get("tmdb_api_key"),
                max_items,
                content_type,
                media_type,
                filters  # Pass filters to the sensor
            ))
    return sensors


async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up Mediarr sensors from YAML configuration.

    Creates server (Plex/Jellyfin/Emby), manager (Sonarr/Radarr), and
    discovery (Trakt/TMDB/Seer) sensors based on which keys are present
    in the platform config, then registers them all at once.
    """
    session = async_get_clientsession(hass)
    sensors = []

    # Server Sensors
    if "plex" in config:
        from .server.plex import PlexMediarrSensor
        sensors.extend(await PlexMediarrSensor.create_sensors(hass, config["plex"]))

    if "jellyfin" in config:
        from .server.jellyfin import JellyfinMediarrSensor
        sensors.extend(await JellyfinMediarrSensor.create_sensors(hass, config["jellyfin"]))

    if "emby" in config:
        from .server.emby import EmbyMediarrSensor
        sensors.extend(await EmbyMediarrSensor.create_sensors(hass, config["emby"]))

    # Manager Sensors: all four *arr services share the same argument shape.
    arr_specs = [
        ("sonarr", ".manager.sonarr", "SonarrMediarrSensor"),
        ("sonarr2", ".manager.sonarr2", "Sonarr2MediarrSensor"),
        ("radarr", ".manager.radarr", "RadarrMediarrSensor"),
        ("radarr2", ".manager.radarr2", "Radarr2MediarrSensor"),
    ]
    from importlib import import_module
    for key, module_path, class_name in arr_specs:
        if key in config:
            sensor_cls = getattr(import_module(module_path, __package__), class_name)
            sensors.append(sensor_cls(
                session,
                config[key]["api_key"],
                config[key]["url"],
                config[key].get("max_items", DEFAULT_MAX_ITEMS),
                config[key].get("days_to_check", DEFAULT_DAYS)
            ))

    # Discovery Sensors
    if "trakt" in config:
        from .discovery.trakt import TraktMediarrSensor
        sensors.append(TraktMediarrSensor(
            session,
            config["trakt"]["client_id"],
            config["trakt"]["client_secret"],
            config["trakt"].get("trending_type", "both"),
            config["trakt"].get("max_items", DEFAULT_MAX_ITEMS),
            config["trakt"]["tmdb_api_key"]
        ))

    if "tmdb" in config:
        sensors.extend(_build_tmdb_sensors(session, config["tmdb"]))

    if "seer" in config:
        sensors.extend(_build_seer_sensors(session, config["seer"]))

    if sensors:
        # Track created sensors so async_unload_platform can clean them up.
        if "mediarr_sensors" not in hass.data:
            hass.data["mediarr_sensors"] = []
        hass.data["mediarr_sensors"].extend(sensors)
        async_add_entities(sensors, True)
async def async_unload_platform(hass, config):
    """Unload the platform.

    Detaches seer request sensors (identified by their get_request_info
    hook), running their removal callbacks, and keeps the rest tracked.
    """
    if "seer" not in config or "mediarr_sensors" not in hass.data:
        return

    tracked = hass.data["mediarr_sensors"]
    remaining, to_remove = [], []
    for entity in tracked:
        # Only seer request sensors expose get_request_info.
        (to_remove if hasattr(entity, "get_request_info") else remaining).append(entity)

    for entity in to_remove:
        await entity.async_will_remove_from_hass()

    hass.data["mediarr_sensors"] = remaining
class JellyfinWebSocket:
    """Jellyfin WebSocket client.

    Maintains a persistent socket to the Jellyfin server and triggers a
    sensor refresh when the library changes; reconnects with exponential
    backoff on failure.
    """

    def __init__(self, sensor, server_url, token, user_id):
        """Initialize the WebSocket client.

        Args:
            sensor: owning JellyfinMediarrSensor (provides hass and updates).
            server_url: HTTP(S) base URL; converted to ws(s) scheme.
            token: Jellyfin API token.
            user_id: Jellyfin user id (kept for parity with the REST calls).
        """
        self._sensor = sensor
        self._ws = None
        # 'http://...' -> 'ws://...', 'https://...' -> 'wss://...'
        self._base_url = server_url.replace('http', 'ws', 1)
        self._token = token
        self._user_id = user_id
        self._connected = False
        self._session = None
        self._hass = sensor.hass
        self._retry_task = None
        self._connection_retry_count = 0
        self._disconnect_handle = None
        self._scheduled_retry = None

    async def connect(self):
        """Connect to the Jellyfin WebSocket."""
        if self._connected:
            return

        try:
            if not self._session:
                self._session = aiohttp.ClientSession()

            headers = {
                "Authorization": f'MediaBrowser Token="{self._token}"'
            }

            url = f"{self._base_url}/socket?api_key={self._token}&deviceId=mediarr"
            # heartbeat=30 has aiohttp send pings, keeping the link alive.
            self._ws = await self._session.ws_connect(url, headers=headers, heartbeat=30)
            self._connected = True
            self._connection_retry_count = 0

            # Send initial messages required by Jellyfin
            await self._ws.send_str(json.dumps({
                "MessageType": "SessionsStart",
                "Data": "0,1500"
            }))

            _LOGGER.info("Connected to Jellyfin WebSocket")

            # Start listening for messages
            self._hass.async_create_task(self._listen())

        except Exception as err:
            _LOGGER.error("WebSocket connection failed: %s", err)
            self._connected = False
            await self.cleanup()
            await self._schedule_reconnect()

    async def _schedule_reconnect(self):
        """Schedule a reconnection attempt with exponential backoff."""
        if self._scheduled_retry:
            self._scheduled_retry.cancel()

        if self._connection_retry_count < 5:  # Maximum retry limit
            delay = min(30, 2 ** self._connection_retry_count)
            self._connection_retry_count += 1
            _LOGGER.info("Scheduling reconnection attempt in %s seconds...", delay)
            self._scheduled_retry = self._hass.loop.call_later(
                delay, lambda: self._hass.async_create_task(self.connect())
            )

    async def _listen(self):
        """Listen for WebSocket messages until the connection drops.

        BUGFIX: the previous version wrapped this loop in a 30-second
        async_timeout, which tore down a perfectly healthy connection
        every 30 seconds and forced constant reconnects. Keep-alive is
        already handled by ws_connect(heartbeat=30) and Jellyfin's
        ForceKeepAlive messages, so the listener runs untimed.
        """
        try:
            async for msg in self._ws:
                if msg.type == aiohttp.WSMsgType.TEXT:
                    data = json.loads(msg.data)

                    # Handle different message types
                    if data.get("MessageType") == "Library":
                        # Library changed, trigger an update
                        if "ItemsAdded" in data.get("Data", {}) or "ItemsRemoved" in data.get("Data", {}):
                            _LOGGER.debug("Library changed, triggering update")
                            await self._sensor.async_update()

                    elif data.get("MessageType") == "ForceKeepAlive":
                        # Respond to keep-alive messages
                        await self._ws.send_str(json.dumps({
                            "MessageType": "KeepAlive"
                        }))
                elif msg.type in (aiohttp.WSMsgType.CLOSED, aiohttp.WSMsgType.ERROR):
                    _LOGGER.warning("WebSocket connection closed or error")
                    break

        except Exception as err:
            _LOGGER.error("WebSocket listener error: %s", err)
        finally:
            self._connected = False
            await self.cleanup()
            await self._schedule_reconnect()

    async def cleanup(self):
        """Clean up WebSocket resources."""
        self._connected = False
        if self._scheduled_retry:
            self._scheduled_retry.cancel()
            self._scheduled_retry = None

        if self._ws and not self._ws.closed:
            await self._ws.close()

        if self._session:
            await self._session.close()
            self._session = None
unique_id(self): 205 | """Return a unique ID for the sensor.""" 206 | return "jellyfin_mediarr" 207 | 208 | @property 209 | def available(self): 210 | """Return True if entity is available.""" 211 | return self._available 212 | 213 | @property 214 | def state(self): 215 | """Return the state of the sensor.""" 216 | return self._state 217 | 218 | @property 219 | def extra_state_attributes(self): 220 | """Return the state attributes.""" 221 | return self._attributes 222 | 223 | async def _enhanced_tmdb_search(self, title, year=None, media_type='movie'): 224 | """Enhanced TMDB search with multiple strategies.""" 225 | # Try exact match first 226 | tmdb_id = await self._search_tmdb(title, year, media_type) 227 | if tmdb_id: 228 | return tmdb_id 229 | 230 | # Try without year suffix in title 231 | title_no_year = re.sub(r'\s*\(\d{4}\)\s*$', '', title).strip() 232 | if title_no_year != title: 233 | tmdb_id = await self._search_tmdb(title_no_year, year, media_type) 234 | if tmdb_id: 235 | return tmdb_id 236 | 237 | # Try removing any text in parentheses 238 | title_no_parens = re.sub(r'\s*\([^)]*\)\s*', ' ', title).strip() 239 | if title_no_parens != title and title_no_parens != title_no_year: 240 | tmdb_id = await self._search_tmdb(title_no_parens, year, media_type) 241 | if tmdb_id: 242 | return tmdb_id 243 | 244 | # Try first part of title (before colon) 245 | if ':' in title: 246 | first_part = title.split(':', 1)[0].strip() 247 | if len(first_part) > 3: # Avoid too short titles 248 | tmdb_id = await self._search_tmdb(first_part, year, media_type) 249 | if tmdb_id: 250 | return tmdb_id 251 | 252 | return None 253 | async def _download_and_cache_image(self, url, item_id, image_type): 254 | """Download and cache an image from Jellyfin.""" 255 | try: 256 | headers = { 257 | "Authorization": f'MediaBrowser Token="{self._jellyfin_token}"', 258 | "Accept": "image/jpeg" 259 | } 260 | 261 | async with async_timeout.timeout(10): 262 | async with self._session.get(url, 
headers=headers) as response: 263 | if response.status == 200: 264 | cache_dir = Path(self.hass.config.path("www/mediarr/cache")) 265 | cache_dir.mkdir(parents=True, exist_ok=True) 266 | 267 | file_name = f"{item_id}_{image_type}.jpg" 268 | cached_path = cache_dir / file_name 269 | 270 | content = await response.read() 271 | 272 | # Use aiofiles for async file operations 273 | async with aiofiles.open(cached_path, 'wb') as f: 274 | await f.write(content) 275 | 276 | _LOGGER.debug("Successfully cached image for %s: %s", item_id, image_type) 277 | return f"/local/mediarr/cache/{file_name}" 278 | else: 279 | _LOGGER.warning("Failed to download image %s for %s: %s", image_type, item_id, response.status) 280 | except Exception as err: 281 | _LOGGER.error("Error caching image %s for %s: %s", image_type, item_id, err) 282 | return None 283 | 284 | def _clean_unused_images(self, current_ids): 285 | """Clean up cached images that aren't in the current item list.""" 286 | try: 287 | cache_dir = Path(self.hass.config.path("www/mediarr/cache")) 288 | if not cache_dir.exists(): 289 | return 290 | 291 | for image_file in cache_dir.glob("*.jpg"): 292 | item_id = image_file.stem.split('_')[0] 293 | if item_id not in current_ids: 294 | try: 295 | image_file.unlink() 296 | _LOGGER.debug("Removed unused image: %s", image_file.name) 297 | except Exception as err: 298 | _LOGGER.error("Error removing image %s: %s", image_file.name, err) 299 | except Exception as err: 300 | _LOGGER.error("Error cleaning cached images: %s", err) 301 | 302 | async def _get_jellyfin_images(self, item_id): 303 | """Get and cache images from Jellyfin.""" 304 | base_img_url = f"{self._base_url}/Items/{item_id}/Images" 305 | poster_url = f"{base_img_url}/Primary" 306 | backdrop_url = f"{base_img_url}/Backdrop" 307 | 308 | try: 309 | # Ensure full URL works with both HTTP and HTTPS 310 | def _ensure_full_url(url): 311 | if not url.startswith(('http://', 'https://')): 312 | return f"{self._base_url}{url}" 313 | 
return url 314 | 315 | cached_poster = await self._download_and_cache_image(_ensure_full_url(poster_url), item_id, "poster") 316 | cached_backdrop = await self._download_and_cache_image(_ensure_full_url(backdrop_url), item_id, "backdrop") 317 | 318 | if cached_poster or cached_backdrop: 319 | _LOGGER.debug("Successfully cached images for item %s", item_id) 320 | return cached_poster, cached_backdrop, cached_backdrop 321 | except Exception as err: 322 | _LOGGER.error("Error getting Jellyfin images: %s", err) 323 | return None, None, None 324 | 325 | async def _get_libraries(self): 326 | """Fetch movie and TV show libraries.""" 327 | url = f"{self._base_url}/Users/{self._user_id}/Views" 328 | headers = { 329 | "Authorization": f'MediaBrowser Token="{self._jellyfin_token}"', 330 | "Accept": "application/json" 331 | } 332 | 333 | libraries = {'movies': [], 'tvshows': []} 334 | 335 | try: 336 | async with async_timeout.timeout(10): 337 | async with self._session.get(url, headers=headers) as response: 338 | if response.status == 200: 339 | data = await response.json() 340 | for lib in data['Items']: 341 | if lib['CollectionType'] == 'movies': 342 | libraries['movies'].append(lib['Id']) 343 | elif lib['CollectionType'] == 'tvshows': 344 | libraries['tvshows'].append(lib['Id']) 345 | return libraries 346 | except Exception as err: 347 | _LOGGER.error("Error fetching libraries: %s", err) 348 | return libraries 349 | 350 | async def _fetch_recently_added(self, library_id): 351 | """Fetch recently added items from a library.""" 352 | url = f"{self._base_url}/Users/{self._user_id}/Items/Latest" 353 | params = { 354 | "ParentId": library_id, 355 | "Limit": self._max_items, 356 | "Fields": "ProviderIds,Overview,PremiereDate,RunTimeTicks,Genres,ParentIndexNumber,IndexNumber,SeriesName,SeriesId,ProductionYear,DateCreated", # Added DateCreated 357 | "EnableImages": "true", 358 | "ImageTypeLimit": 1, 359 | "SortBy": "DateCreated,SortName", # Changed primary sort to DateCreated 360 | 
"SortOrder": "Descending" 361 | } 362 | headers = { 363 | "Authorization": f'MediaBrowser Token="{self._jellyfin_token}"', 364 | "Accept": "application/json" 365 | } 366 | 367 | try: 368 | async with async_timeout.timeout(10): 369 | async with self._session.get(url, params=params, headers=headers) as response: 370 | if response.status == 200: 371 | return await response.json() 372 | return [] 373 | except Exception as err: 374 | _LOGGER.error("Error fetching recently added items: %s", err) 375 | return [] 376 | 377 | async def _process_item(self, item): 378 | """Process a single item from Jellyfin.""" 379 | try: 380 | is_episode = item.get('Type') == 'Episode' 381 | item_id = item.get('Id') 382 | date_added = item.get('DateCreated', '') # Get the date added 383 | 384 | # Default empty images to None for cleaner handling 385 | poster_url = backdrop_url = main_backdrop_url = None 386 | 387 | if is_episode: 388 | # Get TMDB ID for the series 389 | series_name = str(item.get('SeriesName', '')).strip() 390 | tmdb_id = item.get('ProviderIds', {}).get('Tmdb') 391 | 392 | # First, try getting local (Jellyfin) images 393 | local_poster, local_backdrop, local_main = await self._get_jellyfin_images(item_id) 394 | poster_url = local_poster 395 | backdrop_url = local_backdrop 396 | main_backdrop_url = local_main 397 | 398 | # Only if no local images, try TMDB 399 | if not poster_url or not backdrop_url or not main_backdrop_url: 400 | if not tmdb_id: 401 | _LOGGER.debug("Searching TMDB for TV show: %s", series_name) 402 | tmdb_id = await self._enhanced_tmdb_search(series_name, None, 'tv') 403 | 404 | if tmdb_id: 405 | try: 406 | _LOGGER.debug("Getting TMDB images for show ID: %s", tmdb_id) 407 | tmdb_poster, tmdb_backdrop, tmdb_main = await self._get_tmdb_images(tmdb_id, 'tv') 408 | 409 | # Use TMDB images only for missing local images 410 | if not poster_url and tmdb_poster: 411 | poster_url = tmdb_poster 412 | if not backdrop_url and tmdb_backdrop: 413 | backdrop_url = 
tmdb_backdrop 414 | if not main_backdrop_url and tmdb_main: 415 | main_backdrop_url = tmdb_main 416 | except Exception as err: 417 | _LOGGER.error("Error getting TMDB images for %s: %s", series_name, err) 418 | 419 | return { 420 | 'title': str(item.get('SeriesName', '')), 421 | 'episode': str(item.get('Name', '')), 422 | 'release': self._format_date(item.get('PremiereDate')), 423 | 'added': self._format_date(date_added), 424 | 'number': f"S{item.get('ParentIndexNumber', 0):02d}E{item.get('IndexNumber', 0):02d}", 425 | 'runtime': str(int(item.get('RunTimeTicks', 0)) // 600000000), 426 | 'genres': ', '.join(str(g) for g in item.get('Genres', [])), 427 | 'poster': str(poster_url or ""), 428 | 'fanart': str(main_backdrop_url or backdrop_url or ""), 429 | 'banner': str(backdrop_url or ""), 430 | 'flag': 1, 431 | 'added_at': date_added # Store raw date for sorting 432 | } 433 | else: 434 | # Process movie 435 | title = str(item.get('Name', '')).strip() 436 | year = item.get('ProductionYear') 437 | 438 | # First, try getting local (Jellyfin) images 439 | local_poster, local_backdrop, local_main = await self._get_jellyfin_images(item_id) 440 | poster_url = local_poster 441 | backdrop_url = local_backdrop 442 | main_backdrop_url = local_main 443 | 444 | # Try TMDB ID from provider IDs first 445 | tmdb_id = item.get('ProviderIds', {}).get('Tmdb') 446 | 447 | # Only if no local images, try TMDB 448 | if not poster_url or not backdrop_url or not main_backdrop_url: 449 | if not tmdb_id: 450 | _LOGGER.debug("Searching TMDB for movie: %s (%s)", title, year) 451 | tmdb_id = await self._enhanced_tmdb_search(title, year, 'movie') 452 | 453 | if tmdb_id: 454 | try: 455 | _LOGGER.debug("Getting TMDB images for movie ID: %s", tmdb_id) 456 | tmdb_poster, tmdb_backdrop, tmdb_main = await self._get_tmdb_images(tmdb_id, 'movie') 457 | 458 | # Use TMDB images only for missing local images 459 | if not poster_url and tmdb_poster: 460 | poster_url = tmdb_poster 461 | if not backdrop_url and 
tmdb_backdrop: 462 | backdrop_url = tmdb_backdrop 463 | if not main_backdrop_url and tmdb_main: 464 | main_backdrop_url = tmdb_main 465 | except Exception as err: 466 | _LOGGER.error("Error getting TMDB images for %s: %s", title, err) 467 | 468 | return { 469 | 'title': str(item.get('Name', 'Unknown')), 470 | 'episode': str(item.get('Overview', 'N/A')[:100] + '...' if item.get('Overview') else 'N/A'), 471 | 'release': self._format_date(item.get('PremiereDate')), 472 | 'added': self._format_date(date_added), 473 | 'number': str(item.get('ProductionYear', '')), 474 | 'runtime': str(int(item.get('RunTimeTicks', 0)) // 600000000), 475 | 'genres': ', '.join(str(g) for g in item.get('Genres', [])), 476 | 'poster': str(poster_url or ""), 477 | 'fanart': str(main_backdrop_url or backdrop_url or ""), 478 | 'banner': str(backdrop_url or ""), 479 | 'flag': 1, 480 | 'added_at': date_added # Store raw date for sorting 481 | } 482 | 483 | except Exception as err: 484 | _LOGGER.error("Error processing item: %s", err) 485 | return None 486 | 487 | async def async_update(self): 488 | """Update sensor data.""" 489 | try: 490 | recently_added = [] 491 | current_item_ids = set() 492 | show_episodes = {} # Dictionary to track episodes per show 493 | libraries = await self._get_libraries() 494 | 495 | # Get all libraries (both movies and TV) 496 | all_libraries = libraries['movies'] + libraries['tvshows'] 497 | 498 | # Fetch recent items from all libraries 499 | for library_id in all_libraries: 500 | items = await self._fetch_recently_added(library_id) 501 | for item in items: 502 | processed = await self._process_item(item) 503 | if processed: 504 | item_id = item.get('Id') 505 | if item_id: 506 | current_item_ids.add(item_id) 507 | 508 | # Handle TV show episodes differently for grouping 509 | if 'S' in processed.get('number', ''): # It's a TV episode 510 | show_title = processed['title'] 511 | if show_title not in show_episodes: 512 | show_episodes[show_title] = { 513 | **processed, 
514 | 'episodes': [processed['number']], 515 | 'added_at': processed.get('added', '') # Use added date instead of release 516 | } 517 | else: 518 | show_episodes[show_title]['episodes'].append(processed['number']) 519 | # Update if this episode was added more recently 520 | if processed.get('added', '') > show_episodes[show_title]['added_at']: 521 | show_episodes[show_title]['added_at'] = processed.get('added', '') 522 | show_episodes[show_title]['episode'] = processed['episode'] 523 | show_episodes[show_title]['number'] = processed['number'] 524 | else: 525 | recently_added.append(processed) 526 | 527 | # Process grouped shows 528 | for show_data in show_episodes.values(): 529 | episode_count = len(show_data['episodes']) 530 | if episode_count > 1: 531 | show_data['episode'] = f"{episode_count} new episodes ({show_data['number']})" 532 | del show_data['episodes'] # Remove the episodes list before adding to final results 533 | recently_added.append(show_data) 534 | 535 | # Clean up unused cached images 536 | self._clean_unused_images(current_item_ids) 537 | 538 | # Sort by added date instead of release date 539 | recently_added.sort(key=lambda x: x.get('added_at', '') or x.get('added', ''), reverse=True) 540 | recently_added = recently_added[:self._max_items] 541 | 542 | if recently_added: 543 | self._state = len(recently_added) 544 | self._attributes = {'data': recently_added} 545 | else: 546 | self._state = 0 547 | self._attributes = {'data': [{ 548 | 'title_default': '$title', 549 | 'line1_default': '$episode', 550 | 'line2_default': '$release', 551 | 'line3_default': '$number - $rating - $runtime', 552 | 'line4_default': '$genres', 553 | 'icon': 'mdi:eye-off' 554 | }]} 555 | 556 | self._available = True 557 | 558 | except Exception as err: 559 | _LOGGER.error("Error updating Jellyfin sensor: %s", err) 560 | self._state = 0 561 | self._attributes = {'data': []} 562 | self._available = False 563 | @classmethod 564 | async def create_sensors(cls, hass, config): 
565 | """Create a single Jellyfin sensor for all libraries.""" 566 | try: 567 | token = config[CONF_TOKEN] 568 | headers = { 569 | "Authorization": f'MediaBrowser Token="{token}"', 570 | "Accept": "application/json" 571 | } 572 | 573 | url = f"{config[CONF_URL].rstrip('/')}/Users" 574 | async with aiohttp.ClientSession() as session: 575 | async with async_timeout.timeout(10): 576 | async with session.get(url, headers=headers) as response: 577 | if response.status != 200: 578 | raise Exception(f"Error fetching user info: {response.status}") 579 | users = await response.json() 580 | if not users: 581 | raise Exception("No users found") 582 | user = next((u for u in users if u.get('Policy', {}).get('IsAdministrator')), users[0]) 583 | user_id = user['Id'] 584 | 585 | return [cls(hass, async_get_clientsession(hass), config, user_id)] 586 | 587 | except Exception as error: 588 | _LOGGER.error("Error initializing Jellyfin sensors: %s", error) 589 | return [] 590 | 591 | 592 | async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): 593 | """Set up the Jellyfin sensor.""" 594 | sensors = await JellyfinMediarrSensor.create_sensors(hass, config) 595 | async_add_entities(sensors, True) 596 | -------------------------------------------------------------------------------- /custom_components/mediarr/server/plex.py: -------------------------------------------------------------------------------- 1 | """Plex integration for Mediarr using TMDB images.""" 2 | import logging 3 | import xml.etree.ElementTree as ET 4 | import aiohttp 5 | import re 6 | import async_timeout 7 | import voluptuous as vol 8 | import aiofiles 9 | from pathlib import Path 10 | from datetime import datetime 11 | from homeassistant.const import CONF_TOKEN, CONF_URL 12 | import homeassistant.helpers.config_validation as cv 13 | from ..common.const import CONF_MAX_ITEMS, DEFAULT_MAX_ITEMS 14 | from ..common.tmdb_sensor import TMDBMediaSensor 15 | from 
    def __init__(self, hass, session, config, sections):
        """Initialize the Plex recently-added sensor.

        hass: Home Assistant instance (used to resolve the www/ cache path).
        session: shared aiohttp client session.
        config: validated PLEX_SCHEMA dict (url, token, tmdb_api_key, ...).
        sections: list of Plex library section keys to poll.
        """
        super().__init__(session, config['tmdb_api_key'], config.get('language', 'en'))
        self.hass = hass  # Add hass reference
        self._base_url = config[CONF_URL].rstrip('/')
        self._token = config[CONF_TOKEN]
        self._max_items = config[CONF_MAX_ITEMS]
        self._name = "Plex Mediarr"
        self._sections = sections
        self._session = session
        self._state = 0  # number of recently-added entries
        self._attributes = {'data': []}  # card payload exposed to the frontend
        self._available = True
        self._current_item_ids = set()  # For tracking current items
if tmdb_id: 77 | return tmdb_id 78 | 79 | # Try without year suffix in title 80 | title_no_year = re.sub(r'\s*\(\d{4}\)\s*$', '', title).strip() 81 | if title_no_year != title: 82 | tmdb_id = await self._search_tmdb(title_no_year, year, media_type) 83 | if tmdb_id: 84 | return tmdb_id 85 | 86 | # Try removing any text in parentheses 87 | title_no_parens = re.sub(r'\s*\([^)]*\)\s*', ' ', title).strip() 88 | if title_no_parens != title and title_no_parens != title_no_year: 89 | tmdb_id = await self._search_tmdb(title_no_parens, year, media_type) 90 | if tmdb_id: 91 | return tmdb_id 92 | 93 | # Try first part of title (before colon) 94 | if ':' in title: 95 | first_part = title.split(':', 1)[0].strip() 96 | if len(first_part) > 3: # Avoid too short titles 97 | tmdb_id = await self._search_tmdb(first_part, year, media_type) 98 | if tmdb_id: 99 | return tmdb_id 100 | 101 | return None 102 | 103 | def _format_added_date(self, timestamp): 104 | """Format the added date from Unix timestamp.""" 105 | try: 106 | if timestamp: 107 | dt = datetime.fromtimestamp(int(timestamp)) 108 | return dt.strftime("%Y-%m-%d") 109 | except Exception as err: 110 | _LOGGER.error("Error formatting date: %s", err) 111 | return "" 112 | 113 | async def _fetch_recently_added(self, section_id): 114 | """Fetch recently added items from a Plex section.""" 115 | url = f"{self._base_url}/library/sections/{section_id}/recentlyAdded" 116 | headers = {"X-Plex-Token": self._token} 117 | try: 118 | async with async_timeout.timeout(10): 119 | async with self._session.get(url, headers=headers) as response: 120 | if response.status == 200: 121 | xml_content = await response.text() 122 | tree = ET.fromstring(xml_content) 123 | videos = tree.findall(".//Video") 124 | _LOGGER.debug("Found %d items in Plex section %s", len(videos), section_id) 125 | return tree 126 | else: 127 | raise Exception(f"Failed to fetch recently added: {response.status}") 128 | except Exception as err: 129 | _LOGGER.error("Error fetching 
recently added: %s", err) 130 | return None 131 | async def _download_and_cache_image(self, url, item_id, image_type): 132 | """Download and cache an image from Plex.""" 133 | try: 134 | headers = {"X-Plex-Token": self._token} 135 | 136 | async with async_timeout.timeout(10): 137 | async with self._session.get(url, headers=headers) as response: 138 | if response.status == 200: 139 | cache_dir = Path(self.hass.config.path("www/mediarr/cache")) 140 | cache_dir.mkdir(parents=True, exist_ok=True) 141 | 142 | file_name = f"plex_{item_id}_{image_type}.jpg" 143 | cached_path = cache_dir / file_name 144 | 145 | content = await response.read() 146 | 147 | # Use aiofiles for async file operations 148 | async with aiofiles.open(cached_path, 'wb') as f: 149 | await f.write(content) 150 | 151 | _LOGGER.debug("Successfully cached image for %s: %s", item_id, image_type) 152 | return f"/local/mediarr/cache/{file_name}" 153 | else: 154 | _LOGGER.warning("Failed to download image %s for %s: %s", image_type, item_id, response.status) 155 | return None 156 | except Exception as err: 157 | _LOGGER.error("Error caching image %s for %s: %s", image_type, item_id, err) 158 | return None 159 | 160 | def _clean_unused_images(self, current_ids): 161 | """Clean up cached images that aren't in the current item list.""" 162 | try: 163 | cache_dir = Path(self.hass.config.path("www/mediarr/cache")) 164 | if not cache_dir.exists(): 165 | return 166 | 167 | for image_file in cache_dir.glob("plex_*.jpg"): 168 | item_id = image_file.stem.split('_')[1] 169 | if item_id not in current_ids: 170 | try: 171 | image_file.unlink() 172 | _LOGGER.debug("Removed unused image: %s", image_file.name) 173 | except Exception as err: 174 | _LOGGER.error("Error removing image %s: %s", image_file.name, err) 175 | except Exception as err: 176 | _LOGGER.error("Error cleaning cached images: %s", err) 177 | 178 | async def _get_plex_images(self, item, item_id, is_show=False): 179 | """Get and cache images from Plex.""" 180 
| try: 181 | if is_show: 182 | thumb = item.get('grandparentThumb') 183 | art = item.get('grandparentArt') 184 | else: 185 | thumb = item.get('thumb') 186 | art = item.get('art') 187 | 188 | poster_url = backdrop_url = main_backdrop_url = None 189 | 190 | # Ensure full URL works with both HTTP and HTTPS 191 | def _ensure_full_url(partial_url): 192 | if partial_url: 193 | if not partial_url.startswith(('http://', 'https://')): 194 | return f"{self._base_url}{partial_url}?X-Plex-Token={self._token}" 195 | return f"{partial_url}?X-Plex-Token={self._token}" 196 | return None 197 | 198 | if thumb: 199 | full_thumb_url = _ensure_full_url(thumb) 200 | poster_url = await self._download_and_cache_image(full_thumb_url, item_id, "poster") 201 | 202 | if art: 203 | full_art_url = _ensure_full_url(art) 204 | backdrop_url = await self._download_and_cache_image(full_art_url, item_id, "backdrop") 205 | main_backdrop_url = backdrop_url 206 | 207 | return poster_url, backdrop_url, main_backdrop_url 208 | except Exception as err: 209 | _LOGGER.error("Error getting Plex images: %s", err) 210 | return None, None, None 211 | 212 | async def _process_item(self, item): 213 | """Process a single Plex item and get TMDB images.""" 214 | try: 215 | added_at = item.get('addedAt', '0') 216 | added_date = self._format_added_date(added_at) 217 | is_episode = item.get('type') == 'episode' 218 | item_id = item.get('ratingKey', '') # Use Plex rating key as unique ID 219 | 220 | # Keep track of item IDs for cache cleaning 221 | if item_id: 222 | self._current_item_ids.add(item_id) 223 | 224 | # Default empty images to None for cleaner handling 225 | poster_url = backdrop_url = main_backdrop_url = None 226 | 227 | if is_episode: 228 | show_title = item.get('grandparentTitle', '') 229 | tmdb_id = None 230 | 231 | # Try to get TMDB ID from Guid tags 232 | guid_list = item.findall('.//Guid') 233 | for guid in guid_list: 234 | guid_str = guid.get('id', '') 235 | if 'themoviedb://' in guid_str: 236 | 
tmdb_id = guid_str.split('themoviedb://')[1].split('?')[0] 237 | break 238 | 239 | # First, try getting local (Plex/Jellyfin) images 240 | local_poster, local_backdrop, local_main = await self._get_plex_images(item, item_id, is_show=True) 241 | poster_url = local_poster 242 | backdrop_url = local_backdrop 243 | main_backdrop_url = local_main 244 | 245 | # Only if no local images, try TMDB 246 | if not poster_url or not backdrop_url or not main_backdrop_url: 247 | if not tmdb_id: 248 | tmdb_id = await self._enhanced_tmdb_search(show_title, None, 'tv') 249 | 250 | if tmdb_id: 251 | try: 252 | tmdb_poster, tmdb_backdrop, tmdb_main = await self._get_tmdb_images(tmdb_id, 'tv') 253 | 254 | # Use TMDB images only for missing local images 255 | if not poster_url and tmdb_poster: 256 | poster_url = tmdb_poster 257 | if not backdrop_url and tmdb_backdrop: 258 | backdrop_url = tmdb_backdrop 259 | if not main_backdrop_url and tmdb_main: 260 | main_backdrop_url = tmdb_main 261 | except Exception as err: 262 | _LOGGER.error("Error getting TMDB images for %s: %s", show_title, err) 263 | 264 | return { 265 | 'title': str(show_title)[:100], 266 | 'episode': str(item.get('title', ''))[:100], 267 | 'release': self._format_date(item.get('originallyAvailableAt', '')), 268 | 'added': added_date, 269 | 'number': f"S{int(item.get('parentIndex', 0)):02d}E{int(item.get('index', 0)):02d}", 270 | 'runtime': str(int(item.get('duration', 0)) // 60000), 271 | 'genres': ', '.join(str(genre.get('tag', '')) for genre in item.findall('.//Genre'))[:50], 272 | 'poster': str(poster_url or ""), 273 | 'fanart': str(main_backdrop_url or backdrop_url or ""), 274 | 'banner': str(backdrop_url or ""), 275 | 'flag': 1, 276 | 'added_at': added_at 277 | } 278 | else: 279 | title = str(item.get('title', '')) 280 | year = item.get('year') 281 | 282 | tmdb_id = None 283 | guid_list = item.findall('.//Guid') 284 | for guid in guid_list: 285 | guid_str = guid.get('id', '') 286 | if 'themoviedb://' in guid_str: 287 | 
tmdb_id = guid_str.split('themoviedb://')[1].split('?')[0] 288 | break 289 | 290 | # First, try getting local (Plex) images 291 | local_poster, local_backdrop, local_main = await self._get_plex_images(item, item_id) 292 | poster_url = local_poster 293 | backdrop_url = local_backdrop 294 | main_backdrop_url = local_main 295 | 296 | # Only if no local images, try TMDB 297 | if not poster_url or not backdrop_url or not main_backdrop_url: 298 | if not tmdb_id: 299 | tmdb_id = await self._enhanced_tmdb_search(title, year, 'movie') 300 | 301 | if tmdb_id: 302 | try: 303 | tmdb_poster, tmdb_backdrop, tmdb_main = await self._get_tmdb_images(tmdb_id, 'movie') 304 | 305 | # Use TMDB images only for missing local images 306 | if not poster_url and tmdb_poster: 307 | poster_url = tmdb_poster 308 | if not backdrop_url and tmdb_backdrop: 309 | backdrop_url = tmdb_backdrop 310 | if not main_backdrop_url and tmdb_main: 311 | main_backdrop_url = tmdb_main 312 | except Exception as err: 313 | _LOGGER.error("Error getting TMDB images for %s: %s", title, err) 314 | 315 | summary = str(item.get('summary', 'N/A')) 316 | if len(summary) > 97: 317 | summary = summary[:97] + '...' 
318 | 319 | return { 320 | 'title': title[:100], 321 | 'episode': summary, 322 | 'release': self._format_date(item.get('originallyAvailableAt', '')), 323 | 'added': added_date, 324 | 'number': str(year or ''), 325 | 'runtime': str(int(item.get('duration', 0)) // 60000), 326 | 'genres': ', '.join(str(genre.get('tag', '')) for genre in item.findall('.//Genre'))[:50], 327 | 'poster': str(poster_url or ""), 328 | 'fanart': str(main_backdrop_url or backdrop_url or ""), 329 | 'banner': str(backdrop_url or ""), 330 | 'flag': 1, 331 | 'added_at': added_at 332 | } 333 | 334 | except Exception as err: 335 | _LOGGER.error("Error processing item %s: %s", item.get('title', 'Unknown'), err) 336 | return None 337 | 338 | async def async_update(self): 339 | """Update sensor data.""" 340 | try: 341 | recently_added = [] 342 | card_json = [] 343 | show_episodes = {} 344 | self._current_item_ids = set() 345 | for section_id in self._sections: 346 | try: 347 | data = await self._fetch_recently_added(section_id) 348 | if data is not None: 349 | for item in data.findall(".//Video"): 350 | processed_item = await self._process_item(item) 351 | if processed_item: 352 | if processed_item.get('number', '').startswith('S'): 353 | show_title = processed_item['title'] 354 | if show_title not in show_episodes: 355 | show_episodes[show_title] = { 356 | **processed_item, 357 | 'episodes': [processed_item['number']], 358 | 'added_at': processed_item['added_at'] 359 | } 360 | else: 361 | show_episodes[show_title]['episodes'].append(processed_item['number']) 362 | if int(processed_item['added_at']) > int(show_episodes[show_title]['added_at']): 363 | show_episodes[show_title]['added_at'] = processed_item['added_at'] 364 | show_episodes[show_title]['episode'] = processed_item['episode'] 365 | show_episodes[show_title]['number'] = processed_item['number'] 366 | else: 367 | recently_added.append(processed_item) 368 | 369 | except Exception as section_err: 370 | _LOGGER.error("Error updating section %s: 
%s", section_id, section_err) 371 | 372 | # Process grouped shows 373 | for show_data in show_episodes.values(): 374 | episode_count = len(show_data['episodes']) 375 | if episode_count > 1: 376 | show_data['episode'] = f"{episode_count} new episodes ({show_data['number']})" 377 | del show_data['episodes'] 378 | recently_added.append(show_data) 379 | 380 | # Sort by added date 381 | recently_added.sort(key=lambda x: int(x.get('added_at', 0)), reverse=True) 382 | card_json.extend(recently_added[:self._max_items]) 383 | 384 | if not card_json: 385 | card_json.append({ 386 | 'title_default': '$title', 387 | 'line1_default': '$episode', 388 | 'line2_default': '$release', 389 | 'line3_default': '$number - $rating - $runtime', 390 | 'line4_default': '$genres', 391 | 'icon': 'mdi:eye-off' 392 | }) 393 | 394 | self._state = len(recently_added) 395 | self._attributes = {'data': card_json} 396 | self._available = True 397 | self._clean_unused_images(self._current_item_ids) 398 | except Exception as err: 399 | _LOGGER.error("Error updating Plex sensor: %s", err) 400 | self._state = 0 401 | self._attributes = {'data': []} 402 | self._available = False 403 | 404 | @classmethod 405 | async def create_sensors(cls, hass, config): 406 | """Create a single Plex sensor for all sections.""" 407 | try: 408 | base_url = config[CONF_URL].rstrip('/') 409 | token = config[CONF_TOKEN] 410 | 411 | # Fetch sections 412 | url = f"{base_url}/library/sections" 413 | headers = {"X-Plex-Token": token, "Accept": "application/xml"} 414 | 415 | async with aiohttp.ClientSession() as session: 416 | async with async_timeout.timeout(10): 417 | async with session.get(url, headers=headers) as response: 418 | if response.status != 200: 419 | raise Exception(f"Error fetching library sections: {response.status}") 420 | xml_content = await response.text() 421 | 422 | # Parse XML 423 | root = ET.fromstring(xml_content) 424 | sections = [directory.get("key") for directory in root.findall(".//Directory") 425 | if 
directory.get("key")] 426 | 427 | # Pass hass as the first argument 428 | return [cls(hass, async_get_clientsession(hass), config, sections)] 429 | 430 | except Exception as error: 431 | _LOGGER.error("Error initializing Plex sensors: %s", error) 432 | return [] 433 | 434 | async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): 435 | """Set up the Plex sensor.""" 436 | sensors = await PlexMediarrSensor.create_sensors(hass, config) 437 | async_add_entities(sensors, True) -------------------------------------------------------------------------------- /custom_components/mediarr/services.yaml: -------------------------------------------------------------------------------- 1 | # services.yaml 2 | submit_movie_request: 3 | description: Searches for a movie and requests the first result. 4 | fields: 5 | name: 6 | description: Title of movie to request 7 | example: "Avatar" 8 | required: true 9 | selector: 10 | text: 11 | type: text 12 | 13 | submit_tv_request: 14 | description: Searches for a TV show and requests the specified seasons. 15 | fields: 16 | name: 17 | description: Title of TV show to request 18 | example: "Breaking Bad" 19 | required: true 20 | selector: 21 | text: 22 | type: text 23 | season: 24 | description: Which season(s) to request 25 | example: "latest" 26 | default: "latest" 27 | required: false 28 | selector: 29 | select: 30 | options: 31 | - "first" 32 | - "latest" 33 | - "all" 34 | 35 | update_request: 36 | description: Updates the status of a media request. 
    def __init__(self, session, api_key, url, tmdb_api_key, max_items):
        """Initialize the Overseerr/Jellyseerr requests sensor.

        session: shared aiohttp client session.
        api_key: Overseerr/Jellyseerr API key (sent as X-Api-Key).
        url: base URL of the Overseerr/Jellyseerr server.
        tmdb_api_key: TMDB key used by the parent class for metadata/images.
        max_items: maximum number of requests to expose.
        """
        super().__init__(session, tmdb_api_key)
        self._seer_api_key = api_key
        self._url = url.rstrip('/')  # normalise so endpoint joins never double the slash
        self._max_items = max_items
        self._name = "Seer Mediarr"
def unique_id(self): 26 | """Return a unique ID.""" 27 | return f"seer_mediarr_{self._url}" 28 | 29 | async def _get_tmdb_details(self, tmdb_id, media_type): 30 | """Fetch title and overview from TMDB.""" 31 | try: 32 | url = f"https://api.themoviedb.org/3/{media_type}/{tmdb_id}" 33 | headers = {'Authorization': f'Bearer {self._tmdb_api_key}'} 34 | 35 | async with async_timeout.timeout(10): 36 | async with self._session.get(url, headers=headers) as response: 37 | if response.status == 200: 38 | data = await response.json() 39 | return { 40 | 'title': data.get('title' if media_type == 'movie' else 'name', 'Unknown'), 41 | 'overview': data.get('overview', 'No description available.'), 42 | 'year': data.get('release_date' if media_type == 'movie' else 'first_air_date', '')[:4] 43 | } 44 | return None 45 | except Exception as err: 46 | _LOGGER.error("Error fetching TMDB details: %s", err) 47 | return None 48 | 49 | async def async_update(self): 50 | """Update the sensor.""" 51 | try: 52 | headers = {'X-Api-Key': self._seer_api_key} 53 | 54 | async with async_timeout.timeout(10): 55 | async with self._session.get( 56 | f"{self._url}/api/v1/request", 57 | headers=headers 58 | ) as response: 59 | if response.status == 200: 60 | requests = await response.json() 61 | card_json = [] 62 | 63 | for request in requests['results'][:self._max_items]: 64 | media = request.get('media', {}) 65 | 66 | is_movie = media.get('mediaType') == 'movie' 67 | tmdb_id = media.get('tmdbId') 68 | media_type = 'movie' if is_movie else 'tv' 69 | request_id = request.get('id') # Get the request ID 70 | 71 | # Always fetch TMDB details since we need the title 72 | tmdb_details = await self._get_tmdb_details(tmdb_id, media_type) if tmdb_id else None 73 | 74 | # Fetch images using parent class method 75 | poster_url, backdrop_url, main_backdrop_url = await self._get_tmdb_images( 76 | tmdb_id, media_type 77 | ) if tmdb_id else (None, None, None) 78 | 79 | # Get title from TMDB details 80 | title = 
tmdb_details.get('title', 'Unknown') if tmdb_details else 'Unknown' 81 | overview = tmdb_details.get('overview', 'No description available.') if tmdb_details else 'No description available.' 82 | year = tmdb_details.get('year', '') if tmdb_details else '' 83 | 84 | # Fix date handling 85 | requested_date = request.get('createdAt') 86 | if requested_date: 87 | try: 88 | formatted_date = datetime.fromisoformat(requested_date.replace('Z', '+00:00')).strftime('%Y-%m-%d') 89 | except ValueError: 90 | formatted_date = "Unknown Date" 91 | else: 92 | formatted_date = "Unknown Date" 93 | 94 | # Extract season & episode (if TV show) 95 | season = media.get('seasonNumber') 96 | episode = media.get('episodeNumber') 97 | episode_info = f"S{season:02d}E{episode:02d}" if season and episode else "" 98 | 99 | request_data = { 100 | 'title': title, 101 | 'type': 'Movie' if is_movie else 'TV Show', 102 | 'status': request.get('status', 'Unknown'), 103 | 'requested_by': request.get('requestedBy', {}).get('displayName', 'Unknown User'), 104 | 'requested_date': formatted_date, 105 | 'overview': overview, 106 | 'year': year, 107 | 'season_episode': episode_info, 108 | 'poster': str(poster_url or ""), 109 | 'fanart': str(main_backdrop_url or backdrop_url or ""), 110 | 'banner': str(backdrop_url or ""), 111 | 'release': formatted_date, 112 | 'details': ( 113 | f"{title} {episode_info}\n" 114 | f"Requested by: {request.get('requestedBy', {}).get('displayName', 'Unknown User')}\n" 115 | f"Status: {request.get('status', 'Unknown')}" 116 | ), 117 | 'flag': 1, 118 | 'request_id': request_id, # Add the request ID to the data 119 | 'media_type': media_type, # Add the media type 120 | 'tmdb_id': tmdb_id # Add the TMDB ID 121 | } 122 | 123 | card_json.append(request_data) 124 | 125 | if not card_json: 126 | card_json.append({ 127 | 'title_default': '$title', 128 | 'line1_default': '$type', 129 | 'line2_default': '$status', 130 | 'line3_default': '$requested_by', 131 | 'line4_default': 
'$requested_date', 132 | 'icon': 'mdi:movie-search' 133 | }) 134 | 135 | self._state = len(card_json) 136 | self._attributes = {'data': card_json} 137 | self._available = True 138 | else: 139 | raise Exception(f"Failed to connect to Jellyseerr/Overseerr. Status: {response.status}") 140 | 141 | except Exception as err: 142 | _LOGGER.error("Error updating Jellyseerr/Overseerr sensor: %s", err) 143 | self._state = 0 144 | self._attributes = {'data': []} 145 | self._available = False -------------------------------------------------------------------------------- /custom_components/mediarr/services/seer_requests.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import voluptuous as vol 3 | from homeassistant.core import HomeAssistant, ServiceCall 4 | from homeassistant.helpers import config_validation as cv 5 | import aiohttp 6 | import urllib.parse 7 | import async_timeout 8 | 9 | _LOGGER = logging.getLogger(__name__) 10 | 11 | from ..common.const import ( 12 | ATTR_REQUEST_NAME, 13 | ATTR_REQUEST_SEASON, 14 | ATTR_REQUEST_ID, 15 | ATTR_REQUEST_STATUS, 16 | ATTR_REQUEST_TYPE, 17 | DEFAULT_REQUEST_SEASON, 18 | SERVICE_MOVIE_REQUEST, 19 | SERVICE_TV_REQUEST, 20 | SERVICE_UPDATE_REQUEST, 21 | REQUEST_STATUS_APPROVED, 22 | REQUEST_STATUS_DECLINED, 23 | REQUEST_STATUS_REMOVE, 24 | SEER_TV_DETAILS_ENDPOINT, 25 | SEARCH_TYPE_MOVIE, 26 | SEARCH_TYPE_TV, 27 | ) 28 | 29 | # Schema Definitions 30 | MOVIE_REQUEST_SCHEMA = vol.Schema({ 31 | vol.Required(ATTR_REQUEST_NAME): cv.string, 32 | }) 33 | 34 | TV_REQUEST_SCHEMA = vol.Schema({ 35 | vol.Required(ATTR_REQUEST_NAME): cv.string, 36 | vol.Optional(ATTR_REQUEST_SEASON, default=DEFAULT_REQUEST_SEASON): vol.In( 37 | ["first", "latest", "all"] 38 | ), 39 | }) 40 | 41 | # New combined schema for update/remove 42 | UPDATE_REQUEST_SCHEMA = vol.Schema({ 43 | vol.Required(ATTR_REQUEST_NAME): cv.string, 44 | vol.Required(ATTR_REQUEST_TYPE): vol.In(["movie", "tv"]), 45 | 
class SeerRequestHandler:
    """Handler for Jellyseerr/Overseerr requests.

    Wraps the seer HTTP API: searching media, creating movie/TV requests,
    and approving/declining/removing existing requests.
    """

    def __init__(self, hass: HomeAssistant, url: str, api_key: str):
        """Initialize the request handler.

        Args:
            hass: Home Assistant instance (used to read mediarr sensor state).
            url: base URL of the seer server; trailing slash is stripped.
            api_key: seer API key, sent as X-Api-Key on every call.
        """
        self.hass = hass
        self._url = url.rstrip('/')
        self._api_key = api_key
        # NOTE(review): a dedicated session is created here and closed in
        # close(); HA convention would be to reuse the shared client session.
        self._session = aiohttp.ClientSession()
        self._headers = {'X-Api-Key': self._api_key}

    async def async_search_media(self, query: str, media_type: str = None) -> dict:
        """Search seer for media matching *query*.

        Returns the first result (preferring *media_type* when given), or
        None when nothing matches or the request fails.
        """
        try:
            # Pass the RAW query: aiohttp percent-encodes params itself, so
            # pre-quoting with urllib.parse.quote double-encoded the string
            # (spaces became "%2520") and broke multi-word searches.
            params = {'query': query}
            search_url = f"{self._url}/api/v1/search"

            _LOGGER.debug("Searching with URL: %s, params: %s", search_url, params)

            async with async_timeout.timeout(10):
                async with self._session.get(search_url, headers=self._headers, params=params) as response:
                    if response.status == 200:
                        data = await response.json()
                        _LOGGER.debug("Search response: %s", data)
                        if data and data.get('results'):
                            if media_type:
                                filtered = [r for r in data['results'] if r.get('mediaType') == media_type]
                                _LOGGER.debug("Filtered results for type %s: %s", media_type, filtered)
                                if filtered:
                                    return filtered[0]
                            return data['results'][0]
                    return None
        except Exception as err:
            _LOGGER.error("Error searching for media: %s", err)
            return None

    async def async_find_request_by_title(self, title: str, media_type: str = None) -> dict:
        """Find an existing request by title (and optionally media type).

        Checks the mediarr seer sensor's cached attribute data first (cheap),
        then falls back to listing requests from the seer API. Returns a
        request-shaped dict with at least an 'id', or None.
        """
        # First check sensor data already cached in HA state.
        try:
            for entity_id in self.hass.states.async_entity_ids("sensor"):
                if "seer_mediarr" in entity_id:
                    state = self.hass.states.get(entity_id)
                    if state and state.attributes.get("data"):
                        for item in state.attributes["data"]:
                            current_title = item.get("title", "").lower().strip()
                            if current_title == title.lower().strip():
                                # If we have a request_id, return a request-like structure
                                if item.get("request_id"):
                                    return {
                                        "id": item["request_id"],
                                        "media": {
                                            "title": item["title"],
                                            "mediaType": item["type"].lower()
                                        }
                                    }
        except Exception as err:
            _LOGGER.error("Error checking sensor data: %s", err)

        # If not found in sensor data, fall back to API search.
        try:
            url = f"{self._url}/api/v1/request"
            async with async_timeout.timeout(10):
                async with self._session.get(url, headers=self._headers) as response:
                    if response.status == 200:
                        data = await response.json()
                        for request in data.get('results', []):
                            media = request.get('media', {})
                            current_title = media.get('title', '').lower().strip()
                            current_type = media.get('mediaType', '').lower()

                            if current_title == title.lower().strip():
                                if not media_type or current_type == media_type.lower():
                                    return request
                        _LOGGER.debug("Request not found in API data")
                        return None
                    else:
                        _LOGGER.error("Failed to fetch requests from API. Status: %s", response.status)
                        return None
        except Exception as err:
            _LOGGER.error("Error searching API for request: %s", err)
            return None

    async def async_update_request(self, call: ServiceCall) -> bool:
        """Approve, decline or remove an existing request (service handler).

        Resolves the request id from the call data, or by title lookup when
        no id was supplied. Returns True on success.
        """
        name = call.data[ATTR_REQUEST_NAME]
        media_type = call.data[ATTR_REQUEST_TYPE]
        new_status = call.data[ATTR_REQUEST_STATUS]
        request_id = call.data.get(ATTR_REQUEST_ID)

        try:
            if not request_id:
                # No explicit id: resolve via sensor cache / API by title.
                request = await self.async_find_request_by_title(name, media_type)
                if not request:
                    _LOGGER.error("Could not find request for: %s", name)
                    return False
                request_id = request.get('id')

            if new_status == REQUEST_STATUS_REMOVE:
                # Removal is a DELETE on the request resource itself.
                url = f"{self._url}/api/v1/request/{request_id}"
                async with async_timeout.timeout(10):
                    async with self._session.delete(url, headers=self._headers) as response:
                        success = response.status in [200, 204]
                        if success:
                            _LOGGER.info("Successfully removed request for: %s", name)
                        else:
                            response_text = await response.text()
                            _LOGGER.error("Failed to remove request. Status: %s, Response: %s",
                                          response.status, response_text)
                        return success
            else:
                # Approve/decline are POSTs to a status sub-endpoint.
                status_mapping = {
                    REQUEST_STATUS_APPROVED: "approve",
                    REQUEST_STATUS_DECLINED: "decline"
                }
                status_value = status_mapping.get(new_status)

                if not status_value:
                    _LOGGER.error("Invalid status: %s. Must be 'approve' or 'decline'", new_status)
                    return False

                url = f"{self._url}/api/v1/request/{request_id}/{status_value}"

                async with async_timeout.timeout(10):
                    async with self._session.post(url, headers=self._headers) as response:
                        success = response.status in [200, 201, 202]
                        if success:
                            _LOGGER.info("Successfully updated request status for: %s", name)
                        else:
                            response_text = await response.text()
                            _LOGGER.error("Failed to update request status. Status: %s, Response: %s",
                                          response.status, response_text)
                        return success

        except Exception as err:
            _LOGGER.error("Error updating request: %s - Error: %s", name, err)
            return False

    async def async_request_movie(self, call: ServiceCall) -> bool:
        """Request a movie by name (service handler). Returns True on success."""
        name = call.data[ATTR_REQUEST_NAME]
        movie = await self.async_search_media(name, SEARCH_TYPE_MOVIE)
        if not movie:
            _LOGGER.error("No movie found with name: %s", name)
            return False

        request_data = {
            "mediaType": "movie",
            "mediaId": movie['id']
        }

        url = f"{self._url}/api/v1/request"
        headers = {**self._headers, 'Content-Type': 'application/json'}

        try:
            async with async_timeout.timeout(10):
                async with self._session.post(url, headers=headers, json=request_data) as response:
                    success = response.status in [200, 201, 202]
                    if success:
                        _LOGGER.info("Successfully requested movie: %s", name)
                    else:
                        response_text = await response.text()
                        _LOGGER.error("Failed to request movie. Status: %s, Response: %s",
                                      response.status, response_text)
                    return success
        except Exception as err:
            _LOGGER.error("Error requesting movie: %s - Error: %s", name, err)
            return False

    async def async_request_tv(self, call: ServiceCall) -> bool:
        """Request a TV show by name (service handler).

        The 'season' option maps to the seer API as: "all" -> all seasons,
        "latest" -> highest season number from the show details, "first" -> [1].
        """
        name = call.data[ATTR_REQUEST_NAME]
        season = call.data.get(ATTR_REQUEST_SEASON, DEFAULT_REQUEST_SEASON)

        show = await self.async_search_media(name, SEARCH_TYPE_TV)
        if not show:
            _LOGGER.error("No TV show found with name: %s", name)
            return False

        # Convert season selection to API format.
        if season == "all":
            seasons = "all"
        elif season == "latest":
            # Look up the show's seasons to find the latest one.
            url = f"{self._url}/api/v1/tv/{show['id']}"
            async with async_timeout.timeout(10):
                async with self._session.get(url, headers=self._headers) as response:
                    if response.status == 200:
                        details = await response.json()
                        latest = max((s["seasonNumber"] for s in details.get("seasons", [])), default=1)
                        seasons = [latest]
                    else:
                        seasons = [1]
        else:  # first
            seasons = [1]

        request_data = {
            "mediaType": "tv",
            "mediaId": show['id'],
            "seasons": seasons
        }

        url = f"{self._url}/api/v1/request"
        headers = {**self._headers, 'Content-Type': 'application/json'}

        try:
            async with async_timeout.timeout(10):
                async with self._session.post(url, headers=headers, json=request_data) as response:
                    success = response.status in [200, 201, 202]
                    if success:
                        _LOGGER.info("Successfully requested TV show: %s (Season: %s)", name, season)
                    else:
                        response_text = await response.text()
                        _LOGGER.error("Failed to request TV show. Status: %s, Response: %s",
                                      response.status, response_text)
                    return success
        except Exception as err:
            _LOGGER.error("Error requesting TV show: %s - Error: %s", name, err)
            return False

    async def async_get_tv_details(self, tv_id: int) -> dict:
        """Get TV show details (including seasons), or None on failure."""
        try:
            url = f"{self._url}{SEER_TV_DETAILS_ENDPOINT}/{tv_id}"
            async with async_timeout.timeout(10):
                async with self._session.get(url, headers=self._headers) as response:
                    if response.status == 200:
                        return await response.json()
                    return None
        except Exception as err:
            _LOGGER.error("Error getting TV details: %s", err)
            return None

    async def close(self):
        """Close the handler's private aiohttp session."""
        if self._session:
            await self._session.close()
async def async_unload_services(hass: HomeAssistant, domain: str) -> bool:
    """Tear down Mediarr services and release the request handler's session.

    Closes the stored SeerRequestHandler (if any) before deregistering the
    movie/TV/update request services from Home Assistant.
    """
    handler = hass.data[domain].get("seer_request_handler")
    if handler:
        await handler.close()

    for service in (SERVICE_MOVIE_REQUEST, SERVICE_TV_REQUEST, SERVICE_UPDATE_REQUEST):
        hass.services.async_remove(domain, service)

    return True