├── tests ├── __init__.py ├── test_cache.py ├── test_helpers.py ├── test_vector_index.py ├── test_llm.py ├── test_manual_search.py ├── test_spotify_transfer.py ├── test_playlist_import.py └── test_plex_search.py ├── beetsplug ├── ai │ └── __init__.py ├── core │ ├── __init__.py │ ├── config.py │ ├── vector_index.py │ └── cache.py ├── plex │ ├── __init__.py │ ├── collage.py │ ├── operations.py │ ├── spotify_transfer.py │ ├── playlist_import.py │ └── manual_search.py ├── utils │ ├── __init__.py │ └── helpers.py ├── providers │ ├── __init__.py │ ├── tidal.py │ ├── gaana.py │ ├── youtube.py │ ├── m3u8.py │ ├── http_post.py │ ├── apple.py │ ├── jiosaavn.py │ └── spotify.py └── __init__.py ├── collage.png ├── .vscode ├── mcp.json └── settings.json ├── setup.py ├── LICENSE ├── .gitignore ├── agents.md ├── gemini.md ├── .github └── copilot-instructions.md └── README.md /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /beetsplug/ai/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /beetsplug/core/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /beetsplug/plex/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /beetsplug/utils/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /beetsplug/providers/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /collage.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/arsaboo/beets-plexsync/HEAD/collage.png -------------------------------------------------------------------------------- /beetsplug/__init__.py: -------------------------------------------------------------------------------- 1 | from pkgutil import extend_path 2 | __path__ = extend_path(__path__, __name__) 3 | -------------------------------------------------------------------------------- /.vscode/mcp.json: -------------------------------------------------------------------------------- 1 | { 2 | "servers": { 3 | "context7": { 4 | "type": "http", 5 | "url": "https://mcp.context7.com/mcp" 6 | } 7 | }, 8 | "inputs": [] 9 | } -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name='beets-plexsync', 5 | version='0.1', 6 | description='beets plugin to sync with Plex', 7 | long_description=open('README.md').read(), 8 | author='Alok Saboo', 9 | author_email='', 10 | url='https://github.com/arsaboo/beets-plexsync', 11 | license='MIT', 12 | platforms='ALL', 13 | packages=find_packages(include=['beetsplug', 'beetsplug.*']), 14 | install_requires=[ 15 | 'beets>=2.4.0', 16 | 'plexapi>=4.13.4', 17 | 'jiosaavn-python>=0.2', 18 | 'spotipy', 19 | 'ollama', 20 | 'openai', 21 
| 'pydantic>=2.0.0', 22 | 'python-dateutil', 23 | 'confuse', 24 | 'requests', 25 | 'beautifulsoup4', 26 | 'pillow', 27 | 'json_repair', 28 | 'agno>=1.2.16', 29 | 'instructor>=1.0', 30 | 'tavily-python', 31 | 'exa_py', 32 | 'brave-search', 33 | 'scipy', 34 | 'numpy', 35 | 'pytz', 36 | ], 37 | ) 38 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Alok Saboo 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "[python]": { 3 | "editor.defaultFormatter": "ms-python.black-formatter" 4 | }, 5 | "python.formatting.provider": "none", 6 | "python.testing.unittestArgs": [ 7 | "-v", 8 | "-s", 9 | "./beetsplug", 10 | "-p", 11 | "test_*.py" 12 | ], 13 | "python.testing.pytestEnabled": false, 14 | "python.testing.unittestEnabled": true, 15 | "chat.mcp.serverSampling": { 16 | "beets-plexsync/.vscode/mcp.json: context7": { 17 | "allowedModels": [ 18 | "copilot/gpt-4.1", 19 | "copilot/auto", 20 | "copilot/claude-3.5-sonnet", 21 | "copilot/claude-3.7-sonnet", 22 | "copilot/claude-3.7-sonnet-thought", 23 | "copilot/claude-sonnet-4", 24 | "copilot/gemini-2.0-flash-001", 25 | "copilot/gemini-2.5-pro", 26 | "copilot/gpt-4o", 27 | "copilot/gpt-5", 28 | "copilot/o3-mini", 29 | "copilot/o4-mini", 30 | "openrouter/qwen/qwen3-coder" 31 | ] 32 | } 33 | } 34 | } -------------------------------------------------------------------------------- /beetsplug/core/config.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import Any, Sequence 4 | 5 | import confuse 6 | from beets import config 7 | 8 | 9 | def get_config_value(item_cfg: Any, defaults_cfg: Any, key: str, code_default: Any): 10 | """Get a config value from item or defaults with a code fallback.""" 11 | if key in item_cfg: 12 | val = item_cfg[key] 13 | return val.get() if hasattr(val, "get") else val 14 | if key in defaults_cfg: 15 | val = defaults_cfg[key] 16 | return val.get() if hasattr(val, "get") else val 17 | return code_default 18 | 19 | 20 | def get_plexsync_config(path: str | Sequence[str], cast=None, default=None): 21 | """Safely fetch a plexsync config value with 
consistent defaults.""" 22 | segments = (path,) if isinstance(path, str) else tuple(path) 23 | node = config['plexsync'] 24 | try: 25 | for segment in segments: 26 | node = node[segment] 27 | except (confuse.NotFoundError, KeyError, TypeError): 28 | return default 29 | 30 | try: 31 | return node.get(cast) if cast is not None else node.get() 32 | except (confuse.NotFoundError, confuse.ConfigValueError, TypeError): 33 | return default 34 | -------------------------------------------------------------------------------- /beetsplug/providers/tidal.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | _log = logging.getLogger('beets.plexsync.tidal') 4 | 5 | def import_tidal_playlist(url, cache=None): 6 | """Import Tidal playlist with caching. 7 | 8 | Args: 9 | url: URL of the Tidal playlist 10 | cache: Cache object for storing results 11 | 12 | Returns: 13 | list: List of song dictionaries 14 | """ 15 | # Generate cache key from URL 16 | playlist_id = url.split('/')[-1] 17 | 18 | # Check cache 19 | if cache: 20 | cached_data = cache.get_playlist_cache(playlist_id, 'tidal') 21 | if (cached_data): 22 | _log.info(f"Using cached Tidal playlist data") 23 | return cached_data 24 | 25 | try: 26 | from beetsplug.tidal import TidalPlugin 27 | except ModuleNotFoundError: 28 | _log.error(f"Tidal plugin not installed") 29 | return None 30 | 31 | try: 32 | tidal = TidalPlugin() 33 | song_list = tidal.import_tidal_playlist(url) 34 | 35 | # Cache successful results 36 | if cache and song_list: 37 | cache.set_playlist_cache(playlist_id, 'tidal', song_list) 38 | _log.info(f"Cached {len(song_list)} tracks from Tidal playlist") 39 | 40 | return song_list 41 | except Exception as e: 42 | _log.error(f"Unable to initialize Tidal plugin. 
Error: {e}") 43 | return None -------------------------------------------------------------------------------- /tests/test_cache.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import sqlite3 4 | import tempfile 5 | import types 6 | import unittest 7 | 8 | from tests.test_playlist_import import DummyLogger, ensure_stubs 9 | 10 | 11 | class CacheTests(unittest.TestCase): 12 | def setUp(self): 13 | ensure_stubs({'plexsync': {}}) 14 | import sys 15 | sys.modules.setdefault('plexapi.audio', types.SimpleNamespace(Track=object)) 16 | sys.modules.setdefault('plexapi.video', types.SimpleNamespace(Video=object)) 17 | sys.modules.setdefault('plexapi.server', types.SimpleNamespace(PlexServer=object)) 18 | from beetsplug.core.cache import Cache 19 | 20 | self.tempdir = tempfile.TemporaryDirectory(ignore_cleanup_errors=True) 21 | self.db_path = os.path.join(self.tempdir.name, 'cache.db') 22 | 23 | class PluginStub: 24 | def __init__(self): 25 | self._log = DummyLogger() 26 | 27 | self.cache = Cache(self.db_path, PluginStub()) 28 | 29 | def tearDown(self): 30 | self.tempdir.cleanup() 31 | 32 | def test_set_and_get(self): 33 | key = json.dumps({'title': 'Song'}) 34 | self.cache.set(key, 123) 35 | self.assertEqual(self.cache.get(key), (123, None)) 36 | 37 | def test_negative_cache_storage(self): 38 | key = json.dumps({'title': 'Skip'}) 39 | self.cache.set(key, None) 40 | self.assertEqual(self.cache.get(key), (-1, None)) 41 | 42 | def test_clear(self): 43 | key = json.dumps({'title': 'Clear'}) 44 | self.cache.set(key, 1) 45 | self.cache.clear() 46 | self.assertIsNone(self.cache.get(key)) 47 | 48 | 49 | if __name__ == '__main__': 50 | unittest.main() 51 | -------------------------------------------------------------------------------- /beetsplug/providers/gaana.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | _log = logging.getLogger('beets.plexsync.gaana') 4 | 5 | def import_gaana_playlist(url, cache=None): 6 | """Import Gaana playlist with caching. 7 | 8 | Args: 9 | url: URL of the Gaana playlist 10 | cache: Cache object for storing results 11 | 12 | Returns: 13 | list: List of song dictionaries 14 | """ 15 | # Generate cache key from URL 16 | playlist_id = url.split('/')[-1] 17 | 18 | if not playlist_id: 19 | _log.error(f"Could not extract playlist ID from URL: {url}") 20 | return [] 21 | 22 | # Check cache 23 | if cache: 24 | cached_data = cache.get_playlist_cache(playlist_id, 'gaana') 25 | if cached_data: 26 | _log.info(f"Using cached tracks for Gaana playlist {playlist_id}") 27 | return cached_data 28 | 29 | try: 30 | from beetsplug.gaana import GaanaPlugin 31 | except ModuleNotFoundError: 32 | _log.error( 33 | "Gaana plugin not installed. \ 34 | See https://github.com/arsaboo/beets-gaana" 35 | ) 36 | return None 37 | 38 | try: 39 | gaana = GaanaPlugin() 40 | except Exception as e: 41 | _log.error(f"Unable to initialize Gaana plugin. 
Error: {e}") 42 | return None 43 | 44 | # Get songs from Gaana 45 | song_list = gaana.import_gaana_playlist(url) 46 | 47 | if not song_list: 48 | _log.warning(f"No tracks found in Gaana playlist {playlist_id}") 49 | 50 | # Cache successful results 51 | if song_list and cache: 52 | cache.set_playlist_cache(playlist_id, 'gaana', song_list) 53 | _log.info(f"Cached {len(song_list)} tracks from Gaana playlist") 54 | 55 | return song_list -------------------------------------------------------------------------------- /beetsplug/providers/youtube.py: -------------------------------------------------------------------------------- 1 | import re 2 | import json 3 | import logging 4 | 5 | _log = logging.getLogger('beets.plexsync.youtube') 6 | 7 | def import_yt_playlist(url, cache=None): 8 | """Import YouTube playlist with caching. 9 | 10 | Args: 11 | url: URL of the YouTube playlist 12 | cache: Cache object for storing results 13 | 14 | Returns: 15 | list: List of song dictionaries 16 | """ 17 | # Generate cache key from URL 18 | playlist_id = url.split('list=')[-1].split('&')[0] # Extract playlist ID from URL 19 | 20 | # Check cache 21 | if cache: 22 | cached_data = cache.get_playlist_cache(playlist_id, 'youtube') 23 | if cached_data: 24 | _log.info("Using cached YouTube playlist data") 25 | return cached_data 26 | 27 | try: 28 | from beetsplug.youtube import YouTubePlugin 29 | except ModuleNotFoundError: 30 | _log.error("YouTube plugin not installed") 31 | return None 32 | 33 | try: 34 | ytp = YouTubePlugin() 35 | song_list = ytp.import_youtube_playlist(url) 36 | 37 | # Cache successful results 38 | if cache and song_list: 39 | cache.set_playlist_cache(playlist_id, 'youtube', song_list) 40 | _log.info("Cached %s tracks from YouTube playlist", len(song_list)) 41 | 42 | return song_list 43 | except Exception as e: 44 | _log.error("Unable to initialize YouTube plugin. Error: %s", e) 45 | return None 46 | 47 | 48 | def import_yt_search(query, limit, cache=None): 49 | """Import YouTube search results. 50 | 51 | Args: 52 | query: Search query string 53 | limit: Maximum number of results to return 54 | cache: Cache object for storing results 55 | 56 | Returns: 57 | list: List of song dictionaries 58 | """ 59 | try: 60 | from beetsplug.youtube import YouTubePlugin 61 | except ModuleNotFoundError: 62 | _log.error("YouTube plugin not installed") 63 | return [] 64 | try: 65 | ytp = YouTubePlugin() 66 | return ytp.import_youtube_search(query, limit) 67 | except Exception as e: 68 | _log.error("Unable to initialize YouTube plugin. Error: %s", e) 69 | return [] -------------------------------------------------------------------------------- /beetsplug/utils/helpers.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import re 4 | 5 | from beets import ui 6 | 7 | 8 | def parse_title(title_orig): 9 | """Parse title to separate movie soundtrack information.""" 10 | if '(From "' in title_orig: 11 | title = re.sub(r"\(From.*\)", "", title_orig) 12 | album = re.sub(r'^[^"]+"|(? 
str: 39 | """Highlight exact matching parts between source and target strings.""" 40 | if source is None or target is None: 41 | return target or "Unknown" 42 | 43 | source_words = source.lower().split() if source else [] 44 | target_words = target.lower().split() if target else [] 45 | 46 | if source and target and source.lower() == target.lower(): 47 | return ui.colorize('text_success', target) 48 | 49 | from difflib import SequenceMatcher 50 | 51 | def fuzzy_score(a: str, b: str) -> float: 52 | return SequenceMatcher(None, a.lower(), b.lower()).ratio() 53 | 54 | highlighted_words: list[str] = [] 55 | original_target_words = target.split() 56 | for i, target_word in enumerate(target_words): 57 | word_matched = False 58 | clean_target_word = re.sub(r'[^\w]', '', target_word) 59 | 60 | for source_word in source_words: 61 | clean_source_word = re.sub(r'[^\w]', '', source_word) 62 | if ( 63 | clean_source_word == clean_target_word 64 | or fuzzy_score(clean_source_word, clean_target_word) > 0.8 65 | ): 66 | highlighted_words.append( 67 | ui.colorize('text_success', original_target_words[i]) 68 | ) 69 | word_matched = True 70 | break 71 | 72 | if not word_matched: 73 | highlighted_words.append(original_target_words[i]) 74 | 75 | return ' '.join(highlighted_words) 76 | -------------------------------------------------------------------------------- /tests/test_helpers.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import sys 3 | import types 4 | import unittest 5 | 6 | 7 | class DummyConfigNode: 8 | def __init__(self, data): 9 | self._data = data 10 | 11 | def __getitem__(self, key): 12 | if isinstance(self._data, dict) and key in self._data: 13 | return DummyConfigNode(self._data[key]) 14 | raise NotFoundError(key) 15 | 16 | def get(self, cast=None): 17 | value = self._data 18 | if isinstance(value, DummyConfigNode): 19 | value = value._data 20 | if cast is None or value is None: 21 | return value 22 | if cast is bool: 23 | return bool(value) 24 | return cast(value) 25 | 26 | 27 | class DummyConfig(DummyConfigNode): 28 | def __init__(self): 29 | super().__init__({}) 30 | 31 | def set_data(self, data): 32 | self._data = data 33 | 34 | 35 | class NotFoundError(Exception): 36 | pass 37 | 38 | 39 | class ConfigValueError(Exception): 40 | pass 41 | 42 | 43 | def ensure_stubs(data): 44 | config = DummyConfig() 45 | config.set_data(data) 46 | 47 | beets = types.ModuleType('beets') 48 | ui_module = types.ModuleType('beets.ui') 49 | 50 | def colorize(_name, text): 51 | return text 52 | 53 | ui_module.colorize = colorize 54 | beets.ui = ui_module 55 | beets.config = config 56 | 57 | confuse = types.ModuleType('confuse') 58 | confuse.NotFoundError = NotFoundError 59 | confuse.ConfigValueError = ConfigValueError 60 | 61 | sys.modules['beets'] = beets 62 | sys.modules['beets.ui'] = ui_module 63 | sys.modules['confuse'] = confuse 64 | 65 | return config 66 | 67 | 68 | class GetPlexsyncConfigTest(unittest.TestCase): 69 | def setUp(self): 70 | self.config = ensure_stubs({'plexsync': {}}) 71 | if 'beetsplug.helpers' in sys.modules: 72 | importlib.reload(sys.modules['beetsplug.helpers']) 73 | else: 74 | importlib.import_module('beetsplug.helpers') 75 | 76 | def test_default_value_returned(self): 77 | helpers = importlib.import_module('beetsplug.helpers') 78 | self.assertTrue(helpers.get_plexsync_config('manual_search', bool, True)) 79 | 80 | def test_nested_lookup(self): 81 | self.config.set_data({'plexsync': {'playlists': {'items': [1, 2]}}}) 82 | 
helpers = importlib.import_module('beetsplug.helpers') 83 | self.assertEqual( 84 | helpers.get_plexsync_config(['playlists', 'items'], list, []), 85 | [1, 2], 86 | ) 87 | 88 | def test_missing_nested_returns_default(self): 89 | helpers = importlib.import_module('beetsplug.helpers') 90 | self.assertEqual( 91 | helpers.get_plexsync_config(['playlists', 'defaults'], dict, {'x': 1}), 92 | {'x': 1}, 93 | ) 94 | 95 | 96 | if __name__ == '__main__': 97 | unittest.main() 98 | -------------------------------------------------------------------------------- /beetsplug/providers/m3u8.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from pathlib import Path 3 | 4 | _log = logging.getLogger('beets.plexsync.m3u8') 5 | 6 | def import_m3u8_playlist(filepath, cache=None): 7 | """Import M3U8 playlist with caching. 8 | 9 | Args: 10 | filepath: Path to the M3U8 file 11 | cache: Cache object for storing results 12 | 13 | Returns: 14 | list: List of song dictionaries 15 | """ 16 | playlist_id = str(Path(filepath).stem) 17 | 18 | if cache: 19 | cached_data = cache.get_playlist_cache(playlist_id, 'm3u8') 20 | if cached_data: 21 | _log.info("Using cached M3U8 playlist data") 22 | return cached_data 23 | 24 | song_list = [] 25 | 26 | try: 27 | with open(filepath, 'r', encoding='utf-8') as f: 28 | lines = [line.strip() for line in f if line.strip()] 29 | 30 | i = 0 31 | while i < len(lines): 32 | line = lines[i] 33 | 34 | if line.startswith('#EXTINF:'): 35 | meta = line.split(',', 1)[1] 36 | _log.debug(f"EXTINF meta raw line: '{meta}'") 37 | 38 | if ' - ' in meta: 39 | artist, title = meta.split(' - ', 1) 40 | artist, title = artist.strip(), title.strip() 41 | _log.debug(f"Parsed EXTINF as artist='{artist}', title='{title}'") 42 | else: 43 | _log.warning(f"EXTINF missing '-': '{meta}'") 44 | artist, title = None, None 45 | 46 | current_song = { 47 | 'artist': artist, 48 | 'title': title, 49 | 'album': None 50 | } 51 | 52 | # Optional EXTALB line 53 | next_idx = i + 1 54 | if next_idx < len(lines) and lines[next_idx].startswith('#EXTALB:'): 55 | album = lines[next_idx][8:].strip() 56 | current_song['album'] = album if album else None 57 | _log.debug(f"Found album: '{current_song['album']}'") 58 | next_idx += 1 59 | 60 | # Optional file path (we'll skip) 61 | if next_idx < len(lines) and not lines[next_idx].startswith('#'): 62 | next_idx += 1 63 | 64 | # Log before appending: 65 | _log.debug(f"Appending song entry: {current_song}") 66 | 67 | song_list.append(current_song.copy()) 68 | i = next_idx - 1 # Set to the last processed line 69 | 70 | i += 1 71 | 72 | if song_list and cache: 73 | cache.set_playlist_cache(playlist_id, 'm3u8', song_list) 74 | _log.info(f"Cached {len(song_list)} tracks from M3U8 playlist") 75 | 76 | return song_list 77 | 78 | except Exception as e: 79 | _log.error(f"Error importing M3U8 playlist '{filepath}': {e}") 80 | return [] -------------------------------------------------------------------------------- /beetsplug/plex/collage.py: -------------------------------------------------------------------------------- 1 | """Collage creation helpers extracted from plexsync.""" 2 | 3 | import os 4 | from datetime import datetime 5 | from io import BytesIO 6 | 7 | import requests 8 | from PIL import Image 9 | from plexapi import exceptions 10 | 11 | 12 | def create_collage(list_image_urls, dimension, logger): 13 | """Create a square collage from a list of image urls. 14 | 15 | Returns a PIL.Image. Behavior identical to original. 
16 | """ 17 | thumbnail_size = 300 18 | grid_size = thumbnail_size * dimension 19 | grid = Image.new("RGB", (grid_size, grid_size), "black") 20 | 21 | for index, url in enumerate(list_image_urls): 22 | if index >= dimension * dimension: 23 | break 24 | try: 25 | response = requests.get(url, timeout=10) 26 | img = Image.open(BytesIO(response.content)) 27 | if img.mode != "RGB": 28 | img = img.convert("RGB") 29 | img.thumbnail((thumbnail_size, thumbnail_size), Image.Resampling.LANCZOS) 30 | x = thumbnail_size * (index % dimension) 31 | y = thumbnail_size * (index // dimension) 32 | grid.paste(img, (x, y)) 33 | img.close() 34 | except Exception as e: 35 | logger.debug("Failed to process image {}: {}", url, e) 36 | continue 37 | return grid 38 | 39 | 40 | def plex_collage(plugin, interval, grid): 41 | """Create a collage of most played albums and save to config dir.""" 42 | interval = int(interval) 43 | grid = int(grid) 44 | plugin._log.info("Creating collage of most played albums in the last {} days", interval) 45 | 46 | # Fetch all tracks played within interval days. Avoid sorting by 47 | # lifetime viewCount to prevent bias; prefer recency or no sort. 48 | tracks = plugin.music.search( 49 | filters={"track.lastViewedAt>>": f"{interval}d"}, 50 | sort="lastViewedAt:desc", 51 | libtype="track", 52 | maxresults=None, 53 | ) 54 | 55 | max_albums = grid * grid 56 | sorted_albums = plugin._plex_most_played_albums(tracks, interval)[:max_albums] 57 | 58 | if not sorted_albums: 59 | plugin._log.error("No albums found in the specified time period") 60 | return 61 | 62 | album_art_urls = [] 63 | for album in sorted_albums: 64 | if hasattr(album, "thumbUrl") and album.thumbUrl: 65 | album_art_urls.append(album.thumbUrl) 66 | plugin._log.debug( 67 | "Added album art for: {} (played {} times)", 68 | album.title, 69 | album.count, 70 | ) 71 | 72 | if not album_art_urls: 73 | plugin._log.error("No album artwork found") 74 | return 75 | 76 | try: 77 | collage = create_collage(album_art_urls, grid, plugin._log) 78 | output_path = os.path.join(plugin.config_dir, "collage.png") 79 | collage.save(output_path, "PNG", quality=95) 80 | plugin._log.info("Collage saved to: {}", output_path) 81 | except Exception as e: 82 | plugin._log.error("Failed to create collage: {}", e) 83 | -------------------------------------------------------------------------------- /beetsplug/providers/http_post.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import requests 3 | 4 | _log = logging.getLogger('beets.plexsync.post') 5 | 6 | def import_post_playlist(source_config, cache=None): 7 | """Import playlist from a POST request endpoint with caching. 
8 | 9 | Args: 10 | source_config: Dictionary containing server_url, headers, and payload 11 | cache: Cache object for storing results 12 | 13 | Returns: 14 | list: List of song dictionaries 15 | """ 16 | # Generate cache key from URL in payload 17 | playlist_url = source_config.get("payload", {}).get("playlist_url") 18 | if not playlist_url: 19 | _log.error("No playlist_url provided in POST request payload") 20 | return [] 21 | 22 | playlist_id = playlist_url.split('/')[-1] 23 | 24 | # Check cache 25 | if cache: 26 | cached_data = cache.get_playlist_cache(playlist_id, 'post') 27 | if cached_data: 28 | _log.info("Using cached POST request playlist data") 29 | return cached_data 30 | 31 | server_url = source_config.get("server_url") 32 | if not server_url: 33 | _log.error("No server_url provided for POST request") 34 | return [] 35 | 36 | headers = source_config.get("headers", {}) 37 | payload = source_config.get("payload", {}) 38 | 39 | try: 40 | response = requests.post(server_url, headers=headers, json=payload) 41 | response.raise_for_status() # Raise exception for non-200 status codes 42 | 43 | data = response.json() 44 | if not isinstance(data, dict) or "song_list" not in data: 45 | _log.error("Invalid response format. Expected 'song_list' in JSON response") 46 | return [] 47 | 48 | # Convert response to our standard format 49 | song_list = [] 50 | for song in data["song_list"]: 51 | song_dict = { 52 | "title": song.get("title", "").strip(), 53 | "artist": song.get("artist", "").strip(), 54 | "album": song.get("album", "").strip() if song.get("album") else None, 55 | } 56 | # Add year if available 57 | if "year" in song and song["year"]: 58 | try: 59 | year = int(song["year"]) 60 | song_dict["year"] = year 61 | except (ValueError, TypeError): 62 | pass 63 | 64 | if song_dict["title"] and song_dict["artist"]: # Only add if we have minimum required fields 65 | song_list.append(song_dict) 66 | 67 | # Cache successful results 68 | if song_list and cache: 69 | cache.set_playlist_cache(playlist_id, 'post', song_list) 70 | _log.info("Cached {} tracks from POST request playlist", len(song_list)) 71 | 72 | return song_list 73 | 74 | except requests.exceptions.RequestException as e: 75 | _log.error("Error making POST request: {}", e) 76 | return [] 77 | except ValueError as e: 78 | _log.error("Error parsing JSON response: {}", e) 79 | return [] 80 | except Exception as e: 81 | _log.error("Unexpected error during POST request: {}", e) 82 | return [] -------------------------------------------------------------------------------- /tests/test_vector_index.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tempfile 3 | import types 4 | 5 | from beetsplug.core.vector_index import BeetsVectorIndex 6 | from beetsplug.plexsync import PlexSync 7 | 8 | 9 | def _extract_meta(item): 10 | return { 11 | "id": getattr(item, "id", None), 12 | "title": getattr(item, "title", "") or "", 13 | "album": getattr(item, "album", "") or "", 14 | "artist": getattr(item, "artist", "") or "", 15 | "plex_ratingkey": getattr(item, "plex_ratingkey", None), 16 | } 17 | 18 | 19 | def _build_plugin(index, info): 20 | return types.SimpleNamespace( 21 | _vector_index=index, 22 | _vector_index_info=info, 23 | register_listener=lambda *args, **kwargs: None, 24 | _extract_vector_metadata=_extract_meta, 25 | ) 26 | 27 | 28 | def test_vector_index_upsert_updates_tokens(): 29 | index = BeetsVectorIndex() 30 | index.add_item( 31 | 1, 32 | {"title": "First Song", "artist": 
"Artist", "album": "Album"}, 33 | ) 34 | 35 | # Query matches original tokens. 36 | query_counts, query_norm = index.build_query_vector({"title": "First"}) 37 | assert index.candidate_scores(query_counts, query_norm) 38 | 39 | # Upsert with new metadata that shouldn't match the original query. 40 | index.upsert_item( 41 | 1, 42 | {"title": "Second Song", "artist": "Artist", "album": "Album"}, 43 | ) 44 | 45 | query_counts, query_norm = index.build_query_vector({"title": "First"}) 46 | assert not index.candidate_scores(query_counts, query_norm) 47 | 48 | query_counts, query_norm = index.build_query_vector({"title": "Second"}) 49 | matches = index.candidate_scores(query_counts, query_norm) 50 | assert matches and matches[0][0].item_id == 1 51 | 52 | 53 | def test_listen_for_db_change_upserts_into_index(): 54 | index = BeetsVectorIndex() 55 | index.add_item(5, {"title": "Existing", "artist": "Artist", "album": "Album"}) 56 | 57 | with tempfile.NamedTemporaryFile(delete=False) as handle: 58 | db_path = handle.name 59 | os.utime(db_path, None) 60 | 61 | info = {"db_path": db_path, "mtime": os.path.getmtime(db_path), "size": len(index)} 62 | plugin = _build_plugin(index, info) 63 | 64 | model = types.SimpleNamespace( 65 | id=10, 66 | title="New Track", 67 | album="Fresh Album", 68 | artist="New Artist", 69 | plex_ratingkey=None, 70 | ) 71 | lib = types.SimpleNamespace(path=db_path) 72 | 73 | PlexSync.listen_for_db_change(plugin, lib, model) 74 | 75 | assert len(plugin._vector_index) == 2 76 | assert plugin._vector_index_info["size"] == 2 77 | assert "mtime" in plugin._vector_index_info 78 | 79 | query_counts, query_norm = index.build_query_vector({"title": "New Track"}) 80 | matches = index.candidate_scores(query_counts, query_norm) 81 | assert matches and matches[0][0].item_id == 10 82 | 83 | os.unlink(db_path) 84 | 85 | 86 | def test_listen_for_db_change_defers_when_index_missing(): 87 | plugin = _build_plugin(None, {"db_path": "/tmp/test.db"}) 88 | model = types.SimpleNamespace(id=20) 89 | 90 | PlexSync.listen_for_db_change(plugin, None, model) 91 | 92 | assert plugin._vector_index is None 93 | assert plugin._vector_index_info == {} 94 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 105 | __pypackages__/ 106 | 107 | # Celery stuff 108 | celerybeat-schedule 109 | celerybeat.pid 110 | 111 | # SageMath parsed files 112 | *.sage.py 113 | 114 | # Environments 115 | .env 116 | .venv 117 | env/ 118 | venv/ 119 | ENV/ 120 | env.bak/ 121 | venv.bak/ 122 | 123 | # Spyder project settings 124 | .spyderproject 125 | .spyproject 126 | 127 | # Rope project settings 128 | .ropeproject 129 | 130 | # mkdocs documentation 131 | /site 132 | 133 | # mypy 134 | .mypy_cache/ 135 | .dmypy.json 136 | dmypy.json 137 | 138 | # Pyre type checker 139 | .pyre/ 140 | 141 | # pytype static type analyzer 142 | .pytype/ 143 | 144 | # Cython debug symbols 145 | cython_debug/ 146 | 147 | # PyCharm 148 | # JetBrains specific template is maintainted in a separate JetBrains.gitignore that can 149 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 150 | # and can be added to the global gitignore or merged into this file. For a more nuclear 151 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 152 | #.idea/ 153 | -------------------------------------------------------------------------------- /beetsplug/providers/apple.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import requests 4 | from bs4 import BeautifulSoup 5 | 6 | _log = logging.getLogger('beets.plexsync.apple') 7 | 8 | def import_apple_playlist(url, cache=None, headers=None): 9 | """Import Apple Music playlist with caching. 
10 | 11 | Args: 12 | url: URL of the Apple Music playlist 13 | cache: Cache object for storing results 14 | headers: HTTP headers for the request 15 | 16 | Returns: 17 | list: List of song dictionaries 18 | """ 19 | # Generate cache key from URL 20 | playlist_id = url.split('/')[-1] 21 | 22 | # Check cache 23 | if cache: 24 | cached_data = cache.get_playlist_cache(playlist_id, 'apple') 25 | if (cached_data): 26 | _log.info(f"Using cached Apple Music playlist data") 27 | return cached_data 28 | 29 | if headers is None: 30 | headers = { 31 | "User-Agent": "Mozilla/5.0 (Windows NT 0.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36", 32 | "Accept-Language": "en-US,en;q=0.9", 33 | "Accept-Encoding": "gzip, deflate, br", 34 | "Connection": "keep-alive", 35 | "Upgrade-Insecure-Requests": "1", 36 | "DNT": "1", # Do Not Track Request Header 37 | "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8" 38 | } 39 | 40 | song_list = [] 41 | 42 | try: 43 | # Send a GET request to the URL and get the HTML content 44 | response = requests.get(url, headers=headers) 45 | content = response.text 46 | 47 | # Create a BeautifulSoup object with the HTML content 48 | soup = BeautifulSoup(content, "html.parser") 49 | try: 50 | data = soup.find("script", id="serialized-server-data").text 51 | except AttributeError: 52 | _log.debug(f"Error parsing Apple Music playlist") 53 | return None 54 | 55 | # load the data as a JSON object 56 | data = json.loads(data) 57 | 58 | # Extract songs from the sections 59 | try: 60 | songs = data[0]["data"]["sections"][1]["items"] 61 | except (KeyError, IndexError) as e: 62 | _log.error(f"Failed to extract songs from Apple Music data: {e}") 63 | return None 64 | 65 | # Loop through each song element 66 | for song in songs: 67 | try: 68 | # Find and store the song title 69 | title = song["title"].strip() 70 | album = song["tertiaryLinks"][0]["title"] 71 | # Find and store the song artist 72 | artist = song["subtitleLinks"][0]["title"] 73 | # Create a dictionary with the song information 74 | song_dict = { 75 | "title": title.strip(), 76 | "album": album.strip(), 77 | "artist": artist.strip(), 78 | } 79 | # Append the dictionary to the list of songs 80 | song_list.append(song_dict) 81 | except (KeyError, IndexError) as e: 82 | _log.debug(f"Error processing song {song.get('title', 'Unknown')}: {e}") 83 | continue 84 | 85 | if song_list and cache: 86 | cache.set_playlist_cache(playlist_id, 'apple', song_list) 87 | _log.info(f"Cached {len(song_list)} tracks from Apple Music playlist") 88 | 89 | except Exception as e: 90 | _log.error(f"Error importing Apple Music playlist: {e}") 91 | return [] 92 | 93 | return song_list -------------------------------------------------------------------------------- /tests/test_llm.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import json 3 | import sys 4 | import types 5 | import unittest 6 | 7 | from tests.test_playlist_import import ensure_stubs 8 | 9 | 10 | class LLMSearchTest(unittest.TestCase): 11 | def setUp(self): 12 | if sys.version_info < (3, 9): 13 | self.skipTest('LLM module requires Python 3.9+') 14 | class SimpleBaseModel: 15 | def __init__(self, **data): 16 | for key, value in data.items(): 17 | setattr(self, key, value) 18 | def model_dump(self): 19 | return self.__dict__.copy() 20 | @classmethod 21 | def model_validate_json(cls, data): 22 | return cls(**json.loads(data)) 23 | def Field(default=None, 
**kwargs): 24 | return default 25 | def field_validator(*args, **kwargs): 26 | def decorator(func): 27 | return func 28 | return decorator 29 | sys.modules['pydantic'] = types.SimpleNamespace( 30 | BaseModel=SimpleBaseModel, 31 | Field=Field, 32 | field_validator=field_validator, 33 | ) 34 | ensure_stubs({'llm': {'search': {}}}) 35 | if 'beetsplug.ai.llm' in sys.modules: 36 | importlib.reload(sys.modules['beetsplug.ai.llm']) 37 | else: 38 | importlib.import_module('beetsplug.ai.llm') 39 | self.llm = importlib.import_module('beetsplug.ai.llm') 40 | 41 | def tearDown(self): 42 | if 'beetsplug.ai.llm' in sys.modules: 43 | sys.modules['beetsplug.ai.llm']._search_toolkit = None 44 | 45 | def test_search_track_info_toolkit_missing(self): 46 | self.llm._search_toolkit = None 47 | result = self.llm.search_track_info('Test Song') 48 | self.assertEqual(result, {'title': 'Test Song', 'artist': '', 'album': None}) 49 | 50 | def test_search_track_info_with_toolkit(self): 51 | class Toolkit: 52 | def search_song_info(self, query): 53 | return {'title': 'Found', 'artist': 'Artist', 'album': 'Album'} 54 | self.llm._search_toolkit = Toolkit() 55 | result = self.llm.search_track_info('Input Song') 56 | self.assertEqual(result, {'title': 'Found', 'artist': 'Artist', 'album': 'Album'}) 57 | 58 | def test_instructor_available_flag(self): 59 | """Test that INSTRUCTOR_AVAILABLE flag is properly set.""" 60 | # The flag should be False in test environment (no instructor installed) 61 | self.assertFalse(self.llm.INSTRUCTOR_AVAILABLE) 62 | 63 | def test_create_fallback_song(self): 64 | """Test fallback song creation.""" 65 | toolkit = self.llm.MusicSearchTools(provider='ollama') 66 | fallback = toolkit._create_fallback_song('Test Title') 67 | self.assertEqual(fallback.title, 'Test Title') 68 | self.assertEqual(fallback.artist, '') 69 | self.assertIsNone(fallback.album) 70 | 71 | def test_instructor_client_initialization(self): 72 | """Test that instructor_client is initialized when instructor is available.""" 73 | toolkit = self.llm.MusicSearchTools(provider='ollama') 74 | # In test environment, instructor is not available 75 | self.assertIsNone(toolkit.instructor_client) 76 | 77 | def test_agno_fallback_exists(self): 78 | """Test that Agno agent fallback is maintained.""" 79 | toolkit = self.llm.MusicSearchTools(provider='ollama') 80 | # Agno is also not available in test environment, so ollama_agent will be None 81 | # This test just verifies the attribute exists 82 | self.assertTrue(hasattr(toolkit, 'ollama_agent')) 83 | 84 | 85 | if __name__ == '__main__': 86 | unittest.main() 87 | -------------------------------------------------------------------------------- /tests/test_manual_search.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import sys 3 | import types 4 | import unittest 5 | 6 | from tests.test_playlist_import import ensure_stubs, DummyLogger 7 | 8 | 9 | class ManualSearchTest(unittest.TestCase): 10 | def setUp(self): 11 | # set up beets/confuse stubs and load manual_search 12 | self.config, _ = ensure_stubs({'plexsync': {'manual_search': False}}) 13 | sys.modules['beetsplug.core.matching'] = types.SimpleNamespace( 14 | get_fuzzy_score=lambda a, b: 1.0 if a and b and a.lower() == b.lower() else 0.5 15 | ) 16 | module_name = 'beetsplug.plex.manual_search' 17 | if module_name in sys.modules: 18 | importlib.reload(sys.modules[module_name]) 19 | else: 20 | importlib.import_module(module_name) 21 | self.manual = 
importlib.import_module(module_name) 22 | 23 | def test_handle_manual_search_caches_selection(self): 24 | class Plugin: 25 | def __init__(self): 26 | self.cache = types.SimpleNamespace(_key=None) 27 | self._log = DummyLogger() 28 | self.cache_calls = [] 29 | 30 | def _cache_result(self, cache_key, result, cleaned_metadata=None): 31 | self.cache_calls.append((cache_key, result)) 32 | 33 | def manual_track_search(self, original): 34 | assert False, "Should not recurse" 35 | 36 | def cache_key(self, song): 37 | return f"cache-{song['title']}" 38 | 39 | plugin = Plugin() 40 | plugin.cache._make_cache_key = lambda song: f"cache-{song['title']}" 41 | 42 | track = types.SimpleNamespace(title='Song', parentTitle='Album', artist=lambda: types.SimpleNamespace(title='Artist')) 43 | # ensure helper stores negative cache 44 | self.manual._store_negative_cache(plugin, {'title': 'Song'}, None) 45 | self.assertIn(('cache-Song', None), plugin.cache_calls) 46 | 47 | def test_cache_selection_skips_manual_query_cache(self): 48 | class Plugin: 49 | def __init__(self): 50 | self.cache = types.SimpleNamespace(_key=None) 51 | self._log = DummyLogger() 52 | self.cache_calls = [] 53 | 54 | def _cache_result(self, cache_key, result, cleaned_metadata=None): 55 | self.cache_calls.append((cache_key, result)) 56 | 57 | plugin = Plugin() 58 | plugin.cache._make_cache_key = lambda song: f"cache-{song['title']}" 59 | 60 | manual_query = {'title': '', 'album': 'Manual Album', 'artist': ''} 61 | original_query = {'title': 'Original Title', 'album': 'Original Album', 'artist': 'Original Artist'} 62 | track = types.SimpleNamespace(ratingKey=123) 63 | 64 | self.manual._cache_selection(plugin, manual_query, track, original_query) 65 | 66 | self.assertIn(('cache-Original Title', track), plugin.cache_calls) 67 | self.assertNotIn(('cache-', track), plugin.cache_calls) 68 | 69 | def test_cache_selection_without_original_query_does_not_cache(self): 70 | class Plugin: 71 | def __init__(self): 72 | self.cache = types.SimpleNamespace(_key=None) 73 | self._log = DummyLogger() 74 | self.cache_calls = [] 75 | 76 | def _cache_result(self, cache_key, result, cleaned_metadata=None): 77 | self.cache_calls.append((cache_key, result)) 78 | 79 | plugin = Plugin() 80 | plugin.cache._make_cache_key = lambda song: f"cache-{song['title']}" 81 | 82 | manual_query = {'title': '', 'album': 'Manual Album', 'artist': ''} 83 | track = types.SimpleNamespace(ratingKey=456) 84 | 85 | self.manual._cache_selection(plugin, manual_query, track) 86 | 87 | self.assertEqual(plugin.cache_calls, []) 88 | 89 | 90 | if __name__ == '__main__': 91 | unittest.main() 92 | -------------------------------------------------------------------------------- /tests/test_spotify_transfer.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import types 3 | import unittest 4 | 5 | from tests.test_playlist_import import ensure_stubs, DummyLogger 6 | 7 | 8 | class SpotifyTransferTest(unittest.TestCase): 9 | def setUp(self): 10 | ensure_stubs({'plexsync': {}}) 11 | if 'beetsplug.plex.spotify_transfer' in importlib.sys.modules: 12 | importlib.reload(importlib.sys.modules['beetsplug.plex.spotify_transfer']) 13 | else: 14 | importlib.import_module('beetsplug.plex.spotify_transfer') 15 | self.transfer = importlib.import_module('beetsplug.plex.spotify_transfer') 16 | 17 | def test_transfers_tracks_with_existing_ids(self): 18 | logger = DummyLogger() 19 | 20 | class Playlist: 21 | def __init__(self, items): 22 | self._items = items 23 
| 24 | def items(self): 25 | return self._items 26 | 27 | class PlexItem: 28 | def __init__(self, rating_key, parent_title, title): 29 | self.ratingKey = rating_key 30 | self.parentTitle = parent_title 31 | self.title = title 32 | 33 | class Plugin: 34 | def __init__(self): 35 | self._log = logger 36 | self.plex = types.SimpleNamespace(playlist=lambda name: Playlist([ 37 | PlexItem(1, 'Album', 'Song'), 38 | ])) 39 | self.called_auth = False 40 | self.sp = types.SimpleNamespace(track=lambda track_id: { 41 | 'is_playable': True, 42 | 'available_markets': ['US'], 43 | }) 44 | 45 | def authenticate_spotify(self): 46 | self.called_auth = True 47 | 48 | def add_tracks_to_spotify_playlist(self, playlist, tracks): 49 | self.sent = (playlist, tracks) 50 | 51 | def _search_spotify_track(self, beets_item): # pragma: no cover 52 | return 'alt-track' 53 | 54 | plugin = Plugin() 55 | 56 | class LibraryItem: 57 | def __init__(self, rating_key, spotify_id, artist, album, title): 58 | self.plex_ratingkey = rating_key 59 | self.spotify_track_id = spotify_id 60 | self.artist = artist 61 | self.album = album 62 | self.title = title 63 | 64 | lib = types.SimpleNamespace( 65 | items=lambda *args, **kwargs: [ 66 | LibraryItem(1, 'spotify:track:123', 'Artist', 'Album', 'Song') 67 | ] 68 | ) 69 | self.transfer.plex_to_spotify(plugin, lib, 'Mix') 70 | 71 | self.assertTrue(plugin.called_auth) 72 | self.assertEqual(plugin.sent, ('Mix', ['spotify:track:123'])) 73 | 74 | def test_falls_back_to_search_when_unplayable(self): 75 | logger = DummyLogger() 76 | 77 | class Plugin: 78 | def __init__(self): 79 | self._log = logger 80 | self.plex = types.SimpleNamespace(playlist=lambda name: types.SimpleNamespace(items=lambda: [types.SimpleNamespace(ratingKey=1, parentTitle='Alb', title='Song')])) 81 | self.sp = types.SimpleNamespace(track=lambda _id: { 82 | 'is_playable': False, 83 | 'available_markets': [], 84 | }) 85 | 86 | def authenticate_spotify(self): 87 | pass 88 | 89 | def _search_spotify_track(self, beets_item): 90 | return 'fallback' 91 | 92 | def add_tracks_to_spotify_playlist(self, playlist, tracks): 93 | self.sent = tracks 94 | 95 | plugin = Plugin() 96 | 97 | class LibraryItem: 98 | def __init__(self, rating_key, spotify_id, artist, album, title): 99 | self.plex_ratingkey = rating_key 100 | self.spotify_track_id = spotify_id 101 | self.artist = artist 102 | self.album = album 103 | self.title = title 104 | 105 | lib = types.SimpleNamespace( 106 | items=lambda *args, **kwargs: [ 107 | LibraryItem(1, 'orig', 'Art', 'Alb', 'Song') 108 | ] 109 | ) 110 | self.transfer.plex_to_spotify(plugin, lib, 'Mix') 111 | 112 | self.assertEqual(plugin.sent, ['fallback']) 113 | 114 | 115 | if __name__ == '__main__': 116 | unittest.main() 117 | 118 | -------------------------------------------------------------------------------- /beetsplug/providers/jiosaavn.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import re 3 | import asyncio 4 | from jiosaavn import JioSaavn 5 | from beetsplug.utils.helpers import parse_title, clean_album_name 6 | 7 | _log = logging.getLogger('beets.plexsync.jiosaavn') 8 | 9 | # Create JioSaavn instance 10 | saavn = JioSaavn() 11 | 12 | async def get_playlist_songs(playlist_url): 13 | """Get playlist songs by URL. 
14 | 
15 |     Args:
16 |         playlist_url: URL of the JioSaavn playlist
17 | 
18 |     Returns:
19 |         dict: JioSaavn API response with playlist data
20 |     """
21 |     # Use the async method from saavn
22 |     songs = await saavn.get_playlist_songs(playlist_url)
23 |     # Return a list of songs with details
24 |     return songs
25 | 
26 | 
27 | def import_jiosaavn_playlist(url, cache=None):
28 |     """Import JioSaavn playlist with caching.
29 | 
30 |     Args:
31 |         url: URL of the JioSaavn playlist
32 |         cache: Cache object for storing results
33 | 
34 |     Returns:
35 |         list: List of song dictionaries
36 |     """
37 |     playlist_id = url.split('/')[-1]
38 | 
39 |     # Check cache first
40 |     if cache:
41 |         cached_data = cache.get_playlist_cache(playlist_id, 'jiosaavn')
42 |         if cached_data:
43 |             _log.info("Using cached JioSaavn playlist data")
44 |             return cached_data
45 | 
46 |     song_list = []
47 | 
48 |     try:
49 |         try:
50 |             loop = asyncio.get_event_loop()
51 |             if loop.is_running():
52 |                 # If already running, schedule the coroutine and wait for result
53 |                 import concurrent.futures
54 |                 future = asyncio.ensure_future(get_playlist_songs(url))
55 |                 # Use run_coroutine_threadsafe for thread-safe execution
56 |                 data = asyncio.run_coroutine_threadsafe(get_playlist_songs(url), loop).result()
57 |             else:
58 |                 # If not running, just run until complete
59 |                 data = loop.run_until_complete(get_playlist_songs(url))
60 |         except (RuntimeError, AssertionError):
61 |             # No event loop or closed, create a new one
62 |             new_loop = asyncio.new_event_loop()
63 |             asyncio.set_event_loop(new_loop)
64 |             data = new_loop.run_until_complete(get_playlist_songs(url))
65 |             new_loop.close()
66 | 
67 |         if not data or "data" not in data or "list" not in data["data"]:
68 |             _log.error("Invalid response from JioSaavn API")
69 |             return song_list
70 | 
71 |         songs = data["data"]["list"]
72 | 
73 |         for song in songs:
74 |             try:
75 |                 # Process song title
76 |                 if ('From "' in song["title"]) or ("From &quot;" in song["title"]):
77 |                     title_orig = song["title"].replace("&quot;", '"')
78 |                     title, album = parse_title(title_orig)
79 |                 else:
80 |                     title = song["title"]
81 |                     album = clean_album_name(song["more_info"]["album"])
82 | 
83 |                 # Get year if available
84 |                 year = song.get("year", None)
85 | 
86 |                 # Get primary artist from artistMap
87 |                 try:
88 |                     artist = song["more_info"]["artistMap"]["primary_artists"][0]["name"]
89 |                 except (KeyError, IndexError):
90 |                     # Fallback to first featured artist if primary not found
91 |                     try:
92 |                         artist = song["more_info"]["artistMap"]["featured_artists"][0]["name"]
93 |                     except (KeyError, IndexError):
94 |                         # Skip if no artist found
95 |                         continue
96 | 
97 |                 # Create song dictionary with cleaned data
98 |                 song_dict = {
99 |                     "title": title.strip(),
100 |                     "album": album.strip(),
101 |                     "artist": artist.strip(),
102 |                     "year": year,
103 |                 }
104 | 
105 |                 song_list.append(song_dict)
106 |                 _log.debug(f"Added song: {song_dict['title']} - {song_dict['artist']}")
107 | 
108 |             except Exception as e:
109 |                 _log.debug(f"Error processing JioSaavn song: {e}")
110 |                 continue
111 | 
112 |         # Cache successful results
113 |         if song_list and cache:
114 |             cache.set_playlist_cache(playlist_id, 'jiosaavn', song_list)
115 |             _log.info(f"Cached {len(song_list)} tracks from JioSaavn playlist")
116 | 
117 |     except Exception as e:
118 |         _log.error(f"Error importing JioSaavn playlist: {e}")
119 | 
120 |     return song_list
121 | 
--------------------------------------------------------------------------------
/beetsplug/plex/operations.py:
--------------------------------------------------------------------------------
1 | """Plex playlist operations extracted from plexsync. 2 | 3 | These helpers encapsulate low-level Plex operations and log consistently. 4 | They are intentionally thin to avoid behavior changes. 5 | """ 6 | 7 | from typing import Iterable 8 | 9 | from plexapi import exceptions 10 | 11 | 12 | def sort_plex_playlist(plex, playlist_name: str, sort_field: str, logger) -> None: 13 | """Sort a Plex playlist by a given datetime field (desc).""" 14 | playlist = plex.playlist(playlist_name) 15 | items = playlist.items() 16 | sorted_items = sorted( 17 | items, 18 | key=lambda x: (getattr(x, sort_field).timestamp() if getattr(x, sort_field) is not None else 0), 19 | reverse=True, 20 | ) 21 | playlist.removeItems(items) 22 | for item in sorted_items: 23 | playlist.addItems(item) 24 | 25 | 26 | def _resolve_plex_items(plex, items: Iterable, logger): 27 | """Normalize incoming items to Plex items via rating key. 28 | 29 | Supports objects with either `plex_ratingkey` or `ratingKey` attributes. 30 | """ 31 | plex_set = set() 32 | for item in items: 33 | try: 34 | rating_key = getattr(item, 'plex_ratingkey', None) or getattr(item, 'ratingKey', None) 35 | if rating_key: 36 | plex_set.add(plex.fetchItem(rating_key)) 37 | else: 38 | logger.warning("{} does not have plex_ratingkey or ratingKey attribute. Item details: {}", item, vars(item)) 39 | except (exceptions.NotFound, AttributeError) as e: 40 | logger.warning("{} not found in Plex library. Error: {}", item, e) 41 | continue 42 | return plex_set 43 | 44 | 45 | def plex_add_playlist_item(plex, items: Iterable, playlist_name: str, logger) -> None: 46 | """Add items to a Plex playlist (no duplicates).""" 47 | if not items: 48 | logger.warning("No items to add to playlist {}", playlist_name) 49 | return 50 | 51 | try: 52 | plst = plex.playlist(playlist_name) 53 | playlist_set = set(plst.items()) 54 | except exceptions.NotFound: 55 | plst = None 56 | playlist_set = set() 57 | 58 | plex_set = _resolve_plex_items(plex, items, logger) 59 | to_add = plex_set - playlist_set 60 | logger.info("Adding {} tracks to {} playlist", len(to_add), playlist_name) 61 | if plst is None: 62 | logger.info("{} playlist will be created", playlist_name) 63 | plex.createPlaylist(playlist_name, items=list(to_add)) 64 | else: 65 | try: 66 | plst.addItems(items=list(to_add)) 67 | except exceptions.BadRequest as e: 68 | logger.error("Error adding items {} to {} playlist. 
Error: {}", to_add, playlist_name, e) 69 | 70 | # Sort by recency, matches original behavior 71 | try: 72 | sort_plex_playlist(plex, playlist_name, "lastViewedAt", logger) 73 | except Exception: 74 | # Non-fatal if sorting fails 75 | pass 76 | 77 | 78 | def plex_playlist_to_collection(music, playlist_name: str, logger) -> None: 79 | """Convert a Plex playlist to a Plex collection, de-duplicated.""" 80 | try: 81 | plst = music.playlist(playlist_name) 82 | playlist_set = set(plst.items()) 83 | except exceptions.NotFound: 84 | logger.error("{} playlist not found", playlist_name) 85 | return 86 | 87 | try: 88 | col = music.collection(playlist_name) 89 | collection_set = set(col.items()) 90 | except exceptions.NotFound: 91 | col = None 92 | collection_set = set() 93 | 94 | to_add = playlist_set - collection_set 95 | logger.info("Adding {} tracks to {} collection", len(to_add), playlist_name) 96 | if col is None: 97 | logger.info("{} collection will be created", playlist_name) 98 | music.createCollection(playlist_name, items=list(to_add)) 99 | else: 100 | try: 101 | col.addItems(items=list(to_add)) 102 | except exceptions.BadRequest as e: 103 | logger.error("Error adding items {} to {} collection. Error: {}", to_add, playlist_name, e) 104 | 105 | 106 | def plex_remove_playlist_item(plex, items: Iterable, playlist_name: str, logger) -> None: 107 | """Remove items from a Plex playlist if present.""" 108 | try: 109 | plst = plex.playlist(playlist_name) 110 | playlist_set = set(plst.items()) 111 | except exceptions.NotFound: 112 | logger.error("{} playlist not found", playlist_name) 113 | return 114 | 115 | plex_set = set() 116 | from requests.exceptions import ConnectionError, ContentDecodingError 117 | 118 | for item in items: 119 | try: 120 | plex_set.add(plex.fetchItem(item.plex_ratingkey)) 121 | except (exceptions.NotFound, AttributeError, ContentDecodingError, ConnectionError) as e: 122 | logger.warning("{} not found in Plex library. Error: {}", item, e) 123 | continue 124 | 125 | to_remove = plex_set.intersection(playlist_set) 126 | logger.info("Removing {} tracks from {} playlist", len(to_remove), playlist_name) 127 | plst.removeItems(items=list(to_remove)) 128 | 129 | 130 | def plex_clear_playlist(plex, playlist_name: str) -> None: 131 | """Clear all items from a Plex playlist.""" 132 | plist = plex.playlist(playlist_name) 133 | tracks = plist.items() 134 | for track in tracks: 135 | plist.removeItems(track) 136 | 137 | -------------------------------------------------------------------------------- /agents.md: -------------------------------------------------------------------------------- 1 | # beets-plexsync - Project Context 2 | 3 | ## Project Overview 4 | 5 | This project is a plugin for [beets](https://github.com/beetbox/beets), a music library manager. The plugin, named `plexsync`, provides powerful tools to synchronize and manage your music library between beets and a Plex Media Server. 6 | 7 | Key features include: 8 | - **Library Sync**: Import track data (ratings, play counts, last played dates) from Plex into your beets library. 9 | - **Smart Playlists**: Generate dynamic playlists in Plex based on your listening history, track ratings, genres, and other criteria. Includes "Daily Discovery", "Forgotten Gems", and "Recent Hits". 10 | - **AI-Generated Playlists**: Create playlists in Plex based on natural language prompts using an LLM (like GPT, Ollama models). 
11 | - **External Playlist Import**: Import playlists from various sources like Spotify, Apple Music, YouTube, Tidal, JioSaavn, Gaana, local M3U8 files, and custom HTTP POST endpoints. 12 | - **Playlist Management**: Add/remove tracks from Plex playlists using beets queries, clear playlists. 13 | - **Additional Tools**: Copy Plex playlists to Spotify, convert playlists to collections, create album collages. 14 | 15 | The plugin is written in Python and leverages several libraries including `plexapi`, `spotipy`, `openai`, `pydantic`, and others. 16 | 17 | ## Implementation Guidelines for Coding Assistants 18 | 19 | - Ask clarifying questions for ambiguous changes 20 | - Draft and confirm approach for non-trivial features 21 | - List trade-offs when multiple approaches exist 22 | - Follow existing patterns and module boundaries below 23 | 24 | ### Critical Constraints 25 | - NEVER modify cache keys (stored in SQLite via core/cache.py) 26 | - Keep public APIs and method signatures stable when possible 27 | - Maintain compatibility with beets plugin architecture and CLI 28 | - Preserve vector index behavior (core/vector_index.py) to avoid regressions 29 | 30 | ### Development Patterns 31 | - Use logging with namespace beets.plexsync 32 | - Prefer Pydantic v2 models for structured data 33 | - Cache expensive operations (Plex calls, providers, LLM) 34 | - Keep LLM tooling behind config flags and degrade gracefully 35 | 36 | ## Code Organization 37 | - Entry point: beetsplug/plexsync.py 38 | - AI: beetsplug/ai/llm.py (Agno-based; OpenAI-like or Ollama) 39 | - Core: beetsplug/core/{cache.py, config.py, matching.py, vector_index.py} 40 | - Plex: beetsplug/plex/{search.py, manual_search.py, playlist_import.py, smartplaylists.py, operations.py, spotify_transfer.py, collage.py} 41 | - Providers: beetsplug/providers/{apple.py, spotify.py, youtube.py, tidal.py, jiosaavn.py, gaana.py, m3u8.py, post.py} 42 | - Utils: beetsplug/utils/helpers.py 43 | 44 | ## Search Pipeline Overview (beetsplug/plex/search.py) 45 | When PlexSync.search_plex_song(...) is called, the pipeline should proceed: 46 | 1. Cache check 47 | - Return cached ratingKey via plugin.music.fetchItem when present 48 | 2. Local beets candidates 49 | - Use core/vector_index.py to surface LocalCandidate entries 50 | - Try direct match via cached plex_ratingkey if present 51 | - Accept immediately if similarity >= 0.8 52 | - Otherwise queue for manual confirmation 53 | - Prepare variant queries from candidates and try Plex music.searchTracks 54 | 3. Single/multiple track search 55 | - If tracks found, score with core/matching.plex_track_distance 56 | - Accept when similarity threshold is met; else queue for review 57 | 4. Manual search UI (manual_search.py) 58 | - review_candidate_confirmations(…) queues and deduplicates options 59 | - handle_manual_search(…) supports actions: 60 | - a: Abort, s: Skip (store negative cache), e: Enter manual search 61 | - Numeric selection caches positive result against the original query only 62 | - _store_negative_cache(plugin, song, original_query) 63 | - Writes None to cache when there is a valid title in the chosen query 64 | - _cache_selection(plugin, song, track, original_query) 65 | - Caches ONLY the original query key (not the manual entry), matching tests 66 | 5. 
LLM search fallback (optional) 67 | - If enabled via plexsync.use_llm_search, use ai/llm.py 68 | - Provider priority in toolkit: SearxNG > Exa > Brave > Tavily 69 | - Brave Search is rate-limited to ~1 request/second 70 | 71 | ## Smart Playlists 72 | - Built in PlexSync.plex_smartplaylists command supports: 73 | - System playlists: daily_discovery, forgotten_gems, recent_hits, fresh_favorites, 70s80s_flashback, highly_rated, most_played 74 | - Imported playlists from providers and M3U8 files 75 | - Flags: 76 | - --only: restrict to a comma-separated list of playlist IDs 77 | - --import-failed/--log-file: retry manual imports using generated logs 78 | 79 | ## Testing 80 | - Run unit tests: 81 | ```bash 82 | python3 -m unittest discover -s ./tests -p "test_*.py" -v 83 | ``` 84 | - Compile modules quickly: 85 | ```bash 86 | python3 - << 'PY' 87 | import os, py_compile 88 | for root, _, files in os.walk('beetsplug'): 89 | for f in files: 90 | if f.endswith('.py'): 91 | py_compile.compile(os.path.join(root, f)) 92 | print('OK') 93 | PY 94 | ``` 95 | 96 | ## LLM Configuration Notes 97 | - Auto-detect provider: 98 | - If llm.api_key is set: OpenAI-compatible via agno.models.openai.like.OpenAILike 99 | - Else: Ollama via agno.models.ollama.Ollama 100 | - Search toolkit keys under llm.search: 101 | - searxng_host, exa_api_key, brave_api_key, tavily_api_key 102 | - Brave Search requests are rate-limited in code -------------------------------------------------------------------------------- /gemini.md: -------------------------------------------------------------------------------- 1 | # beets-plexsync - Project Context 2 | 3 | ## Project Overview 4 | 5 | This project is a plugin for [beets](https://github.com/beetbox/beets), a music library manager. The plugin, named `plexsync`, provides powerful tools to synchronize and manage your music library between beets and a Plex Media Server. 6 | 7 | Key features include: 8 | - **Library Sync**: Import track data (ratings, play counts, last played dates) from Plex into your beets library. 9 | - **Smart Playlists**: Generate dynamic playlists in Plex based on your listening history, track ratings, genres, and other criteria. Includes "Daily Discovery", "Forgotten Gems", and "Recent Hits". 10 | - **AI-Generated Playlists**: Create playlists in Plex based on natural language prompts using an LLM (like GPT, Ollama models). 11 | - **External Playlist Import**: Import playlists from various sources like Spotify, Apple Music, YouTube, Tidal, JioSaavn, Gaana, local M3U8 files, and custom HTTP POST endpoints. 12 | - **Playlist Management**: Add/remove tracks from Plex playlists using beets queries, clear playlists. 13 | - **Additional Tools**: Copy Plex playlists to Spotify, convert playlists to collections, create album collages. 14 | 15 | The plugin is written in Python and leverages several libraries including `plexapi`, `spotipy`, `openai`, `pydantic`, and others. 
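As a quick orientation for assistants, the import pipeline operates on plain song dictionaries (`title`/`artist`/`album`) that are matched against Plex and then added to a playlist. A minimal sketch of that flow, assuming an already-configured `PlexSync` plugin instance (method names mirror `beetsplug/plex/playlist_import.py`; the song data and playlist name are illustrative only):

```python
# Hypothetical sketch of the import flow in beetsplug/plex/playlist_import.py.
# `plugin` is assumed to be an already-configured PlexSync instance; the song
# data and the playlist name are illustrative only.
songs = [
    {"title": "Song One", "artist": "Artist A", "album": "Album X"},
    {"title": "Song Two", "artist": "Artist B", "album": "Album Y"},
]

matched = []
for song in songs:
    track = plugin.search_plex_song(song, manual_search=False)  # Plex track or None
    if track is not None:
        matched.append(track)

if matched:
    plugin._plex_add_playlist_item(matched, "My Imported Mix")
```

In the real implementation this same loop is wrapped with manual confirmation, result caching, and optional progress feedback.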
16 | 17 | ## Implementation Guidelines for Coding Assistants 18 | 19 | - Ask clarifying questions for ambiguous changes 20 | - Draft and confirm approach for non-trivial features 21 | - List trade-offs when multiple approaches exist 22 | - Follow existing patterns and module boundaries below 23 | 24 | ### Critical Constraints 25 | - NEVER modify cache keys (stored in SQLite via core/cache.py) 26 | - Keep public APIs and method signatures stable when possible 27 | - Maintain compatibility with beets plugin architecture and CLI 28 | - Preserve vector index behavior (core/vector_index.py) to avoid regressions 29 | 30 | ### Development Patterns 31 | - Use logging with namespace beets.plexsync 32 | - Prefer Pydantic v2 models for structured data 33 | - Cache expensive operations (Plex calls, providers, LLM) 34 | - Keep LLM tooling behind config flags and degrade gracefully 35 | 36 | ## Code Organization 37 | - Entry point: beetsplug/plexsync.py 38 | - AI: beetsplug/ai/llm.py (Agno-based; OpenAI-like or Ollama) 39 | - Core: beetsplug/core/{cache.py, config.py, matching.py, vector_index.py} 40 | - Plex: beetsplug/plex/{search.py, manual_search.py, playlist_import.py, smartplaylists.py, operations.py, spotify_transfer.py, collage.py} 41 | - Providers: beetsplug/providers/{apple.py, spotify.py, youtube.py, tidal.py, jiosaavn.py, gaana.py, m3u8.py, post.py} 42 | - Utils: beetsplug/utils/helpers.py 43 | 44 | ## Search Pipeline Overview (beetsplug/plex/search.py) 45 | When PlexSync.search_plex_song(...) is called, the pipeline should proceed: 46 | 1. Cache check 47 | - Return cached ratingKey via plugin.music.fetchItem when present 48 | 2. Local beets candidates 49 | - Use core/vector_index.py to surface LocalCandidate entries 50 | - Try direct match via cached plex_ratingkey if present 51 | - Accept immediately if similarity >= 0.8 52 | - Otherwise queue for manual confirmation 53 | - Prepare variant queries from candidates and try Plex music.searchTracks 54 | 3. Single/multiple track search 55 | - If tracks found, score with core/matching.plex_track_distance 56 | - Accept when similarity threshold is met; else queue for review 57 | 4. Manual search UI (manual_search.py) 58 | - review_candidate_confirmations(…) queues and deduplicates options 59 | - handle_manual_search(…) supports actions: 60 | - a: Abort, s: Skip (store negative cache), e: Enter manual search 61 | - Numeric selection caches positive result against the original query only 62 | - _store_negative_cache(plugin, song, original_query) 63 | - Writes None to cache when there is a valid title in the chosen query 64 | - _cache_selection(plugin, song, track, original_query) 65 | - Caches ONLY the original query key (not the manual entry), matching tests 66 | 5. 
LLM search fallback (optional) 67 | - If enabled via plexsync.use_llm_search, use ai/llm.py 68 | - Provider priority in toolkit: SearxNG > Exa > Brave > Tavily 69 | - Brave Search is rate-limited to ~1 request/second 70 | 71 | ## Smart Playlists 72 | - Built in PlexSync.plex_smartplaylists command supports: 73 | - System playlists: daily_discovery, forgotten_gems, recent_hits, fresh_favorites, 70s80s_flashback, highly_rated, most_played 74 | - Imported playlists from providers and M3U8 files 75 | - Flags: 76 | - --only: restrict to a comma-separated list of playlist IDs 77 | - --import-failed/--log-file: retry manual imports using generated logs 78 | 79 | ## Testing 80 | - Run unit tests: 81 | ```bash 82 | python3 -m unittest discover -s ./tests -p "test_*.py" -v 83 | ``` 84 | - Compile modules quickly: 85 | ```bash 86 | python3 - << 'PY' 87 | import os, py_compile 88 | for root, _, files in os.walk('beetsplug'): 89 | for f in files: 90 | if f.endswith('.py'): 91 | py_compile.compile(os.path.join(root, f)) 92 | print('OK') 93 | PY 94 | ``` 95 | 96 | ## LLM Configuration Notes 97 | - Auto-detect provider: 98 | - If llm.api_key is set: OpenAI-compatible via agno.models.openai.like.OpenAILike 99 | - Else: Ollama via agno.models.ollama.Ollama 100 | - Search toolkit keys under llm.search: 101 | - searxng_host, exa_api_key, brave_api_key, tavily_api_key 102 | - Brave Search requests are rate-limited in code -------------------------------------------------------------------------------- /beetsplug/plex/spotify_transfer.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from beetsplug.plex import smartplaylists as sp_mod 4 | 5 | """Utilities for transferring Plex playlists to Spotify.""" 6 | 7 | def plex_to_spotify(plugin, lib, playlist, query_args=None): 8 | """Transfer a Plex playlist to Spotify using the plugin context.""" 9 | plugin.authenticate_spotify() 10 | plex_playlist = plugin.plex.playlist(playlist) 11 | plex_playlist_items = list(plex_playlist.items()) 12 | plugin._log.debug("Total items in Plex playlist: {}", len(plex_playlist_items)) 13 | 14 | plex_lookup = plugin._build_plex_lookup_and_vector_index(lib) 15 | spotify_tracks = [] 16 | 17 | query_rating_keys = None 18 | if query_args: 19 | query_items = lib.items(query_args) 20 | query_rating_keys = { 21 | item.plex_ratingkey for item in query_items if hasattr(item, 'plex_ratingkey') 22 | } 23 | plugin._log.info( 24 | "Query matched {} beets items, filtering playlist accordingly", 25 | len(query_rating_keys), 26 | ) 27 | 28 | progress = plugin.create_progress_counter( 29 | len(plex_playlist_items), 30 | f"Resolving Spotify matches for {playlist}", 31 | unit="track", 32 | ) 33 | try: 34 | for item in plex_playlist_items: 35 | plugin._log.debug("Processing {}", item.ratingKey) 36 | beets_item = plex_lookup.get(item.ratingKey) 37 | if not beets_item: 38 | plugin._log.debug( 39 | "Library not synced. 
Item not found in Beets: {} - {}", 40 | item.parentTitle, 41 | item.title, 42 | ) 43 | if progress is not None: 44 | progress.update() 45 | continue 46 | 47 | if query_rating_keys is not None and item.ratingKey not in query_rating_keys: 48 | plugin._log.debug( 49 | "Item filtered out by query: {} - {} - {}", 50 | beets_item.artist, 51 | beets_item.album, 52 | beets_item.title, 53 | ) 54 | if progress is not None: 55 | progress.update() 56 | continue 57 | 58 | plugin._log.debug("Beets item: {}", beets_item) 59 | spotify_track_id = _resolve_spotify_track(plugin, beets_item) 60 | if spotify_track_id: 61 | spotify_tracks.append(spotify_track_id) 62 | else: 63 | plugin._log.info("No playable Spotify match found for {}", beets_item) 64 | if progress is not None: 65 | progress.update() 66 | finally: 67 | if progress is not None: 68 | try: 69 | progress.close() 70 | except Exception: # noqa: BLE001 - optional UI element 71 | plugin._log.debug("Unable to close Spotify transfer progress for playlist {}", playlist) 72 | 73 | if query_args: 74 | plugin._log.info( 75 | "Found {} Spotify tracks matching query in Plex playlist order", 76 | len(spotify_tracks), 77 | ) 78 | else: 79 | plugin._log.debug( 80 | "Found {} Spotify tracks in Plex playlist order", 81 | len(spotify_tracks), 82 | ) 83 | 84 | # Deduplicate while preserving order 85 | seen = set() 86 | deduplicated_tracks = [] 87 | for track_id in spotify_tracks: 88 | if track_id not in seen: 89 | seen.add(track_id) 90 | deduplicated_tracks.append(track_id) 91 | 92 | if len(deduplicated_tracks) < len(spotify_tracks): 93 | plugin._log.info( 94 | "Removed {} duplicate tracks from playlist transfer", 95 | len(spotify_tracks) - len(deduplicated_tracks) 96 | ) 97 | 98 | plugin.add_tracks_to_spotify_playlist(playlist, deduplicated_tracks) 99 | 100 | def _resolve_spotify_track(plugin, beets_item): 101 | spotify_track_id = None 102 | try: 103 | spotify_track_id = getattr(beets_item, 'spotify_track_id', None) 104 | plugin._log.debug("Spotify track id in beets: {}", spotify_track_id) 105 | 106 | if spotify_track_id: 107 | try: 108 | track_info = plugin.sp.track(spotify_track_id) 109 | if ( 110 | not track_info 111 | or not track_info.get('is_playable', True) 112 | or track_info.get('restrictions', {}).get('reason') == 'unavailable' 113 | or not track_info.get('available_markets') 114 | ): 115 | plugin._log.debug( 116 | "Track {} is not playable or not available, searching for alternatives", 117 | spotify_track_id, 118 | ) 119 | spotify_track_id = None 120 | except Exception as exc: # noqa: BLE001 - log but continue 121 | plugin._log.debug( 122 | "Error checking track availability {}: {}", 123 | spotify_track_id, 124 | exc, 125 | ) 126 | spotify_track_id = None 127 | except Exception: 128 | spotify_track_id = None 129 | plugin._log.debug("Spotify track_id not found in beets") 130 | 131 | if not spotify_track_id: 132 | spotify_track_id = plugin._search_spotify_track(beets_item) 133 | return spotify_track_id 134 | -------------------------------------------------------------------------------- /beetsplug/core/vector_index.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | """Utilities for building and querying a lightweight cosine-similarity index 4 | over beets library metadata.""" 5 | 6 | import math 7 | import unicodedata 8 | from collections import Counter, defaultdict 9 | from dataclasses import dataclass 10 | from typing import Counter as CounterType 11 | from typing import 
Dict, Iterable, Iterator, List, Mapping, MutableMapping, Optional, Tuple 12 | 13 | from beetsplug.core.matching import clean_string 14 | 15 | TOKEN_WEIGHTS = {"title": 3, "artist": 2, "album": 1} 16 | MIN_SCORE_DEFAULT = 0.35 17 | CHAR_NGRAM_SIZE = 3 18 | 19 | 20 | def _normalize_token_text(value: str) -> str: 21 | """Normalize value for tokenization.""" 22 | if not value: 23 | return "" 24 | cleaned = clean_string(value) 25 | if not cleaned: 26 | return "" 27 | normalized = unicodedata.normalize("NFKD", cleaned) 28 | ascii_only = "".join( 29 | ch for ch in normalized if not unicodedata.combining(ch) 30 | ) 31 | return ascii_only 32 | 33 | 34 | def _char_ngrams(text: str, size: int = CHAR_NGRAM_SIZE) -> Iterator[str]: 35 | text = text.replace(" ", "") 36 | if len(text) < size or size <= 0: 37 | return iter(()) 38 | return (text[i : i + size] for i in range(len(text) - size + 1)) 39 | 40 | 41 | def _tokenize_metadata(metadata: Mapping[str, str]) -> CounterType[str]: 42 | counts: CounterType[str] = Counter() 43 | for field, weight in TOKEN_WEIGHTS.items(): 44 | raw_value = metadata.get(field) or "" 45 | normalized = _normalize_token_text(raw_value) 46 | if not normalized: 47 | continue 48 | 49 | for token in normalized.split(): 50 | if token: 51 | counts[token] += weight 52 | 53 | # Add lightweight char n-grams to tolerate minor misspellings. 54 | ngram_weight = max(1, weight - 1) 55 | for ngram in _char_ngrams(normalized): 56 | counts[f"ng:{ngram}"] += ngram_weight 57 | return counts 58 | 59 | 60 | def _vector_norm(counts: Mapping[str, float]) -> float: 61 | return math.sqrt(sum(value * value for value in counts.values())) 62 | 63 | 64 | @dataclass(frozen=True) 65 | class VectorEntry: 66 | item_id: int 67 | counts: CounterType[str] 68 | norm: float 69 | metadata: Mapping[str, str] 70 | 71 | def overlap_tokens(self, other_counts: Mapping[str, float]) -> List[str]: 72 | return sorted(token for token in other_counts if token in self.counts) 73 | 74 | 75 | class BeetsVectorIndex: 76 | """In-memory cosine-similarity index over beets metadata.""" 77 | 78 | def __init__(self) -> None: 79 | self._entries: Dict[int, VectorEntry] = {} 80 | self._token_index: MutableMapping[str, set[int]] = defaultdict(set) 81 | 82 | def __len__(self) -> int: 83 | return len(self._entries) 84 | 85 | def add_item(self, item_id: int, metadata: Mapping[str, str]) -> bool: 86 | """Add a beets item to the index. 87 | 88 | Returns: 89 | bool: True if the item was indexed, False if skipped. 
90 | """ 91 | counts = _tokenize_metadata(metadata) 92 | if not counts: 93 | return False 94 | 95 | norm = _vector_norm(counts) 96 | if norm == 0.0: 97 | return False 98 | 99 | entry = VectorEntry( 100 | item_id=item_id, 101 | counts=counts, 102 | norm=norm, 103 | metadata=metadata, 104 | ) 105 | self._entries[item_id] = entry 106 | 107 | for token in counts: 108 | self._token_index[token].add(item_id) 109 | return True 110 | 111 | def remove_item(self, item_id: int) -> bool: 112 | """Remove an item from the index if present.""" 113 | entry = self._entries.pop(item_id, None) 114 | if entry is None: 115 | return False 116 | 117 | for token in entry.counts: 118 | bucket = self._token_index.get(token) 119 | if not bucket: 120 | continue 121 | bucket.discard(item_id) 122 | if not bucket: 123 | self._token_index.pop(token, None) 124 | return True 125 | 126 | def upsert_item(self, item_id: int, metadata: Mapping[str, str]) -> bool: 127 | """Add or replace an item in the index.""" 128 | self.remove_item(item_id) 129 | return self.add_item(item_id, metadata) 130 | 131 | def iter_entries(self) -> Iterator[VectorEntry]: 132 | return iter(self._entries.values()) 133 | 134 | def build_query_vector( 135 | self, metadata: Mapping[str, str] 136 | ) -> Tuple[CounterType[str], float]: 137 | counts = _tokenize_metadata(metadata) 138 | return counts, _vector_norm(counts) 139 | 140 | def candidate_scores( 141 | self, 142 | query_counts: Mapping[str, float], 143 | query_norm: float, 144 | limit: int = 25, 145 | min_score: float = MIN_SCORE_DEFAULT, 146 | ) -> List[Tuple[VectorEntry, float]]: 147 | if not query_counts or query_norm == 0.0: 148 | return [] 149 | 150 | candidate_ids: set[int] = set() 151 | for token in query_counts: 152 | candidate_ids.update(self._token_index.get(token, ())) 153 | 154 | scored: List[Tuple[VectorEntry, float]] = [] 155 | for item_id in candidate_ids: 156 | entry = self._entries.get(item_id) 157 | if entry is None or entry.norm == 0.0: 158 | continue 159 | 160 | dot = 0.0 161 | for token, weight in query_counts.items(): 162 | if not weight: 163 | continue 164 | dot += weight * entry.counts.get(token, 0.0) 165 | 166 | if dot <= 0.0: 167 | continue 168 | 169 | score = dot / (query_norm * entry.norm) 170 | if score < min_score: 171 | continue 172 | scored.append((entry, score)) 173 | 174 | scored.sort(key=lambda pair: pair[1], reverse=True) 175 | return scored[:limit] 176 | -------------------------------------------------------------------------------- /.github/copilot-instructions.md: -------------------------------------------------------------------------------- 1 | # beets-plexsync Plugin Development Instructions 2 | 3 | **ALWAYS follow these instructions first and only fallback to additional search and context gathering if the information here is incomplete or found to be in error.** 4 | 5 | ## Project Overview 6 | 7 | beets-plexsync is a Python plugin for [beets](https://github.com/beetbox/beets), a music library manager. The plugin provides comprehensive integration with Plex Media Server including library synchronization, AI-generated playlists, smart playlist generation, and playlist import from external services (Spotify, YouTube, Apple Music, etc.). 
8 | 9 | ## Working Effectively 10 | 11 | ### VS Code and Copilot Chat (MCP) Setup 12 | 13 | - This repo ships a preconfigured MCP server for Copilot Chat: 14 | - .vscode/mcp.json defines server "context7" 15 | - .vscode/settings.json pins allowed models via chat.mcp.serverSampling for this project 16 | - Use Copilot Chat inside VS Code; the MCP server attaches automatically when the workspace is opened. If it doesn’t: 17 | - Ensure both files exist (.vscode/mcp.json and .vscode/settings.json) 18 | - Reload window 19 | - Network note: MCP tools may require external network access. Given this environment’s limitations, treat external tool calls as “best-effort” and prefer local validation steps below. 20 | 21 | ### Editor, Formatting, and Tests (VS Code defaults) 22 | 23 | - Python formatting: ms-python.black-formatter is the default. Do not change provider; "python.formatting.provider" is intentionally set to "none". 24 | - Tests: VS Code is configured for unittest discovery with: 25 | - Start dir: ./tests 26 | - Pattern: test_*.py 27 | - Keep tests compatible with unittest discovery or run the CLI snippets below. 28 | 29 | ## Prerequisites and Environment Setup 30 | 31 | **CRITICAL**: This environment has significant network limitations that prevent pip installations from PyPI due to timeout issues. Use system packages wherever possible. 32 | 33 | 1. **Install beets and basic dependencies**: 34 | ```bash 35 | sudo apt update 36 | sudo apt install -y beets beets-doc 37 | sudo apt install -y python3-pydantic python3-requests python3-bs4 python3-dateutil python3-confuse 38 | ``` 39 | - Installation time: 2-3 minutes. NEVER CANCEL. 40 | 41 | 2. **Verify beets installation**: 42 | ```bash 43 | beet --version # Should show "beets version 1.6.0" 44 | ``` 45 | 46 | 3. **Set up clean beets environment for testing**: 47 | ```bash 48 | mkdir -p /tmp/beets-test 49 | cd /tmp/beets-test 50 | beet -d /tmp/beets-test/library.db config 51 | ``` 52 | 53 | ### Plugin Installation and Testing 54 | 55 | **CRITICAL LIMITATION**: Direct pip installation fails due to network timeouts. The plugin requires these major dependencies that cannot be installed in this environment: 56 | - `spotipy` (Spotify integration) 57 | - `plexapi` (Plex server communication) 58 | - `openai` (AI features) 59 | - `agno>=1.2.16` (LLM framework) 60 | - `jiosaavn-python`, `tavily-python`, `exa_py`, `brave-search` (external services) 61 | 62 | **Installation approach**: 63 | ```bash 64 | # DOES NOT WORK due to network issues - will fail after 5+ minutes 65 | pip install git+https://github.com/arsaboo/beets-plexsync.git 66 | ``` 67 | 68 | **Validation approach**: 69 | 1. **Syntax validation** (works without dependencies): 70 | ```bash 71 | cd /home/runner/work/beets-plexsync/beets-plexsync 72 | python3 -m py_compile beetsplug/*.py # Should complete silently 73 | ``` 74 | 75 | 2. **Basic imports** (limited without full dependencies): 76 | ```bash 77 | PYTHONPATH=/home/runner/work/beets-plexsync/beets-plexsync python3 -c "from beetsplug.helpers import parse_title; print('Helper functions work')" 78 | ``` 79 | 80 | 3. **Plugin structure validation**: 81 | ```bash 82 | cd /home/runner/work/beets-plexsync/beets-plexsync 83 | ls -la beetsplug/ # Should show 14 Python files including plexsync.py 84 | ``` 85 | 86 | ## Build and Testing Process 87 | 88 | ### Code Validation 89 | 90 | NEVER CANCEL: All validation steps complete in under 5 seconds total but are essential. 91 | 92 | 1. 
Python syntax check (<1 second): 93 | ```bash 94 | cd /home/runner/work/beets-plexsync/beets-plexsync 95 | python3 - << 'PY' 96 | import os, py_compile 97 | for root, _, files in os.walk('beetsplug'): 98 | for f in files: 99 | if f.endswith('.py'): 100 | py_compile.compile(os.path.join(root, f)) 101 | print('All modules compiled') 102 | PY 103 | ``` 104 | 105 | 2. Basic import test (<1 second): 106 | ```bash 107 | PYTHONPATH=/home/runner/work/beets-plexsync/beets-plexsync python3 -c " 108 | import sys 109 | sys.path.insert(0, '/home/runner/work/beets-plexsync/beets-plexsync') 110 | from beetsplug.utils.helpers import parse_title, clean_album_name 111 | print('Core helper functions import successfully') 112 | " 113 | ``` 114 | 115 | 3. Configuration validation: 116 | ```bash 117 | cd /home/runner/work/beets-plexsync/beets-plexsync 118 | python3 -c " 119 | with open('setup.py', 'r') as f: 120 | content = f.read() 121 | print('setup.py loads correctly') 122 | print('Content length:', len(content), 'characters') 123 | if 'install_requires' in content: 124 | print('Has install_requires section') 125 | " 126 | ``` 127 | 128 | ### Manual Validation Scenarios 129 | 130 | Since this plugin requires external services (Plex server, Spotify, etc.), full functional testing requires: 131 | 132 | 1. **Configuration Testing**: Verify plugin can be loaded by beets (requires full dependency installation) 133 | 2. **Plex Server Integration**: Test library sync commands (requires running Plex server) 134 | 3. **External Service Integration**: Test playlist imports (requires API keys) 135 | 136 | **Due to network limitations, focus validation on**: 137 | - Python syntax correctness ✓ 138 | - Import structure validation ✓ 139 | - Configuration file parsing ✓ 140 | - Code style consistency ✓ 141 | 142 | ### Additional Validation Commands 143 | 144 | MCP configuration validation (<1 second): 145 | ```bash 146 | cd /home/runner/work/beets-plexsync/beets-plexsync 147 | python3 - << 'PY' 148 | import json, os, sys 149 | for p in ('.vscode/mcp.json', '.vscode/settings.json'): 150 | with open(p, 'r') as f: 151 | json.load(f) 152 | print('VS Code MCP config loads successfully') 153 | PY 154 | ``` 155 | 156 | VS Code unittest discovery parity (<1 second): 157 | ```bash 158 | cd /home/runner/work/beets-plexsync/beets-plexsync 159 | python3 -m unittest discover -s ./tests -p "test_*.py" -v 160 | ``` 161 | 162 | Provider module validation (<1 second): 163 | ```bash 164 | cd /home/runner/work/beets-plexsync/beets-plexsync 165 | python3 -m py_compile beetsplug/providers/*.py 166 | echo "All provider modules compile successfully" 167 | ``` 168 | 169 | Core module validation (<1 second): 170 | ```bash 171 | cd /home/runner/work/beets-plexsync/beets-plexsync 172 | python3 -m py_compile beetsplug/core/matching.py beetsplug/core/cache.py beetsplug/core/config.py 173 | echo "Core modules compile successfully" 174 | ``` 175 | 176 | Helper functions test (<1 second): 177 | ```bash 178 | PYTHONPATH=/home/runner/work/beets-plexsync/beets-plexsync python3 -c " 179 | from beetsplug.utils.helpers import parse_title, clean_album_name 180 | print('Helper functions import successfully') 181 | " 182 | ``` 183 | 184 | Plugin structure validation: 185 | ```bash 186 | cd /home/runner/work/beets-plexsync/beets-plexsync 187 | echo 'Plugin files:' $(find beetsplug -name "*.py" | wc -l) 188 | echo 'Provider files:' $(ls beetsplug/providers/*.py | wc -l) 189 | ``` -------------------------------------------------------------------------------- 
/tests/test_playlist_import.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import sys 3 | import types 4 | import unittest 5 | 6 | 7 | class DummyConfigNode: 8 | def __init__(self, data): 9 | self._data = data 10 | 11 | def __getitem__(self, key): 12 | if isinstance(self._data, dict) and key in self._data: 13 | return DummyConfigNode(self._data[key]) 14 | raise NotFoundError(key) 15 | 16 | def add(self, value): 17 | if isinstance(self._data, dict) and isinstance(value, dict): 18 | self._data.update(value) 19 | return self 20 | 21 | def get(self, cast=None): 22 | value = self._data 23 | if isinstance(value, DummyConfigNode): 24 | value = value._data 25 | if cast is None or value is None: 26 | return value 27 | if cast is bool: 28 | return bool(value) 29 | return cast(value) 30 | 31 | 32 | class DummyConfig(DummyConfigNode): 33 | def __init__(self): 34 | super().__init__({}) 35 | 36 | def set_data(self, data): 37 | self._data = data 38 | 39 | 40 | class NotFoundError(Exception): 41 | pass 42 | 43 | 44 | class ConfigValueError(Exception): 45 | pass 46 | 47 | 48 | class CacheStub: 49 | def get_playlist_cache(self, *args, **kwargs): 50 | return None 51 | 52 | def set_playlist_cache(self, *args, **kwargs): 53 | return None 54 | 55 | 56 | 57 | def ensure_stubs(data): 58 | config = DummyConfig() 59 | config.set_data(data) 60 | 61 | beets = types.ModuleType('beets') 62 | ui_module = types.ModuleType('beets.ui') 63 | 64 | class UserError(Exception): 65 | pass 66 | 67 | def colorize(_name, text): 68 | return text 69 | 70 | ui_module.UserError = UserError 71 | ui_module.colorize = colorize 72 | ui_module.input_ = lambda prompt='': '' 73 | ui_module.input_yn = lambda prompt='', default=True: default 74 | ui_module.input_options = lambda *args, **kwargs: 0 75 | ui_module.print_ = print 76 | 77 | beets.ui = ui_module 78 | beets.config = config 79 | 80 | # Minimal beets.library stub used by matching helpers during tests. 81 | library_module = types.ModuleType('beets.library') 82 | 83 | class LibraryItem: 84 | """Lightweight stand-in for beets.library.Item.""" 85 | 86 | def __init__(self, **fields): 87 | for key, value in fields.items(): 88 | setattr(self, key, value) 89 | 90 | library_module.Item = LibraryItem 91 | beets.library = library_module 92 | 93 | # Provide beets.autotag.distance with the API expected by matching.py. 
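    # The Distance stub below records one penalty per field (add_ratio keeps the
    # absolute value, add_string stores 0.0 for equal strings and 1.0 otherwise)
    # and exposes their average through its ``distance`` property, while
    # string_dist maps exact / substring / unrelated pairs to 0.0 / 0.25 / 0.75.
    # That is just enough API surface for the matching helpers to import and run
    # without a real beets installation.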
94 | autotag_module = types.ModuleType('beets.autotag') 95 | distance_module = types.ModuleType('beets.autotag.distance') 96 | 97 | class Distance: 98 | def __init__(self): 99 | self._weights = {} 100 | self._components = {} 101 | 102 | def add_ratio(self, key, value, weight): 103 | self._components[key] = abs(value) 104 | 105 | def add_string(self, key, left, right): 106 | self._components[key] = 0.0 if (left or "") == (right or "") else 1.0 107 | 108 | @property 109 | def distance(self): 110 | if not self._components: 111 | return 0.0 112 | return sum(self._components.values()) / len(self._components) 113 | 114 | def string_dist(left, right): 115 | left = left or "" 116 | right = right or "" 117 | if left == right: 118 | return 0.0 119 | if left in right or right in left: 120 | return 0.25 121 | return 0.75 122 | 123 | autotag_module.distance = distance_module 124 | distance_module.Distance = Distance 125 | distance_module.string_dist = string_dist 126 | beets.autotag = autotag_module 127 | 128 | plexapi_module = types.ModuleType('plexapi') 129 | plexapi_audio_module = types.ModuleType('plexapi.audio') 130 | 131 | class TrackStub: 132 | """Minimal plexapi.audio.Track replacement for tests.""" 133 | 134 | def __init__(self, title="", parentTitle="", originalTitle="", artist_title=""): 135 | self.title = title 136 | self.parentTitle = parentTitle 137 | self.originalTitle = originalTitle 138 | self._artist_title = artist_title 139 | 140 | def artist(self): 141 | return types.SimpleNamespace(title=self._artist_title or self.originalTitle or "") 142 | 143 | plexapi_audio_module.Track = TrackStub 144 | plexapi_module.audio = plexapi_audio_module 145 | 146 | confuse = types.ModuleType('confuse') 147 | confuse.NotFoundError = NotFoundError 148 | confuse.ConfigValueError = ConfigValueError 149 | 150 | sys.modules['beets'] = beets 151 | sys.modules['beets.ui'] = ui_module 152 | sys.modules['beets.library'] = library_module 153 | sys.modules['beets.autotag'] = autotag_module 154 | sys.modules['beets.autotag.distance'] = distance_module 155 | sys.modules['plexapi'] = plexapi_module 156 | sys.modules['plexapi.audio'] = plexapi_audio_module 157 | sys.modules['confuse'] = confuse 158 | 159 | return config, UserError 160 | 161 | 162 | class DummyLogger: 163 | def __init__(self): 164 | self.messages = [] 165 | 166 | def _record(self, level, msg, *args): 167 | self.messages.append((level, msg.format(*args))) 168 | 169 | def warning(self, msg, *args): 170 | self._record('warning', msg, *args) 171 | 172 | def info(self, msg, *args): 173 | self._record('info', msg, *args) 174 | 175 | def error(self, msg, *args): 176 | self._record('error', msg, *args) 177 | 178 | def debug(self, msg, *args): 179 | self._record('debug', msg, *args) 180 | 181 | 182 | class PluginStub: 183 | def __init__(self, logger): 184 | self._log = logger 185 | self.added = None 186 | self.last_manual = None 187 | self.cache = CacheStub() 188 | 189 | def search_plex_song(self, song, manual_search=False): 190 | self.last_manual = manual_search 191 | return f"match-{song['title']}" 192 | 193 | def create_progress_counter(self, *args, **kwargs): 194 | return None 195 | 196 | def _plex_add_playlist_item(self, tracks, playlist): 197 | self.added = (tracks, playlist) 198 | 199 | def get_playlist_id(self, url): 200 | return 'list-id' 201 | 202 | def import_spotify_playlist(self, playlist_id): 203 | return [] 204 | 205 | def import_apple_playlist(self, url): 206 | return [] 207 | 208 | def import_jiosaavn_playlist(self, url): 209 | return [] 210 | 
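# PluginStub mirrors the slice of the PlexSync plugin that
# beetsplug/plex/playlist_import.py touches: search_plex_song() returns a
# sentinel match (or None in subclasses), _plex_add_playlist_item() records the
# tracks and playlist it would send to Plex, and the provider import_* hooks,
# cache, and progress counter are inert, so the tests below exercise the import
# flow without a network connection or a Plex server.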
211 | 212 | class PlaylistImportTest(unittest.TestCase): 213 | def setUp(self): 214 | self.config, self.UserError = ensure_stubs({'plexsync': {'manual_search': False}}) 215 | if 'beetsplug.plex.playlist_import' in sys.modules: 216 | importlib.reload(sys.modules['beetsplug.plex.playlist_import']) 217 | else: 218 | importlib.import_module('beetsplug.plex.playlist_import') 219 | self.module = importlib.import_module('beetsplug.plex.playlist_import') 220 | self.search_calls = [] 221 | 222 | def _stub_search(query, limit, cache): 223 | self.search_calls.append((query, limit)) 224 | return [{'title': 'Q'}] 225 | 226 | self.module.import_yt_search = _stub_search 227 | self.module.import_yt_playlist = lambda url, cache: [] 228 | self.module.import_gaana_playlist = lambda url, cache: [] 229 | self.module.import_tidal_playlist = lambda url, cache: [] 230 | 231 | def test_add_songs_to_plex_adds_matches(self): 232 | logger = DummyLogger() 233 | plugin = PluginStub(logger) 234 | songs = [{'title': 'One'}, {'title': 'Two'}] 235 | 236 | self.module.add_songs_to_plex(plugin, 'Mix', songs) 237 | 238 | self.assertEqual(plugin.added, (['match-One', 'match-Two'], 'Mix')) 239 | self.assertFalse(plugin.last_manual) 240 | 241 | def test_add_songs_to_plex_warns_when_empty(self): 242 | logger = DummyLogger() 243 | 244 | class EmptyPlugin(PluginStub): 245 | def search_plex_song(self, song, manual_search=False): 246 | self.last_manual = manual_search 247 | return None 248 | 249 | plugin = EmptyPlugin(logger) 250 | self.module.add_songs_to_plex(plugin, 'Empty', [{'title': 'Zero'}]) 251 | 252 | self.assertIsNone(plugin.added) 253 | self.assertTrue(any(level == 'warning' for level, _ in logger.messages)) 254 | 255 | def test_import_playlist_spotify_flow(self): 256 | logger = DummyLogger() 257 | 258 | class SpotifyPlugin(PluginStub): 259 | def __init__(self, logger): 260 | super().__init__(logger) 261 | self.imported_id = None 262 | 263 | def import_spotify_playlist(self, playlist_id): 264 | self.imported_id = playlist_id 265 | return [{'title': 'Track'}] 266 | 267 | plugin = SpotifyPlugin(logger) 268 | self.module.import_playlist(plugin, 'MyMix', 'https://open.spotify.com/playlist/demo') 269 | 270 | self.assertEqual(plugin.imported_id, 'list-id') 271 | self.assertEqual(plugin.added, (['match-Track'], 'MyMix')) 272 | self.assertFalse(plugin.last_manual) 273 | 274 | def test_import_playlist_requires_url(self): 275 | logger = DummyLogger() 276 | plugin = PluginStub(logger) 277 | from beets import ui 278 | 279 | with self.assertRaises(self.UserError): 280 | self.module.import_playlist(plugin, 'Test', None) 281 | 282 | def test_import_search(self): 283 | logger = DummyLogger() 284 | plugin = PluginStub(logger) 285 | 286 | self.module.import_search(plugin, 'SearchMix', 'query', limit=5) 287 | 288 | self.assertEqual(plugin.added, (['match-Q'], 'SearchMix')) 289 | self.assertEqual(self.search_calls[-1], ('query', 5)) 290 | 291 | 292 | if __name__ == '__main__': 293 | unittest.main() 294 | -------------------------------------------------------------------------------- /beetsplug/plex/playlist_import.py: -------------------------------------------------------------------------------- 1 | """Helpers for importing playlists into Plex.""" 2 | 3 | from __future__ import annotations 4 | 5 | from beets import ui 6 | 7 | from beetsplug.core.config import get_plexsync_config 8 | from beetsplug.providers.gaana import import_gaana_playlist 9 | from beetsplug.providers.youtube import import_yt_playlist, import_yt_search 10 | from 
beetsplug.providers.tidal import import_tidal_playlist 11 | from beetsplug.plex import smartplaylists 12 | 13 | 14 | def import_playlist(plugin, playlist, playlist_url=None, listenbrainz=False): 15 | """Import a playlist into Plex using the plugin context.""" 16 | if listenbrainz: 17 | try: 18 | from beetsplug.listenbrainz import ListenBrainzPlugin 19 | except ModuleNotFoundError: 20 | plugin._log.error("ListenBrainz plugin not installed") 21 | return 22 | 23 | try: 24 | lb = ListenBrainzPlugin() 25 | except Exception as exc: # noqa: BLE001 - propagate details to log 26 | plugin._log.error("Unable to initialize ListenBrainz plugin. Error: {}", exc) 27 | return 28 | 29 | plugin._log.info("Importing weekly jams playlist") 30 | weekly_jams = lb.get_weekly_jams() 31 | plugin._log.info("Importing {} songs from Weekly Jams", len(weekly_jams)) 32 | add_songs_to_plex(plugin, "Weekly Jams", weekly_jams) 33 | 34 | plugin._log.info("Importing weekly exploration playlist") 35 | weekly_exploration = lb.get_weekly_exploration() 36 | plugin._log.info( 37 | "Importing {} songs from Weekly Exploration", len(weekly_exploration) 38 | ) 39 | add_songs_to_plex(plugin, "Weekly Exploration", weekly_exploration) 40 | return 41 | 42 | if playlist_url is None or ("http://" not in playlist_url and "https://" not in playlist_url): 43 | raise ui.UserError("Playlist URL not provided") 44 | 45 | if "apple" in playlist_url: 46 | songs = plugin.import_apple_playlist(playlist_url) 47 | elif "jiosaavn" in playlist_url: 48 | songs = plugin.import_jiosaavn_playlist(playlist_url) 49 | elif "gaana.com" in playlist_url: 50 | songs = import_gaana_playlist(playlist_url, plugin.cache) 51 | elif "spotify" in playlist_url: 52 | songs = plugin.import_spotify_playlist(plugin.get_playlist_id(playlist_url)) 53 | elif "youtube" in playlist_url: 54 | songs = import_yt_playlist(playlist_url, plugin.cache) 55 | elif "tidal" in playlist_url: 56 | songs = import_tidal_playlist(playlist_url, plugin.cache) 57 | else: 58 | songs = [] 59 | plugin._log.error("Playlist URL not supported") 60 | 61 | plugin._log.info("Importing {} songs from {}", len(songs), playlist_url) 62 | add_songs_to_plex(plugin, playlist, songs) 63 | 64 | 65 | def add_songs_to_plex(plugin, playlist, songs, manual_search=None): 66 | """Add a list of songs to a Plex playlist via the plugin.""" 67 | if manual_search is None: 68 | manual_search = get_plexsync_config("manual_search", bool, False) 69 | 70 | songs_to_process = list(songs or []) 71 | progress = plugin.create_progress_counter( 72 | len(songs_to_process), 73 | f"Matching Plex tracks for {playlist}", 74 | unit="song", 75 | ) 76 | 77 | song_list = [] 78 | try: 79 | for song in songs_to_process: 80 | found = plugin.search_plex_song(song, manual_search) 81 | if found is not None: 82 | song_list.append(found) 83 | if progress is not None: 84 | progress.update() 85 | finally: 86 | if progress is not None: 87 | try: 88 | progress.close() 89 | except Exception: # noqa: BLE001 - progress is optional feedback 90 | plugin._log.debug("Unable to close progress counter for playlist {}", playlist) 91 | 92 | if not song_list: 93 | plugin._log.warning("No songs found to add to playlist {}", playlist) 94 | return 95 | 96 | plugin._plex_add_playlist_item(song_list, playlist) 97 | 98 | 99 | def import_search(plugin, playlist, search, limit=10): 100 | """Import search results into Plex for the given playlist.""" 101 | plugin._log.info("Searching for {}", search) 102 | songs = list(import_yt_search(search, limit, plugin.cache) or []) 103 | 
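    # create_progress_counter() may return None (progress display is optional),
    # so every update()/close() below is guarded, and a failed close() is only
    # logged at debug level.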
progress = plugin.create_progress_counter( 104 | len(songs), 105 | f"Resolving search results for {playlist}", 106 | unit="song", 107 | ) 108 | song_list = [] 109 | try: 110 | for song in songs: 111 | found = plugin.search_plex_song(song) 112 | if found is not None: 113 | song_list.append(found) 114 | if progress is not None: 115 | progress.update() 116 | finally: 117 | if progress is not None: 118 | try: 119 | progress.close() 120 | except Exception: # noqa: BLE001 - best effort feedback 121 | plugin._log.debug("Unable to close search progress counter for playlist {}", playlist) 122 | plugin._plex_add_playlist_item(song_list, playlist) 123 | 124 | 125 | def generate_imported_playlist(plugin, lib, playlist_config, plex_lookup=None): 126 | """Generate imported playlist from various sources based on config.""" 127 | from datetime import datetime as _dt 128 | from beetsplug.core.config import get_config_value, get_plexsync_config 129 | from beetsplug.providers.m3u8 import import_m3u8_playlist 130 | from beetsplug.providers.http_post import import_post_playlist 131 | 132 | playlist_name = playlist_config.get("name", "Imported Playlist") 133 | sources = playlist_config.get("sources", []) 134 | max_tracks = playlist_config.get("max_tracks", None) 135 | import os 136 | log_file = os.path.join(plugin.config_dir, f"{playlist_name.lower().replace(' ', '_')}_import.log") 137 | 138 | with open(log_file, 'w', encoding='utf-8') as f: 139 | f.write(f"Import log for playlist: {playlist_name}\n") 140 | f.write(f"Import started at: {_dt.now().strftime('%Y-%m-%d %H:%M:%S')}\n") 141 | f.write("-" * 80 + "\n\n") 142 | 143 | defaults_cfg = get_plexsync_config(["playlists", "defaults"], dict, {}) 144 | manual_search = get_config_value( 145 | playlist_config, defaults_cfg, "manual_search", get_plexsync_config("manual_search", bool, False) 146 | ) 147 | clear_playlist = get_config_value( 148 | playlist_config, defaults_cfg, "clear_playlist", False 149 | ) 150 | 151 | if not sources: 152 | plugin._log.warning("No sources defined for imported playlist {}", playlist_name) 153 | return 154 | 155 | plugin._log.info("Generating imported playlist {} from {} sources", playlist_name, len(sources)) 156 | all_tracks = [] 157 | source_progress = plugin.create_progress_counter( 158 | total=len(sources), 159 | desc=f"{playlist_name[:18]} src", 160 | unit="source", 161 | ) 162 | 163 | try: 164 | for source in sources: 165 | try: 166 | tracks = [] 167 | src_desc = None 168 | # String source (URL or file) 169 | if isinstance(source, str): 170 | src_desc = source 171 | low = source.lower() 172 | if low.endswith('.m3u8'): 173 | # Resolve relative path under config dir 174 | if not os.path.isabs(source): 175 | source = os.path.join(plugin.config_dir, source) 176 | plugin._log.info("Importing from M3U8: {}", source) 177 | tracks = import_m3u8_playlist(source, plugin.cache) 178 | elif 'spotify' in low: 179 | from beetsplug.providers.spotify import get_playlist_id as _get_pl_id 180 | plugin._log.info("Importing from Spotify URL") 181 | tracks = plugin.import_spotify_playlist(_get_pl_id(source)) 182 | elif 'jiosaavn' in low: 183 | plugin._log.info("Importing from JioSaavn URL") 184 | tracks = plugin.import_jiosaavn_playlist(source) 185 | elif 'apple' in low: 186 | plugin._log.info("Importing from Apple Music URL") 187 | tracks = plugin.import_apple_playlist(source) 188 | elif 'gaana' in low: 189 | plugin._log.info("Importing from Gaana URL") 190 | tracks = import_gaana_playlist(source, plugin.cache) 191 | elif 'youtube' in low: 192 | 
plugin._log.info("Importing from YouTube URL") 193 | tracks = import_yt_playlist(source, plugin.cache) 194 | elif 'tidal' in low: 195 | plugin._log.info("Importing from Tidal URL") 196 | tracks = import_tidal_playlist(source, plugin.cache) 197 | else: 198 | plugin._log.warning("Unsupported string source: {}", source) 199 | # Dict source (typed) 200 | elif isinstance(source, dict): 201 | source_type = source.get("type") 202 | src_desc = source_type or "Unknown" 203 | if source_type == "Apple Music": 204 | plugin._log.info("Importing from Apple Music: {}", source.get("name", "")) 205 | tracks = plugin.import_apple_playlist(source.get("url", "")) 206 | elif source_type == "JioSaavn": 207 | plugin._log.info("Importing from JioSaavn: {}", source.get("name", "")) 208 | tracks = plugin.import_jiosaavn_playlist(source.get("url", "")) 209 | elif source_type == "Gaana": 210 | plugin._log.info("Importing from Gaana: {}", source.get("name", "")) 211 | tracks = import_gaana_playlist(source.get("url", ""), plugin.cache) 212 | elif source_type == "Spotify": 213 | plugin._log.info("Importing from Spotify: {}", source.get("name", "")) 214 | from beetsplug.providers.spotify import get_playlist_id as _get_pl_id 215 | tracks = plugin.import_spotify_playlist(_get_pl_id(source.get("url", ""))) 216 | elif source_type == "YouTube": 217 | plugin._log.info("Importing from YouTube: {}", source.get("name", "")) 218 | tracks = import_yt_playlist(source.get("url", ""), plugin.cache) 219 | elif source_type == "Tidal": 220 | plugin._log.info("Importing from Tidal: {}", source.get("name", "")) 221 | tracks = import_tidal_playlist(source.get("url", ""), plugin.cache) 222 | elif source_type == "M3U8": 223 | fp = source.get("filepath", "") 224 | if fp and not os.path.isabs(fp): 225 | fp = os.path.join(plugin.config_dir, fp) 226 | plugin._log.info("Importing from M3U8: {}", fp) 227 | tracks = import_m3u8_playlist(fp, plugin.cache) 228 | elif source_type == "POST": 229 | plugin._log.info("Importing from POST endpoint") 230 | tracks = import_post_playlist(source, plugin.cache) 231 | else: 232 | plugin._log.warning("Unsupported source type: {}", source_type) 233 | else: 234 | src_desc = str(type(source)) 235 | plugin._log.warning("Invalid source format: {}", src_desc) 236 | 237 | if tracks: 238 | plugin._log.info("Imported {} tracks from {}", len(tracks), src_desc) 239 | all_tracks.extend(tracks) 240 | except Exception as e: 241 | plugin._log.error("Error importing from {}: {}", src_desc or "Unknown", e) 242 | continue 243 | finally: 244 | if source_progress is not None: 245 | try: 246 | source_progress.update() 247 | except Exception: 248 | plugin._log.debug("Failed to update source progress for {}", playlist_name) 249 | finally: 250 | if source_progress is not None: 251 | try: 252 | source_progress.close() 253 | except Exception: 254 | plugin._log.debug("Failed to close source progress for {}", playlist_name) 255 | 256 | unique_tracks = [] 257 | seen = set() 258 | for t in all_tracks: 259 | # Some sources may set explicit None values; normalize to empty strings before lowercasing 260 | key = ( 261 | (t.get('title') or '').lower(), 262 | (t.get('artist') or '').lower(), 263 | (t.get('album') or '').lower(), 264 | ) 265 | if key not in seen: 266 | seen.add(key) 267 | unique_tracks.append(t) 268 | 269 | plugin._log.info("Found {} unique tracks across sources", len(unique_tracks)) 270 | 271 | matched_songs = [] 272 | match_progress = plugin.create_progress_counter( 273 | total=len(unique_tracks), 274 | desc=f"{playlist_name[:18]} 
match", 275 | unit="track", 276 | ) 277 | 278 | try: 279 | for song in unique_tracks: 280 | found = plugin.search_plex_song(song, manual_search) 281 | if found is not None: 282 | matched_songs.append(found) 283 | if match_progress is not None: 284 | try: 285 | match_progress.update() 286 | except Exception: 287 | plugin._log.debug("Failed to update match progress for {}", playlist_name) 288 | finally: 289 | if match_progress is not None: 290 | try: 291 | match_progress.close() 292 | except Exception: 293 | plugin._log.debug("Failed to close match progress for {}", playlist_name) 294 | 295 | plugin._log.info("Matched {} tracks in Plex", len(matched_songs)) 296 | 297 | if max_tracks: 298 | matched_songs = matched_songs[:max_tracks] 299 | 300 | # Apply filters to matched songs if filters are defined in the playlist config 301 | filters = playlist_config.get("filters", {}) 302 | if filters: 303 | matched_songs = smartplaylists.apply_playlist_filters(plugin, matched_songs, filters) 304 | 305 | unique_matched = [] 306 | seen_keys = set() 307 | for track in matched_songs: 308 | key = getattr(track, 'ratingKey', None) 309 | if key and key not in seen_keys: 310 | seen_keys.add(key) 311 | unique_matched.append(track) 312 | 313 | with open(log_file, 'a', encoding='utf-8') as f: 314 | f.write("\nImport Summary:\n") 315 | f.write(f"Total tracks fetched from sources: {len(all_tracks)}\n") 316 | f.write(f"Unique tracks after de-duplication: {len(unique_tracks)}\n") 317 | if filters: 318 | f.write(f"Tracks after applying filters: {len(matched_songs)}\n") 319 | f.write(f"Tracks matched and added: {len(unique_matched)}\n") 320 | f.write(f"\nImport completed at: {_dt.now().strftime('%Y-%m-%d %H:%M:%S')}\n") 321 | 322 | plugin._log.info("Found {} unique tracks after filtering (see {} for details)", len(unique_matched), log_file) 323 | 324 | if clear_playlist: 325 | try: 326 | plugin._plex_clear_playlist(playlist_name) 327 | plugin._log.info("Cleared existing playlist {}", playlist_name) 328 | except Exception: 329 | plugin._log.debug("No existing playlist {} found", playlist_name) 330 | 331 | if unique_matched: 332 | plugin._plex_add_playlist_item(unique_matched, playlist_name) 333 | plugin._log.info("Successfully created playlist {} with {} tracks", playlist_name, len(unique_matched)) 334 | else: 335 | plugin._log.warning("No tracks remaining after filtering for {}", playlist_name) -------------------------------------------------------------------------------- /beetsplug/plex/manual_search.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | """Shared helpers for interactive manual Plex searches.""" 4 | 5 | from typing import Iterable, Dict, List, Optional 6 | 7 | from beets import ui 8 | from beets.ui import input_, print_ 9 | 10 | from beetsplug.utils.helpers import highlight_matches 11 | from beetsplug.core.matching import get_fuzzy_score 12 | 13 | 14 | def _render_actions() -> str: 15 | return ( 16 | ui.colorize('action', 'a') + ui.colorize('text_highlight_minor', ': Abort') + ' ' 17 | + ui.colorize('action', 's') + ui.colorize('text_highlight_minor', ': Skip') + ' ' 18 | + ui.colorize('action', 'e') + ui.colorize('text_highlight_minor', ': Enter manual search') + '\n' 19 | ) 20 | 21 | 22 | def review_candidate_confirmations( 23 | plugin, 24 | queued_candidates: List[Dict[str, object]], 25 | fallback_song: Optional[Dict[str, str]] = None, 26 | *, 27 | current_cache_key: Optional[str] = None, 28 | ) -> Dict[str, object]: 29 | 
"""Present queued confirmation candidates and return the user's choice.""" 30 | if not queued_candidates: 31 | return {"action": "skip"} 32 | 33 | aggregated: List[Dict[str, object]] = [] 34 | dedupe_map: Dict[object, Dict[str, object]] = {} 35 | 36 | for candidate in queued_candidates: 37 | track = candidate.get("track") 38 | if track is None: 39 | continue 40 | 41 | try: 42 | rating_key = getattr(track, "ratingKey", None) 43 | except Exception: # noqa: BLE001 - tolerate unexpected track objects 44 | rating_key = None 45 | dedupe_key = rating_key if rating_key is not None else id(track) 46 | 47 | similarity = float(candidate.get("similarity", 0.0) or 0.0) 48 | cache_key = candidate.get("cache_key") 49 | source = candidate.get("source") or "candidate" 50 | 51 | entry = dedupe_map.get(dedupe_key) 52 | if entry is None: 53 | entry = { 54 | "track": track, 55 | "similarity": similarity, 56 | "sources": {source}, 57 | "cache_keys": {cache_key} if cache_key else set(), 58 | "candidates": [candidate], 59 | "original_songs": [], 60 | } 61 | dedupe_map[dedupe_key] = entry 62 | aggregated.append(entry) 63 | else: 64 | entry["similarity"] = max(entry["similarity"], similarity) 65 | entry["sources"].add(source) 66 | if cache_key: 67 | entry["cache_keys"].add(cache_key) 68 | entry["candidates"].append(candidate) 69 | 70 | original_song = candidate.get("song") 71 | if isinstance(original_song, dict): 72 | entry["original_songs"].append(original_song) 73 | 74 | if not aggregated: 75 | return {"action": "skip"} 76 | 77 | aggregated.sort(key=lambda item: item.get("similarity", 0.0), reverse=True) 78 | 79 | reference_song = fallback_song or {} 80 | ref_title = reference_song.get("title", "") 81 | ref_album = reference_song.get("album", "Unknown") 82 | ref_artist = reference_song.get("artist", "") 83 | 84 | header = ( 85 | ui.colorize('text_highlight', '\nReview candidate matches for: ') 86 | + ui.colorize('text_highlight_minor', f"{ref_album} - {ref_title} - {ref_artist}") 87 | ) 88 | print_(header) 89 | 90 | for index, entry in enumerate(aggregated, start=1): 91 | track = entry["track"] 92 | try: 93 | track_title = getattr(track, "title", "") or "" 94 | track_album = getattr(track, "parentTitle", "") or "" 95 | track_artist = getattr(track, "originalTitle", None) or track.artist().title 96 | except Exception: # noqa: BLE001 - tolerate Plex quirks 97 | track_title = getattr(track, "title", "") or "" 98 | track_album = getattr(track, "parentTitle", "") or "" 99 | track_artist = "" 100 | 101 | query_song = entry["original_songs"][0] if entry["original_songs"] else reference_song 102 | query_title = (query_song or {}).get("title", "") 103 | query_album = (query_song or {}).get("album", "") 104 | query_artist = (query_song or {}).get("artist", "") 105 | 106 | highlighted_title = highlight_matches(query_title, track_title) 107 | highlighted_album = highlight_matches(query_album or ref_album, track_album) 108 | highlighted_artist = highlight_matches(query_artist or ref_artist, track_artist) 109 | 110 | similarity = entry.get("similarity", 0.0) or 0.0 111 | if similarity >= 0.8: 112 | score_color = 'text_success' 113 | elif similarity >= 0.5: 114 | score_color = 'text_warning' 115 | else: 116 | score_color = 'text_error' 117 | 118 | sources = ", ".join(sorted(entry.get("sources", []))) or "candidate" 119 | print_( 120 | f"{ui.colorize('action', str(index))}. 
{highlighted_album} - {highlighted_title} - " 121 | f"{highlighted_artist} (Match: {ui.colorize(score_color, f'{similarity:.2f}')}, " 122 | f"Sources: {ui.colorize('text_highlight_minor', sources)})" 123 | ) 124 | 125 | if query_song and ( 126 | query_title != ref_title or query_album != ref_album or query_artist != ref_artist 127 | ): 128 | print_( 129 | ui.colorize( 130 | 'text_highlight_minor', 131 | f" Based on query: {query_album or 'Unknown'} - {query_title} - {query_artist}", 132 | ) 133 | ) 134 | 135 | print_(ui.colorize('text_highlight', '\nActions:')) 136 | print_(ui.colorize('text_highlight_minor', ' #: Select match by number')) 137 | print_(_render_actions()) 138 | 139 | selection = ui.input_options( 140 | ("aBort", "Skip", "Enter manual search"), 141 | numrange=(1, len(aggregated)), 142 | default=1, 143 | ) 144 | 145 | if isinstance(selection, int) and selection > 0: 146 | entry = aggregated[selection - 1] 147 | cache_keys = entry.get("cache_keys") or set() 148 | chosen_cache_key = None 149 | if current_cache_key and current_cache_key in cache_keys: 150 | chosen_cache_key = current_cache_key 151 | elif cache_keys: 152 | # Stable choice: prefer cache key from first candidate appended. 153 | for candidate in entry.get("candidates", []): 154 | candidate_cache_key = candidate.get("cache_key") 155 | if candidate_cache_key: 156 | chosen_cache_key = candidate_cache_key 157 | break 158 | if not chosen_cache_key: 159 | chosen_cache_key = current_cache_key 160 | 161 | chosen_candidate = None 162 | for candidate in entry.get("candidates", []): 163 | candidate_cache_key = candidate.get("cache_key") 164 | if candidate_cache_key == chosen_cache_key: 165 | chosen_candidate = candidate 166 | break 167 | if chosen_candidate is None and entry.get("candidates"): 168 | chosen_candidate = entry["candidates"][0] 169 | 170 | return { 171 | "action": "selected", 172 | "track": entry["track"], 173 | "cache_key": chosen_cache_key, 174 | "similarity": entry.get("similarity", 0.0), 175 | "sources": sorted(entry.get("sources", [])), 176 | "original_song": (chosen_candidate or {}).get("song") 177 | if isinstance(chosen_candidate, dict) 178 | else None, 179 | } 180 | 181 | if selection in ("b", "B"): 182 | return {"action": "abort"} 183 | if selection in ("s", "S"): 184 | return {"action": "skip"} 185 | if selection in ("e", "E"): 186 | return {"action": "manual"} 187 | 188 | return {"action": "skip"} 189 | 190 | 191 | def handle_manual_search(plugin, sorted_tracks, song, original_query=None): 192 | """Display the manual selection UI and return the chosen Plex track.""" 193 | source_title = song.get("title", "") 194 | source_album = song.get("album", "Unknown") 195 | source_artist = song.get("artist", "") 196 | 197 | header = ( 198 | ui.colorize('text_highlight', '\nChoose candidates for: ') 199 | + ui.colorize('text_highlight_minor', f"{source_album} - {source_title} - {source_artist}") 200 | ) 201 | print_(header) 202 | 203 | for index, (track, score) in enumerate(sorted_tracks, start=1): 204 | track_artist = getattr(track, 'originalTitle', None) or track.artist().title 205 | highlighted_title = highlight_matches(source_title, track.title) 206 | highlighted_album = highlight_matches(source_album, track.parentTitle) 207 | highlighted_artist = highlight_matches(source_artist, track_artist) 208 | 209 | if score >= 0.8: 210 | score_color = 'text_success' 211 | elif score >= 0.5: 212 | score_color = 'text_warning' 213 | else: 214 | score_color = 'text_error' 215 | 216 | print_( 217 | f"{index}. 
{highlighted_album} - {highlighted_title} - {highlighted_artist} " 218 | f"(Match: {ui.colorize(score_color, f'{score:.2f}')})" 219 | ) 220 | 221 | print_(ui.colorize('text_highlight', '\nActions:')) 222 | print_(ui.colorize('text_highlight_minor', ' #: Select match by number')) 223 | print_(_render_actions()) 224 | 225 | sel = ui.input_options(("aBort", "Skip", "Enter"), numrange=(1, len(sorted_tracks)), default=1) 226 | 227 | if sel in ("b", "B"): 228 | return None 229 | if sel in ("s", "S"): 230 | _store_negative_cache(plugin, song, original_query) 231 | return None 232 | if sel in ("e", "E"): 233 | return manual_track_search(plugin, original_query if original_query is not None else song) 234 | 235 | selected_track = sorted_tracks[sel - 1][0] if sel > 0 else None 236 | if selected_track: 237 | _cache_selection(plugin, song, selected_track, original_query) 238 | return selected_track 239 | 240 | 241 | def manual_track_search(plugin, original_query=None): 242 | """Interactively search for a Plex track.""" 243 | print_(ui.colorize('text_highlight', '\nManual Search')) 244 | print_('Enter search criteria (empty to skip):') 245 | 246 | title = input_(ui.colorize('text_highlight_minor', 'Title: ')).strip() 247 | album = input_(ui.colorize('text_highlight_minor', 'Album: ')).strip() 248 | artist = input_(ui.colorize('text_highlight_minor', 'Artist: ')).strip() 249 | 250 | plugin._log.debug("Searching with title='{}', album='{}', artist='{}'", title, album, artist) 251 | 252 | tracks = _run_manual_search_queries(plugin, title, album, artist) 253 | if not tracks: 254 | plugin._log.info("No matching tracks found") 255 | return None 256 | 257 | filtered_tracks = _filter_tracks(plugin, tracks, title, album, artist) 258 | if not filtered_tracks: 259 | plugin._log.info("No matching tracks found after filtering") 260 | return None 261 | 262 | song_dict = { 263 | "title": title or "", 264 | "album": album or "", 265 | "artist": artist or "", 266 | } 267 | 268 | sorted_tracks = plugin.find_closest_match(song_dict, filtered_tracks) 269 | header = ( 270 | ui.colorize('text_highlight', '\nChoose candidates for: ') 271 | + ui.colorize('text_highlight_minor', f"{album} - {title} - {artist}") 272 | ) 273 | print_(header) 274 | 275 | for index, (track, score) in enumerate(sorted_tracks, start=1): 276 | track_artist = getattr(track, 'originalTitle', None) or track.artist().title 277 | highlighted_title = highlight_matches(title, track.title) 278 | highlighted_album = highlight_matches(album, track.parentTitle) 279 | highlighted_artist = highlight_matches(artist, track_artist) 280 | 281 | if score >= 0.8: 282 | score_color = 'text_success' 283 | elif score >= 0.5: 284 | score_color = 'text_warning' 285 | else: 286 | score_color = 'text_error' 287 | 288 | print_( 289 | f"{ui.colorize('action', str(index))}. 
{highlighted_album} - {highlighted_title} - " 290 | f"{highlighted_artist} (Match: {ui.colorize(score_color, f'{score:.2f}')})" 291 | ) 292 | 293 | print_(ui.colorize('text_highlight', '\nActions:')) 294 | print_(ui.colorize('text_highlight_minor', ' #: Select match by number')) 295 | print_(_render_actions()) 296 | 297 | sel = ui.input_options(("aBort", "Skip", "Enter"), numrange=(1, len(sorted_tracks)), default=1) 298 | 299 | if sel in ("b", "B"): 300 | return None 301 | if sel in ("s", "S"): 302 | _store_negative_cache(plugin, song_dict, original_query) 303 | return None 304 | if sel in ("e", "E"): 305 | return manual_track_search(plugin, original_query) 306 | 307 | selected_track = sorted_tracks[sel - 1][0] if sel > 0 else None 308 | if selected_track: 309 | _cache_selection(plugin, song_dict, selected_track, original_query) 310 | return selected_track 311 | 312 | 313 | def _run_manual_search_queries(plugin, title: str, album: str, artist: str): 314 | tracks = [] 315 | try: 316 | if album and any(x in album.lower() for x in ('movie', 'soundtrack', 'original')): 317 | tracks = plugin.music.searchTracks(**{"album.title": album}, limit=100) 318 | plugin._log.debug("Album-first search found {} tracks", len(tracks)) 319 | 320 | if not tracks and album and title: 321 | tracks = plugin.music.searchTracks( 322 | **{"album.title": album, "track.title": title}, 323 | limit=100, 324 | ) 325 | plugin._log.debug("Combined album-title search found {} tracks", len(tracks)) 326 | 327 | if not tracks and album: 328 | tracks = plugin.music.searchTracks(**{"album.title": album}, limit=100) 329 | plugin._log.debug("Album-only search found {} tracks", len(tracks)) 330 | 331 | if not tracks and title: 332 | tracks = plugin.music.searchTracks(**{"track.title": title}, limit=100) 333 | plugin._log.debug("Title-only search found {} tracks", len(tracks)) 334 | 335 | if not tracks and artist: 336 | tracks = plugin.music.searchTracks(**{"artist.title": artist}, limit=100) 337 | plugin._log.debug("Artist-only search found {} tracks", len(tracks)) 338 | except Exception as exc: 339 | plugin._log.error("Error during manual search query: {}", exc) 340 | tracks = [] 341 | return tracks 342 | 343 | 344 | def _filter_tracks(plugin, tracks: Iterable, title: str, album: str, artist: str): 345 | filtered = [] 346 | for track in tracks: 347 | track_artist = getattr(track, 'originalTitle', None) or track.artist().title 348 | track_album = track.parentTitle 349 | track_title = track.title 350 | 351 | plugin._log.debug("Considering track: {} - {} - {}", track_album, track_title, track_artist) 352 | 353 | title_match = not title or get_fuzzy_score(title.lower(), track_title.lower()) > 0.4 354 | album_match = not album or get_fuzzy_score(album.lower(), track_album.lower()) > 0.4 355 | 356 | artist_match = True 357 | if artist: 358 | track_artists = {a.strip().lower() for a in track_artist.split(',')} 359 | search_artists = {a.strip().lower() for a in artist.split(',')} 360 | common_artists = track_artists.intersection(search_artists) 361 | total_artists = track_artists.union(search_artists) 362 | artist_score = len(common_artists) / len(total_artists) if total_artists else 0 363 | artist_match = artist_score >= 0.3 364 | 365 | perfect_album = album and track_album and album.lower() == track_album.lower() 366 | strong_title = title and get_fuzzy_score(title.lower(), track_title.lower()) > 0.8 367 | standard_match = title_match and album_match and artist_match 368 | 369 | if perfect_album or strong_title or standard_match: 370 | 
filtered.append(track) 371 | plugin._log.debug( 372 | "Matched: {} - {} - {} (Perfect album: {}, Strong title: {}, Standard: {})", 373 | track_album, 374 | track_title, 375 | track_artist, 376 | perfect_album, 377 | strong_title, 378 | standard_match, 379 | ) 380 | return filtered 381 | 382 | 383 | def _store_negative_cache(plugin, song, original_query=None): 384 | plugin._log.debug("User skipped, storing negative cache result.") 385 | query = None 386 | if original_query and original_query.get('title') and original_query['title'].strip(): 387 | query = original_query 388 | elif song.get('title') and song['title'].strip(): 389 | query = song 390 | 391 | if query: 392 | cache_key = plugin.cache._make_cache_key(query) 393 | plugin._cache_result(cache_key, None) 394 | else: 395 | plugin._log.debug("No suitable query to store negative cache against for skip.") 396 | 397 | 398 | def _cache_selection(plugin, song, track, original_query=None): 399 | """Cache the manual selection result where appropriate.""" 400 | 401 | cached_original = False 402 | if original_query: 403 | original_key = plugin.cache._make_cache_key(original_query) 404 | plugin._log.debug("Caching result for original query key: {}", original_query) 405 | plugin._cache_result(original_key, track) 406 | cached_original = True 407 | 408 | if not cached_original: 409 | plugin._log.debug( 410 | "Skipping cache write for manual search input: {} (no original query)", song 411 | ) 412 | -------------------------------------------------------------------------------- /beetsplug/providers/spotify.py: -------------------------------------------------------------------------------- 1 | """Spotify provider helpers extracted from plexsync. 2 | 3 | These functions operate on the plugin instance to keep behavior identical. 4 | They do not change cache key formats or returned structures. 
5 | """ 6 | 7 | import os 8 | import re 9 | import json 10 | from typing import Any, Dict, List, Optional 11 | from collections import Counter 12 | 13 | import dateutil.parser 14 | import requests 15 | from bs4 import BeautifulSoup 16 | import spotipy 17 | from spotipy.oauth2 import SpotifyOAuth 18 | from spotipy.exceptions import SpotifyOauthError 19 | 20 | from beets import config 21 | from beetsplug.utils.helpers import parse_title, clean_album_name 22 | 23 | 24 | def _clear_cached_token(plugin) -> None: 25 | """Remove any cached Spotify token file.""" 26 | handler = getattr(plugin, "auth_manager", None) 27 | if handler is None: 28 | return 29 | 30 | cache_handler = handler.cache_handler 31 | if hasattr(cache_handler, "delete_cached_token"): 32 | cache_handler.delete_cached_token() 33 | return 34 | 35 | cache_path = getattr(cache_handler, "cache_path", None) or getattr( 36 | plugin, "plexsync_token", None 37 | ) 38 | if not cache_path: 39 | return 40 | 41 | try: 42 | os.remove(cache_path) 43 | plugin._log.debug("Deleted Spotify cache file {}", cache_path) 44 | except FileNotFoundError: 45 | pass 46 | except OSError as exc: 47 | plugin._log.debug("Failed to delete Spotify cache file {}: {}", cache_path, exc) 48 | 49 | 50 | def authenticate(plugin) -> None: 51 | """Authenticate Spotify, storing `sp` on the plugin identical to before.""" 52 | ID = config["spotify"]["client_id"].get() 53 | SECRET = config["spotify"]["client_secret"].get() 54 | redirect_uri = "http://127.0.0.1/" 55 | scope = ( 56 | "user-read-private user-read-email playlist-modify-public " 57 | "playlist-modify-private playlist-read-private" 58 | ) 59 | 60 | plugin.auth_manager = SpotifyOAuth( 61 | client_id=ID, 62 | client_secret=SECRET, 63 | redirect_uri=redirect_uri, 64 | scope=scope, 65 | open_browser=False, 66 | cache_path=plugin.plexsync_token, 67 | ) 68 | try: 69 | plugin.token_info = plugin.auth_manager.get_cached_token() 70 | except SpotifyOauthError as exc: 71 | plugin._log.debug("Failed to load cached Spotify token: {}", exc) 72 | _clear_cached_token(plugin) 73 | plugin.token_info = None 74 | 75 | if not plugin.token_info: 76 | plugin.token_info = plugin.auth_manager.get_access_token(as_dict=True) 77 | else: 78 | try: 79 | need_token = plugin.auth_manager.is_token_expired(plugin.token_info) 80 | except (SpotifyOauthError, KeyError, TypeError) as exc: 81 | plugin._log.debug("Cached Spotify token missing metadata: {}", exc) 82 | need_token = True 83 | 84 | if need_token: 85 | try: 86 | plugin.token_info = plugin.auth_manager.refresh_access_token( 87 | plugin.token_info["refresh_token"] 88 | ) 89 | except SpotifyOauthError as exc: 90 | message = str(exc).lower() 91 | if "invalid_grant" in message: 92 | plugin._log.info( 93 | "Spotify refresh token revoked; requesting new authorization." 
94 |                     )
95 |                 _clear_cached_token(plugin)
96 |                 plugin.token_info = plugin.auth_manager.get_access_token(
97 |                     as_dict=True
98 |                 )
99 |             else:
100 |                 raise
101 | 
102 |     plugin.sp = spotipy.Spotify(auth=plugin.token_info.get("access_token"))
103 | 
104 | 
105 | def process_spotify_track(track: Dict[str, Any], logger) -> Optional[Dict[str, Any]]:
106 |     """Process a single Spotify track into a standardized dict."""
107 |     try:
108 |         if ('From "' in track['name']) or ("From &quot;" in track['name']):
109 |             title_orig = track['name'].replace("&quot;", '"')
110 |             title, album = parse_title(title_orig)
111 |         else:
112 |             title = track['name']
113 |             album = clean_album_name(track['album']['name'])
114 | 
115 |         try:
116 |             year = track['album'].get('release_date')
117 |             if year:
118 |                 year = dateutil.parser.parse(year, ignoretz=True)
119 |         except (ValueError, KeyError, AttributeError):
120 |             year = None
121 | 
122 |         artist = track['artists'][0]['name'] if track['artists'] else "Unknown"
123 | 
124 |         return {
125 |             "title": title.strip(),
126 |             "album": album.strip(),
127 |             "artist": artist.strip(),
128 |             "year": year
129 |         }
130 |     except Exception as e:
131 |         logger.debug("Error processing Spotify track: {}", e)
132 |         return None
133 | 
134 | 
135 | def get_playlist_id(url: str) -> str:
136 |     parts = url.split("/")
137 |     index = parts.index("playlist")
138 |     return parts[index + 1]
139 | 
140 | 
141 | def get_playlist_tracks(plugin, playlist_id: str) -> List[Dict[str, Any]]:
142 |     """Return list of track items for a Spotify playlist (all pages)."""
143 |     try:
144 |         tracks_response = plugin.sp.playlist_items(
145 |             playlist_id, additional_types=["track"]
146 |         )
147 |         tracks = tracks_response["items"]
148 |         while tracks_response["next"]:
149 |             tracks_response = plugin.sp.next(tracks_response)
150 |             tracks.extend(tracks_response["items"])
151 |         return tracks
152 |     except spotipy.exceptions.SpotifyException as e:
153 |         plugin._log.error("Failed to fetch playlist: {} - {}", playlist_id, str(e))
154 |         return []
155 | 
156 | 
157 | def import_spotify_playlist(plugin, playlist_id: str) -> List[Dict[str, Any]]:
158 |     """Import a Spotify playlist using API first, then fallback to scraping."""
159 |     song_list: List[Dict[str, Any]] = []
160 | 
161 |     cached_tracks = plugin.cache.get_playlist_cache(playlist_id, 'spotify_tracks')
162 |     if cached_tracks:
163 |         plugin._log.info("Using cached track list for Spotify playlist {}", playlist_id)
164 |         return cached_tracks
165 | 
166 |     try:
167 |         cached_api_data = plugin.cache.get_playlist_cache(playlist_id, 'spotify_api')
168 |         if cached_api_data:
169 |             songs = cached_api_data
170 |         else:
171 |             authenticate(plugin)
172 |             songs = get_playlist_tracks(plugin, playlist_id)
173 |             if songs:
174 |                 plugin.cache.set_playlist_cache(playlist_id, 'spotify_api', songs)
175 | 
176 |         if songs:
177 |             for song in songs:
178 |                 track_data = process_spotify_track(song["track"], plugin._log)
179 |                 if track_data:
180 |                     song_list.append(track_data)
181 | 
182 |         if song_list:
183 |             plugin._log.info("Successfully imported {} tracks via Spotify API", len(song_list))
184 |             plugin.cache.set_playlist_cache(playlist_id, 'spotify_tracks', song_list)
185 |             return song_list
186 | 
187 |     except Exception as e:
188 |         plugin._log.warning("Spotify API import failed: {}. 
Falling back to scraping.", e) 189 | 190 | cached_web_data = plugin.cache.get_playlist_cache(playlist_id, 'spotify_web') 191 | if cached_web_data: 192 | return cached_web_data 193 | 194 | try: 195 | playlist_url = f"https://open.spotify.com/playlist/{playlist_id}" 196 | response = requests.get(playlist_url, headers=plugin.headers) 197 | if response.status_code != 200: 198 | plugin._log.error("Failed to fetch playlist page: {}", response.status_code) 199 | return song_list 200 | 201 | soup = BeautifulSoup(response.text, "html.parser") 202 | 203 | meta_script = None 204 | for script in soup.find_all("script"): 205 | if script.string and "Spotify.Entity" in str(script.string): 206 | meta_script = script 207 | break 208 | 209 | if meta_script: 210 | json_text = re.search(r'Spotify\.Entity = ({.+});', str(meta_script.string)) 211 | if json_text: 212 | playlist_data = json.loads(json_text.group(1)) 213 | if 'tracks' in playlist_data: 214 | for track in playlist_data['tracks']['items']: 215 | if not track or not track.get('track'): 216 | continue 217 | track_data = track['track'] 218 | song_dict = { 219 | 'title': track_data.get('name', '').strip(), 220 | 'artist': track_data.get('artists', [{}])[0].get('name', '').strip(), 221 | 'album': track_data.get('album', {}).get('name', '').strip(), 222 | 'year': None 223 | } 224 | try: 225 | if 'release_date' in track_data.get('album', {}): 226 | year = track_data['album']['release_date'] 227 | if year: 228 | year = dateutil.parser.parse(year, ignoretz=True) 229 | song_dict['year'] = year 230 | except Exception: 231 | pass 232 | song_list.append(song_dict) 233 | else: 234 | # Fallback: try to find track links 235 | for link in soup.find_all('a', href=True): 236 | href = link['href'] 237 | if '/track/' in href: 238 | track_id = href.split('/track/')[-1].split('?')[0] 239 | track_url = f"https://open.spotify.com/track/{track_id}" 240 | try: 241 | track_page = requests.get(track_url, headers=plugin.headers) 242 | if track_page.status_code == 200: 243 | track_soup = BeautifulSoup(track_page.text, 'html.parser') 244 | title = track_soup.find('meta', {'property': 'og:title'}) 245 | description = track_soup.find('meta', {'property': 'og:description'}) 246 | if title and description: 247 | desc_parts = description['content'].split(' · ') 248 | song_dict = { 249 | 'title': title['content'].strip(), 250 | 'artist': desc_parts[0].strip() if len(desc_parts) > 0 else '', 251 | 'album': desc_parts[1].strip() if len(desc_parts) > 1 else '', 252 | 'year': None 253 | } 254 | song_list.append(song_dict) 255 | except Exception as e: 256 | plugin._log.debug("Error processing track {}: {}", track_url, e) 257 | 258 | if song_list: 259 | plugin._log.info("Successfully scraped {} tracks from Spotify playlist", len(song_list)) 260 | plugin.cache.set_playlist_cache(playlist_id, 'spotify_web', song_list) 261 | return song_list 262 | 263 | except Exception as e: 264 | plugin._log.error("Error scraping Spotify playlist: {}", e) 265 | return song_list 266 | 267 | return song_list 268 | 269 | 270 | def _fuzzy_score(a: str, b: str) -> float: 271 | from difflib import SequenceMatcher 272 | return SequenceMatcher(None, a.lower(), b.lower()).ratio() 273 | 274 | 275 | def search_spotify_track(plugin, beets_item) -> Optional[str]: 276 | """Search for a track on Spotify with fallback strategies.""" 277 | search_strategies = [ 278 | lambda: f"track:{beets_item.title} album:{beets_item.album} artist:{beets_item.artist}", 279 | lambda: f"track:{beets_item.title} album:{beets_item.album}", 280 | 
lambda: f"track:{beets_item.title} artist:{beets_item.artist}", 281 | lambda: f'"{beets_item.title}" "{beets_item.artist}"', 282 | lambda: f"{beets_item.title} {beets_item.artist}", 283 | ] 284 | 285 | for i, strategy in enumerate(search_strategies, 1): 286 | try: 287 | query = strategy() 288 | plugin._log.debug("Spotify search strategy {}: {}", i, query) 289 | 290 | spotify_search_results = plugin.sp.search( 291 | q=query, 292 | limit=10, 293 | type="track", 294 | ) 295 | 296 | if spotify_search_results["tracks"]["items"]: 297 | for track in spotify_search_results["tracks"]["items"]: 298 | if track.get('is_playable', True): 299 | track_title = track['name'].lower() 300 | original_title = beets_item.title.lower() 301 | track_artist = track['artists'][0]['name'].lower() 302 | original_artist = beets_item.artist.lower() 303 | 304 | title_match = (original_title in track_title or 305 | track_title in original_title or 306 | _fuzzy_score(original_title, track_title) > 0.6) 307 | artist_match = (original_artist in track_artist or 308 | track_artist in original_artist or 309 | _fuzzy_score(original_artist, track_artist) > 0.6) 310 | 311 | if title_match and artist_match: 312 | plugin._log.debug("Found playable match: {} - {} (strategy {})", 313 | track['name'], track['artists'][0]['name'], i) 314 | return track['id'] 315 | elif i >= 5: 316 | if title_match or artist_match: 317 | plugin._log.debug("Found loose match: {} - {} (strategy {})", 318 | track['name'], track['artists'][0]['name'], i) 319 | return track['id'] 320 | 321 | plugin._log.debug("Found {} results but no good matches for strategy {}", 322 | len(spotify_search_results["tracks"]["items"]), i) 323 | else: 324 | plugin._log.debug("No results for strategy {}", i) 325 | 326 | except Exception as e: 327 | plugin._log.debug("Error in search strategy {}: {}", i, e) 328 | continue 329 | 330 | return None 331 | 332 | 333 | def add_tracks_to_spotify_playlist(plugin, playlist_name: str, track_uris: List[str]) -> None: 334 | """Sync new tracks to top, keep existing order below. 335 | 336 | - Adds only the tracks missing from the current playlist at position 0 337 | while preserving their relative order. 338 | - Removes tracks that are no longer present in the target (optional cleanup). 339 | - Uses non-overlapping 100-size chunks to avoid duplication. 
340 | """ 341 | user_id = plugin.sp.current_user()["id"] 342 | playlists = plugin.sp.user_playlists(user_id) 343 | playlist_id = None 344 | for playlist in playlists["items"]: 345 | if playlist["name"].lower() == playlist_name.lower(): 346 | playlist_id = playlist["id"] 347 | break 348 | if not playlist_id: 349 | playlist = plugin.sp.user_playlist_create( 350 | user_id, playlist_name, public=False 351 | ) 352 | playlist_id = playlist["id"] 353 | plugin._log.debug( 354 | f"Playlist {playlist_name} created with id {playlist_id}" 355 | ) 356 | 357 | # Normalize target IDs preserving order and duplicates 358 | target_track_ids: List[str] = [ 359 | uri.replace("spotify:track:", "") if isinstance(uri, str) and uri.startswith("spotify:track:") else uri 360 | for uri in track_uris 361 | if uri 362 | ] 363 | 364 | # Fetch current playlist (ordered) 365 | playlist_tracks = get_playlist_tracks(plugin, playlist_id) 366 | current_track_ids: List[str] = [ 367 | t["track"]["id"] for t in playlist_tracks if t.get("track") and t["track"].get("id") 368 | ] 369 | 370 | # Fast path: exact match (order and counts) 371 | if current_track_ids == target_track_ids: 372 | plugin._log.debug("Playlist is already in sync - no changes needed") 373 | return 374 | 375 | # Remove tracks that are not in target at all 376 | current_counts = Counter(current_track_ids) 377 | target_counts = Counter(target_track_ids) 378 | obsolete_ids = [tid for tid in current_counts.keys() if tid not in target_counts] 379 | if obsolete_ids: 380 | for i in range(0, len(obsolete_ids), 100): 381 | chunk = obsolete_ids[i:i+100] 382 | plugin.sp.user_playlist_remove_all_occurrences_of_tracks( 383 | user_id, playlist_id, chunk 384 | ) 385 | plugin._log.debug(f"Removed {len(obsolete_ids)} obsolete tracks from playlist {playlist_id}") 386 | 387 | # Compute which tracks are missing (multiset difference), preserving order 388 | remaining_counts = Counter( 389 | {tid: min(current_counts.get(tid, 0), target_counts.get(tid, 0)) for tid in set(current_counts) | set(target_counts)} 390 | ) 391 | new_track_ids: List[str] = [] 392 | temp_counts = Counter(remaining_counts) 393 | for tid in target_track_ids: 394 | if temp_counts.get(tid, 0) > 0: 395 | temp_counts[tid] -= 1 396 | else: 397 | new_track_ids.append(tid) 398 | 399 | plugin._log.debug( 400 | f"Current={len(current_track_ids)} Target={len(target_track_ids)} New={len(new_track_ids)} Removed={len(obsolete_ids)}" 401 | ) 402 | 403 | # Add new tracks at top, preserving their order via reverse chunking 404 | if new_track_ids: 405 | n = len(new_track_ids) 406 | idx = n 407 | while idx > 0: 408 | start = max(0, idx - 100) 409 | chunk = new_track_ids[start:idx] 410 | plugin.sp.user_playlist_add_tracks(user_id, playlist_id, chunk, position=0) 411 | idx -= 100 412 | plugin._log.debug( 413 | f"Added {len(new_track_ids)} new tracks to top of playlist {playlist_id}" 414 | ) 415 | 416 | # Note: We are intentionally not reordering existing tracks to keep their 417 | # relative order and "Date added" intact, per requirement. 
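    # Worked illustration of the reverse chunking above (hypothetical IDs): with
    # new_track_ids = [t1, ..., t150], the loop makes two calls:
    #   1st call inserts new_track_ids[50:150] (t51..t150) at position 0
    #   2nd call inserts new_track_ids[0:50]   (t1..t50)   at position 0
    # leaving the playlist top as t1..t150, so the target order is preserved.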
418 | -------------------------------------------------------------------------------- /beetsplug/core/cache.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import re 4 | import sqlite3 5 | from datetime import datetime, timedelta 6 | 7 | from plexapi.audio import Track 8 | from plexapi.server import PlexServer 9 | from plexapi.video import Video 10 | from xml.etree.ElementTree import Element 11 | 12 | logger = logging.getLogger("beets") 13 | 14 | 15 | class PlexJSONEncoder(json.JSONEncoder): 16 | """Custom JSON encoder for Plex objects.""" 17 | 18 | def default(self, obj): 19 | if obj is None: 20 | return None 21 | if isinstance(obj, (Track, Video)): 22 | try: 23 | encoded = { 24 | "_type": obj.__class__.__name__, 25 | "plex_ratingkey": getattr(obj, "ratingKey", None), 26 | "title": getattr(obj, "title", ""), 27 | "parentTitle": getattr(obj, "parentTitle", ""), 28 | "originalTitle": getattr(obj, "originalTitle", ""), 29 | "userRating": getattr(obj, "userRating", None), 30 | "viewCount": getattr(obj, "viewCount", 0), 31 | "lastViewedAt": ( 32 | obj.lastViewedAt.isoformat() 33 | if getattr(obj, "lastViewedAt", None) 34 | else None 35 | ), 36 | } 37 | logger.debug("Encoded Plex object: {} -> {}", obj.title, encoded) 38 | return encoded 39 | except AttributeError as e: 40 | logger.error("Failed to encode Plex object: {}", e) 41 | return None 42 | elif isinstance(obj, datetime): 43 | return obj.isoformat() 44 | elif isinstance(obj, PlexServer): 45 | logger.debug("Skipping PlexServer object serialization") 46 | return None 47 | elif isinstance(obj, Element): 48 | return str(obj) 49 | return super().default(obj) 50 | 51 | 52 | class Cache: 53 | def __init__(self, db_path, plugin_instance): 54 | self.db_path = db_path 55 | self.plugin = plugin_instance 56 | logger.debug("Initializing cache at: {}", db_path) 57 | self._initialize_db() 58 | self._initialize_spotify_cache() 59 | 60 | def _initialize_db(self): 61 | """Initialize the SQLite database.""" 62 | try: 63 | with sqlite3.connect(self.db_path) as conn: 64 | cursor = conn.cursor() 65 | 66 | # Create the tables if they don't exist 67 | cursor.execute( 68 | """ 69 | CREATE TABLE IF NOT EXISTS cache ( 70 | query TEXT PRIMARY KEY, 71 | plex_ratingkey INTEGER, 72 | created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP 73 | ) 74 | """ 75 | ) 76 | 77 | # Add indexes 78 | cursor.execute( 79 | """ 80 | CREATE INDEX IF NOT EXISTS idx_created_at ON cache(created_at) 81 | """ 82 | ) 83 | 84 | # Create playlist cache table 85 | cursor.execute( 86 | """ 87 | CREATE TABLE IF NOT EXISTS playlist_cache ( 88 | playlist_id TEXT, 89 | source TEXT, 90 | data TEXT, 91 | created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, 92 | PRIMARY KEY (playlist_id, source) 93 | ) 94 | """ 95 | ) 96 | cursor.execute( 97 | """ 98 | CREATE INDEX IF NOT EXISTS idx_playlist_cache_created 99 | ON playlist_cache(created_at) 100 | """ 101 | ) 102 | 103 | conn.commit() 104 | logger.debug("Cache database initialized successfully") 105 | 106 | # Check if cleaned_query column exists 107 | cursor.execute("PRAGMA table_info(cache)") 108 | columns = [col[1] for col in cursor.fetchall()] 109 | if "cleaned_query" not in columns: 110 | cursor.execute("ALTER TABLE cache ADD COLUMN cleaned_query TEXT") 111 | conn.commit() 112 | logger.debug("Added cleaned_query column to cache table") 113 | 114 | # Cleanup old entries on startup 115 | self._cleanup_expired() 116 | 117 | except Exception as e: 118 | logger.error("Failed to initialize 
cache database: {}", e) 119 | raise 120 | 121 | def _initialize_spotify_cache(self): 122 | """Initialize Spotify-specific cache tables.""" 123 | try: 124 | with sqlite3.connect(self.db_path) as conn: 125 | cursor = conn.cursor() 126 | 127 | # Check if tables exist 128 | existing_tables = set() 129 | cursor.execute("SELECT name FROM sqlite_master WHERE type='table'") 130 | for row in cursor.fetchall(): 131 | existing_tables.add(row[0]) 132 | 133 | # Create tables only if they don't exist 134 | for table_type in ["api", "web", "tracks"]: 135 | table_name = f"spotify_{table_type}_cache" 136 | if table_name not in existing_tables: 137 | cursor.execute( 138 | f""" 139 | CREATE TABLE {table_name} ( 140 | playlist_id TEXT PRIMARY KEY, 141 | data TEXT, 142 | created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP 143 | ) 144 | """ 145 | ) 146 | cursor.execute( 147 | f""" 148 | CREATE INDEX IF NOT EXISTS idx_{table_name}_created 149 | ON {table_name}(created_at) 150 | """ 151 | ) 152 | logger.debug("Created new {} table", table_name) 153 | 154 | conn.commit() 155 | logger.debug("Spotify cache tables verified") 156 | 157 | except Exception as e: 158 | logger.error("Failed to initialize Spotify cache tables: {}", e) 159 | raise 160 | 161 | def clear_expired_spotify_cache(self): 162 | """Clear expired Spotify cache entries with randomized expiration.""" 163 | try: 164 | import random 165 | 166 | with sqlite3.connect(self.db_path) as conn: 167 | cursor = conn.cursor() 168 | 169 | # Get all entries 170 | for table_type in ["api", "web", "tracks"]: 171 | table_name = f"spotify_{table_type}_cache" 172 | cursor.execute(f"SELECT playlist_id, created_at FROM {table_name}") 173 | rows = cursor.fetchall() 174 | 175 | for playlist_id, created_at in rows: 176 | if created_at: 177 | expiry_hours = random.uniform(60, 200) 178 | created_dt = datetime.fromisoformat(created_at) 179 | expiry = created_dt + timedelta(hours=expiry_hours) 180 | 181 | # Check if expired 182 | if datetime.now() > expiry: 183 | cursor.execute( 184 | f"DELETE FROM {table_name} WHERE playlist_id = ?", 185 | (playlist_id,), 186 | ) 187 | if cursor.rowcount: 188 | logger.debug( 189 | "Cleaned expired entry from {} (age: {:.1f}h)", 190 | table_name, 191 | expiry_hours, 192 | ) 193 | 194 | conn.commit() 195 | except Exception as e: 196 | logger.error("Failed to clear expired Spotify cache: {}", e) 197 | 198 | def clear_expired_playlist_cache(self, max_age_hours=72): 199 | """Clear expired playlist cache entries.""" 200 | try: 201 | with sqlite3.connect(self.db_path) as conn: 202 | cursor = conn.cursor() 203 | expiry = datetime.now() - timedelta(hours=max_age_hours) 204 | 205 | # Delete expired entries 206 | cursor.execute( 207 | "DELETE FROM playlist_cache WHERE created_at < ?", 208 | (expiry.isoformat(),), 209 | ) 210 | if cursor.rowcount: 211 | logger.debug( 212 | "Cleaned {} expired playlist cache entries", cursor.rowcount 213 | ) 214 | conn.commit() 215 | except Exception as e: 216 | logger.error("Failed to clear expired playlist cache: {}", e) 217 | 218 | def _cleanup_expired(self, days=7): 219 | """Remove negative cache entries older than specified days.""" 220 | try: 221 | with sqlite3.connect(self.db_path) as conn: 222 | cursor = conn.cursor() 223 | expiry = datetime.now() - timedelta(days=days) 224 | cursor.execute( 225 | "DELETE FROM cache WHERE plex_ratingkey = -1 AND created_at < ?", 226 | (expiry.isoformat(),), 227 | ) 228 | if cursor.rowcount: 229 | logger.debug( 230 | "Cleaned up {} expired negative cache entries", cursor.rowcount 231 | ) 
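                # The plex_ratingkey = -1 sentinel targeted above is how set()
                # records "no match" results, so pruning old sentinels lets
                # previously unmatched tracks be retried on a later run.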
232 | conn.commit() 233 | except Exception as e: 234 | logger.error("Failed to cleanup expired cache entries: {}", e) 235 | 236 | def _sanitize_query_for_log(self, query): 237 | """Sanitize query for logging.""" 238 | try: 239 | return str(query) 240 | except Exception: 241 | return "" 242 | 243 | def normalize_text(self, text): 244 | """Normalize text for consistent cache keys.""" 245 | if not text: 246 | return "" 247 | # Convert to lowercase 248 | text = text.lower() 249 | # Remove featuring artists 250 | text = re.sub( 251 | r"\s*[\(\[]?(?:feat\.?|ft\.?|featuring)\s+[^\]\)]+[\]\)]?\s*", "", text 252 | ) 253 | # Remove any parentheses or brackets and their contents 254 | text = re.sub(r"\s*[\(\[][^\]\)]*[\]\)]\s*", "", text) 255 | # Remove extra whitespace 256 | text = " ".join(text.split()) 257 | return text 258 | 259 | 260 | def _make_cache_key(self, query_data): 261 | """Create a consistent cache key regardless of input type.""" 262 | if isinstance(query_data, str): 263 | return query_data 264 | elif isinstance(query_data, dict): 265 | # Normalize and clean the key fields - keep the original 3-part format 266 | normalized_title = self.normalize_text(query_data.get("title", "")) 267 | normalized_artist = self.normalize_text(query_data.get("artist", "")) 268 | normalized_album = self.normalize_text(query_data.get("album", "")) 269 | 270 | # Create a pipe-separated key with title|artist|album 271 | key_str = f"{normalized_title}|{normalized_artist}|{normalized_album}" 272 | return key_str 273 | return str(query_data) 274 | 275 | def _verify_track_exists(self, plex_ratingkey, query): 276 | """Verify track exists in Plex.""" 277 | try: 278 | # First try direct lookup 279 | self.plugin.music.fetchItem(plex_ratingkey) 280 | return True 281 | except Exception: 282 | return False 283 | 284 | def get(self, query): 285 | """Retrieve cached result for a given query.""" 286 | try: 287 | with sqlite3.connect(self.db_path) as conn: 288 | cursor = conn.cursor() 289 | 290 | # Generate cache key 291 | cache_key = self._make_cache_key(query) 292 | 293 | # Try exact match first 294 | cursor.execute( 295 | 'SELECT plex_ratingkey, cleaned_query FROM cache WHERE query = ?', 296 | (cache_key,) 297 | ) 298 | row = cursor.fetchone() 299 | 300 | if row: 301 | plex_ratingkey, cleaned_metadata_json = row 302 | cleaned_metadata = json.loads(cleaned_metadata_json) if cleaned_metadata_json else None 303 | return (plex_ratingkey, cleaned_metadata) 304 | 305 | # If no exact match, try flexible matching for new pipe format only 306 | # This handles cases where album names might have slight variations 307 | if isinstance(query, dict): 308 | normalized_title = self.normalize_text(query.get("title", "")) 309 | normalized_artist = self.normalize_text(query.get("artist", "")) 310 | 311 | # Look for entries with same title and artist (new pipe format only) 312 | cursor.execute( 313 | '''SELECT plex_ratingkey, cleaned_query, query 314 | FROM cache 315 | WHERE query LIKE ? 
AND query LIKE '%|%' ''', 316 | (f'{normalized_title}|{normalized_artist}|%',) 317 | ) 318 | 319 | for row in cursor.fetchall(): 320 | plex_ratingkey, cleaned_metadata_json, cached_query = row 321 | cleaned_metadata = json.loads(cleaned_metadata_json) if cleaned_metadata_json else None 322 | logger.debug('Found flexible match: "{}" -> rating_key: {}', cached_query, plex_ratingkey) 323 | return (plex_ratingkey, cleaned_metadata) 324 | 325 | return None 326 | except Exception as e: 327 | logger.error('Cache lookup failed: {}', str(e)) 328 | return None 329 | 330 | def set(self, query, plex_ratingkey, cleaned_metadata=None): 331 | """Store result in cache.""" 332 | try: 333 | def datetime_handler(obj): 334 | if isinstance(obj, datetime): 335 | return obj.isoformat() 336 | raise TypeError(f'Object of type {type(obj)} is not JSON serializable') 337 | 338 | rating_key = -1 if plex_ratingkey is None else int(plex_ratingkey) 339 | cleaned_json = json.dumps(cleaned_metadata, default=datetime_handler) if cleaned_metadata else None 340 | 341 | # Generate cache key using the same method as get() 342 | cache_key = self._make_cache_key(query) 343 | 344 | with sqlite3.connect(self.db_path) as conn: 345 | cursor = conn.cursor() 346 | cursor.execute( 347 | 'REPLACE INTO cache (query, plex_ratingkey, cleaned_query) VALUES (?, ?, ?)', 348 | (cache_key, rating_key, cleaned_json) 349 | ) 350 | conn.commit() 351 | logger.debug('Cached result: "{}" -> rating_key: {}', cache_key, rating_key) 352 | 353 | except Exception as e: 354 | logger.error('Cache storage failed for query "{}": {}', 355 | self._sanitize_query_for_log(query), str(e)) 356 | return None 357 | 358 | def get_playlist_cache(self, playlist_id, source): 359 | """Get cached playlist data for any source.""" 360 | try: 361 | # Clear expired entries first 362 | self.clear_expired_playlist_cache() 363 | 364 | with sqlite3.connect(self.db_path) as conn: 365 | cursor = conn.cursor() 366 | cursor.execute( 367 | "SELECT data FROM playlist_cache WHERE playlist_id = ? 
AND source = ?", 368 | (playlist_id, source), 369 | ) 370 | row = cursor.fetchone() 371 | 372 | if row: 373 | logger.debug("Cache hit for {} playlist: {}", source, playlist_id) 374 | return json.loads(row[0]) 375 | 376 | logger.debug("Cache miss for {} playlist: {}", source, playlist_id) 377 | return None 378 | 379 | except Exception as e: 380 | logger.error("{} playlist cache lookup failed: {}", source, e) 381 | return None 382 | 383 | def set_playlist_cache(self, playlist_id, source, data): 384 | """Store playlist data in cache for any source.""" 385 | try: 386 | with sqlite3.connect(self.db_path) as conn: 387 | cursor = conn.cursor() 388 | 389 | # Convert datetime objects to ISO format strings 390 | def datetime_handler(obj): 391 | if isinstance(obj, datetime): 392 | return obj.isoformat() 393 | return str(obj) 394 | 395 | # Store data as JSON string 396 | json_data = json.dumps(data, default=datetime_handler) 397 | 398 | cursor.execute( 399 | "REPLACE INTO playlist_cache (playlist_id, source, data) VALUES (?, ?, ?)", 400 | (playlist_id, source, json_data), 401 | ) 402 | conn.commit() 403 | logger.debug("Cached {} playlist data for: {}", source, playlist_id) 404 | 405 | except Exception as e: 406 | logger.error("{} playlist cache storage failed: {}", source, e) 407 | 408 | # Legacy methods for backward compatibility 409 | def get_spotify_cache(self, playlist_id, cache_type="api"): 410 | """Legacy method - redirects to generic get_playlist_cache.""" 411 | return self.get_playlist_cache(playlist_id, f"spotify_{cache_type}") 412 | 413 | def set_spotify_cache(self, playlist_id, data, cache_type="api"): 414 | """Legacy method - redirects to generic set_playlist_cache.""" 415 | return self.set_playlist_cache(playlist_id, f"spotify_{cache_type}", data) 416 | 417 | def clear(self): 418 | """Clear all cached entries.""" 419 | try: 420 | with sqlite3.connect(self.db_path) as conn: 421 | cursor = conn.cursor() 422 | cursor.execute("SELECT COUNT(*) FROM cache") 423 | count_before = cursor.fetchone()[0] 424 | 425 | cursor.execute("DELETE FROM cache") 426 | conn.commit() 427 | 428 | logger.info("Cleared {} entries from cache", count_before) 429 | except Exception as e: 430 | logger.error("Failed to clear cache: {}", e) 431 | 432 | def clear_negative_cache_entries(self, pattern=None): 433 | """Clear negative cache entries, optionally matching a pattern.""" 434 | try: 435 | with sqlite3.connect(self.db_path) as conn: 436 | cursor = conn.cursor() 437 | 438 | if pattern: 439 | # Clear specific pattern 440 | cursor.execute( 441 | "DELETE FROM cache WHERE plex_ratingkey = -1 AND query LIKE ?", 442 | (f"%{pattern}%",) 443 | ) 444 | logger.debug("Cleared {} negative cache entries matching pattern: {}", 445 | cursor.rowcount, pattern) 446 | else: 447 | # Clear all negative entries 448 | cursor.execute("DELETE FROM cache WHERE plex_ratingkey = -1") 449 | logger.debug("Cleared {} negative cache entries", cursor.rowcount) 450 | 451 | conn.commit() 452 | return cursor.rowcount 453 | except Exception as e: 454 | logger.error("Failed to clear negative cache entries: {}", e) 455 | return 0 456 | 457 | def clear_old_format_entries(self): 458 | """Clear all old format cache entries (JSON and list formats).""" 459 | try: 460 | with sqlite3.connect(self.db_path) as conn: 461 | cursor = conn.cursor() 462 | 463 | # Delete entries that don't use the new pipe format 464 | cursor.execute( 465 | "DELETE FROM cache WHERE query NOT LIKE '%|%' OR query LIKE '{%' OR query LIKE '[%'" 466 | ) 467 | cleared_count = cursor.rowcount 
468 | 469 | if cleared_count > 0: 470 | logger.info("Cleared {} old format cache entries", cleared_count) 471 | 472 | conn.commit() 473 | return cleared_count 474 | except Exception as e: 475 | logger.error("Failed to clear old format cache entries: {}", e) 476 | return 0 477 | 478 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # beets-plexsync 2 | A plugin for [beets][beets] to sync with your Plex server. 3 | 4 | ## Key Features 5 | 6 | ### AI-Generated Playlists 7 | - **AI-Generated Playlists**: Use `beet plexsonic -p "YOUR_PROMPT"` to create a playlist based on YOUR_PROMPT. Modify the playlist name using `-m` flag, change the number of tracks requested with `-n` flag, and clear the playlist before adding new songs with `-c` flag. 8 | 9 | ### Smart Playlists 10 | Use `beet plex_smartplaylists [-o ONLY]` to generate or manage custom playlists in Plex. The plugin currently supports various types of playlists: 11 | 12 | You can use the `-o` or `--only` option to specify a comma-separated list of playlist IDs to update. This is useful for updating only certain playlists (e.g., just the AI playlists) on a schedule: 13 | 14 | ```sh 15 | beet plex_smartplaylists -o daily_discovery,forgotten_gems 16 | ``` 17 | 18 | The command will only generate the specified playlists, skipping others in your configuration. 19 | 20 | 1. **Daily Discovery**: 21 | - Uses tracks you've played in the last 15 days as a base to learn about listening habits (configurable via `history_days`) 22 | - Excludes tracks played in the last 30 days (configurable via `exclusion_days`) 23 | - Uses an intelligent scoring system that considers: 24 | - Track popularity relative to your library 25 | - Rating for rated tracks 26 | - Recency of addition to library 27 | - Release year (favors newer releases) 28 | - Introduces controlled randomization to ensure variety 29 | - Matches genres with your recent listening history using both sonic analysis and library-wide genre preferences 30 | - Uses Plex's [Sonic Analysis](https://support.plex.tv/articles/sonic-analysis-music/) to find sonically similar tracks 31 | - Also discovers tracks from your entire library that match your preferred genres 32 | - Limits the playlist size (configurable via `max_tracks`, default 20) 33 | - Controls discovery vs. familiar ratio (configurable via `discovery_ratio`, default 30% - more familiar tracks) 34 | 35 | 2. **70s/80s Flashback**: 36 | - Creates a nostalgic playlist featuring tracks from 1970-1989 37 | - Prioritizes well-rated tracks from the 70s and 80s that may not have been played recently 38 | - Uses a specialized scoring algorithm that emphasizes nostalgic value and age 39 | - Balances between familiar favorites and forgotten gems from the era 40 | - Limits the playlist size (configurable via `max_tracks`, default 20) 41 | - Controls discovery vs. nostalgia ratio (configurable via `discovery_ratio`, default 30%) 42 | 43 | 3. **Highly Rated Tracks**: 44 | - Curates tracks with high user ratings (7.0 and above) 45 | - Focuses on quality by prioritizing highly-rated content 46 | - Includes tracks that have stood the test of time according to your ratings 47 | - Adds slight recency factor to maintain variety in the playlist 48 | - Limits the playlist size (configurable via `max_tracks`, default 20) 49 | 50 | 4. 
**Most Played Tracks**: 51 | - Features your most frequently played tracks 52 | - Ranks tracks based on cumulative play counts (plex_viewcount) 53 | - Uses weighted selection to add variety while prioritizing popular tracks 54 | - Sorts all tracks by play count in descending order 55 | - Limits the playlist size (configurable via `max_tracks`, default 20) 56 | 57 | 5. **Forgotten Gems**: 58 | - Creates a playlist of tracks that deserve more attention 59 | - Uses your highly-rated tracks to establish a quality baseline 60 | - Prioritizes unrated tracks with popularity comparable to your favorites 61 | - Only includes tracks matching your genre preferences 62 | - Automatically adjusts selection criteria based on your library's characteristics 63 | - Limits the playlist size (configurable via `max_tracks`, default 20) 64 | - Controls maximum play count (configurable via `max_plays`, default 2) 65 | - Minimum rating for rated tracks to be included (configurable via `min_rating`, default 4) 66 | - Percentage of playlist to fill with unrated but popular tracks (configurable via `discovery_ratio`, default 30%) 67 | - Excludes tracks played recently (configurable via `exclusion_days`) 68 | 69 | 6. **Recent Hits**: 70 | - Curates a playlist of recent, high-energy tracks 71 | - Applies a default release-year guard covering roughly the last 3 years whenever no year filter is provided; override with `filters.include.years` or the playlist-level `max_age_years`/`min_year` options 72 | - Updated scoring leans harder on release recency and last-play data, with popularity and ratings acting as the tie-breakers 73 | - Uses weighted randomness for track selection while respecting your genre preferences 74 | - Automatically adjusts selection criteria and limits size (configurable via `max_tracks`, default 20) 75 | - Requires a minimum rating (`min_rating`, default 4) and lets you control the discovery ratio (default 20%) 76 | - Set `exclusion_days` if you want to keep very recent listens out (default 30 days) 77 | 78 | 7. **Fresh Favorites**: 79 | - Creates a playlist of high-quality tracks that deserve more plays 80 | - Enforces a default release window spanning roughly the last 7 years unless you supply custom year filters or specify `max_age_years`/`min_year` 81 | - Updated scoring strongly favors release recency and recent spins while still rewarding strong ratings and popularity 82 | - Skips tracks without a trusted release year when the recency guard is active to keep the mix on-theme 83 | - Defaults: `max_tracks: 100`, `discovery_ratio: 25`, `min_rating: 6`, `exclusion_days: 21` 84 | 85 | 8. **Imported Playlists**: 86 | - Import playlists from external services (Spotify, Apple Music, YouTube, etc.) and local M3U8 files 87 | - Configure multiple source URLs and file paths per playlist 88 | - For M3U8 files, use paths relative to beets config directory or absolute paths 89 | - Support for custom HTTP POST requests to fetch playlists 90 | - Control playlist behavior with options: 91 | - `manual_search`: Enable/disable manual matching for unmatched tracks 92 | - `clear_playlist`: Clear existing playlist before adding new tracks 93 | - `max_tracks`: Limit the number of tracks in the playlist 94 | 95 | You can use config filters to finetune any playlist. You can specify the `genre`, `year`, and `UserRating` to be included and excluded from any of the playlists. See the extended example below. 
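For quick reference, the per-playlist options described above plug into the `plexsync.playlists` section of your config. Here is a minimal sketch with illustrative values (the full example, including filters, appears in the Advanced section):

```yaml
plexsync:
  playlists:
    defaults:
      max_tracks: 20           # shared default, can be overridden per playlist
    items:
      - id: daily_discovery
        name: "Daily Discovery"
        history_days: 15       # window used to learn listening habits
        exclusion_days: 30     # skip tracks played in the last 30 days
        discovery_ratio: 30    # percentage of unrated tracks
      - id: forgotten_gems
        name: "Forgotten Gems"
        max_plays: 2           # only tracks with at most 2 plays
        min_rating: 4          # minimum rating for rated tracks
```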
96 | 
97 | ### Library Sync
98 | - **Plex Library Sync**: `beet plexsync [-f]` imports all the data from your Plex library into beets. Use the `-f` flag to force update the entire library with fresh information from Plex.
99 | - **Recent Sync**: `beet plexsyncrecent [--days N]` updates the information for tracks listened to in the last N days (default: 7). For example, `beet plexsyncrecent --days 14` will update tracks played in the last 14 days.
100 | 
101 | ### Playlist Manipulation
102 | - **Playlist Manipulation**: `beet plexplaylistadd [-m PLAYLIST] [QUERY]` and `beet plexplaylistremove [-m PLAYLIST] [QUERY]` add or remove tracks from Plex playlists. Use the `-m` flag to provide the playlist name. You can use any [beets query][queries_] as an optional filter.
103 | - **Playlist Clear**: `beet plexplaylistclear [-m PLAYLIST]` clears a Plex playlist. Use the `-m` flag to specify the playlist name.
104 | 
105 | ### Playlist Import
106 | - **Playlist Import**: `beet plexplaylistimport [-m PLAYLIST] [-u URL] [-l]` imports individual playlists from Spotify, Apple Music, Gaana.com, JioSaavn, YouTube, Tidal, M3U8 files, custom APIs, and ListenBrainz. Use the `-m` flag to specify the playlist name and:
107 |   - For online services: use the `-u` flag to supply the full playlist URL
108 |   - For M3U8 files: use the `-u` flag with the file path (relative to the beets config directory or an absolute path)
109 |   - For custom APIs: configure POST requests in config.yaml (see the Configuration section)
110 |   - For ListenBrainz: use the `-l` or `--listenbrainz` flag to import the "Weekly Jams" and "Weekly Exploration" playlists
111 | 
112 | You can define multiple sources per playlist in your config, including custom POST endpoints:
113 | ```yaml
114 | - name: "Mixed Sources Playlist"
115 |   type: "imported"
116 |   sources:
117 |     - "https://open.spotify.com/playlist/37i9dQZF1DX0kbJZpiYdZl" # Spotify
118 |     - "playlists/local.m3u8" # Local M3U8
119 |     - type: "post" # Custom API
120 |       server_url: "http://localhost:8000/api/playlist"
121 |       headers:
122 |         Authorization: "Bearer your-token"
123 |       payload:
124 |         playlist_url: "https://example.com/playlist/123"
125 | ```
126 | 
127 | For each import session, a detailed log file is created in your beets config directory (named `_import.log`) that records:
128 | - Tracks that couldn't be found in your Plex library
129 | - Low-rated tracks that were skipped
130 | - Import statistics and summary
131 | - The log file helps you identify which tracks need manual attention
132 | - **YouTube Search Import**: `beet plexsearchimport [-m PLAYLIST] [-s SEARCH] [-l LIMIT]` imports playlists based on a YouTube search. Use the `-m` flag to specify the playlist name, the `-s` flag for the search query, and the `-l` flag to limit the number of search results.
133 | 
134 | ### Additional Tools
135 | - **Plex to Spotify**: `beet plex2spotify [-m PLAYLIST] [QUERY]` copies a Plex playlist to Spotify. Use the `-m` flag to specify the playlist name.
136 | 
137 | You can use [beets queries][queries_] with this command to filter which tracks are sent to Spotify. For example, to add only tracks with a `plex_userrating` greater than 2 to the "Sufiyana" playlist, use:
138 | 
139 | ```sh
140 | beet plex2spotify -m "Sufiyana" plex_userrating:2.. 
141 | ``` 142 | 143 | Additional filtering examples: 144 | - Only transfer highly-rated tracks: `beet plex2spotify -m "My Playlist" plex_userrating:8..` 145 | - Transfer tracks by specific artist: `beet plex2spotify -m "Rock Hits" artist:"The Beatles"` 146 | - Transfer tracks from a specific year range: `beet plex2spotify -m "2000s Hits" year:2000..2009` 147 | - Combine multiple filters: `beet plex2spotify -m "Recent Favorites" plex_userrating:7.. year:2020..` 148 | - **Playlist to Collection**: `beet plexplaylist2collection [-m PLAYLIST]` converts a Plex playlist to a collection. Use the `-m` flag to specify the playlist name. 149 | - **Album Collage**: `beet plexcollage [-i INTERVAL] [-g GRID]` creates a collage of most played albums. Use the `-i` flag to specify the number of days and `-g` flag to specify the grid size. 150 | 151 | ### Manual Import for Failed Tracks 152 | The plugin creates detailed import logs for each playlist import session. You can manually process failed imports using: 153 | 154 | - `beet plex_smartplaylists [--import-failed] [--log-file LOGFILE]`: Process all import logs and attempt manual matching for failed tracks, or process a specific log file. 155 | 156 | This is especially useful when: 157 | - You've added new music to your library and want to retry matching previously failed tracks 158 | - You want to manually match specific tracks from a particular playlist's import log 159 | - You need to clean up import logs by removing successfully matched tracks 160 | 161 | ## Introduction 162 | 163 | This plugin allows you to sync your Plex library with beets, create playlists based on AI-generated prompts, import playlists from other online services, and more. 164 | 165 | ## Installation 166 | 167 | Install the plugin using `pip`: 168 | 169 | ```shell 170 | pip install git+https://github.com/arsaboo/beets-plexsync.git 171 | ``` 172 | 173 | Then, [configure](#configuration) the plugin in your [`config.yaml`][config] file. 174 | 175 | To upgrade, use the command: 176 | ```shell 177 | pip install --upgrade --force-reinstall --no-deps git+https://github.com/arsaboo/beets-plexsync.git 178 | ``` 179 | 180 | ## Configuration 181 | 182 | Add `plexsync` to your list of enabled plugins. 183 | 184 | ```yaml 185 | plugins: plexsync 186 | 187 | # If you want to use the ListenBrainz import feature, you'll need to configure 188 | # the ListenBrainz plugin. See https://github.com/arsaboo/beets-listenbrainz for setup. 189 | listenbrainz: 190 | user_token: YOUR_USER_TOKEN 191 | username: YOUR_USERNAME 192 | ``` 193 | 194 | Next, you can configure your Plex server and library like following (see instructions to obtain Plex token [here][plex_token]). 195 | 196 | ```yaml 197 | plex: 198 | host: '192.168.2.212' 199 | port: 32400 200 | token: PLEX_TOKEN 201 | library_name: 'Music' 202 | ``` 203 | 204 | If you want to import `spotify` playlists, you will also need to configure the `spotify` plugin. If you are already using the [Spotify][Spotify] plugin, `plexsync` will reuse the same configuration. 205 | ```yaml 206 | spotify: 207 | client_id: CLIENT_ID 208 | client_secret: CLIENT_SECRET 209 | ``` 210 | 211 | * The `beet plexsonic` command allows you to create AI-based playlists using an OpenAI-compatible language model. To use this feature, you will need to configure the AI model with an API key. 
Once you have obtained an API key, you can configure `beets` to use it by adding the following to your `config.yaml` file: 212 | 213 | ```yaml 214 | llm: 215 | api_key: API_KEY 216 | model: "gpt-3.5-turbo" 217 | base_url: "https://api.openai.com/v1" # Optional, for other providers 218 | search: 219 | # provider is auto-detected: OpenAI if llm.api_key is set, otherwise Ollama 220 | # Explicitly set to "ollama" if you want to use Ollama instead 221 | brave_api_key: "your-brave-api-key" # Optional Brave Search API key 222 | searxng_host: "http://your-searxng-instance.com" # Optional SearxNG instance 223 | exa_api_key: "your-exa-api-key" # Optional Exa search API key 224 | tavily_api_key: "your-tavily-api-key" # Optional Tavily API key 225 | # Advanced: Override settings from main llm config 226 | # api_key: "" # Uses llm.api_key if empty 227 | # base_url: "" # Uses llm.base_url if empty 228 | # model: "" # Uses llm.model if empty (for OpenAI) or "qwen3:latest" (for Ollama) 229 | # ollama_host: "http://localhost:11434" # Only used when provider is "ollama" 230 | ``` 231 | 232 | **Using OpenAI or OpenAI-compatible APIs for search:** 233 | 234 | The plugin automatically uses OpenAI-compatible models (via OpenAILike) for LLM search if you have `llm.api_key` configured. No additional configuration needed! 235 | 236 | **Simple configuration** (auto-detects OpenAI): 237 | ```yaml 238 | llm: 239 | api_key: YOUR_OPENAI_API_KEY 240 | model: "gpt-4.1-mini" # Or your preferred model 241 | base_url: "https://api.openai.com/v1" # Or your preferred endpoint 242 | search: 243 | brave_api_key: "your-brave-api-key" # At least one search provider is required 244 | ``` 245 | 246 | **Using Ollama instead** (explicit override): 247 | ```yaml 248 | llm: 249 | search: 250 | provider: "ollama" # Explicitly use Ollama 251 | model: "qwen3:latest" 252 | ollama_host: "http://localhost:11434" 253 | brave_api_key: "your-brave-api-key" 254 | ``` 255 | 256 | **Advanced: Override search-specific settings**: 257 | ```yaml 258 | llm: 259 | api_key: YOUR_MAIN_API_KEY 260 | model: "gpt-4" 261 | search: 262 | api_key: YOUR_SEARCH_SPECIFIC_KEY # Use different key for search 263 | model: "gpt-3.5-turbo" # Use cheaper model for search 264 | brave_api_key: "your-brave-api-key" 265 | ``` 266 | 267 | Note: To enable LLM search, you must also set `use_llm_search: yes` in your `plexsync` configuration (see Advanced Usage section). 268 | 269 | **Structured Output with instructor:** 270 | 271 | The plugin uses the [instructor](https://github.com/jxnl/instructor) library for reliable structured output from LLMs (>99% reliability). This works with both Ollama (via `/v1` endpoint) and OpenAI-compatible APIs. The `instructor` library ensures that LLM responses match the expected Pydantic models, with built-in retry logic. If `instructor` is not available, the plugin gracefully falls back to the Agno framework. 272 | 273 | When multiple search providers are configured, they're used in the following priority order: 274 | 1. SearxNG (tried first if configured) 275 | 2. Exa (used if SearxNG fails or isn't configured) 276 | 3. Brave Search (used if both SearxNG and Exa fail or aren't configured) 277 | 4. Tavily (used if all above fail or aren't configured) 278 | 279 | You can get started with `beet plexsonic -p "YOUR_PROMPT"` to create the playlist based on YOUR_PROMPT. The default playlist name is `SonicSage` (wink wink), you can modify it using `-m` flag. By default, it requests 10 tracks from the AI model. 
Use the `-n` flag to change the number of tracks requested. Finally, if you prefer to clear the playlist before adding the new songs, you can add the `-c` flag. So, to create a new classical music playlist, you can use something like `beet plexsonic -c -n 10 -p "classical music, romanticism era, like Schubert, Chopin, Liszt"`.
280 | 
281 | Please note that not all tracks returned by the AI model may be available in your library or matched perfectly, affecting the size of the playlist created. The command will log the tracks that could not be found in your library. You can improve the matching by enabling `manual_search` (see Advanced Usage). This is working extremely well for me. I would love to hear your comments/feedback to improve this feature.
282 | 
283 | * To configure imported playlists, you can use various source types, including custom POST requests:
284 | 
285 | ```yaml
286 | plexsync:
287 |   playlists:
288 |     items:
289 |       - name: "Custom Playlist"
290 |         type: "imported"
291 |         sources:
292 |           # Standard URL sources
293 |           - "https://open.spotify.com/playlist/37i9dQZF1DX0kbJZpiYdZl"
294 |           - "playlists/local_hits.m3u8"
295 |           # POST request source
296 |           - type: "post"
297 |             server_url: "http://localhost:8000/api/playlist"
298 |             headers:
299 |               Authorization: "Bearer your-token"
300 |               Content-Type: "application/json"
301 |             payload:
302 |               playlist_url: "https://example.com/playlist/123"
303 | ```
304 | 
305 | The POST request expects a JSON response with this format:
306 | ```json
307 | {
308 |   "song_list": [
309 |     {
310 |       "title": "Song Title",
311 |       "artist": "Artist Name",
312 |       "album": "Album Name", # Optional
313 |       "year": "2024" # Optional
314 |     }
315 |   ]
316 | }
317 | ```
318 | 
319 | ## Advanced
320 | Plex matching may be less than perfect, and it can miss tracks if the tags don't line up exactly. There are a few tools you can use to improve searching:
321 | * You can turn on manual matching for unmatched tracks by enabling `manual_search` in your config (default: `False`).
322 | * You can enable LLM-powered search (via an OpenAI-compatible API or Ollama) with optional integration for SearxNG, Exa, Brave Search, or Tavily (tried in that order when configured). This provides intelligent search capabilities that can better match tracks with incomplete or variant metadata. See the `llm` configuration section above.
323 | 
324 | ```yaml
325 | plexsync:
326 |   manual_search: yes
327 |   use_llm_search: yes # Enable LLM searching; see llm config
328 |   playlists:
329 |     defaults:
330 |       max_tracks: 20
331 |     items:
332 |       - id: daily_discovery
333 |         name: "Daily Discovery"
334 |         max_tracks: 20 # Maximum number of tracks for Daily Discovery playlist
335 |         exclusion_days: 30 # Number of days to exclude recently played tracks. Tracks played in the last 30 days will not be included in the playlist. 
336 | history_days: 15 # Number of days to use to learn listening habits 337 | discovery_ratio: 70 # Percentage of unrated tracks (0-100) 338 | # Higher values = more discovery 339 | # Example: 30 = 30% unrated + 70% rated tracks 340 | # 70 = 70% unrated + 30% rated tracks 341 | 342 | - id: forgotten_gems 343 | name: "Forgotten Gems" 344 | max_tracks: 50 # Maximum number of tracks for playlist 345 | max_plays: 2 # Maximum number of plays for tracks to be included 346 | min_rating: 4 # Minimum rating for rated tracks 347 | discovery_ratio: 30 # Percentage of unrated tracks (0-100); Higher values = more discovery 348 | exclusion_days: 30 # Number of days to exclude recently played tracks 349 | filters: 350 | include: 351 | genres: 352 | - Filmi 353 | - Indi Pop 354 | - Punjabi 355 | - Sufi 356 | - Ghazals 357 | years: 358 | after: 1970 359 | exclude: 360 | genres: 361 | - Religious 362 | - Bollywood Unwind 363 | - Bollywood Instrumental 364 | years: 365 | before: 1960 366 | min_rating: 5 367 | 368 | - id: recent_hits 369 | name: "Recent Hits" 370 | max_tracks: 20 371 | discovery_ratio: 20 372 | exclusion_days: 0 # Number of days to exclude recently played tracks (default: 0 = include all) 373 | filters: 374 | include: 375 | genres: 376 | - Pop 377 | - Rock 378 | years: 379 | after: 2022 380 | min_rating: 4 381 | 382 | - id: bollywood_hits 383 | name: "Bollywood Hits" 384 | type: imported 385 | sources: # full playlist urls or M3U8 file paths 386 | - https://music.youtube.com/playlist?list=RDCLAK5uy_kjNBBWqyQ_Cy14B0P4xrcKgd39CRjXXKk 387 | - "playlists/local_hits.m3u8" # Relative to beets config dir 388 | - "/absolute/path/to/playlist.m3u8" 389 | max_tracks: 100 # Optional limit 390 | manual_search: no 391 | clear_playlist: no 392 | ``` 393 | 394 | [collage]: collage.png 395 | [queries_]: https://beets.readthedocs.io/en/latest/reference/query.html?highlight=queries 396 | [plaxapi]: https://python-plexapi.readthedocs.io/en/latest/modules/audio.html 397 | [plex_token]: https://support.plex.tv/articles/204059436-finding-an-authentication-token-x-plex-token/ 398 | [config]: https://beets.readthedocs.io/en/latest/plugins/index.html 399 | [beets]: https://github.com/beetbox/beets 400 | [Spotify]: https://beets.readthedocs.io/en/stable/plugins/spotify.html 401 | [listenbrainz_plugin_]: https://github.com/arsaboo/beets-listenbrainz 402 | -------------------------------------------------------------------------------- /tests/test_plex_search.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import json 3 | import sys 4 | import types 5 | import unittest 6 | 7 | from tests.test_playlist_import import DummyLogger, ensure_stubs 8 | 9 | 10 | class CacheStub: 11 | def __init__(self): 12 | self.storage = {} 13 | 14 | def _make_cache_key(self, query): 15 | if isinstance(query, dict): 16 | return str(sorted(query.items())) 17 | return str(query) 18 | 19 | def get(self, query): 20 | return self.storage.get(self._make_cache_key(query)) 21 | 22 | def set(self, query, value, cleaned_metadata=None): 23 | key = self._make_cache_key(query) 24 | rating_key = -1 if value is None else value 25 | self.storage[key] = (rating_key, cleaned_metadata) 26 | 27 | 28 | class PlexSearchTests(unittest.TestCase): 29 | def setUp(self): 30 | if sys.version_info < (3, 9): 31 | self.skipTest('Plex search tests require Python 3.9+') 32 | class SimpleBaseModel: 33 | def __init__(self, **data): 34 | for key, value in data.items(): 35 | setattr(self, key, value) 36 | def 
model_dump(self): 37 | return self.__dict__.copy() 38 | @classmethod 39 | def model_validate_json(cls, data): 40 | return cls(**json.loads(data)) 41 | def Field(default=None, **kwargs): 42 | return default 43 | def field_validator(*args, **kwargs): 44 | def decorator(func): 45 | return func 46 | return decorator 47 | sys.modules['pydantic'] = types.SimpleNamespace( 48 | BaseModel=SimpleBaseModel, 49 | Field=Field, 50 | field_validator=field_validator, 51 | ) 52 | ensure_stubs({'plexsync': {}, 'llm': {'search': {}}}) 53 | if 'beetsplug.plex.search' in sys.modules: 54 | importlib.reload(sys.modules['beetsplug.plex.search']) 55 | else: 56 | importlib.import_module('beetsplug.plex.search') 57 | self.search = importlib.import_module('beetsplug.plex.search') 58 | 59 | def test_returns_cached_track(self): 60 | track = types.SimpleNamespace(ratingKey=42, title='Cached') 61 | 62 | class Music: 63 | def fetchItem(self, key): 64 | return track 65 | def searchTracks(self, **kwargs): 66 | return [] 67 | 68 | plugin = types.SimpleNamespace() 69 | plugin._log = DummyLogger() 70 | cache = CacheStub() 71 | cache.storage[cache._make_cache_key({'title': 'Song', 'artist': 'Artist'})] = (track.ratingKey, None) 72 | plugin.cache = cache 73 | plugin.music = Music() 74 | plugin.search_llm = None 75 | plugin.manual_track_search = lambda song: None 76 | plugin._cache_result = lambda *args, **kwargs: None 77 | 78 | result = self.search.search_plex_song(plugin, {'title': 'Song', 'artist': 'Artist'}, manual_search=False) 79 | self.assertIs(result, track) 80 | 81 | def test_single_track_search_caches_result(self): 82 | track = types.SimpleNamespace(ratingKey=7, title='Match', parentTitle='Album') 83 | 84 | class Music: 85 | def searchTracks(self, **kwargs): 86 | return [track] 87 | def fetchItem(self, key): 88 | raise AssertionError('fetchItem should not be called') 89 | 90 | recorded = [] 91 | 92 | plugin = types.SimpleNamespace() 93 | plugin._log = DummyLogger() 94 | plugin.cache = CacheStub() 95 | plugin.music = Music() 96 | plugin.search_llm = None 97 | plugin.manual_track_search = lambda song: None 98 | plugin._cache_result = lambda key, result: recorded.append((key, result)) 99 | 100 | song = {'title': 'Song', 'album': 'Album', 'artist': 'Artist'} 101 | result = self.search.search_plex_song(plugin, song, manual_search=False) 102 | self.assertIs(result, track) 103 | self.assertTrue(recorded) 104 | 105 | def test_local_candidate_direct_match_short_circuits_search(self): 106 | track = types.SimpleNamespace(ratingKey=303, title='Vector Match', parentTitle='Album') 107 | 108 | class Music: 109 | def __init__(self): 110 | self.fetch_calls = [] 111 | self.search_calls = [] 112 | 113 | def fetchItem(self, key): 114 | self.fetch_calls.append(key) 115 | return track 116 | 117 | def searchTracks(self, **kwargs): 118 | self.search_calls.append(kwargs) 119 | return [] 120 | 121 | class Candidate: 122 | def __init__(self, metadata, score): 123 | self.metadata = metadata 124 | self.score = score 125 | 126 | def song_dict(self): 127 | return { 128 | 'title': self.metadata.get('title', ''), 129 | 'album': self.metadata.get('album', ''), 130 | 'artist': self.metadata.get('artist', ''), 131 | } 132 | 133 | def overlap_tokens(self, counts): 134 | return [] 135 | 136 | music = Music() 137 | plugin = types.SimpleNamespace() 138 | plugin._log = DummyLogger() 139 | plugin.cache = CacheStub() 140 | plugin.music = music 141 | plugin.search_llm = None 142 | plugin.manual_track_search = lambda song: None 143 | plugin._cache_result = lambda 
*args, **kwargs: None 144 | plugin._match_score_for_query = lambda song, track: 0.95 145 | 146 | candidate = Candidate( 147 | {'title': 'Vector Match', 'album': 'Album', 'artist': 'Artist', 'plex_ratingkey': 303}, 148 | 0.92, 149 | ) 150 | plugin.get_local_beets_candidates = lambda song: [candidate] 151 | 152 | def stub_direct_match(cand, query): 153 | rating_key = cand.metadata.get('plex_ratingkey') 154 | if rating_key is None: 155 | return None 156 | return music.fetchItem(rating_key) 157 | 158 | plugin._try_candidate_direct_match = lambda cand, query, cache_key=None: stub_direct_match(cand, query) 159 | plugin._prepare_candidate_variants = lambda candidates, song: [] 160 | 161 | song = {'title': 'Original', 'album': 'Album', 'artist': 'Artist'} 162 | result = self.search.search_plex_song(plugin, song, manual_search=False) 163 | 164 | self.assertIs(result, track) 165 | self.assertEqual(music.fetch_calls, [303]) 166 | self.assertEqual(music.search_calls, []) 167 | 168 | def test_local_candidate_variant_fallback(self): 169 | variant_track = types.SimpleNamespace( 170 | ratingKey=808, 171 | title='Variant Song', 172 | parentTitle='Variant Album', 173 | artist=lambda: types.SimpleNamespace(title='Variant Artist'), 174 | ) 175 | 176 | class Music: 177 | def __init__(self): 178 | self.search_calls = [] 179 | 180 | def searchTracks(self, **kwargs): 181 | self.search_calls.append(kwargs) 182 | target = {'album.title': 'Variant Album', 'track.title': 'Variant Song'} 183 | filtered = {k: v for k, v in kwargs.items() if k != 'limit'} 184 | if filtered == target: 185 | return [variant_track] 186 | return [] 187 | 188 | def fetchItem(self, key): 189 | raise AssertionError('fetchItem should not be called without a rating key') 190 | 191 | class Candidate: 192 | def __init__(self, metadata, score): 193 | self.metadata = metadata 194 | self.score = score 195 | 196 | def song_dict(self): 197 | return { 198 | 'title': self.metadata.get('title', ''), 199 | 'album': self.metadata.get('album', ''), 200 | 'artist': self.metadata.get('artist', ''), 201 | } 202 | 203 | def overlap_tokens(self, counts): 204 | return [] 205 | 206 | cache = CacheStub() 207 | recorded_cache = [] 208 | music = Music() 209 | 210 | plugin = types.SimpleNamespace() 211 | plugin._log = DummyLogger() 212 | plugin.cache = cache 213 | plugin.music = music 214 | plugin.search_llm = None 215 | plugin.manual_track_search = lambda song: None 216 | def cache_result(key, result, cleaned=None): 217 | recorded_cache.append((key, result)) 218 | cache.set(key, result, cleaned) 219 | plugin._cache_result = cache_result 220 | plugin.find_closest_match = lambda song, tracks: [(variant_track, 0.95)] 221 | plugin._match_score_for_query = lambda song, track: 0.92 222 | 223 | candidate = Candidate( 224 | {'title': 'Variant Song', 'album': 'Variant Album', 'artist': 'Variant Artist'}, 225 | 0.88, 226 | ) 227 | plugin.get_local_beets_candidates = lambda song: [candidate] 228 | plugin._try_candidate_direct_match = lambda cand, query, cache_key=None: None 229 | 230 | def prepare_variants(candidates, original_song): 231 | return [(candidates[0].song_dict(), candidates[0].score)] 232 | 233 | plugin._prepare_candidate_variants = prepare_variants 234 | 235 | song = {'title': 'Original Song', 'album': 'Original Album', 'artist': 'Original Artist'} 236 | result = self.search.search_plex_song(plugin, song, manual_search=False) 237 | 238 | self.assertIs(result, variant_track) 239 | # Ensure the variant metadata search was attempted. 
240 | self.assertGreaterEqual(len(music.search_calls), 1) 241 | self.assertTrue( 242 | any( 243 | {k: v for k, v in call.items() if k != 'limit'} 244 | == {'album.title': 'Variant Album', 'track.title': 'Variant Song'} 245 | for call in music.search_calls 246 | ) 247 | ) 248 | # Ensure search results are cached for the original query. 249 | cache_keys = list(cache.storage.keys()) 250 | self.assertTrue(any('Original Song' in key for key in cache_keys)) 251 | 252 | def test_single_track_low_similarity_rejected(self): 253 | track = types.SimpleNamespace( 254 | ratingKey=909, 255 | title='Mismatch Song', 256 | parentTitle='Mismatch Album', 257 | artist=lambda: types.SimpleNamespace(title='Mismatch Artist'), 258 | ) 259 | 260 | class Music: 261 | def __init__(self): 262 | self.search_calls = [] 263 | 264 | def searchTracks(self, **kwargs): 265 | self.search_calls.append(kwargs) 266 | return [track] 267 | 268 | def fetchItem(self, key): 269 | raise AssertionError('fetchItem should not be called without a rating key') 270 | 271 | cache = CacheStub() 272 | positive_results = [] 273 | music = Music() 274 | 275 | plugin = types.SimpleNamespace() 276 | plugin._log = DummyLogger() 277 | plugin.cache = cache 278 | plugin.music = music 279 | plugin.search_llm = None 280 | plugin.manual_track_search = lambda song: None 281 | 282 | def cache_result(key, result, cleaned=None): 283 | if result is not None: 284 | positive_results.append(result) 285 | cache.set(key, result, cleaned) 286 | 287 | plugin._cache_result = cache_result 288 | plugin.find_closest_match = lambda song, tracks: [] 289 | plugin.get_local_beets_candidates = lambda song: [] 290 | plugin._try_candidate_direct_match = lambda cand, query, cache_key=None: None 291 | plugin._prepare_candidate_variants = lambda candidates, song: [] 292 | plugin._match_score_for_query = lambda song, found: 0.55 293 | 294 | song = {'title': 'Original Song', 'album': 'Original Album', 'artist': 'Original Artist'} 295 | result = self.search.search_plex_song(plugin, song, manual_search=False) 296 | 297 | self.assertIsNone(result) 298 | self.assertFalse(positive_results) 299 | 300 | def test_user_confirmation_accepts_candidate(self): 301 | track = types.SimpleNamespace( 302 | ratingKey=111, 303 | title='Candidate Song', 304 | parentTitle='Candidate Album', 305 | artist=lambda: types.SimpleNamespace(title='Candidate Artist'), 306 | ) 307 | 308 | class Music: 309 | def __init__(self): 310 | self.search_calls = [] 311 | 312 | def searchTracks(self, **kwargs): 313 | self.search_calls.append(kwargs) 314 | return [track] 315 | 316 | def fetchItem(self, key): 317 | raise AssertionError('fetchItem should not be called without a rating key') 318 | 319 | cache = CacheStub() 320 | cached_results = [] 321 | music = Music() 322 | 323 | plugin = types.SimpleNamespace() 324 | plugin._log = DummyLogger() 325 | plugin.cache = cache 326 | plugin.music = music 327 | plugin.search_llm = None 328 | plugin.manual_track_search_called = False 329 | 330 | def manual_track_search(_song): 331 | plugin.manual_track_search_called = True 332 | return None 333 | 334 | plugin.manual_track_search = manual_track_search 335 | plugin._cache_result = lambda key, result, cleaned=None: cached_results.append((key, result)) 336 | plugin.find_closest_match = lambda song, tracks: [] 337 | plugin.get_local_beets_candidates = lambda song: [] 338 | plugin._try_candidate_direct_match = lambda cand, query, cache_key=None: None 339 | plugin._prepare_candidate_variants = lambda candidates, song: [] 340 | 
plugin._candidate_confirmations = [] 341 | 342 | def queue_candidate_confirmation(**kwargs): 343 | plugin._candidate_confirmations.append(kwargs) 344 | 345 | plugin._queue_candidate_confirmation = queue_candidate_confirmation 346 | plugin._match_score_for_query = lambda song, found: 0.75 347 | 348 | review_module = self.search.manual_search_ui 349 | original_review = review_module.review_candidate_confirmations 350 | def fake_review(_plugin, queued, _song, current_cache_key=None): 351 | candidate = queued[0] if queued else {} 352 | return { 353 | "action": "selected", 354 | "track": candidate.get("track", track), 355 | "cache_key": candidate.get("cache_key", current_cache_key), 356 | "sources": ["direct"], 357 | "original_song": candidate.get("song"), 358 | } 359 | review_module.review_candidate_confirmations = fake_review 360 | self.addCleanup(lambda: setattr(review_module, "review_candidate_confirmations", original_review)) 361 | 362 | song = {'title': 'Original Song', 'album': 'Original Album', 'artist': 'Original Artist'} 363 | 364 | result = self.search.search_plex_song(plugin, song, manual_search=True) 365 | 366 | self.assertIs(result, track) 367 | self.assertTrue(cached_results) 368 | self.assertFalse(plugin.manual_track_search_called) 369 | self.assertFalse(plugin._candidate_confirmations) 370 | 371 | def test_confirmation_survives_nested_call(self): 372 | variant_track = types.SimpleNamespace( 373 | ratingKey=222, 374 | title='Variant Track', 375 | parentTitle='Variant Album', 376 | artist=lambda: types.SimpleNamespace(title='Variant Artist'), 377 | ) 378 | 379 | class Music: 380 | def __init__(self): 381 | self.search_calls = [] 382 | 383 | def searchTracks(self, **kwargs): 384 | self.search_calls.append(kwargs) 385 | if kwargs.get('track.title') == 'Variant Track': 386 | return [variant_track] 387 | return [] 388 | 389 | def fetchItem(self, key): 390 | if key == variant_track.ratingKey: 391 | return variant_track 392 | raise AssertionError(f'unexpected fetchItem call for {key}') 393 | 394 | class Candidate: 395 | def __init__(self, metadata, score): 396 | self.metadata = metadata 397 | self.score = score 398 | 399 | def song_dict(self): 400 | return { 401 | 'title': self.metadata.get('title', ''), 402 | 'album': self.metadata.get('album', ''), 403 | 'artist': self.metadata.get('artist', ''), 404 | } 405 | 406 | def overlap_tokens(self, counts): 407 | return [] 408 | 409 | cache = CacheStub() 410 | cached_results = [] 411 | music = Music() 412 | 413 | plugin = types.SimpleNamespace() 414 | plugin._log = DummyLogger() 415 | plugin.cache = cache 416 | plugin.music = music 417 | plugin.search_llm = None 418 | plugin.manual_track_search_called = False 419 | 420 | def manual_track_search(_song): 421 | plugin.manual_track_search_called = True 422 | return None 423 | 424 | plugin.manual_track_search = manual_track_search 425 | plugin._cache_result = lambda key, result, cleaned=None: cached_results.append((key, result)) 426 | plugin.find_closest_match = lambda song, tracks: [(variant_track, 0.95)] 427 | 428 | candidate = Candidate( 429 | {'title': 'Original Candidate', 'album': 'Original Album', 'artist': 'Original Artist'}, 430 | 0.72, 431 | ) 432 | plugin.get_local_beets_candidates = lambda song: [candidate] 433 | plugin._try_candidate_direct_match = lambda cand, query, cache_key=None: None 434 | 435 | def prepare_variants(_candidates, _song): 436 | return [( 437 | {'title': 'Variant Track', 'album': 'Variant Album', 'artist': 'Variant Artist'}, 438 | 0.82, 439 | )] 440 | 441 | 
plugin._prepare_candidate_variants = prepare_variants 442 | plugin._candidate_confirmations = [] 443 | plugin._queue_candidate_confirmation = lambda **kwargs: plugin._candidate_confirmations.append(kwargs) 444 | 445 | def match_score(query, track): 446 | if query.get('title') == 'Variant Track': 447 | return 0.95 448 | return 0.55 449 | 450 | plugin._match_score_for_query = match_score 451 | 452 | review_module = self.search.manual_search_ui 453 | original_review = review_module.review_candidate_confirmations 454 | def fake_review(_plugin, queued, _song, current_cache_key=None): 455 | candidate = queued[0] if queued else {} 456 | return { 457 | "action": "selected", 458 | "track": candidate.get("track", variant_track), 459 | "cache_key": candidate.get("cache_key", current_cache_key), 460 | "sources": ["variant"], 461 | "original_song": candidate.get("song"), 462 | } 463 | review_module.review_candidate_confirmations = fake_review 464 | self.addCleanup(lambda: setattr(review_module, "review_candidate_confirmations", original_review)) 465 | 466 | song = {'title': 'Original Song', 'album': 'Original Album', 'artist': 'Original Artist'} 467 | 468 | result = self.search.search_plex_song(plugin, song, manual_search=True) 469 | 470 | self.assertIs(result, variant_track) 471 | self.assertTrue(cached_results) 472 | self.assertFalse(plugin.manual_track_search_called) 473 | self.assertFalse(plugin._candidate_confirmations) 474 | self.assertGreaterEqual(len(music.search_calls), 1) 475 | 476 | def test_variant_rejected_when_similarity_low(self): 477 | variant_track = types.SimpleNamespace( 478 | ratingKey=512, 479 | title='Variant Song', 480 | parentTitle='Variant Album', 481 | artist=lambda: types.SimpleNamespace(title='Variant Artist'), 482 | ) 483 | 484 | class Music: 485 | def __init__(self): 486 | self.search_calls = [] 487 | 488 | def searchTracks(self, **kwargs): 489 | self.search_calls.append(kwargs) 490 | filtered = {k: v for k, v in kwargs.items() if k != 'limit'} 491 | target = {'album.title': 'Variant Album', 'track.title': 'Variant Song'} 492 | if filtered == target: 493 | return [variant_track] 494 | return [] 495 | 496 | def fetchItem(self, key): 497 | raise AssertionError('fetchItem should not be called without a rating key') 498 | 499 | class Candidate: 500 | def __init__(self, metadata, score): 501 | self.metadata = metadata 502 | self.score = score 503 | 504 | def song_dict(self): 505 | return { 506 | 'title': self.metadata.get('title', ''), 507 | 'album': self.metadata.get('album', ''), 508 | 'artist': self.metadata.get('artist', ''), 509 | } 510 | 511 | def overlap_tokens(self, counts): 512 | return [] 513 | 514 | cache = CacheStub() 515 | cached_results = [] 516 | music = Music() 517 | 518 | plugin = types.SimpleNamespace() 519 | plugin._log = DummyLogger() 520 | plugin.cache = cache 521 | plugin.music = music 522 | plugin.search_llm = None 523 | plugin.manual_track_search = lambda song: None 524 | def cache_result(key, result, cleaned=None): 525 | key_str = str(key) 526 | if result is not None and 'Original Song' in key_str: 527 | cached_results.append(result) 528 | cache.set(key, result, cleaned) 529 | plugin._cache_result = cache_result 530 | plugin.find_closest_match = lambda song, tracks: [] 531 | plugin.get_local_beets_candidates = lambda song: [Candidate( 532 | {'title': 'Variant Song', 'album': 'Variant Album', 'artist': 'Variant Artist'}, 533 | 0.88, 534 | )] 535 | plugin._try_candidate_direct_match = lambda cand, query, cache_key=None: None 536 | 
plugin._prepare_candidate_variants = lambda candidates, song: [ 537 | (candidates[0].song_dict(), candidates[0].score) 538 | ] 539 | 540 | def match_score(song, track): 541 | if song.get('title') == 'Original Song': 542 | return 0.5 543 | return 0.95 544 | 545 | plugin._match_score_for_query = match_score 546 | 547 | song = {'title': 'Original Song', 'album': 'Original Album', 'artist': 'Original Artist'} 548 | result = self.search.search_plex_song(plugin, song, manual_search=False) 549 | 550 | self.assertIsNone(result) 551 | # Ensure no positive cache entry was written. 552 | self.assertFalse(cached_results) 553 | 554 | 555 | if __name__ == '__main__': 556 | unittest.main() 557 | --------------------------------------------------------------------------------