├── backend
├── api
│ ├── state.py
│ ├── routers
│ │ ├── system.py
│ │ ├── library.py
│ │ └── listenbrainz.py
│ ├── clients
│ │ ├── __init__.py
│ │ └── listenbrainz.py
│ ├── utils
│ │ ├── logging.py
│ │ ├── text.py
│ │ └── extraction.py
│ ├── settings.py
│ ├── models.py
│ ├── main.py
│ ├── services
│ │ ├── search.py
│ │ ├── lyrics.py
│ │ ├── beets.py
│ │ ├── listenbrainz.py
│ │ ├── files.py
│ │ ├── download.py
│ │ └── library.py
│ └── auth.py
├── start.ps1
├── quick_test.py
├── requirements.txt
├── config.py
├── .env.example
├── test_api.py
├── tests
│ ├── conftest.py
│ ├── test_downloads.py
│ ├── test_utils.py
│ └── test_search.py
├── termux-setup.sh
├── test_quick.py
├── download_state.py
└── lyrics_client.py
├── frontend
├── public
│ ├── favicon.ico
│ └── tsunami.svg
├── src
│ ├── assets
│ │ ├── nyan-cat.mp3
│ │ └── tsunami.svg
│ ├── main.jsx
│ ├── stores
│ │ ├── toastStore.js
│ │ └── downloadStore.js
│ ├── store
│ │ └── authStore.js
│ ├── data
│ │ └── releaseNotes.js
│ ├── hooks
│ │ └── useTheme.js
│ ├── components
│ │ ├── Toast.jsx
│ │ ├── ReleaseNotes.jsx
│ │ ├── Login.jsx
│ │ ├── LibraryPage.jsx
│ │ ├── SettingsPanel.jsx
│ │ └── PlaylistPage.jsx
│ ├── app.jsx
│ ├── api
│ │ └── client.js
│ └── utils
│ │ └── downloadManager.js
├── postcss.config.js
├── vite.config.js
├── package.json
├── index.html
└── tailwind.config.js
├── restart-service.sh
├── run_tests.sh
├── stop-service.sh
├── start-service.sh
├── .dockerignore
├── docker-compose.yml
├── .github
└── workflows
│ ├── tests.yml
│ ├── build-branch-image.yml
│ └── docker-publish.yml
├── .env.example
├── .gitignore
├── LICENSE
├── install-termux-service.sh
├── Dockerfile
└── README.md
/backend/api/state.py:
--------------------------------------------------------------------------------
# Shared in-memory state for the API process. Not persisted — everything here
# resets when the server restarts.

# Presumably maps a download identifier -> progress/metadata for downloads in
# flight — TODO confirm key type against the downloads router.
active_downloads = {}
# Presumably maps a request/session id -> queue streaming ListenBrainz
# playlist-generation progress — verify against the listenbrainz router.
lb_progress_queues = {}
--------------------------------------------------------------------------------
/frontend/public/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RayZ3R0/tidaloader/HEAD/frontend/public/favicon.ico
--------------------------------------------------------------------------------
/frontend/src/assets/nyan-cat.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RayZ3R0/tidaloader/HEAD/frontend/src/assets/nyan-cat.mp3
--------------------------------------------------------------------------------
/frontend/postcss.config.js:
--------------------------------------------------------------------------------
// PostCSS pipeline: Tailwind runs first to expand utility classes, then
// Autoprefixer adds vendor prefixes for the configured browser targets.
export default {
  plugins: {
    tailwindcss: {},
    autoprefixer: {},
  },
};
--------------------------------------------------------------------------------
/backend/start.ps1:
--------------------------------------------------------------------------------
# Dev launcher (Windows): activate the local virtualenv, then run the FastAPI
# app with auto-reload, listening on all interfaces at port 8001.
# Activate venv and start server on port 8001
.\venv\Scripts\Activate.ps1
python -m uvicorn api.main:app --reload --host 0.0.0.0 --port 8001
--------------------------------------------------------------------------------
/frontend/src/main.jsx:
--------------------------------------------------------------------------------
1 | import { render } from "preact";
2 | import { App } from "./app";
3 | import "./style.css";
4 |
5 | render(, document.getElementById("app"));
6 |
--------------------------------------------------------------------------------
/restart-service.sh:
--------------------------------------------------------------------------------
1 | #!/data/data/com.termux/files/usr/bin/bash
2 |
3 | # Restart script for Tidal Troi UI
4 | cd ~/tidaloader
5 | ./stop-service.sh
6 | sleep 2
7 | ./start-service.sh
--------------------------------------------------------------------------------
/run_tests.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -e

# Set environment variables for testing
# PYTHONPATH lets pytest resolve the `api` package inside backend/.
export PYTHONPATH=$(pwd)/backend
# Dummy Basic-auth credentials; the API reads AUTH_USERNAME/AUTH_PASSWORD.
export AUTH_USERNAME=test
export AUTH_PASSWORD=test

echo "Running backend tests..."
# Use the backend venv's pytest; forward extra CLI args (e.g. -k, -x) via "$@".
backend/venv/bin/pytest backend/tests/ "$@"
--------------------------------------------------------------------------------
/backend/api/routers/system.py:
--------------------------------------------------------------------------------
from fastapi import APIRouter

router = APIRouter()


@router.get("/api")
async def api_root():
    """Root API endpoint; confirms the service is reachable."""
    payload = {"status": "ok", "message": "Tidaloader API"}
    return payload


@router.get("/api/health")
async def health_check():
    """Liveness probe; hit by the Docker/compose healthchecks."""
    return dict(status="healthy")
--------------------------------------------------------------------------------
/backend/quick_test.py:
--------------------------------------------------------------------------------
"""Quick test to verify imports work"""
# Smoke check: confirm the core backend dependencies are importable in the
# current environment. Prints a check/cross mark instead of raising.
try:
    from fastapi import FastAPI
    from pydantic import BaseModel
    import requests
    print("✓ All imports successful!")
    print("✓ Backend dependencies are ready")
except ImportError as e:
    # Only the first failing import is reported.
    print(f"✗ Import failed: {e}")
--------------------------------------------------------------------------------
/frontend/vite.config.js:
--------------------------------------------------------------------------------
import { defineConfig } from "vite";
import preact from "@preact/preset-vite";

// Dev-server config: the frontend runs on :5173 and proxies /api requests to
// the FastAPI backend on :8001, avoiding CORS during development.
export default defineConfig({
  plugins: [preact()],
  server: {
    port: 5173,
    proxy: {
      "/api": {
        target: "http://localhost:8001",
        changeOrigin: true,
      },
    },
  },
});
--------------------------------------------------------------------------------
/backend/requirements.txt:
--------------------------------------------------------------------------------
1 | fastapi==0.115.5
2 | uvicorn[standard]==0.32.1
3 | python-multipart==0.0.20
4 | aiofiles==24.1.0
5 | requests==2.32.3
6 | pydantic==2.10.3
7 | pydantic-settings==2.6.1
8 | python-dotenv
9 | aiohttp==3.11.11
10 | mutagen==1.46.0
11 | lrclibapi==0.3.1
12 | passlib[bcrypt]==1.7.4
13 | python-jose[cryptography]==3.3.0
14 | pykakasi==2.3.0
15 | beets
16 | pyacoustid
17 | pylast
18 | pytest==7.4.3
19 | httpx==0.25.2
20 | pytest-asyncio==0.23.2
--------------------------------------------------------------------------------
/stop-service.sh:
--------------------------------------------------------------------------------
#!/data/data/com.termux/files/usr/bin/bash

# Stop script for Tidal Troi UI
# Reads the PID recorded by start-service.sh, kills the process if it is
# still alive, and removes the stale PID file in either case.
if [ -f ~/tidaloader.pid ]; then
    PID=$(cat ~/tidaloader.pid)
    # kill -0 only tests whether the process exists; it sends no signal.
    if kill -0 $PID 2>/dev/null; then
        echo "Stopping Tidal Troi UI (PID: $PID)..."
        kill $PID
        rm ~/tidaloader.pid
        echo "Service stopped"
    else
        echo "Process not running"
        rm ~/tidaloader.pid
    fi
else
    echo "PID file not found"
fi
--------------------------------------------------------------------------------
/start-service.sh:
--------------------------------------------------------------------------------
1 | #!/data/data/com.termux/files/usr/bin/bash
2 |
3 | # Start script for Tidal Troi UI
4 | set -e
5 |
6 | cd ~/tidaloader/backend
7 |
8 | # Activate virtual environment
9 | source venv/bin/activate
10 |
11 | # Start the backend server
12 | echo "Starting Tidal Troi UI backend on port 8001..."
13 | nohup python -m uvicorn api.main:app --host 0.0.0.0 --port 8001 > ~/tidaloader.log 2>&1 &
14 |
15 | echo $! > ~/tidaloader.pid
16 | echo "Service started with PID $(cat ~/tidaloader.pid)"
17 | echo "Logs: ~/tidaloader.log"
18 | echo "Access at: http://localhost:8001"
--------------------------------------------------------------------------------
/frontend/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "tidaloader",
3 | "version": "1.0.0",
4 | "license": "MIT",
5 | "type": "module",
6 | "scripts": {
7 | "dev": "vite",
8 | "build": "vite build",
9 | "preview": "vite preview"
10 | },
11 | "dependencies": {
12 | "framer-motion": "^12.23.26",
13 | "preact": "^10.19.3",
14 | "preact-router": "^4.1.2",
15 | "zustand": "^4.4.7"
16 | },
17 | "devDependencies": {
18 | "@preact/preset-vite": "^2.8.1",
19 | "autoprefixer": "^10.4.21",
20 | "postcss": "^8.5.6",
21 | "tailwindcss": "^3.4.1",
22 | "vite": "^5.0.8"
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/frontend/src/stores/toastStore.js:
--------------------------------------------------------------------------------
import { create } from "zustand";

// Monotonic id source. Date.now() alone can collide when two toasts are
// created within the same millisecond, in which case the auto-dismiss timer
// for one toast would remove both. A counter is always unique per session.
let nextToastId = 0;

export const useToastStore = create((set, get) => ({
  toasts: [],

  /**
   * Show a toast. It auto-dismisses after 4 seconds.
   * @param {string} message - text to display
   * @param {string} [type="info"] - visual variant (e.g. "info", "error")
   */
  addToast: (message, type = "info") => {
    const id = ++nextToastId;
    const toast = { id, message, type };

    set((state) => ({
      toasts: [...state.toasts, toast],
    }));

    setTimeout(() => {
      get().removeToast(id);
    }, 4000);
  },

  /** Remove a toast by id (used by the timer above and close buttons). */
  removeToast: (id) => {
    set((state) => ({
      toasts: state.toasts.filter((toast) => toast.id !== id),
    }));
  },
}));
--------------------------------------------------------------------------------
/backend/config.py:
--------------------------------------------------------------------------------
1 | """
2 | Configuration for download paths
3 | """
4 | import os
5 | from pathlib import Path
6 | from dotenv import load_dotenv
7 |
8 |
9 | load_dotenv()
10 |
def get_music_dir() -> Path:
    """Resolve the directory music is downloaded into.

    Uses the MUSIC_DIR environment variable when set (and non-empty);
    otherwise falls back to a "downloads" folder next to this file.
    The directory is created if it does not already exist.
    """
    configured = os.getenv('MUSIC_DIR')

    target = Path(configured) if configured else Path(__file__).parent / "downloads"

    # Create eagerly so later writes never fail on a missing path.
    target.mkdir(parents=True, exist_ok=True)

    return target
25 |
26 |
# Resolved once at import time; presumably imported elsewhere as
# config.MUSIC_DIR — confirm call sites before changing.
MUSIC_DIR = get_music_dir()
--------------------------------------------------------------------------------
/backend/.env.example:
--------------------------------------------------------------------------------
1 | # Music directory - change this to your Navidrome music path
2 | MUSIC_DIR=C:\Users\USER\Music
3 |
4 | AUTH_USERNAME=admin
5 | AUTH_PASSWORD=your-secure-password-here
6 |
7 | # ==============================================================================
8 | # DOWNLOAD QUEUE SETTINGS
9 | # ==============================================================================
10 |
11 | # Maximum number of concurrent downloads (default: 3)
12 | MAX_CONCURRENT_DOWNLOADS=3
13 |
14 | # Auto-process downloads when added to queue (default: true)
15 | # Set to false for manual start/stop control (not recommended for multi-user setups)
16 | QUEUE_AUTO_PROCESS=true
--------------------------------------------------------------------------------
/backend/api/clients/__init__.py:
--------------------------------------------------------------------------------
import sys
from pathlib import Path

# Add backend directory to path so we can import tidal_client
# __file__ = backend/api/clients/__init__.py
# parent = backend/api/clients
# parent.parent = backend/api
# parent.parent.parent = backend
# NOTE: this mutates sys.path as an import-time side effect; any module that
# imports this package also gets the backend dir on its path.

backend_path = Path(__file__).parent.parent.parent
sys.path.append(str(backend_path))

try:
    from tidal_client import TidalAPIClient
except ImportError:
    # Fallback if running from root
    from backend.tidal_client import TidalAPIClient

# Shared singleton instance; routers/tests patch this name to mock Tidal.
tidal_client = TidalAPIClient()

# Export ListenBrainzClient for easier access
from .listenbrainz import ListenBrainzClient
--------------------------------------------------------------------------------
/backend/api/utils/logging.py:
--------------------------------------------------------------------------------
class Colors:
    """ANSI escape sequences for colored terminal output."""
    RESET = '\033[0m'
    RED = '\033[91m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    BLUE = '\033[94m'
    MAGENTA = '\033[95m'
    CYAN = '\033[96m'
    WHITE = '\033[97m'
    BOLD = '\033[1m'
    DIM = '\033[2m'


def _emit(color: str, label: str, msg: str):
    """Print one log line as '<color>[LABEL]<reset> message'."""
    print(f"{color}[{label}]{Colors.RESET} {msg}")


def log_success(msg: str):
    """Log a green [SUCCESS] line."""
    _emit(Colors.GREEN, "SUCCESS", msg)


def log_error(msg: str):
    """Log a red [ERROR] line."""
    _emit(Colors.RED, "ERROR", msg)


def log_warning(msg: str):
    """Log a yellow [WARNING] line."""
    _emit(Colors.YELLOW, "WARNING", msg)


def log_info(msg: str):
    """Log a cyan [INFO] line."""
    _emit(Colors.CYAN, "INFO", msg)


def log_step(step: str, msg: str):
    """Log a magenta line with a caller-supplied step label."""
    _emit(Colors.MAGENTA, step, msg)
--------------------------------------------------------------------------------
/backend/api/settings.py:
--------------------------------------------------------------------------------
from pathlib import Path
from typing import Optional
from pydantic_settings import BaseSettings
from dotenv import load_dotenv

# Load variables from a .env file into the process environment at import time.
load_dotenv()

class Settings(BaseSettings):
    """App configuration, populated from environment variables / .env."""
    # Where downloads are written; defaults to ~/music.
    music_dir: str = str(Path.home() / "music")
    # Basic-auth credentials; presumably None disables auth — confirm in api.auth.
    auth_username: Optional[str] = None
    auth_password: Optional[str] = None

    class Config:
        # backend/api/settings.py -> repo-root .env
        env_file = Path(__file__).parent.parent.parent / ".env"
        case_sensitive = False
        extra = "ignore"  # tolerate unrelated keys (e.g. queue settings)

settings = Settings()

# Created eagerly at import time so download code can assume it exists.
DOWNLOAD_DIR = Path(settings.music_dir)
DOWNLOAD_DIR.mkdir(parents=True, exist_ok=True)

# MP3 bitrates (kbps) keyed by quality label.
MP3_QUALITY_MAP = {
    "MP3_128": 128,
    "MP3_256": 256,
}

# Opus VBR target bitrates (kbps) keyed by quality label.
OPUS_QUALITY_MAP = {
    "OPUS_192VBR": 192,
}
--------------------------------------------------------------------------------
/frontend/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | Tidaloader
8 |
9 |
10 |
11 |
12 |
13 |
25 |
26 |
27 |
28 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | # Python
2 | **/__pycache__
3 | **/*.pyc
4 | **/*.pyo
5 | **/*.pyd
6 | **/.Python
7 | **/env
8 | **/venv
9 | **/.venv
10 | **/pip-log.txt
11 | **/pip-delete-this-directory.txt
12 | **/.pytest_cache
13 | **/.coverage
14 | **/*.egg-info
15 |
16 | # Node
17 | **/node_modules
18 | **/npm-debug.log*
19 | **/yarn-debug.log*
20 | **/yarn-error.log*
21 | **/.npm
22 |
23 | # IDE
24 | **/.vscode
25 | **/.idea
26 | **/*.swp
27 | **/*.swo
28 | **/*~
29 |
30 | # OS
31 | **/.DS_Store
32 | **/Thumbs.db
33 |
34 | # Git
35 | .git
36 | .gitignore
37 |
38 | # Environment
39 | **/.env
40 | !backend/.env.example
41 |
42 | # Downloads
43 | backend/downloads
44 | music
45 |
46 | # Logs
47 | **/*.log
48 |
49 | # Build artifacts
50 | frontend/dist
51 | frontend/build
52 |
53 | # Scripts (not needed in container)
54 | *.ps1
55 | *.sh
56 | !backend/termux-setup.sh
--------------------------------------------------------------------------------
/frontend/src/store/authStore.js:
--------------------------------------------------------------------------------
import { create } from "zustand";
import { persist } from "zustand/middleware";

// Persisted auth store: keeps HTTP Basic credentials (base64 of
// "username:password") in localStorage under "troi-auth-storage".
// NOTE(review): base64 is encoding, not encryption — anything that can read
// localStorage can recover the password. Also, btoa() throws on characters
// outside Latin-1, so non-ASCII passwords would fail here — confirm whether
// that needs handling.
export const useAuthStore = create(
  persist(
    (set) => ({
      credentials: null,
      isAuthenticated: false,

      // Encode and store credentials; marks the session authenticated.
      setCredentials: (username, password) => {
        const credentials = btoa(`${username}:${password}`);
        set({ credentials, isAuthenticated: true });
      },

      // Logout: drop stored credentials.
      clearCredentials: () => {
        set({ credentials: null, isAuthenticated: false });
      },

      // Returns the Authorization header value, or null when logged out.
      getAuthHeader: () => {
        const state = useAuthStore.getState();
        if (!state.credentials) return null;
        return `Basic ${state.credentials}`;
      },
    }),
    {
      name: "troi-auth-storage",
    }
  )
);
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | tidaloader:
3 | build:
4 | context: .
5 | args:
6 | USER_UID: ${USER_UID:-1000}
7 | USER_GID: ${USER_GID:-1000}
8 | container_name: tidaloader
9 | ports:
10 | - "8001:8001"
11 | environment:
12 | - MUSIC_DIR=${MUSIC_DIR:-/music}
13 | - AUTH_USERNAME=${AUTH_USERNAME:-admin}
14 | - AUTH_PASSWORD=${AUTH_PASSWORD:-changeme}
15 | # Queue configuration
16 | - MAX_CONCURRENT_DOWNLOADS=${MAX_CONCURRENT_DOWNLOADS:-3}
17 | - QUEUE_AUTO_PROCESS=${QUEUE_AUTO_PROCESS:-true}
18 | volumes:
19 | - ${MUSIC_DIR_HOST:-./music}:/music
20 | restart: unless-stopped
21 | healthcheck:
22 | test: ["CMD", "curl", "-f", "http://localhost:8001/api/health"]
23 | interval: 30s
24 | timeout: 10s
25 | retries: 3
26 | start_period: 40s
27 |
--------------------------------------------------------------------------------
/.github/workflows/tests.yml:
--------------------------------------------------------------------------------
1 | name: Backend Tests
2 |
3 | on:
4 | push:
5 | branches: [ main ]
6 | pull_request:
7 | branches: [ main ]
8 |
9 | jobs:
10 | test:
11 | runs-on: ubuntu-latest
12 |
13 | defaults:
14 | run:
15 | working-directory: ./backend
16 |
17 | steps:
18 | - uses: actions/checkout@v3
19 |
20 | - name: Set up Python 3.10
21 | uses: actions/setup-python@v4
22 | with:
23 | python-version: "3.10"
24 |
25 | - name: Install dependencies
26 | run: |
27 | python -m pip install --upgrade pip
28 | pip install -r requirements.txt
29 | pip install pytest httpx pytest-asyncio
30 |
31 | - name: Run tests
32 | env:
33 | AUTH_USERNAME: test
34 | AUTH_PASSWORD: test
35 | PYTHONPATH: ${{ github.workspace }}/backend
36 | run: |
37 | pytest tests/
38 |
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
1 | # Copy this to .env and customize
2 |
3 | # Authentication credentials
4 | AUTH_USERNAME=admin
5 | AUTH_PASSWORD=your-secure-password-here
6 |
7 | # Music directory inside container (keep as /music)
8 | MUSIC_DIR=/music
9 |
10 | # Music directory on your host machine
11 | # Windows: MUSIC_DIR_HOST=./music or C:/Users/YourName/Music
12 | # Linux/Mac: MUSIC_DIR_HOST=./music or /home/yourname/Music
13 | MUSIC_DIR_HOST=./music
14 |
15 | # ==============================================================================
16 | # DOWNLOAD QUEUE SETTINGS
17 | # ==============================================================================
18 |
19 | # Maximum number of concurrent downloads (default: 3)
20 | MAX_CONCURRENT_DOWNLOADS=3
21 |
22 | # Auto-process downloads when added to queue (default: true)
23 | # Set to false for manual start/stop control (not recommended for multi-user setups)
24 | QUEUE_AUTO_PROCESS=true
--------------------------------------------------------------------------------
/backend/test_api.py:
--------------------------------------------------------------------------------
"""
Manual test script for the backend API (run against a live server).
"""
import requests

# The backend listens on 8001 everywhere else in this repo (start.ps1,
# start-service.sh, Dockerfile, vite proxy) — 8000 was a stale value.
BASE_URL = "http://localhost:8001"

def test_search():
    """Test track search"""
    response = requests.get(f"{BASE_URL}/api/search/tracks", params={"q": "Radiohead"})
    print(f"Search Status: {response.status_code}")
    print(f"Results: {len(response.json()['items'])} tracks")

def test_troi():
    """Test Troi generation"""
    response = requests.post(
        f"{BASE_URL}/api/troi/generate",
        json={"username": "z3r069", "playlist_type": "periodic-jams"}
    )
    print(f"Troi Status: {response.status_code}")
    data = response.json()
    print(f"Generated: {data['count']} tracks")
    print(f"Found on Tidal: {data['found_on_tidal']} tracks")

if __name__ == "__main__":
    print("Testing API...")
    test_search()
    print("\n" + "="*50 + "\n")
    test_troi()
--------------------------------------------------------------------------------
/backend/tests/conftest.py:
--------------------------------------------------------------------------------
import pytest
from fastapi.testclient import TestClient
from unittest.mock import MagicMock
import sys
from pathlib import Path

# Add backend directory to path so we can import api modules
sys.path.append(str(Path(__file__).parent.parent))

from api.main import app
from api.clients import tidal_client

@pytest.fixture
def client():
    """Synchronous FastAPI test client bound to the real app."""
    return TestClient(app)

@pytest.fixture
def mock_tidal_client(monkeypatch):
    """Replace the Tidal client with a MagicMock everywhere it is referenced.

    Each of these modules holds its own reference to the instance, so all
    three must be patched for the mock to take effect.
    """
    mock = MagicMock()
    # Replace the global tidal_client instance with our mock
    monkeypatch.setattr("api.routers.search.tidal_client", mock)
    monkeypatch.setattr("api.routers.downloads.tidal_client", mock)
    monkeypatch.setattr("api.clients.tidal_client", mock)
    return mock

@pytest.fixture
def mock_background_tasks(monkeypatch):
    """MagicMock stand-in so background tasks are recorded, not executed."""
    # Mock background tasks to prevent actual execution
    mock = MagicMock()
    return mock
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Backend
2 | backend/.env
3 | backend/venv/
4 | backend/__pycache__/
5 | backend/api/__pycache__/
6 | backend/downloads/
7 | backend/.cache/
8 | *.pyc
9 |
10 | # Frontend
11 | frontend/node_modules/
12 | frontend/dist/
13 | frontend/.env
14 |
15 | # Python
16 | __pycache__/
17 | *.py[cod]
18 | *$py.class
19 | *.so
20 | .Python
21 | env/
22 | venv/
23 | .venv/
24 | *.egg-info/
25 | dist/
26 | build/
27 |
28 | # Node
29 | node_modules/
30 | npm-debug.log*
31 | yarn-debug.log*
32 | yarn-error.log*
33 |
34 | # Environment - ignore actual .env but keep .env.example
35 | .env
36 | !.env.example
37 |
38 | # IDE
39 | .vscode/
40 | .idea/
41 | *.swp
42 | *.swo
43 |
44 | # OS
45 | .DS_Store
46 | Thumbs.db
47 |
48 | # Downloads and Music
49 | downloads/
50 | music/*
51 | !music/.gitkeep
52 |
53 | # Logs
54 | *.log
55 |
56 | # Build
57 | frontend/dist/
58 | frontend/build/
59 |
60 |
61 | backend/download_state.json
62 |
63 | backend/tidaloader_beets.yaml
64 | tidal-ui-analysis/
65 | backend/queue_state.json
66 |
67 | backend/api/.cache
--------------------------------------------------------------------------------
/backend/termux-setup.sh:
--------------------------------------------------------------------------------
#!/data/data/com.termux/files/usr/bin/bash

# Termux setup script for Tidal Troi UI
# One-shot bootstrap: installs packages, clones the repo, builds the backend
# venv and the frontend bundle.
set -e

echo "Setting up Tidal Troi UI on Termux..."

# Install required packages
pkg update -y
pkg install -y python git nodejs-lts

# Create directories
mkdir -p ~/tidaloader
mkdir -p ~/music/tidal-downloads

# Clone repository if not exists
if [ ! -d ~/tidaloader/.git ]; then
    cd ~
    git clone https://github.com/RayZ3R0/tidaloader.git
fi

cd ~/tidaloader

# Backend setup
cd backend
python -m venv venv
source venv/bin/activate
pip install --upgrade pip
pip install -r requirements.txt

# Create .env file
# NOTE(review): this overwrites any existing .env and sets no AUTH_* values —
# confirm whether credentials should be preserved/prompted here.
cat > .env << 'EOF'
# Music directory for Termux
MUSIC_DIR=/data/data/com.termux/files/home/music/tidal-downloads
EOF

cd ..

# Frontend setup
cd frontend
npm install
npm run build

cd ..

echo "Setup complete!"
echo "Run './start-service.sh' to start the service"
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2025 RayZ3R0
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/.github/workflows/build-branch-image.yml:
--------------------------------------------------------------------------------
1 | name: Build Branch Image
2 |
3 | on:
4 | push:
5 | branches-ignore:
6 | - main
7 | - master
8 | workflow_dispatch:
9 |
10 | env:
11 | IMAGE_NAME: ghcr.io/${{ github.repository }}
12 |
13 | jobs:
14 | build-only:
15 | runs-on: ubuntu-latest
16 | steps:
17 | - name: Checkout
18 | uses: actions/checkout@v4
19 |
20 | - name: Set up QEMU
21 | uses: docker/setup-qemu-action@v3
22 |
23 | - name: Set up Docker Buildx
24 | uses: docker/setup-buildx-action@v3
25 |
26 | - name: Extract metadata for branch tag
27 | id: meta
28 | uses: docker/metadata-action@v5
29 | with:
30 | images: |
31 | ${{ env.IMAGE_NAME }}
32 | tags: |
33 | type=ref,event=branch
34 |
35 | - name: Build image without pushing
36 | uses: docker/build-push-action@v5
37 | with:
38 | context: .
39 | push: false
40 | tags: ${{ steps.meta.outputs.tags }}
41 | labels: ${{ steps.meta.outputs.labels }}
42 | cache-from: type=gha
43 | cache-to: type=gha,mode=max
44 |
45 |
--------------------------------------------------------------------------------
/backend/api/utils/text.py:
--------------------------------------------------------------------------------
1 | import unicodedata
2 | from typing import Optional
3 | from api.utils.logging import log_warning
4 |
def fix_unicode(text: str) -> str:
    """Repair common Unicode issues in metadata strings.

    Decodes literal "\\uXXXX" escape sequences that leaked into the text,
    then applies NFC normalization so composed/decomposed forms compare
    equal. Falsy input (empty string, None) is returned unchanged, and each
    repair step is best-effort: on failure the text is left as-is.
    """
    if not text:
        return text

    try:
        # Literal backslash-u escapes (e.g. "\\u00e9") -> real characters.
        if '\\u' in text:
            text = text.encode('raw_unicode_escape').decode('unicode_escape')
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort behavior safely.
        pass

    try:
        text = unicodedata.normalize('NFC', text)
    except Exception:
        pass

    return text

def romanize_japanese(text: str) -> Optional[str]:
    """Return a space-joined Hepburn romanization of *text*, or None.

    None is returned when the text is empty, contains no Japanese
    characters, pykakasi is not installed, or conversion fails.
    """
    if not text:
        return None

    # Hiragana/Katakana (U+3040–U+30FF) or CJK ideographs (U+4E00–U+9FFF).
    if not any('\u3040' <= ch <= '\u30ff' or '\u4e00' <= ch <= '\u9fff' for ch in text):
        return None

    try:
        import pykakasi
        converter = pykakasi.kakasi()
        pieces = [segment['hepburn'] for segment in converter.convert(text)]
        return ' '.join(pieces)
    except ImportError:
        # pykakasi is an optional dependency; silently skip romanization.
        return None
    except Exception as e:
        log_warning(f"Romanization failed: {e}")
        return None

--------------------------------------------------------------------------------
/install-termux-service.sh:
--------------------------------------------------------------------------------
#!/data/data/com.termux/files/usr/bin/bash

# Install as Termux service using Termux:Boot
# Drops a boot script into ~/.termux/boot so the app starts on device boot.
set -e

echo "Installing Tidal Troi UI as a Termux service..."

# Install Termux:Boot if not installed
# NOTE(review): `termux-wake-lock` is provided by the Termux:API package, not
# Termux:Boot — this check may pass/fail for the wrong reason; confirm.
if ! command -v termux-wake-lock &> /dev/null; then
    echo "Please install Termux:Boot from F-Droid or Google Play"
    echo "https://wiki.termux.com/wiki/Termux:Boot"
    exit 1
fi

# Create boot script directory
mkdir -p ~/.termux/boot

# Create boot script (quoted EOF: no variable expansion inside the heredoc)
cat > ~/.termux/boot/start-tidal-troi.sh << 'EOF'
#!/data/data/com.termux/files/usr/bin/bash

# Acquire wake lock to prevent sleep
termux-wake-lock

# Wait for network
sleep 10

# Start Tidal Troi UI
cd ~/tidaloader
./start-service.sh
EOF

chmod +x ~/.termux/boot/start-tidal-troi.sh
chmod +x ~/tidaloader/start-service.sh
chmod +x ~/tidaloader/stop-service.sh
chmod +x ~/tidaloader/restart-service.sh

echo "Service installed!"
echo "The app will start automatically when you boot your device"
echo "Make sure Termux:Boot is enabled in your Android settings"
--------------------------------------------------------------------------------
/frontend/tailwind.config.js:
--------------------------------------------------------------------------------
/** @type {import('tailwindcss').Config} */
export default {
  content: ["./index.html", "./src/**/*.{js,ts,jsx,tsx}"],
  darkMode: "class",
  theme: {
    extend: {
      // Colors come from CSS custom properties so themes can switch at
      // runtime. Tailwind's "<alpha-value>" placeholder lets opacity
      // modifiers (e.g. bg-primary/50) work; the previous
      // "rgb(var(--x) / )" form was invalid CSS with the alpha missing.
      colors: {
        primary: {
          DEFAULT: "rgb(var(--color-primary) / <alpha-value>)",
          light: "rgb(var(--color-primary-light) / <alpha-value>)",
          dark: "rgb(var(--color-primary-dark) / <alpha-value>)",
        },
        secondary: {
          DEFAULT: "rgb(var(--color-secondary) / <alpha-value>)",
          light: "rgb(var(--color-secondary-light) / <alpha-value>)",
          dark: "rgb(var(--color-secondary-dark) / <alpha-value>)",
        },
        background: {
          DEFAULT: "rgb(var(--color-background) / <alpha-value>)",
          alt: "rgb(var(--color-background-alt) / <alpha-value>)",
        },
        surface: {
          DEFAULT: "rgb(var(--color-surface) / <alpha-value>)",
          alt: "rgb(var(--color-surface-alt) / <alpha-value>)",
        },
        text: {
          DEFAULT: "rgb(var(--color-text) / <alpha-value>)",
          muted: "rgb(var(--color-text-muted) / <alpha-value>)",
        },
        border: {
          DEFAULT: "rgb(var(--color-border) / <alpha-value>)",
          light: "rgb(var(--color-border-light) / <alpha-value>)",
        },
      },
    },
  },
  plugins: [],
};
--------------------------------------------------------------------------------
/frontend/public/tsunami.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/frontend/src/assets/tsunami.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
# Stage 1: Frontend Builder — compiles the Preact app to static assets.
FROM node:18-alpine AS frontend-builder
WORKDIR /app/frontend
COPY frontend/package*.json ./
# NOTE(review): `npm install` (not `npm ci`) — builds are not fully
# reproducible; consider `npm ci` once a lockfile is guaranteed to exist.
# The musl rollup binary is required because Alpine lacks glibc.
RUN npm install && \
    npm install @rollup/rollup-linux-x64-musl --save-optional
COPY frontend/ ./
RUN npm run build

# Stage 2: Python Dependency Builder — installs deps into /root/.local
# so only the installed packages (not build tooling) are copied forward.
FROM python:3.11-slim AS python-builder
WORKDIR /app
COPY backend/requirements.txt .
RUN pip install --user --no-cache-dir -r requirements.txt

# Stage 3: FFmpeg Static Binary Provider (no apt ffmpeg → smaller image)
FROM mwader/static-ffmpeg:7.1 AS ffmpeg

# Stage 4: Final Image
FROM python:3.11-slim
WORKDIR /app

# Install runtime system dependencies (minimal)
# libchromaprint-tools: for fpcalc (audio fingerprinting)
# curl: for healthcheck
RUN apt-get update && apt-get install -y --no-install-recommends \
    curl \
    libchromaprint-tools \
    && rm -rf /var/lib/apt/lists/*

# Create non-root user; UID/GID overridable at build time to match host.
ARG USER_UID=1000
ARG USER_GID=1000
RUN groupadd -g ${USER_GID} appuser && \
    useradd -m -u ${USER_UID} -g appuser appuser

# Copy Python dependencies from builder (pip --user target)
COPY --from=python-builder /root/.local /home/appuser/.local

# Copy FFmpeg static binaries
COPY --from=ffmpeg /ffmpeg /usr/local/bin/
COPY --from=ffmpeg /ffprobe /usr/local/bin/

# Copy Backend Code
COPY backend/ ./backend/

# Copy Frontend Build
COPY --from=frontend-builder /app/frontend/dist ./frontend/dist

# Setup Directories and Permissions (/music is the download target volume)
RUN mkdir -p /music && \
    chown -R appuser:appuser /app /music

# Env vars: put user-level pip scripts on PATH; unbuffered logs for Docker.
ENV PATH=/home/appuser/.local/bin:$PATH
ENV PYTHONUNBUFFERED=1

WORKDIR /app/backend
USER appuser

EXPOSE 8001

HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
    CMD curl -f http://localhost:8001/api/health || exit 1

CMD ["python", "-m", "uvicorn", "api.main:app", "--host", "0.0.0.0", "--port", "8001"]
--------------------------------------------------------------------------------
/backend/api/models.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 | from dataclasses import dataclass
3 | from pydantic import BaseModel
4 |
@dataclass
class PlaylistTrack:
    """Track from generated playlist, enriched in place with Tidal IDs
    by the search service once a match is found."""
    title: str
    artist: str
    mbid: Optional[str] = None  # MusicBrainz recording id, when known
    tidal_id: Optional[int] = None  # Tidal track id, set on match
    tidal_artist_id: Optional[int] = None
    tidal_album_id: Optional[int] = None
    tidal_exists: bool = False  # True after a successful Tidal lookup
    album: Optional[str] = None
    cover: Optional[str] = None

class ListenBrainzGenerateRequest(BaseModel):
    """Request body for generating a ListenBrainz playlist."""
    username: str  # ListenBrainz username (not the app login)
    playlist_type: str = "periodic-jams"
    # When True, tracks are validated against Tidal during generation.
    should_validate: bool = True

class ValidateTrackRequest(BaseModel):
    """Request body for on-demand validation of a single playlist track."""
    track: PlaylistTrack

class TrackSearchResult(BaseModel):
    """A single track hit returned by the search endpoints."""
    id: int
    title: str
    artist: str
    album: Optional[str] = None
    track_number: Optional[int] = None
    duration: Optional[int] = None  # presumably seconds — confirm upstream
    cover: Optional[str] = None
    quality: Optional[str] = None
    # NOTE(review): trackNumber duplicates track_number (camelCase vs
    # snake_case) — presumably kept so both payload spellings bind;
    # confirm against the frontend before removing either.
    trackNumber: Optional[int] = None
    albumArtist: Optional[str] = None
    tidal_artist_id: Optional[int] = None
    tidal_album_id: Optional[int] = None

class PlaylistSearchResult(BaseModel):
    """A single playlist hit returned by the playlist search endpoint."""
    id: str
    title: str
    creator: Optional[str] = None
    description: Optional[str] = None
    numberOfTracks: Optional[int] = None
    cover: Optional[str] = None

class ListenBrainzTrackResponse(BaseModel):
    """Per-track response for generated playlists.

    NOTE: fields typed Optional[...] with no default are required-but-
    nullable — callers must always send them (possibly as null)."""
    title: str
    artist: str
    mbid: Optional[str]
    tidal_id: Optional[int]
    tidal_exists: bool
    album: Optional[str]

class DownloadTrackRequest(BaseModel):
    """Request body for queuing a single track download."""
    track_id: int
    artist: str
    title: str
    album: Optional[str] = None
    album_id: Optional[int] = None
    track_number: Optional[int] = None
    cover: Optional[str] = None
    quality: Optional[str] = "LOSSLESS"
    # Template placeholders are expanded by the download service.
    organization_template: Optional[str] = "{Artist}/{Album}/{TrackNumber} - {Title}"
    group_compilations: Optional[bool] = True
    run_beets: Optional[bool] = False
    embed_lyrics: Optional[bool] = False
    # NOTE(review): these three are str here but int in TrackSearchResult —
    # looks intentional (frontend sends strings) but verify.
    tidal_track_id: Optional[str] = None
    tidal_artist_id: Optional[str] = None
    tidal_album_id: Optional[str] = None
72 |
--------------------------------------------------------------------------------
/.github/workflows/docker-publish.yml:
--------------------------------------------------------------------------------
name: Build and Push Docker Images

# Dual-publishes to Docker Hub and GHCR. PRs build (for validation) but
# never push; doc-only changes are ignored entirely.
on:
  push:
    branches: [main, master]
    tags: ["v*"]
    paths-ignore:
      - '**.md'
      - 'docs/**'
      - 'LICENSE'
      - '.gitignore'
  pull_request:
    branches: [main, master]
    paths-ignore:
      - '**.md'
      - 'docs/**'
      - 'LICENSE'
      - '.gitignore'
  workflow_dispatch:

env:
  REGISTRY_DOCKERHUB: docker.io
  REGISTRY_GHCR: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write   # needed to push to ghcr.io
      id-token: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      # QEMU + Buildx enable the linux/arm64 cross-build below.
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log into Docker Hub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Log into GitHub Container Registry
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      # Generates branch/PR/semver/latest tags for both registries.
      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: |
            ${{ secrets.DOCKERHUB_USERNAME }}/tidaloader
            ghcr.io/${{ github.repository }}
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=raw,value=latest,enable={{is_default_branch}}

      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          # Pushes to registry ONLY if it is not a PR
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
--------------------------------------------------------------------------------
/backend/tests/test_downloads.py:
--------------------------------------------------------------------------------
from unittest.mock import MagicMock
import os

# Set dummy auth for tests.
# NOTE(review): api.auth reads these env vars at import time, so this must
# execute before api.auth is imported — relies on conftest/import ordering;
# verify when adding new test modules.
os.environ["AUTH_USERNAME"] = "test"
os.environ["AUTH_PASSWORD"] = "test"

# base64("test:test") — matches the dummy credentials above.
AUTH_HEADER = {"Authorization": "Basic dGVzdDp0ZXN0"}
9 |
def test_start_download_endpoint(client):
    """The start endpoint should acknowledge immediately."""
    resp = client.post("/api/download/start", headers=AUTH_HEADER)
    assert resp.status_code == 200
    assert resp.json() == {"status": "started"}
14 |
def test_get_stream_url_success(client, mock_tidal_client):
    """A known track id should resolve to its stream URL."""
    mock_tidal_client.get_track.return_value = {
        "OriginalTrackUrl": "http://stream.url"
    }

    resp = client.get("/api/download/stream/123", headers=AUTH_HEADER)

    assert resp.status_code == 200
    payload = resp.json()
    assert payload["stream_url"] == "http://stream.url"
    assert payload["track_id"] == 123
26 |
def test_get_stream_url_not_found(client, mock_tidal_client):
    """An unknown track id should surface as 404."""
    mock_tidal_client.get_track.return_value = None
    resp = client.get("/api/download/stream/999", headers=AUTH_HEADER)
    assert resp.status_code == 404
32 |
def test_download_track_post(client, mock_tidal_client, mock_background_tasks):
    """POST /api/download/track should accept the job and answer 'downloading'."""
    # get_track is consulted twice: first for metadata, then again inside
    # the download logic for the stream URL.
    mock_tidal_client.get_track.side_effect = [
        {
            "title": "Test Track",
            "artist": {"name": "Test Artist"},
            "trackNumber": 1,
            "duration": 300,
            "OriginalTrackUrl": "http://stream.url",
        },
        {
            "title": "Test Track",
            "OriginalTrackUrl": "http://stream.url",
        },
    ]

    payload = {
        "track_id": 1001,
        "artist": "Test Artist",
        "title": "Test Track",
        "quality": "LOSSLESS",
    }

    resp = client.post("/api/download/track", json=payload, headers=AUTH_HEADER)

    # Background tasks are mocked, so nothing is actually downloaded —
    # the endpoint should still respond immediately with the queued state.
    assert resp.status_code == 200
    body = resp.json()
    assert body["status"] == "downloading"
    assert "filename" in body
68 |
--------------------------------------------------------------------------------
/frontend/src/data/releaseNotes.js:
--------------------------------------------------------------------------------
// Changelog entries rendered by the ReleaseNotes component.
// Newest release first; each entry: { version, date (YYYY-MM-DD), title, changes[] }.
export const releaseNotes = [
  {
    version: "1.1.0",
    date: "2025-12-14",
    title: "Extended ListenBrainz Integration",
    changes: [
      "Renamed 'Weekly Jams' tab to 'Listenbrainz Playlists' to reflect broader support.",
      "Added support for 'Weekly Exploration' and 'Year in Review' (Discoveries & Missed) playlists.",
      "Implemented a new 'Fetch then Check' workflow: Fetch playlists instantly and validate tracks on demand.",
      "Overhauled the UI with album art display, better status indicators, and selective download queuing.",
      "Added 'Check All' functionality to batch validate playlist tracks against Tidal."
    ]
  },
  {
    version: "1.0.4",
    date: "2025-12-14",
    title: "Download Manager Authentication Fix",
    changes: [
      "Fixed DownloadManager to properly respect the user's authentication state."
    ]
  },
  {
    version: "1.0.3",
    date: "2025-12-14",
    title: "Weekly Jams & Core Optimizations",
    changes: [
      "Replaced Troi with direct ListenBrainz integration for 'Weekly Jams', reducing image size by a lot.",
      "Major Docker optimization: Image size slashed by removing OS dependencies and using static binaries.",
      "Fixed annoying browser 'Sign in' popups during playlist generation.",
      "Improved backend stability and request handling."
    ]
  },
  {
    version: "1.0.2",
    date: "2025-12-14",
    title: "Easter egg",
    changes: [
      "Added cool nyan cat easter egg"
    ]
  },
  {
    version: "1.0.1",
    date: "2025-12-13",
    title: "Playlist Download Support",
    changes: [
      "Added support for downloading playlists thanks to @Oduanir."
    ]
  },
  {
    version: "1.0.0",
    date: "2025-12-13",
    title: "Themes Update & Proper Versioning",
    changes: [
      "New 'Tsunami' icon and branding!",
      "Added a dedicated Light/Dark mode toggle for quick switching.",
      "Introduced a new cohesive definition for themes.",
      "Improved contrast and visibility for multiple themes (Kanagawa, Dracula, Nord, etc.).",
      "Fixed 'White Screen' issues with theme colors.",
      "Added this release notes system!",
      "Added versioning to the app."
    ]
  }
];
64 |
--------------------------------------------------------------------------------
/frontend/src/hooks/useTheme.js:
--------------------------------------------------------------------------------
1 | import { useState, useEffect } from 'preact/hooks';
2 |
// Map of theme class names -> human-readable labels.
// Keys must match the CSS theme classes; useTheme also iterates this map
// to strip stale theme classes from <html> when switching.
export const themeNames = {
  "light": "Light",
  "dark": "Dark",
  "catppuccin-latte": "Catppuccin Latte",
  "catppuccin-frappe": "Catppuccin Frappe",
  "catppuccin-macchiato": "Catppuccin Macchiato",
  "catppuccin-mocha": "Catppuccin Mocha",
  "matcha": "Matcha",
  "nord": "Nord",
  "gruvbox": "Gruvbox",
  "dracula": "Dracula",
  "solarized-light": "Solarized Light",
  "solarized-dark": "Solarized Dark",
  "rose-pine": "Rose Pine",
  "tokyo-night": "Tokyo Night",
  "crimson": "Crimson",
  "kanagawa": "Kanagawa",
  "one-dark": "One Dark",
  "one-light": "One Light",
  "everforest": "Everforest",
  "cotton-candy-dreams": "Cotton Candy",
  "sea-green": "Sea Green"
};
26 |
export function useTheme() {
  // Resolve the initial theme: saved preference wins, then OS preference,
  // then 'light'. Guards keep this safe when localStorage/window are absent.
  const [theme, setThemeState] = useState(() => {
    if (typeof localStorage !== 'undefined') {
      const savedTheme = localStorage.getItem('theme');
      if (savedTheme) {
        // Stored values have always been full theme names ("dark",
        // "nord", ...), so no legacy migration is required.
        return savedTheme;
      }
    }
    if (typeof window !== 'undefined' && window.matchMedia('(prefers-color-scheme: dark)').matches) {
      return 'dark';
    }
    return 'light';
  });

  const setTheme = (newTheme) => {
    setThemeState(newTheme);
    // Guard like the initializer does so the hook stays SSR-safe
    // (previously this line threw when localStorage was undefined).
    if (typeof localStorage !== 'undefined') {
      localStorage.setItem('theme', newTheme);
    }
  };

  useEffect(() => {
    const root = document.documentElement;
    // Drop every known theme class before applying the current one.
    Object.keys(themeNames).forEach(t => root.classList.remove(t));

    // 'light' is the :root default, so only non-light themes get a class.
    if (theme !== 'light') {
      root.classList.add(theme);
    }
  }, [theme]);

  return { theme, setTheme };
}
67 |
--------------------------------------------------------------------------------
/backend/api/main.py:
--------------------------------------------------------------------------------
import sys
from pathlib import Path
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse
from dotenv import load_dotenv

# Load env before imports that might need it (api.auth reads env at import time)
load_dotenv()

# Fix path to include backend root so top-level sibling modules
# (download_state, queue_manager) resolve regardless of cwd.
sys.path.append(str(Path(__file__).parent.parent))

from api.routers import system, listenbrainz, search, downloads, library
from api.clients import tidal_client
from api.utils.logging import log_warning, log_info
from download_state import download_state_manager
from queue_manager import queue_manager, QUEUE_AUTO_PROCESS

from contextlib import asynccontextmanager

@asynccontextmanager
async def lifespan(app: FastAPI):
    """App lifespan: cache cleanup on startup, queue shutdown on exit."""
    # Startup: purge stale cache/state entries from previous runs.
    tidal_client.cleanup_old_status_cache()
    download_state_manager._cleanup_old_entries()

    # Initialize queue manager and start processing if auto mode is enabled
    log_info(f"Queue manager initialized: auto_process={QUEUE_AUTO_PROCESS}")
    if QUEUE_AUTO_PROCESS:
        # Start queue processing in background (fire-and-forget task)
        import asyncio
        asyncio.create_task(queue_manager.start_processing())
        log_info("Queue auto-processing started")

    yield
    # Shutdown
    await queue_manager.stop_processing()

app = FastAPI(title="Tidaloader API", lifespan=lifespan)

# NOTE(review): browsers reject allow_origins=["*"] combined with
# allow_credentials=True per the CORS spec; harmless while the frontend is
# served same-origin, but confirm if cross-origin use is intended.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Include Routers
app.include_router(system.router)
app.include_router(listenbrainz.router)
app.include_router(search.router)
app.include_router(downloads.router)
app.include_router(library.router)

# Frontend Serving: the built SPA lives at ../../frontend/dist
frontend_dist = Path(__file__).parent.parent.parent / "frontend" / "dist"

if frontend_dist.exists():
    assets_path = frontend_dist / "assets"
    if assets_path.exists():
        app.mount("/assets", StaticFiles(directory=str(assets_path)), name="assets")

    @app.get("/{full_path:path}")
    async def serve_frontend(full_path: str):
        """SPA catch-all: serve index.html for any non-API path."""
        if full_path.startswith("api"):
            raise HTTPException(status_code=404, detail="Not found")

        index_path = frontend_dist / "index.html"
        if index_path.exists():
            return FileResponse(index_path)
        return {"message": "Frontend not found"}
else:
    log_warning("Frontend dist folder not found. Run 'npm run build' in frontend directory.")

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8001)
--------------------------------------------------------------------------------
/backend/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | from api.utils.text import fix_unicode, romanize_japanese
2 | from api.utils.extraction import extract_items, extract_stream_url
3 |
def test_fix_unicode():
    """fix_unicode repairs escape sequences and passes clean text through."""
    cases = [
        ("Café", "Café"),                          # already valid unicode
        ("\\u0043\\u0061\\u0066\\u00e9", "Café"),  # escaped sequence
        ("Hello World", "Hello World"),            # plain ASCII untouched
        ("", ""),                                  # empty stays empty
    ]
    for raw, expected in cases:
        assert fix_unicode(raw) == expected
    assert fix_unicode(None) is None
14 |
def test_romanize_japanese():
    """Non-Japanese, empty, or None input should yield None.

    We deliberately avoid asserting on actual Japanese output so the test
    does not depend on pykakasi being installed/configured — the helper
    returns None whenever there is nothing to romanize (and on ImportError).
    """
    for text in ("Hello", "", None):
        assert romanize_japanese(text) is None
32 |
def test_extract_items():
    """extract_items should unwrap the various Tidal response shapes."""
    hit = [{"id": 1}]

    # Bare dict that already carries 'items'.
    assert extract_items({"items": hit}, "any_key") == hit
    # Dict keyed by category -> {'items': [...]}.
    assert extract_items({"tracks": {"items": hit}}, "tracks") == hit
    # Already a plain list of items.
    assert extract_items(hit, "any") == hit
    # List wrapper around a keyed dict.
    assert extract_items([{"tracks": {"items": hit}}], "tracks") == hit
    # Empty / missing input collapses to [].
    assert extract_items({}, "tracks") == []
    assert extract_items(None, "tracks") == []
53 |
def test_extract_stream_url():
    """extract_stream_url should handle direct, list and manifest payloads."""
    url = "http://example.com/stream"

    # Direct field, bare and wrapped in a list.
    assert extract_stream_url({"OriginalTrackUrl": url}) == url
    assert extract_stream_url([{"OriginalTrackUrl": url}]) == url

    # Manifest variant: base64-encoded JSON carrying a 'urls' array.
    import base64
    import json
    encoded = base64.b64encode(
        json.dumps({"urls": ["http://example.com/manifest"]}).encode()
    ).decode()
    assert extract_stream_url({"manifest": encoded}) == "http://example.com/manifest"

    # Nothing usable -> None.
    assert extract_stream_url({}) is None
75 |
--------------------------------------------------------------------------------
/backend/test_quick.py:
--------------------------------------------------------------------------------
"""Quick API test without needing Troi installed"""
import requests
import json

BASE_URL = "http://localhost:8001"  # backend port (previously 8000)
6 |
def test_root():
    """Test root endpoint"""
    print("Testing root endpoint...")
    try:
        resp = requests.get(f"{BASE_URL}/")
        print(f"Status: {resp.status_code}")

        # Another service on this port would also answer 200, so inspect
        # the body to confirm it is actually our API.
        payload = resp.json()
        print(f"Response: {json.dumps(payload, indent=2)}")

        if payload.get("message") == "Tidaloader API":
            print("✓ Correct API responding")
        else:
            print("✗ Wrong service responding!")
        print()
    except Exception as e:
        print(f"✗ Error: {e}\n")
        raise
26 |
def test_search():
    """Test track search"""
    print("Testing track search...")
    try:
        resp = requests.get(f"{BASE_URL}/api/search/tracks", params={"q": "Radiohead OK Computer"})
        print(f"Status: {resp.status_code}")

        if resp.status_code != 200:
            print(f"Error: {resp.text}")
        else:
            results = resp.json()
            print(f"Found {len(results['items'])} tracks")
            if results['items']:
                first = results['items'][0]
                print(f"First result: {first['artist']} - {first['title']}")
                print(f"Track ID: {first['id']}")
        print()
    except Exception as e:
        print(f"✗ Error: {e}\n")
        raise
47 |
def test_album_search():
    """Test album search"""
    print("Testing album search...")
    try:
        resp = requests.get(f"{BASE_URL}/api/search/albums", params={"q": "OK Computer"})
        print(f"Status: {resp.status_code}")

        if resp.status_code != 200:
            print(f"Error: {resp.text}")
        else:
            results = resp.json()
            print(f"Found {len(results['items'])} albums")
            if results['items']:
                album = results['items'][0]
                print(f"First result: {album.get('title')} by {album.get('artist', {}).get('name', 'Unknown')}")
        print()
    except Exception as e:
        print(f"✗ Error: {e}\n")
        raise
67 |
if __name__ == "__main__":
    print("🧪 Testing Tidaloader API")
    print(f"Base URL: {BASE_URL}\n")
    print("="*50 + "\n")

    try:
        # Run the checks in order; first failure aborts the run.
        for check in (test_root, test_search, test_album_search):
            check()
        print("✓ All basic tests passed!")
    except requests.exceptions.ConnectionError:
        print("✗ ERROR: Could not connect to server")
        print("   Make sure the backend is running:")
        print("   cd backend && .\\start.ps1")
    except Exception as e:
        print(f"✗ ERROR: {e}")
--------------------------------------------------------------------------------
/backend/api/services/search.py:
--------------------------------------------------------------------------------
1 | from api.utils.text import fix_unicode, romanize_japanese
2 | from api.utils.logging import log_info, log_success, log_error
3 | from api.utils.extraction import extract_items
4 | from api.clients import tidal_client
5 |
def _apply_first_match(result, track_obj) -> bool:
    """Copy the first track hit from a Tidal search result onto track_obj.

    Returns True when a hit was found and applied, False otherwise.
    """
    tidal_tracks = extract_items(result, 'tracks') if result else []
    if not tidal_tracks:
        return False

    first_track = tidal_tracks[0]
    track_obj.tidal_id = first_track.get('id')
    track_obj.tidal_artist_id = first_track.get('artist', {}).get('id')

    # 'album' is usually a dict, but guard against other shapes.
    album_data = first_track.get('album', {})
    if isinstance(album_data, dict):
        track_obj.tidal_album_id = album_data.get('id')
        track_obj.album = album_data.get('title')
        track_obj.cover = album_data.get('cover')
    else:
        track_obj.tidal_album_id = None
        track_obj.album = None
        track_obj.cover = None

    track_obj.tidal_exists = True
    return True


async def search_track_with_fallback(artist: str, title: str, track_obj) -> bool:
    """Search Tidal for a track, retrying with romanized Japanese text.

    Mutates track_obj in place (tidal_id, tidal_artist_id, tidal_album_id,
    tidal_exists, album, cover). Returns True when a match was applied.
    """
    artist_fixed = fix_unicode(artist)
    title_fixed = fix_unicode(title)

    log_info(f"Searching: {artist_fixed} - {title_fixed}")

    # First attempt: cleaned-up artist/title as-is.
    result = tidal_client.search_tracks(f"{artist_fixed} {title_fixed}")
    if _apply_first_match(result, track_obj):
        log_success(f"Found on Tidal - ID: {track_obj.tidal_id}")
        return True

    # Fallback: romanize Japanese text and retry with whichever parts
    # actually romanized (keep the originals for the rest).
    romanized_title = romanize_japanese(title_fixed)
    romanized_artist = romanize_japanese(artist_fixed)

    if romanized_title or romanized_artist:
        search_artist = romanized_artist if romanized_artist else artist_fixed
        search_title = romanized_title if romanized_title else title_fixed

        log_info(f"Trying romanized: {search_artist} - {search_title}")

        result = tidal_client.search_tracks(f"{search_artist} {search_title}")
        if _apply_first_match(result, track_obj):
            log_success(f"Found via romanization - ID: {track_obj.tidal_id}")
            return True

    log_error("Not found on Tidal")
    return False
61 |
--------------------------------------------------------------------------------
/backend/api/utils/extraction.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional
2 | from api.utils.logging import log_info, log_warning, log_error
3 |
def extract_items(result, key: str) -> List:
    """Unwrap a Tidal API response into a plain list of item dicts.

    Handles the shapes the client returns:
      - list of items (returned as-is)
      - list whose first element wraps ``key`` -> {'items': [...]} or [...]
      - dict with ``key`` -> {'items': [...]}
      - dict with a top-level 'items' list

    Returns [] for empty input or unrecognized shapes.
    """
    if not result:
        log_warning("extract_items received empty result")
        return []

    if isinstance(result, list):
        # Wrapper list: unwrap the keyed payload of the first element if
        # present, otherwise assume the list already holds the items.
        if result and isinstance(result[0], dict) and key in result[0]:
            nested = result[0][key]
            if isinstance(nested, dict) and 'items' in nested:
                return nested['items']
            if isinstance(nested, list):
                return nested
        return result

    if isinstance(result, dict):
        if key not in result:
            # Surface unexpected payload shapes for troubleshooting.
            log_warning(f"Key '{key}' NOT found in result struct. Keys: {list(result.keys())}")
        elif isinstance(result[key], dict):
            return result[key].get('items', [])

        # Fallback: a bare {'items': [...]} payload.
        if 'items' in result:
            return result['items']

    return []
40 |
def extract_track_data(track_response) -> List:
    """Normalize a raw track response into a list of item dicts.

    Accepts a dict carrying 'items', or a list of such wrapper dicts
    (first match wins); anything else yields [].
    """
    if isinstance(track_response, dict):
        # Single wrapper object: {'items': [...]}.
        return track_response.get('items', [])

    if isinstance(track_response, list):
        # List of wrappers: return the first one that carries 'items'.
        for wrapper in track_response:
            if isinstance(wrapper, dict) and 'items' in wrapper:
                return wrapper['items']

    # None / unexpected shapes.
    return []
55 |
def extract_stream_url(track_data) -> Optional[str]:
    """Pull a playable stream URL out of a track payload.

    Preference order: a direct 'OriginalTrackUrl' field on any entry,
    then a base64 'manifest' (JSON with 'urls', falling back to a raw
    URL regex scan of the decoded text). Returns None when nothing usable
    is found.
    """
    candidates = track_data if isinstance(track_data, list) else [track_data]

    # Pass 1: direct URL field.
    for candidate in candidates:
        if isinstance(candidate, dict) and 'OriginalTrackUrl' in candidate:
            return candidate['OriginalTrackUrl']

    # Pass 2: decode a base64 manifest.
    for candidate in candidates:
        if isinstance(candidate, dict) and 'manifest' in candidate:
            try:
                import base64
                decoded = base64.b64decode(candidate['manifest']).decode('utf-8')

                try:
                    import json
                    parsed = json.loads(decoded)
                    if 'urls' in parsed and parsed['urls']:
                        return parsed['urls'][0]
                except json.JSONDecodeError:
                    pass

                # Not JSON (or no 'urls') — scan the raw text for a URL.
                import re
                match = re.search(r'https?://[^\s"]+', decoded)
                if match:
                    return match.group(0)
            except Exception as e:
                log_error(f"Failed to decode manifest: {e}")

    return None
89 |
--------------------------------------------------------------------------------
/backend/api/routers/library.py:
--------------------------------------------------------------------------------
1 |
2 | from typing import Optional
3 | from pathlib import Path
4 | from fastapi import APIRouter, Depends, HTTPException, Response
5 | from fastapi.responses import FileResponse
6 | from api.auth import require_auth
7 | from api.services.library import library_service
8 | from api.utils.logging import log_info, log_error
9 |
# All /api/library endpoints hang off this router (mounted in api.main).
router = APIRouter()
11 |
@router.get("/api/library/scan")
async def scan_library(force: bool = False, username: str = Depends(require_auth)):
    """Trigger a library rescan; `force` bypasses any cached scan."""
    try:
        log_info(f"Library scan requested (force={force})")
        artists = library_service.scan_library(force=force)
        return {"status": "success", "artist_count": len(artists)}
    except Exception as e:
        log_error(f"Error scanning library: {e}")
        raise HTTPException(status_code=500, detail=str(e))
21 |
@router.get("/api/library/artists")
async def get_library_artists(username: str = Depends(require_auth)):
    """Return every artist known to the local library."""
    try:
        return library_service.get_artists()
    except Exception as e:
        log_error(f"Error getting library artists: {e}")
        raise HTTPException(status_code=500, detail=str(e))
29 |
@router.get("/api/library/artist/{artist_name}")
async def get_library_artist(artist_name: str, username: str = Depends(require_auth)):
    """Return details for a single artist; 404 when unknown."""
    try:
        artist = library_service.get_artist(artist_name)
    except Exception as e:
        log_error(f"Error getting library artist {artist_name}: {e}")
        raise HTTPException(status_code=500, detail=str(e))

    if not artist:
        raise HTTPException(status_code=404, detail="Artist not found")
    return artist
42 |
# NOTE(review): mid-file import — works, but conventionally belongs at the top.
from pydantic import BaseModel

class ArtistMetadataUpdate(BaseModel):
    """PATCH body for artist metadata; only the picture URL is editable."""
    picture: Optional[str] = None
47 |
@router.patch("/api/library/artist/{artist_name}")
async def update_artist_metadata(
    artist_name: str,
    metadata: ArtistMetadataUpdate,
    username: str = Depends(require_auth)
):
    """Persist updated metadata (currently just the picture) for an artist."""
    try:
        updated = library_service.update_artist_metadata(artist_name, picture=metadata.picture)
        if updated:
            return {"status": "success"}
        raise HTTPException(status_code=404, detail="Artist not found")
    except HTTPException:
        raise
    except Exception as e:
        log_error(f"Error updating artist {artist_name}: {e}")
        raise HTTPException(status_code=500, detail=str(e))
64 |
@router.get("/api/library/cover")
async def get_local_cover(path: str, username: str = Depends(require_auth)):
    """Serve a local cover image given its filesystem path.

    NOTE(review): `path` comes straight from the client, so this endpoint
    can read any file the process can access (path traversal). It should be
    validated against the configured music/download directory — TODO confirm
    the intended base dir and enforce it here.
    """
    try:
        file_path = Path(path)
        # is_file() (not just exists()) so directories and special files
        # 404 instead of failing inside FileResponse.
        if not file_path.is_file():
            raise HTTPException(status_code=404, detail="Cover not found")
        return FileResponse(file_path)
    except HTTPException:
        raise
    except Exception as e:
        # Previously the exception was swallowed silently; log it so
        # misconfigured paths are diagnosable.
        log_error(f"Error serving cover {path}: {e}")
        raise HTTPException(status_code=404, detail="Cover not found")
80 |
--------------------------------------------------------------------------------
/backend/api/auth.py:
--------------------------------------------------------------------------------
1 | """
2 | Simple but secure authentication using HTTP Basic Auth
3 | """
4 | import os
5 | import secrets
6 | import base64
7 | from typing import Optional
8 | from fastapi import Depends, HTTPException, status, Header, Query
9 | from dotenv import load_dotenv
10 |
11 | load_dotenv()
12 |
# Load credentials from environment; auth is intentionally single-user.
AUTH_USERNAME = os.getenv("AUTH_USERNAME")
AUTH_PASSWORD = os.getenv("AUTH_PASSWORD")

# Fail fast at import time: the API would be wide open (or broken) without
# credentials configured.
if not AUTH_USERNAME or not AUTH_PASSWORD:
    raise RuntimeError(
        "AUTH_USERNAME and AUTH_PASSWORD must be set in .env file!\n"
        "Example:\n"
        "AUTH_USERNAME=admin\n"
        "AUTH_PASSWORD=your-secure-password"
    )
24 |
def validate_auth_string(auth_string: str) -> str:
    """Validate a raw ``Basic <base64>`` auth string.

    Returns the authenticated username; raises HTTP 401 on any failure
    (missing header, wrong scheme, malformed payload, bad credentials).
    """
    if not auth_string:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Missing authorization",
        )

    if not auth_string.startswith("Basic "):
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid authentication scheme",
        )

    try:
        # Strip only the leading scheme prefix. The previous
        # str.replace("Basic ", "") removed *every* occurrence of the
        # substring anywhere in the value, not just the prefix.
        encoded_credentials = auth_string[len("Basic "):]
        decoded = base64.b64decode(encoded_credentials).decode("utf-8")
        # Split on the first ':' only — passwords may contain colons.
        username, password = decoded.split(":", 1)
    except Exception:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid credentials format",
        )

    # Constant-time comparison to prevent timing attacks
    is_correct_username = secrets.compare_digest(
        username.encode("utf8"),
        AUTH_USERNAME.encode("utf8")
    )
    is_correct_password = secrets.compare_digest(
        password.encode("utf8"),
        AUTH_PASSWORD.encode("utf8")
    )

    if not (is_correct_username and is_correct_password):
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid credentials",
        )

    return username
66 |
def verify_credentials(authorization: Optional[str] = Header(None)) -> str:
    """Verify HTTP Basic Auth credentials from the Authorization header."""
    if authorization:
        return validate_auth_string(authorization)
    raise HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Missing authorization header",
    )
75 |
def require_auth_stream(token: Optional[str] = Query(None)) -> str:
    """Authenticate EventSource/SSE requests via a 'token' query parameter.

    Browsers cannot attach headers to EventSource connections, so the full
    'Basic ...' string is passed as ?token=... instead of a header.
    """
    if token:
        return validate_auth_string(token)
    raise HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Missing authentication token",
    )
87 |
# Dependency for protected endpoints
def require_auth(username: str = Depends(verify_credentials)) -> str:
    """FastAPI dependency: require valid Basic Auth; yields the username."""
    return username
--------------------------------------------------------------------------------
/backend/api/routers/listenbrainz.py:
--------------------------------------------------------------------------------
1 | import uuid
2 | import json
3 | import asyncio
4 | from fastapi import APIRouter, BackgroundTasks, Depends
5 | from fastapi.responses import StreamingResponse
6 |
7 | from api.models import ListenBrainzGenerateRequest, ValidateTrackRequest
8 | from api.auth import require_auth, require_auth_stream
9 | from api.state import lb_progress_queues
10 | from api.services.listenbrainz import listenbrainz_generate_with_progress
11 | from api.services.search import search_track_with_fallback
12 |
13 | router = APIRouter()
14 |
@router.post("/api/listenbrainz/generate")
async def generate_listenbrainz_playlist(
    request: ListenBrainzGenerateRequest,
    background_tasks: BackgroundTasks,
    username: str = Depends(require_auth)
):
    """Start playlist generation in the background.

    Returns a progress id the client uses to follow the SSE progress
    stream for this job.
    """
    job_id = str(uuid.uuid4())

    background_tasks.add_task(
        listenbrainz_generate_with_progress,
        request.username,
        request.playlist_type,
        job_id,
        request.should_validate,
    )

    return {"progress_id": job_id}
32 |
@router.post("/api/listenbrainz/validate-track")
async def validate_listenbrainz_track(
    request: ValidateTrackRequest,
    username: str = Depends(require_auth)
):
    """Resolve one ListenBrainz track against Tidal and return the result.

    The search mutates the track object in place; the response echoes the
    enriched fields back to the client.
    """
    candidate = request.track
    await search_track_with_fallback(candidate.artist, candidate.title, candidate)

    field_names = (
        "title", "artist", "mbid", "tidal_id",
        "tidal_artist_id", "tidal_album_id", "tidal_exists", "album",
    )
    payload = {name: getattr(candidate, name) for name in field_names}
    # 'cover' may be absent on the model, hence the defaulted lookup.
    payload["cover"] = getattr(candidate, 'cover', None)
    return payload
52 |
@router.get("/api/listenbrainz/progress/{progress_id}")
async def listenbrainz_progress_stream(
    progress_id: str,
    username: str = Depends(require_auth_stream)
):
    """SSE stream of playlist-generation progress for `progress_id`.

    Auth comes from a 'token' query param (EventSource cannot set
    headers).  Messages are relayed from the queue registered by the
    background generation task; a None sentinel ends the stream.
    """
    async def event_generator():
        if progress_id not in lb_progress_queues:
            yield f"data: {json.dumps({'type': 'error', 'message': 'Invalid progress ID'})}\n\n"
            return

        queue = lb_progress_queues[progress_id]

        try:
            while True:
                try:
                    message = await asyncio.wait_for(queue.get(), timeout=30.0)

                    # None is the producer's end-of-stream sentinel.
                    if message is None:
                        break

                    yield f"data: {json.dumps(message, ensure_ascii=False)}\n\n"

                except asyncio.TimeoutError:
                    # Keep-alive ping so idle proxies don't drop the connection.
                    yield f"data: {json.dumps({'type': 'ping'})}\n\n"

        finally:
            # Always unregister the queue, even on client disconnect.
            if progress_id in lb_progress_queues:
                del lb_progress_queues[progress_id]

    return StreamingResponse(
        event_generator(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            # Disable nginx buffering so events flush immediately.
            "X-Accel-Buffering": "no",
            "Content-Type": "text/event-stream; charset=utf-8"
        }
    )
92 |
--------------------------------------------------------------------------------
/frontend/src/components/Toast.jsx:
--------------------------------------------------------------------------------
1 | import { h } from "preact";
2 | import { useToastStore } from "../stores/toastStore";
3 |
/**
 * Toast notification list rendered from the global toast store.
 *
 * NOTE(review): the JSX below is garbled in this extract (element tags and
 * attributes appear stripped by extraction) — verify against the original
 * component before editing further.
 */
export function Toast() {
  // Select store slices individually so unrelated updates don't re-render.
  const toasts = useToastStore((state) => state.toasts);
  const removeToast = useToastStore((state) => state.removeToast);

  return (

    {toasts.map((toast) => (


      {toast.type === "success" && (

      )}
      {toast.type === "error" && (

      )}
      {toast.type === "warning" && (

      )}
      {toast.type === "info" && (

      )}


      {toast.message}



    ))}

  );
}
--------------------------------------------------------------------------------
/backend/api/services/lyrics.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from api.utils.logging import log_info, log_success, log_warning, log_step
3 | import asyncio
4 | import shutil
5 | from lyrics_client import lyrics_client
6 |
async def fetch_and_store_lyrics(filepath: Path, metadata: dict, audio_file=None):
    """Fetch lyrics for the track in `metadata` and stash them there.

    Looks up lyrics by title/artist (plus optional album/duration) via the
    LrcLib client and stores the result back into `metadata` under
    'synced_lyrics' (preferred) or 'plain_lyrics'.  When `audio_file` is a
    mutable tag mapping and synced lyrics exist, each non-empty lyric line
    is embedded as a LYRICS_LINE_<n> tag.

    Args:
        filepath: Path of the downloaded audio file (unused here; kept for
            interface compatibility with callers).
        metadata: Track metadata dict; needs 'title' and 'artist' for a
            lookup to be attempted.  Mutated in place.
        audio_file: Optional tag-mapping object supporting item assignment.
    """
    if metadata.get('title') and metadata.get('artist'):
        try:
            log_info("Fetching lyrics...")
            lyrics_result = await lyrics_client.get_lyrics(
                track_name=metadata['title'],
                artist_name=metadata['artist'],
                album_name=metadata.get('album'),
                duration=metadata.get('duration')
            )

            if lyrics_result:
                # Prefer time-synced lyrics; fall back to plain text.
                if lyrics_result.synced_lyrics:
                    metadata['synced_lyrics'] = lyrics_result.synced_lyrics
                    log_success("Synced lyrics found (will save to .lrc)")
                elif lyrics_result.plain_lyrics:
                    metadata['plain_lyrics'] = lyrics_result.plain_lyrics
                    log_success("Plain lyrics found (will save to .txt)")

        except Exception as e:
            log_warning(f"Failed to fetch lyrics: {e}")

    if audio_file and metadata.get('synced_lyrics'):
        try:
            lyrics_text = metadata['synced_lyrics']

            embedded_count = 0
            for i, line in enumerate(lyrics_text.split('\n')):
                if line.strip():
                    audio_file[f'LYRICS_LINE_{i+1}'] = line.strip()
                    embedded_count += 1

            # Report lines actually embedded (blank lines are skipped);
            # the previous splitlines() count included blanks.
            log_success(f"Embedded {embedded_count} lines of lyrics")
        except Exception as e:
            log_warning(f"Failed to embed lyrics: {e}")
40 |
async def embed_lyrics_with_ffmpeg(filepath: Path, metadata: dict):
    """Embed lyrics into the audio file's LYRICS tag using FFmpeg.

    Copies all streams without re-encoding into a temp file that replaces
    the original on success.  Skips (with a log message) when FFmpeg is
    unavailable or no lyrics are present in `metadata`; never raises.
    """
    try:
        import subprocess

        # Verify FFmpeg is runnable before doing any work.
        try:
            subprocess.run(["ffmpeg", "-version"], check=True, capture_output=True)
        except (FileNotFoundError, subprocess.CalledProcessError):
            log_warning("FFmpeg not found. Skipping lyrics embedding.")
            return

        lyrics = metadata.get('synced_lyrics') or metadata.get('plain_lyrics')
        if not lyrics:
            log_info("No lyrics found to embed.")
            return

        log_step("3.8/4", "Embedding lyrics with FFmpeg...")

        output_path = filepath.with_suffix('.temp' + filepath.suffix)

        # "-map 0 -c copy" keeps every stream untouched; lyrics are added
        # purely as container-level metadata.  (A sidecar .lyrics.txt was
        # previously written here but never used by the command — removed.)
        cmd = [
            "ffmpeg", "-y", "-i", str(filepath),
            "-map", "0", "-c", "copy",
            "-metadata", f"LYRICS={lyrics}",
            str(output_path)
        ]

        process = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE
        )
        stdout, stderr = await process.communicate()

        if process.returncode == 0:
            # Swap the tagged file into place over the original.
            shutil.move(str(output_path), str(filepath))
            log_success("Lyrics embedded with FFmpeg")
        else:
            log_warning(f"FFmpeg lyrics embedding failed: {stderr.decode()}")
            if output_path.exists():
                output_path.unlink()

    except Exception as e:
        log_warning(f"Failed to embed lyrics with FFmpeg: {e}")
        import traceback
        traceback.print_exc()
101 |
--------------------------------------------------------------------------------
/backend/api/services/beets.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | import asyncio
3 | import os
4 | import sys
5 | from api.utils.logging import log_info, log_success, log_warning, log_step
6 |
async def run_beets_import(path: Path):
    """Run a beets import on the downloaded file/directory.

    Resolves the `beet` executable (preferring the one beside the current
    Python interpreter, as in a venv), uses the user's existing beets
    config when one exists (otherwise an auto-generated in-place-tagging
    config), and runs a quiet singleton import asynchronously.  All
    failures are logged, never raised.
    """
    try:
        import subprocess

        # Determine beet executable path
        beet_cmd = "beet"

        # Check if beet is in the same directory as python executable (common in venvs)
        python_dir = os.path.dirname(sys.executable)
        potential_beet = os.path.join(python_dir, "beet")
        if os.path.exists(potential_beet):
            beet_cmd = potential_beet
        elif os.path.exists(potential_beet + ".exe"):
            beet_cmd = potential_beet + ".exe"

        # Check if beet is installed/runnable
        try:
            subprocess.run([beet_cmd, "version"], check=True, capture_output=True)
        except (FileNotFoundError, subprocess.CalledProcessError):
            log_warning(f"Beets not found (tried '{beet_cmd}'). Skipping import.")
            return

        use_custom_config = False
        custom_config_path = Path("tidaloader_beets.yaml").resolve()

        # Prefer the user's own beets config when one exists on disk.
        try:
            result = subprocess.run([beet_cmd, "config", "-p"], capture_output=True, text=True)
            config_path = result.stdout.strip()

            if result.returncode != 0 or not config_path or not os.path.exists(config_path):
                log_info("No existing Beets config found. Using auto-generated configuration.")
                use_custom_config = True
            else:
                log_info(f"Using existing Beets config at: {config_path}")

        except Exception as e:
            log_warning(f"Error checking Beets config: {e}. Defaulting to auto-generated config.")
            use_custom_config = True

        if use_custom_config:
            # In-place tagging only: write tags, no copy/move, fetch art/genres.
            config_content = """
directory: /tmp # Dummy, we don't move files
original_date: no
plugins: chroma fetchart embedart lastgenre
import:
  write: yes
  copy: no
  move: no
  autotag: yes
  timid: no
  resume: ask
  incremental: no
  quiet_fallback: skip
  log: beets_import.log
chroma:
  auto: yes
fetchart:
  auto: yes
embedart:
  auto: yes
lastgenre:
  auto: yes
  source: artist
musicbrainz:
  genres: yes
match:
  strong_rec_thresh: 0.10
  distance_weights:
      missing_tracks: 0.0
      unmatched_tracks: 0.0
"""
            with open(custom_config_path, "w") as f:
                f.write(config_content)
            log_info("Generated/Updated custom Beets configuration.")

        # Single announcement just before launching (this step was
        # previously logged twice — once before config resolution too).
        log_step("4/4", f"Running beets import on {path.name}...")

        cmd = [beet_cmd, "-c", str(custom_config_path), "import", "-q", "-s", str(path)]

        process = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE
        )
        stdout, stderr = await process.communicate()

        stdout_str = stdout.decode()
        stderr_str = stderr.decode()

        if process.returncode == 0:
            log_success("Beets import completed successfully")
            print(f"Beets Output:\n{stdout_str}")
            if stderr_str:
                print(f"Beets Errors/Warnings:\n{stderr_str}")
        else:
            log_warning(f"Beets import failed: {stderr_str}")
            print(f"Beets Output:\n{stdout_str}")

    except Exception as e:
        log_warning(f"Failed to run beets import: {e}")
110 |
--------------------------------------------------------------------------------
/frontend/src/components/ReleaseNotes.jsx:
--------------------------------------------------------------------------------
1 | import { h } from "preact";
2 | import { releaseNotes } from "../data/releaseNotes";
3 |
/**
 * Modal dialog showing release notes for the current version.
 *
 * NOTE(review): the JSX below is garbled in this extract (tags/attributes
 * stripped by extraction) — verify against the original component before
 * editing further.
 *
 * @param {boolean} isOpen   whether the modal is visible
 * @param {Function} onClose callback fired when the modal is dismissed
 */
export function ReleaseNotes({ isOpen, onClose }) {
  // If there are no notes, don't render anything
  if (releaseNotes.length === 0) return null;

  // Closed: render nothing rather than hiding with CSS.
  if (!isOpen) return null;

  return (

    e.stopPropagation()}
    >
      {/* Header */}




      What's New

      Version {releaseNotes[0].version}




      {/* Content */}

      {/* Footer */}




  );
}
76 |
--------------------------------------------------------------------------------
/backend/download_state.py:
--------------------------------------------------------------------------------
1 | import json
2 | import time
3 | from pathlib import Path
4 | from typing import Dict, Optional
5 | from datetime import datetime, timedelta
6 |
class DownloadStateManager:
    """Persist per-track download state (active/completed/failed) as JSON.

    State survives process restarts; completed/failed entries older than
    one hour are pruned at construction time.  All disk errors are
    swallowed (logged) so state persistence never breaks a download.
    """

    def __init__(self, state_file: Optional[Path] = None):
        """Load state from `state_file` (default: JSON next to this module)."""
        if state_file is None:
            state_file = Path(__file__).parent / "download_state.json"

        self.state_file = state_file
        self.state = self._load_state()
        self._cleanup_old_entries()

    def _load_state(self) -> Dict:
        """Read persisted state; fall back to an empty structure on any error."""
        if self.state_file.exists():
            try:
                with open(self.state_file, 'r') as f:
                    return json.load(f)
            except Exception:
                # Corrupt/unreadable file: start fresh rather than crash.
                pass
        return {
            "active": {},
            "completed": {},
            "failed": {}
        }

    def _save_state(self):
        """Write state to disk; failures are printed, never raised."""
        try:
            self.state_file.parent.mkdir(parents=True, exist_ok=True)
            with open(self.state_file, 'w') as f:
                json.dump(self.state, f, indent=2)
        except Exception as e:
            print(f"Failed to save download state: {e}")

    def _cleanup_old_entries(self):
        """Drop completed/failed entries older than one hour, then save once."""
        current_time = time.time()
        max_age = 3600  # seconds

        removed_any = False
        for category in ["completed", "failed"]:
            expired_keys = [
                track_id for track_id, data in self.state[category].items()
                if current_time - data.get("timestamp", 0) > max_age
            ]
            for track_id in expired_keys:
                del self.state[category][track_id]
            if expired_keys:
                removed_any = True

        # Single write after pruning both categories (previously this saved
        # inside the loop, writing the file up to twice).
        if removed_any:
            self._save_state()

    def get_download_state(self, track_id: int) -> Optional[Dict]:
        """Return {'status': ..., **entry} for the track, or None if unknown."""
        track_id_str = str(track_id)

        if track_id_str in self.state["active"]:
            return {
                "status": "downloading",
                **self.state["active"][track_id_str]
            }

        if track_id_str in self.state["completed"]:
            return {
                "status": "completed",
                **self.state["completed"][track_id_str]
            }

        if track_id_str in self.state["failed"]:
            return {
                "status": "failed",
                **self.state["failed"][track_id_str]
            }

        return None

    def set_downloading(self, track_id: int, progress: int = 0, metadata: Optional[Dict] = None):
        """Mark a track as actively downloading with the given progress."""
        track_id_str = str(track_id)

        self.state["active"][track_id_str] = {
            "progress": progress,
            "timestamp": time.time(),
            "metadata": metadata or {}
        }
        self._save_state()

    def update_progress(self, track_id: int, progress: int):
        """Update progress of an active download; no-op if not active."""
        track_id_str = str(track_id)

        if track_id_str in self.state["active"]:
            self.state["active"][track_id_str]["progress"] = progress
            self.state["active"][track_id_str]["timestamp"] = time.time()
            self._save_state()

    def set_completed(self, track_id: int, filename: str, metadata: Optional[Dict] = None):
        """Move a track from active to completed, recording its filename."""
        track_id_str = str(track_id)

        if track_id_str in self.state["active"]:
            del self.state["active"][track_id_str]

        self.state["completed"][track_id_str] = {
            "filename": filename,
            "timestamp": time.time(),
            "metadata": metadata or {}
        }
        self._save_state()

    def set_failed(self, track_id: int, error: str, metadata: Optional[Dict] = None):
        """Move a track from active to failed, recording the error message."""
        track_id_str = str(track_id)

        if track_id_str in self.state["active"]:
            del self.state["active"][track_id_str]

        self.state["failed"][track_id_str] = {
            "error": error,
            "timestamp": time.time(),
            "metadata": metadata or {}
        }
        self._save_state()

    def clear_download(self, track_id: int):
        """Remove a track from every category and persist the change."""
        track_id_str = str(track_id)

        for category in ["active", "completed", "failed"]:
            if track_id_str in self.state[category]:
                del self.state[category][track_id_str]

        self._save_state()

    def get_all_active(self) -> Dict:
        """Shallow copy of all active downloads keyed by track-id string."""
        return self.state["active"].copy()

    def get_all_completed(self) -> Dict:
        """Shallow copy of all completed downloads keyed by track-id string."""
        return self.state["completed"].copy()

    def get_all_failed(self) -> Dict:
        """Shallow copy of all failed downloads keyed by track-id string."""
        return self.state["failed"].copy()

# Module-level singleton shared across the app.
download_state_manager = DownloadStateManager()
--------------------------------------------------------------------------------
/backend/lyrics_client.py:
--------------------------------------------------------------------------------
1 | """
2 | LrcLib API Client for fetching song lyrics
3 | """
4 | from typing import Optional, Dict
5 | import aiohttp
6 | import asyncio
7 | from dataclasses import dataclass
8 |
@dataclass
class LyricsResult:
    """Container for lyrics data returned by the LrcLib API."""
    synced_lyrics: Optional[str] = None   # timestamped (LRC-style) lyrics
    plain_lyrics: Optional[str] = None    # unsynced plain-text lyrics
    track_name: Optional[str] = None
    artist_name: Optional[str] = None
    album_name: Optional[str] = None
    duration: Optional[int] = None        # track length in seconds
18 |
class LyricsClient:
    """Async client for the LrcLib lyrics API (lrclib.net)."""

    BASE_URL = "https://lrclib.net/api"
    USER_AGENT = "tidaloader/1.0.0 (https://github.com/RayZ3R0/tidaloader)"

    def __init__(self):
        # Lazily-created shared session (see _get_session).
        self.session = None

    async def _get_session(self) -> aiohttp.ClientSession:
        """Return the shared session, (re)creating it when missing or closed."""
        needs_new = self.session is None or self.session.closed
        if needs_new:
            self.session = aiohttp.ClientSession(
                headers={"User-Agent": self.USER_AGENT}
            )
        return self.session

    async def close(self):
        """Dispose of the underlying HTTP session, if one is open."""
        if self.session is not None and not self.session.closed:
            await self.session.close()

    async def get_lyrics(
        self,
        track_name: str,
        artist_name: str,
        album_name: Optional[str] = None,
        duration: Optional[int] = None
    ) -> Optional[LyricsResult]:
        """Fetch lyrics for one track.

        Args:
            track_name: Name of the track.
            artist_name: Name of the artist.
            album_name: Album name (optional, improves matching).
            duration: Duration in seconds (optional, improves matching).

        Returns:
            A LyricsResult, or None when nothing matches or any
            network/API error occurs (errors are printed, never raised).
        """
        query = {
            "track_name": track_name,
            "artist_name": artist_name,
        }
        if album_name:
            query["album_name"] = album_name
        if duration:
            query["duration"] = duration

        try:
            session = await self._get_session()

            async with session.get(
                f"{self.BASE_URL}/get",
                params=query,
                timeout=aiohttp.ClientTimeout(total=10)
            ) as response:
                if response.status == 404:
                    print(f" ℹ️ No lyrics found for: {artist_name} - {track_name}")
                    return None
                if response.status != 200:
                    print(f" ⚠️ Lyrics API returned {response.status}")
                    return None

                payload = await response.json()

            return LyricsResult(
                synced_lyrics=payload.get("syncedLyrics"),
                plain_lyrics=payload.get("plainLyrics"),
                track_name=payload.get("trackName"),
                artist_name=payload.get("artistName"),
                album_name=payload.get("albumName"),
                duration=payload.get("duration")
            )

        except asyncio.TimeoutError:
            print(f" ⚠️ Lyrics API timeout for: {track_name}")
            return None
        except Exception as e:
            print(f" ⚠️ Error fetching lyrics: {e}")
            return None

    async def search_lyrics(
        self,
        track_name: Optional[str] = None,
        artist_name: Optional[str] = None,
        album_name: Optional[str] = None
    ) -> list:
        """Search LrcLib for matching tracks.

        Returns a (possibly empty) list of matches; empty when no search
        criteria were given or any error occurs.
        """
        query = {
            key: value
            for key, value in (
                ("track_name", track_name),
                ("artist_name", artist_name),
                ("album_name", album_name),
            )
            if value
        }
        if not query:
            return []

        try:
            session = await self._get_session()

            async with session.get(
                f"{self.BASE_URL}/search",
                params=query,
                timeout=aiohttp.ClientTimeout(total=10)
            ) as response:
                if response.status != 200:
                    return []
                return await response.json()

        except Exception as e:
            print(f" ⚠️ Error searching lyrics: {e}")
            return []

# Global instance
lyrics_client = LyricsClient()
--------------------------------------------------------------------------------
/backend/api/services/listenbrainz.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import json
3 | from api.state import lb_progress_queues
4 | from api.utils.logging import log_info, log_error, log_success
5 | from api.utils.text import fix_unicode
6 | from api.services.search import search_track_with_fallback
7 | from api.clients.listenbrainz import ListenBrainzClient
8 |
async def listenbrainz_generate_with_progress(username: str, playlist_type: str, progress_id: str, validate: bool = True):
    """Fetch a ListenBrainz playlist and stream progress to an SSE queue.

    Registers an asyncio.Queue under `progress_id` in `lb_progress_queues`;
    the SSE endpoint relays its messages to the client.  Pushes 'info' and
    'validating' updates, then either a 'complete' message (carrying the
    track list) or an 'error' message, and finally a None sentinel so the
    consumer knows the stream ended.

    Args:
        username: ListenBrainz user whose generated playlist to fetch.
        playlist_type: One of the supported types, e.g. 'weekly-jams'.
        progress_id: Key for the progress-queue registry.
        validate: When True, resolve each track against Tidal (slow);
            when False, tracks are returned with empty Tidal fields.
    """
    queue = asyncio.Queue()
    lb_progress_queues[progress_id] = queue

    client = ListenBrainzClient()

    try:
        display_name = playlist_type.replace("-", " ").title()
        await queue.put({
            "type": "info",
            "message": f"Fetching {display_name} for {username}...",
            "progress": 0,
            "total": 0
        })

        # Use the requested playlist type
        tracks = await client.get_playlist_by_type(username, playlist_type)

        if not tracks:
            # Be strict: the UI selector chose a specific type, so do not
            # silently fall back to a different playlist.
            raise Exception(f"No playlist found for type '{display_name}' for this user.")

        # Normalize mojibake in titles/artists/albums before display & search.
        for track in tracks:
            track.title = fix_unicode(track.title)
            track.artist = fix_unicode(track.artist)
            if track.album:
                track.album = fix_unicode(track.album)

        await queue.put({
            "type": "info",
            "message": f"Found {len(tracks)} tracks from ListenBrainz",
            "progress": 0,
            "total": len(tracks)
        })

        validated_tracks = []
        if validate:
            for i, track in enumerate(tracks, 1):
                display_text = f"{track.artist} - {track.title}"

                await queue.put({
                    "type": "validating",
                    "message": f"Validating: {display_text}",
                    "progress": i,
                    "total": len(tracks),
                    "current_track": {
                        "artist": track.artist,
                        "title": track.title
                    }
                })

                log_info(f"[{i}/{len(tracks)}] Validating: {display_text}")

                # Mutates `track` in place with Tidal ids when found.
                await search_track_with_fallback(track.artist, track.title, track)

                validated_tracks.append({
                    "title": track.title,
                    "artist": track.artist,
                    "mbid": track.mbid,
                    "tidal_id": track.tidal_id,
                    "tidal_artist_id": track.tidal_artist_id,
                    "tidal_album_id": track.tidal_album_id,
                    "tidal_exists": track.tidal_exists,
                    "album": track.album,
                    "cover": getattr(track, 'cover', None)
                })

                # Brief pause keeps the event loop responsive and throttles
                # upstream search requests.
                await asyncio.sleep(0.1)
        else:
            for track in tracks:
                validated_tracks.append({
                    "title": track.title,
                    "artist": track.artist,
                    "mbid": track.mbid,
                    "tidal_id": None,
                    "tidal_artist_id": None,
                    "tidal_album_id": None,
                    "tidal_exists": False,
                    "album": track.album,
                    "cover": None
                })

            await queue.put({
                "type": "info",
                "message": "Skipping validation as requested.",
                "progress": len(tracks),
                "total": len(tracks)
            })

        found_count = sum(1 for t in validated_tracks if t.get("tidal_exists"))

        log_info(f"Validation complete: {found_count}/{len(validated_tracks)} found on Tidal")

        await queue.put({
            "type": "complete",
            "message": f"Validation complete: {found_count}/{len(validated_tracks)} found on Tidal",
            "progress": len(tracks),
            "total": len(tracks),
            "tracks": validated_tracks,
            "found_count": found_count
        })

    except Exception as e:
        log_error(f"ListenBrainz generation error: {str(e)}")
        await queue.put({
            "type": "error",
            "message": str(e),
            "progress": 0,
            "total": 0
        })
    finally:
        # Always close the HTTP client and signal end-of-stream, even on error.
        await client.close()
        await queue.put(None)
--------------------------------------------------------------------------------
/backend/tests/test_search.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import MagicMock
2 | import os
3 |
# Set dummy auth for tests
os.environ["AUTH_USERNAME"] = "test"
os.environ["AUTH_PASSWORD"] = "test"

# Mock Authorization header — base64("test:test") == "dGVzdDp0ZXN0",
# matching the dummy credentials set above.
AUTH_HEADER = {"Authorization": "Basic dGVzdDp0ZXN0"}  # test:test
10 |
def test_search_tracks_success(client, mock_tidal_client):
    """Track search flattens the Tidal payload into simple track items."""
    tidal_payload = {
        "tracks": {
            "items": [
                {
                    "id": 123,
                    "title": "Test Track",
                    "artist": {"name": "Test Artist"},
                    "album": {"title": "Test Album", "cover": "abc-123"},
                    "duration": 300,
                    "audioQuality": "LOSSLESS"
                }
            ]
        }
    }
    mock_tidal_client.search_tracks.return_value = tidal_payload

    response = client.get("/api/search/tracks?q=test", headers=AUTH_HEADER)

    assert response.status_code == 200
    body = response.json()
    assert "items" in body
    assert len(body["items"]) == 1
    first = body["items"][0]
    assert first["title"] == "Test Track"
    assert first["artist"] == "Test Artist"
35 |
def test_search_tracks_empty(client, mock_tidal_client):
    """An empty Tidal response maps to an empty items list."""
    mock_tidal_client.search_tracks.return_value = {}

    response = client.get("/api/search/tracks?q=empty", headers=AUTH_HEADER)

    assert response.status_code == 200
    assert response.json()["items"] == []
43 |
def test_search_albums_success(client, mock_tidal_client):
    """Album search returns the mocked album item."""
    mock_tidal_client.search_albums.return_value = {
        "albums": {"items": [{"id": 456, "title": "Test Album"}]}
    }

    response = client.get("/api/search/albums?q=test", headers=AUTH_HEADER)

    assert response.status_code == 200
    items = response.json()["items"]
    assert len(items) == 1
    assert items[0]["title"] == "Test Album"
58 |
def test_search_playlists_success(client, mock_tidal_client):
    """Playlist search maps uuid -> id and image -> cover."""
    mock_tidal_client.search_playlists.return_value = {
        "playlists": {
            "items": [
                {
                    "uuid": "playlist-123",
                    "title": "My Playlist",
                    "creator": {"name": "Tester"},
                    "numberOfTracks": 2,
                    "image": "abc-123"
                }
            ]
        }
    }

    response = client.get("/api/search/playlists?q=test", headers=AUTH_HEADER)

    assert response.status_code == 200
    items = response.json()["items"]
    assert len(items) == 1
    playlist = items[0]
    assert playlist["id"] == "playlist-123"
    assert playlist["title"] == "My Playlist"
    assert playlist["cover"] == "abc-123"
81 |
82 |
def test_search_playlists_squareimage_priority(client, mock_tidal_client):
    """squareImage wins over both image and cover when present."""
    mock_tidal_client.search_playlists.return_value = {
        "playlists": {
            "items": [
                {
                    "id": "playlist-xyz",
                    "title": "Square First",
                    "image": "image-low",
                    "cover": "cover-low",
                    "squareImage": "square-hi"
                }
            ]
        }
    }

    response = client.get("/api/search/playlists?q=test", headers=AUTH_HEADER)

    assert response.status_code == 200
    items = response.json()["items"]
    assert len(items) == 1
    assert items[0]["cover"] == "square-hi"
104 |
def test_search_unauthorized(client):
    """Requests without an Authorization header are rejected with 401."""
    assert client.get("/api/search/tracks?q=test").status_code == 401
108 |
def test_get_playlist_tracks(client, mock_tidal_client):
    """Playlist detail endpoint returns metadata plus flattened tracks."""
    def make_item(track_id, title, artist, album, cover, duration):
        # Build one Tidal playlist item wrapper.
        return {
            "item": {
                "id": track_id,
                "title": title,
                "artist": {"name": artist},
                "album": {"title": album, "cover": cover},
                "duration": duration
            }
        }

    mock_tidal_client.get_playlist_tracks.return_value = {
        "playlist": {"uuid": "playlist-123", "title": "My Playlist"},
        "items": [
            make_item(111, "Song One", "Artist A", "Album A", "cov1", 200),
            make_item(222, "Song Two", "Artist B", "Album B", "cov2", 180),
        ]
    }

    response = client.get("/api/playlist/playlist-123", headers=AUTH_HEADER)

    assert response.status_code == 200
    body = response.json()
    assert body["playlist"]["title"] == "My Playlist"
    assert len(body["items"]) == 2
    assert body["items"][0]["title"] == "Song One"
143 |
--------------------------------------------------------------------------------
/frontend/src/components/Login.jsx:
--------------------------------------------------------------------------------
1 | import { useState } from "preact/hooks";
2 | import { useAuthStore } from "../store/authStore";
3 |
/**
 * Login screen: verifies HTTP Basic credentials against the API before
 * persisting them in the auth store.
 *
 * NOTE(review): the JSX markup in the return below is garbled in this
 * extract (tags/attributes stripped) — verify against the original
 * component before editing further.
 */
export function Login() {
  const [username, setUsername] = useState("");
  const [password, setPassword] = useState("");
  const [error, setError] = useState("");
  const [loading, setLoading] = useState(false);

  const setCredentials = useAuthStore((state) => state.setCredentials);

  const handleSubmit = async (e) => {
    e.preventDefault();
    setError("");
    setLoading(true);

    try {
      // Test credentials by making a simple API call
      const credentials = btoa(`${username}:${password}`);
      const response = await fetch("/api/search/tracks?q=test", {
        headers: {
          Authorization: `Basic ${credentials}`,
        },
      });

      if (response.status === 401) {
        setError("Invalid username or password");
        setLoading(false);
        return;
      }

      if (!response.ok) {
        setError("Failed to authenticate");
        setLoading(false);
        return;
      }

      // Credentials are valid — storing them switches the app view, so
      // `loading` is not reset on the success path.
      setCredentials(username, password);
    } catch (err) {
      setError("Failed to connect to server");
      setLoading(false);
    }
  };

  return (




        🦑


        Tidaloader

        Sign in to continue




  );
}
148 |
--------------------------------------------------------------------------------
/backend/api/clients/listenbrainz.py:
--------------------------------------------------------------------------------
1 |
2 | import httpx
3 | from typing import List, Optional, Dict, Any
4 | import logging
5 | from api.models import PlaylistTrack
6 |
7 | logger = logging.getLogger(__name__)
8 |
9 | class ListenBrainzClient:
10 | """Client for ListenBrainz API"""
11 |
12 | BASE_URL = "https://api.listenbrainz.org/1"
13 |
    def __init__(self):
        # Shared async client; 30s timeout accommodates slow LB responses.
        self.client = httpx.AsyncClient(timeout=30.0)
16 |
    async def close(self):
        """Close the underlying HTTP client and its connection pool."""
        await self.client.aclose()
19 |
20 | async def get_playlist(self, playlist_id: str) -> Dict[str, Any]:
21 | """Fetch a specific playlist by ID"""
22 | url = f"{self.BASE_URL}/playlist/{playlist_id}"
23 | try:
24 | response = await self.client.get(url)
25 | response.raise_for_status()
26 | return response.json()
27 | except Exception as e:
28 | logger.error(f"Error fetching playlist {playlist_id}: {e}")
29 | raise
30 |
31 | async def get_user_playlists(self, username: str) -> List[Dict[str, Any]]:
32 | """Fetch playlists created for a user (Weekly Jams, etc)"""
33 | url = f"{self.BASE_URL}/user/{username}/playlists/createdfor"
34 | try:
35 | response = await self.client.get(url)
36 | if response.status_code == 404:
37 | logger.warning(f"User {username} not found or has no playlists")
38 | return []
39 |
40 | response.raise_for_status()
41 | data = response.json()
42 | return data.get("playlists", [])
43 | except httpx.HTTPStatusError as e:
44 | logger.error(f"Error fetching playlists for {username}: {e}")
45 | raise
46 | except Exception as e:
47 | import traceback
48 | logger.error(f"Error fetching playlists for {username}: {repr(e)}")
49 | logger.error(traceback.format_exc())
50 | raise
51 |
52 | async def get_playlist_by_type(self, username: str, playlist_type: str) -> List[PlaylistTrack]:
53 | """
54 | Fetch a specific type of playlist for a user.
55 | Supported types: 'weekly-jams', 'weekly-exploration', 'year-in-review-discoveries', 'year-in-review-missed'
56 | """
57 | logger.info(f"Fetching {playlist_type} for {username}")
58 |
59 | playlists = await self.get_user_playlists(username)
60 |
61 | target_playlist = None
62 |
63 | # Define keywords for each type
64 | keywords = {
65 | "weekly-jams": "weekly jams",
66 | "weekly-exploration": "weekly exploration",
67 | "year-in-review-discoveries": "top discoveries",
68 | "year-in-review-missed": "top missed recordings"
69 | }
70 |
71 | search_term = keywords.get(playlist_type)
72 | if not search_term:
73 | logger.error(f"Unknown playlist type: {playlist_type}")
74 | return []
75 |
76 | # Find the latest playlist matching the keyword
77 | # Playlists are usually ordered by date descending from the API, but we'll checking carefully
78 | candidate_playlists = []
79 | for pl_wrapper in playlists:
80 | pl = pl_wrapper.get("playlist", {})
81 | title = pl.get("title", "").lower()
82 | if search_term in title:
83 | candidate_playlists.append(pl)
84 |
85 | # Sort by title (usually contains date/year) to get the latest?
86 | # Actually the API returns them usually sorted, but let's just take the first one found
87 | # which is typically the latest for Weeklys. For yearly, we might want the latest year.
88 | if candidate_playlists:
89 | # Simple heuristic: first one is usually latest
90 | target_playlist = candidate_playlists[0]
91 |
92 | if not target_playlist:
93 | logger.warning(f"No playlist found for type '{playlist_type}' for {username}")
94 | return []
95 |
96 | playlist_id_url = target_playlist.get("identifier")
97 | if not playlist_id_url:
98 | logger.error("Playlist found but has no identifier")
99 | return []
100 |
101 | uuid = playlist_id_url.split('/')[-1]
102 | logger.info(f"Fetching full details for playlist {uuid} ({target_playlist.get('title')})")
103 |
104 | try:
105 | full_playlist_data = await self.get_playlist(uuid)
106 | target_playlist = full_playlist_data.get("playlist", {})
107 | except Exception as e:
108 | logger.error(f"Failed to fetch full playlist {uuid}: {e}")
109 | return []
110 |
111 | tracks_data = target_playlist.get("track", [])
112 |
113 | playlist_tracks = []
114 | for t in tracks_data:
115 | title = t.get("title", "Unknown Title")
116 | artist = t.get("creator", "Unknown Artist")
117 | album = t.get("album")
118 |
119 | mbid = None
120 | identifiers = t.get("identifier", [])
121 | extension = t.get("extension", {})
122 |
123 | if "https://musicbrainz.org/doc/jspf#track" in extension:
124 | meta = extension["https://musicbrainz.org/doc/jspf#track"]
125 | pass
126 |
127 | if isinstance(identifiers, list):
128 | for ident in identifiers:
129 | if "musicbrainz.org/recording/" in ident:
130 | mbid = ident.split("recording/")[-1]
131 | break
132 |
133 | if not mbid and "musicbrainz_track_id" in extension:
134 | mbid = extension["musicbrainz_track_id"]
135 |
136 | playlist_tracks.append(PlaylistTrack(
137 | title=title,
138 | artist=artist,
139 | mbid=mbid,
140 | album=album
141 | ))
142 |
143 | logger.info(f"Found {len(playlist_tracks)} tracks in {playlist_type} for {username}")
144 | return playlist_tracks
145 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Tidaloader
2 |
3 | A full-stack web application for downloading high-quality music from Tidal with intelligent playlist generation via ListenBrainz integration. Features automatic metadata tagging, lyrics fetching, and organized file management.
4 |
5 | Heavily inspired by https://github.com/uimaxbai/tidal-ui and built using https://github.com/sachinsenal0x64/hifi
6 |
7 | > [!IMPORTANT]
8 | >
9 | > # Project Terms
10 |
11 | - We do not encourage piracy. This project is made purely for educational and personal use.
12 | If you somehow download copyrighted content, you are solely responsible for complying with the relevant laws in your country.
13 |
14 | - The Tidaloader Project assumes no responsibility for any misuse or legal violations arising from the use of this project.
15 |
16 | - This project does not claim ownership of any music or audio content. All rights remain with their respective copyright holders. Users are **encouraged** to support artists and rights owners by maintaining a valid Tidal subscription. Tidaloader serves solely as a downloading interface for personal, non-commercial use.
17 |
18 | ## Screenshots
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 | ## Key Features
29 |
30 | - **Multi-Format Support**:
31 | - **FLAC**: Hi-Res (24-bit/192kHz) & Lossless (16-bit/44.1kHz)
32 | - **Opus**: High-efficiency 192kbps VBR
33 | - **MP3**: 320kbps / 128kbps (Transcoded)
34 | - **AAC**: 320kbps / 96kbps
35 | - **Smart Playlists**: Generate "Daily Jams" using ListenBrainz history.
36 | - **Rich Metadata**: Auto-tagging with MusicBrainz IDs, Artist/Album organization, and embedded covers.
37 | - **Intelligent Library**: Strict ID-based album matching prevents duplicates. Artist covers are automatically fetched and cached for a beautiful, persistent browsing experience.
38 | - **Lyrics**: Synced (`.lrc`) and plain text lyrics via LrcLib.
39 | - **Queue Management**: Concurrent downloads, auto-retry, and persistence.
40 | - **Resilience**: Automatic rotation of Tidal API tokens and endpoints.
41 |
42 | ## Quick Start (Docker)
43 |
44 | The recommended way to run Tidaloader.
45 |
46 | 1. Create a `docker-compose.yml`:
47 |
48 | ```yaml
49 | version: '3.8'
50 | services:
51 | tidaloader:
52 | image: ghcr.io/rayz3r0/tidaloader:latest
53 | container_name: tidaloader
54 | ports:
55 | - "8001:8001"
56 | environment:
57 | - MUSIC_DIR=/music
58 | - AUTH_USERNAME=admin
59 | - AUTH_PASSWORD=changeme
60 | - MAX_CONCURRENT_DOWNLOADS=3
61 | - QUEUE_AUTO_PROCESS=true
62 | volumes:
63 | - ./music:/music
64 | restart: unless-stopped
65 | ```
66 |
67 | 2. Run the container:
68 | ```bash
69 | docker-compose up -d
70 | ```
71 |
72 | 3. Open `http://localhost:8001`.
73 |
74 | ## Configuration Options
75 |
76 | Configure these in your `docker-compose.yml` or `.env` file.
77 |
78 | | Variable | Description | Default |
79 | |----------|-------------|---------|
80 | | `MUSIC_DIR` | Internal container path for downloads | `/music` |
81 | | `AUTH_USERNAME` | Web UI Username | `admin` |
82 | | `AUTH_PASSWORD` | Web UI Password | `changeme` |
83 | | `MAX_CONCURRENT_DOWNLOADS` | Max parallel downloads | `3` |
84 | | `QUEUE_AUTO_PROCESS` | Start queue automatically on boot | `true` |
85 | | `MUSIC_DIR_HOST` | (Docker) Host directory to map | `./music` |
86 |
87 | ## Audio Quality Guide
88 |
89 | | Quality Setting | Details | Format |
90 | |-----------------|---------|--------|
91 | | `HI_RES` | Source quality (up to 24-bit/192kHz) | FLAC |
92 | | `LOSSLESS` | CD quality (16-bit/44.1kHz) | FLAC |
93 | | `HIGH` | Standard High (320kbps) | AAC |
94 | | `LOW` | Data Saver (96kbps) | AAC |
95 | | `MP3_320`/`256` | Transcoded High Quality | MP3 |
96 | | `OPUS_192` | Transcoded High Efficiency | Opus |
97 |
98 | ## Manual Installation
99 |
100 |
101 | Windows
102 |
103 | 1. **Clone**: `git clone https://github.com/RayZ3R0/tidaloader.git`
104 | 2. **Backend**:
105 | ```powershell
106 | cd backend
107 | python -m venv venv; .\venv\Scripts\Activate.ps1
108 | pip install -r requirements.txt
109 | cp .env.example .env # Edit .env with your settings
110 | ```
111 | 3. **Frontend**:
112 | ```powershell
113 | cd ..\frontend
114 | npm install; npm run build
115 | ```
116 | 4. **Run**: `cd ..\backend; .\start.ps1`
117 |
118 |
119 |
120 | Linux
121 |
122 | 1. **Clone**: `git clone https://github.com/RayZ3R0/tidaloader.git`
123 | 2. **Backend**:
124 | ```bash
125 | cd backend
126 | python3 -m venv venv; source venv/bin/activate
127 | pip install -r requirements.txt
128 | cp .env.example .env # Edit .env
129 | ```
130 | 3. **Frontend**:
131 | ```bash
132 | cd ../frontend
133 | npm install; npm run build
134 | ```
135 | 4. **Run**: `python -m uvicorn api.main:app --host 0.0.0.0 --port 8001`
136 |
137 |
138 |
139 | Android (Termux)
140 |
141 | 1. Install Termux (F-Droid).
142 | 2. Run Setup:
143 | ```bash
144 | curl -O https://raw.githubusercontent.com/RayZ3R0/tidaloader/main/backend/termux-setup.sh
145 | bash termux-setup.sh
146 | ```
147 | 3. Start: `./start-service.sh`
148 |
149 |
150 | ## Development
151 |
152 | * **Backend**: `uvicorn api.main:app --reload` (Port 8001)
153 | * **Frontend**: `npm run dev` (Port 5173)
154 |
155 | ## Credits
156 |
157 | Inspired by [tidal-ui](https://github.com/uimaxbai/tidal-ui). Playlist generation by ListenBrainz.
158 |
159 | ## License
160 |
161 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
162 |
163 | > **Disclaimer**: This project is for educational purposes only. The developers do not endorse piracy and are not responsible for how this software is used. Please support artists by purchasing their music.
164 |
--------------------------------------------------------------------------------
/backend/api/services/files.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | import shutil
3 | import aiohttp
4 | from api.utils.logging import log_info, log_success, log_warning
5 | from api.settings import DOWNLOAD_DIR
6 |
def sanitize_path_component(name: str) -> str:
    """Make *name* safe for use as a single path segment.

    Replaces characters that are invalid on Windows/NTFS (a superset of the
    POSIX-invalid set) with underscores, strips leading/trailing dots and
    spaces, and caps the length at 200 characters. Returns "Unknown" when
    the input is empty or sanitization leaves nothing usable.
    """
    if not name:
        return "Unknown"

    # Single C-level pass instead of chained .replace() calls. Note: the
    # original used a raw string containing a duplicated backslash; the
    # effective character set is identical.
    invalid_map = str.maketrans({ch: '_' for ch in '<>:"/\\|?*'})
    name = name.translate(invalid_map)

    # Leading/trailing dots and spaces are problematic on Windows.
    name = name.strip('. ')

    # Keep well under common filesystem name-length limits.
    if len(name) > 200:
        name = name[:200].strip()

    return name or "Unknown"
21 |
async def organize_file_by_metadata(temp_filepath: Path, metadata: dict, template: str = "{Artist}/{Album}/{TrackNumber} - {Title}", group_compilations: bool = True) -> Path:
    """Move a downloaded file from its temp location into the library layout.

    The destination (relative to DOWNLOAD_DIR) is built by expanding
    ``template`` with sanitized metadata fields. Sidecar lyrics files
    (.lrc/.txt) travel with the audio file, and for the 'opus' target the
    cover art is stored as cover.jpg in the album folder.

    Returns the final path on success. On any failure the original
    ``temp_filepath`` is returned so the caller still has a valid file.
    """
    try:
        # Prefer album artist for folder grouping; fall back to track artist.
        artist = metadata.get('album_artist') or metadata.get('artist', 'Unknown Artist')
        album = metadata.get('album', 'Unknown Album')
        title = metadata.get('title', temp_filepath.stem)
        track_number = metadata.get('track_number')
        file_ext = metadata.get('file_ext')
        if not file_ext:
            file_ext = temp_filepath.suffix or '.flac'
        # Normalize to a leading-dot extension (".mp3", not "mp3").
        file_ext = file_ext if file_ext.startswith('.') else f".{file_ext}"

        s_artist = sanitize_path_component(artist)
        s_album = sanitize_path_component(album)
        s_title = sanitize_path_component(title)

        is_compilation = artist.lower() in ['various artists', 'various'] or metadata.get('compilation')

        # Zero-pad track numbers so files sort correctly; "00" when unknown.
        track_str = str(track_number).zfill(2) if track_number else "00"

        template_artist = s_artist
        template_album = s_album

        # Optionally group Various-Artists releases under one "Compilations"
        # folder, prefixing the album with "VA - " for clarity.
        if group_compilations and is_compilation:
            template_artist = "Compilations"
            if not template_album.startswith("VA - "):
                template_album = f"VA - {template_album}"

        template_vars = {
            "Artist": template_artist,
            "AlbumArtist": s_artist,
            "TrackArtist": sanitize_path_component(metadata.get('artist', artist)),
            "Album": template_album,
            "Title": s_title,
            "TrackNumber": track_str,
            "Year": str(metadata.get('date', '')).split('-')[0] if metadata.get('date') else "Unknown Year"
        }

        try:
            clean_template = template.lstrip('/')
            relative_path_str = clean_template.format(**template_vars)
        except KeyError as e:
            # Unknown placeholder in a user-supplied template: fall back to
            # the default layout rather than failing the download.
            log_warning(f"Invalid template key: {e}. Falling back to default.")
            relative_path_str = f"{s_artist}/{s_album}/{track_str} - {s_title}"

        if not relative_path_str.endswith(file_ext):
            relative_path_str += file_ext

        final_path = DOWNLOAD_DIR / relative_path_str
        final_dir = final_path.parent

        final_dir.mkdir(parents=True, exist_ok=True)

        # Destination already exists: treat as a duplicate and discard the
        # temp file plus any sidecar lyrics instead of overwriting.
        if final_path.exists():
            log_warning(f"File already exists at: {final_path}")
            if temp_filepath.exists() and temp_filepath != final_path:
                try:
                    temp_filepath.unlink()
                    temp_lrc = temp_filepath.with_suffix('.lrc')
                    if temp_lrc.exists():
                        temp_lrc.unlink()
                    temp_txt = temp_filepath.with_suffix('.txt')
                    if temp_txt.exists():
                        temp_txt.unlink()
                except Exception:
                    pass
            return final_path

        if temp_filepath != final_path:
            shutil.move(str(temp_filepath), str(final_path))
            log_success(f"Organized to: {relative_path_str}")

            # Carry sidecar lyrics written next to the temp file.
            temp_lrc_path = temp_filepath.with_suffix('.lrc')
            if temp_lrc_path.exists():
                final_lrc_path = final_path.with_suffix('.lrc')
                shutil.move(str(temp_lrc_path), str(final_lrc_path))
                log_success("Moved .lrc file to organized location")

            temp_txt_path = temp_filepath.with_suffix('.txt')
            if temp_txt_path.exists():
                final_txt_path = final_path.with_suffix('.txt')
                shutil.move(str(temp_txt_path), str(final_txt_path))
                log_success("Moved .txt file to organized location")

        # Write lyrics from metadata as sidecar files; skipped for Opus
        # (see the cover.jpg branch below for Opus special-casing).
        if metadata.get('synced_lyrics') and metadata.get('target_format') != 'opus':
            lrc_path = final_path.with_suffix('.lrc')
            try:
                with open(lrc_path, 'w', encoding='utf-8') as f:
                    f.write(metadata['synced_lyrics'])
                log_success("Saved synced lyrics to .lrc file")
            except Exception as e:
                log_warning(f"Failed to save .lrc file: {e}")

        elif metadata.get('plain_lyrics') and metadata.get('target_format') != 'opus':
            txt_path = final_path.with_suffix('.txt')
            try:
                with open(txt_path, 'w', encoding='utf-8') as f:
                    f.write(metadata['plain_lyrics'])
                log_success("Saved plain lyrics to .txt file")
            except Exception as e:
                log_warning(f"Failed to save .txt file: {e}")

        # For Opus downloads, fetch the cover art into the album folder as
        # cover.jpg (presumably because art isn't embedded for Opus —
        # TODO confirm against the audio/tagging service).
        if metadata.get('target_format') == 'opus' and metadata.get('cover_url'):
            cover_path = final_dir / 'cover.jpg'
            try:
                async with aiohttp.ClientSession() as session:
                    async with session.get(metadata['cover_url']) as response:
                        if response.status == 200:
                            image_data = await response.read()
                            with open(cover_path, 'wb') as f:
                                f.write(image_data)
                            log_success("Saved cover art to cover.jpg")
            except Exception as e:
                log_warning(f"Failed to save cover art: {e}")

        return final_path

    except Exception as e:
        # Best-effort organizer: never lose the file — hand back the temp
        # path unorganized so the download still completes.
        log_warning(f"Failed to organize file: {e}")
        import traceback
        traceback.print_exc()
        return temp_filepath
143 |
--------------------------------------------------------------------------------
/frontend/src/components/LibraryPage.jsx:
--------------------------------------------------------------------------------
1 |
2 | import { h } from "preact";
3 | import { useState, useEffect } from "preact/hooks";
4 | import { api } from "../api/client";
5 | import { useToastStore } from "../stores/toastStore";
6 | import { LibraryArtistPage } from "./LibraryArtistPage";
7 |
/**
 * Library browser: scans the on-disk music library via the backend and
 * lists artists; selecting an artist drills into LibraryArtistPage.
 * NOTE(review): the JSX markup below was garbled by extraction — tags are
 * missing; the logic statements are preserved verbatim.
 */
export function LibraryPage() {
  const [artists, setArtists] = useState([]);
  const [loading, setLoading] = useState(true);
  const [selectedArtist, setSelectedArtist] = useState(null);
  const addToast = useToastStore((state) => state.addToast);

  // Initial (cached) scan on mount.
  useEffect(() => {
    loadLibrary();
  }, []);

  // Trigger a backend scan (force bypasses the cache), then refresh the list.
  const loadLibrary = async (force = false) => {
    setLoading(true);
    try {
      const result = await api.scanLibrary(force);
      // If result is empty or we scanned, fetch the list
      const artistsList = await api.getLibraryArtists();
      setArtists(artistsList);
      if (force) {
        addToast("Library scan complete", "success");
      }
    } catch (err) {
      console.error("Failed to load library:", err);
      addToast("Failed to load library: " + err.message, "error");
    } finally {
      setLoading(false);
    }
  };

  // Drill-down view for a single artist; onBack returns to the grid.
  if (selectedArtist) {
    return (
      setSelectedArtist(null)}
      />
    );
  }

  return (



Your Library


    {loading && artists.length === 0 ? (



Scanning library files...

    ) : artists.length === 0 ? (



Library is Empty

    No music files found in your local directory. Download some music from the Search tab to populate your library.


    ) : (

    {artists.map((artist) => (

 setSelectedArtist(artist)}
      />
    ))}

    )}

  );
}
103 |
/**
 * Artist tile: shows the artist image (Tidal picture preferred, local file
 * fallback, first-letter placeholder otherwise) plus album/track counts.
 * NOTE(review): JSX markup in the return was garbled by extraction; the
 * logic statements are preserved verbatim.
 */
function ArtistCard({ artist, onClick }) {
  const [picture, setPicture] = useState(artist.picture);
  const [loadingImage, setLoadingImage] = useState(false);

  useEffect(() => {
    // Lazy load Tidal picture if missing
    if (!picture && !artist.image && artist.tidal_id) {
      setLoadingImage(true);
      api.getArtist(artist.tidal_id)
        .then(details => {
          if (details.artist?.picture) {
            setPicture(details.artist.picture);
            // Persist to backend cache
            api.updateLibraryArtist(artist.name, { picture: details.artist.picture })
              .catch(e => console.warn("Failed to cache artist picture", e));
          }
        })
        .catch(err => console.debug(`Failed to fetch picture for ${artist.name}`, err))
        .finally(() => setLoadingImage(false));
    }
  }, [artist]);

  // Determine which image to show
  // Priority: Tidal Picture (URL) > Local Image (Path)
  const imageUrl = picture
    ? api.getCoverUrl(picture, 320)
    : (artist.image ? api.getLocalCoverUrl(artist.image) : null);

  return (


    {imageUrl ? (

 {
        e.target.style.display = 'none';
        e.target.nextSibling.style.display = 'flex';
      }}
      />
    ) : null}

    {/* Fallback / Loading State */}

    {loadingImage ? (

⟳
    ) : (
      artist.name.charAt(0)
    )}




    {artist.name}


    {artist.album_count} albums • {artist.track_count} tracks



  );
}
174 |
--------------------------------------------------------------------------------
/backend/api/services/download.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | import asyncio
3 | import aiohttp
4 | import traceback
5 |
6 | from api.state import active_downloads
7 | from download_state import download_state_manager
8 | from api.utils.logging import log_error, log_info, log_step, log_success, log_warning
9 | from api.services.audio import transcode_to_mp3, transcode_to_opus, write_metadata_tags
10 | from api.services.files import organize_file_by_metadata
11 | from api.services.beets import run_beets_import
12 | from api.services.lyrics import embed_lyrics_with_ffmpeg
13 | from queue_manager import queue_manager
14 |
async def download_file_async(
    track_id: int,
    stream_url: str,
    filepath: Path,
    filename: str,
    metadata: dict = None,
    organization_template: str = "{Artist}/{Album}/{TrackNumber} - {Title}",
    group_compilations: bool = True,
    run_beets: bool = False,
    embed_lyrics: bool = False
):
    """Download a track stream to disk, post-process it, and file it away.

    Pipeline: stream the bytes to ``filepath`` while publishing progress to
    the shared ``active_downloads`` map, ``download_state_manager`` and
    ``queue_manager``; optionally transcode to MP3/Opus; write tags (and
    optionally lyrics); organize the file into the library via the template;
    optionally run a beets import. On failure the partial file(s) are
    removed and the failure is recorded.

    NOTE(review): several log strings contain "(unknown)" — this looks like
    a lost f-string placeholder (probably the filename) from extraction;
    confirm against the original file before relying on these messages.
    """
    # Track the post-transcode file so the except-path can clean it up too.
    processed_path = filepath
    try:
        log_step("3/4", f"Downloading (unknown)...")

        # Register the download if the caller hasn't already.
        if track_id not in active_downloads:
            active_downloads[track_id] = {'progress': 0, 'status': 'downloading'}

        download_state_manager.set_downloading(track_id, 0, metadata)

        async with aiohttp.ClientSession() as session:
            async with session.get(stream_url, timeout=aiohttp.ClientTimeout(total=300)) as response:
                if response.status != 200:
                    # Record the failure, keep it visible for 5s so the UI
                    # can show it, then drop the entry.
                    error_msg = f"HTTP {response.status}"
                    log_error(f"Download failed: {error_msg}")
                    if track_id in active_downloads:
                        active_downloads[track_id] = {'progress': 0, 'status': 'failed'}
                        download_state_manager.set_failed(track_id, error_msg, metadata)
                        await asyncio.sleep(5)
                        del active_downloads[track_id]
                    return

                # content-length may be absent; 0 disables percentage reporting.
                total_size = int(response.headers.get('content-length', 0))
                downloaded = 0

                # Ensure the directory exists
                filepath.parent.mkdir(parents=True, exist_ok=True)

                with open(filepath, 'wb') as f:
                    async for chunk in response.content.iter_chunked(8192):
                        if chunk:
                            f.write(chunk)
                            downloaded += len(chunk)

                            if total_size > 0:
                                progress = int((downloaded / total_size) * 100)
                                active_downloads[track_id] = {
                                    'progress': progress,
                                    'status': 'downloading'
                                }
                                download_state_manager.update_progress(track_id, progress)
                                # Update queue manager for frontend sync
                                queue_manager.update_active_progress(track_id, progress, 'downloading')

                        # Yield to the event loop so other tasks stay responsive.
                        await asyncio.sleep(0.01)

        # Optional transcode step when an MP3/Opus target was requested;
        # the raw download is removed once the transcode succeeds.
        if metadata:
            if metadata.get('target_format') == 'mp3':
                bitrate = metadata.get('bitrate_kbps', 256)
                mp3_path = filepath.with_suffix('.mp3')
                log_step("3.5/4", f"Transcoding to MP3 ({bitrate} kbps)...")
                active_downloads[track_id] = {
                    'progress': 95,
                    'status': 'transcoding'
                }
                download_state_manager.update_progress(track_id, 95)
                await transcode_to_mp3(filepath, mp3_path, bitrate)
                processed_path = mp3_path
                metadata['file_ext'] = '.mp3'
                try:
                    filepath.unlink()
                except FileNotFoundError:
                    pass
                except Exception as exc:
                    log_warning(f"Failed to remove intermediate file: {exc}")
            elif metadata.get('target_format') == 'opus':
                bitrate = metadata.get('bitrate_kbps', 192)
                opus_path = filepath.with_suffix('.opus')
                log_step("3.5/4", f"Transcoding to Opus ({bitrate} kbps)...")
                active_downloads[track_id] = {
                    'progress': 95,
                    'status': 'transcoding'
                }
                download_state_manager.update_progress(track_id, 95)
                await transcode_to_opus(filepath, opus_path, bitrate)
                processed_path = opus_path
                metadata['file_ext'] = '.opus'
                try:
                    filepath.unlink()
                except FileNotFoundError:
                    pass
                except Exception as exc:
                    log_warning(f"Failed to remove intermediate file: {exc}")
            else:
                # No transcode: keep the original container/extension.
                processed_path = filepath
                metadata.setdefault('file_ext', filepath.suffix)

        if metadata:
            log_step("4/4", "Writing metadata tags...")
            await write_metadata_tags(processed_path, metadata)

            if embed_lyrics:
                await embed_lyrics_with_ffmpeg(processed_path, metadata)

        # Organize file
        log_step("4/4", "Organizing file...")
        final_path = await organize_file_by_metadata(
            processed_path,
            metadata,
            template=organization_template,
            group_compilations=group_compilations
        )

        # Run beets import if requested
        if run_beets:
            await run_beets_import(final_path)

        # Update state to completed - store the final path for fast file existence checks
        if metadata is None:
            metadata = {}
        metadata['final_path'] = str(final_path)
        download_state_manager.set_completed(track_id, final_path.name, metadata)

        file_size_mb = final_path.stat().st_size / 1024 / 1024
        display_name = final_path.name if final_path else filename
        log_success(f"Downloaded: {display_name} ({file_size_mb:.2f} MB)")
        log_info(f"Location: {final_path}")

        # Invalidate library cache so the new file/tags appear immediately
        try:
            from api.services.library import library_service
            library_service.invalidate_cache()
        except Exception as e:
            log_warning(f"Failed to invalidate library cache: {e}")

        print(f"{'='*60}\n")

        # Keep the completed entry visible briefly before removing it.
        await asyncio.sleep(5)

        if track_id in active_downloads:
            del active_downloads[track_id]

    except Exception as e:
        log_error(f"Download error: {e}")
        traceback.print_exc()

        if track_id in active_downloads:
            active_downloads[track_id] = {'progress': 0, 'status': 'failed'}
            download_state_manager.set_failed(track_id, str(e), metadata)
            await asyncio.sleep(5)
            del active_downloads[track_id]

        # Remove both the raw download and any transcoded intermediate so
        # half-written files don't pollute the library.
        if filepath.exists():
            try:
                filepath.unlink()
                log_info(f"Cleaned up partial file: (unknown)")
            except Exception:
                pass

        if processed_path and processed_path != filepath and processed_path.exists():
            try:
                processed_path.unlink()
                log_info(f"Cleaned up partial file: {processed_path.name}")
            except Exception:
                pass
180 |
--------------------------------------------------------------------------------
/frontend/src/app.jsx:
--------------------------------------------------------------------------------
1 | import { h } from "preact";
2 | import { useState } from "preact/hooks";
3 | import { Router } from "preact-router";
4 | import logo from "./assets/tsunami.svg";
5 | import { useAuthStore } from "./store/authStore";
6 | import { Login } from "./components/Login";
7 | import { SearchBar } from "./components/SearchBar";
8 | import { WeeklyJamsGenerator } from "./components/WeeklyJamsGenerator";
9 | import { DownloadQueuePopout } from "./components/DownloadQueuePopout";
10 | import { SettingsPanel } from "./components/SettingsPanel";
11 | import { LibraryPage } from "./components/LibraryPage";
12 | import { Toast } from "./components/Toast";
13 |
14 | import { ThemePicker } from "./components/ThemePicker";
15 |
16 | import { ReleaseNotes } from "./components/ReleaseNotes";
17 | import { NyanCatEasterEgg } from "./components/NyanCatEasterEgg";
18 | import { releaseNotes } from "./data/releaseNotes";
19 | import { useEffect } from "preact/hooks";
20 |
/**
 * Root authenticated shell: tab navigation (search / weekly-jams / library),
 * settings modal, once-per-version release-notes modal, toasts, and the
 * download queue popout. Renders the Login screen until authenticated.
 * NOTE(review): the JSX markup below was garbled by extraction — tags are
 * missing; the logic statements are preserved verbatim.
 */
export function App() {
  const isAuthenticated = useAuthStore((state) => state.isAuthenticated);
  const clearCredentials = useAuthStore((state) => state.clearCredentials);
  const [activeTab, setActiveTab] = useState("search");
  const [showSettings, setShowSettings] = useState(false);
  const [showReleaseNotes, setShowReleaseNotes] = useState(false);

  // Show release notes once per version, tracked via localStorage.
  useEffect(() => {
    try {
      if (releaseNotes.length > 0) {
        const latestVersion = releaseNotes[0].version;
        const lastSeen = localStorage.getItem("last-seen-version");
        if (lastSeen !== latestVersion) {
          setShowReleaseNotes(true);
        }
      }
    } catch (e) {
      console.warn("Failed to check release notes version", e);
    }
  }, []);

  // Closing the modal records the latest version as seen.
  const handleCloseReleaseNotes = () => {
    setShowReleaseNotes(false);
    if (releaseNotes.length > 0) {
      localStorage.setItem("last-seen-version", releaseNotes[0].version);
    }
  };

  if (!isAuthenticated) {
    return ;
  }

  return (





Tidaloader











Tidaloader



    {showSettings && (


    )}



    {activeTab === "search" && }
    {activeTab === "weekly-jams" && }
    {activeTab === "library" && }



  );
}
187 |
--------------------------------------------------------------------------------
/frontend/src/components/SettingsPanel.jsx:
--------------------------------------------------------------------------------
1 | import { h } from "preact";
2 | import { useDownloadStore } from "../stores/downloadStore";
3 |
// Audio quality presets shown in the Settings panel. `value` is the
// identifier persisted in the download store; label/description are UI-only.
const QUALITY_OPTIONS = [
  {
    value: "HI_RES_LOSSLESS",
    label: "Hi-Res FLAC",
    description: "Up to 24-bit/192kHz",
  },
  { value: "LOSSLESS", label: "FLAC", description: "16-bit/44.1kHz" },
  { value: "MP3_256", label: "MP3 256kbps", description: "Transcoded MP3 (libmp3lame)" },
  { value: "MP3_128", label: "MP3 128kbps", description: "Transcoded MP3 (smaller size)" },
  { value: "OPUS_192VBR", label: "Opus 192kbps VBR", description: "Variable bitrate Opus (192kbps target)" },
  { value: "HIGH", label: "320kbps AAC", description: "High quality AAC" },
  { value: "LOW", label: "96kbps AAC", description: "Low quality AAC" },
];
17 |
// Preset file-organization templates. Placeholders ({Artist}, {Album}, …)
// are expanded server-side when the downloaded file is organized.
const TEMPLATE_OPTIONS = [
  { value: "{Artist}/{Album}/{TrackNumber} - {Title}", label: "Artist/Album/Track - Title (Default)" },
  { value: "{Album}/{TrackNumber} - {Title}", label: "Album/Track - Title" },
  { value: "{Artist} - {Title}", label: "Artist - Title" },
  { value: "{Artist}/{Album}/{Title}", label: "Artist/Album/Title" },
];
24 |
/**
 * Settings form bound to the download store: audio quality, file-organization
 * template (preset or custom), compilation grouping, beets import, and
 * FFmpeg lyric embedding (shown as unavailable for Opus).
 * NOTE(review): the JSX markup below was garbled by extraction — tags are
 * missing; the logic statements are preserved verbatim.
 */
export function SettingsPanel() {
  const quality = useDownloadStore((state) => state.quality);
  const setQuality = useDownloadStore((state) => state.setQuality);

  const organizationTemplate = useDownloadStore((state) => state.organizationTemplate);
  const setOrganizationTemplate = useDownloadStore((state) => state.setOrganizationTemplate);

  const groupCompilations = useDownloadStore((state) => state.groupCompilations);
  const setGroupCompilations = useDownloadStore((state) => state.setGroupCompilations);

  const runBeets = useDownloadStore((state) => state.runBeets);
  const setRunBeets = useDownloadStore((state) => state.setRunBeets);

  const embedLyrics = useDownloadStore((state) => state.embedLyrics);
  const setEmbedLyrics = useDownloadStore((state) => state.setEmbedLyrics);

  return (


    {/* Audio Quality */}





    {/* File Organization */}




 setOrganizationTemplate(e.target.value)}
      class="input-field w-full text-sm font-mono"
      placeholder="Custom template..."
    />

    Available: {Artist}, {Album}, {Title}, {TrackNumber}, {Year}




    {/* Toggles */}



Put tracks in "Compilations" folder if Various Artists







Run "beet import" after download (requires beets)






    {quality.startsWith('OPUS')
      ? "Not available for Opus format"
      : "Use FFmpeg to embed lyrics (resolves sync issues)"}





  );
}
155 |
--------------------------------------------------------------------------------
/frontend/src/api/client.js:
--------------------------------------------------------------------------------
1 | /**
2 | * API client for Tidaloader backend
3 | */
4 |
5 | import { useAuthStore } from "../store/authStore";
6 |
7 | const API_BASE = "/api";
8 |
class ApiClient {
  /**
   * Thin wrapper around `fetch` for the Tidaloader backend.
   *
   * All requests use the relative `/api` prefix, carry the Authorization
   * header from the auth store, and send cookies (`credentials: "include"`).
   * Any 401 response clears the stored credentials and surfaces as an
   * "Authentication required" error.
   */

  /**
   * Get authorization headers (JSON content type + optional Authorization).
   */
  getHeaders() {
    const headers = {
      "Content-Type": "application/json",
    };

    const authHeader = useAuthStore.getState().getAuthHeader();
    if (authHeader) {
      headers["Authorization"] = authHeader;
    }

    return headers;
  }

  /**
   * Authorization-only headers (no content type) for body-less requests.
   */
  getAuthHeaders() {
    const authHeader = useAuthStore.getState().getAuthHeader();
    return authHeader ? { Authorization: authHeader } : {};
  }

  /**
   * Shared response handling for get/post/patch.
   *
   * - 401: clear stored credentials and throw "Authentication required".
   * - Other non-OK: throw. When `parseDetail` is true (POST/PATCH
   *   convention) the error body's `detail` field is used as the message;
   *   otherwise the GET-style `HTTP <status>: <statusText>` message is used.
   * - OK: resolve with the parsed JSON body.
   *
   * @param {Response} response - fetch Response object
   * @param {boolean} parseDetail - whether to read `detail` from the error body
   */
  async _handleResponse(response, parseDetail) {
    if (response.status === 401) {
      useAuthStore.getState().clearCredentials();
      throw new Error("Authentication required");
    }

    if (!response.ok) {
      if (parseDetail) {
        const error = await response
          .json()
          .catch(() => ({ detail: response.statusText }));
        throw new Error(error.detail || `HTTP ${response.status}`);
      }
      throw new Error(`HTTP ${response.status}: ${response.statusText}`);
    }
    return response.json();
  }

  /**
   * Make GET request with auth.
   * `params` entries that are undefined or null are omitted from the query.
   */
  async get(path, params = {}) {
    const url = new URL(API_BASE + path, window.location.origin);
    Object.entries(params).forEach(([key, value]) => {
      if (value !== undefined && value !== null) {
        url.searchParams.append(key, value);
      }
    });

    const response = await fetch(url, {
      headers: this.getHeaders(),
      credentials: "include",
    });
    return this._handleResponse(response, false);
  }

  /**
   * Make POST request with auth and a JSON body.
   */
  async post(path, data = {}) {
    const response = await fetch(API_BASE + path, {
      method: "POST",
      headers: this.getHeaders(),
      body: JSON.stringify(data),
      credentials: "include",
    });
    return this._handleResponse(response, true);
  }

  /**
   * Make PATCH request with auth and a JSON body.
   */
  async patch(path, data = {}) {
    const response = await fetch(API_BASE + path, {
      method: "PATCH",
      headers: this.getHeaders(),
      body: JSON.stringify(data),
      credentials: "include",
    });
    return this._handleResponse(response, true);
  }

  // ==========================================================================
  // SEARCH
  // ==========================================================================

  /**
   * Search for tracks
   */
  searchTracks(query) {
    return this.get("/search/tracks", { q: query });
  }

  /**
   * Search for albums
   */
  searchAlbums(query) {
    return this.get("/search/albums", { q: query });
  }

  /**
   * Search for artists
   */
  searchArtists(query) {
    return this.get("/search/artists", { q: query });
  }

  /**
   * Search for playlists
   */
  searchPlaylists(query) {
    return this.get("/search/playlists", { q: query });
  }

  /**
   * Get album tracks
   */
  getAlbumTracks(albumId) {
    return this.get(`/album/${albumId}/tracks`);
  }

  /**
   * Get artist details
   */
  getArtist(artistId) {
    return this.get(`/artist/${artistId}`);
  }

  /**
   * Get playlist details and tracks
   */
  getPlaylist(playlistId) {
    return this.get(`/playlist/${playlistId}`);
  }

  // ==========================================================================
  // DOWNLOADS
  // ==========================================================================

  /**
   * Get stream URL for track
   */
  getStreamUrl(trackId, quality = "LOSSLESS") {
    return this.get(`/download/stream/${trackId}`, { quality });
  }

  /**
   * Download track server-side
   */
  downloadTrack(trackId, artist, title, quality = "LOSSLESS") {
    return this.post("/download/track", {
      track_id: trackId,
      artist,
      title,
      quality,
    });
  }

  // ==========================================================================
  // LISTENBRAINZ
  // ==========================================================================

  /**
   * Generate ListenBrainz playlist
   */
  generateListenBrainzPlaylist(username, playlistType = "periodic-jams", shouldValidate = true) {
    return this.post("/listenbrainz/generate", {
      username,
      playlist_type: playlistType,
      should_validate: shouldValidate,
    });
  }

  /**
   * Validate a single ListenBrainz track
   */
  validateListenBrainzTrack(track) {
    return this.post("/listenbrainz/validate-track", {
      track: track
    });
  }

  /**
   * Create ListenBrainz progress stream (Server-Sent Events).
   * EventSource cannot set custom headers, so the auth token is passed
   * as a `token` query parameter for the backend to validate.
   */
  createListenBrainzProgressStream(progressId) {
    const authHeader = useAuthStore.getState().getAuthHeader();
    let urlString = `${API_BASE}/listenbrainz/progress/${progressId}`;

    if (authHeader) {
      urlString += `?token=${encodeURIComponent(authHeader)}`;
    }

    const url = new URL(urlString, window.location.origin);

    const eventSource = new EventSource(url.toString(), {
      withCredentials: true,
    });

    return eventSource;
  }

  /**
   * Create Server-Sent Events stream for download progress.
   * Relies on cookie auth (`withCredentials`) only — no token parameter.
   */
  createProgressStream(trackId) {
    const url = new URL(
      `${API_BASE}/download/progress/${trackId}`,
      window.location.origin
    );

    return new EventSource(url.toString(), {
      withCredentials: true,
    });
  }

  // ==========================================================================
  // COVER ART
  // ==========================================================================

  /**
   * Get cover URL from Tidal (first matching variant for the given size).
   */
  getCoverUrl(coverId, size = "640") {
    const variants = this.getCoverUrlVariants(coverId, [size]);
    return variants.length > 0 ? variants[0] : null;
  }

  /**
   * Return multiple size variants for a cover ID or URL (largest to smallest).
   * A value that is already an http(s) URL is returned as-is. Otherwise the
   * dashed Tidal cover ID is converted to a path and tried in its original,
   * upper- and lower-case forms for each requested size.
   */
  getCoverUrlVariants(coverId, sizes = ["640", "320", "160"]) {
    if (!coverId) return [];
    if (typeof coverId === "string" && coverId.startsWith("http")) {
      return [coverId];
    }

    const cleanOriginal = String(coverId).replace(/-/g, "/");
    const candidates = [
      cleanOriginal,
      cleanOriginal.toUpperCase(),
      cleanOriginal.toLowerCase(),
    ].filter(Boolean);

    const urls = [];
    for (const id of candidates) {
      for (const s of sizes) {
        const url = `https://resources.tidal.com/images/${id}/${s}x${s}.jpg`;
        if (!urls.includes(url)) {
          urls.push(url);
        }
      }
    }
    return urls;
  }

  get baseUrl() {
    return window.location.origin;
  }

  // ============================================================================
  // LIBRARY API METHODS
  // ============================================================================

  /**
   * Scan library for changes
   */
  scanLibrary(force = false) {
    return this.get("/library/scan", { force });
  }

  /**
   * Get all artists in library
   */
  getLibraryArtists() {
    return this.get("/library/artists");
  }

  /**
   * Get specific artist details from library
   */
  getLibraryArtist(artistName) {
    return this.get(`/library/artist/${encodeURIComponent(artistName)}`);
  }

  /**
   * Get local cover URL
   */
  getLocalCoverUrl(path) {
    if (!path) return null;
    return `${API_BASE}/library/cover?path=${encodeURIComponent(path)}`;
  }

  /**
   * Update artist metadata (e.g. picture)
   */
  updateLibraryArtist(artistName, metadata) {
    return this.patch(`/library/artist/${encodeURIComponent(artistName)}`, metadata);
  }

  // ============================================================================
  // QUEUE API METHODS
  // ============================================================================

  /**
   * Get current queue state from server
   */
  getQueue() {
    return this.get("/queue");
  }

  /**
   * Add tracks to the server queue
   */
  addToQueue(tracks, options = {}) {
    return this.post("/queue/add", { tracks, ...options });
  }

  /**
   * Remove a track from the queue
   */
  async removeFromQueue(trackId) {
    const response = await fetch(`${API_BASE}/queue/${trackId}`, {
      method: "DELETE",
      headers: this.getHeaders(),
      credentials: "include",
    });

    if (!response.ok) {
      throw new Error(`Failed to remove from queue: ${response.status}`);
    }
    return response.json();
  }

  /**
   * Clear the queue
   */
  clearQueue() {
    return this.post("/queue/clear");
  }

  /**
   * Clear completed items
   */
  clearCompleted() {
    return this.post("/queue/clear-completed");
  }

  /**
   * Clear failed items
   */
  clearFailed() {
    return this.post("/queue/clear-failed");
  }

  /**
   * Retry all failed items
   */
  retryAllFailed() {
    return this.post("/queue/retry-failed");
  }

  /**
   * Retry a single failed item
   */
  retryFailed(trackId) {
    return this.post(`/queue/retry/${trackId}`);
  }

  /**
   * Start queue processing (for manual mode)
   */
  startQueue() {
    return this.post("/queue/start");
  }

  /**
   * Stop queue processing
   */
  stopQueue() {
    return this.post("/queue/stop");
  }

  /**
   * Get queue settings
   */
  getQueueSettings() {
    return this.get("/queue/settings");
  }
}
399 |
// Shared singleton API client used across the whole frontend.
export const api = new ApiClient();
401 |
--------------------------------------------------------------------------------
/frontend/src/components/PlaylistPage.jsx:
--------------------------------------------------------------------------------
1 | import { h } from "preact";
2 | import { useEffect, useState } from "preact/hooks";
3 | import { api } from "../api/client";
4 | import { downloadManager } from "../utils/downloadManager";
5 | import { useToastStore } from "../stores/toastStore";
6 |
// Playlist detail page: loads a playlist's metadata and tracks, lets the
// user select tracks, and sends the selection to the server download queue.
//
// NOTE(review): the JSX element markup in the render sections below appears
// to have been stripped by the dump this file was extracted from — only the
// embedded JS expressions survive. Do not treat the render blocks as
// compilable; restore them from version control before editing.
export function PlaylistPage({ playlistId, onBack }) {
  // UI state: loading flag, playlist metadata, track list, selection, error.
  const [loading, setLoading] = useState(true);
  const [playlist, setPlaylist] = useState(null);
  const [tracks, setTracks] = useState([]);
  const [selectedTracks, setSelectedTracks] = useState(new Set());
  const [error, setError] = useState(null);

  const addToast = useToastStore((state) => state.addToast);

  // Reload whenever the playlist id changes.
  useEffect(() => {
    loadPlaylistData();
  }, [playlistId]);

  // Fetch playlist metadata + tracks; all tracks are selected by default.
  const loadPlaylistData = async () => {
    setLoading(true);
    setError(null);

    try {
      const result = await api.getPlaylist(playlistId);

      setPlaylist(
        result.playlist || {
          id: playlistId,
          title: "Unknown Playlist",
        }
      );

      const items = result.items || [];
      setTracks(items);
      setSelectedTracks(new Set(items.map((t) => t.id)));
    } catch (err) {
      setError(err.message);
      addToast(`Failed to load playlist: ${err.message}`, "error");
    } finally {
      setLoading(false);
    }
  };

  // Toggle a single track's selection (copies the Set so state updates).
  const toggleTrack = (trackId) => {
    const updated = new Set(selectedTracks);
    if (updated.has(trackId)) {
      updated.delete(trackId);
    } else {
      updated.add(trackId);
    }
    setSelectedTracks(updated);
  };

  const selectAll = () => setSelectedTracks(new Set(tracks.map((t) => t.id)));
  const deselectAll = () => setSelectedTracks(new Set());

  // Queue the selected tracks on the server; track numbers fall back to
  // the track's position in the playlist when the API doesn't supply one.
  const handleDownloadTracks = () => {
    const selected = tracks.filter((t) => selectedTracks.has(t.id));

    if (selected.length === 0) {
      addToast("No tracks selected", "warning");
      return;
    }

    const payload = selected.map((t, idx) => ({
      tidal_id: t.id,
      title: t.title,
      artist: t.artist || "Unknown Artist",
      album: t.album,
      cover: t.cover || playlist?.cover,
      track_number: t.track_number || t.trackNumber || idx + 1,
      tidal_exists: true,
    }));

    downloadManager.addToServerQueue(payload).then((res) => {
      addToast(
        `Added ${res.added} track${res.added === 1 ? "" : "s"} to queue`,
        "success"
      );
    });
  };

  // Sum of all track durations (seconds); missing durations count as 0.
  const totalDuration = tracks.reduce((sum, t) => sum + (t.duration || 0), 0);

  if (loading && !playlist) {
    return (




          Loading playlist...




    );
  }

  if (error) {
    return (




    );
  }

  return (




      {playlist?.cover ? (

      ) : (

          {playlist?.title?.charAt(0) || "?"}

      )}



          {playlist?.title || "Playlist"}

        {playlist?.creator && (

          By {playlist.creator}
        )}

        {tracks.length > 0 && {tracks.length} tracks}
        {totalDuration > 0 && (
          <>
            •
            {formatTotalDuration(totalDuration)}
          >
        )}

      {playlist?.description && (

          {playlist.description}

      )}



      {tracks.length > 0 ? (







      {selectedTracks.size > 0 && (

      )}



      {tracks.map((track, idx) => {
        const isSelected = selectedTracks.has(track.id);
        const trackNumber =
          track.track_number || track.trackNumber || idx + 1;
        return (

        );
      })}


      ) : (



          No tracks found for this playlist

      )}

  );
}
239 |
// Cover image with graceful degradation: tries each Tidal size variant in
// order on image-load error, and renders a first-letter placeholder when no
// cover is available at all.
// NOTE(review): the returned JSX markup appears stripped in this dump.
function CoverWithFallback({ cover, title }) {
  const variants = api.getCoverUrlVariants(cover);
  if (!variants.length) {
    return (

      {title?.charAt(0) || "?"}

    );
  }

  // On image error, advance to the next URL variant; the current index is
  // tracked on the element itself via the data-idx attribute.
  const handleError = (e) => {
    const idx = Number(e.target.dataset.idx || 0);
    const next = idx + 1;
    if (next < variants.length) {
      e.target.dataset.idx = String(next);
      e.target.src = variants[next];
    }
  };

  return (

  );
}
269 |
// Back-arrow icon used by the page header's back button.
// NOTE(review): the SVG markup appears stripped in this dump.
function BackIcon() {
  return (

  );
}
282 |
/**
 * Format a duration in seconds as "M:SS" (e.g. 125 -> "2:05").
 *
 * Guards against missing or invalid durations from the API — without the
 * check a non-finite value would render as "NaN:NaN". Fractional seconds
 * are floored so e.g. 65.7 renders as "1:05", not "1:5.7".
 */
function formatDuration(seconds) {
  if (!Number.isFinite(seconds) || seconds < 0) return "0:00";
  const minutes = Math.floor(seconds / 60);
  const secs = Math.floor(seconds % 60);
  return `${minutes}:${secs.toString().padStart(2, "0")}`;
}
288 |
/**
 * Format a total duration in seconds as a human-readable length,
 * e.g. "1 hr 5 min" or, under an hour, just "42 min".
 */
function formatTotalDuration(seconds) {
  const totalMinutes = Math.floor(seconds / 60);
  const hours = Math.floor(totalMinutes / 60);
  const minutes = totalMinutes % 60;
  return hours > 0 ? `${hours} hr ${minutes} min` : `${minutes} min`;
}
298 |
299 |
--------------------------------------------------------------------------------
/frontend/src/stores/downloadStore.js:
--------------------------------------------------------------------------------
1 | import { create } from "zustand";
2 | import { persist } from "zustand/middleware";
3 |
// Zustand store for the download queue, persisted to localStorage under
// "troi-download-queue". Tracks move through four lifecycle lists:
// queue -> downloading -> completed | failed.
export const useDownloadStore = create(
  persist(
    (set, get) => ({
      // Lifecycle lists. Each entry is a track object with a locally
      // generated `id`, the Tidal `tidal_id`, and status metadata.
      queue: [],
      downloading: [],
      completed: [],
      failed: [],

      // Download preferences (persisted).
      quality: "LOSSLESS",
      maxConcurrent: 3,
      organizationTemplate: "{Artist}/{Album}/{TrackNumber} - {Title}",
      groupCompilations: true,
      runBeets: false,
      embedLyrics: false,

      // Server queue settings (synced from backend)
      serverQueueSettings: {
        max_concurrent: 3,
        auto_process: true,
        is_processing: false,
      },

      // Flag to indicate if we're using server queue
      useServerQueue: true,

      // Add tracks, skipping any already queued/downloading/completed.
      // NOTE: tracks in `failed` are deliberately NOT deduplicated, so a
      // failed track can be re-added (acts as a manual retry path).
      addToQueue: (tracks) =>
        set((state) => {
          const existingIds = new Set([
            ...state.queue.map((t) => t.tidal_id),
            ...state.downloading.map((t) => t.tidal_id),
            ...state.completed.map((t) => t.tidal_id),
          ]);

          const newTracks = tracks
            .filter((track) => !existingIds.has(track.tidal_id))
            .map((track) => ({
              ...track,
              // Local id combines tidal id + timestamp so re-adds get a
              // fresh identity.
              id: `${track.tidal_id}-${Date.now()}`,
              status: "queued",
              progress: 0,
              addedAt: Date.now(),
            }));

          if (newTracks.length === 0) {
            console.log("All tracks already in queue");
            return state;
          }

          console.log(
            `Adding ${newTracks.length} new tracks to queue (${tracks.length - newTracks.length
            } duplicates skipped)`
          );

          return {
            queue: [...state.queue, ...newTracks],
          };
        }),

      // Remove a queued (not yet started) track by local id.
      removeFromQueue: (trackId) =>
        set((state) => ({
          queue: state.queue.filter((t) => t.id !== trackId),
        })),

      // Move a track from queue to downloading, stamping the start time.
      startDownload: (trackId) =>
        set((state) => {
          const track = state.queue.find((t) => t.id === trackId);
          if (!track) return state;

          return {
            queue: state.queue.filter((t) => t.id !== trackId),
            downloading: [
              ...state.downloading,
              { ...track, status: "downloading", startedAt: Date.now() },
            ],
          };
        }),

      // Update the progress percentage of an in-flight download.
      updateProgress: (trackId, progress) =>
        set((state) => ({
          downloading: state.downloading.map((t) =>
            t.id === trackId ? { ...t, progress } : t
          ),
        })),

      // Move a track from downloading to completed, recording the filename.
      completeDownload: (trackId, filename) =>
        set((state) => {
          const track = state.downloading.find((t) => t.id === trackId);
          if (!track) return state;

          return {
            downloading: state.downloading.filter((t) => t.id !== trackId),
            completed: [
              ...state.completed,
              {
                ...track,
                status: "completed",
                progress: 100,
                completedAt: Date.now(),
                filename,
              },
            ],
          };
        }),

      // Move a track from downloading to failed, recording the error.
      failDownload: (trackId, error) =>
        set((state) => {
          const track = state.downloading.find((t) => t.id === trackId);
          if (!track) return state;

          return {
            downloading: state.downloading.filter((t) => t.id !== trackId),
            failed: [
              ...state.failed,
              {
                ...track,
                status: "failed",
                error,
                failedAt: Date.now(),
              },
            ],
          };
        }),

      // Requeue a single failed track, clearing its error/progress.
      retryFailed: (trackId) =>
        set((state) => {
          const track = state.failed.find((t) => t.id === trackId);
          if (!track) return state;

          return {
            failed: state.failed.filter((t) => t.id !== trackId),
            queue: [
              ...state.queue,
              { ...track, status: "queued", error: undefined, progress: 0 },
            ],
          };
        }),

      clearCompleted: () => set({ completed: [] }),

      clearFailed: () => set({ failed: [] }),

      clearQueue: () => set({ queue: [] }),

      // Requeue every failed track in one shot.
      retryAllFailed: () =>
        set((state) => {
          const retryTracks = state.failed.map((track) => ({
            ...track,
            status: "queued",
            error: undefined,
            progress: 0,
          }));
          return {
            failed: [],
            queue: [...state.queue, ...retryTracks],
          };
        }),

      // Preference setters (persisted via partialize below).
      setQuality: (quality) => set({ quality }),
      setOrganizationTemplate: (template) => set({ organizationTemplate: template }),
      setGroupCompilations: (enabled) => set({ groupCompilations: enabled }),
      setRunBeets: (enabled) => set({ runBeets: enabled }),
      setEmbedLyrics: (enabled) => set({ embedLyrics: enabled }),

      // Server queue state sync methods.
      // Only the lists that are present (not undefined) are replaced.
      setServerQueueState: ({ queue, downloading, completed, failed }) =>
        set((state) => ({
          queue: queue !== undefined ? queue : state.queue,
          downloading: downloading !== undefined ? downloading : state.downloading,
          completed: completed !== undefined ? completed : state.completed,
          failed: failed !== undefined ? failed : state.failed,
        })),

      setQueueSettings: (settings) =>
        set((state) => ({
          serverQueueSettings: {
            ...state.serverQueueSettings,
            ...settings,
          },
          // Also update maxConcurrent for backwards compatibility.
          // `??` (not `||`) so an explicit 0 from the server is not ignored.
          maxConcurrent: settings.max_concurrent ?? state.maxConcurrent,
        })),

      setUseServerQueue: (enabled) => set({ useServerQueue: enabled }),

      // Aggregate counts across all lifecycle lists.
      getStats: () => {
        const state = get();
        return {
          queued: state.queue.length,
          downloading: state.downloading.length,
          completed: state.completed.length,
          failed: state.failed.length,
          total:
            state.queue.length +
            state.downloading.length +
            state.completed.length +
            state.failed.length,
        };
      },

      // State reconciliation methods for backend sync.
      // Moves a single downloading track according to the backend's status.
      syncDownloadState: (trackId, backendStatus, backendData) =>
        set((state) => {
          const track = state.downloading.find((t) => t.id === trackId);
          if (!track) return state;

          const newState = { ...state };

          if (backendStatus === "completed") {
            // Move from downloading to completed
            newState.downloading = state.downloading.filter(
              (t) => t.id !== trackId
            );
            newState.completed = [
              ...state.completed,
              {
                ...track,
                status: "completed",
                progress: 100,
                completedAt: Date.now(),
                filename: backendData.filename || track.title,
              },
            ];
          } else if (backendStatus === "failed") {
            // Move from downloading to failed
            newState.downloading = state.downloading.filter(
              (t) => t.id !== trackId
            );
            newState.failed = [
              ...state.failed,
              {
                ...track,
                status: "failed",
                error: backendData.error || "Download failed",
                failedAt: Date.now(),
              },
            ];
          } else if (backendStatus === "downloading") {
            // Update progress if available
            if (backendData.progress !== undefined) {
              newState.downloading = state.downloading.map((t) =>
                t.id === trackId ? { ...t, progress: backendData.progress } : t
              );
            }
          }

          return newState;
        }),

      // Reconcile ALL downloading tracks against a backend state snapshot
      // ({ completed, failed, active } maps keyed by track id). Tracks the
      // backend no longer knows about are marked failed. The `queue` list
      // is intentionally left out of the returned partial (zustand merges).
      bulkReconcileWithBackend: (backendState) =>
        set((state) => {
          const newState = {
            downloading: [],
            completed: [...state.completed],
            failed: [...state.failed],
          };

          // Process each downloading track
          for (const track of state.downloading) {
            const trackId = String(track.tidal_id || track.id);

            // Check backend status
            if (backendState.completed && backendState.completed[trackId]) {
              // Move to completed
              const backendData = backendState.completed[trackId];
              newState.completed.push({
                ...track,
                status: "completed",
                progress: 100,
                completedAt: Date.now(),
                filename: backendData.filename || track.title,
              });
            } else if (backendState.failed && backendState.failed[trackId]) {
              // Move to failed
              const backendData = backendState.failed[trackId];
              newState.failed.push({
                ...track,
                status: "failed",
                error: backendData.error || "Download failed",
                failedAt: Date.now(),
              });
            } else if (backendState.active && backendState.active[trackId]) {
              // Still downloading - update progress
              const backendData = backendState.active[trackId];
              newState.downloading.push({
                ...track,
                progress: backendData.progress || track.progress || 0,
              });
            } else {
              // Not found on backend - mark as failed
              newState.failed.push({
                ...track,
                status: "failed",
                error: "Download not found on server (may have been lost)",
                failedAt: Date.now(),
              });
            }
          }

          return newState;
        }),
    }),
    {
      name: "troi-download-queue",
      // Persist only the lists and user preferences; transient server-queue
      // settings are re-synced from the backend on load.
      partialize: (state) => ({
        queue: state.queue,
        downloading: state.downloading,
        completed: state.completed,
        failed: state.failed,
        quality: state.quality,
        maxConcurrent: state.maxConcurrent,
        organizationTemplate: state.organizationTemplate,
        groupCompilations: state.groupCompilations,
        runBeets: state.runBeets,
        embedLyrics: state.embedLyrics,
      }),
    }
  )
);
321 |
--------------------------------------------------------------------------------
/frontend/src/utils/downloadManager.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Download Manager - Server Queue Integration
3 | *
4 | * This module manages the connection between the frontend and the server's
5 | * download queue. All download processing happens server-side.
6 | *
7 | * Responsibilities:
8 | * - Sync queue state from server periodically
9 | * - Add tracks to server queue
10 | * - Control server queue (start/stop for manual mode)
11 | * - Provide queue operations (clear, retry, remove)
12 | */
13 |
14 | import { api } from "../api/client";
15 | import { useDownloadStore } from "../stores/downloadStore";
16 | import { useToastStore } from "../stores/toastStore";
17 | import { useAuthStore } from "../store/authStore";
18 |
19 | class DownloadManager {
20 | constructor() {
21 | this.initialized = false;
22 | this.syncInterval = null;
23 | this.syncIntervalMs = 1000; // 1 second for smooth progress updates
24 | }
25 |
26 | /**
27 | * Initialize the download manager
28 | * Starts periodic sync with server queue
29 | */
30 | async initialize() {
31 | if (this.initialized) {
32 | console.log("Download manager already initialized");
33 | return;
34 | }
35 |
36 | console.log("🔄 Initializing download manager...");
37 | this.initialized = true;
38 |
39 | // Subscribe to auth state changes
40 | useAuthStore.subscribe((state) => {
41 | if (state.isAuthenticated) {
42 | if (!this.syncInterval) {
43 | console.log("✅ User authenticated - starting queue sync");
44 | this.startSync();
45 | }
46 | } else {
47 | if (this.syncInterval) {
48 | console.log("🔒 User logged out - stopping queue sync");
49 | this.stopSync();
50 | }
51 | // Clear local queue state on logout
52 | useDownloadStore.getState().setServerQueueState({
53 | queue: [],
54 | downloading: [],
55 | completed: [],
56 | failed: []
57 | });
58 | }
59 | });
60 |
61 | // Initial check
62 | const { isAuthenticated } = useAuthStore.getState();
63 | if (isAuthenticated) {
64 | this.startSync();
65 | await this.syncQueueState();
66 | } else {
67 | console.log("⏳ Waiting for authentication to start queue sync");
68 | }
69 |
70 | console.log("✅ Download manager initialized - waiting for auth");
71 | }
72 |
73 | /**
74 | * Start periodic sync with server
75 | */
76 | startSync() {
77 | if (this.syncInterval) {
78 | clearInterval(this.syncInterval);
79 | }
80 |
81 | // Sync immediately
82 | this.syncQueueState();
83 |
84 | // Then sync periodically
85 | this.syncInterval = setInterval(() => {
86 | this.syncQueueState();
87 | }, this.syncIntervalMs);
88 |
89 | console.log(`🔄 Server queue sync started (every ${this.syncIntervalMs}ms)`);
90 | }
91 |
92 | /**
93 | * Stop periodic sync
94 | */
95 | stopSync() {
96 | if (this.syncInterval) {
97 | clearInterval(this.syncInterval);
98 | this.syncInterval = null;
99 | console.log("🛑 Server queue sync stopped");
100 | }
101 | }
102 |
103 | /**
104 | * Sync queue state from server to local store
105 | */
106 | async syncQueueState() {
107 | try {
108 | // Double check auth before request to avoid 401s
109 | if (!useAuthStore.getState().isAuthenticated) {
110 | return;
111 | }
112 |
113 | const serverState = await api.getQueue();
114 | if (!serverState) return;
115 |
116 | const store = useDownloadStore.getState();
117 |
118 | // Transform server state to store format
119 | const queue = (serverState.queue || []).map(item => ({
120 | id: `q-${item.track_id}`,
121 | tidal_id: item.track_id,
122 | title: item.title || "Unknown Title",
123 | artist: item.artist || "Unknown Artist",
124 | album: item.album || "",
125 | track_number: item.track_number,
126 | cover: item.cover,
127 | progress: 0,
128 | }));
129 |
130 | const downloading = (serverState.active || []).map(item => ({
131 | id: `d-${item.track_id}`,
132 | tidal_id: item.track_id,
133 | title: item.title || "Unknown Title",
134 | artist: item.artist || "Unknown Artist",
135 | album: item.album || "",
136 | progress: item.progress || 0,
137 | }));
138 |
139 | const completed = (serverState.completed || []).map(item => ({
140 | id: `c-${item.track_id}`,
141 | tidal_id: item.track_id,
142 | title: item.title || "Unknown Title",
143 | artist: item.artist || "Unknown Artist",
144 | filename: item.filename,
145 | }));
146 |
147 | const failed = (serverState.failed || []).map(item => ({
148 | id: `f-${item.track_id}`,
149 | tidal_id: item.track_id,
150 | title: item.title || "Unknown Title",
151 | artist: item.artist || "Unknown Artist",
152 | error: item.error,
153 | }));
154 |
155 | // Update store with server state
156 | store.setServerQueueState({ queue, downloading, completed, failed });
157 |
158 | // Update settings from server
159 | if (serverState.settings) {
160 | store.setQueueSettings(serverState.settings);
161 | }
162 | } catch (error) {
163 | // Silently fail - connection issues shouldn't spam errors
164 | console.debug("Queue sync failed:", error.message);
165 | }
166 | }
167 |
168 | /**
169 | * Add tracks to the server queue
170 | * @param {Array} tracks - Array of track objects with tidal_id, title, artist, etc.
171 | */
172 | async addToServerQueue(tracks) {
173 | try {
174 | const { quality, organizationTemplate, groupCompilations, runBeets, embedLyrics } =
175 | useDownloadStore.getState();
176 |
177 | // Transform tracks to API format
178 | const formattedTracks = tracks.map(track => ({
179 | track_id: Number(track.tidal_id || track.id),
180 | title: String(track.title || "Unknown Title"),
181 | artist: String(track.artist || "Unknown Artist"),
182 | album: track.album || "",
183 | album_artist: track.album_artist || track.albumArtist || null, // Pass Album Artist
184 | album_id: track.album_id || null,
185 | track_number: track.track_number || track.trackNumber || null,
186 | cover: track.cover || null,
187 | quality: quality || "HIGH",
188 | target_format: null,
189 | bitrate_kbps: null,
190 | run_beets: runBeets || false,
191 | embed_lyrics: embedLyrics || false,
192 | organization_template: organizationTemplate || "{Artist}/{Album}/{TrackNumber} - {Title}",
193 | group_compilations: groupCompilations !== false,
194 | ...(track.tidal_track_id && { tidal_track_id: String(track.tidal_track_id) }),
195 | ...(track.tidal_artist_id && { tidal_artist_id: String(track.tidal_artist_id) }),
196 | ...(track.tidal_album_id && { tidal_album_id: String(track.tidal_album_id) }),
197 | }));
198 |
199 | const result = await api.addToQueue(formattedTracks);
200 | console.log(`✅ Added ${result.added} tracks to server queue (${result.skipped} skipped)`);
201 |
202 | // Refresh state
203 | await this.syncQueueState();
204 |
205 | return result;
206 | } catch (error) {
207 | console.error("Error adding to server queue:", error);
208 | useToastStore.getState().addToast(
209 | `Failed to add to queue: ${error.message}`,
210 | "error"
211 | );
212 | return { added: 0, skipped: tracks.length };
213 | }
214 | }
215 |
216 | /**
217 | * Remove a track from the queue
218 | */
219 | async removeFromQueue(trackId) {
220 | try {
221 | await api.removeFromQueue(trackId);
222 | await this.syncQueueState();
223 | return true;
224 | } catch (error) {
225 | console.error("Error removing from queue:", error);
226 | return false;
227 | }
228 | }
229 |
230 | /**
231 | * Clear the server queue
232 | */
233 | async clearQueue() {
234 | try {
235 | const result = await api.clearQueue();
236 | console.log(`Cleared ${result.cleared} items from queue`);
237 | await this.syncQueueState();
238 | return result.cleared;
239 | } catch (error) {
240 | console.error("Error clearing queue:", error);
241 | return 0;
242 | }
243 | }
244 |
245 | /**
246 | * Clear completed items
247 | */
248 | async clearCompleted() {
249 | try {
250 | const result = await api.clearCompleted();
251 | await this.syncQueueState();
252 | return result.cleared;
253 | } catch (error) {
254 | console.error("Error clearing completed:", error);
255 | return 0;
256 | }
257 | }
258 |
259 | /**
260 | * Clear failed items
261 | */
262 | async clearFailed() {
263 | try {
264 | const result = await api.clearFailed();
265 | await this.syncQueueState();
266 | return result.cleared;
267 | } catch (error) {
268 | console.error("Error clearing failed:", error);
269 | return 0;
270 | }
271 | }
272 |
273 | /**
274 | * Retry all failed downloads
275 | */
276 | async retryAllFailed() {
277 | try {
278 | const result = await api.retryAllFailed();
279 | console.log(`Retried ${result.retried} failed downloads`);
280 | await this.syncQueueState();
281 | return result.retried;
282 | } catch (error) {
283 | console.error("Error retrying failed:", error);
284 | return 0;
285 | }
286 | }
287 |
288 | /**
289 | * Retry a single failed download
290 | */
291 | async retryFailed(trackId) {
292 | try {
293 | const result = await api.retryFailed(trackId);
294 | await this.syncQueueState();
295 | return result.success;
296 | } catch (error) {
297 | console.error("Error retrying failed download:", error);
298 | return false;
299 | }
300 | }
301 |
302 | /**
303 | * Start queue processing on server (for manual mode)
304 | */
305 | async start() {
306 | try {
307 | console.log("🎵 Requesting server queue processing start...");
308 | const result = await api.startQueue();
309 | console.log("✅ Server queue started:", result.message);
310 | await this.syncQueueState();
311 | } catch (error) {
312 | console.error("Error starting server queue:", error);
313 | }
314 | }
315 |
316 | /**
317 | * Stop queue processing on server
318 | */
319 | async stop() {
320 | try {
321 | console.log("🛑 Requesting server queue processing stop...");
322 | const result = await api.stopQueue();
323 | console.log("✅ Server queue stopped:", result.message);
324 | await this.syncQueueState();
325 | } catch (error) {
326 | console.error("Error stopping server queue:", error);
327 | }
328 | }
329 |
330 | /**
331 | * Get queue settings from server
332 | */
333 | async getQueueSettings() {
334 | try {
335 | return await api.getQueueSettings();
336 | } catch (error) {
337 | console.error("Error getting queue settings:", error);
338 | return { max_concurrent: 3, auto_process: true };
339 | }
340 | }
341 |
  // Legacy method aliases for backwards compatibility
  /**
   * @deprecated Alias for startSync(); also overrides the poll interval.
   * @param {number} [intervalMs=3000] sync polling interval in milliseconds
   */
  startServerQueueSync(intervalMs = 3000) {
    this.syncIntervalMs = intervalMs;
    this.startSync();
  }
347 |
  /** @deprecated Alias for stopSync(). */
  stopServerQueueSync() {
    this.stopSync();
  }
351 |
  /** @deprecated Alias for syncQueueState(); returns its promise. */
  syncServerQueueToStore() {
    return this.syncQueueState();
  }
355 | }
356 |
// Shared singleton — import this instance rather than constructing DownloadManager directly.
export const downloadManager = new DownloadManager();
358 |
// Auto-initialize on module load (browser contexts only — guarded so the
// module can be imported where `window` does not exist).
if (typeof window !== "undefined") {
  const bootstrap = () => {
    console.log("Auto-initializing download manager on page load");
    downloadManager.initialize();
  };
  window.addEventListener("load", bootstrap);
}
366 |
--------------------------------------------------------------------------------
/backend/api/services/library.py:
--------------------------------------------------------------------------------
1 |
2 | import os
3 | import json
4 | import time
5 | import logging
6 | import hashlib
7 | from pathlib import Path
8 | from typing import Dict, List, Optional
9 | import mutagen
10 | from mutagen.easyid3 import EasyID3
11 | from mutagen.flac import FLAC
12 | from mutagen.mp4 import MP4
13 |
14 | from api.settings import DOWNLOAD_DIR
15 |
16 | logger = logging.getLogger(__name__)
17 |
class LibraryService:
    """Scans the download directory for audio files and serves a cached
    artist -> album -> track view of the local library.

    The scan result is persisted to a JSON cache file so metadata that is
    expensive to recover (e.g. Tidal artist pictures set via
    ``update_artist_metadata``) survives restarts.
    """

    # Seconds a scan result stays fresh before disk is re-read.
    CACHE_TTL = 300

    def __init__(self):
        # Cache lives next to the api package: .../api/.cache/library_cache.json
        self.cache_file = Path(__file__).parent.parent / ".cache" / "library_cache.json"
        self.cache_file.parent.mkdir(exist_ok=True)
        self.library_data = self._load_cache()

    def _load_cache(self) -> Dict:
        """Load the persisted library cache, falling back to an empty structure."""
        if self.cache_file.exists():
            try:
                with open(self.cache_file, 'r') as f:
                    return json.load(f)
            except Exception:
                # Corrupt/unreadable cache: rebuild from scratch on next scan.
                pass
        return {"artists": {}, "timestamp": 0}

    def _save_cache(self):
        """Persist the in-memory library structure to disk (best effort)."""
        try:
            with open(self.cache_file, 'w') as f:
                json.dump(self.library_data, f)
        except Exception as e:
            logger.error(f"Failed to save library cache: {e}")

    @staticmethod
    def _first(value):
        """Unwrap mutagen's list-valued tags to their first element."""
        return value[0] if isinstance(value, list) else value

    @staticmethod
    def _to_int(value, default: int) -> int:
        """Coerce tag values like 7, "7" or "7/12" to int; fall back on junk.

        BUGFIX: the original only stripped the "/total" suffix from track
        numbers, so a disc tag like "1/2" raised ValueError and silently
        dropped the whole file from the library.
        """
        if not value:
            # Mirrors the original falsy check: None, '' and 0 use the default.
            return default
        if isinstance(value, str) and '/' in value:
            value = value.split('/')[0]
        try:
            return int(value)
        except (TypeError, ValueError):
            return default

    def _get_file_metadata(self, filepath: Path) -> Optional[Dict]:
        """Read tags from a single audio file.

        Returns a normalized dict (artist/album/title, year, track/disc
        numbers, path info, duration, Tidal IDs when present) or None when
        the file cannot be parsed.
        """
        try:
            ext = filepath.suffix.lower()
            tags = {}
            audio = None

            if ext == '.mp3':
                try:
                    audio = EasyID3(filepath)
                    tags = audio
                except mutagen.id3.ID3NoHeaderError:
                    # MP3 without an ID3 header: let mutagen sniff the file.
                    audio = mutagen.File(filepath, easy=True)
                    tags = audio or {}
            elif ext == '.flac':
                audio = FLAC(filepath)
                tags = audio
            elif ext == '.m4a':
                audio = MP4(filepath)
                # MP4 atoms use their own naming; map them to the common keys.
                raw_tags = audio.tags or {}

                def _freeform(key):
                    # Tidal IDs are stored as freeform byte atoms; decode best-effort.
                    return (raw_tags.get(key, [b''])[0]).decode('utf-8', errors='ignore') or None

                tags = {
                    'artist': raw_tags.get('\xa9ART', [None])[0],
                    'album': raw_tags.get('\xa9alb', [None])[0],
                    'title': raw_tags.get('\xa9nam', [None])[0],
                    'date': raw_tags.get('\xa9day', [None])[0],
                    'tracknumber': raw_tags.get('trkn', [(None, None)])[0][0],
                    'discnumber': raw_tags.get('disk', [(None, None)])[0][0],
                    'tidal_artist_id': _freeform('----:com.apple.iTunes:TIDAL_ARTIST_ID'),
                    'tidal_album_id': _freeform('----:com.apple.iTunes:TIDAL_ALBUM_ID'),
                    'tidal_track_id': _freeform('----:com.apple.iTunes:TIDAL_TRACK_ID'),
                }
            elif ext == '.opus':
                audio = mutagen.File(filepath)
                tags = audio or {}

            if audio is None:
                # mutagen could not parse the container at all — skip the file
                # (previously this crashed later on audio.info and was skipped
                # via the broad except; now it is deliberate).
                return None

            # Normalize: mutagen returns lists for most tag values.
            artist = self._first(tags.get('artist', ['Unknown Artist']))
            album = self._first(tags.get('album', ['Unknown Album']))
            title = self._first(tags.get('title', [filepath.stem]))
            date = self._first(tags.get('date', ['']))
            track_num = self._first(tags.get('tracknumber', [None]))
            disc_num = self._first(tags.get('discnumber', [None]))

            # Tidal IDs: FLAC/Vorbis use uppercase keys, MP3/ID3 use TXXX
            # frames; the MP4 branch above already mapped them to lowercase.
            tidal_artist_id = self._first(
                tags.get('TIDAL_ARTIST_ID') or tags.get('TXXX:TIDAL_ARTIST_ID') or tags.get('tidal_artist_id'))
            tidal_album_id = self._first(
                tags.get('TIDAL_ALBUM_ID') or tags.get('TXXX:TIDAL_ALBUM_ID') or tags.get('tidal_album_id'))
            tidal_track_id = self._first(
                tags.get('TIDAL_TRACK_ID') or tags.get('TXXX:TIDAL_TRACK_ID') or tags.get('tidal_track_id'))

            return {
                'artist': artist or "Unknown Artist",
                'album': album or "Unknown Album",
                'title': title or filepath.stem,
                'year': str(date)[:4] if date else "",
                'track_number': self._to_int(track_num, 0),
                'disc_number': self._to_int(disc_num, 1),
                'path': str(filepath),
                'filename': filepath.name,
                'format': ext[1:],
                # Some tag objects (e.g. EasyID3) may lack stream info; fall
                # back to 0 rather than dropping the track.
                'duration': getattr(getattr(audio, 'info', None), 'length', 0),
                'tidal_artist_id': tidal_artist_id,
                'tidal_album_id': tidal_album_id,
                'tidal_track_id': tidal_track_id
            }
        except Exception as e:
            logger.warning(f"Error reading metadata for {filepath}: {e}")
            return None

    def scan_library(self, force: bool = False) -> Dict:
        """
        Scan the download directory for music files and build the library.

        Uses the in-memory result when the last scan is younger than
        CACHE_TTL seconds, unless ``force`` is True. Returns the artists
        mapping (name -> artist dict).
        """
        if not force and (time.time() - self.library_data.get('timestamp', 0) < self.CACHE_TTL):
            return self.library_data['artists']

        logger.info("Starting library scan...")
        artists_data = {}

        for root, _, files in os.walk(DOWNLOAD_DIR):
            for file in files:
                if not file.lower().endswith(('.mp3', '.flac', '.m4a', '.opus')):
                    continue
                filepath = Path(root) / file
                meta = self._get_file_metadata(filepath)
                if not meta:
                    continue

                artist = meta['artist']
                album = meta['album']

                if artist not in artists_data:
                    # Recover persistent metadata from the previous cache.
                    old_data = self.library_data['artists'].get(artist, {})
                    artists_data[artist] = {
                        "name": artist,
                        "albums": {},
                        "track_count": 0,
                        "tidal_id": meta.get('tidal_artist_id') or old_data.get('tidal_id'),
                        "picture": old_data.get('picture')  # Preserve Tidal picture
                    }
                elif not artists_data[artist].get("tidal_id") and meta.get('tidal_artist_id'):
                    # A later track may carry the ID the first one lacked.
                    artists_data[artist]["tidal_id"] = meta['tidal_artist_id']

                albums = artists_data[artist]["albums"]
                if album not in albums:
                    albums[album] = {
                        "title": album,
                        "year": meta['year'],
                        "tracks": [],
                        "cover_path": None,
                        "tidal_id": meta.get('tidal_album_id')
                    }
                elif not albums[album].get("tidal_id") and meta.get('tidal_album_id'):
                    albums[album]["tidal_id"] = meta['tidal_album_id']

                # Probe for local cover art only until one is found — avoids
                # re-stat'ing the directory for every remaining track.
                if not albums[album]["cover_path"]:
                    for cand in (filepath.parent / "cover.jpg",
                                 filepath.parent / "cover.png",
                                 filepath.parent / "folder.jpg"):
                        if cand.exists():
                            albums[album]["cover_path"] = str(cand)
                            break

                albums[album]["tracks"].append(meta)
                artists_data[artist]["track_count"] += 1

        # Stable playback order: disc number first, then track number.
        for artist_entry in artists_data.values():
            for album_entry in artist_entry["albums"].values():
                album_entry["tracks"].sort(
                    key=lambda t: (t.get('disc_number', 1), t.get('track_number', 0)))

        self.library_data = {
            "artists": artists_data,
            "timestamp": time.time()
        }
        self._save_cache()
        logger.info(f"Library scan complete. Found {len(artists_data)} artists.")
        return artists_data

    def invalidate_cache(self):
        """Forces the next scan to read from disk."""
        self.library_data['timestamp'] = 0
        self._save_cache()
        logger.info("Library cache invalidated.")

    def get_artists(self) -> List[Dict]:
        """Return one summary entry per artist, sorted case-insensitively by name."""
        artists = self.scan_library()  # Uses cache when still fresh
        artists_list = []
        for name, info in artists.items():
            # Fall back to the first album cover found for the artist image.
            image = None
            for album in info["albums"].values():
                if album.get("cover_path"):
                    image = album["cover_path"]
                    break

            artists_list.append({
                "name": name,
                "album_count": len(info["albums"]),
                "track_count": info["track_count"],
                "image": image,
                "picture": info.get("picture"),
                "tidal_id": info.get("tidal_id")
            })

        return sorted(artists_list, key=lambda x: x["name"].lower())

    def get_artist(self, name: str) -> Optional[Dict]:
        """Return one artist with albums as a list (newest year first), or None."""
        data = self.scan_library()
        if name not in data:
            return None
        # Shallow copy so the cached structure keeps its dict-of-albums shape.
        artist_data = data[name].copy()
        artist_data['albums'] = list(artist_data['albums'].values())
        artist_data['albums'].sort(key=lambda a: str(a.get('year', '0')), reverse=True)
        return artist_data

    def update_artist_metadata(self, name: str, picture: Optional[str] = None) -> bool:
        """Update persistent metadata for an artist (e.g. Tidal picture URL).

        Returns True when the artist exists in the cached library, else False.
        """
        if name in self.library_data['artists']:
            if picture:
                self.library_data['artists'][name]['picture'] = picture
            self._save_cache()
            logger.info(f"Updated metadata for artist {name}: picture={picture}")
            return True
        return False
245 |
# Module-level singleton — import and reuse this instance so all callers share
# one in-memory cache (constructing a new LibraryService re-reads the cache file).
library_service = LibraryService()
247 |
--------------------------------------------------------------------------------