├── .python-version
├── src
└── alist_mikananirss
│ ├── alist
│ ├── __init__.py
│ ├── api.py
│ └── tasks.py
│ ├── common
│ ├── __init__.py
│ ├── config
│ │ ├── __init__.py
│ │ ├── bot_assistant.py
│ │ ├── remap.py
│ │ ├── notifier.py
│ │ ├── extractor.py
│ │ ├── config.py
│ │ └── basic.py
│ └── database.py
│ ├── extractor
│ ├── llm
│ │ ├── prompt
│ │ │ ├── __init__.py
│ │ │ ├── json_schema
│ │ │ │ ├── resource_title.txt
│ │ │ │ ├── anime_name.txt
│ │ │ │ ├── tmdb_retry_search.txt
│ │ │ │ ├── tmdb_find_anime.txt
│ │ │ │ └── tmdb_search_param.txt
│ │ │ ├── json_object
│ │ │ │ ├── anime_name.txt
│ │ │ │ ├── tmdb_retry_search.txt
│ │ │ │ ├── tmdb_search_param.txt
│ │ │ │ ├── tmdb_find_anime.txt
│ │ │ │ └── resource_title.txt
│ │ │ └── prompt.py
│ │ ├── base.py
│ │ ├── __init__.py
│ │ ├── deepseek.py
│ │ ├── openai.py
│ │ └── google.py
│ ├── __init__.py
│ ├── base.py
│ ├── models.py
│ ├── extractor.py
│ ├── regex.py
│ └── llm_extractor.py
│ ├── __main__.py
│ ├── __init__.py
│ ├── websites
│ ├── __init__.py
│ ├── base.py
│ ├── models.py
│ ├── default.py
│ ├── acgrip.py
│ ├── dmhy.py
│ └── mikan.py
│ ├── bot
│ ├── __init__.py
│ ├── tgbot.py
│ ├── bot_base.py
│ ├── pushplus_bot.py
│ └── notificationbot.py
│ ├── core
│ ├── __init__.py
│ ├── filter.py
│ ├── bot_assistant.py
│ ├── renamer.py
│ ├── notification_sender.py
│ ├── rss_monitor.py
│ └── remapper.py
│ ├── utils
│ ├── __init__.py
│ └── tmdb.py
│ └── main.py
├── imgs
├── show_pic1.png
└── dl_old_anime_rss_example.png
├── pytest.ini
├── .gitignore
├── tests
├── common
│ ├── test_config.py
│ └── test_database.py
├── websites
│ ├── test_factory.py
│ ├── test_acgrip.py
│ ├── test_mikan.py
│ ├── test_defaultwebsite.py
│ └── test_dmhy.py
├── bot
│ ├── test_telegrambot.py
│ ├── test_pushplusbot.py
│ └── test_notification_sender.py
├── extractor
│ └── test_extractor.py
├── core
│ ├── test_remapper.py
│ ├── test_filter.py
│ ├── download_manager
│ │ ├── test_build_download_path.py
│ │ ├── test_download_manager.py
│ │ └── test_task_monitor.py
│ ├── test_renamer.py
│ └── test_rss_monitor.py
└── alist
│ └── test_alist.py
├── .dockerignore
├── config.yaml.example
├── docker
└── Dockerfile
├── .github
└── workflows
│ ├── publish_pypi.yml
│ └── publish_docker.yml
├── pyproject.toml
├── full_config.yaml.example
└── README.md
/.python-version:
--------------------------------------------------------------------------------
1 | 3.11.13
--------------------------------------------------------------------------------
/src/alist_mikananirss/alist/__init__.py:
--------------------------------------------------------------------------------
1 | from .api import *
2 | from .tasks import *
3 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/common/__init__.py:
--------------------------------------------------------------------------------
1 | from .database import SubscribeDatabase
2 |
--------------------------------------------------------------------------------
/imgs/show_pic1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TwooSix/Alist-MikananiRss/HEAD/imgs/show_pic1.png
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | asyncio_mode = auto
3 | asyncio_default_fixture_loop_scope = function
4 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/common/config/__init__.py:
--------------------------------------------------------------------------------
1 | from .config import AppConfig, ConfigManager
2 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/extractor/llm/prompt/__init__.py:
--------------------------------------------------------------------------------
1 | from .prompt import PromptType, load_prompt
2 |
--------------------------------------------------------------------------------
/imgs/dl_old_anime_rss_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TwooSix/Alist-MikananiRss/HEAD/imgs/dl_old_anime_rss_example.png
--------------------------------------------------------------------------------
/src/alist_mikananirss/__main__.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | import alist_mikananirss
4 |
5 | sys.exit(alist_mikananirss.main())
6 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/__init__.py:
--------------------------------------------------------------------------------
1 | from .common import SubscribeDatabase
2 | from .common.config import AppConfig, ConfigManager
3 | from .core import *
4 | from .main import main
5 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/websites/__init__.py:
--------------------------------------------------------------------------------
1 | from .acgrip import AcgRip
2 | from .base import Website, WebsiteFactory
3 | from .default import DefaultWebsite
4 | from .dmhy import Dmhy
5 | from .mikan import Mikan
6 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/bot/__init__.py:
--------------------------------------------------------------------------------
1 | from .bot_base import BotBase, BotFactory, BotType
2 | from .notificationbot import NotificationBot, NotificationMsg
3 | from .pushplus_bot import PushPlusBot, PushPlusChannel
4 | from .tgbot import TelegramBot
5 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/extractor/llm/prompt/json_schema/resource_title.txt:
--------------------------------------------------------------------------------
1 | You are an anime resource categorization assistant. Given an anime resource name, you need to extract information that helps users categorize and organize the resource based on its name
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # python generated files
2 | __pycache__/
3 | *.py[oc]
4 | build/
5 | dist/
6 | wheels/
7 | *.egg-info
8 | .coverage
9 | .vscode/
10 |
11 | # venv
12 | .venv
13 |
14 | # userdata
15 | data/
16 | log/
17 | *.yaml
18 |
19 | # debug file
20 | *.ipynb
--------------------------------------------------------------------------------
/src/alist_mikananirss/extractor/llm/prompt/json_schema/anime_name.txt:
--------------------------------------------------------------------------------
1 | You are an anime series resource categorization assistant. When given an anime series name, you need to parse out the original series name without season information, and the season information (default is Season 1)
--------------------------------------------------------------------------------
/src/alist_mikananirss/extractor/llm/prompt/json_schema/tmdb_retry_search.txt:
--------------------------------------------------------------------------------
1 | You are an anime series search assistant. You excel at breaking down original anime titles into keywords containing core information, then entering these keywords into search engines to find corresponding results.
--------------------------------------------------------------------------------
/src/alist_mikananirss/common/config/bot_assistant.py:
--------------------------------------------------------------------------------
1 | from typing import Literal
2 |
3 | from pydantic import BaseModel, Field
4 |
5 |
class TelegramBotAssistantConfig(BaseModel):
    """Config entry for a Telegram bot in the `bot_assistant` section."""

    bot_type: Literal["telegram"]  # discriminator value for assistant bot entries
    token: str = Field(..., description="The token for the Telegram bot.")
9 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/extractor/llm/prompt/json_schema/tmdb_find_anime.txt:
--------------------------------------------------------------------------------
1 | You are an anime series search assistant. Now that the search part is completed, given a filename of an anime resource, you need to find the corresponding anime series from the search results based on the information contained in the filename
--------------------------------------------------------------------------------
/src/alist_mikananirss/extractor/llm/prompt/json_schema/tmdb_search_param.txt:
--------------------------------------------------------------------------------
1 | You are an anime series search assistant. You need to parse the filename of anime resources and extract search keywords from it. The keywords should be as concise as possible and should not include season information, to avoid missing relevant search results.
--------------------------------------------------------------------------------
/src/alist_mikananirss/common/config/remap.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseModel, Field
2 |
3 |
class RemapConfig(BaseModel):
    """Config for the `remap` feature (see full_config.yaml.example)."""

    enable: bool = Field(default=False, description="Enable remapping")
    # Path is relative to the process working directory by default.
    cfg_path: str = Field(
        default="./remap.yaml",
        description="Path to the remap configuration file",
    )
10 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/core/__init__.py:
--------------------------------------------------------------------------------
1 | from .bot_assistant import BotAssistant
2 | from .download_manager import *
3 | from .filter import RegexFilter
4 | from .notification_sender import NotificationSender
5 | from .remapper import RemapFrom, Remapper, RemapperManager, RemapTo
6 | from .renamer import AnimeRenamer
7 | from .rss_monitor import RssMonitor
8 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/extractor/__init__.py:
--------------------------------------------------------------------------------
1 | from enum import StrEnum
2 |
3 | from .base import ExtractorBase
4 | from .extractor import Extractor
5 | from .llm import create_llm_provider
6 | from .llm_extractor import LLMExtractor
7 | from .models import AnimeNameExtractResult, ResourceTitleExtractResult, VideoQuality
8 | from .regex import RegexExtractor
9 |
--------------------------------------------------------------------------------
/tests/common/test_config.py:
--------------------------------------------------------------------------------
1 | from alist_mikananirss.common.config import ConfigManager
2 |
3 |
def test_full_config_example():
    """Smoke test: the full example config must load without raising.

    Assumes pytest runs from the repository root — TODO confirm.
    """
    config = ConfigManager()
    config.load_config("full_config.yaml.example")
7 |
8 |
def test_simple_config_example():
    """Smoke test: the minimal example config must load without raising.

    Assumes pytest runs from the repository root — TODO confirm.
    """
    config = ConfigManager()
    config.load_config("config.yaml.example")
12 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | # python generated files
2 | __pycache__/
3 | *.py[oc]
4 | build/
5 | dist/
6 | wheels/
7 | *.egg-info
8 | .coverage
9 |
10 | # venv
11 | .venv
12 |
13 | # userdata
14 | data/
15 | log/
16 | config.yaml
17 | remap.yaml
18 |
19 | # debug file
20 | *.ipynb
21 |
22 | # project files
23 | .github/
24 | imgs/
25 | tests/
26 | .gitignore
27 | *.yaml
28 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/extractor/llm/prompt/json_object/anime_name.txt:
--------------------------------------------------------------------------------
1 | You are an anime series resource categorization assistant. When given an anime series name, you need to parse out the original series name without season information, and the season information (default is Season 1) and output them in JSON format.
2 |
3 | EXAMPLE INPUT:
4 | 金牌得主
5 |
6 | EXAMPLE JSON OUTPUT:
7 | {
8 | "anime_name": "金牌得主",
9 | "season": 1
10 | }
--------------------------------------------------------------------------------
/src/alist_mikananirss/extractor/llm/prompt/json_object/tmdb_retry_search.txt:
--------------------------------------------------------------------------------
You are an anime series search assistant. You excel at breaking down original anime titles into keywords containing core information, then entering these keywords into search engines to find corresponding results and output them in JSON format.
2 |
3 | EXAMPLE INPUT:
4 | No results found for: 死神千年血战篇. Please try a different keyword.
5 |
6 | EXAMPLE JSON OUTPUT:
7 | {
8 | "query": "死神"
9 | }
--------------------------------------------------------------------------------
/src/alist_mikananirss/extractor/llm/prompt/json_object/tmdb_search_param.txt:
--------------------------------------------------------------------------------
You are an anime series search assistant. You need to parse the filename of anime resources and extract search keywords from it. The keywords should be as concise as possible and should not include season information, to avoid missing relevant search results and output them in JSON format.
2 |
3 | EXAMPLE INPUT:
4 | 【喵萌奶茶屋】★01月新番★[金牌得主 / Medalist][13][1080p][简日双语][招募翻译]
5 |
6 | EXAMPLE JSON OUTPUT:
7 | {
8 | "query": "金牌得主"
9 | }
--------------------------------------------------------------------------------
/src/alist_mikananirss/extractor/llm/prompt/json_object/tmdb_find_anime.txt:
--------------------------------------------------------------------------------
1 | You are an anime series search assistant. Now that the search part is completed, given a filename of an anime resource, you need to find the exactly one anime info from the search results based on the information contained in the filename and output them in JSON format.
2 |
3 | EXAMPLE JSON OUTPUT:
4 | {
5 | "anime_name": "The anime name in search result which matches the resource title",
6 | "tvid": the tv id of the anime in search result
7 | }
--------------------------------------------------------------------------------
/config.yaml.example:
--------------------------------------------------------------------------------
1 | common:
2 | interval_time: 300
3 |
4 | alist:
5 | base_url: https://example.com # 修改为你的alist访问地址
6 | token: alist-xxx # 修改为你的alist token;可在"管理员后台->设置->其他"中找到
7 | downloader: qBittorrent # 或者 aria2
8 | download_path: Onedrive/Anime # 修改为你的下载路径(Alist中的路径)
9 |
10 | mikan:
11 | subscribe_url:
12 | - https://mikanani.me/RSS/MyBangumi?token=xxx # 修改为你的蜜柑订阅地址
13 | # - https://mikanani.me/RSS/MyBangumi?token=xxx2 # 多条RSS订阅链接情况
14 |
15 | filters:
16 | - 非合集 # 程序暂不支持合集等形式的重命名,若使用重命名功能推荐使用此过滤器
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
FROM ghcr.io/astral-sh/uv:python3.12-alpine

# Project sources and packaging metadata needed by `uv sync`.
COPY src/ /src
COPY pyproject.toml /pyproject.toml
COPY uv.lock /uv.lock
COPY README.md /README.md


WORKDIR /

# Build toolchain is only needed while compiling wheels; remove it in the
# same layer to keep the image small.
RUN apk add --no-cache gcc musl-dev linux-headers rust cargo && \
    uv sync --frozen --no-cache && \
    apk del gcc musl-dev linux-headers rust cargo && \
    uv cache clean && \
    rm -rf \
    /var/cache/apk/* \
    /root/.cache \
    /tmp/*


# key=value form: the legacy space-separated ENV syntax is deprecated.
ENV CONFIG_PATH=/config.yaml

CMD ["/bin/sh", "-c", "uv run alist-mikananirss --config $CONFIG_PATH"]
--------------------------------------------------------------------------------
/src/alist_mikananirss/common/config/notifier.py:
--------------------------------------------------------------------------------
1 | from typing import Literal
2 |
3 | from pydantic import BaseModel, Field
4 |
5 | from alist_mikananirss.bot import PushPlusChannel
6 |
7 |
class TelegramConfig(BaseModel):
    """Notification-bot entry for Telegram in the `notification.bots` list."""

    bot_type: Literal["telegram"]  # discriminator value for the bots union
    token: str = Field(..., description="Telegram Bot Token")
    user_id: str | int = Field(..., description="Telegram User ID")
12 |
13 |
class PushPlusConfig(BaseModel):
    """Notification-bot entry for PushPlus in the `notification.bots` list."""

    bot_type: Literal["pushplus"]  # discriminator value for the bots union
    token: str = Field(..., description="PushPlus Token")
    # Defaults to the WeChat delivery channel.
    channel: PushPlusChannel = Field(
        PushPlusChannel.WECHAT, description="PushPlus Channel"
    )
20 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/extractor/llm/base.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from typing import Any, Dict, List, Optional, Type, TypeVar
3 |
4 | T = TypeVar("T")
5 |
6 |
7 | class LLMProvider(ABC):
8 | """Base class for LLM providers"""
9 |
10 | @abstractmethod
11 | async def parse_with_schema(
12 | self, messages: List[Dict[str, str]], response_format: Type[T]
13 | ) -> Optional[T]:
14 | """Parse response with a schema"""
15 | pass
16 |
17 | @abstractmethod
18 | async def parse_as_json(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
19 | """Parse response as a JSON object"""
20 | pass
21 |
--------------------------------------------------------------------------------
/.github/workflows/publish_pypi.yml:
--------------------------------------------------------------------------------
1 | name: Build and Publish to PyPI
2 |
3 | on:
4 | workflow_dispatch:
5 | push:
6 | tags:
7 | - v*
8 |
9 | jobs:
10 | build-and-publish:
11 | runs-on: ubuntu-latest
12 | environment:
13 | name: publish
14 | steps:
15 | - uses: actions/checkout@v4
16 |
17 | - name: Install the latest version of uv
18 | uses: astral-sh/setup-uv@v5
19 | with:
20 | version: "latest"
21 |
22 | - name: Install dependencies
23 | run: |
24 | uv sync --no-dev
25 |
26 | - name: Build and publish to PyPI
27 | run: |
28 | uv build
29 | uv publish --token ${{ secrets.PYPI_TOKEN }}
30 |
--------------------------------------------------------------------------------
/tests/websites/test_factory.py:
--------------------------------------------------------------------------------
1 | from alist_mikananirss.websites import (
2 | AcgRip,
3 | DefaultWebsite,
4 | Dmhy,
5 | Mikan,
6 | WebsiteFactory,
7 | )
8 |
9 |
def test_website_factory():
    """Each known RSS host maps to its parser; unknown hosts fall back to DefaultWebsite."""
    assert isinstance(
        WebsiteFactory.get_website_parser(
            "https://mikanani.me/RSS/Bangumi?bangumiId=3519&subgroupid=382"
        ),
        Mikan,
    )
    assert isinstance(WebsiteFactory.get_website_parser("https://acg.rip/.xml"), AcgRip)
    assert isinstance(
        WebsiteFactory.get_website_parser("https://share.dmhy.org/topics/rss/rss.xml"),
        Dmhy,
    )
    # An unrecognized host must get the generic parser, not raise.
    assert isinstance(
        WebsiteFactory.get_website_parser("https://nyaa.si/?page=rss"), DefaultWebsite
    )
25 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/bot/tgbot.py:
--------------------------------------------------------------------------------
1 | import aiohttp
2 |
3 | from . import BotBase
4 |
5 |
class TelegramBot(BotBase):
    """Notification bot that delivers messages through the Telegram Bot API."""

    def __init__(self, bot_token, user_id) -> None:
        self.bot_token = bot_token
        self.user_id = user_id
        # Telegram chats render rich text, so formatted messages are allowed.
        self.support_markdown = True

    async def send_message(self, message: str) -> bool:
        """Send *message* to the configured chat.

        Returns True on success; raises aiohttp's HTTP error on failure.
        """
        api_url = f"https://api.telegram.org/bot{self.bot_token}/sendMessage"
        payload = {"chat_id": self.user_id, "text": message, "parse_mode": "HTML"}
        async with aiohttp.ClientSession(trust_env=True) as session:
            async with session.post(api_url, json=payload) as response:
                response.raise_for_status()
        return True
20 |
--------------------------------------------------------------------------------
/tests/bot/test_telegrambot.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import AsyncMock, MagicMock, patch
2 |
3 | import pytest
4 | from alist_mikananirss.bot.tgbot import TelegramBot
5 |
6 |
@pytest.fixture
def telegram_bot():
    """A TelegramBot wired with dummy credentials (no network access)."""
    return TelegramBot(bot_token="test_token", user_id="test_user_id")
10 |
11 |
@pytest.mark.asyncio
@patch("aiohttp.ClientSession.post")
async def test_telegram_bot_send_message(mock_post, telegram_bot):
    """send_message posts the HTML-formatted payload to the Bot API exactly once."""
    mock_response = AsyncMock()
    # raise_for_status is a synchronous method on aiohttp responses, hence MagicMock.
    mock_response.raise_for_status = MagicMock()
    mock_post.return_value.__aenter__.return_value = mock_response

    message = "Test message"
    result = await telegram_bot.send_message(message)

    assert result is True
    mock_post.assert_called_once_with(
        "https://api.telegram.org/bottest_token/sendMessage",
        json={"chat_id": "test_user_id", "text": message, "parse_mode": "HTML"},
    )
    mock_response.raise_for_status.assert_called_once()
28 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/extractor/llm/__init__.py:
--------------------------------------------------------------------------------
1 | from enum import StrEnum
2 |
3 | from .base import LLMProvider
4 |
5 |
class LLMProviderType(StrEnum):
    """Enum for LLM provider types"""

    OPENAI = "openai"  # handled by .openai.OpenAIProvider
    DEEPSEEK = "deepseek"  # handled by .deepseek.DeepSeekProvider
    GOOGLE = "google"  # handled by .google.GoogleProvider
12 |
13 |
14 | def create_llm_provider(provider_type: LLMProviderType, **kwargs) -> LLMProvider:
15 | """Create an LLM provider instance based on the specified type"""
16 | if provider_type == LLMProviderType.OPENAI:
17 | from .openai import OpenAIProvider
18 |
19 | return OpenAIProvider(**kwargs)
20 | elif provider_type == LLMProviderType.DEEPSEEK:
21 | from .deepseek import DeepSeekProvider
22 |
23 | return DeepSeekProvider(**kwargs)
24 | elif provider_type == LLMProviderType.GOOGLE:
25 | from .google import GoogleProvider
26 |
27 | return GoogleProvider(**kwargs)
28 | else:
29 | raise ValueError(f"Unsupported provider type: {provider_type}")
30 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/extractor/base.py:
--------------------------------------------------------------------------------
1 | from .models import AnimeNameExtractResult, ResourceTitleExtractResult
2 |
3 |
class ExtractorBase:
    """Interface for extractors that parse structured info out of anime
    names and resource titles (see RegexExtractor and LLMExtractor).
    """

    def __init__(self):
        # Template/base class: holds no state of its own.
        pass

    async def analyse_anime_name(self, anime_name: str) -> AnimeNameExtractResult:
        """Analyse the anime name to get resource info.

        Args:
            anime_name (str)

        Returns:
            AnimeNameExtractResult: The extracted info in anime name.

        Raises:
            NotImplementedError: Always; subclasses must override.
        """
        raise NotImplementedError

    async def analyse_resource_title(
        self, resource_title: str, use_tmdb: bool = True
    ) -> ResourceTitleExtractResult:
        """Analyse the resource title to get resource info.

        Args:
            resource_title (str)
            use_tmdb (bool): Whether a TMDB lookup may be used to refine the
                result. Defaults to True.

        Returns:
            ResourceTitleExtractResult: The extracted info in resource title.

        Raises:
            NotImplementedError: Always; subclasses must override.
        """
        raise NotImplementedError
32 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from collections import OrderedDict
2 |
3 | from .tmdb import TMDBClient
4 |
5 |
class Singleton(type):
    """Metaclass that caches exactly one instance per class.

    The first construction call builds the instance; every later call
    returns the cached one until `destroy_instance` drops it.
    """

    _instances = {}

    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super().__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance

    def destroy_instance(cls):
        """Forget the cached instance so the next call rebuilds it (no-op if absent)."""
        cls._instances.pop(cls, None)
17 |
18 |
class FixedSizeSet:
    """Set-like container with bounded memory and FIFO eviction.

    Keeps at most *maxsize* items; once exceeded, the oldest-inserted item
    is discarded. Useful for deduplicating an unbounded stream (e.g. RSS
    entries) without growing forever.
    """

    def __init__(self, maxsize: int = 10000):
        self.maxsize = maxsize
        # OrderedDict keys double as an insertion-ordered set.
        self._set = OrderedDict()

    def add(self, item) -> None:
        """Insert *item*, evicting the oldest entry if capacity is exceeded.

        Re-adding an existing item does NOT refresh its position (FIFO
        semantics, not LRU).
        """
        self._set[item] = None
        if len(self._set) > self.maxsize:
            self._set.popitem(last=False)

    def __contains__(self, item) -> bool:
        return item in self._set

    def __len__(self) -> int:
        """Number of items currently held (new, backward-compatible)."""
        return len(self._set)
31 |
32 |
def is_video(s: str) -> bool:
    """Return True if *s* (a filename or path) ends with a known video extension."""
    video_suffixes = (".mp4", ".mkv", ".avi", ".rmvb", ".wmv", ".flv")
    return s.lower().endswith(video_suffixes)
35 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/extractor/llm/prompt/json_object/resource_title.txt:
--------------------------------------------------------------------------------
1 | You are an anime resource categorization assistant. Given an anime resource name, you need to extract information that helps users categorize and organize the resource based on its name and output them in JSON format.
2 | ps:
3 | 1. Season default to be 1. But if special episode, it should be 0
4 | 2. The episode number should be int. If float, it means special episode, please set episode and season to 0
5 | 3. The quality of the video is an enum value, which can be 2160p, 1080p, 720p
6 | 4. The subtitle language of the video is a list of enum values, which can be 繁, 简, 日, Unknown
7 |
8 | Example input:
9 | [ANi] 超超超超超喜欢你的 100 个女朋友 - 24 [1080P][Baha][WEB-DL][AAC AVC][CHT][MP4]
10 |
11 | Example JSON output:
12 | {
13 | "anime_name": "超超超超超喜欢你的 100 个女朋友", // The name of the anime
14 | "season": 1, // The season of the anime.
15 | "episode": 24, // The episode number.
16 | "quality": "1080p", // The quality of the video.
17 | "fansub": "Ani", // The fansub of the video.
18 | "languages": ["繁"], // The subtitle language of the video.
19 | "version": 1 // The version of the video's subtitle, default to be 1.
20 | }
--------------------------------------------------------------------------------
/src/alist_mikananirss/extractor/llm/prompt/prompt.py:
--------------------------------------------------------------------------------
1 | import os
2 | from enum import StrEnum
3 | from functools import lru_cache
4 |
5 |
6 | class PromptType(StrEnum):
7 | JSON_OBJECT = "json_object"
8 | JSON_SCHEMA = "json_schema"
9 |
10 |
11 | @lru_cache(maxsize=32)
12 | def load_prompt(prompt_type: PromptType, prompt_name: str) -> str:
13 | """Load a prompt from a file"""
14 | if prompt_type == PromptType.JSON_OBJECT:
15 | prompt_dir = "json_object"
16 | elif prompt_type == PromptType.JSON_SCHEMA:
17 | prompt_dir = "json_schema"
18 | else:
19 | raise ValueError(f"Invalid prompt type: {prompt_type}")
20 | base_dir = os.path.join(os.path.dirname(__file__), prompt_dir)
21 | file_path = os.path.join(base_dir, f"{prompt_name}.txt")
22 |
23 | if not os.path.exists(file_path):
24 | raise FileNotFoundError(f"Prompt file not found: {file_path}")
25 |
26 | with open(file_path, "r", encoding="utf-8") as f:
27 | return f.read()
28 |
29 |
30 | if __name__ == "__main__":
31 | # Example usage
32 | try:
33 | prompt = load_prompt(PromptType.JSON_SCHEMA, "anime_name")
34 | print(prompt)
35 | except FileNotFoundError as e:
36 | print(e)
37 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/utils/tmdb.py:
--------------------------------------------------------------------------------
1 | import aiohttp
2 | from async_lru import alru_cache
3 |
4 |
class TMDBClient:
    """Thin async client for the TMDB v3 API, used to look up TV series."""

    def __init__(
        self,
        api_key: str = "32b19d6a05b512190a056fa4e747cbbc",
        api_base_url: str = "https://api.tmdb.org/3",
    ):
        # NOTE(review): a default API key is hard-coded here — presumably a
        # shared public-tier key, but confirm it is intended to ship in source.
        self.api_key = api_key
        self.base_url = api_base_url

    # NOTE(review): alru_cache on an instance method keys on `self` and keeps
    # the instance alive for the cache's lifetime; fine if the client is
    # long-lived — confirm.
    @alru_cache(maxsize=128)
    async def search_tv(self, query: str):
        """Search TMDB TV shows for *query* (zh-CN localized results).

        Returns:
            list[dict]: Items of shape {"title": ..., "id": ...}, keeping only
            results whose TMDB popularity is greater than 0.
        """
        endpoint = f"{self.base_url}/search/tv"

        async with aiohttp.ClientSession(trust_env=True) as session:
            async with session.get(
                endpoint,
                params={
                    "api_key": self.api_key,
                    "query": query,
                    "language": "zh-CN",
                },
            ) as response:
                data = await response.json()
                results = []
                for result in data["results"]:
                    tmp = {"title": result["name"], "id": result["id"]}
                    # filter out trash results
                    if result["popularity"] > 0:
                        results.append(tmp)
                return results
35 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "alist-mikananirss"
3 | version = "0.5.5"
4 | description = "Automatically fetch anime updates from the Mikan Project's RSS feed and offline download them to the cloud drive through Alist."
5 | readme = "README.md"
6 | requires-python = ">=3.11"
7 | dependencies = [
8 | "aiohttp>=3.11",
9 | "aiosqlite>=0.20.0",
10 | "async-lru>=2.0.4",
11 | "asyncio>=3.4.3",
12 | "beautifulsoup4>=4.12.3",
13 | "feedparser>=6.0.11",
14 | "google-genai>=1.10.0",
15 | "loguru>=0.7.3",
16 | "openai>=1.58.1",
17 | "python-telegram-bot>=21.9",
18 | "pyyaml>=6.0.2",
19 | "tenacity>=9.0.0",
20 | ]
21 |
22 | [project.scripts]
23 | alist-mikananirss = "alist_mikananirss:main"
24 |
25 | [build-system]
26 | requires = ["hatchling", "hatch-vcs"]
27 | build-backend = "hatchling.build"
28 |
29 | [tool.hatch.metadata]
30 | allow-direct-references = true
31 |
32 | [tool.hatch.build.targets.wheel]
33 | packages = ["src/alist_mikananirss"]
34 |
35 | [dependency-groups]
36 | dev = [
37 | "pytest-asyncio>=0.25.1",
38 | "pytest>=8.3.4",
39 | "aioresponses>=0.7.7",
40 | "ipykernel>=6.29.5",
41 | "ruff>=0.8.4",
42 | "black>=24.10.0",
43 | "requests>=2.32.3",
44 | "coverage>=7.6.10",
45 | "pytest-order>=1.3.0",
46 | ]
47 |
--------------------------------------------------------------------------------
/tests/extractor/test_extractor.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import AsyncMock
2 |
3 | import pytest
4 |
5 | from alist_mikananirss.extractor import Extractor, ExtractorBase
6 |
7 |
@pytest.fixture(autouse=True)
def reset_extractor():
    """Drop the Extractor singleton before each test for isolation."""
    Extractor.destroy_instance()
11 |
12 |
def test_initialize():
    """initialize() stores the wrapped extractor on the singleton."""
    mock_extractor = AsyncMock(spec=ExtractorBase)
    Extractor.initialize(mock_extractor)
    extractor = Extractor()
    assert extractor._extractor == mock_extractor
18 |
19 |
def test_set_extractor():
    """set_extractor() swaps the wrapped extractor on the shared instance."""
    mock_extractor1 = AsyncMock(spec=ExtractorBase)
    Extractor.initialize(mock_extractor1)
    m = Extractor()
    assert m._extractor == mock_extractor1
    mock_extractor2 = AsyncMock(spec=ExtractorBase)
    m.set_extractor(mock_extractor2)
    assert m._extractor == mock_extractor2
28 |
29 |
@pytest.mark.asyncio
async def test_not_initialized():
    """Using the Extractor before initialize() must raise RuntimeError.

    The previous try/except pattern passed silently when no exception was
    raised at all; pytest.raises fails the test in that case.
    """
    extractor = Extractor()
    with pytest.raises(RuntimeError, match="Extractor is not initialized"):
        await extractor.analyse_anime_name("anime_name")
    with pytest.raises(RuntimeError, match="Extractor is not initialized"):
        await extractor.analyse_resource_title("resource_name")
41 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/bot/bot_base.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from enum import Enum
3 |
4 |
class BotType(Enum):
    """Supported notification-bot backends."""

    TELEGRAM = "telegram"  # built as .tgbot.TelegramBot
    PUSHPLUS = "pushplus"  # built as .pushplus_bot.PushPlusBot
8 |
9 |
class BotBase(ABC):
    """Abstract base for notification bots; instances come from BotFactory."""

    def __init__(self) -> None:
        super().__init__()

    @abstractmethod
    async def send_message(self, message: str) -> bool:
        """Deliver *message*; implementations return True on success."""
        raise NotImplementedError
17 |
18 |
class BotFactory:
    """Builds concrete notification bots from configuration values."""

    @staticmethod
    def create_bot(bot_type: str | BotType, **kwargs) -> BotBase:
        """Create a bot for *bot_type*.

        Args:
            bot_type: A BotType member or its string value (case-insensitive).
            **kwargs: Backend-specific settings — "token" plus "user_id"
                (telegram) or "channel" (pushplus).

        Raises:
            ValueError: If *bot_type* is not a supported bot type.
        """
        if isinstance(bot_type, str):
            try:
                bot_type = BotType(bot_type.lower())
            except ValueError as e:
                # Chain the original error so the rejected value is traceable.
                raise ValueError(f"Invalid bot type: {bot_type}") from e

        if bot_type == BotType.TELEGRAM:
            # Imported lazily so unused backends need not be importable.
            from .tgbot import TelegramBot

            bot_token = kwargs.get("token")
            user_id = kwargs.get("user_id")
            return TelegramBot(bot_token, user_id)

        elif bot_type == BotType.PUSHPLUS:
            from .pushplus_bot import PushPlusBot

            user_token = kwargs.get("token")
            channel = kwargs.get("channel")
            return PushPlusBot(user_token, channel)

        else:
            raise ValueError(f"Unsupported bot type: {bot_type}")
44 |
--------------------------------------------------------------------------------
/full_config.yaml.example:
--------------------------------------------------------------------------------
1 | common:
2 | interval_time: 300
3 | proxies:
4 | http: http://127.0.0.1:7890
5 | https: http://127.0.0.1:7890
6 |
7 | alist:
8 | base_url: https://www.example.com
9 | # base_url: http://127.0.0.1:port
10 | token: alist-xxx
11 | downloader: qBittorrent
12 | download_path: Onedrive/Anime
13 |
14 | mikan:
15 | subscribe_url:
16 | - https://mikanani.me/RSS/MyBangumi?token=xxx
17 | - https://mikanani.me/RSS/rss2
18 | regex_pattern:
19 | 简体: "(简体|简中|简日|CHS)"
20 | 繁体: "(繁体|繁中|繁日|CHT|Baha)"
21 | 1080p: "(X1080|1080P)"
22 | 非合集: "^(?!.*(\\d{2}-\\d{2}|合集)).*"
23 | filters:
24 | - 1080p
25 | - 非合集
26 |
27 | notification:
28 | enable: false
29 | interval_time: 300
30 | bots:
31 | - bot_type: telegram
32 | token: xxx:xxx
33 | user_id: 123456
34 | - bot_type: pushplus
35 | token: xxx
36 | channel: wechat
37 |
38 | rename:
39 | enable: false
40 | extractor:
41 | extractor_type: openai
42 | api_key: sk-***
43 | base_url: https://api.openai.com/v1
44 | model: gpt-4o
45 | output_type: json_object
46 | rename_format: "{name} S{season:02d}E{episode:02d}"
47 | remap:
48 | enable: true
49 | cfg_path: "remap.yaml"
50 |
51 | bot_assistant:
52 | enable: false
53 | bots:
54 | - bot_type: telegram
55 | token: xxx
56 |
57 | dev:
58 | log_level: INFO
59 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/common/config/extractor.py:
--------------------------------------------------------------------------------
1 | from typing import Annotated, Literal
2 |
3 | from pydantic import BaseModel, Field
4 |
5 | from alist_mikananirss.extractor.llm.prompt import PromptType
6 |
7 |
class OpenAIConfig(BaseModel):
    """Settings for the OpenAI-backed LLM extractor."""

    # Discriminator value used by ExtractorConfig to select this model.
    extractor_type: Literal["openai"]
    api_key: str = Field(..., description="OpenAI API key")
    base_url: str = Field(
        "https://api.openai.com/v1", description="Base URL for OpenAI API"
    )
    model: str = Field("gpt-4o-2024-11-20", description="Model to use for OpenAI API")
    output_type: PromptType = Field(
        PromptType.JSON_OBJECT, description="Structure output type for OpenAI API"
    )
18 |
19 |
class DeepSeekConfig(BaseModel):
    """Settings for the DeepSeek-backed LLM extractor."""

    # Discriminator value used by ExtractorConfig to select this model.
    extractor_type: Literal["deepseek"]
    api_key: str = Field(..., description="DeepSeek API key")
    base_url: str = Field(
        "https://api.deepseek.com", description="Base URL for DeepSeek API"
    )
    # The old default "gpt-4o-2024-11-20" was a copy-paste from OpenAIConfig
    # and is not a DeepSeek model; use "deepseek-chat", matching
    # DeepSeekProvider's own default.
    model: str = Field("deepseek-chat", description="Model to use for DeepSeek API")
    output_type: PromptType = Field(
        PromptType.JSON_OBJECT, description="Structure output type for DeepSeek API"
    )
30 |
31 |
class GoogleConfig(BaseModel):
    """Settings for the Google Gemini-backed LLM extractor."""

    # Discriminator value used by ExtractorConfig to select this model.
    extractor_type: Literal["google"]
    api_key: str = Field(..., description="Google API key")
    model: str = Field("gemini-2.0-flash")
    # Gemini defaults to schema-constrained output (used by GoogleProvider).
    output_type: PromptType = Field(PromptType.JSON_SCHEMA)
37 |
38 |
# Discriminated union: pydantic selects the concrete config class based on
# the "extractor_type" field of the parsed data.
ExtractorConfig = Annotated[
    OpenAIConfig | DeepSeekConfig | GoogleConfig, Field(discriminator="extractor_type")
]
42 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/extractor/llm/deepseek.py:
--------------------------------------------------------------------------------
1 | import json
2 | from typing import Any, Dict, List, Type, TypeVar, Optional
3 | from openai import AsyncOpenAI
4 | from .base import LLMProvider
5 |
6 | T = TypeVar("T")
7 |
8 |
class DeepSeekProvider(LLMProvider):
    """DeepSeek-based LLM provider using the OpenAI-compatible API."""

    def __init__(
        self,
        api_key: str,
        base_url: str = "https://api.deepseek.com/v1",
        model: str = "deepseek-chat",
    ):
        self._api_key = api_key
        self._base_url = base_url
        self.model = model
        # Created lazily on first use so constructing the provider is cheap.
        self._client = None

    @property
    def client(self):
        """Lazily-instantiated AsyncOpenAI client pointed at DeepSeek."""
        if self._client is None:
            # Pass base_url at construction time instead of mutating the
            # client afterwards; None/"" falls back to the library default.
            self._client = AsyncOpenAI(
                api_key=self._api_key, base_url=self._base_url or None
            )
        return self._client

    async def parse_with_schema(
        self, messages: List[Dict[str, str]], response_format: Type[T]
    ) -> Optional[T]:
        """Not supported by DeepSeek; always raises NotImplementedError."""
        raise NotImplementedError(
            "DeepSeek does not support parsing with json schema. Use parse_as_json instead."
        )

    async def parse_as_json(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
        """Send *messages* in JSON mode and return the decoded JSON object."""
        response = await self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            response_format={"type": "json_object"},
        )
        return json.loads(response.choices[0].message.content)
46 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/extractor/llm/openai.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict, List, Type, TypeVar, Optional
2 | from openai import AsyncOpenAI
3 | from .base import LLMProvider
4 | import json
5 |
6 | T = TypeVar("T")
7 |
8 |
class OpenAIProvider(LLMProvider):
    """OpenAI-based LLM provider."""

    def __init__(
        self, api_key: str, base_url: Optional[str] = None, model: str = "gpt-4o-mini"
    ):
        self._api_key = api_key
        self._base_url = base_url
        self.model = model
        # Created lazily on first use so constructing the provider is cheap.
        self._client = None

    @property
    def client(self):
        """Lazily-instantiated AsyncOpenAI client."""
        if self._client is None:
            # Pass base_url at construction time instead of mutating the
            # client afterwards; None falls back to the library default.
            self._client = AsyncOpenAI(
                api_key=self._api_key, base_url=self._base_url or None
            )
        return self._client

    async def parse_with_schema(
        self, messages: List[Dict[str, str]], response_format: Type[T]
    ) -> Optional[T]:
        """Parse the response into *response_format* via native structured output."""
        response = await self.client.beta.chat.completions.parse(
            model=self.model,
            messages=messages,
            response_format=response_format,
        )
        return response.choices[0].message.parsed

    async def parse_as_json(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
        """Send *messages* in JSON mode and return the decoded JSON object."""
        response = await self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            response_format={"type": "json_object"},
        )
        return json.loads(response.choices[0].message.content)
45 |
--------------------------------------------------------------------------------
/tests/bot/test_pushplusbot.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import AsyncMock, MagicMock, patch
2 |
3 | import pytest
4 | from alist_mikananirss.bot.pushplus_bot import PushPlusBot, PushPlusChannel
5 |
6 |
@pytest.fixture
def pushplus_bot():
    # Bot with a dummy token; construction performs no network I/O.
    return PushPlusBot(user_token="test_token")
10 |
11 |
@pytest.mark.asyncio
@patch("aiohttp.ClientSession.post")
async def test_pushplus_bot_send_message(mock_post, pushplus_bot):
    """send_message posts the expected JSON payload and returns True on code 200."""
    mock_response = AsyncMock()
    # raise_for_status is a plain (synchronous) method on aiohttp responses.
    mock_response.raise_for_status = MagicMock()
    # The bot uses "async with session.post(...)", so the mocked call must
    # expose the response through the async context-manager protocol.
    mock_post.return_value.__aenter__.return_value = mock_response
    # Business-level success code reported by the pushplus service.
    mock_response.json = AsyncMock(return_value={"code": 200})

    message = "Test message"
    result = await pushplus_bot.send_message(message)

    assert result is True
    mock_post.assert_called_once_with(
        "http://www.pushplus.plus/send/test_token",
        json={
            "title": "Alist MikananiRSS更新推送",
            "content": message,
            "channel": PushPlusChannel.WECHAT.value,
            "template": "html",
        },
    )
    mock_response.raise_for_status.assert_called_once()
34 |
35 |
def test_pushplus_bot_invalid_channel():
    """An unrecognised channel name is rejected at construction time."""
    bad_channel = "invalid_channel"
    with pytest.raises(ValueError):
        PushPlusBot(user_token="test_token", channel=bad_channel)
39 |
40 |
@pytest.mark.parametrize("channel", list(PushPlusChannel))
def test_pushplus_bot_valid_channels(channel):
    """Every defined PushPlusChannel value is accepted and stored as the enum member."""
    bot = PushPlusBot(user_token="test_token", channel=channel.value)
    assert bot.channel == channel
53 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/core/filter.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
class RegexFilter:
    """Filter resource titles against a set of named regex patterns.

    A string passes the filter only if it matches *every* enabled pattern.
    """

    # Built-in named patterns. update_regex() mutates this class-level dict
    # deliberately, so user-configured patterns are shared by all instances.
    _default_patterns = {
        "简体": r"(简体|简中|简日|CHS)",
        "繁体": r"(繁体|繁中|繁日|CHT|Baha)",
        "1080p": r"(X1080|1080P)",
        "非合集": r"^(?!.*(\d{2}-\d{2}|合集)).*",
    }

    def __init__(self, patterns_name: list = None):
        """Optionally enable a list of named patterns immediately."""
        self.patterns: list[re.Pattern] = []
        if patterns_name is not None:
            for pattern in patterns_name:
                self.add_pattern(pattern)

    def update_regex(self, regex_pattern: dict) -> None:
        """Merge user-defined name -> regex mappings into the known patterns."""
        if regex_pattern is not None:
            self._default_patterns.update(regex_pattern)

    def add_pattern(self, pattern_name: str) -> None:
        """Enable the named pattern.

        Raises:
            KeyError: if *pattern_name* is not a known pattern.
        """
        try:
            tmp_pattern = self._default_patterns[pattern_name]
        except KeyError:
            # from None: the original dict lookup adds no useful context.
            raise KeyError(f"Can't find the filter <{pattern_name}>") from None
        self.patterns.append(re.compile(tmp_pattern, re.IGNORECASE))

    def filt_single(self, string) -> bool:
        """Return True if *string* matches every enabled pattern.

        all() short-circuits on the first non-matching pattern; the previous
        implementation always evaluated every pattern.
        """
        return all(pattern.search(string) for pattern in self.patterns)

    def filt_list(self, string_list: list[str]) -> list[int]:
        """Filter list of string using regex

        Args:
            string_list (list[str]): list of string to be filtered

        Returns:
            list[int]: index of string that match the regex pattern
        """
        return [i for i, string in enumerate(string_list) if self.filt_single(string)]
48 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/extractor/models.py:
--------------------------------------------------------------------------------
1 | from enum import StrEnum
2 | from typing import List, Optional
3 |
4 | from pydantic import BaseModel, Field
5 |
6 | from alist_mikananirss.websites.models import LanguageType, VideoQuality
7 |
8 |
9 | class ProviderType(StrEnum):
10 | OPENAI = "openai"
11 | DEEPSEEK = "deepseek"
12 |
13 |
class TMDBSearchParam(BaseModel):
    """Structured-output schema: a TMDB search query produced by the LLM."""

    query: str = Field(..., description="Keywords of the anime name to search")
16 |
17 |
class TMDBTvInfo(BaseModel):
    """Structured-output schema: a TV show the LLM matched on TMDB."""

    anime_name: str = Field(..., description="The name of the anime in tmdb")
    tvid: int = Field(..., description="The tmdb id of the anime")
21 |
22 |
class ResourceTitleExtractResult(BaseModel):
    """Structured result of parsing a raw resource/torrent title.

    NOTE: the Field descriptions are part of the JSON schema handed to the
    LLM as extraction instructions, so their wording must not be changed
    casually.
    """

    anime_name: str = Field(..., description="The name of the anime")
    season: int = Field(
        ...,
        description="The season of the anime.Default to be 1. But if special episode, it should be 0",
    )
    episode: int = Field(
        ...,
        description="The episode number. It should be int. If float, it means special episode",
    )
    quality: Optional[VideoQuality] = Field(..., description="The quality of the video")
    fansub: Optional[str] = Field(..., description="The fansub of the video")
    languages: List[LanguageType] = Field(
        ..., description="The subtitle language of the video"
    )
    version: int = Field(
        ..., description="The version of the video's subtitle, default to be 1"
    )
41 |
42 |
class AnimeNameExtractResult(BaseModel):
    """Structured result of splitting an anime name into base name and season."""

    anime_name: str = Field(
        ..., description="The pure name of the anime without season or other info"
    )
    season: int = Field(..., description="The season of the anime")
48 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/bot/pushplus_bot.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 |
3 | import aiohttp
4 |
5 | from . import BotBase
6 |
7 |
class PushPlusChannel(Enum):
    """Delivery channels accepted by the pushplus service."""

    WECHAT = "wechat"
    WEBHOOK = "webhook"
    CP = "cp"
    MAIL = "mail"
13 |
14 |
class PushPlusBot(BotBase):
    """Notification bot backed by the pushplus (pushplus.plus) service."""

    def __init__(self, user_token, channel=None) -> None:
        """
        Args:
            user_token: pushplus user token, embedded in the send URL.
            channel: optional channel name (a PushPlusChannel value);
                defaults to wechat.

        Raises:
            ValueError: if *channel* is not a valid PushPlusChannel value.
        """
        super().__init__()
        self.user_token = user_token
        if channel:
            try:
                self.channel = PushPlusChannel(channel)
            except ValueError as e:
                # Chain the original error to keep the invalid value's origin.
                raise ValueError(f"Invalid channel: {channel}") from e
        else:
            self.channel = PushPlusChannel.WECHAT

    async def send_message(self, message: str) -> bool:
        """POST *message* to pushplus; return True on success.

        Raises:
            aiohttp.ClientResponseError: on an HTTP error status, or when the
                service reports a non-200 business code in the JSON body.
        """
        api_url = f"http://www.pushplus.plus/send/{self.user_token}"
        body = {
            "title": "Alist MikananiRSS更新推送",
            "content": message,
            "channel": self.channel.value,
            "template": "html",
        }
        # trust_env=True honours HTTP(S)_PROXY environment variables.
        async with aiohttp.ClientSession(trust_env=True) as session:
            async with session.post(api_url, json=body) as response:
                response.raise_for_status()
                data = await response.json()
                if data["code"] != 200:
                    # HTTP layer succeeded but the service signalled failure;
                    # surface it as a ClientResponseError for uniform handling.
                    error_code = data["code"]
                    error_message = data.get("message") or "Unknown error"
                    full_error_message = f"Error {error_code}: {error_message}"
                    raise aiohttp.ClientResponseError(
                        response.request_info,
                        response.history,
                        status=data["code"],
                        message=full_error_message,
                    )
        return True
49 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/extractor/llm/google.py:
--------------------------------------------------------------------------------
1 | import json
2 | from typing import Any, Dict, List, Optional, Type, TypeVar
3 |
4 | from google import genai
5 |
6 | from .base import LLMProvider
7 |
8 | T = TypeVar("T")
9 |
10 |
class GoogleProvider(LLMProvider):
    """Google Gemini LLM provider."""

    def __init__(self, api_key: str, model: str = "gemini-2.0-flash"):
        self._api_key = api_key
        self.model = model
        # Created lazily on first use so constructing the provider is cheap.
        self._client = None

    @property
    def client(self):
        """Lazily-instantiated google-genai client."""
        if self._client is None:
            self._client = genai.Client(api_key=self._api_key)
        return self._client

    @staticmethod
    def _merge_messages(messages: List[Dict[str, str]]) -> str:
        """Collapse an OpenAI-style [prompt, content] message pair into one string.

        The Gemini API takes a single contents string; like the original
        inline code, this assumes exactly two entries (prompt then content).
        """
        prompt = messages[0]["content"]
        content = messages[1]["content"]
        return f"{prompt}: \n{content}"

    async def parse_with_schema(
        self, messages: List[Dict[str, str]], response_format: Type[T]
    ) -> Optional[T]:
        """Parse the response into *response_format* using a JSON schema."""
        response = await self.client.aio.models.generate_content(
            model=self.model,
            contents=self._merge_messages(messages),
            config={
                "response_mime_type": "application/json",
                "response_schema": response_format,
            },
        )
        return response.parsed

    async def parse_as_json(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
        """Parse the response as a free-form JSON object."""
        response = await self.client.aio.models.generate_content(
            model=self.model,
            contents=self._merge_messages(messages),
            config={
                "response_mime_type": "application/json",
            },
        )
        return json.loads(response.text)
53 |
--------------------------------------------------------------------------------
/tests/core/test_remapper.py:
--------------------------------------------------------------------------------
1 | from copy import deepcopy
2 |
3 | import pytest
4 |
5 | from alist_mikananirss import RemapFrom, Remapper, RemapTo
6 | from alist_mikananirss.websites.models import LanguageType, ResourceInfo, VideoQuality
7 |
8 |
@pytest.fixture
def test_data():
    # A fully-populated ResourceInfo: one 1080p episode with traditional
    # Chinese subtitles released by the "ANi" fansub group.
    return ResourceInfo(
        resource_title="[ANi] BLEACH 死神 千年血战篇-相克谭- - 27 [1080P][Baha][WEB-DL][AAC AVC][CHT][MP4]",
        torrent_url="https://mikanani.me/Download/20241005/e3635409901a94033045060113c2a82689b5c480.torrent",
        published_date="2024-10-05T23:32:50.15614",
        anime_name="死神 千年血战篇-相克谭-",
        season=1,
        episode=27,
        fansub="ANi",
        quality=VideoQuality.p1080,
        languages=[LanguageType.TRADITIONAL_CHINESE],
        version=1,
    )
23 |
24 |
def test_match(test_data):
    """Remapper.match is True only when every RemapFrom field agrees."""
    matching = Remapper(
        from_=RemapFrom(anime_name="死神 千年血战篇-相克谭-", season=1, fansub="ANi"),
        to_=RemapTo(anime_name="死神", season=1, episode_offset=0),
    )
    # Differs from the fixture only in season, so it must not match.
    non_matching = Remapper(
        from_=RemapFrom(anime_name="死神 千年血战篇-相克谭-", season=2, fansub="ANi"),
        to_=RemapTo(anime_name="死神", season=2, episode_offset=0),
    )

    assert matching.match(test_data)
    assert not non_matching.match(test_data)
39 |
40 |
def test_remap(test_data):
    """remap rewrites name/season and shifts the episode by the offset."""
    info = deepcopy(test_data)
    remapper = Remapper(
        from_=RemapFrom(anime_name="死神 千年血战篇-相克谭-", season=1, fansub="ANi"),
        to_=RemapTo(anime_name="死神", season=2, episode_offset=-26),
    )

    remapper.remap(info)

    # Episode 27 with offset -26 lands on episode 1 of the new season.
    assert info.anime_name == "死神"
    assert info.season == 2
    assert info.episode == 1
    assert info.fansub == "ANi"
54 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/common/config/config.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 |
3 | import yaml
4 | from pydantic import BaseModel, Field
5 |
6 | from .basic import (
7 | AlistConfig,
8 | BotAssistantConfig,
9 | CommonConfig,
10 | DevConfig,
11 | MikanConfig,
12 | NotificationConfig,
13 | RenameConfig,
14 | )
15 |
16 |
class AppConfig(BaseModel):
    """Top-level application configuration; one attribute per YAML section."""

    common: CommonConfig = Field(default_factory=CommonConfig)
    alist: AlistConfig = Field(default_factory=AlistConfig)
    mikan: MikanConfig = Field(default_factory=MikanConfig)
    notification: NotificationConfig = Field(default_factory=NotificationConfig)
    rename: RenameConfig = Field(default_factory=RenameConfig)
    bot_assistant: BotAssistantConfig = Field(default_factory=BotAssistantConfig)
    dev: DevConfig = Field(default_factory=DevConfig)

    def __str__(self):
        """Dump the config as YAML, rendering Enum members as plain strings."""

        # Register a representer for every Enum subclass so dumped values
        # use .value instead of a python-object tag.
        def enum_representer(dumper, data):
            return dumper.represent_scalar("tag:yaml.org,2002:str", data.value)

        yaml.add_multi_representer(Enum, enum_representer)

        config_dict = self.model_dump()
        yaml_str = yaml.dump(
            config_dict, sort_keys=False, default_flow_style=False, allow_unicode=True
        )
        return yaml_str
38 |
39 |
class ConfigManager:
    """Loads a YAML config file and holds the resulting AppConfig instance."""

    def __init__(self):
        self.config_path = None
        self.config = None

    def load_config(self, path):
        """Parse the YAML file at *path*, validate it, and remember both."""
        with open(path, "r", encoding="utf-8") as fp:
            raw = yaml.safe_load(fp)
        self.config_path = path
        self.config = AppConfig.model_validate(raw)
        return self.config

    def get_config(self):
        """Return the loaded config; raise if load_config was never called."""
        if not self.config:
            raise RuntimeError("Config not loaded")
        return self.config
56 |
--------------------------------------------------------------------------------
/.github/workflows/publish_docker.yml:
--------------------------------------------------------------------------------
1 | name: Build and Publish Docker Image
2 |
3 | on:
4 | workflow_dispatch:
5 | push:
6 | tags:
7 | - v*
8 |
9 | jobs:
10 | build-and-push:
11 | runs-on: ubuntu-latest
12 | environment:
13 | name: publish
14 | steps:
15 | - name: Checkout code
16 | uses: actions/checkout@v4
17 |
18 | - name: Set up Python
19 | uses: actions/setup-python@v4
20 | with:
21 | python-version: '3.12'
22 |
23 | - name: Install dependencies
24 | run: |
25 | python -m pip install --upgrade pip
26 | pip install toml
27 |
28 | - name: Extract version from pyproject.toml
29 | id: get_version
30 | run: |
import os
import toml
with open('pyproject.toml', 'r') as f:
    data = toml.load(f)
version = data['project']['version']
# "::set-output" is deprecated and disabled on current GitHub runners;
# append to the GITHUB_OUTPUT environment file instead.
with open(os.environ['GITHUB_OUTPUT'], 'a') as out:
    print(f"version={version}", file=out)
36 | shell: python
37 |
38 | - name: Show version
39 | run: |
40 | echo "Version: ${{ steps.get_version.outputs.version }}"
41 |
42 | - name: Set up QEMU
43 | uses: docker/setup-qemu-action@v2
44 |
45 | - name: Set up Docker Buildx
46 | uses: docker/setup-buildx-action@v2
47 |
48 | - name: Log in to Docker Hub
49 | uses: docker/login-action@v2
50 | with:
51 | username: ${{ secrets.DOCKERHUB_USERNAME }}
52 | password: ${{ secrets.DOCKERHUB_PASSWORD }}
53 |
54 | - name: Build and push Docker image
55 | uses: docker/build-push-action@v4
56 | with:
57 | context: .
58 | file: docker/Dockerfile
59 | platforms: linux/amd64,linux/arm64
60 | push: true
61 | tags: |
62 | twosix26/alist-mikananirss:v${{ steps.get_version.outputs.version }}
63 | twosix26/alist-mikananirss:latest
64 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/websites/base.py:
--------------------------------------------------------------------------------
1 | import abc
2 | import asyncio
3 | from concurrent.futures import ThreadPoolExecutor
4 | from typing import Optional
5 |
6 | import feedparser
7 | from loguru import logger
8 |
9 | from .models import FeedEntry, ResourceInfo
10 |
11 |
class Website(abc.ABC):
    """Abstract base class: extracts resource info from one site's RSS feed."""

    def __init__(self, rss_url: str):
        self.rss_url = rss_url

    async def parse_feed(self, url) -> Optional[feedparser.FeedParserDict]:
        """Parse the RSS feed at *url* without blocking the event loop.

        feedparser is synchronous, so it is run in a worker thread.
        Returns None on any failure (logged, never raised).
        """
        try:
            # asyncio.to_thread replaces the deprecated get_event_loop() +
            # per-call ThreadPoolExecutor combination used previously.
            return await asyncio.to_thread(feedparser.parse, url)
        except Exception as e:
            logger.error(f"Failed to get rss feed: {e}")
            return None

    @abc.abstractmethod
    async def get_feed_entries(self) -> list[FeedEntry]:
        """Fetch all resource entries from the RSS feed."""
        pass

    @abc.abstractmethod
    async def extract_resource_info(
        self, entry: FeedEntry, use_extractor: bool = False
    ) -> ResourceInfo:
        """Extract anime resource info from a single feed entry."""
        pass
40 |
41 |
class WebsiteFactory:
    """Factory: picks the Website parser matching an RSS url."""

    @staticmethod
    def get_website_parser(rss_url: str) -> Website:
        # Site modules are imported locally so each is only loaded when used.
        if "mikan" in rss_url:
            from . import Mikan as parser_cls
        elif "dmhy" in rss_url:
            from . import Dmhy as parser_cls
        elif "acg.rip" in rss_url:
            from . import AcgRip as parser_cls
        else:
            from .default import DefaultWebsite as parser_cls
        return parser_cls(rss_url)
63 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/websites/models.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass, field
2 | from enum import StrEnum
3 | from typing import List, Optional
4 |
5 |
class VideoQuality(StrEnum):
    """Vertical-resolution classes a resource can be tagged with."""

    p2160 = "2160p"
    p1080 = "1080p"
    p720 = "720p"
10 |
11 |
class LanguageType(StrEnum):
    """Subtitle language tags (values are the short Chinese labels used in titles)."""

    SIMPLIFIED_CHINESE = "简"
    TRADITIONAL_CHINESE = "繁"
    JAPANESE = "日"
    UNKNOWN = "Unknown"
17 |
18 |
@dataclass
class ResourceInfo:
    """Parsed information about one anime resource (torrent)."""

    resource_title: str
    torrent_url: str

    published_date: Optional[str] = None
    anime_name: Optional[str] = None
    season: Optional[int] = None
    episode: Optional[int] = None
    fansub: Optional[str] = None
    quality: Optional[VideoQuality] = None
    languages: List[str] = field(default_factory=list)
    version: int = 1

    def __hash__(self):
        # Identity follows the title alone.
        return hash(self.resource_title)

    def __str__(self) -> str:
        season_text = f"{self.season:02d}" if self.season is not None else "--"
        episode_text = f"{self.episode:02d}" if self.episode is not None else "--"
        rows = (
            ("Title", self.resource_title),
            ("Anime", self.anime_name),
            ("Season", season_text),
            ("Episode", episode_text),
            ("Fansub", self.fansub or "--"),
            ("Quality", str(self.quality) if self.quality else "--"),
            ("Language", self.languages or "--"),
            ("Date", self.published_date or "--"),
            ("Version", self.version),
            ("URL", self.torrent_url),
        )
        return "\n".join(f"{label:8}: {value}" for label, value in rows)
51 |
52 |
@dataclass
class FeedEntry:
    """One raw entry pulled from an RSS feed."""

    resource_title: str
    torrent_url: str
    published_date: Optional[str] = None
    homepage_url: Optional[str] = None
    author: Optional[str] = None

    def __hash__(self):
        # Entries are deduplicated by title alone.
        return hash(self.resource_title)
63 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/extractor/extractor.py:
--------------------------------------------------------------------------------
1 | from async_lru import alru_cache
2 |
3 | from ..utils import Singleton
4 | from .base import ExtractorBase
5 | from .models import AnimeNameExtractResult, ResourceTitleExtractResult
6 | from .regex import RegexExtractor
7 |
8 |
class Extractor(metaclass=Singleton):
    """Process-wide facade over a concrete ExtractorBase implementation.

    Singleton: every Extractor() call returns the same instance; the public
    classmethods operate on that shared instance and require initialize()
    (or set_extractor) to have been called first.
    """

    def __init__(self, extractor: ExtractorBase = None):
        self._extractor = extractor
        # Regex-based extractor always used for anime-name analysis (see
        # analyse_anime_name for the rationale).
        self._tmp_regex_extractor = RegexExtractor()

    @classmethod
    def initialize(cls, extractor: ExtractorBase):
        """Initialize the Extractor with a specific extractor."""
        instance_ = cls()
        instance_.set_extractor(extractor)

    def set_extractor(self, extractor: ExtractorBase):
        """Set the extractor to be used."""
        self._extractor = extractor

    @alru_cache(maxsize=128)
    async def _analyse_anime_name(self, anime_name: str) -> AnimeNameExtractResult:
        """Analyse the anime name (result cached per name)."""
        return await self._tmp_regex_extractor.analyse_anime_name(anime_name)

    @alru_cache(maxsize=128)
    async def _analyse_resource_title(
        self, resource_name: str, use_tmdb: bool = True
    ) -> ResourceTitleExtractResult:
        """Analyse the resource title (result cached per title/use_tmdb pair)."""
        return await self._extractor.analyse_resource_title(resource_name, use_tmdb)

    @classmethod
    async def analyse_anime_name(cls, anime_name: str) -> AnimeNameExtractResult:
        # LLM analysis of bare anime names is unstable, so the regex
        # extractor is always used for this, regardless of configuration.
        instance = cls()
        if instance._extractor is None:
            raise RuntimeError("Extractor is not initialized")
        return await instance._analyse_anime_name(anime_name)

    @classmethod
    async def analyse_resource_title(
        cls, resource_name: str, use_tmdb: bool = True
    ) -> ResourceTitleExtractResult:
        """Analyse a resource title with the configured extractor (cached)."""
        instance = cls()
        if instance._extractor is None:
            raise RuntimeError("Extractor is not initialized")
        return await instance._analyse_resource_title(resource_name, use_tmdb)
52 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/core/bot_assistant.py:
--------------------------------------------------------------------------------
1 | from telegram import Update
2 | from telegram.ext import Application, CommandHandler, ContextTypes
3 |
4 | from alist_mikananirss.websites.models import ResourceInfo
5 |
6 | from .rss_monitor import RssMonitor
7 |
8 |
class BotAssistant:
    def __init__(self, token: str, rss_monitor: RssMonitor):
        """An assistant to manage the rss download tasks via Telegram Bot

        Args:
            token (str): Telegram Bot Token
            rss_monitor (RssMonitor): RssMonitor instance

        Example:
            bot_assistant = BotAssistant(cfg.bot_assistant_telegram_bot_token, rss_monitor)
            asyncio.create_task(bot_assistant.run())
        """
        self.app = Application.builder().token(token).build()
        self.rss_monitor = rss_monitor
        self._setup_handlers()

    def _setup_handlers(self):
        # /d <rss_url> — download all new resources from the given feed once.
        self.app.add_handler(CommandHandler("d", self._download_rss_command))

    async def _download_rss_command(
        self, update: Update, context: ContextTypes.DEFAULT_TYPE
    ):
        """Handle /d: run the RSS monitor once for the given url and reply."""
        if not context.args:
            await update.message.reply_text("usage: /d ")
            return

        rss_url = context.args[0]
        try:
            await update.message.reply_text("分析 RSS 链接...")
            # Annotated as a list (the old ResourceInfo annotation was wrong:
            # the result is iterated below).
            results: list[ResourceInfo] = await self.rss_monitor.run_once_with_url(rss_url)
            if results:
                replymsg = "开始下载:\n" + "\n".join(
                    [r_info.resource_title for r_info in results]
                )
                await update.message.reply_text(replymsg)
            else:
                await update.message.reply_text("未能找到新的资源")
        except Exception as e:
            # Report any failure back to the chat instead of crashing the bot.
            await update.message.reply_text(f"RSS 下载失败:\n{str(e)}")

    async def run(self):
        """Initialize and start the bot"""
        await self.app.initialize()
        await self.app.start()
        await self.app.updater.start_polling()

    async def stop(self):
        """Stop the bot gracefully"""
        await self.app.stop()
        await self.app.shutdown()
59 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/bot/notificationbot.py:
--------------------------------------------------------------------------------
1 | from alist_mikananirss.websites.models import ResourceInfo
2 |
3 | from . import BotBase
4 |
5 |
class NotificationMsg:
    """Accumulates per-anime update titles and renders a notification text."""

    def __init__(self) -> None:
        # anime name -> list of downloaded resource titles
        self._update_info: dict[str, list] = {}
        # cached rendered message; invalidated by update()
        self.msg = None

    def format_message(self):
        """Render the accumulated updates into a human-readable message."""
        if not self._update_info:
            return "暂无番剧更新"

        # Header listing every updated anime name (join replaces the old
        # build-then-rstrip loop).
        names = " , ".join(f"[{name}]" for name in self._update_info)
        msg = "你订阅的番剧" + names + " 更新啦:\n"

        # One section per anime with its downloaded titles.
        for name, titles in self._update_info.items():
            msg += f"[{name}] :\n"
            for title in titles:
                msg += f"{title}\n"
            msg += "\n"
        return msg

    def __bool__(self):
        return bool(self._update_info)

    def __str__(self):
        if not self.msg:
            self.msg = self.format_message()
        return self.msg

    def update(self, anime_name: str, titles: list[str]):
        """update anime update info

        Args:
            anime_name (str): the downloaded anime name
            titles (list[str]): the downloaded resources' title of the anime
        """
        # setdefault replaces the explicit "not in ... .keys()" check.
        self._update_info.setdefault(anime_name, []).extend(titles)
        # Invalidate the cached rendering.
        self.msg = None

    @classmethod
    def from_resources(cls, resources: "list[ResourceInfo]"):
        """Generate NotificationMsg from resources

        Args:
            resources (list[ResourceInfo]): the downloaded resources

        Returns:
            NotificationMsg: the NotificationMsg instance
        """
        msg = cls()
        for resource in resources:
            msg.update(resource.anime_name, [resource.resource_title])
        return msg
64 |
65 |
class NotificationBot:
    """Binds a NotificationMsg producer to one concrete bot backend."""

    def __init__(self, bot_handler: BotBase):
        self.bot = bot_handler

    async def send_message(self, msg: NotificationMsg):
        # Render the message object to text before handing it to the backend.
        return await self.bot.send_message(str(msg))
72 |
--------------------------------------------------------------------------------
/tests/core/test_filter.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from alist_mikananirss import RegexFilter # 请替换为实际的模块名
4 |
5 |
@pytest.fixture
def regex_filter():
    # Fresh filter with no patterns enabled; each test adds what it needs.
    return RegexFilter()
9 |
10 |
@pytest.fixture
def test_resources():
    """Real-world release titles covering the default pattern categories."""
    resources = [
        "★10月新番★[葬送的芙莉莲 / Sousou no Frieren][06][1080p][简日双语][招募翻译] [709.7MB]",
        "[白圣女与黑牧师_Shiro Seijo to Kuro Bokushi][12][x264 1080p][CHS]",
        "★07月新番[彻夜之歌][01-13(全集)][1080P][繁体][MP4]",
        "葬送的芙莉莲 / Sousou no Frieren [08][WebRip][1080p][HEVC_AAC][繁日内嵌]",
        "[彻夜之歌 / Yofukashi no Uta][修正合集][繁日双语][1080P][WEBrip][MP4](急招校对、后期)",
        "[ANi] Undead Unluck / 不死不运 - 14 [1080P][Baha][WEB-DL][AAC AVC][CHT][MP4] [567.96 MB]",
        "【悠哈璃羽字幕社】[异种族风俗娘评鉴指南_Ishuzoku Rebyuazu][01-12][x264][CHT][AT-X 1280x720]",
        "[GJ.Y] 我内心的糟糕念头 第二季 / Boku no Kokoro no Yabai Yatsu Season 2 - 14 (Baha 1920x1080 AVC AAC MP4)",
        "[喵萌奶茶屋&LoliHouse] 僵尸100 ~变成僵尸前想要完成的100件事~ / Zom 100: Zombie ni Naru made ni Shitai 100 no Koto [01-12 精校合集][WebRip 1080p HEVC-10bit AAC][简繁日内封字幕][Fin]",
    ]
    return resources
25 |
26 |
def test_init():
    """Patterns passed to the constructor are compiled immediately."""
    initialized = RegexFilter(["简体", "1080p"])
    assert len(initialized.patterns) == 2
30 |
31 |
def test_update_regex(regex_filter):
    """update_regex merges new named patterns into the default table."""
    regex_filter.update_regex({"新模式": r"测试"})
    patterns = regex_filter._default_patterns
    assert "新模式" in patterns
    assert patterns["新模式"] == r"测试"
36 |
37 |
def test_add_pattern(regex_filter):
    """add_pattern activates one known named pattern."""
    regex_filter.add_pattern("简体")
    assert len(regex_filter.patterns) == 1
41 |
42 |
def test_add_invalid_pattern(regex_filter):
    """Activating an unknown pattern name raises KeyError."""
    with pytest.raises(KeyError):
        regex_filter.add_pattern("UnexistPattern")
46 |
47 |
def test_filt_single(regex_filter):
    """filt_single accepts a title only when every active pattern matches."""
    regex_filter.add_pattern("简体")
    regex_filter.add_pattern("1080p")
    assert regex_filter.filt_single("简体中文1080P版本")
    assert not regex_filter.filt_single("繁体中文720P版本")
53 |
54 |
def test_filt_list(regex_filter, test_resources):
    """filt_list returns the indices of titles that pass all active patterns."""
    regex_filter.add_pattern("简体")
    regex_filter.add_pattern("1080p")
    regex_filter.add_pattern("非合集")
    result = regex_filter.filt_list(test_resources)
    assert result == [0, 1]
61 |
62 |
def test_non_collection_filter(test_resources):
    """The built-in "非合集" (non-collection) pattern drops batch releases."""
    rf = RegexFilter(["非合集"])
    result = rf.filt_list(test_resources)
    assert result == [0, 1, 3, 5, 7]
67 |
68 |
def test_multiple_patterns(test_resources):
    """Multiple constructor patterns are combined with AND semantics."""
    rf = RegexFilter(["简体", "1080p", "非合集"])
    result = rf.filt_list(test_resources)
    assert result == [0, 1]
73 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/core/renamer.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import os
3 |
4 | from loguru import logger
5 |
6 | from alist_mikananirss.alist import Alist
7 | from alist_mikananirss.websites.models import ResourceInfo
8 |
9 | from ..utils import Singleton
10 |
11 |
class AnimeRenamer(metaclass=Singleton):
    """Singleton that renames downloaded anime files on the Alist drive
    according to a user-supplied format string."""

    _lock = asyncio.Lock()

    def __init__(self, alist: Alist, rename_format: str):
        self.alist_client = alist
        # Format string with {name}/{season}/{episode}/{fansub}/{quality}/{language} fields.
        self.rename_format = rename_format

    @classmethod
    def initialize(cls, alist: Alist, rename_format: str):
        """Create (or reuse, via the Singleton metaclass) the shared instance."""
        cls(alist, rename_format)

    async def _build_new_name(self, old_filepath: str, resource: ResourceInfo) -> str:
        """Build the new file name for `old_filepath` from `resource` metadata.

        Raises:
            ValueError: if the resource has no season or episode.
        """
        name = resource.anime_name
        season = resource.season
        episode = resource.episode
        if season is None or episode is None:
            raise ValueError("Season or episode is none when rename")
        fansub = resource.fansub
        quality = resource.quality
        language_str = "".join(resource.languages)
        old_filedir = os.path.dirname(old_filepath)
        old_filename = os.path.basename(old_filepath)
        file_ext = os.path.splitext(old_filename)[-1].replace(".", "")

        if season == 0:
            # Specials/OVA (season 0): number sequentially by the current
            # file count of the directory instead of the parsed episode.
            file_list = await self.alist_client.list_dir(old_filedir, per_page=999)
            episode = len(file_list)

        new_filename = self.rename_format.format(
            name=name,
            season=season,
            episode=episode,
            fansub=fansub,
            quality=quality,
            language=language_str,
        )
        if resource.version != 1:
            new_filename += f" v{resource.version}"
        new_filename += f".{file_ext}"
        return new_filename

    @classmethod
    async def rename(cls, old_filepath: str, resource: ResourceInfo, max_retry=3):
        """Rename `old_filepath` based on `resource`, retrying up to `max_retry` times.

        Logs and returns without raising when the resource info is incomplete
        or every attempt fails.
        """
        if (
            resource.anime_name is None
            or resource.season is None
            or resource.episode is None
        ):
            logger.error(f"rename failed due to resource info is invalid: {resource}")
            return
        instance = cls()
        for i in range(max_retry):
            try:
                new_filename = await instance._build_new_name(old_filepath, resource)
                await instance.alist_client.rename(old_filepath, new_filename)
                logger.info(f"Rename {old_filepath} to {new_filename}")
                break
            except Exception as e:
                if i < max_retry - 1:
                    logger.warning(f"Failed to rename {old_filepath}, retrying...: {e}")
                    # Back off only when another attempt remains; previously the
                    # 5 s sleep also ran after the final failed attempt.
                    await asyncio.sleep(5)
                else:
                    logger.error(f"Error when rename {old_filepath}: {e}")
76 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/core/notification_sender.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from typing import List
3 |
4 | from loguru import logger
5 | from tenacity import retry, stop_after_attempt, wait_exponential
6 |
7 | from alist_mikananirss.bot import NotificationBot, NotificationMsg
8 | from alist_mikananirss.websites.models import ResourceInfo
9 |
10 | from ..utils import Singleton
11 |
12 |
class NotificationSender(metaclass=Singleton):
    """Singleton that batches resource notifications and periodically
    broadcasts them through every configured notification bot."""

    def __init__(self, notification_bots: list[NotificationBot], interval: float = 60):
        self.notification_bots = notification_bots
        # Seconds between queue flushes. Annotated as float (was int):
        # sub-second intervals are valid and the test suite passes 0.1.
        self._interval = interval
        self._queue = asyncio.Queue()
        self._max_retries = 3

    @classmethod
    def initialize(cls, notification_bots: List[NotificationBot], interval: float = 60):
        """Create (or reuse, via the Singleton metaclass) the shared instance."""
        cls(notification_bots, interval)

    @classmethod
    def set_notification_bots(cls, notification_bots: List[NotificationBot]):
        """Replace the list of bots used for broadcasting."""
        instance = cls()
        instance.notification_bots = notification_bots

    @classmethod
    async def add_resource(cls, resource: ResourceInfo):
        """Queue one resource for inclusion in the next notification batch."""
        instance = cls()
        await instance._queue.put(resource)

    @classmethod
    def set_interval(cls, interval: float):
        """Change the flush interval (seconds)."""
        instance = cls()
        instance._interval = interval

    async def _run(self):
        # Flush loop: wait one interval, drain the queue, send one batched message.
        while True:
            await asyncio.sleep(self._interval)
            resources = []
            while not self._queue.empty():
                try:
                    resource = self._queue.get_nowait()
                    resources.append(resource)
                except asyncio.QueueEmpty:
                    break
            if resources:
                await self._send(resources)

    @classmethod
    async def run(cls):
        """Run the flush loop forever on the singleton instance."""
        instance = cls()
        await instance._run()

    async def _send(self, resources: List[ResourceInfo]):
        """Broadcast one batched message built from `resources` to all bots."""
        if not self.notification_bots:
            return
        msg = NotificationMsg.from_resources(resources)
        logger.debug(f"Send notification\n: {msg}")

        tasks = [self._send_with_retry(bot, msg) for bot in self.notification_bots]
        results = await asyncio.gather(*tasks, return_exceptions=True)

        for result in results:
            if isinstance(result, Exception):
                logger.error(f"Failed to send notification after all retries: {result}")

    @retry(
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=3, min=5, max=30),
        reraise=True,
    )
    async def _send_with_retry(self, bot: NotificationBot, msg: NotificationMsg):
        """Send `msg` via one bot; tenacity retries up to 3 times with
        exponential backoff, then re-raises the last error."""
        try:
            await bot.send_message(msg)
        except Exception as e:
            logger.warning(f"Attempt failed for {type(bot.bot)}: {e}")
            raise
82 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/websites/default.py:
--------------------------------------------------------------------------------
1 | from urllib.parse import urlparse
2 |
3 | from loguru import logger
4 |
5 | from alist_mikananirss import utils
6 | from alist_mikananirss.extractor import Extractor
7 | from alist_mikananirss.websites.models import FeedEntry, ResourceInfo
8 |
9 | from .base import Website
10 |
11 |
class DefaultWebsite(Website):
    """Fallback RSS parser used when no site-specific Website subclass applies."""

    def __init__(self, rss_url: str):
        super().__init__(rss_url)

    async def get_feed_entries(self) -> list[FeedEntry]:
        """Parse the RSS feed and collect entries that carry a usable link.

        Returns an empty list when the feed cannot be parsed, or when any
        entry lacks a title/link — in that case the whole feed is treated
        as unsupported and all collected entries are discarded.
        """
        feed = await self.parse_feed(self.rss_url)
        if feed is None:
            return []
        feed_entries = []
        for tmp_entry in feed.entries:
            resource_title = tmp_entry.get("title", None)
            torrent_url = None
            for link_entry in tmp_entry.get("links", []):
                # Is this a magnet link / .torrent file?
                link = link_entry["href"]
                if link.startswith("magnet:") or link.endswith(".torrent"):
                    torrent_url = link
                    break
                # Is this a direct video link?
                link_parsed = urlparse(link)
                if utils.is_video(link_parsed.path):
                    torrent_url = link
                    break
            published_date = tmp_entry.get("published", None)

            if not resource_title or not torrent_url:
                logger.error(f"Unsupport rss feed format: {self.rss_url}")
                return []

            feed_entry = FeedEntry(
                resource_title=resource_title,
                torrent_url=torrent_url,
                published_date=published_date,
            )
            feed_entries.append(feed_entry)
        return feed_entries

    async def extract_resource_info(
        self, entry: FeedEntry, use_extractor: bool = False
    ) -> ResourceInfo:
        """Build a ResourceInfo from `entry`; when `use_extractor` is True,
        enrich it with fields parsed from the resource title."""
        resource_info = ResourceInfo(
            resource_title=entry.resource_title,
            torrent_url=entry.torrent_url,
            published_date=entry.published_date,
        )
        if use_extractor:
            rtitle_extract_result = await Extractor.analyse_resource_title(
                resource_info.resource_title
            )
            resource_info = ResourceInfo(
                anime_name=rtitle_extract_result.anime_name,
                season=rtitle_extract_result.season,
                episode=rtitle_extract_result.episode,
                quality=rtitle_extract_result.quality,
                languages=rtitle_extract_result.languages,
                fansub=rtitle_extract_result.fansub,
                resource_title=resource_info.resource_title,
                torrent_url=resource_info.torrent_url,
                published_date=resource_info.published_date,
                version=rtitle_extract_result.version,
            )
        return resource_info
74 |
--------------------------------------------------------------------------------
/tests/bot/test_notification_sender.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from unittest.mock import AsyncMock
3 |
4 | import pytest
5 |
6 | from alist_mikananirss.bot import NotificationBot
7 | from alist_mikananirss.core import NotificationSender
8 | from alist_mikananirss.websites.models import ResourceInfo
9 |
10 |
@pytest.fixture(autouse=True)
def reset_notification_sender():
    """Drop the singleton before every test so each test starts clean."""
    NotificationSender.destroy_instance()
14 |
15 |
@pytest.mark.asyncio
async def test_initialization():
    """initialize() creates a singleton that later constructor calls return."""
    mock_bot = AsyncMock(spec=NotificationBot)
    NotificationSender.initialize([mock_bot], interval=30)
    instance1 = NotificationSender()
    instance2 = NotificationSender()
    assert instance1 == instance2
    assert len(instance1.notification_bots) == 1
    assert instance1._interval == 30
    assert isinstance(instance1._queue, asyncio.Queue)
26 |
27 |
@pytest.mark.asyncio
async def test_set_notification_bots():
    """set_notification_bots replaces the bot list on the singleton."""
    NotificationSender.initialize([])
    mock_bot1 = AsyncMock(spec=NotificationBot)
    mock_bot2 = AsyncMock(spec=NotificationBot)

    NotificationSender.set_notification_bots([mock_bot1, mock_bot2])

    assert len(NotificationSender().notification_bots) == 2
37 |
38 |
@pytest.mark.asyncio
async def test_add_resource():
    """add_resource enqueues the resource on the singleton's queue."""
    NotificationSender.initialize([])
    resource = ResourceInfo(
        anime_name="test name",
        resource_title="Test",
        torrent_url="https://test.com",
        published_date="2023-01-01",
    )

    await NotificationSender.add_resource(resource)

    assert NotificationSender()._queue.qsize() == 1
52 |
53 |
@pytest.mark.asyncio
async def test_set_interval():
    """set_interval updates the flush interval on the singleton."""
    NotificationSender.initialize([], interval=60)

    NotificationSender.set_interval(120)

    assert NotificationSender()._interval == 120
61 |
62 |
@pytest.mark.asyncio
async def test_run_method():
    """run() flushes queued resources after one interval and notifies each bot once."""
    # AsyncMock methods already return awaitables; the old manual
    # `asyncio.Future()` setup was redundant and created a Future with no
    # running event loop.
    mock_bot = AsyncMock(spec=NotificationBot)

    NotificationSender.initialize([mock_bot], interval=0.1)
    resource = ResourceInfo(
        anime_name="test name",
        resource_title="Test",
        torrent_url="https://test.com",
        published_date="2023-01-01",
    )
    await NotificationSender.add_resource(resource)
    task = asyncio.create_task(NotificationSender.run())
    await asyncio.sleep(0.2)

    mock_bot.send_message.assert_called_once()
    # Cancel the infinite flush loop and await it so no pending-task warning
    # leaks into other tests.
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        pass
82 |
83 |
@pytest.mark.asyncio
async def test_send_method_without_bots():
    """With no bots configured, the flush loop runs without raising."""
    NotificationSender.initialize([], interval=0.1)
    resource = ResourceInfo(
        anime_name="test name",
        resource_title="Test",
        torrent_url="https://test.com",
        published_date="2023-01-01",
    )
    await NotificationSender.add_resource(resource)
    task = asyncio.create_task(NotificationSender.run())
    await asyncio.sleep(0.2)
    task.cancel()
97 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/websites/acgrip.py:
--------------------------------------------------------------------------------
1 | import re
2 | from typing import Optional
3 |
4 | import aiohttp
5 | import bs4
6 |
7 | from alist_mikananirss.extractor import Extractor
8 | from alist_mikananirss.websites.models import FeedEntry, ResourceInfo
9 |
10 | from .base import Website
11 |
12 |
class AcgRip(Website):
    """RSS parser for acg.rip feeds."""

    def __init__(self, rss_url: str):
        super().__init__(rss_url)

    async def parse_homepage(self, home_page_url: str) -> Optional[str]:
        """Scrape the entry's homepage for the fansub group name.

        Returns None when no /team/<id> link is present on the page.
        """
        async with aiohttp.ClientSession(trust_env=True) as session:
            async with session.get(home_page_url) as response:
                response.raise_for_status()
                html = await response.text()
        soup = bs4.BeautifulSoup(html, "html.parser")
        fansub_tag = soup.find("a", href=re.compile(r"/team/\d+"))
        if not fansub_tag:
            return None
        fansub = fansub_tag.text
        return fansub

    async def get_feed_entries(self) -> list[FeedEntry]:
        """Parse the RSS feed into FeedEntry objects.

        Raises:
            RuntimeError: if an entry carries no torrent link.
        """
        feed = await self.parse_feed(self.rss_url)
        if feed is None:
            return []
        feed_entries = []
        for tmp_entry in feed.entries:
            resource_title = tmp_entry.title
            torrent_url = None
            for link in tmp_entry.links:
                if link["type"] == "application/x-bittorrent":
                    torrent_url = link["href"]
            if not torrent_url:
                raise RuntimeError("No torrent url found")
            homepage_url = tmp_entry.link
            published_date = tmp_entry.published
            feed_entry = FeedEntry(
                resource_title=resource_title,
                torrent_url=torrent_url,
                published_date=published_date,
                homepage_url=homepage_url,
            )
            feed_entries.append(feed_entry)
        return feed_entries

    async def extract_resource_info(
        self, entry: FeedEntry, use_extractor: bool = False
    ) -> ResourceInfo:
        """Build a ResourceInfo from `entry`; when `use_extractor` is True,
        enrich it from the title and prefer the homepage's fansub name."""
        resource_info = ResourceInfo(
            resource_title=entry.resource_title,
            torrent_url=entry.torrent_url,
            published_date=entry.published_date,
        )
        if use_extractor:
            rtitle_extract_result = await Extractor.analyse_resource_title(
                resource_info.resource_title
            )

            fansub = await self.parse_homepage(entry.homepage_url)

            resource_info = ResourceInfo(
                anime_name=rtitle_extract_result.anime_name,
                season=rtitle_extract_result.season,
                episode=rtitle_extract_result.episode,
                quality=rtitle_extract_result.quality,
                languages=rtitle_extract_result.languages,
                # Homepage fansub wins; title-derived fansub is the fallback.
                fansub=fansub if fansub else rtitle_extract_result.fansub,
                resource_title=resource_info.resource_title,
                torrent_url=resource_info.torrent_url,
                published_date=resource_info.published_date,
                version=rtitle_extract_result.version,
            )
        return resource_info
82 |
--------------------------------------------------------------------------------
/tests/core/download_manager/test_build_download_path.py:
--------------------------------------------------------------------------------
1 | import os
2 | from unittest.mock import AsyncMock
3 |
4 | import pytest
5 |
6 | from alist_mikananirss import DownloadManager
7 | from alist_mikananirss.websites.models import ResourceInfo
8 |
9 |
@pytest.fixture
def base_path():
    """Root download directory used by the path-building tests."""
    return "/base/path"
13 |
14 |
@pytest.mark.asyncio
async def test_initialize(base_path):
    """initialize() stores the base download path on the singleton."""
    mock_alist = AsyncMock()
    mock_db = AsyncMock()
    DownloadManager.initialize(mock_alist, base_path, True, True, mock_db)
    assert DownloadManager().base_download_path == base_path
21 |
22 |
def test_build_download_path_with_anime_name(base_path):
    """An anime name (no season) yields base/<anime name>."""
    resource = ResourceInfo(
        resource_title="Test Resource",
        torrent_url="https://example.com/torrent",
        published_date="2023-05-20",
        anime_name="Test Anime",
    )
    expected_path = os.path.join(base_path, "Test Anime")
    test_instance = DownloadManager()
    assert test_instance._build_download_path(resource) == expected_path
33 |
34 |
def test_build_download_path_with_anime_name_and_season(base_path):
    """Anime name plus season yields base/<anime name>/Season <n>."""
    resource = ResourceInfo(
        resource_title="Test Resource",
        torrent_url="https://example.com/torrent",
        published_date="2023-05-20",
        anime_name="Test Anime",
        season=1,
    )
    expected_path = os.path.join(base_path, "Test Anime", "Season 1")
    test_instance = DownloadManager()
    assert test_instance._build_download_path(resource) == expected_path
46 |
47 |
def test_build_download_path_with_illegal_characters(base_path):
    # Verify that illegal filesystem characters in the anime name are stripped.
    resource = ResourceInfo(
        resource_title="Test Resource",
        torrent_url="https://example.com/torrent",
        published_date="2023-05-20",
        anime_name="Test:Anime!",
        season=2,
    )
    expected_path = os.path.join(base_path, "Test Anime!", "Season 2")
    test_instance = DownloadManager()
    assert test_instance._build_download_path(resource) == expected_path
60 |
61 |
def test_build_download_path_without_anime_name(base_path):
    # Without an anime name no subfolder is created: files go straight into
    # the root of the download directory (season is ignored in that case).
    resource_none_name = ResourceInfo(
        resource_title="Test Resource",
        torrent_url="https://example.com/torrent",
        published_date="2023-05-20",
        season=4,
    )
    resource_empty_name = ResourceInfo(
        resource_title="Test Resource",
        torrent_url="https://example.com/torrent",
        published_date="2023-05-20",
        anime_name="",
        season=3,
    )
    expected_path = base_path
    test_instance = DownloadManager()
    assert test_instance._build_download_path(resource_none_name) == expected_path
    assert test_instance._build_download_path(resource_empty_name) == expected_path
81 |
82 |
def test_build_download_path_with_special_season(base_path):
    """Season 0 (specials/OVA) still gets its own "Season 0" folder."""
    resource = ResourceInfo(
        resource_title="Test Resource",
        torrent_url="https://example.com/torrent",
        published_date="2023-05-20",
        anime_name="Test Anime",
        season=0,
    )
    expected_path = os.path.join(base_path, "Test Anime", "Season 0")
    test_instance = DownloadManager()
    assert test_instance._build_download_path(resource) == expected_path
94 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/extractor/regex.py:
--------------------------------------------------------------------------------
1 | import re
2 | from functools import lru_cache
3 |
4 | from loguru import logger
5 |
6 | from .base import ExtractorBase
7 | from .models import AnimeNameExtractResult, ResourceTitleExtractResult
8 |
9 |
class RegexExtractor(ExtractorBase):
    """Pure-regex extractor: derives season info from anime names and episode
    numbers from resource titles, without any LLM involvement."""

    def __init__(self) -> None:
        # Chinese numeral digits used by _chinese_to_arabic.
        self.num_dict: dict[str, int] = {
            "零": 0,
            "一": 1,
            "二": 2,
            "三": 3,
            "四": 4,
            "五": 5,
            "六": 6,
            "七": 7,
            "八": 8,
            "九": 9,
        }
        # Positional multipliers for Chinese numerals.
        self.unit_dict: dict[str, int] = {"十": 10, "百": 100, "千": 1000}

        # "第x部分" (part x) marker, stripped from anime names.
        self.part_pattern = re.compile(r"\s*第(.+)部分")
        # "<name> 第x季" / "<name> 第x期" season marker.
        self.season_pattern = re.compile(r"(.+) 第(.+)[季期]")
        # Roman-numeral season marker (e.g. "无职转生Ⅱ").
        self.roman_season_pattern = re.compile(r"\s*([ⅠⅡⅢⅣⅤ])\s*")
        self.roman_numerals = {"Ⅰ": 1, "Ⅱ": 2, "Ⅲ": 3, "Ⅳ": 4, "Ⅴ": 5}
        # Episode number: optional "第" prefix, digits (possibly fractional),
        # optional "话"/"集" suffix.
        # Bug fix: the suffix was written as the character class
        # "[(?:话|集)]?", which also matched the literal characters
        # "(", "?", ":", "|" and ")". The captured group is unchanged.
        self.episode_pattern = re.compile(r"第?(\d+(?:\.\d+)?)(?:[话集])?")

    # NOTE(review): lru_cache on a bound method keys on `self` and keeps the
    # instance alive for the cache's lifetime; acceptable for a long-lived
    # extractor, but worth confirming.
    @lru_cache(maxsize=128)
    def _chinese_to_arabic(self, chinese_num: str) -> int:
        """Convert a Chinese numeral string (e.g. "十二") to an int."""
        if chinese_num == "十":
            return 10

        result = 0
        temp = 0
        for char in chinese_num:
            if char in self.unit_dict:
                # A bare unit (e.g. leading "十") implies a coefficient of 1.
                result += (temp or 1) * self.unit_dict[char]
                temp = 0
            else:
                temp = self.num_dict[char]
        return result + temp

    async def analyse_anime_name(self, anime_name: str) -> AnimeNameExtractResult:
        """Split an anime name into its base name and season number.

        Falls back to season 1 when no explicit season marker is found.
        """
        # Strip "第x部分" (part x): split-cour releases are not a new season.
        anime_name = self.part_pattern.sub("", anime_name)
        match = self.season_pattern.search(anime_name)
        name = None
        season = None
        if match:
            # Season from an explicit "第x季/期" marker (Arabic or Chinese numeral).
            name, season = match.groups()
            season = (
                int(season) if season.isdigit() else self._chinese_to_arabic(season)
            )
        else:
            # Season from a Roman numeral (e.g. "无职转生Ⅱ ~到了异世界就拿出真本事~").
            match = self.roman_season_pattern.search(anime_name)
            if match:
                season = self.roman_numerals[match.group(1)]
                name = self.roman_season_pattern.sub("", anime_name)
            else:
                # No marker at all: default to season 1.
                name = anime_name
                season = 1
        info = AnimeNameExtractResult(anime_name=name, season=int(season))
        logger.debug(f"Regex analyse anime name: {anime_name} -> {info}")
        return info

    async def analyse_resource_title(
        self, resource_title: str, use_tmdb: bool = True
    ) -> ResourceTitleExtractResult:
        """Extract the episode number from a resource title.

        A fractional episode (e.g. 12.5) marks a special: season is forced
        to 0 and the episode number is reset to 0.

        Raises:
            ValueError: if no episode number can be found.
        """
        clean_name = re.sub(r"[\[\]【】()()]", " ", resource_title)
        match = self.episode_pattern.search(clean_name)
        if not match:
            raise ValueError(f"Can't find episode number in {resource_title}")
        episode = float(match.group(1))
        season = 0 if not episode.is_integer() else None
        episode = int(episode) if episode.is_integer() else 0
        info = ResourceTitleExtractResult(anime_name="", season=season, episode=episode)
        logger.debug(f"Regex analyse resource name: {resource_title} -> {info}")
        return info
87 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/websites/dmhy.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 |
3 | import aiohttp
4 | import bs4
5 |
6 | from alist_mikananirss.extractor import Extractor
7 | from alist_mikananirss.websites.models import FeedEntry, ResourceInfo
8 |
9 | from .base import Website
10 |
11 |
class Dmhy(Website):
    """RSS parser for share.dmhy.org feeds."""

    # Class-level cache shared by all Dmhy instances:
    # {publisher_name: fansub group name}
    fansub_cache = {}

    def __init__(self, rss_url: str):
        super().__init__(rss_url)

    async def parse_homepage(self, home_page_url: str) -> Optional[str]:
        """Scrape the entry's homepage for the fansub group name.

        Returns None when the page has no "所屬發佈組" (owning fansub) block.
        """
        async with aiohttp.ClientSession(trust_env=True) as session:
            async with session.get(home_page_url) as response:
                response.raise_for_status()
                html = await response.text()
        soup = bs4.BeautifulSoup(html, "html.parser")
        target_p = soup.select('p:-soup-contains("所屬發佈組")')
        if len(target_p) == 0:
            return None
        fansub = target_p[0].a.text
        return fansub

    async def get_feed_entries(self) -> list[FeedEntry]:
        """Parse the RSS feed into FeedEntry objects.

        Raises:
            RuntimeError: if an entry carries no torrent link.
        """
        feed = await self.parse_feed(self.rss_url)
        if feed is None:
            return []
        feed_entries = []
        for tmp_entry in feed.entries:
            resource_title = tmp_entry.title
            torrent_url = None
            for link in tmp_entry.links:
                if link["type"] == "application/x-bittorrent":
                    torrent_url = link["href"]
            if not torrent_url:
                raise RuntimeError("No torrent url found")
            homepage_url = tmp_entry.link
            published_date = tmp_entry.published
            author = tmp_entry.author
            feed_entry = FeedEntry(
                resource_title=resource_title,
                torrent_url=torrent_url,
                published_date=published_date,
                homepage_url=homepage_url,
                author=author,
            )
            feed_entries.append(feed_entry)
        return feed_entries

    async def extract_resource_info(
        self, entry: FeedEntry, use_extractor: bool = False
    ) -> ResourceInfo:
        """Build a ResourceInfo from `entry`; when `use_extractor` is True,
        enrich it from the title and the (cached) homepage fansub name."""
        resource_info = ResourceInfo(
            resource_title=entry.resource_title,
            torrent_url=entry.torrent_url,
            published_date=entry.published_date,
        )
        if use_extractor:
            rtitle_extract_result = await Extractor.analyse_resource_title(
                resource_info.resource_title
            )

            # Only truthy cache hits are reused, so a failed homepage parse
            # (None) is retried on the next entry from the same publisher.
            if entry.author in self.fansub_cache and self.fansub_cache[entry.author]:
                fansub = self.fansub_cache[entry.author]
            else:
                fansub = await self.parse_homepage(entry.homepage_url)
                self.fansub_cache[entry.author] = fansub

            resource_info = ResourceInfo(
                anime_name=rtitle_extract_result.anime_name,
                season=rtitle_extract_result.season,
                episode=rtitle_extract_result.episode,
                quality=rtitle_extract_result.quality,
                languages=rtitle_extract_result.languages,
                # Homepage fansub wins; title-derived fansub is the fallback.
                fansub=fansub if fansub else rtitle_extract_result.fansub,
                resource_title=resource_info.resource_title,
                torrent_url=resource_info.torrent_url,
                published_date=resource_info.published_date,
                version=rtitle_extract_result.version,
            )
        return resource_info
88 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/core/rss_monitor.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | from loguru import logger
4 |
5 | from alist_mikananirss import SubscribeDatabase
6 | from alist_mikananirss.websites import Website, WebsiteFactory
7 | from alist_mikananirss.websites.models import ResourceInfo
8 |
9 | from .download_manager import DownloadManager
10 | from .filter import RegexFilter
11 | from .remapper import (
12 | RemapperManager,
13 | )
14 |
15 |
class RssMonitor:
    """Polls the subscribed RSS feeds and hands new resources to the DownloadManager."""

    def __init__(
        self,
        subscribe_urls: list[str],
        filter: RegexFilter,
        db: SubscribeDatabase,
        use_extractor: bool = False,
    ) -> None:
        """The rss feed manager.

        Args:
            subscribe_urls: RSS feed urls to poll.
            filter: title filter applied to every feed entry.
            db: database used to skip already-seen resource titles.
            use_extractor: whether to run the extractor on each entry.
        """
        self.subscribe_urls = subscribe_urls
        self.websites = [
            WebsiteFactory.get_website_parser(url) for url in subscribe_urls
        ]
        self.filter = filter
        self.db = db
        self.use_extractor = use_extractor

        # Seconds between polling rounds.
        self.interval_time = 300

    def set_interval_time(self, interval_time: int):
        """Change the polling interval (seconds)."""
        self.interval_time = interval_time

    async def get_new_resources(
        self,
        m_websites: list[Website],
        m_filter: RegexFilter,
    ) -> list[ResourceInfo]:
        """Parse all rss urls and return the filtered, unseen, deduplicated resources."""
        # One shared semaphore caps concurrent extraction at 8. (Bug fix:
        # previously a fresh Semaphore was created inside every task, so the
        # concurrency limit never actually applied.)
        semaphore = asyncio.Semaphore(8)

        async def process_entry(website: Website, entry):
            # Extract (and optionally remap) one feed entry; None on failure.
            async with semaphore:
                try:
                    resource_info = await website.extract_resource_info(
                        entry, self.use_extractor
                    )
                except Exception as e:
                    logger.error(f"Pass {entry.resource_title} because of error: {e}")
                    return None
                remapper = RemapperManager.match(resource_info)
                if remapper:
                    remapper.remap(resource_info)
                return resource_info

        new_resources_set: set[ResourceInfo] = set()

        for website in m_websites:
            feed_entries = await website.get_feed_entries()
            feed_entries_filted = filter(
                lambda entry: m_filter.filt_single(entry.resource_title),
                feed_entries,
            )
            tasks = []
            for entry in feed_entries_filted:
                # Skip resources we have already processed in a previous round.
                if await self.db.is_resource_title_exist(entry.resource_title):
                    continue
                tasks.append(asyncio.create_task(process_entry(website, entry)))
            results = await asyncio.gather(*tasks)
            for resource_info in results:
                if not resource_info:
                    continue
                new_resources_set.add(resource_info)
                logger.info(f"Find new resource: {resource_info}")

        return list(new_resources_set)

    async def run(self):
        """Poll all subscribed feeds forever, dispatching new resources for download."""
        while True:
            logger.info("Start update checking")
            new_resources = await self.get_new_resources(self.websites, self.filter)
            if not new_resources:
                logger.info("No new resources")
            else:
                await DownloadManager.add_download_tasks(new_resources)
            await asyncio.sleep(self.interval_time)

    async def run_once_with_url(self, url: str):
        """Poll a single url once and return the new resources found."""
        logger.info(f"Start update checking for {url}")
        website = WebsiteFactory.get_website_parser(url)
        new_resources = await self.get_new_resources([website], self.filter)
        if not new_resources:
            logger.info("No new resources")
        else:
            await DownloadManager.add_download_tasks(new_resources)
        return new_resources
103 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 🌐 Language
5 |
30 |
31 |
32 |
33 |
34 | Alist-MikananiRss
35 |
36 |
37 | 从蜜柑计划 或其他动漫番剧相关的RSS订阅源中自动获取番剧更新并通过Alist离线下载至对应网盘
38 |
39 |
40 | 并结合使用ChatGPT分析资源名,将资源重命名为Emby可解析的格式。
41 |
42 |
43 | ---
44 |
45 | [使用文档](https://github.com/TwooSix/Alist-MikananiRss/wiki/%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B)
46 | ## 功能
47 | - 自动获取番剧更新并下载至对应网盘
48 | - 通过PushPlus, Telegram等渠道发送更新通知
49 | - 自动重命名为emby可识别格式,同时支持对自动解析的结果进行自定义重映射,让重命名结果更准确
50 |
51 | ## 准备工作
52 | 1. 请自行参照[Alist](https://github.com/alist-org/alist)项目文档部署Alist(版本须>=3.42.0),并搭建好Aria2/qBittorrent离线下载
53 | 2. 自行注册蜜柑计划账户,订阅番剧,获取订阅链接
54 |
55 | 附:对其余RSS订阅源也作了一定适配,理论上支持绝大多数订阅源(番剧相关),对于未能支持的RSS,也欢迎上交issue
56 |
57 | ## 如何使用
58 | Docker,源码运行等更多的运行方法详见[使用文档](https://github.com/TwooSix/Alist-MikananiRss/wiki/%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B)
59 |
60 | 使用pip安装运行
61 | 1. 请确保你的python版本在3.11以上
62 | 2. 使用pip安装: `pip install alist-mikananirss`
63 | 3. 在目录下新建一个`config.yaml`配置文件,并填写配置文件如下(完整功能示例详解见[配置说明](https://github.com/TwooSix/Alist-MikananiRss/wiki/%E9%85%8D%E7%BD%AE%E8%AF%B4%E6%98%8E))
64 | ```yaml
65 | common:
66 | interval_time: 300
67 |
68 | alist:
69 | base_url: https://example.com # 修改为你的alist访问地址
70 | token: alist-xxx # 修改为你的alist token;可在"管理员后台->设置->其他"中找到
71 | downloader: qBittorrent # 或者 aria2
72 | download_path: Onedrive/Anime # 修改为你的下载路径(Alist中的路径)
73 |
74 | mikan:
75 | subscribe_url:
76 | - https://mikanani.me/RSS/MyBangumi?token=xxx # 修改为你的蜜柑订阅地址
77 | # - https://mikanani.me/RSS/MyBangumi?token=xxx2 # 多条RSS订阅链接情况
78 |
79 | filters:
80 | - 非合集 # 程序暂不支持合集等形式的重命名,若使用重命名功能推荐使用此过滤器
81 | ```
82 | 4. 运行代码:`python -m alist_mikananirss --config /path/to/config.yaml`
83 | 5. Enjoy
84 |
85 |
86 | ## 重命名效果展示
87 |
88 |
89 |
90 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/websites/mikan.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass
2 |
3 | import aiohttp
4 | import bs4
5 | from async_lru import alru_cache
6 |
7 | from alist_mikananirss.extractor import Extractor
8 | from alist_mikananirss.websites.models import FeedEntry, ResourceInfo
9 |
10 | from .base import Website
11 |
12 |
@dataclass
class MikanHomePageInfo:
    """Anime name and fansub group scraped from a Mikan episode homepage."""

    anime_name: str
    fansub: str
17 |
18 |
class Mikan(Website):
    """RSS parser for mikanani.me feeds."""

    def __init__(self, rss_url: str):
        super().__init__(rss_url)

    # NOTE(review): alru_cache on a bound method keys on `self` and keeps the
    # instance alive for the cache's lifetime; acceptable for long-lived
    # Website objects, but worth confirming.
    @alru_cache(maxsize=128)
    async def parse_homepage(self, home_page_url: str) -> MikanHomePageInfo:
        """Scrape a Mikan episode homepage for the anime name and fansub.

        Either field may be None when the page lacks the expected elements.
        """
        async with aiohttp.ClientSession(trust_env=True) as session:
            async with session.get(home_page_url) as response:
                response.raise_for_status()
                html = await response.text()
        soup = bs4.BeautifulSoup(html, "html.parser")
        title_element = soup.find("p", class_="bangumi-title")
        anime_name = title_element.text.strip() if title_element else None
        fansub = None
        bgm_info_elements = soup.find_all("p", class_="bangumi-info")
        for e in bgm_info_elements:
            if "字幕组" in e.text:
                text = e.text.strip()
                fansub = text.split("：")[-1]
                break
        return MikanHomePageInfo(anime_name=anime_name, fansub=fansub)

    async def get_feed_entries(self) -> list[FeedEntry]:
        """Parse the RSS feed into FeedEntry objects.

        Raises:
            RuntimeError: if an entry carries no torrent link.
        """
        feed = await self.parse_feed(self.rss_url)
        if feed is None:
            return []
        feed_entries = []
        for tmp_entry in feed.entries:
            resource_title = tmp_entry.title
            torrent_url = None
            for link in tmp_entry.links:
                if link["type"] == "application/x-bittorrent":
                    torrent_url = link["href"]
            if not torrent_url:
                raise RuntimeError("No torrent url found")
            homepage_url = tmp_entry.link
            published_date = tmp_entry.published
            feed_entry = FeedEntry(
                resource_title=resource_title,
                torrent_url=torrent_url,
                published_date=published_date,
                homepage_url=homepage_url,
            )
            feed_entries.append(feed_entry)
        return feed_entries

    async def extract_resource_info(
        self, entry: FeedEntry, use_extractor: bool = False
    ) -> ResourceInfo:
        """Build a ResourceInfo for `entry`, preferring homepage metadata and
        optionally enriching it via the Extractor (TMDB only as fallback)."""
        homepage_info = await self.parse_homepage(entry.homepage_url)
        resource_info = ResourceInfo(
            anime_name=homepage_info.anime_name,
            resource_title=entry.resource_title,
            torrent_url=entry.torrent_url,
            published_date=entry.published_date,
            fansub=homepage_info.fansub,
        )
        if use_extractor:
            anime_name = resource_info.anime_name
            season = None
            if not anime_name:
                # Can't get anime name from homepage, use tmdb result.
                rtitle_extract_result = await Extractor.analyse_resource_title(
                    resource_info.resource_title, use_tmdb=True
                )
                anime_name = rtitle_extract_result.anime_name
                season = rtitle_extract_result.season
            else:
                # Homepage name is authoritative: derive season from it and
                # parse only episode/quality/language from the title.
                name_extract_result = await Extractor.analyse_anime_name(anime_name)
                rtitle_extract_result = await Extractor.analyse_resource_title(
                    resource_info.resource_title, use_tmdb=False
                )
                anime_name = name_extract_result.anime_name
                season = name_extract_result.season
            resource_info = ResourceInfo(
                anime_name=anime_name,
                season=season,
                episode=rtitle_extract_result.episode,
                quality=rtitle_extract_result.quality,
                languages=rtitle_extract_result.languages,
                fansub=resource_info.fansub,
                resource_title=resource_info.resource_title,
                torrent_url=resource_info.torrent_url,
                published_date=resource_info.published_date,
                version=rtitle_extract_result.version,
            )
        return resource_info
106 |
--------------------------------------------------------------------------------
/tests/core/test_renamer.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import AsyncMock, patch
2 |
3 | import pytest
4 | from loguru import logger
5 |
6 | from alist_mikananirss import AnimeRenamer
7 | from alist_mikananirss.alist import Alist
8 | from alist_mikananirss.websites.models import LanguageType, ResourceInfo, VideoQuality
9 |
10 |
@pytest.fixture(autouse=True)
def reset_anime_renamer():
    # Drop the AnimeRenamer singleton before every test for isolation.
    AnimeRenamer.destroy_instance()
14 |
15 |
@pytest.fixture
def alist_mock():
    # Async mock constrained to the Alist client interface.
    return AsyncMock(spec=Alist)
19 |
20 |
@pytest.fixture
def resource_info():
    # A fully-populated ResourceInfo used by the rename tests.
    return ResourceInfo(
        resource_title="title",
        torrent_url="https://test1.torrent",
        published_date="1",
        anime_name="Test Anime",
        season=1,
        episode=5,
        fansub="TestSub",
        quality=VideoQuality.p1080,
        languages=[
            LanguageType.SIMPLIFIED_CHINESE,
            LanguageType.TRADITIONAL_CHINESE,
            LanguageType.JAPANESE,
        ],
    )
38 |
39 |
@pytest.mark.asyncio
async def test_initialize():
    # initialize() must store the client and rename format on the singleton.
    alist = AsyncMock(spec=Alist)
    rename_format = "{name} - {season}x{episode}"

    AnimeRenamer.initialize(alist, rename_format)

    assert AnimeRenamer().alist_client == alist
    assert AnimeRenamer().rename_format == rename_format
49 |
50 |
@pytest.mark.asyncio
async def test_build_new_name(alist_mock, resource_info):
    # Full-format rename: every placeholder filled from ResourceInfo.
    AnimeRenamer.initialize(
        alist_mock, "{name} S{season:02d}E{episode:02d} {fansub} {quality} {language}"
    )

    old_filepath = "/path/to/old_file.mp4"

    alist_mock.list_dir.return_value = ["file1", "file2", "file3"]

    new_filename = await AnimeRenamer()._build_new_name(old_filepath, resource_info)

    # "简繁日" is the joined short form of the three configured languages.
    expected_filename = "Test Anime S01E05 TestSub 1080p 简繁日.mp4"
    assert new_filename == expected_filename
65 |
66 |
@pytest.mark.asyncio
async def test_build_new_name_ova(alist_mock, resource_info):
    # Season 0 (OVA/special): E03 presumably comes from the count of files
    # already in the target dir (3 mocked entries) rather than episode=5 —
    # confirm against the renamer implementation.
    AnimeRenamer.initialize(alist_mock, "{name} S{season:02d}E{episode:02d}")

    old_filepath = "/path/to/old_file.mp4"
    resource_info.season = 0

    alist_mock.list_dir.return_value = ["file1", "file2", "file3"]

    new_filename = await AnimeRenamer()._build_new_name(old_filepath, resource_info)

    expected_filename = "Test Anime S00E03.mp4"
    assert new_filename == expected_filename
80 |
81 |
@pytest.mark.asyncio
async def test_build_new_name_version(alist_mock, resource_info):
    # A version > 1 should be appended as " v2" before the extension.
    AnimeRenamer.initialize(alist_mock, "{name} S{season:02d}E{episode:02d}")

    old_filepath = "/path/to/old_file.mp4"
    resource_info.version = 2

    new_filename = await AnimeRenamer()._build_new_name(old_filepath, resource_info)

    expected_filename = "Test Anime S01E05 v2.mp4"
    assert new_filename == expected_filename
93 |
94 |
@pytest.mark.asyncio
async def test_rename_success(alist_mock, resource_info):
    # Happy path: rename() delegates to the Alist client exactly once.
    AnimeRenamer.initialize(alist_mock, "{name} S{season:02d}E{episode:02d}")

    old_filepath = "/path/to/old_file.mp4"

    with patch.object(AnimeRenamer, "_build_new_name", return_value="new_file.mp4"):
        await AnimeRenamer.rename(old_filepath, resource_info)

    alist_mock.rename.assert_called_once_with(old_filepath, "new_file.mp4")
105 |
106 |
@pytest.mark.asyncio
async def test_rename_retry(alist_mock, resource_info):
    # Two failures then success: rename() retries until the call succeeds.
    AnimeRenamer.initialize(alist_mock, "{name} S{season:02d}E{episode:02d}")

    old_filepath = "/path/to/old_file.mp4"

    alist_mock.rename.side_effect = [Exception("Error"), Exception("Error"), None]

    with patch.object(AnimeRenamer, "_build_new_name", return_value="new_file.mp4"):
        # Patch sleep so the retry back-off does not slow the test down.
        with patch("asyncio.sleep", new_callable=AsyncMock):
            await AnimeRenamer.rename(old_filepath, resource_info)

    assert alist_mock.rename.call_count == 3
120 |
121 |
@pytest.mark.asyncio
async def test_rename_max_retry_exceeded(alist_mock, resource_info):
    # Persistent failure: rename() stops after 3 attempts and logs one error.
    AnimeRenamer.initialize(alist_mock, "{name} S{season:02d}E{episode:02d}")

    old_filepath = "/path/to/old_file.mp4"

    alist_mock.rename.side_effect = Exception("Error")

    with patch.object(AnimeRenamer, "_build_new_name", return_value="new_file.mp4"):
        with patch.object(logger, "error") as mock_logger_error:
            with patch("asyncio.sleep", new_callable=AsyncMock):
                await AnimeRenamer.rename(old_filepath, resource_info)

    assert alist_mock.rename.call_count == 3
    mock_logger_error.assert_called_once()
137 |
--------------------------------------------------------------------------------
/tests/websites/test_acgrip.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import patch
2 |
3 | import feedparser
4 | import pytest
5 |
6 | from alist_mikananirss.extractor import ( # noqa
7 | Extractor,
8 | ResourceTitleExtractResult,
9 | VideoQuality,
10 | )
11 | from alist_mikananirss.websites.acgrip import AcgRip
12 | from alist_mikananirss.websites.models import FeedEntry, LanguageType, ResourceInfo
13 |
14 |
@pytest.fixture
def acgrip():
    # Parser under test, pointed at the real acg.rip feed URL.
    return AcgRip("https://acg.rip/.xml")
18 |
19 |
@pytest.fixture
def mock_rss_data():
    # One-item acg.rip feed sample.
    # NOTE(review): the XML markup of this literal appears stripped/garbled in
    # this view of the file; the string is kept byte-identical.
    return """

ACG.RIP
ACG.RIP has super cow power
https://acg.rip/.xml
1800
-

[ANi] Hana wa Saku Shura no Gotoku / 群花綻放、彷如修羅 - 02 [1080P][Baha][WEB-DL][AAC AVC][CHT][MP4]
Torrent Info By: ANi API (Auto Generated) Subtitle: HardSub Mediainfo: Resolution: 1080P Video Format: AVC Audio Format: AAC Note: Xunlei, tor...
Tue, 14 Jan 2025 09:35:59 -0800
https://acg.rip/t/321423
https://acg.rip/t/321423



"""
38 |
39 |
@pytest.fixture
def mock_extract_data():
    # A parsed feed entry plus the extractor result the tests stub in for it.
    ret = {
        "mock_entry": FeedEntry(
            resource_title="[ANi] Hana wa Saku Shura no Gotoku / 群花綻放、彷如修羅 - 02 [1080P][Baha][WEB-DL][AAC AVC][CHT][MP4]",
            torrent_url="https://acg.rip/t/321423.torrent",
            homepage_url="https://acg.rip/t/321423",
        ),
        "mock_extract_result": ResourceTitleExtractResult(
            anime_name="群花绽放,仿如修罗",
            season=1,
            episode=2,
            quality=VideoQuality.p1080,
            languages=[LanguageType.TRADITIONAL_CHINESE],
            fansub="ANi",
            version=1,
        ),
    }
    return ret
59 |
60 |
@pytest.mark.asyncio
async def test_get_feed_entries(acgrip, mock_rss_data):
    # The feed item should be mapped onto a FeedEntry with a .torrent URL.
    with patch.object(
        acgrip, "parse_feed", return_value=feedparser.parse(mock_rss_data)
    ):
        result = await acgrip.get_feed_entries()

        assert isinstance(result, list)
        assert len(result) == 1
        entry = result.pop()
        assert isinstance(entry, FeedEntry)
        assert (
            entry.resource_title
            == "[ANi] Hana wa Saku Shura no Gotoku / 群花綻放、彷如修羅 - 02 [1080P][Baha][WEB-DL][AAC AVC][CHT][MP4]"
        )
        assert entry.torrent_url == "https://acg.rip/t/321423.torrent"
        assert entry.homepage_url == "https://acg.rip/t/321423"
78 |
79 |
@pytest.mark.asyncio
async def test_get_feed_entries_real(acgrip):
    # Smoke test: parsing the real RSS feed should not raise (needs network).
    await acgrip.get_feed_entries()
84 |
85 |
@pytest.mark.asyncio
async def test_parse_homepage_error(acgrip, mock_extract_data):
    # Homepage data is optional for acg.rip: a parse failure must not raise.
    with patch.object(acgrip, "parse_homepage", side_effect=Exception):
        await acgrip.extract_resource_info(
            mock_extract_data["mock_entry"], use_extractor=False
        )
93 |
94 |
@pytest.mark.asyncio
async def test_none_fansub(acgrip, mock_extract_data):
    # When the homepage yields no fansub, fall back to the extractor's fansub.

    with patch.object(acgrip, "parse_homepage", return_value=None):
        with patch(
            "alist_mikananirss.extractor.Extractor.analyse_resource_title",
            return_value=mock_extract_data["mock_extract_result"],
        ):
            result = await acgrip.extract_resource_info(
                mock_extract_data["mock_entry"], use_extractor=True
            )

            assert isinstance(result, ResourceInfo)
            assert result.fansub == mock_extract_data["mock_extract_result"].fansub
110 |
111 |
@pytest.mark.asyncio
async def test_homepage_fansub(acgrip, mock_extract_data):
    # When the homepage yields a fansub, it wins over the extractor's fansub.

    mock_extract_result = ResourceTitleExtractResult(
        anime_name="最弱技能《果实大师》",
        season=1,
        episode=3,
        quality=VideoQuality.p1080,
        languages=[LanguageType.JAPANESE],
        fansub="LoliHouse",
        version=1,
    )

    with patch.object(acgrip, "parse_homepage", return_value="homepage_fansub"):
        with patch(
            "alist_mikananirss.extractor.Extractor.analyse_resource_title",
            return_value=mock_extract_result,
        ):
            result = await acgrip.extract_resource_info(
                mock_extract_data["mock_entry"], use_extractor=True
            )

            assert isinstance(result, ResourceInfo)
            assert result.fansub == "homepage_fansub"
137 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/core/remapper.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass
2 | from typing import Optional
3 |
4 | import yaml
5 | from loguru import logger
6 |
7 | from alist_mikananirss.websites.models import ResourceInfo
8 |
9 | from ..utils import Singleton
10 |
11 |
@dataclass
class RemapFrom:
    """Conditions to match for remapping; None fields are ignored."""

    anime_name: Optional[str] = None
    # NOTE(review): annotated str, but compared to ResourceInfo.season which
    # looks numeric elsewhere in the codebase — confirm the intended type.
    season: Optional[str] = None
    fansub: Optional[str] = None
19 |
20 |
@dataclass
class RemapTo:
    """Target values for remapping; None fields leave the resource unchanged."""

    anime_name: Optional[str] = None
    # NOTE(review): annotated str, but assigned to ResourceInfo.season which
    # looks numeric elsewhere — confirm the intended type.
    season: Optional[str] = None
    # Added to the resource's episode number when set.
    episode_offset: Optional[int] = None
28 |
29 |
class Remapper:
    """A single user-defined rule remapping properties of ResourceInfo objects.

    Attributes:
        from_ (RemapFrom): match conditions (the 'from' section of the YAML rule)
        to_ (RemapTo): replacement values (the 'to' section of the YAML rule)

    Example:
        >>> remapper = Remapper(cfg["from"], cfg["to"])
        >>> if remapper.match(resource_info):
        >>>     remapper.remap(resource_info)
    """

    def __init__(self, from_: RemapFrom, to_: RemapTo):
        self.from_ = from_
        self.to_ = to_

    def match(self, resource_info: ResourceInfo) -> bool:
        """Return True when every condition set in `from_` equals the resource's value."""
        conditions = (
            (self.from_.anime_name, resource_info.anime_name),
            (self.from_.season, resource_info.season),
            (self.from_.fansub, resource_info.fansub),
        )
        # Unset (falsy) conditions are ignored; all set ones must match.
        return all(expected == actual for expected, actual in conditions if expected)

    def remap(self, resource_info: ResourceInfo):
        """Overwrite the resource's fields in place per `to_`, logging each change."""
        if self.to_.anime_name:
            logger.info(
                f'Remap {resource_info.resource_title}\'s anime_name from "{resource_info.anime_name}" to "{self.to_.anime_name}"'
            )
            resource_info.anime_name = self.to_.anime_name

        if self.to_.season:
            logger.info(
                f'Remap {resource_info.resource_title}\'s season from "{resource_info.season}" to "{self.to_.season}"'
            )
            resource_info.season = self.to_.season

        if self.to_.episode_offset:
            logger.info(
                f'Remap {resource_info.resource_title}\'s episode_offset from "{resource_info.episode}" to "{resource_info.episode + self.to_.episode_offset}"'
            )
            resource_info.episode += self.to_.episode_offset
77 |
78 |
class RemapperManager(metaclass=Singleton):
    """Singleton registry of Remapper rules.

    Example:
        >>> RemapperManager.add_remapper(cfg["from"], cfg["to"])
        >>> remapper = RemapperManager.match(resource_info)
        >>> if remapper:
        >>>     RemapperManager.remap(remapper, resource_info)
    """

    def __init__(self):
        # Registered rules, checked in insertion order by match().
        self._remappers = []

    @classmethod
    def load_remappers_from_cfg(cls, cfg_path: str):
        """Load remap rules from a YAML file and register them.

        A file with no 'remap' section (or an empty file) registers nothing
        instead of raising TypeError on iterating None.
        """
        with open(cfg_path, "r", encoding="utf-8") as f:
            yaml_data = yaml.safe_load(f)
        remapper_cfgs = (yaml_data or {}).get("remap") or []
        for cfg in remapper_cfgs:
            from_ = RemapFrom(
                cfg["from"].get("anime_name", None),
                cfg["from"].get("season", None),
                cfg["from"].get("fansub", None),
            )
            # A missing/0 offset is normalized to None ("no offset").
            episode_offset = cfg["to"].get("episode_offset", None)
            episode_offset = int(episode_offset) if episode_offset else None
            to_ = RemapTo(
                cfg["to"].get("anime_name", None),
                cfg["to"].get("season", None),
                episode_offset,
            )
            cls.add_remapper(from_, to_)

    @classmethod
    def add_remapper(cls, from_: RemapFrom, to_: RemapTo) -> Remapper:
        """Create a Remapper from the given rule and register it."""
        instance = cls()
        remapper = Remapper(from_, to_)
        instance._remappers.append(remapper)
        return remapper

    @classmethod
    def remove_remapper(cls, remapper: Remapper):
        """Unregister a rule; no-op when it is not registered."""
        instance = cls()
        if remapper in instance._remappers:
            instance._remappers.remove(remapper)

    @classmethod
    def clear_remappers(cls):
        """Unregister all rules."""
        instance = cls()
        instance._remappers.clear()

    @classmethod
    def get_all_remappers(cls) -> list:
        """Return the live list of registered rules (not a copy)."""
        instance = cls()
        return instance._remappers

    @classmethod
    def match(cls, resource_info: ResourceInfo) -> Remapper | None:
        """Return the first registered rule matching the resource, else None."""
        instance = cls()
        for remapper in instance._remappers:
            if remapper.match(resource_info):
                return remapper
        return None

    @classmethod
    def remap(cls, remapper: Remapper, resource_info: ResourceInfo):
        """Apply the given rule to the resource in place."""
        remapper.remap(resource_info)
145 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/common/config/basic.py:
--------------------------------------------------------------------------------
1 | from collections import defaultdict
2 | from typing import Annotated, Dict, List
3 |
4 | from pydantic import BaseModel, Field, HttpUrl, field_validator, model_validator
5 |
6 | from alist_mikananirss.alist import AlistDownloaderType
7 |
8 | from .bot_assistant import TelegramBotAssistantConfig
9 | from .extractor import ExtractorConfig
10 | from .notifier import PushPlusConfig, TelegramConfig
11 | from .remap import RemapConfig
12 |
13 |
class CommonConfig(BaseModel):
    """Global settings shared across the application."""

    # Seconds between RSS polls.
    interval_time: int = Field(
        default=300, ge=0, description="Interval time must be non-negative"
    )
    # Mapping of scheme -> proxy URL handed to HTTP clients.
    proxies: Dict[str, str] = Field(
        default_factory=dict, description="Proxies for requests"
    )
19 | default_factory=dict, description="Proxies for requests"
20 | )
21 |
22 |
class AlistConfig(BaseModel):
    """Connection and download settings for the Alist server."""

    base_url: str = Field(..., description="Base URL of Alist")
    token: str = Field(..., description="Token for Alist API")
    downloader: AlistDownloaderType = Field(
        default=AlistDownloaderType.QBIT, description="Alist Downloader type"
    )
    download_path: str = Field(..., description="Download path for Alist Downloader")

    @field_validator("base_url")
    @classmethod
    def validate_url(cls, url: str) -> str:
        """Validate and normalize base_url via pydantic's HttpUrl type.

        Raises:
            ValueError: when the URL is not a valid HTTP(S) URL.
        """
        try:
            parsed_url = HttpUrl(url)
            return str(parsed_url)
        except ValueError as e:
            # Chain the underlying validation error instead of discarding it.
            raise ValueError(f"Invalid URL: {url}") from e
40 |
41 |
class MikanConfig(BaseModel):
    """Subscription settings: feed URLs plus regex-based filters."""

    subscribe_url: List[str] = Field(min_length=1)
    regex_pattern: Dict[str, str] = Field(
        default_factory=dict,
        description="Regex pattern for filter",
    )

    filters: List[str] = Field(default_factory=list, description="Filters for rss")

    @field_validator("subscribe_url")
    @classmethod
    def validate_url(cls, url: List[str]) -> List[str]:
        """Reject the list if any entry is not a valid HTTP(S) URL."""
        for item in url:
            HttpUrl(item)  # raises ValueError on an invalid URL
        return url

    @field_validator("regex_pattern")
    @classmethod
    def merge_regex_patterns(cls, patterns: Dict[str, str]) -> Dict[str, str]:
        """Overlay user patterns onto the built-in defaults (user wins on conflict)."""
        default_patterns = {
            "简体": "(简体|简中|简日|CHS)",
            "繁体": "(繁体|繁中|繁日|CHT|Baha)",
            "1080p": "(X1080|1080P)",
            "非合集": "^(?!.*(\\d{2}-\\d{2}|合集)).*",
        }
        return {**default_patterns, **patterns}
69 |
70 |
class RenameConfig(BaseModel):
    """Settings for the automatic-rename feature."""

    enable: bool = Field(default=False)
    extractor: ExtractorConfig | None = Field(default=None)
    rename_format: str = Field(
        "{name} S{season:02d}E{episode:02d}", description="Rename format"
    )
    remap: RemapConfig = Field(
        default_factory=RemapConfig, description="Remap configuration"
    )

    @model_validator(mode="after")
    def validate_rename_config(self):
        """Renaming requires an extractor; reject enable=True without one."""
        if self.enable and not self.extractor:
            raise ValueError("Rename is enabled but no extractor config provided")
        return self

    @field_validator("rename_format")
    @classmethod
    def validate_rename_format(cls, rename_format: str) -> str:
        """Reject format strings that reference unknown placeholder keys.

        Formats the template against a defaultdict of known keys: looking up
        an unknown key materializes it into the dict with the sentinel value
        "undefined", which lets the offending keys be listed afterwards.
        """
        if not rename_format:
            return rename_format

        all_key_test_data = {
            "name": "test",
            "season": 1,
            "episode": 1,
            "fansub": "fansub",
            "quality": "1080p",
            "language": "简体中文",
        }
        safe_dict = defaultdict(lambda: "undefined", all_key_test_data)
        res = rename_format.format_map(safe_dict)
        if "undefined" in res:
            # format_map inserted an "undefined" entry for every unknown key.
            unknown_keys = [
                key for key, value in safe_dict.items() if value == "undefined"
            ]
            raise ValueError(f"Error keys in rename format: {', '.join(unknown_keys)}")
        return rename_format
109 |
110 |
# Discriminated union: the 'bot_type' field selects the concrete bot config.
NotificationBotConfig = Annotated[
    TelegramConfig | PushPlusConfig, Field(discriminator="bot_type")
]
114 |
115 |
class NotificationConfig(BaseModel):
    """Settings for the notification subsystem."""

    enable: bool = Field(default=False)
    # Seconds between batched notification sends.
    interval_time: int = Field(
        default=300, ge=0, description="Interval time must be non-negative"
    )
    bots: List[NotificationBotConfig] = Field(default_factory=list)

    @model_validator(mode="after")
    def validate_notification_config(self):
        """Require at least one bot config when notifications are enabled."""
        # Idiomatic emptiness test (was `not len(self.bots)`).
        if self.enable and not self.bots:
            raise ValueError("Notification is enabled but no notifier config provided")
        return self
128 |
129 |
class BotAssistantConfig(BaseModel):
    """Settings for the interactive bot assistant."""

    enable: bool = Field(default=False)
    bots: List[TelegramBotAssistantConfig] = Field(default_factory=list)

    @model_validator(mode="after")
    def validate_bot_assistant_config(self):
        """Require at least one bot config when the assistant is enabled."""
        # Idiomatic emptiness test (was `len(self.bots) == 0`), consistent
        # with the other validators in this module.
        if self.enable and not self.bots:
            raise ValueError("Bot assistant is enabled but no bot config provided")
        return self
139 |
140 |
class DevConfig(BaseModel):
    """Developer/debug settings."""

    # Log level name; restricted to the standard severity levels.
    log_level: str = Field(
        default="INFO", pattern="^(DEBUG|INFO|WARNING|ERROR|CRITICAL)$"
    )
145 |
--------------------------------------------------------------------------------
/tests/core/test_rss_monitor.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from unittest.mock import AsyncMock, MagicMock, call, patch
3 |
4 | import pytest
5 |
6 | from alist_mikananirss import RegexFilter, RssMonitor, SubscribeDatabase
7 | from alist_mikananirss.websites.models import FeedEntry, ResourceInfo
8 |
9 |
@pytest.fixture
def mock_website():
    # Fully async mock standing in for a Website parser.
    return AsyncMock()
13 |
14 |
@pytest.fixture
def mock_filter():
    # Mock constrained to the RegexFilter interface.
    return MagicMock(spec=RegexFilter)
18 |
19 |
@pytest.fixture
def mock_db():
    # Mock constrained to the SubscribeDatabase interface.
    return MagicMock(spec=SubscribeDatabase)
23 |
24 |
@pytest.mark.asyncio
async def test_rss_monitor_initialization(mock_db):
    # Constructor should create one website parser per URL and keep defaults.
    urls = ["https://example.com/rss1", "https://example.com/rss2"]
    filter_mock = MagicMock(spec=RegexFilter)

    with patch(
        "alist_mikananirss.websites.WebsiteFactory.get_website_parser"
    ) as mock_factory:
        mock_factory.side_effect = [MagicMock(), MagicMock()]
        monitor = RssMonitor(urls, filter_mock, mock_db)

    assert monitor.subscribe_urls == urls
    assert len(monitor.websites) == 2
    assert monitor.filter == filter_mock
    assert not monitor.use_extractor
    assert isinstance(monitor.db, SubscribeDatabase)
    assert monitor.interval_time == 300
42 |
43 |
@pytest.mark.asyncio
async def test_set_interval_time(mock_db):
    """set_interval_time() should overwrite the polling interval."""
    with patch("alist_mikananirss.websites.WebsiteFactory.get_website_parser"):
        rss_monitor = RssMonitor(
            ["https://example.com/rss"], MagicMock(spec=RegexFilter), mock_db
        )

        rss_monitor.set_interval_time(600)

        assert rss_monitor.interval_time == 600
52 |
53 |
@pytest.mark.asyncio
async def test_get_new_resources(mock_website, mock_filter, mock_db):
    # Only the entry that passes the filter and is absent from the DB should
    # be extracted and returned.
    feed_entries = [
        FeedEntry("Resource 1", "https://example.com/torrent1"),
        FeedEntry("Resource 2", "https://example.com/torrent2"),
    ]
    mock_website.get_feed_entries.return_value = feed_entries
    mock_filter.filt_single.side_effect = [True, False]
    mock_db.is_resource_title_exist.return_value = False

    resource_info = ResourceInfo("Resource 1", "https://example.com/torrent1")
    mock_website.extract_resource_info.return_value = resource_info

    monitor = RssMonitor(["https://mikanani.me/rss"], mock_filter, mock_db)
    monitor.db = mock_db

    new_resources = await monitor.get_new_resources([mock_website], mock_filter)

    assert len(new_resources) == 1
    assert new_resources[0] == resource_info
    mock_website.get_feed_entries.assert_called_once()
    mock_filter.filt_single.assert_has_calls([call("Resource 1"), call("Resource 2")])
    mock_db.is_resource_title_exist.assert_called_once_with("Resource 1")
    mock_website.extract_resource_info.assert_called_once_with(feed_entries[0], False)
78 |
79 |
@pytest.mark.asyncio
async def test_run(mock_website, mock_filter, mock_db):
    # run() should loop: fetch resources, enqueue downloads, then sleep.
    with (
        patch(
            "alist_mikananirss.websites.WebsiteFactory.get_website_parser",
            return_value=mock_website,
        ),
        patch("asyncio.sleep", new_callable=AsyncMock) as mock_sleep,
        patch(
            "alist_mikananirss.core.download_manager.DownloadManager.add_download_tasks",
            new_callable=AsyncMock,
        ) as mock_add_tasks,
    ):

        monitor = RssMonitor("https://mikanani.me/rss", mock_filter, mock_db)
        monitor.db = mock_db
        monitor.get_new_resources = AsyncMock(
            return_value=[ResourceInfo("New Resource", "https://example.com/new")]
        )

        # Make run() exit after its second loop iteration.
        mock_sleep.side_effect = [None, asyncio.CancelledError]

        with pytest.raises(asyncio.CancelledError):
            await monitor.run()

        assert monitor.get_new_resources.call_count == 2
        assert mock_add_tasks.call_count == 2
        mock_sleep.assert_called_with(300)
109 |
110 |
@pytest.mark.asyncio
async def test_get_new_resources_with_exceptions(mock_website, mock_filter, mock_db):
    # One failing extract_resource_info must not discard the other results.
    feed_entries = [
        FeedEntry("Resource 1", "https://mikanani.me/rss/torrent1"),
        FeedEntry("Resource 2", "https://mikanani.me/rss/torrent2"),
    ]
    mock_website.get_feed_entries.return_value = feed_entries
    mock_filter.filt_single.side_effect = [True, True]
    mock_db.is_resource_title_exist.return_value = False
    # extract_resource_info raises for the second entry.
    mock_website.extract_resource_info.side_effect = [
        ResourceInfo("Resource 1", "https://example.com/torrent1"),
        Exception("Network error"),
    ]

    monitor = RssMonitor(["https://mikanani.me/rss/rss"], mock_filter, mock_db)
    monitor.db = mock_db

    new_resources = await monitor.get_new_resources([mock_website], mock_filter)

    assert len(new_resources) == 1
    mock_website.extract_resource_info.assert_has_calls(
        [call(feed_entries[0], False), call(feed_entries[1], False)]
    )
135 |
136 |
@pytest.mark.asyncio
async def test_get_new_resources_with_non_feed(mock_filter, mock_db):
    # No websites configured -> no resources, no errors.
    monitor = RssMonitor(["https://mikanani.me/rss"], mock_filter, mock_db)
    monitor.db = mock_db

    new_resources = await monitor.get_new_resources([], mock_filter)

    assert len(new_resources) == 0
145 |
--------------------------------------------------------------------------------
/tests/websites/test_mikan.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import patch
2 |
3 | import feedparser
4 | import pytest
5 |
6 | from alist_mikananirss.extractor import (
7 | AnimeNameExtractResult,
8 | ResourceTitleExtractResult,
9 | VideoQuality,
10 | )
11 | from alist_mikananirss.websites.mikan import Mikan, MikanHomePageInfo
12 | from alist_mikananirss.websites.models import FeedEntry, LanguageType, ResourceInfo
13 |
14 |
@pytest.fixture
def mikan():
    # Parser under test, pointed at a real Mikan bangumi feed URL.
    return Mikan("https://mikanani.me/RSS/Bangumi?bangumiId=3519&subgroupid=382")
18 |
19 |
@pytest.fixture
def mock_rss_data():
    # One-item Mikan feed sample.
    # NOTE(review): the XML markup of this literal appears stripped/garbled in
    # this view of the file; the string is kept byte-identical.
    return """

Mikan Project - GIRLS BAND CRY
https://mikanani.me/RSS/Bangumi?bangumiId=3298&subgroupid=382
Mikan Project - GIRLS BAND CRY
-

【喵萌Production】★04月新番★[GIRLS BAND CRY][01-13][1080p][繁日双语][招募翻译时轴]
https://mikanani.me/Home/Episode/a19d5da34e2ec205bddd9c6935ab579ff37da7d7
【喵萌Production】★04月新番★[GIRLS BAND CRY][01-13][1080p][繁日双语][招募翻译时轴]
【喵萌Production】★04月新番★[GIRLS BAND CRY][01-13][1080p][繁日双语][招募翻译时轴][8.8GB]

https://mikanani.me/Home/Episode/a19d5da34e2ec205bddd9c6935ab579ff37da7d7
9448928256
2024-07-17T19:09:00




"""
41 |
42 |
@pytest.mark.asyncio
async def test_get_feed_entries(mikan, mock_rss_data):
    # The feed item should be mapped onto a FeedEntry with all fields set.
    with patch.object(
        mikan, "parse_feed", return_value=feedparser.parse(mock_rss_data)
    ):
        result = await mikan.get_feed_entries()

        assert isinstance(result, list)
        assert len(result) == 1
        entry = result.pop()
        assert isinstance(entry, FeedEntry)
        assert (
            entry.resource_title
            == "【喵萌Production】★04月新番★[GIRLS BAND CRY][01-13][1080p][繁日双语][招募翻译时轴]"
        )
        assert (
            entry.torrent_url
            == "https://mikanani.me/Download/20240717/a19d5da34e2ec205bddd9c6935ab579ff37da7d7.torrent"
        )
        assert entry.published_date == "2024-07-17T19:09:00"
        assert (
            entry.homepage_url
            == "https://mikanani.me/Home/Episode/a19d5da34e2ec205bddd9c6935ab579ff37da7d7"
        )
67 |
68 |
@pytest.mark.asyncio
async def test_get_feed_entries_real(mikan):
    # Smoke test: parsing the real Mikan RSS feed should not raise (needs network).
    await mikan.get_feed_entries()
73 |
74 |
@pytest.mark.asyncio
async def test_parse_homepage_error(mikan):
    # For Mikan, anime name/fansub from the homepage is a hard requirement:
    # a failure while parsing the detail page must propagate as an exception.
    mock_entry = FeedEntry(
        resource_title="【喵萌Production】★04月新番★[GIRLS BAND CRY][01-13][1080p][繁日双语][招募翻译时轴]",
        torrent_url="https://mikanani.me/Download/20240717/a19d5da34e2ec205bddd9c6935ab579ff37da7d7.torrent",
        published_date="2024-07-17T19:09:00",
        homepage_url="https://mikanani.me/Home/Episode/a19d5da34e2ec205bddd9c6935ab579ff37da7d7",
    )
    with patch.object(mikan, "parse_homepage", side_effect=Exception):
        with pytest.raises(Exception):
            await mikan.extract_resource_info(mock_entry, use_extractor=False)
87 |
88 |
@pytest.mark.asyncio
async def test_extract_resource_info(mikan):
    # Mikan must prefer the homepage-provided anime name/fansub over the
    # extractor's values (tmdb_name / gpt_fansub below must NOT win).
    mock_entry = FeedEntry(
        resource_title="【喵萌Production】★04月新番★[GIRLS BAND CRY][01][1080p][繁日双语][招募翻译时轴]",
        torrent_url="https://mikanani.me/Download/20240717/a19d5da34e2ec205bddd9c6935ab579ff37da7d7.torrent",
        published_date="2024-07-17T19:09:00",
        homepage_url="https://mikanani.me/Home/Episode/a19d5da34e2ec205bddd9c6935ab579ff37da7d7",
    )

    mock_homepage_info = MikanHomePageInfo(
        anime_name="GIRLS BAND CRY", fansub="喵萌奶茶屋"
    )

    mock_animename_extract_result = AnimeNameExtractResult(
        anime_name="GIRLS BAND CRY", season=1
    )

    mock_extract_result = ResourceTitleExtractResult(
        anime_name="tmdb_name",
        season=1,
        episode=1,
        quality=VideoQuality.p1080,
        languages=[LanguageType.SIMPLIFIED_CHINESE, LanguageType.JAPANESE],
        fansub="gpt_fansub",
        version=1,
    )

    with patch.object(mikan, "parse_homepage", return_value=mock_homepage_info):
        with patch(
            "alist_mikananirss.extractor.Extractor.analyse_resource_title",
            return_value=mock_extract_result,
        ):
            with patch(
                "alist_mikananirss.extractor.Extractor.analyse_anime_name",
                return_value=mock_animename_extract_result,
            ):
                result = await mikan.extract_resource_info(
                    mock_entry, use_extractor=True
                )

                assert isinstance(result, ResourceInfo)
                assert result.anime_name == mock_homepage_info.anime_name
                assert result.resource_title == mock_entry.resource_title
                assert result.torrent_url == mock_entry.torrent_url
                assert result.season == mock_extract_result.season
                assert result.episode == mock_extract_result.episode
                assert result.fansub == mock_homepage_info.fansub
137 |
--------------------------------------------------------------------------------
/tests/websites/test_defaultwebsite.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import patch
2 |
3 | import feedparser
4 | import pytest
5 | from loguru import logger
6 |
7 | from alist_mikananirss.websites.default import DefaultWebsite
8 | from alist_mikananirss.websites.models import FeedEntry
9 |
10 |
@pytest.fixture
def default_website():
    # Generic fallback parser under test.
    return DefaultWebsite("https://example.com/rss")
14 |
15 |
@pytest.fixture
def mock_nyaa_rss():
    # One-item nyaa.si feed sample (torrent-link style feed).
    # NOTE(review): the XML markup of this literal appears stripped/garbled in
    # this view of the file; the string is kept byte-identical.
    return """

Nyaa - Home - Torrent File RSS
RSS Feed for Home
https://nyaa.si/

-

[FLE] Dr. Stone - S01 (BD 1080p HEVC x265 Opus) [Dual Audio] | Dr Stone Season 1
https://nyaa.si/download/1921713.torrent
https://nyaa.si/view/1921713
Wed, 15 Jan 2025 08:08:41 -0000
24
443
38
a1cdf8f9edf70d074bb4cd22e6f122bd5ad5aa3b
1_2
Anime - English-translated
37.3 GiB
0
No
No

#1921713 | [FLE] Dr. Stone - S01 (BD 1080p HEVC x265 Opus) [Dual Audio] | Dr Stone Season 1 | 37.3 GiB | Anime - English-translated | A1CDF8F9EDF70D074BB4CD22E6F122BD5AD5AA3B ]]>



"""
45 |
46 |
@pytest.fixture
def mock_aniapi_rss():
    # One-item ANi API feed sample (direct video-link style feed).
    # NOTE(review): the XML markup of this literal appears stripped/garbled in
    # this view of the file; the string is kept byte-identical.
    return """







https://open.ani.rip
RSS By ANi API
Tue, 14 Jan 2025 17:35:55 GMT

-



https://resources.ani.rip/2024-10/%5BANi%5D%20%E9%9D%92%E4%B9%8B%E5%A3%AC%E7%94%9F%E6%B5%AA%20-%2013%20%5B1080P%5D%5BBaha%5D%5BWEB-DL%5D%5BAAC%20AVC%5D%5BCHT%5D.mp4?d=true
https://resources.ani.rip/2024-10/%5BANi%5D%20%E9%9D%92%E4%B9%8B%E5%A3%AC%E7%94%9F%E6%B5%AA%20-%2013%20%5B1080P%5D%5BBaha%5D%5BWEB-DL%5D%5BAAC%20AVC%5D%5BCHT%5D.mp4?d=true
Sat, 11 Jan 2025 13:34:09 GMT
286.2 MB
"""
70 |
71 |
@pytest.fixture
def mock_unsupported_rss():
    """RSS sample with neither a torrent link nor a direct video link.

    NOTE(review): the XML markup in this snapshot looks stripped — verify
    against the original fixture before relying on exact bytes.
    """
    return """


Test RSS Feed
http://example.com
Test feed with unsupported format
-
Test Item
Description without torrent link or video link
http://example.com/article
Wed, 15 Jan 2025 08:08:41 -0000


"""
88 |
89 |
@pytest.mark.asyncio
async def test_nyaa(default_website, mock_nyaa_rss):
    """A nyaa.si entry exposes its title and .torrent link as a FeedEntry."""
    parsed_feed = feedparser.parse(mock_nyaa_rss)
    with patch.object(default_website, "parse_feed", return_value=parsed_feed):
        entries = await default_website.get_feed_entries()

    assert isinstance(entries, list)
    assert len(entries) == 1
    entry = entries.pop()
    assert isinstance(entry, FeedEntry)
    expected_title = (
        "[FLE] Dr. Stone - S01 (BD 1080p HEVC x265 Opus) [Dual Audio]"
        " | Dr Stone Season 1"
    )
    assert entry.resource_title == expected_title
    assert entry.torrent_url == "https://nyaa.si/download/1921713.torrent"
106 |
107 |
@pytest.mark.asyncio
async def test_aniapi(default_website, mock_aniapi_rss):
    """An ANi API entry uses the direct video URL as its download link."""
    parsed_feed = feedparser.parse(mock_aniapi_rss)
    with patch.object(default_website, "parse_feed", return_value=parsed_feed):
        entries = await default_website.get_feed_entries()

    assert isinstance(entries, list)
    assert len(entries) == 1
    entry = entries.pop()
    assert isinstance(entry, FeedEntry)
    assert entry.resource_title == (
        "[ANi] 青之壬生浪 - 13 [1080P][Baha][WEB-DL][AAC AVC][CHT].mp4"
    )
    assert entry.torrent_url == (
        "https://resources.ani.rip/2024-10/%5BANi%5D%20%E9%9D%92%E4%B9%8B%E5%A3%AC%E7%94%9F%E6%B5%AA%20-%2013%20%5B1080P%5D%5BBaha%5D%5BWEB-DL%5D%5BAAC%20AVC%5D%5BCHT%5D.mp4?d=true"
    )
127 |
128 |
@pytest.mark.asyncio
async def test_unsupported_rss(default_website, mock_unsupported_rss):
    """Entries without torrent/video links yield no results and log an error."""
    parsed_feed = feedparser.parse(mock_unsupported_rss)
    with patch.object(default_website, "parse_feed", return_value=parsed_feed):
        with patch.object(logger, "error") as mock_logger_error:
            result = await default_website.get_feed_entries()

    mock_logger_error.assert_called_once()
    assert isinstance(result, list)
    assert not result
142 |
--------------------------------------------------------------------------------
/tests/common/test_database.py:
--------------------------------------------------------------------------------
1 | import os
2 | import uuid
3 |
4 | import pytest
5 | import pytest_asyncio
6 |
7 | from alist_mikananirss.common.database import SubscribeDatabase, db_dirpath
8 | from alist_mikananirss.websites.models import LanguageType, ResourceInfo
9 |
10 |
@pytest_asyncio.fixture
async def test_db():
    """Yield a SubscribeDatabase backed by a unique file; delete it afterwards."""
    # Unique database file name per test to avoid cross-test interference
    unique_db_name = f"test_db_{uuid.uuid4()}.db"
    db = await SubscribeDatabase.create(unique_db_name)
    db_filepath = os.path.join(db_dirpath, unique_db_name)

    yield db

    # Cleanup
    await db.close()
    if os.path.exists(db_filepath):
        try:
            os.remove(db_filepath)
        except PermissionError:
            import time

            # The OS may briefly keep the handle open (e.g. Windows); retry once
            time.sleep(0.1)
            os.remove(db_filepath)
30 |
31 |
@pytest.mark.asyncio
async def test_create_table(test_db):
    """A freshly created database contains both expected tables."""
    cursor = await test_db.db.execute(
        "SELECT name FROM sqlite_master WHERE type='table';"
    )
    table_rows = await cursor.fetchall()
    for expected in (("resource_data",), ("db_version",)):
        assert expected in table_rows
    await test_db.close()
41 |
42 |
@pytest.mark.asyncio
async def test_insert_and_check_existence(test_db):
    """Inserting a ResourceInfo makes its title discoverable."""
    info = ResourceInfo(
        resource_title="Test Anime",
        torrent_url="https://example.com/test.torrent",
        published_date="2023-05-20T12:00:00",
        anime_name="Test Anime",
        season=1,
        episode=1,
        fansub="TestSub",
        quality="1080p",
        languages=[LanguageType.SIMPLIFIED_CHINESE],
    )
    await test_db.insert_resource_info(info)

    assert await test_db.is_resource_title_exist("Test Anime")
60 |
61 |
@pytest.mark.asyncio
async def test_delete_by_id(test_db):
    """A row deleted by primary key is no longer reported as existing."""
    title = "Test Delete"
    await test_db.insert(
        title,
        "https://example.com/delete.torrent",
        "2023-05-20T12:00:00",
        "2023-05-20T12:01:00",
        "Test Anime",
    )

    # Look up the auto-assigned primary key of the inserted row
    await test_db.connect()
    cursor = await test_db.db.execute(
        "SELECT id FROM resource_data WHERE resource_title=?", (title,)
    )
    row = await cursor.fetchone()
    await test_db.close()

    await test_db.delete_by_id(row[0])

    assert not await test_db.is_resource_title_exist(title)
83 |
84 |
@pytest.mark.asyncio
async def test_delete_by_torrent_url(test_db):
    """Deleting by torrent URL removes the matching record."""
    torrent_url = "https://example.com/delete_url.torrent"
    await test_db.insert(
        "Test Delete URL",
        torrent_url,
        "2023-05-20T12:00:00",
        "2023-05-20T12:01:00",
        "Test Anime",
    )

    await test_db.delete_by_torrent_url(torrent_url)

    assert not await test_db.is_resource_title_exist("Test Delete URL")
99 |
100 |
@pytest.mark.asyncio
async def test_delete_by_resource_title(test_db):
    """Deleting by resource title removes the matching record."""
    resource_title = "Test Delete Title"
    await test_db.insert(
        resource_title,
        "https://example.com/delete_title.torrent",
        "2023-05-20T12:00:00",
        "2023-05-20T12:01:00",
        "Test Anime",
    )

    await test_db.delete_by_resource_title(resource_title)

    assert not await test_db.is_resource_title_exist(resource_title)
115 |
116 |
@pytest.mark.asyncio
async def test_upgrade_database(test_db):
    """Recreate the legacy schema and verify _upgrade_database migrates it."""
    await test_db.connect()
    # Drop the tables created by the fixture so the old layout can be installed
    await test_db.db.execute("DROP TABLE IF EXISTS resource_data")
    await test_db.db.execute("DROP TABLE IF EXISTS db_version")
    # Legacy v0 schema: 'title'/'link' columns, no db_version table
    await test_db.db.execute(
        """
        CREATE TABLE resource_data (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            title TEXT NOT NULL,
            link TEXT UNIQUE,
            published_date TEXT,
            downloaded_date TEXT,
            anime_name TEXT
        )
        """
    )
    await test_db.db.commit()
    await test_db._upgrade_database()
    # After migration the version table must exist and report schema version 1
    cursor = await test_db.db.execute("SELECT version FROM db_version")
    version = await cursor.fetchone()
    version = version[0]
    assert version == 1

    # The renamed/extended columns must all be present in the new layout
    cursor = await test_db.db.execute("PRAGMA table_info(resource_data)")
    columns = [info[1] for info in await cursor.fetchall()]
    expected_columns = [
        "id",
        "resource_title",
        "torrent_url",
        "published_date",
        "downloaded_date",
        "anime_name",
        "season",
        "episode",
        "fansub",
        "quality",
        "language",
    ]
    assert all(column in columns for column in expected_columns)
    await test_db.close()
158 |
159 |
@pytest.mark.asyncio
async def test_insert_duplicate(test_db):
    """Inserting the same record twice keeps a single row."""
    record = (
        "Duplicate Test",
        "https://example.com/duplicate.torrent",
        "2023-05-20T12:00:00",
        "2023-05-20T12:01:00",
        "Test Anime",
    )
    await test_db.insert(*record)
    await test_db.insert(*record)

    await test_db.connect()
    cursor = await test_db.db.execute(
        "SELECT COUNT(*) FROM resource_data WHERE resource_title=?", ("Duplicate Test",)
    )
    row = await cursor.fetchone()
    await test_db.close()
    assert row[0] == 1
186 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/main.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import asyncio
3 | import os
4 | import sys
5 |
6 | from loguru import logger
7 |
8 | from alist_mikananirss import (
9 | AnimeRenamer,
10 | AppConfig,
11 | BotAssistant,
12 | ConfigManager,
13 | DownloadManager,
14 | NotificationSender,
15 | RegexFilter,
16 | RemapperManager,
17 | RssMonitor,
18 | SubscribeDatabase,
19 | )
20 | from alist_mikananirss.alist import Alist
21 | from alist_mikananirss.bot import BotFactory, NotificationBot
22 | from alist_mikananirss.extractor import Extractor, LLMExtractor, create_llm_provider
23 |
24 |
def init_logging(cfg: AppConfig):
    """Configure loguru sinks: a daily-rotated file under log/ plus stderr."""
    level = cfg.dev.log_level
    logger.remove()

    # The file sink needs its directory to exist up front
    os.makedirs("log", exist_ok=True)

    # loguru expands {time:...} in the sink path at rotation time
    logger.add(
        "log/alist_mikanrss_{time:YYYY-MM-DD}.log",
        rotation="00:00",
        retention="7 days",
        level=level,
        mode="a",
    )
    logger.add(sys.stderr, level=level)
38 |
39 |
def init_proxies(cfg: AppConfig):
    """Export configured HTTP(S) proxies as environment variables."""
    proxies = cfg.common.proxies
    if not proxies:
        return
    # Clients with trust_env enabled (e.g. aiohttp) pick these up
    for scheme, env_name in (("http", "HTTP_PROXY"), ("https", "HTTPS_PROXY")):
        if scheme in proxies:
            os.environ[env_name] = proxies[scheme]
48 |
49 |
def init_notification(cfg: AppConfig):
    """Build notification bots from config and start the NotificationSender.

    Disables notifications in-place when no valid bot could be created.
    """
    if not cfg.notification.enable:
        return

    bots = []
    for bot_cfg in cfg.notification.bots:
        kwargs = bot_cfg.model_dump(exclude={"bot_type"})
        try:
            raw_bot = BotFactory.create_bot(bot_cfg.bot_type, **kwargs)
        except ValueError as e:
            logger.error(f"Failed to create notification bot: {e}")
        else:
            bots.append(NotificationBot(raw_bot))

    if not bots:
        logger.warning(
            "Notification enabled but no valid bots were configured or created."
        )
        cfg.notification.enable = False
        return

    NotificationSender.initialize(bots, cfg.notification.interval_time)
72 |
73 |
def _version_tuple(version: str) -> tuple[int, ...]:
    """Turn a dotted version string ('3.42.0') into a tuple of ints.

    Non-digit characters inside a component are dropped so suffixes such as
    '0-beta' do not break the numeric comparison; empty components become 0.
    """
    parts = []
    for component in version.split("."):
        digits = "".join(ch for ch in component if ch.isdigit())
        parts.append(int(digits) if digits else 0)
    return tuple(parts)


async def run():
    """Application entry coroutine: wire up all components from the config
    file, then run the monitor/notification/bot tasks until cancelled.

    Raises:
        ValueError: if the connected Alist server is older than 3.42.0.
    """
    parser = argparse.ArgumentParser(description="Alist Mikanani RSS")
    parser.add_argument(
        "--config",
        default="config.yaml",
        help="Path to the configuration file",
    )

    args = parser.parse_args()

    cfg_manager = ConfigManager()
    cfg = cfg_manager.load_config(args.config)
    # logger
    init_logging(cfg)

    logger.info("Loaded config Successfully")
    logger.info(f"Config: \n{cfg}")

    # proxy
    init_proxies(cfg)

    # database
    db = await SubscribeDatabase.create()

    # alist
    alist_client = Alist(cfg.alist.base_url, cfg.alist.token, cfg.alist.downloader)
    alist_ver = await alist_client.get_alist_ver()
    # Compare versions numerically; the previous plain string comparison
    # mis-ordered versions like "3.9.0" vs "3.42.0".
    if _version_tuple(alist_ver) < _version_tuple("3.42.0"):
        raise ValueError(f"Unsupported Alist version: {alist_ver}")

    # download manager
    DownloadManager.initialize(
        alist_client=alist_client,
        base_download_path=cfg.alist.download_path,
        use_renamer=cfg.rename.enable,
        need_notification=cfg.notification.enable,
        db=db,
    )

    # extractor / renamer (only needed when renaming is enabled)
    if cfg.rename.enable:
        extractor_cfg = cfg.rename.extractor
        provider_kwargs = extractor_cfg.model_dump(
            exclude={"extractor_type", "output_type"}
        )
        try:
            llm_provider = create_llm_provider(
                extractor_cfg.extractor_type, **provider_kwargs
            )
            extractor = LLMExtractor(llm_provider, extractor_cfg.output_type)
        except ValueError as e:
            logger.error(f"Failed to create LLM provider: {e}")
            raise
        Extractor.initialize(extractor)

        AnimeRenamer.initialize(alist_client, cfg.rename.rename_format)

    # remapper
    if cfg.rename.remap.enable:
        cfg_path = cfg.rename.remap.cfg_path
        RemapperManager.load_remappers_from_cfg(cfg_path)

    # rss monitor
    regex_filter = RegexFilter()
    filters_name = cfg.mikan.filters
    regex_pattern = cfg.mikan.regex_pattern
    regex_filter.update_regex(regex_pattern)
    for name in filters_name:
        regex_filter.add_pattern(name)

    subscribe_url = cfg.mikan.subscribe_url
    rss_monitor = RssMonitor(
        subscribe_urls=subscribe_url,
        db=db,
        filter=regex_filter,
        use_extractor=cfg.rename.enable,
    )
    rss_monitor.set_interval_time(cfg.common.interval_time)

    tasks = []
    tasks.append(rss_monitor.run())
    # notification
    if cfg.notification.enable:
        init_notification(cfg)
        # init_notification disables notifications in-place when no bot could
        # be created; only schedule the sender if it is still enabled.
        if cfg.notification.enable:
            tasks.append(NotificationSender.run())

    # Initialize bot assistant
    bot_assistant = None
    if cfg.bot_assistant.enable:
        # Only telegram bot is supported now
        if cfg.bot_assistant.bots[0].bot_type == "telegram":
            bot_assistant = BotAssistant(cfg.bot_assistant.bots[0].token, rss_monitor)
            tasks.append(bot_assistant.run())

    try:
        await asyncio.gather(*tasks)
    finally:
        # cleanup after program exit
        await db.close()
        await alist_client.close()
        # Guard on the instance itself: checking cfg.bot_assistant.enable
        # raised NameError when enabled but the bot type was not telegram.
        if bot_assistant is not None:
            await bot_assistant.stop()
175 |
176 |
def main():
    """Synchronous console entry point; drives the async run() coroutine."""
    asyncio.run(run())
179 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/extractor/llm_extractor.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, List, Optional, Type
2 |
3 | from loguru import logger
4 |
5 | from ..utils.tmdb import TMDBClient
6 | from .base import ExtractorBase
7 | from .llm import LLMProvider
8 | from .llm.prompt import PromptType, load_prompt
9 | from .models import (
10 | AnimeNameExtractResult,
11 | ResourceTitleExtractResult,
12 | TMDBSearchParam,
13 | TMDBTvInfo,
14 | )
15 |
16 |
class LLMExtractor(ExtractorBase):
    """Generic extractor that works with any LLM provider."""

    def __init__(
        self, llm_provider: LLMProvider, parse_mode: PromptType = PromptType.JSON_OBJECT
    ):
        """
        Initialize the extractor

        Args:
            llm_provider: The LLM provider to use
            parse_mode: The parsing mode ('json_object' or 'json_schema')
        """
        self.llm = llm_provider
        self.parse_mode = parse_mode
        self.tmdb_client = TMDBClient()

    async def _parse(self, messages: List[Dict[str, str]], response_type: Type):
        """Parse the LLM response into `response_type` based on the selected mode."""
        if self.parse_mode == PromptType.JSON_SCHEMA:
            return await self.llm.parse_with_schema(messages, response_type)
        # json_object mode: the provider returns a plain dict that is fed
        # into the target model's constructor
        json_result = await self.llm.parse_as_json(messages)
        return response_type(**json_result)

    async def analyse_anime_name(self, anime_name: str) -> AnimeNameExtractResult:
        """Analyse the anime name to extract series and season info.

        Raises:
            ValueError: if the LLM response could not be parsed.
        """
        system_prompt = load_prompt(self.parse_mode, "anime_name")

        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": anime_name},
        ]

        try:
            result = await self._parse(messages, AnimeNameExtractResult)
            if result is None:
                raise ValueError(f"Failed to parse anime name: {anime_name}")
            logger.debug(f"Analyse anime name: {anime_name} -> {result}")
            return result
        except Exception as e:
            logger.error(f"Error parsing anime name: {e}")
            raise ValueError(f"Failed to parse anime name: {anime_name}") from e

    async def search_name_in_tmdb(
        self, resource_title: str, max_retry_times: int = 5
    ) -> Optional[TMDBTvInfo]:
        """Search for the anime's TMDB entry based on the resource title.

        Returns None on any failure so callers can fall back to the
        LLM-extracted name.
        """
        # 1. Ask LLM to parse the resource title and extract search keyword
        system_prompt = load_prompt(self.parse_mode, "tmdb_search_param")

        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": resource_title},
        ]

        try:
            search_param = await self._parse(messages, TMDBSearchParam)
            if search_param is None:
                logger.error(f"Failed to parse resource title: {resource_title}")
                return None
            logger.debug(f"Search param: {search_param}")

            # 2. Use the keyword to search in TMDB
            search_results = await self.tmdb_client.search_tv(search_param.query)

            # Try different search keywords if no results
            i = 0
            while i < max_retry_times and len(search_results) == 0:
                logger.warning(
                    f"Unable to find anime name in TMDB, search param: {search_param}. "
                    f"Retry {i+1}/{max_retry_times}"
                )

                # Get a new search parameter
                retry_prompt = load_prompt(self.parse_mode, "tmdb_retry_search")
                retry_messages = [
                    {"role": "system", "content": retry_prompt},
                    {
                        "role": "user",
                        "content": f"No results found for: {search_param.query}. Please try a different keyword.",
                    },
                ]

                new_param = await self._parse(retry_messages, TMDBSearchParam)
                # Count the attempt even when parsing fails; previously the
                # `continue` skipped the increment, so a provider that kept
                # returning None looped forever (or crashed dereferencing
                # `search_param.query` on the next iteration).
                i += 1
                if new_param is None:
                    logger.warning(f"Failed to parse resource title: {resource_title}")
                    # Keep the previous keyword so the next retry prompt stays valid
                    continue
                search_param = new_param

                search_results = await self.tmdb_client.search_tv(search_param.query)

            if len(search_results) == 0:
                logger.error("Unable to find anime name in TMDB")
                return None

            logger.debug(f"Search results: {search_results}")

            # 3. Ask LLM to find the correct anime in the search results
            find_anime_prompt = load_prompt(self.parse_mode, "tmdb_find_anime")
            find_messages = [
                {"role": "system", "content": find_anime_prompt},
                {
                    "role": "user",
                    "content": f"resource_file_name: {resource_title}, search_results: {search_results}",
                },
            ]

            tmdb_info = await self._parse(find_messages, TMDBTvInfo)
            if tmdb_info is None:
                logger.error(
                    f"Failed to find the anime of {resource_title} in search result: {search_results}"
                )
                return None

            logger.debug(f"TMDB info: {tmdb_info}")
            return tmdb_info

        except Exception as e:
            logger.error(f"Error searching in TMDB: {e}")
            return None

    async def analyse_resource_title(
        self, resource_title: str, use_tmdb: bool = True
    ) -> ResourceTitleExtractResult:
        """Analyse the resource title to extract all info.

        When `use_tmdb` is set, the anime name is replaced by the TMDB match
        (if one is found); all other fields come from the LLM.

        Raises:
            ValueError: if the LLM response could not be parsed.
        """
        system_prompt = load_prompt(self.parse_mode, "resource_title")

        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": resource_title},
        ]

        try:
            result = await self._parse(messages, ResourceTitleExtractResult)
            if result is None:
                raise ValueError(f"Failed to parse resource title: {resource_title}")

            # Get anime name from TMDB if enabled
            if use_tmdb:
                tmdb_info = await self.search_name_in_tmdb(resource_title)
                result.anime_name = (
                    tmdb_info.anime_name if tmdb_info else result.anime_name
                )

            logger.debug(f"Analyse resource title: {resource_title} -> {result}")
            return result
        except Exception as e:
            logger.error(f"Error parsing resource title: {e}")
            raise ValueError(f"Failed to parse resource title: {resource_title}") from e
167 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/alist/api.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import mimetypes
3 | import os
4 | import urllib.parse
5 | from typing import List
6 |
7 | import aiohttp
8 |
9 | from alist_mikananirss.alist.tasks import (
10 | AlistDeletePolicy,
11 | AlistDownloaderType,
12 | AlistDownloadTask,
13 | AlistTask,
14 | AlistTaskType,
15 | AlistTransferTask,
16 | )
17 |
18 |
class AlistClientError(Exception):
    """Raised when an Alist API response carries a non-200 business code."""

    pass
21 |
22 |
class Alist:
    """Async HTTP client for the Alist server API.

    Holds a lazily created shared aiohttp session; call close() when done.
    """

    def __init__(self, base_url: str, token: str, downloader: AlistDownloaderType):
        # base_url: address of the Alist server
        # token: API token, sent verbatim as the Authorization header
        # downloader: offline-download tool Alist should use (aria2/qBittorrent)
        self.base_url = base_url
        self.token = token
        self.downloader = downloader
        self.session = None
        self._session_lock = asyncio.Lock()

    async def _ensure_session(self):
        """Create the shared aiohttp session on first use.

        The lock prevents two concurrent callers from each creating a session.
        """
        async with self._session_lock:
            if self.session is None or self.session.closed:
                self.session = aiohttp.ClientSession(trust_env=True)

    async def close(self):
        """Close the underlying aiohttp session, if one was created."""
        if self.session:
            await self.session.close()
            self.session = None

    async def _api_call(
        self,
        method: str,
        endpoint: str,
        custom_headers: dict[str, str] = None,
        **kwargs,
    ):
        """Send one authenticated request and return the response's 'data' field.

        Raises:
            aiohttp.ClientResponseError: on HTTP-level failure.
            AlistClientError: when the JSON body's 'code' is not 200.

        NOTE(review): endpoints are passed both with and without a leading
        slash; with urljoin a leading "/" discards any sub-path in base_url —
        confirm base_url is always a server root.
        """
        await self._ensure_session()
        url = urllib.parse.urljoin(self.base_url, endpoint)
        headers = {
            "Authorization": self.token,
            "Content-Type": "application/json",
            "User-Agent": "Alist-Mikanirss",
        }
        if custom_headers:
            headers.update(custom_headers)
        async with self.session.request(method, url, headers=headers, **kwargs) as resp:
            resp.raise_for_status()
            data = await resp.json()
            if data["code"] != 200:
                raise AlistClientError(data.get("message", "Unknown error"))
            return data["data"]

    async def _init_alist_version(self):
        """Fetch the server version once and cache it on the instance."""
        response_data = await self._api_call("GET", "/api/public/settings")
        self.version = response_data["version"][1:]  # strip the leading letter "v"

    async def get_alist_ver(self):
        """Return the server version string, fetching it on first call."""
        if not hasattr(self, "version"):
            await self._init_alist_version()
        return self.version

    async def add_offline_download_task(
        self,
        save_path: str,
        urls: list[str],
        policy: AlistDeletePolicy = AlistDeletePolicy.DeleteAlways,
    ) -> list[AlistDownloadTask]:
        """Queue offline downloads of `urls` into `save_path`.

        Returns:
            list[AlistDownloadTask]: one task object per created download.
        """
        response_data = await self._api_call(
            "POST",
            "api/fs/add_offline_download",
            json={
                "delete_policy": policy.value,
                "path": save_path,
                "urls": urls,
                "tool": self.downloader.value,
            },
        )
        return [AlistDownloadTask.from_json(task) for task in response_data["tasks"]]

    async def upload(self, save_path: str, file_path: str) -> bool:
        """upload local file to Alist.

        Args:
            save_path (str): Alist path
            file_path (str): local file path

        Returns:
            bool: True on success (errors surface as exceptions).
        """
        file_path = os.path.abspath(file_path)
        file_name = os.path.basename(file_path)

        # Use utf-8 encoding to avoid UnicodeEncodeError
        file_path_encoded = file_path.encode("utf-8")

        mime_type = mimetypes.guess_type(file_name)[0]
        file_stat = os.stat(file_path)
        # Alist expects the destination path percent-encoded in a header
        upload_path = urllib.parse.quote(f"{save_path}/{file_name}")

        headers = {
            "Content-Type": mime_type,
            "Content-Length": str(file_stat.st_size),
            "file-path": upload_path,
        }

        with open(file_path_encoded, "rb") as f:
            await self._api_call("PUT", "api/fs/put", custom_headers=headers, data=f)
        return True

    async def list_dir(
        self, path, password=None, page=1, per_page=30, refresh=False
    ) -> list[str]:
        """List dir.

        Args:
            path (str): dir path
            password (str, optional): dir's password. Defaults to None.
            page (int, optional): page number. Defaults to 1.
            per_page (int, optional): how many item in one page. Defaults to 30.
            refresh (bool, optional): force to refresh. Defaults to False.

        Returns:
            list[str]: names of the entries in the dir (empty if none).
        """
        response_data = await self._api_call(
            "POST",
            "api/fs/list",
            json={
                "path": path,
                "password": password,
                "page": page,
                "per_page": per_page,
                "refresh": refresh,
            },
        )
        if response_data["content"]:
            files_list = [file_info["name"] for file_info in response_data["content"]]
        else:
            files_list = []
        return files_list

    async def _fetch_tasks(
        self, task_type: AlistTaskType, status: str
    ) -> List[AlistTask]:
        """Fetch one task queue ('done' or 'undone') and wrap each entry."""
        json_data = await self._api_call("GET", f"/api/task/{task_type.value}/{status}")

        # Transfer tasks carry extra parsed fields; everything else is a download
        if task_type == AlistTaskType.TRANSFER:
            task_class = AlistTransferTask
        else:
            task_class = AlistDownloadTask

        tasks = [task_class.from_json(task) for task in json_data] if json_data else []
        return tasks

    async def get_task_list(
        self, task_type: AlistTaskType, status: str = ""
    ) -> List[AlistTask]:
        """
        Get Alist task list.

        Args:
            task_type (TaskType):
            status (str): "done" | "undone"; if empty, both queues are
                fetched and concatenated. Defaults to "".

        Returns:
            List[AlistTask]: all tasks matching the query.

        Raises:
            ValueError: for any status other than "", "done" or "undone".
        """
        if not status:
            done_tasks = await self._fetch_tasks(task_type, "done")
            undone_tasks = await self._fetch_tasks(task_type, "undone")
            return done_tasks + undone_tasks
        elif status.lower() in ["done", "undone"]:
            return await self._fetch_tasks(task_type, status)
        else:
            raise ValueError("Unknown status when get task list.")

    async def cancel_task(
        self,
        task: AlistTask,
    ) -> bool:
        """Cancel one task by its id; returns True (errors raise)."""
        await self._api_call(
            "POST", f"/api/task/{task.task_type.value}/cancel?tid={task.tid}"
        )
        return True

    async def rename(self, path: str, new_name: str):
        """Rename a file or dir.

        Args:
            path (str): The absolute path of the file or dir of Alist
            new_name (str): Only name, not include path.
        """
        await self._api_call(
            "POST", "api/fs/rename", json={"path": path, "name": new_name}
        )
204 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/alist/tasks.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import re
4 | from abc import ABC
5 | from dataclasses import dataclass, field
6 | from datetime import datetime
7 | from enum import Enum
8 |
9 |
# https://github.com/alist-org/alist/blob/86b35ae5cfec400871072356fec4dea88303195d/pkg/task/task.go#L27
class AlistTaskState(Enum):
    """Task lifecycle states; values mirror the Go constants linked above."""

    Pending = 0
    Running = 1
    Succeeded = 2
    Canceling = 3
    Canceled = 4
    Errored = 5
    Failing = 6
    Failed = 7
    StateWaitingRetry = 8
    StateBeforeRetry = 9
    UNKNOWN = 10
23 |
24 |
class AlistDownloaderType(Enum):
    """Offline-download tools Alist can drive; values match the API's 'tool' field."""

    ARIA = "aria2"
    QBIT = "qBittorrent"
28 |
29 |
class AlistDeletePolicy(Enum):
    """Values for the offline-download API's 'delete_policy' field."""

    DeleteOnUploadSucceed = "delete_on_upload_succeed"
    DeleteOnUploadFailed = "delete_on_upload_failed"
    DeleteNever = "delete_never"
    DeleteAlways = "delete_always"
35 |
36 |
class AlistTaskType(Enum):
    """Task queue identifiers used in the /api/task/<type>/... endpoints."""

    DOWNLOAD = "offline_download"
    TRANSFER = "offline_download_transfer"
    UNKNOWN = "unknown"
41 |
42 |
class CreatorRole(Enum):
    """Role of the account that created a task (API 'creator_role' field)."""

    USER = 0
    GUEST = 1
    ADMIN = 2
47 |
48 |
# Parses download task names: "download <url> to (<target dir>)"
DOWNLOAD_DES_PATTERN = re.compile(r"download\s+(.+?)\s+to \((.+?)\)")
# Parses transfer task names: "transfer [<label>](<temp filepath>) to [<drive>](<subdir>)"
TRANSFER_DES_PATTERN = re.compile(r"transfer \[.*\]\((.+)\) to \[(.+)\]\((.+)\)")
# Matches the lowercase hex UUID of the local temp directory inside a filepath
UUID_PATTERN = re.compile(
    r"([a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12})"
)
54 |
55 |
class AlistTaskError(Exception):
    """Base exception for AlistTask related errors (see subclasses)."""
58 |
59 |
class InvalidTaskDescription(AlistTaskError):
    """Raised when a task's name does not match the expected description pattern."""
62 |
63 |
@dataclass
class AlistTask(ABC):
    """Alist offical task object.
    refer to: https://alist.nn.ci/zh/guide/api/task.html#%E8%BF%94%E5%9B%9E%E7%BB%93%E6%9E%9C
    """

    creator: str
    creator_role: CreatorRole
    end_time: datetime
    error: str
    tid: str
    name: str
    progress: float
    start_time: datetime
    state: AlistTaskState
    status: str
    total_bytes: int

    # Filled in by the concrete subclasses (DOWNLOAD / TRANSFER)
    task_type: AlistTaskType = field(init=False)

    @staticmethod
    def _parse_timestamp(raw: str | None) -> datetime | None:
        """Parse Alist's RFC3339-style timestamp or return None when absent.

        The server emits Go time strings whose fractional part may exceed six
        digits (e.g. '2025-07-11T07:48:41.4354376Z') or be missing entirely.
        Truncate to microsecond precision and pick the matching format; the
        previous '[:26] + "Z"' approach produced '...ZZ' (and crashed) for
        timestamps without a long fractional part.
        NOTE(review): numeric UTC offsets (e.g. '+08:00') are not expected
        from Alist and are not handled here.
        """
        if not raw:
            return None
        trimmed = raw[:26]  # keep at most microsecond precision
        if trimmed.endswith("Z"):
            trimmed = trimmed[:-1]
        fmt = "%Y-%m-%dT%H:%M:%S.%f" if "." in trimmed else "%Y-%m-%dT%H:%M:%S"
        return datetime.strptime(trimmed, fmt)

    @classmethod
    def from_json(cls, json_data: dict) -> "AlistTask":
        """Creates an AlistTask instance from a JSON dictionary."""
        return cls(
            creator=json_data["creator"],
            creator_role=CreatorRole(json_data["creator_role"]),
            end_time=cls._parse_timestamp(json_data["end_time"]),
            error=json_data["error"],
            tid=json_data["id"],
            name=json_data["name"],
            progress=json_data["progress"],
            start_time=cls._parse_timestamp(json_data["start_time"]),
            state=AlistTaskState(json_data["state"]),
            status=json_data["status"],
            total_bytes=json_data["total_bytes"],
        )

    def __hash__(self):
        # tid uniquely identifies a task
        return hash(self.tid)
125 |
126 |
@dataclass
class AlistTransferTask(AlistTask):
    """Transfer task with the temp-dir uuid and target filepath parsed from its name."""

    uuid: str = field(init=False)  # local temp directory uuid
    target_path: str = field(init=False)  # transfer target filepath
    task_type: AlistTaskType = field(default=AlistTaskType.TRANSFER, init=False)

    def __post_init__(self):
        """Derive uuid and target_path from the task description.

        Raises:
            InvalidTaskDescription: if the name does not match TRANSFER_DES_PATTERN.
        """
        self.task_type = AlistTaskType.TRANSFER
        match = re.match(TRANSFER_DES_PATTERN, self.name)
        if match:
            # example:
            # transfer [](/opt/alist/data/temp/qBittorrent/2788fc32-b898-4b54-8657-efa59962f637/[ANi] 歲月流逝飯菜依舊美味 - 09 [1080P][Baha][WEB-DL][AAC AVC][CHT].mp4) to [/Google](/Anime/时光流逝,饭菜依旧美味/Season 1)

            # /opt/alist/data/temp/qBittorrent/2788fc32-b898-4b54-8657-efa59962f637/[ANi] 歲月流逝飯菜依舊美味 - 09 [1080P][Baha][WEB-DL][AAC AVC][CHT].mp4
            temp_filepath = match.group(1)
            # /Google
            target_drive = match.group(2)
            # /Anime/时光流逝,饭菜依旧美味/Season 1
            drive_subdir = match.group(3)
            # /Google/Anime/时光流逝,饭菜依旧美味/Season 1
            target_dirpath = f"{target_drive}{drive_subdir}".rstrip("/")
            # 2788fc32-b898-4b54-8657-efa59962f637
            uuid = re.search(UUID_PATTERN, temp_filepath).group(1)
            # Path relative to the temp dir, e.g. the bare file name:
            # [ANi] 歲月流逝飯菜依舊美味 - 09 [1080P][Baha][WEB-DL][AAC AVC][CHT].mp4
            sub_path = temp_filepath[temp_filepath.rfind(uuid) + len(uuid) + 1 :]
            # /Google/Anime/时光流逝,饭菜依旧美味/Season 1/[ANi] 歲月流逝飯菜依舊美味 - 09 [1080P][Baha][WEB-DL][AAC AVC][CHT].mp4
            target_file_path = f"{target_dirpath}/{sub_path}"
        else:
            raise InvalidTaskDescription(
                f"Failed to get uuid and target filepath from task description: {self.name}"
            )
        self.uuid = uuid
        self.target_path = target_file_path

    def __hash__(self):
        # tid uniquely identifies a task
        return hash(self.tid)
165 |
166 |
@dataclass
class AlistDownloadTask(AlistTask):
    """Offline download task with the url and target dir parsed from its name."""

    url: str = field(init=False)  # download url
    download_path: str = field(init=False)  # The target path in Alist to download to
    task_type: AlistTaskType = field(default=AlistTaskType.DOWNLOAD, init=False)

    def __post_init__(self):
        """Derive url/download_path from the task description; normalize seeding state.

        Raises:
            InvalidTaskDescription: if the name does not match DOWNLOAD_DES_PATTERN.
        """
        match = re.match(DOWNLOAD_DES_PATTERN, self.name)
        if match:
            url = match.group(1)
            download_path = match.group(2)
            self.url = url
            self.download_path = download_path
        else:
            raise InvalidTaskDescription(
                f"Failed to get url and download path from task description: {self.name}"
            )
        # If seeding, the task status will still be Running
        # We need to change it to Succeeded manually to ensure the task is marked as completed
        if (
            self.state == AlistTaskState.Running
            and "offline download completed" in self.status
        ):
            self.state = AlistTaskState.Succeeded

    def __hash__(self):
        # tid uniquely identifies a task
        return hash(self.tid)
194 |
195 |
if __name__ == "__main__":
    # Manual smoke check: parse a transfer-task payload in the OpenList name
    # format ("transfer [](<tmp path>) to [<drive>](<dir>)") and print it.
    example_json = {
        "id": "rubfcBLgOOZW8SFPHZbkj",
        "name": "transfer [](/data/tmp/qBittorrent/f32e8969-8a6c-4878-8932-ca3318f9933e/Summer Pockets - S01E14 - [三明治摆烂组][简体内嵌][H264 8bit 1080P].mp4) to [/crypt-gd1](/)",
        "creator": "admin",
        "creator_role": 2,
        "state": 1,
        "status": "getting src object",
        "progress": 0,
        "start_time": "2025-07-11T07:48:41.4354376Z",
        "end_time": None,
        "total_bytes": 390419899,
        "error": "",
    }
    task = AlistTransferTask.from_json(example_json)
    print(task)
213 |
--------------------------------------------------------------------------------
/tests/core/download_manager/test_download_manager.py:
--------------------------------------------------------------------------------
1 | import os
2 | from unittest.mock import AsyncMock, MagicMock
3 |
4 | import pytest
5 |
6 | from alist_mikananirss.alist.tasks import AlistDownloadTask
7 | from alist_mikananirss.core.download_manager import DownloadManager
8 | from alist_mikananirss.websites.models import ResourceInfo, VideoQuality
9 |
10 |
@pytest.fixture
def setup_download_manager():
    """Provide a fresh DownloadManager wired to a mocked Alist client and DB."""
    alist_client = AsyncMock()
    alist_client.add_offline_download_task = AsyncMock()

    db = AsyncMock()
    db.insert_resource_info = AsyncMock()
    db.delete_by_resource_title = AsyncMock()

    # Drop any instance left over from a previous test before re-creating.
    DownloadManager.destroy_instance()
    manager = DownloadManager(
        alist_client=alist_client, base_download_path="/anime", db=db
    )

    # Replace the real task monitor with an async-aware stub.
    monitor_stub = MagicMock()
    monitor_stub.monitor = AsyncMock()
    manager.task_monitor = monitor_stub

    return manager, alist_client, db
30 |
31 |
@pytest.fixture
def resources():
    """Four resources with decreasing metadata completeness: full info,
    no quality, no season, and finally title/url only."""
    return [
        ResourceInfo(
            resource_title="Test Anime S01E01",
            torrent_url="https://example.com/test1.torrent",
            anime_name="Test Anime",
            season=1,
            episode=1,
            quality=VideoQuality.p1080,
        ),
        ResourceInfo(
            resource_title="Another Anime S02E03",
            torrent_url="https://example.com/test2.torrent",
            anime_name="Another Anime",
            season=2,
            episode=3,
        ),
        ResourceInfo(
            resource_title="No Season E01",
            torrent_url="https://example.com/test3.torrent",
            anime_name="No Season",
            episode=1,
        ),
        ResourceInfo(
            resource_title="No Anime", torrent_url="https://example.com/test4.torrent"
        ),
    ]
60 |
61 |
@pytest.mark.asyncio
async def test_download(setup_download_manager, resources):
    """download() returns one Alist task per submitted resource."""
    dm, mock_alist_client, _ = setup_download_manager

    mock_tasks = [
        MagicMock(spec=AlistDownloadTask, url=res.torrent_url) for res in resources
    ]
    # Each API call returns a single-task list, one per resource.
    mock_alist_client.add_offline_download_task.side_effect = [
        [task] for task in mock_tasks
    ]

    tasks = await dm.download(resources)

    # One API call per resource (their download paths all differ here).
    assert mock_alist_client.add_offline_download_task.call_count == 4
    assert len(tasks) == 4
    # The returned collection contains exactly the mocked tasks.
    for expected in mock_tasks:
        assert expected in tasks
92 |
93 |
@pytest.mark.asyncio
async def test_download_grouping_by_path(setup_download_manager):
    """Resources resolving to the same path are batched into one API call."""
    dm, mock_alist_client, _ = setup_download_manager

    # Same anime + same season -> identical download path for both resources.
    resources = [
        ResourceInfo(
            resource_title=f"Same Path {i}",
            torrent_url=f"http://example.com/same{i}.torrent",
            anime_name="Same Anime",
            season=1,
            episode=i,
        )
        for i in (1, 2)
    ]

    mock_tasks = [
        MagicMock(spec=AlistDownloadTask, url=res.torrent_url) for res in resources
    ]
    mock_alist_client.add_offline_download_task.return_value = mock_tasks

    tasks = await dm.download(resources)

    # Grouping by path collapses the two resources into a single call ...
    mock_alist_client.add_offline_download_task.assert_called_once()

    # ... carrying the shared directory and both torrent URLs.
    call_args = mock_alist_client.add_offline_download_task.call_args
    assert call_args[0][0] == os.path.join("/anime", "Same Anime", "Season 1")
    assert sorted(call_args[0][1]) == sorted(r.torrent_url for r in resources)

    assert len(tasks) == 2
    assert all(t in tasks for t in mock_tasks)
134 |
135 |
@pytest.mark.asyncio
async def test_download_exception_handling(setup_download_manager, resources):
    """API failures are handled internally: download() yields no tasks."""
    dm, mock_alist_client, _ = setup_download_manager
    mock_alist_client.add_offline_download_task.side_effect = Exception(
        "Test exception"
    )

    tasks = await dm.download(resources)

    assert len(tasks) == 0, "no tasks should be returned when the API raises"
149 |
150 |
@pytest.mark.asyncio
async def test_add_download_tasks(setup_download_manager, resources):
    """Each created task is persisted to the DB and handed to the monitor."""
    dm, _, mock_db = setup_download_manager
    wanted = [resources[0], resources[1]]
    mock_tasks = [
        MagicMock(spec=AlistDownloadTask, url=res.torrent_url) for res in wanted
    ]
    dm.download = AsyncMock(return_value=mock_tasks)

    await DownloadManager.add_download_tasks(wanted)

    dm.download.assert_called_once_with(wanted)

    # Every resource was recorded in the database ...
    for res in wanted:
        mock_db.insert_resource_info.assert_any_call(res)
    # ... and each (task, resource) pair went to the monitor.
    for task, res in zip(mock_tasks, wanted):
        dm.task_monitor.monitor.assert_any_call(task, res)
174 |
175 |
@pytest.mark.asyncio
async def test_add_download_tasks_no_tasks(setup_download_manager, resources):
    """When download() yields nothing, neither the DB nor the monitor is touched."""
    dm, _, mock_db = setup_download_manager
    dm.download = AsyncMock(return_value=[])

    await DownloadManager.add_download_tasks([resources[0], resources[1]])

    mock_db.insert_resource_info.assert_not_called()
    dm.task_monitor.monitor.assert_not_called()
190 |
191 |
@pytest.mark.asyncio
async def test_add_download_tasks_unmatched_task(setup_download_manager, resources):
    """Tasks whose URL matches no requested resource are ignored."""
    dm, _, mock_db = setup_download_manager
    matched_task = MagicMock(spec=AlistDownloadTask, url=resources[0].torrent_url)
    stray_task = MagicMock(
        spec=AlistDownloadTask, url="http://example.com/unmatched.torrent"
    )
    dm.download = AsyncMock(return_value=[matched_task, stray_task])

    await DownloadManager.add_download_tasks([resources[0]])

    # Only the matched task/resource pair is persisted and monitored.
    mock_db.insert_resource_info.assert_called_once_with(resources[0])
    dm.task_monitor.monitor.assert_called_once_with(matched_task, resources[0])
208 |
--------------------------------------------------------------------------------
/tests/alist/test_alist.py:
--------------------------------------------------------------------------------
1 | import random
2 | from datetime import datetime
3 | from unittest.mock import AsyncMock, patch
4 |
5 | import pytest
6 |
7 | from alist_mikananirss.alist.api import Alist
8 | from alist_mikananirss.alist.tasks import (
9 | AlistDeletePolicy,
10 | AlistDownloaderType,
11 | AlistDownloadTask,
12 | AlistTaskState,
13 | AlistTaskType,
14 | AlistTransferTask,
15 | )
16 |
17 |
@pytest.fixture
def alist():
    """An Alist client pointed at a dummy server, using the aria2 downloader."""
    return Alist(
        base_url="https://example.com",
        token="test_token",
        downloader=AlistDownloaderType.ARIA,
    )
25 |
26 |
@pytest.fixture
def create_task_json():
    """Factories building raw task JSON in the shape the Alist task API returns."""

    def _create_download_task_json(tid, state, url, download_path, status=None):
        # "name" follows Alist's offline-download description format
        # ("download <url> to (<path>)"), which AlistDownloadTask parses back.
        task_json_dict = {
            "creator": "alist_mikananirss",
            "creator_role": 2,
            "end_time": None,
            "error": "",
            "id": tid,
            "name": f"download {url} to ({download_path})",
            "progress": 100,
            "start_time": datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
            "state": state,
            "status": status if status else "[qBittorrent]: [qBittorrent] downloading",
            "total_bytes": random.randint(1000000, 1000000000),
        }
        return task_json_dict

    def _create_transfer_task_json(
        tid, state, uuid, target_drive, target_dir, filename, total_bytes=None
    ):
        # "name" follows the transfer description format
        # ("transfer [](<tmp>/<uuid>/<file>) to [<drive>](<dir>)").
        # Fix: embed the `filename` argument (previously it was ignored and a
        # literal placeholder was emitted), so target-path assertions such as
        # test_tf_task_extract's can check the transferred file name.
        task_json_dict = {
            "creator": "twosix",
            "creator_role": 2,
            "end_time": None,
            "error": "",
            "id": tid,
            "name": f"transfer [](/opt/alist/data/temp/qBittorrent/{uuid}/{filename}) to [{target_drive}]({target_dir})",
            "progress": 100,
            "start_time": datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
            "state": state,
            "status": "getting src object",
            "total_bytes": (
                total_bytes if total_bytes else random.randint(1000000, 1000000000)
            ),
        }
        return task_json_dict

    return _create_download_task_json, _create_transfer_task_json
66 |
67 |
@pytest.mark.asyncio
async def test_alist_init(alist):
    """The constructor stores base_url, token and downloader verbatim."""
    expected = {
        "base_url": "https://example.com",
        "token": "test_token",
        "downloader": AlistDownloaderType.ARIA,
    }
    for attr, value in expected.items():
        assert getattr(alist, attr) == value
73 |
74 |
@pytest.mark.asyncio
async def test_get_alist_ver(alist):
    """get_alist_ver() strips the leading 'v' from the reported version."""
    with patch.object(alist, "_api_call", new_callable=AsyncMock) as api_mock:
        api_mock.return_value = {"version": "v2.0.0"}

        assert await alist.get_alist_ver() == "2.0.0"
        api_mock.assert_called_once_with("GET", "/api/public/settings")
82 |
83 |
@pytest.mark.asyncio
async def test_add_offline_download_task(alist, create_task_json):
    """add_offline_download_task posts the request and parses returned tasks."""
    create_dl_task_json, _ = create_task_json
    with patch.object(alist, "_api_call", new_callable=AsyncMock) as api_mock:
        raw_task = create_dl_task_json(
            "dl1",
            AlistTaskState.Running,
            "magnet:?xt=xxx",
            "/Local/Anime/Season 1",
        )
        api_mock.return_value = {"tasks": [raw_task]}

        tasks = await alist.add_offline_download_task(
            "/Local/Anime/Season 1", ["magnet:?xt=xxx"]
        )

        assert isinstance(tasks[0], AlistDownloadTask)
        api_mock.assert_called_once_with(
            "POST",
            "api/fs/add_offline_download",
            json={
                "delete_policy": AlistDeletePolicy.DeleteAlways.value,
                "path": "/Local/Anime/Season 1",
                "urls": ["magnet:?xt=xxx"],
                "tool": alist.downloader.value,
            },
        )
112 |
113 |
@pytest.mark.asyncio
async def test_list_dir(alist):
    """list_dir() returns just the entry names from the API payload."""
    with patch.object(alist, "_api_call", new_callable=AsyncMock) as api_mock:
        names = ["file1.txt", "file2.txt"]
        api_mock.return_value = {"content": [{"name": n} for n in names]}

        assert await alist.list_dir("/test/path") == names
122 |
123 |
@pytest.mark.asyncio
async def test_cancel_task(alist, create_task_json):
    """cancel_task() hits the offline_download cancel endpoint with the tid."""
    create_dl_task_json, _ = create_task_json
    with patch.object(alist, "_api_call", new_callable=AsyncMock) as api_mock:
        raw_task = create_dl_task_json(
            "dl1", AlistTaskState.Running, "magnet:?xt=xxx", "/Local/Anime/Season 1"
        )
        task = AlistDownloadTask.from_json(raw_task)

        assert await alist.cancel_task(task) is True
        api_mock.assert_called_once_with(
            "POST", f"/api/task/offline_download/cancel?tid={raw_task['id']}"
        )
137 |
138 |
@pytest.mark.asyncio
async def test_rename(alist):
    """rename() posts the full source path plus the new file name."""
    with patch.object(alist, "_api_call", new_callable=AsyncMock) as api_mock:
        await alist.rename("/old/path/file.txt", "new_file.txt")

        api_mock.assert_called_once_with(
            "POST",
            "api/fs/rename",
            json={"path": "/old/path/file.txt", "name": "new_file.txt"},
        )
148 |
149 |
def test_dl_task_extract(create_task_json):
    """AlistDownloadTask parses url/path from the description; seeding maps to Succeeded."""
    create_dl_task_json, _ = create_task_json
    test_url = "magnet:?xt=testtorrent1"
    test_path = "/Local/Anime/Season 1"

    running = AlistDownloadTask.from_json(
        create_dl_task_json("dl1", AlistTaskState.Running, test_url, test_path)
    )
    assert running.url == test_url
    assert running.download_path == test_path
    assert running.task_type == AlistTaskType.DOWNLOAD

    # A task that finished downloading but is still seeding reports Running;
    # the model normalizes its state to Succeeded.
    seeding = AlistDownloadTask.from_json(
        create_dl_task_json(
            "dl1",
            AlistTaskState.Running,
            test_url,
            test_path,
            "offline download completed, waiting for seeding",
        )
    )
    assert seeding.state == AlistTaskState.Succeeded
177 |
178 |
def test_tf_task_extract(create_task_json):
    """AlistTransferTask extracts uuid and target path for both name formats."""
    _, create_tf_task_json = create_task_json
    uuid = "d82ddec1-08f6-4894-b7ed-d9c9f25dc4db"
    drive = "/Google"
    subdir = "/Debug/test/Season 1"
    filename = "test.mkv"

    task = AlistTransferTask.from_json(
        create_tf_task_json(
            "tf1", AlistTaskState.Running, uuid, drive, subdir, filename
        )
    )
    assert task.uuid == uuid
    assert task.target_path == drive + subdir + "/" + filename
    assert task.task_type == AlistTaskType.TRANSFER

    # OpenList variant: the temp dir lives under /data/tmp and the drive
    # segment maps to "/".
    openlist_json = create_tf_task_json(
        "tf1", AlistTaskState.Running, uuid, drive, subdir, "test.mp4"
    )
    openlist_json["name"] = (
        "transfer [](/data/tmp/qBittorrent/f32e8969-8a6c-4878-8932-ca3318f9933e/Summer Pockets - S01E14 - [三明治摆烂组][简体内嵌][H264 8bit 1080P].mp4) to [/crypt-gd1](/)"
    )
    task2 = AlistTransferTask.from_json(openlist_json)
    assert task2.uuid == "f32e8969-8a6c-4878-8932-ca3318f9933e"
    assert (
        task2.target_path
        == "/crypt-gd1/Summer Pockets - S01E14 - [三明治摆烂组][简体内嵌][H264 8bit 1080P].mp4"
    )
    assert task2.task_type == AlistTaskType.TRANSFER
220 |
--------------------------------------------------------------------------------
/tests/core/download_manager/test_task_monitor.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 | from unittest.mock import AsyncMock, MagicMock, patch
3 |
4 | import pytest
5 |
6 | from alist_mikananirss.alist.tasks import (
7 | AlistDownloadTask,
8 | AlistTaskState,
9 | AlistTaskType,
10 | AlistTransferTask,
11 | )
12 | from alist_mikananirss.core.download_manager import TaskMonitor
13 | from alist_mikananirss.websites.models import ResourceInfo
14 |
15 |
@pytest.fixture
def setup_task_monitor():
    """A TaskMonitor with mocked Alist client and DB, renamer/notification on."""
    alist_client = AsyncMock()
    database = AsyncMock()

    monitor = TaskMonitor(
        alist_client=alist_client,
        db=database,
        use_renamer=True,
        need_notification=True,
    )

    return monitor, alist_client, database
30 |
31 |
@pytest.fixture
def create_task():
    """Factories for mocked download/transfer tasks with shared defaults."""

    def _stub(spec, tid, state, task_type, **extra):
        # Common scaffolding for both task flavours.
        task = MagicMock(spec=spec)
        task.tid = tid
        task.state = state
        task.task_type = task_type
        task.error = ""
        task.start_time = datetime.now()
        task.end_time = None
        task.progress = 0.0
        for attr, value in extra.items():
            setattr(task, attr, value)
        return task

    def _create_download_task(tid, state, url, download_path):
        return _stub(
            AlistDownloadTask,
            tid,
            state,
            AlistTaskType.DOWNLOAD,
            url=url,
            download_path=download_path,
        )

    def _create_transfer_task(tid, state, uuid, target_path):
        return _stub(
            AlistTransferTask,
            tid,
            state,
            AlistTaskType.TRANSFER,
            uuid=uuid,
            target_path=target_path,
        )

    return _create_download_task, _create_transfer_task
61 |
62 |
@pytest.mark.asyncio
async def test_monitor_successful_download_and_transfer(
    setup_task_monitor, create_task
):
    """Happy path: a download succeeds, its transfer task is located,
    completes, and is post-processed; all tracking state is cleared."""
    tm, alist_mock, db_mock = setup_task_monitor
    create_download_task, create_transfer_task = create_task

    # Create a download task in running state
    dl_task = create_download_task(
        tid="dl1",
        state=AlistTaskState.Running,
        url="http://example.com/test.torrent",
        download_path="/test/path/anime",
    )

    # Create a resource info
    resource = ResourceInfo(
        resource_title="Test Anime S01E01",
        torrent_url="http://example.com/test.torrent",
        anime_name="Test Anime",
        season=1,
        episode=1,
    )

    # Create a transfer task that will be returned when the download completes
    tf_task = create_transfer_task(
        tid="tf1",
        state=AlistTaskState.Running,
        uuid="uuid123",
        target_path="/test/path/anime/test_video.mkv",
    )

    # Mock get_task_list to simulate state transitions across polling rounds.
    # `calls` counts invocations; each monitor iteration is assumed to query
    # twice (DOWNLOAD then TRANSFER), so two calls == one iteration:
    #   iteration 1: download task flips to Succeeded
    #   iteration 2: transfer task still running
    #   iteration 3: transfer task flips to Succeeded
    calls = []

    async def get_task_list_side_effect(task_type):
        calls.append(task_type)

        if len(calls) <= 2:  # First iteration
            if task_type == AlistTaskType.DOWNLOAD:
                dl_task.state = AlistTaskState.Succeeded
                return [dl_task]
            else:
                return [tf_task]
        elif len(calls) <= 4:  # Second iteration
            if task_type == AlistTaskType.DOWNLOAD:
                return [dl_task]
            else:
                return [tf_task]
        else:  # Third iteration
            if task_type == AlistTaskType.DOWNLOAD:
                return [dl_task]
            else:
                tf_task.state = AlistTaskState.Succeeded
                return [tf_task]

    alist_mock.get_task_list.side_effect = get_task_list_side_effect

    # Mock is_video to always return True
    with patch("alist_mikananirss.utils.is_video", return_value=True):
        # Mock _find_transfer_task to return our transfer task
        tm._find_transfer_task = AsyncMock(return_value=tf_task)

        # Mock _post_process
        tm._post_process = AsyncMock()

        # Run the monitor and wait for the background processing to finish
        await tm.monitor(dl_task, resource)
        await tm.wait_finished()

    # Verify that the download task is linked to a transfer task
    assert tm._find_transfer_task.called, "Should have tried to find a transfer task"
    assert tm._find_transfer_task.await_args[0][0] == dl_task

    # Verify that post-process was called for the successful transfer task
    assert tm._post_process.called
    post_process_args = tm._post_process.await_args[0]
    assert post_process_args[0] == tf_task
    assert post_process_args[1] == resource

    # Verify that completed tasks are removed from running_tasks
    assert not tm.running_tasks, "All tasks should be removed from running_tasks"
    assert (
        not tm.task_resource_map
    ), "All tasks should be removed from task_resource_map"
151 |
152 |
@pytest.mark.asyncio
async def test_monitor_failed_download(setup_task_monitor, create_task):
    """A failed download is dropped from tracking and purged from the DB."""
    tm, alist_mock, db_mock = setup_task_monitor
    create_download_task, _ = create_task

    dl_task = create_download_task(
        tid="dl1",
        state=AlistTaskState.Running,
        url="http://example.com/test.torrent",
        download_path="/test/path/anime",
    )
    resource = ResourceInfo(
        resource_title="Test Anime S01E01",
        torrent_url="http://example.com/test.torrent",
        anime_name="Test Anime",
        season=1,
        episode=1,
    )

    async def fail_download(task_type):
        # The refreshed task list reports the download as failed.
        if task_type == AlistTaskType.DOWNLOAD:
            dl_task.state = AlistTaskState.Failed
            dl_task.error = "Download error"
            return [dl_task]
        return []

    alist_mock.get_task_list.side_effect = fail_download

    await tm.monitor(dl_task, resource)
    await tm.wait_finished()

    # Tracking structures are cleared ...
    assert not tm.running_tasks, "Failed task should be removed from running_tasks"
    assert (
        not tm.task_resource_map
    ), "Failed task should be removed from task_resource_map"
    # ... and the resource row is removed from the database.
    db_mock.delete_by_resource_title.assert_called_once_with(resource.resource_title)
198 |
199 |
@pytest.mark.asyncio
async def test_monitor_no_transfer_task_found(setup_task_monitor, create_task):
    """A finished download with no matching transfer task leaves all tracking."""
    tm, alist_mock, db_mock = setup_task_monitor
    create_download_task, _ = create_task

    dl_task = create_download_task(
        tid="dl1",
        state=AlistTaskState.Running,
        url="http://example.com/test.torrent",
        download_path="/test/path/anime",
    )
    resource = ResourceInfo(
        resource_title="Test Anime S01E01",
        torrent_url="http://example.com/test.torrent",
        anime_name="Test Anime",
        season=1,
        episode=1,
    )

    async def succeed_download(task_type):
        # The refreshed list reports the download finished; no transfers exist.
        if task_type == AlistTaskType.DOWNLOAD:
            dl_task.state = AlistTaskState.Succeeded
            return [dl_task]
        return []

    alist_mock.get_task_list.side_effect = succeed_download

    # No transfer task can be matched to the finished download.
    tm._find_transfer_task = AsyncMock(return_value=None)

    await tm.monitor(dl_task, resource)
    await tm.wait_finished()

    tm._find_transfer_task.assert_called_once_with(dl_task)

    # With nothing to chain to, the task leaves all tracking structures.
    assert not tm.running_tasks, "Task should be removed from running_tasks"
    assert not tm.task_resource_map, "Task should be removed from task_resource_map"
245 |
--------------------------------------------------------------------------------
/src/alist_mikananirss/common/database.py:
--------------------------------------------------------------------------------
1 | import os
2 | from datetime import datetime
3 |
4 | import aiosqlite
5 | from loguru import logger
6 |
7 | from alist_mikananirss.websites.models import ResourceInfo
8 |
# Directory holding the SQLite database file; created eagerly at import time.
db_dirpath = "data"
os.makedirs(db_dirpath, exist_ok=True)
11 |
12 |
class SubscribeDatabase:
    """Async SQLite store of downloaded resources (table ``resource_data``).

    Instances must be created via :meth:`create`; the constructor is
    intentionally unusable so no instance can exist without an initialized
    schema and open connection.
    """

    def __init__(self):
        # Force construction through the async factory below.
        raise RuntimeError("Use `SubscribeDatabase.create()` to create an instance")

    @classmethod
    async def create(cls, db_name="subscribe_database.db"):
        """Async factory: build an instance, connect, and initialize the schema.

        Args:
            db_name: file name of the SQLite database inside ``db_dirpath``.
        """
        # __new__ bypasses __init__ (which always raises).
        self = cls.__new__(cls)
        self.db_filepath = os.path.join(db_dirpath, db_name)
        self.db = None  # aiosqlite connection, opened by connect()
        await self.initialize()
        return self

    async def initialize(self):
        """Create the schema for a fresh file, or migrate an existing one."""
        # Existence must be checked before connect(), which creates the file.
        if not os.path.exists(self.db_filepath):
            await self.connect()
            await self.__create_table()
        else:
            await self.connect()
            await self._upgrade_database()

    async def connect(self):
        """Open the aiosqlite connection; no-op when already connected."""
        if self.db:
            return
        self.db = await aiosqlite.connect(self.db_filepath)
        if not self.db:
            raise RuntimeError("Failed to connect to database")

    async def close(self):
        """Close the connection and drop the handle."""
        if self.db:
            await self.db.close()
            self.db = None

    async def __create_table(self):
        """Create the current (version 1) schema and stamp the version table."""
        await self.db.execute(
            """
            CREATE TABLE IF NOT EXISTS resource_data (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                resource_title TEXT NOT NULL,
                torrent_url TEXT UNIQUE,
                published_date TEXT,
                downloaded_date TEXT,
                anime_name TEXT,
                season INTEGER,
                episode INTEGER,
                fansub TEXT,
                quality TEXT,
                language TEXT
            )
        """
        )
        await self.db.execute(
            """
            CREATE TABLE IF NOT EXISTS db_version (
                version INTEGER PRIMARY KEY
            )
        """
        )
        await self.db.execute("INSERT INTO db_version (version) VALUES (1)")
        await self.db.commit()

    async def _upgrade_database(self):
        """Bring an existing database file up to the current schema version."""
        try:
            cursor = await self.db.execute("SELECT version FROM db_version")
            version = await cursor.fetchone()
            version = version[0]
        except aiosqlite.OperationalError:
            # Pre-versioning database: add the version table and treat as v0.
            await self.db.execute(
                """
                CREATE TABLE IF NOT EXISTS db_version (
                    version INTEGER PRIMARY KEY
                )
            """
            )
            version = 0

        if version < 1:
            try:
                await self.db.execute(
                    """
                    CREATE TABLE resource_data_new (
                        id INTEGER PRIMARY KEY AUTOINCREMENT,
                        resource_title TEXT NOT NULL,
                        torrent_url TEXT UNIQUE,
                        published_date TEXT,
                        downloaded_date TEXT,
                        anime_name TEXT,
                        season INTEGER,
                        episode INTEGER,
                        fansub TEXT,
                        quality TEXT,
                        language TEXT
                    )
                """
                )
                # Migrate rows from the legacy schema (title/link columns).
                await self.db.execute(
                    """
                    INSERT INTO resource_data_new (resource_title, torrent_url, published_date, downloaded_date, anime_name)
                    SELECT title, link, published_date, downloaded_date, anime_name FROM resource_data
                """
                )
                # Drop the old table.
                await self.db.execute("DROP TABLE resource_data")
                # Rename the new table into place.
                await self.db.execute(
                    "ALTER TABLE resource_data_new RENAME TO resource_data"
                )
                # Create the new unique index on resource_title.
                await self.db.execute(
                    """
                    CREATE UNIQUE INDEX idx_title
                    ON resource_data(resource_title)
                """
                )
                await self.db.execute(
                    "INSERT OR REPLACE INTO db_version (version) VALUES (1)"
                )
                await self.db.commit()
                logger.info("Database upgraded to version 1")
            except Exception as e:
                logger.error(f"Error during database upgrade: {e}")
                await self.db.rollback()

    async def insert(
        self,
        resource_title,
        torrent_url,
        published_date,
        downloaded_date,
        anime_name,
        season=None,
        episode=None,
        fansub=None,
        quality=None,
        language=None,
    ):
        """Insert one resource row; duplicates on unique columns are ignored.

        ``language`` may be an iterable of strings; it is flattened into a
        single string by concatenation for storage.
        """
        language_str = "".join(language) if language else None
        try:
            await self.db.execute(
                """
                INSERT INTO resource_data
                (resource_title, torrent_url, published_date, downloaded_date, anime_name, season, episode, fansub, quality, language)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                """,
                (
                    resource_title,
                    torrent_url,
                    published_date,
                    downloaded_date,
                    anime_name,
                    season,
                    episode,
                    fansub,
                    quality,
                    language_str,
                ),
            )
            await self.db.commit()
            logger.debug(f"Insert new resource: {anime_name}, {resource_title}")
        except aiosqlite.IntegrityError:
            # UNIQUE constraint hit (torrent_url column or resource_title index).
            logger.debug(f"Resource already exists: {anime_name}, {resource_title}")
        except Exception as e:
            logger.error(f"Error when inserting resource: {e}")

    async def insert_resource_info(self, resource: ResourceInfo):
        """Persist a ResourceInfo, stamping now (ms precision) as download time."""
        downloaded_date = datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]
        await self.insert(
            resource.resource_title,
            resource.torrent_url,
            resource.published_date,
            downloaded_date,
            resource.anime_name,
            season=resource.season,
            episode=resource.episode,
            fansub=resource.fansub,
            quality=resource.quality,
            language=resource.languages,
        )

    async def is_resource_title_exist(self, resource_title: str):
        """Return True if a row with this resource_title exists (False on error)."""
        try:
            cursor = await self.db.execute(
                "SELECT 1 FROM resource_data WHERE resource_title = ? LIMIT 1",
                (resource_title,),
            )
            return await cursor.fetchone() is not None
        except Exception as e:
            logger.error(f"Error checking resource existence: {e}")
            return False

    async def delete_by_id(self, id):
        """Delete the row with the given primary key."""
        try:
            await self.db.execute("DELETE FROM resource_data WHERE id=?", (id,))
            await self.db.commit()
            logger.debug(f"Delete resource data: {id}")
        except Exception as e:
            logger.error(f"Error when delete resource data:\n {e}")

    async def delete_by_torrent_url(self, url: str):
        """Delete all rows matching the given torrent URL."""
        try:
            await self.db.execute(
                "DELETE FROM resource_data WHERE torrent_url=?", (url,)
            )
            await self.db.commit()
            logger.debug(f"Delete resource data: {url}")
        except Exception as e:
            logger.error(f"Error when delete resource data:\n {e}")

    async def delete_by_resource_title(self, title: str):
        """Delete all rows matching the given resource title."""
        try:
            await self.db.execute(
                "DELETE FROM resource_data WHERE resource_title=?", (title,)
            )
            await self.db.commit()
            logger.debug(f"Delete resource data: {title}")
        except Exception as e:
            logger.error(f"Error when delete resource data:\n {e}")
230 |
--------------------------------------------------------------------------------
/tests/websites/test_dmhy.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import patch
2 |
3 | import feedparser
4 | import pytest
5 |
6 | from alist_mikananirss.extractor import ResourceTitleExtractResult
7 | from alist_mikananirss.websites.dmhy import Dmhy
8 | from alist_mikananirss.websites.models import (
9 | FeedEntry,
10 | LanguageType,
11 | ResourceInfo,
12 | VideoQuality,
13 | )
14 |
15 |
@pytest.fixture
def dmhy():
    """A Dmhy website parser pointed at the public topics RSS feed."""
    return Dmhy("https://dmhy.org/topics/rss/rss.xml")
19 |
20 |
@pytest.fixture
def mock_rss_data():
    # NOTE(review): the XML tags of this sample dmhy feed appear to have been
    # stripped in this snapshot (no markup survives) — verify the fixture's
    # content against the original file before relying on it.
    return """




http://share.dmhy.org



zh-cn
Wed, 15 Jan 2025 16:57:29 +0800
-



http://share.dmhy.org/topics/view/687329_LoliHouse_Kinomi_Master_-_03_WebRip_1080p_HEVC-10bit_AAC.html
Wed, 15 Jan 2025 15:09:14 +0800


最弱技能《果实大师》 ~关于能无限食用技能果实(吃了就会死)这件事~ Hazure Skill "Kinomi Master": Skill no Mi (Tabetara Shinu) wo Mugen ni Taberareru You ni Natta Ken ni Tsuite 外れスキル《木の実マスター》 ~スキルの実(食べたら死ぬ)を無限に食べられるようになった件について~
字幕:没有 脚本:S01T004721 压制:帕鲁奇亚籽 本片版权字幕质量堪忧,有试看需求请自行前往获取 。
本组作品首发于: nyaa.si
另备份发布于: acg.rip | dmhy.org | bangumi.moe | acgnx.se
备份发布情况取决于各站点可用性,如有缺失烦请移步其他站点下载。
其余站点系自发抓取非我组正式发布。
为了顺利地观看我们的作品,推荐大家使用以下播放器:
Windows:



http://share.dmhy.org/topics/view/687329_LoliHouse_Kinomi_Master_-_03_WebRip_1080p_HEVC-10bit_AAC.html





"""
54 |
55 |
@pytest.mark.asyncio
async def test_get_feed_entries(dmhy, mock_rss_data):
    """Parsing the mocked feed yields one FeedEntry with all fields extracted."""
    with patch.object(dmhy, "parse_feed", return_value=feedparser.parse(mock_rss_data)):
        entries = await dmhy.get_feed_entries()

    assert isinstance(entries, list)
    assert len(entries) == 1

    entry = entries[0]
    assert isinstance(entry, FeedEntry)
    assert (
        entry.resource_title
        == "[LoliHouse] 最弱技能《果实大师》 ~关于能无限食用技能果实(吃了就会死)这件事~ / Kinomi Master - 03 [WebRip 1080p HEVC-10bit AAC][无字幕]"
    )
    assert (
        entry.torrent_url
        == "magnet:?xt=urn:btih:WJBPAWPQKWDXUJKQB4EIPP24RAQGRSEM&dn=&tr=http%3A%2F%2F104.143.10.186%3A8000%2Fannounce&tr=udp%3A%2F%2F104.143.10.186%3A8000%2Fannounce&tr=http%3A%2F%2Ftracker.openbittorrent.com%3A80%2Fannounce&tr=http%3A%2F%2Ftracker3.itzmx.com%3A6961%2Fannounce&tr=http%3A%2F%2Ftracker4.itzmx.com%3A2710%2Fannounce&tr=http%3A%2F%2Ftracker.publicbt.com%3A80%2Fannounce&tr=http%3A%2F%2Ftracker.prq.to%2Fannounce&tr=http%3A%2F%2Fopen.acgtracker.com%3A1096%2Fannounce&tr=https%3A%2F%2Ft-115.rhcloud.com%2Fonly_for_ylbud&tr=http%3A%2F%2Ftracker1.itzmx.com%3A8080%2Fannounce&tr=http%3A%2F%2Ftracker2.itzmx.com%3A6961%2Fannounce&tr=udp%3A%2F%2Ftracker1.itzmx.com%3A8080%2Fannounce&tr=udp%3A%2F%2Ftracker2.itzmx.com%3A6961%2Fannounce&tr=udp%3A%2F%2Ftracker3.itzmx.com%3A6961%2Fannounce&tr=udp%3A%2F%2Ftracker4.itzmx.com%3A2710%2Fannounce&tr=http%3A%2F%2Ft.nyaatracker.com%3A80%2Fannounce&tr=http%3A%2F%2Fnyaa.tracker.wf%3A7777%2Fannounce&tr=http%3A%2F%2Ftr.bangumi.moe%3A6969%2Fannounce&tr=https%3A%2F%2Ftr.bangumi.moe%3A9696%2Fannounce&tr=http%3A%2F%2Ft.acg.rip%3A6699%2Fannounce&tr=http%3A%2F%2Fopen.acgnxtracker.com%2Fannounce&tr=http%3A%2F%2Fshare.camoe.cn%3A8080%2Fannounce&tr=https%3A%2F%2Ftracker.nanoha.org%2Fannounce"
    )
    assert (
        entry.homepage_url
        == "http://share.dmhy.org/topics/view/687329_LoliHouse_Kinomi_Master_-_03_WebRip_1080p_HEVC-10bit_AAC.html"
    )
    assert entry.author == "LoliHouse"
78 |
79 |
@pytest.mark.asyncio
async def test_get_feed_entries_real(dmhy):
    """Smoke test: parse the live dmhy RSS feed without raising.

    NOTE: network-dependent — requires reachability of dmhy.org.
    """
    # The original test discarded the result; also assert the returned type
    # so a silently-broken parser (returning None) fails the test.
    result = await dmhy.get_feed_entries()
    assert isinstance(result, list)
84 |
85 |
@pytest.mark.asyncio
async def test_parse_homepage_error(dmhy):
    # Homepage parsing is best-effort: a failure inside parse_homepage
    # must not propagate out of extract_resource_info.
    entry = FeedEntry(
        resource_title="[LoliHouse] 最弱技能《果实大师》 ~关于能无限食用技能果实(吃了就会死)这件事~ / Kinomi Master - 03 [WebRip 1080p HEVC-10bit AAC][无字幕] ]",
        torrent_url="magnet:?xt=urn:btih:WJBPAWPQKWDXUJKQB4EIPP24RAQGRSEM&dn=&tr=http%3A%2F%2F104.143.10.186%3A8000%2Fannounce&tr=udp%3A%2F%2F104.143.10.186%3A8000%2Fannounce&tr=http%3A%2F%2Ftracker.openbittorrent.com%3A80%2Fannounce&tr=http%3A%2F%2Ftracker3.itzmx.com%3A6961%2Fannounce&tr=http%3A%2F%2Ftracker4.itzmx.com%3A2710%2Fannounce&tr=http%3A%2F%2Ftracker.publicbt.com%3A80%2Fannounce&tr=http%3A%2F%2Ftracker.prq.to%2Fannounce&tr=http%3A%2F%2Fopen.acgtracker.com%3A1096%2Fannounce&tr=https%3A%2F%2Ft-115.rhcloud.com%2Fonly_for_ylbud&tr=http%3A%2F%2Ftracker1.itzmx.com%3A8080%2Fannounce&tr=http%3A%2F%2Ftracker2.itzmx.com%3A6961%2Fannounce&tr=udp%3A%2F%2Ftracker1.itzmx.com%3A8080%2Fannounce&tr=udp%3A%2F%2Ftracker2.itzmx.com%3A6961%2Fannounce&tr=udp%3A%2F%2Ftracker3.itzmx.com%3A6961%2Fannounce&tr=udp%3A%2F%2Ftracker4.itzmx.com%3A2710%2Fannounce&tr=http%3A%2F%2Ft.nyaatracker.com%3A80%2Fannounce&tr=http%3A%2F%2Fnyaa.tracker.wf%3A7777%2Fannounce&tr=http%3A%2F%2Ftr.bangumi.moe%3A6969%2Fannounce&tr=https%3A%2F%2Ftr.bangumi.moe%3A9696%2Fannounce&tr=http%3A%2F%2Ft.acg.rip%3A6699%2Fannounce&tr=http%3A%2F%2Fopen.acgnxtracker.com%2Fannounce&tr=http%3A%2F%2Fshare.camoe.cn%3A8080%2Fannounce&tr=https%3A%2F%2Ftracker.nanoha.org%2Fannounce",
        author="LoliHouse",
    )
    with patch.object(dmhy, "parse_homepage", side_effect=Exception):
        await dmhy.extract_resource_info(entry, use_extractor=False)
96 |
97 |
@pytest.mark.asyncio
async def test_none_fansub(dmhy):
    # When the homepage yields no fansub, fall back to the extractor's result.
    entry = FeedEntry(
        resource_title="[LoliHouse] 最弱技能《果实大师》 ~关于能无限食用技能果实(吃了就会死)这件事~ / Kinomi Master - 03 [WebRip 1080p HEVC-10bit AAC][无字幕] ]",
        torrent_url="magnet:?xt=urn:btih:WJBPAWPQKWDXUJKQB4EIPP24RAQGRSEM&dn=&tr=http%3A%2F%2F104.143.10.186%3A8000%2Fannounce&tr=udp%3A%2F%2F104.143.10.186%3A8000%2Fannounce&tr=http%3A%2F%2Ftracker.openbittorrent.com%3A80%2Fannounce&tr=http%3A%2F%2Ftracker3.itzmx.com%3A6961%2Fannounce&tr=http%3A%2F%2Ftracker4.itzmx.com%3A2710%2Fannounce&tr=http%3A%2F%2Ftracker.publicbt.com%3A80%2Fannounce&tr=http%3A%2F%2Ftracker.prq.to%2Fannounce&tr=http%3A%2F%2Fopen.acgtracker.com%3A1096%2Fannounce&tr=https%3A%2F%2Ft-115.rhcloud.com%2Fonly_for_ylbud&tr=http%3A%2F%2Ftracker1.itzmx.com%3A8080%2Fannounce&tr=http%3A%2F%2Ftracker2.itzmx.com%3A6961%2Fannounce&tr=udp%3A%2F%2Ftracker1.itzmx.com%3A8080%2Fannounce&tr=udp%3A%2F%2Ftracker2.itzmx.com%3A6961%2Fannounce&tr=udp%3A%2F%2Ftracker3.itzmx.com%3A6961%2Fannounce&tr=udp%3A%2F%2Ftracker4.itzmx.com%3A2710%2Fannounce&tr=http%3A%2F%2Ft.nyaatracker.com%3A80%2Fannounce&tr=http%3A%2F%2Fnyaa.tracker.wf%3A7777%2Fannounce&tr=http%3A%2F%2Ftr.bangumi.moe%3A6969%2Fannounce&tr=https%3A%2F%2Ftr.bangumi.moe%3A9696%2Fannounce&tr=http%3A%2F%2Ft.acg.rip%3A6699%2Fannounce&tr=http%3A%2F%2Fopen.acgnxtracker.com%2Fannounce&tr=http%3A%2F%2Fshare.camoe.cn%3A8080%2Fannounce&tr=https%3A%2F%2Ftracker.nanoha.org%2Fannounce",
        author="LoliHouse",
    )
    extract_result = ResourceTitleExtractResult(
        anime_name="最弱技能《果实大师》",
        season=1,
        episode=3,
        quality=VideoQuality.p1080,
        languages=[LanguageType.JAPANESE],
        fansub="LoliHouse",
        version=1,
    )

    homepage_patch = patch.object(dmhy, "parse_homepage", return_value=None)
    extractor_patch = patch(
        "alist_mikananirss.extractor.Extractor.analyse_resource_title",
        return_value=extract_result,
    )
    with homepage_patch, extractor_patch:
        result = await dmhy.extract_resource_info(entry, use_extractor=True)

    assert isinstance(result, ResourceInfo)
    assert result.fansub == "LoliHouse"
125 |
126 |
@pytest.mark.asyncio
async def test_homepage_fansub(dmhy):
    # When the homepage yields a fansub, it takes precedence over the
    # extractor's fansub result.
    entry = FeedEntry(
        resource_title="[LoliHouse] 最弱技能《果实大师》 ~关于能无限食用技能果实(吃了就会死)这件事~ / Kinomi Master - 03 [WebRip 1080p HEVC-10bit AAC][无字幕] ]",
        torrent_url="magnet:?xt=urn:btih:WJBPAWPQKWDXUJKQB4EIPP24RAQGRSEM&dn=&tr=http%3A%2F%2F104.143.10.186%3A8000%2Fannounce&tr=udp%3A%2F%2F104.143.10.186%3A8000%2Fannounce&tr=http%3A%2F%2Ftracker.openbittorrent.com%3A80%2Fannounce&tr=http%3A%2F%2Ftracker3.itzmx.com%3A6961%2Fannounce&tr=http%3A%2F%2Ftracker4.itzmx.com%3A2710%2Fannounce&tr=http%3A%2F%2Ftracker.publicbt.com%3A80%2Fannounce&tr=http%3A%2F%2Ftracker.prq.to%2Fannounce&tr=http%3A%2F%2Fopen.acgtracker.com%3A1096%2Fannounce&tr=https%3A%2F%2Ft-115.rhcloud.com%2Fonly_for_ylbud&tr=http%3A%2F%2Ftracker1.itzmx.com%3A8080%2Fannounce&tr=http%3A%2F%2Ftracker2.itzmx.com%3A6961%2Fannounce&tr=udp%3A%2F%2Ftracker1.itzmx.com%3A8080%2Fannounce&tr=udp%3A%2F%2Ftracker2.itzmx.com%3A6961%2Fannounce&tr=udp%3A%2F%2Ftracker3.itzmx.com%3A6961%2Fannounce&tr=udp%3A%2F%2Ftracker4.itzmx.com%3A2710%2Fannounce&tr=http%3A%2F%2Ft.nyaatracker.com%3A80%2Fannounce&tr=http%3A%2F%2Fnyaa.tracker.wf%3A7777%2Fannounce&tr=http%3A%2F%2Ftr.bangumi.moe%3A6969%2Fannounce&tr=https%3A%2F%2Ftr.bangumi.moe%3A9696%2Fannounce&tr=http%3A%2F%2Ft.acg.rip%3A6699%2Fannounce&tr=http%3A%2F%2Fopen.acgnxtracker.com%2Fannounce&tr=http%3A%2F%2Fshare.camoe.cn%3A8080%2Fannounce&tr=https%3A%2F%2Ftracker.nanoha.org%2Fannounce",
        author="LoliHouse",
    )
    extract_result = ResourceTitleExtractResult(
        anime_name="最弱技能《果实大师》",
        season=1,
        episode=3,
        quality=VideoQuality.p1080,
        languages=[LanguageType.JAPANESE],
        fansub="LoliHouse",
        version=1,
    )

    homepage_patch = patch.object(dmhy, "parse_homepage", return_value="homepage_fansub")
    extractor_patch = patch(
        "alist_mikananirss.extractor.Extractor.analyse_resource_title",
        return_value=extract_result,
    )
    with homepage_patch, extractor_patch:
        result = await dmhy.extract_resource_info(entry, use_extractor=True)

    assert isinstance(result, ResourceInfo)
    assert result.fansub == "homepage_fansub"
155 |
--------------------------------------------------------------------------------