├── rss ├── app │ ├── api │ │ ├── __init__.py │ │ └── endpoints │ │ │ └── __init__.py │ ├── services │ │ └── __init__.py │ ├── core │ │ ├── __init__.py │ │ └── config.py │ ├── __init__.py │ ├── configs │ │ └── title_template.json │ ├── models │ │ └── entry.py │ ├── crud │ │ └── entry.py │ ├── templates │ │ ├── login.html │ │ └── register.html │ └── routes │ │ └── auth.py └── main.py ├── ufb ├── requirements.txt └── ufb_client.py ├── .gitattributes ├── images ├── 1 (2).png ├── 1 (3).png ├── image.png ├── flow_chart.png ├── rss_login.png ├── user_spy.png ├── settings_ai.png ├── rss_dashboard.png ├── settings_main.png ├── settings_media.png ├── settings_other.png ├── settings_push.png ├── rss_create_config.png ├── logo │ └── png │ │ └── logo-title.png ├── settings_media_sub1.png ├── settings_push_sub1.png └── Fluent_Reader_rrt59DN9LZ.png ├── requirements.txt ├── .github ├── FUNDING.yml └── workflows │ └── close-inactive-issues.yml ├── ai ├── grok_provider.py ├── deepseek_provider.py ├── qwen_provider.py ├── base.py ├── __init__.py ├── openai_provider.py ├── claude_provider.py ├── openai_base_provider.py └── gemini_provider.py ├── .gitignore ├── enums └── enums.py ├── Dockerfile ├── .dockerignore ├── docker-compose.yml ├── version.py ├── utils ├── log_config.py ├── media.py ├── constants.py ├── auto_delete.py ├── settings.py └── file_creator.py ├── filters ├── keyword_filter.py ├── base_filter.py ├── filter_chain.py ├── context.py ├── delete_original_filter.py ├── init_filter.py ├── reply_filter.py ├── process.py ├── replace_filter.py ├── delay_filter.py ├── edit_filter.py ├── info_filter.py ├── sender_filter.py └── comment_button_filter.py ├── handlers ├── list_handlers.py ├── user_handler.py ├── link_handlers.py ├── bot_handler.py └── prompt_handlers.py ├── managers └── state_manager.py ├── .env.example ├── scheduler └── chat_updater.py ├── message_listener.py └── main.py /rss/app/api/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | API package 3 | """ -------------------------------------------------------------------------------- /rss/app/services/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Service layer 3 | """ -------------------------------------------------------------------------------- /rss/app/api/endpoints/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | API endpoints 3 | """ -------------------------------------------------------------------------------- /rss/app/core/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Core functionality 3 | """ -------------------------------------------------------------------------------- /ufb/requirements.txt: -------------------------------------------------------------------------------- 1 | websockets>=11.0.3 2 | 3 | python-dotenv>=1.0.0 -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /images/1 (2).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Heavrnl/TelegramForwarder/HEAD/images/1 (2).png 
-------------------------------------------------------------------------------- /images/1 (3).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Heavrnl/TelegramForwarder/HEAD/images/1 (3).png -------------------------------------------------------------------------------- /images/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Heavrnl/TelegramForwarder/HEAD/images/image.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Heavrnl/TelegramForwarder/HEAD/requirements.txt -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | ko_fi: 0heavrnl 3 | 4 | -------------------------------------------------------------------------------- /images/flow_chart.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Heavrnl/TelegramForwarder/HEAD/images/flow_chart.png -------------------------------------------------------------------------------- /images/rss_login.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Heavrnl/TelegramForwarder/HEAD/images/rss_login.png -------------------------------------------------------------------------------- /images/user_spy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Heavrnl/TelegramForwarder/HEAD/images/user_spy.png -------------------------------------------------------------------------------- /images/settings_ai.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Heavrnl/TelegramForwarder/HEAD/images/settings_ai.png -------------------------------------------------------------------------------- /images/rss_dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Heavrnl/TelegramForwarder/HEAD/images/rss_dashboard.png -------------------------------------------------------------------------------- /images/settings_main.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Heavrnl/TelegramForwarder/HEAD/images/settings_main.png -------------------------------------------------------------------------------- /images/settings_media.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Heavrnl/TelegramForwarder/HEAD/images/settings_media.png -------------------------------------------------------------------------------- /images/settings_other.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Heavrnl/TelegramForwarder/HEAD/images/settings_other.png -------------------------------------------------------------------------------- /images/settings_push.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Heavrnl/TelegramForwarder/HEAD/images/settings_push.png 
-------------------------------------------------------------------------------- /images/rss_create_config.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Heavrnl/TelegramForwarder/HEAD/images/rss_create_config.png -------------------------------------------------------------------------------- /images/logo/png/logo-title.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Heavrnl/TelegramForwarder/HEAD/images/logo/png/logo-title.png -------------------------------------------------------------------------------- /images/settings_media_sub1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Heavrnl/TelegramForwarder/HEAD/images/settings_media_sub1.png -------------------------------------------------------------------------------- /images/settings_push_sub1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Heavrnl/TelegramForwarder/HEAD/images/settings_push_sub1.png -------------------------------------------------------------------------------- /images/Fluent_Reader_rrt59DN9LZ.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Heavrnl/TelegramForwarder/HEAD/images/Fluent_Reader_rrt59DN9LZ.png -------------------------------------------------------------------------------- /rss/app/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | TG Forwarder RSS Application 3 | """ 4 | 5 | from fastapi import FastAPI 6 | from fastapi.templating import Jinja2Templates 7 | from .routes.auth import router as auth_router 8 | 9 | app = FastAPI(title="TG Forwarder RSS") 10 | 11 | 12 | # 注册路由 13 | app.include_router(auth_router) 14 | 15 | # 模板配置 16 | templates = Jinja2Templates(directory="rss/app/templates") -------------------------------------------------------------------------------- /ai/grok_provider.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, List, Dict 2 | from openai import AsyncOpenAI 3 | from .openai_base_provider import OpenAIBaseProvider 4 | import os 5 | import logging 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | class GrokProvider(OpenAIBaseProvider): 10 | def __init__(self): 11 | super().__init__( 12 | env_prefix='GROK', 13 | default_model='grok-2-latest', 14 | default_api_base='https://api.x.ai/v1' 15 | ) -------------------------------------------------------------------------------- /ai/deepseek_provider.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, List, Dict 2 | from openai import AsyncOpenAI 3 | from .openai_base_provider import OpenAIBaseProvider 4 | import os 5 | import logging 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | class DeepSeekProvider(OpenAIBaseProvider): 10 | def __init__(self): 11 | super().__init__( 12 | env_prefix='DEEPSEEK', 13 | default_model='deepseek-chat', 14 | default_api_base='https://api.deepseek.com/v1' 15 | ) 16 | -------------------------------------------------------------------------------- /ai/qwen_provider.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, List, Dict 2 | from openai import AsyncOpenAI 3 | from .openai_base_provider import 
OpenAIBaseProvider 4 | import os 5 | import logging 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | class QwenProvider(OpenAIBaseProvider): 10 | def __init__(self): 11 | super().__init__( 12 | env_prefix='QWEN', 13 | default_model='qwen-plus', 14 | default_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1' 15 | ) -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # 环境变量文件 2 | .env 3 | 4 | # 数据库文件 5 | *.db 6 | 7 | # Python 8 | __pycache__/ 9 | *.py[cod] 10 | *$py.class 11 | 12 | # 虚拟环境 13 | venv/ 14 | env/ 15 | ENV/ 16 | 17 | # Telethon session 文件 18 | *.session 19 | *.session-journal 20 | /.idea 21 | /example 22 | /config 23 | /ufb/.idea 24 | ufb/config/config.json 25 | /images/logo/svg 26 | /test 27 | db/forward.db1 28 | handlers/bot_handler copy.py 29 | /temp 30 | 使用场景示例.md 31 | /rss/media 32 | /rss/data 33 | 34 | /logs 35 | logs/telegram_forwarder.log 36 | -------------------------------------------------------------------------------- /enums/enums.py: -------------------------------------------------------------------------------- 1 | import enum 2 | 3 | # 四个模式,仅黑名单,仅白名单,先黑名单后白名单,先白名单后黑名单 4 | class ForwardMode(enum.Enum): 5 | WHITELIST = 'whitelist' 6 | BLACKLIST = 'blacklist' 7 | BLACKLIST_THEN_WHITELIST = 'blacklist_then_whitelist' 8 | WHITELIST_THEN_BLACKLIST = 'whitelist_then_blacklist' 9 | 10 | 11 | class PreviewMode(enum.Enum): 12 | ON = 'on' 13 | OFF = 'off' 14 | FOLLOW = 'follow' # 跟随原消息的预览设置 15 | 16 | class MessageMode(enum.Enum): 17 | MARKDOWN = 'Markdown' 18 | HTML = 'HTML' 19 | 20 | class AddMode(enum.Enum): 21 | WHITELIST = 'whitelist' 22 | BLACKLIST = 'blacklist' 23 | 24 | class HandleMode(enum.Enum): 25 | FORWARD = 'FORWARD' 26 | EDIT = 'EDIT' -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.11-slim 2 | 3 | # 设置工作目录 4 | WORKDIR /app 5 | 6 | # 设置Docker日志配置 7 | ENV DOCKER_LOG_MAX_SIZE=10m 8 | ENV DOCKER_LOG_MAX_FILE=3 9 | 10 | # 安装系统依赖 11 | RUN apt-get update && apt-get install -y \ 12 | tzdata \ 13 | && ln -fs /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \ 14 | && dpkg-reconfigure -f noninteractive tzdata \ 15 | && apt-get install -y \ 16 | gcc \ 17 | python3-dev \ 18 | && rm -rf /var/lib/apt/lists/* 19 | 20 | # 复制依赖文件并安装 21 | COPY requirements.txt . 22 | RUN pip install --no-cache-dir -r requirements.txt 23 | 24 | # 创建临时文件目录 25 | RUN mkdir -p /app/temp 26 | 27 | # 复制应用代码 28 | COPY . . 
29 | 30 | # 设置环境变量 31 | ENV PYTHONUNBUFFERED=1 32 | 33 | # 启动命令 34 | CMD ["python", "main.py"] -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # 忽略环境变量文件 2 | .env 3 | .env.example 4 | 5 | # 忽略数据库文件 6 | *.db 7 | db/forward.db1 8 | 9 | # 忽略 Python 生成的缓存文件 10 | **/__pycache__/ 11 | *.py[cod] 12 | *$py.class 13 | 14 | # 忽略虚拟环境 15 | **/venv/ 16 | **/env/ 17 | **/ENV/ 18 | 19 | # 忽略 Telethon 会话文件 20 | *.session 21 | *.session-journal 22 | 23 | # 忽略 IDE 配置文件 24 | .idea/ 25 | ufb/.idea 26 | 27 | # 忽略示例和临时配置文件 28 | /example 29 | /config/* 30 | ufb/config/* 31 | 32 | # 忽略无用的图片和测试目录 33 | **/test/ 34 | **/images/ 35 | 36 | # 忽略 RSS 相关数据和临时文件 37 | /rss/media/* 38 | /rss/data/* 39 | 40 | # 忽略日志文件 41 | logs/* 42 | 43 | # 忽略临时文件夹 44 | /temp/* 45 | 46 | # 额外忽略 `.git` 和 Docker 相关文件,防止意外复制 47 | .git 48 | .gitignore 49 | .dockerignore 50 | Dockerfile 51 | docker-compose.yml 52 | .github 53 | .gitattributes 54 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | telegram-forwarder: 3 | image: heavrnl/telegramforwarder:latest 4 | container_name: telegram-forwarder 5 | # 如果需要使用 RSS 功能,请取消以下注释 6 | # ports: 7 | # - 9804:8000 8 | restart: unless-stopped 9 | volumes: 10 | - ./db:/app/db 11 | - ./.env:/app/.env 12 | - ./logs:/app/logs 13 | - ./sessions:/app/sessions 14 | - ./temp:/app/temp 15 | - ./ufb/config:/app/ufb/config 16 | - ./config:/app/config 17 | - ./rss/data:/app/rss/data 18 | - ./rss/media:/app/rss/media 19 | logging: 20 | driver: "json-file" 21 | options: 22 | max-size: "10m" 23 | max-file: "3" 24 | compress: "true" 25 | stdin_open: true 26 | tty: true 27 | 28 | -------------------------------------------------------------------------------- /version.py: -------------------------------------------------------------------------------- 1 | VERSION = "1.7.2" 2 | 3 | # 版本号说明 4 | VERSION_INFO = { 5 | "major": 1, # 主版本号:重大更新,可能不兼容旧版本 6 | "feature": 7, # 功能版本号:添加重要新功能 7 | "minor": 2, # 次要版本号:添加小功能或优化 8 | "patch": 0, # 补丁版本号:Bug修复和小改动 9 | } 10 | 11 | 12 | UPDATE_INFO = """
✨ 更新日志 v1.7.2 13 | 14 | - 提高AI总结的健壮性 @iCross https://github.com/Heavrnl/TelegramForwarder/pull/47 15 | 16 | 17 | """ 18 | 19 | 20 | WELCOME_TEXT = """ 21 | 🎉 欢迎使用 TelegramForwarder ! 22 | 23 | 如果您觉得这个项目对您有帮助,欢迎通过以下方式支持我: 24 | 25 |
⭐ 给项目点个小小的 Star: TelegramForwarder 26 | ☕ 请我喝杯咖啡: Ko-fi27 | 28 | 当前版本: v1.7.2 29 | 更新日志: /changelog 30 | 31 | 感谢您的支持! 32 | """ -------------------------------------------------------------------------------- /.github/workflows/close-inactive-issues.yml: -------------------------------------------------------------------------------- 1 | name: Close inactive issues 2 | on: 3 | schedule: 4 | - cron: "30 1 * * *" 5 | 6 | jobs: 7 | close-issues: 8 | runs-on: ubuntu-latest 9 | permissions: 10 | issues: write 11 | pull-requests: write 12 | steps: 13 | - uses: actions/stale@v9 14 | with: 15 | days-before-issue-stale: 3 16 | days-before-issue-close: 5 17 | stale-issue-label: "stale" 18 | stale-issue-message: "This issue is stale because it has been open for 5 days with no activity." 19 | close-issue-message: "This issue was closed because it has been inactive for 8 days since being marked as stale." 20 | days-before-pr-stale: -1 21 | days-before-pr-close: -1 22 | repo-token: ${{ secrets.GITHUB_TOKEN }} 23 | -------------------------------------------------------------------------------- /utils/log_config.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | from pathlib import Path 4 | from dotenv import load_dotenv 5 | 6 | def setup_logging(): 7 | """ 8 | 配置日志系统,将所有日志输出到标准输出, 9 | 由Docker收集并管理日志 10 | """ 11 | # 加载环境变量 12 | load_dotenv() 13 | 14 | # 创建根日志记录器 15 | root_logger = logging.getLogger() 16 | 17 | # 设置日志级别 - 默认使用INFO级别 18 | root_logger.setLevel(logging.INFO) 19 | 20 | # 创建一个处理器,用于将日志输出到控制台 21 | console_handler = logging.StreamHandler() 22 | 23 | # 创建格式化器 24 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 25 | 26 | # 将格式化器添加到处理器 27 | console_handler.setFormatter(formatter) 28 | 29 | # 将处理器添加到根日志记录器 30 | root_logger.addHandler(console_handler) 31 | 32 | # 返回配置的日志记录器 33 | return root_logger -------------------------------------------------------------------------------- /filters/keyword_filter.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import re 3 | from utils.common import get_sender_info,check_keywords 4 | from filters.base_filter import BaseFilter 5 | from enums.enums import ForwardMode 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | class KeywordFilter(BaseFilter): 10 | """ 11 | 关键字过滤器,检查消息是否包含指定关键字 12 | """ 13 | 14 | async def _process(self, context): 15 | """ 16 | 检查消息是否包含规则中的关键字 17 | 18 | Args: 19 | context: 消息上下文 20 | 21 | Returns: 22 | bool: 若消息应继续处理则返回True,否则返回False 23 | """ 24 | rule = context.rule 25 | message_text = context.message_text 26 | event = context.event 27 | 28 | 29 | should_forward = await check_keywords(rule, message_text, event) 30 | 31 | return should_forward 32 | 33 | -------------------------------------------------------------------------------- /ai/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Optional, Dict, Any, List 3 | 4 | class BaseAIProvider(ABC): 5 | """AI提供者的基类""" 6 | 7 | @abstractmethod 8 | async def process_message(self, 9 | message: str, 10 | prompt: Optional[str] = None, 11 | images: Optional[List[Dict[str, str]]] = None, 12 | **kwargs) -> str: 13 | """ 14 | 处理消息的抽象方法 15 | 16 | Args: 17 | message: 要处理的消息内容 18 | prompt: 可选的提示词 19 | images: 可选的图片列表,每个图片是一个字典,包含data和mime_type 20 | **kwargs: 其他参数 21 | 22 | Returns: 23 | str: 处理后的消息 24 | """ 25 | pass 26 | 27 | 
@abstractmethod 28 | async def initialize(self, **kwargs) -> None: 29 | """初始化AI提供者""" 30 | pass -------------------------------------------------------------------------------- /rss/main.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI 2 | from fastapi.staticfiles import StaticFiles 3 | from fastapi.templating import Jinja2Templates 4 | from rss.app.routes.auth import router as auth_router 5 | from rss.app.routes.rss import router as rss_router 6 | from rss.app.api.endpoints import feed 7 | import uvicorn 8 | import logging 9 | import sys 10 | import os 11 | from pathlib import Path 12 | from utils.log_config import setup_logging 13 | 14 | 15 | 16 | root_dir = Path(__file__).resolve().parent.parent 17 | sys.path.append(str(root_dir)) 18 | 19 | 20 | # 获取日志记录器 21 | logger = logging.getLogger(__name__) 22 | 23 | app = FastAPI(title="TG Forwarder RSS") 24 | 25 | # 注册路由 26 | app.include_router(auth_router) 27 | app.include_router(rss_router) 28 | app.include_router(feed.router) 29 | 30 | # 模板配置 31 | templates = Jinja2Templates(directory="rss/app/templates") 32 | 33 | def run_server(host: str = "0.0.0.0", port: int = 8000): 34 | """运行 RSS 服务器""" 35 | uvicorn.run(app, host=host, port=port) 36 | 37 | # 添加直接运行支持 38 | if __name__ == "__main__": 39 | # 只有在直接运行时才设置日志(而不是被导入时) 40 | setup_logging() 41 | run_server() -------------------------------------------------------------------------------- /rss/app/configs/title_template.json: -------------------------------------------------------------------------------- 1 | { 2 | "patterns": [ 3 | { 4 | "pattern": "^(?:#\\S+\\s*)+\\n\\s*\\n([^\\n]+)", 5 | "description": "第一行全是标签后的标题:#标签1 #标签2\\n\\n标题内容" 6 | }, 7 | { 8 | "pattern": "^#[^\\s]+\\s+\\*\\*([^\\*]+?)\\*\\*", 9 | "description": "带标签的粗体标题:#标签 **标题**" 10 | }, 11 | { 12 | "pattern": "^#[^\\s]+\\s+(.+?)(?=\\n|$)", 13 | "description": "带标签的标题:#标签 标题内容" 14 | }, 15 | { 16 | "pattern": "^\\[\\*\\*([^\\*]+?)\\*\\*\\]\\([^\\)]+?\\)", 17 | "description": "带链接的粗体标题:[**标题**](链接)" 18 | }, 19 | { 20 | "pattern": "^\\[([^\\]]+?)\\]\\([^\\)]+?\\)", 21 | "description": "带链接的标题:[标题](链接)" 22 | }, 23 | { 24 | "pattern": "^\\*\\*([^\\*]+?)\\*\\*", 25 | "description": "粗体标题:**标题**" 26 | }, 27 | { 28 | "pattern": "^【([^】]+?)】", 29 | "description": "中文方括号标题:【标题】" 30 | }, 31 | { 32 | "pattern": "^\\[([^\\]]+?)\\]", 33 | "description": "中括号标题:[标题]" 34 | }, 35 | { 36 | "pattern": "^(.+?)\\n", 37 | "description": "第一行作为标题" 38 | } 39 | ] 40 | } -------------------------------------------------------------------------------- /filters/base_filter.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from abc import ABC, abstractmethod 3 | 4 | logger = logging.getLogger(__name__) 5 | 6 | class BaseFilter(ABC): 7 | """ 8 | 基础过滤器类,定义过滤器接口 9 | """ 10 | 11 | def __init__(self, name=None): 12 | """ 13 | 初始化过滤器 14 | 15 | Args: 16 | name: 过滤器名称,如果为None则使用类名 17 | """ 18 | self.name = name or self.__class__.__name__ 19 | 20 | async def process(self, context): 21 | """ 22 | 处理消息上下文 23 | 24 | Args: 25 | context: 包含消息处理所需所有信息的上下文对象 26 | 27 | Returns: 28 | bool: 表示是否应该继续处理消息 29 | """ 30 | logger.debug(f"开始执行过滤器: {self.name}") 31 | result = await self._process(context) 32 | logger.debug(f"过滤器 {self.name} 处理结果: {'通过' if result else '不通过'}") 33 | return result 34 | 35 | @abstractmethod 36 | async def _process(self, context): 37 | """ 38 | 具体的处理逻辑,子类需要实现 39 | 40 | Args: 41 | context: 包含消息处理所需所有信息的上下文对象 42 | 43 | Returns: 44 | 
bool: 表示是否应该继续处理消息 45 | """ 46 | pass -------------------------------------------------------------------------------- /utils/media.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | 4 | logger = logging.getLogger(__name__) 5 | 6 | async def get_media_size(media): 7 | """获取媒体文件大小""" 8 | if not media: 9 | return 0 10 | 11 | try: 12 | # 对于所有类型的媒体,先尝试获取 document 13 | if hasattr(media, 'document') and media.document: 14 | return media.document.size 15 | 16 | # 对于照片,获取最大尺寸 17 | if hasattr(media, 'photo') and media.photo: 18 | # 获取最大尺寸的照片 19 | largest_photo = max(media.photo.sizes, key=lambda x: x.size if hasattr(x, 'size') else 0) 20 | return largest_photo.size if hasattr(largest_photo, 'size') else 0 21 | 22 | # 如果是其他类型,尝试直接获取 size 属性 23 | if hasattr(media, 'size'): 24 | return media.size 25 | 26 | except Exception as e: 27 | logger.error(f'获取媒体大小时出错: {str(e)}') 28 | 29 | return 0 30 | 31 | async def get_max_media_size(): 32 | """获取媒体文件大小上限""" 33 | max_media_size_str = os.getenv('MAX_MEDIA_SIZE') 34 | if not max_media_size_str: 35 | logger.error('未设置 MAX_MEDIA_SIZE 环境变量') 36 | raise ValueError('必须在 .env 文件中设置 MAX_MEDIA_SIZE') 37 | return float(max_media_size_str) * 1024 * 1024 # 转换为字节,支持小数 -------------------------------------------------------------------------------- /rss/app/core/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dotenv import load_dotenv 3 | from pathlib import Path 4 | import logging 5 | import sys 6 | from utils.constants import RSS_HOST, RSS_PORT,DEFAULT_TIMEZONE,PROJECT_NAME 7 | # 添加项目根目录到系统路径 8 | sys.path.append(str(Path(__file__).resolve().parent.parent.parent.parent)) 9 | 10 | # 导入统一的常量 11 | from utils.constants import RSS_MEDIA_DIR, RSS_MEDIA_PATH, RSS_DATA_DIR, get_rule_media_dir, get_rule_data_dir 12 | 13 | # 加载环境变量 14 | load_dotenv() 15 | 16 | class Settings: 17 | PROJECT_NAME: str = PROJECT_NAME 18 | HOST: str = RSS_HOST 19 | PORT: int = RSS_PORT 20 | TIMEZONE: str = DEFAULT_TIMEZONE 21 | # 数据存储路径 22 | BASE_DIR = Path(__file__).resolve().parent.parent.parent.parent 23 | DATA_PATH = RSS_DATA_DIR 24 | 25 | # 使用统一的媒体路径常量 26 | RSS_MEDIA_PATH = RSS_MEDIA_PATH 27 | MEDIA_PATH = RSS_MEDIA_DIR 28 | 29 | 30 | # 获取规则特定路径的方法 31 | @classmethod 32 | def get_rule_media_path(cls, rule_id): 33 | """获取指定规则的媒体目录""" 34 | return get_rule_media_dir(rule_id) 35 | 36 | @classmethod 37 | def get_rule_data_path(cls, rule_id): 38 | """获取指定规则的数据目录""" 39 | return get_rule_data_dir(rule_id) 40 | 41 | # 确保目录存在 42 | def __init__(self): 43 | os.makedirs(self.DATA_PATH, exist_ok=True) 44 | os.makedirs(self.MEDIA_PATH, exist_ok=True) 45 | logger = logging.getLogger(__name__) 46 | logger.info(f"RSS数据路径: {self.DATA_PATH}") 47 | logger.info(f"RSS媒体路径: {self.MEDIA_PATH}") 48 | 49 | settings = Settings() -------------------------------------------------------------------------------- /filters/filter_chain.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from filters.base_filter import BaseFilter 3 | from filters.context import MessageContext 4 | 5 | logger = logging.getLogger(__name__) 6 | 7 | class FilterChain: 8 | """ 9 | 过滤器链,用于组织和执行多个过滤器 10 | """ 11 | 12 | def __init__(self): 13 | """初始化过滤器链""" 14 | self.filters = [] 15 | 16 | def add_filter(self, filter_obj): 17 | """ 18 | 添加过滤器到链中 19 | 20 | Args: 21 | filter_obj: 要添加的过滤器对象,必须是BaseFilter的子类 22 | """ 23 | if not isinstance(filter_obj, BaseFilter): 24 | 
raise TypeError("过滤器必须是BaseFilter的子类") 25 | self.filters.append(filter_obj) 26 | return self 27 | 28 | async def process(self, client, event, chat_id, rule): 29 | """ 30 | 处理消息 31 | 32 | Args: 33 | client: 机器人客户端 34 | event: 消息事件 35 | chat_id: 聊天ID 36 | rule: 转发规则 37 | 38 | Returns: 39 | bool: 表示处理是否成功 40 | """ 41 | # 创建消息上下文 42 | context = MessageContext(client, event, chat_id, rule) 43 | 44 | logger.info(f"开始过滤器链处理,共 {len(self.filters)} 个过滤器") 45 | 46 | # 依次执行每个过滤器 47 | for filter_obj in self.filters: 48 | try: 49 | should_continue = await filter_obj.process(context) 50 | if not should_continue: 51 | logger.info(f"过滤器 {filter_obj.name} 中断了处理链") 52 | return False 53 | except Exception as e: 54 | logger.error(f"过滤器 {filter_obj.name} 处理出错: {str(e)}") 55 | context.errors.append(f"过滤器 {filter_obj.name} 错误: {str(e)}") 56 | return False 57 | 58 | logger.info("过滤器链处理完成") 59 | return True -------------------------------------------------------------------------------- /ai/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import BaseAIProvider 2 | from .openai_provider import OpenAIProvider 3 | from .gemini_provider import GeminiProvider 4 | from .deepseek_provider import DeepSeekProvider 5 | from .qwen_provider import QwenProvider 6 | from .grok_provider import GrokProvider 7 | from .claude_provider import ClaudeProvider 8 | import os 9 | import logging 10 | from utils.settings import load_ai_models 11 | from utils.constants import DEFAULT_AI_MODEL 12 | 13 | # 获取日志记录器 14 | logger = logging.getLogger(__name__) 15 | 16 | async def get_ai_provider(model=None): 17 | """获取AI提供者实例""" 18 | if not model: 19 | model = DEFAULT_AI_MODEL 20 | 21 | # 加载提供商配置(使用dict格式) 22 | providers_config = load_ai_models(type="dict") 23 | 24 | # 根据模型名称选择对应的提供者 25 | provider = None 26 | 27 | # 遍历配置中的每个提供商 28 | for provider_name, models_list in providers_config.items(): 29 | # 检查完全匹配 30 | if model in models_list: 31 | if provider_name == "openai": 32 | provider = OpenAIProvider() 33 | elif provider_name == "gemini": 34 | provider = GeminiProvider() 35 | elif provider_name == "deepseek": 36 | provider = DeepSeekProvider() 37 | elif provider_name == "qwen": 38 | provider = QwenProvider() 39 | elif provider_name == "grok": 40 | provider = GrokProvider() 41 | elif provider_name == "claude": 42 | provider = ClaudeProvider() 43 | break 44 | 45 | if not provider: 46 | raise ValueError(f"不支持的模型: {model}") 47 | 48 | return provider 49 | 50 | 51 | __all__ = [ 52 | 'BaseAIProvider', 53 | 'OpenAIProvider', 54 | 'GeminiProvider', 55 | 'DeepSeekProvider', 56 | 'QwenProvider', 57 | 'GrokProvider', 58 | 'ClaudeProvider', 59 | 'get_ai_provider' 60 | ] -------------------------------------------------------------------------------- /filters/context.py: -------------------------------------------------------------------------------- 1 | import copy 2 | 3 | class MessageContext: 4 | """ 5 | 消息上下文类,包含处理消息所需的所有信息 6 | """ 7 | 8 | def __init__(self, client, event, chat_id, rule): 9 | """ 10 | 初始化消息上下文 11 | 12 | Args: 13 | client: 机器人客户端 14 | event: 消息事件 15 | chat_id: 聊天ID 16 | rule: 转发规则 17 | """ 18 | self.client = client 19 | self.event = event 20 | self.chat_id = chat_id 21 | self.rule = rule 22 | 23 | # 初始消息文本,保持不变用于引用 24 | self.original_message_text = event.message.text or '' 25 | 26 | # 当前处理的消息文本 27 | self.message_text = event.message.text or '' 28 | 29 | # 用于检查的消息文本(可能包含发送者信息等) 30 | self.check_message_text = event.message.text or '' 31 | 32 | # 记录处理过程中的媒体文件 33 | self.media_files = [] 
34 | 35 | # 记录发送者信息 36 | self.sender_info = '' 37 | 38 | # 记录时间信息 39 | self.time_info = '' 40 | 41 | # 原始链接 42 | self.original_link = '' 43 | 44 | # 按钮 45 | self.buttons = event.message.buttons if hasattr(event.message, 'buttons') else None 46 | 47 | # 是否继续处理 48 | self.should_forward = True 49 | 50 | # 用于记录媒体组消息 51 | self.is_media_group = event.message.grouped_id is not None 52 | self.media_group_id = event.message.grouped_id 53 | self.media_group_messages = [] 54 | 55 | # 用于跟踪被跳过的超大媒体 56 | self.skipped_media = [] 57 | 58 | # 记录任何可能的错误 59 | self.errors = [] 60 | 61 | # 记录已转发的消息 62 | self.forwarded_messages = [] 63 | 64 | # 评论区链接 65 | self.comment_link = None 66 | 67 | def clone(self): 68 | """创建上下文的副本""" 69 | return copy.deepcopy(self) -------------------------------------------------------------------------------- /rss/app/models/entry.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel, Field 2 | from typing import List, Dict, Any, Optional 3 | from datetime import datetime 4 | 5 | class Media(BaseModel): 6 | """媒体文件信息""" 7 | url: str 8 | type: str 9 | size: int = 0 10 | filename: str 11 | original_name: Optional[str] = None 12 | 13 | def get(self, key: str, default: Any = None) -> Any: 14 | """获取属性值,如果不存在返回默认值""" 15 | return getattr(self, key, default) 16 | 17 | class Entry(BaseModel): 18 | """RSS条目数据模型""" 19 | id: Optional[str] = None 20 | rule_id: int 21 | message_id: str 22 | title: str 23 | content: str 24 | published: str # ISO格式的日期时间字符串 25 | author: str = "" 26 | link: str = "" 27 | media: List[Media] = [] 28 | created_at: Optional[str] = None # 添加到系统的时间 29 | original_link: Optional[str] = None 30 | sender_info: Optional[str] = None 31 | 32 | 33 | def __init__(self, **data): 34 | # 处理媒体数据,确保它是Media对象列表 35 | if "media" in data and isinstance(data["media"], list): 36 | media_list = [] 37 | for item in data["media"]: 38 | try: 39 | if isinstance(item, dict): 40 | media_list.append(Media(**item)) 41 | elif not isinstance(item, Media): 42 | # 尝试转换为字典 43 | if hasattr(item, '__dict__'): 44 | media_list.append(Media(**item.__dict__)) 45 | else: 46 | media_list.append(item) 47 | except Exception as e: 48 | # 忽略无法转换的媒体项 49 | pass 50 | data["media"] = media_list 51 | 52 | # 确保必要字段有默认值 53 | if "message_id" not in data and "id" in data: 54 | data["message_id"] = data["id"] 55 | 56 | # 调用父类初始化 57 | super().__init__(**data) -------------------------------------------------------------------------------- /handlers/list_handlers.py: -------------------------------------------------------------------------------- 1 | from handlers.button.button_helpers import * 2 | from utils.auto_delete import reply_and_delete 3 | 4 | async def show_list(event, command, items, formatter, title, page=1): 5 | """显示分页列表""" 6 | 7 | # KEYWORDS_PER_PAGE 8 | PAGE_SIZE = KEYWORDS_PER_PAGE 9 | total_items = len(items) 10 | total_pages = (total_items + PAGE_SIZE - 1) // PAGE_SIZE 11 | 12 | if not items: 13 | try: 14 | return await event.edit(f'没有找到任何{title}') 15 | except: 16 | return await reply_and_delete(event,f'没有找到任何{title}') 17 | 18 | # 获取当前页的项目 19 | start = (page - 1) * PAGE_SIZE 20 | end = min(start + PAGE_SIZE, total_items) 21 | current_items = items[start:end] 22 | 23 | # 格式化列表项 24 | item_list = [] 25 | for i, item in enumerate(current_items): 26 | formatted_item = formatter(i + start + 1, item) 27 | # 如果是关键字列表,给关键字添加反引号 28 | if command == 'keyword': 29 | # 分割序号和关键字内容 30 | parts = formatted_item.split('. 
', 1) 31 | if len(parts) == 2: 32 | number = parts[0] 33 | content = parts[1] 34 | # 如果是正则表达式,在关键字部分添加反引号 35 | if ' (正则)' in content: 36 | keyword, regex_mark = content.split(' (正则)') 37 | formatted_item = f'{number}. `{keyword}` (正则)' 38 | else: 39 | formatted_item = f'{number}. `{content}`' 40 | item_list.append(formatted_item) 41 | 42 | # 创建分页按钮 43 | buttons = await create_list_buttons(total_pages, page, command) 44 | 45 | # 构建消息文本 46 | text = f'{title}\n{chr(10).join(item_list)}' 47 | if len(text) > 4096: # Telegram消息长度限制 48 | text = text[:4093] + '...' 49 | 50 | try: 51 | return await event.edit(text, buttons=buttons, parse_mode='markdown') 52 | except: 53 | return await reply_and_delete(event,text, buttons=buttons, parse_mode='markdown') 54 | 55 | -------------------------------------------------------------------------------- /filters/delete_original_filter.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from filters.base_filter import BaseFilter 3 | from utils.common import get_main_module 4 | 5 | logger = logging.getLogger(__name__) 6 | 7 | class DeleteOriginalFilter(BaseFilter): 8 | """ 9 | 删除原始消息过滤器,处理转发后是否要删除原始消息 10 | """ 11 | 12 | async def _process(self, context): 13 | """ 14 | 处理是否删除原始消息 15 | 16 | Args: 17 | context: 消息上下文 18 | 19 | Returns: 20 | bool: 是否继续处理 21 | """ 22 | rule = context.rule 23 | event = context.event 24 | 25 | # 如果不需要删除原始消息,直接返回 26 | if not rule.is_delete_original: 27 | return True 28 | 29 | try: 30 | # 获取 main.py 中的用户客户端 31 | main = await get_main_module() 32 | user_client = main.user_client # 获取用户客户端 33 | 34 | # 媒体组消息 35 | if event.message.grouped_id: 36 | # 使用用户客户端获取并删除媒体组消息 37 | async for message in user_client.iter_messages( 38 | event.chat_id, 39 | min_id=event.message.id - 10, 40 | max_id=event.message.id + 10, 41 | reverse=True 42 | ): 43 | if message.grouped_id == event.message.grouped_id: 44 | await message.delete() 45 | logger.info(f'已删除媒体组消息 ID: {message.id}') 46 | else: 47 | # 单条消息的删除逻辑 48 | message = await user_client.get_messages(event.chat_id, ids=event.message.id) 49 | await message.delete() 50 | logger.info(f'已删除原始消息 ID: {event.message.id}') 51 | 52 | return True 53 | except Exception as e: 54 | logger.error(f'删除原始消息时出错: {str(e)}') 55 | context.errors.append(f"删除原始消息错误: {str(e)}") 56 | return True # 即使删除失败,也继续处理 -------------------------------------------------------------------------------- /managers/state_manager.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Dict, Tuple, Optional, Union 3 | from telethon.tl.custom import Message 4 | 5 | logger = logging.getLogger(__name__) 6 | 7 | class StateManager: 8 | def __init__(self): 9 | self._states: Dict[Tuple[int, int], Tuple[str, Optional[Message], Optional[str]]] = {} 10 | logger.info("StateManager 初始化") 11 | 12 | def set_state(self, user_id: int, chat_id: int, state: str, message: Optional[Message] = None, state_type: Optional[str] = None) -> None: 13 | """设置用户状态""" 14 | key = (user_id, chat_id) 15 | self._states[key] = (state, message, state_type) 16 | logger.info(f"设置状态 - key: {key}, state: {state}, type: {state_type}") 17 | logger.debug(f"当前所有状态: {self._states}") # 改为 debug 级别 18 | 19 | def get_state(self, user_id: int, chat_id: int) -> Union[Tuple[str, Optional[Message], Optional[str]], Tuple[None, None, None]]: 20 | """获取用户状态""" 21 | key = (user_id, chat_id) 22 | state_data = self._states.get(key) 23 | if state_data: # 只在状态存在时记录日志 24 | if 
len(state_data) == 3: # 兼容新格式 25 | state, message, state_type = state_data 26 | logger.info(f"获取状态 - key: {key}, state: {state}, type: {state_type}") 27 | else: # 兼容旧格式 28 | state, message = state_data 29 | state_type = None 30 | logger.info(f"获取状态 - key: {key}, state: {state}, type: None (旧格式)") 31 | return state, message, state_type 32 | return None, None, None 33 | 34 | def clear_state(self, user_id: int, chat_id: int) -> None: 35 | """清除用户状态""" 36 | key = (user_id, chat_id) 37 | if key in self._states: 38 | del self._states[key] 39 | logger.info(f"清除状态 - key: {key}") 40 | logger.debug(f"当前所有状态: {self._states}") # 改为 debug 级别 41 | 42 | def check_state(self) -> bool: 43 | """检查是否存在状态""" 44 | return bool(self._states) 45 | 46 | # 创建全局实例 47 | state_manager = StateManager() 48 | logger.info("StateManager 全局实例已创建") -------------------------------------------------------------------------------- /filters/init_filter.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import pytz 4 | import asyncio 5 | from utils.constants import TEMP_DIR 6 | from utils.media import get_max_media_size 7 | 8 | from filters.base_filter import BaseFilter 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | class InitFilter(BaseFilter): 13 | """ 14 | 初始化过滤器,为context添加基本信息 15 | """ 16 | 17 | async def _process(self, context): 18 | """ 19 | 添加原始链接和发送者信息 20 | 21 | Args: 22 | context: 消息上下文 23 | 24 | Returns: 25 | bool: 是否继续处理 26 | """ 27 | rule = context.rule 28 | event = context.event 29 | 30 | # logger.info(f"InitFilter处理消息前,context: {context.__dict__}") 31 | try: 32 | #处理媒体组消息 33 | if event.message.grouped_id: 34 | # 等待更长时间让所有媒体消息到达 35 | # await asyncio.sleep(1) 36 | 37 | # 收集媒体组的所有消息 38 | try: 39 | async for message in event.client.iter_messages( 40 | event.chat_id, 41 | limit=20, 42 | min_id=event.message.id - 10, 43 | max_id=event.message.id + 10 44 | ): 45 | if message.grouped_id == event.message.grouped_id: 46 | if message.text: 47 | # 保存第一条消息的文本和按钮 48 | context.message_text = message.text or '' 49 | context.original_message_text = message.text or '' 50 | context.check_message_text = message.text or '' 51 | context.buttons = message.buttons if hasattr(message, 'buttons') else None 52 | logger.info(f'获取到媒体组文本并添加到context: {message.text}') 53 | 54 | except Exception as e: 55 | logger.error(f'收集媒体组消息时出错: {str(e)}') 56 | context.errors.append(f"收集媒体组消息错误: {str(e)}") 57 | 58 | finally: 59 | # logger.info(f"InitFilter处理消息后,context: {context.__dict__}") 60 | return True 61 | -------------------------------------------------------------------------------- /filters/reply_filter.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import asyncio 3 | from telethon import Button 4 | from filters.base_filter import BaseFilter 5 | from utils.common import get_main_module 6 | import traceback 7 | logger = logging.getLogger(__name__) 8 | 9 | class ReplyFilter(BaseFilter): 10 | """ 11 | 回复过滤器,用于处理媒体组消息的评论区按钮 12 | 由于媒体组消息无法直接添加按钮,此过滤器会使用bot回复已转发的消息,并添加评论区按钮 13 | """ 14 | 15 | async def _process(self, context): 16 | """ 17 | 处理媒体组消息的评论区按钮 18 | 19 | Args: 20 | context: 消息上下文 21 | 22 | Returns: 23 | bool: 是否继续处理 24 | """ 25 | try: 26 | # 如果规则不存在或未启用评论按钮功能,直接跳过 27 | if not context.rule or not context.rule.enable_comment_button: 28 | return True 29 | 30 | # 只处理媒体组消息 31 | if not context.is_media_group: 32 | return True 33 | 34 | # 检查是否有评论区链接和已转发的消息 35 | if not context.comment_link or not 
context.forwarded_messages: 36 | logger.info("没有评论区链接或已转发消息,无法添加评论区按钮回复") 37 | return True 38 | 39 | # 使用bot客户端(context.client) 40 | client = context.client 41 | 42 | # 获取目标聊天信息 43 | rule = context.rule 44 | target_chat = rule.target_chat 45 | target_chat_id = int(target_chat.telegram_chat_id) 46 | 47 | # 获取已转发的第一条消息ID 48 | first_forwarded_msg = context.forwarded_messages[0] 49 | 50 | # 创建评论区按钮 51 | comment_button = Button.url("💬 查看评论区", context.comment_link) 52 | buttons = [[comment_button]] 53 | 54 | # 回复已转发的媒体组消息 55 | logger.info(f"正在使用Bot给已转发的媒体组消息 {first_forwarded_msg.id} 发送评论区按钮回复") 56 | 57 | # 发送回复消息,附带评论区按钮 58 | await client.send_message( 59 | entity=target_chat_id, 60 | message="💬 评论区", 61 | buttons=buttons, 62 | reply_to=first_forwarded_msg.id, 63 | ) 64 | logger.info("成功发送评论区按钮回复") 65 | 66 | return True 67 | 68 | except Exception as e: 69 | logger.error(f"ReplyFilter处理消息时出错: {str(e)}") 70 | 71 | logger.error(traceback.format_exc()) 72 | return True -------------------------------------------------------------------------------- /ai/openai_provider.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, List, Dict 2 | from openai import AsyncOpenAI 3 | from .base import BaseAIProvider 4 | import os 5 | import logging 6 | from .openai_base_provider import OpenAIBaseProvider 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | class OpenAIProvider(OpenAIBaseProvider): 11 | def __init__(self): 12 | super().__init__( 13 | env_prefix='OPENAI', 14 | default_model='gpt-4o-mini', 15 | default_api_base='https://api.openai.com/v1' 16 | ) 17 | 18 | async def process_message(self, 19 | message: str, 20 | prompt: Optional[str] = None, 21 | images: Optional[List[Dict[str, str]]] = None, 22 | **kwargs) -> str: 23 | """处理消息""" 24 | try: 25 | if not self.client: 26 | await self.initialize(**kwargs) 27 | 28 | messages = [] 29 | if prompt: 30 | messages.append({"role": "system", "content": prompt}) 31 | 32 | # 如果有图片,需要添加到消息中 33 | if images and len(images) > 0: 34 | # 创建包含文本和图片的内容数组 35 | content = [] 36 | 37 | # 添加文本 38 | content.append({ 39 | "type": "text", 40 | "text": message 41 | }) 42 | 43 | # 添加每张图片 44 | for img in images: 45 | content.append({ 46 | "type": "image_url", 47 | "image_url": { 48 | "url": f"data:{img['mime_type']};base64,{img['data']}" 49 | } 50 | }) 51 | 52 | messages.append({"role": "user", "content": content}) 53 | else: 54 | # 没有图片,只添加文本 55 | messages.append({"role": "user", "content": message}) 56 | 57 | response = await self.client.chat.completions.create( 58 | model=self.model, 59 | messages=messages 60 | ) 61 | 62 | return response.choices[0].message.content 63 | 64 | except Exception as e: 65 | logger.error(f"OpenAI处理消息时出错: {str(e)}", exc_info=True) 66 | return f"AI处理失败: {str(e)}" -------------------------------------------------------------------------------- /filters/process.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from filters.filter_chain import FilterChain 3 | from filters.keyword_filter import KeywordFilter 4 | from filters.replace_filter import ReplaceFilter 5 | from filters.ai_filter import AIFilter 6 | from filters.info_filter import InfoFilter 7 | from filters.media_filter import MediaFilter 8 | from filters.sender_filter import SenderFilter 9 | from filters.delete_original_filter import DeleteOriginalFilter 10 | from filters.delay_filter import DelayFilter 11 | from filters.edit_filter import EditFilter 12 | from 
filters.comment_button_filter import CommentButtonFilter 13 | from filters.init_filter import InitFilter 14 | from filters.reply_filter import ReplyFilter 15 | from filters.rss_filter import RSSFilter 16 | from filters.push_filter import PushFilter 17 | logger = logging.getLogger(__name__) 18 | 19 | async def process_forward_rule(client, event, chat_id, rule): 20 | """ 21 | 处理转发规则 22 | 23 | Args: 24 | client: 机器人客户端 25 | event: 消息事件 26 | chat_id: 聊天ID 27 | rule: 转发规则 28 | 29 | Returns: 30 | bool: 处理是否成功 31 | """ 32 | logger.info(f'使用过滤器链处理规则 ID: {rule.id}') 33 | 34 | # 创建过滤器链 35 | filter_chain = FilterChain() 36 | 37 | # 添加初始化过滤器 38 | filter_chain.add_filter(InitFilter()) 39 | 40 | # 延迟处理过滤器(如果启用了延迟处理) 41 | filter_chain.add_filter(DelayFilter()) 42 | 43 | # 添加关键字过滤器(如果消息不匹配关键字,会中断处理链) 44 | filter_chain.add_filter(KeywordFilter()) 45 | 46 | # 添加替换过滤器 47 | filter_chain.add_filter(ReplaceFilter()) 48 | 49 | # 添加媒体过滤器(处理媒体内容) 50 | filter_chain.add_filter(MediaFilter()) 51 | 52 | # 添加AI处理过滤器(如果启用了AI处理后的关键字检查,可能会中断处理链) 53 | filter_chain.add_filter(AIFilter()) 54 | 55 | # 添加信息过滤器(处理原始链接和发送者信息) 56 | filter_chain.add_filter(InfoFilter()) 57 | 58 | # 添加评论区按钮过滤器 59 | filter_chain.add_filter(CommentButtonFilter()) 60 | 61 | # 添加RSS过滤器 62 | filter_chain.add_filter(RSSFilter()) 63 | 64 | # 添加编辑过滤器(编辑原始消息) 65 | filter_chain.add_filter(EditFilter()) 66 | 67 | # 添加发送过滤器(发送消息) 68 | filter_chain.add_filter(SenderFilter()) 69 | 70 | # 添加回复过滤器(处理媒体组消息的评论区按钮) 71 | filter_chain.add_filter(ReplyFilter()) 72 | 73 | # 添加推送过滤器 74 | filter_chain.add_filter(PushFilter()) 75 | 76 | # 添加删除原始消息过滤器(最后执行) 77 | filter_chain.add_filter(DeleteOriginalFilter()) 78 | 79 | # 执行过滤器链 80 | result = await filter_chain.process(client, event, chat_id, rule) 81 | 82 | return result 83 | -------------------------------------------------------------------------------- /filters/replace_filter.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import re 3 | from filters.base_filter import BaseFilter 4 | 5 | logger = logging.getLogger(__name__) 6 | 7 | class ReplaceFilter(BaseFilter): 8 | """ 9 | 替换过滤器,根据规则替换消息文本 10 | """ 11 | 12 | async def _process(self, context): 13 | """ 14 | 处理消息文本替换 15 | 16 | Args: 17 | context: 消息上下文 18 | 19 | Returns: 20 | bool: 是否继续处理 21 | """ 22 | rule = context.rule 23 | message_text = context.message_text 24 | 25 | #打印context的所有属性 26 | # logger.info(f"ReplaceFilter处理消息前,context: {context.__dict__}") 27 | # 如果不需要替换,直接返回 28 | if not rule.is_replace or not message_text: 29 | return True 30 | 31 | try: 32 | # 应用所有替换规则 33 | for replace_rule in rule.replace_rules: 34 | if replace_rule.pattern == '.*': 35 | # 全文替换 36 | logger.info(f'执行全文替换:\n原文: "{message_text}"\n替换为: "{replace_rule.content or ""}"') 37 | message_text = replace_rule.content or '' 38 | break # 如果是全文替换,就不继续处理其他规则 39 | else: 40 | try: 41 | # 正则替换 42 | old_text = message_text 43 | matches = re.finditer(replace_rule.pattern, message_text) 44 | message_text = re.sub( 45 | replace_rule.pattern, 46 | replace_rule.content or '', 47 | message_text 48 | ) 49 | if old_text != message_text: 50 | matched_texts = [m.group(0) for m in matches] 51 | logger.info(f'执行部分替换:\n原文: "{old_text}"\n匹配内容: {matched_texts}\n替换规则: "{replace_rule.pattern}" -> "{replace_rule.content}"\n替换后: "{message_text}"') 52 | except re.error as e: 53 | logger.error(f'替换规则格式错误: {replace_rule.pattern}, 错误: {str(e)}') 54 | 55 | # 更新上下文中的消息文本 56 | context.message_text = message_text 57 | context.check_message_text = message_text 58 | 59 | 
return True 60 | except Exception as e: 61 | logger.error(f'应用替换规则时出错: {str(e)}') 62 | context.errors.append(f"替换规则错误: {str(e)}") 63 | return True # 即使替换出错,仍然继续处理 64 | finally: 65 | # logger.info(f"ReplaceFilter处理消息后,context: {context.__dict__}") 66 | pass -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | ######### 必填项 ######### 2 | # Telegram API 配置 (从 https://my.telegram.org/apps 获取) 3 | API_ID= 4 | API_HASH= 5 | 6 | # 用户账号登录用的手机号 (格式如: +8613812345678) 7 | PHONE_NUMBER= 8 | 9 | # Bot Token 10 | BOT_TOKEN= 11 | 12 | # 用户ID (从 @userinfobot 获取) 13 | USER_ID= 14 | 15 | 16 | 17 | ################ 以下均为可选项 ################## 18 | 19 | # 管理员列表(此处填user_id,留空默认上方的USER_ID,多个用户用逗号分隔) 20 | ADMINS= 21 | 22 | # bot消息删除时间 (秒),0表示立即删除, -1表示不删除 23 | BOT_MESSAGE_DELETE_TIMEOUT=300 24 | 25 | # 是否自动删除用户发送的指令消息 (true/false) 26 | USER_MESSAGE_DELETE_ENABLE=false 27 | 28 | # 默认最大媒体文件大小限制(单位:MB) 29 | DEFAULT_MAX_MEDIA_SIZE=15 30 | 31 | # 默认时区 32 | DEFAULT_TIMEZONE=Asia/Shanghai 33 | 34 | # 自动更新数据库中聊天窗口名字时间 (24小时制) 35 | CHAT_UPDATE_TIME=03:00 36 | 37 | # 数据库配置 38 | DATABASE_URL=sqlite:///./db/forward.db 39 | 40 | ######### UI 布局配置 ######### 41 | AI_MODELS_PER_PAGE=10 42 | KEYWORDS_PER_PAGE=10 43 | PUSH_CHANNEL_PER_PAGE=10 44 | 45 | # 总结列表(行) 46 | SUMMARY_TIME_ROWS=10 47 | # 总结列表(列) 48 | SUMMARY_TIME_COLS=6 49 | 50 | # 延迟时间列表(行) 51 | DELAY_TIME_ROWS=10 52 | # 延迟时间列表(列) 53 | DELAY_TIME_COLS=6 54 | 55 | # 媒体大小列表(行) 56 | MEDIA_SIZE_ROWS=10 57 | # 媒体大小列表(列) 58 | MEDIA_SIZE_COLS=6 59 | 60 | # 媒体扩展名列表(行) 61 | MEDIA_EXTENSIONS_ROWS=10 62 | # 媒体扩展名列表(列) 63 | MEDIA_EXTENSIONS_COLS=6 64 | 65 | # 每页显示的规则数量 66 | RULES_PER_PAGE=20 67 | 68 | ######### AI设置 ######### 69 | 70 | # 默认AI模型 71 | DEFAULT_AI_MODEL=gemini-2.0-flash 72 | 73 | # OpenAi API Key 74 | OPENAI_API_KEY=your_openai_api_key 75 | # 留空使用官方接口 https://api.openai.com/v1 76 | OPENAI_API_BASE= 77 | 78 | # Claude API Key 79 | CLAUDE_API_KEY=your_claude_api_key 80 | # 留空使用官方接口 81 | CLAUDE_API_BASE= 82 | 83 | # Gemini API Key 84 | # 默认使用官方接口 85 | GEMINI_API_KEY=your_gemini_api_key 86 | # 兼容OpenAI接口标准的第三方API Base,如官方的:https://generativelanguage.googleapis.com/v1beta 87 | GEMINI_API_BASE= 88 | 89 | # DeepSeek API Key 90 | DEEPSEEK_API_KEY=your_deepseek_api_key 91 | # 留空使用官方接口 https://api.deepseek.com/v1 92 | DEEPSEEK_API_BASE= 93 | 94 | # Qwen API Key 95 | QWEN_API_KEY=your_qwen_api_key 96 | # 留空使用官方接口 https://dashscope.aliyuncs.com/compatible-mode/v1 97 | QWEN_API_BASE= 98 | 99 | # Grok API Key 100 | GROK_API_KEY=your_grok_api_key 101 | # 留空使用官方接口 https://api.x.ai/v1 102 | GROK_API_BASE= 103 | 104 | # 默认AI提示词 105 | DEFAULT_AI_PROMPT=请尊重原意,保持原有格式不变,用简体中文重写下面的内容: 106 | 107 | # 默认AI总结提示词 108 | DEFAULT_SUMMARY_PROMPT=请总结以下频道/群组24小时内的消息。 109 | # 默认总结时间 (24小时制) 110 | DEFAULT_SUMMARY_TIME=07:00 111 | 112 | 113 | # AI总结每次爬取消息数量 114 | SUMMARY_BATCH_SIZE=20 115 | # AI总结每次爬取消息间隔时间(秒) 116 | SUMMARY_BATCH_DELAY=2 117 | 118 | 119 | ######### RSS配置 ######### 120 | # 是否启用RSS功能 (true/false) 121 | RSS_ENABLED=false 122 | 123 | # RSS基础访问URL 124 | RSS_BASE_URL= 125 | 126 | # RSS媒体文件基础URL 127 | RSS_MEDIA_BASE_URL= 128 | 129 | 130 | ######### 扩展内容 ######### 131 | 132 | # 是否开启与通用论坛屏蔽插件服务端的同步服务 (true/false) 133 | UFB_ENABLED=false 134 | # 服务端地址 135 | UFB_SERVER_URL= 136 | # 用户API_KEY 137 | UFB_TOKEN= 138 | 139 | 140 | 141 | 142 | -------------------------------------------------------------------------------- /handlers/user_handler.py: 
-------------------------------------------------------------------------------- 1 | from models.models import ForwardMode 2 | import re 3 | import logging 4 | import asyncio 5 | from utils.common import check_keywords, get_sender_info 6 | 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | async def process_forward_rule(client, event, chat_id, rule): 11 | """处理转发规则(用户模式)""" 12 | 13 | 14 | if not rule.enable_rule: 15 | logger.info(f'规则 ID: {rule.id} 已禁用,跳过处理') 16 | return 17 | 18 | message_text = event.message.text or '' 19 | check_message_text = message_text 20 | # 添加日志 21 | logger.info(f'处理规则 ID: {rule.id}') 22 | logger.info(f'消息内容: {message_text}') 23 | logger.info(f'规则模式: {rule.forward_mode.value}') 24 | 25 | 26 | if rule.is_filter_user_info: 27 | sender_info = await get_sender_info(event, rule.id) # 调用新的函数获取 sender_info 28 | if sender_info: 29 | check_message_text = f"{sender_info}:\n{message_text}" 30 | logger.info(f'附带用户信息后的消息: {message_text}') 31 | else: 32 | logger.warning(f"规则 ID: {rule.id} - 无法获取发送者信息") 33 | 34 | should_forward = await check_keywords(rule,check_message_text) 35 | 36 | logger.info(f'最终决定: {"转发" if should_forward else "不转发"}') 37 | 38 | if should_forward: 39 | target_chat = rule.target_chat 40 | target_chat_id = int(target_chat.telegram_chat_id) 41 | 42 | try: 43 | 44 | 45 | if event.message.grouped_id: 46 | # 等待一段时间以确保收到所有媒体组消息 47 | await asyncio.sleep(1) 48 | 49 | # 收集媒体组的所有消息 50 | messages = [] 51 | async for message in client.iter_messages( 52 | event.chat_id, 53 | limit=20, # 限制搜索范围 54 | min_id=event.message.id - 10, 55 | max_id=event.message.id + 10 56 | ): 57 | if message.grouped_id == event.message.grouped_id: 58 | messages.append(message.id) 59 | logger.info(f'找到媒体组消息: ID={message.id}') 60 | 61 | # 按照ID排序,确保转发顺序正确 62 | messages.sort() 63 | 64 | # 一次性转发所有消息 65 | await client.forward_messages( 66 | target_chat_id, 67 | messages, 68 | event.chat_id 69 | ) 70 | logger.info(f'[用户] 已转发 {len(messages)} 条媒体组消息到: {target_chat.name} ({target_chat_id})') 71 | 72 | else: 73 | # 处理单条消息 74 | await client.forward_messages( 75 | target_chat_id, 76 | event.message.id, 77 | event.chat_id 78 | ) 79 | logger.info(f'[用户] 消息已转发到: {target_chat.name} ({target_chat_id})') 80 | 81 | 82 | except Exception as e: 83 | logger.error(f'转发消息时出错: {str(e)}') 84 | logger.exception(e) -------------------------------------------------------------------------------- /ai/claude_provider.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, List, Dict 2 | import anthropic 3 | from .base import BaseAIProvider 4 | import os 5 | import logging 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | class ClaudeProvider(BaseAIProvider): 10 | def __init__(self): 11 | self.client = None 12 | self.model = None 13 | self.default_model = 'claude-3-5-sonnet-latest' 14 | 15 | async def initialize(self, **kwargs): 16 | """初始化Claude客户端""" 17 | api_key = os.getenv('CLAUDE_API_KEY') 18 | if not api_key: 19 | raise ValueError("未设置CLAUDE_API_KEY环境变量") 20 | 21 | # 检查是否配置了自定义API基础URL 22 | api_base = os.getenv('CLAUDE_API_BASE', '').strip() 23 | if api_base: 24 | logger.info(f"使用自定义Claude API基础URL: {api_base}") 25 | self.client = anthropic.Anthropic( 26 | api_key=api_key, 27 | base_url=api_base 28 | ) 29 | else: 30 | # 使用默认URL 31 | self.client = anthropic.Anthropic(api_key=api_key) 32 | 33 | self.model = kwargs.get('model', self.default_model) 34 | 35 | async def process_message(self, 36 | message: str, 37 | prompt: Optional[str] = None, 38 | images: 
Optional[List[Dict[str, str]]] = None, 39 | **kwargs) -> str: 40 | """处理消息""" 41 | try: 42 | if not self.client: 43 | await self.initialize(**kwargs) 44 | 45 | # 构建消息列表 46 | messages = [] 47 | if prompt: 48 | messages.append({"role": "system", "content": prompt}) 49 | 50 | # 如果有图片,需要添加到消息中 51 | if images and len(images) > 0: 52 | # 构建包含图片的内容列表 53 | content = [] 54 | 55 | # 添加文本 56 | content.append({ 57 | "type": "text", 58 | "text": message 59 | }) 60 | 61 | # 添加每张图片 62 | for img in images: 63 | content.append({ 64 | "type": "image", 65 | "source": { 66 | "type": "base64", 67 | "media_type": img["mime_type"], 68 | "data": img["data"] 69 | } 70 | }) 71 | logger.info(f"已添加一张类型为 {img['mime_type']} 的图片,大小约 {len(img['data']) // 1000} KB") 72 | 73 | # 添加用户消息 74 | messages.append({"role": "user", "content": content}) 75 | else: 76 | # 没有图片,只添加文本 77 | messages.append({"role": "user", "content": message}) 78 | 79 | # 使用流式输出 - 按照官方文档正确实现 80 | with self.client.messages.stream( 81 | model=self.model, 82 | max_tokens=4096, 83 | messages=messages 84 | ) as stream: 85 | # 使用专用的text_stream迭代器直接获取文本 86 | full_response = "" 87 | for text in stream.text_stream: 88 | full_response += text 89 | 90 | return full_response 91 | 92 | except Exception as e: 93 | logger.error(f"Claude API 调用失败: {str(e)}") 94 | return f"AI处理失败: {str(e)}" -------------------------------------------------------------------------------- /utils/constants.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | from dotenv import load_dotenv 4 | 5 | # 加载环境变量 6 | load_dotenv() 7 | 8 | # 目录配置 9 | BASE_DIR = Path(__file__).parent.parent 10 | TEMP_DIR = os.path.join(BASE_DIR, 'temp') 11 | 12 | RSS_HOST = os.getenv('RSS_HOST', '127.0.0.1') 13 | RSS_PORT = os.getenv('RSS_PORT', '8000') 14 | 15 | # RSS基础URL,如果未设置,则使用请求的URL 16 | RSS_BASE_URL = os.environ.get('RSS_BASE_URL', None) 17 | 18 | # RSS媒体文件的基础URL,用于生成媒体链接,如果未设置,则使用请求的URL 19 | RSS_MEDIA_BASE_URL = os.getenv('RSS_MEDIA_BASE_URL', '') 20 | 21 | RSS_ENABLED = os.getenv('RSS_ENABLED', 'false') 22 | 23 | RULES_PER_PAGE = int(os.getenv('RULES_PER_PAGE', 20)) 24 | 25 | PUSH_CHANNEL_PER_PAGE = int(os.getenv('PUSH_CHANNEL_PER_PAGE', 10)) 26 | 27 | DEFAULT_TIMEZONE = os.getenv('DEFAULT_TIMEZONE', 'Asia/Shanghai') 28 | PROJECT_NAME = os.getenv('PROJECT_NAME', 'TG Forwarder RSS') 29 | # RSS相关路径配置 30 | RSS_MEDIA_PATH = os.getenv('RSS_MEDIA_PATH', './rss/media') 31 | 32 | # 转换为绝对路径 33 | RSS_MEDIA_DIR = os.path.abspath(os.path.join(BASE_DIR, RSS_MEDIA_PATH) 34 | if not os.path.isabs(RSS_MEDIA_PATH) 35 | else RSS_MEDIA_PATH) 36 | 37 | # RSS数据路径 38 | RSS_DATA_PATH = os.getenv('RSS_DATA_PATH', './rss/data') 39 | RSS_DATA_DIR = os.path.abspath(os.path.join(BASE_DIR, RSS_DATA_PATH) 40 | if not os.path.isabs(RSS_DATA_PATH) 41 | else RSS_DATA_PATH) 42 | 43 | # 默认AI模型 44 | DEFAULT_AI_MODEL = os.getenv('DEFAULT_AI_MODEL', 'gpt-4o') 45 | # 默认AI总结提示词 46 | DEFAULT_SUMMARY_PROMPT = os.getenv('DEFAULT_SUMMARY_PROMPT', '请总结以下频道/群组24小时内的消息。') 47 | # 默认AI提示词 48 | DEFAULT_AI_PROMPT = os.getenv('DEFAULT_AI_PROMPT', '请尊重原意,保持原有格式不变,用简体中文重写下面的内容:') 49 | 50 | # 分页配置 51 | MODELS_PER_PAGE = int(os.getenv('AI_MODELS_PER_PAGE', 10)) 52 | KEYWORDS_PER_PAGE = int(os.getenv('KEYWORDS_PER_PAGE', 50)) 53 | 54 | # 按钮布局配置 55 | SUMMARY_TIME_ROWS = int(os.getenv('SUMMARY_TIME_ROWS', 10)) 56 | SUMMARY_TIME_COLS = int(os.getenv('SUMMARY_TIME_COLS', 6)) 57 | 58 | DELAY_TIME_ROWS = int(os.getenv('DELAY_TIME_ROWS', 10)) 59 | DELAY_TIME_COLS = 
int(os.getenv('DELAY_TIME_COLS', 6)) 60 | 61 | MEDIA_SIZE_ROWS = int(os.getenv('MEDIA_SIZE_ROWS', 10)) 62 | MEDIA_SIZE_COLS = int(os.getenv('MEDIA_SIZE_COLS', 6)) 63 | 64 | MEDIA_EXTENSIONS_ROWS = int(os.getenv('MEDIA_EXTENSIONS_ROWS', 6)) 65 | MEDIA_EXTENSIONS_COLS = int(os.getenv('MEDIA_EXTENSIONS_COLS', 6)) 66 | 67 | LOG_MAX_SIZE_MB = 10 68 | LOG_BACKUP_COUNT = 3 69 | 70 | # 默认消息删除时间 (秒) 71 | BOT_MESSAGE_DELETE_TIMEOUT = int(os.getenv("BOT_MESSAGE_DELETE_TIMEOUT", 300)) 72 | 73 | # 自动删除用户发送的指令消息 74 | USER_MESSAGE_DELETE_ENABLE = os.getenv("USER_MESSAGE_DELETE_ENABLE", "false") 75 | 76 | # 是否启用UFB 77 | UFB_ENABLED = os.getenv("UFB_ENABLED", "false") 78 | 79 | # 菜单标题 80 | AI_SETTINGS_TEXT = """ 81 | 当前AI提示词: 82 | 83 | `{ai_prompt}` 84 | 85 | 当前总结提示词: 86 | 87 | `{summary_prompt}` 88 | """ 89 | 90 | # 媒体设置文本 91 | MEDIA_SETTINGS_TEXT = """ 92 | 媒体设置: 93 | """ 94 | PUSH_SETTINGS_TEXT = """ 95 | 推送设置: 96 | 请前往 https://github.com/caronc/apprise/wiki 查看添加推送配置格式说明 97 | 如 `ntfy://ntfy.sh/你的主题名` 98 | """ 99 | 100 | 101 | # 为每个规则生成特定的路径 102 | def get_rule_media_dir(rule_id): 103 | """获取指定规则的媒体目录""" 104 | rule_path = os.path.join(RSS_MEDIA_DIR, str(rule_id)) 105 | # 确保目录存在 106 | os.makedirs(rule_path, exist_ok=True) 107 | return rule_path 108 | 109 | def get_rule_data_dir(rule_id): 110 | """获取指定规则的数据目录""" 111 | rule_path = os.path.join(RSS_DATA_DIR, str(rule_id)) 112 | # 确保目录存在 113 | os.makedirs(rule_path, exist_ok=True) 114 | return rule_path -------------------------------------------------------------------------------- /filters/delay_filter.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | from filters.base_filter import BaseFilter 4 | from utils.common import get_main_module 5 | 6 | logger = logging.getLogger(__name__) 7 | 8 | class DelayFilter(BaseFilter): 9 | """ 10 | 延迟过滤器,等待消息可能的编辑后再处理 11 | 12 | 有些频道在发送消息后会有自己的机器人对消息进行编辑, 13 | 添加引用、标注等内容。此过滤器会等待一段时间后, 14 | 重新获取消息的最新内容再进行处理。 15 | """ 16 | 17 | async def _process(self, context): 18 | """ 19 | 根据规则配置,决定是否等待并获取最新的消息内容 20 | 21 | Args: 22 | context: 消息上下文 23 | 24 | Returns: 25 | bool: 是否继续处理 26 | """ 27 | rule = context.rule 28 | message = context.event 29 | 30 | # 如果规则未启用延迟处理或延迟秒数为0,则直接通过 31 | if not rule.enable_delay or rule.delay_seconds <= 0: 32 | logger.debug(f"[规则ID:{rule.id}] 延迟处理未启用或延迟秒数为0,跳过延迟处理") 33 | return True 34 | 35 | # 如果消息不完整,则直接通过 36 | if not message or not hasattr(message, "chat_id") or not hasattr(message, "id"): 37 | logger.debug(f"[规则ID:{rule.id}] 消息不完整,无法应用延迟处理") 38 | return True 39 | 40 | try: 41 | 42 | original_id = message.id 43 | chat_id = message.chat_id 44 | 45 | logger.info(f"[规则ID:{rule.id}] 延迟处理消息 {original_id},等待 {rule.delay_seconds} 秒...") 46 | 47 | # 等待指定的秒数 48 | await asyncio.sleep(rule.delay_seconds) 49 | logger.info(f"[规则ID:{rule.id}] 延迟 {rule.delay_seconds} 秒结束,正在获取最新消息...") 50 | 51 | # 尝试获取用户客户端 52 | try: 53 | main = await get_main_module() 54 | client = main.user_client if (main and hasattr(main, 'user_client')) else context.client 55 | 56 | # 获取更新后的消息 57 | logger.info(f"[规则ID:{rule.id}] 正在获取聊天 {chat_id} 的消息 {original_id}...") 58 | updated_message = await client.get_messages(chat_id, ids=original_id) 59 | 60 | 61 | if updated_message: 62 | updated_text = getattr(updated_message, "text", "") 63 | 64 | # 不管消息内容是否有变化,都更新上下文中的所有相关字段 65 | logger.info(f"[规则ID:{rule.id}] 正在更新上下文中的消息数据...") 66 | 67 | # 更新上下文中的消息文本相关字段 68 | context.message_text = updated_text 69 | context.check_message_text = updated_text 70 | 71 | # 更新事件中的消息对象 72 | 
context.event.message = updated_message 73 | 74 | # 更新其他相关字段 75 | context.original_message_text = updated_text 76 | context.buttons = updated_message.buttons if hasattr(updated_message, 'buttons') else None 77 | 78 | # 更新媒体相关信息 79 | if hasattr(updated_message, 'media') and updated_message.media: 80 | context.is_media_group = updated_message.grouped_id is not None 81 | context.media_group_id = updated_message.grouped_id 82 | 83 | logger.info(f"[规则ID:{rule.id}] 上下文消息数据已更新完成") 84 | else: 85 | logger.warning(f"[规则ID:{rule.id}] 无法获取更新的消息,使用原始消息") 86 | except Exception as e: 87 | logger.warning(f"[规则ID:{rule.id}] 获取更新消息时出错: {str(e)}") 88 | # 继续使用原始消息 89 | 90 | logger.info(f"[规则ID:{rule.id}] 延迟处理完成,继续后续过滤器") 91 | return True 92 | 93 | except Exception as e: 94 | logger.error(f"[规则ID:{rule.id}] 延迟处理消息时出现错误: {str(e)}") 95 | return True 96 | -------------------------------------------------------------------------------- /utils/auto_delete.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import os 3 | import logging 4 | from functools import wraps 5 | from utils.constants import BOT_MESSAGE_DELETE_TIMEOUT, USER_MESSAGE_DELETE_ENABLE 6 | logger = logging.getLogger(__name__) 7 | 8 | # 从环境变量获取默认超时时间 9 | 10 | async def delete_after(message, seconds): 11 | """等待指定秒数后删除消息 12 | 13 | 参数: 14 | message: 要删除的消息 15 | seconds: 等待多少秒后删除, 0表示立即删除, -1表示不删除 16 | """ 17 | if seconds == -1: # -1 表示不删除 18 | return 19 | 20 | if seconds > 0: # 正数表示等待指定秒数再删除 21 | await asyncio.sleep(seconds) 22 | 23 | try: 24 | await message.delete() 25 | except Exception as e: 26 | logger.error(f"删除消息失败: {e}") 27 | 28 | async def reply_and_delete(event, text, delete_after_seconds=None, **kwargs): 29 | """回复消息并安排自动删除 30 | 31 | 参数: 32 | event: Telethon事件对象 33 | text: 要发送的文本 34 | delete_after_seconds: 多少秒后删除消息,None使用默认值,0表示立即删除,-1表示不删除 35 | **kwargs: 传递给reply方法的其他参数 36 | """ 37 | # 如果没有指定删除时间,使用环境变量中的默认值 38 | if delete_after_seconds is None: 39 | deletion_timeout = BOT_MESSAGE_DELETE_TIMEOUT 40 | else: 41 | deletion_timeout = delete_after_seconds 42 | 43 | # 发送回复 44 | message = await event.reply(text, **kwargs) 45 | 46 | # 安排删除任务,只有当deletion_timeout不等于-1时才删除 47 | if deletion_timeout != -1: 48 | asyncio.create_task(delete_after(message, deletion_timeout)) 49 | 50 | return message 51 | 52 | async def respond_and_delete(event, text, delete_after_seconds=None, **kwargs): 53 | """使用respond回复消息并安排自动删除 54 | 55 | 参数: 56 | event: Telethon事件对象 57 | text: 要发送的文本 58 | delete_after_seconds: 多少秒后删除消息,None使用默认值,0表示立即删除,-1表示不删除 59 | **kwargs: 传递给respond方法的其他参数 60 | """ 61 | # 如果没有指定删除时间,使用环境变量中的默认值 62 | if delete_after_seconds is None: 63 | deletion_timeout = BOT_MESSAGE_DELETE_TIMEOUT 64 | else: 65 | deletion_timeout = delete_after_seconds 66 | 67 | # 发送回复 68 | message = await event.respond(text, **kwargs) 69 | 70 | # 安排删除任务,只有当deletion_timeout不等于-1时才删除 71 | if deletion_timeout != -1: 72 | asyncio.create_task(delete_after(message, deletion_timeout)) 73 | 74 | return message 75 | 76 | async def send_message_and_delete(client, entity, text, delete_after_seconds=None, **kwargs): 77 | """发送消息并安排自动删除 78 | 79 | 参数: 80 | client: Telethon客户端对象 81 | entity: 聊天对象或ID 82 | text: 要发送的文本 83 | delete_after_seconds: 多少秒后删除消息,None使用默认值,0表示立即删除,-1表示不删除 84 | **kwargs: 传递给send_message方法的其他参数 85 | """ 86 | # 如果没有指定删除时间,使用环境变量中的默认值 87 | if delete_after_seconds is None: 88 | deletion_timeout = BOT_MESSAGE_DELETE_TIMEOUT 89 | else: 90 | deletion_timeout = delete_after_seconds 91 | 92 | # 发送消息 93 | message = await 
client.send_message(entity, text, **kwargs) 94 | 95 | # 安排删除任务,只有当deletion_timeout不等于-1时才删除 96 | if deletion_timeout != -1: 97 | asyncio.create_task(delete_after(message, deletion_timeout)) 98 | 99 | return message 100 | 101 | # 删除用户消息 102 | async def async_delete_user_message(client, chat_id, message_id, seconds): 103 | """删除用户消息 104 | 105 | 参数: 106 | client: bot客户端 107 | chat_id: 聊天ID 108 | message_id: 消息ID 109 | seconds: 等待多少秒后删除, 0表示立即删除, -1表示不删除 110 | """ 111 | if USER_MESSAGE_DELETE_ENABLE == "false": 112 | return 113 | 114 | if seconds == -1: # -1 表示不删除 115 | return 116 | 117 | if seconds > 0: # 正数表示等待指定秒数再删除 118 | await asyncio.sleep(seconds) 119 | 120 | try: 121 | await client.delete_messages(chat_id, message_id) 122 | except Exception as e: 123 | logger.error(f"删除用户消息失败: {e}") 124 | 125 | -------------------------------------------------------------------------------- /ai/openai_base_provider.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, List, Dict 2 | from openai import AsyncOpenAI 3 | from .base import BaseAIProvider 4 | import os 5 | import logging 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | class OpenAIBaseProvider(BaseAIProvider): 10 | def __init__(self, env_prefix: str = 'OPENAI', default_model: str = 'gpt-4o-mini', 11 | default_api_base: str = 'https://api.openai.com/v1'): 12 | """ 13 | 初始化基础OpenAI格式提供者 14 | 15 | Args: 16 | env_prefix: 环境变量前缀,如 'OPENAI', 'GROK', 'DEEPSEEK', 'QWEN' 17 | default_model: 默认模型名称 18 | default_api_base: 默认API基础URL 19 | """ 20 | super().__init__() 21 | self.env_prefix = env_prefix 22 | self.default_model = default_model 23 | self.default_api_base = default_api_base 24 | self.client = None 25 | self.model = None 26 | 27 | async def initialize(self, **kwargs) -> None: 28 | """初始化OpenAI客户端""" 29 | try: 30 | api_key = os.getenv(f'{self.env_prefix}_API_KEY') 31 | if not api_key: 32 | raise ValueError(f"未设置 {self.env_prefix}_API_KEY 环境变量") 33 | 34 | api_base = os.getenv(f'{self.env_prefix}_API_BASE', '').strip() or self.default_api_base 35 | 36 | self.client = AsyncOpenAI( 37 | api_key=api_key, 38 | base_url=api_base 39 | ) 40 | 41 | self.model = kwargs.get('model', self.default_model) 42 | logger.info(f"初始化OpenAI模型: {self.model}") 43 | 44 | except Exception as e: 45 | error_msg = f"初始化 {self.env_prefix} 客户端时出错: {str(e)}" 46 | logger.error(error_msg, exc_info=True) 47 | raise 48 | 49 | async def process_message(self, 50 | message: str, 51 | prompt: Optional[str] = None, 52 | images: Optional[List[Dict[str, str]]] = None, 53 | **kwargs) -> str: 54 | """处理消息""" 55 | try: 56 | if not self.client: 57 | await self.initialize(**kwargs) 58 | 59 | messages = [] 60 | if prompt: 61 | messages.append({"role": "system", "content": prompt}) 62 | 63 | # 如果有图片,需要添加到消息中 64 | if images and len(images) > 0: 65 | # 创建包含文本和图片的内容数组 66 | content = [] 67 | 68 | # 添加文本 69 | content.append({ 70 | "type": "text", 71 | "text": message 72 | }) 73 | 74 | # 添加每张图片 75 | for img in images: 76 | content.append({ 77 | "type": "image_url", 78 | "image_url": { 79 | "url": f"data:{img['mime_type']};base64,{img['data']}" 80 | } 81 | }) 82 | logger.info(f"已添加一张类型为 {img['mime_type']} 的图片,大小约 {len(img['data']) // 1000} KB") 83 | 84 | messages.append({"role": "user", "content": content}) 85 | else: 86 | # 没有图片,只添加文本 87 | messages.append({"role": "user", "content": message}) 88 | 89 | logger.info(f"实际使用的OpenAI模型: {self.model}") 90 | 91 | # 所有模型统一使用流式调用 92 | completion = await self.client.chat.completions.create( 93 
| model=self.model, 94 | messages=messages, 95 | stream=True 96 | ) 97 | 98 | # 收集所有内容 99 | collected_content = "" 100 | collected_reasoning = "" 101 | 102 | async for chunk in completion: 103 | if not chunk.choices: 104 | continue 105 | 106 | delta = chunk.choices[0].delta 107 | 108 | # 处理思考内容(如果存在) 109 | if hasattr(delta, 'reasoning_content') and delta.reasoning_content is not None: 110 | collected_reasoning += delta.reasoning_content 111 | 112 | # 处理回答内容 113 | if hasattr(delta, 'content') and delta.content is not None: 114 | collected_content += delta.content 115 | 116 | # 如果没有内容但有思考过程,可能是思考模型只返回了思考过程 117 | if not collected_content and collected_reasoning: 118 | logger.warning("模型只返回了思考过程,没有最终回答") 119 | return "模型未能生成有效回答" 120 | 121 | return collected_content 122 | 123 | except Exception as e: 124 | logger.error(f"{self.env_prefix} API 调用失败: {str(e)}", exc_info=True) 125 | return f"AI处理失败: {str(e)}" 126 | -------------------------------------------------------------------------------- /utils/settings.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import logging 4 | 5 | from utils.file_creator import create_default_configs, AI_MODELS_CONFIG 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | def load_ai_models(type="list"): 10 | """ 11 | 加载AI模型配置 12 | 13 | 参数: 14 | type (str): 返回类型 15 | - "list": 返回所有模型的平铺列表 [model1, model2, ...] 16 | - "dict"/"json": 返回原始配置格式 {provider: [model1, model2, ...]} 17 | 18 | 返回值: 19 | 根据type参数返回不同格式的模型配置 20 | """ 21 | try: 22 | models_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'ai_models.json') 23 | 24 | # 如果配置文件不存在,创建默认配置 25 | if not os.path.exists(models_path): 26 | create_default_configs() 27 | 28 | # 读取JSON配置文件 29 | with open(models_path, 'r', encoding='utf-8') as f: 30 | models_config = json.load(f) 31 | 32 | # 根据type参数返回不同格式 33 | if type.lower() in ["dict", "json"]: 34 | return models_config 35 | 36 | # 默认返回模型列表 37 | all_models = [] 38 | for provider, models in models_config.items(): 39 | all_models.extend(models) 40 | 41 | # 确保列表不为空 42 | if all_models: 43 | return all_models 44 | 45 | except (FileNotFoundError, IOError, json.JSONDecodeError) as e: 46 | logger.error(f"加载AI模型配置失败: {e}") 47 | 48 | # 如果出现任何问题,根据type返回默认值 49 | if type.lower() in ["dict", "json"]: 50 | return AI_MODELS_CONFIG 51 | 52 | # 默认返回模型列表 53 | return ["gpt-3.5-turbo", "gemini-1.5-flash", "claude-3-sonnet"] 54 | 55 | def load_summary_times(): 56 | """加载总结时间列表""" 57 | try: 58 | times_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'summary_times.txt') 59 | if not os.path.exists(times_path): 60 | create_default_configs() 61 | 62 | with open(times_path, 'r', encoding='utf-8') as f: 63 | times = [line.strip() for line in f if line.strip()] 64 | if times: 65 | return times 66 | except (FileNotFoundError, IOError) as e: 67 | logger.warning(f"summary_times.txt 加载失败: {e},使用默认时间列表") 68 | return ['00:00', '06:00', '12:00', '18:00'] 69 | 70 | def load_delay_times(): 71 | """加载延迟时间列表""" 72 | try: 73 | times_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'delay_times.txt') 74 | if not os.path.exists(times_path): 75 | create_default_configs() 76 | 77 | with open(times_path, 'r', encoding='utf-8') as f: 78 | times = [line.strip() for line in f if line.strip()] 79 | if times: 80 | return times 81 | except (FileNotFoundError, IOError) as e: 82 | logger.warning(f"delay_times.txt 加载失败: {e},使用默认时间列表") 83 | return [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] 84 | 
85 | def load_max_media_size(): 86 | """加载媒体大小限制""" 87 | try: 88 | size_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'max_media_size.txt') 89 | if not os.path.exists(size_path): 90 | create_default_configs() 91 | 92 | with open(size_path, 'r', encoding='utf-8') as f: 93 | size = [line.strip() for line in f if line.strip()] 94 | if size: 95 | return size 96 | 97 | except (FileNotFoundError, IOError) as e: 98 | logger.warning(f"max_media_size.txt 加载失败: {e},使用默认大小限制") 99 | return [5,10,15,20,50,100,200,300,500,1024,2048] 100 | 101 | 102 | def load_media_extensions(): 103 | """加载媒体扩展名""" 104 | try: 105 | size_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'media_extensions.txt') 106 | if not os.path.exists(size_path): 107 | create_default_configs() 108 | 109 | with open(size_path, 'r', encoding='utf-8') as f: 110 | size = [line.strip() for line in f if line.strip()] 111 | if size: 112 | return size 113 | 114 | except (FileNotFoundError, IOError) as e: 115 | logger.warning(f"media_extensions.txt 加载失败: {e},使用默认扩展名") 116 | return ['无扩展名','txt','jpg','png','gif','mp4','mp3','wav','ogg','flac','aac','wma','m4a','m4v','mov','avi','mkv','webm','mpg','mpeg','mpe','mp3','mp2','m4a','m4p','m4b','m4r','m4v','mpg','mpeg','mp2','mp3','mp4','mpc','oga','ogg','wav','wma','3gp','3g2','3gpp','3gpp2','amr','awb','caf','flac','m4a','m4b','m4p','oga','ogg','opus','spx','vorbis','wav','wma','webm','aac','ac3','dts','dtshd','flac','mp3','mp4','m4a','m4b','m4p','oga','ogg','wav','wma','webm','aac','ac3','dts','dtshd','flac','mp3','mp4','m4a','m4b','m4p','oga','ogg','wav','wma','webm'] -------------------------------------------------------------------------------- /utils/file_creator.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import logging 4 | 5 | logger = logging.getLogger(__name__) 6 | 7 | # 默认AI模型配置(JSON格式) 8 | AI_MODELS_CONFIG = { 9 | "openai": [ 10 | "gpt-4o", 11 | "chatgpt-4o-latest", 12 | "gpt-4o-mini", 13 | "gpt-4-turbo", 14 | "gpt-4-turbo-preview", 15 | "gpt-4", 16 | "gpt-3.5-turbo", 17 | "gpt-3.5-turbo-instruct", 18 | "o1", 19 | "o1-mini", 20 | "o1-preview", 21 | "o3-mini" 22 | ], 23 | "gemini": [ 24 | 'gemini-2.5-pro-exp-03-25', 25 | "gemini-2.0-flash", 26 | "gemini-2.0-flash-lite-preview-02-05", 27 | "gemini-2.0-pro-exp-02-05", 28 | "gemini-1.5-flash", 29 | "gemini-1.5-flash-8b", 30 | "gemini-1.5-pro" 31 | ], 32 | "grok": [ 33 | "grok-3-beta", 34 | "grok-3-fast-beta", 35 | "grok-3-mini-beta", 36 | "grok-3-mini-fast-beta", 37 | "grok-2-vision-1212", 38 | "grok-2-image-1212", 39 | "grok-2-latest" 40 | ], 41 | "deepseek": [ 42 | "deepseek-chat" 43 | ], 44 | "claude": [ 45 | "claude-3-7-sonnet-latest", 46 | "claude-3-5-sonnet-latest", 47 | "claude-3-5-haiku-latest", 48 | "claude-3-opus-latest", 49 | "claude-3-sonnet-20240229", 50 | "claude-3-haiku-20240307" 51 | ], 52 | "qwen": [ 53 | "qwq-plus", 54 | "qwq-plus-latest", 55 | "qwq-32b", 56 | 'qvq-max', 57 | 'qvq-max-latest', 58 | 'qwen-vl-max', 59 | 'qwen-vl-max-latest', 60 | 'qwen-vl-plus', 61 | 'qwen-vl-plus-latest', 62 | 'qwen-vl-ocr', 63 | 'qwen-vl-ocr-latest', 64 | 'qwen-omni-turbo', 65 | 'qwen-omni-turbo-latest', 66 | 'qwen-max', 67 | 'qwen-max-latest', 68 | 'qwen-plus', 69 | 'qwen-plus-latest', 70 | "qwen-turbo", 71 | "qwen-turbo-latest", 72 | "qwen-long" 73 | ] 74 | } 75 | 76 | # 汇总时间列表 77 | SUMMARY_TIMES_CONTENT = """00:00 78 | 00:30 79 | 01:00 80 | 01:30 81 | 02:00 82 | 02:30 83 | 03:00 84 | 03:30 85 | 04:00 86 | 04:30 
87 | 05:00 88 | 05:30 89 | 06:00 90 | 06:30 91 | 07:00 92 | 07:30 93 | 08:00 94 | 08:30 95 | 09:00 96 | 09:30 97 | 10:00 98 | 10:30 99 | 11:00 100 | 11:30 101 | 12:00 102 | 12:30 103 | 13:00 104 | 13:30 105 | 14:00 106 | 14:30 107 | 15:00 108 | 15:30 109 | 16:00 110 | 16:30 111 | 17:00 112 | 17:30 113 | 18:00 114 | 18:30 115 | 19:00 116 | 19:30 117 | 20:00 118 | 20:30 119 | 21:00 120 | 21:30 121 | 22:00 122 | 22:30 123 | 23:00 124 | 23:30 125 | 23:50""" 126 | 127 | # 延迟时间列表 128 | DELAY_TIMES_CONTENT = """1 129 | 2 130 | 3 131 | 4 132 | 5 133 | 6 134 | 7 135 | 8 136 | 9 137 | 10""" 138 | 139 | # 最大媒体大小列表 140 | MAX_MEDIA_SIZE_CONTENT = """1 141 | 2 142 | 3 143 | 4 144 | 5 145 | 6 146 | 7 147 | 8 148 | 9 149 | 10 150 | 15 151 | 20 152 | 25 153 | 30 154 | 35 155 | 40 156 | 45 157 | 50 158 | 55 159 | 60 160 | 65 161 | 70 162 | 75 163 | 80 164 | 85 165 | 90 166 | 95 167 | 100 168 | 150 169 | 200 170 | 250 171 | 300 172 | 350 173 | 400 174 | 450 175 | 500 176 | 550 177 | 600 178 | 650 179 | 700 180 | 750 181 | 800 182 | 850 183 | 900 184 | 950 185 | 1024 186 | 2048 187 | """ 188 | 189 | MEDIA_EXTENSIONS_CONTENT = """无扩展名 190 | jpg 191 | jpeg 192 | png 193 | gif 194 | bmp 195 | webp 196 | tiff 197 | raw 198 | heic 199 | svg 200 | mp4 201 | avi 202 | mkv 203 | mov 204 | wmv 205 | flv 206 | webm 207 | m4v 208 | mpeg 209 | mpg 210 | 3gp 211 | rmvb 212 | mp3 213 | wav 214 | ogg 215 | m4a 216 | aac 217 | flac 218 | wma 219 | opus 220 | mid 221 | midi 222 | txt 223 | doc 224 | docx 225 | pdf 226 | xls 227 | xlsx 228 | ppt 229 | pptx 230 | csv 231 | rtf 232 | odt 233 | zip 234 | rar 235 | 7z 236 | tar 237 | gz 238 | bz2 239 | exe 240 | apk 241 | iso 242 | bin 243 | json 244 | xml 245 | html 246 | css 247 | js 248 | py 249 | """ 250 | 251 | 252 | def create_default_configs(): 253 | """创建默认配置文件""" 254 | config_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config') 255 | os.makedirs(config_dir, exist_ok=True) 256 | 257 | # 定义默认配置内容 258 | default_configs = { 259 | 'summary_times.txt': SUMMARY_TIMES_CONTENT, 260 | 'delay_times.txt': DELAY_TIMES_CONTENT, 261 | 'max_media_size.txt': MAX_MEDIA_SIZE_CONTENT, 262 | 'media_extensions.txt': MEDIA_EXTENSIONS_CONTENT, 263 | } 264 | 265 | # 检查并创建每个配置文件 266 | for filename, content in default_configs.items(): 267 | file_path = os.path.join(config_dir, filename) 268 | if not os.path.exists(file_path): 269 | with open(file_path, 'w', encoding='utf-8') as f: 270 | f.write(content.strip()) 271 | logger.info(f"Created {filename}") 272 | 273 | # 创建JSON格式的AI模型配置文件 274 | json_config_path = os.path.join(config_dir, 'ai_models.json') 275 | if not os.path.exists(json_config_path): 276 | try: 277 | with open(json_config_path, 'w', encoding='utf-8') as f: 278 | json.dump(AI_MODELS_CONFIG, f, ensure_ascii=False, indent=4) 279 | logger.info("Created ai_models.json") 280 | except Exception as e: 281 | logger.error(f"创建 ai_models.json 失败: {e}") -------------------------------------------------------------------------------- /rss/app/crud/entry.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import uuid 4 | from pathlib import Path 5 | from datetime import datetime 6 | from typing import List, Dict, Any, Optional 7 | from ..models.entry import Entry 8 | from ..core.config import settings 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | # 确保数据存储目录存在 13 | def ensure_storage_exists(): 14 | """确保数据存储目录存在""" 15 | entries_dir = Path(settings.DATA_PATH) 16 | entries_dir.mkdir(parents=True, 
exist_ok=True) 17 | 18 | # 获取规则对应的条目存储文件路径 19 | def get_rule_entries_path(rule_id: int) -> Path: 20 | """获取规则对应的条目存储文件路径""" 21 | # 使用规则特定的数据目录 22 | rule_data_path = settings.get_rule_data_path(rule_id) 23 | return Path(rule_data_path) / "entries.json" 24 | 25 | async def get_entries(rule_id: int, limit: int = 100, offset: int = 0) -> List[Entry]: 26 | """获取规则对应的条目""" 27 | try: 28 | file_path = get_rule_entries_path(rule_id) 29 | 30 | # 如果文件不存在,返回空列表 31 | if not file_path.exists(): 32 | return [] 33 | 34 | # 读取文件内容 35 | with open(file_path, 'r', encoding='utf-8') as file: 36 | data = json.load(file) 37 | 38 | # 将数据转换为Entry对象 39 | entries = [Entry(**entry) for entry in data] 40 | 41 | # 按发布时间排序(新的在前) 42 | entries.sort(key=lambda x: x.published, reverse=True) 43 | 44 | # 应用分页 45 | return entries[offset:offset + limit] 46 | except Exception as e: 47 | logger.error(f"获取条目时出错: {str(e)}") 48 | return [] 49 | 50 | async def create_entry(entry: Entry) -> bool: 51 | """创建新条目""" 52 | try: 53 | # 设置条目ID和创建时间 54 | if not entry.id: 55 | entry.id = str(uuid.uuid4()) 56 | 57 | entry.created_at = datetime.now().isoformat() 58 | 59 | # 获取规则对应的条目 60 | file_path = get_rule_entries_path(entry.rule_id) 61 | 62 | entries = [] 63 | # 如果文件已存在,读取现有条目 64 | if file_path.exists(): 65 | with open(file_path, 'r', encoding='utf-8') as file: 66 | try: 67 | entries = json.load(file) 68 | except json.JSONDecodeError: 69 | logger.warning(f"解析条目文件时出错,将创建新文件: {file_path}") 70 | entries = [] 71 | 72 | # 转换Entry对象为字典并添加到列表 73 | entries.append(entry.dict()) 74 | 75 | # 获取规则的RSS配置,获取最大条目数量 76 | try: 77 | from models.models import get_session, RSSConfig 78 | session = get_session() 79 | rss_config = session.query(RSSConfig).filter(RSSConfig.rule_id == entry.rule_id).first() 80 | max_items = rss_config.max_items if rss_config and hasattr(rss_config, 'max_items') else 50 81 | session.close() 82 | except Exception as e: 83 | logger.warning(f"获取RSS配置失败,使用默认最大条目数量(50): {str(e)}") 84 | max_items = 50 85 | 86 | # 限制条目数量,保留最新的N条 87 | if len(entries) > max_items: 88 | # 按发布时间排序(新的在前) 89 | entries.sort(key=lambda x: x.get('published', ''), reverse=True) 90 | entries = entries[:max_items] 91 | 92 | # 保存到文件 93 | with open(file_path, 'w', encoding='utf-8') as file: 94 | json.dump(entries, file, ensure_ascii=False, indent=2) 95 | 96 | return True 97 | except Exception as e: 98 | logger.error(f"创建条目时出错: {str(e)}") 99 | return False 100 | 101 | async def update_entry(rule_id: int, entry_id: str, updated_data: Dict[str, Any]) -> bool: 102 | """更新条目""" 103 | try: 104 | file_path = get_rule_entries_path(rule_id) 105 | 106 | # 如果文件不存在,返回False 107 | if not file_path.exists(): 108 | return False 109 | 110 | # 读取文件内容 111 | with open(file_path, 'r', encoding='utf-8') as file: 112 | entries = json.load(file) 113 | 114 | # 查找并更新条目 115 | found = False 116 | for i, entry in enumerate(entries): 117 | if entry.get('id') == entry_id: 118 | entries[i].update(updated_data) 119 | found = True 120 | break 121 | 122 | if not found: 123 | return False 124 | 125 | # 保存到文件 126 | with open(file_path, 'w', encoding='utf-8') as file: 127 | json.dump(entries, file, ensure_ascii=False, indent=2) 128 | 129 | return True 130 | except Exception as e: 131 | logger.error(f"更新条目时出错: {str(e)}") 132 | return False 133 | 134 | async def delete_entry(rule_id: int, entry_id: str) -> bool: 135 | """删除条目""" 136 | try: 137 | file_path = get_rule_entries_path(rule_id) 138 | 139 | # 如果文件不存在,返回False 140 | if not file_path.exists(): 141 | return False 142 | 143 | # 读取文件内容 144 | with 
open(file_path, 'r', encoding='utf-8') as file: 145 | entries = json.load(file) 146 | 147 | # 查找并删除条目 148 | original_length = len(entries) 149 | entries = [entry for entry in entries if entry.get('id') != entry_id] 150 | 151 | if len(entries) == original_length: 152 | return False # 没有找到对应ID的条目 153 | 154 | # 保存到文件 155 | with open(file_path, 'w', encoding='utf-8') as file: 156 | json.dump(entries, file, ensure_ascii=False, indent=2) 157 | 158 | return True 159 | except Exception as e: 160 | logger.error(f"删除条目时出错: {str(e)}") 161 | return False -------------------------------------------------------------------------------- /handlers/link_handlers.py: -------------------------------------------------------------------------------- 1 | import re 2 | import os 3 | import logging 4 | from utils.common import get_main_module, get_user_id 5 | from utils.constants import TEMP_DIR 6 | from utils.auto_delete import reply_and_delete 7 | logger = logging.getLogger(__name__) 8 | 9 | async def handle_message_link(client, event): 10 | """处理 Telegram 消息链接""" 11 | if not event.message.text: 12 | return 13 | 14 | # 解析消息链接 15 | match = re.match(r'https?://t\.me/(?:c/(\d+)|([^/]+))/(\d+)', event.message.text) 16 | if not match: 17 | return 18 | 19 | try: 20 | chat_id = None 21 | message_id = int(match.group(3)) 22 | 23 | if match.group(1): # 私有频道格式 24 | chat_id = int('-100' + match.group(1)) 25 | else: # 公开频道格式 26 | chat_name = match.group(2) 27 | try: 28 | entity = await client.get_entity(chat_name) 29 | chat_id = entity.id 30 | except Exception as e: 31 | logger.error(f'获取频道信息失败: {str(e)}') 32 | await reply_and_delete(event, '⚠️ 无法访问该频道,请确保已关注该频道。') 33 | return 34 | 35 | # 获取用户客户端 36 | main = await get_main_module() 37 | user_client = main.user_client 38 | 39 | # 获取原始消息 40 | message = await user_client.get_messages(chat_id, ids=message_id) 41 | if not message: 42 | await reply_and_delete(event, '⚠️ 无法获取该消息,可能是消息已被删除或无权限访问。') 43 | return 44 | 45 | # 检查是否是媒体组消息 46 | if message.grouped_id: 47 | await handle_media_group(client, user_client, chat_id, message, event) 48 | else: 49 | await handle_single_message(client, message, event) 50 | 51 | 52 | except Exception as e: 53 | logger.error(f'处理消息链接时出错: {str(e)}') 54 | await reply_and_delete(event, '⚠️ 处理消息时出错,请确保链接正确且有权限访问该消息。') 55 | 56 | async def handle_media_group(client, user_client, chat_id, message, event): 57 | """处理媒体组消息""" 58 | files = [] # 将 files 移到外层作用域 59 | try: 60 | # 收集媒体组的所有消息 61 | media_group_messages = [] 62 | caption = None 63 | buttons = None 64 | 65 | # 在消息ID前后范围内搜索同组消息 66 | async for grouped_message in user_client.iter_messages( 67 | chat_id, 68 | limit=20, 69 | min_id=message.id - 10, 70 | max_id=message.id + 10 71 | ): 72 | if grouped_message.grouped_id == message.grouped_id: 73 | media_group_messages.append(grouped_message) 74 | # 保存第一条消息的文本和按钮 75 | if not caption: 76 | caption = grouped_message.text 77 | buttons = grouped_message.buttons if hasattr(grouped_message, 'buttons') else None 78 | 79 | if media_group_messages: 80 | # 下载所有媒体文件 81 | for msg in media_group_messages: 82 | if msg.media: 83 | try: 84 | file_path = await msg.download_media(TEMP_DIR) 85 | if file_path: 86 | files.append(file_path) 87 | logger.info(f'已下载媒体文件: {file_path}') 88 | except Exception as e: 89 | logger.error(f'下载媒体文件失败: {str(e)}') 90 | 91 | if files: 92 | # 发送媒体组 93 | await client.send_file( 94 | event.chat_id, 95 | files, 96 | caption=caption, 97 | parse_mode='Markdown', 98 | buttons=buttons 99 | ) 100 | logger.info(f'已转发媒体组消息,共 {len(files)} 个文件') 101 | 102 | except Exception as e: 103 | 
logger.error(f'处理媒体组消息时出错: {str(e)}') 104 | raise 105 | finally: 106 | # 确保清理所有临时文件 107 | for file_path in files: 108 | try: 109 | if os.path.exists(file_path): 110 | os.remove(file_path) 111 | logger.info(f'已删除临时文件: {file_path}') 112 | except Exception as e: 113 | logger.error(f'删除临时文件失败 {file_path}: {str(e)}') 114 | 115 | async def handle_single_message(client, message, event): 116 | """处理单条消息""" 117 | parse_mode = 'Markdown' 118 | buttons = message.buttons if hasattr(message, 'buttons') else None 119 | file_path = None 120 | 121 | try: 122 | if message.media: 123 | # 处理媒体消息 124 | file_path = await message.download_media(TEMP_DIR) 125 | if file_path: 126 | logger.info(f'已下载媒体文件: {file_path}') 127 | caption = message.text if message.text else '' 128 | await client.send_file( 129 | event.chat_id, 130 | file_path, 131 | caption=caption, 132 | parse_mode=parse_mode, 133 | buttons=buttons 134 | ) 135 | logger.info('已转发单条媒体消息') 136 | else: 137 | # 处理纯文本消息 138 | await client.send_message( 139 | event.chat_id, 140 | message.text, 141 | parse_mode=parse_mode, 142 | link_preview=True, 143 | buttons=buttons 144 | ) 145 | logger.info('已转发文本消息') 146 | 147 | except Exception as e: 148 | logger.error(f'处理单条消息时出错: {str(e)}') 149 | raise 150 | finally: 151 | # 确保清理临时文件 152 | if file_path and os.path.exists(file_path): 153 | try: 154 | os.remove(file_path) 155 | logger.info(f'已删除临时文件: {file_path}') 156 | except Exception as e: 157 | logger.error(f'删除临时文件失败 {file_path}: {str(e)}') 158 | -------------------------------------------------------------------------------- /ai/gemini_provider.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, List, Dict 2 | import google.generativeai as genai 3 | # 移除对不存在的模块的导入 4 | # from google.genai import types 5 | from .base import BaseAIProvider 6 | from .openai_base_provider import OpenAIBaseProvider 7 | import os 8 | import logging 9 | import base64 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | class GeminiOpenAIProvider(OpenAIBaseProvider): 14 | """使用OpenAI兼容接口的Gemini提供者""" 15 | def __init__(self): 16 | super().__init__( 17 | env_prefix='GEMINI', 18 | default_model='gemini-pro', 19 | default_api_base='' # API_BASE必须在环境变量中提供 20 | ) 21 | 22 | class GeminiProvider(BaseAIProvider): 23 | def __init__(self): 24 | self.model = None 25 | self.model_name = None # 添加model_name属性 26 | self.provider = None 27 | 28 | async def initialize(self, **kwargs): 29 | """初始化Gemini客户端""" 30 | # 检查是否配置了GEMINI_API_BASE,如果有则使用兼容OpenAI的接口 31 | api_base = os.getenv('GEMINI_API_BASE', '').strip() 32 | 33 | if api_base: 34 | logger.info(f"检测到GEMINI_API_BASE环境变量: {api_base},使用兼容OpenAI的接口") 35 | self.provider = GeminiOpenAIProvider() 36 | await self.provider.initialize(**kwargs) 37 | return 38 | 39 | # 原来的Gemini API初始化代码 40 | api_key = os.getenv('GEMINI_API_KEY') 41 | if not api_key: 42 | raise ValueError("未设置GEMINI_API_KEY环境变量") 43 | 44 | # 使用传入的model参数,如果没有才使用默认值 45 | if not self.model_name: # 如果model_name还没设置 46 | self.model_name = kwargs.get('model') 47 | 48 | if not self.model_name: # 如果kwargs中也没有model 49 | self.model_name = 'gemini-pro' # 最后才使用默认值 50 | 51 | logger.info(f"初始化Gemini模型: {self.model_name}") 52 | 53 | # 配置安全设置 - 只使用基本类别 54 | safety_settings = [ 55 | { 56 | "category": "HARM_CATEGORY_HARASSMENT", 57 | "threshold": "BLOCK_NONE" 58 | }, 59 | { 60 | "category": "HARM_CATEGORY_HATE_SPEECH", 61 | "threshold": "BLOCK_NONE" 62 | }, 63 | { 64 | "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", 65 | "threshold": 
"BLOCK_NONE" 66 | }, 67 | { 68 | "category": "HARM_CATEGORY_DANGEROUS_CONTENT", 69 | "threshold": "BLOCK_NONE" 70 | } 71 | ] 72 | 73 | genai.configure(api_key=api_key) 74 | # 使用self.model_name初始化模型 75 | self.model = genai.GenerativeModel( 76 | model_name=self.model_name, 77 | safety_settings=safety_settings 78 | ) 79 | 80 | async def process_message(self, 81 | message: str, 82 | prompt: Optional[str] = None, 83 | images: Optional[List[Dict[str, str]]] = None, 84 | **kwargs) -> str: 85 | """处理消息""" 86 | try: 87 | if not self.provider and not self.model: 88 | await self.initialize(**kwargs) 89 | 90 | # 如果使用的是OpenAI兼容接口,则调用该接口的处理方法 91 | if self.provider: 92 | return await self.provider.process_message(message, prompt, images, **kwargs) 93 | 94 | # 使用Gemini API的流式处理 95 | logger.info(f"实际使用的Gemini模型: {self.model_name}") 96 | 97 | # 组合提示词和消息 98 | if prompt: 99 | user_message = f"{prompt}\n\n{message}" 100 | else: 101 | user_message = message 102 | 103 | # 检查是否有图片 104 | if images and len(images) > 0: 105 | try: 106 | # 使用MultimodalContent添加图片 107 | contents = [] 108 | # 添加文本 109 | contents.append({"role": "user", "parts": [{"text": user_message}]}) 110 | 111 | # 对每张图片进行处理 112 | for img in images: 113 | try: 114 | # 直接添加图片字节到模型的输入 115 | image_part = { 116 | "inline_data": { 117 | "mime_type": img["mime_type"], 118 | "data": img["data"] # 使用原始base64数据 119 | } 120 | } 121 | contents[0]["parts"].append(image_part) 122 | logger.info(f"已添加一张类型为 {img['mime_type']} 的图片,大小约 {len(img['data']) // 1000} KB") 123 | except Exception as img_error: 124 | logger.error(f"处理单张图片时出错: {str(img_error)}") 125 | 126 | # 使用流式输出 - 不设置额外参数,使用默认值 127 | response_stream = self.model.generate_content( 128 | contents, 129 | stream=True 130 | ) 131 | except Exception as e: 132 | logger.error(f"Gemini处理带图片消息时出错: {str(e)}") 133 | # 如果处理图片失败,尝试只用文本 134 | response_stream = self.model.generate_content( 135 | [{"role": "user", "parts": [{"text": user_message}]}], 136 | stream=True 137 | ) 138 | else: 139 | # 无图片,使用流式输出 140 | response_stream = self.model.generate_content( 141 | [{"role": "user", "parts": [{"text": user_message}]}], 142 | stream=True 143 | ) 144 | 145 | # 收集完整响应 146 | full_response = "" 147 | for chunk in response_stream: 148 | if hasattr(chunk, 'text'): 149 | full_response += chunk.text 150 | 151 | return full_response 152 | 153 | except Exception as e: 154 | logger.error(f"Gemini处理消息时出错: {str(e)}") 155 | return f"AI处理失败: {str(e)}" -------------------------------------------------------------------------------- /filters/edit_filter.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | from filters.base_filter import BaseFilter 4 | from enums.enums import HandleMode, PreviewMode 5 | from utils.common import get_main_module 6 | from telethon.tl.types import Channel 7 | import traceback 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | class EditFilter(BaseFilter): 12 | """ 13 | 编辑过滤器,用于在编辑模式下修改原始消息 14 | 仅在频道消息中生效 15 | """ 16 | 17 | async def _process(self, context): 18 | """ 19 | 处理消息编辑 20 | 21 | Args: 22 | context: 消息上下文 23 | 24 | Returns: 25 | bool: 是否继续处理 26 | """ 27 | rule = context.rule 28 | event = context.event 29 | 30 | 31 | 32 | logger.debug(f"开始处理编辑过滤器,消息ID: {event.message.id}, 聊天ID: {event.chat_id}") 33 | 34 | # 如果不是编辑模式,继续后续处理 35 | if rule.handle_mode != HandleMode.EDIT: 36 | logger.debug(f"当前规则非编辑模式 (当前模式: {rule.handle_mode}),跳过编辑处理") 37 | return True 38 | 39 | # 检查是否为频道消息 40 | chat = await event.get_chat() 41 | logger.debug(f"聊天类型: 
{type(chat).__name__}, 聊天ID: {chat.id}, 聊天标题: {getattr(chat, 'title', '未知')}") 42 | 43 | if not isinstance(chat, Channel): 44 | logger.info(f"不是频道消息 (聊天类型: {type(chat).__name__}),跳过编辑") 45 | return False 46 | 47 | try: 48 | # 获取用户客户端 49 | logger.debug("尝试获取用户客户端") 50 | main = await get_main_module() 51 | user_client = main.user_client if (main and hasattr(main, 'user_client')) else None 52 | 53 | if not user_client: 54 | logger.error("无法获取用户客户端,无法执行编辑操作") 55 | return False 56 | 57 | logger.debug("成功获取用户客户端") 58 | 59 | # 根据预览模式设置 link_preview 60 | link_preview = { 61 | PreviewMode.ON: True, 62 | PreviewMode.OFF: False, 63 | PreviewMode.FOLLOW: event.message.media is not None # 跟随原消息 64 | }[rule.is_preview] 65 | 66 | logger.debug(f"预览模式: {rule.is_preview}, link_preview值: {link_preview}") 67 | 68 | # 组合消息文本 69 | message_text = context.sender_info + context.message_text + context.time_info + context.original_link 70 | 71 | logger.debug(f"原始消息文本: '{event.message.text}'") 72 | logger.debug(f"新消息文本: '{message_text}'") 73 | 74 | # 检查文本是否有变化 75 | if message_text == event.message.text: 76 | logger.info("消息文本没有变化,跳过编辑") 77 | return False 78 | 79 | # 处理媒体组消息 80 | if context.is_media_group: 81 | logger.info(f"处理媒体组消息,媒体组ID: {context.media_group_id}, 消息数量: {len(context.media_group_messages) if context.media_group_messages else '未知'}") 82 | # 尝试编辑媒体组中的每条消息 83 | if not context.media_group_messages: 84 | logger.warning("媒体组消息列表为空,无法编辑") 85 | return False 86 | 87 | for message in context.media_group_messages: 88 | try: 89 | # 只在第一条消息上添加文本 90 | text_to_edit = message_text if message.id == event.message.id else "" 91 | logger.debug(f"尝试编辑媒体组消息 {message.id}, 媒体类型: {type(message.media).__name__ if message.media else '无媒体'}") 92 | 93 | await user_client.edit_message( 94 | event.chat_id, 95 | message.id, 96 | text=text_to_edit, 97 | parse_mode=rule.message_mode.value, 98 | link_preview=link_preview 99 | ) 100 | logger.info(f"成功编辑媒体组消息 {message.id}") 101 | except Exception as e: 102 | error_details = str(e) 103 | if "was not modified" not in error_details: 104 | logger.error(f"编辑媒体组消息 {message.id} 失败: {error_details}") 105 | logger.debug(f"异常详情: {traceback.format_exc()}") 106 | else: 107 | logger.debug(f"媒体组消息 {message.id} 内容未修改,无需编辑") 108 | return False 109 | # 处理所有其他消息(包括单条媒体消息和纯文本消息) 110 | else: 111 | try: 112 | logger.debug(f"尝试编辑单条消息 {event.message.id}, 消息类型: {type(event.message).__name__}, 媒体类型: {type(event.message.media).__name__ if event.message.media else '无媒体'}") 113 | logger.debug(f"使用解析模式: {rule.message_mode.value}") 114 | 115 | await user_client.edit_message( 116 | event.chat_id, 117 | event.message.id, 118 | text=message_text, 119 | parse_mode=rule.message_mode.value, 120 | link_preview=link_preview 121 | ) 122 | logger.info(f"成功编辑消息 {event.message.id}") 123 | return False 124 | except Exception as e: 125 | error_details = str(e) 126 | if "was not modified" not in error_details: 127 | logger.error(f"编辑消息 {event.message.id} 失败: {error_details}") 128 | logger.debug(f"尝试编辑的消息ID: {event.message.id}, 聊天ID: {event.chat_id}") 129 | logger.debug(f"消息文本长度: {len(message_text)}, 解析模式: {rule.message_mode.value}") 130 | logger.debug(f"异常详情: {traceback.format_exc()}") 131 | else: 132 | logger.debug(f"消息 {event.message.id} 内容未修改,无需编辑") 133 | return False 134 | 135 | except Exception as e: 136 | logger.error(f"编辑过滤器处理出错: {str(e)}") 137 | logger.debug(f"异常详情: {traceback.format_exc()}") 138 | logger.debug(f"上下文信息 - 消息ID: {event.message.id}, 聊天ID: {event.chat_id}, 规则ID: {rule.id if hasattr(rule, 'id') else '未知'}") 139 
| return False -------------------------------------------------------------------------------- /filters/info_filter.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import pytz 4 | import re 5 | from datetime import datetime 6 | from filters.base_filter import BaseFilter 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | class InfoFilter(BaseFilter): 11 | """ 12 | 信息过滤器,添加原始链接和发送者信息 13 | """ 14 | 15 | async def _process(self, context): 16 | """ 17 | 添加原始链接和发送者信息 18 | 19 | Args: 20 | context: 消息上下文 21 | 22 | Returns: 23 | bool: 是否继续处理 24 | """ 25 | rule = context.rule 26 | event = context.event 27 | 28 | # logger.info(f"InfoFilter处理消息前,context: {context.__dict__}") 29 | try: 30 | 31 | # 添加原始链接 32 | if rule.is_original_link: 33 | # 获取原始链接的基本信息 34 | original_link = f"https://t.me/c/{str(event.chat_id)[4:]}/{event.message.id}" 35 | 36 | # 检查是否有原始链接模板 37 | if hasattr(rule, 'original_link_template') and rule.original_link_template: 38 | try: 39 | # 使用自定义链接模板 40 | link_info = rule.original_link_template 41 | link_info = link_info.replace("{original_link}", original_link) 42 | 43 | context.original_link = f"\n\n{link_info}" 44 | except Exception as le: 45 | logger.error(f'使用自定义链接模板出错: {str(le)},使用默认格式') 46 | context.original_link = f"\n\n原始消息: {original_link}" 47 | else: 48 | # 使用默认格式 49 | context.original_link = f"\n\n原始消息: {original_link}" 50 | 51 | logger.info(f'添加原始链接: {context.original_link}') 52 | 53 | # 添加发送者信息 54 | if rule.is_original_sender: 55 | try: 56 | logger.info("开始获取发送者信息") 57 | sender_name = "Unknown Sender" # 默认值 58 | sender_id = "Unknown" 59 | 60 | if hasattr(event.message, 'sender_chat') and event.message.sender_chat: 61 | # 用户以频道身份发送消息 62 | sender = event.message.sender_chat 63 | sender_name = sender.title if hasattr(sender, 'title') else "Unknown Channel" 64 | sender_id = sender.id 65 | logger.info(f"使用频道信息: {sender_name} (ID: {sender_id})") 66 | 67 | elif event.sender: 68 | # 用户以个人身份发送消息 69 | sender = event.sender 70 | sender_name = ( 71 | sender.title if hasattr(sender, 'title') 72 | else f"{sender.first_name or ''} {sender.last_name or ''}".strip() 73 | ) 74 | sender_id = sender.id 75 | logger.info(f"使用发送者信息: {sender_name} (ID: {sender_id})") 76 | 77 | elif hasattr(event.message, 'peer_id') and event.message.peer_id: 78 | # 尝试从 peer_id 获取信息 79 | peer = event.message.peer_id 80 | if hasattr(peer, 'channel_id'): 81 | sender_id = peer.channel_id 82 | try: 83 | # 尝试获取频道信息 84 | channel = await event.client.get_entity(peer) 85 | sender_name = channel.title if hasattr(channel, 'title') else "Unknown Channel" 86 | except Exception as ce: 87 | logger.error(f'获取频道信息失败: {str(ce)}') 88 | sender_name = "Unknown Channel" 89 | logger.info(f"使用peer_id信息: {sender_name} (ID: {sender_id})") 90 | 91 | # 检查是否有用户自定义模板 92 | if hasattr(rule, 'userinfo_template') and rule.userinfo_template: 93 | # 替换模板中的变量 94 | user_info = rule.userinfo_template 95 | user_info = user_info.replace("{name}", sender_name) 96 | user_info = user_info.replace("{id}", str(sender_id)) 97 | 98 | context.sender_info = f"{user_info}\n\n" 99 | else: 100 | # 使用默认格式 101 | context.sender_info = f"{sender_name}\n\n" 102 | 103 | logger.info(f'添加发送者信息: {context.sender_info}') 104 | except Exception as e: 105 | logger.error(f'获取发送者信息出错: {str(e)}') 106 | 107 | # 添加时间信息 108 | if rule.is_original_time: 109 | try: 110 | # 创建时区对象 111 | timezone = pytz.timezone(os.getenv('DEFAULT_TIMEZONE', 'Asia/Shanghai')) 112 | local_time = 
event.message.date.astimezone(timezone) 113 | 114 | # 默认格式化的时间 115 | formatted_time = local_time.strftime('%Y-%m-%d %H:%M:%S') 116 | 117 | # 检查是否有时间模板 118 | if hasattr(rule, 'time_template') and rule.time_template: 119 | try: 120 | # 使用自定义时间模板 121 | time_info = rule.time_template.replace("{time}", formatted_time) 122 | context.time_info = f"\n\n{time_info}" 123 | except Exception as te: 124 | logger.error(f'使用自定义时间模板出错: {str(te)},使用默认格式') 125 | context.time_info = f"\n\n{formatted_time}" 126 | else: 127 | # 使用默认格式 128 | context.time_info = f"\n\n{formatted_time}" 129 | 130 | logger.info(f'添加时间信息: {context.time_info}') 131 | except Exception as e: 132 | logger.error(f'处理时间信息时出错: {str(e)}') 133 | 134 | return True 135 | finally: 136 | # logger.info(f"InfoFilter处理消息后,context: {context.__dict__}") 137 | pass -------------------------------------------------------------------------------- /scheduler/chat_updater.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from datetime import datetime, timedelta 3 | import pytz 4 | import os 5 | import logging 6 | from dotenv import load_dotenv 7 | from telethon import TelegramClient 8 | from models.models import get_session, Chat 9 | import traceback 10 | from utils.constants import DEFAULT_TIMEZONE 11 | logger = logging.getLogger(__name__) 12 | 13 | class ChatUpdater: 14 | def __init__(self, user_client: TelegramClient): 15 | self.user_client = user_client 16 | self.timezone = pytz.timezone(DEFAULT_TIMEZONE) 17 | self.task = None 18 | # 从环境变量获取更新时间,默认凌晨3点 19 | self.update_time = os.getenv('CHAT_UPDATE_TIME', "03:00") 20 | 21 | async def start(self): 22 | """启动定时更新任务""" 23 | logger.info("开始启动聊天信息更新器...") 24 | try: 25 | # 计算下一次执行时间 26 | now = datetime.now(self.timezone) 27 | next_time = self._get_next_run_time(now, self.update_time) 28 | wait_seconds = (next_time - now).total_seconds() 29 | 30 | logger.info(f"下一次聊天信息更新时间: {next_time.strftime('%Y-%m-%d %H:%M:%S')}") 31 | logger.info(f"等待时间: {wait_seconds:.2f} 秒") 32 | 33 | # 创建定时任务 34 | self.task = asyncio.create_task(self._run_update_task()) 35 | logger.info("聊天信息更新器启动完成") 36 | except Exception as e: 37 | logger.error(f"启动聊天信息更新器时出错: {str(e)}") 38 | logger.error(f"错误详情: {traceback.format_exc()}") 39 | 40 | def _get_next_run_time(self, now, target_time): 41 | """计算下一次运行时间""" 42 | hour, minute = map(int, target_time.split(':')) 43 | next_time = now.replace(hour=hour, minute=minute, second=0, microsecond=0) 44 | 45 | if next_time <= now: 46 | next_time += timedelta(days=1) 47 | 48 | return next_time 49 | 50 | async def _run_update_task(self): 51 | """运行更新任务""" 52 | while True: 53 | try: 54 | # 计算下一次执行时间 55 | now = datetime.now(self.timezone) 56 | target_time = self._get_next_run_time(now, self.update_time) 57 | 58 | # 等待到执行时间 59 | wait_seconds = (target_time - now).total_seconds() 60 | await asyncio.sleep(wait_seconds) 61 | 62 | # 执行更新任务 63 | await self._update_all_chats() 64 | 65 | except asyncio.CancelledError: 66 | logger.info("聊天信息更新任务已取消") 67 | break 68 | except Exception as e: 69 | logger.error(f"聊天信息更新任务出错: {str(e)}") 70 | logger.error(f"错误详情: {traceback.format_exc()}") 71 | await asyncio.sleep(60) # 出错后等待一分钟再重试 72 | 73 | async def _update_all_chats(self): 74 | """更新所有聊天信息""" 75 | logger.info("开始更新所有聊天信息...") 76 | session = get_session() 77 | try: 78 | # 获取所有聊天 79 | chats = session.query(Chat).all() 80 | total_chats = len(chats) 81 | logger.info(f"找到 {total_chats} 个聊天需要更新信息") 82 | 83 | updated_count = 0 84 | skipped_count = 0 85 | error_count = 
0 86 | 87 | # 处理每个聊天 88 | for i, chat in enumerate(chats, 1): 89 | try: 90 | # 每10个聊天报告一次进度 91 | if i % 10 == 0 or i == total_chats: 92 | logger.info(f"进度: {i}/{total_chats} ({i/total_chats*100:.1f}%)") 93 | 94 | chat_id = chat.telegram_chat_id 95 | # 尝试获取聊天实体 96 | try: 97 | # 尝试转换聊天ID为整数 98 | try: 99 | chat_id_int = int(chat_id) 100 | except ValueError: 101 | logger.warning(f"聊天ID '{chat_id}' 不是有效的数字格式") 102 | skipped_count += 1 103 | continue 104 | 105 | entity = await self.user_client.get_entity(chat_id_int) 106 | # 更新聊天名称 107 | new_name = entity.title if hasattr(entity, 'title') else ( 108 | f"{entity.first_name} {entity.last_name}" if hasattr(entity, 'last_name') and entity.last_name 109 | else entity.first_name if hasattr(entity, 'first_name') 110 | else "私聊" 111 | ) 112 | 113 | # 只有当名称有变化时才更新 114 | if chat.name != new_name: 115 | old_name = chat.name or "未命名" 116 | chat.name = new_name 117 | session.commit() 118 | logger.info(f"已更新聊天 {chat_id}: {old_name} -> {new_name}") 119 | updated_count += 1 120 | else: 121 | skipped_count += 1 122 | 123 | except ValueError as e: 124 | logger.warning(f"无法获取聊天 {chat_id} 的信息: 无效的ID格式 - {str(e)}") 125 | skipped_count += 1 126 | continue 127 | except Exception as e: 128 | logger.warning(f"无法获取聊天 {chat_id} 的信息: {str(e)}") 129 | skipped_count += 1 130 | continue 131 | 132 | except Exception as e: 133 | logger.error(f"处理聊天 {chat.telegram_chat_id} 时出错: {str(e)}") 134 | error_count += 1 135 | continue 136 | 137 | # 每个聊天处理后暂停一会,避免请求过于频繁 138 | await asyncio.sleep(1) 139 | 140 | logger.info(f"聊天信息更新完成。总计: {total_chats}, 更新: {updated_count}, 跳过: {skipped_count}, 错误: {error_count}") 141 | 142 | except Exception as e: 143 | logger.error(f"更新聊天信息时出错: {str(e)}") 144 | logger.error(f"错误详情: {traceback.format_exc()}") 145 | finally: 146 | session.close() 147 | 148 | def stop(self): 149 | """停止定时任务""" 150 | if self.task: 151 | self.task.cancel() 152 | logger.info("聊天信息更新任务已停止") -------------------------------------------------------------------------------- /rss/app/templates/login.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 |