├── .python-version ├── handlers ├── heros │ ├── 于丹.jpg │ ├── 刘能.jpg │ ├── 杨澜.jpg │ ├── 莫言.jpg │ ├── 马云.jpg │ ├── 鲁迅.jpg │ ├── 乔布斯.jpg │ ├── 罗永浩.jpg │ ├── 郭德纲.jpg │ └── 马斯克.jpg ├── tweet.py ├── github.py ├── summary │ ├── utils.py │ ├── __main__.py │ ├── messages.py │ └── __init__.py ├── __init__.py ├── dify.py ├── sd.py ├── kling.py ├── map.py ├── qwen.py ├── llama.py ├── cohere.py ├── fake_liuneng.py ├── _tts.py ├── _yi.py ├── _utils.py ├── claude.py ├── _telegraph.py ├── gemini.py └── chatgpt.py ├── .editorconfig ├── .github └── workflows │ └── CI.yaml ├── init_tigong_db.py ├── Dockerfile ├── pyproject.toml ├── LICENSE ├── config.py ├── tg.py ├── requirements.txt ├── .gitignore ├── README.md └── setup.sh /.python-version: -------------------------------------------------------------------------------- 1 | 3.12 2 | -------------------------------------------------------------------------------- /handlers/heros/于丹.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yihong0618/tg_bot_collections/HEAD/handlers/heros/于丹.jpg -------------------------------------------------------------------------------- /handlers/heros/刘能.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yihong0618/tg_bot_collections/HEAD/handlers/heros/刘能.jpg -------------------------------------------------------------------------------- /handlers/heros/杨澜.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yihong0618/tg_bot_collections/HEAD/handlers/heros/杨澜.jpg -------------------------------------------------------------------------------- /handlers/heros/莫言.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yihong0618/tg_bot_collections/HEAD/handlers/heros/莫言.jpg -------------------------------------------------------------------------------- /handlers/heros/马云.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yihong0618/tg_bot_collections/HEAD/handlers/heros/马云.jpg -------------------------------------------------------------------------------- /handlers/heros/鲁迅.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yihong0618/tg_bot_collections/HEAD/handlers/heros/鲁迅.jpg -------------------------------------------------------------------------------- /handlers/heros/乔布斯.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yihong0618/tg_bot_collections/HEAD/handlers/heros/乔布斯.jpg -------------------------------------------------------------------------------- /handlers/heros/罗永浩.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yihong0618/tg_bot_collections/HEAD/handlers/heros/罗永浩.jpg -------------------------------------------------------------------------------- /handlers/heros/郭德纲.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yihong0618/tg_bot_collections/HEAD/handlers/heros/郭德纲.jpg -------------------------------------------------------------------------------- /handlers/heros/马斯克.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/yihong0618/tg_bot_collections/HEAD/handlers/heros/马斯克.jpg -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | end_of_line = lf 5 | 6 | [*.sh] 7 | indent_style = space 8 | indent_size = 4 9 | charset = utf-8 10 | end_of_line = lf 11 | trim_trailing_whitespace = true -------------------------------------------------------------------------------- /.github/workflows/CI.yaml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | branches: [main] 8 | workflow_dispatch: 9 | 10 | concurrency: 11 | group: ${{ github.event.number || github.run_id }} 12 | cancel-in-progress: true 13 | 14 | jobs: 15 | testing: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - uses: actions/checkout@v4 19 | - name: install python 3.9 20 | uses: actions/setup-python@v5 21 | with: 22 | python-version: "3.9" 23 | cache: "pip" # caching pip dependencies 24 | - name: Check formatting (black) 25 | run: | 26 | pip install black 27 | black . --check 28 | -------------------------------------------------------------------------------- /init_tigong_db.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """初始化提肛提醒数据库""" 3 | 4 | from handlers.summary.messages import MessageStore 5 | 6 | 7 | def main(): 8 | print("正在初始化提肛提醒数据库...") 9 | 10 | # 初始化数据库 11 | store = MessageStore("data/messages.db") 12 | 13 | print("✅ 数据库初始化完成!") 14 | print("\n数据库位置: data/messages.db") 15 | print("\n已创建的表:") 16 | print(" - messages: 存储聊天消息") 17 | print(" - tigong_alerts: 存储提肛提醒用户队列") 18 | print("\n可用的命令:") 19 | print(" /alert_me - 加入提肛提醒队列") 20 | print(" /confirm - 确认完成今日提肛") 21 | print(" /standup - 手动发送提肛提醒") 22 | print("\n功能:") 23 | print(" - 每天北京时间 8:00-18:00,每2小时自动提醒") 24 | print(" - 每达到100条消息的整数倍时自动提醒") 25 | print(" - 提醒会 @ 所有未确认的用户") 26 | 27 | 28 | if __name__ == "__main__": 29 | main() 30 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM bitnami/python:3.10 2 | WORKDIR /app 3 | # Update the package list and install necessary packages including GDAL and Cairo 4 | RUN apt-get update \ 5 | && apt-get install -y --no-install-recommends \ 6 | gdal-bin \ 7 | libgdal-dev \ 8 | libcairo2 \ 9 | && rm -rf /var/lib/apt/lists/* 10 | # Set environment variables so that Python package installer can find gdal-config 11 | ENV CPLUS_INCLUDE_PATH=/usr/include/gdal 12 | ENV C_INCLUDE_PATH=/usr/include/gdal 13 | # Copy necessary files 14 | COPY requirements.txt ./ 15 | # Upgrade pip and install dependencies from requirements.txt 16 | RUN pip install --upgrade pip && pip install --no-cache-dir -r requirements.txt 17 | # Copy project files into the container 18 | COPY *.py . 
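# The handlers/ package holds the bot command modules that load_handlers() discovers and registers at startup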
19 | COPY handlers /app/handlers 20 | # Command to run the application, using the TELEGRAM_BOT_TOKEN environment variable 21 | CMD python tg.py ${TELEGRAM_BOT_TOKEN} 22 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | 2 | [project] 3 | name = "tg_bot_collections" 4 | # PEP 621 project metadata 5 | # See https://www.python.org/dev/peps/pep-0621/ 6 | dependencies = [ 7 | "pyTelegramBotAPI>=4.16", 8 | "cairosvg", 9 | "github-poster", 10 | "prettymapp", 11 | "google-generativeai>=0.4", 12 | "anthropic", 13 | "telegramify-markdown", 14 | "openai", 15 | "requests", 16 | "urlextract", 17 | "groq", 18 | "together>=1.1.5", 19 | "dify-client>=0.1.10", 20 | "expiringdict>=1.2.2", 21 | "beautifulsoup4>=4.12.3", 22 | "Markdown>=3.6", 23 | "cohere>=5.5.8", 24 | "kling-creator>=0.0.3", 25 | "pydantic-settings>=2.10.1", 26 | "pydantic>=2.11.7", 27 | "telethon>=1.40.0", 28 | "pysocks>=1.7.1", 29 | "wcwidth>=0.2.13", 30 | ] 31 | requires-python = ">=3.10" 32 | 33 | [tool.pdm] 34 | distribution = false 35 | 36 | [tool.pdm.scripts] 37 | dev = "python tg.py --debug" -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 yihong 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /handlers/tweet.py: -------------------------------------------------------------------------------- 1 | from telebot import TeleBot 2 | from telebot.types import Message 3 | from urlextract import URLExtract 4 | 5 | from ._utils import bot_reply_first, bot_reply_markdown 6 | 7 | 8 | def tweet_handler(message: Message, bot: TeleBot): 9 | """tweet: /t """ 10 | who = "tweet" 11 | 12 | extractor = URLExtract() 13 | links = extractor.find_urls(message.text) 14 | 15 | only_links = len("".join(links)) == len(message.text.strip()) 16 | if links: 17 | reply_id = bot_reply_first(message, who, bot) 18 | processed_links = [ 19 | link.replace("https://twitter.com", "https://fxtwitter.com").replace( 20 | "https://x.com", "https://fixupx.com" 21 | ) 22 | for link in links 23 | ] 24 | bot_reply_markdown(reply_id, who, "\n".join(processed_links), bot) 25 | 26 | if only_links: 27 | bot.delete_message(message.chat.id, message.message_id) 28 | 29 | 30 | def register(bot: TeleBot) -> None: 31 | bot.register_message_handler(tweet_handler, commands=["t"], pass_bot=True) 32 | bot.register_message_handler(tweet_handler, regexp="^t:", pass_bot=True) 33 | -------------------------------------------------------------------------------- /config.py: -------------------------------------------------------------------------------- 1 | from functools import cached_property 2 | 3 | import openai 4 | from pydantic_settings import BaseSettings, SettingsConfigDict 5 | 6 | 7 | class Settings(BaseSettings): 8 | model_config = SettingsConfigDict(env_file=".env", extra="ignore") 9 | 10 | telegram_bot_token: str 11 | timezone: str = "Asia/Shanghai" 12 | 13 | openai_api_key: str | None = None 14 | openai_model: str = "gpt-4o-mini" 15 | openai_base_url: str = "https://api.openai.com/v1" 16 | 17 | google_gemini_api_key: str | None = None 18 | anthropic_api_key: str | None = None 19 | telegra_ph_token: str | None = None 20 | ollama_web_search_api_key: str | None = None 21 | ollama_web_search_max_results: int = 5 22 | ollama_web_search_timeout: int = 10 23 | 24 | @cached_property 25 | def openai_client(self) -> openai.OpenAI: 26 | return openai.OpenAI( 27 | api_key=self.openai_api_key, 28 | base_url=self.openai_base_url, 29 | ) 30 | 31 | @cached_property 32 | def telegraph_client(self): 33 | from handlers._telegraph import TelegraphAPI 34 | 35 | return TelegraphAPI(self.telegra_ph_token) 36 | 37 | 38 | settings = Settings() # type: ignore 39 | -------------------------------------------------------------------------------- /handlers/github.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | 3 | from telebot import TeleBot 4 | from telebot.types import Message 5 | 6 | 7 | def github_poster_handler(message: Message, bot: TeleBot): 8 | """github poster: /github [-]""" 9 | m = message.text.strip() 10 | message_list = m.split(",") 11 | name = message_list[0].strip() 12 | cmd_list = ["github_poster", "github", "--github_user_name", name, "--me", name] 13 | if len(message_list) > 1: 14 | years = message_list[1] 15 | cmd_list.append("--year") 16 | cmd_list.append(years.strip()) 17 | r = subprocess.check_output(cmd_list).decode("utf-8") 18 | if "done" in r: 19 | # TODO windows path 20 | r = subprocess.check_output( 21 | ["cairosvg", "OUT_FOLDER/github.svg", "-o", f"github_{name}.png"] 22 | ).decode("utf-8") 23 | with open(f"github_{name}.png", "rb") as photo: 24 | bot.send_photo( 25 | message.chat.id, 
photo, reply_to_message_id=message.message_id 26 | ) 27 | 28 | 29 | def register(bot: TeleBot) -> None: 30 | bot.register_message_handler( 31 | github_poster_handler, commands=["github"], pass_bot=True 32 | ) 33 | bot.register_message_handler( 34 | github_poster_handler, regexp="^github:", pass_bot=True 35 | ) 36 | -------------------------------------------------------------------------------- /tg.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | 4 | from telebot import TeleBot 5 | 6 | from config import settings 7 | from handlers import list_available_commands, load_handlers 8 | 9 | logger = logging.getLogger("bot") 10 | 11 | 12 | def setup_logging(debug: bool): 13 | logger.setLevel(logging.DEBUG if debug else logging.INFO) 14 | handler = logging.StreamHandler() 15 | handler.setFormatter( 16 | logging.Formatter( 17 | "%(asctime)s - [%(levelname)s] - %(filename)s:%(lineno)d - %(message)s" 18 | ) 19 | ) 20 | logger.addHandler(handler) 21 | 22 | 23 | def main(): 24 | # Init args 25 | parser = argparse.ArgumentParser() 26 | parser.add_argument( 27 | "tg_token", help="tg token", default=settings.telegram_bot_token, nargs="?" 28 | ) 29 | parser.add_argument( 30 | "--debug", "--verbose", "-v", action="store_true", help="Enable debug mode" 31 | ) 32 | 33 | # 'disable-command' option 34 | # The action 'append' will allow multiple entries to be saved into a list 35 | # The variable name is changed to 'disable_commands' 36 | parser.add_argument( 37 | "--disable-command", 38 | action="append", 39 | dest="disable_commands", 40 | help="Specify a command to disable. Can be used multiple times.", 41 | default=[], 42 | choices=list_available_commands(), 43 | ) 44 | 45 | options = parser.parse_args() 46 | setup_logging(options.debug) 47 | 48 | # Init bot 49 | bot = TeleBot(options.tg_token) 50 | load_handlers(bot, options.disable_commands) 51 | logger.info("Bot init done.") 52 | 53 | # Start bot 54 | logger.info("Starting tg collections bot.") 55 | bot.infinity_polling(timeout=10, long_polling_timeout=5) 56 | 57 | 58 | if __name__ == "__main__": 59 | main() 60 | -------------------------------------------------------------------------------- /handlers/summary/utils.py: -------------------------------------------------------------------------------- 1 | import re 2 | import zoneinfo 3 | from datetime import datetime, timedelta 4 | 5 | from telebot import TeleBot 6 | from telebot.types import Message 7 | 8 | PROMPT = """\ 9 | 请将下面的聊天记录进行总结,包含讨论了哪些话题,有哪些亮点发言和主要观点。 10 | 引用用户名请加粗。直接返回内容即可,不要包含引导词和标题。 11 | --- Messages Start --- 12 | {messages} 13 | --- Messages End --- 14 | """ 15 | 16 | 17 | def contains_non_ascii(text: str) -> bool: 18 | return not text.isascii() 19 | 20 | 21 | def filter_message(message: Message, bot: TeleBot, check_chinese: bool = False) -> bool: 22 | """过滤消息,排除非文本消息和命令消息 23 | 24 | Args: 25 | message: 消息对象 26 | bot: Bot 实例 27 | check_chinese: 是否允许检查中文消息(即不过滤命令) 28 | """ 29 | if not message.text: 30 | return False 31 | if not message.from_user: 32 | return False 33 | if message.from_user.id == bot.get_me().id: 34 | return False 35 | # 如果需要检查中文,则不过滤命令消息(让 handle_message 处理) 36 | if not check_chinese and message.text.startswith("/"): 37 | return False 38 | return True 39 | 40 | 41 | date_regex = re.compile(r"^(\d+)([dhm])$") 42 | 43 | 44 | def parse_date(date_str: str, locale: str) -> tuple[datetime, datetime]: 45 | date_str = date_str.strip().lower() 46 | now = datetime.now(tz=zoneinfo.ZoneInfo(locale)) 47 | if 
date_str == "today": 48 | return now.replace(hour=0, minute=0, second=0, microsecond=0), now 49 | elif m := date_regex.match(date_str): 50 | number = int(m.group(1)) 51 | unit = m.group(2) 52 | match unit: 53 | case "d": 54 | return now - timedelta(days=number), now 55 | case "h": 56 | return now - timedelta(hours=number), now 57 | case "m": 58 | return now - timedelta(minutes=number), now 59 | raise ValueError(f"Unsupported date format: {date_str}") 60 | -------------------------------------------------------------------------------- /handlers/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | from pathlib import Path 3 | 4 | from telebot import TeleBot 5 | from telebot.types import BotCommand 6 | 7 | from ._utils import logger, wrap_handler 8 | 9 | DEFAULT_LOAD_PRIORITY = 10 10 | 11 | 12 | def list_available_commands() -> list[str]: 13 | commands = [] 14 | this_path = Path(__file__).parent 15 | for child in this_path.iterdir(): 16 | if child.name.startswith("_"): 17 | continue 18 | commands.append(child.stem) 19 | return commands 20 | 21 | 22 | def load_handlers(bot: TeleBot, disable_commands: list[str]) -> None: 23 | # import all submodules 24 | modules_with_priority = [] 25 | for name in list_available_commands(): 26 | if name in disable_commands: 27 | continue 28 | module = importlib.import_module(f".{name}", __package__) 29 | load_priority = getattr(module, "load_priority", DEFAULT_LOAD_PRIORITY) 30 | modules_with_priority.append((module, name, load_priority)) 31 | 32 | modules_with_priority.sort(key=lambda x: x[-1]) 33 | for module, name, priority in modules_with_priority: 34 | if hasattr(module, "register"): 35 | logger.debug(f"Loading {name} handlers with priority {priority}.") 36 | module.register(bot) 37 | logger.info("Loading handlers done.") 38 | 39 | all_commands: list[BotCommand] = [] 40 | for handler in bot.message_handlers: 41 | help_text = getattr(handler["function"], "__doc__", "") 42 | # Add pre-processing and error handling to all callbacks 43 | handler["function"] = wrap_handler(handler["function"], bot) 44 | for command in handler["filters"].get("commands", []): 45 | all_commands.append(BotCommand(command, help_text)) 46 | 47 | if all_commands: 48 | bot.set_my_commands(all_commands) 49 | logger.info("Setting commands done.") 50 | -------------------------------------------------------------------------------- /handlers/summary/__main__.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import asyncio 4 | import os 5 | import sys 6 | 7 | from .messages import ChatMessage, MessageStore 8 | 9 | 10 | async def fetch_messages(chat_id: int) -> None: 11 | from telethon import TelegramClient 12 | from telethon.tl.types import Message 13 | 14 | store = MessageStore("data/messages.db") 15 | 16 | api_id = int(os.getenv("TELEGRAM_API_ID")) 17 | api_hash = os.getenv("TELEGRAM_API_HASH") 18 | async with TelegramClient("test", api_id, api_hash) as client: 19 | assert isinstance(client, TelegramClient) 20 | with store.connect() as conn: 21 | async for message in client.iter_messages(chat_id, reverse=True): 22 | if not isinstance(message, Message) or not message.message: 23 | continue 24 | if not message.from_id: 25 | continue 26 | print(message.pretty_format(message)) 27 | user = await client.get_entity(message.from_id) 28 | fullname = user.first_name 29 | if user.last_name: 30 | fullname += f" {user.last_name}" 31 | store.add_message( 
32 | ChatMessage( 33 | chat_id=chat_id, 34 | message_id=message.id, 35 | content=message.message, 36 | user_id=message.from_id.user_id, 37 | user_name=fullname, 38 | timestamp=message.date, 39 | ), 40 | conn=conn, 41 | ) 42 | 43 | 44 | if __name__ == "__main__": 45 | if len(sys.argv) != 2: 46 | print("Usage: python -m handlers.summary ") 47 | sys.exit(1) 48 | chat_id = int(sys.argv[1]) 49 | asyncio.run(fetch_messages(chat_id)) # 替换为实际的群组ID 50 | -------------------------------------------------------------------------------- /handlers/dify.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | import time 4 | 5 | # TODO: update requirements.txt and setup tools 6 | # pip install dify-client 7 | from dify_client import ChatClient 8 | from telebot import TeleBot 9 | from telebot.types import Message 10 | 11 | from ._utils import bot_reply_first, bot_reply_markdown, enrich_text_with_urls 12 | 13 | 14 | def dify_handler(message: Message, bot: TeleBot) -> None: 15 | """dify : /dify API_Key """ 16 | m = message.text.strip() 17 | 18 | if re.match(r"^app-\w+$", m, re.IGNORECASE): 19 | bot.reply_to( 20 | message, 21 | "Thanks!\nFor conversation, please make a space between your API_Key and your question.", 22 | ) 23 | return 24 | if re.match(r"^app-[a-zA-Z0-9]+ .*$", m, re.IGNORECASE): 25 | Dify_API_KEY = m.split(" ", 1)[0] 26 | m = m.split(" ", 1)[1] 27 | else: 28 | bot.reply_to(message, "Please provide a valid API key.") 29 | return 30 | client = ChatClient(api_key=Dify_API_KEY) 31 | # Init client with API key 32 | 33 | m = enrich_text_with_urls(m) 34 | 35 | who = "dify" 36 | # show something, make it more responsible 37 | reply_id = bot_reply_first(message, who, bot) 38 | 39 | try: 40 | r = client.create_chat_message( 41 | inputs={}, 42 | query=m, 43 | user=str(message.from_user.id), 44 | response_mode="streaming", 45 | ) 46 | s = "" 47 | start = time.time() 48 | overall_start = time.time() 49 | for chunk in r.iter_lines(decode_unicode=True): 50 | chunk = chunk.split("data:", 1)[-1] 51 | if chunk.strip(): 52 | chunk = json.loads(chunk.strip()) 53 | answer_chunk = chunk.get("answer", "") 54 | s += answer_chunk 55 | if time.time() - start > 1.5: 56 | start = time.time() 57 | bot_reply_markdown(reply_id, who, s, bot, split_text=False) 58 | if time.time() - overall_start > 120: # Timeout 59 | s += "\n\nTimeout" 60 | break 61 | # maybe not complete 62 | try: 63 | bot_reply_markdown(reply_id, who, s, bot) 64 | except: 65 | pass 66 | 67 | except Exception as e: 68 | print(e) 69 | bot.reply_to(message, "answer wrong maybe up to the max token") 70 | # pop my user 71 | return 72 | 73 | # reply back as Markdown and fallback to plain text if failed. 74 | bot_reply_markdown(reply_id, who, s, bot) 75 | 76 | 77 | if True: 78 | 79 | def register(bot: TeleBot) -> None: 80 | bot.register_message_handler(dify_handler, commands=["dify"], pass_bot=True) 81 | bot.register_message_handler(dify_handler, regexp="^dify:", pass_bot=True) 82 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # This file is @generated by PDM. 2 | # Please do not edit it manually. 
3 | 4 | aiohttp==3.9.5 5 | aiosignal==1.3.1 6 | annotated-types==0.6.0 7 | anthropic==0.32.0 8 | anyio==4.3.0 9 | async-timeout==4.0.3; python_version < "3.11" 10 | attrs==23.2.0 11 | beautifulsoup4==4.12.3 12 | boto3==1.34.135 13 | botocore==1.34.135 14 | cachetools==5.3.3 15 | cairocffi==1.7.0 16 | cairosvg==2.7.1 17 | certifi==2024.2.2 18 | cffi==1.16.0 19 | charset-normalizer==3.3.2 20 | click==8.1.7 21 | click-plugins==1.1.1 22 | cligj==0.7.2 23 | cohere==5.6.2 24 | colorama==0.4.6; platform_system == "Windows" 25 | colour==0.1.5 26 | contourpy==1.2.1 27 | cssselect2==0.7.0 28 | cycler==0.12.1 29 | defusedxml==0.7.1 30 | dify-client==0.1.10 31 | distro==1.9.0 32 | emoji==2.11.1 33 | eval-type-backport==0.2.0 34 | exceptiongroup==1.2.1; python_version < "3.11" 35 | expiringdict==1.2.2 36 | fake-useragent==1.5.1 37 | fastavro==1.9.4 38 | filelock==3.14.0 39 | fiona==1.9.6 40 | fonttools==4.51.0 41 | frozenlist==1.4.1 42 | fsspec==2024.3.1 43 | geopandas==0.14.4 44 | github-poster==2.7.4 45 | google-ai-generativelanguage==0.6.6 46 | google-api-core[grpc]==2.19.0 47 | google-api-python-client==2.128.0 48 | google-auth==2.29.0 49 | google-auth-httplib2==0.2.0 50 | google-generativeai==0.7.2 51 | googleapis-common-protos==1.63.0 52 | groq==0.9.0 53 | grpcio==1.63.0 54 | grpcio-status==1.62.2 55 | h11==0.14.0 56 | httpcore==1.0.5 57 | httplib2==0.22.0 58 | httpx==0.27.0 59 | httpx-sse==0.4.0 60 | huggingface-hub==0.23.0 61 | idna==3.7 62 | jiter==0.5.0 63 | jmespath==1.0.1 64 | kiwisolver==1.4.5 65 | kling-creator==0.3.0 66 | markdown==3.6 67 | markdown-it-py==3.0.0 68 | matplotlib==3.8.4 69 | mdurl==0.1.2 70 | mistletoe==1.4.0 71 | multidict==6.0.5 72 | networkx==3.3 73 | numpy==1.26.4 74 | openai==1.37.2 75 | osmnx==1.9.2 76 | packaging==24.0 77 | pandas==2.2.2 78 | parameterized==0.9.0 79 | pendulum==3.0.0 80 | pillow==10.3.0 81 | platformdirs==4.2.1 82 | prettymapp==0.3.0 83 | proto-plus==1.23.0 84 | protobuf==4.25.3 85 | pyaes==1.6.1 86 | pyarrow==16.0.0 87 | pyasn1==0.6.0 88 | pyasn1-modules==0.4.0 89 | pycparser==2.22 90 | pydantic==2.11.7 91 | pydantic-core==2.33.2 92 | pydantic-settings==2.10.1 93 | pygments==2.18.0 94 | pyogrio==0.7.2 95 | pyparsing==3.1.2 96 | pyproj==3.6.1 97 | pysocks==1.7.1 98 | pytelegrambotapi==4.21.0 99 | python-dateutil==2.9.0.post0 100 | python-dotenv==1.1.1 101 | pytz==2024.1 102 | pyyaml==6.0.1 103 | requests==2.32.3 104 | rich==13.7.1 105 | rsa==4.9 106 | s3transfer==0.10.2 107 | shapely==2.0.4 108 | shellingham==1.5.4 109 | six==1.16.0 110 | sniffio==1.3.1 111 | soupsieve==2.5 112 | svgwrite==1.4.3 113 | tabulate==0.9.0 114 | telegramify-markdown==0.1.9 115 | telethon==1.40.0 116 | time-machine==2.14.1; implementation_name != "pypy" 117 | tinycss2==1.3.0 118 | together==1.2.5 119 | tokenizers==0.19.1 120 | tqdm==4.66.4 121 | typer==0.12.3 122 | types-requests==2.32.0.20240622 123 | typing-extensions==4.14.1 124 | typing-inspection==0.4.1 125 | tzdata==2024.1 126 | uritemplate==4.1.1 127 | uritools==4.0.2 128 | urlextract==1.9.0 129 | urllib3==2.2.1 130 | wcwidth==0.2.13 131 | webencodings==0.5.1 132 | yarl==1.9.4 133 | -------------------------------------------------------------------------------- /handlers/sd.py: -------------------------------------------------------------------------------- 1 | from os import environ 2 | 3 | import requests 4 | from telebot import TeleBot 5 | from telebot.types import Message 6 | 7 | from config import settings 8 | 9 | SD_API_KEY = environ.get("SD3_KEY") 10 | 11 | # TODO refactor this shit to __init__ 12 | 
CHATGPT_PRO_MODEL = settings.openai_model 13 | 14 | 15 | def get_user_balance(): 16 | api_host = "https://api.stability.ai" 17 | url = f"{api_host}/v1/user/balance" 18 | 19 | response = requests.get(url, headers={"Authorization": f"Bearer {SD_API_KEY}"}) 20 | 21 | if response.status_code != 200: 22 | print("Non-200 response: " + str(response.text)) 23 | 24 | # Do something with the payload... 25 | payload = response.json() 26 | return payload["credits"] 27 | 28 | 29 | def generate_sd3_image(prompt): 30 | response = requests.post( 31 | "https://api.stability.ai/v2beta/stable-image/generate/sd3", 32 | headers={"authorization": f"Bearer {SD_API_KEY}", "accept": "image/*"}, 33 | files={"none": ""}, 34 | data={ 35 | "prompt": prompt, 36 | "model": "sd3-turbo", 37 | "output_format": "jpeg", 38 | }, 39 | ) 40 | 41 | if response.status_code == 200: 42 | with open("sd3.jpeg", "wb") as file: 43 | file.write(response.content) 44 | return True 45 | else: 46 | print(str(response.json())) 47 | return False 48 | 49 | 50 | def sd_handler(message: Message, bot: TeleBot): 51 | """pretty sd3: /sd3
""" 52 | credits = get_user_balance() 53 | bot.reply_to( 54 | message, 55 | f"Generating pretty sd3-turbo image may take some time please left credits {credits} every try will cost 4 criedits wait:", 56 | ) 57 | m = message.text.strip() 58 | prompt = m.strip() 59 | r = generate_sd3_image(prompt) 60 | if r: 61 | with open("sd3.jpeg", "rb") as photo: 62 | bot.send_photo( 63 | message.chat.id, photo, reply_to_message_id=message.message_id 64 | ) 65 | else: 66 | bot.reply_to(message, "prompt error") 67 | 68 | 69 | def sd_pro_handler(message: Message, bot: TeleBot): 70 | """pretty sd3_pro: /sd3_pro
""" 71 | credits = get_user_balance() 72 | m = message.text.strip() 73 | prompt = m.strip() 74 | rewrite_prompt = ( 75 | f"revise `{prompt}` to a DALL-E prompt only return the prompt in English." 76 | ) 77 | completion = settings.openai_client.chat.completions.create( 78 | messages=[{"role": "user", "content": rewrite_prompt}], 79 | max_tokens=2048, 80 | model=CHATGPT_PRO_MODEL, 81 | ) 82 | sd_prompt = completion.choices[0].message.content.encode("utf8").decode() 83 | # drop all the Chinese characters 84 | sd_prompt = "".join([i for i in sd_prompt if ord(i) < 128]) 85 | bot.reply_to( 86 | message, 87 | f"Generating pretty sd3-turbo image may take some time please left credits {credits} every try will cost 4 criedits wait:\n the real prompt is: {sd_prompt}", 88 | ) 89 | r = generate_sd3_image(sd_prompt) 90 | if r: 91 | with open("sd3.jpeg", "rb") as photo: 92 | bot.send_photo( 93 | message.chat.id, photo, reply_to_message_id=message.message_id 94 | ) 95 | else: 96 | bot.reply_to(message, "prompt error") 97 | 98 | 99 | if SD_API_KEY and settings.openai_api_key: 100 | 101 | def register(bot: TeleBot) -> None: 102 | bot.register_message_handler(sd_handler, commands=["sd3"], pass_bot=True) 103 | bot.register_message_handler(sd_handler, regexp="^sd3:", pass_bot=True) 104 | bot.register_message_handler( 105 | sd_pro_handler, commands=["sd3_pro"], pass_bot=True 106 | ) 107 | bot.register_message_handler(sd_pro_handler, regexp="^sd3_pro:", pass_bot=True) 108 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | #.idea/ 161 | *.jpg 162 | *.png 163 | *.jpeg 164 | OUT_FOLDER/ 165 | cache/ 166 | nohup.out 167 | *.mp3 168 | *.mp4 169 | *.pdf 170 | .pdm-python 171 | *.wav 172 | token_key.json 173 | messages.db 174 | *.session 175 | -------------------------------------------------------------------------------- /handlers/kling.py: -------------------------------------------------------------------------------- 1 | import re 2 | from os import environ 3 | 4 | import requests 5 | from expiringdict import ExpiringDict 6 | from kling import ImageGen, VideoGen 7 | from telebot import TeleBot 8 | from telebot.types import InputMediaPhoto, Message 9 | 10 | from ._utils import logger 11 | 12 | KLING_COOKIE = environ.get("KLING_COOKIE") 13 | pngs_link_dict = ExpiringDict(max_len=100, max_age_seconds=60 * 10) 14 | 15 | 16 | def kling_handler(message: Message, bot: TeleBot): 17 | """kling: /kling
""" 18 | bot.reply_to( 19 | message, 20 | "Generating pretty kling image may take some time please wait", 21 | ) 22 | m = message.text.strip() 23 | prompt = m.strip() 24 | links = None 25 | try: 26 | i = ImageGen(KLING_COOKIE) 27 | links = i.get_images(prompt) 28 | # set the dict 29 | try: 30 | pngs_link_dict[str(message.from_user.id)] = links 31 | except Exception as e: 32 | print(str(e)) 33 | except Exception as e: 34 | print(str(e)) 35 | bot.reply_to(message, "kling error maybe block the prompt") 36 | return 37 | photos_list = [InputMediaPhoto(i) for i in links] 38 | bot.send_media_group( 39 | message.chat.id, 40 | photos_list, 41 | reply_to_message_id=message.message_id, 42 | disable_notification=True, 43 | ) 44 | 45 | 46 | def kling_pro_handler(message: Message, bot: TeleBot): 47 | """kling: /kling
""" 48 | bot.reply_to( 49 | message, 50 | "Generating pretty kling video may take a long time about 2mins to 5mins please wait", 51 | ) 52 | m = message.text.strip() 53 | prompt = m.strip() 54 | # drop all the spaces 55 | prompt = prompt.replace(" ", "") 56 | # find `图{number}` in prompt 57 | number = re.findall(r"图\d+", prompt) 58 | number = number[0] if number else None 59 | if number: 60 | number = int(number.replace("图", "")) 61 | v = VideoGen(KLING_COOKIE) 62 | video_links = None 63 | image_url = None 64 | if number and number <= 9 and pngs_link_dict.get(str(message.from_user.id)): 65 | if number - 1 <= len(pngs_link_dict.get(str(message.from_user.id))): 66 | image_url = pngs_link_dict.get(str(message.from_user.id))[number - 1] 67 | print(image_url) 68 | try: 69 | video_links = v.get_video(prompt, image_url=image_url) 70 | except Exception as e: 71 | print(str(e)) 72 | bot.reply_to(message, "kling error maybe block the prompt") 73 | return 74 | if not video_links: 75 | bot.reply_to(message, "video not generate") 76 | return 77 | response = requests.get(video_links[0]) 78 | if response.status_code != 200: 79 | bot.reply_to(message, "could not fetch the video") 80 | # save response to file 81 | with open("kling.mp4", "wb") as output_file: 82 | output_file.write(response.content) 83 | bot.send_video( 84 | message.chat.id, 85 | open("kling.mp4", "rb"), 86 | caption=prompt, 87 | reply_to_message_id=message.message_id, 88 | ) 89 | 90 | 91 | def kling_photo_handler(message: Message, bot: TeleBot) -> None: 92 | s = message.caption 93 | prompt = s.strip() 94 | # show something, make it more responsible 95 | # get the high quaility picture. 96 | max_size_photo = max(message.photo, key=lambda p: p.file_size) 97 | file_path = bot.get_file(max_size_photo.file_id).file_path 98 | downloaded_file = bot.download_file(file_path) 99 | bot.reply_to( 100 | message, 101 | "Generating pretty kling image using your photo may take some time please wait", 102 | ) 103 | with open("kling.jpg", "wb") as temp_file: 104 | temp_file.write(downloaded_file) 105 | i = ImageGen(KLING_COOKIE) 106 | links = None 107 | try: 108 | links = i.get_images(prompt, "kling.jpg") 109 | # set the dict 110 | try: 111 | pngs_link_dict[str(message.from_user.id)] = links 112 | except Exception: 113 | logger.exception("Kling photo handler error") 114 | except Exception: 115 | logger.exception("Kling photo handler error") 116 | bot.reply_to(message, "kling error maybe block the prompt") 117 | return 118 | photos_list = [InputMediaPhoto(i) for i in links] 119 | bot.send_media_group( 120 | message.chat.id, 121 | photos_list, 122 | reply_to_message_id=message.message_id, 123 | disable_notification=True, 124 | ) 125 | 126 | 127 | if KLING_COOKIE: 128 | 129 | def register(bot: TeleBot) -> None: 130 | bot.register_message_handler(kling_handler, commands=["kling"], pass_bot=True) 131 | bot.register_message_handler(kling_handler, regexp="^kling:", pass_bot=True) 132 | # kling pro means video 133 | bot.register_message_handler( 134 | kling_pro_handler, commands=["kling_pro"], pass_bot=True 135 | ) 136 | -------------------------------------------------------------------------------- /handlers/map.py: -------------------------------------------------------------------------------- 1 | import gc 2 | import random 3 | import shutil 4 | from tempfile import SpooledTemporaryFile 5 | 6 | import numpy as np 7 | import PIL.Image 8 | from matplotlib import figure 9 | from prettymapp.geo import get_aoi 10 | from prettymapp.osm import get_osm_geometries 11 | 
from prettymapp.plotting import Plot as PrettyPlot 12 | from prettymapp.settings import STYLES 13 | from telebot import TeleBot 14 | from telebot.types import Message 15 | 16 | MAX_IN_MEMORY = 10 * 1024 * 1024 # 10MiB 17 | PIL.Image.MAX_IMAGE_PIXELS = 933120000 18 | 19 | 20 | class Plot(PrettyPlot): 21 | # memory leak fix for Plot. thanks @higuoxing https://github.com/higuoxing 22 | # refer to: https://www.mail-archive.com/matplotlib-users@lists.sourceforge.net/msg11809.html 23 | def __post_init__(self): 24 | ( 25 | self.xmin, 26 | self.ymin, 27 | self.xmax, 28 | self.ymax, 29 | ) = self.aoi_bounds 30 | # take from aoi geometry bounds, otherwise problematic if unequal geometry distribution over plot. 31 | self.xmid = (self.xmin + self.xmax) / 2 32 | self.ymid = (self.ymin + self.ymax) / 2 33 | self.xdif = self.xmax - self.xmin 34 | self.ydif = self.ymax - self.ymin 35 | 36 | self.bg_buffer_x = (self.bg_buffer / 100) * self.xdif 37 | self.bg_buffer_y = (self.bg_buffer / 100) * self.ydif 38 | 39 | # self.fig, self.ax = subplots( 40 | # 1, 1, figsize=(12, 12), constrained_layout=True, dpi=1200 41 | # ) 42 | self.fig = figure.Figure(figsize=(12, 12), constrained_layout=True, dpi=1200) 43 | self.ax = self.fig.subplots(1, 1) 44 | self.ax.set_aspect(1 / np.cos(self.ymid * np.pi / 180)) 45 | 46 | self.ax.axis("off") 47 | self.ax.set_xlim(self.xmin - self.bg_buffer_x, self.xmax + self.bg_buffer_x) 48 | self.ax.set_ylim(self.ymin - self.bg_buffer_y, self.ymax + self.bg_buffer_y) 49 | 50 | 51 | def sizeof_image(image): 52 | with SpooledTemporaryFile(max_size=MAX_IN_MEMORY) as f: 53 | image.save(f, format="JPEG", quality=95) 54 | return f.tell() 55 | 56 | 57 | def compress_image(input_image, output_image, target_size): 58 | quality = 95 59 | factor = 1.0 60 | with PIL.Image.open(input_image) as img: 61 | while sizeof_image(img) > target_size: 62 | factor -= 0.05 63 | width, height = img.size 64 | img = img.resize( 65 | (int(width * factor), int(height * factor)), 66 | PIL.Image.Resampling.LANCZOS, 67 | ) 68 | img.save(output_image, format="JPEG", quality=quality) 69 | output_image.seek(0) 70 | 71 | 72 | def draw_pretty_map(location, style, output_file): 73 | aoi = get_aoi(address=location, radius=1100, rectangular=True) 74 | df = get_osm_geometries(aoi=aoi) 75 | fig = Plot(df=df, aoi_bounds=aoi.bounds, draw_settings=STYLES[style]).plot_all() 76 | with SpooledTemporaryFile(max_size=MAX_IN_MEMORY) as buffer: 77 | fig.savefig(buffer, format="jpeg") 78 | buffer.seek(0) 79 | compress_image( 80 | buffer, 81 | output_file, 82 | 10 * 1024 * 1024, # telegram tog need png less than 10MB 83 | ) 84 | 85 | 86 | def map_handler(message: Message, bot: TeleBot): 87 | """pretty map: /map
""" 88 | bot.reply_to(message, "Generating pretty map may take some time please wait:") 89 | m = message.text.strip() 90 | location = m.strip() 91 | styles_list = list(STYLES.keys()) 92 | style = random.choice(styles_list) 93 | with SpooledTemporaryFile(max_size=MAX_IN_MEMORY) as out_image: 94 | try: 95 | draw_pretty_map(location, style, out_image) 96 | # tg can only send image less than 10MB 97 | with open("map_out.jpg", "wb") as f: # for debug 98 | shutil.copyfileobj(out_image, f) 99 | out_image.seek(0) 100 | bot.send_photo( 101 | message.chat.id, out_image, reply_to_message_id=message.message_id 102 | ) 103 | finally: 104 | gc.collect() 105 | 106 | 107 | def map_location_handler(message: Message, bot: TeleBot): 108 | # TODO refactor the function 109 | location = "{0}, {1}".format(message.location.latitude, message.location.longitude) 110 | styles_list = list(STYLES.keys()) 111 | style = random.choice(styles_list) 112 | try: 113 | with SpooledTemporaryFile(max_size=MAX_IN_MEMORY) as out_image: 114 | draw_pretty_map(location, style, out_image) 115 | # tg can only send image less than 10MB 116 | with open("map_out.jpg", "wb") as f: # for debug 117 | shutil.copyfileobj(out_image, f) 118 | out_image.seek(0) 119 | bot.send_photo( 120 | message.chat.id, out_image, reply_to_message_id=message.message_id 121 | ) 122 | 123 | finally: 124 | gc.collect() 125 | 126 | 127 | def register(bot: TeleBot) -> None: 128 | bot.register_message_handler(map_handler, commands=["map"], pass_bot=True) 129 | bot.register_message_handler(map_handler, regexp="^map:", pass_bot=True) 130 | bot.register_message_handler( 131 | map_location_handler, content_types=["location", "venue"], pass_bot=True 132 | ) 133 | -------------------------------------------------------------------------------- /handlers/qwen.py: -------------------------------------------------------------------------------- 1 | # qwen use https://api.together.xyz 2 | import time 3 | from os import environ 4 | 5 | from expiringdict import ExpiringDict 6 | from telebot import TeleBot 7 | from telebot.types import Message 8 | from telegramify_markdown import markdownify 9 | from together import Together 10 | 11 | from ._utils import bot_reply_first, bot_reply_markdown, enrich_text_with_urls, logger 12 | 13 | 14 | QWEN_API_KEY = environ.get("TOGETHER_API_KEY") 15 | QWEN_MODEL = "Qwen/Qwen2-72B-Instruct" 16 | 17 | if QWEN_API_KEY: 18 | client = Together(api_key=QWEN_API_KEY) 19 | 20 | # Global history cache 21 | qwen_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600) 22 | qwen_pro_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600) 23 | 24 | 25 | def qwen_handler(message: Message, bot: TeleBot) -> None: 26 | """qwen : /qwen """ 27 | m = message.text.strip() 28 | 29 | player_message = [] 30 | # restart will lose all TODO 31 | if str(message.from_user.id) not in qwen_player_dict: 32 | qwen_player_dict[str(message.from_user.id)] = ( 33 | player_message # for the imuutable list 34 | ) 35 | else: 36 | player_message = qwen_player_dict[str(message.from_user.id)] 37 | if m.strip() == "clear": 38 | bot.reply_to( 39 | message, 40 | "just clear your qwen messages history", 41 | ) 42 | player_message.clear() 43 | return 44 | if m[:4].lower() == "new ": 45 | m = m[4:].strip() 46 | player_message.clear() 47 | m = enrich_text_with_urls(m) 48 | 49 | who = "qwen" 50 | # show something, make it more responsible 51 | reply_id = bot_reply_first(message, who, bot) 52 | 53 | player_message.append({"role": "user", "content": m}) 54 | # keep the last 5, every 
has two ask and answer. 55 | if len(player_message) > 10: 56 | player_message = player_message[2:] 57 | 58 | qwen_reply_text = "" 59 | try: 60 | r = client.chat.completions.create( 61 | messages=player_message, max_tokens=8192, model=QWEN_MODEL 62 | ) 63 | content = r.choices[0].message.content.encode("utf8").decode() 64 | if not content: 65 | qwen_reply_text = f"{who} did not answer." 66 | player_message.pop() 67 | else: 68 | qwen_reply_text = content 69 | player_message.append( 70 | { 71 | "role": "assistant", 72 | "content": qwen_reply_text, 73 | } 74 | ) 75 | 76 | except Exception: 77 | logger.exception("Qwen handler error") 78 | bot.reply_to(message, "answer wrong maybe up to the max token") 79 | # pop my user 80 | player_message.pop() 81 | return 82 | 83 | # reply back as Markdown and fallback to plain text if failed. 84 | bot_reply_markdown(reply_id, who, qwen_reply_text, bot) 85 | 86 | 87 | def qwen_pro_handler(message: Message, bot: TeleBot) -> None: 88 | """qwen_pro : /qwen_pro """ 89 | m = message.text.strip() 90 | 91 | player_message = [] 92 | # restart will lose all TODO 93 | if str(message.from_user.id) not in qwen_pro_player_dict: 94 | qwen_pro_player_dict[str(message.from_user.id)] = ( 95 | player_message # for the imuutable list 96 | ) 97 | else: 98 | player_message = qwen_pro_player_dict[str(message.from_user.id)] 99 | if m.strip() == "clear": 100 | bot.reply_to( 101 | message, 102 | "just clear your qwen messages history", 103 | ) 104 | player_message.clear() 105 | return 106 | if m[:4].lower() == "new ": 107 | m = m[4:].strip() 108 | player_message.clear() 109 | m = enrich_text_with_urls(m) 110 | 111 | who = "qwen Pro" 112 | reply_id = bot_reply_first(message, who, bot) 113 | 114 | player_message.append({"role": "user", "content": m}) 115 | # keep the last 5, every has two ask and answer. 
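# each round appends one user entry and one assistant entry, so 10 entries ≈ 5 rounds; drop the oldest pair once the cap is exceeded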
116 | if len(player_message) > 10: 117 | player_message = player_message[2:] 118 | 119 | try: 120 | r = client.chat.completions.create( 121 | messages=player_message, 122 | max_tokens=8192, 123 | model=QWEN_MODEL, 124 | stream=True, 125 | ) 126 | s = "" 127 | start = time.time() 128 | for chunk in r: 129 | if chunk.choices[0].delta.content is None: 130 | break 131 | s += chunk.choices[0].delta.content 132 | if time.time() - start > 1.7: 133 | start = time.time() 134 | bot_reply_markdown(reply_id, who, s, bot, split_text=False) 135 | 136 | if not bot_reply_markdown(reply_id, who, s, bot): 137 | # maybe not complete 138 | # maybe the same message 139 | player_message.clear() 140 | return 141 | 142 | player_message.append( 143 | { 144 | "role": "assistant", 145 | "content": markdownify(s), 146 | } 147 | ) 148 | 149 | except Exception: 150 | logger.exception("Qwen Pro handler error") 151 | bot.reply_to(message, "answer wrong maybe up to the max token") 152 | player_message.clear() 153 | return 154 | 155 | 156 | if QWEN_API_KEY: 157 | 158 | def register(bot: TeleBot) -> None: 159 | bot.register_message_handler(qwen_handler, commands=["qwen"], pass_bot=True) 160 | bot.register_message_handler(qwen_handler, regexp="^qwen:", pass_bot=True) 161 | bot.register_message_handler( 162 | qwen_pro_handler, commands=["qwen_pro"], pass_bot=True 163 | ) 164 | bot.register_message_handler( 165 | qwen_pro_handler, regexp="^qwen_pro:", pass_bot=True 166 | ) 167 | -------------------------------------------------------------------------------- /handlers/llama.py: -------------------------------------------------------------------------------- 1 | import time 2 | from os import environ 3 | 4 | from expiringdict import ExpiringDict 5 | from groq import Groq 6 | from telebot import TeleBot 7 | from telebot.types import Message 8 | from telegramify_markdown import markdownify 9 | 10 | from ._utils import bot_reply_first, bot_reply_markdown, enrich_text_with_urls, logger 11 | 12 | 13 | LLAMA_API_KEY = environ.get("GROQ_API_KEY") 14 | LLAMA_MODEL = "llama-3.1-70b-versatile" 15 | LLAMA_PRO_MODEL = "llama-3.1-70b-versatile" 16 | 17 | if LLAMA_API_KEY: 18 | client = Groq(api_key=LLAMA_API_KEY) 19 | 20 | # Global history cache 21 | llama_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600) 22 | llama_pro_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600) 23 | 24 | 25 | def llama_handler(message: Message, bot: TeleBot) -> None: 26 | """llama : /llama """ 27 | m = message.text.strip() 28 | 29 | player_message = [] 30 | # restart will lose all TODO 31 | if str(message.from_user.id) not in llama_player_dict: 32 | llama_player_dict[str(message.from_user.id)] = ( 33 | player_message # for the imuutable list 34 | ) 35 | else: 36 | player_message = llama_player_dict[str(message.from_user.id)] 37 | if m.strip() == "clear": 38 | bot.reply_to( 39 | message, 40 | "just clear your llama messages history", 41 | ) 42 | player_message.clear() 43 | return 44 | if m[:4].lower() == "new ": 45 | m = m[4:].strip() 46 | player_message.clear() 47 | m = enrich_text_with_urls(m) 48 | 49 | who = "llama" 50 | # show something, make it more responsible 51 | reply_id = bot_reply_first(message, who, bot) 52 | 53 | player_message.append({"role": "user", "content": m}) 54 | # keep the last 5, every has two ask and answer. 
55 | if len(player_message) > 10: 56 | player_message = player_message[2:] 57 | 58 | llama_reply_text = "" 59 | try: 60 | r = client.chat.completions.create(messages=player_message, model=LLAMA_MODEL) 61 | content = r.choices[0].message.content.encode("utf8").decode() 62 | if not content: 63 | llama_reply_text = f"{who} did not answer." 64 | player_message.pop() 65 | else: 66 | llama_reply_text = content 67 | player_message.append( 68 | { 69 | "role": "assistant", 70 | "content": llama_reply_text, 71 | } 72 | ) 73 | 74 | except Exception: 75 | logger.exception("Llama handler error") 76 | bot.reply_to(message, "answer wrong maybe up to the max token") 77 | # pop my user 78 | player_message.pop() 79 | return 80 | 81 | # reply back as Markdown and fallback to plain text if failed. 82 | bot_reply_markdown(reply_id, who, llama_reply_text, bot) 83 | 84 | 85 | def llama_pro_handler(message: Message, bot: TeleBot) -> None: 86 | """llama_pro : /llama_pro """ 87 | m = message.text.strip() 88 | 89 | player_message = [] 90 | # restart will lose all TODO 91 | if str(message.from_user.id) not in llama_pro_player_dict: 92 | llama_pro_player_dict[str(message.from_user.id)] = ( 93 | player_message # for the imuutable list 94 | ) 95 | else: 96 | player_message = llama_pro_player_dict[str(message.from_user.id)] 97 | if m.strip() == "clear": 98 | bot.reply_to( 99 | message, 100 | "just clear your llama messages history", 101 | ) 102 | player_message.clear() 103 | return 104 | if m[:4].lower() == "new ": 105 | m = m[4:].strip() 106 | player_message.clear() 107 | m = enrich_text_with_urls(m) 108 | 109 | who = "llama Pro" 110 | reply_id = bot_reply_first(message, who, bot) 111 | 112 | player_message.append({"role": "user", "content": m}) 113 | # keep the last 5, every has two ask and answer. 
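# history alternates user/assistant entries; the 10-entry cap keeps roughly the last 5 rounds, trimming the oldest pair when exceeded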
114 | if len(player_message) > 10: 115 | player_message = player_message[2:] 116 | 117 | try: 118 | r = client.chat.completions.create( 119 | messages=player_message, 120 | model=LLAMA_PRO_MODEL, 121 | stream=True, 122 | ) 123 | s = "" 124 | start = time.time() 125 | for chunk in r: 126 | if chunk.choices[0].delta.content is None: 127 | break 128 | s += chunk.choices[0].delta.content 129 | # 0.7 is enough for llama3 here its very fast 130 | if time.time() - start > 0.7: 131 | start = time.time() 132 | bot_reply_markdown(reply_id, who, s, bot, split_text=False) 133 | 134 | if not bot_reply_markdown(reply_id, who, s, bot): 135 | # maybe not complete 136 | # maybe the same message 137 | player_message.clear() 138 | return 139 | 140 | player_message.append( 141 | { 142 | "role": "assistant", 143 | "content": markdownify(s), 144 | } 145 | ) 146 | 147 | except Exception: 148 | logger.exception("Llama Pro handler error") 149 | bot.reply_to(message, "answer wrong maybe up to the max token") 150 | player_message.clear() 151 | return 152 | 153 | 154 | if LLAMA_API_KEY: 155 | 156 | def register(bot: TeleBot) -> None: 157 | bot.register_message_handler(llama_handler, commands=["llama"], pass_bot=True) 158 | bot.register_message_handler(llama_handler, regexp="^llama:", pass_bot=True) 159 | bot.register_message_handler( 160 | llama_pro_handler, commands=["llama_pro"], pass_bot=True 161 | ) 162 | bot.register_message_handler( 163 | llama_pro_handler, regexp="^llama_pro:", pass_bot=True 164 | ) 165 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # tg_bot_collections 2 | Collections of yihong0618's telegram bot 3 | 4 | for yihong0618's channel: https://t.me/hyi0618 5 | 6 | 7 | ## Bot -> poster 8 | 9 | ![image](https://github.com/yihong0618/tg_bot_collections/assets/15976103/6cf6b2c0-9f43-42f4-ba5f-be768ea27fd1) 10 | 11 | ## Bot -> pretty mapper 12 | 13 | ![image](https://github.com/yihong0618/tg_bot_collections/assets/15976103/29848d22-5289-4953-8ab0-4e84c16f79e3) 14 | 15 | ## Bot -> fake 16 | 1. install font wqy-microhei first 17 | 2. use `fake: ${message}` to generate 18 | 19 | ## Bot -> [ChatTTS](https://github.com/2noise/ChatTTS) 20 | 21 | 1. export USE_CHATTTS=true 22 | 2. use `tts: ${message}` to generate 23 | 24 | ## Bot -> [Kling](https://github.com/yihong0618/klingCreator) 25 | 26 | 1. export KLING_COOKIE=${cookie} 27 | 2. use `kling: ${message}` to generate images 28 | 3. use `/kling_pro ${message}` to generate videos 29 | 30 | 31 | ## Bot -> Gemini player 32 | 33 | 1. visit https://makersuite.google.com/app/apikey get the key 34 | 2. export GOOGLE_GEMINI_KEY=${the_key} 35 | 3. use `gemini: ${message}` to ask 36 | 37 | ![telegram-cloud-photo-size-5-6336976091083817765-y](https://github.com/yihong0618/tg_bot_collections/assets/15976103/683a9c22-6f64-4a51-93e6-5e36218e1668) 38 | 39 | 40 | ## Bot -> Claude 3 41 | 42 | 1. visit https://console.anthropic.com/ get the key 43 | 2. export ANTHROPIC_API_KEY=${the_key} 44 | 3. use `claude: ${message}` to ask 45 | 46 | Note, if you are using third party service, you need to `export ANTHROPIC_BASE_URL=${the_url}` to change the url. 47 | 48 | 49 | ## Bot -> lingyiwanwu 50 | 51 | 1. visit https://platform.lingyiwanwu.com/apikeys get the key 52 | 2. export YI_API_KEY=${the_key} 53 | 3. export YI_BASE_URL=${the_url} 54 | 3. 
use `yi: ${message}` to ask 55 | 56 | ![image](https://github.com/yihong0618/tg_bot_collections/assets/15976103/11d96dde-447b-4b7e-886d-c3564e27b0d6) 57 | 58 | 59 | ## Bot -> ChatGPT 60 | 61 | 1. visit https://platform.openai.com/account/api-keys get the key 62 | 2. export OPENAI_API_KEY=${the_key} 63 | 3. use `gpt: ${message}` to ask 64 | 65 | Note, if you are using third party service, you need to `export OPENAI_API_BASE=${the_url}` to change the url. 66 | Optional web search support: 67 | - export `OLLAMA_WEB_SEARCH_API_KEY=${the_ollama_web_search_api_key}` (and `OLLAMA_WEB_SEARCH_MAX_RESULTS` as needed) 68 | 69 | ## Bot -> llama3 70 | 71 | 1. visit https://console.groq.com/docs/quickstart get the key 72 | 2. export GROQ_API_KEY=${the_key} 73 | 3. use `llama_pro: ${message}` to ask 74 | 75 | ## Bot -> qwen 76 | 77 | 1. visit https://api.together.xyz/settings/api-keys get the key 78 | 2. export TOGETHER_API_KEY=${the_key} 79 | 3. use `qwen_pro: ${message}` to ask 80 | 81 | ## Bot -> dify 82 | 83 | 1. visit https://cloud.dify.ai/ get selected Chatbot's API Secret key 84 | 2. export DIFY_API_KEY=${the_key} 85 | 3. use `dify: ${message}` to ask 86 | 87 | Note, currently its support dify Chatbot with instructions(System prompt) and different MODEL with its parameters. 88 | 89 | ## Bot -> Cohere 90 | 91 | 1. visit https://dashboard.cohere.com/api-keys get the key 92 | 2. export COHERE_API_KEY=${the_key} 93 | 3. use `cohere: ${message}` to ask 94 | 95 | ## Function -> Telegraph 96 | 97 | ### Skip token (default) 98 | 99 | You do not need to do anything. 100 | 101 | But you may not be able to edit any generated post since you do not have the token. 102 | 103 | ### Store token (recommended) 104 | 105 | Change "Store_Token" to "True" in "handlers/__init__.py" TelegraphAPI/_create_ph_account. It will store the token in "token_key.json". 106 | 107 | ### Get token manually from Telegram account 108 | 109 | 1. https://t.me/telegraph Create or login Telegraph account 110 | 2. `Log in as ${Account} on this device` 111 | 3. On Browser at https://telegra.ph/, press F12 or right click and inspect 112 | 4. Go to Application -> Storage -> Cookies -> https://telegra.ph/ 113 | 5. The token at `tph_token` is the token for telegra.ph API 114 | 115 | Do not share the token with others, it's like a password. 116 | 117 | ## HOW TO Install and Run 118 | 119 | ### Manually install 120 | 1. pip install -r requirements.txt 121 | 2. Get tg token, ask Google or ChatGPT, need get it from [BotFather](https://t.me/BotFather) 122 | 3. export GOOGLE_GEMINI_KEY=${your_google_gemini_apikey} 123 | 4. 
124 | 125 | ### Run from Docker 126 | #### Build docker image 127 | `docker build -t tg_bot_collections .` 128 | #### Run Gemini 129 | `docker run -d --name tg_bot_collections -e GOOGLE_GEMINI_KEY='${GOOGLE_GEMINI_KEY}' -e TELEGRAM_BOT_TOKEN='${TELEGRAM_BOT_TOKEN}' --network host tg_bot_collections` 130 | #### Run Claude 3 131 | `docker run -d --name tg_bot_collections -e ANTHROPIC_API_KEY='${ANTHROPIC_API_KEY}' -e TELEGRAM_BOT_TOKEN='${TELEGRAM_BOT_TOKEN}' --network host tg_bot_collections` 132 | #### Run lingyiwanwu 133 | `docker run -d --name tg_bot_collections -e YI_API_KEY='${YI_API_KEY}' -e YI_BASE_URL='${YI_BASE_URL}' -e TELEGRAM_BOT_TOKEN='${TELEGRAM_BOT_TOKEN}' --network host tg_bot_collections` 134 | #### Run ChatGPT 135 | `docker run -d --name tg_bot_collections -e OPENAI_API_KEY='${CHATGPT_API_KEY}' -e TELEGRAM_BOT_TOKEN='${TELEGRAM_BOT_TOKEN}' --network host tg_bot_collections` 136 | 137 | ### Run as shell 138 | 139 | Note, this may break your system config -> check this https://github.com/yihong0618/tg_bot_collections/issues/5 140 | 141 | 142 | ## HOW TO Use 143 | 144 | 1. Type `/gemini: ${message}` to ask 145 | 2. Type `gemini: ${message}` and upload a picture to ask with the picture 146 | 147 | > [!Note] 148 | > If you don't want to use one of these commands, you can use the `--disable-command ` option to disable it. This option can be used multiple times. 149 | 150 | 151 | ## Contribution 152 | 153 | - Any issue reports or PRs are welcome. 154 | - Before opening a PR, run `pip install -U black` and then `black .` 155 | 156 | ## Acknowledgements 157 | 158 | - poster uses my repo -> https://github.com/yihong0618/GitHubPoster 159 | - pretty map uses the wonderful repo -> https://github.com/chrieke/prettymapp 160 | - Gemini uses -> https://github.com/google/generative-ai-python 161 | - Telegram markdownV2 conversion code copied from https://github.com/yym68686/md2tgmd/blob/main/src/md2tgmd.py thanks a lot. 162 | - Telegram markdownV2 conversion now uses telegramify-markdown 163 | - ChatGPT uses -> https://github.com/openai/openai-python 164 | 165 | ## Appreciation 166 | 167 | - Thank you, that's enough. Just enjoy it. 168 | -------------------------------------------------------------------------------- /handlers/cohere.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import re 3 | import time 4 | from os import environ 5 | 6 | import cohere 7 | from expiringdict import ExpiringDict 8 | from telebot import TeleBot 9 | from telebot.types import Message 10 | from telegramify_markdown import markdownify 11 | 12 | from config import settings 13 | 14 | from ._utils import bot_reply_first, bot_reply_markdown, enrich_text_with_urls 15 | 16 | 17 | COHERE_API_KEY = environ.get("COHERE_API_KEY") 18 | COHERE_MODEL = "command-r-plus"  # command-r may cause garbled Chinese output, and non-stream mode may also cause garbled output. 19 | if COHERE_API_KEY: 20 | co = cohere.Client(api_key=COHERE_API_KEY) 21 | 22 | 23 | # Global history cache 24 | cohere_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600) 25 | 26 | 27 | def clean_text(text): 28 | """Clean up the garbled code in the UTF-8 encoded Chinese string. 29 | 30 | Args: 31 | text: String that needs to be cleaned. 32 | 33 | Returns: 34 | The cleaned string; if garbled code is detected, a prompt message is added at the end.
35 | """ 36 | if "�" in text: 37 | # Use re.sub to clean up garbled code 38 | cleaned_text = re.sub(r"�.*?([,。!?;:]|$)", r"\1", text) 39 | cleaned_text = re.sub(r"\s+", " ", cleaned_text).strip() 40 | print(f"\n---------\nOriginal text:\n{text}\n---------") 41 | return cleaned_text + "\n\n~~(乱码已去除,可能存在错误,请注意)~~" 42 | else: 43 | return text 44 | 45 | 46 | def cohere_handler(message: Message, bot: TeleBot) -> None: 47 | """cohere : /cohere_pro Come with a telegraph link""" 48 | m = message.text.strip() 49 | 50 | player_message = [] 51 | if str(message.from_user.id) not in cohere_player_dict: 52 | cohere_player_dict[str(message.from_user.id)] = player_message 53 | else: 54 | player_message = cohere_player_dict[str(message.from_user.id)] 55 | 56 | if m.strip() == "clear": 57 | bot.reply_to( 58 | message, 59 | "Just cleared your Cohere messages history", 60 | ) 61 | player_message.clear() 62 | return 63 | 64 | if m[:4].lower() == "new ": 65 | m = m[4:].strip() 66 | player_message.clear() 67 | 68 | m = enrich_text_with_urls(m) 69 | 70 | who = "Command R Plus" 71 | reply_id = bot_reply_first(message, who, bot) 72 | 73 | player_message.append({"role": "User", "message": m}) 74 | # keep the last 5, every has two ask and answer. 75 | if len(player_message) > 10: 76 | player_message = player_message[2:] 77 | 78 | try: 79 | current_time = datetime.datetime.now(datetime.timezone.utc) 80 | preamble = ( 81 | f"You are Command, a large language model trained to have polite, helpful, and inclusive conversations with people. Your responses should be accurate and graceful in user's original language." 82 | f"The current UTC time is {current_time.strftime('%Y-%m-%d %H:%M:%S')}, " 83 | f"UTC-4 (e.g. New York) is {current_time.astimezone(datetime.timezone(datetime.timedelta(hours=-4))).strftime('%Y-%m-%d %H:%M:%S')}, " 84 | f"UTC-7 (e.g. Los Angeles) is {current_time.astimezone(datetime.timezone(datetime.timedelta(hours=-7))).strftime('%Y-%m-%d %H:%M:%S')}, " 85 | f"and UTC+8 (e.g. Beijing) is {current_time.astimezone(datetime.timezone(datetime.timedelta(hours=8))).strftime('%Y-%m-%d %H:%M:%S')}." 
86 | ) 87 | stream = co.chat_stream( 88 | model=COHERE_MODEL, 89 | message=m, 90 | temperature=0.8, 91 | chat_history=player_message, 92 | prompt_truncation="AUTO", 93 | connectors=[{"id": "web-search"}], 94 | citation_quality="accurate", 95 | preamble=preamble, 96 | ) 97 | 98 | s = "" 99 | source = "" 100 | start = time.time() 101 | for event in stream: 102 | if event.event_type == "stream-start": 103 | bot_reply_markdown(reply_id, who, "Thinking...", bot) 104 | elif event.event_type == "search-queries-generation": 105 | bot_reply_markdown(reply_id, who, "Searching online...", bot) 106 | elif event.event_type == "search-results": 107 | bot_reply_markdown(reply_id, who, "Reading...", bot) 108 | for doc in event.documents: 109 | source += f"\n{doc['title']}\n{doc['url']}\n" 110 | elif event.event_type == "text-generation": 111 | s += event.text.encode("utf-8").decode("utf-8") 112 | if time.time() - start > 1.4: 113 | start = time.time() 114 | s = clean_text(s) 115 | if len(s) > 3900: 116 | bot_reply_markdown( 117 | reply_id, 118 | who, 119 | f"\nStill thinking{len(s)}...\n", 120 | bot, 121 | split_text=True, 122 | ) 123 | else: 124 | bot_reply_markdown( 125 | reply_id, 126 | who, 127 | f"\nStill thinking{len(s)}...\n{s}", 128 | bot, 129 | split_text=True, 130 | ) 131 | elif event.event_type == "stream-end": 132 | break 133 | content = ( 134 | s 135 | + "\n\n---\n" 136 | + source 137 | + f"\nLast Update{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')} at UTC+8\n" 138 | ) 139 | ph_s = settings.telegraph_client.create_page_md( 140 | title="Cohere", markdown_text=content 141 | ) # or edit_page with get_page so not producing massive pages 142 | s += f"\n\n[View]({ph_s})" 143 | 144 | try: 145 | bot_reply_markdown( 146 | reply_id, who, s, bot, split_text=True, disable_web_page_preview=True 147 | ) 148 | except Exception: 149 | pass 150 | 151 | player_message.append( 152 | { 153 | "role": "Chatbot", 154 | "message": markdownify(s), 155 | } 156 | ) 157 | 158 | except Exception as e: 159 | print(e) 160 | bot.reply_to(message, "answer wrong maybe up to the max token") 161 | player_message.clear() 162 | return 163 | 164 | 165 | if COHERE_API_KEY: 166 | 167 | def register(bot: TeleBot) -> None: 168 | bot.register_message_handler(cohere_handler, commands=["cohere"], pass_bot=True) 169 | bot.register_message_handler(cohere_handler, regexp="^cohere:", pass_bot=True) 170 | -------------------------------------------------------------------------------- /handlers/fake_liuneng.py: -------------------------------------------------------------------------------- 1 | import random 2 | import re 3 | from os import listdir 4 | 5 | from PIL import Image, ImageDraw, ImageFont 6 | from telebot import TeleBot 7 | from telebot.types import Message 8 | 9 | 10 | def split_lines(text, max_length=30): 11 | def split_line(line): 12 | punctuation = r"[,.!?;,。!?;]" 13 | parts = re.split(f"({punctuation})", line) 14 | 15 | result = [] 16 | current = "" 17 | 18 | for part in parts: 19 | if len(current) + len(part) <= max_length: 20 | current += part 21 | else: 22 | if current: 23 | result.append(current.strip()) 24 | 25 | while len(part) > max_length: 26 | result.append(part[:max_length]) 27 | part = part[max_length:] 28 | 29 | current = part 30 | 31 | if current: 32 | result.append(current.strip()) 33 | 34 | return result 35 | 36 | lines = text.split("\n") 37 | final_result = [] 38 | for line in lines: 39 | final_result.extend(split_line(line)) 40 | 41 | return final_result 42 | 43 | 44 | def extract_prompt(message: str, 
bot_name: str) -> str: 45 | """ 46 | This function filters messages for prompts. 47 | 48 | Returns: 49 | str: If it is not a prompt, return None. Otherwise, return the trimmed prefix of the actual prompt. 50 | """ 51 | # remove '@bot_name' as it is considered part of the command when in a group chat. 52 | message = re.sub(re.escape(f"@{bot_name}"), "", message).strip() 53 | # add a whitespace after the first colon as we separate the prompt from the command by the first whitespace. 54 | message = re.sub(":", ": ", message, count=1).strip() 55 | try: 56 | left, message = message.split(maxsplit=1) 57 | except ValueError: 58 | return "" 59 | if ":" not in left: 60 | # the replacement happens in the right part, restore it. 61 | message = message.replace(": ", ":", 1) 62 | return message.strip() 63 | 64 | 65 | class ImageRenderer: 66 | def __init__(self): 67 | self.canvas_width = 512 68 | self.quotes = [ 69 | "我敬佩两种人\n年轻时陪男人过苦日子的女人\n富裕时陪女人过好日子的男人", 70 | "人生就像一杯茶\n不会苦一辈子\n但总会苦一阵子", 71 | "不要总拿自己跟别人比\n你羡慕别人瘦\n别人还羡慕你肠胃好\n你羡慕别人有钱\n别人还羡慕没人找你借钱", 72 | "彪悍的人生不需要解释\n只要你按时达到目的地\n很少有人在乎你开的是奔驰还是拖拉机", 73 | "如果你不够优秀\n人脉是不值钱的\n它不是追求来的\n而是吸引来的\n只有等价的交换\n才能得到合理的帮助\n虽然听起来很冷\n但这是事实", 74 | "喜欢在你背后说三道四\n捏造故事的人\n无非就三个原因\n没达到你的层次\n你有的东西他没有\n模仿你的生活方式未遂", 75 | "做一个特别简单的人\n好相处就处\n不好相处就不处\n不要一厢情愿去迎合别人\n你努力合群的样子并不漂亮\n不必对每个人好\n他们又不给你打钱", 76 | ] 77 | 78 | def render_image(self, image_path, text): 79 | image = Image.open(image_path) 80 | scale_factor = self.canvas_width / image.width 81 | scaled_height = int(image.height * scale_factor) 82 | line_height = 50 83 | font_size = 20 84 | image_line_height = int(line_height / scale_factor) 85 | lines = split_lines(text) 86 | canvas_height = scaled_height 87 | if len(lines) > 1: 88 | canvas_height += (len(lines) - 1) * line_height 89 | 90 | canvas = Image.new("RGB", (self.canvas_width, canvas_height)) 91 | canvas.paste(image.resize((self.canvas_width, scaled_height))) 92 | 93 | draw = ImageDraw.Draw(canvas) 94 | # font = ImageFont.load_default() 95 | font = ImageFont.truetype("wqy-microhei.ttc", font_size) 96 | 97 | for i, line in enumerate(lines): 98 | if i > 0: 99 | bottom_strip = image.crop( 100 | (0, image.height - image_line_height, image.width, image.height) 101 | ) 102 | canvas.paste( 103 | bottom_strip.resize((self.canvas_width, line_height)), 104 | (0, scaled_height + (i - 1) * line_height), 105 | ) 106 | 107 | y = scaled_height + i * line_height - (line_height - font_size) // 2 108 | draw.text( 109 | (self.canvas_width // 2, y), 110 | line, 111 | fill="white", 112 | font=font, 113 | anchor="mm", 114 | stroke_width=2, 115 | stroke_fill="black", 116 | ) 117 | 118 | return canvas 119 | 120 | def save_image(self, image, filename="fake.jpg"): 121 | image.save(filename) 122 | 123 | def get_random_quote(self): 124 | return random.choice(self.quotes) 125 | 126 | 127 | def fake_handler(message: Message, bot: TeleBot) -> None: 128 | """ignore""" 129 | who = "LiuNeng" 130 | bot.reply_to(message, f"Generating {who}'s fake image") 131 | m = message.text.strip() 132 | prompt = m.strip() 133 | prompt = extract_prompt(message.text, bot.get_me().username) 134 | # Usage 135 | renderer = ImageRenderer() 136 | heros_list = listdir("handlers/heros") 137 | image_path = f"handlers/heros/{random.choice(heros_list)}" 138 | if prompt: 139 | text = prompt 140 | else: 141 | text = renderer.get_random_quote() 142 | rendered_image = renderer.render_image(image_path, text) 143 | renderer.save_image(rendered_image) 144 | with open("fake.jpg", "rb") as f: 145 | bot.send_photo( 146 | message.chat.id, 147 | 
f, 148 | reply_to_message_id=message.message_id, 149 | caption="Generated image", 150 | ) 151 | 152 | 153 | def fake_photo_handler(message: Message, bot: TeleBot) -> None: 154 | """ignore""" 155 | s = message.caption 156 | s = s.replace("/fake", "").strip() 157 | s = s.replace("fake:", "").strip() 158 | prompt = s.strip() 159 | bot.reply_to(message, "Generating LiuNeng's fake image") 160 | # get the highest quality picture. 161 | max_size_photo = max(message.photo, key=lambda p: p.file_size) 162 | file_path = bot.get_file(max_size_photo.file_id).file_path 163 | downloaded_file = bot.download_file(file_path) 164 | with open("fake.jpg", "wb") as temp_file: 165 | temp_file.write(downloaded_file) 166 | renderer = ImageRenderer() 167 | rendered_image = renderer.render_image("fake.jpg", prompt) 168 | renderer.save_image(rendered_image) 169 | with open("fake.jpg", "rb") as f: 170 | bot.send_photo( 171 | message.chat.id, 172 | f, 173 | reply_to_message_id=message.message_id, 174 | caption="Generated image", 175 | ) 176 | 177 | 178 | def register(bot: TeleBot) -> None: 179 | bot.register_message_handler(fake_handler, commands=["fake"], pass_bot=True) 180 | bot.register_message_handler(fake_handler, regexp="^fake:", pass_bot=True) 181 | bot.register_message_handler( 182 | fake_photo_handler, 183 | content_types=["photo"], 184 | func=lambda m: m.caption and m.caption.startswith(("fake:", "/fake")), 185 | pass_bot=True, 186 | ) 187 | --------------------------------------------------------------------------------
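The `ImageRenderer` in `handlers/fake_liuneng.py` above can also be exercised outside of Telegram, which is handy when tweaking the font or layout. A minimal sketch, assuming you run it from the repository root (so `handlers/heros/` exists and the wqy-microhei font is installed as the README notes) and that importing the `handlers` package works in your environment; the chosen portrait is an arbitrary example:

```python
from handlers.fake_liuneng import ImageRenderer

renderer = ImageRenderer()
# render_image draws each wrapped line of text on a strip extending below the portrait
image = renderer.render_image("handlers/heros/刘能.jpg", renderer.get_random_quote())
renderer.save_image(image, "fake.jpg")  # writes the composed image to fake.jpg
```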
/handlers/_tts.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import threading 3 | import subprocess 4 | from os import environ, remove 5 | 6 | from telebot import TeleBot 7 | from telebot.types import Message 8 | 9 | from . import * 10 | 11 | import wave 12 | import numpy as np 13 | from ChatTTS import Chat 14 | 15 | 16 | def check_ffmpeg(): 17 | try: 18 | subprocess.run( 19 | ["ffmpeg", "-version"], 20 | check=True, 21 | stdout=subprocess.PIPE, 22 | stderr=subprocess.PIPE, 23 | ) 24 | return True 25 | except (subprocess.CalledProcessError, FileNotFoundError): 26 | return False 27 | 28 | 29 | HAS_FFMPEG = check_ffmpeg() 30 | USE_CHATTTS = environ.get("USE_CHATTTS") 31 | if USE_CHATTTS: 32 | chat = Chat() 33 | chat.load_models() 34 | lock = threading.Lock()  # Initialize a lock 35 | 36 | def save_data_to_wav(filename, data): 37 | sample_rate = 24000 38 | # Open a .wav file to write into 39 | with wave.open(filename, "w") as wf: 40 | wf.setnchannels(1)  # Mono channel 41 | wf.setsampwidth(2)  # 2 bytes per sample 42 | wf.setframerate(sample_rate) 43 | wf.writeframes(data.tobytes()) 44 | 45 | def generate_tts_wav(prompt, output_filename, seed=None): 46 | texts = [ 47 | prompt, 48 | ] 49 | if seed: 50 | r = chat.sample_random_speaker(seed) 51 | params_infer_code = { 52 | "spk_emb": r,  # add sampled speaker 53 | "temperature": 0.3,  # using custom temperature 54 | "top_P": 0.7,  # top P decode 55 | "top_K": 20,  # top K decode 56 | } 57 | wavs = chat.infer( 58 | texts, use_decoder=True, params_infer_code=params_infer_code 59 | ) 60 | else: 61 | wavs = chat.infer(texts, use_decoder=True) 62 | 63 | audio_data = np.array( 64 | wavs[0], dtype=np.float32 65 | )  # Ensure the data type is correct 66 | # Normalize the audio data to 16-bit PCM range 67 | audio_data = (audio_data * 32767).astype(np.int16) 68 | save_data_to_wav(output_filename, audio_data) 69 | 70 | if seed: 71 | print(f"Audio has been saved to {output_filename} with seed {seed}") 72 | else: 73 | print(f"Audio has been saved to {output_filename}") 74 | 75 | def tts_handler(message: Message, bot: TeleBot): 76 | """pretty tts: /tts """ 77 | bot.reply_to( 78 | message, "Generating ChatTTS audio may take some time, please wait."
79 | ) 80 | m = message.text.strip() 81 | prompt = m.strip() 82 | if len(prompt) > 150: 83 | bot.reply_to(message, "prompt too long must length < 150") 84 | return 85 | try: 86 | with lock: 87 | generate_tts_wav(prompt, "tts.wav") 88 | with open("tts.wav", "rb") as audio: 89 | bot.send_audio( 90 | message.chat.id, audio, reply_to_message_id=message.message_id 91 | ) 92 | except Exception as e: 93 | print(e) 94 | bot.reply_to(message, "tts error") 95 | 96 | def tts_pro_handler(message: Message, bot: TeleBot): 97 | """pretty tts_pro: /tts_pro ,""" 98 | m = message.text.strip() 99 | prompt = m.strip() 100 | seed = prompt.split(",")[0] 101 | bot.reply_to( 102 | message, 103 | f"Generating ChatTTS with seed: {seed} may take some time please wait some time.", 104 | ) 105 | if not seed.isdigit(): 106 | bot.reply_to(message, "first argument must be a number") 107 | return 108 | prompt = prompt[len(str(seed)) + 1 :] 109 | # split the prompt by 100 characters 110 | prompt_split = [prompt[i : i + 50] for i in range(0, len(prompt), 50)] 111 | if not HAS_FFMPEG: 112 | if len(prompt) > 150: 113 | bot.reply_to(message, "prompt too long must length < 150") 114 | return 115 | try: 116 | with lock: 117 | if len(prompt_split) > 1: 118 | bot.reply_to( 119 | message, 120 | "Will split the text and use the same to generate the audio and use ffmpeg to combin them pleas wait more time", 121 | ) 122 | for k, v in enumerate(prompt_split): 123 | generate_tts_wav(v, f"{k}.wav", seed) 124 | with open("input.txt", "a") as f: 125 | f.write(f"file {k}.wav\n") 126 | output_file = "tts_pro.wav" 127 | # Run the FFmpeg command 128 | try: 129 | # make sure remove it 130 | try: 131 | remove("tts_pro.wav") 132 | except: 133 | pass 134 | subprocess.run( 135 | [ 136 | "ffmpeg", 137 | "-f", 138 | "concat", 139 | "-safe", 140 | "0", 141 | "-i", 142 | "input.txt", 143 | "-c", 144 | "copy", 145 | "tts_pro.wav", 146 | ], 147 | check=True, 148 | ) 149 | except Exception as e: 150 | print(f"Error combining audio files, {e}") 151 | bot.reply_to(message, "tts error please check the log") 152 | remove("input.txt") 153 | return 154 | print(f"Combined audio saved as {output_file}") 155 | with open("tts_pro.wav", "rb") as audio: 156 | bot.send_audio( 157 | message.chat.id, 158 | audio, 159 | reply_to_message_id=message.message_id, 160 | ) 161 | remove("input.txt") 162 | for file in glob.glob("*.wav"): 163 | try: 164 | remove(file) 165 | except OSError as e: 166 | print(e) 167 | else: 168 | generate_tts_wav(prompt, "tts_pro.wav", seed) 169 | with open("tts_pro.wav", "rb") as audio: 170 | bot.send_audio( 171 | message.chat.id, 172 | audio, 173 | reply_to_message_id=message.message_id, 174 | ) 175 | except Exception as e: 176 | print(e) 177 | bot.reply_to(message, "tts error") 178 | 179 | def register(bot: TeleBot) -> None: 180 | bot.register_message_handler(tts_handler, commands=["tts"], pass_bot=True) 181 | bot.register_message_handler(tts_handler, regexp="^tts:", pass_bot=True) 182 | bot.register_message_handler( 183 | tts_pro_handler, commands=["tts_pro"], pass_bot=True 184 | ) 185 | bot.register_message_handler(tts_pro_handler, regexp="^tts_pro:", pass_bot=True) 186 | -------------------------------------------------------------------------------- /handlers/_yi.py: -------------------------------------------------------------------------------- 1 | import time 2 | from os import environ 3 | 4 | import requests 5 | from expiringdict import ExpiringDict 6 | from openai import OpenAI 7 | from telebot import TeleBot 8 | from telebot.types 
import Message 9 | from telegramify_markdown import markdownify 10 | 11 | from . import * 12 | 13 | YI_BASE_URL = environ.get("YI_BASE_URL") 14 | YI_API_KEY = environ.get("YI_API_KEY") 15 | YI_MODEL = "yi-34b-chat-200k" 16 | YI_PRO_MODEL = "yi-large" 17 | 18 | client = OpenAI( 19 | # defaults to os.environ.get("OPENAI_API_KEY") 20 | api_key=YI_API_KEY, 21 | base_url=YI_BASE_URL, 22 | ) 23 | 24 | # Global history cache 25 | yi_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600) 26 | yi_pro_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600) 27 | 28 | 29 | def yi_handler(message: Message, bot: TeleBot) -> None: 30 | """yi : /yi """ 31 | m = message.text.strip() 32 | 33 | player_message = [] 34 | # restart will lose all TODO 35 | if str(message.from_user.id) not in yi_player_dict: 36 | yi_player_dict[str(message.from_user.id)] = ( 37 | player_message # for the imuutable list 38 | ) 39 | else: 40 | player_message = yi_player_dict[str(message.from_user.id)] 41 | if m.strip() == "clear": 42 | bot.reply_to( 43 | message, 44 | "just clear your yi messages history", 45 | ) 46 | player_message.clear() 47 | return 48 | if m[:4].lower() == "new ": 49 | m = m[4:].strip() 50 | player_message.clear() 51 | m = enrich_text_with_urls(m) 52 | 53 | who = "Yi" 54 | # show something, make it more responsible 55 | reply_id = bot_reply_first(message, who, bot) 56 | 57 | player_message.append({"role": "user", "content": m}) 58 | # keep the last 5, every has two ask and answer. 59 | if len(player_message) > 10: 60 | player_message = player_message[2:] 61 | 62 | yi_reply_text = "" 63 | try: 64 | if len(player_message) > 2: 65 | if player_message[-1]["role"] == player_message[-2]["role"]: 66 | # tricky 67 | player_message.pop() 68 | r = client.chat.completions.create(messages=player_message, model=YI_MODEL) 69 | 70 | content = r.choices[0].message.content.encode("utf8").decode() 71 | if not content: 72 | yi_reply_text = f"{who} did not answer." 73 | player_message.pop() 74 | else: 75 | yi_reply_text = content 76 | player_message.append( 77 | { 78 | "role": "assistant", 79 | "content": yi_reply_text, 80 | } 81 | ) 82 | 83 | except Exception as e: 84 | print(e) 85 | bot.reply_to(message, "answer wrong maybe up to the max token") 86 | # pop my user 87 | player_message.pop() 88 | return 89 | 90 | # reply back as Markdown and fallback to plain text if failed. 91 | bot_reply_markdown(reply_id, who, yi_reply_text, bot) 92 | 93 | 94 | def yi_pro_handler(message: Message, bot: TeleBot) -> None: 95 | """yi_pro : /yi_pro """ 96 | m = message.text.strip() 97 | 98 | player_message = [] 99 | # restart will lose all TODO 100 | if str(message.from_user.id) not in yi_pro_player_dict: 101 | yi_pro_player_dict[str(message.from_user.id)] = ( 102 | player_message # for the imuutable list 103 | ) 104 | else: 105 | player_message = yi_pro_player_dict[str(message.from_user.id)] 106 | if m.strip() == "clear": 107 | bot.reply_to( 108 | message, 109 | "just clear your yi messages history", 110 | ) 111 | player_message.clear() 112 | return 113 | if m[:4].lower() == "new ": 114 | m = m[4:].strip() 115 | player_message.clear() 116 | m = enrich_text_with_urls(m) 117 | 118 | who = "yi Pro" 119 | reply_id = bot_reply_first(message, who, bot) 120 | 121 | player_message.append({"role": "user", "content": m}) 122 | # keep the last 5, every has two ask and answer. 
123 | if len(player_message) > 10: 124 | player_message = player_message[2:] 125 | 126 | try: 127 | r = client.chat.completions.create( 128 | messages=player_message, 129 | max_tokens=8192, 130 | model=YI_PRO_MODEL, 131 | stream=True, 132 | ) 133 | s = "" 134 | start = time.time() 135 | for chunk in r: 136 | if chunk.choices[0].delta.content is None: 137 | break 138 | s += chunk.choices[0].delta.content 139 | # 0.7 is enough for yi3 here its very fast 140 | if time.time() - start > 0.7: 141 | start = time.time() 142 | bot_reply_markdown(reply_id, who, s, bot, split_text=False) 143 | 144 | if not bot_reply_markdown(reply_id, who, s, bot): 145 | # maybe not complete 146 | # maybe the same message 147 | player_message.clear() 148 | return 149 | 150 | player_message.append( 151 | { 152 | "role": "assistant", 153 | "content": markdownify(s), 154 | } 155 | ) 156 | 157 | except Exception as e: 158 | print(e) 159 | bot.reply_to(message, "answer wrong maybe up to the max token") 160 | player_message.clear() 161 | return 162 | 163 | 164 | def yi_photo_handler(message: Message, bot: TeleBot) -> None: 165 | s = message.caption 166 | prompt = s.strip() 167 | who = "Yi Vision" 168 | # show something, make it more responsible 169 | reply_id = bot_reply_first(message, who, bot) 170 | # get the high quaility picture. 171 | max_size_photo = max(message.photo, key=lambda p: p.file_size) 172 | file_path = bot.get_file(max_size_photo.file_id).file_path 173 | downloaded_file = bot.download_file(file_path) 174 | with open("yi_temp.jpg", "wb") as temp_file: 175 | temp_file.write(downloaded_file) 176 | 177 | headers = { 178 | "Content-Type": "application/json", 179 | "Authorization": f"Bearer {client.api_key}", 180 | } 181 | 182 | payload = { 183 | "model": "yi-vl-plus", 184 | "messages": [ 185 | { 186 | "role": "user", 187 | "content": [ 188 | {"type": "text", "text": prompt}, 189 | { 190 | "type": "image_url", 191 | "image_url": {"url": image_to_data_uri("yi_temp.jpg")}, 192 | }, 193 | ], 194 | } 195 | ], 196 | "max_tokens": 2048, 197 | } 198 | 199 | response = requests.post( 200 | "https://api.lingyiwanwu.com/v1/chat/completions", 201 | headers=headers, 202 | json=payload, 203 | ).json() 204 | try: 205 | text = response["choices"][0]["message"]["content"].encode("utf8").decode() 206 | bot_reply_markdown(reply_id, who, text, bot) 207 | except Exception as e: 208 | print(e) 209 | bot.reply_to(message, "answer wrong maybe up to the max token") 210 | 211 | 212 | if YI_API_KEY and YI_BASE_URL: 213 | 214 | def register(bot: TeleBot) -> None: 215 | bot.register_message_handler(yi_handler, commands=["yi"], pass_bot=True) 216 | bot.register_message_handler(yi_handler, regexp="^yi:", pass_bot=True) 217 | bot.register_message_handler(yi_handler, commands=["yi_pro"], pass_bot=True) 218 | bot.register_message_handler(yi_handler, regexp="^yi_pro:", pass_bot=True) 219 | bot.register_message_handler( 220 | yi_photo_handler, 221 | content_types=["photo"], 222 | func=lambda m: m.caption and m.caption.startswith(("yi:", "/yi")), 223 | pass_bot=True, 224 | ) 225 | -------------------------------------------------------------------------------- /handlers/_utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import base64 4 | import logging 5 | import re 6 | from functools import update_wrapper 7 | from mimetypes import guess_type 8 | from typing import Any, Callable, TypeVar 9 | 10 | import requests 11 | import telegramify_markdown 12 | from 
expiringdict import ExpiringDict 13 | from telebot import TeleBot 14 | from telebot.types import Message 15 | from telebot.util import smart_split 16 | from telegramify_markdown.customize import get_runtime_config 17 | from urlextract import URLExtract 18 | 19 | get_runtime_config().markdown_symbol.head_level_1 = ( 20 | "📌" # If you want, Customizing the head level 1 symbol 21 | ) 22 | get_runtime_config().markdown_symbol.link = ( 23 | "🔗" # If you want, Customizing the link symbol 24 | ) 25 | 26 | T = TypeVar("T", bound=Callable) 27 | logger = logging.getLogger("bot") 28 | 29 | 30 | BOT_MESSAGE_LENGTH = 4000 31 | 32 | REPLY_MESSAGE_CACHE = ExpiringDict(max_len=1000, max_age_seconds=600) 33 | 34 | 35 | def bot_reply_first(message: Message, who: str, bot: TeleBot) -> Message: 36 | """Create the first reply message which make user feel the bot is working.""" 37 | return bot.reply_to( 38 | message, f"*{who}* is _thinking_ \\.\\.\\.", parse_mode="MarkdownV2" 39 | ) 40 | 41 | 42 | def bot_reply_markdown( 43 | reply_id: Message, 44 | who: str, 45 | text: str, 46 | bot: TeleBot, 47 | split_text: bool = True, 48 | disable_web_page_preview: bool = False, 49 | ) -> bool: 50 | """ 51 | reply the Markdown by take care of the message length. 52 | it will fallback to plain text in case of any failure 53 | """ 54 | try: 55 | cache_key = f"{reply_id.chat.id}_{reply_id.message_id}" 56 | if cache_key in REPLY_MESSAGE_CACHE and REPLY_MESSAGE_CACHE[cache_key] == text: 57 | logger.info(f"Skipping duplicate message for {cache_key}") 58 | return True 59 | REPLY_MESSAGE_CACHE[cache_key] = text 60 | if len(text.encode("utf-8")) <= BOT_MESSAGE_LENGTH or not split_text: 61 | bot.edit_message_text( 62 | f"*{who}*:\n{telegramify_markdown.markdownify(text)}", 63 | chat_id=reply_id.chat.id, 64 | message_id=reply_id.message_id, 65 | parse_mode="MarkdownV2", 66 | disable_web_page_preview=disable_web_page_preview, 67 | ) 68 | return True 69 | 70 | # Need a split of message 71 | msgs = smart_split(text, BOT_MESSAGE_LENGTH) 72 | bot.edit_message_text( 73 | f"*{who}* \\[1/{len(msgs)}\\]:\n{telegramify_markdown.markdownify(msgs[0])}", 74 | chat_id=reply_id.chat.id, 75 | message_id=reply_id.message_id, 76 | parse_mode="MarkdownV2", 77 | disable_web_page_preview=disable_web_page_preview, 78 | ) 79 | for i in range(1, len(msgs)): 80 | bot.reply_to( 81 | reply_id.reply_to_message, 82 | f"*{who}* \\[{i + 1}/{len(msgs)}\\]:\n{telegramify_markdown.markdownify(msgs[i])}", 83 | parse_mode="MarkdownV2", 84 | ) 85 | 86 | return True 87 | except Exception: 88 | logger.exception("Error in bot_reply_markdown") 89 | # logger.info(f"wrong markdown format: {text}") 90 | bot.edit_message_text( 91 | f"*{who}*:\n{text}", 92 | chat_id=reply_id.chat.id, 93 | message_id=reply_id.message_id, 94 | disable_web_page_preview=disable_web_page_preview, 95 | ) 96 | return False 97 | 98 | 99 | def extract_prompt(message: str, bot_name: str) -> str: 100 | """ 101 | This function filters messages for prompts. 102 | 103 | Returns: 104 | str: If it is not a prompt, return None. Otherwise, return the trimmed prefix of the actual prompt. 105 | """ 106 | # remove '@bot_name' as it is considered part of the command when in a group chat. 107 | message = re.sub(re.escape(f"@{bot_name}"), "", message).strip() 108 | # add a whitespace after the first colon as we separate the prompt from the command by the first whitespace. 
109 | message = re.sub(":", ": ", message, count=1).strip() 110 | try: 111 | left, message = message.split(maxsplit=1) 112 | except ValueError: 113 | return "" 114 | if ":" not in left: 115 | # the replacement happens in the right part, restore it. 116 | message = message.replace(": ", ":", 1) 117 | return message.strip() 118 | 119 | 120 | def remove_prompt_prefix(message: str) -> str: 121 | """ 122 | Remove "/cmd" or "/cmd@bot_name" or "cmd:" 123 | """ 124 | message += " " 125 | # Explanation of the regex pattern: 126 | # ^ - Match the start of the string 127 | # ( - Start of the group 128 | # / - Literal forward slash 129 | # [a-zA-Z] - Any letter (start of the command) 130 | # [a-zA-Z0-9_]* - Any number of letters, digits, or underscores 131 | # (@\w+)? - Optionally match @ followed by one or more word characters (for bot name) 132 | # \s - A single whitespace character (space or newline) 133 | # | - OR 134 | # [a-zA-Z] - Any letter (start of the command) 135 | # [a-zA-Z0-9_]* - Any number of letters, digits, or underscores 136 | # :\s - Colon followed by a single whitespace character 137 | # ) - End of the group 138 | pattern = r"^(/[a-zA-Z][a-zA-Z0-9_]*(@\w+)?\s|[a-zA-Z][a-zA-Z0-9_]*:\s)" 139 | 140 | return re.sub(pattern, "", message).strip() 141 | 142 | 143 | def non_llm_handler(handler: T) -> T: 144 | handler.__is_llm_handler__ = False 145 | return handler 146 | 147 | 148 | def wrap_handler(handler: T, bot: TeleBot) -> T: 149 | def wrapper(message: Message, *args: Any, **kwargs: Any) -> None: 150 | try: 151 | if getattr(handler, "__is_llm_handler__", True): 152 | m = "" 153 | 154 | if message.text is not None: 155 | m = message.text = extract_prompt( 156 | message.text, bot.get_me().username 157 | ) 158 | elif message.caption is not None: 159 | m = message.caption = extract_prompt( 160 | message.caption, bot.get_me().username 161 | ) 162 | elif message.location and message.location.latitude is not None: 163 | # for location map handler just return 164 | return handler(message, *args, **kwargs) 165 | if not m: 166 | bot.reply_to(message, "Please provide info after start words.") 167 | return 168 | return handler(message, *args, **kwargs) 169 | except Exception as e: 170 | logger.exception("Error in handler %s: %s", handler.__name__, e) 171 | # handle more here 172 | if str(e).find("RECITATION") > 0: 173 | bot.reply_to(message, "Your prompt `RECITATION` please check the log") 174 | else: 175 | bot.reply_to(message, "Something wrong, please check the log") 176 | 177 | return update_wrapper(wrapper, handler) 178 | 179 | 180 | def extract_url_from_text(text: str) -> list[str]: 181 | extractor = URLExtract() 182 | urls = extractor.find_urls(text) 183 | return urls 184 | 185 | 186 | def get_text_from_jina_reader(url: str): 187 | try: 188 | r = requests.get(f"https://r.jina.ai/{url}") 189 | return r.text 190 | except Exception as e: 191 | logger.exception("Error fetching text from Jina reader: %s", e) 192 | return None 193 | 194 | 195 | def enrich_text_with_urls(text: str) -> str: 196 | urls = extract_url_from_text(text) 197 | for u in urls: 198 | try: 199 | url_text = get_text_from_jina_reader(u) 200 | url_text = f"\n```markdown\n{url_text}\n```\n" 201 | text = text.replace(u, url_text) 202 | except Exception: 203 | # just ignore the error 204 | pass 205 | 206 | return text 207 | 208 | 209 | def image_to_data_uri(file_path): 210 | content_type = guess_type(file_path)[0] 211 | with open(file_path, "rb") as image_file: 212 | encoded_image = 
base64.b64encode(image_file.read()).decode("utf-8") 213 | return f"data:{content_type};base64,{encoded_image}" 214 | -------------------------------------------------------------------------------- /handlers/claude.py: -------------------------------------------------------------------------------- 1 | import time 2 | from os import environ 3 | from pathlib import Path 4 | 5 | from anthropic import Anthropic, APITimeoutError 6 | from expiringdict import ExpiringDict 7 | from telebot import TeleBot 8 | from telebot.types import Message 9 | from telegramify_markdown import markdownify 10 | 11 | from ._utils import bot_reply_first, bot_reply_markdown, enrich_text_with_urls 12 | 13 | 14 | ANTHROPIC_API_KEY = environ.get("ANTHROPIC_API_KEY") 15 | ANTHROPIC_BASE_URL = environ.get("ANTHROPIC_BASE_URL") 16 | ANTHROPIC_MODEL = "claude-3-haiku-20240307" 17 | ANTHROPIC_PRO_MODEL = "claude-3-opus-20240229" 18 | 19 | if environ.get("ANTHROPIC_BASE_URL"): 20 | client = Anthropic(base_url=ANTHROPIC_BASE_URL, api_key=ANTHROPIC_API_KEY) 21 | else: 22 | client = Anthropic(api_key=ANTHROPIC_API_KEY) 23 | 24 | 25 | # Global history cache 26 | claude_player_dict = ExpiringDict(max_len=1000, max_age_seconds=300) 27 | claude_pro_player_dict = ExpiringDict(max_len=1000, max_age_seconds=300) 28 | 29 | 30 | def claude_handler(message: Message, bot: TeleBot) -> None: 31 | """claude : /claude """ 32 | m = message.text.strip() 33 | player_message = [] 34 | # restart will lose all TODO 35 | if str(message.from_user.id) not in claude_player_dict: 36 | claude_player_dict[str(message.from_user.id)] = ( 37 | player_message # for the imuutable list 38 | ) 39 | else: 40 | player_message = claude_player_dict[str(message.from_user.id)] 41 | 42 | if m.strip() == "clear": 43 | bot.reply_to( 44 | message, 45 | "just clear you claude messages history", 46 | ) 47 | player_message.clear() 48 | return 49 | if m[:4].lower() == "new ": 50 | m = m[4:].strip() 51 | player_message.clear() 52 | m = enrich_text_with_urls(m) 53 | 54 | who = "Claude" 55 | # show something, make it more responsible 56 | reply_id = bot_reply_first(message, who, bot) 57 | 58 | player_message.append({"role": "user", "content": m}) 59 | # keep the last 5, every has two ask and answer. 60 | if len(player_message) > 10: 61 | player_message = player_message[2:] 62 | 63 | claude_reply_text = "" 64 | try: 65 | if len(player_message) > 2: 66 | if player_message[-1]["role"] == player_message[-2]["role"]: 67 | # tricky 68 | player_message.pop() 69 | r = client.messages.create( 70 | max_tokens=4096, messages=player_message, model=ANTHROPIC_MODEL 71 | ) 72 | if not r.content: 73 | claude_reply_text = f"{who} did not answer." 
74 | player_message.pop() 75 | else: 76 | claude_reply_text = r.content[0].text 77 | player_message.append( 78 | { 79 | "role": r.role, 80 | "content": r.content, 81 | } 82 | ) 83 | 84 | except APITimeoutError: 85 | bot_reply_markdown(reply_id, who, "answer timeout", bot) 86 | # pop my user 87 | player_message.clear() 88 | return 89 | 90 | bot_reply_markdown(reply_id, who, claude_reply_text, bot) 91 | 92 | 93 | def claude_pro_handler(message: Message, bot: TeleBot) -> None: 94 | """claude_pro : /claude_pro TODO refactor""" 95 | m = message.text.strip() 96 | player_message = [] 97 | if str(message.from_user.id) not in claude_pro_player_dict: 98 | claude_pro_player_dict[str(message.from_user.id)] = ( 99 | player_message # for the imuutable list 100 | ) 101 | else: 102 | player_message = claude_pro_player_dict[str(message.from_user.id)] 103 | q = m.strip() 104 | if q == "clear" or len(q) == 0: 105 | bot.reply_to( 106 | message, 107 | "just clear you claude opus messages history", 108 | ) 109 | player_message.clear() 110 | return 111 | if m[:4].lower() == "new ": 112 | m = m[4:].strip() 113 | player_message.clear() 114 | m = enrich_text_with_urls(m) 115 | 116 | who = "Claude Pro" 117 | # show something, make it more responsible 118 | reply_id = bot_reply_first(message, who, bot) 119 | 120 | player_message.append({"role": "user", "content": m}) 121 | # keep the last 2, every has two ask and answer. 122 | # its too expensive 123 | if len(player_message) > 4: 124 | player_message = player_message[2:] 125 | 126 | try: 127 | if len(player_message) > 2: 128 | if player_message[-1]["role"] == player_message[-2]["role"]: 129 | # tricky 130 | player_message.pop() 131 | r = client.messages.create( 132 | max_tokens=2048, 133 | messages=player_message, 134 | model=ANTHROPIC_PRO_MODEL, 135 | stream=True, 136 | ) 137 | s = "" 138 | start = time.time() 139 | for e in r: 140 | if e.type == "content_block_delta": 141 | s += e.delta.text 142 | if time.time() - start > 1.7: 143 | start = time.time() 144 | bot_reply_markdown(reply_id, who, s, bot, split_text=False) 145 | 146 | if not bot_reply_markdown(reply_id, who, s, bot): 147 | # maybe not complete 148 | # maybe the same message 149 | player_message.clear() 150 | return 151 | 152 | player_message.append( 153 | { 154 | "role": "assistant", 155 | "content": markdownify(s), 156 | } 157 | ) 158 | 159 | except APITimeoutError: 160 | bot.reply_to(message, "answer wrong maybe up to the max token") 161 | # pop my user 162 | player_message.clear() 163 | return 164 | 165 | 166 | def claude_photo_handler(message: Message, bot: TeleBot) -> None: 167 | s = message.caption 168 | prompt = s.strip() 169 | who = "Claude Vision" 170 | # show something, make it more responsible 171 | reply_id = bot_reply_first(message, who, bot) 172 | # get the high quaility picture. 
173 | max_size_photo = max(message.photo, key=lambda p: p.file_size) 174 | file_path = bot.get_file(max_size_photo.file_id).file_path 175 | downloaded_file = bot.download_file(file_path) 176 | with open("claude_temp.jpg", "wb") as temp_file: 177 | temp_file.write(downloaded_file) 178 | 179 | f = Path("claude_temp.jpg") 180 | try: 181 | with f: 182 | r = client.messages.create( 183 | max_tokens=1024, 184 | messages=[ 185 | { 186 | "role": "user", 187 | "content": [ 188 | { 189 | "type": "text", 190 | "text": prompt, 191 | }, 192 | { 193 | "type": "image", 194 | "source": { 195 | "type": "base64", 196 | "media_type": "image/jpeg", 197 | "data": f, 198 | }, 199 | }, 200 | ], 201 | }, 202 | ], 203 | model=ANTHROPIC_MODEL, 204 | stream=True, 205 | ) 206 | s = "" 207 | start = time.time() 208 | for e in r: 209 | if e.type == "content_block_delta": 210 | s += e.delta.text 211 | if time.time() - start > 1.7: 212 | start = time.time() 213 | bot_reply_markdown(reply_id, who, s, bot, split_text=False) 214 | 215 | bot_reply_markdown(reply_id, who, s, bot) 216 | except Exception as e: 217 | print(e) 218 | bot_reply_markdown(reply_id, who, "answer wrong", bot) 219 | 220 | 221 | if ANTHROPIC_API_KEY: 222 | 223 | def register(bot: TeleBot) -> None: 224 | bot.register_message_handler(claude_handler, commands=["claude"], pass_bot=True) 225 | bot.register_message_handler(claude_handler, regexp="^claude:", pass_bot=True) 226 | bot.register_message_handler( 227 | claude_pro_handler, commands=["claude_pro"], pass_bot=True 228 | ) 229 | bot.register_message_handler( 230 | claude_pro_handler, regexp="^claude_pro:", pass_bot=True 231 | ) 232 | bot.register_message_handler( 233 | claude_photo_handler, 234 | content_types=["photo"], 235 | func=lambda m: m.caption and m.caption.startswith(("claude:", "/claude")), 236 | pass_bot=True, 237 | ) 238 | -------------------------------------------------------------------------------- /setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python_bin_path="$(which python3)" 4 | venv_dir=".venv" 5 | project_path="$(pwd)" 6 | service_name="tgbotyh" 7 | 8 | source .env 9 | 10 | google_gemini_api_key="${GOOGLE_GEMINI_API_KEY}" 11 | telegram_bot_token="${TELEGRAM_BOT_TOKEN}" 12 | anthropic_api_key="${ANTHROPIC_API_KEY}" 13 | openai_api_key="${OPENAI_API_KEY}" 14 | yi_api_key="${YI_API_KEY}" 15 | yi_base_url="${YI_BASE_URL}" 16 | 17 | 18 | if [ -n "$PYTHON_BIN_PATH" ]; then 19 | python_bin_path="$PYTHON_BIN_PATH" 20 | fi 21 | 22 | if [ -n "$PYTHON_VENV_PATH" ]; then 23 | venv_dir="${PYTHON_VENV_PATH}" 24 | fi 25 | 26 | sudoCmd="" 27 | if [[ $(/usr/bin/id -u) -ne 0 ]]; then 28 | sudoCmd="sudo" 29 | fi 30 | 31 | # Check Virtual Environment exist 32 | if [ -d "$venv_dir" ]; then 33 | echo "Virtual Environment already exist" 34 | exit 1 35 | fi 36 | 37 | # created virtual environment 38 | $python_bin_path -m venv "$venv_dir" 39 | if [ $? -eq 0 ]; then 40 | echo "Successfully created virtual environment." 41 | else 42 | echo "Failed to create virtual environment." 
43 | fi 44 | 45 | source $venv_dir/bin/activate 46 | python -m pip install --upgrade pip 47 | pip install -r requirements.txt 48 | 49 | osSystemMdPath="/lib/systemd/system/" 50 | 51 | # Chcek OS release distribution 52 | function getLinuxOSRelease(){ 53 | if [[ -f /etc/redhat-release ]]; then 54 | osRelease="centos" 55 | osSystemPackage="yum" 56 | osSystemMdPath="/usr/lib/systemd/system/" 57 | elif cat /etc/issue | grep -Eqi "debian|raspbian"; then 58 | osRelease="debian" 59 | osSystemPackage="apt-get" 60 | osSystemMdPath="/lib/systemd/system/" 61 | elif cat /etc/issue | grep -Eqi "ubuntu"; then 62 | osRelease="ubuntu" 63 | osSystemPackage="apt-get" 64 | osSystemMdPath="/lib/systemd/system/" 65 | elif cat /etc/issue | grep -Eqi "centos|red hat|redhat"; then 66 | osRelease="centos" 67 | osSystemPackage="yum" 68 | osSystemMdPath="/usr/lib/systemd/system/" 69 | elif cat /proc/version | grep -Eqi "debian|raspbian"; then 70 | osRelease="debian" 71 | osSystemPackage="apt-get" 72 | osSystemMdPath="/lib/systemd/system/" 73 | elif cat /proc/version | grep -Eqi "ubuntu"; then 74 | osRelease="ubuntu" 75 | osSystemPackage="apt-get" 76 | osSystemMdPath="/lib/systemd/system/" 77 | elif cat /proc/version | grep -Eqi "centos|red hat|redhat"; then 78 | osRelease="centos" 79 | osSystemPackage="yum" 80 | osSystemMdPath="/usr/lib/systemd/system/" 81 | fi 82 | } 83 | 84 | function installPythonVirtualEnv(){ 85 | echo 86 | echo "==============================" 87 | echo "Prapare to Install telegram bot" 88 | echo 89 | echo "Project path: $project_path" 90 | echo "Python bin path: $python_bin_path" 91 | echo "Google_Gemini_API_Key: $google_gemini_api_key" 92 | echo "Telegram Bot Token: $telegram_bot_token" 93 | echo "Anthropic API Key: $anthropic_api_key" 94 | echo "Openai API Key: $openai_api_key" 95 | echo "Yi API Key: $yi_api_key" 96 | echo "Yi Base Url: $yi_base_url" 97 | echo "==============================" 98 | 99 | echo 100 | 101 | # Check Virtual Environment exist 102 | if [ -d "$venv_dir" ]; then 103 | echo "Virtual Environment already exist" 104 | if [ -z "$1" ]; then 105 | exit 1 106 | else 107 | source $venv_dir/bin/activate 108 | fi 109 | else 110 | # created virtual environment 111 | echo "Creating virtual environment..." 112 | $python_bin_path -m venv "$venv_dir" 113 | 114 | if [ $? -eq 0 ]; then 115 | echo "Successfully created virtual environment." 116 | 117 | source $venv_dir/bin/activate 118 | python -m pip install --upgrade pip 119 | pip install -r requirements.txt 120 | else 121 | echo "Failed to create virtual environment." 
122 | exit 1 123 | fi 124 | fi 125 | } 126 | 127 | function installSystemd(){ 128 | installPythonVirtualEnv 129 | 130 | cat > ${osSystemMdPath}${service_name}.service <<-EOF 131 | 132 | [Unit] 133 | Description=$service_name service 134 | After=network.target 135 | 136 | [Service] 137 | User=root 138 | Group=root 139 | 140 | Environment="GOOGLE_GEMINI_KEY=${google_gemini_api_key}" 141 | Environment="ANTHROPIC_API_KEY=${anthropic_api_key}" 142 | Environment="OPENAI_API_KEY=${openai_api_key}" 143 | Environment="YI_API_KEY=${yi_api_key}" 144 | Environment="YI_BASE_URL=${yi_base_url}" 145 | 146 | 147 | WorkingDirectory=$project_path 148 | ExecStart=$project_path/venv/bin/python $project_path/tg.py "${telegram_bot_token}" 149 | 150 | Restart=on-failure 151 | RestartSec=30 152 | 153 | [Install] 154 | WantedBy=multi-user.target 155 | EOF 156 | 157 | ${sudoCmd} chmod +x ${osSystemMdPath}${service_name}.service 158 | ${sudoCmd} systemctl daemon-reload 159 | ${sudoCmd} systemctl enable ${service_name}.service 160 | ${sudoCmd} systemctl start ${service_name}.service 161 | 162 | echo 163 | echo "${service_name}.service running successfully" 164 | echo 165 | echo "Run following command to start / stop telegram bot" 166 | echo "Start: systemctl start ${service_name}.service" 167 | echo "Stop: systemctl stop ${service_name}.service" 168 | echo "Check running status: systemctl status ${service_name}.service" 169 | echo "==============================" 170 | } 171 | function uninstallSystemd(){ 172 | ${sudoCmd} systemctl stop ${service_name}.service 173 | ${sudoCmd} systemctl disable ${service_name}.service 174 | ${sudoCmd} rm -rf ${osSystemMdPath}${service_name}.service 175 | ${sudoCmd} systemctl daemon-reload 176 | } 177 | 178 | function installCommandLine(){ 179 | installPythonVirtualEnv "noexit" 180 | 181 | echo 182 | echo "==============================" 183 | export GOOGLE_GEMINI_KEY=$google_gemini_api_key 184 | export ANTHROPIC_API_KEY=$anthropic_api_key 185 | export OPENAI_API_KEY=$openai_api_key 186 | export YI_API_KEY=$yi_api_key 187 | export YI_BASE_URL=$yi_base_url 188 | python tg.py "${telegram_bot_token}" 189 | } 190 | 191 | function runSystemd(){ 192 | echo 193 | if [ "$1" == "start" ]; then 194 | echo "systemctl start ${service_name}.service" 195 | ${sudoCmd} systemctl start ${service_name}.service 196 | 197 | elif [ "$1" == "restart" ]; then 198 | echo "systemctl restart ${service_name}.service" 199 | ${sudoCmd} systemctl restart ${service_name}.service 200 | 201 | elif [ "$1" == "stop" ]; then 202 | echo "systemctl stop ${service_name}.service" 203 | ${sudoCmd} systemctl stop ${service_name}.service 204 | 205 | elif [ "$1" == "status" ]; then 206 | echo "systemctl status ${service_name}.service" 207 | ${sudoCmd} systemctl status ${service_name}.service 208 | 209 | else 210 | echo "journalctl -n 30 -u ${service_name}.service " 211 | ${sudoCmd} journalctl -n 30 -u ${service_name}.service 212 | fi 213 | echo 214 | } 215 | 216 | function start_menu(){ 217 | clear 218 | 219 | echo "==============================" 220 | echo " 1. Install telegram bot and Run with Systemd Service" 221 | echo " 2. Install and Run with Command Line" 222 | echo 223 | echo " 3. Uninstall telegram bot and Systemd Service" 224 | echo 225 | echo " 4. Restart ${service_name} Systemd Service" 226 | echo " 5. Stop ${service_name} Systemd Service" 227 | echo " 6. Check Status of ${service_name} Systemd Service" 228 | echo " 7. Show Log of ${service_name} Systemd Service" 229 | echo 230 | echo " 0. 
exit" 231 | 232 | echo 233 | read -r -p "Please input number:" menuNumberInput 234 | case "$menuNumberInput" in 235 | 1 ) 236 | installSystemd 237 | ;; 238 | 2 ) 239 | installCommandLine 240 | ;; 241 | 3 ) 242 | uninstallSystemd 243 | ;; 244 | 4 ) 245 | runSystemd "restart" 246 | ;; 247 | 5 ) 248 | runSystemd "stop" 249 | ;; 250 | 6 ) 251 | runSystemd "status" 252 | ;; 253 | 7 ) 254 | runSystemd 255 | ;; 256 | 0 ) 257 | exit 1 258 | ;; 259 | * ) 260 | clear 261 | echo "Please input correct number !" 262 | sleep 2s 263 | start_menu 264 | ;; 265 | esac 266 | } 267 | 268 | function showMenu(){ 269 | 270 | if [ -z "$1" ]; then 271 | start_menu 272 | elif [ "$1" == "1" ]; then 273 | installSystemd 274 | elif [ "$1" == "2" ]; then 275 | installCommandLine 276 | elif [ "$1" == "3" ]; then 277 | uninstallSystemd 278 | elif [ "$1" == "4" ]; then 279 | runSystemd "restart" 280 | elif [ "$1" == "5" ]; then 281 | runSystemd "stop" 282 | elif [ "$1" == "6" ]; then 283 | runSystemd "status" 284 | elif [ "$1" == "7" ]; then 285 | runSystemd 286 | else 287 | start_menu 288 | fi 289 | } 290 | 291 | showMenu $1 -------------------------------------------------------------------------------- /handlers/_telegraph.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | from mimetypes import guess_type 4 | 5 | import markdown 6 | import requests 7 | from bs4 import BeautifulSoup 8 | 9 | from ._utils import logger 10 | 11 | 12 | class TelegraphAPI: 13 | def __init__( 14 | self, 15 | access_token=None, 16 | short_name="tg_bot_collections", 17 | author_name="Telegram Bot Collections", 18 | author_url=None, 19 | ): 20 | self.access_token = ( 21 | access_token 22 | if access_token 23 | else self._create_ph_account(short_name, author_name, author_url) 24 | ) 25 | self.base_url = "https://api.telegra.ph" 26 | 27 | # Get account info on initialization 28 | account_info = self.get_account_info() 29 | self.short_name = account_info.get("short_name") 30 | self.author_name = account_info.get("author_name") 31 | self.author_url = account_info.get("author_url") 32 | 33 | def _create_ph_account(self, short_name, author_name, author_url): 34 | Store_Token = False 35 | TELEGRAPH_API_URL = "https://api.telegra.ph/createAccount" 36 | TOKEN_FILE = "token_key.json" 37 | 38 | # Try to load existing token information 39 | try: 40 | with open(TOKEN_FILE, "r") as f: 41 | tokens = json.load(f) 42 | if "TELEGRA_PH_TOKEN" in tokens and tokens["TELEGRA_PH_TOKEN"] != "example": 43 | return tokens["TELEGRA_PH_TOKEN"] 44 | except FileNotFoundError: 45 | tokens = {} 46 | 47 | # If no existing valid token in TOKEN_FILE, create a new account 48 | data = { 49 | "short_name": short_name, 50 | "author_name": author_name, 51 | "author_url": author_url, 52 | } 53 | 54 | # Make API request 55 | response = requests.post(TELEGRAPH_API_URL, data=data) 56 | response.raise_for_status() 57 | 58 | account = response.json() 59 | access_token = account["result"]["access_token"] 60 | 61 | # Update the token in the dictionary 62 | tokens["TELEGRA_PH_TOKEN"] = access_token 63 | 64 | # Store the updated tokens 65 | if Store_Token: 66 | with open(TOKEN_FILE, "w") as f: 67 | json.dump(tokens, f, indent=4) 68 | else: 69 | logger.info( 70 | f"Token not stored to file, but here is your token:\n{access_token}" 71 | ) 72 | 73 | # Store it to the environment variable 74 | os.environ["TELEGRA_PH_TOKEN"] = access_token 75 | 76 | return access_token 77 | 78 | def create_page( 79 | self, title, content, 
author_name=None, author_url=None, return_content=False 80 | ): 81 | url = f"{self.base_url}/createPage" 82 | data = { 83 | "access_token": self.access_token, 84 | "title": title, 85 | "content": json.dumps(content), 86 | "return_content": return_content, 87 | "author_name": author_name if author_name else self.author_name, 88 | "author_url": author_url if author_url else self.author_url, 89 | } 90 | 91 | # Max 65,536 characters/64KB. 92 | if len(json.dumps(content)) > 65536: 93 | content = content[:64000] 94 | data["content"] = json.dumps(content) 95 | 96 | try: 97 | response = requests.post(url, data=data) 98 | response.raise_for_status() 99 | response = response.json() 100 | page_url = response["result"]["url"] 101 | return page_url 102 | except requests.exceptions.RequestException: 103 | return "https://telegra.ph/api" 104 | 105 | def get_account_info(self): 106 | url = f'{self.base_url}/getAccountInfo?access_token={self.access_token}&fields=["short_name","author_name","author_url","auth_url"]' 107 | response = requests.get(url) 108 | 109 | if response.status_code == 200: 110 | return response.json()["result"] 111 | else: 112 | logger.info(f"Fail getting telegra.ph token info: {response.status_code}") 113 | return None 114 | 115 | def edit_page( 116 | self, 117 | path, 118 | title, 119 | content, 120 | author_name=None, 121 | author_url=None, 122 | return_content=False, 123 | ): 124 | url = f"{self.base_url}/editPage" 125 | data = { 126 | "access_token": self.access_token, 127 | "path": path, 128 | "title": title, 129 | "content": json.dumps(content), 130 | "return_content": return_content, 131 | "author_name": author_name if author_name else self.author_name, 132 | "author_url": author_url if author_url else self.author_url, 133 | } 134 | 135 | response = requests.post(url, data=data) 136 | response.raise_for_status() 137 | response = response.json() 138 | 139 | page_url = response["result"]["url"] 140 | return page_url 141 | 142 | def get_page(self, path): 143 | url = f"{self.base_url}/getPage/{path}?return_content=true" 144 | response = requests.get(url) 145 | response.raise_for_status() 146 | return response.json()["result"]["content"] 147 | 148 | def create_page_md( 149 | self, 150 | title, 151 | markdown_text, 152 | author_name=None, 153 | author_url=None, 154 | return_content=False, 155 | ): 156 | content = self._md_to_dom(markdown_text) 157 | return self.create_page(title, content, author_name, author_url, return_content) 158 | 159 | def edit_page_md( 160 | self, 161 | path, 162 | title, 163 | markdown_text, 164 | author_name=None, 165 | author_url=None, 166 | return_content=False, 167 | ): 168 | content = self._md_to_dom(markdown_text) 169 | return self.edit_page( 170 | path, title, content, author_name, author_url, return_content 171 | ) 172 | 173 | def authorize_browser(self): 174 | url = f'{self.base_url}/getAccountInfo?access_token={self.access_token}&fields=["auth_url"]' 175 | response = requests.get(url) 176 | response.raise_for_status() 177 | return response.json()["result"]["auth_url"] 178 | 179 | def _md_to_dom(self, markdown_text): 180 | html = markdown.markdown( 181 | markdown_text, 182 | extensions=["markdown.extensions.extra", "markdown.extensions.sane_lists"], 183 | ) 184 | 185 | soup = BeautifulSoup(html, "html.parser") 186 | 187 | def parse_element(element): 188 | tag_dict = {"tag": element.name} 189 | if element.name in ["h1", "h2", "h3", "h4", "h5", "h6"]: 190 | if element.name == "h1": 191 | tag_dict["tag"] = "h3" 192 | elif element.name == "h2": 193 | 
tag_dict["tag"] = "h4" 194 | else: 195 | tag_dict["tag"] = "p" 196 | tag_dict["children"] = [ 197 | {"tag": "strong", "children": element.contents} 198 | ] 199 | 200 | if element.attrs: 201 | tag_dict["attrs"] = element.attrs 202 | if element.contents: 203 | children = [] 204 | for child in element.contents: 205 | if isinstance(child, str): 206 | children.append(child.strip()) 207 | else: 208 | children.append(parse_element(child)) 209 | tag_dict["children"] = children 210 | else: 211 | if element.attrs: 212 | tag_dict["attrs"] = element.attrs 213 | if element.contents: 214 | children = [] 215 | for child in element.contents: 216 | if isinstance(child, str): 217 | children.append(child.strip()) 218 | else: 219 | children.append(parse_element(child)) 220 | if children: 221 | tag_dict["children"] = children 222 | return tag_dict 223 | 224 | new_dom = [] 225 | for element in soup.contents: 226 | if isinstance(element, str) and not element.strip(): 227 | continue 228 | elif isinstance(element, str): 229 | new_dom.append({"tag": "text", "content": element.strip()}) 230 | else: 231 | new_dom.append(parse_element(element)) 232 | 233 | return new_dom 234 | 235 | def upload_image(self, file_name: str) -> str: 236 | base_url = "https://telegra.ph" 237 | upload_url = f"{base_url}/upload" 238 | 239 | try: 240 | content_type = guess_type(file_name)[0] 241 | with open(file_name, "rb") as f: 242 | response = requests.post( 243 | upload_url, files={"file": ("blob", f, content_type)} 244 | ) 245 | response.raise_for_status() 246 | # [{'src': '/file/xx.jpg'}] 247 | response = response.json() 248 | image_url = f"{base_url}{response[0]['src']}" 249 | return image_url 250 | except Exception as e: 251 | logger.info(f"upload image: {e}") 252 | return "https://telegra.ph/api" 253 | -------------------------------------------------------------------------------- /handlers/summary/messages.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sqlite3 3 | from dataclasses import dataclass 4 | from datetime import datetime, timedelta, timezone 5 | 6 | 7 | @dataclass(frozen=True) 8 | class ChatMessage: 9 | chat_id: int 10 | message_id: int 11 | content: str 12 | user_id: int 13 | user_name: str 14 | timestamp: datetime 15 | 16 | 17 | @dataclass(frozen=True) 18 | class StatsEntry: 19 | date: str 20 | message_count: int 21 | 22 | 23 | @dataclass(frozen=True) 24 | class UserStatsEntry: 25 | user_id: int 26 | user_name: str 27 | message_count: int 28 | 29 | 30 | class MessageStore: 31 | def __init__(self, db_file: str): 32 | parent_folder = os.path.dirname(db_file) 33 | if not os.path.exists(parent_folder): 34 | os.makedirs(parent_folder) 35 | self._db_file = db_file 36 | self._init_db() 37 | 38 | def connect(self) -> sqlite3.Connection: 39 | """Create a new database connection.""" 40 | return sqlite3.connect(self._db_file) 41 | 42 | def _init_db(self): 43 | with self.connect() as conn: 44 | conn.execute( 45 | """ 46 | CREATE TABLE IF NOT EXISTS messages ( 47 | chat_id INTEGER, 48 | message_id INTEGER, 49 | content TEXT, 50 | user_id INTEGER, 51 | user_name TEXT, 52 | timestamp TEXT, 53 | PRIMARY KEY (chat_id, message_id) 54 | ); 55 | """ 56 | ) 57 | conn.execute( 58 | """ 59 | CREATE INDEX IF NOT EXISTS idx_chat_timestamp ON messages (chat_id, timestamp); 60 | """ 61 | ) 62 | conn.execute( 63 | """ 64 | CREATE TABLE IF NOT EXISTS tigong_alerts ( 65 | chat_id INTEGER, 66 | user_id INTEGER, 67 | user_name TEXT, 68 | username TEXT, 69 | date TEXT, 70 | confirmed 
INTEGER DEFAULT 0, 71 | PRIMARY KEY (chat_id, user_id, date) 72 | ); 73 | """ 74 | ) 75 | conn.commit() 76 | 77 | def add_message( 78 | self, message: ChatMessage, conn: sqlite3.Connection | None = None 79 | ) -> None: 80 | need_close = False 81 | if conn is None: 82 | conn = self.connect() 83 | need_close = True 84 | try: 85 | conn.execute( 86 | """ 87 | INSERT OR REPLACE INTO messages (chat_id, message_id, content, user_id, user_name, timestamp) 88 | VALUES (?, ?, ?, ?, ?, ?); 89 | """, 90 | ( 91 | message.chat_id, 92 | message.message_id, 93 | message.content, 94 | message.user_id, 95 | message.user_name, 96 | message.timestamp.isoformat(), 97 | ), 98 | ) 99 | self._clean_old_messages(message.chat_id, conn) 100 | conn.commit() 101 | finally: 102 | if need_close: 103 | conn.close() 104 | 105 | def get_messages_since(self, chat_id: int, since: datetime) -> list[ChatMessage]: 106 | with self.connect() as conn: 107 | cursor = conn.cursor() 108 | cursor.execute( 109 | """ 110 | SELECT chat_id, message_id, content, user_id, user_name, timestamp 111 | FROM messages 112 | WHERE chat_id = ? AND timestamp >= ? 113 | ORDER BY timestamp ASC; 114 | """, 115 | (chat_id, since.astimezone(timezone.utc).isoformat()), 116 | ) 117 | rows = cursor.fetchall() 118 | return [ 119 | ChatMessage( 120 | chat_id=row[0], 121 | message_id=row[1], 122 | content=row[2], 123 | user_id=row[3], 124 | user_name=row[4], 125 | timestamp=datetime.fromisoformat(row[5]), 126 | ) 127 | for row in rows 128 | ] 129 | 130 | def get_stats(self, chat_id: int) -> list[StatsEntry]: 131 | with self.connect() as conn: 132 | self._clean_old_messages(chat_id, conn) 133 | cursor = conn.cursor() 134 | cursor.execute( 135 | """ 136 | SELECT DATE(timestamp), COUNT(*) 137 | FROM messages 138 | WHERE chat_id = ? 139 | GROUP BY DATE(timestamp) 140 | ORDER BY DATE(timestamp) ASC; 141 | """, 142 | (chat_id,), 143 | ) 144 | rows = cursor.fetchall() 145 | return [StatsEntry(date=row[0], message_count=row[1]) for row in rows] 146 | 147 | def get_user_stats(self, chat_id: int, limit: int = 10) -> list[UserStatsEntry]: 148 | with self.connect() as conn: 149 | self._clean_old_messages(chat_id, conn) 150 | cursor = conn.cursor() 151 | cursor.execute( 152 | """ 153 | SELECT user_id, 154 | (SELECT user_name FROM messages m0 WHERE m0.user_id = m1.user_id LIMIT 1) AS name, 155 | COUNT(*) AS num 156 | FROM messages m1 157 | WHERE chat_id = ? 158 | GROUP BY user_id 159 | ORDER BY num DESC 160 | LIMIT ?;""", 161 | (chat_id, limit), 162 | ) 163 | rows = cursor.fetchall() 164 | return [UserStatsEntry(*row) for row in rows] 165 | 166 | def search_messages( 167 | self, chat_id: int, keyword: str, limit: int = 10 168 | ) -> list[ChatMessage]: 169 | # TODO: Fuzzy search with full-text search or similar 170 | with self.connect() as conn: 171 | cursor = conn.cursor() 172 | cursor.execute( 173 | """ 174 | SELECT chat_id, message_id, content, user_id, user_name, timestamp 175 | FROM messages 176 | WHERE chat_id = ? AND content LIKE ? 
177 | ORDER BY timestamp DESC 178 | LIMIT ?; 179 | """, 180 | (chat_id, f"%{keyword}%", limit), 181 | ) 182 | rows = cursor.fetchall() 183 | return [ 184 | ChatMessage( 185 | chat_id=row[0], 186 | message_id=row[1], 187 | content=row[2], 188 | user_id=row[3], 189 | user_name=row[4], 190 | timestamp=datetime.fromisoformat(row[5]), 191 | ) 192 | for row in rows 193 | ] 194 | 195 | def _clean_old_messages( 196 | self, chat_id: int, conn: sqlite3.Connection, days: int = 30 197 | ) -> None: 198 | cursor = conn.cursor() 199 | threshold_date = datetime.now(tz=timezone.utc) - timedelta(days=days) 200 | cursor.execute( 201 | "DELETE FROM messages WHERE chat_id = ? AND timestamp < ?;", 202 | (chat_id, threshold_date.isoformat()), 203 | ) 204 | 205 | def add_tigong_alert_user( 206 | self, chat_id: int, user_id: int, user_name: str, username: str, date: str 207 | ) -> None: 208 | """添加用户到提肛提醒队列""" 209 | with self.connect() as conn: 210 | conn.execute( 211 | """ 212 | INSERT OR REPLACE INTO tigong_alerts (chat_id, user_id, user_name, username, date, confirmed) 213 | VALUES (?, ?, ?, ?, ?, 0); 214 | """, 215 | (chat_id, user_id, user_name, username, date), 216 | ) 217 | conn.commit() 218 | 219 | def confirm_tigong_alert(self, chat_id: int, user_id: int, date: str) -> bool: 220 | """确认用户完成提肛""" 221 | with self.connect() as conn: 222 | cursor = conn.cursor() 223 | cursor.execute( 224 | """ 225 | UPDATE tigong_alerts SET confirmed = 1 226 | WHERE chat_id = ? AND user_id = ? AND date = ?; 227 | """, 228 | (chat_id, user_id, date), 229 | ) 230 | conn.commit() 231 | return cursor.rowcount > 0 232 | 233 | def get_unconfirmed_users(self, chat_id: int, date: str) -> list[dict]: 234 | """获取当天未确认的用户列表""" 235 | with self.connect() as conn: 236 | cursor = conn.cursor() 237 | cursor.execute( 238 | """ 239 | SELECT user_id, user_name, username 240 | FROM tigong_alerts 241 | WHERE chat_id = ? AND date = ? AND confirmed = 0; 242 | """, 243 | (chat_id, date), 244 | ) 245 | rows = cursor.fetchall() 246 | return [ 247 | {"user_id": row[0], "user_name": row[1], "username": row[2]} for row in rows 248 | ] 249 | 250 | def get_today_message_count(self, chat_id: int, date_str: str) -> int: 251 | """获取当天的消息数量""" 252 | with self.connect() as conn: 253 | cursor = conn.cursor() 254 | cursor.execute( 255 | """ 256 | SELECT COUNT(*) 257 | FROM messages 258 | WHERE chat_id = ? 
AND DATE(timestamp) = ?; 259 | """, 260 | (chat_id, date_str), 261 | ) 262 | result = cursor.fetchone() 263 | return result[0] if result else 0 264 | -------------------------------------------------------------------------------- /handlers/gemini.py: -------------------------------------------------------------------------------- 1 | import re 2 | import time 3 | from os import environ 4 | 5 | import google.generativeai as genai 6 | from expiringdict import ExpiringDict 7 | from google.generativeai import ChatSession 8 | from google.generativeai.types.generation_types import StopCandidateException 9 | from telebot import TeleBot 10 | from telebot.types import Message 11 | 12 | from ._utils import bot_reply_first, bot_reply_markdown, enrich_text_with_urls, logger 13 | 14 | 15 | GOOGLE_GEMINI_KEY = environ.get("GEMIMI_PRO_KEY") 16 | 17 | genai.configure(api_key=GOOGLE_GEMINI_KEY) 18 | generation_config = { 19 | "temperature": 0.7, 20 | "top_p": 1, 21 | "top_k": 1, 22 | "max_output_tokens": 8192, 23 | } 24 | 25 | safety_settings = [ 26 | {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"}, 27 | {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"}, 28 | {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"}, 29 | {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"}, 30 | ] 31 | 32 | # Global history cache 33 | gemini_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600) 34 | gemini_pro_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600) 35 | gemini_file_player_dict = ExpiringDict(max_len=100, max_age_seconds=600) 36 | 37 | 38 | def make_new_gemini_convo(is_pro=False) -> ChatSession: 39 | model_name = "gemini-1.5-flash-002" 40 | if is_pro: 41 | model_name = "gemini-2.0-flash-exp" 42 | 43 | model = genai.GenerativeModel( 44 | model_name=model_name, 45 | generation_config=generation_config, 46 | safety_settings=safety_settings, 47 | ) 48 | convo = model.start_chat() 49 | return convo 50 | 51 | 52 | def remove_gemini_player(player_id: str, is_pro: bool) -> None: 53 | if is_pro: 54 | if player_id in gemini_pro_player_dict: 55 | del gemini_pro_player_dict[player_id] 56 | if player_id in gemini_file_player_dict: 57 | del gemini_file_player_dict[player_id] 58 | else: 59 | if player_id in gemini_player_dict: 60 | del gemini_player_dict[player_id] 61 | 62 | 63 | def get_gemini_player(player_id: str, is_pro: bool) -> ChatSession: 64 | player = None 65 | if is_pro: 66 | if player_id not in gemini_pro_player_dict: 67 | gemini_pro_player_dict[player_id] = make_new_gemini_convo(is_pro) 68 | player = gemini_pro_player_dict[player_id] 69 | else: 70 | if player_id not in gemini_player_dict: 71 | gemini_player_dict[player_id] = make_new_gemini_convo() 72 | player = gemini_player_dict[player_id] 73 | 74 | return player 75 | 76 | 77 | def gemini_handler(message: Message, bot: TeleBot) -> None: 78 | """Gemini : /gemini """ 79 | m = message.text.strip() 80 | player_id = str(message.from_user.id) 81 | is_pro = False 82 | if m.strip() == "clear": 83 | bot.reply_to(message, "just clear you gemini messages history") 84 | remove_gemini_player(player_id, is_pro) 85 | return 86 | if m[:4].lower() == "new ": 87 | m = m[4:].strip() 88 | remove_gemini_player(player_id, is_pro) 89 | 90 | # restart will lose all TODO 91 | player = get_gemini_player(player_id, is_pro) 92 | m = enrich_text_with_urls(m) 93 | 94 | who = "Gemini" 95 | # show something, make it more responsible 96 | reply_id = bot_reply_first(message, who, bot) 97 | 98 | # 
keep the last 5, every has two ask and answer. 99 | if len(player.history) > 10: 100 | player.history = player.history[2:] 101 | 102 | try: 103 | player.send_message(m) 104 | gemini_reply_text = player.last.text.strip() 105 | # Gemini is often using ':' in **Title** which not work in Telegram Markdown 106 | gemini_reply_text = gemini_reply_text.replace(":**", "\\:**") 107 | gemini_reply_text = gemini_reply_text.replace(":**", "**\\: ") 108 | except StopCandidateException as e: 109 | match = re.search(r'content\s*{\s*parts\s*{\s*text:\s*"([^"]+)"', str(e)) 110 | if match: 111 | gemini_reply_text = match.group(1) 112 | gemini_reply_text = re.sub(r"\\n", "\n", gemini_reply_text) 113 | else: 114 | print("No meaningful text was extracted from the exception.") 115 | bot.reply_to(message, "answer wrong maybe up to the max token") 116 | return 117 | 118 | # By default markdown 119 | bot_reply_markdown(reply_id, who, gemini_reply_text, bot) 120 | 121 | 122 | def gemini_pro_handler(message: Message, bot: TeleBot) -> None: 123 | """Gemini : /gemini_pro """ 124 | m = message.text.strip() 125 | player_id = str(message.from_user.id) 126 | is_pro = True 127 | if m.strip() == "clear": 128 | bot.reply_to(message, "just clear you gemini messages history") 129 | remove_gemini_player(player_id, is_pro) 130 | return 131 | if m[:4].lower() == "new ": 132 | m = m[4:].strip() 133 | remove_gemini_player(player_id, is_pro) 134 | 135 | # restart will lose all TODO 136 | player = get_gemini_player(player_id, is_pro) 137 | m = enrich_text_with_urls(m) 138 | 139 | who = "Gemini Pro" 140 | # show something, make it more responsible 141 | reply_id = bot_reply_first(message, who, bot) 142 | 143 | # keep the last 5, every has two ask and answer. 144 | if len(player.history) > 10: 145 | player.history = player.history[2:] 146 | 147 | try: 148 | if path := gemini_file_player_dict.get(player_id): 149 | m = [m, path] 150 | r = player.send_message(m, stream=True) 151 | s = "" 152 | start = time.time() 153 | for e in r: 154 | s += e.text 155 | if time.time() - start > 1.7: 156 | start = time.time() 157 | bot_reply_markdown(reply_id, who, s, bot, split_text=False) 158 | 159 | if not bot_reply_markdown(reply_id, who, s, bot): 160 | # maybe not complete 161 | # maybe the same message 162 | player.history.clear() 163 | return 164 | except Exception as e: 165 | logger.exception("Gemini audio handler error") 166 | bot.reply_to(message, "answer wrong maybe up to the max token") 167 | try: 168 | player.history.clear() 169 | except Exception: 170 | print(f"\n------\n{who} history.clear() Error / Unstoppable\n------\n") 171 | return 172 | 173 | 174 | def gemini_photo_handler(message: Message, bot: TeleBot) -> None: 175 | s = message.caption 176 | prompt = s.strip() 177 | who = "Gemini Vision" 178 | # show something, make it more responsible 179 | reply_id = bot_reply_first(message, who, bot) 180 | # get the high quaility picture. 
181 | max_size_photo = max(message.photo, key=lambda p: p.file_size) 182 | file_path = bot.get_file(max_size_photo.file_id).file_path 183 | downloaded_file = bot.download_file(file_path) 184 | with open("gemini_temp.jpg", "wb") as temp_file: 185 | temp_file.write(downloaded_file) 186 | 187 | model = genai.GenerativeModel("gemini-2.0-flash-exp") 188 | with open("gemini_temp.jpg", "rb") as image_file: 189 | image_data = image_file.read() 190 | contents = { 191 | "parts": [{"mime_type": "image/jpeg", "data": image_data}, {"text": prompt}] 192 | } 193 | try: 194 | r = model.generate_content(contents=contents, stream=True) 195 | s = "" 196 | start = time.time() 197 | for e in r: 198 | s += e.text 199 | if time.time() - start > 1.7: 200 | start = time.time() 201 | bot_reply_markdown(reply_id, who, s, bot, split_text=False) 202 | 203 | # maybe not complete 204 | try: 205 | bot_reply_markdown(reply_id, who, s, bot) 206 | except Exception: 207 | pass 208 | except Exception as e: 209 | logger.exception("Gemini photo handler error") 210 | bot.reply_to(message, "answer wrong maybe up to the max token") 211 | 212 | 213 | def gemini_audio_handler(message: Message, bot: TeleBot) -> None: 214 | s = message.caption 215 | prompt = s.strip() 216 | who = "Gemini File Audio" 217 | player_id = str(message.from_user.id) 218 | # restart will lose all TODO 219 | player = get_gemini_player(player_id, is_pro=True) 220 | file_path = None 221 | # for file handler like {user_id: [player, file_path], user_id2: [player, file_path]} 222 | reply_id = bot_reply_first(message, who, bot) 223 | file_path = bot.get_file(message.audio.file_id).file_path 224 | downloaded_file = bot.download_file(file_path) 225 | path = f"{player_id}_gemini.mp3" 226 | with open(path, "wb") as temp_file: 227 | temp_file.write(downloaded_file) 228 | gemini_mp3_file = genai.upload_file(path=path) 229 | r = player.send_message([prompt, gemini_mp3_file], stream=True) 230 | # need set it for the conversation 231 | gemini_file_player_dict[player_id] = gemini_mp3_file 232 | try: 233 | s = "" 234 | start = time.time() 235 | for e in r: 236 | s += e.text 237 | if time.time() - start > 1.7: 238 | start = time.time() 239 | bot_reply_markdown(reply_id, who, s, bot, split_text=False) 240 | 241 | if not bot_reply_markdown(reply_id, who, s, bot): 242 | # maybe not complete 243 | # maybe the same message 244 | player.history.clear() 245 | return 246 | except Exception as e: 247 | logger.exception("Gemini audio handler error") 248 | bot.reply_to(message, "answer wrong maybe up to the max token") 249 | try: 250 | player.history.clear() 251 | except Exception: 252 | print(f"\n------\n{who} history.clear() Error / Unstoppable\n------\n") 253 | return 254 | 255 | 256 | if GOOGLE_GEMINI_KEY: 257 | 258 | def register(bot: TeleBot) -> None: 259 | bot.register_message_handler(gemini_handler, commands=["gemini"], pass_bot=True) 260 | bot.register_message_handler(gemini_handler, regexp="^gemini:", pass_bot=True) 261 | bot.register_message_handler( 262 | gemini_pro_handler, commands=["gemini_pro"], pass_bot=True 263 | ) 264 | bot.register_message_handler( 265 | gemini_pro_handler, regexp="^gemini_pro:", pass_bot=True 266 | ) 267 | bot.register_message_handler( 268 | gemini_photo_handler, 269 | content_types=["photo"], 270 | func=lambda m: m.caption and m.caption.startswith(("gemini:", "/gemini")), 271 | pass_bot=True, 272 | ) 273 | bot.register_message_handler( 274 | gemini_audio_handler, 275 | content_types=["audio"], 276 | func=lambda m: m.caption and 
m.caption.startswith(("gemini:", "/gemini")), 277 | pass_bot=True, 278 | ) 279 | -------------------------------------------------------------------------------- /handlers/summary/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import logging 4 | import random 5 | import zoneinfo 6 | from datetime import datetime, timezone 7 | from functools import partial 8 | import shlex 9 | import threading 10 | 11 | import telegramify_markdown 12 | from telebot import TeleBot 13 | from telebot.types import Message 14 | from wcwidth import wcswidth 15 | 16 | from config import settings 17 | from handlers._utils import non_llm_handler 18 | 19 | from .messages import ChatMessage, MessageStore 20 | from .utils import PROMPT, filter_message, parse_date, contains_non_ascii 21 | from datetime import timedelta 22 | 23 | from rich import print 24 | 25 | logger = logging.getLogger("bot") 26 | store = MessageStore("data/messages.db") 27 | 28 | # 从环境变量获取提肛群组 ID 29 | TIGONG_CHAT_ID = settings.tigong_chat_id 30 | 31 | 32 | def get_display_width(text: str) -> int: 33 | """获取字符串的显示宽度,考虑中文字符""" 34 | width = wcswidth(text) 35 | return width if width is not None else len(text) 36 | 37 | 38 | def pad_to_width(text: str, target_width: int) -> str: 39 | """根据显示宽度填充字符串到指定宽度""" 40 | current_width = get_display_width(text) 41 | padding = target_width - current_width 42 | return text + " " * max(0, padding) 43 | 44 | 45 | @non_llm_handler 46 | def handle_message(message: Message, bot: TeleBot): 47 | logger.debug( 48 | "Received message: %s, chat_id=%d, from=%s", 49 | message.text, 50 | message.chat.id, 51 | message.from_user.id, 52 | ) 53 | 54 | # 检测中文消息并删除(仅在特定时间和群组) 55 | # 只在提肛群组且每天北京时间 15:00-16:00 之间删除 56 | if ( 57 | TIGONG_CHAT_ID 58 | and message.chat.id == TIGONG_CHAT_ID 59 | and message.text 60 | and contains_non_ascii(message.text) 61 | ): 62 | beijing_tz = zoneinfo.ZoneInfo("Asia/Shanghai") 63 | current_time = datetime.now(tz=beijing_tz) 64 | current_hour = current_time.hour 65 | 66 | # 检查是否在北京时间 15:00-16:00 之间 67 | if 15 <= current_hour < 16: 68 | try: 69 | bot.delete_message(message.chat.id, message.message_id) 70 | bot.send_message( 71 | message.chat.id, 72 | f"已删除 @{message.from_user.username or message.from_user.full_name} 的中文消息", 73 | ) 74 | logger.info( 75 | "Deleted Chinese message from user %s in chat %d at %s", 76 | message.from_user.full_name, 77 | message.chat.id, 78 | current_time.strftime("%H:%M:%S"), 79 | ) 80 | return 81 | except Exception as e: 82 | logger.error("Failed to delete message: %s", e) 83 | 84 | store.add_message( 85 | ChatMessage( 86 | chat_id=message.chat.id, 87 | message_id=message.id, 88 | content=message.text or "", 89 | user_id=message.from_user.id, 90 | user_name=message.from_user.full_name, 91 | timestamp=datetime.fromtimestamp(message.date, tz=timezone.utc), 92 | ) 93 | ) 94 | 95 | # 检测100整数倍消息提醒 96 | if TIGONG_CHAT_ID and message.chat.id == TIGONG_CHAT_ID: 97 | beijing_tz = zoneinfo.ZoneInfo("Asia/Shanghai") 98 | today = datetime.now(tz=beijing_tz).strftime("%Y-%m-%d") 99 | count = store.get_today_message_count(message.chat.id, today) 100 | 101 | if count > 0 and count % 100 == 0: 102 | bot.send_message( 103 | message.chat.id, 104 | f"🎉 今日第 {count} 条消息!提肛小助手提醒:该做提肛运动啦!", 105 | ) 106 | 107 | 108 | @non_llm_handler 109 | def summary_command(message: Message, bot: TeleBot): 110 | """生成消息摘要。示例:/summary today; /summary 2d""" 111 | text_parts = message.text.split(maxsplit=1) 112 | if 
len(text_parts) < 2: 113 | date = "today" 114 | else: 115 | date = text_parts[1].strip() 116 | since, now = parse_date(date, settings.timezone) 117 | messages = store.get_messages_since(message.chat.id, since) 118 | messages_text = "\n".join( 119 | f"{msg.timestamp.isoformat()} - @{msg.user_name}: {msg.content}" 120 | for msg in messages 121 | ) 122 | if not messages_text: 123 | bot.reply_to(message, "没有找到指定时间范围内的历史消息。") 124 | return 125 | new_message = bot.reply_to(message, "正在生成摘要,请稍候...") 126 | response = settings.openai_client.chat.completions.create( 127 | model=settings.openai_model, 128 | messages=[ 129 | {"role": "user", "content": PROMPT.format(messages=messages_text)}, 130 | ], 131 | ) 132 | reply_text = f"""*👇 前情提要 👇 \\({since.strftime("%Y/%m/%d %H:%M")} \\- {now.strftime("%Y/%m/%d %H:%M")}\\)* 133 | 134 | {telegramify_markdown.markdownify(response.choices[0].message.content)} 135 | """ 136 | logger.debug("Generated summary:\n%s", reply_text) 137 | bot.edit_message_text( 138 | chat_id=new_message.chat.id, 139 | message_id=new_message.message_id, 140 | text=reply_text, 141 | parse_mode="MarkdownV2", 142 | ) 143 | 144 | 145 | @non_llm_handler 146 | def stats_command(message: Message, bot: TeleBot): 147 | """获取群组消息统计信息""" 148 | stats = store.get_stats(message.chat.id) 149 | if not stats: 150 | bot.reply_to(message, "没有找到任何统计信息。") 151 | return 152 | 153 | # 计算数字部分的最大宽度 154 | max_count_width = max(len(str(entry.message_count)) for entry in stats) 155 | stats_text = "\n".join( 156 | f"{entry.message_count:>{max_count_width}} messages - {entry.date}" 157 | for entry in stats 158 | ) 159 | 160 | text_args = shlex.split(message.text) 161 | if len(text_args) > 1 and text_args[1].isdigit(): 162 | limit = int(text_args[1]) 163 | else: 164 | limit = 30 165 | user_stats = store.get_user_stats(message.chat.id, limit=limit) 166 | if user_stats: 167 | # 计算用户消息数量的最大宽度 168 | max_user_count_width = max( 169 | len(str(entry.message_count)) for entry in user_stats 170 | ) 171 | user_text = "\n".join( 172 | f"{entry.message_count:>{max_user_count_width}} messages - {entry.user_name}" 173 | for entry in user_stats 174 | ) 175 | else: 176 | user_text = "" 177 | 178 | return_message = f"📊 群组消息统计信息:\n```\n{stats_text}\n```\n👤 用户消息统计信息:\n```\n{user_text}\n```\\-\\-\\-\n" 179 | 180 | bot.reply_to( 181 | message, 182 | return_message, 183 | parse_mode="MarkdownV2", 184 | ) 185 | 186 | 187 | @non_llm_handler 188 | def search_command(message: Message, bot: TeleBot): 189 | """搜索群组消息(示例:/search 关键词 [N])""" 190 | text_parts = shlex.split(message.text) 191 | if len(text_parts) < 2: 192 | bot.reply_to(message, "请提供要搜索的关键词。") 193 | return 194 | keyword = text_parts[1].strip() 195 | if len(text_parts) > 2 and text_parts[2].isdigit(): 196 | limit = int(text_parts[2]) 197 | else: 198 | limit = 10 199 | messages = store.search_messages(message.chat.id, keyword, limit=limit) 200 | if not messages: 201 | bot.reply_to(message, "没有找到匹配的消息。") 202 | return 203 | chat_id = str(message.chat.id) 204 | if chat_id.startswith("-100"): 205 | chat_id = chat_id[4:] 206 | items = [] 207 | for msg in messages: 208 | link = f"https://t.me/c/{chat_id}/{msg.message_id}" 209 | items.append(f"{link}\n```\n{msg.user_name}: {msg.content}\n```") 210 | message_text = telegramify_markdown.markdownify("\n".join(items)) 211 | bot.reply_to( 212 | message, 213 | f"🔍 *搜索结果\\(只显示前 {limit} 个\\):*\n{message_text}", 214 | parse_mode="MarkdownV2", 215 | ) 216 | 217 | 218 | TIGONG_MESSAGES = [ 219 | "💪 提肛时间到!记得做提肛运动哦~", 220 | "🏋️ 该做提肛运动了!坚持就是胜利!", 221 | "⏰ 
提肛小助手提醒:现在是提肛时间!", 222 | "🎯 提肛运动打卡时间!加油!", 223 | "💯 定时提醒:做做提肛运动,健康生活每一天!", 224 | "🌟 提肛运动不能停!现在开始吧!", 225 | "✨ 提肛小助手:该运动啦!", 226 | ] 227 | 228 | 229 | @non_llm_handler 230 | def alert_me_command(message: Message, bot: TeleBot): 231 | """加入提肛提醒队列""" 232 | if TIGONG_CHAT_ID and message.chat.id == TIGONG_CHAT_ID: 233 | beijing_tz = zoneinfo.ZoneInfo("Asia/Shanghai") 234 | today = datetime.now(tz=beijing_tz).strftime("%Y-%m-%d") 235 | username = message.from_user.username or "" 236 | store.add_tigong_alert_user( 237 | message.chat.id, 238 | message.from_user.id, 239 | message.from_user.full_name, 240 | username, 241 | today, 242 | ) 243 | bot.reply_to( 244 | message, 245 | "✅ 已加入今日提肛提醒队列!每次提醒都会 @ 你,记得 /confirm 打卡哦!", 246 | ) 247 | else: 248 | bot.reply_to(message, "此命令仅在指定群组中可用。") 249 | 250 | 251 | @non_llm_handler 252 | def confirm_command(message: Message, bot: TeleBot): 253 | """确认完成今日提肛""" 254 | if TIGONG_CHAT_ID and message.chat.id == TIGONG_CHAT_ID: 255 | beijing_tz = zoneinfo.ZoneInfo("Asia/Shanghai") 256 | today = datetime.now(tz=beijing_tz).strftime("%Y-%m-%d") 257 | success = store.confirm_tigong_alert( 258 | message.chat.id, message.from_user.id, today 259 | ) 260 | if success: 261 | bot.reply_to(message, "✅ 今日提肛已打卡!明天继续加油!") 262 | else: 263 | bot.reply_to(message, "你还没有加入提醒队列,请先使用 /alert_me 加入。") 264 | else: 265 | bot.reply_to(message, "此命令仅在指定群组中可用。") 266 | 267 | 268 | @non_llm_handler 269 | def standup_command(message: Message, bot: TeleBot): 270 | """手动发送提肛提醒消息""" 271 | if TIGONG_CHAT_ID and message.chat.id == TIGONG_CHAT_ID: 272 | try: 273 | send_random_tigong_reminder(bot) 274 | # 不需要reply,因为send_random_tigong_reminder已经发送消息了 275 | except Exception as e: 276 | logger.error("Error in standup_command: %s", e) 277 | bot.reply_to(message, "❌ 发送提醒失败,请稍后重试。") 278 | else: 279 | bot.reply_to(message, "此命令仅在指定群组中可用。") 280 | 281 | 282 | def send_random_tigong_reminder(bot: TeleBot): 283 | """发送随机提肛提醒消息""" 284 | try: 285 | beijing_tz = zoneinfo.ZoneInfo("Asia/Shanghai") 286 | today = datetime.now(tz=beijing_tz).strftime("%Y-%m-%d") 287 | 288 | # 获取未确认用户列表 289 | unconfirmed_users = store.get_unconfirmed_users(TIGONG_CHAT_ID, today) 290 | 291 | message = random.choice(TIGONG_MESSAGES) 292 | 293 | # 如果有未确认用户,@他们 294 | if unconfirmed_users: 295 | message += "\n\n" 296 | mentions = [] 297 | 298 | for user in unconfirmed_users: 299 | # 使用 username 或者 text mention 300 | username = user.get("username", "") 301 | if username: 302 | mentions.append(f"@{username}") 303 | else: 304 | # 如果没有 username,使用名字(但不能点击) 305 | mentions.append(user["user_name"]) 306 | 307 | message += " ".join(mentions) + " 记得打卡哦!" 
308 | 309 | # 发送消息 310 | bot.send_message(TIGONG_CHAT_ID, message) 311 | 312 | logger.info( 313 | "Sent tigong reminder to chat %d with %d mentions", 314 | TIGONG_CHAT_ID, 315 | len(unconfirmed_users), 316 | ) 317 | except Exception as e: 318 | logger.error("Failed to send tigong reminder: %s", e, exc_info=True) 319 | raise 320 | 321 | 322 | def schedule_tigong_reminders(bot: TeleBot): 323 | """安排提肛提醒任务:每天北京时间8:00-19:00,每2小时发送一次""" 324 | 325 | def run_scheduler(): 326 | import time 327 | 328 | beijing_tz = zoneinfo.ZoneInfo("Asia/Shanghai") 329 | while True: 330 | now = datetime.now(tz=beijing_tz) 331 | current_hour = now.hour 332 | 333 | # 检查是否在北京时间8:00-19:00之间 334 | if 8 <= current_hour < 19: 335 | # 检查是否在偶数小时的整点(8, 10, 12, 14, 16, 18) 336 | if current_hour % 2 == 0 and now.minute == 0 and now.second < 30: 337 | send_random_tigong_reminder(bot) 338 | time.sleep(30) # 避免在同一分钟内重复发送 339 | 340 | # 每30秒检查一次 341 | time.sleep(30) 342 | 343 | # 在后台线程中运行调度器 344 | scheduler_thread = threading.Thread(target=run_scheduler, daemon=True) 345 | scheduler_thread.start() 346 | logger.info("Tigong reminder scheduler started") 347 | 348 | 349 | load_priority = 5 350 | if settings.openai_api_key: 351 | 352 | def register(bot: TeleBot): 353 | """注册命令处理器""" 354 | bot.register_message_handler( 355 | summary_command, commands=["summary"], pass_bot=True 356 | ) 357 | bot.register_message_handler(stats_command, commands=["stats"], pass_bot=True) 358 | bot.register_message_handler(search_command, commands=["search"], pass_bot=True) 359 | bot.register_message_handler( 360 | standup_command, commands=["standup"], pass_bot=True 361 | ) 362 | bot.register_message_handler( 363 | alert_me_command, commands=["alert_me"], pass_bot=True 364 | ) 365 | bot.register_message_handler( 366 | confirm_command, commands=["confirm"], pass_bot=True 367 | ) 368 | bot.register_message_handler( 369 | handle_message, 370 | func=partial(filter_message, bot=bot, check_chinese=True), 371 | pass_bot=True, 372 | ) 373 | 374 | # 启动提肛提醒定时任务 375 | schedule_tigong_reminders(bot) 376 | -------------------------------------------------------------------------------- /handlers/chatgpt.py: -------------------------------------------------------------------------------- 1 | import json 2 | import time 3 | import uuid 4 | from typing import Any 5 | 6 | import requests 7 | from expiringdict import ExpiringDict 8 | from telebot import TeleBot 9 | from telebot.types import Message 10 | 11 | from config import settings 12 | 13 | from ._utils import ( 14 | bot_reply_first, 15 | bot_reply_markdown, 16 | enrich_text_with_urls, 17 | image_to_data_uri, 18 | logger, 19 | ) 20 | 21 | CHATGPT_MODEL = settings.openai_model 22 | CHATGPT_PRO_MODEL = settings.openai_model 23 | 24 | 25 | client = settings.openai_client 26 | 27 | 28 | # Web search / tool-calling configuration 29 | WEB_SEARCH_TOOL_NAME = "web_search" 30 | WEB_SEARCH_SYSTEM_PROMPT = { 31 | "role": "system", 32 | "content": "You are a helpful assistant that uses the Ollama Cloud Web Search API to fetch recent information " 33 | "from the public internet when needed. Always cite your sources using the format [number](URL) in your responses.\n\n", 34 | } 35 | OLLAMA_WEB_SEARCH_URL = "https://ollama.com/api/web_search" 36 | WEB_SEARCH_TOOL = { 37 | "type": "function", 38 | "function": { 39 | "name": WEB_SEARCH_TOOL_NAME, 40 | "description": ( 41 | "Use the Ollama Cloud Web Search API to fetch recent information" 42 | " from the public internet. 
Call this when you need up-to-date" 43 | " facts, news, or citations." 44 | ), 45 | "parameters": { 46 | "type": "object", 47 | "properties": { 48 | "query": { 49 | "type": "string", 50 | "description": "Search keywords or question.", 51 | }, 52 | "max_results": { 53 | "type": "integer", 54 | "description": ( 55 | "Maximum number of search results to fetch; defaults" 56 | " to the bot configuration if omitted." 57 | ), 58 | "minimum": 1, 59 | "maximum": 10, 60 | }, 61 | }, 62 | "required": ["query"], 63 | }, 64 | }, 65 | } 66 | STREAMING_UPDATE_INTERVAL = 1.2 67 | MAX_TOOL_ITERATIONS = 3 68 | 69 | 70 | # Global history cache 71 | chatgpt_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600) 72 | chatgpt_pro_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600) 73 | 74 | 75 | def _web_search_available() -> bool: 76 | return bool(settings.ollama_web_search_api_key) 77 | 78 | 79 | def _format_web_search_results(payload: dict[str, Any]) -> str: 80 | results = payload.get("results") or payload.get("data") or [] 81 | if not isinstance(results, list): 82 | results = [] 83 | formatted: list[str] = [] 84 | for idx, item in enumerate(results, start=1): 85 | if not isinstance(item, dict): 86 | continue 87 | title = ( 88 | item.get("title") or item.get("name") or item.get("url") or f"Result {idx}" 89 | ) 90 | url = item.get("url") or item.get("link") or item.get("source") or "" 91 | snippet = ( 92 | item.get("snippet") 93 | or item.get("summary") 94 | or item.get("content") 95 | or item.get("description") 96 | or "" 97 | ).strip() 98 | snippet = snippet.replace("\n", " ") 99 | if len(snippet) > 400: 100 | snippet = snippet[:397].rstrip() + "..." 101 | entry = f"[{idx}] {title}" 102 | if url: 103 | entry = f"{entry}\nURL: {url}" 104 | if snippet: 105 | entry = f"{entry}\n{snippet}" 106 | formatted.append(entry) 107 | if formatted: 108 | return "\n\n".join(formatted) 109 | return json.dumps(payload, ensure_ascii=False) 110 | 111 | 112 | def _call_ollama_web_search(query: str, max_results: int | None = None) -> str: 113 | if not _web_search_available(): 114 | return "Web search is not configured." 115 | payload: dict[str, Any] = {"query": query.strip()} 116 | limit = max_results if isinstance(max_results, int) else None 117 | if limit is None or limit <= 0: 118 | limit = settings.ollama_web_search_max_results 119 | if limit: 120 | payload["max_results"] = int(limit) 121 | headers = { 122 | "Authorization": f"Bearer {settings.ollama_web_search_api_key}", 123 | } 124 | try: 125 | response = requests.post( 126 | OLLAMA_WEB_SEARCH_URL, 127 | json=payload, 128 | headers=headers, 129 | timeout=settings.ollama_web_search_timeout, 130 | ) 131 | response.raise_for_status() 132 | data = response.json() 133 | except requests.RequestException as exc: 134 | logger.exception("Ollama web search failed: %s", exc) 135 | return f"Web search error: {exc}" 136 | except ValueError: 137 | logger.exception("Invalid JSON payload from Ollama web search") 138 | return "Web search error: invalid payload." 
139 | return _format_web_search_results(data) 140 | 141 | 142 | def _available_tools() -> list[dict[str, Any]]: 143 | if not _web_search_available(): 144 | return [] 145 | return [WEB_SEARCH_TOOL] 146 | 147 | 148 | def _accumulate_tool_call_deltas( 149 | buffer: dict[int, dict[str, Any]], 150 | deltas: list[Any], 151 | ) -> None: 152 | for delta in deltas: 153 | idx = getattr(delta, "index", 0) or 0 154 | entry = buffer.setdefault( 155 | idx, 156 | { 157 | "id": getattr(delta, "id", None), 158 | "type": getattr(delta, "type", "function") or "function", 159 | "function": {"name": "", "arguments": ""}, 160 | }, 161 | ) 162 | if getattr(delta, "id", None): 163 | entry["id"] = delta.id 164 | if getattr(delta, "type", None): 165 | entry["type"] = delta.type 166 | func = getattr(delta, "function", None) 167 | if func is not None: 168 | if getattr(func, "name", None): 169 | entry["function"]["name"] = func.name 170 | if getattr(func, "arguments", None): 171 | entry["function"]["arguments"] += func.arguments 172 | 173 | 174 | def _finalize_tool_calls(buffer: dict[int, dict[str, Any]]) -> list[dict[str, Any]]: 175 | tool_calls: list[dict[str, Any]] = [] 176 | 177 | for idx in sorted(buffer): 178 | entry = buffer[idx] 179 | function_name = entry.get("function", {}).get("name") 180 | if not function_name: 181 | continue 182 | arguments = entry.get("function", {}).get("arguments", "{}") 183 | tool_calls.append( 184 | { 185 | "id": entry.get("id") or str(uuid.uuid4()), 186 | "type": entry.get("type") or "function", 187 | "function": { 188 | "name": function_name, 189 | "arguments": arguments, 190 | }, 191 | } 192 | ) 193 | return tool_calls 194 | 195 | 196 | def _execute_tool(function_name: str, arguments_json: str) -> str: 197 | try: 198 | arguments = json.loads(arguments_json or "{}") 199 | except json.JSONDecodeError as exc: 200 | logger.exception("Invalid tool arguments for %s: %s", function_name, exc) 201 | return f"Invalid arguments for {function_name}: {exc}" 202 | 203 | if function_name == WEB_SEARCH_TOOL_NAME: 204 | query = (arguments.get("query") or "").strip() 205 | if not query: 206 | return "Web search error: no query provided." 207 | max_results = arguments.get("max_results") 208 | if isinstance(max_results, str): 209 | max_results = int(max_results) if max_results.isdigit() else None 210 | elif not isinstance(max_results, int): 211 | max_results = None 212 | return _call_ollama_web_search(query, max_results) 213 | 214 | return f"Function {function_name} is not implemented." 
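# ---------------------------------------------------------------------------
# Editor's sketch (illustration only, not part of this module): how the tool
# helpers above fit together. Streamed tool-call deltas are merged by
# _accumulate_tool_call_deltas, turned into OpenAI-style tool_call dicts by
# _finalize_tool_calls, and dispatched through _execute_tool. The delta
# objects below are SimpleNamespace stand-ins for the SDK's streaming chunks,
# and the snippet assumes the bot's config/env is set up so handlers.chatgpt
# imports cleanly; with no Ollama key configured, _execute_tool simply
# returns "Web search is not configured."
# ---------------------------------------------------------------------------
from types import SimpleNamespace

from handlers.chatgpt import (
    _accumulate_tool_call_deltas,
    _execute_tool,
    _finalize_tool_calls,
)

# One web_search call whose JSON arguments arrive split across two chunks.
deltas = [
    SimpleNamespace(
        index=0,
        id="call_1",
        type="function",
        function=SimpleNamespace(name="web_search", arguments='{"query": "latest '),
    ),
    SimpleNamespace(
        index=0,
        id=None,
        type=None,
        function=SimpleNamespace(name=None, arguments='python release"}'),
    ),
]

buffer = {}
_accumulate_tool_call_deltas(buffer, deltas)  # merge the partial argument chunks
tool_calls = _finalize_tool_calls(buffer)     # -> one completed web_search call

for call in tool_calls:
    # _execute_tool parses the accumulated JSON and calls the Ollama web
    # search API (or returns an error string for bad arguments / missing key).
    print(_execute_tool(call["function"]["name"], call["function"]["arguments"]))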
215 | 216 | 217 | def _append_tool_messages( 218 | conversation: list[dict[str, Any]], tool_calls: list[dict[str, Any]] 219 | ) -> None: 220 | if not tool_calls: 221 | return 222 | conversation.append( 223 | { 224 | "role": "assistant", 225 | "content": None, 226 | "tool_calls": tool_calls, 227 | } 228 | ) 229 | for call in tool_calls: 230 | result = _execute_tool( 231 | call["function"]["name"], call["function"].get("arguments", "{}") 232 | ) 233 | conversation.append( 234 | { 235 | "role": "tool", 236 | "tool_call_id": call["id"], 237 | "content": result, 238 | } 239 | ) 240 | 241 | 242 | def _stream_chatgpt_pro_response( 243 | conversation: list[dict[str, Any]], 244 | reply_id: Message, 245 | who: str, 246 | bot: TeleBot, 247 | ) -> str: 248 | tools = _available_tools() 249 | tool_loops_remaining = MAX_TOOL_ITERATIONS if tools else 0 250 | final_response = "" 251 | if tools: 252 | conversation.insert(0, WEB_SEARCH_SYSTEM_PROMPT) 253 | while True: 254 | request_payload: dict[str, Any] = { 255 | "messages": conversation, 256 | "model": CHATGPT_PRO_MODEL, 257 | "stream": True, 258 | } 259 | if tools: 260 | request_payload.update(tools=tools, tool_choice="auto") 261 | 262 | stream = client.chat.completions.create(**request_payload) 263 | buffer = "" 264 | pending_tool_call = False 265 | tool_buffer: dict[int, dict[str, Any]] = {} 266 | last_update = time.time() 267 | 268 | for chunk in stream: 269 | if not chunk.choices: 270 | continue 271 | delta = chunk.choices[0].delta 272 | if delta is None: 273 | continue 274 | if delta.tool_calls: 275 | pending_tool_call = True 276 | _accumulate_tool_call_deltas(tool_buffer, delta.tool_calls) 277 | continue 278 | content_piece = delta.content 279 | if isinstance(content_piece, list): 280 | content_piece = "".join( 281 | getattr(part, "text", "") for part in content_piece 282 | ) 283 | if not content_piece: 284 | continue 285 | buffer += content_piece 286 | now = time.time() 287 | if not pending_tool_call and now - last_update > STREAMING_UPDATE_INTERVAL: 288 | last_update = now 289 | bot_reply_markdown(reply_id, who, buffer, bot, split_text=False) 290 | 291 | if pending_tool_call and tools: 292 | if tool_loops_remaining <= 0: 293 | logger.warning( 294 | "chatgpt_pro_handler reached the maximum number of tool calls" 295 | ) 296 | final_response = buffer or "Unable to finish after calling tools." 297 | break 298 | tool_calls = _finalize_tool_calls(tool_buffer) 299 | if any( 300 | call["function"]["name"] == WEB_SEARCH_TOOL_NAME for call in tool_calls 301 | ): 302 | bot_reply_markdown( 303 | reply_id, 304 | who, 305 | "Searching the web for up-to-date information…", 306 | bot, 307 | split_text=False, 308 | disable_web_page_preview=True, 309 | ) 310 | _append_tool_messages(conversation, tool_calls) 311 | tool_loops_remaining -= 1 312 | continue 313 | 314 | final_response = buffer 315 | break 316 | 317 | if not final_response: 318 | final_response = "I could not generate a response." 
319 | bot_reply_markdown(reply_id, who, final_response, bot, split_text=True) 320 | return final_response 321 | 322 | 323 | def chatgpt_handler(message: Message, bot: TeleBot) -> None: 324 | """gpt : /gpt """ 325 | logger.debug(message) 326 | m = message.text.strip() 327 | 328 | player_message = [] 329 | # restart will lose all TODO 330 | if str(message.from_user.id) not in chatgpt_player_dict: 331 | chatgpt_player_dict[str(message.from_user.id)] = ( 332 | player_message # for the imuutable list 333 | ) 334 | else: 335 | player_message = chatgpt_player_dict[str(message.from_user.id)] 336 | if m.strip() == "clear": 337 | bot.reply_to( 338 | message, 339 | "just clear your chatgpt messages history", 340 | ) 341 | player_message.clear() 342 | return 343 | if m[:4].lower() == "new ": 344 | m = m[4:].strip() 345 | player_message.clear() 346 | m = enrich_text_with_urls(m) 347 | 348 | who = "ChatGPT" 349 | # show something, make it more responsible 350 | reply_id = bot_reply_first(message, who, bot) 351 | 352 | player_message.append({"role": "user", "content": m}) 353 | # keep the last 5, every has two ask and answer. 354 | if len(player_message) > 10: 355 | player_message = player_message[2:] 356 | 357 | chatgpt_reply_text = "" 358 | try: 359 | r = client.chat.completions.create( 360 | messages=player_message, max_tokens=1024, model=CHATGPT_MODEL 361 | ) 362 | content = r.choices[0].message.content.encode("utf8").decode() 363 | if not content: 364 | chatgpt_reply_text = f"{who} did not answer." 365 | player_message.pop() 366 | else: 367 | chatgpt_reply_text = content 368 | player_message.append( 369 | { 370 | "role": "assistant", 371 | "content": chatgpt_reply_text, 372 | } 373 | ) 374 | 375 | except Exception: 376 | logger.exception("ChatGPT handler error") 377 | bot.reply_to(message, "answer wrong maybe up to the max token") 378 | # pop my user 379 | player_message.pop() 380 | return 381 | 382 | # reply back as Markdown and fallback to plain text if failed. 383 | bot_reply_markdown(reply_id, who, chatgpt_reply_text, bot) 384 | 385 | 386 | def chatgpt_pro_handler(message: Message, bot: TeleBot) -> None: 387 | """gpt_pro : /gpt_pro """ 388 | m = message.text.strip() 389 | 390 | player_message = [] 391 | # restart will lose all TODO 392 | if str(message.from_user.id) not in chatgpt_pro_player_dict: 393 | chatgpt_pro_player_dict[str(message.from_user.id)] = ( 394 | player_message # for the imuutable list 395 | ) 396 | else: 397 | player_message = chatgpt_pro_player_dict[str(message.from_user.id)] 398 | if m.strip() == "clear": 399 | bot.reply_to( 400 | message, 401 | "just clear your chatgpt messages history", 402 | ) 403 | player_message.clear() 404 | return 405 | if m[:4].lower() == "new ": 406 | m = m[4:].strip() 407 | player_message.clear() 408 | m = enrich_text_with_urls(m) 409 | 410 | who = "ChatGPT Pro" 411 | reply_id = bot_reply_first(message, who, bot) 412 | 413 | player_message.append({"role": "user", "content": m}) 414 | # keep the last 3, every has two ask and answer. 
415 | # save me some money 416 | if len(player_message) > 6: 417 | player_message = player_message[2:] 418 | 419 | try: 420 | reply_text = _stream_chatgpt_pro_response(player_message[:], reply_id, who, bot) 421 | player_message.append( 422 | { 423 | "role": "assistant", 424 | "content": reply_text, 425 | } 426 | ) 427 | 428 | except Exception: 429 | logger.exception("ChatGPT handler error") 430 | # bot.reply_to(message, "answer wrong maybe up to the max token") 431 | player_message.clear() 432 | return 433 | 434 | 435 | def chatgpt_photo_handler(message: Message, bot: TeleBot) -> None: 436 | s = message.caption 437 | prompt = s.strip() 438 | who = "ChatGPT Vision" 439 | # show something, make it more responsible 440 | reply_id = bot_reply_first(message, who, bot) 441 | # get the high quaility picture. 442 | max_size_photo = max(message.photo, key=lambda p: p.file_size) 443 | file_path = bot.get_file(max_size_photo.file_id).file_path 444 | downloaded_file = bot.download_file(file_path) 445 | with open("chatgpt_temp.jpg", "wb") as temp_file: 446 | temp_file.write(downloaded_file) 447 | 448 | try: 449 | r = client.chat.completions.create( 450 | max_tokens=2048, 451 | messages=[ 452 | { 453 | "role": "user", 454 | "content": [ 455 | {"type": "text", "text": prompt}, 456 | { 457 | "type": "image_url", 458 | "image_url": {"url": image_to_data_uri("chatgpt_temp.jpg")}, 459 | }, 460 | ], 461 | } 462 | ], 463 | model=CHATGPT_PRO_MODEL, 464 | stream=True, 465 | ) 466 | s = "" 467 | start = time.time() 468 | for chunk in r: 469 | if chunk.choices[0].delta.content is None: 470 | break 471 | s += chunk.choices[0].delta.content 472 | if time.time() - start > 2.0: 473 | start = time.time() 474 | bot_reply_markdown(reply_id, who, s, bot, split_text=False) 475 | # maybe not complete 476 | try: 477 | bot_reply_markdown(reply_id, who, s, bot) 478 | except Exception: 479 | pass 480 | 481 | except Exception: 482 | logger.exception("ChatGPT handler error") 483 | bot.reply_to(message, "answer wrong maybe up to the max token") 484 | 485 | 486 | if settings.openai_api_key: 487 | 488 | def register(bot: TeleBot) -> None: 489 | bot.register_message_handler(chatgpt_handler, commands=["gpt"], pass_bot=True) 490 | bot.register_message_handler(chatgpt_handler, regexp="^gpt:", pass_bot=True) 491 | bot.register_message_handler( 492 | chatgpt_pro_handler, commands=["gpt_pro"], pass_bot=True 493 | ) 494 | bot.register_message_handler( 495 | chatgpt_pro_handler, regexp="^gpt_pro:", pass_bot=True 496 | ) 497 | bot.register_message_handler( 498 | chatgpt_photo_handler, 499 | content_types=["photo"], 500 | func=lambda m: m.caption 501 | and m.caption.startswith(("gpt:", "/gpt", "gpt_pro:", "/gpt_pro")), 502 | pass_bot=True, 503 | ) 504 | --------------------------------------------------------------------------------
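Editor's note: below is a minimal usage sketch (not part of the repository) for the MessageStore defined in handlers/summary/messages.py. The database path, chat id, and user id are made up for illustration; the bot itself opens data/messages.db from handlers/summary/__init__.py.

from datetime import datetime, timedelta, timezone

from handlers.summary.messages import ChatMessage, MessageStore

# Open (or create) a standalone store; the parent folder and both tables are
# created on first use. "data/demo_messages.db" is a hypothetical path.
store = MessageStore("data/demo_messages.db")

# INSERT OR REPLACE keyed on (chat_id, message_id), so re-adding the same
# message is idempotent; add_message also prunes rows older than 30 days.
store.add_message(
    ChatMessage(
        chat_id=-100123,                 # made-up group id
        message_id=1,
        content="hello from the sketch",
        user_id=42,
        user_name="Alice",
        timestamp=datetime.now(tz=timezone.utc),
    )
)

# Messages from the last 24 hours, oldest first.
recent = store.get_messages_since(
    -100123, datetime.now(tz=timezone.utc) - timedelta(days=1)
)
print(len(recent), "recent messages")

# Simple LIKE-based keyword search, newest first, capped at `limit`.
for msg in store.search_messages(-100123, "sketch", limit=5):
    print(msg.user_name, msg.content)

The summary, stats, search, and tigong-alert commands in handlers/summary/__init__.py all operate on this same SQLite file through these methods, with timestamps stored as ISO-8601 UTC strings.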