├── src ├── services │ ├── __init__.py │ ├── gpt4o_service.py │ ├── gemini_service.py │ ├── user_service.py │ ├── openai_service.py │ ├── claude_service.py │ ├── storage_service.py │ └── message_service.py ├── bot │ ├── middlewares │ │ ├── __init__.py │ │ └── language.py │ ├── handlers │ │ ├── __init__.py │ │ ├── language.py │ │ ├── chat.py │ │ └── common.py │ ├── keyboards.py │ └── message_templates.py ├── config │ ├── __init__.py │ └── config.py └── __init__.py ├── requirements.txt ├── LICENSE ├── .gitignore ├── main.py └── README.md /src/services/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/bot/middlewares/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/config/__init__.py: -------------------------------------------------------------------------------- 1 | from .config import Config 2 | 3 | __all__ = ['Config'] -------------------------------------------------------------------------------- /src/bot/handlers/__init__.py: -------------------------------------------------------------------------------- 1 | from . import common 2 | from . import language 3 | from . import chat 4 | 5 | __all__ = ['common', 'language', 'chat'] 6 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | aiogram>=3.15.0 2 | python-dotenv>=0.19.0 3 | openai>=1.0.0 4 | aiohttp>=3.8.0 5 | google-generativeai>=0.3.0 6 | urllib3<2.0.0 7 | anthropic>=0.40.0 -------------------------------------------------------------------------------- /src/__init__.py: -------------------------------------------------------------------------------- 1 | from src.bot.handlers import common, language, chat 2 | from src.services.message_service import MessageService 3 | from src.services.openai_service import OpenAIService 4 | from src.bot.middlewares.language import LanguageMiddleware 5 | 6 | __all__ = [ 7 | 'common', 8 | 'language', 9 | 'chat', 10 | 'MessageService', 11 | 'OpenAIService', 12 | 'LanguageMiddleware' 13 | ] -------------------------------------------------------------------------------- /src/bot/middlewares/language.py: -------------------------------------------------------------------------------- 1 | from typing import Callable, Dict, Any, Awaitable 2 | from aiogram import BaseMiddleware 3 | from aiogram.types import Message 4 | from src.services.message_service import MessageService 5 | 6 | class LanguageMiddleware(BaseMiddleware): 7 | def __init__(self, message_service: MessageService): 8 | self.message_service = message_service 9 | 10 | async def __call__( 11 | self, 12 | handler: Callable[[Message, Dict[str, Any]], Awaitable[Any]], 13 | event: Message, 14 | data: Dict[str, Any] 15 | ) -> Any: 16 | data["message_service"] = self.message_service 17 | return await handler(event, data) -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Nikita Triblya 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /src/services/gpt4o_service.py: -------------------------------------------------------------------------------- 1 | from openai import AsyncOpenAI 2 | from typing import List, Dict, Any 3 | from logging import getLogger 4 | 5 | logger = getLogger(__name__) 6 | 7 | class GPT4OService: 8 | def __init__(self, api_key: str): 9 | if not api_key: 10 | raise ValueError("OpenAI API key is required") 11 | self.client = AsyncOpenAI(api_key=api_key) 12 | 13 | async def generate_chat_response(self, messages: List[Dict[str, str]], user_id: str, is_mini: bool = False) -> str: 14 | try: 15 | model = "gpt-4o-mini" if is_mini else "gpt-4o" 16 | completion = await self.client.chat.completions.create( 17 | model=model, 18 | messages=messages, 19 | max_tokens=2500, 20 | temperature=0.7, 21 | frequency_penalty=0, 22 | presence_penalty=0, 23 | user=user_id 24 | ) 25 | return completion.choices[0].message.content 26 | except Exception as e: 27 | logger.error(f"GPT-4O API error for user {user_id}: {str(e)}") 28 | raise Exception(f"GPT-4O API error: {str(e)}") -------------------------------------------------------------------------------- /src/services/gemini_service.py: -------------------------------------------------------------------------------- 1 | import google.generativeai as genai 2 | from typing import List, Dict 3 | import logging 4 | 5 | class GeminiService: 6 | def __init__(self, api_key: str): 7 | if not api_key: 8 | raise ValueError("Gemini API key is required") 9 | 10 | genai.configure(api_key=api_key) 11 | self.model = genai.GenerativeModel('gemini-pro') 12 | 13 | async def generate_chat_response(self, messages: List[Dict[str, str]], user_id: str) -> str: 14 | try: 15 | # Конвертируем формат сообщений из OpenAI в формат Gemini 16 | chat = self.model.start_chat(history=[]) 17 | 18 | for message in messages: 19 | if message["role"] == "user": 20 | response = chat.send_message(message["content"]) 21 | 22 | return response.text 23 | 24 | except Exception as e: 25 | error_str = str(e) 26 | if "SAFETY" in error_str or "safety_ratings" in error_str: 27 | logging.warning(f"Gemini safety error for user {user_id}: {error_str}") 28 | raise ValueError("safety_error") 29 | else: 30 | logging.error(f"Gemini API error for user {user_id}: {error_str}") 31 | raise Exception(f"Gemini API error: {error_str}") -------------------------------------------------------------------------------- /src/services/user_service.py: -------------------------------------------------------------------------------- 1 | from typing import Dict 2 | from src.services.storage_service import StorageService 3 | 4 | class UserService: 5 | def __init__(self, storage: StorageService): 6 | self.user_models: Dict[str, str] = {} 7 | self.storage = storage 8 | 9 | def load_user_state(self, user_id: str) -> None: 10 | """Load user state from storage""" 11 | data = self.storage.load_user_data(user_id) 12 | if data and 'model' in data: 13 | self.user_models[user_id] = data['model'] 14 | 15 | def save_user_state(self, user_id: str) -> None: 16 | """Save user state to storage""" 17 | data = { 18 | 'model': self.user_models.get(user_id, 'gpt4') 19 | } 20 | self.storage.save_user_data(user_id, data) 21 | 22 | def get_user_model(self, user_id: str) -> str: 23 | """Get user's current model (default to GPT-4)""" 24 | if user_id not in self.user_models: 25 | self.load_user_state(user_id) 26 | return self.user_models.get(user_id, "gpt4") 27 | 28 | def set_user_model(self, user_id: str, model: str) -> None: 29 | """Set user's preferred model and save state""" 30 | allowed_models = [ 31 | "gpt4", "gemini", "gpt4o", "gpt4o_mini", 32 | "claude", "claude_haiku" 33 | ] 34 | if model not in allowed_models: 35 | raise ValueError(f"Unsupported model: {model}") 36 | self.user_models[user_id] = model 37 | self.save_user_state(user_id) -------------------------------------------------------------------------------- /src/bot/handlers/language.py: -------------------------------------------------------------------------------- 1 | from aiogram import Router, F 2 | from aiogram.types import Message, CallbackQuery 3 | from aiogram.filters import Command 4 | from src.services.message_service import MessageService 5 | from src.bot.keyboards import get_language_keyboard, get_main_keyboard 6 | 7 | router = Router(name='language') 8 | 9 | __all__ = ['router'] 10 | 11 | @router.message(Command("language")) 12 | @router.message(F.text.contains("Language")) 13 | @router.message(F.text.contains("Язык")) 14 | @router.message(F.text.contains("Мова")) 15 | async def language_cmd(message: Message, message_service: MessageService): 16 | """Language selection handler""" 17 | await message_service.send_message( 18 | message.from_user.id, 19 | "language_selection", 20 | message, 21 | reply_markup=get_language_keyboard() 22 | ) 23 | 24 | @router.callback_query(F.data.in_({"en", "ru", "ua"})) 25 | async def process_language_callback(callback: CallbackQuery, message_service: MessageService): 26 | """Language selection callback handler""" 27 | user_id = str(callback.from_user.id) 28 | message_service.load_user_state(user_id) 29 | 30 | # Устанавливаем новый язык 31 | await message_service.set_user_language(callback.from_user.id, callback.data) 32 | 33 | # Отправляем подтверждение с новой клавиатурой 34 | keyboard = get_main_keyboard(callback.data) 35 | await message_service.send_message( 36 | callback.from_user.id, 37 | "language_confirmation", 38 | callback.message, 39 | reply_markup=keyboard 40 | ) 41 | 42 | # Закрываем меню выбора языка 43 | await callback.answer() -------------------------------------------------------------------------------- /src/services/openai_service.py: -------------------------------------------------------------------------------- 1 | from openai import AsyncOpenAI 2 | from typing import List, Dict, Any 3 | from logging import getLogger 4 | 5 | logger = getLogger(__name__) 6 | 7 | class OpenAIService: 8 | def __init__(self, api_key: str): 9 | if not api_key: 10 | raise ValueError("OpenAI API key is required") 11 | self.client = AsyncOpenAI(api_key=api_key) 12 | 13 | async def generate_chat_response(self, messages: List[Dict[str, str]], user_id: str) -> str: 14 | try: 15 | completion = await self.client.chat.completions.create( 16 | model="gpt-4", 17 | messages=messages, 18 | max_tokens=2500, 19 | temperature=0.7, 20 | frequency_penalty=0, 21 | presence_penalty=0, 22 | user=user_id 23 | ) 24 | return completion.choices[0].message.content 25 | except Exception as e: 26 | logger.error(f"OpenAI API error for user {user_id}: {str(e)}") 27 | raise Exception(f"OpenAI API error: {str(e)}") 28 | 29 | async def generate_image(self, prompt: str) -> str: 30 | if not prompt: 31 | raise ValueError("Image prompt cannot be empty") 32 | 33 | try: 34 | response = await self.client.images.generate( 35 | prompt=prompt, 36 | n=1, 37 | size="512x512", 38 | response_format="url" 39 | ) 40 | return response.data[0].url 41 | except Exception as e: 42 | logger.error(f"Image generation error for prompt '{prompt}': {str(e)}") 43 | raise Exception(f"Image generation error: {str(e)}") -------------------------------------------------------------------------------- /src/services/claude_service.py: -------------------------------------------------------------------------------- 1 | from anthropic import Anthropic 2 | from typing import List, Dict 3 | import logging 4 | 5 | logger = logging.getLogger(__name__) 6 | 7 | class ClaudeService: 8 | def __init__(self, api_key: str): 9 | if not api_key: 10 | raise ValueError("Anthropic API key is required") 11 | self.client = Anthropic(api_key=api_key) 12 | 13 | async def generate_chat_response(self, messages: List[Dict[str, str]], user_id: str, is_haiku: bool = False) -> str: 14 | try: 15 | # Convert message history to Claude format 16 | formatted_messages = [] 17 | for msg in messages: 18 | role = "assistant" if msg["role"] == "assistant" else "user" 19 | formatted_messages.append({"role": role, "content": msg["content"]}) 20 | 21 | # Choose model based on type 22 | model = "claude-3-5-haiku-latest" if is_haiku else "claude-3-5-sonnet-latest" 23 | 24 | # Create message 25 | message = await self.client.messages.create( 26 | model=model, 27 | max_tokens=2048, 28 | messages=formatted_messages, 29 | temperature=0.7 30 | ) 31 | 32 | return message.content[0].text 33 | 34 | except Exception as e: 35 | logger.error(f"Claude API error for user {user_id}: {str(e)}") 36 | raise Exception(f"Claude API error: {str(e)}") 37 | 38 | async def generate_haiku_response(self, messages: List[Dict[str, str]], user_id: str) -> str: 39 | """Generate response using Claude Haiku model""" 40 | return await self.generate_chat_response(messages, user_id, is_haiku=True) -------------------------------------------------------------------------------- /src/services/storage_service.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | from typing import Dict, List 4 | import logging 5 | 6 | class StorageService: 7 | def __init__(self, storage_dir: str = "data"): 8 | self.storage_dir = storage_dir 9 | self.ensure_storage_dir() 10 | 11 | def ensure_storage_dir(self): 12 | """Create storage directory if it doesn't exist""" 13 | if not os.path.exists(self.storage_dir): 14 | os.makedirs(self.storage_dir) 15 | 16 | def _get_user_file_path(self, user_id: str) -> str: 17 | """Get path to user's data file""" 18 | return os.path.join(self.storage_dir, f"user_{user_id}.json") 19 | 20 | def save_user_data(self, user_id: str, data: Dict) -> None: 21 | """Save user data to file, merging with existing data""" 22 | try: 23 | file_path = self._get_user_file_path(user_id) 24 | 25 | # Load existing data 26 | existing_data = {} 27 | if os.path.exists(file_path): 28 | with open(file_path, 'r', encoding='utf-8') as f: 29 | existing_data = json.load(f) 30 | 31 | # Merge new data with existing data 32 | existing_data.update(data) 33 | 34 | # Save merged data 35 | with open(file_path, 'w', encoding='utf-8') as f: 36 | json.dump(existing_data, f, ensure_ascii=False, indent=2) 37 | 38 | logging.info(f"Saved user data for {user_id}: {existing_data}") 39 | 40 | except Exception as e: 41 | logging.error(f"Error saving user data: {e}") 42 | 43 | def load_user_data(self, user_id: str) -> Dict: 44 | """Load user data from file""" 45 | try: 46 | file_path = self._get_user_file_path(user_id) 47 | if os.path.exists(file_path): 48 | with open(file_path, 'r', encoding='utf-8') as f: 49 | data = json.load(f) 50 | logging.info(f"Loaded user data for {user_id}: {data}") 51 | return data 52 | except Exception as e: 53 | logging.error(f"Error loading user data: {e}") 54 | return {} -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # User data 132 | data/ 133 | -------------------------------------------------------------------------------- /src/bot/keyboards.py: -------------------------------------------------------------------------------- 1 | from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton, ReplyKeyboardMarkup, KeyboardButton 2 | 3 | def get_language_keyboard() -> InlineKeyboardMarkup: 4 | """Language selection keyboard""" 5 | keyboard = InlineKeyboardMarkup(inline_keyboard=[ 6 | [ 7 | InlineKeyboardButton(text="English 🇬🇧", callback_data="en"), 8 | InlineKeyboardButton(text="Русский 🇷🇺", callback_data="ru"), 9 | InlineKeyboardButton(text="Українська 🇺🇦", callback_data="ua") 10 | ] 11 | ]) 12 | return keyboard 13 | 14 | def get_model_keyboard() -> InlineKeyboardMarkup: 15 | """Model selection keyboard""" 16 | keyboard = InlineKeyboardMarkup(inline_keyboard=[ 17 | [ 18 | InlineKeyboardButton(text="GPT-4 🤖", callback_data="model_gpt4"), 19 | InlineKeyboardButton(text="Gemini ✨", callback_data="model_gemini") 20 | ], 21 | [ 22 | InlineKeyboardButton(text="GPT-4O 🔮", callback_data="model_gpt4o"), 23 | InlineKeyboardButton(text="GPT-4O-mini 🎯", callback_data="model_gpt4o_mini") 24 | ], 25 | [ 26 | InlineKeyboardButton(text="Claude 3.5 Sonnet 🎭", callback_data="model_claude"), 27 | InlineKeyboardButton(text="Claude 3.5 Haiku 🎋", callback_data="model_claude_haiku") 28 | ] 29 | ]) 30 | return keyboard 31 | 32 | def get_main_keyboard(lang: str = 'en') -> ReplyKeyboardMarkup: 33 | """Main command keyboard with localization""" 34 | commands = { 35 | 'en': { 36 | 'help': '❓ Help', 37 | 'model': '🔄 Change Model', 38 | 'language': '🌐 Language', 39 | 'about': 'ℹ️ About', 40 | 'new_topic': '🆕 New Topic', 41 | 'image': '🎨 Generate Image' 42 | }, 43 | 'ru': { 44 | 'help': '❓ Помощь', 45 | 'model': '🔄 Сменить модель', 46 | 'language': '🌐 Язык', 47 | 'about': 'ℹ️ О боте', 48 | 'new_topic': '🆕 Новая тема', 49 | 'image': '🎨 Создать изображение' 50 | }, 51 | 'ua': { 52 | 'help': '❓ Довідка', 53 | 'model': '🔄 Змінити модель', 54 | 'language': '🌐 Мова', 55 | 'about': 'ℹ️ Про бота', 56 | 'new_topic': '🆕 Нова тема', 57 | 'image': '🎨 Створити зображення' 58 | } 59 | } 60 | 61 | cmd = commands.get(lang, commands['en']) 62 | 63 | keyboard = ReplyKeyboardMarkup( 64 | keyboard=[ 65 | [ 66 | KeyboardButton(text=cmd['help']), 67 | KeyboardButton(text=cmd['model']) 68 | ], 69 | [ 70 | KeyboardButton(text=cmd['language']), 71 | KeyboardButton(text=cmd['about']) 72 | ], 73 | [ 74 | KeyboardButton(text=cmd['new_topic']), 75 | KeyboardButton(text=cmd['image']) 76 | ] 77 | ], 78 | resize_keyboard=True, 79 | input_field_placeholder="Send a message or use buttons below" 80 | ) 81 | return keyboard -------------------------------------------------------------------------------- /src/bot/handlers/chat.py: -------------------------------------------------------------------------------- 1 | from aiogram import Router, F 2 | from aiogram.types import Message 3 | from aiogram.enums import ChatAction 4 | from src.services.message_service import MessageService 5 | from src.services.openai_service import OpenAIService 6 | from src.services.gemini_service import GeminiService 7 | from src.services.gpt4o_service import GPT4OService 8 | from src.services.claude_service import ClaudeService 9 | from src.services.user_service import UserService 10 | import logging 11 | 12 | router = Router(name='chat') 13 | 14 | @router.message(F.text) 15 | async def handle_message( 16 | message: Message, 17 | message_service: MessageService, 18 | openai_service: OpenAIService, 19 | gemini_service: GeminiService, 20 | gpt4o_service: GPT4OService, 21 | claude_service: ClaudeService, 22 | user_service: UserService 23 | ): 24 | """Handle user messages""" 25 | try: 26 | user_id = str(message.from_user.id) 27 | 28 | # Skip processing commands 29 | if message.text.startswith('/'): 30 | return 31 | 32 | # Load user state 33 | message_service.load_user_state(user_id) 34 | 35 | # Add user message to history 36 | message_service.add_message(user_id, "user", message.text) 37 | 38 | # Show typing status 39 | await message.bot.send_chat_action( 40 | chat_id=message.chat.id, 41 | action=ChatAction.TYPING 42 | ) 43 | 44 | # Get current model and generate response 45 | model = user_service.get_user_model(user_id) 46 | response = "" 47 | 48 | if model == "gpt4": 49 | response = await openai_service.generate_chat_response( 50 | message_service.get_messages(user_id), 51 | user_id 52 | ) 53 | elif model == "gemini": 54 | response = await gemini_service.generate_chat_response( 55 | message_service.get_messages(user_id), 56 | user_id 57 | ) 58 | elif model == "gpt4o": 59 | response = await gpt4o_service.generate_chat_response( 60 | message_service.get_messages(user_id), 61 | user_id 62 | ) 63 | elif model == "gpt4o_mini": 64 | response = await gpt4o_service.generate_chat_response( 65 | message_service.get_messages(user_id), 66 | user_id, 67 | is_mini=True 68 | ) 69 | elif model == "claude": 70 | response = await claude_service.generate_chat_response( 71 | message_service.get_messages(user_id), 72 | user_id 73 | ) 74 | elif model == "claude_haiku": 75 | response = await claude_service.generate_haiku_response( 76 | message_service.get_messages(user_id), 77 | user_id 78 | ) 79 | 80 | # Add response to history 81 | message_service.add_message(user_id, "assistant", response) 82 | 83 | # Send response 84 | await message_service.send_message( 85 | message.from_user.id, 86 | response, 87 | message, 88 | is_response=True 89 | ) 90 | 91 | except ValueError as e: 92 | if "safety_error" in str(e): 93 | await message_service.send_message( 94 | message.from_user.id, 95 | "safety_error", 96 | message 97 | ) 98 | else: 99 | logging.error(f"Error processing message: {str(e)}") 100 | await message_service.send_message( 101 | message.from_user.id, 102 | "error", 103 | message 104 | ) 105 | except Exception as e: 106 | logging.error(f"Error processing message: {str(e)}") 107 | await message_service.send_message( 108 | message.from_user.id, 109 | "error", 110 | message 111 | ) -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import sys 4 | from os import path 5 | 6 | # Add project root to sys.path 7 | project_root = path.dirname(path.abspath(__file__)) 8 | if project_root not in sys.path: 9 | sys.path.append(project_root) 10 | 11 | from aiogram import Bot, Dispatcher 12 | from aiogram.enums import ParseMode 13 | from aiogram.fsm.storage.memory import MemoryStorage 14 | from aiogram.client.session.aiohttp import AiohttpSession 15 | from aiogram.exceptions import TelegramAPIError 16 | from aiogram.client.default import DefaultBotProperties 17 | 18 | from src.config import Config 19 | from src.bot.handlers import common, language, chat 20 | from src.services.message_service import MessageService 21 | from src.services.openai_service import OpenAIService 22 | from src.services.gemini_service import GeminiService 23 | from src.services.gpt4o_service import GPT4OService 24 | from src.services.claude_service import ClaudeService 25 | from src.services.user_service import UserService 26 | from src.bot.middlewares.language import LanguageMiddleware 27 | from src.services.storage_service import StorageService 28 | 29 | async def create_bot(token: str) -> Bot: 30 | """Create and validate bot instance""" 31 | session = AiohttpSession() 32 | default = DefaultBotProperties(parse_mode=ParseMode.HTML) 33 | bot = Bot(token=token, session=session, default=default) 34 | 35 | try: 36 | # Test bot token by getting bot info 37 | bot_info = await bot.get_me() 38 | logging.info(f"Successfully initialized bot: {bot_info.full_name}") 39 | return bot 40 | except TelegramAPIError as e: 41 | await session.close() 42 | error_msg = f"Failed to initialize bot: {str(e)}" 43 | logging.error(error_msg) 44 | raise ValueError(error_msg) 45 | 46 | async def main(): 47 | # Configure logging 48 | logging.basicConfig( 49 | level=logging.INFO, 50 | format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", 51 | ) 52 | 53 | bot = None 54 | try: 55 | # Load config 56 | config = Config() 57 | 58 | # Initialize bot 59 | bot = await create_bot(config.bot_token) 60 | 61 | # Initialize services 62 | storage_service = StorageService() 63 | message_service = MessageService(storage_service) 64 | openai_service = OpenAIService(config.openai_api_key) 65 | gemini_service = GeminiService(config.gemini_api_key) 66 | gpt4o_service = GPT4OService(config.openai_api_key) 67 | claude_service = ClaudeService(config.anthropic_api_key) 68 | user_service = UserService(storage_service) 69 | 70 | # Clear all messages at startup 71 | message_service.clear_all_messages() 72 | logging.info("All message histories cleared") 73 | 74 | # Initialize dispatcher 75 | dp = Dispatcher(storage=MemoryStorage()) 76 | 77 | # Register middlewares 78 | dp.message.middleware(LanguageMiddleware(message_service)) 79 | 80 | # Register routers 81 | dp.include_router(language.router) 82 | dp.include_router(common.router) 83 | dp.include_router(chat.router) 84 | 85 | # Set up services for handlers 86 | dp["message_service"] = message_service 87 | dp["openai_service"] = openai_service 88 | dp["gemini_service"] = gemini_service 89 | dp["gpt4o_service"] = gpt4o_service 90 | dp["claude_service"] = claude_service 91 | dp["user_service"] = user_service 92 | 93 | # Start polling 94 | logging.info("Starting bot...") 95 | await dp.start_polling(bot) 96 | 97 | except Exception as e: 98 | logging.error(f"Startup error: {str(e)}") 99 | raise 100 | finally: 101 | if bot is not None: 102 | await bot.session.close() 103 | 104 | if __name__ == "__main__": 105 | try: 106 | asyncio.run(main()) 107 | except KeyboardInterrupt: 108 | logging.info("Bot stopped!") 109 | except Exception as e: 110 | logging.error(f"Fatal error: {str(e)}") 111 | -------------------------------------------------------------------------------- /src/config/config.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from os import path, environ 3 | from dotenv import load_dotenv, find_dotenv, dotenv_values 4 | import logging 5 | import re 6 | 7 | @dataclass 8 | class Config: 9 | bot_token: str 10 | openai_api_key: str 11 | gemini_api_key: str 12 | anthropic_api_key: str 13 | 14 | def __init__(self): 15 | # Configure logging 16 | logging.basicConfig(level=logging.INFO) 17 | 18 | # Try to find .env file 19 | env_path = path.join(path.dirname(path.dirname(path.dirname(__file__))), '.env') 20 | logging.info(f"Looking for .env at: {env_path}") 21 | 22 | # Also try to find .env using dotenv's built-in search 23 | alt_env_path = find_dotenv() 24 | if alt_env_path: 25 | logging.info(f"Alternative .env found at: {alt_env_path}") 26 | 27 | if path.exists(env_path): 28 | logging.info(f"Loading .env from: {env_path}") 29 | 30 | # Read values directly from .env file 31 | env_values = dotenv_values(env_path) 32 | logging.info("Direct values from .env file:") 33 | for key, value in env_values.items(): 34 | masked_value = value[:4] + '...' + value[-4:] if len(value) > 8 else '***' 35 | logging.info(f"{key}={masked_value}") 36 | 37 | # Force set environment variables 38 | for key, value in env_values.items(): 39 | environ[key] = value 40 | 41 | logging.info("Environment variables after setting:") 42 | if 'BOT_TOKEN' in environ: 43 | token = environ['BOT_TOKEN'] 44 | masked_token = token[:4] + '...' + token[-4:] if len(token) > 8 else '***' 45 | logging.info(f"BOT_TOKEN={masked_token}") 46 | else: 47 | logging.error(f".env file not found at {env_path}") 48 | raise FileNotFoundError(f".env file not found at {env_path}") 49 | 50 | # Get and validate bot token 51 | self.bot_token = self._get_and_validate_token('BOT_TOKEN') 52 | self.openai_api_key = environ.get('OPENAI_API_KEY', '').strip() 53 | self.gemini_api_key = environ.get('GEMINI_API_KEY', '').strip() 54 | self.anthropic_api_key = environ.get('ANTHROPIC_API_KEY', '').strip() 55 | 56 | def _get_and_validate_token(self, env_var: str) -> str: 57 | """Get and validate bot token from environment variables""" 58 | # First check if variable exists 59 | if env_var not in environ: 60 | error_msg = f"{env_var} is not set in environment variables" 61 | logging.error(error_msg) 62 | logging.info(f"Available environment variables: {list(environ.keys())}") 63 | raise ValueError(error_msg) 64 | 65 | token = environ[env_var].strip() # Use direct dictionary access 66 | 67 | # Log raw token for debugging 68 | logging.info(f"Raw {env_var} value: '{token}'") 69 | logging.info(f"Raw {env_var} length: {len(token)}") 70 | 71 | if not token: 72 | error_msg = f"{env_var} is empty" 73 | logging.error(error_msg) 74 | raise ValueError(error_msg) 75 | 76 | # Log token details for debugging 77 | if len(token) > 10: 78 | logging.info(f"{env_var} starts with: '{token[:8]}'") 79 | logging.info(f"{env_var} middle part: '{token[8:16]}'") 80 | logging.info(f"{env_var} ends with: '{token[-8:]}'") 81 | 82 | # Log character types for debugging 83 | logging.info("Token structure analysis:") 84 | parts = token.split(':') 85 | if len(parts) == 2: 86 | logging.info(f"ID part (before :): '{parts[0]}' (length: {len(parts[0])})") 87 | logging.info(f"Hash part (after :): '{parts[1]}' (length: {len(parts[1])})") 88 | else: 89 | logging.info(f"Token does not contain ':' separator. Parts found: {len(parts)}") 90 | 91 | # Validate token format (NUMBER:LETTERS) 92 | if not re.match(r'^\d+:[A-Za-z0-9_-]+$', token): 93 | error_msg = f"Invalid {env_var} format. Token should be in format: NUMBER:LETTERS" 94 | logging.error(error_msg) 95 | logging.error(f"Token format validation failed. Token contains invalid characters or format") 96 | raise ValueError(error_msg) 97 | 98 | return token -------------------------------------------------------------------------------- /src/bot/handlers/common.py: -------------------------------------------------------------------------------- 1 | from aiogram import Router, F 2 | from aiogram.types import Message, CallbackQuery 3 | from aiogram.filters import Command 4 | from src.services.message_service import MessageService 5 | from src.services.user_service import UserService 6 | from src.bot.keyboards import get_main_keyboard, get_model_keyboard 7 | 8 | router = Router(name='common') 9 | 10 | @router.message(Command("start")) 11 | async def start_cmd(message: Message, message_service: MessageService): 12 | """Start command handler""" 13 | user_id = str(message.from_user.id) 14 | message_service.load_user_state(user_id) 15 | 16 | lang = message_service.get_user_language(message.from_user.id) 17 | keyboard = get_main_keyboard(lang) 18 | 19 | await message_service.send_message( 20 | message.from_user.id, 21 | "start", 22 | message, 23 | reply_markup=keyboard 24 | ) 25 | 26 | @router.message(Command("help")) 27 | @router.message(F.text.contains("❓")) 28 | async def help_cmd(message: Message, message_service: MessageService): 29 | """Help command handler""" 30 | await message_service.send_message( 31 | message.from_user.id, 32 | "help", 33 | message 34 | ) 35 | 36 | @router.message(Command("about")) 37 | @router.message(F.text.contains("ℹ️")) 38 | async def about_cmd(message: Message, message_service: MessageService): 39 | """About command handler""" 40 | await message_service.send_message( 41 | message.from_user.id, 42 | "about", 43 | message 44 | ) 45 | 46 | @router.message(Command("stats")) 47 | async def stats_cmd(message: Message, message_service: MessageService, user_service: UserService): 48 | """Show chat statistics""" 49 | user_id = str(message.from_user.id) 50 | stats = message_service.get_message_stats(user_id) 51 | model = user_service.get_user_model(user_id) 52 | 53 | await message_service.send_message( 54 | message.from_user.id, 55 | "stats", 56 | message, 57 | messages=stats["message_count"], 58 | tokens=stats["estimated_tokens"], 59 | max_messages=stats["max_messages"], 60 | max_tokens=stats["max_tokens"], 61 | model=model.upper() 62 | ) 63 | 64 | @router.message(Command("model")) 65 | @router.message(F.text.contains("🔄")) 66 | async def model_cmd(message: Message, message_service: MessageService, user_service: UserService): 67 | """Model selection handler""" 68 | user_id = str(message.from_user.id) 69 | current_model = user_service.get_user_model(user_id) 70 | await message_service.send_message( 71 | message.from_user.id, 72 | "current_model", 73 | message, 74 | reply_markup=get_model_keyboard(), 75 | model=current_model.upper() 76 | ) 77 | 78 | @router.callback_query(F.data.startswith("model_")) 79 | async def process_model_callback(callback: CallbackQuery, message_service: MessageService, user_service: UserService): 80 | """Model selection callback handler""" 81 | user_id = str(callback.from_user.id) 82 | model = callback.data.replace("model_", "") 83 | 84 | # Save user's model choice 85 | user_service.set_user_model(user_id, model) 86 | 87 | # Send confirmation with main keyboard 88 | lang = message_service.get_user_language(callback.from_user.id) 89 | keyboard = get_main_keyboard(lang) 90 | 91 | # Get message key based on model 92 | msg_key = f"model_switched_{model}" 93 | 94 | await message_service.send_message( 95 | callback.from_user.id, 96 | msg_key, 97 | callback.message, 98 | reply_markup=keyboard 99 | ) 100 | 101 | await callback.answer() 102 | 103 | @router.message(Command("newtopic")) 104 | @router.message(F.text.contains("🆕")) 105 | async def new_topic_cmd(message: Message, message_service: MessageService): 106 | """New topic command handler""" 107 | user_id = str(message.from_user.id) 108 | message_service.clear_messages(user_id) 109 | await message_service.send_message( 110 | message.from_user.id, 111 | "new_topic", 112 | message 113 | ) 114 | 115 | @router.message(Command("image")) 116 | @router.message(F.text.contains("🎨")) 117 | async def image_cmd(message: Message, message_service: MessageService): 118 | """Image generation command handler""" 119 | await message_service.send_message( 120 | message.from_user.id, 121 | "image_prompt", 122 | message 123 | ) -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # AI Assistant Bot v2.0 🤖 2 | 3 | A powerful Telegram bot that combines multiple AI models to provide intelligent conversations, code assistance, and image generation. 4 | 5 | ## Features ✨ 6 | 7 | ### Multiple AI Models 8 | - **GPT-4** 🤖 - Advanced language model from OpenAI 9 | - **Gemini** ✨ - Google's latest AI model 10 | - **GPT-4O** 🔮 - Optimized GPT-4 version 11 | - **GPT-4O-mini** 🎯 - Faster, lighter version 12 | - **Claude 3.5 Sonnet** 🎭 - Anthropic's latest model 13 | - **Claude 3.5 Haiku** 🎋 - Fast and efficient version 14 | 15 | ### Core Features 16 | - Multi-language support (English 🇬🇧, Russian 🇷🇺, Ukrainian 🇺🇦) 17 | - Code block formatting with syntax highlighting 18 | - Image generation capabilities 19 | - Smart conversations with context awareness 20 | - User preferences persistence 21 | - Chat statistics tracking 22 | 23 | ## Setup 🛠️ 24 | 25 | ### Prerequisites 26 | - Python 3.8 or higher 27 | - Telegram Bot Token 28 | - API keys for AI services: 29 | - OpenAI API key 30 | - Google Gemini API key 31 | - Anthropic API key 32 | 33 | ### Installation 34 | 35 | 1. Clone the repository: 36 | ```bash 37 | git clone https://github.com/yourusername/ai-assistant-bot.git 38 | cd ai-assistant-bot 39 | ``` 40 | 41 | 2. Install dependencies: 42 | ```bash 43 | pip install -r requirements.txt 44 | ``` 45 | 46 | 3. Create `.env` file with your API keys: 47 | ```env 48 | BOT_TOKEN=your_telegram_bot_token 49 | OPENAI_API_KEY=your_openai_api_key 50 | GEMINI_API_KEY=your_gemini_api_key 51 | ANTHROPIC_API_KEY=your_anthropic_api_key 52 | ``` 53 | 54 | 4. Run the bot: 55 | ```bash 56 | python main.py 57 | ``` 58 | 59 | ## Usage 💡 60 | 61 | ### Available Commands 62 | - `/start` - Initialize the bot 63 | - `/help` - Show available commands 64 | - `/model` - Change AI model 65 | - `/language` - Change interface language 66 | - `/newtopic` - Start new conversation 67 | - `/image` - Generate images 68 | - `/stats` - Show chat statistics 69 | - `/about` - About the bot 70 | 71 | ### Quick Access Buttons 72 | - ❓ Help 73 | - 🔄 Change Model 74 | - 🌐 Language 75 | - ℹ️ About 76 | - 🆕 New Topic 77 | - 🎨 Generate Image 78 | 79 | ### Code Formatting 80 | The bot supports code blocks with syntax highlighting for multiple languages: 81 | - Python 82 | - C++ 83 | - JavaScript 84 | - HTML/CSS 85 | - Java 86 | - Rust 87 | - Go 88 | - And more... 89 | 90 | Example: 91 | \```python 92 | def hello_world(): 93 | print("Hello, World!") 94 | \``` 95 | 96 | ### Image Generation 97 | Use the `/image` command followed by your description to generate images: 98 | ``` 99 | /image A futuristic city at night with neon lights 100 | ``` 101 | 102 | ## Features in Detail 📝 103 | 104 | ### AI Models 105 | - **GPT-4**: Best for complex tasks and detailed explanations 106 | - **Gemini**: Excellent for general-purpose conversations 107 | - **GPT-4O**: Optimized for better performance 108 | - **GPT-4O-mini**: Fast responses for simple queries 109 | - **Claude Sonnet**: Advanced reasoning and analysis 110 | - **Claude Haiku**: Quick and efficient responses 111 | 112 | ### Language Support 113 | - 🇬🇧 English: Full support with all features 114 | - 🇷🇺 Russian: Complete localization 115 | - 🇺🇦 Ukrainian: Full interface translation 116 | 117 | ### Message Handling 118 | - Smart message chunking for long responses 119 | - Code block preservation 120 | - Inline code formatting 121 | - Anti-flood protection 122 | - Error handling and retries 123 | 124 | ### User Experience 125 | - Persistent user preferences 126 | - Chat history management 127 | - Response time tracking 128 | - Usage statistics 129 | - Model-specific optimizations 130 | 131 | ## Technical Details 🔧 132 | 133 | ### Architecture 134 | - Built with aiogram 3.15.0 135 | - Asynchronous design 136 | - Modular service structure 137 | - State management system 138 | - Message queue implementation 139 | 140 | ### Storage 141 | - User preferences persistence 142 | - Chat history management 143 | - Statistics tracking 144 | - Model usage metrics 145 | 146 | ### Security 147 | - API key protection 148 | - Content safety checks 149 | - Error message sanitization 150 | - Rate limiting 151 | 152 | ## Contributing 🤝 153 | 154 | Contributions are welcome! Please feel free to submit a Pull Request. 155 | 156 | ## License 📄 157 | 158 | This project is licensed under the MIT License - see the LICENSE file for details. 159 | 160 | ## Support 💬 161 | 162 | If you have any questions or need help, feel free to: 163 | - Open an issue 164 | - Contact @tr3ble on Telegram 165 | - Send an email to tr3ble@outlook.com 166 | 167 | ## Acknowledgments 🙏 168 | 169 | - OpenAI for GPT-4 170 | - Google for Gemini 171 | - Anthropic for Claude 172 | - The aiogram community 173 | -------------------------------------------------------------------------------- /src/bot/message_templates.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Any 2 | 3 | message_templates: Dict[str, Dict[str, str]] = { 4 | 'en': { 5 | 'start': """👋 Hello! I'm a smart AI assistant. 6 | 7 | I can help you with various tasks using powerful AI models: 8 | • GPT-4 🤖 - Advanced language model 9 | • Gemini ✨ - Google's latest AI 10 | • GPT-4O 🔮 - Optimized GPT-4 11 | • GPT-4O-mini 🎯 - Faster version 12 | • Claude 3.5 Sonnet 🎭 - Anthropic's latest model 13 | • Claude 3.5 Haiku 🎋 - Fast and efficient 14 | 15 | Features: 16 | • Multi-language support 🌐 17 | • Image generation 🎨 18 | • Smart conversations 💭 19 | 20 | Use the buttons below to interact with me!""", 21 | 'help': """🔍 Available Commands: 22 | 23 | /start - Start bot 24 | /help - Show this help 25 | /about - About bot 26 | /language - Change language 27 | /model - Change AI model 28 | /newtopic - Start new topic 29 | /image - Generate image 30 | /stats - Show chat statistics 31 | 32 | You can also use the buttons below for quick access.""", 33 | 'about': """🤖 AI Assistant Bot 34 | 35 | This bot combines multiple powerful AI models: 36 | • OpenAI GPT-4 37 | • Google Gemini 38 | • GPT-4O 39 | • GPT-4O-mini 40 | • Claude 3.5 Sonnet 41 | • Claude 3.5 Haiku 42 | 43 | Version: 2.0 44 | Developer: @your_username""", 45 | 'stats': """📊 Chat Statistics 46 | 47 | Current model: {model} 48 | Messages in history: {messages}/{max_messages} 49 | Estimated tokens: {tokens}/{max_tokens} 50 | 51 | Use /newtopic to clear history""", 52 | 'new_topic': '🆕 Starting a new topic!', 53 | 'image_prompt': '🎨 Please describe the image you want to generate:', 54 | 'image_error': '❌ An error occurred during image generation:', 55 | 'language_confirmation': "🌐 Language has been set to English", 56 | 'language_selection': "🌍 Choose your language:", 57 | 'processing': "⏳ Processing your request...", 58 | 'error': "❌ An error occurred. Please try again later.", 59 | 'model_switched_gpt4': "🤖 Switched to GPT-4 model", 60 | 'model_switched_gemini': "✨ Switched to Gemini model", 61 | 'model_switched_gpt4o': "🔮 Switched to GPT-4O model", 62 | 'model_switched_gpt4o_mini': "🎯 Switched to GPT-4O-mini model", 63 | 'model_switched_claude': "🎭 Switched to Claude 3.5 Sonnet model", 64 | 'model_switched_claude_haiku': "🎋 Switched to Claude 3.5 Haiku model", 65 | 'current_model': "Current model: {model}", 66 | 'safety_error': """⚠️ I cannot process this request due to content safety guidelines. 67 | Please rephrase your message to be more appropriate.""" 68 | }, 69 | 'ru': { 70 | 'start': """👋 Привет! Я умный ИИ-ассистент. 71 | 72 | Я могу помочь с разными задачами, используя мощные модели ИИ: 73 | • GPT-4 🤖 - Продвинутая языковая модель 74 | • Gemini ✨ - Новейший ИИ от Google 75 | • GPT-4O 🔮 - Оптимизированный GPT-4 76 | • GPT-4O-mini 🎯 - Быстрая версия 77 | • Claude 3.5 Sonnet 🎭 - Новейшая модель от Anthropic 78 | • Claude 3.5 Haiku 🎋 - Быстрая и эффективная 79 | 80 | Возможности: 81 | • Поддержка разных языков 🌐 82 | • Генерация изображений 🎨 83 | • Умные диалоги 💭 84 | 85 | Используйте кнопки ниже для взаимодействия!""", 86 | 'help': """🔍 Доступные команды: 87 | 88 | /start - Запустить бота 89 | /help - Показать помощь 90 | /about - О боте 91 | /language - Сменить язык 92 | /model - Сменить модель ИИ 93 | /newtopic - Начать новую тему 94 | /image - Создать изображение 95 | /stats - Показать статистику чата 96 | 97 | Также можно использовать кнопки ниже для быстрого доступа.""", 98 | 'about': """🤖 ИИ-Ассистент Бот 99 | 100 | Этот бот объединяет несколько мощных моделей ИИ: 101 | • OpenAI GPT-4 102 | • Google Gemini 103 | • GPT-4O 104 | • GPT-4O-mini 105 | • Claude 3.5 Sonnet 106 | • Claude 3.5 Haiku 107 | 108 | Версия: 2.0 109 | Разработчик: @your_username""", 110 | 'stats': """📊 Статистика чата 111 | 112 | Текущая модель: {model} 113 | Сообщений в истории: {messages}/{max_messages} 114 | Примерное количество токенов: {tokens}/{max_tokens} 115 | 116 | Используйте /newtopic для очистки истории""", 117 | 'new_topic': '🆕 Начинаем новую тему!', 118 | 'image_prompt': '🎨 Пожалуйста, опишите изображение, которое хотите создать:', 119 | 'image_error': '❌ Произошла ошибка при создании изображения:', 120 | 'language_confirmation': "🌐 Язык изменён на русский", 121 | 'language_selection': "🌍 Выберите язык:", 122 | 'processing': "⏳ Обрабатываю ваш запрос...", 123 | 'error': "❌ Произошла ошибка. Пожалуйста, попробуйте позже.", 124 | 'model_switched_gpt4': "🤖 Переключено на модель GPT-4", 125 | 'model_switched_gemini': "✨ Переключено на модель Gemini", 126 | 'model_switched_gpt4o': "🔮 Переключено на модель GPT-4O", 127 | 'model_switched_gpt4o_mini': "🎯 Переключено на модель GPT-4O-mini", 128 | 'model_switched_claude': "🎭 Переключено на модель Claude 3.5 Sonnet", 129 | 'model_switched_claude_haiku': "🎋 Переключено на модель Claude 3.5 Haiku", 130 | 'current_model': "Текущая модель: {model}", 131 | 'safety_error': """⚠️ Я не могу обработать этот запрос из-за правил безопасности. 132 | Пожалуйста, перефразируйте сообщение.""" 133 | }, 134 | 'ua': { 135 | 'start': """👋 Привіт! Я розумний ІІ-асистент. 136 | 137 | Я можу допомогти з різними завданнями, використовуючи потужні моделі ІІ: 138 | • GPT-4 🤖 - Просунута мовна модель 139 | • Gemini ✨ - Найновіший ІІ від Google 140 | • GPT-4O 🔮 - Оптимізований GPT-4 141 | • GPT-4O-mini 🎯 - Швидка версія 142 | • Claude 3.5 Sonnet 🎭 - Найновіша модель від Anthropic 143 | • Claude 3.5 Haiku 🎋 - Швидка та ефективна 144 | 145 | Можливості: 146 | • Підтримка різних мов 🌐 147 | • Генерація зображень 🎨 148 | • Розумні діалоги 💭 149 | 150 | Використовуйте кнопки нижче для взаємодії!""", 151 | 'help': """🔍 Доступні команди: 152 | 153 | /start - Запустити бота 154 | /help - Показати довідку 155 | /about - Про бота 156 | /language - Змінити мову 157 | /model - Змінити модель ІІ 158 | /newtopic - Почати нову тему 159 | /image - Створити зображення 160 | /stats - Показати статистику чату 161 | 162 | Також можна використовувати кнопки нижче для швидкого доступу.""", 163 | 'about': """🤖 ІІ-Асистент Бот 164 | 165 | Цей бот поєднує кілька потужних моделей ІІ: 166 | • OpenAI GPT-4 167 | • Google Gemini 168 | • GPT-4O 169 | • GPT-4O-mini 170 | • Claude 3.5 Sonnet 171 | • Claude 3.5 Haiku 172 | 173 | Версія: 2.0 174 | Розробник: @your_username""", 175 | 'stats': """📊 Статистика чату 176 | 177 | Поточна модель: {model} 178 | Повідомлень в історії: {messages}/{max_messages} 179 | Приблизна кількість токенів: {tokens}/{max_tokens} 180 | 181 | Використовуйте /newtopic для очищення історії""", 182 | 'new_topic': '🆕 Починаємо нову тему!', 183 | 'image_prompt': '🎨 Будь ласка, опишіть зображення, яке хочете створити:', 184 | 'image_error': '❌ Сталася помилка при створенні зображення:', 185 | 'language_confirmation': "🌐 Мову змінено на українську", 186 | 'language_selection': "🌍 Оберіть мову:", 187 | 'processing': "⏳ Обробляю ваш запит...", 188 | 'error': "❌ Сталася помилка. Будь ласка, спробуйте пізніше.", 189 | 'model_switched_gpt4': "🤖 Переключено на модель GPT-4", 190 | 'model_switched_gemini': "✨ Переключено на модель Gemini", 191 | 'model_switched_gpt4o': "🔮 Переключено на модель GPT-4O", 192 | 'model_switched_gpt4o_mini': "🎯 Переключено на модель GPT-4O-mini", 193 | 'model_switched_claude': "🎭 Переключено на модель Claude 3.5 Sonnet", 194 | 'model_switched_claude_haiku': "🎋 Переключено на модель Claude 3.5 Haiku", 195 | 'current_model': "Поточна модель: {model}", 196 | 'safety_error': """⚠️ Я не можу обробити цей запит через правила безпеки. 197 | Будь ласка, перефразуйте повідомлення.""" 198 | } 199 | } 200 | 201 | __all__ = ['message_templates'] -------------------------------------------------------------------------------- /src/services/message_service.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Optional, List, Union 2 | from aiogram import types 3 | from aiogram.types import InlineKeyboardMarkup, ReplyKeyboardMarkup 4 | from src.bot.message_templates import message_templates 5 | from src.services.storage_service import StorageService 6 | import os 7 | import logging 8 | import asyncio 9 | from collections import deque 10 | import html 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | class MessageService: 15 | def __init__(self, storage: StorageService, max_messages: int = 20, max_tokens: int = 4000): 16 | self.message_templates = message_templates 17 | self.user_languages: Dict[int, str] = {} 18 | self.messages: Dict[str, List[Dict[str, str]]] = {} 19 | self.storage = storage 20 | self.max_messages = max_messages 21 | self.max_tokens = max_tokens 22 | self.message_queue: Dict[int, deque] = {} 23 | self.MAX_MESSAGE_LENGTH = 4096 # Telegram's max message length 24 | 25 | def _escape_html(self, text: str) -> str: 26 | """Escape HTML special characters""" 27 | return html.escape(text) 28 | 29 | def _format_code_block(self, code: str, language: str = "") -> str: 30 | """Format code block with proper HTML tags and syntax highlighting hint""" 31 | escaped_code = self._escape_html(code) 32 | if language: 33 | return f'
{escaped_code}
' 34 | return f'
{escaped_code}
' 35 | 36 | def _format_inline_code(self, code: str) -> str: 37 | """Format inline code with proper HTML tags""" 38 | escaped_code = self._escape_html(code) 39 | return f'{escaped_code}' 40 | 41 | def _split_long_message(self, text: str) -> List[str]: 42 | """Split long messages into chunks respecting code blocks and markdown""" 43 | if len(text) <= self.MAX_MESSAGE_LENGTH: 44 | return [text] 45 | 46 | chunks = [] 47 | current_chunk = "" 48 | in_code_block = False 49 | 50 | lines = text.split('\n') 51 | 52 | for line in lines: 53 | # Check if this line would exceed the limit 54 | if len(current_chunk) + len(line) + 1 > self.MAX_MESSAGE_LENGTH: 55 | if in_code_block: 56 | current_chunk += '' 57 | chunks.append(current_chunk) 58 | current_chunk = "" 59 | if in_code_block: 60 | current_chunk += '
'
 61 |                     
 62 |             if line.strip().startswith('```'):
 63 |                 in_code_block = not in_code_block
 64 |                 if in_code_block:
 65 |                     current_chunk += '
'
 66 |                 else:
 67 |                     current_chunk += '
' 68 | else: 69 | current_chunk += line + '\n' 70 | 71 | if current_chunk: 72 | if in_code_block: 73 | current_chunk += '
' 74 | chunks.append(current_chunk) 75 | 76 | return chunks 77 | 78 | async def _send_message_safe(self, 79 | message: types.Message, 80 | text: str, 81 | reply_markup: Optional[Union[InlineKeyboardMarkup, ReplyKeyboardMarkup]] = None, 82 | retry_count: int = 3 83 | ) -> bool: 84 | """Send message with retries and error handling""" 85 | for attempt in range(retry_count): 86 | try: 87 | await message.answer( 88 | text, 89 | reply_markup=reply_markup, 90 | parse_mode="HTML" 91 | ) 92 | return True 93 | except Exception as e: 94 | if attempt == retry_count - 1: # Last attempt 95 | logger.error(f"Failed to send message after {retry_count} attempts: {str(e)}") 96 | # Try without HTML formatting as last resort 97 | try: 98 | plain_text = text.replace('
', '').replace('
', '') 99 | plain_text = plain_text.replace('', '').replace('', '') 100 | await message.answer(plain_text, reply_markup=reply_markup) 101 | return True 102 | except Exception as e2: 103 | logger.error(f"Failed to send plain text message: {str(e2)}") 104 | return False 105 | await asyncio.sleep(1) # Wait before retry 106 | return False 107 | 108 | async def send_message( 109 | self, 110 | user_id: int, 111 | message_key_or_text: str, 112 | message: types.Message, 113 | reply_markup: Union[InlineKeyboardMarkup, ReplyKeyboardMarkup, None] = None, 114 | is_response: bool = False, 115 | **kwargs 116 | ) -> None: 117 | """Send message to user using template with optional formatting and keyboard""" 118 | # If it's a template key, get the template 119 | if not is_response: 120 | language = self.get_user_language(user_id) 121 | text = self.get_message(message_key_or_text, language) 122 | 123 | # Format message if kwargs provided 124 | if kwargs: 125 | text = text.format(**kwargs) 126 | else: 127 | # If it's a direct response, use the text as is 128 | text = message_key_or_text 129 | 130 | # Handle inline code blocks (between single backticks) 131 | while '`' in text and text.count('`') >= 2: 132 | start = text.find('`') 133 | end = text.find('`', start + 1) 134 | if start != -1 and end != -1: 135 | code = text[start + 1:end] 136 | text = text[:start] + self._format_inline_code(code) + text[end + 1:] 137 | 138 | # Handle code blocks for Telegram formatting 139 | if "```" in text: 140 | # Split text by code blocks 141 | parts = text.split("```") 142 | formatted_text = parts[0] # First part (before any code block) 143 | 144 | for i in range(1, len(parts), 2): 145 | if i < len(parts): 146 | # Extract code and language (if specified) 147 | code_part = parts[i].strip() 148 | if code_part and "\n" in code_part: 149 | first_line = code_part.split("\n")[0] 150 | if first_line.strip() in ["cpp", "c++", "python", "js", "javascript", "html", "css", "java", "rust", "go"]: 151 | code = "\n".join(code_part.split("\n")[1:]) 152 | lang = first_line.strip() 153 | else: 154 | code = code_part 155 | lang = "" 156 | else: 157 | code = code_part 158 | lang = "" 159 | 160 | # Format code block 161 | formatted_text += "\n" + self._format_code_block(code, lang) + "\n" 162 | 163 | # Add text between code blocks 164 | if i + 1 < len(parts): 165 | formatted_text += parts[i + 1] 166 | 167 | text = formatted_text 168 | 169 | # Split long messages 170 | message_parts = self._split_long_message(text) 171 | 172 | # Send each part 173 | for part in message_parts: 174 | success = await self._send_message_safe(message, part, reply_markup if part == message_parts[-1] else None) 175 | if not success: 176 | logger.error(f"Failed to send message part to user {user_id}") 177 | # Send error message to user 178 | await self._send_message_safe( 179 | message, 180 | self.get_message("error", self.get_user_language(user_id)), 181 | None 182 | ) 183 | break 184 | # Small delay between parts to prevent flooding 185 | if len(message_parts) > 1: 186 | await asyncio.sleep(0.5) 187 | 188 | def load_user_state(self, user_id: str) -> None: 189 | """Load user state from storage""" 190 | data = self.storage.load_user_data(user_id) 191 | if data: 192 | if 'language' in data: 193 | self.user_languages[int(user_id)] = data['language'] 194 | if 'messages' in data: 195 | self.messages[user_id] = data['messages'] 196 | 197 | def save_user_state(self, user_id: str) -> None: 198 | """Save user state to storage""" 199 | data = { 200 | 'language': self.user_languages.get(int(user_id), 'en'), 201 | 'messages': self.messages.get(user_id, []) 202 | } 203 | self.storage.save_user_data(user_id, data) 204 | 205 | def get_user_language(self, user_id: int) -> str: 206 | """Get user's language or return default language (en)""" 207 | return self.user_languages.get(user_id, 'en') 208 | 209 | async def set_user_language(self, user_id: int, language: str) -> None: 210 | """Set user's preferred language and save state""" 211 | if language not in self.message_templates: 212 | raise ValueError(f"Unsupported language: {language}") 213 | self.user_languages[user_id] = language 214 | self.save_user_state(str(user_id)) 215 | 216 | def get_message(self, key: str, language: str) -> str: 217 | """Get message template by key and language""" 218 | try: 219 | return self.message_templates[language][key] 220 | except KeyError: 221 | # Fallback to English if translation is missing 222 | return self.message_templates['en'][key] 223 | 224 | def clear_messages(self, user_id: str) -> None: 225 | """Clear message history for user""" 226 | self.messages[user_id] = [] 227 | self.save_user_state(user_id) 228 | 229 | def add_message(self, user_id: str, role: str, content: str) -> None: 230 | """Add message to user's history and save state""" 231 | if user_id not in self.messages: 232 | self.messages[user_id] = [] 233 | 234 | # Add new message 235 | message = {"role": role, "content": content} 236 | self.messages[user_id].append(message) 237 | 238 | # Check message count limit 239 | if len(self.messages[user_id]) > self.max_messages: 240 | # Remove oldest messages but keep the system message if it exists 241 | if self.messages[user_id][0]["role"] == "system": 242 | self.messages[user_id] = [self.messages[user_id][0]] + self.messages[user_id][-self.max_messages+1:] 243 | else: 244 | self.messages[user_id] = self.messages[user_id][-self.max_messages:] 245 | 246 | # Estimate token count (rough estimation) 247 | total_tokens = sum(len(msg["content"].split()) * 1.3 for msg in self.messages[user_id]) 248 | 249 | # If exceeding token limit, remove oldest messages 250 | while total_tokens > self.max_tokens and len(self.messages[user_id]) > 1: 251 | # Keep system message if it exists 252 | if self.messages[user_id][0]["role"] == "system" and len(self.messages[user_id]) > 2: 253 | removed_msg = self.messages[user_id][1] 254 | self.messages[user_id].pop(1) 255 | else: 256 | removed_msg = self.messages[user_id][0] 257 | self.messages[user_id].pop(0) 258 | total_tokens -= len(removed_msg["content"].split()) * 1.3 259 | 260 | self.save_user_state(user_id) 261 | 262 | # Log message stats 263 | logger.info(f"User {user_id} messages: {len(self.messages[user_id])}, estimated tokens: {int(total_tokens)}") 264 | 265 | def get_messages(self, user_id: str) -> List[Dict[str, str]]: 266 | """Get all messages for user""" 267 | return self.messages.get(user_id, []) 268 | 269 | def get_message_stats(self, user_id: str) -> Dict[str, int]: 270 | """Get message statistics for user""" 271 | messages = self.messages.get(user_id, []) 272 | total_tokens = sum(len(msg["content"].split()) * 1.3 for msg in messages) 273 | return { 274 | "message_count": len(messages), 275 | "estimated_tokens": int(total_tokens), 276 | "max_messages": self.max_messages, 277 | "max_tokens": self.max_tokens 278 | } 279 | 280 | def clear_all_messages(self) -> None: 281 | """Clear all messages for all users""" 282 | self.messages = {} 283 | # Clear messages in storage for all users 284 | storage_dir = self.storage.storage_dir 285 | if os.path.exists(storage_dir): 286 | for filename in os.listdir(storage_dir): 287 | if filename.startswith("user_") and filename.endswith(".json"): 288 | user_id = filename[5:-5] # Extract user ID from filename 289 | data = self.storage.load_user_data(user_id) 290 | if data: 291 | data['messages'] = [] 292 | self.storage.save_user_data(user_id, data) --------------------------------------------------------------------------------