├── logo.png ├── banner.png ├── run.sh ├── requirements.txt ├── .gitignore ├── module_configs ├── gemini.json ├── groq.json ├── ms_copilot_designer.json ├── ms_copilot.json ├── lmao_ms_copilot.json └── lmao_chatgpt.json ├── test_doctests.py ├── .github └── workflows │ ├── test.yml │ ├── release.yml │ └── ci.yml ├── _version.py ├── Dockerfile ├── async_helper.py ├── .dockerignore ├── caption_command_handler.py ├── langs ├── zho.json ├── eng.json ├── fas.json ├── bel.json ├── rus.json ├── ind.json ├── vie.json ├── ukr.json ├── spa.json ├── tof.json └── fra.json ├── request_response_container.py ├── request_processor.py ├── logging_handler.py ├── messages.py ├── ms_copilot_designer_module.py ├── config.json ├── queue_container_helpers.py ├── main.py ├── groq_module.py ├── google_ai_module.py ├── README.md ├── lmao_process_loop.py └── ms_copilot_module.py /logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/F33RNI/GPT-Telegramus/HEAD/logo.png -------------------------------------------------------------------------------- /banner.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/F33RNI/GPT-Telegramus/HEAD/banner.png -------------------------------------------------------------------------------- /run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | venv/bin/python -m pip install -r requirements.txt --upgrade 3 | venv/bin/python main.py 4 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | git+https://github.com/F33RNI/EdgeGPT@main#egg=EdgeGPT 2 | git+https://github.com/F33RNI/md2tgmd.git@main 3 | llm-api-open~=2.2 4 | revChatGPT==6.8.6 5 | python-telegram-bot>=20.3 6 | openai>=0.26.4 7 | tiktoken>=0.2.0 8 | OpenAIAuth>=0.3.2 9 | 
requests>=2.28.1 10 | psutil>=5.9.4 11 | BingImageCreator>=0.5.0 12 | langdetect>=1.0.9 13 | google-generativeai >= 0.3.1 14 | packaging>=23.2 15 | requests>=2,<3 16 | groq>=0,<1 17 | httpx>=0,<1 18 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | *.zip 3 | test.py 4 | logs/ 5 | chats/ 6 | users.json 7 | conversations/ 8 | user_images 9 | EdgeGPT_cookies.json 10 | MSCopilot_cookies.json 11 | MS_Copilot_cookies.json 12 | Bard_cookies.json 13 | ChatGPT_cookies.json 14 | data/ 15 | Banner.psd 16 | Banner.xcf 17 | Banner.tif 18 | banner.psd 19 | banner.xcf 20 | banner.tif 21 | UsersRecover.py 22 | venv 23 | .venv 24 | env 25 | .env 26 | __pycache__ 27 | certificate.* 28 | private.* 29 | *.tar.gz 30 | -------------------------------------------------------------------------------- /module_configs/gemini.json: -------------------------------------------------------------------------------- 1 | { 2 | "__comment01__": "Api key from ", 3 | "api_key": "", 4 | 5 | "__comment02__": "AI config. 
See for more", 6 | "temperature": 0.9, 7 | "top_p": 1, 8 | "top_k": 1, 9 | "max_output_tokens": 2048, 10 | 11 | "__comment03__": "Minimum interval (seconds, can be float) between each Gemini request", 12 | "cooldown_seconds": 1, 13 | 14 | "__comment04__": "If needed, specify proxy in http://ip:port format (specify http even if it's https proxy)", 15 | "__comment05__": "Use http://username:password@ip:port format in case of proxy with authorization", 16 | "proxy": "", 17 | 18 | "__comment06__": "Response timeout (in seconds)", 19 | "timeout_seconds": 120, 20 | 21 | "__comment07__": "How often each user can send requests to this module (specify 0 to remove the restriction)", 22 | "user_cooldown_seconds": 0 23 | } 24 | -------------------------------------------------------------------------------- /test_doctests.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2023-2024 Fern Lane, Hanssen 3 | 4 | This file is part of the GPT-Telegramus distribution 5 | (see ) 6 | 7 | This program is free software: you can redistribute it and/or modify 8 | it under the terms of the GNU Affero General Public License as 9 | published by the Free Software Foundation, either version 3 of the 10 | License, or (at your option) any later version. 11 | 12 | This program is distributed in the hope that it will be useful, 13 | but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | GNU Affero General Public License for more details. 16 | 17 | You should have received a copy of the GNU Affero General Public License 18 | along with this program. If not, see . 
def load_tests(loader, tests, ignore):
    """unittest ``load_tests`` protocol hook.

    Collects all doctests found in the ``bot_handler`` module and adds them
    to the test suite that unittest discovered for this file.

    Args:
        loader: the ``unittest.TestLoader`` performing discovery (unused)
        tests: the ``unittest.TestSuite`` built so far
        ignore: the pattern passed to discovery (unused)

    Returns:
        unittest.TestSuite: the suite extended with the doctest suite
    """
    doctest_suite = doctest.DocTestSuite(bot_handler)
    tests.addTests(doctest_suite)
    return tests
def version_major() -> int:
    """Extract the major component of the package version.

    Returns:
        int: major version number parsed from ``__version__``
    """
    parsed = version.parse(__version__)
    return parsed.major
See for more info", 12 | "models": ["llama3-8b-8192", "llama3-70b-8192", "llama2-70b-4096", "mixtral-8x7b-32768", "gemma-7b-it"], 13 | 14 | "__comment05__": "If needed, specify proxy in http://ip:port format (specify http even if it's https proxy)", 15 | "__comment06__": "Use http://username:password@ip:port format in case of proxy with authorization", 16 | "proxy": "", 17 | 18 | "__comment07__": "Response timeout (in seconds)", 19 | "timeout_seconds": 120, 20 | 21 | "__comment08__": "How often each user can send requests to this module (specify 0 to remove the restriction)", 22 | "user_cooldown_seconds": 0 23 | } 24 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:labs 2 | 3 | # Dockerfile for GPT-Telegramus using multi-stage build 4 | # Use buildkit syntax labs 5 | # https://github.com/moby/buildkit 6 | 7 | FROM python:3.10-slim AS build 8 | RUN --mount=type=cache,target=/root/.cache/pip \ 9 | apt-get update && \ 10 | apt-get install -y git binutils build-essential && \ 11 | pip install pyinstaller 12 | 13 | # Install dependencies 14 | RUN --mount=type=cache,target=/root/.cache/pip \ 15 | --mount=type=bind,source=requirements.txt,target=requirements.txt \ 16 | pip install -r requirements.txt 17 | 18 | WORKDIR /src 19 | RUN --mount=type=bind,source=. 
\ 20 | pyinstaller --specpath /app --distpath /app/dist --workpath /app/work \ 21 | --hidden-import tiktoken_ext.openai_public \ 22 | --onefile --name telegramus main.py 23 | 24 | # Build application image 25 | FROM alpine 26 | ENV TELEGRAMUS_CONFIG_FILE "/app/config.json" 27 | ENV PATH /app:$PATH 28 | 29 | COPY --link --from=python:3.10-slim /li[b] /lib 30 | COPY --link --from=python:3.10-slim /lib6[4] /lib64 31 | COPY --link --from=build /app/dist/telegramus /app/telegramus 32 | 33 | WORKDIR /app 34 | COPY config.json module_configs/ langs/ /app/ 35 | 36 | # Run main script 37 | CMD ["telegramus"] 38 | -------------------------------------------------------------------------------- /module_configs/ms_copilot_designer.json: -------------------------------------------------------------------------------- 1 | { 2 | "__comment01__": "File for loading and saving cookies. Install cookie editor extension, for example:", 3 | "__comment02__": "", 4 | "__comment03__": "Go to log into your account, and ask Copilot about something", 5 | "__comment04__": "Open the extension, click Export on the bottom right, then Export as JSON", 6 | "__comment05__": "(This saves your cookies to clipboard). Create a new .json file and paste cookies into it", 7 | "__comment06__": "save file and specify path to it here. 
File also must have write permissions", 8 | "cookies_file": "EdgeGPT_cookies.json", 9 | 10 | "__comment07__": "If needed, specify proxy in http://ip:port format (specify http even if it's https proxy)", 11 | "__comment08__": "Use http://username:password@ip:port format in case of proxy with authorization", 12 | "proxy": "", 13 | 14 | "__comment09__": "Response timeout (in seconds)", 15 | "timeout_seconds": 240, 16 | 17 | "__comment10__": "How often each user can send requests to this module (specify 0 to remove the restriction)", 18 | "user_cooldown_seconds": 30 19 | } 20 | -------------------------------------------------------------------------------- /module_configs/ms_copilot.json: -------------------------------------------------------------------------------- 1 | { 2 | "__comment01__": "File for loading and saving cookies. Install cookie editor extension, for example:", 3 | "__comment02__": "", 4 | "__comment03__": "Go to log into your account, and ask Copilot about something", 5 | "__comment04__": "Open the extension, click Export on the bottom right, then Export as JSON", 6 | "__comment05__": "(This saves your cookies to clipboard). Create a new .json file and paste cookies into it", 7 | "__comment06__": "save file and specify path to it here. 
File also must have write permissions", 8 | "cookies_file": "EdgeGPT_cookies.json", 9 | 10 | "__comment07__": "Default conversation style (precise / balanced / creative)", 11 | "conversation_style_type_default": "balanced", 12 | 13 | "__comment08__": "If needed, specify proxy in http://ip:port format (specify http even if it's https proxy)", 14 | "__comment09__": "Use http://username:password@ip:port format in case of proxy with authorization", 15 | "proxy": "", 16 | 17 | "__comment10__": "Response timeout (in seconds)", 18 | "timeout_seconds": 240, 19 | 20 | "__comment11__": "How often each user can send requests to this module (specify 0 to remove the restriction)", 21 | "user_cooldown_seconds": 0 22 | } 23 | -------------------------------------------------------------------------------- /async_helper.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2023-2024 Fern Lane 3 | 4 | This file is part of the GPT-Telegramus distribution 5 | (see ) 6 | 7 | This program is free software: you can redistribute it and/or modify 8 | it under the terms of the GNU Affero General Public License as 9 | published by the Free Software Foundation, either version 3 of the 10 | License, or (at your option) any later version. 11 | 12 | This program is distributed in the hope that it will be useful, 13 | but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | GNU Affero General Public License for more details. 16 | 17 | You should have received a copy of the GNU Affero General Public License 18 | along with this program. If not, see . 
# Strong references to fire-and-forget tasks. The event loop keeps only weak
# references to tasks, so without this set a task scheduled by async_helper()
# could be garbage-collected before it finishes running
_background_tasks = set()


def async_helper(awaitable_) -> None:
    """Runs async function inside sync
    TODO: Get rid of this

    If an event loop is already running in the current thread, the coroutine
    is scheduled on it as a fire-and-forget task (a strong reference is kept
    until the task finishes, then dropped). Otherwise, a new event loop is
    created and the coroutine is run to completion.

    Args:
        awaitable_ (_type_): coroutine
    """
    # Try to get current event loop
    try:
        loop = asyncio.get_running_loop()
    except RuntimeError:
        loop = None

    # A loop is already running in this thread -> we can't block on it,
    # so schedule the coroutine as a background task
    if loop and loop.is_running():
        task = loop.create_task(awaitable_)
        # Keep a strong reference so the task isn't garbage-collected
        # mid-execution, and discard it automatically once the task is done
        _background_tasks.add(task)
        task.add_done_callback(_background_tasks.discard)

    # We need new event loop
    else:
        asyncio.run(awaitable_)
44 | *.manifest 45 | *.spec 46 | 47 | # Installer logs 48 | pip-log.txt 49 | pip-delete-this-directory.txt 50 | 51 | # Unit test / coverage reports 52 | htmlcov/ 53 | .tox/ 54 | .coverage 55 | .cache 56 | nosetests.xml 57 | coverage.xml 58 | 59 | # Translations 60 | *.mo 61 | *.pot 62 | 63 | # Django stuff: 64 | *.log 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Virtual environment 73 | .env 74 | .venv/ 75 | venv/ 76 | 77 | # PyCharm 78 | .idea 79 | 80 | # Python mode for VIM 81 | .ropeproject 82 | **/.ropeproject 83 | 84 | # Vim swap files 85 | **/*.swp 86 | 87 | # VS Code 88 | .vscode/ 89 | 90 | # GitHub 91 | .github/ 92 | 93 | # Documents 94 | README.md 95 | Screenshots/ 96 | Banner.png 97 | Logo.png 98 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Build binary files for release 2 | 3 | on: 4 | release: 5 | types: 6 | - 'created' 7 | - 'edited' 8 | 9 | jobs: 10 | build-linux: 11 | strategy: 12 | fail-fast: false 13 | matrix: 14 | platform: 15 | - os: linux 16 | arch: amd64 17 | - os: linux 18 | arch: arm64 19 | 20 | runs-on: 'ubuntu-latest' 21 | 22 | steps: 23 | - name: Check out code 24 | uses: actions/checkout@v4 25 | - name: Set up QEMU 26 | uses: docker/setup-qemu-action@v3 27 | - name: Set up Docker Buildx 28 | uses: docker/setup-buildx-action@v3 29 | - name: Build Docker Image 30 | id: build 31 | uses: docker/build-push-action@v5 32 | with: 33 | context: . 
34 | platforms: ${{ matrix.platform.os }}/${{ matrix.platform.arch }} 35 | outputs: docker-output 36 | cache-from: type=gha 37 | cache-to: type=gha,mode=max 38 | - run: mkdir dist 39 | - run: cp docker-output/app/telegramus dist/telegramus-${{ matrix.platform.os}}-${{ matrix.platform.arch }} 40 | - name: Release 41 | uses: softprops/action-gh-release@v1 42 | with: 43 | files: | 44 | dist/* 45 | 46 | build: 47 | strategy: 48 | fail-fast: false 49 | matrix: 50 | os: 51 | - 'windows-latest' 52 | - 'ubuntu-latest' 53 | - 'macos-latest' 54 | 55 | runs-on: ${{ matrix.os }} 56 | 57 | steps: 58 | - uses: actions/checkout@v4 59 | - uses: actions/setup-python@v5 60 | with: 61 | python-version: '3.10' 62 | - run: pip install -r requirements.txt pyinstaller 63 | - run: pyinstaller --hidden-import tiktoken_ext.openai_public --onefile --name telegramus-${{ matrix.os }} main.py 64 | - name: Release 65 | uses: softprops/action-gh-release@v1 66 | with: 67 | files: | 68 | dist/* -------------------------------------------------------------------------------- /caption_command_handler.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2023-2024 Fern Lane, Hanssen 3 | 4 | This file is part of the GPT-Telegramus distribution 5 | (see ) 6 | 7 | This program is free software: you can redistribute it and/or modify 8 | it under the terms of the GNU Affero General Public License as 9 | published by the Free Software Foundation, either version 3 of the 10 | License, or (at your option) any later version. 11 | 12 | This program is distributed in the hope that it will be useful, 13 | but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | GNU Affero General Public License for more details. 16 | 17 | You should have received a copy of the GNU Affero General Public License 18 | along with this program. If not, see . 
class CaptionCommandHandler(CommandHandler):
    """``CommandHandler`` variant that also recognizes bot commands placed in a
    media caption (``message.caption``), not only in plain message text.
    """

    def check_update(
        self, update: object
    ) -> Optional[Union[bool, Tuple[List[str], Optional[Union[bool, FilterDataDict]]]]]:
        """Determines whether an update should be passed to this handler's :attr:`callback`.

        Args:
            update (:class:`telegram.Update` | :obj:`object`): Incoming update.

        Returns:
            :obj:`list`: The list of args for the handler.

        """
        if isinstance(update, Update) and update.effective_message:
            message = update.effective_message
            # Fall back to the caption (and its entities) for media messages
            text = message.text or message.caption
            entities = message.entities or message.caption_entities

            # Only treat this as a command if the very first entity is a
            # BOT_COMMAND anchored at offset 0 (i.e. the message starts with /cmd)
            if (
                entities
                and entities[0].type == MessageEntity.BOT_COMMAND
                and entities[0].offset == 0
                and text
                and message.get_bot()
            ):
                # Strip the leading "/" from the command entity span
                command = text[1 : entities[0].length]
                # Everything after the command token becomes the handler args
                args = text.split()[1:]
                # Split off an optional "@botname" suffix; appending our own
                # username guarantees command_parts[1] exists even for a bare
                # "/cmd", in which case it trivially matches below
                command_parts = command.split("@")
                command_parts.append(message.get_bot().username)

                # Reject commands we don't handle or that are addressed to a
                # different bot (e.g. "/cmd@other_bot" in a group chat)
                if not (
                    command_parts[0].lower() in self.commands
                    and command_parts[1].lower() == message.get_bot().username.lower()
                ):
                    return None

                # Defer to the configured filters; False (not None) signals
                # "matched the command but filtered out" per PTB semantics
                filter_result = self.filters.check_update(update)
                if filter_result:
                    return args, filter_result
                return False
        return None
Install cookie editor extension, for example:", 5 | "__comment03__": "", 6 | "__comment04__": "Go to log in and ask about something. Wait for response", 7 | "__comment05__": "Open the extension, click Export on the bottom right, then Export as JSON", 8 | "__comment06__": "(This saves your cookies to clipboard). Create a new .json file and paste cookies into it", 9 | "__comment07__": "save file and specify path to it here. File also must have write permissions", 10 | "cookies_file": "MS_Copilot_cookies.json", 11 | 12 | "__comment08__": "Path to browser executable or empty to auto-detect (ex. /usr/bin/google-chrome-stable)", 13 | "browser_executable_path": "", 14 | 15 | "__comment09__": "Major version of browser to pass as version_main argument or null to auto-detect (ex. 122)", 16 | "version_main_manual": null, 17 | 18 | "__comment10__": "Path to driver executable to pass as driver_executable_path argument or empty for auto", 19 | "__comment11__": "It's recommended ot leave this empty", 20 | "driver_executable_path": "", 21 | 22 | "__comment12__": "Set to true to enable proxy", 23 | "proxy_enabled": false, 24 | 25 | "__comment13__": "Proxy host (ip) as string", 26 | "proxy_host": "123.45.67.89", 27 | 28 | "__comment14__": "Proxy port as integer", 29 | "proxy_port": 8080, 30 | 31 | "__comment15__": "Proxy username or empty", 32 | "proxy_user": "", 33 | 34 | "__comment16__": "Proxy password or empty", 35 | "proxy_password": "", 36 | 37 | "__comment17__": "MS Copilot Web page address", 38 | "base_url": "https://copilot.microsoft.com/", 39 | 40 | "__comment18__": "Start browser in headless mode (without GUI)", 41 | "headless": true, 42 | 43 | "__comment19__": "Chrome arguments", 44 | "chrome_options": [ 45 | "--disable-infobars", 46 | "--ignore-ssl-errors=yes", 47 | "--ignore-certificate-errors", 48 | "--disable-default-apps", 49 | "--disable-notifications", 50 | "--disable-popup-window", 51 | "--no-sandbox", 52 | "--auto-open-devtools-for-tabs", 53 | 
"--dns-prefetch-disable", 54 | "--disable-gpu", 55 | "--window-size=1920x960" 56 | ], 57 | 58 | "__comment20__": "--headless= argument. Leave empty to use default value", 59 | "headless_mode": "old", 60 | 61 | "__comment21__": "Time (s) for automatically refresh page if no new prompts or responses (to update session)", 62 | "__comment22__": "Set to 0 to disable auto-refresher", 63 | "auto_refresh_interval": 300, 64 | 65 | "__comment23__": "User agent to prevent detection of headless chrome. Leave empty to disable it", 66 | "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36" 67 | } 68 | -------------------------------------------------------------------------------- /module_configs/lmao_chatgpt.json: -------------------------------------------------------------------------------- 1 | { 2 | "__comment01__": "Original config: ", 3 | 4 | "__comment02__": "File for loading and saving cookies. Install cookie editor extension, for example:", 5 | "__comment03__": "", 6 | "__comment04__": "Go to and ask ChatGPT about something", 7 | "__comment05__": "Open the extension, click Export on the bottom right, then Export as JSON", 8 | "__comment06__": "(This saves your cookies to clipboard). Create a new .json file and paste cookies into it", 9 | "__comment07__": "save file and specify path to it here. File also must have write permissions", 10 | "cookies_file": "ChatGPT_cookies.json", 11 | 12 | "__comment08__": "Path to browser executable or empty to auto-detect (ex. /usr/bin/google-chrome-stable)", 13 | "browser_executable_path": "", 14 | 15 | "__comment09__": "Major version of browser to pass as version_main argument or null to auto-detect (ex. 
122)", 16 | "version_main_manual": null, 17 | 18 | "__comment10__": "Path to driver executable to pass as driver_executable_path argument or empty for auto", 19 | "__comment11__": "It's recommended ot leave this empty", 20 | "driver_executable_path": "", 21 | 22 | "__comment12__": "Set to true to enable proxy", 23 | "proxy_enabled": false, 24 | 25 | "__comment13__": "Proxy host (ip) as string", 26 | "proxy_host": "123.45.67.89", 27 | 28 | "__comment14__": "Proxy port as integer", 29 | "proxy_port": 8080, 30 | 31 | "__comment15__": "Proxy username or empty", 32 | "proxy_user": "", 33 | 34 | "__comment16__": "Proxy password or empty", 35 | "proxy_password": "", 36 | 37 | "__comment17__": "ChatGPT Web page address", 38 | "base_url": "https://chat.openai.com/", 39 | 40 | "__comment18__": "Start browser in headless mode (without GUI)", 41 | "headless": true, 42 | 43 | "__comment19__": "Chrome arguments", 44 | "chrome_options": [ 45 | "--disable-infobars", 46 | "--ignore-ssl-errors=yes", 47 | "--ignore-certificate-errors", 48 | "--disable-default-apps", 49 | "--disable-notifications", 50 | "--disable-popup-window", 51 | "--no-sandbox", 52 | "--auto-open-devtools-for-tabs", 53 | "--dns-prefetch-disable", 54 | "--disable-gpu", 55 | "--window-size=1920x960" 56 | ], 57 | 58 | "__comment20__": "--headless= argument. Leave empty to use default value", 59 | "headless_mode": "old", 60 | 61 | "__comment21__": "Time (s) for automatically refresh page if no new prompts or responses (to update session)", 62 | "__comment22__": "Set to 0 to disable auto-refresher", 63 | "auto_refresh_interval": 120, 64 | 65 | "__comment23__": "User agent to prevent detection of headless chrome. 
Leave empty to disable it", 66 | "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36", 67 | 68 | "__comment24__": "Response timeout (in seconds)", 69 | "timeout_seconds": 120, 70 | 71 | "__comment25__": "How often each user can send requests to this module (specify 0 to remove the restriction)", 72 | "user_cooldown_seconds": 0 73 | } 74 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Build and Push Docker Image 2 | 3 | on: 4 | push: 5 | branches: 6 | - '*' 7 | tags: 8 | - '*' 9 | 10 | jobs: 11 | build: 12 | strategy: 13 | fail-fast: false 14 | matrix: 15 | platform: 16 | - os: linux 17 | arch: amd64 18 | - os: linux 19 | arch: arm64 20 | 21 | runs-on: ubuntu-latest 22 | 23 | steps: 24 | - id: lowercaseRepo 25 | uses: ASzc/change-string-case-action@v5 26 | with: 27 | string: ${{ github.repository }} 28 | - name: Check out code 29 | uses: actions/checkout@v4 30 | - name: Set up QEMU 31 | uses: docker/setup-qemu-action@v3 32 | - name: Set up Docker Buildx 33 | uses: docker/setup-buildx-action@v3 34 | - name: Login to GHCR 35 | uses: docker/login-action@v3 36 | with: 37 | registry: ghcr.io 38 | username: ${{ github.actor }} 39 | password: ${{ secrets.GITHUB_TOKEN }} 40 | - name: Docker metadata 41 | id: meta 42 | uses: docker/metadata-action@v5 43 | with: 44 | images: ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }} 45 | - name: Build Docker Image 46 | id: build 47 | uses: docker/build-push-action@v5 48 | with: 49 | context: . 
50 | labels: ${{ steps.meta.outputs.labels }} 51 | platforms: ${{ matrix.platform.os }}/${{ matrix.platform.arch }} 52 | outputs: type=image,name=ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }},push-by-digest=true,name-canonical=true,push=true 53 | cache-from: type=gha 54 | cache-to: type=gha,mode=max 55 | - name: Export digest 56 | run: | 57 | mkdir -p /tmp/digests 58 | digest="${{ steps.build.outputs.digest }}" 59 | touch "/tmp/digests/${digest#sha256:}" 60 | - name: Upload digest 61 | uses: actions/upload-artifact@v4 62 | with: 63 | name: digests-${{ matrix.platform.os }}-${{ matrix.platform.arch }} 64 | path: /tmp/digests/* 65 | if-no-files-found: error 66 | retention-days: 1 67 | 68 | merge: 69 | runs-on: ubuntu-latest 70 | needs: 71 | - build 72 | steps: 73 | - id: lowercaseRepo 74 | uses: ASzc/change-string-case-action@v5 75 | with: 76 | string: ${{ github.repository }} 77 | - name: Download digests 78 | uses: actions/download-artifact@v4 79 | with: 80 | path: /tmp/digests 81 | - name: Set up Docker Buildx 82 | uses: docker/setup-buildx-action@v3 83 | - name: Login to GHCR 84 | uses: docker/login-action@v3 85 | with: 86 | registry: ghcr.io 87 | username: ${{ github.actor }} 88 | password: ${{ secrets.GITHUB_TOKEN }} 89 | - name: Extract metadata (tags, labels) for Docker 90 | id: meta 91 | uses: docker/metadata-action@v5 92 | with: 93 | flavor: latest=auto 94 | tags: | 95 | type=edge,branch=next 96 | type=ref,event=branch 97 | type=ref,event=tag 98 | type=semver,pattern={{version}} 99 | type=semver,pattern={{major}}.{{minor}} 100 | type=semver,pattern={{major}} 101 | images: ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }} 102 | - name: Create manifest list and push 103 | working-directory: /tmp/digests 104 | run: | 105 | docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) 
| join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ 106 | $(find -maxdepth 2 -mindepth 2 -type f -printf "ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}@sha256:%f ") 107 | - name: Inspect image 108 | run: | 109 | docker buildx imagetools inspect ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ steps.meta.outputs.version }} -------------------------------------------------------------------------------- /langs/zho.json: -------------------------------------------------------------------------------- 1 | { 2 | "language_name": "🇨🇳 简体中文", 3 | "language_icon": "🇨🇳", 4 | "language_select_error": "❌ 在选择语言时遇到错误!\n\n{error_text}", 5 | "language_select": "请选择一个界面语言", 6 | "language_changed": "🇨🇳 你已经选择了简体中文\n\n你可以通过输入 /lang 指令来随时修改语言", 7 | "start_message": "你好, 👋 朋友! 👋\n\n📄 GPT-Telegramus 版本 {version}\n\n💜 GPT-Telegramus 作者: Fern (aka F3RNI)\n💻 贡献者:\n🤍 Keepalove (Azarell) (Sprav04ka)\n🤍 Hanssen\n🤍 https://github.com/F33RNI/GPT-Telegramus#-contributors\n\n😺 GitHub page: https://github.com/F33RNI/GPT-Telegramus\n\n🎵 通过购买我的音乐来支持这个项目: https://f3rni.bandcamp.com", 8 | "help_message": "📄 /start - 欢迎信息和机器人版本\n❓ /help - 展示这条消息\n↕ /module - 变更要对话的模块\n🧹 /clear - 清除对话历史\n🌎 /lang - 变更语言\n🆔 /chatid - 显示你的 chat_id\n\n现在,发送给我任何消息以开始 💬", 9 | "help_message_admin": "管理员指令:\n\n💬 /queue - 展示请求队列\n🔃 /restart [模块, 可选] - 重新启动模块(或所有模块),重新加载配置,语言和命令\n👤 /users - 展示所有用户列表\n🔨 /ban [reason] - 封禁一个用户,附带理由(可选)\n🔓 /unban - 通过 id 解除对用户的封禁\n📢 /broadcast - 给所有人发送消息,除了被封禁的用户", 10 | "empty_request_module_changed": "✅ 你的模块已变更为 {module_name}\n现在以消息来向我发送你的请求", 11 | "media_group_response": "已为请求 \"{request_text}\" 生成图片", 12 | "permissions_deny": "❌ 你没有使用这个指令的权限!", 13 | "queue_overflow": "队列已满,稍后再试 ⏳", 14 | "queue_empty": "🗑 队列为空", 15 | "queue_accepted": "✅ 向模块 {module_name} 的请求已经添加到队列中。\n位置: {queue_size}/{queue_max} ⏳", 16 | "response_error": "❌ 错误: {error_text}\n\n清除对话历史并稍后再重试", 17 | "empty_message": "⚠️ 消息为空!查看日志以获取详情", 18 | "regenerate_error_not_last": "❌ 错误!只能重新生成最后一个请求!", 19 | 
"regenerate_error_empty": "❌ 错误!请求为空!", 20 | "continue_error_not_last": "❌ 错误!只能继续最后一个请求!", 21 | "stop_error_not_last": "❌ 错误!只能放弃最后一个请求!", 22 | "stop_error": "❌ 错误!无法停止生成!", 23 | "response_link_format": "\n📄 {source_name}: {link}", 24 | "suggestion_format": "💡 {suggestion}", 25 | "suggestion_error": "❌ 可能只能使用最后一条消息中提出的请求!", 26 | "users_read_error": "❌ 读取或解析用户列表时出错!", 27 | "users_admin": "封禁? 管理? 语言 模块 ID 名字- 请求数\n\n{users_data}", 28 | "users_total_stats": "用户:{users_num},已封禁:{banned_num},管理员:{admins_num}", 29 | "restarting": "🙏 正在重启中...\n请稍等", 30 | "restarting_done": "{reload_logs}\n✅ 重启已完成", 31 | "chat_cleared": "✅ 已为 {module_name} 清除对话历史", 32 | "clear_error": "❌ 在清除历史时遇到错误!\n\n{error_text}", 33 | "clear_select_module": "选择你想清理对话历史的模块\n或忽略这条消息", 34 | "module_select_module": "你当前选择的模块:{current_module}\n选择想要对话的模块,\n或忽略这条消息", 35 | "user_cooldown_error": "❌ 在请求 {time_formatted} 模组之前,请等待 {module_name}!", 36 | "hours": "h", 37 | "minutes": "m", 38 | "seconds": "s", 39 | "ban_message_admin": "✅ 已封禁用户:{banned_user}\n\n理由:{ban_reason}", 40 | "ban_no_user_id": "❌ 错误!请指定 user_id", 41 | "ban_message_user": "❌ 你不在白名单中或你已被封禁!\n\n理由:{ban_reason}", 42 | "ban_reason_default": "理由未指定", 43 | "unban_message_admin": "✅ 已解除封禁用户 {unbanned_user}", 44 | "broadcast_no_message": "❌ 没有要广播的消息!", 45 | "broadcast": "📢 {message}", 46 | "broadcast_initiated": "⏳ 正在广播中... 
请稍等,并不要发送新的消息!", 47 | "broadcast_done": "✅ 广播完成!消息共发送用户数:\n{broadcast_ok_users}", 48 | "style_changed": "✅ 对话风格变更至 {changed_style}", 49 | "style_change_error": "❌ 变更风格时遇到错误!\n\n{error_text}", 50 | "style_select": "你选择的聊天风格:{current_style}\n为 Bing 选择新的聊天风格,\n或忽略这条消息", 51 | "style_precise": "📏 精确", 52 | "style_balanced": "⚖️ 平衡", 53 | "style_creative": "🎨 创意", 54 | "model_changed": "✅ 模块 {module_name} 的模型已更改为 {changed_model}", 55 | "model_select": "您当前的 {module_name} 模块模型:{current_model}\n选择 {module_name} 模块的新模型,\n或忽略此消息", 56 | "model_change_error": "❌ 更改模型时出错!\n\n{error_text}", 57 | "model_no_models": "❌ 您无法更改当前模块的模型!", 58 | "button_model_change": "⚙️ 更改模型", 59 | "button_stop_generating": "🛑 停止生成", 60 | "button_continue": "⏩ 继续", 61 | "button_regenerate": "🔄 重新生成", 62 | "button_clear": "🧹 清除历史", 63 | "button_module": "↕️ 变更模组", 64 | "button_style_change": "⚙️ 变更风格", 65 | "modules": { 66 | "lmao_chatgpt": { 67 | "icon": "💬", 68 | "name": "ChatGPT (LMAO API)" 69 | }, 70 | "lmao_ms_copilot": { 71 | "icon": "🟦", 72 | "name": "MS Copilot (LMAO API)" 73 | }, 74 | "chatgpt": { 75 | "icon": "💬", 76 | "name": "ChatGPT" 77 | }, 78 | "dalle": { 79 | "icon": "🖼", 80 | "name": "DALL-E" 81 | }, 82 | "ms_copilot": { 83 | "icon": "🔵", 84 | "name": "MS Copilot" 85 | }, 86 | "ms_copilot_designer": { 87 | "icon": "🎨", 88 | "name": "MS Copilot designer" 89 | }, 90 | "gemini": { 91 | "icon": "♊", 92 | "name": "Gemini" 93 | }, 94 | "groq": { 95 | "icon": "🔴", 96 | "name": "Groq" 97 | } 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /request_response_container.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2023-2024 Fern Lane 3 | 4 | This file is part of the GPT-Telegramus distribution 5 | (see ) 6 | 7 | This program is free software: you can redistribute it and/or modify 8 | it under the terms of the GNU Affero General Public License as 9 | published by the Free Software 
Foundation, either version 3 of the 10 | License, or (at your option) any later version. 11 | 12 | This program is distributed in the hope that it will be useful, 13 | but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | GNU Affero General Public License for more details. 16 | 17 | You should have received a copy of the GNU Affero General Public License 18 | along with this program. If not, see . 19 | """ 20 | 21 | from typing import List, Tuple 22 | 23 | from telegram import InlineKeyboardMarkup 24 | 25 | 26 | PROCESSING_STATE_IN_QUEUE = 0 27 | PROCESSING_STATE_INITIALIZING = 1 28 | PROCESSING_STATE_ACTIVE = 2 29 | PROCESSING_STATE_DONE = 3 30 | PROCESSING_STATE_TIMED_OUT = 4 31 | PROCESSING_STATE_CANCEL = 5 32 | PROCESSING_STATE_CANCELING = 6 33 | PROCESSING_STATE_ABORT = 7 34 | 35 | # State to string 36 | PROCESSING_STATE_NAMES = ["Waiting", "Starting", "Active", "Done", "Timed out", "Canceling", "Canceling", "Abort"] 37 | 38 | 39 | class RequestResponseContainer: 40 | def __init__( 41 | self, 42 | user_id: int, 43 | reply_message_id: int, 44 | module_name: str, 45 | request_text: str or None = None, 46 | request_image: bytes or None = None, 47 | request_timestamp: str or None = None, 48 | response_text: str or None = None, 49 | response_images: List[str] or None = None, 50 | response_suggestions: List[Tuple[str, str]] or None = None, 51 | response_timestamp: str or None = None, 52 | response_send_timestamp_last: float = 0.0, 53 | processing_state: int = PROCESSING_STATE_IN_QUEUE, 54 | message_id: int = -1, 55 | reply_markup: InlineKeyboardMarkup or None = None, 56 | pid: int = 0, 57 | ) -> None: 58 | """_summary_ 59 | 60 | Args: 61 | user_id (int): ID of the user 62 | reply_message_id (int): ID of user's message (to reply on) 63 | module_name (str): name of requested module ("lmao_chatgpt", "gemini", etc.) 
64 | request_text (str or None, optional): user's request text 65 | request_image (bytes or None, optional): user's request image as bytes 66 | request_timestamp (str or None, optional): formatted time of the request 67 | response_text (str or None, optional): module's response text 68 | response_images (List[str] or None, optional): links to images of module's response 69 | response_suggestions (List[Tuple[str, str]] or None, optional): list of suggested requests (ID, text) 70 | response_timestamp (str or None, optional): formatted time of final response 71 | response_send_timestamp_last (float, optional): timestamp of last response (for editing aka live replying) 72 | processing_state (int, optional): state of container. Defaults to PROCESSING_STATE_IN_QUEUE 73 | message_id (int, optional): id or response message (after sending it, for editing) 74 | reply_markup (InlineKeyboardMarkup or None, optional): message buttons 75 | pid (int, optional): PID of module's process 76 | """ 77 | # Required args 78 | self.user_id = user_id 79 | self.reply_message_id = reply_message_id 80 | self.module_name = module_name 81 | 82 | # Request 83 | self.request_text = request_text 84 | self.request_image = request_image 85 | self.request_timestamp = request_timestamp 86 | 87 | # Response 88 | self.response_text = response_text 89 | self.response_images = [] 90 | if response_images is not None: 91 | for response_image in response_images: 92 | self.response_images.append(response_image) 93 | self.response_suggestions = [] 94 | if response_suggestions is not None: 95 | for response_suggestion in response_suggestions: 96 | self.response_suggestions.append(response_suggestion) 97 | self.response_timestamp = response_timestamp 98 | 99 | # Other args 100 | self.response_send_timestamp_last = response_send_timestamp_last 101 | self.processing_state = processing_state 102 | self.message_id = message_id 103 | self.reply_markup = reply_markup 104 | self.pid = pid 105 | 106 | 
self.processing_start_timestamp = 0.0 107 | self.error = False 108 | 109 | # Used by BotHandler to split large message into smaller ones 110 | self.response_next_chunk_start_index = 0 111 | self.response_sent_len = 0 112 | 113 | # Unique ID for container to get it from queue (it's address) 114 | self.id = -1 115 | -------------------------------------------------------------------------------- /request_processor.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2023-2024 Fern Lane 3 | 4 | This file is part of the GPT-Telegramus distribution 5 | (see ) 6 | 7 | This program is free software: you can redistribute it and/or modify 8 | it under the terms of the GNU Affero General Public License as 9 | published by the Free Software Foundation, either version 3 of the 10 | License, or (at your option) any later version. 11 | 12 | This program is distributed in the hope that it will be useful, 13 | but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | GNU Affero General Public License for more details. 16 | 17 | You should have received a copy of the GNU Affero General Public License 18 | along with this program. If not, see . 
import logging
import multiprocessing
from typing import Dict

import logging_handler
import messages
import users_handler
import request_response_container
import module_wrapper_global
from async_helper import async_helper
from bot_sender import send_message_async
from queue_container_helpers import get_container_from_queue, put_container_to_queue


def request_processor(
    config: Dict,
    messages_: messages.Messages,
    users_handler_: users_handler.UsersHandler,
    logging_queue: multiprocessing.Queue,
    request_response_queue: multiprocessing.Queue,
    lock: multiprocessing.Lock,
    request_id: int,
    module: module_wrapper_global.ModuleWrapperGlobal,
) -> None:
    """Processes request to any module
    This method should be called from multiprocessing as process

    Args:
        config (Dict): global config
        messages_ (messages.Messages): initialized messages handler
        users_handler_ (users_handler.UsersHandler): initialized users handler
        logging_queue (multiprocessing.Queue): logging queue from logging handler
        request_response_queue (multiprocessing.Queue): queue of request-response containers
        lock (multiprocessing.Lock): lock from queue handler
        request_id (int): ID of container
        module (module_wrapper_global.ModuleWrapperGlobal): requested module
    """
    # Setup logging for current process
    logging_handler.worker_configurer(logging_queue)
    logging.info("request_processor started")

    # Get request and validate it BEFORE reading its attributes
    # (previously user_id was read before the None check, so a missing container
    # raised AttributeError instead of logging the retrieval error)
    request_ = get_container_from_queue(request_response_queue, lock, request_id)
    if request_ is None:
        logging.error("Error retrieving container from the queue")
        return
    user_id = request_.user_id

    try:
        # Send initial message (e.g. "Loading...") if configured
        if config.get("telegram").get("response_initial_message"):
            request_.response_text = config.get("telegram").get("response_initial_message")
            async_helper(send_message_async(config.get("telegram"), messages_, request_, end=False))

        request_.response_text = ""

        # Set active state
        request_.processing_state = request_response_container.PROCESSING_STATE_ACTIVE

        user = users_handler_.get_user(user_id)

        # Increment number of requests for statistics
        users_handler_.set_key(
            user_id, f"requests_{module.name}", users_handler_.get_key(0, f"requests_{module.name}", 0, user=user) + 1
        )
        users_handler_.set_key(
            user_id, "requests_total", users_handler_.get_key(0, "requests_total", 0, user=user) + 1
        )

        # Save request data (for regenerate function)
        users_handler_.set_key(user_id, "request_last", request_.request_text)
        if request_.request_image:
            users_handler_.save_request_image(user_id, request_.request_image)
        else:
            users_handler_.save_request_image(user_id, None)
        users_handler_.set_key(user_id, "reply_message_id_last", request_.reply_message_id)

        # Update container in the queue
        put_container_to_queue(request_response_queue, lock, request_)

        # Process request (blocks until the module finishes)
        module.process_request(request_)

    # Error during processing request: report a localized error message to the user
    except Exception as e:
        request_.error = True
        lang_id = users_handler_.get_key(user_id, "lang_id", "eng")
        error_text = str(e)[:1000]
        request_.response_text = messages_.get_message("response_error", lang_id=lang_id).format(error_text=error_text)
        async_helper(send_message_async(config.get("telegram"), messages_, request_, end=True))
        logging.error("Error processing request", exc_info=e)

    # Set done state
    request_.processing_state = request_response_container.PROCESSING_STATE_DONE

    # Finally, update container in the queue
    put_container_to_queue(request_response_queue, lock, request_)
import datetime
import logging
import logging.handlers
import multiprocessing
import os
import sys
import traceback

# Logging level
LOGGING_LEVEL = logging.INFO

# Where to save log files
LOGS_DIR = "logs"

# Log entries are dropped if their message starts with any string from the list below
LOGS_IGNORE_PREFIXES = ["HTTP Request: POST https://api.telegram.org/bot"]

# Logging formatter
FORMATTER_FMT = "[%(asctime)s] [%(process)-7d] [%(levelname)-7s] %(message)s"
FORMATTER_DATEFMT = "%Y-%m-%d %H:%M:%S"


def worker_configurer(queue: multiprocessing.Queue, log_test_message: bool = True):
    """Routes this process's log records into the shared logging queue.
    Call this method in your process

    Args:
        queue (multiprocessing.Queue): logging queue
        log_test_message (bool, optional): set to False to disable test log message with process PID.
            Defaults to True
    """
    # Remove all current handlers
    # NOTE: iterate over a copy — removeHandler() mutates the handlers list,
    # and removing while iterating the live list skips every other handler
    root_logger = logging.getLogger()
    for handler in root_logger.handlers[:]:
        root_logger.removeHandler(handler)

    # Setup queue handler
    queue_handler = logging.handlers.QueueHandler(queue)
    root_logger.addHandler(queue_handler)
    root_logger.setLevel(LOGGING_LEVEL)

    # Log test message
    if log_test_message:
        logging.info(f"Logging setup is complete for process with PID: {multiprocessing.current_process().pid}")


class LoggingHandler:
    """Centralized log sink: worker processes push records into self.queue
    (see worker_configurer) and the listener writes them to file and console
    """

    def __init__(self):
        # Logging queue (unbounded)
        self.queue = multiprocessing.Queue(-1)

    def configure_and_start_listener(self):
        """Initializes file/console logging and listens on the queue (blocking).
        Put None into self.queue to stop the listener
        """
        # Create logs directory if not exists
        if not os.path.exists(LOGS_DIR):
            os.makedirs(LOGS_DIR)

        # Create logs formatter
        log_formatter = logging.Formatter(FORMATTER_FMT, datefmt=FORMATTER_DATEFMT)

        # Setup logging into file (one file per run, named by start time)
        file_handler = logging.FileHandler(
            os.path.join(LOGS_DIR, datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S") + ".log"), encoding="utf-8"
        )
        file_handler.setFormatter(log_formatter)

        # Setup logging into console
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setFormatter(log_formatter)

        # Add all handlers and setup level
        root_logger = logging.getLogger()
        root_logger.addHandler(file_handler)
        root_logger.addHandler(console_handler)
        root_logger.setLevel(LOGGING_LEVEL)

        # Start queue listener
        while True:
            try:
                # Get logging record (blocks until one is available)
                record = self.queue.get()

                # None is the stop sentinel
                if record is None:
                    break

                # Skip empty messages
                if record.message is None:
                    continue

                # Ignore some record messages
                ignore = False
                for ignore_str in LOGS_IGNORE_PREFIXES:
                    if str(record.message).startswith(ignore_str):
                        ignore = True
                        break
                if ignore:
                    continue

                # Handle current logging record
                logger = logging.getLogger(record.name)
                logger.handle(record)

            # Ignore Ctrl+C (call queue.put(None) to stop this listener)
            except (SystemExit, KeyboardInterrupt):
                pass

            # The logging machinery itself failed — report to stderr and keep listening
            except Exception:
                print("Logging error: ", file=sys.stderr)
                traceback.print_exc(file=sys.stderr)
👋\n\n📄 GPT-Telegramus version {version}\n\n💜 GPT-Telegramus author: Fern (aka F3RNI)\n💻 Contributors:\n🤍 Keepalove (Azarell) (Sprav04ka)\n🤍 Hanssen\n🤍 https://github.com/F33RNI/GPT-Telegramus#-contributors\n\n😺 GitHub page: https://github.com/F33RNI/GPT-Telegramus\n\n🎵 Support the project by buying my music: https://f3rni.bandcamp.com", 8 | "help_message": "📄 /start - Welcome message and bot version\n❓ /help - Show this message\n↕ /module - Change module to chat with\n🧹 /clear - Clear chat history\n🌎 /lang - Change the language\n🆔 /chatid - Show your chat_id\n\nNow to get started send me any message 💬", 9 | "help_message_admin": "Admin commands:\n\n💬 /queue - Show requests queue\n🔃 /restart [module, optional] - Restart module (or all of them), reload config, languages and commands\n👤 /users - Show list of all users\n🔨 /ban [reason] - Ban a user by their id with reason (optional)\n🔓 /unban - Unban a user by their id\n📢 /broadcast - Send text message to everyone except banned users", 10 | "empty_request_module_changed": "✅ Your module has been changed to {module_name}\nNow send me your request as a message", 11 | "media_group_response": "Generated images for \"{request_text}\" request", 12 | "permissions_deny": "❌ You do not have permission for this command!", 13 | "queue_overflow": "Queue is full. Try again later ⏳", 14 | "queue_empty": "🗑 Queue is empty", 15 | "queue_accepted": "✅ Request to the {module_name} module added to the queue.\nPosition: {queue_size}/{queue_max} ⏳", 16 | "response_error": "❌ Error: {error_text}\n\nClear the history and make another request or try again later", 17 | "empty_message": "⚠️ Empty! See logs for details", 18 | "regenerate_error_not_last": "❌ Error! Can only regenerate the last request!", 19 | "regenerate_error_empty": "❌ Error! Empty request!", 20 | "continue_error_not_last": "❌ Error! Can only continue the last request!", 21 | "stop_error_not_last": "❌ Error! Can only abort the last request!", 22 | "stop_error": "❌ Error! 
Can not stop generating!", 23 | "response_link_format": "\n📄 {source_name}: {link}", 24 | "suggestion_format": "💡 {suggestion}", 25 | "suggestion_error": "❌ It is possible to use the suggested request only from the last message!", 26 | "users_read_error": "❌ Error reading or parsing list of users!", 27 | "users_admin": "Banned? Admin? Language Module ID Name - Requests\n\n{users_data}", 28 | "users_total_stats": "Users: {users_num}, banned {banned_num}, admins: {admins_num}", 29 | "restarting": "🙏 Restarting in progress...\nPlease wait", 30 | "restarting_done": "{reload_logs}\n✅ The restart is completed", 31 | "chat_cleared": "✅ Chat history cleared for {module_name}", 32 | "clear_error": "❌ Error clearing chat history!\n\n{error_text}", 33 | "clear_select_module": "Select the module whose chat history you want to clear,\nor ignore this message", 34 | "module_select_module": "Your current module: {current_module}\nSelect the module to chat with,\nor ignore this message", 35 | "user_cooldown_error": "❌ Please wait {time_formatted} before sending request to the {module_name} module!", 36 | "hours": "h", 37 | "minutes": "m", 38 | "seconds": "s", 39 | "ban_message_admin": "✅ Banned user: {banned_user}\n\nReason: {ban_reason}", 40 | "ban_no_user_id": "❌ Error! Please specify user_id", 41 | "ban_message_user": "❌ You're not whitelisted or you have been banned!\n\nReason: {ban_reason}", 42 | "ban_reason_default": "Reason not specified", 43 | "unban_message_admin": "✅ Unbanned user: {unbanned_user}", 44 | "broadcast_no_message": "❌ No message to broadcast!", 45 | "broadcast": "📢 {message}", 46 | "broadcast_initiated": "⏳ Broadcasting in progress... Please wait and don't send new messages!", 47 | "broadcast_done": "✅ Broadcast completed! 
Messages sent to users:\n{broadcast_ok_users}", 48 | "style_changed": "✅ Conversation style changed to {changed_style}", 49 | "style_change_error": "❌ Error changing conversation style!\n\n{error_text}", 50 | "style_select": "Your conversation style: {current_style}\nSelect new conversation style of Bing,\nor ignore this message", 51 | "style_precise": "📏 Precise", 52 | "style_balanced": "⚖️ Balanced", 53 | "style_creative": "🎨 Creative", 54 | "model_changed": "✅ Model from module {module_name} changed to {changed_model}", 55 | "model_select": "Your model of {module_name} module: {current_model}\nSelect new model of {module_name} module,\nor ignore this message", 56 | "model_change_error": "❌ Error changing model!\n\n{error_text}", 57 | "model_no_models": "❌ You can't change model of current module!", 58 | "button_model_change": "⚙️ Change model", 59 | "button_stop_generating": "🛑 Stop generating", 60 | "button_continue": "⏩ Continue", 61 | "button_regenerate": "🔄 Regenerate", 62 | "button_clear": "🧹 Clear history", 63 | "button_module": "↕️ Change module", 64 | "button_style_change": "⚙️ Change style", 65 | "modules": { 66 | "lmao_chatgpt": { 67 | "icon": "💬", 68 | "name": "ChatGPT (LMAO API)" 69 | }, 70 | "lmao_ms_copilot": { 71 | "icon": "🟦", 72 | "name": "MS Copilot (LMAO API)" 73 | }, 74 | "chatgpt": { 75 | "icon": "💬", 76 | "name": "ChatGPT" 77 | }, 78 | "dalle": { 79 | "icon": "🖼", 80 | "name": "DALL-E" 81 | }, 82 | "ms_copilot": { 83 | "icon": "🔵", 84 | "name": "MS Copilot" 85 | }, 86 | "ms_copilot_designer": { 87 | "icon": "🎨", 88 | "name": "MS Copilot designer" 89 | }, 90 | "gemini": { 91 | "icon": "♊", 92 | "name": "Gemini" 93 | }, 94 | "groq": { 95 | "icon": "🔴", 96 | "name": "Groq" 97 | } 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /langs/fas.json: -------------------------------------------------------------------------------- 1 | { 2 | "language_name": "فارسی 🇮🇷", 3 | "language_icon": "🇮🇷", 4 | 
"language_select_error": "❌ خطا در انتخاب زبان!\n\n{error_text}", 5 | "language_select": "لطفاً یک زبان رابط انتخاب کنید", 6 | "language_changed": "🇮🇷 شما فارسی را انتخاب کرده اید\n\nمی توانید زبان را در هر زمان با تایپ کردن این دستور تغییر دهید /lang", 7 | "start_message": "سلام, 👋 درود! 👋\n\n📄 GPT-Telegramus version {version}\n\n💜 سازنده GPT-Telegramus: Fern (aka F3RNI)\n💻 همکاران:\n🤍 Keepalove (Azarell) (Sprav04ka)\n🤍 Hanssen\n🤍 https://github.com/F33RNI/GPT-Telegramus#-contributors\n\n😺 صفحه GitHub: https://github.com/F33RNI/GPT-Telegramus\n\n🎵 با خرید آهنگ من از پروژه حمایت کنید: https://f3rni.bandcamp.com", 8 | "help_message": "📄 /start - پیام خوش آمد گویی و نسخه ربات\n❓ /help - نمایش این پیام\n↕ /module - ماژول را برای چت با آن تغییر دهید\n🧹 /clear - پاک کردن تاریخچه پیام\n🌎 /lang - زبان را تغییر دهید\n🆔 /chatid - شناسه چت خود را نشان دهید\n\nاکنون برای شروع هر پیامی را برای من ارسال کنید 💬", 9 | "help_message_admin": "دستورات ادمین:\n\n💬 /queue - نمایش صف درخواست ها\n🔃 /restart [ماژول، اختیاری] - راه‌اندازی مجدد ماژول (یا همه آنها)، بارگذاری مجدد تنظیمات، زبان‌ها و دستورات\n👤 /users - نمایش لیست همه کاربران\n🔨 /ban [reason] - ممنوعیت کاربر با کد شناسایی به دلیل (اختیاری)\n🔓 /unban - لغو ممنوعیت یک کاربر با کد شناسایی آن\n📢 /broadcast - برای همه به جز کاربران ممنوعه پیام ارسال کنید", 10 | "empty_request_module_changed": "✅ ماژول شما تغییر یافت به {module_name}\nاکنون درخواست خود را به عنوان پیام برای من ارسال کنید", 11 | "media_group_response": "تولید تصاویر برای \"{request_text}\" درخواست", 12 | "permissions_deny": "❌ شما مجوز این دستور را ندارید!", 13 | "queue_overflow": "صف پر است بعدا دوباره تلاش کنید ⏳", 14 | "queue_empty": "🗑 صف خالی است", 15 | "queue_accepted": "✅ درخواست برای {module_name} ماژول به صف اضافه شد.\nموقعیت: {queue_size}/{queue_max} ⏳", 16 | "response_error": "❌ خطا: {error_text}\n\nتاریخچه را پاک کنید و درخواست دیگری بدهید یا بعداً دوباره امتحان کنید", 17 | "empty_message": "⚠️ خالی! 
برای جزئیات بیشتر به گزارش‌ها مراجعه کنید", 18 | "regenerate_error_not_last": "❌ خطا! فقط می تواند آخرین درخواست را بازسازی کند!", 19 | "regenerate_error_empty": "❌ خطا! درخواست خالی!", 20 | "continue_error_not_last": "❌ خطا! فقط آخرین درخواست را می توان ادامه داد!", 21 | "stop_error_not_last": "❌ خطا! فقط می توان آخرین درخواست را لغو کرد!", 22 | "stop_error": "❌ خطا! نمی توان تولید را متوقف کرد!", 23 | "response_link_format": "\n📄 {source_name}: {link}", 24 | "suggestion_format": "💡 {suggestion}", 25 | "suggestion_error": "❌ ممکن است از درخواست ارائه شده فقط از آخرین پیام استفاده شود!", 26 | "users_read_error": "❌ خطا در خواندن یا تجزیه لیست کاربران!", 27 | "users_admin": "ممنوع شد؟ مدیر؟ شناسه کاربری ماژول زبان نام - درخواست ها\n\n{users_data}", 28 | "users_total_stats": "کاربران: {users_num}، مسدود‌شده‌ها: {banned_num}، مدیران: {admins_num}", 29 | "restarting": "🙏 شروع مجدد در حال انجام است...\nلطفا صبور باشید", 30 | "restarting_done": "{reload_logs}\n✅ راه اندازی مجدد کامل شد", 31 | "chat_cleared": "✅ سابقه چت پاک شد برای {module_name}", 32 | "clear_error": "❌ خطا در پاک کردن سابقه چت!\n\n{error_text}", 33 | "clear_select_module": "ماژولی را انتخاب کنید که تاریخچه چت آن را می خواهید پاک کنید,\nیا این پیام را نادیده بگیرید", 34 | "module_select_module": "ماژول فعلی شما: {current_module}\nماژول مورد نظر برای چت را انتخاب کنید,\nیا این پیام را نادیده بگیرید", 35 | "user_cooldown_error": "❌ لطفا صبور باشید {time_formatted} قبل از ارسال درخواست به {module_name} ماژول!", 36 | "hours": "h", 37 | "minutes": "m", 38 | "seconds": "s", 39 | "ban_message_admin": "✅ کاربر ممنوعه: {banned_user}\n\nدلیل: {ban_reason}", 40 | "ban_no_user_id": "❌ خطا! 
لطفا مشخص کنید user_id", 41 | "ban_message_user": "❌ شما در لیست سفید قرار نگرفته اید یا تحریم شده اید!\n\nدلیل: {ban_reason}", 42 | "ban_reason_default": "دلیل مشخص نشده است", 43 | "unban_message_admin": "✅ کاربر ممنوع نشده: {unbanned_user}", 44 | "broadcast_no_message": "❌ پیامی برای پخش وجود ندارد!", 45 | "broadcast": "📢 {message}", 46 | "broadcast_initiated": "⏳ پخش در حال انجام است... لطفاً صبر کنید و پیام جدید نفرستید!", 47 | "broadcast_done": "✅ پخش تمام شد! پیام های ارسالی به کاربران:\n{broadcast_ok_users}", 48 | "style_changed": "✅ تغیر نوع مکالمه به {changed_style}", 49 | "style_change_error": "❌ خطا در تغییر سبک مکالمه!\n\n{error_text}", 50 | "style_select": "سبک مکالمه شما: {current_style}\nسبک مکالمه جدید Bing را انتخاب کنید,\nیا این پیام را نادیده بگیرید", 51 | "style_precise": "📏 دقیق", 52 | "style_balanced": "⚖️ متعادل", 53 | "style_creative": "🎨 خلاق", 54 | "model_changed": "✅ مدل از ماژول {module_name} به {changed_model} تغییر کرد", 55 | "model_select": "مدل فعلی شما از ماژول {module_name}: {current_model}\nمدل جدید ماژول {module_name} را انتخاب کنید،\nیا این پیام را نادیده بگیرید", 56 | "model_change_error": "❌ خطا در تغییر مدل!\n\n{error_text}", 57 | "model_no_models": "❌ شما نمی‌توانید مدل ماژول فعلی را تغییر دهید!", 58 | "button_model_change": "⚙️ تغییر مدل", 59 | "button_stop_generating": "🛑 تولید را متوقف کنید", 60 | "button_continue": "⏩ ادامه", 61 | "button_regenerate": "🔄 بازسازی کنید", 62 | "button_clear": "🧹 پاک کردن تاریخچه", 63 | "button_module": "↕️ تغیر ماژول", 64 | "button_style_change": "⚙️ تغیر استایل", 65 | "modules": { 66 | "lmao_chatgpt": { 67 | "icon": "💬", 68 | "name": "ChatGPT (LMAO API)" 69 | }, 70 | "lmao_ms_copilot": { 71 | "icon": "🟦", 72 | "name": "MS Copilot (LMAO API)" 73 | }, 74 | "chatgpt": { 75 | "icon": "💬", 76 | "name": "ChatGPT" 77 | }, 78 | "dalle": { 79 | "icon": "🖼", 80 | "name": "DALL-E" 81 | }, 82 | "ms_copilot": { 83 | "icon": "🔵", 84 | "name": "MS Copilot" 85 | }, 86 | "ms_copilot_designer": { 87 | 
"icon": "🎨", 88 | "name": "MS Copilot designer" 89 | }, 90 | "gemini": { 91 | "icon": "♊", 92 | "name": "Gemini" 93 | }, 94 | "groq": { 95 | "icon": "🔴", 96 | "name": "Groq" 97 | } 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /messages.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2023-2024 Fern Lane 3 | 4 | This file is part of the GPT-Telegramus distribution 5 | (see ) 6 | 7 | This program is free software: you can redistribute it and/or modify 8 | it under the terms of the GNU Affero General Public License as 9 | published by the Free Software Foundation, either version 3 of the 10 | License, or (at your option) any later version. 11 | 12 | This program is distributed in the hope that it will be useful, 13 | but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | GNU Affero General Public License for more details. 16 | 17 | You should have received a copy of the GNU Affero General Public License 18 | along with this program. If not, see . 
import json
import logging
import os
from multiprocessing import Manager
from typing import Any

from users_handler import UsersHandler

# Required language file keys (basic schema validation for langs/*.json files)
_LANG_FILE_KEYS = [
    "language_name",
    "language_icon",
    "language_select_error",
    "language_select",
    "language_changed",
    "start_message",
    "help_message",
    "help_message_admin",
    "empty_request_module_changed",
    "media_group_response",
    "permissions_deny",
    "queue_overflow",
    "queue_empty",
    "queue_accepted",
    "response_error",
    "empty_message",
    "regenerate_error_not_last",
    "regenerate_error_empty",
    "continue_error_not_last",
    "stop_error_not_last",
    "stop_error",
    "response_link_format",
    "suggestion_format",
    "suggestion_error",
    "users_read_error",
    "users_admin",
    "users_total_stats",
    "restarting",
    "restarting_done",
    "chat_cleared",
    "clear_error",
    "clear_select_module",
    "module_select_module",
    "user_cooldown_error",
    "hours",
    "minutes",
    "seconds",
    "ban_message_admin",
    "ban_no_user_id",
    "ban_message_user",
    "ban_reason_default",
    "unban_message_admin",
    "broadcast_no_message",
    "broadcast",
    "broadcast_initiated",
    "broadcast_done",
    "style_changed",
    "style_change_error",
    "style_select",
    "style_precise",
    "style_balanced",
    "style_creative",
    "model_changed",
    "model_select",
    "model_change_error",
    "model_no_models",
    "button_model_change",
    "button_stop_generating",
    "button_continue",
    "button_regenerate",
    "button_clear",
    "button_module",
    "button_style_change",
    "modules",
]


class Messages:
    """Loads langs/*.json language files and resolves localized message strings,
    falling back to English ("eng") when a language or key is missing
    """

    def __init__(self, users_handler: UsersHandler) -> None:
        self.users_handler = users_handler

        # Manager makes self.langs shareable across processes
        self._manager = Manager()

        # self.langs contains all messages in format
        # {
        #     "lang_id": {
        #         "message_id": "Message text",
        #         ...
        #     },
        #     ...
        # }
        self.langs = self._manager.dict()

    def langs_load(self, langs_dir: str) -> None:
        """Loads and parses languages from json files into multiprocessing dictionary

        Args:
            langs_dir (str): path to directory with language files

        Raises:
            Exception: file read error / parse error / no keys
        """
        logging.info(f"Parsing {langs_dir} directory")
        for file in os.listdir(langs_dir):
            # Parse only .json files
            if file.lower().endswith(".json"):
                # Read file (language ID is the file name without extension, e.g. "eng")
                lang_id = os.path.splitext(os.path.basename(file))[0]
                logging.info(f"Loading file {file} as language with ID {lang_id}")
                file_path = os.path.join(langs_dir, file)
                with open(file_path, "r", encoding="utf-8") as file_:
                    lang_dict = json.loads(file_.read())

                # Check keys (just a basic file validation)
                keys = lang_dict.keys()
                for key in _LANG_FILE_KEYS:
                    if key not in keys:
                        raise Exception(f"No {key} key in {file} language file")

                # Append to loaded languages
                self.langs[lang_id] = lang_dict

        # Sort alphabetically
        # NOTE(review): this rebinds self.langs from the Manager proxy to a plain
        # dict, so later mutations are no longer shared across processes — confirm
        # langs_load() is always called before any child process is spawned
        self.langs = {key: value for key, value in sorted(self.langs.items())}

        # Print final number of languages
        logging.info(f"Loaded {len(self.langs)} languages")

    def get_message(
        self,
        message_key: str,
        lang_id: str or None = None,
        user_id: int or None = None,
        default_value: Any = None,
    ) -> Any:
        """Retrieves message from language

        Args:
            message_key (str): key from lang file
            lang_id (str or None, optional): ID of language or None to retrieve from user. Defaults to None.
            user_id (int or None, optional): ID of user to retrieve lang_id. Defaults to None.
            default_value (Any, optional): fallback value in case of no message_key. Defaults to None.

        Returns:
            Any: values of message_key or default_value
        """
        # Retrieve lang_id from user
        if lang_id is None and user_id is not None:
            lang_id = self.users_handler.get_key(user_id, "lang_id", "eng")

        # Fallback to English
        if lang_id is None:
            lang_id = "eng"

        # Get messages
        messages = self.langs.get(lang_id)

        # Check if lang_id exists or fallback to English
        # NOTE(review): if the "eng" language is also missing, `messages` stays None
        # and the .get() below raises AttributeError — verify eng.json always loads
        if messages is None:
            logging.warning(f"No language with ID {lang_id}")
            messages = self.langs.get("eng")

        return messages.get(message_key, default_value)
👋\n\n📄 GPT-Telegramus version {version}\n\n💜 Распрацоўка: Ферн (F3RNI)\n💻 Суаўтары:\n🤍 Keepalove (Azarell) (Sprav04ka)\n🤍 Hanssen\n🤍 https://github.com/F33RNI/GPT-Telegramus#-contributors\n\n😺 Страница GitHub: https://github.com/F33RNI/GPT-Telegramus\n\n🎵 Падтрымайце праект, набыўшы маю музыку: https://f3rni.bandcamp.com", 8 | "help_message": "📄 /start - Прывітальнае паведамленне і версія бота\n❓ /help - Паказаць гэта паведамленне\n↕ /module - Змяніць модуль для чата\n🧹 /clear - Ачысціць гісторыю чата\n🌎 /lang - Змяніць мову\n🆔 /chatid - Паказаць ваш chat_id\n\nЦяпер, каб пачаць, адпраўце мне любое паведамленне 💬", 9 | "help_message_admin": "Каманды адміністратара:\n\n💬 /queue - Паказаць чаргу запросаў\n🔃 /restart [модуль, неабавязкова] - Перазагрузка модуля (або ўсіх іх), перазагрузка канфігурацыі, моваў і каманд\n👤 /users - Паказаць спіс усіх карыстальнікаў\n🔨 /ban [reason] - Заблакаваць карыстальніка па яго id з указаннем прычыны (апцыянальна)\n🔓 /unban - Разблакаваць карыстальніка па id\n📢 /broadcast - Адправіць тэкставае паведамленне ўсем, акрамя заблакаваных карыстальнікаў", 10 | "empty_request_module_changed": "✅ Модуль быў зменены на {module_name}\nЦяпер прышліце мне ваш запрос паведамленнем", 11 | "media_group_response": "Згенераваныя відарысы па запросу \"{request_text}\"", 12 | "permissions_deny": "❌ Вы не маеце дазволаў на гэту каманду!", 13 | "queue_overflow": "Чарга перапоўненая. Паспрабуйце пазней ⏳", 14 | "queue_empty": "🗑 Чарга пустая", 15 | "queue_accepted": "✅ Запрос да {module_name} модулю дададзены да чаргі.\nПазіцыя ў чарге: {queue_size}/{queue_max} ⏳", 16 | "response_error": "❌ Памылка: {error_text}\n\nАчысціце гісторыю і зрабіце яшчэ адзін запрос альбо паўтарыце спробу пазней", 17 | "empty_message": "⚠️ Пустое паведамленне! Глядзіце логі для падрабязнай інфармацыі", 18 | "regenerate_error_not_last": "❌ Памылка! Можна згенераваць нанова толькі апошні запрос", 19 | "regenerate_error_empty": "❌ Памылка! 
Пусты запрос!", 20 | "continue_error_not_last": "❌ Памылка! Можна працягнуць толькі апошні запрос!", 21 | "stop_error_not_last": "❌ Памылка! Можна спыніць толькі апошні запрос!", 22 | "stop_error": "❌ Памылка! Немагчыма спыніць дэгенерацыю", 23 | "response_link_format": "\n📄 {source_name}: {link}", 24 | "suggestion_format": "💡 {suggestion}", 25 | "suggestion_error": "❌ Магчыма выкарыстоўваць прапанаваны запыт толькі з апошняга паведамлення!", 26 | "users_read_error": "❌ Памылка чытання ці аналізу спісу карыстальнікаў!", 27 | "users_admin": "Забанены? Адмін? Мова Модуль ID Имя - Запросаў\n\n{users_data}", 28 | "users_total_stats": "Карыстальнікі: {users_num}, забаненыя {banned_num}, адміністратары: {admins_num}", 29 | "restarting": "🙏 Выконваецца перазапуск...\nКалі ласка, пачакайце", 30 | "restarting_done": "{reload_logs}\n✅ Перазапуск выкананы", 31 | "chat_cleared": "✅ Гісторыя чата была ачышчана для {module_name}", 32 | "clear_error": "❌ Памылка ачысткі гісторыі чата!\n\n{error_text}", 33 | "clear_select_module": "Абярыце модуль, гісторыю чата якога хочаце ачысціць\nЦі праігнаруйце гэта паведамленне", 34 | "module_select_module": "Бягучы модуль: {current_module}\nАбярыце модуль для чата\nЦі праігнаруйце гэта паведамленне", 35 | "user_cooldown_error": "❌ Калі ласка, пачакайце {time_formatted}, перад тым, як адпраўляць запрос да {module_name}", 36 | "hours": "г", 37 | "minutes": "м", 38 | "seconds": "с", 39 | "ban_message_admin": "✅ Заблакаваны карыстальнік: {banned_user}\n\nПрычына: {ban_reason}", 40 | "ban_no_user_id": "❌ Памылка! Калі ласка, ўкажыце user_id", 41 | "ban_message_user": "❌ Вы не знаходзіцеся ў белым спісе ці былі заблакаваныя!\n\nПрычына: {ban_reason}", 42 | "ban_reason_default": "Прычына не вызначана", 43 | "unban_message_admin": "✅ Разблакаваны карыстальнік: {unbanned_user}", 44 | "broadcast_no_message": "❌ Няма паведамлення для вяшчання!", 45 | "broadcast": "📢 {message}", 46 | "broadcast_initiated": "⏳ Вяшчанне ў працэсе... 
Калі ласка, чакайце і не адпраўляйце новых паведамленняў!", 47 | "broadcast_done": "✅ Вяшчанне завершана! Паведамленні адпраўленыя карыстальнікам:\n{broadcast_ok_users}", 48 | "style_changed": "✅ Стыль размовы зменены на {changed_style}", 49 | "style_change_error": "❌ Памылка змены стылю размовы!\n\n{error_text}", 50 | "style_select": "Бягучы стыль размовы: {current_style}\nАбярыце новы стыль размовы Bing,\nЦі праігнаруйце гэта паведамленне", 51 | "style_precise": "📏 Дакладны", 52 | "style_balanced": "⚖️ Збалансаваны", 53 | "style_creative": "🎨 Творчы", 54 | "model_changed": "✅ Мадэль з мадуля {module_name} змяніўся на {changed_model}", 55 | "model_select": "Ваш мадэль {module_name} мадуля: {current_model}\nВыберыце новую мадэль {module_name} мадуля,\nабо ігнараваць гэтае паведамленне", 56 | "model_change_error": "❌ Памылка пры змене мадэлі!\n\n{error_text}", 57 | "model_no_models": "❌ Вы не можаце змяніць мадэль бягучага мадуля!", 58 | "button_model_change": "⚙️ Змяніць мадэль", 59 | "button_stop_generating": "🛑 Спыніць генерацыю", 60 | "button_continue": "⏩ Працягнуць", 61 | "button_regenerate": "🔄 Згенераваць зноў", 62 | "button_clear": "🧹 Ачысціць гісторыю", 63 | "button_module": "↕️ Змяніць модуль", 64 | "button_style_change": "⚙️ Змяніць стыль", 65 | "modules": { 66 | "lmao_chatgpt": { 67 | "icon": "💬", 68 | "name": "ChatGPT (LMAO API)" 69 | }, 70 | "lmao_ms_copilot": { 71 | "icon": "🟦", 72 | "name": "MS Copilot (LMAO API)" 73 | }, 74 | "chatgpt": { 75 | "icon": "💬", 76 | "name": "ChatGPT" 77 | }, 78 | "dalle": { 79 | "icon": "🖼", 80 | "name": "DALL-E" 81 | }, 82 | "ms_copilot": { 83 | "icon": "🔵", 84 | "name": "MS Copilot" 85 | }, 86 | "ms_copilot_designer": { 87 | "icon": "🎨", 88 | "name": "MS Copilot designer" 89 | }, 90 | "gemini": { 91 | "icon": "♊", 92 | "name": "Gemini" 93 | }, 94 | "groq": { 95 | "icon": "🔴", 96 | "name": "Groq" 97 | } 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /langs/rus.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "language_name": "🇷🇺 Русский", 3 | "language_icon": "🇷🇺", 4 | "language_select_error": "❌ Ошибка выбора языка!\n\n{error_text}", 5 | "language_select": "Пожалуйста, выберите язык интерфейса", 6 | "language_changed": "🇷🇺 Вы выбрали русский язык\n\nВы можете изменить язык в любое время прописав команду /lang", 7 | "start_message": "Привет, 👋 чумба! 👋\n\n📄 GPT-Telegramus версии {version}\n\n💜 Разработка: Ферн (F3RNI)\n💻 Соавторы:\n🤍 Keepalove (Azarell) (Sprav04ka)\n🤍 Hanssen\n🤍 https://github.com/F33RNI/GPT-Telegramus#-contributors\n\n😺 Страница GitHub: https://github.com/F33RNI/GPT-Telegramus\n\n🎵 Поддержите проект, купив мою музыку: https://f3rni.bandcamp.com", 8 | "help_message": "📄 /start - Приветственное сообщение и версия бота\n❓ /help - Показать это сообщение\n↕ /module - Изменить модуль для чата\n🧹 /clear - Очистить историю чата\n🌎 /lang - Изменить язык\n🆔 /chatid - Показать ваш chat_id\n\nТеперь, чтобы начать, отправьте мне любое сообщение 💬", 9 | "help_message_admin": "Команды администратора:\n\n💬 /queue - Показать очередь запросов\n🔃 /restart [модуль, опционально] - Перезапуск модуля (или всех), перезагрузка конфигурации, языков и команд\n👤 /users - Показать список всех пользователей\n🔨 /ban [reason] - Заблокировать пользователя по его id с указанием причины (опционально)\n🔓 /unban - Разблокировать пользователя по id\n📢 /broadcast - Отправить текстовое сообщение всем, кроме заблокированных пользователей", 10 | "empty_request_module_changed": "✅ Модуль был изменен на {module_name}\nТеперь пришлите мне ваш запрос в виде сообщения", 11 | "media_group_response": "Сгенерированные изображения по запросу \"{request_text}\"", 12 | "permissions_deny": "❌ У вас нет разрешения на эту команду!", 13 | "queue_overflow": "Очередь заполнена. 
Попробуйте позже ⏳", 14 | "queue_empty": "🗑 Очередь пуста", 15 | "queue_accepted": "✅ Запрос к {module_name} модулю добавлен в очередь.\nПозиция в очереди: {queue_size}/{queue_max} ⏳", 16 | "response_error": "❌ Ошибка: {error_text}\n\nОчистите историю и сделайте еще один запрос или повторите попытку позже", 17 | "empty_message": "⚠️ Пустое сообщение! Смотрите логи для подробной информации", 18 | "regenerate_error_not_last": "❌ Ошибка! Можно сгенерировать заново только последний запрос!", 19 | "regenerate_error_empty": "❌ Ошибка! Пустой запрос!", 20 | "continue_error_not_last": "❌ Ошибка! Можно продолжить только последний запрос!", 21 | "stop_error_not_last": "❌ Ошибка! Можно прервать только последний запрос!", 22 | "stop_error": "❌ Ошибка! Невозможно прекратить дегенерацию!", 23 | "response_link_format": "\n📄 {source_name}: {link}", 24 | "suggestion_format": "💡 {suggestion}", 25 | "suggestion_error": "❌ Возможно использовать предложенный запрос только из последнего сообщения!", 26 | "users_read_error": "❌ Ошибка чтения или анализа списка пользователей!", 27 | "users_admin": "Забанен? Админ? 
Язык Модуль ID Имя - Запросов\n\n{users_data}", 28 | "users_total_stats": "Пользователи: {users_num}, заблокированные: {banned_num}, администраторы: {admins_num}", 29 | "restarting": "🙏 Выполняется перезапуск...\nПожалуйста, подождите", 30 | "restarting_done": "{reload_logs}\n✅ Перезапуск завершен", 31 | "chat_cleared": "✅ История чата была очищена для {module_name}", 32 | "clear_error": "❌ Ошибка очистки истории чата!\n\n{error_text}", 33 | "clear_select_module": "Выберите модуль, историю чата которого хотите очистить\nИли проигнорируйте это сообщение", 34 | "module_select_module": "Текущий модуль: {current_module}\nВыберите модуль для чата\nИли проигнорируйте это сообщение", 35 | "user_cooldown_error": "❌ Пожалуйста подождите {time_formatted}, прежде чем отправлять запрос к {module_name}", 36 | "hours": "ч", 37 | "minutes": "м", 38 | "seconds": "с", 39 | "ban_message_admin": "✅ Заблокирован пользователь: {banned_user}\n\nПричина: {ban_reason}", 40 | "ban_no_user_id": "❌ Ошибка! Пожалуйста, укажите user_id", 41 | "ban_message_user": "❌ Вы не находитесь в белом списке или вы были забанены!\n\nПричина: {ban_reason}", 42 | "ban_reason_default": "Причина не указана", 43 | "unban_message_admin": "✅ Разблокирован пользователь: {unbanned_user}", 44 | "broadcast_no_message": "❌ Нет сообщения для вещания!", 45 | "broadcast": "📢 {message}", 46 | "broadcast_initiated": "⏳ Вещание в процессе... Пожалуйста ожидайте и не отправляйте новых сообщений!", 47 | "broadcast_done": "✅ Вещание завершено! 
Сообщения отправлены пользователям:\n{broadcast_ok_users}", 48 | "style_changed": "✅ Стиль общения изменен на {changed_style}", 49 | "style_change_error": "❌ Ошибка смены стиля общения!\n\n{error_text}", 50 | "style_select": "Текущий стиль общения: {current_style}\nВыберите новый стиль общения Bing,\nИли проигнорируйте это сообщение", 51 | "style_precise": "📏 Точный", 52 | "style_balanced": "⚖️ Сбалансированный", 53 | "style_creative": "🎨 Творческий", 54 | "model_changed": "✅ Модель из модуля {module_name} изменена на {changed_model}", 55 | "model_select": "Ваша модель модуля {module_name}: {current_model}\nВыберите новую модель модуля {module_name},\nили проигнорируйте это сообщение", 56 | "model_change_error": "❌ Ошибка при изменении модели!\n\n{error_text}", 57 | "model_no_models": "❌ Вы не можете изменить модель текущего модуля!", 58 | "button_model_change": "⚙️ Изменить модель", 59 | "button_stop_generating": "🛑 Прекратить генерацию", 60 | "button_continue": "⏩ Продолжить", 61 | "button_regenerate": "🔄 Перегенерировать", 62 | "button_clear": "🧹 Очистить историю", 63 | "button_module": "↕️ Сменить модуль", 64 | "button_style_change": "⚙️ Сменить стиль", 65 | "modules": { 66 | "lmao_chatgpt": { 67 | "icon": "💬", 68 | "name": "ChatGPT (LMAO API)" 69 | }, 70 | "lmao_ms_copilot": { 71 | "icon": "🟦", 72 | "name": "MS Copilot (LMAO API)" 73 | }, 74 | "chatgpt": { 75 | "icon": "💬", 76 | "name": "ChatGPT" 77 | }, 78 | "dalle": { 79 | "icon": "🖼", 80 | "name": "DALL-E" 81 | }, 82 | "ms_copilot": { 83 | "icon": "🔵", 84 | "name": "MS Copilot" 85 | }, 86 | "ms_copilot_designer": { 87 | "icon": "🎨", 88 | "name": "MS Copilot designer" 89 | }, 90 | "gemini": { 91 | "icon": "♊", 92 | "name": "Gemini" 93 | }, 94 | "groq": { 95 | "icon": "🔴", 96 | "name": "Groq" 97 | } 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /langs/ind.json: -------------------------------------------------------------------------------- 1 | { 2 | 
"language_name": "🇮🇩 Bahasa Indonesia", 3 | "language_icon": "🇮🇩", 4 | "language_select_error": "❌ Kesalahan dalam memilih bahasa!\n\n{error_text}", 5 | "language_select": "Silakan pilih bahasa antarmuka", 6 | "language_changed": "🇮🇩 Anda telah memilih Bahasa Indonesia\n\nAnda dapat mengganti bahasa kapan saja dengan mengetik perintah /lang", 7 | "start_message": "Halo, 👋 sahabat! 👋\n\n📄 Versi GPT-Telegramus {version}\n\n💜 Penulis GPT-Telegramus: Fern (juga dikenal sebagai F3RNI)\n💻 Kontributor:\n🤍 Keepalove (Azarell) (Sprav04ka)\n🤍 Hanssen\n🤍 https://github.com/F33RNI/GPT-Telegramus#-contributors\n\n😺 Halaman GitHub: https://github.com/F33RNI/GPT-Telegramus\n\n🎵 Dukung proyek ini dengan membeli musik saya: https://f3rni.bandcamp.com", 8 | "help_message": "📄 /start - Pesan selamat datang dan versi bot\n❓ /help - Tampilkan pesan ini\n↕ /module - Ganti modul untuk berbicara\n🧹 /clear - Bersihkan riwayat obrolan\n🌎 /lang - Ganti bahasa\n🆔 /chatid - Tampilkan chat_id Anda\n\nSekarang untuk memulai, kirimkan saya pesan apa pun 💬", 9 | "help_message_admin": "Perintah Admin:\n\n💬 /queue - Tampilkan antrian permintaan\n🔃 /restart [modul, opsional] - Mulai ulang modul (atau semua modul), muat ulang konfigurasi, bahasa, dan perintah\n👤 /users - Tampilkan daftar semua pengguna\n🔨 /ban [alasan] - Larang pengguna berdasarkan id dengan alasan (opsional)\n🔓 /unban - Buka larangan pengguna berdasarkan id\n📢 /broadcast - Kirim pesan teks kepada semua kecuali pengguna yang diblokir", 10 | "empty_request_module_changed": "✅ Modul Anda telah diubah menjadi {module_name}\nSekarang kirimkan permintaan Anda sebagai pesan", 11 | "media_group_response": "Gambar yang dihasilkan untuk permintaan \"{request_text}\"", 12 | "permissions_deny": "❌ Anda tidak memiliki izin untuk perintah ini!", 13 | "queue_overflow": "Antrian penuh. 
Coba lagi nanti ⏳", 14 | "queue_empty": "🗑 Antrian kosong", 15 | "queue_accepted": "✅ Permintaan ke modul {module_name} ditambahkan ke antrian.\nPosisi: {queue_size}/{queue_max} ⏳", 16 | "response_error": "❌ Kesalahan: {error_text}\n\nMembersihkan riwayat dan buat permintaan lain atau coba lagi nanti", 17 | "empty_message": "⚠️ Kosong! Lihat log untuk detailnya", 18 | "regenerate_error_not_last": "❌ Kesalahan! Hanya bisa menghasilkan ulang permintaan terakhir!", 19 | "regenerate_error_empty": "❌ Kesalahan! Permintaan kosong!", 20 | "continue_error_not_last": "❌ Kesalahan! Hanya bisa melanjutkan permintaan terakhir!", 21 | "stop_error_not_last": "❌ Kesalahan! Hanya bisa menghentikan permintaan terakhir!", 22 | "stop_error": "❌ Kesalahan! Tidak dapat menghentikan penghasilan!", 23 | "response_link_format": "\n📄 {source_name}: {link}", 24 | "suggestion_format": "💡 {suggestion}", 25 | "suggestion_error": "❌ Mungkin menggunakan permintaan yang diajukan hanya dari pesan terakhir!", 26 | "users_read_error": "❌ Kesalahan membaca atau menguraikan daftar pengguna!", 27 | "users_admin": "Dilarang? Admin? 
Bahasa Modul ID Nama - Permintaan\n\n{users_data}", 28 | "users_total_stats": "Pengguna: {users_num}, diblokir {banned_num}, admin: {admins_num}", 29 | "restarting": "🙏 Sedang dilakukan proses restart...\nTunggu sebentar", 30 | "restarting_done": "{reload_logs}\n✅ Restart selesai", 31 | "chat_cleared": "✅ Riwayat obrolan dibersihkan untuk {module_name}", 32 | "clear_error": "❌ Kesalahan membersihkan riwayat obrolan!\n\n{error_text}", 33 | "clear_select_module": "Pilih modul yang ingin Anda bersihkan riwayat obrolannya,\natau abaikan pesan ini", 34 | "module_select_module": "Modul saat ini Anda: {current_module}\nPilih modul untuk berbicara,\natau abaikan pesan ini", 35 | "user_cooldown_error": "❌ Harap tunggu {time_formatted} sebelum mengirim permintaan ke modul {module_name}!", 36 | "hours": "j", 37 | "minutes": "m", 38 | "seconds": "d", 39 | "ban_message_admin": "✅ Pengguna diblokir: {banned_user}\n\nAlasan: {ban_reason}", 40 | "ban_no_user_id": "❌ Kesalahan! Harap tentukan user_id", 41 | "ban_message_user": "❌ Anda tidak masuk daftar putih atau Anda telah diblokir!\n\nAlasan: {ban_reason}", 42 | "ban_reason_default": "Alasan tidak ditentukan", 43 | "unban_message_admin": "✅ Pengguna dibuka blokir: {unbanned_user}", 44 | "broadcast_no_message": "❌ Tidak ada pesan untuk disiarkan!", 45 | "broadcast": "📢 {message}", 46 | "broadcast_initiated": "⏳ Proses siaran sedang berlangsung... Harap tunggu dan jangan kirim pesan baru!", 47 | "broadcast_done": "✅ Siaran selesai! 
Pesan telah dikirim ke pengguna:\n{broadcast_ok_users}", 48 | "style_changed": "✅ Gaya percakapan diubah menjadi {changed_style}", 49 | "style_change_error": "❌ Kesalahan mengubah gaya percakapan!\n\n{error_text}", 50 | "style_select": "Gaya percakapan Anda: {current_style}\nPilih gaya percakapan baru Bing,\natau abaikan pesan ini", 51 | "style_precise": "📏 Presisi", 52 | "style_balanced": "⚖️ Seimbang", 53 | "style_creative": "🎨 Kreatif", 54 | "model_changed": "✅ Model dari modul {module_name} berubah menjadi {changed_model}", 55 | "model_select": "Model modul {module_name} Anda: {current_model}\nPilih model baru modul {module_name},\natau abaikan pesan ini", 56 | "model_change_error": "❌ Kesalahan saat mengubah model!\n\n{error_text}", 57 | "model_no_models": "❌ Anda tidak dapat mengubah model modul saat ini!", 58 | "button_model_change": "⚙️ Ubah model", 59 | "button_stop_generating": "🛑 Hentikan penghasilan", 60 | "button_continue": "⏩ Lanjutkan", 61 | "button_regenerate": "🔄 Hasilkan ulang", 62 | "button_clear": "🧹 Bersihkan riwayat", 63 | "button_module": "↕️ Ganti modul", 64 | "button_style_change": "⚙️ Ubah gaya", 65 | "modules": { 66 | "lmao_chatgpt": { 67 | "icon": "💬", 68 | "name": "ChatGPT (LMAO API)" 69 | }, 70 | "lmao_ms_copilot": { 71 | "icon": "🟦", 72 | "name": "MS Copilot (LMAO API)" 73 | }, 74 | "chatgpt": { 75 | "icon": "💬", 76 | "name": "ChatGPT" 77 | }, 78 | "dalle": { 79 | "icon": "🖼", 80 | "name": "DALL-E" 81 | }, 82 | "ms_copilot": { 83 | "icon": "🔵", 84 | "name": "MS Copilot" 85 | }, 86 | "ms_copilot_designer": { 87 | "icon": "🎨", 88 | "name": "MS Copilot designer" 89 | }, 90 | "gemini": { 91 | "icon": "♊", 92 | "name": "Gemini" 93 | }, 94 | "groq": { 95 | "icon": "🔴", 96 | "name": "Groq" 97 | } 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /ms_copilot_designer_module.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2023-2024 
import json
import logging
import os
from typing import Dict

from BingImageCreator import ImageGen

import messages
import users_handler
import bot_sender
from async_helper import async_helper
from request_response_container import RequestResponseContainer


# Self name
_NAME = "ms_copilot_designer"


class MSCopilotDesignerModule:
    """Handles image-generation requests via Bing ImageGen (MS Copilot Designer)."""

    def __init__(
        self,
        config: Dict,
        messages_: messages.Messages,
        users_handler_: users_handler.UsersHandler,
    ) -> None:
        """Initializes class variables (must be done in main process)

        Args:
            config (Dict): global config
            messages_ (messages.Messages): initialized messages handler
            users_handler_ (users_handler.UsersHandler): initialized users handler
        """
        self.config = config
        self.messages = messages_
        self.users_handler = users_handler_

        # Don't use this variable outside the module's process
        self._image_generator = None

    def initialize(self) -> None:
        """Initializes Bing ImageGen API

        Raises:
            Exception: missing _U / SRCHHPGUSR cookies or any underlying API error
        """
        self._image_generator = None

        # Get module's config
        module_config = self.config.get(_NAME)

        # Use proxy
        proxy = None
        if module_config.get("proxy") and module_config.get("proxy") != "auto":
            proxy = module_config.get("proxy")
            logging.info(f"Initializing MS Copilot Designer module with proxy {proxy}")
        else:
            logging.info("Initializing MS Copilot Designer module without proxy")

        # Read cookies file
        # Fix: use a context manager so the file handle is always closed
        cookies = None
        cookies_file = module_config.get("cookies_file")
        if cookies_file and os.path.exists(cookies_file):
            logging.info(f"Loading cookies from {cookies_file}")
            with open(cookies_file, "r", encoding="utf-8") as file:
                cookies = json.load(file)

        # Parse cookies (extract authentication values by cookie name)
        auth_cookie = ""
        auth_cookie_SRCHHPGUSR = ""
        if cookies:
            logging.info("Parsing cookies")
            for cookie in cookies:
                if cookie.get("name") == "_U":
                    auth_cookie = cookie.get("value")
                elif cookie.get("name") == "SRCHHPGUSR":
                    auth_cookie_SRCHHPGUSR = cookie.get("value")
            if not auth_cookie:
                raise Exception("No _U cookie")
            if not auth_cookie_SRCHHPGUSR:
                raise Exception("No SRCHHPGUSR cookie")

        # Initialize Bing ImageGen
        self._image_generator = ImageGen(
            auth_cookie=auth_cookie,
            auth_cookie_SRCHHPGUSR=auth_cookie_SRCHHPGUSR,
            quiet=True,
            all_cookies=cookies,
        )

        # Set proxy for the underlying requests session
        if proxy:
            self._image_generator.session.proxies = {"http": proxy, "https": proxy}

        logging.info("Bing ImageGen module initialized")

    def process_request(self, request_response: RequestResponseContainer) -> None:
        """Processes request to Bing ImageGen

        Args:
            request_response (RequestResponseContainer): container from the queue
        """
        # Check if we are initialized (report error to the user and bail out early)
        if self._image_generator is None:
            logging.error("MS Copilot Designer module not initialized")
            request_response.response_text = self.messages.get_message(
                "response_error", user_id=request_response.user_id
            ).format(error_text="MS Copilot Designer module not initialized")
            request_response.error = True
            return

        try:
            # Generate images
            logging.info("Requesting images from Bing ImageGen")
            response_urls = self._image_generator.get_images(request_response.request_text)

            # Check response (None or empty list)
            if not response_urls:
                raise Exception("Wrong Bing ImageGen response")

            # Use all generated images
            logging.info(f"Response successfully processed for user {request_response.user_id}")
            request_response.response_images = response_urls

        # Exit requested: return without sending the finish message
        except (SystemExit, KeyboardInterrupt):
            logging.warning("KeyboardInterrupt @ process_request")
            return

        # Bing ImageGen or other error: report a truncated error text to the user
        except Exception as e:
            logging.error("Error processing request!", exc_info=e)
            error_text = str(e)
            if len(error_text) > 100:
                error_text = error_text[:100] + "..."

            request_response.response_text = self.messages.get_message(
                "response_error", user_id=request_response.user_id
            ).format(error_text=error_text)
            request_response.error = True

        # Finish message
        async_helper(
            bot_sender.send_message_async(self.config.get("telegram"), self.messages, request_response, end=True)
        )
Gửi tin nhắn văn bản cho tất cả mọi người ngoại trừ người dùng đã bị cấm", 10 | "empty_request_module_changed": "✅ Mô-đun của bạn đã được chuyển sang {module_name}\nBây giờ hãy gửi yêu cầu của bạn dưới dạng tin nhắn", 11 | "media_group_response": "Đã tạo hình ảnh cho yêu cầu \"{request_text}\"", 12 | "permissions_deny": "❌ Bạn không có quyền để sử dụng lệnh này!", 13 | "queue_overflow": "Hàng đợi đầy. Vui lòng thử lại sau ⏳", 14 | "queue_empty": "🗑 Hàng đợi đang trống", 15 | "queue_accepted": "✅ Yêu cầu tới mô-đun {module_name} đã được thêm vào hàng đợi.\nVị trí: {queue_size}/{queue_max} ⏳", 16 | "response_error": "❌ Lỗi: {error_text}\n\nXóa lịch sử và thực hiện yêu cầu khác hoặc thử lại sau", 17 | "empty_message": "⚠️ Trống! Xem logs để biết chi tiết", 18 | "regenerate_error_not_last": "❌ Lỗi! Chỉ có thể tạo lại yêu cầu cuối cùng!", 19 | "regenerate_error_empty": "❌ Lỗi! Yêu cầu trống!", 20 | "continue_error_not_last": "❌ Lỗi! Chỉ có thể tiếp tục yêu cầu cuối cùng!", 21 | "stop_error_not_last": "❌ Lỗi! Chỉ có thể hủy bỏ yêu cầu cuối cùng!", 22 | "stop_error": "❌ Lỗi! Không thể dừng quá trình tạo ra!", 23 | "response_link_format": "\n📄 {source_name}: {link}", 24 | "suggestion_format": "💡 {suggestion}", 25 | "suggestion_error": "❌ Có thể sử dụng yêu cầu được đề xuất chỉ từ tin nhắn cuối cùng!", 26 | "users_read_error": "❌ Lỗi đọc hoặc phân tích cú pháp danh sách người dùng!", 27 | "users_admin": "Bị cấm? Quản trị viên? 
Mô-đun ngôn ngữ ID Tên - Yêu cầu\n\n{users_data}", 28 | "users_total_stats": "Người dùng: {users_num}, bị cấm: {banned_num}, quản trị viên: {admins_num}", 29 | "restarting": "🙏 Đang khởi động lại...\nVui lòng đợi", 30 | "restarting_done": "{reload_logs}\n✅ Khởi động lại đã hoàn thành", 31 | "chat_cleared": "✅ Lịch sử trò chuyện đã được xóa cho {module_name}", 32 | "clear_error": "❌ Lỗi khi xóa lịch sử trò chuyện!\n\n{error_text}", 33 | "clear_select_module": "Chọn mô-đun mà bạn muốn xóa lịch sử trò chuyện của,\nhoặc bỏ qua tin nhắn này", 34 | "module_select_module": "Mô-đun hiện tại của bạn: {current_module}\nChọn mô-đun để trò chuyện với,\nhoặc bỏ qua tin nhắn này", 35 | "user_cooldown_error": "❌ Vui lòng đợi {time_formatted} trước khi gửi yêu cầu đến mô-đun {module_name}!", 36 | "hours": "h", 37 | "minutes": "m", 38 | "seconds": "s", 39 | "ban_message_admin": "✅ Người dùng bị cấm: {banned_user}\n\nLý do: {ban_reason}", 40 | "ban_no_user_id": "❌ Lỗi! Vui lòng chỉ định user_id", 41 | "ban_message_user": "❌ Bạn không được phép hoặc đã bị cấm truy cập!\n\nLý do: {ban_reason}", 42 | "ban_reason_default": "Lý do không được chỉ định", 43 | "unban_message_admin": "✅ Người dùng đã được bỏ cấm: {unbanned_user}", 44 | "broadcast_no_message": "❌ Không có tin nhắn để phát sóng!", 45 | "broadcast": "📢 {message}", 46 | "broadcast_initiated": "⏳ Quá trình phát sóng đang tiến hành... Vui lòng đợi và không gửi tin nhắn mới!", 47 | "broadcast_done": "✅ Quá trình phát sóng đã hoàn thành! 
Tin nhắn đã được gửi tới người dùng:\n{broadcast_ok_users}", 48 | "style_changed": "✅ Phong cách trò chuyện đã được thay đổi thành {changed_style}", 49 | "style_change_error": "❌ Lỗi khi thay đổi phong cách trò chuyện!\n\n{error_text}", 50 | "style_select": "Phong cách trò chuyện hiện tại của bạn: {current_style}\nChọn phong cách trò chuyện mới của Bing,\nhoặc bỏ qua tin nhắn này", 51 | "style_precise": "📏 Chính xác", 52 | "style_balanced": "⚖️ Cân đối", 53 | "style_creative": "🎨 Sáng tạo", 54 | "model_changed": "✅ Mô hình từ mô-đun {module_name} đã thay đổi thành {changed_model}", 55 | "model_select": "Mô hình hiện tại của bạn từ mô-đun {module_name}: {current_model}\nChọn mô hình mới từ mô-đun {module_name},\nhoặc bỏ qua thông báo này", 56 | "model_change_error": "❌ Lỗi khi thay đổi mô hình!\n\n{error_text}", 57 | "model_no_models": "❌ Bạn không thể thay đổi mô hình của mô-đun hiện tại!", 58 | "button_model_change": "⚙️ Thay đổi mô hình", 59 | "button_stop_generating": "🛑 Dừng tạo ra", 60 | "button_continue": "⏩ Tiếp tục", 61 | "button_regenerate": "🔄 Tạo lại", 62 | "button_clear": "🧹 Xóa lịch sử", 63 | "button_module": "↕️ Thay đổi mô-đun", 64 | "button_style_change": "⚙️ Thay đổi phong cách", 65 | "modules": { 66 | "lmao_chatgpt": { 67 | "icon": "💬", 68 | "name": "ChatGPT (LMAO API)" 69 | }, 70 | "lmao_ms_copilot": { 71 | "icon": "🟦", 72 | "name": "MS Copilot (LMAO API)" 73 | }, 74 | "chatgpt": { 75 | "icon": "💬", 76 | "name": "ChatGPT" 77 | }, 78 | "dalle": { 79 | "icon": "🖼", 80 | "name": "DALL-E" 81 | }, 82 | "ms_copilot": { 83 | "icon": "🔵", 84 | "name": "MS Copilot" 85 | }, 86 | "ms_copilot_designer": { 87 | "icon": "🎨", 88 | "name": "MS Copilot designer" 89 | }, 90 | "gemini": { 91 | "icon": "♊", 92 | "name": "Gemini" 93 | }, 94 | "groq": { 95 | "icon": "🔴", 96 | "name": "Groq" 97 | } 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /langs/ukr.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "language_name": "🇺🇦 Українська", 3 | "language_icon": "🇺🇦", 4 | "language_select_error": "❌ Помилка вибору мови!\n\n{error_text}", 5 | "language_select": "Будь ласка, оберіть мову інтерфейсу", 6 | "language_changed": "🇺🇦 Ви обрали українську мову\n\nВи можете змінити мову у будь-який час прописавши команду /lang", 7 | "start_message": "Здоровенькі були, 👋 чумба! 👋\n\n📄 GPT-Telegramus version {version}\n\n💜 Розробка: Ферн (F3RNI)\n💻 Соавторы:\n🤍 Keepalove (Azarell) (Sprav04ka)\n🤍 Hanssen\n🤍 https://github.com/F33RNI/GPT-Telegramus#-contributors\n\n😺 Страница GitHub: https://github.com/F33RNI/GPT-Telegramus\n\n🎵 Підтримайте проект, купивши мою музыку: https://f3rni.bandcamp.com", 8 | "help_message": "📄 /start - Вітальне повідомлення та версія бота\n❓ /help - Показати це повідомлення\n↕ /module - Змінити модуль для чату\n🧹 /clear - Очистити історію чату\n🌎 /lang - змінити мову\n🆔 /chatid - Показати ваш chat_id\n\nТепер, щоб почати, відправте мені будь-яке повідомлення 💬", 9 | "help_message_admin": "Команди адміністратора:\n\n💬 /queue - Показати чергу запитів\n🔃 /restart [модуль, необов'язково] - Перезапуск модуля (або всіх), перезавантаження конфігурації, мов та команд\n👤 /users - Показати перелік усіх користувачів\n🔨 /ban [reason] - Заблокувати користувача по його id із зазначенням причини (опціонально)\n🔓 /unban - Разблокувати користувача по id\n📢 /broadcast - Відправити текстове повідомлення усім, окрім заблокованих користувачів", 10 | "empty_request_module_changed": "✅ Модул було змінено на {module_name}\nТепер пришліть мені ваш запит у вигляді повідомлення", 11 | "media_group_response": "Згенеровані зображення по запиту \"{request_text}\"", 12 | "permissions_deny": "❌ У вас нема дозволу на цю команду!", 13 | "queue_overflow": "Черга заповнена. 
Спробуйте пізніше ⏳", 14 | "queue_empty": "🗑 Черга порожня", 15 | "queue_accepted": "✅ Запит до {module_name} модулю був доданий у чергу.\nПозиція у черзі: {queue_size}/{queue_max} ⏳", 16 | "response_error": "❌ Помилка: {error_text}\n\nОчистіть історію та зробіть ще один запит чи повторіть спробу пізніше", 17 | "empty_message": "⚠️ Пусте повідомлення! Дивітьсялоги для детальної інформації", 18 | "regenerate_error_not_last": "❌ Помилка! Можна згенерувати заново тільки останній запит!", 19 | "regenerate_error_empty": "❌ Помилка! Порожній запит!", 20 | "continue_error_not_last": "❌ Помилка! Можна продовжити тільки останнійзаприт!", 21 | "stop_error_not_last": "❌ Помилка! Можна перервати тільки останній запит!", 22 | "stop_error": "❌ Помилка! Неможливо припинити дегенерацію!", 23 | "response_link_format": "\n📄 {source_name}: {link}", 24 | "suggestion_format": "💡 {suggestion}", 25 | "suggestion_error": "❌ Можливо використовувати запит, запропонований лише з останнього повідомлення!", 26 | "users_read_error": "❌ Помилка читання або аналізу списку користувачів!", 27 | "users_admin": "Забанений? Адмін? 
Мова Модуль ID Ім'я - Запитів\n\n{users_data}", 28 | "users_total_stats": "Користувачі: {users_num}, заблоковані: {banned_num}, адміністратори: {admins_num}", 29 | "restarting": "🙏 Виконується перезавантаження...\nБудь ласка, зачекайте", 30 | "restarting_done": "{reload_logs}\n✅ Перезавантаження завершено", 31 | "chat_cleared": "✅ Історія чату була очищена для {module_name}", 32 | "clear_error": "❌ Помилка очищення історії чату!\n\n{error_text}", 33 | "clear_select_module": "Виберіть модуль, історію чату, якого хочете очистити\nАбо проігноруйте це повідомлення", 34 | "module_select_module": "Поточний модуль: {current_module}\nВиберіть модуль для чату\nАбо проігноруйте це повідомлення", 35 | "user_cooldown_error": "❌ Будь ласка, зачекайте {time_formatted}, перш ніж відправляти запит к {module_name}", 36 | "hours": "ч", 37 | "minutes": "м", 38 | "seconds": "с", 39 | "ban_message_admin": "✅ Заблоковано користувача: {banned_user}\n\nПричина: {ban_reason}", 40 | "ban_no_user_id": "❌ Помилка! Будь ласка, вкажіть user_id", 41 | "ban_message_user": "❌ Ви не знаходитесь у білому списку або ви були забанені!\n\nПричина: {ban_reason}", 42 | "ban_reason_default": "Причина не вказана", 43 | "unban_message_admin": "✅ Разблоковано користувача: {unbanned_user}", 44 | "broadcast_no_message": "❌ Нема повідомлення для мовлення!", 45 | "broadcast": "📢 {message}", 46 | "broadcast_initiated": "⏳ Мовлення у процесі... Будь ласка, очікуйте та не відправляйте нові повідомлення!", 47 | "broadcast_done": "✅ Мовлення завершено! 
Повідомлення були відправлені користувачам:\n{broadcast_ok_users}", 48 | "style_changed": "✅ Стиль спілкування змінено на {changed_style}", 49 | "style_change_error": "❌ Помилка зміни стилю спілкування!\n\n{error_text}", 50 | "style_select": "Поточний стиль спілкування: {current_style}\nОберіть новий стиль спілкування Bing,\nАбо проігноруйте це повідомлення", 51 | "style_precise": "📏 Точний", 52 | "style_balanced": "⚖️ Збалансований", 53 | "style_creative": "🎨 Творчий", 54 | "model_changed": "✅ Модель з модуля {module_name} змінився на {changed_model}", 55 | "model_select": "Ваша модель модуля {module_name}: {current_model}\nВиберіть нову модель модуля {module_name},\nабо проігноруйте це повідомлення", 56 | "model_change_error": "❌ Помилка при зміні моделі!\n\n{error_text}", 57 | "model_no_models": "❌ Ви не можете змінити модель поточного модуля!", 58 | "button_model_change": "⚙️ Змінити модель", 59 | "button_stop_generating": "🛑 Припинити генерацію", 60 | "button_continue": "⏩ Продовжити", 61 | "button_regenerate": "🔄 Перегенерувати", 62 | "button_clear": "🧹 Очистити історію", 63 | "button_module": "↕️ Змінити модуль", 64 | "button_style_change": "⚙️ Змінити стиль", 65 | "modules": { 66 | "lmao_chatgpt": { 67 | "icon": "💬", 68 | "name": "ChatGPT (LMAO API)" 69 | }, 70 | "lmao_ms_copilot": { 71 | "icon": "🟦", 72 | "name": "MS Copilot (LMAO API)" 73 | }, 74 | "chatgpt": { 75 | "icon": "💬", 76 | "name": "ChatGPT" 77 | }, 78 | "dalle": { 79 | "icon": "🖼", 80 | "name": "DALL-E" 81 | }, 82 | "ms_copilot": { 83 | "icon": "🔵", 84 | "name": "MS Copilot" 85 | }, 86 | "ms_copilot_designer": { 87 | "icon": "🎨", 88 | "name": "MS Copilot designer" 89 | }, 90 | "gemini": { 91 | "icon": "♊", 92 | "name": "Gemini" 93 | }, 94 | "groq": { 95 | "icon": "🔴", 96 | "name": "Groq" 97 | } 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /config.json: -------------------------------------------------------------------------------- 1 | { 2 
| "__comment01__": "Version of config file. Please don't change it", 3 | "config_version": 8, 4 | 5 | "__comment02__": "General config of enabled modules", 6 | "__comment03__": "Available modules: lmao_chatgpt, lmao_ms_copilot, ms_copilot, ms_copilot_designer, gemini, groq", 7 | "__comment04__": "NOTE: ms_copilot and ms_copilot_designer modules are DEPRECATED and will be removed soon", 8 | "modules": { 9 | "__comment01__": "Enabled modules", 10 | "enabled": [ 11 | "lmao_chatgpt", 12 | "lmao_ms_copilot", 13 | "gemini", 14 | "groq" 15 | ], 16 | 17 | "__comment02__": "Default (initial) module for handling user messages", 18 | "default": "lmao_chatgpt", 19 | 20 | "__comment03__": "Specify modules for which to use web API instead of python package. ex: ['lmao_chatgpt']", 21 | "lmao_web_for_modules": [], 22 | 23 | "__comment04__": "LMAO web API URL (without tailing slash)", 24 | "lmao_web_api_url": "http://localhost:1312/api", 25 | 26 | "__comment05__": "If needed, specify LMAO API token (from --tokens-manage) argument", 27 | "__comment06__": "See for more info", 28 | "lmao_token_manage": "", 29 | 30 | "__comment07__": "If needed, specify proxy in http://ip:port format (specify http even if it's https proxy)", 31 | "__comment08__": "Use http://username:password@ip:port format in case of proxy with authorization", 32 | "lmao_web_proxy": "" 33 | }, 34 | 35 | "__comment05__": "Paths to files and directories", 36 | "files": { 37 | "module_configs_dir": "module_configs", 38 | "users_database": "users.json", 39 | "user_images_dir": "user_images", 40 | "conversations_dir": "conversations", 41 | "data_collecting_dir": "data", 42 | "messages_dir": "langs" 43 | }, 44 | 45 | "__comment06__": "Telegram bot config", 46 | "telegram": { 47 | "__comment01__": "Provide your bot api key from ", 48 | "api_key": "", 49 | 50 | "__comment02__": "IDs of users who can use admin commands (to get id, you can use /chatid command)", 51 | "admin_ids": [], 52 | 53 | "__comment03__": "Set to true to 
block new users by default (whitelist)", 54 | "ban_by_default": false, 55 | 56 | "__comment04__": "Size of the request queue (other messages will be rejected)", 57 | "queue_max": 10, 58 | 59 | "__comment05__": "In how many seconds (can be float) to edit messages (stream responses)", 60 | "edit_message_every_seconds_num": 1, 61 | 62 | "__comment06__": "Add cursor character to the end of the message during stream response", 63 | "add_cursor_symbol": true, 64 | "cursor_symbol": "▯", 65 | 66 | "__comment07__": "Symbols for /users", 67 | "admin_symbol": "🐈‍⬛", 68 | "non_admin_symbol": "🐈", 69 | "banned_symbol": "🟥", 70 | "non_banned_symbol": "🟩", 71 | 72 | "__comment08__": "First message that will be sent to user while module is loading. Leave empty to disable it", 73 | "response_initial_message": "⌛️", 74 | 75 | "__comment09__": "If response is larger than this number (in chars), it will be split into multiple messages", 76 | "one_message_limit": 3000, 77 | 78 | "__comment10__": "If caption is larger than this number (in chars), it will be split into multiple messages", 79 | "one_caption_limit": 1000, 80 | 81 | "__comment11__": "Set to true for the bot to reply to messages as well as direct module commands", 82 | "reply_to_messages": true, 83 | 84 | "__comment12__": "How many seconds (can be float) to wait between each message while broadcasting", 85 | "broadcast_delay_per_user_seconds": 0.5, 86 | 87 | "__comment13__": "Bot commands description (will be automatically added to the bot) Set", 88 | "__comment14__": "commands_description_enabled to false to disable it (you can do it manually via BotFather)", 89 | "commands_description_enabled": true, 90 | "commands_description": [ 91 | { 92 | "command": "start", 93 | "description": "📄 Welcome message and bot version" 94 | }, 95 | { 96 | "command": "help", 97 | "description": "❓ Show help message" 98 | }, 99 | { 100 | "command": "module", 101 | "description": "↕ Change module to chat with" 102 | }, 103 | { 104 | "command": 
"model", 105 | "description": "⚙️ Change model of module (ex. For 🔴 Groq module)" 106 | }, 107 | { 108 | "command": "clear", 109 | "description": "🧹 Clear chat history" 110 | }, 111 | { 112 | "command": "lang", 113 | "description": "🌎 Change the language" 114 | }, 115 | { 116 | "command": "chatid", 117 | "description": "🆔 Show your chat_id" 118 | } 119 | ], 120 | 121 | "__comment15__": "If needed, specify proxy in http://ip:port format (specify http even if it's https proxy)", 122 | "__comment16__": "Use http://username:password@ip:port format in case of proxy with authorization", 123 | "proxy": "" 124 | }, 125 | 126 | "__comment07__": "Save all requests and responses to the files", 127 | "data_collecting": { 128 | "__comment01__": "Set true to automatically save all user's requests and responses into data_collecting_dir", 129 | "__comment02__": "NOTE: You should notify users if it's enabled!", 130 | "enabled": false, 131 | 132 | "__comment03__": "Maximum file size (bytes). If the file exceed this value, a new file will be created", 133 | "max_size": 33554432, 134 | 135 | "__comment04__": "Name of each file", 136 | "filename_timestamp_format": "%Y_%m_%d_%H_%M_%S", 137 | "filename_extension": ".log", 138 | 139 | "__comment05__": "Request log entry format", 140 | "request_format": "{timestamp} Request ({container_id}) from {user_name} ({user_id}) to {module_name}: {request}\n\n", 141 | 142 | "__comment06__": "Response log entry format", 143 | "response_format": "{timestamp} Response ({container_id}) from {module_name} to {user_name} ({user_id}): {response}\n\n", 144 | 145 | "__comment07__": "Timestamp format", 146 | "timestamp_format": "%Y-%m-%d %H:%M:%S" 147 | } 148 | } 149 | -------------------------------------------------------------------------------- /langs/spa.json: -------------------------------------------------------------------------------- 1 | { 2 | "language_name": "🇪🇸 Español", 3 | "language_icon": "🇪🇸", 4 | "language_select_error": "❌ Error al 
seleccionar el idioma!\n\n{error_text}", 5 | "language_select": "Por favor, selecciona un idioma de interfaz", 6 | "language_changed": "🇪🇸 Has selecionado Español\n\nPuedes cambiar el idioma en cualquier momento escribiendo el comando /lang", 7 | "start_message": "Hola, 👋 choomba! 👋\n\n📄 GPT-Telegramus version {version}\n\n💜 Autor de GPT-Telegramus: Fern (también conocido como F3RNI)\n💻 Colaboradores:\n🤍 Keepalove (Azarell) (Sprav04ka)\n🤍 Hanssen\n🤍 https://github.com/F33RNI/GPT-Telegramus#-contributors\n\n Página de GitHub: https://github.com/F33RNI/GPT-Telegramus\n\n Apoya el proyecto comprando mi música: https://f3rni.bandcamp.com", 8 | "help_message": "📄 /start - Mensaje de bienvenida y versión del bot\n❓ /help - Muestra este mensaje\n↕ /módulo - Cambia el módulo para chatear\n /clear - Limpiar el historial del chat\n /lang - Cambiar el idioma\n🆔 /chatid - Muestra tu chat_id\n\nAhora, para comenzar, envíame cualquier mensaje 💬", 9 | "help_message_admin": "Comandos de administrador:\n\n💬 /queue - Muestra la cola de solicitudes\n🔃 /restart [módulo, opcional] - Reiniciar módulo (o todos ellos), recargar configuración, idiomas y comandos\n👤 /users - Muestra la lista de todos los usuarios\n🔨 /ban [razón] - Banea a un usuario por su id con razón (opcional)\n🔓 /unban - Desbanea a un usuario por su id\n📢 /broadcast - Envía un mensaje de texto a todos menos a los usuarios baneados", 10 | "empty_request_module_changed": "✅ Tu módulo se ha cambiado a {module_name}\nAhora envíame tu solicitud como mensaje", 11 | "media_group_response": "Imágenes generadas para la solicitud \"{request_text}\"", 12 | "permissions_deny": "❌ No tienes permiso para este comando!", 13 | "queue_overflow": "La cola está llena. 
Vuelve a intentarlo más tarde ⏳", 14 | "queue_empty": "La cola está vacía 🗑", 15 | "queue_accepted": "✅ Solicitud al módulo {module_name} agregada a la cola.\nPosición: {queue_size}/{queue_max} ⏳", 16 | "response_error": "❌ Error: {error_text}\n\nLimpia el historial y haz otra solicitud o vuelve a intentarlo más tarde", 17 | "empty_message": "⚠️ Vacío! Consulta los registros para obtener más detalles", 18 | "regenerate_error_not_last": "❌ Error! ¡Solo se puede regenerar la última solicitud!", 19 | "regenerate_error_empty": "❌ Error! ¡Solicitud vacía!", 20 | "continue_error_not_last": "❌ Error! ¡Solo se puede continuar con la última solicitud!", 21 | "stop_error_not_last": "❌ Error! ¡Solo se puede detener la generación de la última solicitud!", 22 | "stop_error": "❌ Error! ¡No se puede detener la generación!", 23 | "response_link_format": "\n📄 {source_name}: {link}", 24 | "suggestion_format": "💡 {suggestion}", 25 | "suggestion_error": "❌ ¡Se puede utilizar la solicitud propuesta solo del último mensaje!", 26 | "users_read_error": "❌ ¡Error al leer o analizar la lista de usuarios!", 27 | "users_admin": "¿Baneado? ¿Admin? 
Idioma Módulo ID Nombre - Solicitudes\n\n{users_data}", 28 | "users_total_stats": "Usuarios: {users_num}, prohibidos: {banned_num}, administradores: {admins_num}", 29 | "restarting": "🙏 Reinicio en curso...\nEspera por favor", 30 | "restarting_done": "{reload_logs}\n✅ El reinicio se ha completado", 31 | "chat_cleared": "✅ Historial del chat borrado para {module_name}", 32 | "clear_error": "❌ Error al borrar el historial del chat!\n\n{error_text}", 33 | "clear_select_module": "Selecciona el módulo cuyo historial de chat deseas borrar,\no ignora este mensaje", 34 | "module_select_module": "Tu módulo actual: {current_module}\nSelecciona el módulo para chatear,\no ignora este mensaje", 35 | "user_cooldown_error": "❌ Espera {time_formatted} antes de enviar una solicitud al módulo {module_name}!", 36 | "hours": "h", 37 | "minutes": "m", 38 | "seconds": "s", 39 | "ban_message_admin": "✅ Usuario baneado: {banned_user}\n\nRazón: {ban_reason}", 40 | "ban_no_user_id": "❌ Error! Especifica el user_id", 41 | "ban_message_user": "❌ No estás en la lista blanca o has sido baneado!\n\nRazón: {ban_reason}", 42 | "ban_reason_default": "Razón no especificada", 43 | "unban_message_admin": "✅ Usuario desbaneado: {unbanned_user}", 44 | "broadcast_no_message": "❌ No hay mensaje para difundir!", 45 | "broadcast": "📢 {message}", 46 | "broadcast_initiated": "⏳ Difusión en curso... ¡Espera y no envíes nuevos mensajes!", 47 | "broadcast_done": "✅ Difusión completada! 
Mensajes enviados a usuarios:\n{broadcast_ok_users}", 48 | "style_changed": "✅ Estilo de conversación cambiado a {changed_style}", 49 | "style_change_error": "❌ Error al cambiar el estilo de conversación!\n\n{error_text}", 50 | "style_select": "Tu estilo de conversación: {current_style}\nSelecciona un nuevo estilo de conversación de Bing,\no ignora este mensaje", 51 | "style_precise": "📏 Preciso", 52 | "style_balanced": "⚖️ Equilibrado", 53 | "style_creative": "🎨 Creativo", 54 | "model_changed": "✅ El modelo del módulo {module_name} se cambió a {changed_model}", 55 | "model_select": "Su modelo del módulo {module_name}: {current_model}\nSeleccione el nuevo modelo del módulo {module_name},\no ignore este mensaje", 56 | "model_change_error": "❌ Error al cambiar el modelo.\n\n{error_text}", 57 | "model_no_models": "❌ No puede cambiar el modelo del módulo actual.", 58 | "button_model_change": "⚙️ Cambiar modelo", 59 | "button_stop_generating": "🛑 Detener la generación", 60 | "button_continue": "⏩ Continuar", 61 | "button_regenerate": "🔄 Regenerar", 62 | "button_clear": "🧹 Limpiar historial", 63 | "button_module": "↕️ Cambiar módulo", 64 | "button_style_change": "⚙️ Cambiar estilo", 65 | "modules": { 66 | "lmao_chatgpt": { 67 | "icon": "💬", 68 | "name": "ChatGPT (LMAO API)" 69 | }, 70 | "lmao_ms_copilot": { 71 | "icon": "🟦", 72 | "name": "MS Copilot (LMAO API)" 73 | }, 74 | "chatgpt": { 75 | "icon": "💬", 76 | "name": "ChatGPT" 77 | }, 78 | "dalle": { 79 | "icon": "🖼", 80 | "name": "DALL-E" 81 | }, 82 | "ms_copilot": { 83 | "icon": "🔵", 84 | "name": "MS Copilot" 85 | }, 86 | "ms_copilot_designer": { 87 | "icon": "🎨", 88 | "name": "MS Copilot designer" 89 | }, 90 | "gemini": { 91 | "icon": "♊", 92 | "name": "Gemini" 93 | }, 94 | "groq": { 95 | "icon": "🔴", 96 | "name": "Groq" 97 | } 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /langs/tof.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "language_name": "🏴‍☠️ Тофийсковый", 3 | "language_icon": "🏴‍☠", 4 | "language_select_error": "❌ Обшипка выборга языкба!\n\n{error_text}", 5 | "language_select": "Пожамлуста, выберитеб языкб интерфейсба", 6 | "language_changed": "🏴‍☠️ Вы вымбрали тофийский язык\n\nВы можитеб изменитьб язык в любои время прописаф командуб /lang", 7 | "start_message": "Преветб, 👋 тчумба! 👋\n\n📄 GPT-Telegramus вербпсии {version}\n\n💜 Разработбка: Ферн (F3RNI)\n💻 Соавторы:\n🤍 Keepalove (Azarell) (Sprav04ka)\n🤍 Hanssen\n🤍 https://github.com/F33RNI/GPT-Telegramus#-contributors\n\n😺 Странитса GitHub: https://github.com/F33RNI/GPT-Telegramus\n\n🎵 Подержите проектб, купив моюб музыкбу: https://f3rni.bandcamp.com", 8 | "help_message": "📄 /start - Приветствене сообщенне и версия ботба\n❓ /help - Показатб этоб сообщенне\n↕ /module - Изменитб модуль для тчата\n🧹 /clear - Очиститб истоию чатба\n🌎 /lang - Изменитб языкб\n🆔 /chatid - Показатб ваш chat_id\n\nТеперб, чтобы начать, начырыкайте мне любое сообщенне 💬", 9 | "help_message_admin": "Команды админаб:\n\n💬 /queue - Показатб очередб запрософ\n🔃 /restart [мобдулб, обционально] - Перезапускб мобдуля (или фсех), перезагрузбка конфигурациеф, языкоф и командоф\n👤 /users - Показатб списох всех польбзователей\n🔨 /ban [reason] - Заблокировать полбзоватебля по его id с указанием причиныб (оптсыоинально)\n🔓 /unban - Разблокироватб полбзоватебля по id\n📢 /broadcast - Начырыкатб текбстовое сообщенне всем, кроме заблокированне пользователе", 10 | "empty_request_module_changed": "✅ Модулб был изменен на {module_name}\nТеперб начырыкайте мне ваш запросб в виде сообщення", 11 | "media_group_response": "Нарисованне кортинбки по зомпросуб \"{request_text}\"", 12 | "permissions_deny": "❌ Вам низзя, у вас нет правб на енту команбду!", 13 | "queue_overflow": "Очередб заполнена. 
Попробувайте пожже ⏳", 14 | "queue_empty": "🗑 Очередб пуста", 15 | "queue_accepted": "✅ Запрос к {module_name} модулю добавленб в очередб.\nПозиция в очереди: {queue_size}/{queue_max} ⏳", 16 | "response_error": "❌ Ошибка: {error_text}\n\nОбчистити имсториюб и сделойте есчо один зопрос или повторибте попытку пожже", 17 | "empty_message": "⚠️ Пустовое сообщенне! Смотрибте логи для подробновой информаци", 18 | "regenerate_error_not_last": "❌ Ошибка! Можбно сгенерироватб заново толбко последневый запросб!", 19 | "regenerate_error_empty": "❌ Ошибка! Пустовый запрос!", 20 | "continue_error_not_last": "❌ Ошибка! Можноб продолжить только последневый запрос!", 21 | "stop_error_not_last": "❌ Ошибка! Можноб прерватб толбко последневый запрос!", 22 | "stop_error": "❌ Ошибка! Невозможноб прекратитб дегенерацию!", 23 | "response_link_format": "\n📄 {source_name}: {link}", 24 | "suggestion_format": "💡 {suggestion}", 25 | "suggestion_error": "❌ Возможбно исполбзовац пребложенныйб зампросб токо из последнево сообщення!", 26 | "users_read_error": "❌ Ашипка чтения или парсенга списбка полбзователеф!", 27 | "users_admin": "Забананенб? Админ? 
Языкб Мобдулб ID Имбя - Запрософ\n\n{users_data}", 28 | "users_total_stats": "Побзовабтели: {users_num}, некорошие: {banned_num}, одменовое: {admins_num}", 29 | "restarting": "🙏 Выполняется перезампуск...\nПожамлеста, подождите чучутб", 30 | "restarting_done": "{reload_logs}\n✅ Перезампуск завершен", 31 | "chat_cleared": "✅ История чатба была очищена для {module_name}", 32 | "clear_error": "❌ Ошибка очистбки истории чата!\n\n{error_text}", 33 | "clear_select_module": "Выберимте модуль, историю чатба которогоб хотитеб очиститб\nИли проигнорируйте енто сообщенне", 34 | "module_select_module": "Текущий модуль: {current_module}\nВыберите модуль для чата\nИли проигнорируйте это сообщенне", 35 | "user_cooldown_error": "❌ Пожамлусто пождите есчо {time_formatted}, прежде чем черыкатб сообщчэние к {module_name}", 36 | "hours": "ч", 37 | "minutes": "м", 38 | "seconds": "с", 39 | "ban_message_admin": "✅ Заблокирован юзер: {banned_user}\n\nПримчина: {ban_reason}", 40 | "ban_no_user_id": "❌ Ошибка! Пожалеста, укажите user_id", 41 | "ban_message_user": "❌ Вы не находитясб в белом списке или вы были забананены!\n\nПричина: {ban_reason}", 42 | "ban_reason_default": "Надоб указатб причину", 43 | "unban_message_admin": "✅ Разблокирован пользователь: {unbanned_user}", 44 | "broadcast_no_message": "❌ Нет сообщення для вещання!", 45 | "broadcast": "📢 {message}", 46 | "broadcast_initiated": "⏳ Bесчание в процесбе... Пожамлусто пождите и не черыкайти нововых собщениев!", 47 | "broadcast_done": "✅ Весчание покончено! 
Собщчення начерыканы юзверям:\n{broadcast_ok_users}", 48 | "style_changed": "✅ Стилб общення изменен на {changed_style}", 49 | "style_change_error": "❌ Ошибка смены стиля общення!\n\n{error_text}", 50 | "style_select": "Текущий стиль общення: {current_style}\nВымберитеб новый стилб общення девианта,\nИли проигнорируйте это сообщенне", 51 | "style_precise": "📏 Точновый", 52 | "style_balanced": "⚖️ Сбалансированновый", 53 | "style_creative": "🎨 Творческовый", 54 | "model_changed": "✅ Модельб из момдуля {module_name} изминиа на {changed_model}", 55 | "model_select": "Вамша модельб момдуля {module_name}: {current_model}\nВыберитеп нововую модельб момдуля {module_name},\nили проигнорировайти енто сообщенне", 56 | "model_change_error": "❌ Ошипка при изминенни момдели!\n\n{error_text}", 57 | "model_no_models": "❌ Вы ни можоете изменитьб момдель текумщевого модуля!", 58 | "button_model_change": "⚙️ Изменитб момделб", 59 | "button_stop_generating": "🛑 Прекратитб дегенерабцию", 60 | "button_continue": "⏩ Продолжитб", 61 | "button_regenerate": "🔄 Дегенерироватб", 62 | "button_clear": "🧹 Очиститб ибстомрию", 63 | "button_module": "↕️ Сменить модулб", 64 | "button_style_change": "⚙️ Сменитб стилб", 65 | "modules": { 66 | "lmao_chatgpt": { 67 | "icon": "💬", 68 | "name": "ЖБТ-ЛМАО" 69 | }, 70 | "lmao_ms_copilot": { 71 | "icon": "🟦", 72 | "name": "Девиантнутый (ЛМАО)" 73 | }, 74 | "chatgpt": { 75 | "icon": "💬", 76 | "name": "ЖБТ" 77 | }, 78 | "dalle": { 79 | "icon": "🖼", 80 | "name": "Каляка малака" 81 | }, 82 | "ms_copilot": { 83 | "icon": "🔵", 84 | "name": "Девиантнутый" 85 | }, 86 | "ms_copilot_designer": { 87 | "icon": "🎨", 88 | "name": "Маляка каляка" 89 | }, 90 | "gemini": { 91 | "icon": "♊", 92 | "name": "Близнетсы" 93 | }, 94 | "groq": { 95 | "icon": "🔴", 96 | "name": "Грокб" 97 | } 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /langs/fra.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "language_name": "🇫🇷 Français", 3 | "language_icon": "🇫🇷", 4 | "language_select_error": "❌ Erreur de sélection de language!\n\n{error_text}", 5 | "language_select": "Veillez sélectionner la langue de l'interface", 6 | "language_changed": "🇫🇷 Vous avez choisi le Français\n\nVous pouvez changer de langue a tout moment en écrivant la commande /lang", 7 | "start_message": "Salut, 👋 choomba! 👋\n\n📄 GPT-Telegramus version {version}\n\n💜 Author: Fern (F3RNI)\n💻 Contributeurs:\n🤍 Keepalove (Azarell) (Sprav04ka)\n🤍 Hanssen\n🤍 https://github.com/F33RNI/GPT-Telegramus#-contributors\n\n😺 Page GitHub: https://github.com/F33RNI/GPT-Telegramus\n\n🎵 Soutenir le projet en achetant ma musique: https://f3rni.bandcamp.com", 8 | "help_message": "📄 /start - Message de bienvenue et version du bot\n❓ /help - Afficher ce message\n↕️ /module - Modifier le module de chat avec\n🧹 /clear - Effacer l'historique du chat\n🌎 /lang - Changer de langue\n🆔 /chatid - Afficher votre chat_id\n\nMaintenant, Pour commencer, envoyez-moi un message 💬", 9 | "help_message_admin": "Commandes de l'administrateur:\n\n💬 /queue - Afficher la file d'attente des requêtes\n🔃 /restart [module, optionnel] - Redémarrer le module (ou tous les modules), recharger la configuration, les langues et les commandes\n👤 /users - Afficher la liste de tous les utilisateurs\n🔨 /ban [reason] - Bloquer un utilisateur par son id avec une raison (facultatif)\n🔓 /unban - Debloquer un utilisateur par son id\n📢 /broadcast - Envoyer un message à tous les utilisateurs, à l'exception des utilisateurs bloqués", 10 | "empty_request_module_changed": "✅ Votre module a été modifié en {module_name}\nEnvoyez-moi maintenant votre requête par message", 11 | "media_group_response": "Images générées pour la requête \"{request_text}\"", 12 | "permissions_deny": "❌ Vous n'avez pas la permission d'utiliser cette commande!", 13 | "queue_overflow": "File d'attente est 
saturée. Réessayer plus tard ⏳", 14 | "queue_empty": "🗑 File d'attente est vide", 15 | "queue_accepted": "✅ Requête au {module_name} module ajoutée à la file d'attente.\nPosition: {queue_size}/{queue_max} ⏳", 16 | "response_error": "❌ Erreur: {error_text}\n\nEffacer l'historique et faire une autre requête ou réessayer plus tard", 17 | "empty_message": "⚠️ Message vide! Voir les logs pour plus de détails", 18 | "regenerate_error_not_last": "❌ Erreur! Seule la dernière demande peut être re-générée!", 19 | "regenerate_error_empty": "❌ Erreur! Requête vide!", 20 | "continue_error_not_last": "❌ Erreur! On peut continuer la derniere requête!", 21 | "stop_error_not_last": "❌ Erreur! On ne peut interrompre que la dernière requête!", 22 | "stop_error": "❌ Erreur! Impossible d'arrêter la génération!", 23 | "response_link_format": "\n📄 {source_name}: {link}", 24 | "suggestion_format": "💡 {suggestion}", 25 | "suggestion_error": "❌ Il est possible d'utiliser uniquement la requête suggérée dans le dernier message!", 26 | "users_read_error": "❌ Erreur de lecture ou d'analyse de la liste des utilisateurs!", 27 | "users_admin": "Banni? Admin? 
Nom du language du Module ID - requête\n\n{users_data}", 28 | "users_total_stats": "Utilisateurs: {users_num}, bloqués: {banned_num}, Administrateurs: {admins_num}", 29 | "restarting": "🙏 Un redémarrage en cours...\nVeuillez patienter", 30 | "restarting_done": "{reload_logs}\n✅ Redémarrage terminé", 31 | "chat_cleared": "✅ L'historique du chat a été effacé pour {module_name}", 32 | "clear_error": "❌ Erreur d'effacement de l'historique du chat!\n\n{error_text}", 33 | "clear_select_module": "Sélectionnez le module dont vous souhaitez effacer l'historique du chat\nOu ignorer ce message", 34 | "module_select_module": "Module actuel: {current_module}\nSélectionner un module pour le chat\nOu ignorer ce message", 35 | "user_cooldown_error": "❌ Veuillez patienter {time_formatted}, avant d'envoyer une requête à {module_name}", 36 | "hours": "h", 37 | "minutes": "m", 38 | "seconds": "s", 39 | "ban_message_admin": "✅ Utilisateur bloqué: {banned_user}\n\nRaison: {ban_reason}", 40 | "ban_no_user_id": "❌ Erreur! Veuillez préciser user_id", 41 | "ban_message_user": "❌ Vous n'êtes pas sur la liste blanche ou vous avez été bloqué!\n\nRaison: {ban_reason}", 42 | "ban_reason_default": "Raison non précisée", 43 | "unban_message_admin": "✅ Utilisateur débloqué: {unbanned_user}", 44 | "broadcast_no_message": "❌ Pas de message à charger!", 45 | "broadcast": "📢 {message}", 46 | "broadcast_initiated": "⏳ Chargement en cours... Veuillez patienter et ne pas envoyer de nouveaux messages!", 47 | "broadcast_done": "✅ Le chargement est terminée ! 
Messages envoyés aux utilisateurs:\n{broadcast_ok_users}", 48 | "style_changed": "✅ Le style de conversation est passé à {changed_style}", 49 | "style_change_error": "❌ Erreur changement de style de conversation!\n\n{error_text}", 50 | "style_select": "Votre style de conversion: {current_style}\nChoisir un nouveau style de conversation Bing,\nOu ignorer ce message", 51 | "style_precise": "📏 Précision", 52 | "style_balanced": "⚖️ Équilibré", 53 | "style_creative": "🎨 Créative", 54 | "model_changed": "✅ Le modèle du module {module_name} a été changé en {changed_model}", 55 | "model_select": "Votre modèle du module {module_name} : {current_model}\nSélectionnez le nouveau modèle du module {module_name},\nou ignorez ce message", 56 | "model_change_error": "❌ Erreur lors du changement de modèle !\n\n{error_text}", 57 | "model_no_models": "❌ Vous ne pouvez pas changer le modèle du module actuel !", 58 | "button_model_change": "⚙️ Changer de modèle", 59 | "button_stop_generating": "🛑 Arrêter de générer", 60 | "button_continue": "⏩ Continuer", 61 | "button_regenerate": "🔄 Régénérer", 62 | "button_clear": "🧹 Effacer l'historique", 63 | "button_module": "↕️ Change de module", 64 | "button_style_change": "⚙️ Changer de style", 65 | "modules": { 66 | "lmao_chatgpt": { 67 | "icon": "💬", 68 | "name": "ChatGPT (LMAO API)" 69 | }, 70 | "lmao_ms_copilot": { 71 | "icon": "🟦", 72 | "name": "MS Copilot (LMAO API)" 73 | }, 74 | "chatgpt": { 75 | "icon": "💬", 76 | "name": "ChatGPT" 77 | }, 78 | "dalle": { 79 | "icon": "🖼", 80 | "name": "DALL-E" 81 | }, 82 | "ms_copilot": { 83 | "icon": "🔵", 84 | "name": "MS Copilot" 85 | }, 86 | "ms_copilot_designer": { 87 | "icon": "🎨", 88 | "name": "MS Copilot designer" 89 | }, 90 | "gemini": { 91 | "icon": "♊", 92 | "name": "Gemini" 93 | }, 94 | "groq": { 95 | "icon": "🔴", 96 | "name": "Groq" 97 | } 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /queue_container_helpers.py: 
def queue_to_list(
    request_response_queue: multiprocessing.Queue,
) -> "List[request_response_container.RequestResponseContainer]":
    """Retrieves all elements from queue and returns them as list

    The queue is fully drained and then refilled with the same containers, so the
    queue's contents are left unchanged by this call. Duplicate containers
    (by equality) are collapsed into a single list entry.

    NOTE: THIS FUNCTION MUST BE CALLED INSIDE LOCK

    Args:
        request_response_queue (multiprocessing.Queue): multiprocessing Queue to convert to list

    Returns:
        List[Any]: list of queue elements (containers)
    """
    queue_list = []

    # Drain the entire queue into a list
    # NOTE: empty() is used instead of qsize() because multiprocessing.Queue.qsize()
    # raises NotImplementedError on some platforms (e.g. macOS); this also matches
    # the drain style used by remove_container_from_queue()
    while not request_response_queue.empty():
        container = request_response_queue.get()
        if container not in queue_list:
            queue_list.append(container)

    # Put every container back so the queue is left intact for the caller
    for container_ in queue_list:
        request_response_queue.put(container_)

    # Return list
    return queue_list
container_id: int, 60 | ) -> request_response_container.RequestResponseContainer | None: 61 | """Retrieves request_response_container from queue by ID without removing it 62 | 63 | Args: 64 | request_response_queue (multiprocessing.Queue): multiprocessing Queue to get container from 65 | lock (multiprocessing.Lock): multiprocessing lock to prevent errors while updating the queue 66 | container_id: requested container ID 67 | 68 | Returns: 69 | RequestResponseContainer or None: container if exists, otherwise None 70 | """ 71 | 72 | def _get_container_from_queue() -> request_response_container.RequestResponseContainer | None: 73 | # Convert entire queue to list 74 | queue_list = queue_to_list(request_response_queue) 75 | 76 | # Search container in list 77 | container = None 78 | for container__ in queue_list: 79 | if container__.id == container_id: 80 | container = container__ 81 | return container 82 | 83 | # Is lock available? 84 | if lock is not None: 85 | # Use it 86 | with lock: 87 | container_ = _get_container_from_queue() 88 | return container_ 89 | 90 | # Get without lock 91 | else: 92 | return _get_container_from_queue() 93 | 94 | 95 | def put_container_to_queue( 96 | request_response_queue: multiprocessing.Queue, 97 | lock: multiprocessing.Lock, 98 | request_response_container_: request_response_container.RequestResponseContainer, 99 | ) -> int: 100 | """Generates unique container ID (if needed) and puts container to the queue (deletes previous one if exists) 101 | 102 | Args: 103 | request_response_queue (multiprocessing.Queue): Multiprocessing Queue into which put the container 104 | lock (multiprocessing.Lock): Multiprocessing lock to prevent errors while updating the queue 105 | request_response_container_: Container to put into the queue 106 | 107 | Returns: 108 | container ID: container ID 109 | """ 110 | 111 | def _put_container_to_queue() -> int: 112 | # Delete previous one 113 | if request_response_container_.id >= 0: 114 | 
remove_container_from_queue(request_response_queue, None, request_response_container_.id) 115 | 116 | # Convert queue to lost 117 | queue_list = queue_to_list(request_response_queue) 118 | 119 | # Check if we need to generate a new ID for the container 120 | if request_response_container_.id < 0: 121 | # Generate unique ID 122 | while True: 123 | container_id = random.randint(0, 2147483647) 124 | unique = True 125 | for container in queue_list: 126 | if container.id == container_id: 127 | unique = False 128 | break 129 | if unique: 130 | break 131 | 132 | # Set container id 133 | request_response_container_.id = container_id 134 | 135 | # Add our container to the queue 136 | request_response_queue.put(request_response_container_) 137 | 138 | return request_response_container_.id 139 | 140 | # Is lock available? 141 | if lock is not None: 142 | # Use it 143 | with lock: 144 | id_ = _put_container_to_queue() 145 | return id_ 146 | 147 | # Put without lock 148 | else: 149 | return _put_container_to_queue() 150 | 151 | 152 | def remove_container_from_queue( 153 | request_response_queue: multiprocessing.Queue, lock: multiprocessing.Lock, container_id: int 154 | ) -> bool: 155 | """Tries to remove container by specific ID from the queue 156 | 157 | Args: 158 | request_response_queue (multiprocessing.Queue): multiprocessing Queue to remove container from 159 | lock (multiprocessing.Lock): multiprocessing lock to prevent errors while updating the queue 160 | container_id (int): ID of container to remove from the queue 161 | 162 | Returns: 163 | bool: True if removed successfully, False if not 164 | """ 165 | 166 | def remove_container_from_queue_() -> bool: 167 | # Convert entire queue to list 168 | queue_list = [] 169 | while not request_response_queue.empty(): 170 | queue_list.append(request_response_queue.get()) 171 | 172 | # Flag to return 173 | removed = False 174 | 175 | # Convert list back to the queue without our container 176 | for container_ in queue_list: 177 | 
if container_.id != container_id: 178 | request_response_queue.put(container_) 179 | else: 180 | removed = True 181 | 182 | return removed 183 | 184 | # Is lock available? 185 | if lock is not None: 186 | # Use it 187 | with lock: 188 | removed_ = remove_container_from_queue_() 189 | return removed_ 190 | 191 | # Remove without lock 192 | else: 193 | return remove_container_from_queue_() 194 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2023-2024 Fern Lane 3 | 4 | This file is part of the GPT-Telegramus distribution 5 | (see ) 6 | 7 | This program is free software: you can redistribute it and/or modify 8 | it under the terms of the GNU Affero General Public License as 9 | published by the Free Software Foundation, either version 3 of the 10 | License, or (at your option) any later version. 11 | 12 | This program is distributed in the hope that it will be useful, 13 | but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | GNU Affero General Public License for more details. 16 | 17 | You should have received a copy of the GNU Affero General Public License 18 | along with this program. If not, see . 
19 | """ 20 | 21 | import argparse 22 | from ctypes import c_double 23 | import json 24 | import logging 25 | import multiprocessing 26 | import os 27 | import sys 28 | from typing import Dict 29 | 30 | from _version import __version__ 31 | import logging_handler 32 | import messages 33 | import users_handler 34 | import queue_handler 35 | import bot_handler 36 | import module_wrapper_global 37 | 38 | # Default config file 39 | CONFIG_FILE = "config.json" 40 | CONFIG_COMPATIBLE_VERSIONS = [5, 6, 7, 8] 41 | 42 | 43 | def load_and_parse_config(config_file: str) -> Dict: 44 | """Loads and parses config from main file and from module's config files 45 | This is separate because of /restart command 46 | 47 | Args: 48 | config_file (str): path to main config file 49 | 50 | Raises: 51 | Exception: loading / parsing / version error 52 | 53 | Returns: 54 | Dict: loaded and parsed config 55 | """ 56 | logging.info(f"Loading config file {config_file}") 57 | with open(config_file, "r", encoding="utf-8") as file: 58 | config = json.loads(file.read()) 59 | 60 | # Check config version 61 | config_version = config.get("config_version") 62 | if config_version is None: 63 | raise Exception("No config_version key! Please update your config file") 64 | if not config_version in CONFIG_COMPATIBLE_VERSIONS: 65 | raise Exception( 66 | f"Your config version ({config_version}) is not compatible! " 67 | f"Compatible versions: {', '.join(str(version) for version in CONFIG_COMPATIBLE_VERSIONS)}" 68 | ) 69 | if config_version < max(CONFIG_COMPATIBLE_VERSIONS): 70 | logging.warning(f"You config version {config_version} < {max(CONFIG_COMPATIBLE_VERSIONS)}! 
Please update it") 71 | 72 | # List of enabled modules 73 | enabled_modules = config.get("modules").get("enabled") 74 | if len(enabled_modules) == 0: 75 | raise Exception("No modules enabled") 76 | logging.info(f"Enabled modules: {', '.join(enabled_modules)}") 77 | 78 | # Load config of enabled modules and merge it into global config 79 | module_configs_dir = config.get("files").get("module_configs_dir") 80 | logging.info(f"Parsing {module_configs_dir} directory") 81 | for file in os.listdir(module_configs_dir): 82 | # Parse only .json files 83 | if file.lower().endswith(".json"): 84 | # Check if need to load it 85 | module_name_from_file = os.path.splitext(os.path.basename(file))[0] 86 | if module_name_from_file not in enabled_modules: 87 | continue 88 | 89 | # Parse and merge 90 | logging.info(f"Adding config of {module_name_from_file} module") 91 | with open(os.path.join(module_configs_dir, file), "r", encoding="utf-8") as file_: 92 | module_config = json.loads(file_.read()) 93 | config[module_name_from_file] = module_config 94 | 95 | return config 96 | 97 | 98 | def parse_args() -> argparse.Namespace: 99 | """Parses cli arguments 100 | 101 | Returns: 102 | argparse.Namespace: parsed arguments 103 | """ 104 | parser = argparse.ArgumentParser() 105 | parser.add_argument( 106 | "-c", 107 | "--config", 108 | type=str, 109 | default=os.getenv("TELEGRAMUS_CONFIG_FILE", CONFIG_FILE), 110 | required=False, 111 | help=f"path to config.json file (Default: {os.getenv('TELEGRAMUS_CONFIG_FILE', CONFIG_FILE)})", 112 | ) 113 | parser.add_argument("-v", "--version", action="version", version=__version__) 114 | return parser.parse_args() 115 | 116 | 117 | def main(): 118 | """Main entry""" 119 | # Multiprocessing fix for Windows 120 | if sys.platform.startswith("win"): 121 | multiprocessing.freeze_support() 122 | 123 | # Parse arguments 124 | args = parse_args() 125 | 126 | # Initialize logging and start logging listener as process 127 | logging_handler_ = 
logging_handler.LoggingHandler() 128 | logging_handler_process = multiprocessing.Process(target=logging_handler_.configure_and_start_listener) 129 | logging_handler_process.start() 130 | logging_handler.worker_configurer(logging_handler_.queue, log_test_message=False) 131 | 132 | # Log software version and GitHub link 133 | logging.info(f"GPT-Telegramus version: {__version__}") 134 | logging.info("https://github.com/F33RNI/GPT-Telegramus") 135 | 136 | modules = {} 137 | 138 | # Catch errors during initialization process 139 | initialization_ok = False 140 | try: 141 | # Load config 142 | config = multiprocessing.Manager().dict(load_and_parse_config(args.config)) 143 | 144 | # Create conversations and user images dirs (it's not necessary but just in case) 145 | conversations_dir = config.get("files").get("conversations_dir") 146 | if not os.path.exists(conversations_dir): 147 | logging.info(f"Creating {conversations_dir} directory") 148 | os.makedirs(conversations_dir) 149 | user_images_dir = config.get("files").get("user_images_dir") 150 | if not os.path.exists(user_images_dir): 151 | logging.info(f"Creating {user_images_dir} directory") 152 | os.makedirs(user_images_dir) 153 | 154 | # Initialize users and messages handlers 155 | users_handler_ = users_handler.UsersHandler(config) 156 | messages_ = messages.Messages(users_handler_) 157 | 158 | # Load messages 159 | messages_.langs_load(config.get("files").get("messages_dir")) 160 | 161 | web_cooldown_timer = multiprocessing.Value(c_double, 0.0) 162 | web_request_lock = multiprocessing.Lock() 163 | 164 | # modules = {} is a dictionary of ModuleWrapperGlobal (each enabled module) 165 | # { 166 | # "module_name": ModuleWrapperGlobal, 167 | # ... 
168 | # } 169 | for module_name in config.get("modules").get("enabled"): 170 | logging.info(f"Trying to load and initialize {module_name} module") 171 | use_web = ( 172 | module_name.startswith("lmao_") 173 | and module_name in config.get("modules").get("lmao_web_for_modules", []) 174 | and "lmao_web_api_url" in config.get("modules") 175 | ) 176 | try: 177 | module = module_wrapper_global.ModuleWrapperGlobal( 178 | module_name, 179 | config, 180 | messages_, 181 | users_handler_, 182 | logging_handler_.queue, 183 | use_web, 184 | web_cooldown_timer=web_cooldown_timer, 185 | web_request_lock=web_request_lock, 186 | ) 187 | modules[module_name] = module 188 | except Exception as e: 189 | logging.error(f"Error initializing {module_name} module: {e} Module will be ignored") 190 | 191 | # Initialize main classes 192 | queue_handler_ = queue_handler.QueueHandler( 193 | config, messages_, users_handler_, logging_handler_.queue, None, modules 194 | ) 195 | bot_handler_ = bot_handler.BotHandler( 196 | config, 197 | args.config, 198 | messages_, 199 | users_handler_, 200 | logging_handler_.queue, 201 | queue_handler_, 202 | modules, 203 | web_cooldown_timer, 204 | web_request_lock, 205 | ) 206 | queue_handler_.prevent_shutdown_flag = bot_handler_.prevent_shutdown_flag 207 | 208 | # At least, initialization did not raised any error 209 | initialization_ok = True 210 | except Exception as e: 211 | logging.error("Initialization error", exc_info=e) 212 | 213 | # Finally, start queue handler and bot polling (blocking) 214 | if initialization_ok: 215 | queue_handler_.start_processing_loop() 216 | bot_handler_.start_bot() 217 | 218 | # Stop queue handler 219 | queue_handler_.stop_processing_loop() 220 | 221 | # Close (stop) each module 222 | for module_name, module in modules.items(): 223 | logging.info(f"Trying to close and unload {module_name} module") 224 | try: 225 | module.on_exit() 226 | except Exception as e: 227 | logging.error(f"Error closing {module_name} module", 
exc_info=e) 228 | 229 | # Finally, stop logging loop 230 | logging.info("GPT-Telegramus exited") 231 | logging_handler_.queue.put(None) 232 | 233 | 234 | if __name__ == "__main__": 235 | main() 236 | -------------------------------------------------------------------------------- /groq_module.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2023-2024 Fern Lane 3 | 4 | This file is part of the GPT-Telegramus distribution 5 | (see ) 6 | 7 | This program is free software: you can redistribute it and/or modify 8 | it under the terms of the GNU Affero General Public License as 9 | published by the Free Software Foundation, either version 3 of the 10 | License, or (at your option) any later version. 11 | 12 | This program is distributed in the hope that it will be useful, 13 | but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | GNU Affero General Public License for more details. 16 | 17 | You should have received a copy of the GNU Affero General Public License 18 | along with this program. If not, see . 
19 | """ 20 | 21 | import time 22 | import uuid 23 | import json 24 | import os 25 | import multiprocessing 26 | import ctypes 27 | import logging 28 | from typing import Dict 29 | 30 | from groq import Groq 31 | import httpx 32 | 33 | import messages 34 | import users_handler 35 | from async_helper import async_helper 36 | from bot_sender import send_message_async 37 | from request_response_container import RequestResponseContainer 38 | 39 | # Self name 40 | _NAME = "groq" 41 | 42 | 43 | class GroqModule: 44 | def __init__( 45 | self, 46 | config: Dict, 47 | messages_: messages.Messages, 48 | users_handler_: users_handler.UsersHandler, 49 | ) -> None: 50 | """Initializes class variables (must be done in main process) 51 | 52 | Args: 53 | config (Dict): global config 54 | messages_ (messages.Messages): initialized messages handler 55 | users_handler_ (users_handler.UsersHandler): initialized users handler 56 | """ 57 | self.config = config 58 | self.messages = messages_ 59 | self.users_handler = users_handler_ 60 | 61 | # All variables here must be multiprocessing 62 | self.processing_flag = multiprocessing.Value(ctypes.c_bool, False) 63 | self._last_request_time = multiprocessing.Value(ctypes.c_double, 0.0) 64 | 65 | # Don't use this variable outside the module's process 66 | self._model = None 67 | 68 | def initialize(self) -> None: 69 | """Initializes Groq module using official Groq API: 70 | This method must be called from another process 71 | 72 | Raises: 73 | Exception: initialization error 74 | """ 75 | # Internal variables for current process 76 | self._model = None 77 | try: 78 | self.processing_flag.value = False 79 | 80 | # Get module's config 81 | module_config = self.config.get(_NAME) 82 | 83 | # Use proxy 84 | if module_config.get("proxy") and module_config.get("proxy") != "auto": 85 | proxy = module_config.get("proxy") 86 | logging.info(f"Initializing Groq module with proxy {proxy}") 87 | self._model = Groq( 88 | api_key=module_config.get("api_key"), 
89 | base_url=module_config.get("base_url"), 90 | http_client=httpx.Client(proxies=proxy), 91 | ) 92 | else: 93 | logging.info("Initializing Groq module without proxy") 94 | self._model = Groq(api_key=module_config.get("api_key"), base_url=module_config.get("base_url")) 95 | 96 | logging.info("Groq module initialized") 97 | 98 | # Reset module and re-raise the error 99 | except Exception as e: 100 | self._model = None 101 | raise e 102 | 103 | def process_request(self, request_response: RequestResponseContainer) -> None: 104 | """Processes request to Groq 105 | 106 | Args: 107 | request_response (RequestResponseContainer): container from the queue 108 | 109 | Raises: 110 | Exception: in case of error 111 | """ 112 | conversations_dir = self.config.get("files").get("conversations_dir") 113 | conversation_id = self.users_handler.get_key(request_response.user_id, f"{_NAME}_conversation_id") 114 | model_name = self.users_handler.get_key( 115 | request_response.user_id, f"{_NAME}_model", self.config.get(_NAME).get("model_default") 116 | ) 117 | 118 | # Check if we are initialized 119 | if self._model is None: 120 | logging.error("Groq not initialized") 121 | request_response.response_text = self.messages.get_message( 122 | "response_error", user_id=request_response.user_id 123 | ).format(error_text="Groq module not initialized") 124 | request_response.error = True 125 | self.processing_flag.value = False 126 | return 127 | 128 | try: 129 | # Set flag that we are currently processing request 130 | self.processing_flag.value = True 131 | 132 | # Get module's config 133 | module_config = self.config.get(_NAME) 134 | 135 | # Cool down 136 | if time.time() - self._last_request_time.value <= module_config.get("user_cooldown_seconds"): 137 | time_to_wait = module_config.get("user_cooldown_seconds") - ( 138 | time.time() - self._last_request_time.value 139 | ) 140 | logging.warning(f"Too frequent requests. 
Waiting {time_to_wait} seconds...") 141 | time.sleep(self._last_request_time.value + module_config.get("user_cooldown_seconds") - time.time()) 142 | self._last_request_time.value = time.time() 143 | 144 | # Check model name (just in case) 145 | if model_name not in self.config.get(_NAME).get("models"): 146 | logging.warning(f"No model named {model_name}. Using default one") 147 | model_name = self.config.get(_NAME).get("model_default") 148 | 149 | response = None 150 | conversation = [] 151 | 152 | # Try to load conversation 153 | conversation = _load_conversation(conversations_dir, conversation_id) or [] 154 | # Generate new random conversation ID 155 | if conversation_id is None: 156 | conversation_id = f"{_NAME}_{uuid.uuid4()}" 157 | 158 | conversation.append({"role": "user", "content": request_response.request_text}) 159 | 160 | logging.info("Asking Groq...") 161 | response = self._model.chat.completions.create(messages=conversation, model=model_name) 162 | 163 | request_response.response_text = response.choices[0].message.content 164 | role = response.choices[0].message.role 165 | 166 | # Try to save conversation 167 | conversation.append({"role": role, "content": request_response.response_text}) 168 | if not _save_conversation(conversations_dir, conversation_id, conversation): 169 | conversation_id = None 170 | 171 | # Save conversation ID 172 | self.users_handler.set_key(request_response.user_id, f"{_NAME}_conversation_id", conversation_id) 173 | 174 | finally: 175 | self.processing_flag.value = False 176 | 177 | # Finish 178 | async_helper(send_message_async(self.config.get("telegram"), self.messages, request_response, end=True)) 179 | 180 | def clear_conversation_for_user(self, user_id: int) -> None: 181 | """Clears conversation (chat history) for selected user""" 182 | # Get current conversation_id 183 | conversation_id = self.users_handler.get_key(user_id, f"{_NAME}_conversation_id") 184 | if conversation_id is None: 185 | return 186 | 187 | # Delete 
from API 188 | _delete_conversation(self.config.get("files").get("conversations_dir"), conversation_id) 189 | 190 | # Delete from user 191 | self.users_handler.set_key(user_id, f"{_NAME}_conversation_id", None) 192 | 193 | 194 | def _load_conversation(conversations_dir, conversation_id): 195 | """Tries to load conversation 196 | 197 | Args: 198 | conversations_dir (_type_): _description_ 199 | conversation_id (_type_): _description_ 200 | 201 | Returns: 202 | _type_: content of conversation, None if error 203 | """ 204 | logging.info(f"Loading conversation {conversation_id}") 205 | try: 206 | if conversation_id is None: 207 | logging.info("conversation_id is None. Skipping loading") 208 | return None 209 | 210 | conversation_file = os.path.join(conversations_dir, conversation_id + ".json") 211 | if os.path.exists(conversation_file): 212 | # Load from json file 213 | with open(conversation_file, "r", encoding="utf-8") as json_file: 214 | return json.load(json_file) 215 | else: 216 | logging.warning(f"File {conversation_file} not exists") 217 | 218 | except Exception as e: 219 | logging.warning(f"Error loading conversation {conversation_id}", exc_info=e) 220 | 221 | return None 222 | 223 | 224 | def _save_conversation(conversations_dir, conversation_id, conversation) -> bool: 225 | """Tries to save conversation without raising any error 226 | 227 | Args: 228 | conversations_dir (_type_): _description_ 229 | conversation_id (_type_): _description_ 230 | conversation (_type_): _description_ 231 | 232 | Returns: 233 | bool: True if no error 234 | """ 235 | logging.info(f"Saving conversation {conversation_id}") 236 | try: 237 | if conversation_id is None: 238 | logging.info("conversation_id is None. 
Skipping saving") 239 | return False 240 | 241 | # Create conversation dir 242 | if not os.path.exists(conversations_dir): 243 | logging.info(f"Creating {conversations_dir} directory") 244 | os.makedirs(conversations_dir) 245 | 246 | # Save as json file 247 | conversation_file = os.path.join(conversations_dir, conversation_id + ".json") 248 | with open(conversation_file, "w+", encoding="utf-8") as json_file: 249 | json.dump(conversation, json_file, indent=4, ensure_ascii=False) 250 | 251 | except Exception as e: 252 | logging.error(f"Error saving conversation {conversation_id}", exc_info=e) 253 | return False 254 | 255 | return True 256 | 257 | 258 | def _delete_conversation(conversations_dir, conversation_id) -> bool: 259 | """Tries to delete conversation without raising any error 260 | 261 | Args: 262 | conversations_dir (_type_): _description_ 263 | conversation_id (_type_): _description_ 264 | 265 | Returns: 266 | bool: True if no error 267 | """ 268 | logging.info(f"Deleting conversation {conversation_id}") 269 | # Delete conversation file if exists 270 | try: 271 | conversation_file = os.path.join(conversations_dir, conversation_id + ".json") 272 | if os.path.exists(conversation_file): 273 | logging.info(f"Deleting {conversation_file} file") 274 | os.remove(conversation_file) 275 | return True 276 | 277 | except Exception as e: 278 | logging.error( 279 | f"Error removing conversation file for conversation {conversation_id}", 280 | exc_info=e, 281 | ) 282 | 283 | return False 284 | -------------------------------------------------------------------------------- /google_ai_module.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2023-2024 Fern Lane, Hanssen 3 | 4 | This file is part of the GPT-Telegramus distribution 5 | (see ) 6 | 7 | This program is free software: you can redistribute it and/or modify 8 | it under the terms of the GNU Affero General Public License as 9 | published by the Free 
Software Foundation, either version 3 of the 10 | License, or (at your option) any later version. 11 | 12 | This program is distributed in the hope that it will be useful, 13 | but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | GNU Affero General Public License for more details. 16 | 17 | You should have received a copy of the GNU Affero General Public License 18 | along with this program. If not, see . 19 | """ 20 | 21 | import time 22 | import uuid 23 | import json 24 | import os 25 | import multiprocessing 26 | import ctypes 27 | import logging 28 | from typing import Dict 29 | 30 | # pylint: disable=no-name-in-module 31 | from google.generativeai.client import _ClientManager 32 | import google.generativeai as genai 33 | from google.ai.generativelanguage import Part, Content 34 | 35 | import messages 36 | import users_handler 37 | from async_helper import async_helper 38 | from bot_sender import send_message_async 39 | from request_response_container import RequestResponseContainer 40 | 41 | # Self name 42 | _NAME = "gemini" 43 | 44 | 45 | class GoogleAIModule: 46 | def __init__( 47 | self, 48 | config: Dict, 49 | messages_: messages.Messages, 50 | users_handler_: users_handler.UsersHandler, 51 | ) -> None: 52 | """Initializes class variables (must be done in main process) 53 | 54 | Args: 55 | config (Dict): global config 56 | messages_ (messages.Messages): initialized messages handler 57 | users_handler_ (users_handler.UsersHandler): initialized users handler 58 | """ 59 | self.config = config 60 | self.messages = messages_ 61 | self.users_handler = users_handler_ 62 | 63 | # All variables here must be multiprocessing 64 | self.cancel_requested = multiprocessing.Value(ctypes.c_bool, False) 65 | self.processing_flag = multiprocessing.Value(ctypes.c_bool, False) 66 | self._last_request_time = multiprocessing.Value(ctypes.c_double, 0.0) 67 | 68 | # Don't use this variables outside 
the module's process 69 | self._model = None 70 | self._vision_model = None 71 | 72 | def initialize(self) -> None: 73 | """Initializes Google AI module using the generative language API: https://ai.google.dev/api 74 | This method must be called from another process 75 | 76 | Raises: 77 | Exception: initialization error 78 | """ 79 | # Internal variables for current process 80 | self._model = None 81 | try: 82 | self.processing_flag.value = False 83 | self.cancel_requested.value = False 84 | 85 | # Get module's config 86 | module_config = self.config.get(_NAME) 87 | 88 | # Use proxy 89 | if module_config.get("proxy") and module_config.get("proxy") != "auto": 90 | proxy = module_config.get("proxy") 91 | os.environ["http_proxy"] = proxy 92 | logging.info(f"Initializing Google AI module with proxy {proxy}") 93 | else: 94 | logging.info("Initializing Google AI module without proxy") 95 | 96 | # Set up the model 97 | generation_config = { 98 | "temperature": module_config.get("temperature", 0.9), 99 | "top_p": module_config.get("top_p", 1), 100 | "top_k": module_config.get("top_k", 1), 101 | "max_output_tokens": module_config.get("max_output_tokens", 2048), 102 | } 103 | safety_settings = [] 104 | self._model = genai.GenerativeModel( 105 | model_name="gemini-pro", 106 | generation_config=generation_config, 107 | safety_settings=safety_settings, 108 | ) 109 | self._vision_model = genai.GenerativeModel( 110 | model_name="gemini-pro-vision", 111 | generation_config=generation_config, 112 | safety_settings=safety_settings, 113 | ) 114 | 115 | client_manager = _ClientManager() 116 | client_manager.configure(api_key=module_config.get("api_key")) 117 | # pylint: disable=protected-access 118 | self._model._client = client_manager.get_default_client("generative") 119 | self._vision_model._client = client_manager.get_default_client("generative") 120 | # pylint: enable=protected-access 121 | logging.info("Google AI module initialized") 122 | 123 | # Reset module and re-raise the 
error 124 | except Exception as e: 125 | self._model = None 126 | raise e 127 | 128 | def process_request(self, request_response: RequestResponseContainer) -> None: 129 | """Processes request to Google AI 130 | 131 | Args: 132 | request_response (RequestResponseContainer): container from the queue 133 | 134 | Raises: 135 | Exception: in case of error 136 | """ 137 | conversations_dir = self.config.get("files").get("conversations_dir") 138 | conversation_id = self.users_handler.get_key(request_response.user_id, f"{_NAME}_conversation_id") 139 | 140 | # Check if we are initialized 141 | if self._model is None: 142 | logging.error("Google AI module not initialized") 143 | request_response.response_text = self.messages.get_message( 144 | "response_error", user_id=request_response.user_id 145 | ).format(error_text="Google AI module not initialized") 146 | request_response.error = True 147 | self.processing_flag.value = False 148 | return 149 | 150 | try: 151 | # Set flag that we are currently processing request 152 | self.processing_flag.value = True 153 | 154 | # Get module's config 155 | module_config = self.config.get(_NAME) 156 | 157 | # Cool down 158 | if time.time() - self._last_request_time.value <= module_config.get("user_cooldown_seconds"): 159 | time_to_wait = module_config.get("user_cooldown_seconds") - ( 160 | time.time() - self._last_request_time.value 161 | ) 162 | logging.warning(f"Too frequent requests. 
Waiting {time_to_wait} seconds...") 163 | time.sleep(self._last_request_time.value + module_config.get("user_cooldown_seconds") - time.time()) 164 | self._last_request_time.value = time.time() 165 | 166 | response = None 167 | conversation = [] 168 | 169 | # Gemini vision 170 | if request_response.request_image: 171 | logging.info("Asking Gemini...") 172 | response = self._vision_model.generate_content( 173 | [ 174 | Part( 175 | inline_data={ 176 | "mime_type": "image/jpeg", 177 | "data": request_response.request_image, 178 | } 179 | ), 180 | Part(text=request_response.request_text), 181 | ], 182 | stream=True, 183 | ) 184 | 185 | # Gemini (text) 186 | else: 187 | # Try to load conversation 188 | conversation = _load_conversation(conversations_dir, conversation_id) or [] 189 | # Generate new random conversation ID 190 | if conversation_id is None: 191 | conversation_id = f"{_NAME}_{uuid.uuid4()}" 192 | 193 | conversation.append( 194 | Content.to_json(Content(role="user", parts=[Part(text=request_response.request_text)])) 195 | ) 196 | 197 | logging.info("Asking Gemini...") 198 | response = self._model.generate_content( 199 | [Content.from_json(content) for content in conversation], 200 | stream=True, 201 | ) 202 | 203 | for chunk in response: 204 | if self.cancel_requested.value: 205 | break 206 | if len(chunk.parts) < 1 or "text" not in chunk.parts[0]: 207 | continue 208 | 209 | # Append and send response 210 | request_response.response_text += chunk.parts[0].text 211 | async_helper( 212 | send_message_async(self.config.get("telegram"), self.messages, request_response, end=False) 213 | ) 214 | 215 | # Canceled, don't save conversation 216 | if self.cancel_requested.value: 217 | logging.info("Gemini module canceled") 218 | 219 | # Save conversation if not gemini-vision 220 | elif not request_response.request_image: 221 | # Try to save conversation 222 | conversation.append(Content.to_json(Content(role="model", parts=response.parts))) 223 | if not 
_save_conversation(conversations_dir, conversation_id, conversation): 224 | conversation_id = None 225 | 226 | # Save conversation ID 227 | self.users_handler.set_key(request_response.user_id, f"{_NAME}_conversation_id", conversation_id) 228 | 229 | finally: 230 | self.processing_flag.value = False 231 | 232 | # Finish 233 | async_helper(send_message_async(self.config.get("telegram"), self.messages, request_response, end=True)) 234 | 235 | def clear_conversation_for_user(self, user_id: int) -> None: 236 | """Clears conversation (chat history) for selected user""" 237 | # Get current conversation_id 238 | conversation_id = self.users_handler.get_key(user_id, f"{_NAME}_conversation_id") 239 | if conversation_id is None: 240 | return 241 | 242 | # Delete from API 243 | _delete_conversation(self.config.get("files").get("conversations_dir"), conversation_id) 244 | 245 | # Delete from user 246 | self.users_handler.set_key(user_id, f"{_NAME}_conversation_id", None) 247 | 248 | 249 | def _load_conversation(conversations_dir, conversation_id): 250 | """Tries to load conversation 251 | 252 | Args: 253 | conversations_dir (_type_): _description_ 254 | conversation_id (_type_): _description_ 255 | 256 | Returns: 257 | _type_: content of conversation, None if error 258 | """ 259 | logging.info(f"Loading conversation {conversation_id}") 260 | try: 261 | if conversation_id is None: 262 | logging.info("conversation_id is None. 
Skipping loading") 263 | return None 264 | 265 | # API type 3 266 | conversation_file = os.path.join(conversations_dir, conversation_id + ".json") 267 | if os.path.exists(conversation_file): 268 | # Load from json file 269 | with open(conversation_file, "r", encoding="utf-8") as json_file: 270 | return json.load(json_file) 271 | else: 272 | logging.warning(f"File {conversation_file} not exists") 273 | 274 | except Exception as e: 275 | logging.warning(f"Error loading conversation {conversation_id}", exc_info=e) 276 | 277 | return None 278 | 279 | 280 | def _save_conversation(conversations_dir, conversation_id, conversation) -> bool: 281 | """Tries to save conversation without raising any error 282 | 283 | Args: 284 | conversations_dir (_type_): _description_ 285 | conversation_id (_type_): _description_ 286 | conversation (_type_): _description_ 287 | 288 | Returns: 289 | bool: True if no error 290 | """ 291 | logging.info(f"Saving conversation {conversation_id}") 292 | try: 293 | if conversation_id is None: 294 | logging.info("conversation_id is None. 
Skipping saving") 295 | return False 296 | 297 | # Create conversation dir 298 | if not os.path.exists(conversations_dir): 299 | logging.info(f"Creating {conversations_dir} directory") 300 | os.makedirs(conversations_dir) 301 | 302 | # Save as json file 303 | conversation_file = os.path.join(conversations_dir, conversation_id + ".json") 304 | with open(conversation_file, "w+", encoding="utf-8") as json_file: 305 | json.dump(conversation, json_file, indent=4, ensure_ascii=False) 306 | 307 | except Exception as e: 308 | logging.error(f"Error saving conversation {conversation_id}", exc_info=e) 309 | return False 310 | 311 | return True 312 | 313 | 314 | def _delete_conversation(conversations_dir, conversation_id) -> bool: 315 | """Tries to delete conversation without raising any error 316 | 317 | Args: 318 | conversations_dir (_type_): _description_ 319 | conversation_id (_type_): _description_ 320 | 321 | Returns: 322 | bool: True if no error 323 | """ 324 | logging.info(f"Deleting conversation {conversation_id}") 325 | # Delete conversation file if exists 326 | try: 327 | conversation_file = os.path.join(conversations_dir, conversation_id + ".json") 328 | if os.path.exists(conversation_file): 329 | logging.info(f"Deleting {conversation_file} file") 330 | os.remove(conversation_file) 331 | return True 332 | 333 | except Exception as e: 334 | logging.error( 335 | f"Error removing conversation file for conversation {conversation_id}", 336 | exc_info=e, 337 | ) 338 | 339 | return False 340 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 🤖 GPT-Telegramus v5 2 | 3 | | ![GPT-Telegramus logo](logo.png) |

The best free Telegram bot for ChatGPT, Microsoft Copilot (aka Bing AI / Sydney / EdgeGPT), Microsoft Copilot Designer (aka BingImageCreator), Gemini and Groq with stream writing, requests with images, multiple languages, admin control, data logging and more!

| 4 | | -------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | 5 | 6 |
7 |

8 | 9 |

10 |
11 |
12 |

13 | YouTube 14 | Bandcamp 15 | Spotify 16 | SoundCloud 17 |

18 |
19 |
20 |

21 | 22 | 23 |

24 |
25 |
26 |

27 | 28 | 29 |

30 |
31 | 32 | ![Project banner](banner.png) 33 | 34 | ---------- 35 | 36 | ## 🚧 GPT-Telegramus is under development 37 | 38 | > 😔 Currently, GPT-Telegramus doesn't have paid ChatGPT and DALL-E support 39 | > 40 | > 📈 GPT-Telegramus v5 is an updated and highly refactored version of the old GPT-Telegramus 41 | > 42 | > 📄 Documentation is also under development! Consider reading docstring for now 43 | > 44 | > 🐛 If you find a **bug** in GPT-Telegramus, please create an Issue 45 | > 46 | > p.s. Due to my studies, I don't have much time to work on the project 😔 47 | 48 | ---------- 49 | 50 | ## 😋 Support project 51 | 52 | > 💜 Please support the project so that I can continue to develop it 53 | 54 | - BTC: `bc1qd2j53p9nplxcx4uyrv322t3mg0t93pz6m5lnft` 55 | - ETH: `0x284E6121362ea1C69528eDEdc309fC8b90fA5578` 56 | - ZEC: `t1Jb5tH61zcSTy2QyfsxftUEWHikdSYpPoz` 57 | 58 | - Or by my music on [🟦 bandcamp](https://f3rni.bandcamp.com/) 59 | 60 | - Or message me if you would like to donate in other way 💰 61 | 62 | [![Star History Chart](https://api.star-history.com/svg?repos=F33RNI/GPT-Telegramus&type=Date)](https://star-history.com/#F33RNI/GPT-Telegramus&Date) 63 | 64 | ---------- 65 | 66 | ## 🤗 Contributors 67 | 68 | - 💜 [Sprav04ka](https://github.com/Sprav04ka) - *Tofii'skovyi' language, Testing, Super beautiful poster, Project Logo, Motivation* 69 | - 💜 [Hanssen](https://github.com/Hanssen0) - *Markdown parsing, bard images, /chat command, caption fix, loading emoji, dynamic splitting, code block splitting, Gemini module, Docker fix, GitHub actions fix* **and much much more** 70 | - 💜 [Sergey Krashevich](https://github.com/skrashevich) - *Docker, GitHub Actions* 71 | - 💜 [Wahit Fitriyanto](https://github.com/wahitftry) - *Indonesian language* 72 | - 💜 [Alexander Fadeyev](https://github.com/alfsoft) - *EdgeGPT Fix* 73 | - 💜 AnthroAsja - *Belarusian language* 74 | - 💜 Anonymous Samurai - *Ukrainian language* 75 | - 💜 Dunya Jafari - *Persian language* 76 | - 💜 [Dedy 
Rudney](https://github.com/rudney5000) - *French language* 77 | 78 | ---------- 79 | 80 | ## 📨 Project channel and demo 81 | 82 | - You can join official project's Telegram channel 83 | - Also you can test GPT-Telegramus using official bot 84 | 85 | ⚠️ Please do not overload the bot and use it only as a demo version 86 | 87 | ---------- 88 | 89 | ## 🏗️ Requirements 90 | 91 | - Python **3.10** / **3.11** *(not tested on other versions)* 92 | - Unblocked access to the telegram bot official API 93 | - Other requirements specified in the `requirements.txt` file 94 | 95 | ---------- 96 | 97 | ## 📙 Project based on 98 | 99 | - **F33RNI/LMAO API** (Unofficial open APIs): 100 | - **Groq API** (Official Python API): 101 | - **acheong08/EdgeGPT** (API): 102 | - **jacobgelling/EdgeGPT** (API): 103 | - **acheong08/BingImageCreator** (API): 104 | - **google/generative-ai-python** (API): 105 | - **python-telegram-bot** (Telegram bot API): 106 | 107 | ---------- 108 | 109 | ## ❓ Get started 110 | 111 | See **🐧 Running as service on linux**, **🍓 Running on Raspberry Pi (ARM)**, **🐋 Running in Docker** sections for more info 112 | 113 | 1. Install Python **3.10** / **3.11** *(not tested on other versions)*, `venv` and `pip` 114 | 2. Download source code (clone repo) 115 | 3. Create venv `python -m venv venv` / `python3 -m venv venv` / `python3.10 -m venv venv` / `python3.11 -m venv venv` 116 | 4. Activate venv `source venv/bin/activate` / `venv\Scripts\activate.bat` 117 | 5. Check python version using `python --version` command 118 | 6. Install requirements `pip install -r requirements.txt --upgrade` 119 | 7. Carefully change all the settings in `config.json` file and in each `*.json` file inside `module_configs` directory. If you have questions regarding any setting, open an issue, I'll try to add a more detailed description 120 | 8. Run main script `python main.py` 121 | 122 | - 💬 **ChatGPT** 123 | - Free browser-like Chat-GPT. 
Currently, without extensions and image requests (text only) (because I don't have a paid account to test it) 124 | - Stream response support 125 | - Chat history support 126 | - See for more info 127 | - 🟦 **Microsoft Copilot (aka EdgeGPT aka Bing AI aka Sydney)** 128 | - Supports conversation style `/style` 129 | - Stream response support 130 | - Chat history support 131 | - Web-browsing (probably) and sources (attributions) support 132 | - Accepts image requests (for **lmao_ms_copilot** only) 133 | - Send generated images (for **lmao_ms_copilot** only) 134 | - Suggestions (for **lmao_ms_copilot** only) 135 | - See for more info 136 | - **NOTE:** Non-LMAO API (the old one `ms_copilot`) is deprecated! Please use `lmao_ms_copilot` instead 137 | - 🎨 *Microsoft Copilot Designer* (**DEPRECATED**) 138 | - Bing Image Generator. Used as a separate module due to issues with the EdgeGPT module 139 | - Free and unlimited 140 | - ♊ **Gemini** 141 | - Google's AI using the Gemini Pro model 142 | - Chat history support 143 | - Requests with images (you can send an image with text to it) 144 | - Requests with images will not be recorded in the chat history since Google hasn't support this 145 | - 🔴 **Groq** 146 | - Official Python API 147 | - Chat history support 148 | - **Very fast** response 149 | - Multiple models (*see `/model` command*) 150 | 151 | ---------- 152 | 153 | ## 🐧 Running as service on linux 154 | 155 | 1. Install Python **3.10** / **3.11** *(not tested on other versions)*, `venv` and `pip` 156 | 2. Clone repo 157 | 1. `git clone https://github.com/F33RNI/GPT-Telegramus.git` 158 | 2. `cd GPT-Telegramus` 159 | 3. Create venv `python -m venv venv` / `python3 -m venv venv` / `python3.10 -m venv venv` / `python3.11 -m venv venv` 160 | 4. Carefully change all the settings in `config.json` file and in each `*.json` file inside `module_configs` directory 161 | 5. Install systemd 162 | 1. `sudo apt-get install -y systemd` 163 | 6. Create new service file 164 | 1. 
`sudo nano /etc/systemd/system/gpt-telegramus.service` 165 | 166 | ```ini 167 | [Unit] 168 | Description=GPT-Telegramus service 169 | After=multi-user.target 170 | 171 | [Service] 172 | Type=simple 173 | Restart=on-failure 174 | RestartSec=5 175 | 176 | WorkingDirectory=YOUR DIRECTORY HERE/GPT-Telegramus 177 | ExecStart=YOUR DIRECTORY HERE/GPT-Telegramus/run.sh 178 | 179 | [Install] 180 | WantedBy=multi-user.target 181 | 182 | ``` 183 | 184 | 7. Reload systemctl daemon 185 | 1. `sudo systemctl daemon-reload` 186 | 8. Enable and start service 187 | 1. `sudo systemctl enable gpt-telegramus` 188 | 2. `sudo systemctl start gpt-telegramus` 189 | 9. Note: Please use `sudo systemctl kill gpt-telegramus` and then `sudo systemctl stop gpt-telegramus` if only `sudo systemctl stop gpt-telegramus` not working 190 | 191 | ---------- 192 | 193 | ## 🍓 Running on Raspberry Pi (ARM) 194 | 195 | 1. Install Python 3.11 or later *(not tested)* if not installed 196 | 197 | 1. ```shell 198 | sudo apt-get update 199 | sudo apt-get install -y build-essential tk-dev libncurses5-dev libncursesw5-dev libreadline6-dev libdb5.3-dev libgdbm-dev libsqlite3-dev libssl-dev libbz2-dev libexpat1-dev liblzma-dev zlib1g-dev libffi-dev 200 | wget https://www.python.org/ftp/python/3.11.8/Python-3.11.8.tgz 201 | sudo tar zxf Python-3.11.8.tgz 202 | cd Python-3.11.8 203 | sudo ./configure --enable-optimizations 204 | sudo make -j 4 205 | sudo make altinstall 206 | ``` 207 | 208 | 2. Check version by typing `python3.11 -V`. After this, you should use `python3.11` command instead of `python` or you can add it to the `bashrc` by typing `echo "alias python=/usr/local/bin/python3.11" >> ~/.bashrc` 209 | 210 | 2. Follow the `🐧 Running as service on linux` guide 211 | 212 | ---------- 213 | 214 | ## 🐋 Running in Docker 215 | 216 | ### From GitHub Package 217 | 218 | 1. Clone repo or download [`config.json`](./config.json) and [`module_configs`](./module_configs) and [`langs`](./langs) 219 | 2. 
Edit the `config.json`, set options in the `files` section to the path in the container (`/app/config/`) 220 | 3. Run the container 221 | 222 | ```shell 223 | docker run -d -e TELEGRAMUS_CONFIG_FILE="/app/config/config.json" -v :/app/config --name gpt-telegramus --restart on-failure ghcr.io/f33rni/gpt-telegramus:latest 224 | ``` 225 | 226 | If you want to try the preview version 227 | 228 | ```shell 229 | docker run -d -e TELEGRAMUS_CONFIG_FILE="/app/config/config.json" -v :/app/config --name gpt-telegramus --restart on-failure ghcr.io/f33rni/gpt-telegramus:edge 230 | ``` 231 | 232 | ### Build Manually 233 | 234 | 1. Install Docker 235 | 2. Clone repo 236 | 3. Build container 237 | 238 | ```shell 239 | docker buildx build -t telegramus --load -f Dockerfile . 240 | ``` 241 | 242 | 4. Run the container 243 | 244 | ```shell 245 | docker run -d --name gpt-telegramus --restart on-failure telegramus 246 | ``` 247 | 248 | or if you want to use a custom config 249 | 250 | ```shell 251 | docker run -d -e TELEGRAMUS_CONFIG_FILE="/app/config/config.json" -v :/app/config --name gpt-telegramus --restart on-failure telegramus 252 | ``` 253 | 254 | ---------- 255 | 256 | ## 🌐 Bot messages 257 | 258 | ### Currently available languages 259 | 260 | - 🇺🇸 English 261 | - 🇷🇺 Русский 262 | - ‍☠️ Тофийсковый 263 | - 🇮🇩 Bahasa Indonesia 264 | - 🇨🇳 简体中文 265 | - 🇧🇾 Беларуская 266 | - 🇺🇦 Українська 267 | - فارسی 🇮🇷 268 | - 🇪🇸 Español 269 | - 🇻🇳 Vietnamese 270 | - 🇫🇷 Français 271 | 272 | You can add **a new language**. For that: 273 | 274 | 1. Copy any existing language file (inside `langs` directory) 275 | 2. Rename it according to `Set2/T` (3-letters code) **ISO 639** 276 | 3. Translate each entry 277 | 4. 
Create a pull request 💜 278 | 279 | > You can add new lines by adding `\n` 280 | > 281 | > ⚠️ Please make sure you haven't messed up the string formatting structure `{this_type}` 282 | 283 | ---------- 284 | 285 | ## 🤖 Telegram bot commands 286 | 287 | - 📄 `/start` - Welcome message and bot version 288 | - ❓ `/help` - Show help message 289 | - ↕️ `/module` - Change module to chat with 290 | - 🧹 `/clear` - Clear chat history 291 | - 🌎 `/lang` - Change the language 292 | - 🆔 `/chatid` - Show your chat_id 293 | - ⚙️ `/model` - Change model of module (Currently for 🔴 Groq module only) 294 | - `/style` - Bing AI conversation style 295 | - `/chat` - Send request in group chat 296 | - Other direct module commands (please check automatically-generated list of commands inside bot) 297 | 298 | ### Admin commands 299 | 300 | - 💬 `/queue` - Show requests queue 301 | - 🔃 `/restart [module name, optional]` - Restart specific module (**and it's config**) or every module and all configs, languages and bot commands 302 | - Please see `bot_command_restart` function in `bot_handler.py` file for more info 303 | - 👤 `/users` - Show list of all users 304 | - 🔨 `/ban [reason]` - Ban a user by their id with reason (optional) 305 | - 🔓 `/unban ` - Unban a user by their id 306 | - 📢 `/broadcast ` - Send text message to everyone except banned users 307 | 308 | ---------- 309 | 310 | ## 📜 Data collecting 311 | 312 | GPT-Telegramus has a built-in data collecting function (saves requests and responses in a files) 313 | 314 | - **For text requests / responses** will be saved as plain text 315 | - **For image requests / responses** will be saved as Base64-encoded image (in the same text file) 316 | 317 | You can enable and configure data collection in `config.json` in `data_collecting` section 318 | 319 | > ⚠️ Please make sure you notify your bot users that you're collecting data 320 | 321 | ---------- 322 | 323 | ## 📝 TODO 324 | 325 | - Paid version of ChatGPT 326 | - DALL-E (from OpenAI) 327 | 
- Some other LLMs (and maybe some free GPT-4 model) 328 | 329 | ---------- 330 | 331 | ## ✨ Contribution 332 | 333 | - Anyone can contribute! Just create a **pull request** 334 | -------------------------------------------------------------------------------- /lmao_process_loop.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2023-2024 Fern Lane 3 | 4 | This file is part of the GPT-Telegramus distribution 5 | (see ) 6 | 7 | This program is free software: you can redistribute it and/or modify 8 | it under the terms of the GNU Affero General Public License as 9 | published by the Free Software Foundation, either version 3 of the 10 | License, or (at your option) any later version. 11 | 12 | This program is distributed in the hope that it will be useful, 13 | but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | GNU Affero General Public License for more details. 16 | 17 | You should have received a copy of the GNU Affero General Public License 18 | along with this program. If not, see . 
import logging
import multiprocessing
import queue
import random
import string
import threading
import time
from typing import Dict

from lmao.module_wrapper import (
    ModuleWrapper,
    STATUS_INITIALIZING,
    STATUS_BUSY,
    STATUS_FAILED,
)

import logging_handler
import messages
import users_handler
from bot_sender import send_message_async
from async_helper import async_helper

# lmao process loop delay during idle
LMAO_LOOP_DELAY = 0.5


def lmao_process_loop(
    name: str,
    name_lmao: str,
    config: Dict,
    messages_: messages.Messages,
    users_handler_: users_handler.UsersHandler,
    logging_queue: multiprocessing.Queue,
    lmao_process_running: multiprocessing.Value,
    lmao_stop_stream_value: multiprocessing.Value,
    lmao_module_status: multiprocessing.Value,
    lmao_delete_conversation_request_queue: multiprocessing.Queue,
    lmao_delete_conversation_response_queue: multiprocessing.Queue,
    lmao_request_queue: multiprocessing.Queue,
    lmao_response_queue: multiprocessing.Queue,
    lmao_exceptions_queue: multiprocessing.Queue,
    *args,
) -> None:
    """Handler for lmao's ModuleWrapper (runs as a separate process)
    (see module_wrapper_global.py for more info)

    Args:
        name (str): project-level module name (e.g. "lmao_ms_copilot")
        name_lmao (str): module name inside the lmao package
        config (Dict): global config
        messages_ (messages.Messages): initialized messages handler
        users_handler_ (users_handler.UsersHandler): initialized users handler
        logging_queue (multiprocessing.Queue): queue for the centralized logging handler
        lmao_process_running (multiprocessing.Value): process keeps running while True
        lmao_stop_stream_value (multiprocessing.Value): set to True to stop the current stream
        lmao_module_status (multiprocessing.Value): mirrors module.status for other processes
        lmao_delete_conversation_request_queue (multiprocessing.Queue): user IDs to clear
        lmao_delete_conversation_response_queue (multiprocessing.Queue): user ID or exception back
        lmao_request_queue (multiprocessing.Queue): incoming request-response containers
        lmao_response_queue (multiprocessing.Queue): processed containers back to the main process
        lmao_exceptions_queue (multiprocessing.Queue): errors raised while processing a request
    """
    # Setup logging for current process
    logging_handler.worker_configurer(logging_queue)
    logging.info("_lmao_process_loop started")

    # Initialize module (on failure: report FAILED status, clear running flag and exit)
    try:
        logging.info(f"Initializing {name}")
        with lmao_module_status.get_lock():
            lmao_module_status.value = STATUS_INITIALIZING
        module = ModuleWrapper(name_lmao, config.get(name))
        module.initialize(blocking=True)
        with lmao_module_status.get_lock():
            lmao_module_status.value = module.status
        logging.info(f"{name} initialization finished")
    except Exception as e:
        logging.error(f"{name} initialization error", exc_info=e)
        with lmao_module_status.get_lock():
            lmao_module_status.value = STATUS_FAILED
        with lmao_process_running.get_lock():
            lmao_process_running.value = False
        return

    # Main loop container
    request_response = None

    def _lmao_stop_stream_loop() -> None:
        """Background thread that handles stream stop signal"""
        logging.info("_lmao_stop_stream_loop started")
        while True:
            # Exit from loop
            with lmao_process_running.get_lock():
                if not lmao_process_running.value:
                    logging.warning("Exit from _lmao_stop_stream_loop requested")
                    break

            try:
                # Wait a bit to prevent overloading
                # We need to wait at the beginning to enable delay even after exception
                # But inside try-except to catch interrupts
                time.sleep(LMAO_LOOP_DELAY)

                # Get stop request (read-and-clear under the lock)
                lmao_stop_stream = False
                with lmao_stop_stream_value.get_lock():
                    if lmao_stop_stream_value.value:
                        lmao_stop_stream = True
                        lmao_stop_stream_value.value = False

                # Stop was requested
                if lmao_stop_stream:
                    module.response_stop()

            # Catch process interrupts just in case
            except (SystemExit, KeyboardInterrupt):
                logging.warning("Exit from _lmao_stop_stream_loop requested")
                break

            # Stop loop error
            except Exception as e:
                logging.error("_lmao_stop_stream_loop error", exc_info=e)

            # Read module's status
            finally:
                with lmao_module_status.get_lock():
                    lmao_module_status.value = module.status

        # Done
        logging.info("_lmao_stop_stream_loop finished")

    # Start stream stop signal handler
    stop_handler_thread = threading.Thread(target=_lmao_stop_stream_loop)
    stop_handler_thread.start()

    # Main loop
    while True:
        # Exit from loop
        with lmao_process_running.get_lock():
            lmao_process_running_value = lmao_process_running.value
        if not lmao_process_running_value:
            logging.warning(f"Exit from {name} loop requested")
            break

        request_response = None

        try:
            # Wait a bit to prevent overloading
            # We need to wait at the beginning to enable delay even after exception
            # But inside try-except to catch interrupts
            time.sleep(LMAO_LOOP_DELAY)

            # Non-blocking get of request-response container
            request_response = None
            try:
                request_response = lmao_request_queue.get(block=False)
            except queue.Empty:
                pass

            # Read module's status
            with lmao_module_status.get_lock():
                lmao_module_status.value = module.status

            # New request
            if request_response:
                logging.info(f"Received new request to {name}")
                with lmao_module_status.get_lock():
                    lmao_module_status.value = STATUS_BUSY

                # Extract request
                prompt_text = request_response.request_text
                prompt_image = request_response.request_image

                # Check prompt (caught by the outer handler and forwarded to the exceptions queue)
                if not prompt_text:
                    raise Exception("No text request")

                # Extract conversation ID
                conversation_id = users_handler_.get_key(request_response.user_id, name + "_conversation_id")

                module_request = {"prompt": prompt_text, "convert_to_markdown": True}

                # Extract style (for lmao_ms_copilot only)
                if name == "lmao_ms_copilot":
                    style = users_handler_.get_key(request_response.user_id, "ms_copilot_style", "balanced")
                    module_request["style"] = style

                # Add image and conversation ID
                if prompt_image is not None:
                    module_request["image"] = prompt_image
                if conversation_id:
                    module_request["conversation_id"] = conversation_id

                # Reset suggestions
                users_handler_.set_key(request_response.user_id, "suggestions", [])

                # Ask and read stream
                for response in module.ask(module_request):
                    finished = response.get("finished")
                    conversation_id = response.get("conversation_id")
                    request_response.response_text = response.get("response")

                    images = response.get("images")
                    if images is not None:
                        request_response.response_images = images[:]

                    # Format and add attributions
                    attributions = response.get("attributions")
                    if attributions is not None and len(attributions) != 0:
                        response_link_format = messages_.get_message(
                            "response_link_format", user_id=request_response.user_id
                        )
                        request_response.response_text += "\n"
                        for i, attribution in enumerate(attributions):
                            request_response.response_text += response_link_format.format(
                                source_name=str(i + 1), link=attribution.get("url", "")
                            )

                    # Suggestions must be stored as tuples with unique ID for reply-markup
                    if finished:
                        suggestions = response.get("suggestions")
                        if suggestions is not None:
                            request_response.response_suggestions = []
                            for suggestion in suggestions:
                                if not suggestion or len(suggestion) < 1:
                                    continue
                                id_ = "".join(
                                    random.choices(
                                        string.ascii_uppercase + string.ascii_lowercase + string.digits, k=8
                                    )
                                )
                                request_response.response_suggestions.append((id_, suggestion))
                            users_handler_.set_key(
                                request_response.user_id,
                                "suggestions",
                                request_response.response_suggestions,
                            )

                    # Read module's status
                    with lmao_module_status.get_lock():
                        lmao_module_status.value = module.status

                    # Check if exit was requested (force-finish the message in that case)
                    with lmao_process_running.get_lock():
                        lmao_process_running_value = lmao_process_running.value
                    if not lmao_process_running_value:
                        finished = True

                    # Send response to the user
                    async_helper(
                        send_message_async(config.get("telegram"), messages_, request_response, end=finished)
                    )

                    # Exit from stream reader
                    if not lmao_process_running_value:
                        break

                # Save conversation ID
                logging.info(f"Saving user {request_response.user_id} conversation ID as: {name}_{conversation_id}")
                users_handler_.set_key(request_response.user_id, name + "_conversation_id", conversation_id)

            # Non-blocking get of user_id to clear conversation for
            delete_conversation_user_id = None
            try:
                delete_conversation_user_id = lmao_delete_conversation_request_queue.get(block=False)
            except queue.Empty:
                pass

            # Get and delete conversation
            if delete_conversation_user_id is not None:
                with lmao_module_status.get_lock():
                    lmao_module_status.value = STATUS_BUSY
                conversation_id = users_handler_.get_key(delete_conversation_user_id, name + "_conversation_id")
                try:
                    if delete_conversation_user_id is not None and conversation_id:
                        module.delete_conversation({"conversation_id": conversation_id})
                    # Always clear the stored ID and respond, so the requester never hangs
                    users_handler_.set_key(delete_conversation_user_id, name + "_conversation_id", None)
                    lmao_delete_conversation_response_queue.put(delete_conversation_user_id)
                except Exception as e:
                    logging.error(f"Error deleting conversation for {name}", exc_info=e)
                    lmao_delete_conversation_response_queue.put(e)
                finally:
                    with lmao_module_status.get_lock():
                        lmao_module_status.value = module.status

        # Catch process interrupts just in case
        except (SystemExit, KeyboardInterrupt):
            logging.warning(f"Exit from {name} loop requested")
            break

        # Main loop error
        except Exception as e:
            logging.error(f"{name} error", exc_info=e)
            lmao_exceptions_queue.put(e)

        # Read module's status and return the container
        finally:
            with lmao_module_status.get_lock():
                lmao_module_status.value = module.status
            if request_response:
                lmao_response_queue.put(request_response)

    # Wait for stop handler to finish
    if stop_handler_thread and stop_handler_thread.is_alive():
        logging.info("Waiting for _lmao_stop_stream_loop")
        try:
            stop_handler_thread.join()
        except Exception as e:
            logging.warning(f"Error joining _lmao_stop_stream_loop: {e}")

    # Try to close module
    try:
        logging.info(f"Trying to close {name}")
        module.close(blocking=True)
        with lmao_module_status.get_lock():
            lmao_module_status.value = module.status
        logging.info(f"{name} closing finished")
    except Exception as e:
        logging.error(f"Error closing {name}", exc_info=e)

    # Read module's status
    with lmao_module_status.get_lock():
        lmao_module_status.value = module.status

    # Done
    with lmao_process_running.get_lock():
        lmao_process_running.value = False
    logging.info("_lmao_process_loop finished")
19 | """ 20 | 21 | import asyncio 22 | import ctypes 23 | import json 24 | import logging 25 | import multiprocessing 26 | import os 27 | import uuid 28 | from typing import Dict 29 | 30 | from EdgeGPT.EdgeGPT import Chatbot 31 | from EdgeGPT.conversation_style import ConversationStyle 32 | 33 | import messages 34 | import users_handler 35 | import bot_sender 36 | from async_helper import async_helper 37 | from request_response_container import RequestResponseContainer 38 | 39 | # Self name 40 | _NAME = "ms_copilot" 41 | 42 | 43 | class MSCopilotModule: 44 | def __init__( 45 | self, 46 | config: Dict, 47 | messages_: messages.Messages, 48 | users_handler_: users_handler.UsersHandler, 49 | ) -> None: 50 | """Initializes class variables (must be done in main process) 51 | 52 | Args: 53 | config (Dict): global config 54 | messages_ (messages.Messages): initialized messages handler 55 | users_handler_ (users_handler.UsersHandler): initialized users handler 56 | """ 57 | self.config = config 58 | self.messages = messages_ 59 | self.users_handler = users_handler_ 60 | 61 | # All variables here must be multiprocessing 62 | self.cancel_requested = multiprocessing.Value(ctypes.c_bool, False) 63 | self.processing_flag = multiprocessing.Value(ctypes.c_bool, False) 64 | 65 | # Don't use this variables outside the module's process 66 | self._chatbot = None 67 | 68 | def initialize(self) -> None: 69 | """Initializes MS Copilot (aka EdgeGPT) 70 | https://github.com/F33RNI/EdgeGPT 71 | """ 72 | self._chatbot = None 73 | 74 | with self.processing_flag.get_lock(): 75 | self.processing_flag.value = False 76 | with self.cancel_requested.get_lock(): 77 | self.cancel_requested.value = False 78 | 79 | # Get module's config 80 | module_config = self.config.get(_NAME) 81 | 82 | # Use proxy 83 | proxy = None 84 | if module_config.get("proxy") and module_config.get("proxy") != "auto": 85 | proxy = module_config.get("proxy") 86 | logging.info(f"Initializing MS Copilot (aka EdgeGPT) module 
def process_request(self, request_response: RequestResponseContainer) -> None:
    """Processes request to MS Copilot

    Streams the chatbot's answer, pushing each partial text to the user as it arrives,
    then saves the conversation file so follow-up requests keep context.

    Args:
        request_response (RequestResponseContainer): container from the queue

    Raises:
        Exception: in case of error
    """
    # Check if module is initialized (chatbot is created during module init)
    if self._chatbot is None:
        logging.error("MS Copilot (aka EdgeGPT) module not initialized")
        request_response.response_text = self.messages.get_message(
            "response_error", user_id=request_response.user_id
        ).format(error_text="MS Copilot (aka EdgeGPT) module not initialized")
        request_response.error = True
        return

    try:
        # Set flag that we are currently processing request and clear any stale cancel request
        with self.processing_flag.get_lock():
            self.processing_flag.value = True
        with self.cancel_requested.get_lock():
            self.cancel_requested.value = False

        # Get user data
        conversation_id = self.users_handler.get_key(request_response.user_id, f"{_NAME}_conversation_id")
        style_default = self.config.get(_NAME).get("conversation_style_type_default")
        # NOTE(review): this key is the literal "ms_copilot_style" while every other user key uses the
        # f"{_NAME}_..." prefix — presumably equivalent only if _NAME == "ms_copilot"; confirm
        conversation_style = self.users_handler.get_key(
            request_response.user_id, "ms_copilot_style", style_default
        )

        async def async_ask_stream_():
            # Stream raw EdgeGPT events: (finished, data) pairs
            async for finished, data in self._chatbot.ask_stream(
                prompt=request_response.request_text,
                conversation_style=getattr(ConversationStyle, conversation_style),
                raw=True,
            ):
                if not data:
                    continue

                # Response accumulators for this event
                text_response = None
                response_sources = []

                type_ = data.get("type", -1)

                # Type 1: intermediate (streaming) message — take the latest partial text
                if not finished and type_ == 1:
                    arguments = data.get("arguments")
                    if arguments is None or len(arguments) == 0:
                        continue
                    messages_ = arguments[-1].get("messages")
                    if messages_ is None or len(messages_) == 0:
                        continue
                    text = messages_[-1].get("text")
                    if not text:
                        continue
                    text_response = text

                # Type 2: final message — extract bot text and source attributions
                elif finished and type_ == 2:
                    item = data.get("item")
                    if item is None:
                        continue
                    messages_ = item.get("messages")
                    if messages_ is None or len(messages_) == 0:
                        continue
                    for message in messages_:
                        # Check author (only messages written by the bot itself)
                        author = message.get("author")
                        if author is None or author != "bot":
                            continue

                        # Ignore internal messages (they carry a messageType)
                        if message.get("messageType") is not None:
                            continue

                        # Text response
                        text = message.get("text")
                        if text:
                            text_response = text

                        # Sources (keep only the attributions of the last message that has any)
                        source_attributions = message.get("sourceAttributions")
                        if source_attributions is None or len(source_attributions) == 0:
                            continue
                        response_sources.clear()
                        for source_attribution in source_attributions:
                            provider_display_name = source_attribution.get("providerDisplayName")
                            see_more_url = source_attribution.get("seeMoreUrl")
                            if not provider_display_name or not see_more_url:
                                continue
                            response_sources.append((provider_display_name, see_more_url))

                # Unknown event type
                else:
                    continue

                # Check response
                if not text_response:
                    continue

                # Set to container
                request_response.response_text = text_response

                # Add sources
                if len(response_sources) != 0:
                    request_response.response_text += "\n"
                    response_link_format = self.messages.get_message(
                        "response_link_format", user_id=request_response.user_id
                    )
                    for response_source in response_sources:
                        request_response.response_text += response_link_format.format(
                            source_name=response_source[0], link=response_source[1]
                        )

                # Send partial message to user
                await bot_sender.send_message_async(
                    self.config.get("telegram"), self.messages, request_response, end=False
                )

                # Exit requested?
                with self.cancel_requested.get_lock():
                    cancel_requested = self.cancel_requested.value
                if cancel_requested:
                    logging.info("Exiting from loop")
                    break

        # Reset current conversation before (re)loading the user's one
        asyncio.run(self._chatbot.reset())

        # Try to load conversation
        if conversation_id:
            conversation_file = os.path.join(
                self.config.get("files").get("conversations_dir"), conversation_id + ".json"
            )
            if os.path.exists(conversation_file):
                logging.info(f"Loading conversation from {conversation_file}")
                asyncio.run(self._chatbot.load_conversation(conversation_file))
            else:
                # Stored ID points to a missing file — start a fresh conversation
                conversation_id = None

        # Start request handling
        asyncio.run(async_ask_stream_())

        # Generate new conversation id
        if not conversation_id:
            conversation_id = f"{_NAME}_{uuid.uuid4()}"

        # Save conversation
        logging.info(f"Saving conversation to {conversation_id}")
        asyncio.run(
            self._chatbot.save_conversation(
                os.path.join(self.config.get("files").get("conversations_dir"), conversation_id + ".json")
            )
        )

        # Save to user data
        self.users_handler.set_key(request_response.user_id, f"{_NAME}_conversation_id", conversation_id)

        # Check response
        if len(request_response.response_text) != 0:
            logging.info(f"Response successfully processed for user {request_response.user_id}")

        # No response
        else:
            logging.warning(f"Empty response for user {request_response.user_id}")
            request_response.response_text = self.messages.get_message(
                "response_error", user_id=request_response.user_id
            ).format(error_text="Empty response")
            request_response.error = True

    # Exit requested
    except KeyboardInterrupt:
        logging.warning("KeyboardInterrupt @ process_request")
        # FIX: clear the processing flag before the early return — previously it stayed True
        # forever on this path, leaving the module permanently marked as busy
        with self.processing_flag.get_lock():
            self.processing_flag.value = False
        return

    # EdgeGPT or other error
    except Exception as e:
        logging.error("Error processing request!", exc_info=e)
        # Truncate long error messages before showing them to the user
        error_text = str(e)
        if len(error_text) > 100:
            error_text = error_text[:100] + "..."

        request_response.response_text = self.messages.get_message(
            "response_error", user_id=request_response.user_id
        ).format(error_text=error_text)
        request_response.error = True
        with self.processing_flag.get_lock():
            self.processing_flag.value = False

    # Finish message
    async_helper(
        bot_sender.send_message_async(self.config.get("telegram"), self.messages, request_response, end=True)
    )

    # Clear processing flag
    with self.processing_flag.get_lock():
        self.processing_flag.value = False
f"{_NAME}_conversation_id", conversation_id) 266 | 267 | # Check response 268 | if len(request_response.response_text) != 0: 269 | logging.info(f"Response successfully processed for user {request_response.user_id}") 270 | 271 | # No response 272 | else: 273 | logging.warning(f"Empty response for user {request_response.user_id}") 274 | request_response.response_text = self.messages.get_message( 275 | "response_error", user_id=request_response.user_id 276 | ).format(error_text="Empty response") 277 | request_response.error = True 278 | 279 | # Exit requested 280 | except KeyboardInterrupt: 281 | logging.warning("KeyboardInterrupt @ process_request") 282 | return 283 | 284 | # EdgeGPT or other error 285 | except Exception as e: 286 | logging.error("Error processing request!", exc_info=e) 287 | error_text = str(e) 288 | if len(error_text) > 100: 289 | error_text = error_text[:100] + "..." 290 | 291 | request_response.response_text = self.messages.get_message( 292 | "response_error", user_id=request_response.user_id 293 | ).format(error_text=error_text) 294 | request_response.error = True 295 | with self.processing_flag.get_lock(): 296 | self.processing_flag.value = False 297 | 298 | # Finish message 299 | async_helper( 300 | bot_sender.send_message_async(self.config.get("telegram"), self.messages, request_response, end=True) 301 | ) 302 | 303 | # Clear processing flag 304 | with self.processing_flag.get_lock(): 305 | self.processing_flag.value = False 306 | 307 | def clear_conversation_for_user(self, user_id: int) -> None: 308 | """Clears conversation (chat history) for selected user 309 | This can be called from any process 310 | 311 | Args: 312 | user_id (int): ID of user 313 | """ 314 | conversation_id = self.users_handler.get_key(user_id, f"{_NAME}_conversation_id") 315 | 316 | # Check if we need to clear it 317 | if conversation_id: 318 | # Delete file 319 | try: 320 | conversation_file = os.path.join( 321 | self.config.get("files").get("conversations_dir"), 
def exit(self) -> None:
    """Aborts processing (closes chatbot)

    Safe to call when the chatbot was never initialized — does nothing in that case.
    """
    # Nothing to close
    if self._chatbot is None:
        return

    # FIX: removed the redundant `if self._chatbot is not None:` that immediately followed
    # the guard above — it was always true and only added dead nesting
    logging.warning("Closing MS Copilot (aka EdgeGPT) connection")
    try:
        async_helper(self._chatbot.close())
    except Exception as e:
        logging.error("Error closing MS Copilot (aka EdgeGPT) connection!", exc_info=e)