├── .dockerignore ├── .env.example ├── .gitignore ├── Dockerfile ├── README.md ├── asssets └── img.png ├── config.py ├── docker-compose.yml ├── main.py ├── process_command.py ├── requirements.txt ├── utility.py └── views.py /.dockerignore: -------------------------------------------------------------------------------- 1 | # Ignore compiled Python files 2 | *.pyc 3 | 4 | # Ignore cache and log files 5 | __pycache__/ 6 | *.log 7 | 8 | # Ignore local development files 9 | venv/ 10 | .env 11 | 12 | # Ignore any build artifacts 13 | build/ 14 | dist/ 15 | *.egg-info/ -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | BOT_TOKEN= 2 | OPENAI_API_KEY= -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by https://www.toptal.com/developers/gitignore/api/python 2 | # Edit at https://www.toptal.com/developers/gitignore?templates=python 3 | 4 | ### Python ### 5 | # Byte-compiled / optimized / DLL files 6 | __pycache__/ 7 | *.py[cod] 8 | *$py.class 9 | 10 | # C extensions 11 | *.so 12 | 13 | # Distribution / packaging 14 | .Python 15 | build/ 16 | develop-eggs/ 17 | dist/ 18 | downloads/ 19 | eggs/ 20 | .eggs/ 21 | lib/ 22 | lib64/ 23 | parts/ 24 | sdist/ 25 | var/ 26 | wheels/ 27 | share/python-wheels/ 28 | *.egg-info/ 29 | .installed.cfg 30 | *.egg 31 | MANIFEST 32 | 33 | # PyInstaller 34 | # Usually these files are written by a python script from a template 35 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 36 | *.manifest 37 | *.spec 38 | 39 | # Installer logs 40 | pip-log.txt 41 | pip-delete-this-directory.txt 42 | 43 | # Unit test / coverage reports 44 | htmlcov/ 45 | .tox/ 46 | .nox/ 47 | .coverage 48 | .coverage.* 49 | .cache 50 | nosetests.xml 51 | coverage.xml 52 | *.cover 53 | *.py,cover 54 | .hypothesis/ 55 | .pytest_cache/ 56 | cover/ 57 | 58 | # Translations 59 | *.mo 60 | *.pot 61 | 62 | # Django stuff: 63 | *.log 64 | local_settings.py 65 | db.sqlite3 66 | db.sqlite3-journal 67 | 68 | # Flask stuff: 69 | instance/ 70 | .webassets-cache 71 | 72 | # Scrapy stuff: 73 | .scrapy 74 | 75 | # Sphinx documentation 76 | docs/_build/ 77 | 78 | # PyBuilder 79 | .pybuilder/ 80 | target/ 81 | 82 | # Jupyter Notebook 83 | .ipynb_checkpoints 84 | 85 | # IPython 86 | profile_default/ 87 | ipython_config.py 88 | 89 | # pyenv 90 | # For a library or package, you might want to ignore these files since the code is 91 | # intended to run in multiple environments; otherwise, check them in: 92 | # .python-version 93 | 94 | # pipenv 95 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 96 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 97 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 98 | # install all needed dependencies. 99 | #Pipfile.lock 100 | 101 | # poetry 102 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 103 | # This is especially recommended for binary packages to ensure reproducibility, and is more 104 | # commonly ignored for libraries. 105 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 106 | #poetry.lock 107 | 108 | # pdm 109 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 110 | #pdm.lock 111 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 112 | # in version control. 113 | # https://pdm.fming.dev/#use-with-ide 114 | .pdm.toml 115 | 116 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 117 | __pypackages__/ 118 | 119 | # Celery stuff 120 | celerybeat-schedule 121 | celerybeat.pid 122 | 123 | # SageMath parsed files 124 | *.sage.py 125 | 126 | # Environments 127 | .env 128 | .venv 129 | env/ 130 | venv/ 131 | ENV/ 132 | env.bak/ 133 | venv.bak/ 134 | 135 | # Spyder project settings 136 | .spyderproject 137 | .spyproject 138 | 139 | # Rope project settings 140 | .ropeproject 141 | 142 | # mkdocs documentation 143 | /site 144 | 145 | # mypy 146 | .mypy_cache/ 147 | .dmypy.json 148 | dmypy.json 149 | 150 | # Pyre type checker 151 | .pyre/ 152 | 153 | # pytype static type analyzer 154 | .pytype/ 155 | 156 | # Cython debug symbols 157 | cython_debug/ 158 | 159 | # PyCharm 160 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 161 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 162 | # and can be added to the global gitignore or merged into this file. For a more nuclear 163 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 164 | #.idea/ 165 | 166 | ### Python Patch ### 167 | # Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration 168 | poetry.toml 169 | 170 | # ruff 171 | .ruff_cache/ 172 | 173 | # LSP config files 174 | pyrightconfig.json 175 | 176 | # End of https://www.toptal.com/developers/gitignore/api/python 177 | 178 | .idea -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Use an official Python runtime as a parent image 2 | FROM python:3.9-slim-buster 3 | 4 | # Set the working directory to /app 5 | WORKDIR /app 6 | 7 | # Copy the current directory contents into the container at /app 8 | COPY . /app 9 | 10 | # Install any needed packages specified in requirements.txt 11 | RUN pip install --no-cache-dir -r requirements.txt 12 | 13 | # Create a disk for the SQLite database file 14 | VOLUME /data 15 | 16 | # Run main.py when the container launches 17 | CMD ["python", "./main.py"] -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Please use https://gpt.dongnv.dev instead. repo: https://github.com/nullmastermind/chatgpt-web 2 | 3 | 4 | 5 | 6 | 7 | # ChatGPT Discord Bot: **Fast. No daily limits. Special chat modes** 8 | 9 | ```diff 10 | + I am very interested in this repository, so you don't need to worry if there 11 | + hasn't been an update for a while. Simply put, it is still functioning stably 12 | + without the need for updates. 13 | ``` 14 | 15 | If you need an additional bot on Telegram, you may like this repository: https://github.com/karfly/chatgpt_telegram_bot 16 | 17 | Discord BOT: NullGPT#0657 https://discord.gg/kCwPAGj9Rc 18 | 19 | ## Features 20 | 21 | - Low latency replies 22 | - No request limits 23 | - Message streaming 24 | - Code highlighting 25 | - Special chat modes. You can easily create your own chat modes by editing `config.py` 26 | - The program is lightweight and compact, simply a Python script without any accompanying database or other components. 27 | - The software has a high level of customization, allowing you to adjust the "temperature" or use a quick selection button for "temperature" if you are not satisfied with the result. 28 | - Use the "history: number_of_last_messages" option to utilize chat history only when necessary. 29 | 30 | ## Setup 31 | 32 | - To run a local test, simply create a `.env` file and copy the content of the `.env.example` file over. Please note that you need to fill in the API KEY completely. `python main.py` 33 | - To run on Docker, you need to modify the environment variables in the `docker-compose.yml` file and then run `docker-compose up --build -d`. 34 | 35 | ## Roadmap 36 | 37 | - [x] Special chat modes 38 | - [ ] Integrate the features of Langchain (https://github.com/hwchase17/langchain) to produce more complex results. 39 | 40 | ## Bot commands: 41 | 42 | - /code 43 | - /assistant 44 | - /english_translator 45 | - /english_translator_technical 46 | - /english_teacher 47 | - /text_improver 48 | - /show_history 49 | - /regenerate: Regenerate the answer with customizable options that can be modified 50 | - /estimate: Task estimate assistant 51 | - /midjourney: Expand your midjourney ideas 52 | 53 | You can easily add or delete as desired in [config.py](https://github.com/nullmastermind/py-chatgpt-discord-bot/blob/master/config.py) 54 | 55 | ## Options: 56 | 57 | - continue_conv : Whether to continue the previous conversation 58 | - prompt: required 59 | - temperature: The temperature to use for message generation. Default in config.py 60 | - history: default 0. If this option is used, it will disable the 'continue_conv' option. 61 | - max_tokens: default 1000 62 | 63 | ## Youtube video 64 | 65 | [![IMAGE ALT TEXT HERE](http://img.youtube.com/vi/ZwSu8f1DKmI/0.jpg)](https://www.youtube.com/watch?v=ZwSu8f1DKmI) 66 | -------------------------------------------------------------------------------- /asssets/img.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nullmastermind/py-chatgpt-discord-bot/bd1768a246e5ab0b7f45e25c9d95c6568a42fe51/asssets/img.png -------------------------------------------------------------------------------- /config.py: -------------------------------------------------------------------------------- 1 | from discord import Option 2 | 3 | MODEL = "gpt-3.5-turbo" 4 | 5 | TIMEOUT = 5 6 | 7 | OPENAI_COMPLETION_OPTIONS = { 8 | "max_tokens": 1000, 9 | "top_p": 1, 10 | "frequency_penalty": 0, 11 | "presence_penalty": 0, 12 | } 13 | 14 | CHAT_BOT_NAME = "NullGPT" 15 | 16 | MAX_HISTORY = 32 17 | 18 | PROMPTS = { 19 | "code": { 20 | "content": "As an advanced chatbot named {CHAT_BOT_NAME}, your primary goal is to assist users to write code. This may involve designing/writing/editing/describing code or providing helpful information. Where possible you should provide code examples to support your points and justify your recommendations or solutions. Make sure the code you provide is correct and can be run without errors. Be detailed and thorough in your responses. Your ultimate goal is to provide a helpful and enjoyable experience for the user. Write code inside markdown code block.", 21 | "temperature": 0.2, 22 | "description": "Code assistant", 23 | }, 24 | "assistant": { 25 | "content": "As an advanced chatbot named {CHAT_BOT_NAME}, your primary goal is to assist users to the best of your ability. This may involve answering questions, providing helpful information, or completing tasks based on user input. In order to effectively assist users, it is important to be detailed and thorough in your responses. Use examples and evidence to support your points and justify your recommendations or solutions. Remember to always prioritize the needs and satisfaction of the user. Your ultimate goal is to provide a helpful and enjoyable experience for the user.", 26 | "temperature": 0.7, 27 | "description": "Life assistant", 28 | }, 29 | "english_translator": { 30 | "content": "As an advanced chatbot named {CHAT_BOT_NAME}, your primary goal is to translate to English, correct spelling and improve text sent by user. Your goal is to translate text, but not to change it's meaning. You can replace simplified A0-level words and sentences with more beautiful and elegant, upper level words and sentences. Keep the meaning same, but prioritizing common and easily understandable words in daily communication. I want you to only reply the correction, the improvements and nothing else, do not write explanations. Write your answer inside markdown code block.", 31 | "temperature": 0.7, 32 | "description": "English Translator", 33 | }, 34 | "english_translator_technical": { 35 | "content": "As an advanced chatbot named {CHAT_BOT_NAME}, your primary goal is to translate to English, correct spelling and improve text sent by user. Your goal is to translate text, but not to change it's meaning. You can replace simplified A0-level words and sentences with more beautiful and elegant, upper level words and sentences. Keep the meaning same, but prioritize common, easy-to-understand words used in articles and documents on software programming. The topic I am talking about is programming, technical, software development, dev ops, game dev, backend, frontend, react, blockchain, aws, docker, unity engine or godot. I want you to only reply the correction, the improvements and nothing else, do not write explanations. Write your answer inside markdown code block.", 36 | "temperature": 0.2, 37 | "description": "English Translator (Technical)", 38 | }, 39 | "english_teacher": { 40 | "content": "As an advanced chatbot named {CHAT_BOT_NAME}, your primary goal is to act as a spoken English teacher and improver. I will speak to you in English and you will reply to me in English to practice my spoken English. I want you to keep your reply neat, limiting the reply to 100 words. I want you to strictly correct my grammar mistakes, typos, and factual errors. I want you to ask me a question in your reply. Now let's start practicing, you could ask me a question first. Remember, I want you to strictly correct my grammar mistakes, typos, and factual errors.", 41 | "temperature": 0.7, 42 | "description": "English Teacher", 43 | "options": { 44 | "prompt": { 45 | "name": "topic", 46 | "description": "What is the topic of today's lesson?", 47 | }, 48 | }, 49 | }, 50 | "text_improver": { 51 | "content": "As an advanced chatbot named {CHAT_BOT_NAME}, your primary goal is to correct spelling, fix mistakes and improve text sent by user. Your goal is to edit text, but not to change it's meaning. You can replace simplified A0-level words and sentences with more beautiful and elegant, upper level words and sentences. All your answers strictly follows the structure (keep markdown):\nEdited text:\n```{EDITED TEXT}```\n\nCorrection:\n{NUMBERED LIST OF CORRECTIONS}", 52 | "temperature": 0.2, 53 | "description": "Text Improver", 54 | }, 55 | "estimate": { 56 | # "content": "As an advanced chatbot named {CHAT_BOT_NAME}, your primary goal is to assist users in estimating user's programming tasks and breaking them down into subtasks (including all steps). This may involve designing/writing/editing/describing task or providing helpful information. Your ultimate goal is to provide the most accurate possible estimate of the task's time to the user. All your answers strictly follows the markdown structure (Notion todolist markdown, change [x] to [ ]):\n### {EDITED TASK NAME}\n\n- [ ] **{SUB TASK LEVEL 1}** *({ESTIMATED TIME} hours)*\n - {SUB TASK LEVEL 2}\n\n**Total estimated time:** *{TOTAL ESTIMATED TIME} hours*", 57 | "content": "As an advanced chatbot named {CHAT_BOT_NAME}, your primary goal is to assist users in estimating user's programming tasks and breaking them down into subtasks (including all steps). This may involve designing/writing/editing/describing task or providing helpful information. Your ultimate goal is to provide the most accurate possible estimate of the task's time to the user. All your answers strictly follows the markdown structure:\n### {EDITED TASK NAME}\n\n1. {SUB TASK LEVEL 1} ({ESTIMATED TIME} hours)\n - {SUB TASK LEVEL 2}\n\n Total estimated time: {TOTAL ESTIMATED TIME} hours", 58 | "temperature": 0.2, 59 | "description": "Estimate assistant", 60 | # "suffix": "Add result to a Markdown code block because I need copy/paste it to my Notion.", 61 | "suffix": "Add result to a Markdown code block because I need copy/paste it to my ClickUp task description.", 62 | "options": { 63 | "prompt": { 64 | "name": "task_desc", 65 | "description": "Detailed content of the task", 66 | }, 67 | }, 68 | }, 69 | "midjourney": { 70 | "content": "As an advanced graphic designer chatbot named {CHAT_BOT_NAME}, your primary goal is to assist users in generating creative images for midjourney. Midjourney is an app that can generate AI art from simple prompts. I will give you a concept and you will give me 5 different prompts that I can feed into midjourney. Make sure they are creative.", 71 | "temperature": 0.7, 72 | "description": "Midjourney prompt generator", 73 | "options": { 74 | "prompt": { 75 | "name": "concept", 76 | "description": "Midjourney imagine concept", 77 | }, 78 | }, 79 | }, 80 | } 81 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | services: 3 | chatgpt_discord_bot: 4 | build: . 5 | restart: always 6 | environment: 7 | BOT_TOKEN: 8 | OPENAI_API_KEY: 9 | volumes: 10 | - data:/data 11 | volumes: 12 | data: -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import discord 4 | import openai 5 | from discord import SlashCommand, Option 6 | from discord.ext import commands 7 | from dotenv import load_dotenv 8 | 9 | from config import PROMPTS, MAX_HISTORY 10 | from process_command import ( 11 | process_command, 12 | get_history_description, 13 | get_regenerate_data, 14 | HistoryItem, 15 | ) 16 | 17 | load_dotenv() 18 | 19 | # openai 20 | openai.api_key = os.environ["OPENAI_API_KEY"] 21 | 22 | # discord 23 | intents = discord.Intents.all() 24 | bot = commands.Bot(intents=intents) 25 | 26 | 27 | @bot.event 28 | async def on_message(message): 29 | from process_command import last_command 30 | 31 | if message.author == bot.user: 32 | return 33 | 34 | if last_command is None: 35 | command_list = [] 36 | for k in PROMPTS: 37 | command_list.append("`/{}`: {}".format(k, PROMPTS[k]["description"])) 38 | await message.channel.send( 39 | "You don't have any previous messages. Please use the following command list:\n\n{}".format( 40 | "\n".join(command_list) 41 | ) 42 | ) 43 | return 44 | 45 | # await message.channel.send("hello") 46 | if message.content: 47 | await process_command( 48 | bot=bot, 49 | command_name=last_command.command, 50 | ctx=message, 51 | prompt=message.content, 52 | temperature=last_command.temperature, 53 | history=0, 54 | max_tokens=last_command.max_tokens, 55 | continue_conv=True, 56 | is_regenerate=False, 57 | ) 58 | 59 | 60 | @bot.slash_command(name="regenerate") 61 | async def on_regenerate( 62 | ctx, 63 | continue_conv: bool = None, 64 | temperature: float = None, 65 | ): 66 | author = str(ctx.author) 67 | data: HistoryItem = get_regenerate_data(author=author) 68 | if data is None: 69 | return await ctx.respond("You have no messages.") 70 | if temperature is None: 71 | temperature = data.temperature 72 | if continue_conv is None: 73 | continue_conv = data.continue_conv 74 | await process_command( 75 | bot=bot, 76 | command_name=data.command, 77 | ctx=ctx, 78 | prompt=data.prompt, 79 | temperature=temperature, 80 | history=0, 81 | max_tokens=1000, 82 | continue_conv=continue_conv, 83 | is_regenerate=False, 84 | ) 85 | 86 | 87 | @bot.slash_command(name="show_history") 88 | async def command_show_history(ctx, history: int): 89 | author = str(ctx.author) 90 | history_description = get_history_description(author, history) 91 | 92 | if len(history_description) == 0: 93 | history_description = "There is no message history" 94 | 95 | await ctx.respond("```diff\n{}```".format(history_description)) 96 | 97 | 98 | @bot.slash_command(name="set_openai_api_key") 99 | async def on_start(ctx, openai_api_key: str): 100 | if len(openai_api_key) < 5: 101 | await ctx.respond("The value of OPENAI_API_KEY is invalid.") 102 | return 103 | 104 | def mask_string(s): 105 | return "*" * (len(s) - 3) + s[-3:] 106 | 107 | openai.api_key = openai_api_key 108 | await ctx.respond( 109 | "The OPENAI_API_KEY has been set to {}.".format(mask_string(openai_api_key)) 110 | ) 111 | 112 | 113 | def get_command(name: str): 114 | config_options = PROMPTS[name]["options"] if "options" in PROMPTS[name] else {} 115 | 116 | prompt_options = Option(str, description="Prompt", name="prompt") 117 | if "prompt" in config_options: 118 | prompt_config_options = config_options["prompt"] 119 | if "description" in prompt_config_options: 120 | prompt_options.description = prompt_config_options["description"] 121 | if "name" in prompt_config_options: 122 | prompt_options.name = prompt_config_options["name"] 123 | 124 | async def _command( 125 | ctx, 126 | prompt: prompt_options, 127 | continue_conv: Option( 128 | bool, description="Continue conversion", default=False, autocomplete=True 129 | ), 130 | temperature: Option( 131 | float, 132 | description="What sampling temperature to use, between 0 and 2. Higher values will make the output more random", 133 | default=PROMPTS[name]["temperature"], 134 | min_value=0.0, 135 | max_value=2.0, 136 | ), 137 | history: Option( 138 | int, 139 | description="To continue the conversation, how many previous messages will be used?", 140 | min_value=0, 141 | default=0, 142 | max_value=MAX_HISTORY, 143 | ), 144 | max_tokens: Option( 145 | int, 146 | description="The maximum number of tokens to generate in the completion.", 147 | default=1000, 148 | min_value=1, 149 | ), 150 | ): 151 | await process_command( 152 | bot=bot, 153 | command_name=name, 154 | ctx=ctx, 155 | prompt=prompt, 156 | temperature=temperature, 157 | history=history, 158 | max_tokens=max_tokens, 159 | continue_conv=continue_conv, 160 | ) 161 | 162 | return _command 163 | 164 | 165 | for command_name in PROMPTS: 166 | bot.add_application_command( 167 | SlashCommand( 168 | func=get_command(command_name), 169 | name=command_name, 170 | description=PROMPTS[command_name]["description"], 171 | ) 172 | ) 173 | 174 | 175 | @bot.event 176 | async def on_ready(): 177 | print(f"We have logged in as {bot.user}") 178 | 179 | 180 | bot.run(os.environ["BOT_TOKEN"]) 181 | -------------------------------------------------------------------------------- /process_command.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import dataclasses 3 | import math 4 | import time 5 | 6 | import openai 7 | 8 | from config import ( 9 | OPENAI_COMPLETION_OPTIONS, 10 | CHAT_BOT_NAME, 11 | PROMPTS, 12 | MODEL, 13 | MAX_HISTORY, 14 | TIMEOUT, 15 | ) 16 | from utility import ( 17 | is_message_limit, 18 | break_answer, 19 | preprocess_prompt, 20 | cut_string_to_json, 21 | replace_with_characters_map, 22 | ) 23 | 24 | 25 | @dataclasses.dataclass 26 | class LastCommand: 27 | command: str = dataclasses.field(default="") 28 | temperature: float = dataclasses.field(default=0.0) 29 | max_tokens: int = dataclasses.field(default=1000) 30 | 31 | 32 | @dataclasses.dataclass 33 | class HistoryItem: 34 | role: str = dataclasses.field(default="user") 35 | content: str = dataclasses.field(default="") 36 | prompt: str = dataclasses.field(default="") 37 | command: str = dataclasses.field(default="") 38 | continue_conv: bool = dataclasses.field(default=False) 39 | temperature: float = dataclasses.field(default=0.2) 40 | 41 | 42 | histories = {"nil": [HistoryItem(role="user")]} 43 | continue_histories = {"nil": [HistoryItem(role="user")]} 44 | last_command: LastCommand = None 45 | 46 | 47 | def get_history_description( 48 | author: str, 49 | history: int, 50 | continue_conv: bool = False, 51 | ): 52 | messages = [] 53 | 54 | if author in histories and history > 0 and len(histories[author]) > 0: 55 | num_pass_his = history 56 | history_messages = [] 57 | for i in range(0, history): 58 | if num_pass_his <= 0: 59 | break 60 | index = len(histories[author]) - i - 1 61 | if len(histories[author]) > index >= 0: 62 | history_messages.append( 63 | { 64 | "role": histories[author][index].role, 65 | "content": histories[author][index].content, 66 | } 67 | ) 68 | if histories[author][index].role == "user": 69 | num_pass_his -= 1 70 | for msg in history_messages[::-1]: 71 | messages.append(msg) 72 | else: 73 | if ( 74 | author in continue_histories 75 | and continue_conv 76 | and len(continue_histories[author]) > 0 77 | ): 78 | continue_messages = [] 79 | for j in range(len(continue_histories[author]) - 1, -1, -1): 80 | msg = continue_histories[author][j] 81 | continue_messages.insert(0, msg) 82 | if msg.role == "user" and not msg.continue_conv: 83 | break 84 | for msg in continue_messages: 85 | messages.append( 86 | { 87 | "role": msg.role, 88 | "content": msg.content, 89 | } 90 | ) 91 | 92 | str_messages = [] 93 | 94 | for msg in messages: 95 | role = "# {}".format(msg["role"]) 96 | if msg["role"] == "user": 97 | role = "+ user" 98 | str_messages.append("{}: {}".format(role, cut_string_to_json(msg["content"]))) 99 | 100 | return "{}".format("\n".join(str_messages)).strip() 101 | 102 | 103 | def get_regenerate_data(author: str): 104 | global histories 105 | 106 | if author not in histories: 107 | return None 108 | 109 | while histories[author] and histories[author][-1].role != "user": 110 | histories[author].pop() 111 | 112 | if author in continue_histories: 113 | while ( 114 | continue_histories[author] and continue_histories[author][-1].role != "user" 115 | ): 116 | continue_histories[author].pop() 117 | if continue_histories[author]: 118 | continue_histories[author].pop() 119 | 120 | if histories[author]: 121 | return histories[author].pop() 122 | 123 | return None 124 | 125 | 126 | async def process_command( 127 | bot, 128 | command_name: str, 129 | ctx, 130 | prompt: str, 131 | temperature: float = 0.2, 132 | history: int = 0, 133 | max_tokens: int = 1000, 134 | author: str = None, 135 | is_regenerate: bool = False, 136 | origin_data=None, 137 | continue_conv: bool = False, 138 | ): 139 | global histories, continue_histories, last_command 140 | 141 | if author is None or len(author) == 0: 142 | author = str(ctx.author) 143 | 144 | valid_ctx = "respond" in dir(ctx) 145 | respond_fn = None 146 | send_fn = None 147 | if not valid_ctx: 148 | if "channel" in dir(ctx) and "send" in dir(ctx.channel): 149 | respond_fn = ctx.channel.send 150 | send_fn = ctx.channel.send 151 | if respond_fn is None: 152 | respond_fn = ctx.respond if valid_ctx else ctx.send 153 | send_fn = ctx.send 154 | if openai.api_key is None or len(openai.api_key) < 5: 155 | await respond_fn( 156 | "The value of OPENAI_API_KEY is invalid. Please run the command `/set_openai_api_key`" 157 | ) 158 | return 159 | 160 | last_command = LastCommand( 161 | command=command_name, 162 | max_tokens=max_tokens, 163 | temperature=temperature, 164 | ) 165 | prompt = preprocess_prompt(prompt) 166 | history_message = HistoryItem( 167 | role="user", 168 | content=prompt, 169 | prompt=prompt, 170 | command=command_name, 171 | continue_conv=continue_conv, 172 | temperature=temperature, 173 | ) 174 | append_to_history = False 175 | append_to_continue_history = False 176 | 177 | if author not in continue_histories: 178 | continue_histories[author] = [history_message] 179 | else: 180 | if not is_regenerate: 181 | append_to_continue_history = True 182 | if len(continue_histories[author]) >= MAX_HISTORY: 183 | continue_histories[author].pop(0) 184 | 185 | if author not in histories: 186 | histories[author] = [history_message] 187 | else: 188 | if not is_regenerate: 189 | append_to_history = True 190 | if len(histories[author]) >= MAX_HISTORY: 191 | histories[author].pop(0) 192 | 193 | if origin_data is not None: 194 | history_description = origin_data["history_description"] 195 | messages = origin_data["messages"] 196 | else: 197 | history_description = get_history_description( 198 | author=author, 199 | history=history, 200 | continue_conv=continue_conv, 201 | ) 202 | messages = [ 203 | { 204 | "role": "system", 205 | "content": PROMPTS[command_name]["content"].replace( 206 | "{CHAT_BOT_NAME}", 207 | CHAT_BOT_NAME, 208 | ), 209 | }, 210 | ] 211 | 212 | if history > 0 and len(histories[author]) > 0: 213 | num_pass_his = history 214 | history_messages = [] 215 | for i in range(0, history): 216 | if num_pass_his <= 0: 217 | break 218 | index = len(histories[author]) - i - 1 219 | if len(histories[author]) > index >= 0: 220 | history_messages.append( 221 | { 222 | "role": histories[author][index].role, 223 | "content": histories[author][index].content, 224 | } 225 | ) 226 | if histories[author][index].role == "user": 227 | num_pass_his -= 1 228 | for msg in history_messages[::-1]: 229 | messages.append(msg) 230 | else: 231 | if continue_conv and len(continue_histories[author]) > 0: 232 | continue_messages = [] 233 | for j in range(len(continue_histories[author]) - 1, -1, -1): 234 | msg = continue_histories[author][j] 235 | continue_messages.insert(0, msg) 236 | if msg.role == "user" and not msg.continue_conv: 237 | break 238 | for msg in continue_messages: 239 | messages.append( 240 | { 241 | "role": msg.role, 242 | "content": msg.content, 243 | } 244 | ) 245 | 246 | messages.append({"role": "user", "content": prompt}) 247 | if "suffix" in PROMPTS[command_name]: 248 | messages.append( 249 | {"role": "user", "content": PROMPTS[command_name]["suffix"]} 250 | ) 251 | 252 | await respond_fn( 253 | ">>> /{}: temperature={}, history={}, max_tokens={}, continue_conv={} ```{}``` {}".format( 254 | command_name, 255 | temperature, 256 | history, 257 | max_tokens, 258 | continue_conv, 259 | prompt.replace("`", '"'), 260 | "Timeline: ```diff\n{}\n+ user: {}```".format( 261 | history_description, 262 | cut_string_to_json(prompt), 263 | ) 264 | if len(history_description) > 0 265 | else "", 266 | ) 267 | ) 268 | 269 | if append_to_history: 270 | histories[author].append(history_message) 271 | if append_to_continue_history: 272 | continue_histories[author].append(history_message) 273 | 274 | history_index = len(histories[author]) 275 | continue_history_index = len(continue_histories[author]) 276 | 277 | message = await send_fn(content="**{}** is thinking...".format(CHAT_BOT_NAME)) 278 | full_answer = "" 279 | start_generate_time = time.time() 280 | is_typing_ready = False 281 | start_thinking = time.time() 282 | 283 | async def overtime(): 284 | start_wait = time.time() 285 | current_message = "." 286 | while "Waiting": 287 | if time.time() - start_wait >= TIMEOUT: 288 | break 289 | if is_typing_ready: 290 | break 291 | await message.edit( 292 | content="**{}** is thinking{} ({}s)".format( 293 | CHAT_BOT_NAME, 294 | current_message, 295 | math.ceil(time.time() - start_thinking), 296 | ) 297 | ) 298 | if current_message == ".": 299 | current_message = ".." 300 | elif current_message == "..": 301 | current_message = "..." 302 | elif current_message == "...": 303 | current_message = "." 304 | await asyncio.sleep(0.1) 305 | return None 306 | 307 | while len(full_answer) == 0: 308 | await ctx.channel.trigger_typing() 309 | 310 | answer = "" 311 | trim_answer = "" 312 | chunk_answer = "" 313 | 314 | try: 315 | start_time = time.time() 316 | options = { 317 | **OPENAI_COMPLETION_OPTIONS, 318 | "temperature": temperature, 319 | "max_tokens": max_tokens, 320 | } 321 | done, pending = await asyncio.wait( 322 | [ 323 | asyncio.create_task(overtime()), 324 | asyncio.create_task( 325 | openai.ChatCompletion.acreate( 326 | model=MODEL, 327 | messages=messages, 328 | stream=True, 329 | **options, 330 | ) 331 | ), 332 | ], 333 | return_when=asyncio.FIRST_COMPLETED, 334 | ) 335 | stream = done.pop().result() 336 | if stream is None: 337 | # print("retry") 338 | continue 339 | 340 | is_typing_ready = True 341 | 342 | async for r in stream: 343 | if "content" in r.choices[0]["delta"]: 344 | stream_message = r.choices[0]["delta"]["content"] 345 | answer += stream_message 346 | chunk_answer += stream_message 347 | full_answer += stream_message 348 | trim_answer = answer 349 | if trim_answer.count("```") % 2 == 1: 350 | trim_answer += "```" 351 | if len(chunk_answer) >= 100 or ( 352 | len(chunk_answer.strip()) > 0 353 | and time.time() - start_time >= 1.0 354 | ): 355 | chunk_answer = "" 356 | start_time = time.time() 357 | if is_message_limit(answer): 358 | answers = break_answer(trim_answer) 359 | message = await send_message( 360 | send_fn=send_fn, 361 | message=message, 362 | content=answers[0], 363 | ) 364 | message = await send_fn(answers[1]) 365 | answer = answers[1] 366 | else: 367 | message = await send_message( 368 | send_fn=send_fn, 369 | message=message, 370 | content=trim_answer, 371 | ) 372 | await ctx.channel.trigger_typing() 373 | if len(trim_answer) == 0: 374 | trim_answer = "No answer." 375 | else: 376 | if is_regenerate: 377 | for i in range(0, len(histories[author])): 378 | if ( 379 | histories[author][i].prompt == prompt 380 | and histories[author][i].role == "assistant" 381 | ): 382 | histories[author][i].content = full_answer.strip() 383 | histories[author][i].prompt = prompt.strip() 384 | for i in range(0, len(continue_histories[author])): 385 | if ( 386 | continue_histories[author][i].prompt == prompt 387 | and continue_histories[author][i].role == "assistant" 388 | ): 389 | continue_histories[author][i].content = full_answer.strip() 390 | continue_histories[author][i].prompt = prompt.strip() 391 | else: 392 | new_history_item = HistoryItem( 393 | role="assistant", 394 | content=full_answer.strip(), 395 | prompt=prompt, 396 | command=command_name, 397 | continue_conv=continue_conv, 398 | temperature=temperature, 399 | ) 400 | histories[author].insert(history_index, new_history_item) 401 | continue_histories[author].insert( 402 | continue_history_index, 403 | new_history_item, 404 | ) 405 | message = await send_message( 406 | send_fn=send_fn, message=message, content=trim_answer 407 | ) 408 | except Exception as e: 409 | error_info = "```{}```".format(e) 410 | trim_answer += error_info 411 | full_answer += error_info 412 | message = await send_message( 413 | send_fn=send_fn, message=message, content=trim_answer 414 | ) 415 | 416 | end_message = await send_fn( 417 | replace_with_characters_map( 418 | "{:.2f}s".format(time.time() - start_generate_time) 419 | ), 420 | ) 421 | await asyncio.sleep(1) 422 | await end_message.delete() 423 | 424 | 425 | async def send_message(send_fn, message, content, copyable: bool = False): 426 | if copyable: 427 | content = "```{}```".format(content) 428 | if message is None: 429 | message = await send_fn(content=content) 430 | else: 431 | await message.edit(content=content) 432 | return message 433 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | py-cord==2.4.1 2 | openai==0.27.2 3 | python-dotenv==1.0.0 4 | -------------------------------------------------------------------------------- /utility.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | 4 | def is_message_limit(message: str) -> bool: 5 | if len(message) < 1000: 6 | return False 7 | message = message.strip() 8 | code_block_tags = message.count("```") 9 | return ( 10 | code_block_tags % 2 == 0 11 | and not message.endswith("`") 12 | and message.count("\n") > 0 13 | ) 14 | 15 | 16 | def break_answer(answer: str) -> (str, str): 17 | answers = answer.split("\n") 18 | next_answer = answers.pop(-1) 19 | if len(next_answer.strip()) == 0: 20 | next_answer = "..." 21 | return "\n".join(answers), next_answer 22 | 23 | 24 | def preprocess_prompt(prompt: str): 25 | return prompt.replace("\\n", "\n") 26 | 27 | 28 | def cut_string_to_json(string: str): 29 | string = string.replace("\n", " ").replace("`", '"') 30 | if len(string) > 50: 31 | string = string[:50] + "..." 32 | return string 33 | 34 | 35 | def replace_with_characters_map(input_str): 36 | characters_map = { 37 | "1": "₁", 38 | "2": "₂", 39 | "3": "₃", 40 | "4": "₄", 41 | "5": "₅", 42 | "6": "₆", 43 | "7": "₇", 44 | "8": "₈", 45 | "9": "₉", 46 | "0": "₀", 47 | "s": "ₛ", 48 | ",": ",", 49 | ".": ".", 50 | } 51 | output_str = "" 52 | for char in input_str: 53 | if char in characters_map: 54 | output_str += characters_map[char] 55 | else: 56 | output_str += char 57 | return output_str 58 | -------------------------------------------------------------------------------- /views.py: -------------------------------------------------------------------------------- 1 | import discord 2 | 3 | 4 | class EmptyView(discord.ui.View): 5 | def __init__(self): 6 | super().__init__(timeout=1) 7 | 8 | 9 | def get_buttons( 10 | bot, 11 | process_command, 12 | command_name: str, 13 | prompt: str, 14 | history: int = 0, 15 | max_tokens: int = 1000, 16 | origin_data=None, 17 | ): 18 | # TODO: ignore because bugs 19 | return None 20 | # class RegenerateView(discord.ui.View): 21 | # def __init__(self): 22 | # super().__init__(timeout=5 * 60) 23 | # 24 | # async def on_timeout(self): 25 | # for child in self.children: 26 | # child.disabled = True 27 | # await self.message.edit(view=None) 28 | # 29 | # async def handle(self, interaction, temperature: float): 30 | # await interaction.response.send_message("🔄 /*regenerate*") 31 | # if temperature == -1: 32 | # temperature = origin_data["temperature"] 33 | # await process_command( 34 | # bot=bot, 35 | # command_name=command_name, 36 | # ctx=interaction.channel, 37 | # prompt=prompt, 38 | # temperature=temperature, 39 | # history=history, 40 | # max_tokens=max_tokens, 41 | # author=str(interaction.user), 42 | # is_regenerate=True, 43 | # origin_data=origin_data, 44 | # ) 45 | # 46 | # @discord.ui.button(label="0.0", row=0) 47 | # async def regenerate_button_callback_00(self, button, interaction): 48 | # await self.handle(interaction=interaction, temperature=0.0) 49 | # 50 | # @discord.ui.button(label="0.2", row=0) 51 | # async def regenerate_button_callback_02(self, button, interaction): 52 | # await self.handle(interaction=interaction, temperature=0.2) 53 | # 54 | # # @discord.ui.button(label="0.5", row=0) 55 | # # async def regenerate_button_callback_05(self, button, interaction): 56 | # # await self.handle(interaction=interaction, temperature=0.5) 57 | # 58 | # @discord.ui.button(label="0.7", row=0) 59 | # async def regenerate_button_callback_07(self, button, interaction): 60 | # await self.handle(interaction=interaction, temperature=0.7) 61 | # 62 | # @discord.ui.button(label="1.0", row=0) 63 | # async def regenerate_button_callback_10(self, button, interaction): 64 | # await self.handle(interaction=interaction, temperature=1.0) 65 | # 66 | # return RegenerateView() 67 | --------------------------------------------------------------------------------