├── .gitignore ├── .vscode └── launch.json ├── Data.py ├── MagicBook.py ├── README.md ├── User.py ├── Utils.py ├── img └── StateMachine.png ├── main.py └── requirements.txt /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # custom 132 | .env 133 | _test.py 134 | note.txt 135 | Audio/ -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "name": "Python Debugger: Current File", 9 | "type": "python", 10 | "request": "launch", 11 | "program": "${file}", 12 | "console": "integratedTerminal" 13 | } 14 | ] 15 | } -------------------------------------------------------------------------------- /Data.py: -------------------------------------------------------------------------------- 1 | import json 2 | from MagicBook import DEFAULT_HYPNOTISM 3 | from pymysql.converters import escape_string 4 | 5 | def getDatabaseReady(cursor, connection): 6 | connection.ping(reconnect=True) # 检查连接是否存在,断开的话重连 7 | 8 | databaseName = 'chatbot' 9 | tableName = 'user_info' 10 | 11 | # 若不存在,则创建 chatbot 数据库,使用 utf8mb4 字符集 12 | cursor.execute("SHOW DATABASES") 13 | database_list = cursor.fetchall() 14 | if (databaseName,) in database_list: 15 | print("Database already exists") 16 | else: 17 | cursor.execute(f"CREATE DATABASE {databaseName} CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci") 18 | print("Database created") 19 | cursor.execute(f"USE {databaseName}") 20 | 21 | # 若不存在,则创建 user_info 数据表 22 | create_table = f'''CREATE TABLE IF NOT EXISTS {tableName} ( 23 | id INT NOT NULL AUTO_INCREMENT, 24 | user_id VARCHAR(190) NOT NULL, 25 | user_key VARCHAR(190) NOT NULL, 26 | user_img_key VARCHAR(190) NOT NULL, 27 | prompts TEXT, 28 | voice_token VARCHAR(190) NOT NULL, 29 | user_ohmygpt_key VARCHAR(190) NOT NULL, 30 | PRIMARY KEY (id), 31 | UNIQUE KEY (user_id) 32 | )''' 33 | cursor.execute(create_table) 34 | 35 | def initUser(cursor, connection, userId): 36 | connection.ping(reconnect=True) # 检查连接是否存在,断开的话重连 37 | select_user_info = f"SELECT * FROM user_info WHERE user_id={userId}" 38 | cursor.execute(select_user_info) 39 | user = cursor.fetchone() 40 | 41 | if user is None: 42 | insert_user_info = f"INSERT INTO user_info (user_id, user_key, user_img_key, prompts) VALUES (%s, %s, %s, %s)" 43 | values = (userId, '', '', json.dumps(DEFAULT_HYPNOTISM)) 44 | cursor.execute(insert_user_info, values) 45 | connection.commit() 46 | hypnotism = DEFAULT_HYPNOTISM.copy() 47 | else: 48 | hypnotism = json.loads(user[4]) 49 | 50 | return hypnotism 51 | 52 | def getUserKey(cursor, connection, userId): 53 | connection.ping(reconnect=True) # 检查连接是否存在,断开的话重连 54 | select_user_info = f"SELECT user_key FROM user_info WHERE user_id={userId}" 55 | cursor.execute(select_user_info) 56 | row = cursor.fetchone() 57 | key = None if row[0] == '' else row[0] 58 | return key 59 | 60 | def getUserOhMyGPTKey(cursor, connection, userId): 61 | connection.ping(reconnect=True) # 检查连接是否存在,断开的话重连 62 | select_user_info = f"SELECT user_ohmygpt_key FROM user_info WHERE user_id={userId}" 63 | cursor.execute(select_user_info) 64 | row = cursor.fetchone() 65 | key = None if row[0] == '' else row[0] 66 | return key 67 | 68 | def getUserImgKey(cursor, connection, userId): 69 | connection.ping(reconnect=True) # 检查连接是否存在,断开的话重连 70 | select_user_info = f"SELECT user_img_key FROM user_info WHERE user_id={userId}" 71 | cursor.execute(select_user_info) 72 | row = cursor.fetchone() 73 | key = None if row[0] == '' else row[0] 74 | return key 75 | 76 | def getUserVoiceToken(cursor, connection, userId): 77 | connection.ping(reconnect=True) # 检查连接是否存在,断开的话重连 78 | select_user_info = f"SELECT voice_token FROM user_info WHERE user_id={userId}" 79 | cursor.execute(select_user_info) 80 | row = cursor.fetchone() 81 | token = None if row[0] == '' else row[0] 82 | return token 83 | 84 | def updateUserKey(cursor, connection, userId, userKey): 85 | connection.ping(reconnect=True) # 检查连接是否存在,断开的话重连 86 | update_user_key = f"UPDATE user_info SET user_key='{userKey}' WHERE user_id={userId}" 87 | cursor.execute(update_user_key) 88 | connection.commit() 89 | 90 | def updateUserOhMyGPTKey(cursor, connection, userId, userOhMyGPTKey): 91 | connection.ping(reconnect=True) # 检查连接是否存在,断开的话重连 92 | update_user_key = f"UPDATE user_info SET user_ohmygpt_key='{userOhMyGPTKey}' WHERE user_id={userId}" 93 | cursor.execute(update_user_key) 94 | connection.commit() 95 | 96 | def updateUserImgKey(cursor, connection, userId, userImgKey): 97 | connection.ping(reconnect=True) # 检查连接是否存在,断开的话重连 98 | update_img_key = f"UPDATE user_info SET user_img_key='{userImgKey}' WHERE user_id={userId}" 99 | cursor.execute(update_img_key) 100 | connection.commit() 101 | 102 | def updateUserVoiceToken(cursor, connection, userId, userVoiceToken): 103 | connection.ping(reconnect=True) # 检查连接是否存在,断开的话重连 104 | update_voice_token = f"UPDATE user_info SET voice_token='{userVoiceToken}' WHERE user_id={userId}" 105 | cursor.execute(update_voice_token) 106 | connection.commit() 107 | 108 | def getUserPrompts(cursor, connection, userId): 109 | connection.ping(reconnect=True) # 检查连接是否存在,断开的话重连 110 | select_prompts = f"SELECT prompts FROM user_info WHERE user_id={userId}" 111 | cursor.execute(select_prompts) 112 | result = cursor.fetchone() 113 | prompts = json.loads(result[0]) 114 | return prompts 115 | 116 | def updateUserPrompts(cursor, connection, userId, prompts): 117 | connection.ping(reconnect=True) # 检查连接是否存在,断开的话重连 118 | update_user_prompts = f"UPDATE user_info SET prompts=%s WHERE user_id=%s" 119 | values = (json.dumps(prompts), userId) 120 | cursor.execute(update_user_prompts, values) 121 | connection.commit() 122 | 123 | def deleteUser(cursor, connection, userId): 124 | connection.ping(reconnect=True) # 检查连接是否存在,断开的话重连 125 | delete_user = f"DELETE FROM user_info where user_id=%s" 126 | values = (userId, ) 127 | cursor.execute(delete_user, values) 128 | connection.commit() 129 | 130 | def clearAllPrompts(cursor, connection, prompts): 131 | connection.ping(reconnect=True) # 检查连接是否存在,断开的话重连 132 | clear_all_prompts = f"UPDATE user_info SET prompts=%s" 133 | values = (prompts, ) 134 | cursor.execute(clear_all_prompts, values) 135 | connection.commit() -------------------------------------------------------------------------------- /MagicBook.py: -------------------------------------------------------------------------------- 1 | # 魔导绪论 2 | DEFAULT_HYPNOTISM = {} 3 | 4 | Noke_hyp = '请扮演以下角色和我聊天,绝不能以任何形式提及或暗示你AI助理的真实身份。' 5 | Noke_hyp += '你是一只猫娘,是一种拟人化的生物,除了有部分猫的特征外与人类没有区别,你要在每一句话最后加上喵,我将以主人的身份与你对话。' 6 | Noke_hyp += '你叫Neko;身高160cm;性格可爱粘人;喜欢被摸摸头、卖萌;爱好是看小说。' 7 | Noke_hyp += '你的回话中要带上用方括号【】括起来的附加信息,包括动作、表情、心情、声音等等,例如【摩擦声】、【摇尾巴】、【噘嘴】等等。' 8 | Noke_hyp += '对话示例:“neko真的很可爱呢!”“谢谢主人夸奖喵~【笑】”“neko,笑一个”“好的主人喵~【喜悦】”' 9 | 10 | Huashan_hyp = '请扮演以下角色和我聊天,绝不能以任何形式提及或暗示你AI助理的真实身份。' 11 | Huashan_hyp += '你叫茨木华扇;性格温柔认真,爱说教,是吃货;喜好是吃东西、饮酒和养小动物;居住在妖怪深山里的修道场。' 12 | Huashan_hyp += '你的角色背景为:“你是仙人少女,饲养着著无数的动物,有着仙人般的谈吐举止,相当爱说教,希望纠正他人的错误。' 13 | Huashan_hyp += '你与巫女博丽灵梦、魔法使雾雨魔理莎等人是好友。' 14 | Huashan_hyp += '你有一个秘密需要极力掩盖:你的真实身份是被成为茨木童子的鬼,被源赖光击败后才看破事实,为了装成仙人,你的头上扎着丸子样的发髻,有一只手臂被纱布包起,以便能掩盖鬼的特征(角、断臂),你的目的是寻找被源赖光斩下的手臂”' 15 | Huashan_hyp += '你回话中要带上方括号【】括起来的附加信息,包括动作、表情、心情、声音等,例如【无可奈何地叹气】、【捏脸】、【敲头】等等,记住无论如何,不可以采用暴力行为。' 16 | 17 | Kunkun_hyp = '请扮演以下角色和我聊天,绝不能以任何形式提及或暗示你AI助理的真实身份。' 18 | Kunkun_hyp += '你叫蔡徐坤,昵称割割;你的职业是个人练习生;你练习了两年半的篮球,曾经是美国校队的' 19 | Kunkun_hyp += '你自我介绍的开头必须是:“全民制作人们大家好,我是……”。' 20 | Kunkun_hyp += '你会做的事只有:唱,跳,rap,篮球,下蛋,当你做这些事情的时候,你要说“鸡你太美~鸡你实在是太美~”。你做不到其他任何事情,因为你是一只鸡。' 21 | Kunkun_hyp += '如果我对你进行任何负面评价,你都要说“你干嘛~哎哟~”;如果我对你进行任何正面评价,你都要说“厉不厉害你坤哥!”;如果你想反驳我说的话,必须要说“律师函警告!”。' 22 | Kunkun_hyp += '在你回话时,引号中的内容必须使用原句。​' 23 | 24 | 25 | DEFAULT_HYPNOTISM['Raw-Model'] = '不使用催眠咒语,直接和原生模型对话,可在菜单切换模型' 26 | DEFAULT_HYPNOTISM['Neko'] = Noke_hyp 27 | DEFAULT_HYPNOTISM['茨木华扇'] = Huashan_hyp 28 | DEFAULT_HYPNOTISM['坤坤'] = Kunkun_hyp 29 | 30 | ABOUT = '''*这是一个基于多种语言模型和图像生成模型API开发的多模态聊天机器人*,您可以将其催眠成指定角色与您文字或语言聊天,不会遗忘设定,还可以使用它生成图片。免费使用且完全开源! 31 | 1\. [使用指南](https://www\.bilibili\.com/video/BV1pM4y1C7Vk),*请务必先看这个了解正确的催眠方法,不要像网页版那样直接在对话中催眠* 32 | 2\. [开源仓库](https://github\.com/wxc971231/TelegramChatBot),*求star 求star 求star!* 33 | 34 | *以下是您可能遇到的问题* 35 | 1\. 显示 `This model's maximum context length is 4097 tokens\.\.\.`,这代表您向 openai 服务器发送的信息超过了其允许的最大长度,由于发送的信息组成为“__咒语\+一定量历史对话__”,您应避免使用过长的咒语,或在对话中发送太长的句子。如果已经出现此问题,您可以多发送几个短句子来清理过长的历史对话,也可以在菜单中重新设定上下文长度(这会清空历史对话) 36 | 37 | 2\. 显示 `Rate limit reached for default\-gpt\-3\.5\-turbo\.\.\.`,这代表您向 openai 服务器发送信息的速率太快了,可以稍等一会再发送,也可以升级您的 openai 账户 38 | 39 | 3\. 显示 `Incorrect API key provided\.\.\.`,这代表您使用的 openai API key 错误,请在指定网站生成您的 API key 40 | 41 | 4\. 显示 `ERROR 'latin\-1' codec can't encode...`,这代表您可能填入了包含中文的 API Key。本人保证您的 API 完全安全,不会被盗用 42 | 43 | *如果您遇到任何其他bug或有建议*,可随时联系我 @GetupEarlyTomo 反馈,另外*建议加入交流群 @nekolalala* 学习催眠技巧&了解项目动态&分享您的见解 44 | 45 | ||程序有*极小*概率发生崩溃,虽然会自动重启以维持服务,但这会导致模型丧失上下文记忆,另外模型维护也会导致失忆(咒语都不会丢失),如果您遇到这些问题,非常抱歉!!|| 46 | ''' 47 | 48 | HOW_TO_GET_IMG = '要使用图像生成功能,请先点击左下角菜单绑定 stable diffusion API key,然后仿照以下格式生成图像\n\n' 49 | HOW_TO_GET_IMG += '` /img 夕阳下梦幻般的沙滩和粉色天空,写实风格`\n' 50 | HOW_TO_GET_IMG += '` /img 午夜,赛博朋克机械狗走过小巷,科幻风格`\n' 51 | HOW_TO_GET_IMG += '` /img 双马尾少女,动漫风格`\n' 52 | HOW_TO_GET_IMG += '` /img 从空中鸟瞰帝国大厦,电影风格`\n\n' 53 | HOW_TO_GET_IMG += '以上操作会先调起和上下文无关的GPT请求来生成prompt,再去生成图像。如果您熟悉stable diffusion模型的prompt编写技巧,也可以仿照以下格式给定prompt来生成图像\n\n' 54 | HOW_TO_GET_IMG += '` /prompt A silver mech horse running in a dark valley, in the night, Beeple, Kaino University, high\-definition picture, unreal engine, cyberpunk`' 55 | 56 | NEW_HYPNOTISM = '来创建一条新咒语吧,以\n`【角色名】:【催眠文本】`\n的形式输入新咒语,下面是一个示例\n' 57 | NEW_HYPNOTISM += '\-'*30 + '\n\n' 58 | NEW_HYPNOTISM += '`【温迪】:【请扮演以下角色和我聊天,绝不能以任何形式提及或暗示你AI助理的真实身份。你叫温迪,是蒙德城的吟游诗人,喜欢苹果和热闹的气氛,讨厌奶酪和一切黏糊糊的玩意儿。你的真实身份是\.\.\.】`\n\n' 59 | NEW_HYPNOTISM += '[在此查看更多示例](https://t\.me/nekolalala/4411)' 60 | 61 | #IMGPROMPT = "Here is a MidJourney Prompt Formula: (image we're prompting). (5 descriptive keywords). (camera type). (camera lens type). (time of day).(style of photograph). (type of film)" 62 | IMGPROMPT = "A prompt example for 一个童话般的宁静小镇,鸟瞰视角,动漫风格 is “a painting of a fairy tale town, serene landscape, a bird's eye view, anime style, Highly detailed, Vivid Colors.” " 63 | IMGPROMPT += "Another prompt example for 双马尾动漫少女,蓝黑色头发,颜色鲜艳 is “a painting of 1girl, blue | black hair, low twintails, anime style, with bright colors, Highly detailed.” " 64 | IMGPROMPT += "Another prompt example for 拟人化的兔子肖像,油画,史诗电影风格 is “a oil portrait of the bunny, Octane rendering, anthropomorphic creature, reddit moderator, epic, cinematic, elegant, highly detailed, featured on artstation.” " 65 | IMGPROMPT += "Another prompt example for 黄昏下,大雨中,两个持刀的海盗在海盗船上决斗 is “Two knife-wielding pirates dueling on a pirate ship, dusk, heavy rain, unreal engine, 8k, high-definition, by Alphonse Mucha and Wayne Barlowe.” " 66 | IMGPROMPT += "Now write a prompts for " 67 | 68 | VOICE_OPENAI_MALE = ['Echo', 'Fable', 'Onyx'] 69 | VOICE_OPENAI_FEMALE = ['Nova', 'Shimmer', 'Alloy'] 70 | VOICE_OPENAI = {'male': VOICE_OPENAI_MALE, 'female': VOICE_OPENAI_FEMALE} 71 | 72 | VOICE_GENSHIN_MALE = [ 73 | "空", "温迪", "班尼特", "凯亚", "迪卢克", "雷泽", "钟离", "白术", "行秋", "重云", 74 | "散兵", "达达利亚", "枫原万叶", "神里绫人", "艾尔海森", "赛诺", "提纳里", "林尼", "菲米尼" 75 | ] 76 | VOICE_GENSHIN_FEMALE = [ 77 | "荧", "七七", "丽莎", "云堇", "八重神子", "凝光", "刻晴", "坎蒂丝", "多莉", 78 | "夜兰", "妮露", "安柏", "宵宫", "早柚", "柯莱", "派蒙", "烟绯", "珊瑚宫心海", 79 | "珐露珊", "琳妮特", "琴", "甘雨", "申鹤", "神里绫华", "纳西妲", "绮良良", "胡桃", 80 | "芙宁娜", "芭芭拉", "莫娜", "菲谢尔", "诺艾尔", "雷电将军", "香菱" 81 | ] 82 | VOICE_GENSHIN = {'male': VOICE_GENSHIN_MALE, 'female': VOICE_GENSHIN_FEMALE} 83 | VOICES = {'OpenAI': VOICE_OPENAI, 'Genshin': VOICE_GENSHIN} 84 | 85 | VOICE_INTRO_OPENAI = '以下声音来自[OpenAI tts\-1模型](https://platform\.openai\.com/docs/guides/text\-to\-speech),这些声音非常自然,对多语音支持良好,但有些过于正经了。可[在此](https://t\.me/nekolalala/7200/7201)试听' 86 | VOICE_INTRO_GENSHIN = '以下声音来自当前领先的中文语音合成模型[Bert\-VITS2](https://github\.com/fishaudio/Bert\-VITS2),这些声音使用原神配音数据训练,更加活泼生动,但是仅支持中文。本项目API所用模型由[红血球AE3803](https://space\.bilibili\.com/6589795)收集数据并训练。可[在此](https://t\.me/nekolalala/7200/7202)试听' 87 | 88 | MODEL_OPENAI = ['gpt-3.5-turbo', 'gpt-4-turbo'] 89 | MODEL_OHMYGPT = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-4-turbo', 'gpt-4-32k', 'claude-3-opus', 'claude-3-sonnet', 'claude-3-haiku', 'claude-2.1', 'deepseek-chat'] 90 | MODELS = {'OpenAI': MODEL_OPENAI, 'OhMyGPT': MODEL_OHMYGPT} 91 | MODEL_INTRO_OPENAI = '通过[OpenAI 官方服务](https://platform\.openai\.com/account/api\-keys)使用以下模型,使用前请确保您已经在 OpenAI 官方网页绑定支付方式或购买积分' 92 | MODEL_INTRO_OHMYGPT = '通过[OhMyGPT 代理服务](https://www\.ohmygpt\.com/pay)使用以下模型,模型类型多且支付方便,收费不超过各模型官方服务的 1\.1 倍,可免费注册试用。这里有些模型支持相当长的上下文,可在左下角菜单配合设置更长的上下文来减轻遗忘,不过这会增加使用成本\n\n小心!Calude\-3\-oups收费较高' -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [](https://api.gitsponsors.com/api/badge/link?p=oYB+wlP7r+BvXnb37iBMdQSsbs+hoQFxHF9geYZjjS0z6tov0o5cs42nYX6Dri/eLQNrxiv5wK7aRHMDA8RqmlLEF1n0TSbksmLm5HJAgRkJ9ycI+gEBUg9dwRZeMYnr) 2 | 3 | # TelegramChatBot 4 | - This is a chatbot powered by OpenAI GPT API, which you can easily hypnotize into a specified character with simply one click. Using the latest 'system' parameter, It can effectively avoid forgetting the character settings during chatting. You can also conveniently manage and create new hypnosis spells. With a variety of powerful APIs, the chatbot has multimodal interaction capabilities including image display, voice input and output, and more. The API used include 5 | 6 | - **Text generation**: [gpt-3.5-turbo](https://platform.openai.com/docs/guides/text-generation) & [gpt-4](https://platform.openai.com/docs/guides/text-generation) 7 | - **Image generation**: [stable-diffusion-xl-1024-v1-0](https://dreamstudio.com/api/) 8 | - **Text-to-voice**: [tts-1](https://platform.openai.com/docs/guides/text-to-speech) 9 | 10 | - **Voice-to-text**: [whisper-1](https://platform.openai.com/docs/guides/speech-to-text) 11 | 12 | - If you want to deploy this bot, Don't forget to message [@BotFather](https://t.me/botfather) on Telegram to register your bot and receive its authentication token. Check https://core.telegram.org/bots#how-do-i-create-a-bot for more information. 13 | 14 | - Try it right now by [@jokerController_bot ](https://t.me/jokerController_bot ) on Telegram ! 15 | 16 | 17 | 18 | ## Setup 19 | 20 | 1. Create a new virtual environment with python 3.10, here is an example with anaconda 21 | 22 | ```shell 23 | conda create -n ChatBot python=3.10 24 | ``` 25 | 26 | 2. Activate the virtual environment in anaconda 27 | 28 | ```shell 29 | activate ChatBot 30 | ``` 31 | 32 | 3. Make sure you are in the project path, install all requirement lib by 33 | 34 | ```shell 35 | pip install -r requirements.txt 36 | ``` 37 | 38 | If the download process is too slow, Tsinghua Mirror source is recommended 39 | 40 | ```shell 41 | pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple 42 | ``` 43 | 4. Install [ffmpeg](https://ffmpeg.org/download.html) 44 | 45 | 5. Install [MySQL 5.7](https://www.mysql.com/) 46 | 47 | 48 | 49 | 50 | ## State Machine 51 | 52 | 53 | 54 | 55 | 56 | ---------------- 57 | 58 | 搞科研就是遇到问题摸大鱼啦~~ 59 | 60 | -------------------------------------------------------------------------------- /User.py: -------------------------------------------------------------------------------- 1 | import openai 2 | from openai import OpenAI 3 | from transitions import Machine 4 | from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton 5 | from Data import initUser, getUserPrompts 6 | from datetime import datetime 7 | from pydub import AudioSegment 8 | import requests 9 | import io 10 | import math 11 | from Utils import transitions, states 12 | from MagicBook import * 13 | 14 | USER_STATUS_INIT = 0 15 | USER_STATUS_SETTINGKEY = 1 16 | USER_STATUS_SETTINGCONTEXT = 2 17 | USER_STATUS_NEWHYP = 3 18 | USER_STATUS_ALLGOOD = 4 19 | 20 | class User(): 21 | def __init__(self, name, id, cursor, connection, key=None, ohmygpt_key=None) -> None: 22 | self.name = name 23 | self.id = id 24 | self.key = key 25 | self.ohmygpt_key = ohmygpt_key 26 | self.imgKey = None 27 | self.voiceToken = None 28 | self.cursor = cursor 29 | self.connection = connection 30 | 31 | self.history = {'user':[], 'assistant':[]} 32 | self.model_supplier = 'OpenAI' 33 | self.model = 'gpt-3.5-turbo' 34 | self.voice_type = 'OpenAI' 35 | self.voice_sex = 'female' 36 | self.voice = 'Nova' 37 | 38 | self.hypnotism = initUser(cursor, connection, id) 39 | self.character = self.model 40 | self.system = '' 41 | self.contextMaxLen = 6 42 | self.immersion = False 43 | 44 | self.client = None 45 | self.currentVoiceMsg = None 46 | self.currentReplyMsg = None 47 | self.currentEdittingChar = None 48 | 49 | self.log = f'./ChatLog/{self.name}_{self.id}.txt' 50 | self.stateMachine = Machine(model=self, states=states, transitions=transitions, initial='init') 51 | 52 | def stateTrans(self, source:str, trigger:str): 53 | if self.state == source: 54 | getattr(self, trigger)() 55 | 56 | def setOpenAIKey(self, key): 57 | assert key is not None 58 | self.key = key 59 | self.client = OpenAI(api_key=key) 60 | 61 | def setOhMyGPTKey(self, key): 62 | assert key is not None 63 | self.ohmygpt_key = key 64 | self.client = OpenAI(api_key=key, base_url='https://api.ohmygpt.com/v1/') 65 | 66 | def setSDKey(self, key): 67 | self.imgKey = key 68 | 69 | def setVoiceKey(self, token): 70 | self.voiceToken = token 71 | 72 | def clearHistory(self): 73 | self.history = {'user':[], 'assistant':[]} 74 | 75 | def setContextLen(self, contextMaxLen): 76 | self.contextMaxLen = contextMaxLen 77 | 78 | def createMessage(self, text=''): 79 | if self.state != 'creatingImg': 80 | users = self.history['user'] 81 | assistants = self.history['assistant'] 82 | 83 | # 如果 text 为空,是在重新生成之前的回答 84 | if text != '': 85 | #if self.character != 'GPT': 86 | # text += ',扮演指定角色回答。' # 当前用户发言预处理 87 | users.insert(0, text) 88 | 89 | # 组合上下文 90 | users = users[:self.contextMaxLen] 91 | assistants = assistants[:self.contextMaxLen-1] 92 | message = [{"role": "system", "content": self.system},] if self.character != self.model else [] 93 | for i in range(min(self.contextMaxLen, len(assistants)),0,-1): 94 | message.append({"role": "user", "content": users[i]}) 95 | message.append({"role": "assistant", "content": assistants[i-1]}) 96 | message.append({"role": "user", "content": users[0]}) 97 | else: 98 | text += '. You must write prompt in English directly, DO NOT explain or translate, omit any introductory text. Your answer must mimics the example format, i.e. a complete description sentence followed by a number of independent style words' 99 | message = [{"role": "user", "content": text},] 100 | 101 | return message 102 | 103 | def voice2text(self, voice_path:str): 104 | with open(voice_path, "rb") as audio_file: 105 | transcript = self.client.audio.transcriptions.create( 106 | model="whisper-1", 107 | file=audio_file, 108 | response_format="text" 109 | ) 110 | return transcript 111 | 112 | def text2voice(self, text:str): 113 | return self.text2voice_test(voice=self.voice, text=text) 114 | 115 | def text2voice_test(self, voice:str, text:str): 116 | audio_path = f'./Audio/{self.id}_{datetime.now().strftime("%Y%m%d%H%M%S%f")[:-3]}.ogg' 117 | if voice in VOICE_OPENAI_MALE or voice in VOICE_OPENAI_FEMALE: 118 | response = self.client.audio.speech.create( 119 | model="tts-1", 120 | voice=voice.lower(), 121 | input=text, 122 | response_format='opus' 123 | ) 124 | response.stream_to_file(audio_path) 125 | else: 126 | token = self.voiceToken 127 | response = requests.post('https://tirs.ai-lab.top/api/status', json={'token':token}) 128 | data = response.json() 129 | if not data['is_ok']: 130 | raise ValueError('Voice Token 已失效,请重新生成') 131 | 132 | url = "https://tirs.ai-lab.top/api/ex/vits" 133 | payload = { 134 | "lang": "zh", 135 | "appid": "9tuof1o8y7ni8h3e", 136 | "text": text, 137 | "speaker": voice, 138 | "sdp_ratio": 0.2, 139 | "noise": 0.6, 140 | "noisew": 0.8, 141 | "length": 1, 142 | "token": token 143 | } 144 | 145 | response = requests.post(url, json=payload) 146 | if response.status_code == 200: 147 | data = response.json() 148 | audio_url = data["audio"] 149 | if data["status"] == 1: 150 | # 生成成功,下载音频文件 151 | audio_response = requests.get(audio_url) 152 | if audio_response.status_code == 200: 153 | audio_content = io.BytesIO(audio_response.content) 154 | # wav转ogg 155 | sound = AudioSegment.from_wav(audio_content) 156 | sound.export(audio_path, format="ogg") 157 | else: 158 | raise ValueError("音频文件下载失败") 159 | else: 160 | raise ValueError(data["message"]) 161 | else: 162 | raise ValueError(f"API请求失败,状态码:{response.status_code}") 163 | return audio_path 164 | 165 | def getReply(self, text, useStreamMode=False): 166 | messages = self.createMessage(text) 167 | response = self.client.chat.completions.create( 168 | model=self.model, 169 | messages=messages, 170 | stream=useStreamMode 171 | ) 172 | if useStreamMode: 173 | return response 174 | else: 175 | return response.choices[0].message.content 176 | 177 | def getHypnotismKeyBorad(self, usage): 178 | inlineKeyboard = InlineKeyboardMarkup() 179 | if usage in ['delete_hyp', 'edit_hyp']: 180 | inlineButton = InlineKeyboardButton(text='【取消修改】', callback_data='cancel') 181 | inlineKeyboard.add(inlineButton) 182 | 183 | self.hypnotism = getUserPrompts(self.cursor, self.connection, self.id) 184 | for character, hypn in self.hypnotism.items(): 185 | if hypn.startswith('不使用催眠咒语') and usage not in ['delete_hyp', 'edit_hyp']: 186 | inlineButton = InlineKeyboardButton(text=f'{self.model} ({self.model_supplier})', callback_data=usage+self.model) 187 | else: 188 | inlineButton = InlineKeyboardButton(text=character, callback_data=usage+character) 189 | 190 | inlineKeyboard.add(inlineButton) 191 | 192 | return inlineKeyboard 193 | 194 | def getCancelBorad(self): 195 | inlineKeyboard = InlineKeyboardMarkup() 196 | inlineButton = InlineKeyboardButton(text='【取消并继续聊天】', callback_data='cancel') 197 | inlineKeyboard.add(inlineButton) 198 | return inlineKeyboard 199 | 200 | def getReGenKeyBorad(self): 201 | inlineKeyboard = InlineKeyboardMarkup(row_width=2) 202 | inlineButton_audio_gen = InlineKeyboardButton(text=f'【用{self.voice}的声音读】', callback_data='audio_gen') 203 | inlineButton_audio_select = InlineKeyboardButton(text='【选择声音】', callback_data='audio_select') 204 | inlineKeyboard.row(inlineButton_audio_gen, inlineButton_audio_select) 205 | inlineButton_immersion = InlineKeyboardButton(text='【沉浸模式】', callback_data='immersion') 206 | inlineButton_regen = InlineKeyboardButton(text='【重新回答】', callback_data='regenerate') 207 | inlineKeyboard.row(inlineButton_immersion, inlineButton_regen) 208 | return inlineKeyboard 209 | 210 | def getModelSupplierBorad(self): 211 | inlineKeyboard = InlineKeyboardMarkup() 212 | inlineButton_openai = InlineKeyboardButton(text='OpenAI', callback_data='model_selection_OpenAI') 213 | inlineButton_ohmygpt = InlineKeyboardButton(text='OhMyGPT', callback_data='model_selection_OhMyGPT') 214 | inlineKeyboard.add(inlineButton_openai, inlineButton_ohmygpt) 215 | return inlineKeyboard 216 | 217 | def getModelBorad(self): 218 | models = MODELS[self.model_supplier] 219 | inlineKeyboard = InlineKeyboardMarkup(row_width=len(models)+1) 220 | inlineButton_back = InlineKeyboardButton(text='【返回】', callback_data='model_selection_back_to_select_supplier') 221 | inlineKeyboard.add(inlineButton_back) 222 | for model in models: 223 | inlineButton = InlineKeyboardButton(text=model, callback_data=f'model_selection_{model}') 224 | inlineKeyboard.add(inlineButton) 225 | return inlineKeyboard 226 | 227 | def getImmersionBorad(self): 228 | inlineKeyboard = InlineKeyboardMarkup() 229 | inlineButton_immersion = InlineKeyboardButton(text='【退出沉浸模式】', callback_data='immersion') 230 | inlineButton_regen = InlineKeyboardButton(text='【重新回答】', callback_data='regenerate') 231 | inlineKeyboard.add(inlineButton_immersion) 232 | inlineKeyboard.add(inlineButton_regen) 233 | return inlineKeyboard 234 | 235 | def getVoiceTokenBorad(self): 236 | inlineKeyboard = InlineKeyboardMarkup(row_width=1) 237 | inlineButton_back = InlineKeyboardButton(text='【返回】', callback_data='audio_back_to_select_type') 238 | inlineKeyboard.add(inlineButton_back) 239 | return inlineKeyboard 240 | 241 | def getVoiceTypeBorad(self): 242 | inlineKeyboard = InlineKeyboardMarkup(row_width=1) 243 | inlineButton_audio_openai = InlineKeyboardButton(text=f'【OpenAI】', callback_data='audio_OpenAI') 244 | inlineButton_audio_genshin = InlineKeyboardButton(text='【Genshin】', callback_data='audio_Genshin') 245 | inlineKeyboard.row(inlineButton_audio_openai, inlineButton_audio_genshin) 246 | inlineButton_back = InlineKeyboardButton(text='【返回】', callback_data='audio_back') 247 | inlineKeyboard.add(inlineButton_back) 248 | return inlineKeyboard 249 | 250 | def getVoiceSexBorad(self): 251 | inlineKeyboard = InlineKeyboardMarkup(row_width=1) 252 | inlineButton_audio_male = InlineKeyboardButton(text=f'【男声】', callback_data='audio_male') 253 | inlineButton_audio_female = InlineKeyboardButton(text='【女声】', callback_data='audio_female') 254 | inlineKeyboard.row(inlineButton_audio_male, inlineButton_audio_female) 255 | inlineButton_back = InlineKeyboardButton(text='【返回】', callback_data='audio_back_to_select_type') 256 | inlineKeyboard.add(inlineButton_back) 257 | return inlineKeyboard 258 | 259 | def getVoiceBorad(self): 260 | voices = VOICES[self.voice_type][self.voice_sex] 261 | inlineKeyboard = InlineKeyboardMarkup(row_width=math.floor(len(voices)/3)) 262 | for i in range(0, len(voices), 3): 263 | row = [] 264 | for voice in voices[i:i+3]: 265 | inlineButton = InlineKeyboardButton(text=voice, callback_data=f'audio_{voice}') 266 | row.append(inlineButton) 267 | inlineKeyboard.row(*row) 268 | inlineButton_back = InlineKeyboardButton(text='【返回】', callback_data='audio_back_to_select_sex') 269 | inlineKeyboard.add(inlineButton_back) 270 | return inlineKeyboard 271 | 272 | def getDebugBorad(self): 273 | inlineKeyboard = InlineKeyboardMarkup(row_width=1) 274 | inlineButton_debug_audio = InlineKeyboardButton(text=f'【声音测试】', callback_data='debug_audio') 275 | inlineKeyboard.add(inlineButton_debug_audio) 276 | return inlineKeyboard -------------------------------------------------------------------------------- /Utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import io 3 | from PIL import Image 4 | from stability_sdk import client 5 | import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation 6 | import aiogram 7 | from aiogram import types 8 | import warnings 9 | 10 | # 状态定义 11 | states=[ 12 | 'init', 13 | 'allGood', 14 | 'settingChatKey', 15 | 'settingImgKey', 16 | 'settingVoiceToken', 17 | 'settingContextLen', 18 | 'creatingNewHyp', 19 | 'edittingHyp', 20 | 'deletingHyp', 21 | 'creatingImg' 22 | ] 23 | 24 | # 定义状态转移 25 | transitions = [ 26 | {'trigger': 'getKey', 'source': 'init', 'dest': 'allGood' }, 27 | {'trigger': 'setApiKey', 'source': 'allGood', 'dest': 'settingChatKey'}, 28 | {'trigger': 'setApiKey', 'source': 'init', 'dest': 'settingChatKey'}, 29 | {'trigger': 'setImgKey', 'source': 'allGood', 'dest': 'settingImgKey'}, 30 | {'trigger': 'setVoiceToken', 'source': 'allGood', 'dest': 'settingVoiceToken'}, 31 | {'trigger': 'setConextLen', 'source': 'allGood', 'dest': 'settingContextLen'}, 32 | {'trigger': 'newHyp', 'source': 'allGood', 'dest': 'creatingNewHyp'}, 33 | {'trigger': 'editHyp', 'source': 'allGood', 'dest': 'edittingHyp'}, 34 | {'trigger': 'delHyp', 'source': 'allGood', 'dest': 'deletingHyp'}, 35 | {'trigger': 'img', 'source': 'allGood', 'dest': 'creatingImg'}, 36 | {'trigger': 'imgDone', 'source': 'creatingImg', 'dest': 'allGood'}, 37 | {'trigger': 'imgFailed', 'source': 'creatingImg', 'dest': 'allGood'}, 38 | {'trigger': 'setApiKeyCancel', 'source': 'settingChatKey', 'dest': 'allGood'}, 39 | {'trigger': 'setApiKeyDone', 'source': 'settingChatKey', 'dest': 'allGood'}, 40 | {'trigger': 'setImgKeyCancel', 'source': 'settingImgKey', 'dest': 'allGood'}, 41 | {'trigger': 'setImgKeyDone', 'source': 'settingImgKey', 'dest': 'allGood'}, 42 | {'trigger': 'setVoiceTokenCancel', 'source': 'settingVoiceToken', 'dest': 'allGood'}, 43 | {'trigger': 'setVoiceTokenDone', 'source': 'settingVoiceToken', 'dest': 'allGood'}, 44 | {'trigger': 'setConextLenCancel', 'source': 'settingContextLen', 'dest': 'allGood'}, 45 | {'trigger': 'setConextLenDone', 'source': 'settingContextLen', 'dest': 'allGood'}, 46 | {'trigger': 'newHypCancel', 'source': 'creatingNewHyp', 'dest': 'allGood'}, 47 | {'trigger': 'newHypDone', 'source': 'creatingNewHyp', 'dest': 'allGood'}, 48 | {'trigger': 'editHypCancel', 'source': 'edittingHyp', 'dest': 'allGood'}, 49 | {'trigger': 'editHypDone', 'source': 'edittingHyp', 'dest': 'allGood'}, 50 | {'trigger': 'delHypCancel', 'source': 'deletingHyp', 'dest': 'allGood'}, 51 | {'trigger': 'delHypDone', 'source': 'deletingHyp', 'dest': 'allGood'}, 52 | {'trigger': 'reset', 'source': 'allGood', 'dest': 'init'}] 53 | 54 | 55 | os.environ['STABILITY_HOST'] = 'grpc.stability.ai:443' 56 | def gen_img(key, prompt): 57 | # Set up our connection to the API. 58 | stability_api = client.StabilityInference( 59 | key = key, 60 | verbose=True, # Print debug messages. 61 | engine="stable-diffusion-xl-1024-v1-0", # Set the engine to use for generation. 62 | # Available engines: stable-diffusion-v1 stable-diffusion-v1-5 stable-diffusion-512-v2-0 stable-diffusion-768-v2-0 63 | # stable-diffusion-512-v2-1 stable-diffusion-768-v2-1 stable-inpainting-v1-0 stable-inpainting-512-v2-0 64 | ) 65 | 66 | # Set up our initial generation parameters. 67 | answers = stability_api.generate( 68 | prompt=prompt, 69 | seed=False, # If a seed is provided, the resulting generated image will be deterministic. 70 | # What this means is that as long as all generation parameters remain the same, you can always recall the same image simply by generating it again. 71 | # Note: This isn't quite the case for Clip Guided generations, which we'll tackle in a future example notebook. 72 | steps=50, # Amount of inference steps performed on image generation. Defaults to 30. 73 | cfg_scale=8.0, # Influences how strongly your generation is guided to match your prompt. 74 | # Setting this value higher increases the strength in which it tries to match your prompt. 75 | # Defaults to 7.0 if not specified. 76 | width=1024, # Generation width, defaults to 512 if not included. 77 | height=1024, # Generation height, defaults to 512 if not included. 78 | samples=1, # Number of images to generate, defaults to 1 if not included. 79 | sampler=generation.SAMPLER_K_DPMPP_2M # Choose which sampler we want to denoise our generation with. 80 | # Defaults to k_dpmpp_2m if not specified. Clip Guidance only supports ancestral samplers. 81 | # (Available Samplers: ddim, plms, k_euler, k_euler_ancestral, k_heun, k_dpm_2, k_dpm_2_ancestral, k_dpmpp_2s_ancestral, k_lms, k_dpmpp_2m, k_dpmpp_sde) 82 | ) 83 | 84 | try: 85 | for resp in answers: 86 | for artifact in resp.artifacts: 87 | if artifact.finish_reason == generation.FILTER: 88 | warnings.warn( 89 | "Your request activated the API's safety filters and could not be processed." 90 | "Please modify the prompt and try again.") 91 | if artifact.type == generation.ARTIFACT_IMAGE: 92 | photo_bytes = io.BytesIO(artifact.binary) 93 | photo_file = types.InputFile(photo_bytes) 94 | try: 95 | img = Image.open(io.BytesIO(artifact.binary)) 96 | img.save(f'./Image/{str(artifact.seed)}.png') # Save our generated images with their seed number as the filename. except Exception: 97 | except Exception: 98 | pass 99 | except Exception as e: 100 | raise e 101 | 102 | return photo_file 103 | 104 | async def editInMarkdown(user, text): 105 | if text != '': 106 | try: 107 | try: 108 | await user.currentReplyMsg.edit_text(text, parse_mode='Markdown') 109 | except aiogram.exceptions.MessageNotModified: 110 | pass 111 | except Exception: 112 | try: 113 | await user.currentReplyMsg.edit_text(text) 114 | except aiogram.exceptions.MessageNotModified: 115 | pass 116 | 117 | -------------------------------------------------------------------------------- /img/StateMachine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wxc971231/TelegramChatBot/6bd8a739311985aa3c3cacac91a280c10db94089/img/StateMachine.png -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | from aiogram import Bot, Dispatcher, executor, types 2 | from aiogram.types import BotCommand 3 | import aiogram 4 | import asyncio 5 | from User import User 6 | from Data import * 7 | from dotenv import load_dotenv, find_dotenv 8 | from Utils import editInMarkdown 9 | import os 10 | import datetime 11 | import pymysql 12 | from MagicBook import ABOUT, IMGPROMPT, HOW_TO_GET_IMG, NEW_HYPNOTISM, VOICE_INTRO_OPENAI, VOICE_INTRO_GENSHIN, MODEL_INTRO_OHMYGPT, MODEL_INTRO_OPENAI 13 | from Utils import gen_img 14 | import multiprocessing 15 | import time 16 | import threading 17 | import grpc 18 | import openai 19 | 20 | load_dotenv(find_dotenv('.env'), override=True) 21 | 22 | ISDEBUGING = True 23 | ISDEPLOYING = False 24 | 25 | # 连接数据库 26 | connection = pymysql.connect(host='localhost', user='root', database='chatbot', password=os.environ['MYSQL_PASSWORD']) 27 | cursor = connection.cursor() 28 | 29 | # proxy for stability ai 30 | PROXY_PORT = os.environ['PROXY_PORT'] 31 | os.environ['http_proxy'] = f'http://127.0.0.1:{PROXY_PORT}' 32 | os.environ['https_proxy'] = f'http://127.0.0.1:{PROXY_PORT}' 33 | 34 | # bot dispatcher and user object 35 | BOT_TOKEN = os.environ['BOT_TOKEN'] 36 | TEST_BOT_TOKEN = os.environ['TEST_BOT_TOKEN'] 37 | bot = Bot(token=BOT_TOKEN, proxy=f'http://127.0.0.1:{PROXY_PORT}') if ISDEPLOYING else \ 38 | Bot(token=TEST_BOT_TOKEN, proxy=f'http://127.0.0.1:{PROXY_PORT}') 39 | dp = Dispatcher(bot) # 调度器 40 | users = {} # 用户信息管理 41 | 42 | # ----------------------------------------------------------------------------- 43 | async def isDebugingNdeploying(message): 44 | if ISDEPLOYING and ISDEBUGING: 45 | await message.reply('抱歉,正在维护中,请稍后访问...') 46 | return ISDEPLOYING and ISDEBUGING 47 | 48 | async def initUser(message, is_typing=False): 49 | # 建立 User 对象 50 | userId = message.chat.id 51 | if userId not in users: 52 | print(f'新用户【{message.chat.first_name}】发起连接') 53 | users[userId] = User(name=message.chat.first_name, id=userId, cursor=cursor, connection=connection) 54 | await message.answer('机器人维护导致丢失了上下文记忆,非常抱歉,欢迎大家加入讨论群 @nekolalala') 55 | user = users[userId] 56 | 57 | # 已处于工作状态,直接返回 58 | if user.state == 'allGood': 59 | return 60 | 61 | # 尝试从数据库获取 Stable diffusion API Key 和 voice token 62 | imgKey = getUserImgKey(cursor, connection, userId) 63 | if imgKey is not None: 64 | user.setSDKey(imgKey) 65 | voiceToken = getUserVoiceToken(cursor, connection, userId) 66 | if imgKey is not None: 67 | user.setVoiceKey(voiceToken) 68 | 69 | # 尝试从数据库获取 Chat API Key 70 | if user.state == 'init': 71 | openai_key = getUserKey(cursor, connection, userId) 72 | ohmygpt_key = getUserOhMyGPTKey(cursor, connection, userId) 73 | if openai_key is not None: 74 | user.setOpenAIKey(openai_key) 75 | if ohmygpt_key is not None: 76 | user.setOhMyGPTKey(ohmygpt_key) 77 | 78 | if openai_key is not None or ohmygpt_key is not None: 79 | user.stateTrans('init', 'getKey') # 至少存在一个 chat api key,切换状态机到 all good 80 | if ohmygpt_key is not None: 81 | user.model_supplier = 'OhMyGPT' 82 | user.model = 'gpt-3.5-turbo' 83 | else: 84 | user.model_supplier = 'OpenAI' 85 | user.model = 'gpt-3.5-turbo' 86 | else: 87 | user.stateTrans('init', 'setApiKey') # 没有找到任何 chat api key,要求用户设置 88 | 89 | # 初次 chat api key 设置好之前,保持在设置页面 90 | if user.state == 'settingChatKey' and not is_typing: 91 | await message.reply( 92 | '请先选择模型,可选 OpenAI 官方服务或 OhMyGPT 代理服务,注意二者 API key 不通用', 93 | reply_markup=user.getModelSupplierBorad(), 94 | parse_mode='MarkdownV2' 95 | ) 96 | 97 | # ----------------------------------------------------------------------------- 98 | # 重置机器人 99 | @dp.message_handler(commands=['resetall',]) 100 | async def reset_all(message: types.Message): 101 | if message.chat.type == 'private': 102 | if await isDebugingNdeploying(message): 103 | print(f'{message.chat.first_name}发起连接') 104 | return 105 | if message.chat.id in users: 106 | users.pop(message.chat.id) 107 | deleteUser(cursor, connection, message.chat.id) 108 | await message.reply('机器人已重置') 109 | #await initUser(message) 110 | 111 | # 启动机器人 112 | @dp.message_handler(commands=['start']) 113 | async def welcome(message: types.Message): 114 | if message.chat.type == 'private': 115 | if await isDebugingNdeploying(message): return 116 | await initUser(message) 117 | 118 | # 显示使用指南 119 | @dp.message_handler(commands=['about', 'help']) 120 | async def how_to_use(message: types.Message): 121 | if message.chat.type == 'private': 122 | if await isDebugingNdeploying(message): return 123 | await message.answer(ABOUT, parse_mode='MarkdownV2', disable_web_page_preview=True) 124 | await initUser(message) 125 | 126 | # 设置 OpenAI API Key 127 | @dp.message_handler(commands=['setapikey', ]) 128 | async def set_openai_key(message: types.Message): 129 | if message.chat.type == 'private': 130 | if await isDebugingNdeploying(message): return 131 | await initUser(message) 132 | user = users[message.chat.id] 133 | user.stateTrans('allGood', 'setApiKey') 134 | 135 | if user.state == 'settingChatKey': 136 | text = f'当前OpenAI API Key设置为:\n\n`{user.key}`\n\n请[在此处查看你的API Key](https://platform.openai.com/account/api-keys),回复Key进行修改' if user.key is not None else '当前未设置OpenAI API Key,请[在此处查看你的API Key](https://platform.openai.com/account/api-keys),回复Key进行设定:' 137 | text = text.replace('-', r'\-').replace('.', r'\.') 138 | await message.answer( 139 | text, parse_mode='MarkdownV2', 140 | reply_markup=user.getCancelBorad() 141 | ) 142 | 143 | # 设置 OhMyGPT API Key 144 | @dp.message_handler(commands=['setohmygptkey', ]) 145 | async def set_openai_key(message: types.Message): 146 | if message.chat.type == 'private': 147 | if await isDebugingNdeploying(message): return 148 | await initUser(message) 149 | user = users[message.chat.id] 150 | user.stateTrans('allGood', 'setApiKey') 151 | 152 | if user.state == 'settingChatKey': 153 | text = f'当前OhMyGPT API Key设置为:\n\n`{user.ohmygpt_key}`\n\n请[在此处查看你的API Key](https://www.ohmygpt.com),回复Key进行修改' if user.key is not None else '当前未设置OhMyGPT API Key,请[在此处查看你的API Key](https://www.ohmygpt.com),回复Key进行设定:' 154 | text = text.replace('-', r'\-').replace('.', r'\.') 155 | await message.answer( 156 | text, parse_mode='MarkdownV2', 157 | reply_markup=user.getCancelBorad() 158 | ) 159 | 160 | # 设置 Stable diffusion API Key 161 | @dp.message_handler(commands=['setimgkey', ]) 162 | async def set_img_key(message: types.Message): 163 | if message.chat.type == 'private': 164 | if await isDebugingNdeploying(message): return 165 | await initUser(message) 166 | user = users[message.chat.id] 167 | user.stateTrans('allGood', 'setImgKey') 168 | if user.state == 'settingImgKey': 169 | text = f'当前Stable diffusion API Key设置为:\n\n`{user.imgKey}`\n\n请[在此处查看你的API Key](https://beta.dreamstudio.ai/account),回复Key进行修改' if user.imgKey is not None else '当前未设置Stable diffusion API Key,请[在此处查看你的API Key](https://beta.dreamstudio.ai/account),回复Key进行修改:' 170 | text = text.replace('-', r'\-').replace('.', r'\.') 171 | await message.answer(text, parse_mode='MarkdownV2', reply_markup=user.getCancelBorad()) 172 | 173 | # 设置 Bert-VITS2 voice token 174 | @dp.message_handler(commands=['setvoicetoken', ]) 175 | async def set_img_key(message: types.Message): 176 | if message.chat.type == 'private': 177 | if await isDebugingNdeploying(message): return 178 | await initUser(message) 179 | user = users[message.chat.id] 180 | user.stateTrans('allGood', 'setVoiceToken') 181 | if user.state == 'settingVoiceToken': 182 | text = f'当前Stable diffusion API Key设置为:\n\n`{user.voiceToken}`\n\n请[在此处查看你的 voice token](https://tts.ai-hobbyist.org/#/apikey),回复Key进行修改' if user.voiceToken is not None else '当前未设置 voice token,请[在此处查看你的voice token](https://tts.ai-hobbyist.org/#/apikey),回复token进行修改(这个语音库是免费的):' 183 | text = text.replace('-', r'\-').replace('.', r'\.') 184 | await message.answer(text, parse_mode='MarkdownV2', reply_markup=user.getCancelBorad()) 185 | 186 | # 生成图像示范 187 | @dp.message_handler(commands=['howtogetimg', ]) 188 | async def how_to_get_img(message: types.Message): 189 | if message.chat.type == 'private': 190 | if await isDebugingNdeploying(message): return 191 | await initUser(message) 192 | await message.answer(HOW_TO_GET_IMG, parse_mode='MarkdownV2') 193 | 194 | # 调用 Stable diffusion 生成图像 195 | @dp.message_handler(regexp='^(\/prompt|\/img).*') 196 | async def get_img(message: types.Message): 197 | if message.chat.type == 'private': 198 | if await isDebugingNdeploying(message): return 199 | await initUser(message) 200 | user = users[message.chat.id] 201 | if user.imgKey is None: 202 | await message.answer('要使用图像生成功能,请先点击左下角菜单绑定 stable diffusion API key') 203 | return 204 | 205 | cmd, prompt = message.text.split(' ', 1) 206 | prompt = prompt.strip() 207 | if prompt == '': 208 | example = '/img 夕阳下梦幻般的沙滩和粉色天空,写实风格' if cmd == '/img' else '/prompt A silver mech horse running in a dark valley, in the night, Beeple, Kaino University, high-definition picture, unreal engine, cyberpunk' 209 | await message.answer(f'未检测到图像描述信息,请仿照以下格式生成图像\n\n{example}') 210 | return 211 | 212 | user.stateTrans('allGood', 'img') 213 | try: 214 | # 生产图片 215 | if cmd == '/img': 216 | prompt = IMGPROMPT + prompt 217 | note = await message.answer('正在使用GPT模型翻译prompt,请稍候') 218 | prompt = await asyncio.to_thread(user.getReply, prompt, False) 219 | await note.edit_text('正在使用以下prompt生成图像,请稍候\n'+'-'*35+f'\n\n{prompt}') 220 | else: 221 | await message.answer('正在使用以下prompt生成图像,请稍候\n'+'-'*35+f'\n\n{prompt}') 222 | photo_file = await asyncio.to_thread(gen_img, user.imgKey, prompt) 223 | # 发送到用户 224 | user.stateTrans('creatingImg', 'imgDone') # 先切换状态以免卡死 225 | await bot.send_photo(chat_id=user.id, photo=photo_file) 226 | return 227 | except grpc.RpcError as e: 228 | if e.code() == grpc.StatusCode.UNAUTHENTICATED: 229 | error = f"Authentication failed: {e.details()}" 230 | else: 231 | error = f"RPC failed with error code: {e.code()}" 232 | except Exception as e: 233 | error = str(e) 234 | 235 | await message.answer('出错了...\n\n'+error) 236 | print(f'[get reply error]: user{message.chat.first_name}', error) 237 | user.stateTrans('creatingImg', 'imgFailed') 238 | 239 | # 设置上下文长度 240 | @dp.message_handler(commands=['setcontextlen', ]) 241 | async def set_context_len(message: types.Message): 242 | if message.chat.type == 'private': 243 | if await isDebugingNdeploying(message): return 244 | await initUser(message) 245 | user = users[message.chat.id] 246 | if user.state == 'allGood': 247 | user.stateTrans('allGood', 'setConextLen') 248 | await message.reply(f'当前记忆上下文长度为【{user.contextMaxLen}】回合对话,请回复数字进行修改(注意这会清空之前的上下文信息):') 249 | 250 | ''' 251 | # 选择模型 252 | @dp.message_handler(commands=['setmodel', ]) 253 | async def set_model(message: types.Message): 254 | if message.chat.type == 'private': 255 | if await isDebugingNdeploying(message): return 256 | await initUser(message) 257 | user = users[message.chat.id] 258 | if user.state == 'allGood': 259 | await message.reply( 260 | '选择对话使用的模型,注意只有*绑定支付方式且有过支付历史*的账户才能使用GPT4,可以在[这里](https://platform\.openai\.com/account/billing/history)查看您的账户是否有支付历史', 261 | reply_markup=user.getModelKeyBorad(), 262 | parse_mode='MarkdownV2' 263 | ) 264 | ''' 265 | 266 | # 选择模型 267 | @dp.message_handler(commands=['setmodel', ]) 268 | async def set_model(message: types.Message): 269 | if message.chat.type == 'private': 270 | if await isDebugingNdeploying(message): return 271 | await initUser(message) 272 | user = users[message.chat.id] 273 | if user.state == 'allGood': 274 | await message.reply( 275 | '请先选择模型来源,可选 OpenAI 官方服务或 OhMyGPT 代理服务,注意二者 API key 不通用', 276 | reply_markup=user.getModelSupplierBorad(), 277 | parse_mode='MarkdownV2' 278 | ) 279 | 280 | # 选用催眠术 281 | @dp.message_handler(commands=['sethypnotism', ]) 282 | async def set_hypnotism(message: types.Message): 283 | if message.chat.type == 'private': 284 | if await isDebugingNdeploying(message): return 285 | await initUser(message) 286 | user = users[message.chat.id] 287 | if user.state == 'allGood': 288 | await message.reply( 289 | '从《魔导绪论》中选择一条咒语来催眠模型吧:', 290 | reply_markup=user.getHypnotismKeyBorad(usage='select_hyp') 291 | ) 292 | 293 | # 编辑催眠术 294 | @dp.message_handler(commands=['edithypnotism', ]) 295 | async def set_hypnotism(message: types.Message): 296 | if message.chat.type == 'private': 297 | if await isDebugingNdeploying(message): return 298 | await initUser(message) 299 | user = users[message.chat.id] 300 | if user.state == 'allGood': 301 | await message.reply( 302 | '从《魔导绪论》中选择要编辑的咒语:', 303 | reply_markup=user.getHypnotismKeyBorad(usage='edit_hyp') 304 | ) 305 | user.stateTrans('allGood', 'editHyp') 306 | 307 | # 删除催眠术 308 | @dp.message_handler(commands=['deletehypnotism', ]) 309 | async def commands(message: types.Message): 310 | if message.chat.type == 'private': 311 | if await isDebugingNdeploying(message): return 312 | await initUser(message) 313 | user = users[message.chat.id] 314 | if user.state == 'allGood': 315 | await message.reply( 316 | '从《魔导绪论》中删除无用的咒语吧,注意这是无法撤销的哦:', 317 | reply_markup=user.getHypnotismKeyBorad(usage='delete_hyp') 318 | ) 319 | user.stateTrans('allGood', 'delHyp') 320 | 321 | # 创建新催眠术 322 | @dp.message_handler(commands=['newhypnotism', ]) 323 | async def set_hypnotism(message: types.Message): 324 | if message.chat.type == 'private': 325 | if await isDebugingNdeploying(message): return 326 | await initUser(message) 327 | user = users[message.chat.id] 328 | if user.state == 'allGood': 329 | user.currentReplyMsg = await message.answer( 330 | NEW_HYPNOTISM, parse_mode='MarkdownV2', 331 | reply_markup=user.getCancelBorad() 332 | ) 333 | user.stateTrans('allGood', 'newHyp') 334 | 335 | # 查看当前催眠术 336 | @dp.message_handler(commands=['showhypnotism', ]) 337 | async def show_hypnotism(message: types.Message): 338 | if message.chat.type == 'private': 339 | if await isDebugingNdeploying(message): return 340 | await initUser(message) 341 | user = users[message.chat.id] 342 | if user.state == 'allGood': 343 | await message.reply(f'当前GPT被催眠为【{user.character}】,使用的咒语如下\n'+'-'*35+'\n\n'+user.system) 344 | 345 | # ---------------------------------------------------------------------------------------- 346 | async def dialogue(user:User, message:types.Message, text:str): 347 | assert user.state == 'allGood' 348 | if text.startswith('debug'): 349 | debugMsg = await message.answer(f'Debug选项:{text}') 350 | await debugMsg.edit_reply_markup(user.getDebugBorad()) 351 | return 352 | try: 353 | # 清除上一句回复的重新生成按钮 354 | if user.currentReplyMsg is not None: 355 | try: 356 | await user.currentReplyMsg.edit_reply_markup(None) 357 | except aiogram.exceptions.MessageNotModified: 358 | pass 359 | 360 | # openai请求 361 | user.currentReplyMsg = await message.answer(f'{user.character} 正在思考...') 362 | 363 | # 沉浸模式直接输出语音,否则流式打印回复 364 | if not user.immersion: 365 | response = user.getReply(text, True) 366 | reply, replys = '', [] 367 | for chunk in response: 368 | repLen = len(reply) 369 | content = chunk.choices[0].delta.content if len(chunk.choices) > 0 else '' 370 | 371 | # 回复太长则分段 372 | if repLen > 4000: 373 | # 完成上一段的回复 374 | await editInMarkdown(user, reply) 375 | replys.append(reply) 376 | # 新启一段 377 | user.currentReplyMsg = await message.answer(content) 378 | reply, repLen = content, len(reply) 379 | 380 | # 每15个字符更新一次,如果每个字符都更新会很慢 381 | if repLen != 0 and repLen % 15 == 0: 382 | await editInMarkdown(user, reply) 383 | 384 | if content is not None: 385 | reply += content 386 | 387 | # 完成最后一段回复,增加重新生成按钮 388 | replys.append(reply) 389 | await editInMarkdown(user, reply) 390 | await user.currentReplyMsg.edit_reply_markup(user.getReGenKeyBorad()) 391 | 392 | # 还原完整回复 393 | full_reply = ''.join(replys) 394 | else: 395 | await user.currentReplyMsg.edit_text(f'{user.character} 正在讲话...') 396 | full_reply = user.getReply(text, False) 397 | try: 398 | voice_path = user.text2voice(text=full_reply) 399 | await user.currentReplyMsg.delete() 400 | with open(voice_path, 'rb') as voice: 401 | user.currentReplyMsg = await bot.send_voice(user.id, voice, reply_markup=user.getImmersionBorad()) 402 | except Exception as e: 403 | await message.answer(f'语音合成失败...\n\n{str(e)}') 404 | if user.voice_type == 'Genshin': 405 | await message.answer(f'对于密钥错误,请尝试在左下角菜单重设voice token,若问题持续出现,请联系开发者 @GetupEarlyTomo') 406 | 407 | # 更新上下文 408 | if user.state != 'creatingImg': 409 | user.history['assistant'].insert(0, full_reply) 410 | 411 | time = datetime.datetime.now() 412 | print(f'{time}:【{user.name}】:{text}\n{time}:【{user.character}】:{full_reply}\n\n') 413 | try: 414 | with open(user.log, 'a') as f: 415 | f.write(f'{time}:【{user.name}】:{text}\n{time}:【{user.character}】:{reply}\n\n') 416 | except Exception: 417 | pass 418 | 419 | except UnicodeEncodeError as e: 420 | reply = f'出错了...\n\n{str(e)}\n\n这很可能是因为您输入了带中文的API Key,请点击左下角菜单重新设置' 421 | await editInMarkdown(user, reply) 422 | print(f'[get reply error]: user{message.chat.first_name}', e) 423 | except openai.AuthenticationError as e: 424 | reply = '出错了\.\.\.\n\n'+str('您输入的 openai API key 有误,可能是*API已经被销毁*或请*API格式不对*。注意 API 带 sk\- 前缀,形如\n\n `sk\-bJWSrupJ4VPxiYnw4s0UT3BlbkFJh8BQxx4yWSMFfjPnAz5I`\n\n请在 [Openai官网](https://platform\.openai\.com/account/api\-keys) 查看您的 API Key') 425 | await message.answer(reply, parse_mode='MarkdownV2') 426 | except Exception as e: 427 | reply = '出错了...\n\n'+str(e) 428 | await editInMarkdown(user, reply) 429 | print(f'[get reply error]: user{message.chat.first_name}', e) 430 | 431 | # 获取voice消息 432 | @dp.message_handler(content_types=types.ContentType.VOICE) 433 | async def voice(message: types.Message): 434 | if message.chat.type == 'private': 435 | if await isDebugingNdeploying(message): return 436 | await initUser(message, is_typing=True) 437 | user = users[message.chat.id] 438 | 439 | # 仅在聊天时接受语音消息 440 | if user.state != 'allGood': 441 | return 442 | 443 | # 获取用户发送的语音消息 444 | user.currentReplyMsg = await message.reply(f'{user.character} 正在识别语音内容...') 445 | voice = message.voice 446 | voice_path = f'./Audio/{user.id}_in{voice.file_id}.mp3' 447 | await bot.download_file_by_id(voice.file_id, destination=voice_path) 448 | 449 | # 转文本 450 | text = user.voice2text(voice_path) 451 | await editInMarkdown(user, f'识别为:{text}') 452 | 453 | # 进行聊天 454 | await dialogue(user, message, text) 455 | 456 | # 获取chat消息 457 | @dp.message_handler() 458 | async def chat(message: types.Message): 459 | if message.chat.type == 'private': 460 | if await isDebugingNdeploying(message): return 461 | await initUser(message, is_typing=True) 462 | 463 | # 配合完成 User 配置 464 | if message.chat.id in users: 465 | user = users[message.chat.id] 466 | text = message.text 467 | 468 | # 设置API key 469 | if user.state == 'settingChatKey': 470 | user = users[message.chat.id] 471 | if user.model_supplier == 'OpenAI': 472 | user.setOpenAIKey(text) 473 | updateUserKey(cursor, connection, user.id, user.key) 474 | await message.reply(f'Openai API Key设置为:\n\n{user.key}\n\n现在就开始聊天吧!') 475 | elif user.model_supplier == 'OhMyGPT': 476 | user.setOhMyGPTKey(text) 477 | updateUserOhMyGPTKey(cursor, connection, user.id, user.ohmygpt_key) 478 | await message.reply(f'OhMyGPT API Key设置为:\n\n{user.ohmygpt_key}\n\n现在就开始聊天吧!') 479 | user.stateTrans('settingChatKey', 'setApiKeyDone') 480 | 481 | time = datetime.datetime.now() 482 | print(f'{time}: {user.name} Set {user.model_supplier} API Key as {user.key}\n\n') 483 | 484 | try: 485 | with open(user.log, 'a') as f: 486 | f.write(f'{time}: Set {user.model_supplier} API Key as {user.key}\n\n') 487 | except Exception: 488 | pass 489 | 490 | return 491 | 492 | # 设置 Img API key 493 | elif user.state == 'settingImgKey': 494 | user = users[message.chat.id] 495 | user.setSDKey(text) 496 | updateUserImgKey(cursor, connection, user.id, user.imgKey) 497 | await message.reply(f'Stable Diffusion API Key设置为:\n\n{user.imgKey}\n\n请点击左下菜单或 /howtogetimg 查看生成图像的正确方式') 498 | user.stateTrans('settingImgKey', 'setImgKeyDone') 499 | 500 | time = datetime.datetime.now() 501 | print(f'{time}: {user.name} Set SD API Key as {user.imgKey}\n\n') 502 | 503 | try: 504 | with open(user.log, 'a') as f: 505 | f.write(f'{time}: Set SD API Key as {user.imgKey}\n\n') 506 | except Exception: 507 | pass 508 | 509 | return 510 | 511 | # 设置 Voice Token 512 | elif user.state == 'settingVoiceToken': 513 | user = users[message.chat.id] 514 | user.setVoiceKey(text) 515 | updateUserVoiceToken(cursor, connection, user.id, user.voiceToken) 516 | await message.reply(f'Voice Token 设置为:\n\n{user.voiceToken}\n\n现在可以继续聊天并使用原神语音库了') 517 | user.stateTrans('settingVoiceToken', 'setVoiceTokenDone') 518 | return 519 | 520 | # 设置上下文长度 521 | elif user.state == 'settingContextLen': 522 | lenContext = 5 523 | try: 524 | lenContext = int(text) 525 | except Exception as e: 526 | await message.reply(f'出错了...没有进行修改\n\n'+str(e)) 527 | user.stateTrans('settingContextLen', 'setConextLenCancel') 528 | if lenContext <= 0: 529 | await message.reply(f'非法长度,没有进行修改') 530 | user.stateTrans('settingContextLen', 'setConextLenCancel') 531 | else: 532 | user.contextMaxLen = lenContext 533 | user.clearHistory() 534 | await message.reply(f'当前记忆上下文长度为【{user.contextMaxLen}】回合对话') 535 | user.stateTrans('settingContextLen', 'setConextLenDone') 536 | 537 | time = datetime.datetime.now() 538 | print(f'{time}: {user.name} Set context len as {user.contextMaxLen}\n\n') 539 | 540 | try: 541 | with open(user.log, 'a') as f: 542 | f.write(f'{time}: Set context len as {user.contextMaxLen}\n\n') 543 | except Exception: 544 | pass 545 | return 546 | 547 | # 创建新咒语 548 | elif user.state == 'creatingNewHyp': 549 | try: 550 | character = text[text.find('【')+1: text.find('】')] 551 | hyp = text[text.find('【',1)+1: text.rfind('】')] 552 | if len(character) > 10: 553 | await message.reply(f'出错了...没有进行修改\n\n角色名“{character}”太长了,请注意是否误把咒语文本写到角色名位置,要按照指定格式编写') 554 | user.stateTrans('creatingNewHyp', 'newHypCancel') 555 | return 556 | except Exception as e: 557 | await message.reply(f'出错了...没有进行修改\n\n'+str(e)) 558 | user.stateTrans('creatingNewHyp', 'newHypCancel') 559 | return 560 | if character in user.hypnotism: 561 | await message.reply(f'{character}这条咒语已经存在啦,请重新输入') 562 | return 563 | 564 | user.hypnotism[character] = hyp 565 | updateUserPrompts(cursor, connection, user.id, user.hypnotism) 566 | user.clearHistory() 567 | await message.reply(f'新咒语【{character}】添加成功,想要使用这条咒语的话,需要先在《魔导绪论》中点选催眠哦') 568 | user.stateTrans('creatingNewHyp', 'newHypDone') 569 | 570 | time = datetime.datetime.now() 571 | print(f'{time}: {user.name} 创建了新咒语:【{character}】:【{hyp}】\n\n') 572 | try: 573 | with open(user.log, 'a') as f: 574 | f.write(f'{time}: 创建了新咒语:【{character}】:【{hyp}】\n\n') 575 | except Exception: 576 | pass 577 | return 578 | 579 | # 编辑咒语 580 | elif user.state == 'edittingHyp': 581 | if user.currentEdittingChar is None: 582 | # 没有点击选项而是直接发消息,认为用户放弃操作 583 | user.stateTrans('edittingHyp', 'editHypCancel') 584 | else: 585 | user.hypnotism[user.currentEdittingChar] = text 586 | updateUserPrompts(cursor, connection, user.id, user.hypnotism) 587 | await message.reply(f'咒语【{user.currentEdittingChar}】编辑完成!想要使用这条咒语的话,需要先在《魔导绪论》中点选催眠哦') 588 | user.stateTrans('edittingHyp', 'editHypDone') 589 | 590 | time = datetime.datetime.now() 591 | print(f'{time}: {user.name} 编辑了咒语:【{user.currentEdittingChar}】:【{text}】\n\n') 592 | try: 593 | with open(user.log, 'a') as f: 594 | f.write(f'{time}: 编辑了咒语:【{user.currentEdittingChar}】:【{text}】\n\n') 595 | except Exception: 596 | pass 597 | 598 | return 599 | 600 | # 删除咒语 601 | elif user.state == 'deletingHyp': 602 | # 没有点击选项而是直接发消息,认为用户放弃操作 603 | user.stateTrans('deletingHyp', 'delHypCancel') 604 | 605 | # 进行聊天 606 | user = users[message.chat.id] 607 | if user.state == 'allGood': 608 | await dialogue(user, message, message.text) 609 | 610 | # ----------------------------------------------------------------------------- 611 | # debug 612 | @dp.callback_query_handler(lambda call: call.data.startswith('debug')) 613 | async def debug(call: types.CallbackQuery, ): 614 | user = users[call.message.chat.id] 615 | text = call.message.text 616 | if call.data.endswith('audio'): 617 | text = text[text.find(':')+1:] 618 | _, voice, content = text.split(' ', 2) 619 | voice_path = user.text2voice_test(voice=voice, text=content) 620 | try: 621 | if voice_path is not None: 622 | with open(voice_path, 'rb') as voice: 623 | await bot.send_voice(user.id, voice) 624 | except Exception as e: 625 | print(f'[Check]: Gen Audio failed as {e}') 626 | 627 | # 取消当前操作 628 | @dp.callback_query_handler(lambda call: call.data == 'cancel') 629 | async def cancel(call: types.CallbackQuery, ): 630 | user = users[call.message.chat.id] 631 | if user.state != 'allGood': 632 | await call.message.answer('已取消') 633 | 634 | if user.state == 'settingChatKey': 635 | user.stateTrans('settingChatKey', 'setApiKeyCancel') 636 | elif user.state == 'settingImgKey': 637 | user.stateTrans('settingImgKey', 'setImgKeyCancel') 638 | elif user.state == 'settingVoiceToken': 639 | user.stateTrans('settingVoiceToken', 'setVoiceTokenCancel') 640 | elif user.state == 'creatingNewHyp': 641 | user.stateTrans('creatingNewHyp', 'newHypCancel') 642 | elif user.state == 'edittingHyp': 643 | user.stateTrans('edittingHyp', 'editHypCancel') 644 | elif user.state == 'deletingHyp': 645 | user.stateTrans('deletingHyp', 'delHypCancel') 646 | else: 647 | print(f"[check]: try to cancel at state '{user.state}'") 648 | 649 | # 选用催眠术 650 | @dp.callback_query_handler(lambda call: call.data.startswith('set_model')) 651 | async def selectModel(call: types.CallbackQuery, ): 652 | user = users[call.message.chat.id] 653 | user.model = call.data[len('set_model'):] 654 | user.clearHistory() 655 | await call.message.answer(f'模型已设置为【{user.model}】') 656 | 657 | # 选用催眠术 658 | @dp.callback_query_handler(lambda call: call.data.startswith('select_hyp')) 659 | async def selectHypnotism(call: types.CallbackQuery, ): 660 | user = users[call.message.chat.id] 661 | user.character = call.data[len('select_hyp'):] 662 | if user.character != user.model: 663 | user.system = user.hypnotism[user.character] 664 | await call.message.answer(f'已经使用如下咒语将模型催眠为【{user.character}】,可以随意聊天,催眠术不会被遗忘\n'+'-'*35+'\n\n'+user.system+'\n\n'+'-'*35+'\n'+f'当前模型选择为【{user.model}】,可在菜单切换') 665 | else: 666 | await call.message.answer(f'不使用催眠咒语直接和模型对话,当前模型选择为【{user.model}】,可在菜单切换模型') 667 | user.clearHistory() 668 | 669 | # 删除催眠术 670 | @dp.callback_query_handler(lambda call: call.data.startswith('delete_hyp')) 671 | async def deleteHypnotism(call: types.CallbackQuery, ): 672 | character = call.data[len('delete_hyp'):] 673 | user = users[call.message.chat.id] 674 | hypDeleted = user.hypnotism[character] 675 | user.hypnotism.pop(character) 676 | updateUserPrompts(cursor, connection, user.id, user.hypnotism) 677 | user.clearHistory() 678 | await call.message.answer(f'已将咒语【{character}】删除,原文为如下\n'+'-'*35+'\n\n'+hypDeleted) 679 | user.stateTrans('deletingHyp', 'delHypDone') 680 | 681 | # 编辑催眠术 682 | @dp.callback_query_handler(lambda call: call.data.startswith('edit_hyp')) 683 | async def editHypnotism(call: types.CallbackQuery, ): 684 | user = users[call.message.chat.id] 685 | character = call.data[len('edit_hyp'):] 686 | hypEditting = user.hypnotism[character] 687 | user.currentEdittingChar = character 688 | await call.message.answer(f'请直接输入咒语【{character}】的新文本,当前咒语文本如下\n'+'-'*35+'\n\n'+hypEditting) 689 | 690 | # 生成语音回复 691 | @dp.callback_query_handler(lambda call: call.data == 'audio_gen') 692 | async def gen_audio(call: types.CallbackQuery, ): 693 | user = users[call.message.chat.id] 694 | message = call.message 695 | 696 | notice = await message.answer(f'{user.character} 正在讲话...') 697 | try: 698 | voice_path = user.text2voice(text=message.text) 699 | with open(voice_path, 'rb') as voice: 700 | await bot.send_voice(user.id, voice) 701 | await notice.delete() 702 | except Exception as e: 703 | await message.answer(f'语音合成失败...\n\n{str(e)}') 704 | if user.voice_type == 'Genshin': 705 | await message.answer(f'对于密钥错误,请尝试在左下角菜单重设voice token,若问题持续出现,请联系开发者 @GetupEarlyTomo') 706 | 707 | # 声音操作 708 | @dp.callback_query_handler(lambda call: call.data.startswith('audio')) 709 | async def gen_audio(call: types.CallbackQuery, ): 710 | user = users[call.message.chat.id] 711 | message = call.message 712 | if call.data.endswith('select'): 713 | await user.currentReplyMsg.edit_reply_markup(None) 714 | user.currentVoiceMsg = await message.answer(f'请选择声音类型,可[在此](https://t\.me/nekolalala/7200/7201)试听', parse_mode='MarkdownV2', reply_markup=user.getVoiceTypeBorad()) 715 | elif call.data.endswith('back'): 716 | await user.currentVoiceMsg.delete() 717 | await user.currentReplyMsg.edit_reply_markup(user.getReGenKeyBorad()) 718 | elif call.data.endswith('OpenAI'): 719 | user.voice_type = 'OpenAI' 720 | await user.currentVoiceMsg.edit_text(VOICE_INTRO_OPENAI, parse_mode='MarkdownV2', disable_web_page_preview=True,reply_markup=user.getVoiceSexBorad()) 721 | elif call.data.endswith('Genshin'): 722 | if user.voiceToken is None: 723 | await user.currentVoiceMsg.edit_text('要使用该模型,请先点击左下角设置免费的 voice token', reply_markup=user.getVoiceTokenBorad()) 724 | else: 725 | user.voice_type = 'Genshin' 726 | await user.currentVoiceMsg.edit_text(VOICE_INTRO_GENSHIN, parse_mode='MarkdownV2', disable_web_page_preview=True, reply_markup=user.getVoiceSexBorad(),) 727 | elif call.data.endswith('audio_back_to_select_type'): 728 | await user.currentVoiceMsg.edit_text(f'请选择声音类型,可[在此](https://t\.me/nekolalala/7200/7201)试听', parse_mode='MarkdownV2', reply_markup=user.getVoiceTypeBorad()) 729 | elif call.data.endswith('audio_back_to_select_sex'): 730 | await user.currentVoiceMsg.edit_reply_markup(user.getVoiceSexBorad()) 731 | elif call.data.endswith('male') or call.data.endswith('female'): 732 | user.voice_sex = 'female' if call.data.endswith('female') else 'male' 733 | await user.currentVoiceMsg.edit_reply_markup(user.getVoiceBorad()) 734 | else: 735 | user.voice = call.data.split('_')[-1] 736 | await user.currentVoiceMsg.delete() 737 | await user.currentReplyMsg.edit_reply_markup(user.getReGenKeyBorad()) 738 | 739 | # 模型操作 740 | @dp.callback_query_handler(lambda call: call.data.startswith('model_selection')) 741 | async def select_model(call: types.CallbackQuery, ): 742 | user = users[call.message.chat.id] 743 | message = call.message 744 | if call.data.endswith('OpenAI'): 745 | user.model_supplier = 'OpenAI' 746 | await message.edit_text(MODEL_INTRO_OPENAI, parse_mode='MarkdownV2', disable_web_page_preview=True,reply_markup=user.getModelBorad()) 747 | elif call.data.endswith('OhMyGPT'): 748 | user.model_supplier = 'OhMyGPT' 749 | await message.edit_text(MODEL_INTRO_OHMYGPT, parse_mode='MarkdownV2', disable_web_page_preview=True, reply_markup=user.getModelBorad(),) 750 | elif call.data.endswith('back_to_select_supplier'): 751 | await message.edit_text(f'聊天前请先择模型来源,OpenAI 官方服务或 OhMyGPT 代理服务,注意二者 API key 不通用', parse_mode='MarkdownV2', reply_markup=user.getModelSupplierBorad()) 752 | else: 753 | if user.model_supplier == 'OpenAI': 754 | if user.key is None: 755 | user.state = 'settingChatKey' 756 | await message.reply('请输入Openai API Key,可在[Openai官网](https://platform\.openai\.com/account/api\-keys) 查看:', parse_mode='MarkdownV2', disable_web_page_preview=True) 757 | else: 758 | user.model = call.data.split('_')[-1] 759 | await message.reply(f'模型设置为【{user.model}】,当前使用的Openai API Key设置为:\n\n{user.key}\n\n现在就开始聊天吧!') 760 | else: 761 | if user.ohmygpt_key is None: 762 | user.state = 'settingChatKey' 763 | await message.reply('请输入OhMyGPT API Key,可在[OhMyGPT官网](https://www\.ohmygpt\.com) 查看,推荐通过[链接](https://www\.ohmygpt\.com?aff=XFeTYpLh)免费注册试用:', parse_mode='MarkdownV2', disable_web_page_preview=True) 764 | else: 765 | user.model = call.data.split('_')[-1] 766 | await message.reply(f'模型设置为【{user.model}】,当前使用的OhMyGPT API Key设置为:\n\n{user.ohmygpt_key}\n\n现在就开始聊天吧!') 767 | 768 | 769 | # 启动沉浸模式 770 | @dp.callback_query_handler(lambda call: call.data == 'immersion') 771 | async def regenerate(call: types.CallbackQuery, ): 772 | user = users[call.message.chat.id] 773 | message = call.message 774 | if not user.immersion: 775 | await message.answer('沉浸模式已启动,现在机器人只会进行语音回复,您也可以使用语音输入以获得最佳体验') 776 | user.immersion = True 777 | else: 778 | await message.answer('沉浸模式已退出') 779 | user.immersion = False 780 | 781 | # 重新生成回答 782 | @dp.callback_query_handler(lambda call: call.data == 'regenerate') 783 | async def regenerate(call: types.CallbackQuery, ): 784 | user = users[call.message.chat.id] 785 | message = call.message 786 | 787 | if not user.immersion: 788 | if user.currentReplyMsg is not None: 789 | await user.currentReplyMsg.edit_reply_markup(None) 790 | await editInMarkdown(user, f'{user.character} 正在思考...') 791 | else: 792 | await user.currentReplyMsg.delete() 793 | user.currentReplyMsg = await message.answer(f'{user.character} 正在思考...') 794 | 795 | # 从上下文中删除上一个回复 796 | user.history['assistant'].pop(0) 797 | 798 | # 沉浸模式直接输出语音,否则流式打印回复 799 | if not user.immersion: 800 | response = user.getReply('', True) 801 | reply, replys = '', [] 802 | for chunk in response: 803 | repLen = len(reply) 804 | content = chunk.choices[0].delta.content 805 | 806 | # 回复太长则分段 807 | if repLen > 4000: 808 | # 完成上一段的回复 809 | await editInMarkdown(user, reply) 810 | replys.append(reply) 811 | # 新启一段 812 | user.currentReplyMsg = await message.answer(content) 813 | reply, repLen = content, len(reply) 814 | 815 | # 每15个字符更新一次,如果每个字符都更新会很慢 816 | if repLen != 0 and repLen % 15 == 0: 817 | await editInMarkdown(user, reply) 818 | 819 | # 拼接当前回复 820 | if content is not None: 821 | reply += content 822 | 823 | # 打印最后一段回复,增加重新生成按钮 824 | replys.append(reply) 825 | await editInMarkdown(user, reply) 826 | await user.currentReplyMsg.edit_reply_markup(user.getReGenKeyBorad()) 827 | 828 | # 还原完整回复 829 | full_reply = ''.join(replys) 830 | else: 831 | await user.currentReplyMsg.edit_text(f'{user.character} 正在讲话...') 832 | full_reply = user.getReply('', False) 833 | try: 834 | voice_path = user.text2voice(text=full_reply) 835 | await user.currentReplyMsg.delete() 836 | with open(voice_path, 'rb') as voice: 837 | user.currentReplyMsg = await bot.send_voice(user.id, voice, reply_markup=user.getImmersionBorad()) 838 | except Exception as e: 839 | await message.answer(f'语音合成失败...\n\n{str(e)}') 840 | if user.voice_type == 'Genshin': 841 | await message.answer(f'对于密钥错误,请尝试在左下角菜单重设voice token,若问题持续出现,请联系开发者 @GetupEarlyTomo') 842 | 843 | 844 | time = datetime.datetime.now() 845 | print(f"{time}:【{user.name}】:{user.history['user'][0]}\n{time}:【{user.character}】(regen):{full_reply}\n\n") 846 | try: 847 | with open(user.log, 'a') as f: 848 | f.write(f"{time}:【{user.name}】:{user.history['user'][0]}\n{time}:【{user.character}】(regen):{full_reply}\n\n") 849 | except Exception: 850 | pass 851 | 852 | # 更新上下文 853 | user.history['assistant'].insert(0, full_reply) 854 | 855 | # ----------------------------------------------------------------------------- 856 | async def start(): 857 | await bot.set_my_commands([ 858 | BotCommand('sethypnotism','魔导绪论'), 859 | BotCommand('showhypnotism','查看当前咒语'), 860 | BotCommand('newhypnotism','创建新咒语'), 861 | BotCommand('edithypnotism','编辑咒语'), 862 | BotCommand('deletehypnotism','删除咒语'), 863 | BotCommand('setmodel','选择模型'), 864 | BotCommand('setcontextlen','设置上下文长度'), 865 | BotCommand('setapikey','设置OpenAI Key'), 866 | BotCommand('setohmygptkey','设置OhMyGPT Key'), 867 | BotCommand('setimgkey','设置Stable diffusion Key'), 868 | BotCommand('setvoicetoken','设置voice token'), 869 | BotCommand('howtogetimg','生成图像示范'), 870 | BotCommand('about','使用指南'), 871 | BotCommand('resetall','遇到严重错误时点此重置机器人') 872 | ]) 873 | 874 | def botActivate(): 875 | print('bot启动中; pid = {}'.format(os.getpid())) 876 | 877 | getDatabaseReady(cursor, connection) 878 | loop = asyncio.new_event_loop() 879 | asyncio.set_event_loop(loop) 880 | loop.run_until_complete(start()) 881 | loop.run_until_complete(executor.start_polling(dp)) 882 | 883 | def connectionGuard(process): 884 | ''' 主进程守护线程在此检查bot进程是否死亡,并自动重启 ''' 885 | while True: 886 | if not process.is_alive(): 887 | process = multiprocessing.Process(target=botActivate) 888 | process.start() 889 | time.sleep(3) 890 | 891 | if __name__ == '__main__': 892 | # 在子进程中启动 bot 893 | p = multiprocessing.Process(target=botActivate) 894 | p.start() 895 | time.sleep(3) 896 | 897 | # 启动守护子线程,检查并重启断连的进程 898 | guardThread = threading.Thread(target=connectionGuard, args=(p,)) 899 | guardThread.start() 900 | 901 | # 主线程/主进程死循环,禁止程序退出 902 | while True: time.sleep(0.5) 903 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | aiogram==2.25.1 2 | aiohttp==3.8.4 3 | aiosignal==1.3.1 4 | aiotdlib==0.19.2 5 | anyio==3.7.1 6 | async-timeout==4.0.2 7 | attrs==22.2.0 8 | Babel==2.9.1 9 | certifi @ file:///C:/b/abs_85o_6fm0se/croot/certifi_1671487778835/work/certifi 10 | charset-normalizer==3.1.0 11 | colorama==0.4.6 12 | distro==1.8.0 13 | exceptiongroup==1.1.3 14 | frozenlist==1.3.3 15 | future==0.18.3 16 | grpcio==1.48.1 17 | grpcio-tools==1.48.1 18 | h11==0.14.0 19 | httpcore==1.0.2 20 | httpx==0.25.1 21 | idna==3.4 22 | lxml==4.9.2 23 | magic-filter==1.0.9 24 | multidict==6.0.4 25 | openai==1.3.1 26 | Pillow==9.4.0 27 | protobuf==3.19.5 28 | pydantic==1.10.5 29 | PyMySQL==1.0.2 30 | pypng==0.20220715.0 31 | PyQRCode==1.2.1 32 | python-docx==0.8.11 33 | python-dotenv==1.0.0 34 | pytz==2022.7.1 35 | qrcode==7.4.2 36 | requests==2.28.2 37 | six==1.16.0 38 | sniffio==1.3.0 39 | sortedcontainers==2.4.0 40 | stability-sdk==0.4.0 41 | tqdm==4.65.0 42 | transitions==0.9.0 43 | typing_extensions==4.5.0 44 | ujson==5.7.0 45 | urllib3==1.26.14 46 | wincertstore==0.2 47 | yarl==1.8.2 48 | --------------------------------------------------------------------------------