├── nohup.ou ├── __init__.py ├── plugins ├── banwords │ ├── .gitignore │ ├── __init__.py │ ├── banwords.txt.template │ ├── config.json.template │ ├── README.md │ └── banwords.py ├── role │ ├── __init__.py │ └── README.md ├── tool │ ├── __init__.py │ ├── config.json.template │ ├── README.md │ └── tool.py ├── bdunit │ ├── __init__.py │ ├── config.json.template │ └── README.md ├── finish │ ├── __init__.py │ └── finish.py ├── godcmd │ ├── __init__.py │ ├── config.json.template │ └── README.md ├── hello │ ├── __init__.py │ └── hello.py ├── dungeon │ ├── __init__.py │ ├── README.md │ └── dungeon.py ├── keyword │ ├── __init__.py │ ├── config.json.template │ ├── test-keyword.png │ ├── README.md │ └── keyword.py ├── plugin.py ├── __init__.py ├── source.json └── event.py ├── img.png ├── img_1.png ├── img_2.png ├── img_3.png ├── img_4.png ├── img_5.png ├── img_6.png ├── img_7.png ├── img_8.png ├── sessions.db.dat ├── docs └── images │ ├── group-chat-sample.jpg │ ├── image-create-sample.jpg │ └── single-chat-sample.jpg ├── voice ├── baidu │ ├── config.json.template │ ├── README.md │ └── baidu_voice.py ├── voice.py ├── azure │ ├── config.json.template │ └── azure_voice.py ├── factory.py ├── openai │ └── openai_voice.py ├── google │ └── google_voice.py ├── pytts │ └── pytts_voice.py └── audio_convert.py ├── pyproject.toml ├── requirements.txt ├── common ├── const.py ├── singleton.py ├── tmp_dir.py ├── database.py ├── package_manager.py ├── log.py ├── dequeue.py ├── expired_dict.py ├── token_bucket.py ├── utils.py ├── time_check.py └── sorted_dict.py ├── translate ├── factory.py ├── translator.py └── baidu │ └── baidu_translate.py ├── .flake8 ├── nixpacks.toml ├── scripts ├── tout.sh ├── shutdown.sh └── start.sh ├── lib └── itchat │ ├── components │ ├── __init__.py │ ├── hotreload.py │ └── register.py │ ├── async_components │ ├── __init__.py │ ├── hotreload.py │ └── register.py │ ├── content.py │ ├── storage │ ├── messagequeue.py │ └── __init__.py │ ├── config.py │ ├── log.py │ 
├── returnvalues.py │ ├── __init__.py │ └── utils.py ├── bot ├── bot.py ├── bot_factory.py ├── baidu │ └── baidu_unit_bot.py ├── openai │ ├── open_ai_image.py │ ├── open_ai_session.py │ └── open_ai_bot.py ├── session_manager.py ├── chatgpt │ └── chat_gpt_session.py └── luolinai │ └── luolinai_bot.py ├── .gitignore ├── sessions.db.bak ├── sessions.db.dir ├── bridge ├── reply.py ├── context.py └── bridge.py ├── requirements-optional.txt ├── config企业微信配置模板.json ├── channel ├── wechatmp │ ├── common.py │ ├── wechatmp_message.py │ ├── wechatmp_client.py │ ├── active_reply.py │ └── README.md ├── wechatcom │ ├── wechatcomapp_client.py │ ├── wechatcomapp_message.py │ └── README.md ├── channel_factory.py ├── channel.py ├── chat_message.py ├── terminal │ └── terminal_channel.py └── wechat │ ├── wechaty_message.py │ ├── wechat_message.py │ ├── wechaty_channel.py │ └── wechat_channel.py ├── sdfg ├── 知识库接口说明.md ├── .pre-commit-config.yaml ├── LICENSE ├── config-微信公众号模板.json ├── config-template.json ├── config-微信配置template.json ├── 配置说明.md └── app.py /nohup.ou: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /plugins/banwords/.gitignore: -------------------------------------------------------------------------------- 1 | banwords.txt -------------------------------------------------------------------------------- /plugins/role/__init__.py: -------------------------------------------------------------------------------- 1 | from .role import * 2 | -------------------------------------------------------------------------------- /plugins/tool/__init__.py: -------------------------------------------------------------------------------- 1 | from .tool import * 2 | 
-------------------------------------------------------------------------------- /plugins/bdunit/__init__.py: -------------------------------------------------------------------------------- 1 | from .bdunit import * 2 | -------------------------------------------------------------------------------- /plugins/finish/__init__.py: -------------------------------------------------------------------------------- 1 | from .finish import * 2 | -------------------------------------------------------------------------------- /plugins/godcmd/__init__.py: -------------------------------------------------------------------------------- 1 | from .godcmd import * 2 | -------------------------------------------------------------------------------- /plugins/hello/__init__.py: -------------------------------------------------------------------------------- 1 | from .hello import * 2 | -------------------------------------------------------------------------------- /plugins/banwords/__init__.py: -------------------------------------------------------------------------------- 1 | from .banwords import * 2 | -------------------------------------------------------------------------------- /plugins/dungeon/__init__.py: -------------------------------------------------------------------------------- 1 | from .dungeon import * 2 | -------------------------------------------------------------------------------- /plugins/keyword/__init__.py: -------------------------------------------------------------------------------- 1 | from .keyword import * 2 | -------------------------------------------------------------------------------- /plugins/banwords/banwords.txt.template: -------------------------------------------------------------------------------- 1 | nipples 2 | pennis 3 | 法轮功 -------------------------------------------------------------------------------- /img.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/luolin-ai/chatgpt-KnowledgeBot/HEAD/img.png -------------------------------------------------------------------------------- /img_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/luolin-ai/chatgpt-KnowledgeBot/HEAD/img_1.png -------------------------------------------------------------------------------- /img_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/luolin-ai/chatgpt-KnowledgeBot/HEAD/img_2.png -------------------------------------------------------------------------------- /img_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/luolin-ai/chatgpt-KnowledgeBot/HEAD/img_3.png -------------------------------------------------------------------------------- /img_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/luolin-ai/chatgpt-KnowledgeBot/HEAD/img_4.png -------------------------------------------------------------------------------- /img_5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/luolin-ai/chatgpt-KnowledgeBot/HEAD/img_5.png -------------------------------------------------------------------------------- /img_6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/luolin-ai/chatgpt-KnowledgeBot/HEAD/img_6.png -------------------------------------------------------------------------------- /img_7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/luolin-ai/chatgpt-KnowledgeBot/HEAD/img_7.png -------------------------------------------------------------------------------- /img_8.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/luolin-ai/chatgpt-KnowledgeBot/HEAD/img_8.png -------------------------------------------------------------------------------- /plugins/godcmd/config.json.template: -------------------------------------------------------------------------------- 1 | { 2 | "password": "", 3 | "admin_users": [] 4 | } 5 | -------------------------------------------------------------------------------- /sessions.db.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/luolin-ai/chatgpt-KnowledgeBot/HEAD/sessions.db.dat -------------------------------------------------------------------------------- /plugins/keyword/config.json.template: -------------------------------------------------------------------------------- 1 | { 2 | "keyword": { 3 | "关键字匹配": "测试成功" 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /plugins/bdunit/config.json.template: -------------------------------------------------------------------------------- 1 | { 2 | "service_id": "s...", 3 | "api_key": "", 4 | "secret_key": "" 5 | } 6 | -------------------------------------------------------------------------------- /docs/images/group-chat-sample.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/luolin-ai/chatgpt-KnowledgeBot/HEAD/docs/images/group-chat-sample.jpg -------------------------------------------------------------------------------- /plugins/keyword/test-keyword.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/luolin-ai/chatgpt-KnowledgeBot/HEAD/plugins/keyword/test-keyword.png -------------------------------------------------------------------------------- /docs/images/image-create-sample.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/luolin-ai/chatgpt-KnowledgeBot/HEAD/docs/images/image-create-sample.jpg -------------------------------------------------------------------------------- /docs/images/single-chat-sample.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/luolin-ai/chatgpt-KnowledgeBot/HEAD/docs/images/single-chat-sample.jpg -------------------------------------------------------------------------------- /plugins/banwords/config.json.template: -------------------------------------------------------------------------------- 1 | { 2 | "action": "replace", 3 | "reply_filter": true, 4 | "reply_action": "ignore" 5 | } 6 | -------------------------------------------------------------------------------- /voice/baidu/config.json.template: -------------------------------------------------------------------------------- 1 | { 2 | "lang": "zh", 3 | "ctp": 1, 4 | "spd": 5, 5 | "pit": 5, 6 | "vol": 5, 7 | "per": 0 8 | } 9 | -------------------------------------------------------------------------------- /plugins/dungeon/README.md: -------------------------------------------------------------------------------- 1 | 玩地牢游戏的聊天插件,触发方法如下: 2 | 3 | - `$开始冒险 <背景故事>` - 以<背景故事>开始一个地牢游戏,不填写会使用默认背景故事。之后聊天中你的所有消息会帮助ai完善这个故事。 4 | - `$停止冒险` - 停止一个地牢游戏,回归正常的ai。 5 | -------------------------------------------------------------------------------- /plugins/plugin.py: -------------------------------------------------------------------------------- 1 | class Plugin: 2 | def __init__(self): 3 | self.handlers = {} 4 | 5 | def get_help_text(self, **kwargs): 6 | return "暂无帮助信息" 7 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.black] 2 | line-length = 176 3 | target-version = ['py37'] 4 | include = 
'\.pyi?$' 5 | extend-exclude = '.+/(dist|.venv|venv|build|lib)/.+' 6 | 7 | [tool.isort] 8 | profile = "black" -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | openai==0.27.2 2 | HTMLParser>=0.0.2 3 | PyQRCode>=1.2.1 4 | qrcode>=7.4.2 5 | requests>=2.28.2 6 | chardet>=5.1.0 7 | Pillow 8 | pre-commit 9 | mysql-connector-python 10 | pymongo -------------------------------------------------------------------------------- /common/const.py: -------------------------------------------------------------------------------- 1 | # bot_type 2 | OPEN_AI = "openAI" 3 | CHATGPT = "chatGPT" 4 | BAIDU = "baidu" 5 | CHATGPTONAZURE = "chatGPTOnAzure" 6 | LINKAI = "linkai" 7 | luolinai = "luolinai" 8 | 9 | VERSION = "1.3.0" 10 | -------------------------------------------------------------------------------- /translate/factory.py: -------------------------------------------------------------------------------- 1 | def create_translator(voice_type): 2 | if voice_type == "baidu": 3 | from translate.baidu.baidu_translate import BaiduTranslator 4 | 5 | return BaiduTranslator() 6 | raise RuntimeError 7 | -------------------------------------------------------------------------------- /plugins/keyword/README.md: -------------------------------------------------------------------------------- 1 | # 目的 2 | 关键字匹配并回复 3 | 4 | # 试用场景 5 | 目前是在微信公众号下面使用过。 6 | 7 | # 使用步骤 8 | 1. 复制 `config.json.template` 为 `config.json` 9 | 2. 在关键字 `keyword` 新增需要关键字匹配的内容 10 | 3. 
重启程序做验证 11 | 12 | # 验证结果 13 | ![结果](test-keyword.png) -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 176 3 | select = E303,W293,W291,W292,E305,E231,E302 4 | exclude = 5 | .tox, 6 | __pycache__, 7 | *.pyc, 8 | .env 9 | venv/* 10 | .venv/* 11 | reports/* 12 | dist/* 13 | lib/* -------------------------------------------------------------------------------- /common/singleton.py: -------------------------------------------------------------------------------- 1 | def singleton(cls): 2 | instances = {} 3 | 4 | def get_instance(*args, **kwargs): 5 | if cls not in instances: 6 | instances[cls] = cls(*args, **kwargs) 7 | return instances[cls] 8 | 9 | return get_instance 10 | -------------------------------------------------------------------------------- /plugins/tool/config.json.template: -------------------------------------------------------------------------------- 1 | { 2 | "tools": [ 3 | "python", 4 | "url-get", 5 | "terminal", 6 | "meteo-weather" 7 | ], 8 | "kwargs": { 9 | "top_k_results": 2, 10 | "no_default": false, 11 | "model_name": "gpt-3.5-turbo" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /plugins/__init__.py: -------------------------------------------------------------------------------- 1 | from .event import * 2 | from .plugin import * 3 | from .plugin_manager import PluginManager 4 | 5 | instance = PluginManager() 6 | 7 | register = instance.register 8 | # load_plugins = instance.load_plugins 9 | # emit_event = instance.emit_event 10 | -------------------------------------------------------------------------------- /nixpacks.toml: -------------------------------------------------------------------------------- 1 | providers = ['python'] 2 | 3 | [phases.setup] 4 | nixPkgs = ['python310'] 5 | cmds = ['apt-get update','apt-get install -y 
--no-install-recommends ffmpeg espeak libavcodec-extra','python -m venv /opt/venv && . /opt/venv/bin/activate && pip install -r requirements-optional.txt'] 6 | [start] 7 | cmd = "python ./app.py" -------------------------------------------------------------------------------- /scripts/tout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #打开日志 3 | 4 | cd `dirname $0`/.. 5 | export BASE_DIR=`pwd` 6 | echo $BASE_DIR 7 | 8 | # check the nohup.out log output file 9 | if [ ! -f "${BASE_DIR}/nohup.out" ]; then 10 | echo "No file ${BASE_DIR}/nohup.out" 11 | exit -1; 12 | fi 13 | 14 | tail -f "${BASE_DIR}/nohup.out" 15 | -------------------------------------------------------------------------------- /lib/itchat/components/__init__.py: -------------------------------------------------------------------------------- 1 | from .contact import load_contact 2 | from .hotreload import load_hotreload 3 | from .login import load_login 4 | from .messages import load_messages 5 | from .register import load_register 6 | 7 | def load_components(core): 8 | load_contact(core) 9 | load_hotreload(core) 10 | load_login(core) 11 | load_messages(core) 12 | load_register(core) 13 | -------------------------------------------------------------------------------- /lib/itchat/async_components/__init__.py: -------------------------------------------------------------------------------- 1 | from .contact import load_contact 2 | from .hotreload import load_hotreload 3 | from .login import load_login 4 | from .messages import load_messages 5 | from .register import load_register 6 | 7 | def load_components(core): 8 | load_contact(core) 9 | load_hotreload(core) 10 | load_login(core) 11 | load_messages(core) 12 | load_register(core) 13 | -------------------------------------------------------------------------------- /translate/translator.py: -------------------------------------------------------------------------------- 1 | """ 2 | Voice 
service abstract class 3 | """ 4 | 5 | 6 | class Translator(object): 7 | # please use https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes to specify language 8 | def translate(self, query: str, from_lang: str = "", to_lang: str = "en") -> str: 9 | """ 10 | Translate text from one language to another 11 | """ 12 | raise NotImplementedError 13 | -------------------------------------------------------------------------------- /voice/voice.py: -------------------------------------------------------------------------------- 1 | """ 2 | Voice service abstract class 3 | """ 4 | 5 | 6 | class Voice(object): 7 | def voiceToText(self, voice_file): 8 | """ 9 | Send voice to voice service and get text 10 | """ 11 | raise NotImplementedError 12 | 13 | def textToVoice(self, text): 14 | """ 15 | Send text to voice service and get voice 16 | """ 17 | raise NotImplementedError 18 | -------------------------------------------------------------------------------- /bot/bot.py: -------------------------------------------------------------------------------- 1 | """ 2 | Auto-replay chat robot abstract class 3 | """ 4 | 5 | 6 | from bridge.context import Context 7 | from bridge.reply import Reply 8 | 9 | 10 | class Bot(object): 11 | def reply(self, query, context: Context = None) -> Reply: 12 | """ 13 | bot auto-reply content 14 | :param req: received message 15 | :return: reply content 16 | """ 17 | raise NotImplementedError 18 | -------------------------------------------------------------------------------- /lib/itchat/content.py: -------------------------------------------------------------------------------- 1 | TEXT = 'Text' 2 | MAP = 'Map' 3 | CARD = 'Card' 4 | NOTE = 'Note' 5 | SHARING = 'Sharing' 6 | PICTURE = 'Picture' 7 | RECORDING = VOICE = 'Recording' 8 | ATTACHMENT = 'Attachment' 9 | VIDEO = 'Video' 10 | FRIENDS = 'Friends' 11 | SYSTEM = 'System' 12 | 13 | INCOME_MSG = [TEXT, MAP, CARD, NOTE, SHARING, PICTURE, 14 | RECORDING, VOICE, ATTACHMENT, VIDEO, FRIENDS, SYSTEM] 15 
| -------------------------------------------------------------------------------- /plugins/godcmd/README.md: -------------------------------------------------------------------------------- 1 | ## 插件说明 2 | 3 | 指令插件 4 | 5 | ## 插件使用 6 | 7 | 将`config.json.template`复制为`config.json`,并修改其中`password`的值为口令。 8 | 9 | 如果没有设置命令,在命令行日志中会打印出本次的临时口令,请注意观察,打印格式如下。 10 | 11 | ``` 12 | [INFO][2023-04-06 23:53:47][godcmd.py:165] - [Godcmd] 因未设置口令,本次的临时口令为0971。 13 | ``` 14 | 15 | 在私聊中可使用`#auth`指令,输入口令进行管理员认证。更多详细指令请输入`#help`查看帮助文档: 16 | 17 | `#auth <口令>` - 管理员认证,仅可在私聊时认证。 18 | `#help` - 输出帮助文档,**是否是管理员**和是否是在群聊中会影响帮助文档的输出内容。 19 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .idea 3 | .vscode 4 | .wechaty/ 5 | __pycache__/ 6 | venv* 7 | *.pyc 8 | config.json 9 | QR.png 10 | nohup.out 11 | tmp 12 | plugins.json 13 | itchat.pkl 14 | *.log 15 | user_datas.pkl 16 | chatgpt_tool_hub/ 17 | plugins/**/ 18 | !plugins/bdunit 19 | !plugins/dungeon 20 | !plugins/finish 21 | !plugins/godcmd 22 | !plugins/tool 23 | !plugins/banwords 24 | !plugins/banwords/**/ 25 | !plugins/hello 26 | !plugins/role 27 | !plugins/keyword -------------------------------------------------------------------------------- /scripts/shutdown.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #关闭服务 4 | cd `dirname $0`/.. 5 | export BASE_DIR=`pwd` 6 | pid=`ps ax | grep -i app.py | grep "${BASE_DIR}" | grep python3 | grep -v grep | awk '{print $1}'` 7 | if [ -z "$pid" ] ; then 8 | echo "No chatgpt-on-wechat running." 9 | exit -1; 10 | fi 11 | 12 | echo "The chatgpt-on-wechat(${pid}) is running..." 
13 | 14 | kill ${pid} 15 | 16 | echo "Send shutdown request to chatgpt-on-wechat(${pid}) OK" 17 | -------------------------------------------------------------------------------- /plugins/source.json: -------------------------------------------------------------------------------- 1 | { 2 | "repo": { 3 | "sdwebui": { 4 | "url": "https://github.com/lanvent/plugin_sdwebui.git", 5 | "desc": "利用stable-diffusion画图的插件" 6 | }, 7 | "replicate": { 8 | "url": "https://github.com/lanvent/plugin_replicate.git", 9 | "desc": "利用replicate api画图的插件" 10 | }, 11 | "summary": { 12 | "url": "https://github.com/lanvent/plugin_summary.git", 13 | "desc": "总结聊天记录的插件" 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /scripts/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #后台运行Chat_on_webchat执行脚本 3 | 4 | cd `dirname $0`/.. 5 | export BASE_DIR=`pwd` 6 | echo $BASE_DIR 7 | 8 | # check the nohup.out log output file 9 | if [ ! 
-f "${BASE_DIR}/nohup.out" ]; then 10 | touch "${BASE_DIR}/nohup.out" 11 | echo "create file ${BASE_DIR}/nohup.out" 12 | fi 13 | 14 | nohup python3 "${BASE_DIR}/app.py" & tail -f "${BASE_DIR}/nohup.out" 15 | 16 | echo "Chat_on_webchat is starting,you can check the ${BASE_DIR}/nohup.out" 17 | -------------------------------------------------------------------------------- /common/tmp_dir.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pathlib 3 | 4 | from config import conf 5 | 6 | 7 | class TmpDir(object): 8 | """A temporary directory that is deleted when the object is destroyed.""" 9 | 10 | tmpFilePath = pathlib.Path("./tmp/") 11 | 12 | def __init__(self): 13 | pathExists = os.path.exists(self.tmpFilePath) 14 | if not pathExists: 15 | os.makedirs(self.tmpFilePath) 16 | 17 | def path(self): 18 | return str(self.tmpFilePath) + "/" 19 | -------------------------------------------------------------------------------- /sessions.db.bak: -------------------------------------------------------------------------------- 1 | '@@22a72f12c7ca81ea0d43c5c8251eadb35577769488d208d9f079b42f48664bab', (0, 395) 2 | '@e182a52a4dea484a450f51fe2b8c2f8c', (512, 362) 3 | 'filehelper', (1024, 339) 4 | '@@4b24f0f66d0e295e9356b9070866850ee4be45d4afe61ae96eafbf8f072bac2f', (1536, 395) 5 | '@@da9253479a99a41a0da391a75fe637d66db852a1f2e4752ac7530c53ac6f3ad8', (2048, 395) 6 | '@@39c13fbbe7b28e69d29338ac467aaf3e90b38b543fd718897ba90dd2e879d0b4', (2560, 395) 7 | '@@21b7fd5ed9e0b137c6f49680c5fbb957be103bd0230f7c075bcc678fce574e33', (3072, 395) 8 | -------------------------------------------------------------------------------- /sessions.db.dir: -------------------------------------------------------------------------------- 1 | '@@22a72f12c7ca81ea0d43c5c8251eadb35577769488d208d9f079b42f48664bab', (0, 395) 2 | '@e182a52a4dea484a450f51fe2b8c2f8c', (512, 362) 3 | 'filehelper', (1024, 339) 4 | 
'@@4b24f0f66d0e295e9356b9070866850ee4be45d4afe61ae96eafbf8f072bac2f', (1536, 395) 5 | '@@da9253479a99a41a0da391a75fe637d66db852a1f2e4752ac7530c53ac6f3ad8', (2048, 395) 6 | '@@39c13fbbe7b28e69d29338ac467aaf3e90b38b543fd718897ba90dd2e879d0b4', (2560, 395) 7 | '@@21b7fd5ed9e0b137c6f49680c5fbb957be103bd0230f7c075bcc678fce574e33', (3072, 395) 8 | -------------------------------------------------------------------------------- /voice/azure/config.json.template: -------------------------------------------------------------------------------- 1 | { 2 | "speech_synthesis_voice_name": "zh-CN-XiaoxiaoNeural", 3 | "auto_detect": true, 4 | "speech_synthesis_zh": "zh-CN-YunxiNeural", 5 | "speech_synthesis_en": "en-US-JacobNeural", 6 | "speech_synthesis_ja": "ja-JP-AoiNeural", 7 | "speech_synthesis_ko": "ko-KR-SoonBokNeural", 8 | "speech_synthesis_de": "de-DE-LouisaNeural", 9 | "speech_synthesis_fr": "fr-FR-BrigitteNeural", 10 | "speech_synthesis_es": "es-ES-LaiaNeural", 11 | "speech_recognition_language": "zh-CN" 12 | } 13 | -------------------------------------------------------------------------------- /bridge/reply.py: -------------------------------------------------------------------------------- 1 | # encoding:utf-8 2 | 3 | from enum import Enum 4 | 5 | 6 | class ReplyType(Enum): 7 | TEXT = 1 # 文本 8 | VOICE = 2 # 音频文件 9 | IMAGE = 3 # 图片文件 10 | IMAGE_URL = 4 # 图片URL 11 | 12 | INFO = 9 13 | ERROR = 10 14 | 15 | def __str__(self): 16 | return self.name 17 | 18 | 19 | class Reply: 20 | def __init__(self, type: ReplyType = None, content=None): 21 | self.type = type 22 | self.content = content 23 | 24 | def __str__(self): 25 | return "Reply(type={}, content={})".format(self.type, self.content) 26 | -------------------------------------------------------------------------------- /plugins/banwords/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## 插件描述 3 | 4 | 
简易的敏感词插件,暂不支持分词,请自行导入词库到插件文件夹中的`banwords.txt`,每行一个词,一个参考词库是[1](https://github.com/cjh0613/tencent-sensitive-words/blob/main/sensitive_words_lines.txt)。 5 | 6 | 使用前将`config.json.template`复制为`config.json`,并自行配置。 7 | 8 | 目前插件对消息的默认处理行为有如下两种: 9 | 10 | - `ignore` : 无视这条消息。 11 | - `replace` : 将消息中的敏感词替换成"*",并回复违规。 12 | 13 | ```json 14 | "action": "replace", 15 | "reply_filter": true, 16 | "reply_action": "ignore" 17 | ``` 18 | 19 | 在以上配置项中: 20 | 21 | - `action`: 对用户消息的默认处理行为 22 | - `reply_filter`: 是否对ChatGPT的回复也进行敏感词过滤 23 | - `reply_action`: 如果开启了回复过滤,对回复的默认处理行为 24 | 25 | ## 致谢 26 | 27 | 搜索功能实现来自https://github.com/toolgood/ToolGood.Words -------------------------------------------------------------------------------- /requirements-optional.txt: -------------------------------------------------------------------------------- 1 | tiktoken>=0.3.2 # openai calculate token 2 | 3 | #voice 4 | pydub>=0.25.1 # need ffmpeg 5 | SpeechRecognition # google speech to text 6 | gTTS>=2.3.1 # google text to speech 7 | pyttsx3>=2.90 # pytsx text to speech 8 | baidu_aip>=4.16.10 # baidu voice 9 | azure-cognitiveservices-speech # azure voice 10 | numpy<=1.24.2 11 | langid # language detect 12 | 13 | #install plugin 14 | dulwich 15 | 16 | # wechaty 17 | wechaty>=0.10.7 18 | wechaty_puppet>=0.4.23 19 | pysilk_mod>=1.6.0 # needed by send voice 20 | 21 | # wechatmp wechatcom 22 | web.py 23 | wechatpy 24 | 25 | # chatgpt-tool-hub plugin 26 | 27 | --extra-index-url https://pypi.python.org/simple 28 | chatgpt_tool_hub==0.4.4 -------------------------------------------------------------------------------- /common/database.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | from pymongo import MongoClient 3 | class Database: 4 | def __init__(self): 5 | self.client = MongoClient('mongodb://root:Is2WQKd0lPxx8jY0@45.204.10.148:27017/') 6 | self.db = self.client['wxjqr'] 7 | self.chat_collection = self.db['chat_collection'] 8 | 9 | def 
insert_chat(self, session_id, query, response): 10 | chat_document = { 11 | 'session_id': session_id, 12 | 'query': query, 13 | 'response': response, 14 | 'timestamp': datetime.datetime.now() 15 | } 16 | self.chat_collection.insert_one(chat_document) 17 | 18 | def close(self): 19 | self.client.close() 20 | -------------------------------------------------------------------------------- /plugins/bdunit/README.md: -------------------------------------------------------------------------------- 1 | ## 插件说明 2 | 3 | 利用百度UNIT实现智能对话 4 | 5 | - 1.解决问题:chatgpt无法处理的指令,交给百度UNIT处理如:天气,日期时间,数学运算等 6 | - 2.如问时间:现在几点钟,今天几号 7 | - 3.如问天气:明天广州天气怎么样,这个周末深圳会不会下雨 8 | - 4.如问数学运算:23+45=多少,100-23=多少,35转化为二进制是多少? 9 | 10 | ## 使用说明 11 | 12 | ### 获取apikey 13 | 14 | 在百度UNIT官网上自己创建应用,申请百度机器人,可以把预先训练好的模型导入到自己的应用中, 15 | 16 | see https://ai.baidu.com/unit/home#/home?track=61fe1b0d3407ce3face1d92cb5c291087095fc10c8377aaf https://console.bce.baidu.com/ai平台申请 17 | 18 | ### 配置文件 19 | 20 | 将文件夹中`config.json.template`复制为`config.json`。 21 | 22 | 在其中填写百度UNIT官网上获取应用的API Key和Secret Key 23 | 24 | ``` json 25 | { 26 | "service_id": "s...", #"机器人ID" 27 | "api_key": "", 28 | "secret_key": "" 29 | } 30 | ``` -------------------------------------------------------------------------------- /config企业微信配置模板.json: -------------------------------------------------------------------------------- 1 | { 2 | "luolinai_api_key": "请输入您的知识库密钥", 3 | "luolinai_model_id": "请输入您的知识库模型ID", 4 | "base_url":"https://api.gojiberrys.cn/api/openapi", 5 | "image_create_prefix": [ 6 | "画", 7 | "看", 8 | "找" 9 | ], 10 | "speech_recognition": false, 11 | "group_speech_recognition": false, 12 | "voice_reply_voice": false, 13 | "conversation_max_tokens": 1000, 14 | "expires_in_seconds": 3600, 15 | "channel_type": "wechatcom_app", 16 | "wechatcom_corp_id": "请输入您的企业ID", 17 | "wechatcomapp_secret": "请输入您的应用Secret", 18 | "wechatcomapp_agent_id": "请输入您的应用Agent ID", 19 | "wechatcomapp_token": "请输入您的应用Token", 20 | "wechatcomapp_aes_key": "请输入您的应用AES 
Key", 21 | "wechatcomapp_port": 9200 22 | } 23 | -------------------------------------------------------------------------------- /channel/wechatmp/common.py: -------------------------------------------------------------------------------- 1 | import web 2 | from wechatpy.crypto import WeChatCrypto 3 | from wechatpy.exceptions import InvalidSignatureException 4 | from wechatpy.utils import check_signature 5 | 6 | from config import conf 7 | 8 | MAX_UTF8_LEN = 2048 9 | 10 | 11 | class WeChatAPIException(Exception): 12 | pass 13 | 14 | 15 | def verify_server(data): 16 | try: 17 | signature = data.signature 18 | timestamp = data.timestamp 19 | nonce = data.nonce 20 | echostr = data.get("echostr", None) 21 | token = conf().get("wechatmp_token") # 请按照公众平台官网\基本配置中信息填写 22 | check_signature(token, signature, timestamp, nonce) 23 | return echostr 24 | except InvalidSignatureException: 25 | raise web.Forbidden("Invalid signature") 26 | except Exception as e: 27 | raise web.Forbidden(str(e)) 28 | -------------------------------------------------------------------------------- /common/package_manager.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import pip 4 | from pip._internal import main as pipmain 5 | 6 | from common.log import _reset_logger, logger 7 | 8 | 9 | def install(package): 10 | pipmain(["install", package]) 11 | 12 | 13 | def install_requirements(file): 14 | pipmain(["install", "-r", file, "--upgrade"]) 15 | _reset_logger(logger) 16 | 17 | 18 | def check_dulwich(): 19 | needwait = False 20 | for i in range(2): 21 | if needwait: 22 | time.sleep(3) 23 | needwait = False 24 | try: 25 | import dulwich 26 | 27 | return 28 | except ImportError: 29 | try: 30 | install("dulwich") 31 | except: 32 | needwait = True 33 | try: 34 | import dulwich 35 | except ImportError: 36 | raise ImportError("Unable to import dulwich") 37 | -------------------------------------------------------------------------------- 
/voice/factory.py: -------------------------------------------------------------------------------- 1 | """ 2 | voice factory 3 | """ 4 | 5 | 6 | def create_voice(voice_type): 7 | """ 8 | create a voice instance 9 | :param voice_type: voice type code 10 | :return: voice instance 11 | """ 12 | if voice_type == "baidu": 13 | from voice.baidu.baidu_voice import BaiduVoice 14 | 15 | return BaiduVoice() 16 | elif voice_type == "google": 17 | from voice.google.google_voice import GoogleVoice 18 | 19 | return GoogleVoice() 20 | elif voice_type == "openai": 21 | from voice.openai.openai_voice import OpenaiVoice 22 | 23 | return OpenaiVoice() 24 | elif voice_type == "pytts": 25 | from voice.pytts.pytts_voice import PyttsVoice 26 | 27 | return PyttsVoice() 28 | elif voice_type == "azure": 29 | from voice.azure.azure_voice import AzureVoice 30 | 31 | return AzureVoice() 32 | raise RuntimeError 33 | -------------------------------------------------------------------------------- /sdfg: -------------------------------------------------------------------------------- 1 | { 2 | "open_ai_api_key": "YOUR API KEY", 3 | "model": "gpt-3.5-turbo", 4 | "proxy": "", 5 | "single_chat_prefix": [ 6 | "bot", 7 | "@bot" 8 | ], 9 | "single_chat_reply_prefix": "[bot] ", 10 | "group_chat_prefix": [ 11 | "@bot" 12 | ], 13 | "group_name_white_list": [ 14 | "ChatGPT测试群", 15 | "ChatGPT测试群2" 16 | ], 17 | "group_chat_in_one_session": [ 18 | "ChatGPT测试群" 19 | ], 20 | "image_create_prefix": [ 21 | "画", 22 | "看", 23 | "找" 24 | ], 25 | "speech_recognition": false, 26 | "group_speech_recognition": false, 27 | "voice_reply_voice": false, 28 | "conversation_max_tokens": 1000, 29 | "expires_in_seconds": 3600, 30 | "character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。", 31 | "subscribe_msg": "感谢您的关注!\n这里是ChatGPT,可以自由对话。\n支持语音对话。\n支持图片输入。\n支持图片输出,画字开头的消息将按要求创作图片。\n支持tool、角色扮演和文字冒险等丰富的插件。\n输入{trigger_prefix}#help 查看详细指令。" 32 | } 
-------------------------------------------------------------------------------- /知识库接口说明.md: -------------------------------------------------------------------------------- 1 | **************基于自己私有化知识库的的微信机器人************** 2 | 3 | 点此[登录洛林AI知识交互中心网站](https://api.gojiberrys.cn/) 4 | ![img_3.png](img_3.png)点击创建AI 5 | 6 | 如何创建知识库,并导入 7 | ![img_4.png](img_4.png) 8 | 9 | 导入知识之手动方式导入 10 | ![img_5.png](img_5.png) 11 | **文件导入** 12 | 支持 .txt,.doc,.docx,.pdf,.md 文件。Gpt会自动对文本进行 QA 拆分,需要较长训练时间,拆分需要消耗 13 | tokens,账号余额不足时,未拆分的数据会被删除。一个1个文本。 14 | ![img_6.png](img_6.png) 15 | **表格导入** 16 | 接受一个 csv 文件,表格头包含 question 和 answer。question 代表问题,answer 代表答案。 17 | 导入前会进行去重,如果问题和答案完全相同,则不会被导入,所以最终导入的内容可能会比文件的内容少。但是,对于带有换行的内容,目前无法去重。 18 | ![img_7.png](img_7.png) 19 | **以上三种方法都可以,根据自己的需求来选择** 20 | 21 | 完成以上操作基本之后点击保存,机器人就可以对上传的知识进行训练了,当训练完成后可以回自动显示到训练之后就在知识库中出现了 22 | 23 | 创建自己的私有化应用,,关联自己相关的知识库 24 | ![img_8.png](img_8.png) 25 | 26 | 如何获取 api key 27 | 开发页,点击添加新的 Api Key 可获取 ,请在获取后保存,后续将无法再获取该 key,只能删除重新生成。 28 | ![img_1.png](img_1.png) 29 | 如何取 modelId / appId 30 | V3.8之后的接口改成了 appId 。两者是同一个东西,主要看接口实际字段。 31 | 我的应用编辑页内可获取 32 | ![img_2.png](img_2.png) -------------------------------------------------------------------------------- /channel/wechatcom/wechatcomapp_client.py: -------------------------------------------------------------------------------- 1 | import threading 2 | import time 3 | 4 | from wechatpy.enterprise import WeChatClient 5 | 6 | 7 | class WechatComAppClient(WeChatClient): 8 | def __init__(self, corp_id, secret, access_token=None, session=None, timeout=None, auto_retry=True): 9 | super(WechatComAppClient, self).__init__(corp_id, secret, access_token, session, timeout, auto_retry) 10 | self.fetch_access_token_lock = threading.Lock() 11 | 12 | def fetch_access_token(self): # 重载父类方法,加锁避免多线程重复获取access_token 13 | with self.fetch_access_token_lock: 14 | access_token = self.session.get(self.access_token_key) 15 | if access_token: 16 | if not self.expires_at: 17 | return access_token 
18 | timestamp = time.time() 19 | if self.expires_at - timestamp > 60: 20 | return access_token 21 | return super().fetch_access_token() 22 | -------------------------------------------------------------------------------- /voice/openai/openai_voice.py: -------------------------------------------------------------------------------- 1 | """ 2 | google voice service 3 | """ 4 | import json 5 | 6 | import openai 7 | 8 | from bridge.reply import Reply, ReplyType 9 | from common.log import logger 10 | from config import conf 11 | from voice.voice import Voice 12 | 13 | 14 | class OpenaiVoice(Voice): 15 | def __init__(self): 16 | openai.api_key = conf().get("open_ai_api_key") 17 | 18 | def voiceToText(self, voice_file): 19 | logger.debug("[Openai] voice file name={}".format(voice_file)) 20 | try: 21 | file = open(voice_file, "rb") 22 | result = openai.Audio.transcribe("whisper-1", file) 23 | text = result["text"] 24 | reply = Reply(ReplyType.TEXT, text) 25 | logger.info("[Openai] voiceToText text={} voice file name={}".format(text, voice_file)) 26 | except Exception as e: 27 | reply = Reply(ReplyType.ERROR, str(e)) 28 | finally: 29 | return reply 30 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v4.4.0 4 | hooks: 5 | - id: fix-byte-order-marker 6 | - id: check-case-conflict 7 | - id: check-merge-conflict 8 | - id: debug-statements 9 | - id: pretty-format-json 10 | types: [text] 11 | files: \.json(.template)?$ 12 | args: [ --autofix , --no-ensure-ascii, --indent=2, --no-sort-keys] 13 | - id: trailing-whitespace 14 | exclude: '(\/|^)lib\/' 15 | args: [ --markdown-linebreak-ext=md ] 16 | - repo: https://github.com/PyCQA/isort 17 | rev: 5.12.0 18 | hooks: 19 | - id: isort 20 | exclude: '(\/|^)lib\/' 21 | - repo: https://github.com/psf/black 22 | rev: 
23.3.0 23 | hooks: 24 | - id: black 25 | exclude: '(\/|^)lib\/' 26 | - repo: https://github.com/PyCQA/flake8 27 | rev: 6.0.0 28 | hooks: 29 | - id: flake8 30 | exclude: '(\/|^)lib\/' -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2022 zhayujie 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 
-------------------------------------------------------------------------------- /common/log.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import sys 3 | 4 | 5 | def _reset_logger(log): 6 | for handler in log.handlers: 7 | handler.close() 8 | log.removeHandler(handler) 9 | del handler 10 | log.handlers.clear() 11 | log.propagate = False 12 | console_handle = logging.StreamHandler(sys.stdout) 13 | console_handle.setFormatter( 14 | logging.Formatter( 15 | "[%(levelname)s][%(asctime)s][%(filename)s:%(lineno)d] - %(message)s", 16 | datefmt="%Y-%m-%d %H:%M:%S", 17 | ) 18 | ) 19 | file_handle = logging.FileHandler("run.log", encoding="utf-8") 20 | file_handle.setFormatter( 21 | logging.Formatter( 22 | "[%(levelname)s][%(asctime)s][%(filename)s:%(lineno)d] - %(message)s", 23 | datefmt="%Y-%m-%d %H:%M:%S", 24 | ) 25 | ) 26 | log.addHandler(file_handle) 27 | log.addHandler(console_handle) 28 | 29 | 30 | def _get_logger(): 31 | log = logging.getLogger("log") 32 | _reset_logger(log) 33 | log.setLevel(logging.INFO) 34 | return log 35 | 36 | 37 | # 日志句柄 38 | logger = _get_logger() 39 | -------------------------------------------------------------------------------- /config-微信公众号模板.json: -------------------------------------------------------------------------------- 1 | { 2 | "luolinai_api_key": "YOUR_LUOLINAI_API_KEY", 3 | "luolinai_model_id": "YOUR_LUOLINAI_MODEL_ID", 4 | "base_url":"https://api.gojiberrys.cn/api/openapi", 5 | "channel_type": "wechatmp", 6 | "wechatmp_token": "微信公众平台的Token", 7 | "wechatmp_port": 8080, #微信公众平台的端口,需要端口转发到80或443设置后请删除 #后面的话 8 | "wechatmp_app_id": "微信公众平台的appID", 9 | "wechatmp_app_secret": "微信公众平台的appsecret", 10 | "wechatmp_aes_key": "微信公众平台的EncodingAESKey,加密模式需要", 11 | "max_single_chat_replies": 100, 12 | "max_group_chat_replies": 100, 13 | "speech_recognition": false, 14 | "group_speech_recognition": false, 15 | "voice_reply_voice": false, 16 | "conversation_max_tokens": 1000, 17 | 
"expires_in_seconds": 3600, 18 | "character_desc": "你是ChatGPT,一个由OpenAI训练的大型语言模型。你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。", 19 | "subscribe_msg": "感谢您的关注!\n这里是ChatGPT,可以自由对话。\n支持语音对话。\n支持图片输入。\n支持图片输出,以“画”字开头的消息将按要求创作图片。\n支持tool、角色扮演和文字冒险等丰富的插件。\n输入{trigger_prefix}#help查看详细指令。", 20 | "max_daily_replies": 30, 21 | "max_hourly_replies": 30, 22 | "max_message_length": 2000, 23 | "db_path": "sessions.db", 24 | "bot_prefix": "[Bot] " 25 | } 26 | -------------------------------------------------------------------------------- /lib/itchat/storage/messagequeue.py: -------------------------------------------------------------------------------- 1 | import logging 2 | try: 3 | import Queue as queue 4 | except ImportError: 5 | import queue 6 | 7 | from .templates import AttributeDict 8 | 9 | logger = logging.getLogger('itchat') 10 | 11 | class Queue(queue.Queue): 12 | def put(self, message): 13 | queue.Queue.put(self, Message(message)) 14 | 15 | class Message(AttributeDict): 16 | def download(self, fileName): 17 | if hasattr(self.text, '__call__'): 18 | return self.text(fileName) 19 | else: 20 | return b'' 21 | def __getitem__(self, value): 22 | if value in ('isAdmin', 'isAt'): 23 | v = value[0].upper() + value[1:] # ''[1:] == '' 24 | logger.debug('%s is expired in 1.3.0, use %s instead.' 
% (value, v)) 25 | value = v 26 | return super(Message, self).__getitem__(value) 27 | def __str__(self): 28 | return '{%s}' % ', '.join( 29 | ['%s: %s' % (repr(k),repr(v)) for k,v in self.items()]) 30 | def __repr__(self): 31 | return '<%s: %s>' % (self.__class__.__name__.split('.')[-1], 32 | self.__str__()) 33 | -------------------------------------------------------------------------------- /plugins/role/README.md: -------------------------------------------------------------------------------- 1 | 用于让Bot扮演指定角色的聊天插件,触发方法如下: 2 | 3 | - `$角色/$role help/帮助` - 打印目前支持的角色列表。 4 | - `$角色/$role <角色名>` - 让AI扮演该角色,角色名支持模糊匹配。 5 | - `$停止扮演` - 停止角色扮演。 6 | 7 | 添加自定义角色请在`roles/roles.json`中添加。 8 | 9 | (大部分prompt来自https://github.com/rockbenben/ChatGPT-Shortcut/blob/main/src/data/users.tsx) 10 | 11 | 以下为例子: 12 | ```json 13 | { 14 | "title": "写作助理", 15 | "description": "As a writing improvement assistant, your task is to improve the spelling, grammar, clarity, concision, and overall readability of the text I provided, while breaking down long sentences, reducing repetition, and providing suggestions for improvement. Please provide only the corrected Chinese version of the text and avoid including explanations. 
Please treat every message I send later as text content.", 16 | "descn": "作为一名中文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性,同时分解长句,减少重复,并提供改进建议。请只提供文本的更正版本,避免包括解释。请把我之后的每一条消息都当作文本内容。", 17 | "wrapper": "内容是:\n\"%s\"", 18 | "remark": "最常使用的角色,用于优化文本的语法、清晰度和简洁度,提高可读性。" 19 | } 20 | ``` 21 | 22 | - `title`: 角色名。 23 | - `description`: 使用`$role`触发时,使用英语prompt。 24 | - `descn`: 使用`$角色`触发时,使用中文prompt。 25 | - `wrapper`: 用于包装用户消息,可起到强调作用,避免回复离题。 26 | - `remark`: 简短描述该角色,在打印帮助文档时显示。 27 | -------------------------------------------------------------------------------- /channel/channel_factory.py: -------------------------------------------------------------------------------- 1 | """ 2 | channel factory 3 | """ 4 | 5 | 6 | def create_channel(channel_type): 7 | """ 8 | create a channel instance 9 | :param channel_type: channel type code 10 | :return: channel instance 11 | """ 12 | if channel_type == "wx": 13 | from channel.wechat.wechat_channel import WechatChannel 14 | 15 | return WechatChannel() 16 | elif channel_type == "wxy": 17 | from channel.wechat.wechaty_channel import WechatyChannel 18 | 19 | return WechatyChannel() 20 | elif channel_type == "terminal": 21 | from channel.terminal.terminal_channel import TerminalChannel 22 | 23 | return TerminalChannel() 24 | elif channel_type == "wechatmp": 25 | from channel.wechatmp.wechatmp_channel import WechatMPChannel 26 | 27 | return WechatMPChannel(passive_reply=True) 28 | elif channel_type == "wechatmp_service": 29 | from channel.wechatmp.wechatmp_channel import WechatMPChannel 30 | 31 | return WechatMPChannel(passive_reply=False) 32 | elif channel_type == "wechatcom_app": 33 | from channel.wechatcom.wechatcomapp_channel import WechatComAppChannel 34 | 35 | return WechatComAppChannel() 36 | raise RuntimeError 37 | -------------------------------------------------------------------------------- /config-template.json: -------------------------------------------------------------------------------- 1 | { 2 | "luolinai_api_key": 
"YOUR_LUOLINAI_API_KEY", 3 | "luolinai_model_id": "YOUR_LUOLINAI_MODEL_ID", 4 | "base_url":"https://api.gojiberrys.cn/api/openapi", 5 | "max_single_chat_replies": 100, 6 | "max_group_chat_replies": 100, 7 | "ad_message": "此处广告", 8 | "single_chat_prefix": [ 9 | "bot", 10 | "@bot" 11 | ], 12 | "single_chat_reply_prefix": "[bot] ", 13 | "group_chat_prefix": [ 14 | "@bot" 15 | ], 16 | "group_name_white_list": [ 17 | "ALL_GROUP" 18 | ], 19 | "group_chat_in_one_session": [ 20 | "ALL_GROUP" 21 | ], 22 | "image_create_prefix": [ 23 | "画", 24 | "看", 25 | "找" 26 | ], 27 | "speech_recognition": false, 28 | "group_speech_recognition": false, 29 | "voice_reply_voice": false, 30 | "conversation_max_tokens": 1000, 31 | "expires_in_seconds": 3600, 32 | "character_desc": "你是ChatGPT,一个由OpenAI训练的大型语言模型。你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。", 33 | "subscribe_msg": "感谢您的关注!\n这里是ChatGPT,可以自由对话。\n支持语音对话。\n支持图片输入。\n支持图片输出,以“画”字开头的消息将按要求创作图片。\n支持tool、角色扮演和文字冒险等丰富的插件。\n输入{trigger_prefix}#help查看详细指令。", 34 | "max_daily_replies": 30, 35 | "max_hourly_replies": 30, 36 | "max_message_length": 2000, 37 | "db_path": "sessions.db", 38 | "bot_prefix": "[Bot] ", 39 | "error_message": "我有点累了,先喝杯茶,关注公众号洛林AI互联" 40 | } 41 | -------------------------------------------------------------------------------- /config-微信配置template.json: -------------------------------------------------------------------------------- 1 | { 2 | "luolinai_api_key": "YOUR_LUOLINAI_API_KEY", 3 | "luolinai_model_id": "YOUR_LUOLINAI_MODEL_ID", 4 | "base_url":"https://api.gojiberrys.cn/api/openapi", 5 | "max_single_chat_replies": 100, 6 | "max_group_chat_replies": 100, 7 | "ad_message": "此处广告", 8 | "single_chat_prefix": [ 9 | "bot", 10 | "@bot" 11 | ], 12 | "single_chat_reply_prefix": "[bot] ", 13 | "group_chat_prefix": [ 14 | "@bot" 15 | ], 16 | "group_name_white_list": [ 17 | "ALL_GROUP" 18 | ], 19 | "group_chat_in_one_session": [ 20 | "ALL_GROUP" 21 | ], 22 | "image_create_prefix": [ 23 | "画", 24 | "看", 25 | "找" 26 | ], 27 | 
"speech_recognition": false, 28 | "group_speech_recognition": false, 29 | "voice_reply_voice": false, 30 | "conversation_max_tokens": 1000, 31 | "expires_in_seconds": 3600, 32 | "character_desc": "你是ChatGPT,一个由OpenAI训练的大型语言模型。你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。", 33 | "subscribe_msg": "感谢您的关注!\n这里是ChatGPT,可以自由对话。\n支持语音对话。\n支持图片输入。\n支持图片输出,以“画”字开头的消息将按要求创作图片。\n支持tool、角色扮演和文字冒险等丰富的插件。\n输入{trigger_prefix}#help查看详细指令。", 34 | "max_daily_replies": 30, 35 | "max_hourly_replies": 30, 36 | "max_message_length": 2000, 37 | "db_path": "sessions.db", 38 | "bot_prefix": "[Bot] ", 39 | "error_message": "我有点累了,先喝杯茶,关注公众号洛林AI互联" 40 | } 41 | -------------------------------------------------------------------------------- /channel/channel.py: -------------------------------------------------------------------------------- 1 | """ 2 | Message sending channel abstract class 3 | """ 4 | 5 | from bridge.bridge import Bridge 6 | from bridge.context import Context 7 | from bridge.reply import * 8 | 9 | 10 | class Channel(object): 11 | NOT_SUPPORT_REPLYTYPE = [ReplyType.VOICE, ReplyType.IMAGE] 12 | 13 | def startup(self): 14 | """ 15 | init channel 16 | """ 17 | raise NotImplementedError 18 | 19 | def handle_text(self, msg): 20 | """ 21 | process received msg 22 | :param msg: message object 23 | """ 24 | raise NotImplementedError 25 | 26 | # 统一的发送函数,每个Channel自行实现,根据reply的type字段发送不同类型的消息 27 | def send(self, reply: Reply, context: Context): 28 | """ 29 | send message to user 30 | :param msg: message content 31 | :param receiver: receiver channel account 32 | :return: 33 | """ 34 | raise NotImplementedError 35 | 36 | def build_reply_content(self, query, context: Context = None) -> Reply: 37 | return Bridge().fetch_reply_content(query, context) 38 | 39 | def build_voice_to_text(self, voice_file) -> Reply: 40 | return Bridge().fetch_voice_to_text(voice_file) 41 | 42 | def build_text_to_voice(self, text) -> Reply: 43 | return Bridge().fetch_text_to_voice(text) 44 | 
-------------------------------------------------------------------------------- /common/dequeue.py: -------------------------------------------------------------------------------- 1 | from queue import Full, Queue 2 | from time import monotonic as time 3 | 4 | 5 | # add implementation of putleft to Queue 6 | class Dequeue(Queue): 7 | def putleft(self, item, block=True, timeout=None): 8 | with self.not_full: 9 | if self.maxsize > 0: 10 | if not block: 11 | if self._qsize() >= self.maxsize: 12 | raise Full 13 | elif timeout is None: 14 | while self._qsize() >= self.maxsize: 15 | self.not_full.wait() 16 | elif timeout < 0: 17 | raise ValueError("'timeout' must be a non-negative number") 18 | else: 19 | endtime = time() + timeout 20 | while self._qsize() >= self.maxsize: 21 | remaining = endtime - time() 22 | if remaining <= 0.0: 23 | raise Full 24 | self.not_full.wait(remaining) 25 | self._putleft(item) 26 | self.unfinished_tasks += 1 27 | self.not_empty.notify() 28 | 29 | def putleft_nowait(self, item): 30 | return self.putleft(item, block=False) 31 | 32 | def _putleft(self, item): 33 | self.queue.appendleft(item) 34 | -------------------------------------------------------------------------------- /common/expired_dict.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime, timedelta 2 | 3 | 4 | class ExpiredDict(dict): 5 | def __init__(self, expires_in_seconds): 6 | super().__init__() 7 | self.expires_in_seconds = expires_in_seconds 8 | 9 | def __getitem__(self, key): 10 | value, expiry_time = super().__getitem__(key) 11 | if datetime.now() > expiry_time: 12 | del self[key] 13 | raise KeyError("expired {}".format(key)) 14 | self.__setitem__(key, value) 15 | return value 16 | 17 | def __setitem__(self, key, value): 18 | expiry_time = datetime.now() + timedelta(seconds=self.expires_in_seconds) 19 | super().__setitem__(key, (value, expiry_time)) 20 | 21 | def get(self, key, default=None): 22 | try: 23 | 
return self[key] 24 | except KeyError: 25 | return default 26 | 27 | def __contains__(self, key): 28 | try: 29 | self[key] 30 | return True 31 | except KeyError: 32 | return False 33 | 34 | def keys(self): 35 | keys = list(super().keys()) 36 | return [key for key in keys if key in self] 37 | 38 | def items(self): 39 | return [(key, self[key]) for key in self.keys()] 40 | 41 | def __iter__(self): 42 | return self.keys().__iter__() 43 | -------------------------------------------------------------------------------- /bot/bot_factory.py: -------------------------------------------------------------------------------- 1 | """ 2 | channel factory 3 | """ 4 | from common import const 5 | 6 | 7 | def create_bot(bot_type): 8 | """ 9 | create a bot_type instance 10 | :param bot_type: bot type code 11 | :return: bot instance 12 | """ 13 | if bot_type == const.BAIDU: 14 | # Baidu Unit对话接口 15 | from bot.baidu.baidu_unit_bot import BaiduUnitBot 16 | 17 | return BaiduUnitBot() 18 | 19 | elif bot_type == const.CHATGPT: 20 | # ChatGPT 网页端web接口 21 | from bot.chatgpt.chat_gpt_bot import ChatGPTBot 22 | 23 | return ChatGPTBot() 24 | 25 | elif bot_type == const.OPEN_AI: 26 | # OpenAI 官方对话模型API 27 | from bot.openai.open_ai_bot import OpenAIBot 28 | 29 | return OpenAIBot() 30 | 31 | elif bot_type == const.CHATGPTONAZURE: 32 | # Azure chatgpt service https://azure.microsoft.com/en-in/products/cognitive-services/openai-service/ 33 | from bot.chatgpt.chat_gpt_bot import AzureChatGPTBot 34 | 35 | return AzureChatGPTBot() 36 | 37 | elif bot_type == const.LINKAI: 38 | from bot.linkai.link_ai_bot import LinkAIBot 39 | return LinkAIBot() 40 | 41 | elif bot_type == const.luolinai: 42 | from bot.luolinai.luolinai_bot import luolinaiBot 43 | return luolinaiBot() 44 | 45 | raise RuntimeError("Unsupported bot type: {}".format(bot_type)) 46 | -------------------------------------------------------------------------------- /配置说明.md: 
-------------------------------------------------------------------------------- 1 | # config.json文件内容示例 2 | 3 | { 4 | 5 | # 知识库配置,当配置知识库时自动加载知识库的api 6 | 7 | "luolinai_api_key": "输入你的知识库密钥", 8 | "luolinai_model_id": "输入你的知识库模型id", 9 | "open_ai_api_key": "YOUR API KEY", # 填入上面创建的 OpenAI API KEY 10 | "model": "gpt-3.5-turbo", # 模型名称。当use_azure_chatgpt为true时,其名称为Azure上model deployment名称 11 | "proxy": "127.0.0.1:7890", # 代理客户端的ip和端口 12 | "single_chat_prefix": ["bot", "@bot"], # 私聊时文本需要包含该前缀才能触发机器人回复 13 | "single_chat_reply_prefix": "[bot] ", # 私聊时自动回复的前缀,用于区分真人 14 | "group_chat_prefix": ["@bot"], # 群聊时包含该前缀则会触发机器人回复 15 | "group_name_white_list": ["ChatGPT测试群", "ChatGPT测试群2"], # 开启自动回复的群名称列表 16 | "group_chat_in_one_session": ["ChatGPT测试群"], # 支持会话上下文共享的群名称 17 | "image_create_prefix": ["画", "看", "找"], # 开启图片回复的前缀 18 | "conversation_max_tokens": 1000, # 支持上下文记忆的最多字符数 19 | "speech_recognition": false, # 是否开启语音识别 20 | "group_speech_recognition": false, # 是否开启群组语音识别 21 | "use_azure_chatgpt": false, # 是否使用Azure ChatGPT service代替openai ChatGPT service. 
当设置为true时需要设置 22 | open_ai_api_base,如 https://xxx.openai.azure.com/ 23 | "character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 24 | 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。", # 人格描述 25 | 26 | # 订阅消息,公众号和企业微信channel中请填写,当被订阅时会自动回复,可使用特殊占位符。目前支持的占位符有{trigger_prefix},在程序中它会自动替换成bot的触发词。 27 | 28 | "subscribe_msg": " 29 | 感谢您的关注!\n这里是ChatGPT,可以自由对话。\n支持语音对话。\n支持图片输出,画字开头的消息将按要求创作图片。\n支持角色扮演和文字冒险等丰富插件。\n输入{trigger_prefix}#help 30 | 查看详细指令。" 31 | -------------------------------------------------------------------------------- /plugins/finish/finish.py: -------------------------------------------------------------------------------- 1 | # encoding:utf-8 2 | 3 | import plugins 4 | from bridge.context import ContextType 5 | from bridge.reply import Reply, ReplyType 6 | from common.log import logger 7 | from config import conf 8 | from plugins import * 9 | 10 | 11 | @plugins.register( 12 | name="Finish", 13 | desire_priority=-999, 14 | hidden=True, 15 | desc="A plugin that check unknown command", 16 | version="1.0", 17 | author="js00000", 18 | ) 19 | class Finish(Plugin): 20 | def __init__(self): 21 | super().__init__() 22 | self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context 23 | logger.info("[Finish] inited") 24 | 25 | def on_handle_context(self, e_context: EventContext): 26 | if e_context["context"].type != ContextType.TEXT: 27 | return 28 | 29 | content = e_context["context"].content 30 | logger.debug("[Finish] on_handle_context. 
content: %s" % content) 31 | trigger_prefix = conf().get("plugin_trigger_prefix", "$") 32 | if content.startswith(trigger_prefix): 33 | reply = Reply() 34 | reply.type = ReplyType.ERROR 35 | reply.content = "未知插件命令\n查看插件命令列表请输入#help 插件名\n" 36 | e_context["reply"] = reply 37 | e_context.action = EventAction.BREAK_PASS # 事件结束,并跳过处理context的默认逻辑 38 | 39 | def get_help_text(self, **kwargs): 40 | return "" 41 | -------------------------------------------------------------------------------- /lib/itchat/config.py: -------------------------------------------------------------------------------- 1 | import os, platform 2 | 3 | VERSION = '1.5.0.dev' 4 | 5 | # use this envrionment to initialize the async & sync componment 6 | ASYNC_COMPONENTS = os.environ.get('ITCHAT_UOS_ASYNC', False) 7 | 8 | BASE_URL = 'https://login.weixin.qq.com' 9 | OS = platform.system() # Windows, Linux, Darwin 10 | DIR = os.getcwd() 11 | DEFAULT_QR = 'QR.png' 12 | TIMEOUT = (10, 60) 13 | 14 | USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36' 15 | 16 | UOS_PATCH_CLIENT_VERSION = '2.0.0' 17 | UOS_PATCH_EXTSPAM = 
'Go8FCIkFEokFCggwMDAwMDAwMRAGGvAESySibk50w5Wb3uTl2c2h64jVVrV7gNs06GFlWplHQbY/5FfiO++1yH4ykCyNPWKXmco+wfQzK5R98D3so7rJ5LmGFvBLjGceleySrc3SOf2Pc1gVehzJgODeS0lDL3/I/0S2SSE98YgKleq6Uqx6ndTy9yaL9qFxJL7eiA/R3SEfTaW1SBoSITIu+EEkXff+Pv8NHOk7N57rcGk1w0ZzRrQDkXTOXFN2iHYIzAAZPIOY45Lsh+A4slpgnDiaOvRtlQYCt97nmPLuTipOJ8Qc5pM7ZsOsAPPrCQL7nK0I7aPrFDF0q4ziUUKettzW8MrAaiVfmbD1/VkmLNVqqZVvBCtRblXb5FHmtS8FxnqCzYP4WFvz3T0TcrOqwLX1M/DQvcHaGGw0B0y4bZMs7lVScGBFxMj3vbFi2SRKbKhaitxHfYHAOAa0X7/MSS0RNAjdwoyGHeOepXOKY+h3iHeqCvgOH6LOifdHf/1aaZNwSkGotYnYScW8Yx63LnSwba7+hESrtPa/huRmB9KWvMCKbDThL/nne14hnL277EDCSocPu3rOSYjuB9gKSOdVmWsj9Dxb/iZIe+S6AiG29Esm+/eUacSba0k8wn5HhHg9d4tIcixrxveflc8vi2/wNQGVFNsGO6tB5WF0xf/plngOvQ1/ivGV/C1Qpdhzznh0ExAVJ6dwzNg7qIEBaw+BzTJTUuRcPk92Sn6QDn2Pu3mpONaEumacjW4w6ipPnPw+g2TfywJjeEcpSZaP4Q3YV5HG8D6UjWA4GSkBKculWpdCMadx0usMomsSS/74QgpYqcPkmamB4nVv1JxczYITIqItIKjD35IGKAUwAA==' 18 | -------------------------------------------------------------------------------- /common/token_bucket.py: -------------------------------------------------------------------------------- 1 | import threading 2 | import time 3 | 4 | 5 | class TokenBucket: 6 | def __init__(self, tpm, timeout=None): 7 | self.capacity = int(tpm) # 令牌桶容量 8 | self.tokens = 0 # 初始令牌数为0 9 | self.rate = int(tpm) / 60 # 令牌每秒生成速率 10 | self.timeout = timeout # 等待令牌超时时间 11 | self.cond = threading.Condition() # 条件变量 12 | self.is_running = True 13 | # 开启令牌生成线程 14 | threading.Thread(target=self._generate_tokens).start() 15 | 16 | def _generate_tokens(self): 17 | """生成令牌""" 18 | while self.is_running: 19 | with self.cond: 20 | if self.tokens < self.capacity: 21 | self.tokens += 1 22 | self.cond.notify() # 通知获取令牌的线程 23 | time.sleep(1 / self.rate) 24 | 25 | def get_token(self): 26 | """获取令牌""" 27 | with self.cond: 28 | while self.tokens <= 0: 29 | flag = self.cond.wait(self.timeout) 30 | if not flag: # 超时 31 | return False 32 | self.tokens -= 1 33 | return True 34 | 35 | def close(self): 36 | self.is_running = False 37 | 38 | 39 
| if __name__ == "__main__": 40 | token_bucket = TokenBucket(20, None) # 创建一个每分钟生产20个tokens的令牌桶 41 | # token_bucket = TokenBucket(20, 0.1) 42 | for i in range(3): 43 | if token_bucket.get_token(): 44 | print(f"第{i+1}次请求成功") 45 | token_bucket.close() 46 | -------------------------------------------------------------------------------- /plugins/event.py: -------------------------------------------------------------------------------- 1 | # encoding:utf-8 2 | 3 | from enum import Enum 4 | 5 | 6 | class Event(Enum): 7 | ON_RECEIVE_MESSAGE = 1 # 收到消息 8 | """ 9 | e_context = { "channel": 消息channel, "context" : 本次消息的context} 10 | """ 11 | 12 | ON_HANDLE_CONTEXT = 2 # 处理消息前 13 | """ 14 | e_context = { "channel": 消息channel, "context" : 本次消息的context, "reply" : 目前的回复,初始为空 } 15 | """ 16 | 17 | ON_DECORATE_REPLY = 3 # 得到回复后准备装饰 18 | """ 19 | e_context = { "channel": 消息channel, "context" : 本次消息的context, "reply" : 目前的回复 } 20 | """ 21 | 22 | ON_SEND_REPLY = 4 # 发送回复前 23 | """ 24 | e_context = { "channel": 消息channel, "context" : 本次消息的context, "reply" : 目前的回复 } 25 | """ 26 | 27 | # AFTER_SEND_REPLY = 5 # 发送回复后 28 | 29 | 30 | class EventAction(Enum): 31 | CONTINUE = 1 # 事件未结束,继续交给下个插件处理,如果没有下个插件,则交付给默认的事件处理逻辑 32 | BREAK = 2 # 事件结束,不再给下个插件处理,交付给默认的事件处理逻辑 33 | BREAK_PASS = 3 # 事件结束,不再给下个插件处理,不交付给默认的事件处理逻辑 34 | 35 | 36 | class EventContext: 37 | def __init__(self, event, econtext=dict()): 38 | self.event = event 39 | self.econtext = econtext 40 | self.action = EventAction.CONTINUE 41 | 42 | def __getitem__(self, key): 43 | return self.econtext[key] 44 | 45 | def __setitem__(self, key, value): 46 | self.econtext[key] = value 47 | 48 | def __delitem__(self, key): 49 | del self.econtext[key] 50 | 51 | def is_pass(self): 52 | return self.action == EventAction.BREAK_PASS 53 | -------------------------------------------------------------------------------- /bot/baidu/baidu_unit_bot.py: -------------------------------------------------------------------------------- 1 | # encoding:utf-8 2 | 3 
| import requests 4 | 5 | from bot.bot import Bot 6 | from bridge.reply import Reply, ReplyType 7 | 8 | 9 | # Baidu Unit对话接口 (可用, 但能力较弱) 10 | class BaiduUnitBot(Bot): 11 | def reply(self, query, context=None): 12 | token = self.get_token() 13 | url = "https://aip.baidubce.com/rpc/2.0/unit/service/v3/chat?access_token=" + token 14 | post_data = ( 15 | '{"version":"3.0","service_id":"S73177","session_id":"","log_id":"7758521","skill_ids":["1221886"],"request":{"terminal_id":"88888","query":"' 16 | + query 17 | + '", "hyper_params": {"chat_custom_bot_profile": 1}}}' 18 | ) 19 | print(post_data) 20 | headers = {"content-type": "application/x-www-form-urlencoded"} 21 | response = requests.post(url, data=post_data.encode(), headers=headers) 22 | if response: 23 | reply = Reply( 24 | ReplyType.TEXT, 25 | response.json()["result"]["context"]["SYS_PRESUMED_HIST"][1], 26 | ) 27 | return reply 28 | 29 | def get_token(self): 30 | access_key = "YOUR_ACCESS_KEY" 31 | secret_key = "YOUR_SECRET_KEY" 32 | host = "https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=" + access_key + "&client_secret=" + secret_key 33 | response = requests.get(host) 34 | if response: 35 | print(response.json()) 36 | return response.json()["access_token"] 37 | -------------------------------------------------------------------------------- /lib/itchat/log.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | class LogSystem(object): 4 | handlerList = [] 5 | showOnCmd = True 6 | loggingLevel = logging.INFO 7 | loggingFile = None 8 | def __init__(self): 9 | self.logger = logging.getLogger('itchat') 10 | self.logger.addHandler(logging.NullHandler()) 11 | self.logger.setLevel(self.loggingLevel) 12 | self.cmdHandler = logging.StreamHandler() 13 | self.fileHandler = None 14 | self.logger.addHandler(self.cmdHandler) 15 | def set_logging(self, showOnCmd=True, loggingFile=None, 16 | loggingLevel=logging.INFO): 17 | if 
showOnCmd != self.showOnCmd: 18 | if showOnCmd: 19 | self.logger.addHandler(self.cmdHandler) 20 | else: 21 | self.logger.removeHandler(self.cmdHandler) 22 | self.showOnCmd = showOnCmd 23 | if loggingFile != self.loggingFile: 24 | if self.loggingFile is not None: # clear old fileHandler 25 | self.logger.removeHandler(self.fileHandler) 26 | self.fileHandler.close() 27 | if loggingFile is not None: # add new fileHandler 28 | self.fileHandler = logging.FileHandler(loggingFile) 29 | self.logger.addHandler(self.fileHandler) 30 | self.loggingFile = loggingFile 31 | if loggingLevel != self.loggingLevel: 32 | self.logger.setLevel(loggingLevel) 33 | self.loggingLevel = loggingLevel 34 | 35 | ls = LogSystem() 36 | set_logging = ls.set_logging 37 | -------------------------------------------------------------------------------- /common/utils.py: -------------------------------------------------------------------------------- 1 | import io 2 | import os 3 | 4 | from PIL import Image 5 | 6 | 7 | def fsize(file): 8 | if isinstance(file, io.BytesIO): 9 | return file.getbuffer().nbytes 10 | elif isinstance(file, str): 11 | return os.path.getsize(file) 12 | elif hasattr(file, "seek") and hasattr(file, "tell"): 13 | pos = file.tell() 14 | file.seek(0, os.SEEK_END) 15 | size = file.tell() 16 | file.seek(pos) 17 | return size 18 | else: 19 | raise TypeError("Unsupported type") 20 | 21 | 22 | def compress_imgfile(file, max_size): 23 | if fsize(file) <= max_size: 24 | return file 25 | file.seek(0) 26 | img = Image.open(file) 27 | rgb_image = img.convert("RGB") 28 | quality = 95 29 | while True: 30 | out_buf = io.BytesIO() 31 | rgb_image.save(out_buf, "JPEG", quality=quality) 32 | if fsize(out_buf) <= max_size: 33 | return out_buf 34 | quality -= 5 35 | 36 | 37 | def split_string_by_utf8_length(string, max_length, max_split=0): 38 | encoded = string.encode("utf-8") 39 | start, end = 0, 0 40 | result = [] 41 | while end < len(encoded): 42 | if max_split > 0 and len(result) >= max_split: 43 
def split_string_by_utf8_length(string, max_length, max_split=0):
    """Split *string* into chunks of at most *max_length* UTF-8 bytes.

    Never cuts inside a multi-byte character.  If *max_split* > 0, at most
    that many full-size chunks are produced and the remainder is returned
    as one final piece.  Returns a list of str (empty list for "").
    """
    encoded = string.encode("utf-8")
    start, end = 0, 0
    result = []
    while end < len(encoded):
        if max_split > 0 and len(result) >= max_split:
            result.append(encoded[start:].decode("utf-8"))
            break
        end = min(start + max_length, len(encoded))
        # Back up while `end` points at a UTF-8 continuation byte (10xxxxxx)
        # so we never split a character in half.
        while end < len(encoded) and (encoded[end] & 0b11000000) == 0b10000000:
            end -= 1
        if end <= start:
            # BUG FIX: when max_length is smaller than the width of the next
            # character, the old code appended "" forever (infinite loop).
            # Take the whole character to guarantee forward progress.
            end = start + 1
            while end < len(encoded) and (encoded[end] & 0b11000000) == 0b10000000:
                end += 1
        result.append(encoded[start:end].decode("utf-8"))
        start = end
    return result
# Image generation via OpenAI's DALL-E endpoint.
class OpenAIImage(object):
    """Thin wrapper around openai.Image.create with optional rate limiting."""

    def __init__(self):
        openai.api_key = conf().get("open_ai_api_key")
        # Optional token-bucket rate limiter, enabled by the
        # "rate_limit_dalle" config key (requests per minute).
        if conf().get("rate_limit_dalle"):
            self.tb4dalle = TokenBucket(conf().get("rate_limit_dalle", 50))

    def create_img(self, query, retry_count=0, api_key=None):
        """Generate one image for *query*.

        Returns (True, image_url) on success, (False, user_facing_message)
        on rate limiting or failure.  Retries once on RateLimitError.
        """
        try:
            if conf().get("rate_limit_dalle") and not self.tb4dalle.get_token():
                return False, "请求太快了,请休息一下再问我吧"
            logger.info("[OPEN_AI] image_query={}".format(query))
            response = openai.Image.create(
                api_key=api_key,
                prompt=query,  # image description
                n=1,  # number of images per request
                size=conf().get("image_create_size", "256x256"),  # 256x256, 512x512 or 1024x1024
            )
            image_url = response["data"][0]["url"]
            logger.info("[OPEN_AI] image_url={}".format(image_url))
            return True, image_url
        except openai.error.RateLimitError as e:
            logger.warn(e)
            if retry_count < 1:
                time.sleep(5)
                logger.warn("[OPEN_AI] ImgCreate RateLimit exceed, 第{}次重试".format(retry_count + 1))
                # BUG FIX: the retry used to drop api_key, silently falling
                # back to the globally configured key.
                return self.create_img(query, retry_count + 1, api_key)
            else:
                return False, "提问太快啦,请休息一下再问我吧"
        except Exception as e:
            logger.exception(e)
            return False, str(e)
系统中收到的语音文件为mp3格式(wx)或者sil格式(wxy),如果要识别需要转换为pcm格式,转换后的文件为16k采样率,单声道,16bit的pcm文件 10 | 发送时又需要(wx)转换为mp3格式,转换后的文件为16k采样率,单声道,16bit的pcm文件,(wxy)转换为sil格式,还要计算声音长度,发送时需要带上声音长度 11 | 这些事情都在audio_convert.py中封装了,直接调用即可 12 | 13 | 14 | 参数说明 15 | 识别参数 16 | https://ai.baidu.com/ai-doc/SPEECH/Vk38lxily 17 | 合成参数 18 | https://ai.baidu.com/ai-doc/SPEECH/Gk38y8lzk 19 | 20 | ## 使用说明 21 | 分两个地方配置 22 | 23 | 1、对于def voiceToText(self, filename)函数中调用的百度语音识别API,中接口调用asr(参数)这个配置见CHATGPT-ON-WECHAT工程目录下的`config.json`文件和config.py文件。 24 | 参数 可需 描述 25 | app_id 必填 应用的APPID 26 | api_key 必填 应用的APIKey 27 | secret_key 必填 应用的SecretKey 28 | dev_pid 必填 语言选择,填写语言对应的dev_pid值 29 | 30 | 2、对于def textToVoice(self, text)函数中调用的百度语音合成API,中接口调用synthesis(参数)在本目录下的`config.json`文件中进行配置。 31 | 参数 可需 描述 32 | tex 必填 合成的文本,使用UTF-8编码,请注意文本长度必须小于1024字节 33 | lan 必填 固定值zh。语言选择,目前只有中英文混合模式,填写固定值zh 34 | spd 选填 语速,取值0-15,默认为5中语速 35 | pit 选填 音调,取值0-15,默认为5中语调 36 | vol 选填 音量,取值0-15,默认为5中音量(取值为0时为音量最小值,并非为无声) 37 | per(基础音库) 选填 度小宇=1,度小美=0,度逍遥(基础)=3,度丫丫=4 38 | per(精品音库) 选填 度逍遥(精品)=5003,度小鹿=5118,度博文=106,度小童=110,度小萌=111,度米朵=103,度小娇=5 39 | aue 选填 3为mp3格式(默认); 4为pcm-16k;5为pcm-8k;6为wav(内容同pcm-16k); 注意aue=4或者6是语音识别要求的格式,但是音频内容不是语音识别要求的自然人发音,所以识别效果会受影响。 40 | 41 | 关于per参数的说明,注意您购买的哪个音库,就填写哪个音库的参数,否则会报错。如果您购买的是基础音库,那么per参数只能填写0到4,如果您购买的是精品音库,那么per参数只能填写5003,5118,106,110,111,103,5其他的都会报错。 42 | ### 配置文件 43 | 44 | 将文件夹中`config.json.template`复制为`config.json`。 45 | 46 | ``` json 47 | { 48 | "lang": "zh", 49 | "ctp": 1, 50 | "spd": 5, 51 | "pit": 5, 52 | "vol": 5, 53 | "per": 0 54 | } 55 | ``` -------------------------------------------------------------------------------- /voice/google/google_voice.py: -------------------------------------------------------------------------------- 1 | """ 2 | google voice service 3 | """ 4 | 5 | import time 6 | 7 | import speech_recognition 8 | from gtts import gTTS 9 | 10 | from bridge.reply import Reply, ReplyType 11 | from common.log import logger 12 | from common.tmp_dir import TmpDir 13 | from voice.voice 
class GoogleVoice(Voice):
    """Speech services backed by Google: recognition via the
    speech_recognition package, synthesis via gTTS."""

    recognizer = speech_recognition.Recognizer()

    def __init__(self):
        pass

    def voiceToText(self, voice_file):
        """Transcribe *voice_file* (zh-CN).  Returns a TEXT Reply on success,
        an ERROR Reply for known recognition failures."""
        with speech_recognition.AudioFile(voice_file) as source:
            audio = self.recognizer.record(source)
        try:
            text = self.recognizer.recognize_google(audio, language="zh-CN")
        except speech_recognition.UnknownValueError:
            return Reply(ReplyType.ERROR, "抱歉,我听不懂")
        except speech_recognition.RequestError as e:
            return Reply(ReplyType.ERROR, "抱歉,无法连接到 Google 语音识别服务;{0}".format(e))
        # BUG FIX: the old `finally: return reply` swallowed every unexpected
        # exception (and raised NameError when no handler had bound `reply`);
        # unknown failures now propagate to the caller.
        logger.info("[Google] voiceToText text={} voice file name={}".format(text, voice_file))
        return Reply(ReplyType.TEXT, text)

    def textToVoice(self, text):
        """Synthesize *text* to an mp3 in the tmp dir.  Returns a VOICE Reply
        on success, an ERROR Reply otherwise."""
        try:
            # Avoid the same filename under multithreading
            mp3File = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3"
            tts = gTTS(text=text, lang="zh")
            tts.save(mp3File)
            logger.info("[Google] textToVoice text={} voice file name={}".format(text, mp3File))
            return Reply(ReplyType.VOICE, mp3File)
        except Exception as e:
            return Reply(ReplyType.ERROR, str(e))
class ContextType(Enum):
    """Kinds of inbound events a channel can produce."""

    TEXT = 1  # plain text message
    VOICE = 2  # audio message
    IMAGE = 3  # image message
    IMAGE_CREATE = 10  # command asking the bot to create an image
    JOIN_GROUP = 20  # somebody joined the group chat
    PATPAT = 21  # "pat-pat" (tickle) interaction

    def __str__(self):
        return self.name


class Context:
    """Dict-like container for one inbound event.

    "type" and "content" are first-class attributes; every other key is
    stored in self.kwargs.  All mapping dunders route accordingly.
    """

    def __init__(self, type: ContextType = None, content=None, kwargs=None):
        self.type = type
        self.content = content
        # BUG FIX: the default used to be `kwargs=dict()`, a single dict
        # shared by every Context created without kwargs — one instance's
        # extra keys leaked into all others.  Build a fresh dict instead.
        self.kwargs = {} if kwargs is None else kwargs

    def __contains__(self, key):
        if key == "type":
            return self.type is not None
        elif key == "content":
            return self.content is not None
        else:
            return key in self.kwargs

    def __getitem__(self, key):
        if key == "type":
            return self.type
        elif key == "content":
            return self.content
        else:
            return self.kwargs[key]

    def get(self, key, default=None):
        """dict.get-style lookup: *default* when the key is absent."""
        try:
            return self[key]
        except KeyError:
            return default

    def __setitem__(self, key, value):
        if key == "type":
            self.type = value
        elif key == "content":
            self.content = value
        else:
            self.kwargs[key] = value

    def __delitem__(self, key):
        if key == "type":
            self.type = None
        elif key == "content":
            self.content = None
        else:
            del self.kwargs[key]

    def __str__(self):
        return "Context(type={}, content={}, kwargs={})".format(self.type, self.content, self.kwargs)
def time_checker(f):
    """Decorator: run *f* only inside the configured chat service window.

    When "chat_time_module" is enabled in config, messages arriving outside
    [chat_start_time, chat_stop_time] are dropped — except the "#更新配置"
    command, which is always allowed so the window itself can be fixed.
    """

    def _time_checker(self, *args, **kwargs):
        _config = config.conf()
        chat_time_module = _config.get("chat_time_module", False)
        if chat_time_module:
            chat_start_time = _config.get("chat_start_time", "00:00")
            chat_stopt_time = _config.get("chat_stop_time", "24:00")
            # HH:MM, allowing "24:00" as an end-of-day sentinel
            time_regex = re.compile(r"^([01]?[0-9]|2[0-4])(:)([0-5][0-9])$")

            starttime_format_check = time_regex.match(chat_start_time)  # validate start time format
            stoptime_format_check = time_regex.match(chat_stopt_time)  # validate stop time format
            chat_time_check = chat_start_time < chat_stopt_time  # start must precede stop

            # Report a bad window once per message.
            if not (starttime_format_check and stoptime_format_check and chat_time_check):
                # BUG FIX: the message used to .format() the re.Match objects
                # (printing "<re.Match ...>"); show the configured strings.
                logger.warn("时间格式不正确,请在config.json中修改您的CHAT_START_TIME/CHAT_STOP_TIME,否则可能会影响您正常使用,开始({})-结束({})".format(chat_start_time, chat_stopt_time))
                if chat_start_time > "23:59":
                    logger.error("启动时间可能存在问题,请修改!")

            # Lexicographic HH:MM comparison is correct for zero-padded times.
            now_time = time.strftime("%H:%M", time.localtime())
            if chat_start_time <= now_time <= chat_stopt_time:
                f(self, *args, **kwargs)  # inside the service window: answer
                return None
            else:
                if args[0]["Content"] == "#更新配置":  # config reload allowed outside the window
                    f(self, *args, **kwargs)
                else:
                    logger.info("非服务时间内,不接受访问")
                return None
        else:
            f(self, *args, **kwargs)  # module disabled: always answer

    return _time_checker
class BaiduTranslator(Translator):
    """Translator backed by the Baidu Fanyi HTTP API."""

    def __init__(self) -> None:
        super().__init__()
        endpoint = "http://api.fanyi.baidu.com"
        path = "/api/trans/vip/translate"
        self.url = endpoint + path
        self.appid = conf().get("baidu_translate_app_id")
        self.appkey = conf().get("baidu_translate_app_key")

    # For list of language codes, please refer to `https://api.fanyi.baidu.com/doc/21`, need to convert to ISO 639-1 codes
    def translate(self, query: str, from_lang: str = "", to_lang: str = "en") -> str:
        """Translate *query* from *from_lang* (auto-detected when empty) to *to_lang*.

        Transient Baidu errors (52001 timeout / 52002 system error) are
        retried up to 3 times.  Raises Exception carrying Baidu's error_msg
        on any other API error or when all retries are exhausted.
        """
        if not from_lang:
            from_lang = "auto"  # baidu supports auto detect
        salt = random.randint(32768, 65536)
        sign = self.make_md5(self.appid + query + str(salt) + self.appkey)
        headers = {"Content-Type": "application/x-www-form-urlencoded"}
        payload = {"appid": self.appid, "q": query, "from": from_lang, "to": to_lang, "salt": salt, "sign": sign}

        retry_cnt = 3
        while retry_cnt:
            r = requests.post(self.url, params=payload, headers=headers)
            result = r.json()
            errcode = result.get("error_code", "52000")  # "52000" means success
            if errcode == "52000":
                break
            if errcode in ("52001", "52002"):  # transient: timeout / system error
                retry_cnt -= 1
                continue
            raise Exception(result["error_msg"])
        else:
            # BUG FIX: when every retry failed the old code fell through with
            # the failing response and crashed with KeyError("trans_result");
            # fail loudly with the API's message instead.
            raise Exception(result.get("error_msg", "baidu translate failed after retries"))
        text = "\n".join([item["dst"] for item in result["trans_result"]])
        return text

    def make_md5(self, s, encoding="utf-8"):
        """Hex MD5 digest of *s* — Baidu's request signature scheme."""
        return md5(s.encode(encoding)).hexdigest()
class WechatComAppMessage(ChatMessage):
    """ChatMessage adapter for WeChat Work (enterprise app) callbacks."""

    def __init__(self, msg, client: WeChatClient, is_group=False):
        super().__init__(msg)
        self.msg_id = msg.id
        self.create_time = msg.time
        self.is_group = is_group

        def make_downloader(media_kind):
            # Build a lazy downloader that fetches msg.media_id and writes it
            # to self.content the first time prepare() runs.
            def download():
                response = client.media.download(msg.media_id)
                if response.status_code == 200:
                    with open(self.content, "wb") as f:
                        f.write(response.content)
                else:
                    logger.info(f"[wechatcom] Failed to download {media_kind} file, {response.content}")

            return download

        if msg.type == "text":
            self.ctype = ContextType.TEXT
            self.content = msg.content
        elif msg.type == "voice":
            self.ctype = ContextType.VOICE
            # content holds the tmp-dir path the audio will be saved to
            self.content = TmpDir().path() + msg.media_id + "." + msg.format
            self._prepare_fn = make_downloader("voice")
        elif msg.type == "image":
            self.ctype = ContextType.IMAGE
            # content holds the tmp-dir path the image will be saved to
            self.content = TmpDir().path() + msg.media_id + ".png"
            self._prepare_fn = make_downloader("image")
        else:
            raise NotImplementedError("Unsupported message type: Type:{} ".format(msg.type))

        self.from_user_id = msg.source
        self.to_user_id = msg.target
        self.other_user_id = msg.source
class ChatMessage(object):
    """Channel-agnostic wrapper for one chat message.

    All fields default to class-level None/False; channel-specific
    subclasses fill in what they know.  Media messages may set
    _prepare_fn so heavy work (e.g. downloading a file) is deferred
    until prepare() is called.
    """

    msg_id = None
    create_time = None

    ctype = None
    content = None

    from_user_id = None
    from_user_nickname = None
    to_user_id = None
    to_user_nickname = None
    other_user_id = None
    other_user_nickname = None

    is_group = False
    is_at = False
    actual_user_id = None
    actual_user_nickname = None

    _prepare_fn = None
    _prepared = False
    _rawmsg = None

    def __init__(self, _rawmsg):
        self._rawmsg = _rawmsg

    def prepare(self):
        """Run the lazy preparation hook exactly once, if one was set."""
        if self._prepared or not self._prepare_fn:
            return
        self._prepared = True
        self._prepare_fn()

    def __str__(self):
        described = (
            ("id", self.msg_id),
            ("create_time", self.create_time),
            ("ctype", self.ctype),
            ("content", self.content),
            ("from_user_id", self.from_user_id),
            ("from_user_nickname", self.from_user_nickname),
            ("to_user_id", self.to_user_id),
            ("to_user_nickname", self.to_user_nickname),
            ("other_user_id", self.other_user_id),
            ("other_user_nickname", self.other_user_nickname),
            ("is_group", self.is_group),
            ("is_at", self.is_at),
            ("actual_user_id", self.actual_user_id),
            ("actual_user_nickname", self.actual_user_nickname),
        )
        return "ChatMessage: " + ", ".join("{}={}".format(k, v) for k, v in described)
class WeChatMPMessage(ChatMessage):
    """ChatMessage adapter for WeChat Official Account (MP) callbacks."""

    def __init__(self, msg, client=None):
        super().__init__(msg)
        self.msg_id = msg.id
        self.create_time = msg.time
        self.is_group = False

        if msg.type == "text":
            self.ctype = ContextType.TEXT
            self.content = msg.content
        elif msg.type == "voice":
            # BUG FIX (idiom): `is None` instead of `== None` — identity is
            # the correct (and safe) way to test for None.
            if msg.recognition is None:
                self.ctype = ContextType.VOICE
                self.content = TmpDir().path() + msg.media_id + "." + msg.format  # tmp-dir path the audio will be saved to

                def download_voice():
                    # Write the response body to the local file on HTTP 200.
                    response = client.media.download(msg.media_id)
                    if response.status_code == 200:
                        with open(self.content, "wb") as f:
                            f.write(response.content)
                    else:
                        logger.info(f"[wechatmp] Failed to download voice file, {response.content}")

                self._prepare_fn = download_voice
            else:
                # The platform already transcribed the audio: treat as text.
                self.ctype = ContextType.TEXT
                self.content = msg.recognition
        elif msg.type == "image":
            self.ctype = ContextType.IMAGE
            self.content = TmpDir().path() + msg.media_id + ".png"  # tmp-dir path the image will be saved to

            def download_image():
                # Write the response body to the local file on HTTP 200.
                response = client.media.download(msg.media_id)
                if response.status_code == 200:
                    with open(self.content, "wb") as f:
                        f.write(response.content)
                else:
                    logger.info(f"[wechatmp] Failed to download image file, {response.content}")

            self._prepare_fn = download_image
        else:
            raise NotImplementedError("Unsupported message type: Type:{} ".format(msg.type))

        self.from_user_id = msg.source
        self.to_user_id = msg.target
        self.other_user_id = msg.source
@plugins.register(
    name="Keyword",
    desire_priority=900,
    hidden=True,
    desc="关键词匹配过滤",
    version="0.1",
    author="fengyege.top",
)
class Keyword(Plugin):
    """Plugin that answers exact-match keywords with canned replies.

    The keyword -> reply mapping is read from config.json next to this
    file; when the file is missing, an empty template is written out so
    the user can fill it in.
    """

    def __init__(self):
        super().__init__()
        try:
            curdir = os.path.dirname(__file__)
            config_path = os.path.join(curdir, "config.json")
            conf = None
            if not os.path.exists(config_path):
                # No config yet: create an empty template on disk.
                logger.debug(f"[keyword]不存在配置文件{config_path}")
                conf = {"keyword": {}}
                with open(config_path, "w", encoding="utf-8") as f:
                    json.dump(conf, f, indent=4)
            else:
                logger.debug(f"[keyword]加载配置文件{config_path}")
                with open(config_path, "r", encoding="utf-8") as f:
                    conf = json.load(f)
            # Load the keyword -> reply mapping.
            self.keyword = conf["keyword"]

            logger.info("[keyword] {}".format(self.keyword))
            self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context
            logger.info("[keyword] inited.")
        except Exception as e:
            logger.warn("[keyword] init failed, ignore or see https://github.com/zhayujie/chatgpt-on-wechat/tree/master/plugins/keyword .")
            raise e

    def on_handle_context(self, e_context: EventContext):
        """Reply with the configured text when the message exactly matches a keyword."""
        if e_context["context"].type != ContextType.TEXT:
            return

        content = e_context["context"].content.strip()
        logger.debug("[keyword] on_handle_context. content: %s" % content)
        if content in self.keyword:
            logger.debug(f"[keyword] 匹配到关键字【{content}】")
            reply_text = self.keyword[content]

            reply = Reply()
            reply.type = ReplyType.TEXT
            reply.content = reply_text
            e_context["reply"] = reply
            e_context.action = EventAction.BREAK_PASS  # end the event and skip the default context handling

    def get_help_text(self, **kwargs):
        """Return the one-line help text shown by the plugin manager."""
        help_text = "关键词过滤"
        return help_text
@singleton
class Bridge(object):
    """Routes chat / voice / translate requests to lazily created bots.

    btype maps each capability name ("chat", "voice_to_text",
    "text_to_voice", "translate") to a provider id; bots caches one
    created instance per capability.
    """

    def __init__(self):
        self.btype = {
            "chat": const.CHATGPT,
            "voice_to_text": conf().get("voice_to_text", "openai"),
            "text_to_voice": conf().get("text_to_voice", "google"),
            "translate": conf().get("translate", "baidu"),
        }
        # Pick the chat backend from the configured model / credentials;
        # later checks override earlier ones.
        model_type = conf().get("model")
        if model_type in ["text-davinci-003"]:
            self.btype["chat"] = const.OPEN_AI
        if conf().get("use_azure_chatgpt", False):
            self.btype["chat"] = const.CHATGPTONAZURE
        # NOTE(review): const.luolinai is lowercase unlike the other const
        # names — confirm it exists in common/const.
        if conf().get("luolinai_api_key"):
            self.btype["chat"] = const.luolinai
        self.bots = {}  # capability name -> lazily created bot instance

    def get_bot(self, typename):
        """Return (creating on first use) the bot serving *typename*."""
        if self.bots.get(typename) is None:
            logger.info("create bot {} for {}".format(self.btype[typename], typename))
            if typename == "text_to_voice":
                self.bots[typename] = create_voice(self.btype[typename])
            elif typename == "voice_to_text":
                self.bots[typename] = create_voice(self.btype[typename])
            elif typename == "chat":
                self.bots[typename] = create_bot(self.btype[typename])
            elif typename == "translate":
                self.bots[typename] = create_translator(self.btype[typename])
        return self.bots[typename]

    def get_bot_type(self, typename):
        """Return the provider id configured for *typename*."""
        return self.btype[typename]

    def fetch_reply_content(self, query, context: Context) -> Reply:
        """Ask the chat bot for a reply to *query* within *context*."""
        return self.get_bot("chat").reply(query, context)

    def fetch_voice_to_text(self, voiceFile) -> Reply:
        """Transcribe *voiceFile* with the configured speech-to-text bot."""
        return self.get_bot("voice_to_text").voiceToText(voiceFile)

    def fetch_text_to_voice(self, text) -> Reply:
        """Synthesize *text* with the configured text-to-speech bot."""
        return self.get_bot("text_to_voice").textToVoice(text)

    def fetch_translate(self, text, from_lang="", to_lang="en") -> Reply:
        """Translate *text* with the configured translator."""
        return self.get_bot("translate").translate(text, from_lang, to_lang)
class WechatMPClient(WeChatClient):
    """WeChatClient that serializes token refreshes and auto-clears quota.

    fetch_access_token is guarded by a lock so concurrent threads do not
    all hit the token endpoint; _request retries once after clearing the
    API quota when the platform reports the quota is used up.
    """

    def __init__(self, appid, secret, access_token=None, session=None, timeout=None, auto_retry=True):
        super(WechatMPClient, self).__init__(appid, secret, access_token, session, timeout, auto_retry)
        self.fetch_access_token_lock = threading.Lock()
        self.clear_quota_lock = threading.Lock()
        self.last_clear_quota_time = -1  # epoch seconds of the last quota clear; -1 = never

    def clear_quota(self):
        """Clear the API call quota (legacy endpoint)."""
        return self.post("clear_quota", data={"appid": self.appid})

    def clear_quota_v2(self):
        """Clear the API call quota via the appsecret-based v2 endpoint."""
        return self.post("clear_quota/v2", params={"appid": self.appid, "appsecret": self.secret})

    def fetch_access_token(self):  # overridden: lock so threads don't refresh the token concurrently
        with self.fetch_access_token_lock:
            access_token = self.session.get(self.access_token_key)
            if access_token:
                if not self.expires_at:
                    return access_token
                timestamp = time.time()
                if self.expires_at - timestamp > 60:  # still valid for >60s: reuse it
                    return access_token
            return super().fetch_access_token()

    def _request(self, method, url_or_endpoint, **kwargs):  # overridden: on API limit, clear quota once and retry
        try:
            return super()._request(method, url_or_endpoint, **kwargs)
        except APILimitedException as e:
            logger.error("[wechatmp] API quota has been used up. {}".format(e))
            if self.last_clear_quota_time == -1 or time.time() - self.last_clear_quota_time > 60:
                with self.clear_quota_lock:
                    # re-check inside the lock so only one thread clears
                    if self.last_clear_quota_time == -1 or time.time() - self.last_clear_quota_time > 60:
                        self.last_clear_quota_time = time.time()
                        response = self.clear_quota_v2()
                        logger.debug("[wechatmp] API quota has been cleared, {}".format(response))
                return super()._request(method, url_or_endpoint, **kwargs)
            else:
                # BUG FIX: this message had a bare {} placeholder with no
                # .format() argument, so it literally logged "{}"; also fixed
                # the "quata"/"cleard" typos in the messages above.
                logger.error("[wechatmp] last clear quota time is {}, less than 60s, skip clear quota".format(self.last_clear_quota_time))
                raise e
class SortedDict(dict):
    """dict whose keys()/items() are ordered by a priority computed from
    (key, value) via sort_func; a heap tracks priorities and the sorted
    key list is cached until the next mutation."""

    def __init__(self, sort_func=lambda k, v: k, init_dict=None, reverse=False):
        if init_dict is None:
            init_dict = []
        if isinstance(init_dict, dict):
            init_dict = init_dict.items()
        self.sort_func = sort_func
        self.sorted_keys = None  # cached ordering; None = stale
        self.reverse = reverse
        self.heap = []  # list of (priority, key) pairs
        for k, v in init_dict:
            self[k] = v

    def __setitem__(self, key, value):
        existed = key in self
        super().__setitem__(key, value)
        if existed:
            # Replace the key's heap entry with its new priority.
            for idx, (_, heap_key) in enumerate(self.heap):
                if heap_key == key:
                    self.heap[idx] = (self.sort_func(key, value), key)
                    heapq.heapify(self.heap)
                    break
        else:
            heapq.heappush(self.heap, (self.sort_func(key, value), key))
        self.sorted_keys = None

    def __delitem__(self, key):
        super().__delitem__(key)
        idx = next((i for i, (_, k) in enumerate(self.heap) if k == key), None)
        if idx is not None:
            self.heap.pop(idx)
            heapq.heapify(self.heap)
        self.sorted_keys = None

    def _ordered_keys(self):
        # Full sort of the heap entries; the heap invariant alone does not
        # yield total order.
        return [k for _, k in sorted(self.heap, reverse=self.reverse)]

    def keys(self):
        if self.sorted_keys is None:
            self.sorted_keys = self._ordered_keys()
        return self.sorted_keys

    def items(self):
        if self.sorted_keys is None:
            self.sorted_keys = self._ordered_keys()
        return [(k, self[k]) for k in self.sorted_keys]

    def _update_heap(self, key):
        for i, (priority, k) in enumerate(self.heap):
            if k != key:
                continue
            new_priority = self.sort_func(key, self[key])
            if new_priority != priority:
                self.heap[i] = (new_priority, key)
                heapq.heapify(self.heap)
                self.sorted_keys = None
            break

    def __iter__(self):
        return iter(self.keys())

    def __repr__(self):
        return "{}({}, sort_func={}, reverse={})".format(
            type(self).__name__, dict(self), self.sort_func.__name__, self.reverse
        )
TRANSLATE = 'Chinese'


class ReturnValue(dict):
    ''' turn return value of itchat into a boolean value
    for requests:
    ..code::python

        import requests
        r = requests.get('http://httpbin.org/get')
        print(ReturnValue(rawResponse=r))

    for normal dict:
    ..code::python

        returnDict = {
            'BaseResponse': {
                'Ret': 0,
                'ErrMsg': 'My error msg', }, }
        print(ReturnValue(returnDict))
    '''
    def __init__(self, returnValueDict=None, rawResponse=None):
        # BUG FIX: the default used to be a mutable `{}` shared across all
        # calls; build a fresh empty dict per call instead.
        if returnValueDict is None:
            returnValueDict = {}
        if rawResponse:
            try:
                returnValueDict = rawResponse.json()
            except ValueError:
                returnValueDict = {
                    'BaseResponse': {
                        'Ret': -1004,
                        'ErrMsg': 'Unexpected return value', },
                    'Data': rawResponse.content, }
        for k, v in returnValueDict.items():
            self[k] = v
        if not 'BaseResponse' in self:
            self['BaseResponse'] = {
                'ErrMsg': 'no BaseResponse in raw response',
                'Ret': -1000, }
        if TRANSLATE:
            # Keep the server's original message in RawMsg and replace
            # ErrMsg with the translated text for the Ret code.
            self['BaseResponse']['RawMsg'] = self['BaseResponse'].get('ErrMsg', '')
            self['BaseResponse']['ErrMsg'] = \
                TRANSLATION[TRANSLATE].get(
                    self['BaseResponse'].get('Ret', '')) \
                or self['BaseResponse'].get('ErrMsg', u'No ErrMsg')
            self['BaseResponse']['RawMsg'] = \
                self['BaseResponse']['RawMsg'] or self['BaseResponse']['ErrMsg']

    def __nonzero__(self):
        # Truthy exactly when the API reports success (Ret == 0).
        return self['BaseResponse'].get('Ret') == 0

    def __bool__(self):
        return self.__nonzero__()

    def __str__(self):
        return '{%s}' % ', '.join(
            ['%s: %s' % (repr(k), repr(v)) for k, v in self.items()])

    def __repr__(self):
        # BUG FIX: the template string was lost in extraction, leaving
        # `'' % self.__str__()` which raises TypeError; restored the
        # upstream itchat representation.
        return '<ItchatReturnValue: %s>' % self.__str__()


TRANSLATION = {
    'Chinese': {
        -1000: u'返回值不带BaseResponse',
        -1001: u'无法找到对应的成员',
        -1002: u'文件位置错误',
        -1003: u'服务器拒绝连接',
        -1004: u'服务器返回异常值',
        -1005: u'参数错误',
        -1006: u'无效操作',
        0: u'请求成功',
    },
}
- 创建应用后进入应用详情页面,记住`AgentId`和`Secret`
78 | 79 | [![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/-FHS--?referralCode=RC3znh) 80 | 81 | 填写配置后,将部署完成后的网址```**.railway.app/wxcomapp```,填写在上一步的URL中。发送信息后观察日志,把报错的IP加入到可信IP。(每次重启后都需要加入可信IP) 82 | 83 | ## 测试体验 84 | 85 | AIGC开放社区中已经部署了多个可免费使用的Bot,扫描下方的二维码会自动邀请你来体验。 86 | 87 | 88 | -------------------------------------------------------------------------------- /voice/pytts/pytts_voice.py: -------------------------------------------------------------------------------- 1 | """ 2 | pytts voice service (offline) 3 | """ 4 | 5 | import os 6 | import sys 7 | import time 8 | 9 | import pyttsx3 10 | 11 | from bridge.reply import Reply, ReplyType 12 | from common.log import logger 13 | from common.tmp_dir import TmpDir 14 | from voice.voice import Voice 15 | 16 | 17 | class PyttsVoice(Voice): 18 | engine = pyttsx3.init() 19 | 20 | def __init__(self): 21 | # 语速 22 | self.engine.setProperty("rate", 125) 23 | # 音量 24 | self.engine.setProperty("volume", 1.0) 25 | if sys.platform == "win32": 26 | for voice in self.engine.getProperty("voices"): 27 | if "Chinese" in voice.name: 28 | self.engine.setProperty("voice", voice.id) 29 | else: 30 | self.engine.setProperty("voice", "zh") 31 | # If the problem of espeak is fixed, using runAndWait() and remove this startLoop() 32 | # TODO: check if this is work on win32 33 | self.engine.startLoop(useDriverLoop=False) 34 | 35 | def textToVoice(self, text): 36 | try: 37 | # Avoid the same filename under multithreading 38 | wavFileName = "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".wav" 39 | wavFile = TmpDir().path() + wavFileName 40 | logger.info("[Pytts] textToVoice text={} voice file name={}".format(text, wavFile)) 41 | 42 | self.engine.save_to_file(text, wavFile) 43 | 44 | if sys.platform == "win32": 45 | self.engine.runAndWait() 46 | else: 47 | # In ubuntu, runAndWait do not really wait until the file created. 
48 | # It will return once the task queue is empty, but the task is still running in coroutine. 49 | # And if you call runAndWait() and time.sleep() twice, it will stuck, so do not use this. 50 | # If you want to fix this, add self._proxy.setBusy(True) in line 127 in espeak.py, at the beginning of the function save_to_file. 51 | # self.engine.runAndWait() 52 | 53 | # Before espeak fix this problem, we iterate the generator and control the waiting by ourself. 54 | # But this is not the canonical way to use it, for example if the file already exists it also cannot wait. 55 | self.engine.iterate() 56 | while self.engine.isBusy() or wavFileName not in os.listdir(TmpDir().path()): 57 | time.sleep(0.1) 58 | 59 | reply = Reply(ReplyType.VOICE, wavFile) 60 | 61 | except Exception as e: 62 | reply = Reply(ReplyType.ERROR, str(e)) 63 | finally: 64 | return reply 65 | -------------------------------------------------------------------------------- /bot/openai/open_ai_session.py: -------------------------------------------------------------------------------- 1 | from bot.session_manager import Session 2 | from common.log import logger 3 | 4 | 5 | class OpenAISession(Session): 6 | def __init__(self, session_id, system_prompt=None, model="text-davinci-003"): 7 | super().__init__(session_id, system_prompt) 8 | self.model = model 9 | self.reset() 10 | 11 | def __str__(self): 12 | # 构造对话模型的输入 13 | """ 14 | e.g. 
Q: xxx 15 | A: xxx 16 | Q: xxx 17 | """ 18 | prompt = "" 19 | for item in self.messages: 20 | if item["role"] == "system": 21 | prompt += item["content"] + "<|endoftext|>\n\n\n" 22 | elif item["role"] == "user": 23 | prompt += "Q: " + item["content"] + "\n" 24 | elif item["role"] == "assistant": 25 | prompt += "\n\nA: " + item["content"] + "<|endoftext|>\n" 26 | 27 | if len(self.messages) > 0 and self.messages[-1]["role"] == "user": 28 | prompt += "A: " 29 | return prompt 30 | 31 | def discard_exceeding(self, max_tokens, cur_tokens=None): 32 | precise = True 33 | try: 34 | cur_tokens = self.calc_tokens() 35 | except Exception as e: 36 | precise = False 37 | if cur_tokens is None: 38 | raise e 39 | logger.debug("Exception when counting tokens precisely for query: {}".format(e)) 40 | while cur_tokens > max_tokens: 41 | if len(self.messages) > 1: 42 | self.messages.pop(0) 43 | elif len(self.messages) == 1 and self.messages[0]["role"] == "assistant": 44 | self.messages.pop(0) 45 | if precise: 46 | cur_tokens = self.calc_tokens() 47 | else: 48 | cur_tokens = len(str(self)) 49 | break 50 | elif len(self.messages) == 1 and self.messages[0]["role"] == "user": 51 | logger.warn("user question exceed max_tokens. 
total_tokens={}".format(cur_tokens)) 52 | break 53 | else: 54 | logger.debug("max_tokens={}, total_tokens={}, len(conversation)={}".format(max_tokens, cur_tokens, len(self.messages))) 55 | break 56 | if precise: 57 | cur_tokens = self.calc_tokens() 58 | else: 59 | cur_tokens = len(str(self)) 60 | return cur_tokens 61 | 62 | def calc_tokens(self): 63 | return num_tokens_from_string(str(self), self.model) 64 | 65 | 66 | # refer to https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb 67 | def num_tokens_from_string(string: str, model: str) -> int: 68 | """Returns the number of tokens in a text string.""" 69 | import tiktoken 70 | 71 | encoding = tiktoken.encoding_for_model(model) 72 | num_tokens = len(encoding.encode(string, disallowed_special=())) 73 | return num_tokens 74 | -------------------------------------------------------------------------------- /plugins/hello/hello.py: -------------------------------------------------------------------------------- 1 | # encoding:utf-8 2 | 3 | import plugins 4 | from bridge.context import ContextType 5 | from bridge.reply import Reply, ReplyType 6 | from channel.chat_message import ChatMessage 7 | from common.log import logger 8 | from plugins import * 9 | 10 | 11 | @plugins.register( 12 | name="Hello", 13 | desire_priority=-1, 14 | hidden=True, 15 | desc="A simple plugin that says hello", 16 | version="0.1", 17 | author="lanvent", 18 | ) 19 | class Hello(Plugin): 20 | def __init__(self): 21 | super().__init__() 22 | self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context 23 | logger.info("[Hello] inited") 24 | 25 | def on_handle_context(self, e_context: EventContext): 26 | if e_context["context"].type not in [ 27 | ContextType.TEXT, 28 | ContextType.JOIN_GROUP, 29 | ContextType.PATPAT, 30 | ]: 31 | return 32 | 33 | if e_context["context"].type == ContextType.JOIN_GROUP: 34 | e_context["context"].type = ContextType.TEXT 35 | msg: ChatMessage = 
e_context["context"]["msg"] 36 | e_context["context"].content = f'请你随机使用一种风格说一句问候语来欢迎新用户"{msg.actual_user_nickname}"加入群聊。' 37 | e_context.action = EventAction.CONTINUE # 事件继续,交付给下个插件或默认逻辑 38 | return 39 | 40 | if e_context["context"].type == ContextType.PATPAT: 41 | e_context["context"].type = ContextType.TEXT 42 | msg: ChatMessage = e_context["context"]["msg"] 43 | e_context["context"].content = f"请你随机使用一种风格介绍你自己,并告诉用户输入#help可以查看帮助信息。" 44 | e_context.action = EventAction.CONTINUE # 事件继续,交付给下个插件或默认逻辑 45 | return 46 | 47 | content = e_context["context"].content 48 | logger.debug("[Hello] on_handle_context. content: %s" % content) 49 | if content == "Hello": 50 | reply = Reply() 51 | reply.type = ReplyType.TEXT 52 | msg: ChatMessage = e_context["context"]["msg"] 53 | if e_context["context"]["isgroup"]: 54 | reply.content = f"Hello, {msg.actual_user_nickname} from {msg.from_user_nickname}" 55 | else: 56 | reply.content = f"Hello, {msg.from_user_nickname}" 57 | e_context["reply"] = reply 58 | e_context.action = EventAction.BREAK_PASS # 事件结束,并跳过处理context的默认逻辑 59 | 60 | if content == "Hi": 61 | reply = Reply() 62 | reply.type = ReplyType.TEXT 63 | reply.content = "Hi" 64 | e_context["reply"] = reply 65 | e_context.action = EventAction.BREAK # 事件结束,进入默认处理逻辑,一般会覆写reply 66 | 67 | if content == "End": 68 | # 如果是文本消息"End",将请求转换成"IMAGE_CREATE",并将content设置为"The World" 69 | e_context["context"].type = ContextType.IMAGE_CREATE 70 | content = "The World" 71 | e_context.action = EventAction.CONTINUE # 事件继续,交付给下个插件或默认逻辑 72 | 73 | def get_help_text(self, **kwargs): 74 | help_text = "输入Hello,我会回复你的名字\n输入End,我会回复你世界的图片\n" 75 | return help_text 76 | -------------------------------------------------------------------------------- /channel/terminal/terminal_channel.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from bridge.context import * 4 | from bridge.reply import Reply, ReplyType 5 | from channel.chat_channel import 
ChatChannel, check_prefix 6 | from channel.chat_message import ChatMessage 7 | from common.log import logger 8 | from config import conf 9 | 10 | 11 | class TerminalMessage(ChatMessage): 12 | def __init__( 13 | self, 14 | msg_id, 15 | content, 16 | ctype=ContextType.TEXT, 17 | from_user_id="User", 18 | to_user_id="Chatgpt", 19 | other_user_id="Chatgpt", 20 | ): 21 | self.msg_id = msg_id 22 | self.ctype = ctype 23 | self.content = content 24 | self.from_user_id = from_user_id 25 | self.to_user_id = to_user_id 26 | self.other_user_id = other_user_id 27 | 28 | 29 | class TerminalChannel(ChatChannel): 30 | NOT_SUPPORT_REPLYTYPE = [ReplyType.VOICE] 31 | 32 | def send(self, reply: Reply, context: Context): 33 | print("\nBot:") 34 | if reply.type == ReplyType.IMAGE: 35 | from PIL import Image 36 | 37 | image_storage = reply.content 38 | image_storage.seek(0) 39 | img = Image.open(image_storage) 40 | print("") 41 | img.show() 42 | elif reply.type == ReplyType.IMAGE_URL: # 从网络下载图片 43 | import io 44 | 45 | import requests 46 | from PIL import Image 47 | 48 | img_url = reply.content 49 | pic_res = requests.get(img_url, stream=True) 50 | image_storage = io.BytesIO() 51 | for block in pic_res.iter_content(1024): 52 | image_storage.write(block) 53 | image_storage.seek(0) 54 | img = Image.open(image_storage) 55 | print(img_url) 56 | img.show() 57 | else: 58 | print(reply.content) 59 | print("\nUser:", end="") 60 | sys.stdout.flush() 61 | return 62 | 63 | def startup(self): 64 | context = Context() 65 | logger.setLevel("WARN") 66 | print("\nPlease input your question:\nUser:", end="") 67 | sys.stdout.flush() 68 | msg_id = 0 69 | while True: 70 | try: 71 | prompt = self.get_input() 72 | except KeyboardInterrupt: 73 | print("\nExiting...") 74 | sys.exit() 75 | msg_id += 1 76 | trigger_prefixs = conf().get("single_chat_prefix", [""]) 77 | if check_prefix(prompt, trigger_prefixs) is None: 78 | prompt = trigger_prefixs[0] + prompt # 给没触发的消息加上触发前缀 79 | 80 | context = 
self._compose_context(ContextType.TEXT, prompt, msg=TerminalMessage(msg_id, prompt)) 81 | if context: 82 | self.produce(context) 83 | else: 84 | raise Exception("context is None") 85 | 86 | def get_input(self): 87 | """ 88 | Multi-line input function 89 | """ 90 | sys.stdout.flush() 91 | line = input() 92 | return line 93 | -------------------------------------------------------------------------------- /channel/wechat/wechaty_message.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import re 3 | 4 | from wechaty import MessageType 5 | from wechaty.user import Message 6 | 7 | from bridge.context import ContextType 8 | from channel.chat_message import ChatMessage 9 | from common.log import logger 10 | from common.tmp_dir import TmpDir 11 | 12 | 13 | class aobject(object): 14 | """Inheriting this class allows you to define an async __init__. 15 | 16 | So you can create objects by doing something like `await MyClass(params)` 17 | """ 18 | 19 | async def __new__(cls, *a, **kw): 20 | instance = super().__new__(cls) 21 | await instance.__init__(*a, **kw) 22 | return instance 23 | 24 | async def __init__(self): 25 | pass 26 | 27 | 28 | class WechatyMessage(ChatMessage, aobject): 29 | async def __init__(self, wechaty_msg: Message): 30 | super().__init__(wechaty_msg) 31 | 32 | room = wechaty_msg.room() 33 | 34 | self.msg_id = wechaty_msg.message_id 35 | self.create_time = wechaty_msg.payload.timestamp 36 | self.is_group = room is not None 37 | 38 | if wechaty_msg.type() == MessageType.MESSAGE_TYPE_TEXT: 39 | self.ctype = ContextType.TEXT 40 | self.content = wechaty_msg.text() 41 | elif wechaty_msg.type() == MessageType.MESSAGE_TYPE_AUDIO: 42 | self.ctype = ContextType.VOICE 43 | voice_file = await wechaty_msg.to_file_box() 44 | self.content = TmpDir().path() + voice_file.name # content直接存临时目录路径 45 | 46 | def func(): 47 | loop = asyncio.get_event_loop() 48 | 
asyncio.run_coroutine_threadsafe(voice_file.to_file(self.content), loop).result() 49 | 50 | self._prepare_fn = func 51 | 52 | else: 53 | raise NotImplementedError("Unsupported message type: {}".format(wechaty_msg.type())) 54 | 55 | from_contact = wechaty_msg.talker() # 获取消息的发送者 56 | self.from_user_id = from_contact.contact_id 57 | self.from_user_nickname = from_contact.name 58 | 59 | # group中的from和to,wechaty跟itchat含义不一样 60 | # wecahty: from是消息实际发送者, to:所在群 61 | # itchat: 如果是你发送群消息,from和to是你自己和所在群,如果是别人发群消息,from和to是所在群和你自己 62 | # 但这个差别不影响逻辑,group中只使用到:1.用from来判断是否是自己发的,2.actual_user_id来判断实际发送用户 63 | 64 | if self.is_group: 65 | self.to_user_id = room.room_id 66 | self.to_user_nickname = await room.topic() 67 | else: 68 | to_contact = wechaty_msg.to() 69 | self.to_user_id = to_contact.contact_id 70 | self.to_user_nickname = to_contact.name 71 | 72 | if self.is_group or wechaty_msg.is_self(): # 如果是群消息,other_user设置为群,如果是私聊消息,而且自己发的,就设置成对方。 73 | self.other_user_id = self.to_user_id 74 | self.other_user_nickname = self.to_user_nickname 75 | else: 76 | self.other_user_id = self.from_user_id 77 | self.other_user_nickname = self.from_user_nickname 78 | 79 | if self.is_group: # wechaty群聊中,实际发送用户就是from_user 80 | self.is_at = await wechaty_msg.mention_self() 81 | if not self.is_at: # 有时候复制粘贴的消息,不算做@,但是内容里面会有@xxx,这里做一下兼容 82 | name = wechaty_msg.wechaty.user_self().name 83 | pattern = f"@{re.escape(name)}(\u2005|\u0020)" 84 | if re.search(pattern, self.content): 85 | logger.debug(f"wechaty message {self.msg_id} include at") 86 | self.is_at = True 87 | 88 | self.actual_user_id = self.from_user_id 89 | self.actual_user_nickname = self.from_user_nickname 90 | -------------------------------------------------------------------------------- /channel/wechatmp/active_reply.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import web 4 | from wechatpy import parse_message 5 | from wechatpy.replies import create_reply 6 | 7 | from 
bridge.context import * 8 | from bridge.reply import * 9 | from channel.wechatmp.common import * 10 | from channel.wechatmp.wechatmp_channel import WechatMPChannel 11 | from channel.wechatmp.wechatmp_message import WeChatMPMessage 12 | from common.log import logger 13 | from config import conf, subscribe_msg 14 | 15 | 16 | # This class is instantiated once per query 17 | class Query: 18 | def GET(self): 19 | return verify_server(web.input()) 20 | 21 | def POST(self): 22 | # Make sure to return the instance that first created, @singleton will do that. 23 | try: 24 | args = web.input() 25 | verify_server(args) 26 | channel = WechatMPChannel() 27 | message = web.data() 28 | encrypt_func = lambda x: x 29 | if args.get("encrypt_type") == "aes": 30 | logger.debug("[wechatmp] Receive encrypted post data:\n" + message.decode("utf-8")) 31 | if not channel.crypto: 32 | raise Exception("Crypto not initialized, Please set wechatmp_aes_key in config.json") 33 | message = channel.crypto.decrypt_message(message, args.msg_signature, args.timestamp, args.nonce) 34 | encrypt_func = lambda x: channel.crypto.encrypt_message(x, args.nonce, args.timestamp) 35 | else: 36 | logger.debug("[wechatmp] Receive post data:\n" + message.decode("utf-8")) 37 | msg = parse_message(message) 38 | if msg.type in ["text", "voice", "image"]: 39 | wechatmp_msg = WeChatMPMessage(msg, client=channel.client) 40 | from_user = wechatmp_msg.from_user_id 41 | content = wechatmp_msg.content 42 | message_id = wechatmp_msg.msg_id 43 | 44 | logger.info( 45 | "[wechatmp] {}:{} Receive post query {} {}: {}".format( 46 | web.ctx.env.get("REMOTE_ADDR"), 47 | web.ctx.env.get("REMOTE_PORT"), 48 | from_user, 49 | message_id, 50 | content, 51 | ) 52 | ) 53 | if msg.type == "voice" and wechatmp_msg.ctype == ContextType.TEXT and conf().get("voice_reply_voice", False): 54 | context = channel._compose_context(wechatmp_msg.ctype, content, isgroup=False, desire_rtype=ReplyType.VOICE, msg=wechatmp_msg) 55 | else: 56 | context = 
channel._compose_context(wechatmp_msg.ctype, content, isgroup=False, msg=wechatmp_msg) 57 | if context: 58 | channel.produce(context) 59 | # The reply will be sent by channel.send() in another thread 60 | return "success" 61 | elif msg.type == "event": 62 | logger.info("[wechatmp] Event {} from {}".format(msg.event, msg.source)) 63 | if msg.event in ["subscribe", "subscribe_scan"]: 64 | reply_text = subscribe_msg() 65 | if reply_text: 66 | replyPost = create_reply(reply_text, msg) 67 | return encrypt_func(replyPost.render()) 68 | else: 69 | return "success" 70 | else: 71 | logger.info("暂且不处理") 72 | return "success" 73 | except Exception as exc: 74 | logger.exception(exc) 75 | return exc 76 | -------------------------------------------------------------------------------- /lib/itchat/__init__.py: -------------------------------------------------------------------------------- 1 | from .core import Core 2 | from .config import VERSION, ASYNC_COMPONENTS 3 | from .log import set_logging 4 | 5 | if ASYNC_COMPONENTS: 6 | from .async_components import load_components 7 | else: 8 | from .components import load_components 9 | 10 | 11 | __version__ = VERSION 12 | 13 | 14 | instanceList = [] 15 | 16 | def load_async_itchat() -> Core: 17 | """load async-based itchat instance 18 | 19 | Returns: 20 | Core: the abstract interface of itchat 21 | """ 22 | from .async_components import load_components 23 | load_components(Core) 24 | return Core() 25 | 26 | 27 | def load_sync_itchat() -> Core: 28 | """load sync-based itchat instance 29 | 30 | Returns: 31 | Core: the abstract interface of itchat 32 | """ 33 | from .components import load_components 34 | load_components(Core) 35 | return Core() 36 | 37 | 38 | if ASYNC_COMPONENTS: 39 | instance = load_async_itchat() 40 | else: 41 | instance = load_sync_itchat() 42 | 43 | 44 | instanceList = [instance] 45 | 46 | # I really want to use sys.modules[__name__] = originInstance 47 | # but it makes auto-fill a real mess, so forgive me for my 
following **
# actually it took me less than 30 seconds, god bless Uganda

# Module-level convenience API: bind the default `instance` (a Core created
# above, sync or async depending on ASYNC_COMPONENTS) to module attributes so
# callers can use e.g. `itchat.auto_login()` / `itchat.send(...)` directly
# instead of holding a Core object themselves.

# components.login -- QR login flow and message polling
login = instance.login
get_QRuuid = instance.get_QRuuid
get_QR = instance.get_QR
check_login = instance.check_login
web_init = instance.web_init
show_mobile_login = instance.show_mobile_login
start_receiving = instance.start_receiving
get_msg = instance.get_msg
logout = instance.logout
# components.contact -- friend / chatroom / MP contact management
update_chatroom = instance.update_chatroom
update_friend = instance.update_friend
get_contact = instance.get_contact
get_friends = instance.get_friends
get_chatrooms = instance.get_chatrooms
get_mps = instance.get_mps
set_alias = instance.set_alias
set_pinned = instance.set_pinned
accept_friend = instance.accept_friend
get_head_img = instance.get_head_img
create_chatroom = instance.create_chatroom
set_chatroom_name = instance.set_chatroom_name
delete_member_from_chatroom = instance.delete_member_from_chatroom
add_member_into_chatroom = instance.add_member_into_chatroom
# components.messages -- outbound message / media sending
send_raw_msg = instance.send_raw_msg
send_msg = instance.send_msg
upload_file = instance.upload_file
send_file = instance.send_file
send_image = instance.send_image
send_video = instance.send_video
send = instance.send
revoke = instance.revoke
# components.hotreload -- persist / restore login session
dump_login_status = instance.dump_login_status
load_login_status = instance.load_login_status
# components.register -- high-level entry points and handler registration
auto_login = instance.auto_login
configured_reply = instance.configured_reply
msg_register = instance.msg_register
run = instance.run
# other functions -- search helpers
search_friends = instance.search_friends
search_chatrooms = instance.search_chatrooms
search_mps = instance.search_mps
# deliberate self-assignment: re-exports the name imported from .log above
set_logging = set_logging
-------------------------------------------------------------------------------- /bot/session_manager.py: -------------------------------------------------------------------------------- 1 | from common.expired_dict import ExpiredDict 2 | from common.log import logger 3 | from config import conf 4 | 5 | 6 | class Session(object): 7 | def __init__(self, session_id, system_prompt=None): 8 | self.session_id = session_id 9 | self.messages = [] 10 | if system_prompt is None: 11 | self.system_prompt = conf().get("character_desc", "") 12 | else: 13 | self.system_prompt = system_prompt 14 | 15 | # 重置会话 16 | def reset(self): 17 | system_item = {"role": "system", "content": self.system_prompt} 18 | self.messages = [system_item] 19 | 20 | def set_system_prompt(self, system_prompt): 21 | self.system_prompt = system_prompt 22 | self.reset() 23 | 24 | def add_query(self, query): 25 | user_item = {"role": "user", "content": query} 26 | self.messages.append(user_item) 27 | 28 | def add_reply(self, reply): 29 | assistant_item = {"role": "assistant", "content": reply} 30 | self.messages.append(assistant_item) 31 | 32 | def discard_exceeding(self, max_tokens=None, cur_tokens=None): 33 | raise NotImplementedError 34 | 35 | def calc_tokens(self): 36 | raise NotImplementedError 37 | 38 | 39 | class SessionManager(object): 40 | def __init__(self, sessioncls, **session_args): 41 | if conf().get("expires_in_seconds"): 42 | sessions = ExpiredDict(conf().get("expires_in_seconds")) 43 | else: 44 | sessions = dict() 45 | self.sessions = sessions 46 | self.sessioncls = sessioncls 47 | self.session_args = session_args 48 | 49 | def build_session(self, session_id, system_prompt=None): 50 | """ 51 | 如果session_id不在sessions中,创建一个新的session并添加到sessions中 52 | 如果system_prompt不会空,会更新session的system_prompt并重置session 53 | """ 54 | if session_id is None: 55 | return self.sessioncls(session_id, system_prompt, **self.session_args) 56 | 57 | if session_id not in self.sessions: 58 | self.sessions[session_id] = 
self.sessioncls(session_id, system_prompt, **self.session_args) 59 | elif system_prompt is not None: # 如果有新的system_prompt,更新并重置session 60 | self.sessions[session_id].set_system_prompt(system_prompt) 61 | session = self.sessions[session_id] 62 | return session 63 | 64 | def session_query(self, query, session_id): 65 | session = self.build_session(session_id) 66 | session.add_query(query) 67 | try: 68 | max_tokens = conf().get("conversation_max_tokens", 1000) 69 | total_tokens = session.discard_exceeding(max_tokens, None) 70 | logger.debug("prompt tokens used={}".format(total_tokens)) 71 | except Exception as e: 72 | logger.debug("Exception when counting tokens precisely for prompt: {}".format(str(e))) 73 | return session 74 | 75 | def session_reply(self, reply, session_id, total_tokens=None): 76 | session = self.build_session(session_id) 77 | session.add_reply(reply) 78 | try: 79 | max_tokens = conf().get("conversation_max_tokens", 1000) 80 | tokens_cnt = session.discard_exceeding(max_tokens, total_tokens) 81 | logger.debug("raw total_tokens={}, savesession tokens={}".format(total_tokens, tokens_cnt)) 82 | except Exception as e: 83 | logger.debug("Exception when counting tokens precisely for session: {}".format(str(e))) 84 | return session 85 | 86 | def clear_session(self, session_id): 87 | if session_id in self.sessions: 88 | del self.sessions[session_id] 89 | 90 | def clear_all_session(self): 91 | self.sessions.clear() 92 | -------------------------------------------------------------------------------- /voice/baidu/baidu_voice.py: -------------------------------------------------------------------------------- 1 | """ 2 | baidu voice service 3 | """ 4 | import json 5 | import os 6 | import time 7 | 8 | from aip import AipSpeech 9 | 10 | from bridge.reply import Reply, ReplyType 11 | from common.log import logger 12 | from common.tmp_dir import TmpDir 13 | from config import conf 14 | from voice.audio_convert import get_pcm_from_wav 15 | from voice.voice import 
Voice 16 | 17 | """ 18 | 百度的语音识别API. 19 | dev_pid: 20 | - 1936: 普通话远场 21 | - 1536:普通话(支持简单的英文识别) 22 | - 1537:普通话(纯中文识别) 23 | - 1737:英语 24 | - 1637:粤语 25 | - 1837:四川话 26 | 要使用本模块, 首先到 yuyin.baidu.com 注册一个开发者账号, 27 | 之后创建一个新应用, 然后在应用管理的"查看key"中获得 API Key 和 Secret Key 28 | 然后在 config.json 中填入这两个值, 以及 app_id, dev_pid 29 | """ 30 | 31 | 32 | class BaiduVoice(Voice): 33 | def __init__(self): 34 | try: 35 | curdir = os.path.dirname(__file__) 36 | config_path = os.path.join(curdir, "config.json") 37 | bconf = None 38 | if not os.path.exists(config_path): # 如果没有配置文件,创建本地配置文件 39 | bconf = {"lang": "zh", "ctp": 1, "spd": 5, "pit": 5, "vol": 5, "per": 0} 40 | with open(config_path, "w") as fw: 41 | json.dump(bconf, fw, indent=4) 42 | else: 43 | with open(config_path, "r") as fr: 44 | bconf = json.load(fr) 45 | 46 | self.app_id = conf().get("baidu_app_id") 47 | self.api_key = conf().get("baidu_api_key") 48 | self.secret_key = conf().get("baidu_secret_key") 49 | self.dev_id = conf().get("baidu_dev_pid") 50 | self.lang = bconf["lang"] 51 | self.ctp = bconf["ctp"] 52 | self.spd = bconf["spd"] 53 | self.pit = bconf["pit"] 54 | self.vol = bconf["vol"] 55 | self.per = bconf["per"] 56 | 57 | self.client = AipSpeech(self.app_id, self.api_key, self.secret_key) 58 | except Exception as e: 59 | logger.warn("BaiduVoice init failed: %s, ignore " % e) 60 | 61 | def voiceToText(self, voice_file): 62 | # 识别本地文件 63 | logger.debug("[Baidu] voice file name={}".format(voice_file)) 64 | pcm = get_pcm_from_wav(voice_file) 65 | res = self.client.asr(pcm, "pcm", 16000, {"dev_pid": self.dev_id}) 66 | if res["err_no"] == 0: 67 | logger.info("百度语音识别到了:{}".format(res["result"])) 68 | text = "".join(res["result"]) 69 | reply = Reply(ReplyType.TEXT, text) 70 | else: 71 | logger.info("百度语音识别出错了: {}".format(res["err_msg"])) 72 | if res["err_msg"] == "request pv too much": 73 | logger.info(" 出现这个原因很可能是你的百度语音服务调用量超出限制,或未开通付费") 74 | reply = Reply(ReplyType.ERROR, "百度语音识别出错了;{0}".format(res["err_msg"])) 75 | 
return reply 76 | 77 | def textToVoice(self, text): 78 | result = self.client.synthesis( 79 | text, 80 | self.lang, 81 | self.ctp, 82 | {"spd": self.spd, "pit": self.pit, "vol": self.vol, "per": self.per}, 83 | ) 84 | if not isinstance(result, dict): 85 | # Avoid the same filename under multithreading 86 | fileName = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3" 87 | with open(fileName, "wb") as f: 88 | f.write(result) 89 | logger.info("[Baidu] textToVoice text={} voice file name={}".format(text, fileName)) 90 | reply = Reply(ReplyType.VOICE, fileName) 91 | else: 92 | logger.error("[Baidu] textToVoice error={}".format(result)) 93 | reply = Reply(ReplyType.ERROR, "抱歉,语音合成失败") 94 | return reply 95 | -------------------------------------------------------------------------------- /bot/chatgpt/chat_gpt_session.py: -------------------------------------------------------------------------------- 1 | from bot.session_manager import Session 2 | from common.log import logger 3 | 4 | """ 5 | e.g. 
[ 6 | {"role": "system", "content": "You are a helpful assistant."}, 7 | {"role": "user", "content": "Who won the world series in 2020?"}, 8 | {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."}, 9 | {"role": "user", "content": "Where was it played?"} 10 | ] 11 | """ 12 | 13 | 14 | class ChatGPTSession(Session): 15 | def __init__(self, session_id, system_prompt=None, model="gpt-3.5-turbo"): 16 | super().__init__(session_id, system_prompt) 17 | self.model = model 18 | self.reset() 19 | 20 | def discard_exceeding(self, max_tokens, cur_tokens=None): 21 | precise = True 22 | try: 23 | cur_tokens = self.calc_tokens() 24 | except Exception as e: 25 | precise = False 26 | if cur_tokens is None: 27 | raise e 28 | logger.debug("Exception when counting tokens precisely for query: {}".format(e)) 29 | while cur_tokens > max_tokens: 30 | if len(self.messages) > 2: 31 | self.messages.pop(1) 32 | elif len(self.messages) == 2 and self.messages[1]["role"] == "assistant": 33 | self.messages.pop(1) 34 | if precise: 35 | cur_tokens = self.calc_tokens() 36 | else: 37 | cur_tokens = cur_tokens - max_tokens 38 | break 39 | elif len(self.messages) == 2 and self.messages[1]["role"] == "user": 40 | logger.warn("user message exceed max_tokens. 
total_tokens={}".format(cur_tokens)) 41 | break 42 | else: 43 | logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens, len(self.messages))) 44 | break 45 | if precise: 46 | cur_tokens = self.calc_tokens() 47 | else: 48 | cur_tokens = cur_tokens - max_tokens 49 | return cur_tokens 50 | 51 | def calc_tokens(self): 52 | return num_tokens_from_messages(self.messages, self.model) 53 | 54 | 55 | # refer to https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb 56 | def num_tokens_from_messages(messages, model): 57 | """Returns the number of tokens used by a list of messages.""" 58 | import tiktoken 59 | 60 | try: 61 | encoding = tiktoken.encoding_for_model(model) 62 | except KeyError: 63 | logger.debug("Warning: model not found. Using cl100k_base encoding.") 64 | encoding = tiktoken.get_encoding("cl100k_base") 65 | if model == "gpt-3.5-turbo" or model == "gpt-35-turbo": 66 | return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301") 67 | elif model == "gpt-4": 68 | return num_tokens_from_messages(messages, model="gpt-4-0314") 69 | elif model == "gpt-3.5-turbo-0301": 70 | tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n 71 | tokens_per_name = -1 # if there's a name, the role is omitted 72 | elif model == "gpt-4-0314": 73 | tokens_per_message = 3 74 | tokens_per_name = 1 75 | else: 76 | logger.warn(f"num_tokens_from_messages() is not implemented for model {model}. 
Returning num tokens assuming gpt-3.5-turbo-0301.") 77 | return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301") 78 | num_tokens = 0 79 | for message in messages: 80 | num_tokens += tokens_per_message 81 | for key, value in message.items(): 82 | num_tokens += len(encoding.encode(value)) 83 | if key == "name": 84 | num_tokens += tokens_per_name 85 | num_tokens += 3 # every reply is primed with <|start|>assistant<|message|> 86 | return num_tokens 87 | -------------------------------------------------------------------------------- /channel/wechatmp/README.md: -------------------------------------------------------------------------------- 1 | # 微信公众号channel 2 | 3 | 鉴于个人微信号在服务器上通过itchat登录有封号风险,这里新增了微信公众号channel,提供无风险的服务。 4 | 目前支持订阅号和服务号两种类型的公众号,它们都支持文本交互,语音和图片输入。其中个人主体的微信订阅号由于无法通过微信认证,存在回复时间限制,每天的图片和声音回复次数也有限制。 5 | 6 | ## 使用方法(订阅号,服务号类似) 7 | 8 | 在开始部署前,你需要一个拥有公网IP的服务器,以提供微信服务器和我们自己服务器的连接。或者你需要进行内网穿透,否则微信服务器无法将消息发送给我们的服务器。 9 | 10 | 此外,需要在我们的服务器上安装python的web框架web.py和wechatpy。 11 | 以ubuntu为例(在ubuntu 22.04上测试): 12 | ``` 13 | pip3 install web.py 14 | pip3 install wechatpy 15 | ``` 16 | 17 | 然后在[微信公众平台](https://mp.weixin.qq.com)注册一个自己的公众号,类型选择订阅号,主体为个人即可。 18 | 19 | 然后根据[接入指南](https://developers.weixin.qq.com/doc/offiaccount/Basic_Information/Access_Overview.html)的说明,在[微信公众平台](https://mp.weixin.qq.com)的“设置与开发”-“基本配置”-“服务器配置”中填写服务器地址`URL`和令牌`Token`。`URL`填写格式为`http://url/wx`,可使用IP(成功几率看脸),`Token`是你自己编的一个特定的令牌。消息加解密方式如果选择了需要加密的模式,需要在配置中填写`wechatmp_aes_key`。 20 | 21 | 相关的服务器验证代码已经写好,你不需要再添加任何代码。你只需要在本项目根目录的`config.json`中添加 22 | ``` 23 | "channel_type": "wechatmp", # 如果通过了微信认证,将"wechatmp"替换为"wechatmp_service",可极大的优化使用体验 24 | "wechatmp_token": "xxxx", # 微信公众平台的Token 25 | "wechatmp_port": 8080, # 微信公众平台的端口,需要端口转发到80或443 26 | "wechatmp_app_id": "xxxx", # 微信公众平台的appID 27 | "wechatmp_app_secret": "xxxx", # 微信公众平台的appsecret 28 | "wechatmp_aes_key": "", # 微信公众平台的EncodingAESKey,加密模式需要 29 | "single_chat_prefix": [""], # 推荐设置,任意对话都可以触发回复,不添加前缀 30 | "single_chat_reply_prefix": 
"", # 推荐设置,回复不设置前缀 31 | "plugin_trigger_prefix": "&", # 推荐设置,在手机微信客户端中,$%^等符号与中文连在一起时会自动显示一段较大的间隔,用户体验不好。请不要使用管理员指令前缀"#",这会造成未知问题。 32 | ``` 33 | 然后运行`python3 app.py`启动web服务器。这里会默认监听8080端口,但是微信公众号的服务器配置只支持80/443端口,有两种方法来解决这个问题。第一个是推荐的方法,使用端口转发命令将80端口转发到8080端口: 34 | ``` 35 | sudo iptables -t nat -A PREROUTING -p tcp --dport 80 -j REDIRECT --to-port 8080 36 | sudo iptables-save > /etc/iptables/rules.v4 37 | ``` 38 | 第二个方法是让python程序直接监听80端口,在配置文件中设置`"wechatmp_port": 80` ,在linux上需要使用`sudo python3 app.py`启动程序。然而这会导致一系列环境和权限问题,因此不是推荐的方法。 39 | 40 | 443端口同理,注意需要支持SSL,也就是https的访问,在`wechatmp_channel.py`中需要修改相应的证书路径。 41 | 42 | 程序启动并监听端口后,在刚才的“服务器配置”中点击`提交`即可验证你的服务器。 43 | 随后在[微信公众平台](https://mp.weixin.qq.com)启用服务器,关闭手动填写规则的自动回复,即可实现ChatGPT的自动回复。 44 | 45 | 之后需要在公众号开发信息下将本机IP加入到IP白名单。 46 | 47 | 不然在启用后,发送语音、图片等消息可能会遇到如下报错: 48 | ``` 49 | 'errcode': 40164, 'errmsg': 'invalid ip xx.xx.xx.xx not in whitelist rid 50 | ``` 51 | 52 | 53 | ## 个人微信公众号的限制 54 | 由于人微信公众号不能通过微信认证,所以没有客服接口,因此公众号无法主动发出消息,只能被动回复。而微信官方对被动回复有5秒的时间限制,最多重试2次,因此最多只有15秒的自动回复时间窗口。因此如果问题比较复杂或者我们的服务器比较忙,ChatGPT的回答就没办法及时回复给用户。为了解决这个问题,这里做了回答缓存,它需要你在回复超时后,再次主动发送任意文字(例如1)来尝试拿到回答缓存。为了优化使用体验,目前设置了两分钟(120秒)的timeout,用户在至多两分钟后即可得到查询到回复或者错误原因。 55 | 56 | 另外,由于微信官方的限制,自动回复有长度限制。因此这里将ChatGPT的回答进行了拆分,以满足限制。 57 | 58 | ## 私有api_key 59 | 公共api有访问频率限制(免费账号每分钟最多3次ChatGPT的API调用),这在服务多人的时候会遇到问题。因此这里多加了一个设置私有api_key的功能。目前通过godcmd插件的命令来设置私有api_key。 60 | 61 | ## 语音输入 62 | 利用微信自带的语音识别功能,提供语音输入能力。需要在公众号管理页面的“设置与开发”->“接口权限”页面开启“接收语音识别结果”。 63 | 64 | ## 语音回复 65 | 请在配置文件中添加以下词条: 66 | ``` 67 | "voice_reply_voice": true, 68 | ``` 69 | 这样公众号将会用语音回复语音消息,实现语音对话。 70 | 71 | 默认的语音合成引擎是`google`,它是免费使用的。 72 | 73 | 如果要选择其他的语音合成引擎,请添加以下配置项: 74 | ``` 75 | "text_to_voice": "pytts" 76 | ``` 77 | 78 | pytts是本地的语音合成引擎。还支持baidu,azure,这些你需要自行配置相关的依赖和key。 79 | 80 | 如果使用pytts,在ubuntu上需要安装如下依赖: 81 | ``` 82 | sudo apt update 83 | sudo apt install espeak 84 | sudo apt install ffmpeg 85 | python3 -m pip install pyttsx3 86 | ``` 87 | 
class WechatMessage(ChatMessage):
    """Adapter that normalizes a raw itchat message dict into the channel-agnostic ChatMessage interface."""

    def __init__(self, itchat_msg, is_group=False):
        """Parse an itchat message.

        :param itchat_msg: raw message object produced by itchat (supports dict-style access).
        :param is_group: True when the message originates from a group chat.
        :raises NotImplementedError: for message/note types this channel does not handle.
        """
        super().__init__(itchat_msg)
        self.msg_id = itchat_msg["MsgId"]
        self.create_time = itchat_msg["CreateTime"]
        self.is_group = is_group

        if itchat_msg["Type"] == TEXT:
            self.ctype = ContextType.TEXT
            self.content = itchat_msg["Text"]
        elif itchat_msg["Type"] == VOICE:
            self.ctype = ContextType.VOICE
            self.content = TmpDir().path() + itchat_msg["FileName"]  # content holds a path inside the temp dir
            # Download is deferred: the media file is only fetched when _prepare_fn is invoked.
            self._prepare_fn = lambda: itchat_msg.download(self.content)
        elif itchat_msg["Type"] == PICTURE and itchat_msg["MsgType"] == 3:
            self.ctype = ContextType.IMAGE
            self.content = TmpDir().path() + itchat_msg["FileName"]  # content holds a path inside the temp dir
            self._prepare_fn = lambda: itchat_msg.download(self.content)
        elif itchat_msg["Type"] == NOTE and itchat_msg["MsgType"] == 10000:
            # System notices: join-group announcements and "pat-pat" (拍一拍) pokes.
            if is_group and ("加入群聊" in itchat_msg["Content"] or "加入了群聊" in itchat_msg["Content"]):
                self.ctype = ContextType.JOIN_GROUP
                self.content = itchat_msg["Content"]
                # Only the nickname is recoverable here; actual_user_id would still be the bot's own id.
                if "加入了群聊" in itchat_msg["Content"]:
                    self.actual_user_nickname = re.findall(r"\"(.*?)\"", itchat_msg["Content"])[-1]
                elif "加入群聊" in itchat_msg["Content"]:
                    self.actual_user_nickname = re.findall(r"\"(.*?)\"", itchat_msg["Content"])[0]
            elif "拍了拍我" in itchat_msg["Content"]:
                self.ctype = ContextType.PATPAT
                self.content = itchat_msg["Content"]
                if is_group:
                    self.actual_user_nickname = re.findall(r"\"(.*?)\"", itchat_msg["Content"])[0]
            else:
                raise NotImplementedError("Unsupported note message: " + itchat_msg["Content"])
        else:
            raise NotImplementedError("Unsupported message type: Type:{} MsgType:{}".format(itchat_msg["Type"], itchat_msg["MsgType"]))

        self.from_user_id = itchat_msg["FromUserName"]
        self.to_user_id = itchat_msg["ToUserName"]

        user_id = itchat.instance.storageClass.userName
        nickname = itchat.instance.storageClass.nickName

        # from_user_id/to_user_id are rarely consumed, but fill the nicknames anyway for consistency.
        # The code below is tedious; in short: everything that can be filled is filled.
        if self.from_user_id == user_id:
            self.from_user_nickname = nickname
        if self.to_user_id == user_id:
            self.to_user_nickname = nickname
        try:  # the 'User' field may be absent for strangers
            self.other_user_id = itchat_msg["User"]["UserName"]
            self.other_user_nickname = itchat_msg["User"]["NickName"]
            if self.other_user_id == self.from_user_id:
                self.from_user_nickname = self.other_user_nickname
            if self.other_user_id == self.to_user_id:
                self.to_user_nickname = self.other_user_nickname
        except KeyError as e:  # fall back when the peer info is occasionally missing
            logger.warn("[WX]get other_user_id failed: " + str(e))
            # Whichever side is not the bot itself is treated as "the other user".
            if self.from_user_id == user_id:
                self.other_user_id = self.to_user_id
            else:
                self.other_user_id = self.from_user_id

        if self.is_group:
            self.is_at = itchat_msg["IsAt"]
            self.actual_user_id = itchat_msg["ActualUserName"]
            # For JOIN_GROUP/PATPAT the nickname was already extracted above from the notice text.
            if self.ctype not in [ContextType.JOIN_GROUP, ContextType.PATPAT]:
                self.actual_user_nickname = itchat_msg["ActualNickName"]
e: 53 | logger.warn("[Banwords] init failed, ignore or see https://github.com/zhayujie/chatgpt-on-wechat/tree/master/plugins/banwords .") 54 | raise e 55 | 56 | def on_handle_context(self, e_context: EventContext): 57 | if e_context["context"].type not in [ 58 | ContextType.TEXT, 59 | ContextType.IMAGE_CREATE, 60 | ]: 61 | return 62 | 63 | content = e_context["context"].content 64 | logger.debug("[Banwords] on_handle_context. content: %s" % content) 65 | if self.action == "ignore": 66 | f = self.searchr.FindFirst(content) 67 | if f: 68 | logger.info("[Banwords] %s in message" % f["Keyword"]) 69 | e_context.action = EventAction.BREAK_PASS 70 | return 71 | elif self.action == "replace": 72 | if self.searchr.ContainsAny(content): 73 | reply = Reply(ReplyType.INFO, "发言中包含敏感词,请重试: \n" + self.searchr.Replace(content)) 74 | e_context["reply"] = reply 75 | e_context.action = EventAction.BREAK_PASS 76 | return 77 | 78 | def on_decorate_reply(self, e_context: EventContext): 79 | if e_context["reply"].type not in [ReplyType.TEXT]: 80 | return 81 | 82 | reply = e_context["reply"] 83 | content = reply.content 84 | if self.reply_action == "ignore": 85 | f = self.searchr.FindFirst(content) 86 | if f: 87 | logger.info("[Banwords] %s in reply" % f["Keyword"]) 88 | e_context["reply"] = None 89 | e_context.action = EventAction.BREAK_PASS 90 | return 91 | elif self.reply_action == "replace": 92 | if self.searchr.ContainsAny(content): 93 | reply = Reply(ReplyType.INFO, "已替换回复中的敏感词: \n" + self.searchr.Replace(content)) 94 | e_context["reply"] = reply 95 | e_context.action = EventAction.CONTINUE 96 | return 97 | 98 | def get_help_text(self, **kwargs): 99 | return "过滤消息中的敏感词。" 100 | -------------------------------------------------------------------------------- /voice/audio_convert.py: -------------------------------------------------------------------------------- 1 | import shutil 2 | import wave 3 | 4 | import pysilk 5 | from pydub import AudioSegment 6 | 7 | sil_supports = [8000, 
def any_to_mp3(any_path, mp3_path):
    """Convert an audio file of any supported format to mp3.

    :param any_path: source audio file path (mp3/sil/silk/slk or anything pydub can read).
    :param mp3_path: destination mp3 path.
    """
    if any_path.endswith(".mp3"):
        shutil.copy2(any_path, mp3_path)
        return
    if any_path.endswith((".sil", ".silk", ".slk")):
        # Decode silk to wav data in place, then transcode that file below.
        # Bug fix: the old code reassigned any_path = mp3_path here, making
        # AudioSegment.from_file() read a file that does not exist yet.
        sil_to_wav(any_path, any_path)
    audio = AudioSegment.from_file(any_path)
    audio.export(mp3_path, format="mp3")
def split_audio(file_path, max_segment_length_ms=60000):
    """Split an audio file into segments no longer than max_segment_length_ms.

    :returns: (total_length_ms, list_of_file_paths). When the audio already
              fits in one segment, the original path is returned unchanged.
    """
    audio = AudioSegment.from_file(file_path)
    total_ms = len(audio)
    if total_ms <= max_segment_length_ms:
        return total_ms, [file_path]

    dot = file_path.rindex(".")
    prefix = file_path[:dot]
    ext = file_path[dot + 1 :]

    out_paths = []
    for index, offset in enumerate(range(0, total_ms, max_segment_length_ms), start=1):
        piece = audio[offset : min(total_ms, offset + max_segment_length_ms)]
        out_path = f"{prefix}_{index}.{ext}"
        piece.export(out_path, format=ext)
        out_paths.append(out_path)
    return total_ms, out_paths
def dump_login_status(self, fileDir=None):
    """Pickle the current login state (version, loginInfo, cookies, storage) to fileDir for hot reload.

    :param fileDir: target file path; defaults to self.hotReloadDir.
    :raises Exception: when fileDir is not writable.
    """
    fileDir = fileDir or self.hotReloadDir
    # Probe that fileDir is writable before dumping state.
    # Fix: the old bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # only filesystem errors should be translated into 'Incorrect fileDir'.
    try:
        with open(fileDir, 'w') as f:
            f.write('itchat - DELETE THIS')
        os.remove(fileDir)
    except OSError:
        raise Exception('Incorrect fileDir')
    status = {
        'version': VERSION,
        'loginInfo': self.loginInfo,
        'cookies': self.s.cookies.get_dict(),
        'storage': self.storageClass.dumps()}
    with open(fileDir, 'wb') as f:
        pickle.dump(status, f)
    logger.debug('Dump login status for hot reload successfully.')
def load_last_login_status(session, cookiesDict):
    """Restore a minimal cookie set on the requests session for a push login.

    Called after a cached hot-reload status was rejected by the server; it keeps
    just enough cookies (uin, auth ticket, ...) for wechat to offer push login.

    :param session: requests.Session to receive the reconstructed cookie jar.
    :param cookiesDict: cookie dict previously saved by dump_login_status.
    """
    try:
        session.cookies = requests.utils.cookiejar_from_dict({
            'webwxuvid': cookiesDict['webwxuvid'],
            'webwx_auth_ticket': cookiesDict['webwx_auth_ticket'],
            'login_frequency': '2',
            'last_wxuin': cookiesDict['wxuin'],
            # '_expired' suffix marks the previous load time as stale.
            'wxloadtime': cookiesDict['wxloadtime'] + '_expired',
            'wxpluginkey': cookiesDict['wxloadtime'],
            'wxuin': cookiesDict['wxuin'],
            'mm_lang': 'zh_CN',
            'MM_WX_NOTIFY_STATE': '1',
            'MM_WX_SOUND_STATE': '1', })
    # NOTE(review): bare except — any missing cookie key (KeyError) is treated
    # as a cookie-format change and only logged; best-effort by design.
    except:
        logger.info('Load status for push login failed, we may have experienced a cookies change.')
        logger.info('If you are using the newest version of itchat, you may report a bug.')
$tool 命令: 根据给出的{命令}使用一些可用工具尽力为你得到结果。 7 | $tool reset: 重置工具。 8 | ``` 9 | ### 本插件所有工具同步存放至专用仓库:[chatgpt-tool-hub](https://github.com/goldfishh/chatgpt-tool-hub) 10 | 11 | 12 | ## 使用说明 13 | 使用该插件后将默认使用4个工具, 无需额外配置长期生效: 14 | ### 1. python 15 | ###### python解释器,使用它来解释执行python指令,可以配合你想要chatgpt生成的代码输出结果或执行事务 16 | 17 | ### 2. 访问网页的工具汇总(默认url-get) 18 | 19 | #### 2.1 url-get 20 | ###### 往往用来获取某个网站具体内容,结果可能会被反爬策略影响 21 | 22 | #### 2.2 browser 23 | ###### 浏览器,功能与2.1类似,但能更好模拟,不会被识别为爬虫影响获取网站内容 24 | 25 | > 注1:url-get默认配置、browser需额外配置,browser依赖google-chrome,你需要提前安装好 26 | 27 | > 注2:当检测到长文本时会进入summary tool总结长文本,tokens可能会大量消耗! 28 | 29 | 这是debian端安装google-chrome教程,其他系统请自行查找 30 | > https://www.linuxjournal.com/content/how-can-you-install-google-browser-debian 31 | 32 | ### 3. terminal 33 | ###### 在你运行的电脑里执行shell命令,可以配合你想要chatgpt生成的代码使用,给予自然语言控制手段 34 | 35 | > terminal调优记录:https://github.com/zhayujie/chatgpt-on-wechat/issues/776#issue-1659347640 36 | 37 | ### 4. meteo-weather 38 | ###### 回答你有关天气的询问, 需要获取时间、地点上下文信息,本工具使用了[meteo open api](https://open-meteo.com/) 39 | 注:该工具需要较高的对话技巧,不保证你问的任何问题均能得到满意的回复 40 | 41 | > meteo调优记录:https://github.com/zhayujie/chatgpt-on-wechat/issues/776#issuecomment-1500771334 42 | 43 | ## 使用本插件对话(prompt)技巧 44 | ### 1. 有指引的询问 45 | #### 例如: 46 | - 总结这个链接的内容 https://github.com/goldfishh/chatgpt-tool-hub 47 | - 使用Terminal执行curl cip.cc 48 | - 使用python查询今天日期 49 | 50 | ### 2. 使用搜索引擎工具 51 | - 如果有搜索工具就能让chatgpt获取到你的未传达清楚的上下文信息,比如chatgpt不知道你的地理位置,现在时间等,所以无法查询到天气 52 | 53 | ## 其他工具 54 | 55 | ### 5. wikipedia 56 | ###### 可以回答你想要知道确切的人事物 57 | 58 | ### 6. news 新闻类工具集合 59 | 60 | > news更新:0.4版本对新闻类工具做了整合,配置文件只要加入`news`一个工具名就会自动加载所有新闻类工具 61 | 62 | #### 6.1. news-api * 63 | ###### 从全球 80,000 多个信息源中获取当前和历史新闻文章 64 | 65 | #### 6.2. morning-news * 66 | ###### 每日60秒早报,每天凌晨一点更新,本工具使用了[alapi-每日60秒早报](https://alapi.cn/api/view/93) 67 | 68 | ```text 69 | 可配置参数: 70 | 1. morning_news_use_llm: 是否使用LLM润色结果,默认false(可能会慢) 71 | ``` 72 | 73 | > 该tool每天返回内容相同 74 | 75 | #### 6.3. 
finance-news 76 | ###### 获取实时的金融财政新闻 77 | 78 | > 该工具需要解决browser tool 的google-chrome依赖安装 79 | 80 | 81 | 82 | ### 7. bing-search * 83 | ###### bing搜索引擎,从此你不用再烦恼搜索要用哪些关键词 84 | 85 | ### 8. wolfram-alpha * 86 | ###### 知识搜索引擎、科学问答系统,常用于专业学科计算 87 | 88 | ### 9. google-search * 89 | ###### google搜索引擎,申请流程较bing-search繁琐 90 | 91 | ### 10. arxiv 92 | ###### 用于查找论文 93 | 94 | ```text 95 | 可配置参数: 96 | 1. arxiv_summary: 是否使用总结工具,默认true, 当为false时会直接返回论文的标题、作者、发布时间、摘要、分类、备注、pdf链接等内容 97 | ``` 98 | 99 | > 0.4.2更新,例子:帮我找一篇吴恩达写的论文 100 | 101 | ### 11. summary 102 | ###### 总结工具,该工具必须输入一个本地文件的绝对路径 103 | 104 | > 该工具目前是和其他工具配合使用,暂未测试单独使用效果 105 | 106 | ### 12. image2text 107 | ###### 将图片转换成文字,底层调用imageCaption模型,该工具必须输入一个本地文件的绝对路径 108 | 109 | ### 13. searxng-search * 110 | ###### 一个私有化的搜索引擎工具 111 | 112 | > 安装教程:https://docs.searxng.org/admin/installation.html 113 | 114 | --- 115 | 116 | ###### 注1:带*工具需要获取api-key才能使用(在config.json内的kwargs添加项),部分工具需要外网支持 117 | #### [申请方法](https://github.com/goldfishh/chatgpt-tool-hub/blob/master/docs/apply_optional_tool.md) 118 | 119 | ## config.json 配置说明 120 | ###### 默认工具无需配置,其它工具需手动配置,一个例子: 121 | ```json 122 | { 123 | "tools": ["wikipedia", "你想要添加的其他工具"], // 填入你想用到的额外工具名 124 | "kwargs": { 125 | "debug": true, // 当你遇到问题求助时,需要配置 126 | "request_timeout": 120, // openai接口超时时间 127 | "no_default": false, // 是否不使用默认的4个工具 128 | // 带*工具需要申请api-key,在这里填入,api_name参考前述`申请方法` 129 | } 130 | } 131 | 132 | ``` 133 | 注:config.json文件非必须,未创建仍可使用本tool;带*工具需在kwargs填入对应api-key键值对 134 | - `tools`:本插件初始化时加载的工具, 上述一级标题即是对应工具名称,带*工具必须在kwargs中配置相应api-key 135 | - `kwargs`:工具执行时的配置,一般在这里存放**api-key**,或环境配置 136 | - `debug`: 输出chatgpt-tool-hub额外信息用于调试 137 | - `request_timeout`: 访问openai接口的超时时间,默认与wechat-on-chatgpt配置一致,可单独配置 138 | - `no_default`: 用于配置默认加载4个工具的行为,如果为true则仅使用tools列表工具,不加载默认工具 139 | - `top_k_results`: 控制所有有关搜索的工具返回条目数,数字越高则参考信息越多,但无用信息可能干扰判断,该值一般为2 140 | - `model_name`: 用于控制tool插件底层使用的llm模型,目前暂未测试3.5以外的模型,一般保持默认 141 | 142 | --- 143 | 144 | ## 备注 145 | - 
async def load_login_status(self, fileDir,
        loginCallback=None, exitCallback=None):
    """Try to resume a previous login from the pickle written by dump_login_status.

    :param fileDir: path of the pickled status file.
    :param loginCallback: awaited with the bot's userName on success.
    :param exitCallback: forwarded to start_receiving.
    :returns: ReturnValue whose BaseResponse.Ret is 0 on success,
              -1002 (file missing/unreadable), -1005 (version mismatch),
              or -1003 (server rejected the cached session).
    """
    try:
        with open(fileDir, 'rb') as f:
            j = pickle.load(f)
    except Exception as e:
        logger.debug('No such file, loading login status failed.')
        return ReturnValue({'BaseResponse': {
            'ErrMsg': 'No such file, loading login status failed.',
            'Ret': -1002, }})

    # A status dumped by a different itchat version is discarded outright.
    if j.get('version', '') != VERSION:
        logger.debug(('you have updated itchat from %s to %s, ' +
            'so cached status is ignored') % (
                j.get('version', 'old version'), VERSION))
        return ReturnValue({'BaseResponse': {
            'ErrMsg': 'cached status ignored because of version',
            'Ret': -1005, }})
    self.loginInfo = j['loginInfo']
    self.loginInfo['User'] = templates.User(self.loginInfo['User'])
    self.loginInfo['User'].core = self
    self.s.cookies = requests.utils.cookiejar_from_dict(j['cookies'])
    self.storageClass.loads(j['storage'])
    # Probe the server with the restored session.
    # NOTE(review): self.get_msg() and self.logout() are not awaited here even
    # though this is the async variant — confirm whether they are coroutines.
    try:
        msgList, contactList = self.get_msg()
    except:
        msgList = contactList = None
    # NOTE(review): `(msgList or contactList) is None` is only True when both
    # are None (or the last falsy one is None) — empty lists fall through to
    # the success branch; confirm this is intended.
    if (msgList or contactList) is None:
        self.logout()
        await load_last_login_status(self.s, j['cookies'])
        logger.debug('server refused, loading login status failed.')
        return ReturnValue({'BaseResponse': {
            'ErrMsg': 'server refused, loading login status failed.',
            'Ret': -1003, }})
    else:
        # Replay contacts and pending messages received during the probe.
        if contactList:
            for contact in contactList:
                if '@@' in contact['UserName']:  # '@@' marks a chatroom UserName
                    update_local_chatrooms(self, [contact])
                else:
                    update_local_friends(self, [contact])
        if msgList:
            msgList = produce_msg(self, msgList)
            for msg in msgList: self.msgList.put(msg)
        await self.start_receiving(exitCallback)
        logger.debug('loading login status succeeded.')
        if hasattr(loginCallback, '__call__'):
            await loginCallback(self.storageClass.userName)
        return ReturnValue({'BaseResponse': {
            'ErrMsg': 'loading login status succeeded.',
            'Ret': 0, }})
def configured_reply(self):
    ''' determine the type of message and reply if its method is defined
        however, I use a strange way to determine whether a msg is from massive platform
        I haven't found a better solution here
        The main problem I'm worrying about is the mismatching of new friends added on phone
        If you have any good idea, pleeeease report an issue. I will be more than grateful.
    '''
    # Pull one message off the queue; Queue.Empty just means nothing to do.
    try:
        msg = self.msgList.get(timeout=1)
    except Queue.Empty:
        pass
    else:
        # Bug fix: replyFn must always be bound. Previously, a msg['User']
        # matching none of the three templates left replyFn undefined and
        # the `if replyFn is None` test below raised NameError.
        replyFn = None
        if isinstance(msg['User'], templates.User):
            replyFn = self.functionDict['FriendChat'].get(msg['Type'])
        elif isinstance(msg['User'], templates.MassivePlatform):
            replyFn = self.functionDict['MpChat'].get(msg['Type'])
        elif isinstance(msg['User'], templates.Chatroom):
            replyFn = self.functionDict['GroupChat'].get(msg['Type'])
        if replyFn is None:
            r = None
        else:
            try:
                r = replyFn(msg)
                if r is not None:
                    self.send(r, msg.get('FromUserName'))
            except:
                logger.warning(traceback.format_exc())
# https://github.com/bupticybee/ChineseAiDungeonChatGPT
class StoryTeller:
    """Builds the prompts driving a text-adventure ("dungeon") game on top of an LLM bot session."""

    def __init__(self, bot, sessionid, story):
        """Start a fresh story: clears any existing bot session for sessionid.

        :param bot: chat bot whose `sessions` manager holds conversation state.
        :param sessionid: session key used to isolate this game's context.
        :param story: opening background text of the adventure.
        """
        self.bot = bot
        self.sessionid = sessionid
        bot.sessions.clear_session(sessionid)
        self.first_interact = True
        self.story = story

    def reset(self):
        """Clear the bot session and rewind to the story opening."""
        self.bot.sessions.clear_session(self.sessionid)
        self.first_interact = True

    def action(self, user_action):
        """Return the LLM prompt for the player's next move.

        Bug fix: guard the trailing-period normalization against an empty
        string (user_action[-1] previously raised IndexError on "").
        """
        if user_action and user_action[-1] != "。":
            user_action = user_action + "。"
        if self.first_interact:
            # First turn: prepend the game-master instructions and the背景 story.
            prompt = (
                """现在来充当一个文字冒险游戏,描述时候注意节奏,不要太快,仔细描述各个人物的心情和周边环境。一次只需写四到六句话。
开头是,"""
                + self.story
                + " "
                + user_action
            )
            self.first_interact = False
        else:
            # Subsequent turns only ask for a short continuation.
            prompt = """继续,一次只需要续写四到六句话,总共就只讲5分钟内发生的事情。""" + user_action
        return prompt
    def on_handle_context(self, e_context: EventContext):
        """Handle a text context: start/stop the dungeon game or turn player input into a story prompt.

        Only runs for TEXT contexts and OpenAI-family bots. While a game is
        active for the session, every message is rewritten into a StoryTeller
        prompt and passed on to the bot (EventAction.BREAK).
        """
        if e_context["context"].type != ContextType.TEXT:
            return
        bottype = Bridge().get_bot_type("chat")
        if bottype not in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE]:
            return
        bot = Bridge().get_bot("chat")
        content = e_context["context"].content[:]
        # clist[0] is the (possible) command token, clist[1] the optional argument.
        # NOTE(review): empty content would make clist == [] and clist[0] raise
        # IndexError — confirm empty TEXT contexts cannot reach this handler.
        clist = e_context["context"].content.split(maxsplit=1)
        sessionid = e_context["context"]["session_id"]
        logger.debug("[Dungeon] on_handle_context. content: %s" % clist)
        trigger_prefix = conf().get("plugin_trigger_prefix", "$")
        if clist[0] == f"{trigger_prefix}停止冒险":
            if sessionid in self.games:
                self.games[sessionid].reset()
                del self.games[sessionid]
            reply = Reply(ReplyType.INFO, "冒险结束!")
            e_context["reply"] = reply
            e_context.action = EventAction.BREAK_PASS
        elif clist[0] == f"{trigger_prefix}开始冒险" or sessionid in self.games:
            # Starting a new game (or restarting an existing one with the command).
            if sessionid not in self.games or clist[0] == f"{trigger_prefix}开始冒险":
                if len(clist) > 1:
                    story = clist[1]
                else:
                    story = "你在树林里冒险,指不定会从哪里蹦出来一些奇怪的东西,你握紧手上的手枪,希望这次冒险能够找到一些值钱的东西,你往树林深处走去。"
                self.games[sessionid] = StoryTeller(bot, sessionid, story)
                reply = Reply(ReplyType.INFO, "冒险开始,你可以输入任意内容,让故事继续下去。故事背景是:" + story)
                e_context["reply"] = reply
                e_context.action = EventAction.BREAK_PASS  # event finished; skip default context handling
            else:
                # Game in progress: rewrite the user's message into a story prompt.
                prompt = self.games[sessionid].action(content)
                e_context["context"].type = ContextType.TEXT
                e_context["context"].content = prompt
                e_context.action = EventAction.BREAK  # event finished; do NOT skip default context handling
102 | trigger_prefix = conf().get("plugin_trigger_prefix", "$") 103 | help_text = f"{trigger_prefix}开始冒险 " + "背景故事: 开始一个基于{背景故事}的文字冒险,之后你的所有消息会协助完善这个故事。\n" + f"{trigger_prefix}停止冒险: 结束游戏。\n" 104 | if kwargs.get("verbose") == True: 105 | help_text += f"\n命令例子: '{trigger_prefix}开始冒险 你在树林里冒险,指不定会从哪里蹦出来一些奇怪的东西,你握紧手上的手枪,希望这次冒险能够找到一些值钱的东西,你往树林深处走去。'" 106 | return help_text 107 | -------------------------------------------------------------------------------- /voice/azure/azure_voice.py: -------------------------------------------------------------------------------- 1 | """ 2 | azure voice service 3 | """ 4 | import json 5 | import os 6 | import time 7 | 8 | import azure.cognitiveservices.speech as speechsdk 9 | from langid import classify 10 | 11 | from bridge.reply import Reply, ReplyType 12 | from common.log import logger 13 | from common.tmp_dir import TmpDir 14 | from config import conf 15 | from voice.voice import Voice 16 | 17 | """ 18 | Azure voice 19 | 主目录设置文件中需填写azure_voice_api_key和azure_voice_region 20 | 21 | 查看可用的 voice: https://speech.microsoft.com/portal/voicegallery 22 | 23 | """ 24 | 25 | 26 | class AzureVoice(Voice): 27 | def __init__(self): 28 | try: 29 | curdir = os.path.dirname(__file__) 30 | config_path = os.path.join(curdir, "config.json") 31 | config = None 32 | if not os.path.exists(config_path): # 如果没有配置文件,创建本地配置文件 33 | config = { 34 | "speech_synthesis_voice_name": "zh-CN-XiaoxiaoNeural", # 识别不出时的默认语音 35 | "auto_detect": True, # 是否自动检测语言 36 | "speech_synthesis_zh": "zh-CN-XiaozhenNeural", 37 | "speech_synthesis_en": "en-US-JacobNeural", 38 | "speech_synthesis_ja": "ja-JP-AoiNeural", 39 | "speech_synthesis_ko": "ko-KR-SoonBokNeural", 40 | "speech_synthesis_de": "de-DE-LouisaNeural", 41 | "speech_synthesis_fr": "fr-FR-BrigitteNeural", 42 | "speech_synthesis_es": "es-ES-LaiaNeural", 43 | "speech_recognition_language": "zh-CN", 44 | } 45 | with open(config_path, "w") as fw: 46 | json.dump(config, fw, indent=4) 47 | else: 48 | with open(config_path, 
"r") as fr: 49 | config = json.load(fr) 50 | self.config = config 51 | self.api_key = conf().get("azure_voice_api_key") 52 | self.api_region = conf().get("azure_voice_region") 53 | self.speech_config = speechsdk.SpeechConfig(subscription=self.api_key, region=self.api_region) 54 | self.speech_config.speech_synthesis_voice_name = self.config["speech_synthesis_voice_name"] 55 | self.speech_config.speech_recognition_language = self.config["speech_recognition_language"] 56 | except Exception as e: 57 | logger.warn("AzureVoice init failed: %s, ignore " % e) 58 | 59 | def voiceToText(self, voice_file): 60 | audio_config = speechsdk.AudioConfig(filename=voice_file) 61 | speech_recognizer = speechsdk.SpeechRecognizer(speech_config=self.speech_config, audio_config=audio_config) 62 | result = speech_recognizer.recognize_once() 63 | if result.reason == speechsdk.ResultReason.RecognizedSpeech: 64 | logger.info("[Azure] voiceToText voice file name={} text={}".format(voice_file, result.text)) 65 | reply = Reply(ReplyType.TEXT, result.text) 66 | else: 67 | cancel_details = result.cancellation_details 68 | logger.error("[Azure] voiceToText error, result={}, errordetails={}".format(result, cancel_details.error_details)) 69 | reply = Reply(ReplyType.ERROR, "抱歉,语音识别失败") 70 | return reply 71 | 72 | def textToVoice(self, text): 73 | if self.config.get("auto_detect"): 74 | lang = classify(text)[0] 75 | key = "speech_synthesis_" + lang 76 | if key in self.config: 77 | logger.info("[Azure] textToVoice auto detect language={}, voice={}".format(lang, self.config[key])) 78 | self.speech_config.speech_synthesis_voice_name = self.config[key] 79 | else: 80 | self.speech_config.speech_synthesis_voice_name = self.config["speech_synthesis_voice_name"] 81 | else: 82 | self.speech_config.speech_synthesis_voice_name = self.config["speech_synthesis_voice_name"] 83 | # Avoid the same filename under multithreading 84 | fileName = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 
def load_register(core):
    """Attach the auto-reply API (login, reply loop, handler registration)
    onto the itchat *core* object."""
    for attr_name, handler in (
        ('auto_login', auto_login),
        ('configured_reply', configured_reply),
        ('msg_register', msg_register),
        ('run', run),
    ):
        setattr(core, attr_name, handler)
async def configured_reply(self, event_stream, payload, message_container):
    ''' determine the type of message and reply if its method is defined
    however, I use a strange way to determine whether a msg is from massive platform
    I haven't found a better solution here
    The main problem I'm worrying about is the mismatching of new friends added on phone
    If you have any good idea, pleeeease report an issue. I will be more than grateful.
    '''
    try:
        msg = self.msgList.get(timeout=1)
        # Index the message so callers can look it up by id later.
        if 'MsgId' in msg:
            message_container[msg['MsgId']] = msg
    except Queue.Empty:
        pass
    else:
        # Fix: default to None. Previously replyFn stayed unbound (NameError)
        # when msg['User'] matched none of the known contact templates.
        replyFn = None
        if isinstance(msg['User'], templates.User):
            replyFn = self.functionDict['FriendChat'].get(msg['Type'])
        elif isinstance(msg['User'], templates.MassivePlatform):
            replyFn = self.functionDict['MpChat'].get(msg['Type'])
        elif isinstance(msg['User'], templates.Chatroom):
            replyFn = self.functionDict['GroupChat'].get(msg['Type'])
        if replyFn is not None:
            try:
                r = await replyFn(msg)
                if r is not None:
                    await self.send(r, msg.get('FromUserName'))
            except Exception:
                # Keep the reply loop alive on handler errors, but let
                # KeyboardInterrupt/SystemExit propagate to run()'s handler
                # (the old bare `except:` swallowed ^C here).
                logger.warning(traceback.format_exc())
async def run(self, debug=False, blockThread=True):
    """Start the auto-reply loop.

    With blockThread=True the loop is awaited in the current task; otherwise
    it is driven by a daemon thread running its own event loop.
    """
    logger.info('Start auto replying.')
    if debug:
        set_logging(loggingLevel=logging.DEBUG)

    async def reply_fn():
        try:
            while self.alive:
                # NOTE(review): configured_reply in this module is declared with
                # extra parameters and is assigned unbound — confirm the call
                # contract with load_register's wiring.
                await self.configured_reply()
        except KeyboardInterrupt:
            if self.useHotReload:
                await self.dump_login_status()
            self.alive = False
            logger.debug('itchat received an ^C and exit.')
            logger.info('Bye~')

    if blockThread:
        await reply_fn()
    else:
        # Fix: reply_fn is a coroutine function, so Thread(target=reply_fn)
        # only created a coroutine object and never executed it. Drive it on
        # a private event loop inside the daemon thread instead.
        import asyncio
        replyThread = threading.Thread(target=lambda: asyncio.run(reply_fn()))
        replyThread.daemon = True  # setDaemon() is deprecated since Python 3.10
        replyThread.start()
self.memberList.set_default_value(contactClass=User) 26 | self.memberList.core = core 27 | self.mpList.set_default_value(contactClass=MassivePlatform) 28 | self.mpList.core = core 29 | self.chatroomList.set_default_value(contactClass=Chatroom) 30 | self.chatroomList.core = core 31 | def dumps(self): 32 | return { 33 | 'userName' : self.userName, 34 | 'nickName' : self.nickName, 35 | 'memberList' : self.memberList, 36 | 'mpList' : self.mpList, 37 | 'chatroomList' : self.chatroomList, 38 | 'lastInputUserName' : self.lastInputUserName, } 39 | def loads(self, j): 40 | self.userName = j.get('userName', None) 41 | self.nickName = j.get('nickName', None) 42 | del self.memberList[:] 43 | for i in j.get('memberList', []): 44 | self.memberList.append(i) 45 | del self.mpList[:] 46 | for i in j.get('mpList', []): 47 | self.mpList.append(i) 48 | del self.chatroomList[:] 49 | for i in j.get('chatroomList', []): 50 | self.chatroomList.append(i) 51 | # I tried to solve everything in pickle 52 | # but this way is easier and more storage-saving 53 | for chatroom in self.chatroomList: 54 | if 'MemberList' in chatroom: 55 | for member in chatroom['MemberList']: 56 | member.core = chatroom.core 57 | member.chatroom = chatroom 58 | if 'Self' in chatroom: 59 | chatroom['Self'].core = chatroom.core 60 | chatroom['Self'].chatroom = chatroom 61 | self.lastInputUserName = j.get('lastInputUserName', None) 62 | def search_friends(self, name=None, userName=None, remarkName=None, nickName=None, 63 | wechatAccount=None): 64 | with self.updateLock: 65 | if (name or userName or remarkName or nickName or wechatAccount) is None: 66 | return copy.deepcopy(self.memberList[0]) # my own account 67 | elif userName: # return the only userName match 68 | for m in self.memberList: 69 | if m['UserName'] == userName: 70 | return copy.deepcopy(m) 71 | else: 72 | matchDict = { 73 | 'RemarkName' : remarkName, 74 | 'NickName' : nickName, 75 | 'Alias' : wechatAccount, } 76 | for k in ('RemarkName', 'NickName', 
'Alias'): 77 | if matchDict[k] is None: 78 | del matchDict[k] 79 | if name: # select based on name 80 | contact = [] 81 | for m in self.memberList: 82 | if any([m.get(k) == name for k in ('RemarkName', 'NickName', 'Alias')]): 83 | contact.append(m) 84 | else: 85 | contact = self.memberList[:] 86 | if matchDict: # select again based on matchDict 87 | friendList = [] 88 | for m in contact: 89 | if all([m.get(k) == v for k, v in matchDict.items()]): 90 | friendList.append(m) 91 | return copy.deepcopy(friendList) 92 | else: 93 | return copy.deepcopy(contact) 94 | def search_chatrooms(self, name=None, userName=None): 95 | with self.updateLock: 96 | if userName is not None: 97 | for m in self.chatroomList: 98 | if m['UserName'] == userName: 99 | return copy.deepcopy(m) 100 | elif name is not None: 101 | matchList = [] 102 | for m in self.chatroomList: 103 | if name in m['NickName']: 104 | matchList.append(copy.deepcopy(m)) 105 | return matchList 106 | def search_mps(self, name=None, userName=None): 107 | with self.updateLock: 108 | if userName is not None: 109 | for m in self.mpList: 110 | if m['UserName'] == userName: 111 | return copy.deepcopy(m) 112 | elif name is not None: 113 | matchList = [] 114 | for m in self.mpList: 115 | if name in m['NickName']: 116 | matchList.append(copy.deepcopy(m)) 117 | return matchList 118 | -------------------------------------------------------------------------------- /channel/wechat/wechaty_channel.py: -------------------------------------------------------------------------------- 1 | # encoding:utf-8 2 | 3 | """ 4 | wechaty channel 5 | Python Wechaty - https://github.com/wechaty/python-wechaty 6 | """ 7 | import asyncio 8 | import base64 9 | import os 10 | import time 11 | 12 | from wechaty import Contact, Wechaty 13 | from wechaty.user import Message 14 | from wechaty_puppet import FileBox 15 | 16 | from bridge.context import * 17 | from bridge.context import Context 18 | from bridge.reply import * 19 | from channel.chat_channel 
import ChatChannel 20 | from channel.wechat.wechaty_message import WechatyMessage 21 | from common.log import logger 22 | from common.singleton import singleton 23 | from config import conf 24 | 25 | try: 26 | from voice.audio_convert import any_to_sil 27 | except Exception as e: 28 | pass 29 | 30 | 31 | @singleton 32 | class WechatyChannel(ChatChannel): 33 | NOT_SUPPORT_REPLYTYPE = [] 34 | 35 | def __init__(self): 36 | super().__init__() 37 | 38 | def startup(self): 39 | config = conf() 40 | token = config.get("wechaty_puppet_service_token") 41 | os.environ["WECHATY_PUPPET_SERVICE_TOKEN"] = token 42 | asyncio.run(self.main()) 43 | 44 | async def main(self): 45 | loop = asyncio.get_event_loop() 46 | # 将asyncio的loop传入处理线程 47 | self.handler_pool._initializer = lambda: asyncio.set_event_loop(loop) 48 | self.bot = Wechaty() 49 | self.bot.on("login", self.on_login) 50 | self.bot.on("message", self.on_message) 51 | await self.bot.start() 52 | 53 | async def on_login(self, contact: Contact): 54 | self.user_id = contact.contact_id 55 | self.name = contact.name 56 | logger.info("[WX] login user={}".format(contact)) 57 | 58 | # 统一的发送函数,每个Channel自行实现,根据reply的type字段发送不同类型的消息 59 | def send(self, reply: Reply, context: Context): 60 | receiver_id = context["receiver"] 61 | loop = asyncio.get_event_loop() 62 | if context["isgroup"]: 63 | receiver = asyncio.run_coroutine_threadsafe(self.bot.Room.find(receiver_id), loop).result() 64 | else: 65 | receiver = asyncio.run_coroutine_threadsafe(self.bot.Contact.find(receiver_id), loop).result() 66 | msg = None 67 | if reply.type == ReplyType.TEXT: 68 | msg = reply.content 69 | asyncio.run_coroutine_threadsafe(receiver.say(msg), loop).result() 70 | logger.info("[WX] sendMsg={}, receiver={}".format(reply, receiver)) 71 | elif reply.type == ReplyType.ERROR or reply.type == ReplyType.INFO: 72 | msg = reply.content 73 | asyncio.run_coroutine_threadsafe(receiver.say(msg), loop).result() 74 | logger.info("[WX] sendMsg={}, 
receiver={}".format(reply, receiver)) 75 | elif reply.type == ReplyType.VOICE: 76 | voiceLength = None 77 | file_path = reply.content 78 | sil_file = os.path.splitext(file_path)[0] + ".sil" 79 | voiceLength = int(any_to_sil(file_path, sil_file)) 80 | if voiceLength >= 60000: 81 | voiceLength = 60000 82 | logger.info("[WX] voice too long, length={}, set to 60s".format(voiceLength)) 83 | # 发送语音 84 | t = int(time.time()) 85 | msg = FileBox.from_file(sil_file, name=str(t) + ".sil") 86 | if voiceLength is not None: 87 | msg.metadata["voiceLength"] = voiceLength 88 | asyncio.run_coroutine_threadsafe(receiver.say(msg), loop).result() 89 | try: 90 | os.remove(file_path) 91 | if sil_file != file_path: 92 | os.remove(sil_file) 93 | except Exception as e: 94 | pass 95 | logger.info("[WX] sendVoice={}, receiver={}".format(reply.content, receiver)) 96 | elif reply.type == ReplyType.IMAGE_URL: # 从网络下载图片 97 | img_url = reply.content 98 | t = int(time.time()) 99 | msg = FileBox.from_url(url=img_url, name=str(t) + ".png") 100 | asyncio.run_coroutine_threadsafe(receiver.say(msg), loop).result() 101 | logger.info("[WX] sendImage url={}, receiver={}".format(img_url, receiver)) 102 | elif reply.type == ReplyType.IMAGE: # 从文件读取图片 103 | image_storage = reply.content 104 | image_storage.seek(0) 105 | t = int(time.time()) 106 | msg = FileBox.from_base64(base64.b64encode(image_storage.read()), str(t) + ".png") 107 | asyncio.run_coroutine_threadsafe(receiver.say(msg), loop).result() 108 | logger.info("[WX] sendImage, receiver={}".format(receiver)) 109 | 110 | async def on_message(self, msg: Message): 111 | """ 112 | listen for message event 113 | """ 114 | try: 115 | cmsg = await WechatyMessage(msg) 116 | except NotImplementedError as e: 117 | logger.debug("[WX] {}".format(e)) 118 | return 119 | except Exception as e: 120 | logger.exception("[WX] {}".format(e)) 121 | return 122 | logger.debug("[WX] message:{}".format(cmsg)) 123 | room = msg.room() # 获取消息来自的群聊. 
如果消息不是来自群聊, 则返回None 124 | isgroup = room is not None 125 | ctype = cmsg.ctype 126 | context = self._compose_context(ctype, cmsg.content, isgroup=isgroup, msg=cmsg) 127 | if context: 128 | logger.info("[WX] receiveMsg={}, context={}".format(cmsg, context)) 129 | self.produce(context) 130 | -------------------------------------------------------------------------------- /bot/openai/open_ai_bot.py: -------------------------------------------------------------------------------- 1 | # encoding:utf-8 2 | 3 | import time 4 | 5 | import openai 6 | import openai.error 7 | 8 | from bot.bot import Bot 9 | from bot.openai.open_ai_image import OpenAIImage 10 | from bot.openai.open_ai_session import OpenAISession 11 | from bot.session_manager import SessionManager 12 | from bridge.context import ContextType 13 | from bridge.reply import Reply, ReplyType 14 | from common.log import logger 15 | from config import conf 16 | 17 | user_session = dict() 18 | 19 | 20 | # OpenAI对话模型API (可用) 21 | class OpenAIBot(Bot, OpenAIImage): 22 | def __init__(self): 23 | super().__init__() 24 | openai.api_key = conf().get("open_ai_api_key") 25 | if conf().get("open_ai_api_base"): 26 | openai.api_base = conf().get("open_ai_api_base") 27 | proxy = conf().get("proxy") 28 | if proxy: 29 | openai.proxy = proxy 30 | 31 | self.sessions = SessionManager(OpenAISession, model=conf().get("model") or "text-davinci-003") 32 | self.args = { 33 | "model": conf().get("model") or "text-davinci-003", # 对话模型的名称 34 | "temperature": conf().get("temperature", 0.9), # 值在[0,1]之间,越大表示回复越具有不确定性 35 | "max_tokens": 1200, # 回复最大的字符数 36 | "top_p": 1, 37 | "frequency_penalty": conf().get("frequency_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容 38 | "presence_penalty": conf().get("presence_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容 39 | "request_timeout": conf().get("request_timeout", None), # 请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间 40 | "timeout": conf().get("request_timeout", None), # 重试超时时间,在这个时间内,将会自动重试 41 | "stop": ["\n\n\n"], 
42 | } 43 | 44 | def reply(self, query, context=None): 45 | # acquire reply content 46 | if context and context.type: 47 | if context.type == ContextType.TEXT: 48 | logger.info("[OPEN_AI] query={}".format(query)) 49 | session_id = context["session_id"] 50 | reply = None 51 | if query == "#清除记忆": 52 | self.sessions.clear_session(session_id) 53 | reply = Reply(ReplyType.INFO, "记忆已清除") 54 | elif query == "#清除所有": 55 | self.sessions.clear_all_session() 56 | reply = Reply(ReplyType.INFO, "所有人记忆已清除") 57 | else: 58 | session = self.sessions.session_query(query, session_id) 59 | result = self.reply_text(session) 60 | total_tokens, completion_tokens, reply_content = ( 61 | result["total_tokens"], 62 | result["completion_tokens"], 63 | result["content"], 64 | ) 65 | logger.debug( 66 | "[OPEN_AI] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(str(session), session_id, reply_content, completion_tokens) 67 | ) 68 | 69 | if total_tokens == 0: 70 | reply = Reply(ReplyType.ERROR, reply_content) 71 | else: 72 | self.sessions.session_reply(reply_content, session_id, total_tokens) 73 | reply = Reply(ReplyType.TEXT, reply_content) 74 | return reply 75 | elif context.type == ContextType.IMAGE_CREATE: 76 | ok, retstring = self.create_img(query, 0) 77 | reply = None 78 | if ok: 79 | reply = Reply(ReplyType.IMAGE_URL, retstring) 80 | else: 81 | reply = Reply(ReplyType.ERROR, retstring) 82 | return reply 83 | 84 | def reply_text(self, session: OpenAISession, retry_count=0): 85 | try: 86 | response = openai.Completion.create(prompt=str(session), **self.args) 87 | res_content = response.choices[0]["text"].strip().replace("<|endoftext|>", "") 88 | total_tokens = response["usage"]["total_tokens"] 89 | completion_tokens = response["usage"]["completion_tokens"] 90 | logger.info("[OPEN_AI] reply={}".format(res_content)) 91 | return { 92 | "total_tokens": total_tokens, 93 | "completion_tokens": completion_tokens, 94 | "content": res_content, 95 | } 96 | except Exception 
as e: 97 | need_retry = retry_count < 2 98 | result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"} 99 | if isinstance(e, openai.error.RateLimitError): 100 | logger.warn("[OPEN_AI] RateLimitError: {}".format(e)) 101 | result["content"] = "提问太快啦,请休息一下再问我吧" 102 | if need_retry: 103 | time.sleep(20) 104 | elif isinstance(e, openai.error.Timeout): 105 | logger.warn("[OPEN_AI] Timeout: {}".format(e)) 106 | result["content"] = "我没有收到你的消息" 107 | if need_retry: 108 | time.sleep(5) 109 | elif isinstance(e, openai.error.APIConnectionError): 110 | logger.warn("[OPEN_AI] APIConnectionError: {}".format(e)) 111 | need_retry = False 112 | result["content"] = "我连接不到你的网络" 113 | else: 114 | logger.warn("[OPEN_AI] Exception: {}".format(e)) 115 | need_retry = False 116 | self.sessions.clear_session(session.session_id) 117 | 118 | if need_retry: 119 | logger.warn("[OPEN_AI] 第{}次重试".format(retry_count + 1)) 120 | return self.reply_text(session, retry_count + 1) 121 | else: 122 | return result 123 | -------------------------------------------------------------------------------- /bot/luolinai/luolinai_bot.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import time 3 | 4 | import requests 5 | from common.log import logger 6 | from bridge.context import Context 7 | from bridge.reply import Reply, ReplyType 8 | from bot.bot import Bot 9 | from bot.chatgpt.chat_gpt_session import ChatGPTSession 10 | from bot.session_manager import SessionManager 11 | from config import conf 12 | from common.database import Database 13 | 14 | 15 | class luolinaiBot(Bot): 16 | AUTH_FAILED_CODE = 401 17 | 18 | def __init__(self): 19 | self.base_url = conf().get("base_url") or "https://api.gojiberrys.cn/api/openapi" 20 | self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo") 21 | self.reply_counts = {} 22 | self.last_update_date = datetime.date.today() 23 | self.max_daily_replies = 
conf().get("max_daily_replies", 10) 24 | self.max_single_chat_replies = conf().get("max_single_chat_replies") 25 | self.max_group_chat_replies = conf().get("max_group_chat_replies") 26 | self.ad_message = conf().get("ad_message") 27 | self.database = Database() 28 | 29 | def __del__(self): 30 | self.database.close() 31 | 32 | def reply(self, query, context: Context = None) -> Reply: 33 | # Get the WeChat nickname from the context 34 | wechat_nickname = context.get("wechat_nickname") 35 | 36 | # Check if we've moved to a new day since the last update 37 | if datetime.date.today() != self.last_update_date: 38 | # If so, reset the reply counts and update the date 39 | self.reply_counts = {} 40 | self.last_update_date = datetime.date.today() 41 | 42 | # Check if the user has already reached the maximum number of replies for the day 43 | if self.reply_counts.get(wechat_nickname, 0) >= self.max_daily_replies: 44 | # If so, return an error message 45 | return Reply(ReplyType.ERROR, "已达到最大回复次数") 46 | 47 | # Otherwise, increment the user's reply count 48 | self.reply_counts[wechat_nickname] = self.reply_counts.get(wechat_nickname, 0) + 1 49 | 50 | # Continue processing the reply as before... 
51 | reply = self._chat(query, context) 52 | 53 | return reply 54 | 55 | def _chat(self, query, context, retry_count=0): 56 | if retry_count >= 5: 57 | logger.warn("[luolinai] 失败超过最大重试次数") 58 | return Reply(ReplyType.ERROR, "请再问我一次吧") 59 | 60 | try: 61 | session_id = context.get("session_id") 62 | session = self.sessions.session_query(query, session_id) 63 | 64 | if session.messages and session.messages[0].get("role") == "system": 65 | session.messages.pop(0) 66 | 67 | luolinai_api_key = conf().get("luolinai_api_key") 68 | model_id = conf().get("luolinai_model_id") 69 | 70 | prompts = [] 71 | for msg in session.messages: 72 | if "role" in msg and "content" in msg: 73 | prompt = {"obj": msg["role"], "value": msg["content"]} 74 | prompts.append(prompt) 75 | else: 76 | logger.warn(f"[luolinai] 无效的消息格式: {msg}") 77 | 78 | body = { 79 | "chatId": session_id, 80 | "modelId": model_id, 81 | "isStream": False, 82 | "prompts": prompts 83 | } 84 | headers = {"apikey": luolinai_api_key, "Content-Type": "application/json"} 85 | 86 | response = requests.post(url=f"{self.base_url}/chat/chat", json=body, headers=headers) 87 | 88 | logger.info(f"[luolinai] 响应状态码: {response.status_code}") 89 | logger.info(f"[luolinai] 响应内容: {response.content}") 90 | 91 | if response.status_code == 200: 92 | res = response.json() 93 | chat_reply = res.get("data") 94 | 95 | # 在这里修改广告信息的处理部分 96 | ad_message = conf().get("ad_message") 97 | if isinstance(chat_reply, str) and ad_message: 98 | ad_prefix = "🌟🌟🌟 🌟🌟🌟" 99 | ad_separator = "\n✨✨✨✨✨✨✨✨✨✨" 100 | ad_message = f"\n{ad_separator}\n{ad_message}\n{ad_separator}" 101 | styled_ad_prefix = f"**{ad_prefix}**" 102 | chat_reply_with_ad = chat_reply + f"\n{styled_ad_prefix}{ad_message}" 103 | self.database.insert_chat(session_id, query, chat_reply_with_ad) 104 | return Reply(ReplyType.TEXT, chat_reply_with_ad) 105 | 106 | if isinstance(chat_reply, str): 107 | self.database.insert_chat(session_id, query, chat_reply) 108 | return Reply(ReplyType.TEXT, 
chat_reply) 109 | 110 | # 添加以下两行代码来处理其他类型的回复 111 | elif isinstance(chat_reply, dict) and chat_reply.get("type") == "text": 112 | reply_text = chat_reply.get("message") 113 | self.database.insert_chat(session_id, query, reply_text) 114 | return Reply(ReplyType.TEXT, reply_text) 115 | 116 | else: 117 | logger.error(f"[luolinai] 回复类型不正确: {type(chat_reply)}") 118 | return Reply(ReplyType.TEXT, str(chat_reply)) 119 | 120 | else: 121 | time.sleep(2) 122 | logger.warn(f"[luolinai] 进行重试,次数={retry_count + 1}") 123 | return self._chat(query, context, retry_count + 1) 124 | 125 | except Exception as e: 126 | logger.error(f"[luolinai] 异常: {str(e)}") 127 | if 'response' in locals(): 128 | logger.error(f"[luolinai] API响应内容: {response.content.decode('utf-8')}") 129 | 130 | # 发生错误时,检查配置文件是否存在广告信息,如果存在则返回广告信息作为错误提示 131 | ad_message = conf().get("ad_message") 132 | if ad_message: 133 | return Reply(ReplyType.ERROR, ad_message) 134 | 135 | -------------------------------------------------------------------------------- /lib/itchat/utils.py: -------------------------------------------------------------------------------- 1 | import re, os, sys, subprocess, copy, traceback, logging 2 | 3 | try: 4 | from HTMLParser import HTMLParser 5 | except ImportError: 6 | from html.parser import HTMLParser 7 | try: 8 | from urllib import quote as _quote 9 | quote = lambda n: _quote(n.encode('utf8', 'replace')) 10 | except ImportError: 11 | from urllib.parse import quote 12 | 13 | import requests 14 | 15 | from . import config 16 | 17 | logger = logging.getLogger('itchat') 18 | 19 | emojiRegex = re.compile(r'') 20 | htmlParser = HTMLParser() 21 | if not hasattr(htmlParser, 'unescape'): 22 | import html 23 | htmlParser.unescape = html.unescape 24 | # FIX Python 3.9 HTMLParser.unescape is removed. 
See https://docs.python.org/3.9/whatsnew/3.9.html 25 | try: 26 | b = u'\u2588' 27 | sys.stdout.write(b + '\r') 28 | sys.stdout.flush() 29 | except UnicodeEncodeError: 30 | BLOCK = 'MM' 31 | else: 32 | BLOCK = b 33 | friendInfoTemplate = {} 34 | for k in ('UserName', 'City', 'DisplayName', 'PYQuanPin', 'RemarkPYInitial', 'Province', 35 | 'KeyWord', 'RemarkName', 'PYInitial', 'EncryChatRoomId', 'Alias', 'Signature', 36 | 'NickName', 'RemarkPYQuanPin', 'HeadImgUrl'): 37 | friendInfoTemplate[k] = '' 38 | for k in ('UniFriend', 'Sex', 'AppAccountFlag', 'VerifyFlag', 'ChatRoomId', 'HideInputBarFlag', 39 | 'AttrStatus', 'SnsFlag', 'MemberCount', 'OwnerUin', 'ContactFlag', 'Uin', 40 | 'StarFriend', 'Statues'): 41 | friendInfoTemplate[k] = 0 42 | friendInfoTemplate['MemberList'] = [] 43 | 44 | def clear_screen(): 45 | os.system('cls' if config.OS == 'Windows' else 'clear') 46 | 47 | def emoji_formatter(d, k): 48 | ''' _emoji_deebugger is for bugs about emoji match caused by wechat backstage 49 | like :face with tears of joy: will be replaced with :cat face with tears of joy: 50 | ''' 51 | def _emoji_debugger(d, k): 52 | s = d[k].replace('') # fix missing bug 54 | def __fix_miss_match(m): 55 | return '' % ({ 56 | '1f63c': '1f601', '1f639': '1f602', '1f63a': '1f603', 57 | '1f4ab': '1f616', '1f64d': '1f614', '1f63b': '1f60d', 58 | '1f63d': '1f618', '1f64e': '1f621', '1f63f': '1f622', 59 | }.get(m.group(1), m.group(1))) 60 | return emojiRegex.sub(__fix_miss_match, s) 61 | def _emoji_formatter(m): 62 | s = m.group(1) 63 | if len(s) == 6: 64 | return ('\\U%s\\U%s'%(s[:2].rjust(8, '0'), s[2:].rjust(8, '0')) 65 | ).encode('utf8').decode('unicode-escape', 'replace') 66 | elif len(s) == 10: 67 | return ('\\U%s\\U%s'%(s[:5].rjust(8, '0'), s[5:].rjust(8, '0')) 68 | ).encode('utf8').decode('unicode-escape', 'replace') 69 | else: 70 | return ('\\U%s'%m.group(1).rjust(8, '0') 71 | ).encode('utf8').decode('unicode-escape', 'replace') 72 | d[k] = _emoji_debugger(d, k) 73 | d[k] = 
emojiRegex.sub(_emoji_formatter, d[k]) 74 | 75 | def msg_formatter(d, k): 76 | emoji_formatter(d, k) 77 | d[k] = d[k].replace('
def search_dict_list(l, key, value):
    ''' Search a list of dict
    * return the first dict whose *key* maps to *value*, or None '''
    return next((entry for entry in l if entry.get(key) == value), None)
return 'gif' 149 | elif b'PNG' in data: 150 | return 'png' 151 | elif b'JFIF' in data: 152 | return 'jpg' 153 | return '' 154 | 155 | def update_info_dict(oldInfoDict, newInfoDict): 156 | ''' only normal values will be updated here 157 | because newInfoDict is normal dict, so it's not necessary to consider templates 158 | ''' 159 | for k, v in newInfoDict.items(): 160 | if any((isinstance(v, t) for t in (tuple, list, dict))): 161 | pass # these values will be updated somewhere else 162 | elif oldInfoDict.get(k) is None or v not in (None, '', '0', 0): 163 | oldInfoDict[k] = v -------------------------------------------------------------------------------- /plugins/tool/tool.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | from chatgpt_tool_hub.apps import AppFactory 5 | from chatgpt_tool_hub.apps.app import App 6 | from chatgpt_tool_hub.tools.all_tool_list import get_all_tool_names 7 | 8 | from plugins.plugin import Plugin 9 | from bridge.bridge import Bridge 10 | from bridge.context import ContextType 11 | from bridge.reply import Reply, ReplyType 12 | from common import const 13 | from common.log import logger 14 | from config import conf 15 | from plugins import plugins 16 | 17 | 18 | @plugins.register( 19 | name="tool", 20 | desc="Arming your ChatGPT bot with various tools", 21 | version="0.4", 22 | author="goldfishh", 23 | desire_priority=0, 24 | ) 25 | class Tool(Plugin): 26 | def __init__(self): 27 | super().__init__() 28 | self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context 29 | 30 | self.app = self._reset_app() 31 | 32 | logger.info("[tool] inited") 33 | 34 | def get_help_text(self, verbose=False, **kwargs): 35 | help_text = "这是一个能让chatgpt联网,搜索,数字运算的插件,将赋予强大且丰富的扩展能力。" 36 | trigger_prefix = conf().get("plugin_trigger_prefix", "$") 37 | if not verbose: 38 | return help_text 39 | help_text += "\n使用说明:\n" 40 | help_text += f"{trigger_prefix}tool " + "命令: 
根据给出的{命令}使用一些可用工具尽力为你得到结果。\n" 41 | help_text += f"{trigger_prefix}tool reset: 重置工具。\n\n" 42 | help_text += f"已加载工具列表: \n" 43 | for idx, tool in enumerate(self.app.get_tool_list()): 44 | if idx != 0: 45 | help_text += ", " 46 | help_text += f"{tool}" 47 | return help_text 48 | 49 | def on_handle_context(self, e_context: EventContext): 50 | if e_context["context"].type != ContextType.TEXT: 51 | return 52 | 53 | # 暂时不支持未来扩展的bot 54 | if Bridge().get_bot_type("chat") not in ( 55 | const.CHATGPT, 56 | const.OPEN_AI, 57 | const.CHATGPTONAZURE, 58 | ): 59 | return 60 | 61 | content = e_context["context"].content 62 | content_list = e_context["context"].content.split(maxsplit=1) 63 | 64 | if not content or len(content_list) < 1: 65 | e_context.action = EventAction.CONTINUE 66 | return 67 | 68 | logger.debug("[tool] on_handle_context. content: %s" % content) 69 | reply = Reply() 70 | reply.type = ReplyType.TEXT 71 | trigger_prefix = conf().get("plugin_trigger_prefix", "$") 72 | # todo: 有些工具必须要api-key,需要修改config文件,所以这里没有实现query增删tool的功能 73 | if content.startswith(f"{trigger_prefix}tool"): 74 | if len(content_list) == 1: 75 | logger.debug("[tool]: get help") 76 | reply.content = self.get_help_text() 77 | e_context["reply"] = reply 78 | e_context.action = EventAction.BREAK_PASS 79 | return 80 | elif len(content_list) > 1: 81 | if content_list[1].strip() == "reset": 82 | logger.debug("[tool]: reset config") 83 | self.app = self._reset_app() 84 | reply.content = "重置工具成功" 85 | e_context["reply"] = reply 86 | e_context.action = EventAction.BREAK_PASS 87 | return 88 | elif content_list[1].startswith("reset"): 89 | logger.debug("[tool]: remind") 90 | e_context["context"].content = "请你随机用一种聊天风格,提醒用户:如果想重置tool插件,reset之后不要加任何字符" 91 | 92 | e_context.action = EventAction.BREAK 93 | return 94 | 95 | query = content_list[1].strip() 96 | 97 | # Don't modify bot name 98 | all_sessions = Bridge().get_bot("chat").sessions 99 | user_session = all_sessions.session_query(query, 
e_context["context"]["session_id"]).messages 100 | 101 | # chatgpt-tool-hub will reply to you with many tools 102 | logger.debug("[tool]: just-go") 103 | try: 104 | _reply = self.app.ask(query, user_session) 105 | e_context.action = EventAction.BREAK_PASS 106 | all_sessions.session_reply(_reply, e_context["context"]["session_id"]) 107 | except Exception as e: 108 | logger.exception(e) 109 | logger.error(str(e)) 110 | 111 | e_context["context"].content = "请你随机用一种聊天风格,提醒用户:这个问题tool插件暂时无法处理" 112 | reply.type = ReplyType.ERROR 113 | e_context.action = EventAction.BREAK 114 | return 115 | 116 | reply.content = _reply 117 | e_context["reply"] = reply 118 | return 119 | 120 | def _read_json(self) -> dict: 121 | curdir = os.path.dirname(__file__) 122 | config_path = os.path.join(curdir, "config.json") 123 | tool_config = {"tools": [], "kwargs": {}} 124 | if not os.path.exists(config_path): 125 | return tool_config 126 | else: 127 | with open(config_path, "r") as f: 128 | tool_config = json.load(f) 129 | return tool_config 130 | 131 | def _build_tool_kwargs(self, kwargs: dict): 132 | tool_model_name = kwargs.get("model_name") 133 | request_timeout = kwargs.get("request_timeout") 134 | 135 | return { 136 | "debug": kwargs.get("debug", False), 137 | "openai_api_key": conf().get("open_ai_api_key", ""), 138 | "openai_api_base": conf().get("open_ai_api_base", "https://api.openai.com/v1"), 139 | "proxy": conf().get("proxy", ""), 140 | "request_timeout": request_timeout if request_timeout else conf().get("request_timeout", 120), 141 | # note: 目前tool暂未对其他模型测试,但这里仍对配置来源做了优先级区分,一般插件配置可覆盖全局配置 142 | "model_name": tool_model_name if tool_model_name else conf().get("model", "gpt-3.5-turbo"), 143 | "no_default": kwargs.get("no_default", False), 144 | "top_k_results": kwargs.get("top_k_results", 3), 145 | # for news tool 146 | "news_api_key": kwargs.get("news_api_key", ""), 147 | # for bing-search tool 148 | "bing_subscription_key": kwargs.get("bing_subscription_key", ""), 149 | # for 
google-search tool 150 | "google_api_key": kwargs.get("google_api_key", ""), 151 | "google_cse_id": kwargs.get("google_cse_id", ""), 152 | # for searxng-search tool 153 | "searx_search_host": kwargs.get("searx_search_host", ""), 154 | # for wolfram-alpha tool 155 | "wolfram_alpha_appid": kwargs.get("wolfram_alpha_appid", ""), 156 | # for morning-news tool 157 | "morning_news_api_key": kwargs.get("morning_news_api_key", ""), 158 | # for visual_dl tool 159 | "cuda_device": kwargs.get("cuda_device", "cpu"), 160 | "think_depth": kwargs.get("think_depth", 3), 161 | "arxiv_summary": kwargs.get("arxiv_summary", True), 162 | "morning_news_use_llm": kwargs.get("morning_news_use_llm", False), 163 | } 164 | 165 | def _filter_tool_list(self, tool_list: list): 166 | valid_list = [] 167 | for tool in tool_list: 168 | if tool in get_all_tool_names(): 169 | valid_list.append(tool) 170 | else: 171 | logger.warning("[tool] filter invalid tool: " + repr(tool)) 172 | return valid_list 173 | 174 | def _reset_app(self) -> App: 175 | tool_config = self._read_json() 176 | app_kwargs = self._build_tool_kwargs(tool_config.get("kwargs", {})) 177 | 178 | app = AppFactory() 179 | app.init_env(**app_kwargs) 180 | 181 | # filter unsupported tools 182 | tool_list = self._filter_tool_list(tool_config.get("tools", [])) 183 | 184 | return app.create_app(tools_list=tool_list, **app_kwargs) 185 | -------------------------------------------------------------------------------- /channel/wechat/wechat_channel.py: -------------------------------------------------------------------------------- 1 | # encoding:utf-8 2 | 3 | """ 4 | wechat channel 5 | """ 6 | 7 | import io 8 | import json 9 | import os 10 | import threading 11 | import time 12 | 13 | import requests 14 | 15 | from bridge.context import * 16 | from bridge.reply import * 17 | from channel.chat_channel import ChatChannel 18 | from channel.wechat.wechat_message import * 19 | from common.expired_dict import ExpiredDict 20 | from common.log import 
logger
from common.singleton import singleton
from common.time_check import time_checker
from config import conf, get_appdata_dir
from lib import itchat
from lib.itchat.content import *


@itchat.msg_register([TEXT, VOICE, PICTURE, NOTE])
def handler_single_msg(msg):
    """itchat entry point for private messages: wrap the raw message and dispatch it."""
    try:
        cmsg = WechatMessage(msg, False)
    except NotImplementedError as e:
        logger.debug("[WX]single message {} skipped: {}".format(msg["MsgId"], e))
        return None
    WechatChannel().handle_single(cmsg)
    return None


@itchat.msg_register([TEXT, VOICE, PICTURE, NOTE], isGroupChat=True)
def handler_group_msg(msg):
    """itchat entry point for group messages: wrap the raw message and dispatch it."""
    try:
        cmsg = WechatMessage(msg, True)
    except NotImplementedError as e:
        logger.debug("[WX]group message {} skipped: {}".format(msg["MsgId"], e))
        return None
    WechatChannel().handle_group(cmsg)
    return None


def _check(func):
    """Decorator: drop duplicate messages and, under hot reload, stale history messages."""
    def wrapper(self, cmsg: ChatMessage):
        msgId = cmsg.msg_id
        if msgId in self.receivedMsgs:
            logger.info("Wechat message {} already received, ignore".format(msgId))
            return
        self.receivedMsgs[msgId] = cmsg
        create_time = cmsg.create_time  # message timestamp
        # skip history messages older than one minute (replayed after hot reload)
        if conf().get("hot_reload") == True and int(create_time) < int(time.time()) - 60:
            logger.debug("[WX]history message {} skipped".format(msgId))
            return
        return func(self, cmsg)

    return wrapper


# Available QR-code generation services:
# https://api.qrserver.com/v1/create-qr-code/?size=400×400&data=https://www.abc.com
# https://api.isoyu.com/qr/?m=1&e=L&p=20&url=https://www.abc.com
def qrCallback(uuid, status, qrcode):
    """Login QR callback: show the QR image if PIL is available, then always
    print web fallback links and an ASCII QR to the terminal."""
    # logger.debug("qrCallback: {} {}".format(uuid,status))
    if status == "0":
        try:
            from PIL import Image

            img = Image.open(io.BytesIO(qrcode))
            _thread = threading.Thread(target=img.show, args=("QRCode",))
            _thread.daemon = True  # setDaemon() is deprecated since Python 3.10
            _thread.start()
        except Exception:
            # best effort: PIL may be missing or headless; the terminal QR below still works
            pass

        import qrcode

        url = f"https://login.weixin.qq.com/l/{uuid}"

        qr_api1 = "https://api.isoyu.com/qr/?m=1&e=L&p=20&url={}".format(url)
        qr_api2 = "https://api.qrserver.com/v1/create-qr-code/?size=400×400&data={}".format(url)
        qr_api3 = "https://api.pwmqr.com/qrcode/create/?url={}".format(url)
        qr_api4 = "https://my.tv.sohu.com/user/a/wvideo/getQRCode.do?text={}".format(url)
        print("You can also scan QRCode in any website below:")
        print(qr_api3)
        print(qr_api4)
        print(qr_api2)
        print(qr_api1)

        qr = qrcode.QRCode(border=1)
        qr.add_data(url)
        qr.make(fit=True)
        qr.print_ascii(invert=True)


@singleton
class WechatChannel(ChatChannel):
    NOT_SUPPORT_REPLYTYPE = []

    def __init__(self):
        super().__init__()
        # de-duplication cache: remember message ids for 24 hours
        self.receivedMsgs = ExpiredDict(60 * 60 * 24)

    def startup(self):
        """Log in via QR scan (optionally hot-reloading a saved session) and start the listener."""
        itchat.instance.receivingRetryCount = 600  # extend the disconnect timeout
        # login by scan QRCode
        hotReload = conf().get("hot_reload", False)
        status_path = os.path.join(get_appdata_dir(), "itchat.pkl")
        itchat.auto_login(
            enableCmdQR=2,
            hotReload=hotReload,
            statusStorageDir=status_path,
            qrCallback=qrCallback,
        )
        self.user_id = itchat.instance.storageClass.userName
        self.name = itchat.instance.storageClass.nickName
        logger.info("Wechat login success, user_id: {}, nickname: {}".format(self.user_id, self.name))
        # start message listener
        itchat.run()

    # The handle_* methods build a Context from an incoming message and pass it to produce().
    # A Context carries everything about the message:
    # type: TEXT, VOICE or IMAGE_CREATE
    # content: text body / voice file name / image-generation prompt, depending on type
    # kwargs: extra fields:
    #         session_id: conversation id
    #         isgroup: whether the message came from a group chat
    #         receiver: who the reply should be sent to
    #         msg: the ChatMessage object
    #         origin_ctype: original type; after speech-to-text, a voice origin relaxes prefix matching in private chats
    #
desire_rtype: 希望回复类型,默认是文本回复,设置为ReplyType.VOICE是语音回复 138 | 139 | @time_checker 140 | @_check 141 | def handle_single(self, cmsg: ChatMessage): 142 | if cmsg.ctype == ContextType.VOICE: 143 | if conf().get("speech_recognition") != True: 144 | return 145 | logger.debug("[WX]receive voice msg: {}".format(cmsg.content)) 146 | elif cmsg.ctype == ContextType.IMAGE: 147 | logger.debug("[WX]receive image msg: {}".format(cmsg.content)) 148 | elif cmsg.ctype == ContextType.PATPAT: 149 | logger.debug("[WX]receive patpat msg: {}".format(cmsg.content)) 150 | elif cmsg.ctype == ContextType.TEXT: 151 | logger.debug("[WX]receive text msg: {}, cmsg={}".format(json.dumps(cmsg._rawmsg, ensure_ascii=False), cmsg)) 152 | else: 153 | logger.debug("[WX]receive msg: {}, cmsg={}".format(cmsg.content, cmsg)) 154 | context = self._compose_context(cmsg.ctype, cmsg.content, isgroup=False, msg=cmsg) 155 | if context: 156 | self.produce(context) 157 | 158 | @time_checker 159 | @_check 160 | def handle_group(self, cmsg: ChatMessage): 161 | if cmsg.ctype == ContextType.VOICE: 162 | if conf().get("speech_recognition") != True: 163 | return 164 | logger.debug("[WX]receive voice for group msg: {}".format(cmsg.content)) 165 | elif cmsg.ctype == ContextType.IMAGE: 166 | logger.debug("[WX]receive image for group msg: {}".format(cmsg.content)) 167 | elif cmsg.ctype in [ContextType.JOIN_GROUP, ContextType.PATPAT]: 168 | logger.debug("[WX]receive note msg: {}".format(cmsg.content)) 169 | elif cmsg.ctype == ContextType.TEXT: 170 | # logger.debug("[WX]receive group msg: {}, cmsg={}".format(json.dumps(cmsg._rawmsg, ensure_ascii=False), cmsg)) 171 | pass 172 | else: 173 | logger.debug("[WX]receive group msg: {}".format(cmsg.content)) 174 | context = self._compose_context(cmsg.ctype, cmsg.content, isgroup=True, msg=cmsg) 175 | if context: 176 | self.produce(context) 177 | 178 | # 统一的发送函数,每个Channel自行实现,根据reply的type字段发送不同类型的消息 179 | def send(self, reply: Reply, context: Context): 180 | receiver = 
context["receiver"] 181 | if reply.type == ReplyType.TEXT: 182 | itchat.send(reply.content, toUserName=receiver) 183 | logger.info("[WX] sendMsg={}, receiver={}".format(reply, receiver)) 184 | elif reply.type == ReplyType.ERROR or reply.type == ReplyType.INFO: 185 | itchat.send(reply.content, toUserName=receiver) 186 | logger.info("[WX] sendMsg={}, receiver={}".format(reply, receiver)) 187 | elif reply.type == ReplyType.VOICE: 188 | itchat.send_file(reply.content, toUserName=receiver) 189 | logger.info("[WX] sendFile={}, receiver={}".format(reply.content, receiver)) 190 | elif reply.type == ReplyType.IMAGE_URL: # 从网络下载图片 191 | img_url = reply.content 192 | pic_res = requests.get(img_url, stream=True) 193 | image_storage = io.BytesIO() 194 | for block in pic_res.iter_content(1024): 195 | image_storage.write(block) 196 | image_storage.seek(0) 197 | itchat.send_image(image_storage, toUserName=receiver) 198 | logger.info("[WX] sendImage url={}, receiver={}".format(img_url, receiver)) 199 | elif reply.type == ReplyType.IMAGE: # 从文件读取图片 200 | image_storage = reply.content 201 | image_storage.seek(0) 202 | itchat.send_image(image_storage, toUserName=receiver) 203 | logger.info("[WX] sendImage, receiver={}".format(receiver)) 204 | --------------------------------------------------------------------------------