├── plugins
│   ├── banwords
│   │   ├── .gitignore
│   │   ├── __init__.py
│   │   ├── banwords.txt.template
│   │   ├── config.json.template
│   │   ├── README.md
│   │   └── banwords.py
│   ├── role
│   │   ├── __init__.py
│   │   └── README.md
│   ├── tool
│   │   ├── __init__.py
│   │   └── config.json.template
│   ├── bdunit
│   │   ├── __init__.py
│   │   ├── config.json.template
│   │   └── README.md
│   ├── finish
│   │   ├── __init__.py
│   │   └── finish.py
│   ├── godcmd
│   │   ├── __init__.py
│   │   ├── config.json.template
│   │   └── README.md
│   ├── hello
│   │   ├── __init__.py
│   │   ├── config.json.template
│   │   └── README.md
│   ├── linkai
│   │   ├── __init__.py
│   │   ├── config.json.template
│   │   ├── utils.py
│   │   ├── README.md
│   │   └── summary.py
│   ├── dungeon
│   │   ├── __init__.py
│   │   ├── README.md
│   │   └── dungeon.py
│   ├── keyword
│   │   ├── __init__.py
│   │   ├── config.json.template
│   │   ├── test-keyword.png
│   │   ├── README.md
│   │   └── keyword.py
│   ├── __init__.py
│   ├── source.json
│   ├── event.py
│   ├── config.json.template
│   └── plugin.py
├── Dockerfile
├── common
│   ├── memory.py
│   ├── singleton.py
│   ├── tmp_dir.py
│   ├── package_manager.py
│   ├── log.py
│   ├── dequeue.py
│   ├── expired_dict.py
│   ├── token_bucket.py
│   ├── time_check.py
│   ├── utils.py
│   └── sorted_dict.py
├── docs
│   ├── images
│   │   └── contact.jpg
│   └── version
│       └── old-version.md
├── voice
│   ├── baidu
│   │   ├── config.json.template
│   │   ├── README.md
│   │   └── baidu_voice.py
│   ├── tencent
│   │   └── config.json.template
│   ├── ali
│   │   ├── config.json.template
│   │   └── ali_voice.py
│   ├── xunfei
│   │   ├── config.json.template
│   │   └── xunfei_voice.py
│   ├── voice.py
│   ├── azure
│   │   └── config.json.template
│   ├── elevent
│   │   └── elevent_voice.py
│   ├── edge
│   │   └── edge_voice.py
│   ├── factory.py
│   ├── google
│   │   └── google_voice.py
│   ├── pytts
│   │   └── pytts_voice.py
│   ├── openai
│   │   └── openai_voice.py
│   ├── linkai
│   │   └── linkai_voice.py
│   └── audio_convert.py
├── pyproject.toml
├── requirements.txt
├── channel
│   ├── web
│   │   └── README.md
│   ├── wework
│   │   └── run.py
│   ├── wechatmp
│   │   ├── common.py
│   │   ├── wechatmp_message.py
│   │   ├── wechatmp_client.py
│   │   ├── active_reply.py
│   │   └── README.md
│   ├── wechatcom
│   │   ├── wechatcomapp_client.py
│   │   ├── wechatcomapp_message.py
│   │   └── README.md
│   ├── channel.py
│   ├── channel_factory.py
│   ├── wechat
│   │   ├── wcf_message.py
│   │   └── wechaty_message.py
│   ├── chat_message.py
│   ├── feishu
│   │   └── feishu_message.py
│   ├── terminal
│   │   └── terminal_channel.py
│   └── dingtalk
│       └── dingtalk_message.py
├── translate
│   ├── factory.py
│   ├── translator.py
│   └── baidu
│       └── baidu_translate.py
├── .flake8
├── docker
│   ├── build.latest.sh
│   ├── docker-compose.yml
│   ├── Dockerfile.latest
│   └── entrypoint.sh
├── scripts
│   ├── tout.sh
│   ├── shutdown.sh
│   └── start.sh
├── bot
│   ├── claude
│   │   └── claude_ai_session.py
│   ├── bot.py
│   ├── zhipuai
│   │   ├── zhipu_ai_image.py
│   │   └── zhipu_ai_session.py
│   ├── baidu
│   │   ├── baidu_unit_bot.py
│   │   └── baidu_wenxin_session.py
│   ├── openai
│   │   ├── open_ai_image.py
│   │   └── open_ai_session.py
│   ├── moonshot
│   │   └── moonshot_session.py
│   ├── modelscope
│   │   └── modelscope_session.py
│   ├── dashscope
│   │   └── dashscope_session.py
│   ├── ali
│   │   └── ali_qwen_session.py
│   ├── bot_factory.py
│   ├── minimax
│   │   └── minimax_session.py
│   └── session_manager.py
├── nixpacks.toml
├── lib
│   └── itchat
│       ├── components
│       │   ├── __init__.py
│       │   ├── hotreload.py
│       │   └── register.py
│       ├── async_components
│       │   ├── __init__.py
│       │   └── hotreload.py
│       ├── content.py
│       ├── storage
│       │   └── messagequeue.py
│       ├── LICENSE
│       ├── config.py
│       ├── log.py
│       ├── returnvalues.py
│       └── __init__.py
├── .gitignore
├── bridge
│   ├── reply.py
│   └── context.py
├── .github
│   ├── ISSUE_TEMPLATE
│   │   └── 2.feature.yml
│   └── workflows
│       ├── deploy-image.yml
│       └── deploy-image-arm.yml
├── .pre-commit-config.yaml
├── requirements-optional.txt
├── LICENSE
├── config-template.json
└── app.py
/plugins/banwords/.gitignore:
--------------------------------------------------------------------------------
1 | banwords.txt
--------------------------------------------------------------------------------
/plugins/role/__init__.py:
--------------------------------------------------------------------------------
1 | from .role import *
2 |
--------------------------------------------------------------------------------
/plugins/tool/__init__.py:
--------------------------------------------------------------------------------
1 | from .tool import *
2 |
--------------------------------------------------------------------------------
/plugins/bdunit/__init__.py:
--------------------------------------------------------------------------------
1 | from .bdunit import *
2 |
--------------------------------------------------------------------------------
/plugins/finish/__init__.py:
--------------------------------------------------------------------------------
1 | from .finish import *
2 |
--------------------------------------------------------------------------------
/plugins/godcmd/__init__.py:
--------------------------------------------------------------------------------
1 | from .godcmd import *
2 |
--------------------------------------------------------------------------------
/plugins/hello/__init__.py:
--------------------------------------------------------------------------------
1 | from .hello import *
2 |
--------------------------------------------------------------------------------
/plugins/linkai/__init__.py:
--------------------------------------------------------------------------------
1 | from .linkai import *
2 |
--------------------------------------------------------------------------------
/plugins/banwords/__init__.py:
--------------------------------------------------------------------------------
1 | from .banwords import *
2 |
--------------------------------------------------------------------------------
/plugins/dungeon/__init__.py:
--------------------------------------------------------------------------------
1 | from .dungeon import *
2 |
--------------------------------------------------------------------------------
/plugins/keyword/__init__.py:
--------------------------------------------------------------------------------
1 | from .keyword import *
2 |
--------------------------------------------------------------------------------
/plugins/banwords/banwords.txt.template:
--------------------------------------------------------------------------------
1 | nipples
2 | pennis
3 | 法轮功
--------------------------------------------------------------------------------
/plugins/godcmd/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "password": "",
3 | "admin_users": []
4 | }
5 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ghcr.io/zhayujie/chatgpt-on-wechat:latest
2 |
3 | ENTRYPOINT ["/entrypoint.sh"]
--------------------------------------------------------------------------------
/plugins/keyword/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "keyword": {
3 | "关键字匹配": "测试成功"
4 | }
5 | }
6 |
--------------------------------------------------------------------------------
/common/memory.py:
--------------------------------------------------------------------------------
1 | from common.expired_dict import ExpiredDict
2 |
3 | USER_IMAGE_CACHE = ExpiredDict(60 * 3)
--------------------------------------------------------------------------------
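A small sketch of how this shared cache might be used (illustrative only; the key and payload below are hypothetical, real callers decide what to store):

```python
# Illustrative sketch: USER_IMAGE_CACHE behaves like a dict whose entries expire after 3 minutes.
from common import memory

msg_id = "12345"  # hypothetical message identifier
memory.USER_IMAGE_CACHE[msg_id] = {"path": "tmp/example.png"}  # hypothetical payload

cached = memory.USER_IMAGE_CACHE.get(msg_id)  # returns None once the 3-minute TTL has elapsed
```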
/docs/images/contact.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/josephier/chatgpt-on-wechat/HEAD/docs/images/contact.jpg
--------------------------------------------------------------------------------
/plugins/bdunit/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "service_id": "s...",
3 | "api_key": "",
4 | "secret_key": ""
5 | }
6 |
--------------------------------------------------------------------------------
/plugins/keyword/test-keyword.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/josephier/chatgpt-on-wechat/HEAD/plugins/keyword/test-keyword.png
--------------------------------------------------------------------------------
/plugins/banwords/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "action": "replace",
3 | "reply_filter": true,
4 | "reply_action": "ignore"
5 | }
6 |
--------------------------------------------------------------------------------
/voice/baidu/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "lang": "zh",
3 | "ctp": 1,
4 | "spd": 5,
5 | "pit": 5,
6 | "vol": 5,
7 | "per": 0
8 | }
9 |
--------------------------------------------------------------------------------
/voice/tencent/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "voice_type": 1003,
3 | "secret_id": "YOUR_SECRET_ID",
4 | "secret_key": "YOUR_SECRET_KEY"
5 | }
6 |
--------------------------------------------------------------------------------
/plugins/dungeon/README.md:
--------------------------------------------------------------------------------
1 | A chat plugin for playing a dungeon adventure game. Triggers:
2 |
3 | - `$开始冒险 <背景故事>` - start a dungeon game with <背景故事> as the backstory; if omitted, a default backstory is used. Every message you send afterwards helps the AI continue the story.
4 | - `$停止冒险` - stop the dungeon game and return to the normal AI.
5 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.black]
2 | line-length = 176
3 | target-version = ['py37']
4 | include = '\.pyi?$'
5 | extend-exclude = '.+/(dist|.venv|venv|build|lib)/.+'
6 |
7 | [tool.isort]
8 | profile = "black"
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | openai==0.27.8
2 | HTMLParser>=0.0.2
3 | PyQRCode==1.2.1
4 | qrcode==7.4.2
5 | requests>=2.28.2
6 | chardet>=5.1.0
7 | Pillow
8 | pre-commit
9 | web.py
10 | linkai>=0.0.6.0
11 |
12 |
--------------------------------------------------------------------------------
/channel/web/README.md:
--------------------------------------------------------------------------------
1 | # Web channel
2 | Implemented with SSE (Server-Sent Events) and ships with a default web page. You can also build your own front end on top of the API.
3 |
4 | # Usage
5 | - Set `channel_type` to `web` in the config file.
6 | - Visit http://localhost:9899/chat
7 | - The port can be changed via the `web_port` config option.
8 |
--------------------------------------------------------------------------------
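A minimal sketch of the root `config.json` entries for this channel (the port value mirrors the default address mentioned in the README above):

```json
{
  "channel_type": "web",
  "web_port": 9899
}
```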
/plugins/tool/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "tools": [
3 | "url-get",
4 | "meteo"
5 | ],
6 | "kwargs": {
7 | "debug": false,
8 | "no_default": false,
9 | "model_name": "gpt-3.5-turbo"
10 | }
11 | }
12 |
--------------------------------------------------------------------------------
/translate/factory.py:
--------------------------------------------------------------------------------
1 | def create_translator(voice_type):
2 | if voice_type == "baidu":
3 | from translate.baidu.baidu_translate import BaiduTranslator
4 |
5 | return BaiduTranslator()
6 | raise RuntimeError
7 |
--------------------------------------------------------------------------------
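A minimal usage sketch for the factory above (illustrative only; it assumes Baidu translation credentials are configured for `translate/baidu/baidu_translate.py`):

```python
# Illustrative sketch: obtain a translator from the factory and translate a string.
from translate.factory import create_translator

translator = create_translator("baidu")  # any other type raises RuntimeError
print(translator.translate("你好", from_lang="zh", to_lang="en"))
```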
/plugins/keyword/README.md:
--------------------------------------------------------------------------------
1 | # Purpose
2 | Match keywords and reply with a preset answer.
3 |
4 | # Tested scenarios
5 | So far it has been used with WeChat Official Accounts.
6 |
7 | # Setup
8 | 1. Copy `config.json.template` to `config.json`
9 | 2. Add the keyword-to-reply entries under the `keyword` field
10 | 3. Restart the program to verify
11 |
12 | # Result
13 | 
--------------------------------------------------------------------------------
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | max-line-length = 176
3 | select = E303,W293,W291,W292,E305,E231,E302
4 | exclude =
5 | .tox,
6 | __pycache__,
7 | *.pyc,
8 | .env
9 | venv/*
10 | .venv/*
11 | reports/*
12 | dist/*
13 | lib/*
--------------------------------------------------------------------------------
/docker/build.latest.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | unset KUBECONFIG
4 |
5 | cd .. && docker build -f docker/Dockerfile.latest \
6 | -t zhayujie/chatgpt-on-wechat .
7 |
8 | docker tag zhayujie/chatgpt-on-wechat zhayujie/chatgpt-on-wechat:$(date +%y%m%d)
--------------------------------------------------------------------------------
/common/singleton.py:
--------------------------------------------------------------------------------
1 | def singleton(cls):
2 | instances = {}
3 |
4 | def get_instance(*args, **kwargs):
5 | if cls not in instances:
6 | instances[cls] = cls(*args, **kwargs)
7 | return instances[cls]
8 |
9 | return get_instance
10 |
--------------------------------------------------------------------------------
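A minimal usage sketch of the `singleton` decorator above (`MyService` is a hypothetical class used only for illustration):

```python
# Illustrative sketch: every call to the decorated class returns the same instance.
from common.singleton import singleton


@singleton
class MyService:
    def __init__(self):
        self.counter = 0


a = MyService()
b = MyService()
assert a is b  # the decorator caches and reuses the first instance
```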
/voice/ali/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "api_url_text_to_voice": "https://nls-gateway-cn-shanghai.aliyuncs.com/stream/v1/tts",
3 | "api_url_voice_to_text": "https://nls-gateway.cn-shanghai.aliyuncs.com/stream/v1/asr",
4 | "app_key": "",
5 | "access_key_id": "",
6 | "access_key_secret": ""
7 | }
--------------------------------------------------------------------------------
/plugins/__init__.py:
--------------------------------------------------------------------------------
1 | from .event import *
2 | from .plugin import *
3 | from .plugin_manager import PluginManager
4 |
5 | instance = PluginManager()
6 |
7 | register = instance.register
8 | # load_plugins = instance.load_plugins
9 | # emit_event = instance.emit_event
10 |
--------------------------------------------------------------------------------
/scripts/tout.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # view the log output
3 |
4 | cd `dirname $0`/..
5 | export BASE_DIR=`pwd`
6 | echo $BASE_DIR
7 |
8 | # check the nohup.out log output file
9 | if [ ! -f "${BASE_DIR}/nohup.out" ]; then
10 | echo "No file ${BASE_DIR}/nohup.out"
11 | exit -1;
12 | fi
13 |
14 | tail -f "${BASE_DIR}/nohup.out"
15 |
--------------------------------------------------------------------------------
/channel/wework/run.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | os.environ['ntwork_LOG'] = "ERROR"
4 | import ntwork
5 |
6 | wework = ntwork.WeWork()
7 |
8 |
9 | def forever():
10 | try:
11 | while True:
12 | time.sleep(0.1)
13 | except KeyboardInterrupt:
14 | ntwork.exit_()
15 | os._exit(0)
16 |
17 |
18 |
--------------------------------------------------------------------------------
/bot/claude/claude_ai_session.py:
--------------------------------------------------------------------------------
1 | from bot.session_manager import Session
2 |
3 |
4 | class ClaudeAiSession(Session):
5 | def __init__(self, session_id, system_prompt=None, model="claude"):
6 | super().__init__(session_id, system_prompt)
7 | self.model = model
8 | # the reverse-engineered Claude API does not support a role prompt
9 | # self.reset()
10 |
--------------------------------------------------------------------------------
/nixpacks.toml:
--------------------------------------------------------------------------------
1 | [phases.setup]
2 | nixPkgs = ['python310']
3 | cmds = ['apt-get update','apt-get install -y --no-install-recommends ffmpeg espeak libavcodec-extra']
4 | [phases.install]
5 | cmds = ['python -m venv /opt/venv && . /opt/venv/bin/activate && pip install -r requirements.txt && pip install -r requirements-optional.txt']
6 | [start]
7 | cmd = "python ./app.py"
--------------------------------------------------------------------------------
/voice/xunfei/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "APPID":"xxx71xxx",
3 | "APIKey":"xxxx69058exxxxxx",
4 | "APISecret":"xxxx697f0xxxxxx",
5 | "BusinessArgsTTS":{"aue": "lame", "sfl": 1, "auf": "audio/L16;rate=16000", "vcn": "xiaoyan", "tte": "utf8"},
6 | "BusinessArgsASR":{"domain": "iat", "language": "zh_cn", "accent": "mandarin", "vad_eos":10000, "dwa": "wpgs"}
7 | }
8 |
--------------------------------------------------------------------------------
/lib/itchat/components/__init__.py:
--------------------------------------------------------------------------------
1 | from .contact import load_contact
2 | from .hotreload import load_hotreload
3 | from .login import load_login
4 | from .messages import load_messages
5 | from .register import load_register
6 |
7 | def load_components(core):
8 | load_contact(core)
9 | load_hotreload(core)
10 | load_login(core)
11 | load_messages(core)
12 | load_register(core)
13 |
--------------------------------------------------------------------------------
/lib/itchat/async_components/__init__.py:
--------------------------------------------------------------------------------
1 | from .contact import load_contact
2 | from .hotreload import load_hotreload
3 | from .login import load_login
4 | from .messages import load_messages
5 | from .register import load_register
6 |
7 | def load_components(core):
8 | load_contact(core)
9 | load_hotreload(core)
10 | load_login(core)
11 | load_messages(core)
12 | load_register(core)
13 |
--------------------------------------------------------------------------------
/plugins/hello/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "group_welc_fixed_msg": {
3 | "群聊1": "群聊1的固定欢迎语",
4 | "群聊2": "群聊2的固定欢迎语"
5 | },
6 |
7 | "group_welc_prompt": "请你随机使用一种风格说一句问候语来欢迎新用户\"{nickname}\"加入群聊。",
8 |
9 | "group_exit_prompt": "请你随机使用一种风格跟其他群用户说他违反规则\"{nickname}\"退出群聊。",
10 |
11 | "patpat_prompt": "请你随机使用一种风格介绍你自己,并告诉用户输入#help可以查看帮助信息。",
12 |
13 | "use_character_desc": false
14 | }
--------------------------------------------------------------------------------
/translate/translator.py:
--------------------------------------------------------------------------------
1 | """
2 | Translation service abstract class
3 | """
4 |
5 |
6 | class Translator(object):
7 | # please use https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes to specify language
8 | def translate(self, query: str, from_lang: str = "", to_lang: str = "en") -> str:
9 | """
10 | Translate text from one language to another
11 | """
12 | raise NotImplementedError
13 |
--------------------------------------------------------------------------------
/voice/voice.py:
--------------------------------------------------------------------------------
1 | """
2 | Voice service abstract class
3 | """
4 |
5 |
6 | class Voice(object):
7 | def voiceToText(self, voice_file):
8 | """
9 | Send voice to voice service and get text
10 | """
11 | raise NotImplementedError
12 |
13 | def textToVoice(self, text):
14 | """
15 | Send text to voice service and get voice
16 | """
17 | raise NotImplementedError
18 |
--------------------------------------------------------------------------------
/bot/bot.py:
--------------------------------------------------------------------------------
1 | """
2 | Auto-reply chat robot abstract class
3 | """
4 |
5 |
6 | from bridge.context import Context
7 | from bridge.reply import Reply
8 |
9 |
10 | class Bot(object):
11 | def reply(self, query, context: Context = None) -> Reply:
12 | """
13 | bot auto-reply content
14 | :param query: received message
15 | :return: reply content
16 | """
17 | raise NotImplementedError
18 |
--------------------------------------------------------------------------------
/lib/itchat/content.py:
--------------------------------------------------------------------------------
1 | TEXT = 'Text'
2 | MAP = 'Map'
3 | CARD = 'Card'
4 | NOTE = 'Note'
5 | SHARING = 'Sharing'
6 | PICTURE = 'Picture'
7 | RECORDING = VOICE = 'Recording'
8 | ATTACHMENT = 'Attachment'
9 | VIDEO = 'Video'
10 | FRIENDS = 'Friends'
11 | SYSTEM = 'System'
12 |
13 | INCOME_MSG = [TEXT, MAP, CARD, NOTE, SHARING, PICTURE,
14 | RECORDING, VOICE, ATTACHMENT, VIDEO, FRIENDS, SYSTEM]
15 |
--------------------------------------------------------------------------------
/plugins/godcmd/README.md:
--------------------------------------------------------------------------------
1 | ## Plugin description
2 |
3 | Command plugin.
4 |
5 | ## Usage
6 |
7 | Copy `config.json.template` to `config.json` and set the `password` field to your passphrase.
8 |
9 | If no passphrase is configured, a temporary one for this run is printed in the command-line log; watch for a line in the following format.
10 |
11 | ```
12 | [INFO][2023-04-06 23:53:47][godcmd.py:165] - [Godcmd] 因未设置口令,本次的临时口令为0971。
13 | ```
14 |
15 | In a private chat, use the `#auth` command with the passphrase to authenticate as an admin. Type `#help` for the full command reference:
16 |
17 | `#auth <passphrase>` - admin authentication; only allowed in private chats.
18 | `#help` - print the help text; **whether you are an admin** and whether you are in a group chat affect what is shown.
19 |
--------------------------------------------------------------------------------
/scripts/shutdown.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # stop the service
4 | cd `dirname $0`/..
5 | export BASE_DIR=`pwd`
6 | pid=`ps ax | grep -i app.py | grep "${BASE_DIR}" | grep python3 | grep -v grep | awk '{print $1}'`
7 | if [ -z "$pid" ] ; then
8 | echo "No chatgpt-on-wechat running."
9 | exit -1;
10 | fi
11 |
12 | echo "The chatgpt-on-wechat(${pid}) is running..."
13 |
14 | kill ${pid}
15 |
16 | echo "Send shutdown request to chatgpt-on-wechat(${pid}) OK"
17 |
--------------------------------------------------------------------------------
/scripts/start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # run chatgpt-on-wechat in the background
3 |
4 | cd `dirname $0`/..
5 | export BASE_DIR=`pwd`
6 | echo $BASE_DIR
7 |
8 | # check the nohup.out log output file
9 | if [ ! -f "${BASE_DIR}/nohup.out" ]; then
10 | touch "${BASE_DIR}/nohup.out"
11 | echo "create file ${BASE_DIR}/nohup.out"
12 | fi
13 |
14 | nohup python3 "${BASE_DIR}/app.py" & tail -f "${BASE_DIR}/nohup.out"
15 |
16 | echo "Chat_on_webchat is starting,you can check the ${BASE_DIR}/nohup.out"
17 |
--------------------------------------------------------------------------------
/common/tmp_dir.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pathlib
3 |
4 | from config import conf
5 |
6 |
7 | class TmpDir(object):
8 | """A temporary directory that is deleted when the object is destroyed."""
9 |
10 | tmpFilePath = pathlib.Path("./tmp/")
11 |
12 | def __init__(self):
13 | pathExists = os.path.exists(self.tmpFilePath)
14 | if not pathExists:
15 | os.makedirs(self.tmpFilePath)
16 |
17 | def path(self):
18 | return str(self.tmpFilePath) + "/"
19 |
--------------------------------------------------------------------------------
/voice/azure/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "speech_synthesis_voice_name": "zh-CN-XiaoxiaoNeural",
3 | "auto_detect": true,
4 | "speech_synthesis_zh": "zh-CN-YunxiNeural",
5 | "speech_synthesis_en": "en-US-JacobNeural",
6 | "speech_synthesis_ja": "ja-JP-AoiNeural",
7 | "speech_synthesis_ko": "ko-KR-SoonBokNeural",
8 | "speech_synthesis_de": "de-DE-LouisaNeural",
9 | "speech_synthesis_fr": "fr-FR-BrigitteNeural",
10 | "speech_synthesis_es": "es-ES-LaiaNeural",
11 | "speech_recognition_language": "zh-CN"
12 | }
13 |
--------------------------------------------------------------------------------
/plugins/linkai/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "group_app_map": {
3 | "测试群名1": "default",
4 | "测试群名2": "Kv2fXJcH"
5 | },
6 | "midjourney": {
7 | "enabled": true,
8 | "auto_translate": true,
9 | "img_proxy": true,
10 | "max_tasks": 3,
11 | "max_tasks_per_user": 1,
12 | "use_image_create_prefix": true
13 | },
14 | "summary": {
15 | "enabled": true,
16 | "group_enabled": true,
17 | "max_file_size": 5000,
18 | "type": ["FILE", "SHARING"]
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .idea
3 | .vscode
4 | .venv
5 | .vs
6 | .wechaty/
7 | __pycache__/
8 | venv*
9 | *.pyc
10 | config.json
11 | QR.png
12 | nohup.out
13 | tmp
14 | plugins.json
15 | itchat.pkl
16 | *.log
17 | logs/
18 | user_datas.pkl
19 | chatgpt_tool_hub/
20 | plugins/**/
21 | !plugins/bdunit
22 | !plugins/dungeon
23 | !plugins/finish
24 | !plugins/godcmd
25 | !plugins/tool
26 | !plugins/banwords
27 | !plugins/banwords/**/
28 | plugins/banwords/__pycache__
29 | plugins/banwords/lib/__pycache__
30 | !plugins/hello
31 | !plugins/role
32 | !plugins/keyword
33 | !plugins/linkai
34 | client_config.json
35 |
--------------------------------------------------------------------------------
/docs/version/old-version.md:
--------------------------------------------------------------------------------
1 | ## Archived changelog
2 |
3 | 2023.04.26: Support deployment as a WeChat Work (enterprise WeChat) app, compatible with plugins, with voice and image interaction; an ideal personal assistant. See the usage docs. (contributed by @lanvent in #944)
4 |
5 | 2023.04.05: Support deployment on WeChat Official Accounts, compatible with plugins, with voice and image interaction. See the usage docs. (contributed by @JS00000 in #686)
6 |
7 | 2023.04.05: Add the tool plugin that lets ChatGPT use external tools; see the usage docs. Tool-related issues can be reported to chatgpt-tool-hub. (contributed by @goldfishh in #663)
8 |
9 | 2023.03.25: Support plugin-based development; plugins for role switching, text adventure games, admin commands, Stable Diffusion and more are available, see #578. (contributed by @lanvent in #565)
10 |
11 | 2023.03.09: Parse and reply to voice messages via the whisper API (more voice API services were added later); enable it with the config option "speech_recognition": true, see #415. (contributed by wanggang1987 in #385)
12 |
13 | 2023.02.09: QR-code login carries a risk of account restriction; use with caution, see #58.
--------------------------------------------------------------------------------
/plugins/banwords/README.md:
--------------------------------------------------------------------------------
1 |
2 | ## Plugin description
3 |
4 | A simple banned-words plugin. Word segmentation is not supported yet; import your own word list into `banwords.txt` in the plugin folder, one word per line. A reference list is [1](https://github.com/cjh0613/tencent-sensitive-words/blob/main/sensitive_words_lines.txt).
5 |
6 | Before use, copy `config.json.template` to `config.json` and adjust it.
7 |
8 | The plugin currently supports two default actions for incoming messages:
9 |
10 | - `ignore` : drop the message.
11 | - `replace` : replace banned words in the message with "*" and reply that the message violated the rules.
12 |
13 | ```json
14 | "action": "replace",
15 | "reply_filter": true,
16 | "reply_action": "ignore"
17 | ```
18 |
19 | In the config above:
20 |
21 | - `action`: default action for user messages
22 | - `reply_filter`: whether to also filter the bot's replies for banned words
23 | - `reply_action`: default action for replies when reply filtering is enabled
24 |
25 | ## Credits
26 |
27 | The search implementation comes from https://github.com/toolgood/ToolGood.Words
--------------------------------------------------------------------------------
/plugins/bdunit/README.md:
--------------------------------------------------------------------------------
1 | ## Plugin description
2 |
3 | Smart dialogue powered by Baidu UNIT.
4 |
5 | - 1. What it solves: commands that chatgpt cannot handle are delegated to Baidu UNIT, e.g. weather, date/time, arithmetic.
6 | - 2. Time questions: what time is it now, what is today's date
7 | - 3. Weather questions: what will the weather be in Guangzhou tomorrow, will it rain in Shenzhen this weekend
8 | - 4. Arithmetic questions: 23+45=?, 100-23=?, what is 35 in binary?
9 |
10 | ## Usage
11 |
12 | ### Get an API key
13 |
14 | Create your own application on the Baidu UNIT site and apply for a Baidu bot; pre-trained models can be imported into your application.
15 |
16 | see https://ai.baidu.com/unit/home#/home?track=61fe1b0d3407ce3face1d92cb5c291087095fc10c8377aaf and apply on the AI platform at https://console.bce.baidu.com/ai
17 |
18 | ### Config file
19 |
20 | Copy `config.json.template` in this folder to `config.json`.
21 |
22 | Fill in the API Key and Secret Key of the application obtained from the Baidu UNIT site:
23 |
24 | ``` json
25 | {
26 | "service_id": "s...", #"bot ID"
27 | "api_key": "",
28 | "secret_key": ""
29 | }
30 | ```
--------------------------------------------------------------------------------
/bridge/reply.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | from enum import Enum
4 |
5 |
6 | class ReplyType(Enum):
7 | TEXT = 1 # text
8 | VOICE = 2 # audio file
9 | IMAGE = 3 # image file
10 | IMAGE_URL = 4 # image URL
11 | VIDEO_URL = 5 # video URL
12 | FILE = 6 # file
13 | CARD = 7 # WeChat contact card, only supported by ntchat
14 | INVITE_ROOM = 8 # invite a friend into a group
15 | INFO = 9
16 | ERROR = 10
17 | TEXT_ = 11 # forced text
18 | VIDEO = 12
19 | MINIAPP = 13 # mini program
20 |
21 | def __str__(self):
22 | return self.name
23 |
24 |
25 | class Reply:
26 | def __init__(self, type: ReplyType = None, content=None):
27 | self.type = type
28 | self.content = content
29 |
30 | def __str__(self):
31 | return "Reply(type={}, content={})".format(self.type, self.content)
32 |
--------------------------------------------------------------------------------
/channel/wechatmp/common.py:
--------------------------------------------------------------------------------
1 | import web
2 | from wechatpy.crypto import WeChatCrypto
3 | from wechatpy.exceptions import InvalidSignatureException
4 | from wechatpy.utils import check_signature
5 |
6 | from config import conf
7 |
8 | MAX_UTF8_LEN = 2048
9 |
10 |
11 | class WeChatAPIException(Exception):
12 | pass
13 |
14 |
15 | def verify_server(data):
16 | try:
17 | signature = data.signature
18 | timestamp = data.timestamp
19 | nonce = data.nonce
20 | echostr = data.get("echostr", None)
21 | token = conf().get("wechatmp_token") # fill in according to the Official Account Platform -> Basic Configuration page
22 | check_signature(token, signature, timestamp, nonce)
23 | return echostr
24 | except InvalidSignatureException:
25 | raise web.Forbidden("Invalid signature")
26 | except Exception as e:
27 | raise web.Forbidden(str(e))
28 |
--------------------------------------------------------------------------------
/common/package_manager.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import pip
4 | from pip._internal import main as pipmain
5 |
6 | from common.log import _reset_logger, logger
7 |
8 |
9 | def install(package):
10 | pipmain(["install", package])
11 |
12 |
13 | def install_requirements(file):
14 | pipmain(["install", "-r", file, "--upgrade"])
15 | _reset_logger(logger)
16 |
17 |
18 | def check_dulwich():
19 | needwait = False
20 | for i in range(2):
21 | if needwait:
22 | time.sleep(3)
23 | needwait = False
24 | try:
25 | import dulwich
26 |
27 | return
28 | except ImportError:
29 | try:
30 | install("dulwich")
31 | except:
32 | needwait = True
33 | try:
34 | import dulwich
35 | except ImportError:
36 | raise ImportError("Unable to import dulwich")
37 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/2.feature.yml:
--------------------------------------------------------------------------------
1 | name: Feature request 🚀
2 | description: 提出你对项目的新想法或建议。
3 | labels: ['status: needs check']
4 | body:
5 | - type: markdown
6 | attributes:
7 | value: |
8 | 请在上方的`title`中填写简略总结,谢谢❤️。
9 | - type: checkboxes
10 | attributes:
11 | label: ⚠️ 搜索是否存在类似issue
12 | description: >
13 | 请在 [历史issue](https://github.com/zhayujie/chatgpt-on-wechat/issues) 中清空输入框,搜索关键词查找是否存在相似issue。
14 | options:
15 | - label: 我已经搜索过issues和disscussions,没有发现相似issue
16 | required: true
17 | - type: textarea
18 | attributes:
19 | label: 总结
20 | description: 描述feature的功能。
21 | - type: textarea
22 | attributes:
23 | label: 举例
24 | description: 提供聊天示例,草图或相关网址。
25 | - type: textarea
26 | attributes:
27 | label: 动机
28 | description: 描述你提出该feature的动机,比如没有这项feature对你的使用造成了怎样的影响。 请提供更详细的场景描述,这可能会帮助我们发现并提出更好的解决方案。
--------------------------------------------------------------------------------
/docker/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2.0'
2 | services:
3 | chatgpt-on-wechat:
4 | image: zhayujie/chatgpt-on-wechat
5 | container_name: chatgpt-on-wechat
6 | security_opt:
7 | - seccomp:unconfined
8 | environment:
9 | TZ: 'Asia/Shanghai'
10 | OPEN_AI_API_KEY: 'YOUR API KEY'
11 | MODEL: 'gpt-3.5-turbo'
12 | PROXY: ''
13 | SINGLE_CHAT_PREFIX: '["bot", "@bot"]'
14 | SINGLE_CHAT_REPLY_PREFIX: '"[bot] "'
15 | GROUP_CHAT_PREFIX: '["@bot"]'
16 | GROUP_NAME_WHITE_LIST: '["ChatGPT测试群", "ChatGPT测试群2"]'
17 | IMAGE_CREATE_PREFIX: '["画", "看", "找"]'
18 | CONVERSATION_MAX_TOKENS: 1000
19 | SPEECH_RECOGNITION: 'False'
20 | CHARACTER_DESC: '你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。'
21 | EXPIRES_IN_SECONDS: 3600
22 | USE_GLOBAL_PLUGIN_CONFIG: 'True'
23 | USE_LINKAI: 'False'
24 | LINKAI_API_KEY: ''
25 | LINKAI_APP_CODE: ''
26 |
--------------------------------------------------------------------------------
/channel/wechatcom/wechatcomapp_client.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import time
3 |
4 | from wechatpy.enterprise import WeChatClient
5 |
6 |
7 | class WechatComAppClient(WeChatClient):
8 | def __init__(self, corp_id, secret, access_token=None, session=None, timeout=None, auto_retry=True):
9 | super(WechatComAppClient, self).__init__(corp_id, secret, access_token, session, timeout, auto_retry)
10 | self.fetch_access_token_lock = threading.Lock()
11 |
12 | def fetch_access_token(self): # override the parent method; lock to prevent multiple threads from refreshing the access_token concurrently
13 | with self.fetch_access_token_lock:
14 | access_token = self.session.get(self.access_token_key)
15 | if access_token:
16 | if not self.expires_at:
17 | return access_token
18 | timestamp = time.time()
19 | if self.expires_at - timestamp > 60:
20 | return access_token
21 | return super().fetch_access_token()
22 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/pre-commit/pre-commit-hooks
3 | rev: v4.4.0
4 | hooks:
5 | - id: fix-byte-order-marker
6 | - id: check-case-conflict
7 | - id: check-merge-conflict
8 | - id: debug-statements
9 | - id: pretty-format-json
10 | types: [text]
11 | files: \.json(.template)?$
12 | args: [ --autofix , --no-ensure-ascii, --indent=2, --no-sort-keys]
13 | - id: trailing-whitespace
14 | exclude: '(\/|^)lib\/'
15 | args: [ --markdown-linebreak-ext=md ]
16 | - repo: https://github.com/PyCQA/isort
17 | rev: 5.12.0
18 | hooks:
19 | - id: isort
20 | exclude: '(\/|^)lib\/'
21 | - repo: https://github.com/psf/black
22 | rev: 23.3.0
23 | hooks:
24 | - id: black
25 | exclude: '(\/|^)lib\/'
26 | - repo: https://github.com/PyCQA/flake8
27 | rev: 6.0.0
28 | hooks:
29 | - id: flake8
30 | exclude: '(\/|^)lib\/'
--------------------------------------------------------------------------------
/requirements-optional.txt:
--------------------------------------------------------------------------------
1 | tiktoken>=0.3.2 # openai calculate token
2 |
3 | #voice
4 | pydub>=0.25.1 # need ffmpeg
5 | SpeechRecognition # google speech to text
6 | gTTS>=2.3.1 # google text to speech
7 | pyttsx3>=2.90 # pyttsx3 text to speech
8 | baidu_aip>=4.16.10 # baidu voice
9 | azure-cognitiveservices-speech # azure voice
10 | edge-tts # edge-tts
11 | numpy<=1.24.2
12 | langid # language detect
13 | elevenlabs==1.0.3 # elevenlabs TTS
14 |
15 | #install plugin
16 | dulwich
17 |
18 | # wechatmp && wechatcom
19 | web.py
20 | wechatpy
21 |
22 | # chatgpt-tool-hub plugin
23 | chatgpt_tool_hub==0.5.0
24 |
25 | # xunfei spark
26 | websocket-client==1.2.0
27 |
28 | # claude bot
29 | curl_cffi
30 | # claude API
31 | anthropic
32 |
33 | # tongyi qwen
34 | broadscope_bailian
35 |
36 | # google
37 | google-generativeai
38 |
39 | # dingtalk
40 | dingtalk_stream
41 |
42 | # zhipuai
43 | zhipuai>=2.0.1
44 |
45 | # tongyi qwen new sdk
46 | dashscope
47 |
48 | # tencentcloud sdk
49 | tencentcloud-sdk-python>=3.0.0
50 |
--------------------------------------------------------------------------------
/voice/elevent/elevent_voice.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | from elevenlabs.client import ElevenLabs
4 | from elevenlabs import save
5 | from bridge.reply import Reply, ReplyType
6 | from common.log import logger
7 | from common.tmp_dir import TmpDir
8 | from voice.voice import Voice
9 | from config import conf
10 |
11 | XI_API_KEY = conf().get("xi_api_key")
12 | client = ElevenLabs(api_key=XI_API_KEY)
13 | name = conf().get("xi_voice_id")
14 |
15 | class ElevenLabsVoice(Voice):
16 |
17 | def __init__(self):
18 | pass
19 |
20 | def voiceToText(self, voice_file):
21 | pass
22 |
23 | def textToVoice(self, text):
24 | audio = client.generate(
25 | text=text,
26 | voice=name,
27 | model='eleven_multilingual_v2'
28 | )
29 | fileName = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3"
30 | save(audio, fileName)
31 | logger.info("[ElevenLabs] textToVoice text={} voice file name={}".format(text, fileName))
32 | return Reply(ReplyType.VOICE, fileName)
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2022 zhayujie
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy
4 | of this software and associated documentation files (the "Software"), to deal
5 | in the Software without restriction, including without limitation the rights
6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 | copies of the Software, and to permit persons to whom the Software is
8 | furnished to do so, subject to the following conditions:
9 |
10 | The above copyright notice and this permission notice shall be included in all
11 | copies or substantial portions of the Software.
12 |
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 | SOFTWARE.
--------------------------------------------------------------------------------
/common/log.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import sys
3 |
4 |
5 | def _reset_logger(log):
6 | for handler in log.handlers:
7 | handler.close()
8 | log.removeHandler(handler)
9 | del handler
10 | log.handlers.clear()
11 | log.propagate = False
12 | console_handle = logging.StreamHandler(sys.stdout)
13 | console_handle.setFormatter(
14 | logging.Formatter(
15 | "[%(levelname)s][%(asctime)s][%(filename)s:%(lineno)d] - %(message)s",
16 | datefmt="%Y-%m-%d %H:%M:%S",
17 | )
18 | )
19 | file_handle = logging.FileHandler("run.log", encoding="utf-8")
20 | file_handle.setFormatter(
21 | logging.Formatter(
22 | "[%(levelname)s][%(asctime)s][%(filename)s:%(lineno)d] - %(message)s",
23 | datefmt="%Y-%m-%d %H:%M:%S",
24 | )
25 | )
26 | log.addHandler(file_handle)
27 | log.addHandler(console_handle)
28 |
29 |
30 | def _get_logger():
31 | log = logging.getLogger("log")
32 | _reset_logger(log)
33 | log.setLevel(logging.INFO)
34 | return log
35 |
36 |
37 | # logger handle
38 | logger = _get_logger()
39 |
--------------------------------------------------------------------------------
/config-template.json:
--------------------------------------------------------------------------------
1 | {
2 | "channel_type": "wx",
3 | "model": "",
4 | "open_ai_api_key": "YOUR API KEY",
5 | "claude_api_key": "YOUR API KEY",
6 | "text_to_image": "dall-e-2",
7 | "voice_to_text": "openai",
8 | "text_to_voice": "openai",
9 | "proxy": "",
10 | "hot_reload": false,
11 | "single_chat_prefix": [
12 | "bot",
13 | "@bot"
14 | ],
15 | "single_chat_reply_prefix": "[bot] ",
16 | "group_chat_prefix": [
17 | "@bot"
18 | ],
19 | "group_name_white_list": [
20 | "ChatGPT测试群",
21 | "ChatGPT测试群2"
22 | ],
23 | "image_create_prefix": [
24 | "画"
25 | ],
26 | "speech_recognition": true,
27 | "group_speech_recognition": false,
28 | "voice_reply_voice": false,
29 | "conversation_max_tokens": 2500,
30 | "expires_in_seconds": 3600,
31 | "character_desc": "你是基于大语言模型的AI智能助手,旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。",
32 | "temperature": 0.7,
33 | "subscribe_msg": "感谢您的关注!\n这里是AI智能助手,可以自由对话。\n支持语音对话。\n支持图片输入。\n支持图片输出,画字开头的消息将按要求创作图片。\n支持tool、角色扮演和文字冒险等丰富的插件。\n输入{trigger_prefix}#help 查看详细指令。",
34 | "use_linkai": false,
35 | "linkai_api_key": "",
36 | "linkai_app_code": ""
37 | }
38 |
--------------------------------------------------------------------------------
/lib/itchat/storage/messagequeue.py:
--------------------------------------------------------------------------------
1 | import logging
2 | try:
3 | import Queue as queue
4 | except ImportError:
5 | import queue
6 |
7 | from .templates import AttributeDict
8 |
9 | logger = logging.getLogger('itchat')
10 |
11 | class Queue(queue.Queue):
12 | def put(self, message):
13 | queue.Queue.put(self, Message(message))
14 |
15 | class Message(AttributeDict):
16 | def download(self, fileName):
17 | if hasattr(self.text, '__call__'):
18 | return self.text(fileName)
19 | else:
20 | return b''
21 | def __getitem__(self, value):
22 | if value in ('isAdmin', 'isAt'):
23 | v = value[0].upper() + value[1:] # ''[1:] == ''
24 | logger.debug('%s is expired in 1.3.0, use %s instead.' % (value, v))
25 | value = v
26 | return super(Message, self).__getitem__(value)
27 | def __str__(self):
28 | return '{%s}' % ', '.join(
29 | ['%s: %s' % (repr(k),repr(v)) for k,v in self.items()])
30 | def __repr__(self):
31 | return '<%s: %s>' % (self.__class__.__name__.split('.')[-1],
32 | self.__str__())
33 |
--------------------------------------------------------------------------------
/lib/itchat/LICENSE:
--------------------------------------------------------------------------------
1 | **The MIT License (MIT)**
2 |
3 | Copyright (c) 2017 LittleCoder ([littlecodersh@Github](https://github.com/littlecodersh))
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
6 |
7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
8 |
9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
10 |
--------------------------------------------------------------------------------
/docker/Dockerfile.latest:
--------------------------------------------------------------------------------
1 | FROM python:3.10-slim-bullseye
2 |
3 | LABEL maintainer="foo@bar.com"
4 | ARG TZ='Asia/Shanghai'
5 |
6 | ARG CHATGPT_ON_WECHAT_VER
7 |
8 | RUN echo /etc/apt/sources.list
9 | # RUN sed -i 's/deb.debian.org/mirrors.tuna.tsinghua.edu.cn/g' /etc/apt/sources.list
10 | ENV BUILD_PREFIX=/app
11 |
12 | ADD . ${BUILD_PREFIX}
13 |
14 | RUN apt-get update \
15 | &&apt-get install -y --no-install-recommends bash ffmpeg espeak libavcodec-extra\
16 | && cd ${BUILD_PREFIX} \
17 | && cp config-template.json config.json \
18 | && /usr/local/bin/python -m pip install --no-cache --upgrade pip \
19 | && pip install --no-cache -r requirements.txt \
20 | && pip install --no-cache -r requirements-optional.txt \
21 | && pip install azure-cognitiveservices-speech
22 |
23 | WORKDIR ${BUILD_PREFIX}
24 |
25 | ADD docker/entrypoint.sh /entrypoint.sh
26 |
27 | RUN chmod +x /entrypoint.sh \
28 | && mkdir -p /home/noroot \
29 | && groupadd -r noroot \
30 | && useradd -r -g noroot -s /bin/bash -d /home/noroot noroot \
31 | && chown -R noroot:noroot /home/noroot ${BUILD_PREFIX} /usr/local/lib
32 |
33 | USER noroot
34 |
35 | ENTRYPOINT ["/entrypoint.sh"]
36 |
--------------------------------------------------------------------------------
/plugins/role/README.md:
--------------------------------------------------------------------------------
1 | A chat plugin that makes the bot play a given role. Triggers:
2 |
3 | - `$角色/$role help/帮助` - print the list of supported roles.
4 | - `$角色/$role <角色名>` - make the AI play that role; role names support fuzzy matching.
5 | - `$停止扮演` - stop role playing.
6 |
7 | Add custom roles in `roles/roles.json`.
8 |
9 | (Most prompts come from https://github.com/rockbenben/ChatGPT-Shortcut/blob/main/src/data/users.tsx)
10 |
11 | Example:
12 | ```json
13 | {
14 | "title": "写作助理",
15 | "description": "As a writing improvement assistant, your task is to improve the spelling, grammar, clarity, concision, and overall readability of the text I provided, while breaking down long sentences, reducing repetition, and providing suggestions for improvement. Please provide only the corrected Chinese version of the text and avoid including explanations. Please treat every message I send later as text content.",
16 | "descn": "作为一名中文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性,同时分解长句,减少重复,并提供改进建议。请只提供文本的更正版本,避免包括解释。请把我之后的每一条消息都当作文本内容。",
17 | "wrapper": "内容是:\n\"%s\"",
18 | "remark": "最常使用的角色,用于优化文本的语法、清晰度和简洁度,提高可读性。"
19 | }
20 | ```
21 |
22 | - `title`: role name.
23 | - `description`: English prompt, used when triggered with `$role`.
24 | - `descn`: Chinese prompt, used when triggered with `$角色`.
25 | - `wrapper`: wraps the user's message to add emphasis and keep replies on topic.
26 | - `remark`: short description of the role, shown in the help text.
27 |
--------------------------------------------------------------------------------
/bot/zhipuai/zhipu_ai_image.py:
--------------------------------------------------------------------------------
1 | from common.log import logger
2 | from config import conf
3 |
4 |
5 | # Image generation API provided by ZhipuAI
6 |
7 | class ZhipuAIImage(object):
8 | def __init__(self):
9 | from zhipuai import ZhipuAI
10 | self.client = ZhipuAI(api_key=conf().get("zhipu_ai_api_key"))
11 |
12 | def create_img(self, query, retry_count=0, api_key=None, api_base=None):
13 | try:
14 | if conf().get("rate_limit_dalle"):
15 | return False, "请求太快了,请休息一下再问我吧"
16 | logger.info("[ZHIPU_AI] image_query={}".format(query))
17 | response = self.client.images.generations(
18 | prompt=query,
19 | n=1, # number of images to generate per request
20 | model=conf().get("text_to_image") or "cogview-3",
21 | size=conf().get("image_create_size", "1024x1024"), # image size, options: 256x256, 512x512, 1024x1024
22 | quality="standard",
23 | )
24 | image_url = response.data[0].url
25 | logger.info("[ZHIPU_AI] image_url={}".format(image_url))
26 | return True, image_url
27 | except Exception as e:
28 | logger.exception(e)
29 | return False, "画图出现问题,请休息一下再问我吧"
30 |
--------------------------------------------------------------------------------
/common/dequeue.py:
--------------------------------------------------------------------------------
1 | from queue import Full, Queue
2 | from time import monotonic as time
3 |
4 |
5 | # add implementation of putleft to Queue
6 | class Dequeue(Queue):
7 | def putleft(self, item, block=True, timeout=None):
8 | with self.not_full:
9 | if self.maxsize > 0:
10 | if not block:
11 | if self._qsize() >= self.maxsize:
12 | raise Full
13 | elif timeout is None:
14 | while self._qsize() >= self.maxsize:
15 | self.not_full.wait()
16 | elif timeout < 0:
17 | raise ValueError("'timeout' must be a non-negative number")
18 | else:
19 | endtime = time() + timeout
20 | while self._qsize() >= self.maxsize:
21 | remaining = endtime - time()
22 | if remaining <= 0.0:
23 | raise Full
24 | self.not_full.wait(remaining)
25 | self._putleft(item)
26 | self.unfinished_tasks += 1
27 | self.not_empty.notify()
28 |
29 | def putleft_nowait(self, item):
30 | return self.putleft(item, block=False)
31 |
32 | def _putleft(self, item):
33 | self.queue.appendleft(item)
34 |
--------------------------------------------------------------------------------
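A minimal usage sketch of `putleft()` above: an item pushed to the front is consumed before normally queued items.

```python
# Illustrative sketch: a high-priority item jumps the queue via putleft().
from common.dequeue import Dequeue

q = Dequeue()
q.put("normal task")
q.putleft("urgent task")

print(q.get())  # -> "urgent task"
print(q.get())  # -> "normal task"
```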
/channel/channel.py:
--------------------------------------------------------------------------------
1 | """
2 | Message sending channel abstract class
3 | """
4 |
5 | from bridge.bridge import Bridge
6 | from bridge.context import Context
7 | from bridge.reply import *
8 |
9 |
10 | class Channel(object):
11 | channel_type = ""
12 | NOT_SUPPORT_REPLYTYPE = [ReplyType.VOICE, ReplyType.IMAGE]
13 |
14 | def startup(self):
15 | """
16 | init channel
17 | """
18 | raise NotImplementedError
19 |
20 | def handle_text(self, msg):
21 | """
22 | process received msg
23 | :param msg: message object
24 | """
25 | raise NotImplementedError
26 |
27 | # unified send function; each Channel implements it and dispatches on the reply's type field
28 | def send(self, reply: Reply, context: Context):
29 | """
30 | send message to user
31 | :param reply: reply content to send
32 | :param context: context of the original message
33 | :return:
34 | """
35 | raise NotImplementedError
36 |
37 | def build_reply_content(self, query, context: Context = None) -> Reply:
38 | return Bridge().fetch_reply_content(query, context)
39 |
40 | def build_voice_to_text(self, voice_file) -> Reply:
41 | return Bridge().fetch_voice_to_text(voice_file)
42 |
43 | def build_text_to_voice(self, text) -> Reply:
44 | return Bridge().fetch_text_to_voice(text)
45 |
--------------------------------------------------------------------------------
/common/expired_dict.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 |
3 |
4 | class ExpiredDict(dict):
5 | def __init__(self, expires_in_seconds):
6 | super().__init__()
7 | self.expires_in_seconds = expires_in_seconds
8 |
9 | def __getitem__(self, key):
10 | value, expiry_time = super().__getitem__(key)
11 | if datetime.now() > expiry_time:
12 | del self[key]
13 | raise KeyError("expired {}".format(key))
14 | self.__setitem__(key, value)
15 | return value
16 |
17 | def __setitem__(self, key, value):
18 | expiry_time = datetime.now() + timedelta(seconds=self.expires_in_seconds)
19 | super().__setitem__(key, (value, expiry_time))
20 |
21 | def get(self, key, default=None):
22 | try:
23 | return self[key]
24 | except KeyError:
25 | return default
26 |
27 | def __contains__(self, key):
28 | try:
29 | self[key]
30 | return True
31 | except KeyError:
32 | return False
33 |
34 | def keys(self):
35 | keys = list(super().keys())
36 | return [key for key in keys if key in self]
37 |
38 | def items(self):
39 | return [(key, self[key]) for key in self.keys()]
40 |
41 | def __iter__(self):
42 | return self.keys().__iter__()
43 |
--------------------------------------------------------------------------------
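A minimal usage sketch of the TTL behaviour above (the 2-second TTL is arbitrary, chosen only to keep the example quick):

```python
# Illustrative sketch: entries are evicted on access once their TTL has elapsed.
import time

from common.expired_dict import ExpiredDict

cache = ExpiredDict(expires_in_seconds=2)
cache["token"] = "abc123"

print(cache.get("token"))  # -> "abc123"
time.sleep(3)
print(cache.get("token"))  # -> None (expired)
```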
/plugins/finish/finish.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | import plugins
4 | from bridge.context import ContextType
5 | from bridge.reply import Reply, ReplyType
6 | from common.log import logger
7 | from config import conf
8 | from plugins import *
9 |
10 |
11 | @plugins.register(
12 | name="Finish",
13 | desire_priority=-999,
14 | hidden=True,
15 | desc="A plugin that checks unknown commands",
16 | version="1.0",
17 | author="js00000",
18 | )
19 | class Finish(Plugin):
20 | def __init__(self):
21 | super().__init__()
22 | self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context
23 | logger.info("[Finish] inited")
24 |
25 | def on_handle_context(self, e_context: EventContext):
26 | if e_context["context"].type != ContextType.TEXT:
27 | return
28 |
29 | content = e_context["context"].content
30 | logger.debug("[Finish] on_handle_context. content: %s" % content)
31 | trigger_prefix = conf().get("plugin_trigger_prefix", "$")
32 | if content.startswith(trigger_prefix):
33 | reply = Reply()
34 | reply.type = ReplyType.ERROR
35 | reply.content = "未知插件命令\n查看插件命令列表请输入#help 插件名\n"
36 | e_context["reply"] = reply
37 | e_context.action = EventAction.BREAK_PASS # end the event and skip the default handling of the context
38 |
39 | def get_help_text(self, **kwargs):
40 | return ""
41 |
--------------------------------------------------------------------------------
/plugins/source.json:
--------------------------------------------------------------------------------
1 | {
2 | "repo": {
3 | "sdwebui": {
4 | "url": "https://github.com/lanvent/plugin_sdwebui.git",
5 | "desc": "利用stable-diffusion画图的插件"
6 | },
7 | "replicate": {
8 | "url": "https://github.com/lanvent/plugin_replicate.git",
9 | "desc": "利用replicate api画图的插件"
10 | },
11 | "summary": {
12 | "url": "https://github.com/lanvent/plugin_summary.git",
13 | "desc": "总结聊天记录的插件"
14 | },
15 | "Apilot": {
16 | "url": "https://github.com/6vision/Apilot.git",
17 | "desc": "通过api直接查询早报、热榜、快递、天气等实用信息的插件"
18 | },
19 | "pictureChange": {
20 | "url": "https://github.com/Yanyutin753/pictureChange.git",
21 | "desc": "1. 支持百度AI和Stable Diffusion WebUI进行图像处理,提供多种模型选择,支持图生图、文生图自定义模板。2. 支持Suno音乐AI可将图像和文字转为音乐。3. 支持自定义模型进行文件、图片总结功能。4. 支持管理员控制群聊内容与参数和功能改变。"
22 | },
23 | "Blackroom": {
24 | "url": "https://github.com/dividduang/blackroom.git",
25 | "desc": "小黑屋插件,被拉进小黑屋的人将不能使用@bot的功能的插件"
26 | },
27 | "midjourney": {
28 | "url": "https://github.com/baojingyu/midjourney.git",
29 | "desc": "利用midjourney实现ai绘图的的插件"
30 | },
31 | "solitaire": {
32 | "url": "https://github.com/Wang-zhechao/solitaire.git",
33 | "desc": "机器人微信接龙插件"
34 | },
35 | "HighSpeedTicket": {
36 | "url": "https://github.com/He0607/HighSpeedTicket.git",
37 | "desc": "高铁(火车)票查询插件"
38 | }
39 | }
40 | }
41 |
--------------------------------------------------------------------------------
/lib/itchat/config.py:
--------------------------------------------------------------------------------
1 | import os, platform
2 |
3 | VERSION = '1.5.0.dev'
4 |
5 | # use this environment variable to initialize the async & sync components
6 | ASYNC_COMPONENTS = os.environ.get('ITCHAT_UOS_ASYNC', False)
7 |
8 | BASE_URL = 'https://login.weixin.qq.com'
9 | OS = platform.system() # Windows, Linux, Darwin
10 | DIR = os.getcwd()
11 | DEFAULT_QR = 'QR.png'
12 | TIMEOUT = (10, 60)
13 |
14 | USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36'
15 |
16 | UOS_PATCH_CLIENT_VERSION = '2.0.0'
17 | UOS_PATCH_EXTSPAM = 'Go8FCIkFEokFCggwMDAwMDAwMRAGGvAESySibk50w5Wb3uTl2c2h64jVVrV7gNs06GFlWplHQbY/5FfiO++1yH4ykCyNPWKXmco+wfQzK5R98D3so7rJ5LmGFvBLjGceleySrc3SOf2Pc1gVehzJgODeS0lDL3/I/0S2SSE98YgKleq6Uqx6ndTy9yaL9qFxJL7eiA/R3SEfTaW1SBoSITIu+EEkXff+Pv8NHOk7N57rcGk1w0ZzRrQDkXTOXFN2iHYIzAAZPIOY45Lsh+A4slpgnDiaOvRtlQYCt97nmPLuTipOJ8Qc5pM7ZsOsAPPrCQL7nK0I7aPrFDF0q4ziUUKettzW8MrAaiVfmbD1/VkmLNVqqZVvBCtRblXb5FHmtS8FxnqCzYP4WFvz3T0TcrOqwLX1M/DQvcHaGGw0B0y4bZMs7lVScGBFxMj3vbFi2SRKbKhaitxHfYHAOAa0X7/MSS0RNAjdwoyGHeOepXOKY+h3iHeqCvgOH6LOifdHf/1aaZNwSkGotYnYScW8Yx63LnSwba7+hESrtPa/huRmB9KWvMCKbDThL/nne14hnL277EDCSocPu3rOSYjuB9gKSOdVmWsj9Dxb/iZIe+S6AiG29Esm+/eUacSba0k8wn5HhHg9d4tIcixrxveflc8vi2/wNQGVFNsGO6tB5WF0xf/plngOvQ1/ivGV/C1Qpdhzznh0ExAVJ6dwzNg7qIEBaw+BzTJTUuRcPk92Sn6QDn2Pu3mpONaEumacjW4w6ipPnPw+g2TfywJjeEcpSZaP4Q3YV5HG8D6UjWA4GSkBKculWpdCMadx0usMomsSS/74QgpYqcPkmamB4nVv1JxczYITIqItIKjD35IGKAUwAA=='
18 |
--------------------------------------------------------------------------------
/common/token_bucket.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import time
3 |
4 |
5 | class TokenBucket:
6 | def __init__(self, tpm, timeout=None):
7 | self.capacity = int(tpm) # bucket capacity
8 | self.tokens = 0 # start with zero tokens
9 | self.rate = int(tpm) / 60 # token generation rate per second
10 | self.timeout = timeout # timeout while waiting for a token
11 | self.cond = threading.Condition() # condition variable
12 | self.is_running = True
13 | # start the token-generating thread
14 | threading.Thread(target=self._generate_tokens).start()
15 |
16 | def _generate_tokens(self):
17 | """generate tokens"""
18 | while self.is_running:
19 | with self.cond:
20 | if self.tokens < self.capacity:
21 | self.tokens += 1
22 | self.cond.notify() # wake up a thread waiting for a token
23 | time.sleep(1 / self.rate)
24 |
25 | def get_token(self):
26 | """获取令牌"""
27 | with self.cond:
28 | while self.tokens <= 0:
29 | flag = self.cond.wait(self.timeout)
30 | if not flag: # 超时
31 | return False
32 | self.tokens -= 1
33 | return True
34 |
35 | def close(self):
36 | self.is_running = False
37 |
38 |
39 | if __name__ == "__main__":
40 | token_bucket = TokenBucket(20, None) # a bucket that produces 20 tokens per minute
41 | # token_bucket = TokenBucket(20, 0.1)
42 | for i in range(3):
43 | if token_bucket.get_token():
44 | print(f"request {i+1} succeeded")
45 | token_bucket.close()
46 |
--------------------------------------------------------------------------------
/plugins/hello/README.md:
--------------------------------------------------------------------------------
1 | ## Plugin description
2 |
3 | Lets you customise the prompts used for group-join welcomes, group "pat-pat" (拍一拍) messages and group-exit messages, and also supports a fixed welcome message per group.
4 |
5 | This plugin also serves as the example for building your own plugins; see the [plugin development guide](https://github.com/zhayujie/chatgpt-on-wechat/tree/master/plugins)
6 |
7 | ## Plugin configuration
8 |
9 | Copy the `config.json.template` template in the `plugins/hello` directory to the effective `config.json`. (If not configured, the defaults from `config.json.template` are used.)
10 |
11 | The configuration options are explained below:
12 |
13 | ```bash
14 | {
15 | "group_welc_fixed_msg": { ## 这里可以为特定群里配置特定的固定欢迎语
16 | "群聊1": "群聊1的固定欢迎语",
17 | "群聊2": "群聊2的固定欢迎语"
18 | },
19 |
20 | "group_welc_prompt": "请你随机使用一种风格说一句问候语来欢迎新用户\"{nickname}\"加入群聊。", ## 群聊随机欢迎语的提示词
21 |
22 | "group_exit_prompt": "请你随机使用一种风格跟其他群用户说他违反规则\"{nickname}\"退出群聊。", ## 移出群聊的提示词
23 |
24 | "patpat_prompt": "请你随机使用一种风格介绍你自己,并告诉用户输入#help可以查看帮助信息。", ## 群内拍一拍的提示词
25 |
26 | "use_character_desc": false ## 是否在Hello插件中使用LinkAI应用的系统设定
27 | }
28 | ```
29 |
30 |
31 | 注意:
32 |
33 | - 设置全局的用户进群固定欢迎语:在***项目根目录下***的`config.json`文件里添加参数`"group_welcome_msg": "" `,参考 [#1482](https://github.com/zhayujie/chatgpt-on-wechat/pull/1482)
34 | - 为每个群设置固定的欢迎语,可以在`"group_welc_fixed_msg": {}`配置群聊名和对应的固定欢迎语,优先级高于全局固定欢迎语
35 | - 如果没有配置以上两个参数,则使用随机欢迎语,如需设定风格,语言等,修改`"group_welc_prompt": `即可
36 | - 如果使用LinkAI的服务,想在随机欢迎中结合LinkAI应用的设定,配置`"use_character_desc": true `
37 | - 实际 `config.json` 配置中应保证json格式,不应携带 '#' 及后面的注释
38 | - 如果是`docker`部署,可通过映射 `plugins/config.json` 到容器中来完成插件配置,参考[文档](https://github.com/zhayujie/chatgpt-on-wechat#3-%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8)
39 |
40 |
41 |
42 |
--------------------------------------------------------------------------------
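As an illustration of the configuration described above (assuming the template has been copied to `plugins/hello/config.json` and the script is run from the project root), a minimal sketch of how the per-group fixed welcome and the `{nickname}` prompt placeholder fit together:

```python
import json

# Load the hello plugin config (copied from config.json.template as described above).
with open("plugins/hello/config.json", encoding="utf-8") as f:
    conf = json.load(f)

group_name = "群聊1"
nickname = "小明"

# A per-group fixed welcome takes priority over the random-welcome prompt.
fixed = conf.get("group_welc_fixed_msg", {}).get(group_name)
if fixed:
    welcome = fixed
else:
    welcome = conf["group_welc_prompt"].format(nickname=nickname)
print(welcome)
```
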
/bot/baidu/baidu_unit_bot.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | import requests
4 |
5 | from bot.bot import Bot
6 | from bridge.reply import Reply, ReplyType
7 |
8 |
9 | # Baidu Unit对话接口 (可用, 但能力较弱)
10 | class BaiduUnitBot(Bot):
11 | def reply(self, query, context=None):
12 | token = self.get_token()
13 | url = "https://aip.baidubce.com/rpc/2.0/unit/service/v3/chat?access_token=" + token
14 | post_data = (
15 | '{"version":"3.0","service_id":"S73177","session_id":"","log_id":"7758521","skill_ids":["1221886"],"request":{"terminal_id":"88888","query":"'
16 | + query
17 | + '", "hyper_params": {"chat_custom_bot_profile": 1}}}'
18 | )
19 | print(post_data)
20 | headers = {"content-type": "application/x-www-form-urlencoded"}
21 | response = requests.post(url, data=post_data.encode(), headers=headers)
22 | if response:
23 | reply = Reply(
24 | ReplyType.TEXT,
25 | response.json()["result"]["context"]["SYS_PRESUMED_HIST"][1],
26 | )
27 | return reply
28 |
29 | def get_token(self):
30 | access_key = "YOUR_ACCESS_KEY"
31 | secret_key = "YOUR_SECRET_KEY"
32 | host = "https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=" + access_key + "&client_secret=" + secret_key
33 | response = requests.get(host)
34 | if response:
35 | print(response.json())
36 | return response.json()["access_token"]
37 |
--------------------------------------------------------------------------------
/voice/edge/edge_voice.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import edge_tts
4 | import asyncio
5 |
6 | from bridge.reply import Reply, ReplyType
7 | from common.log import logger
8 | from common.tmp_dir import TmpDir
9 | from voice.voice import Voice
10 |
11 |
12 | class EdgeVoice(Voice):
13 |
14 | def __init__(self):
15 | '''
16 | # 普通话
17 | zh-CN-XiaoxiaoNeural
18 | zh-CN-XiaoyiNeural
19 | zh-CN-YunjianNeural
20 | zh-CN-YunxiNeural
21 | zh-CN-YunxiaNeural
22 | zh-CN-YunyangNeural
23 | # 地方口音
24 | zh-CN-liaoning-XiaobeiNeural
25 | zh-CN-shaanxi-XiaoniNeural
26 | # 粤语
27 | zh-HK-HiuGaaiNeural
28 | zh-HK-HiuMaanNeural
29 | zh-HK-WanLungNeural
30 | # 湾湾腔
31 | zh-TW-HsiaoChenNeural
32 | zh-TW-HsiaoYuNeural
33 | zh-TW-YunJheNeural
34 | '''
35 | self.voice = "zh-CN-YunjianNeural"
36 |
37 | def voiceToText(self, voice_file):
38 | pass
39 |
40 | async def gen_voice(self, text, fileName):
41 | communicate = edge_tts.Communicate(text, self.voice)
42 | await communicate.save(fileName)
43 |
44 | def textToVoice(self, text):
45 | fileName = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3"
46 |
47 | asyncio.run(self.gen_voice(text, fileName))
48 |
49 | logger.info("[EdgeTTS] textToVoice text={} voice file name={}".format(text, fileName))
50 | return Reply(ReplyType.VOICE, fileName)
51 |
--------------------------------------------------------------------------------
/lib/itchat/log.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | class LogSystem(object):
4 | handlerList = []
5 | showOnCmd = True
6 | loggingLevel = logging.INFO
7 | loggingFile = None
8 | def __init__(self):
9 | self.logger = logging.getLogger('itchat')
10 | self.logger.addHandler(logging.NullHandler())
11 | self.logger.setLevel(self.loggingLevel)
12 | self.cmdHandler = logging.StreamHandler()
13 | self.fileHandler = None
14 | self.logger.addHandler(self.cmdHandler)
15 | def set_logging(self, showOnCmd=True, loggingFile=None,
16 | loggingLevel=logging.INFO):
17 | if showOnCmd != self.showOnCmd:
18 | if showOnCmd:
19 | self.logger.addHandler(self.cmdHandler)
20 | else:
21 | self.logger.removeHandler(self.cmdHandler)
22 | self.showOnCmd = showOnCmd
23 | if loggingFile != self.loggingFile:
24 | if self.loggingFile is not None: # clear old fileHandler
25 | self.logger.removeHandler(self.fileHandler)
26 | self.fileHandler.close()
27 | if loggingFile is not None: # add new fileHandler
28 | self.fileHandler = logging.FileHandler(loggingFile)
29 | self.logger.addHandler(self.fileHandler)
30 | self.loggingFile = loggingFile
31 | if loggingLevel != self.loggingLevel:
32 | self.logger.setLevel(loggingLevel)
33 | self.loggingLevel = loggingLevel
34 |
35 | ls = LogSystem()
36 | set_logging = ls.set_logging
37 |
--------------------------------------------------------------------------------
/plugins/event.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | from enum import Enum
4 |
5 |
6 | class Event(Enum):
7 | ON_RECEIVE_MESSAGE = 1 # 收到消息
8 | """
9 | e_context = { "channel": 消息channel, "context" : 本次消息的context}
10 | """
11 |
12 | ON_HANDLE_CONTEXT = 2 # 处理消息前
13 | """
14 | e_context = { "channel": 消息channel, "context" : 本次消息的context, "reply" : 目前的回复,初始为空 }
15 | """
16 |
17 | ON_DECORATE_REPLY = 3 # 得到回复后准备装饰
18 | """
19 | e_context = { "channel": 消息channel, "context" : 本次消息的context, "reply" : 目前的回复 }
20 | """
21 |
22 | ON_SEND_REPLY = 4 # 发送回复前
23 | """
24 | e_context = { "channel": 消息channel, "context" : 本次消息的context, "reply" : 目前的回复 }
25 | """
26 |
27 | # AFTER_SEND_REPLY = 5 # 发送回复后
28 |
29 |
30 | class EventAction(Enum):
31 | CONTINUE = 1 # 事件未结束,继续交给下个插件处理,如果没有下个插件,则交付给默认的事件处理逻辑
32 | BREAK = 2 # 事件结束,不再给下个插件处理,交付给默认的事件处理逻辑
33 | BREAK_PASS = 3 # 事件结束,不再给下个插件处理,不交付给默认的事件处理逻辑
34 |
35 |
36 | class EventContext:
37 |     def __init__(self, event, econtext=None):
38 |         self.event = event
39 |         self.econtext = econtext if econtext is not None else {}  # avoid sharing a mutable default dict
40 |         self.action = EventAction.CONTINUE
41 |
42 | def __getitem__(self, key):
43 | return self.econtext[key]
44 |
45 | def __setitem__(self, key, value):
46 | self.econtext[key] = value
47 |
48 | def __delitem__(self, key):
49 | del self.econtext[key]
50 |
51 | def is_pass(self):
52 | return self.action == EventAction.BREAK_PASS
53 |
54 | def is_break(self):
55 | return self.action == EventAction.BREAK or self.action == EventAction.BREAK_PASS
56 |
--------------------------------------------------------------------------------
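A minimal, illustrative handler showing how a plugin consumes an `EventContext` for `ON_HANDLE_CONTEXT`: it fills in a reply and uses `BREAK_PASS` so that neither later plugins nor the default logic run.

```python
from bridge.reply import Reply, ReplyType
from plugins.event import EventAction, EventContext


def on_handle_context(e_context: EventContext):
    context = e_context["context"]
    if context.content == "ping":
        e_context["reply"] = Reply(ReplyType.TEXT, "pong")
        e_context.action = EventAction.BREAK_PASS  # reply is final, skip the default handling
    else:
        e_context.action = EventAction.CONTINUE  # let the next plugin / default logic handle it
```
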
/plugins/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "godcmd": {
3 | "password": "",
4 | "admin_users": []
5 | },
6 | "banwords": {
7 | "action": "replace",
8 | "reply_filter": true,
9 | "reply_action": "ignore"
10 | },
11 | "tool": {
12 | "tools": [
13 | "url-get",
14 | "meteo-weather"
15 | ],
16 | "kwargs": {
17 | "top_k_results": 2,
18 | "no_default": false,
19 | "model_name": "gpt-3.5-turbo"
20 | }
21 | },
22 | "linkai": {
23 | "group_app_map": {
24 | "测试群1": "default",
25 | "测试群2": "Kv2fXJcH"
26 | },
27 | "midjourney": {
28 | "enabled": true,
29 | "auto_translate": true,
30 | "img_proxy": true,
31 | "max_tasks": 3,
32 | "max_tasks_per_user": 1,
33 | "use_image_create_prefix": true
34 | },
35 | "summary": {
36 | "enabled": true,
37 | "group_enabled": true,
38 | "max_file_size": 5000,
39 | "type": ["FILE", "SHARING"]
40 | }
41 | },
42 | "hello": {
43 | "group_welc_fixed_msg": {
44 | "群聊1": "群聊1的固定欢迎语",
45 | "群聊2": "群聊2的固定欢迎语"
46 | },
47 | "group_welc_prompt": "请你随机使用一种风格说一句问候语来欢迎新用户\"{nickname}\"加入群聊。",
48 |
49 | "group_exit_prompt": "请你随机使用一种风格跟其他群用户说他违反规则\"{nickname}\"退出群聊。",
50 |
51 | "patpat_prompt": "请你随机使用一种风格介绍你自己,并告诉用户输入#help可以查看帮助信息。",
52 |
53 | "use_character_desc": false
54 | },
55 | "Apilot": {
56 | "alapi_token": "xxx",
57 | "morning_news_text_enabled": false
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
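A small sketch of how one section of this merged plugin configuration can be read at runtime via `pconf()`, assuming the plugin config has already been loaded (for example by `PluginManager().load_plugins()`):

```python
from config import pconf

hello_conf = pconf("hello") or {}
print(hello_conf.get("group_welc_prompt"))
print(hello_conf.get("use_character_desc", False))
```
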
/voice/factory.py:
--------------------------------------------------------------------------------
1 | """
2 | voice factory
3 | """
4 |
5 |
6 | def create_voice(voice_type):
7 | """
8 | create a voice instance
9 | :param voice_type: voice type code
10 | :return: voice instance
11 | """
12 | if voice_type == "baidu":
13 | from voice.baidu.baidu_voice import BaiduVoice
14 |
15 | return BaiduVoice()
16 | elif voice_type == "google":
17 | from voice.google.google_voice import GoogleVoice
18 |
19 | return GoogleVoice()
20 | elif voice_type == "openai":
21 | from voice.openai.openai_voice import OpenaiVoice
22 |
23 | return OpenaiVoice()
24 | elif voice_type == "pytts":
25 | from voice.pytts.pytts_voice import PyttsVoice
26 |
27 | return PyttsVoice()
28 | elif voice_type == "azure":
29 | from voice.azure.azure_voice import AzureVoice
30 |
31 | return AzureVoice()
32 | elif voice_type == "elevenlabs":
33 | from voice.elevent.elevent_voice import ElevenLabsVoice
34 |
35 | return ElevenLabsVoice()
36 |
37 | elif voice_type == "linkai":
38 | from voice.linkai.linkai_voice import LinkAIVoice
39 |
40 | return LinkAIVoice()
41 | elif voice_type == "ali":
42 | from voice.ali.ali_voice import AliVoice
43 |
44 | return AliVoice()
45 | elif voice_type == "edge":
46 | from voice.edge.edge_voice import EdgeVoice
47 |
48 | return EdgeVoice()
49 | elif voice_type == "xunfei":
50 | from voice.xunfei.xunfei_voice import XunfeiVoice
51 |
52 | return XunfeiVoice()
53 | elif voice_type == "tencent":
54 | from voice.tencent.tencent_voice import TencentVoice
55 |
56 | return TencentVoice()
57 |     raise RuntimeError("unsupported voice_type: {}".format(voice_type))
58 |
--------------------------------------------------------------------------------
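Usage sketch for the factory above, assuming the optional `edge-tts` dependency is installed:

```python
from voice.factory import create_voice

voice = create_voice("edge")             # any type code handled above, e.g. "baidu", "azure", "edge"
reply = voice.textToVoice("你好,世界")    # returns Reply(ReplyType.VOICE, <path to the generated audio file>)
print(reply.content)
```
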
/voice/baidu/README.md:
--------------------------------------------------------------------------------
1 | ## 说明
2 | 百度语音识别与合成参数说明
3 | 百度语音依赖,经常会出现问题,可能就是缺少依赖:
4 | pip install baidu-aip
5 | pip install pydub
6 | pip install pysilk
7 | 还有ffmpeg,不同系统安装方式不同
8 |
9 | 系统中收到的语音文件为mp3格式(wx)或者sil格式(wxy),如果要识别需要转换为pcm格式,转换后的文件为16k采样率,单声道,16bit的pcm文件
10 | 发送时又需要(wx)转换为mp3格式,转换后的文件为16k采样率,单声道,16bit的pcm文件,(wxy)转换为sil格式,还要计算声音长度,发送时需要带上声音长度
11 | 这些事情都在audio_convert.py中封装了,直接调用即可
12 |
13 |
14 | 参数说明
15 | 识别参数
16 | https://ai.baidu.com/ai-doc/SPEECH/Vk38lxily
17 | 合成参数
18 | https://ai.baidu.com/ai-doc/SPEECH/Gk38y8lzk
19 |
20 | ## 使用说明
21 | 分两个地方配置
22 |
23 | 1、def voiceToText(self, filename) 函数中调用的百度语音识别接口 asr(参数),其配置见 CHATGPT-ON-WECHAT 工程目录下的`config.json`文件和 config.py 文件。
24 | 参数 可需 描述
25 | app_id 必填 应用的APPID
26 | api_key 必填 应用的APIKey
27 | secret_key 必填 应用的SecretKey
28 | dev_pid 必填 语言选择,填写语言对应的dev_pid值
29 |
30 | 2、def textToVoice(self, text) 函数中调用的百度语音合成接口 synthesis(参数),其配置在本目录下的`config.json`文件中进行。
31 | 参数 可需 描述
32 | tex 必填 合成的文本,使用UTF-8编码,请注意文本长度必须小于1024字节
33 | lan 必填 固定值zh。语言选择,目前只有中英文混合模式,填写固定值zh
34 | spd 选填 语速,取值0-15,默认为5中语速
35 | pit 选填 音调,取值0-15,默认为5中语调
36 | vol 选填 音量,取值0-15,默认为5中音量(取值为0时为音量最小值,并非为无声)
37 | per(基础音库) 选填 度小宇=1,度小美=0,度逍遥(基础)=3,度丫丫=4
38 | per(精品音库) 选填 度逍遥(精品)=5003,度小鹿=5118,度博文=106,度小童=110,度小萌=111,度米朵=103,度小娇=5
39 | aue 选填 3为mp3格式(默认); 4为pcm-16k;5为pcm-8k;6为wav(内容同pcm-16k); 注意aue=4或者6是语音识别要求的格式,但是音频内容不是语音识别要求的自然人发音,所以识别效果会受影响。
40 |
41 | 关于per参数的说明,注意您购买的哪个音库,就填写哪个音库的参数,否则会报错。如果您购买的是基础音库,那么per参数只能填写0到4,如果您购买的是精品音库,那么per参数只能填写5003,5118,106,110,111,103,5其他的都会报错。
42 | ### 配置文件
43 |
44 | 将文件夹中`config.json.template`复制为`config.json`。
45 |
46 | ``` json
47 | {
48 | "lang": "zh",
49 | "ctp": 1,
50 | "spd": 5,
51 | "pit": 5,
52 | "vol": 5,
53 | "per": 0
54 | }
55 | ```
--------------------------------------------------------------------------------
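A rough sketch of the two calls described above, using the `baidu-aip` SDK mentioned in the dependency list. `APP_ID`/`API_KEY`/`SECRET_KEY` are placeholders for your own credentials, and the exact option names follow the Baidu docs linked above:

```python
from aip import AipSpeech

client = AipSpeech("APP_ID", "API_KEY", "SECRET_KEY")  # placeholder credentials

# Synthesis: options mirror the config.json shown above (spd/pit/vol/per).
audio = client.synthesis("你好", "zh", 1, {"spd": 5, "pit": 5, "vol": 5, "per": 0})
if not isinstance(audio, dict):  # bytes on success, an error dict on failure
    with open("reply.mp3", "wb") as f:
        f.write(audio)

# Recognition: expects 16k sample-rate, mono, 16-bit PCM, as described above.
with open("voice.pcm", "rb") as f:
    result = client.asr(f.read(), "pcm", 16000, {"dev_pid": 1537})  # 1537 = Mandarin
print(result.get("result"))
```
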
/voice/google/google_voice.py:
--------------------------------------------------------------------------------
1 | """
2 | google voice service
3 | """
4 |
5 | import time
6 |
7 | import speech_recognition
8 | from gtts import gTTS
9 |
10 | from bridge.reply import Reply, ReplyType
11 | from common.log import logger
12 | from common.tmp_dir import TmpDir
13 | from voice.voice import Voice
14 |
15 |
16 | class GoogleVoice(Voice):
17 | recognizer = speech_recognition.Recognizer()
18 |
19 | def __init__(self):
20 | pass
21 |
22 | def voiceToText(self, voice_file):
23 | with speech_recognition.AudioFile(voice_file) as source:
24 | audio = self.recognizer.record(source)
25 | try:
26 | text = self.recognizer.recognize_google(audio, language="zh-CN")
27 | logger.info("[Google] voiceToText text={} voice file name={}".format(text, voice_file))
28 | reply = Reply(ReplyType.TEXT, text)
29 | except speech_recognition.UnknownValueError:
30 | reply = Reply(ReplyType.ERROR, "抱歉,我听不懂")
31 | except speech_recognition.RequestError as e:
32 | reply = Reply(ReplyType.ERROR, "抱歉,无法连接到 Google 语音识别服务;{0}".format(e))
33 | finally:
34 | return reply
35 |
36 | def textToVoice(self, text):
37 | try:
38 | # Avoid the same filename under multithreading
39 | mp3File = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3"
40 | tts = gTTS(text=text, lang="zh")
41 | tts.save(mp3File)
42 | logger.info("[Google] textToVoice text={} voice file name={}".format(text, mp3File))
43 | reply = Reply(ReplyType.VOICE, mp3File)
44 | except Exception as e:
45 | reply = Reply(ReplyType.ERROR, str(e))
46 | finally:
47 | return reply
48 |
--------------------------------------------------------------------------------
/bot/openai/open_ai_image.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import openai
4 | import openai.error
5 |
6 | from common.log import logger
7 | from common.token_bucket import TokenBucket
8 | from config import conf
9 |
10 |
11 | # OPENAI提供的画图接口
12 | class OpenAIImage(object):
13 | def __init__(self):
14 | openai.api_key = conf().get("open_ai_api_key")
15 | if conf().get("rate_limit_dalle"):
16 | self.tb4dalle = TokenBucket(conf().get("rate_limit_dalle", 50))
17 |
18 | def create_img(self, query, retry_count=0, api_key=None, api_base=None):
19 | try:
20 | if conf().get("rate_limit_dalle") and not self.tb4dalle.get_token():
21 | return False, "请求太快了,请休息一下再问我吧"
22 | logger.info("[OPEN_AI] image_query={}".format(query))
23 | response = openai.Image.create(
24 | api_key=api_key,
25 | prompt=query, # 图片描述
26 | n=1, # 每次生成图片的数量
27 | model=conf().get("text_to_image") or "dall-e-2",
28 | # size=conf().get("image_create_size", "256x256"), # 图片大小,可选有 256x256, 512x512, 1024x1024
29 | )
30 | image_url = response["data"][0]["url"]
31 | logger.info("[OPEN_AI] image_url={}".format(image_url))
32 | return True, image_url
33 | except openai.error.RateLimitError as e:
34 | logger.warn(e)
35 | if retry_count < 1:
36 | time.sleep(5)
37 | logger.warn("[OPEN_AI] ImgCreate RateLimit exceed, 第{}次重试".format(retry_count + 1))
38 | return self.create_img(query, retry_count + 1)
39 | else:
40 | return False, "画图出现问题,请休息一下再问我吧"
41 | except Exception as e:
42 | logger.exception(e)
43 | return False, "画图出现问题,请休息一下再问我吧"
44 |
--------------------------------------------------------------------------------
/common/time_check.py:
--------------------------------------------------------------------------------
1 | import re
2 | import time
3 | import config
4 | from common.log import logger
5 |
6 |
7 | def time_checker(f):
8 | def _time_checker(self, *args, **kwargs):
9 | _config = config.conf()
10 | chat_time_module = _config.get("chat_time_module", False)
11 |
12 | if chat_time_module:
13 | chat_start_time = _config.get("chat_start_time", "00:00")
14 | chat_stop_time = _config.get("chat_stop_time", "24:00")
15 |
16 | time_regex = re.compile(r"^([01]?[0-9]|2[0-4])(:)([0-5][0-9])$")
17 |
18 | if not (time_regex.match(chat_start_time) and time_regex.match(chat_stop_time)):
19 | logger.warning("时间格式不正确,请在config.json中修改CHAT_START_TIME/CHAT_STOP_TIME。")
20 | return None
21 |
22 | now_time = time.strptime(time.strftime("%H:%M"), "%H:%M")
23 | chat_start_time = time.strptime(chat_start_time, "%H:%M")
24 | chat_stop_time = time.strptime(chat_stop_time, "%H:%M")
25 | # 结束时间小于开始时间,跨天了
26 | if chat_stop_time < chat_start_time and (chat_start_time <= now_time or now_time <= chat_stop_time):
27 | f(self, *args, **kwargs)
28 | # 结束大于开始时间代表,没有跨天
29 | elif chat_start_time < chat_stop_time and chat_start_time <= now_time <= chat_stop_time:
30 | f(self, *args, **kwargs)
31 | else:
32 | # 定义匹配规则,如果以 #reconf 或者 #更新配置 结尾, 非服务时间可以修改开始/结束时间并重载配置
33 | pattern = re.compile(r"^.*#(?:reconf|更新配置)$")
34 | if args and pattern.match(args[0].content):
35 | f(self, *args, **kwargs)
36 | else:
37 | logger.info("非服务时间内,不接受访问")
38 | return None
39 | else:
40 | f(self, *args, **kwargs) # 未开启时间模块则直接回答
41 |
42 | return _time_checker
43 |
--------------------------------------------------------------------------------
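An illustrative use of the decorator above: wrap a channel's message handler so it only runs inside the configured service hours (requires `chat_time_module` and the time settings in `config.json`).

```python
from common.time_check import time_checker


class DemoChannel:
    @time_checker
    def handle(self, context):
        # Only reached when the current time falls inside [chat_start_time, chat_stop_time],
        # or when the message ends with #reconf / #更新配置.
        print("handling:", context.content)
```
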
/bridge/context.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | from enum import Enum
4 |
5 |
6 | class ContextType(Enum):
7 | TEXT = 1 # 文本消息
8 | VOICE = 2 # 音频消息
9 | IMAGE = 3 # 图片消息
10 | FILE = 4 # 文件信息
11 | VIDEO = 5 # 视频信息
12 | SHARING = 6 # 分享信息
13 |
14 | IMAGE_CREATE = 10 # 创建图片命令
15 | ACCEPT_FRIEND = 19 # 同意好友请求
16 | JOIN_GROUP = 20 # 加入群聊
17 | PATPAT = 21 # 拍了拍
18 | FUNCTION = 22 # 函数调用
19 |     EXIT_GROUP = 23  # 退出群聊
20 |
21 |
22 | def __str__(self):
23 | return self.name
24 |
25 |
26 | class Context:
27 |     def __init__(self, type: ContextType = None, content=None, kwargs=None):
28 |         self.type = type
29 |         self.content = content
30 |         self.kwargs = kwargs if kwargs is not None else {}  # avoid sharing a mutable default dict
31 |
32 | def __contains__(self, key):
33 | if key == "type":
34 | return self.type is not None
35 | elif key == "content":
36 | return self.content is not None
37 | else:
38 | return key in self.kwargs
39 |
40 | def __getitem__(self, key):
41 | if key == "type":
42 | return self.type
43 | elif key == "content":
44 | return self.content
45 | else:
46 | return self.kwargs[key]
47 |
48 | def get(self, key, default=None):
49 | try:
50 | return self[key]
51 | except KeyError:
52 | return default
53 |
54 | def __setitem__(self, key, value):
55 | if key == "type":
56 | self.type = value
57 | elif key == "content":
58 | self.content = value
59 | else:
60 | self.kwargs[key] = value
61 |
62 | def __delitem__(self, key):
63 | if key == "type":
64 | self.type = None
65 | elif key == "content":
66 | self.content = None
67 | else:
68 | del self.kwargs[key]
69 |
70 | def __str__(self):
71 | return "Context(type={}, content={}, kwargs={})".format(self.type, self.content, self.kwargs)
72 |
--------------------------------------------------------------------------------
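A usage sketch: `Context` behaves like a small dict with fixed `type`/`content` slots plus arbitrary kwargs.

```python
from bridge.context import Context, ContextType

ctx = Context(ContextType.TEXT, "帮我写一首诗", kwargs={"isgroup": False, "session_id": "user_1"})
print(ctx["type"], ctx["content"])                          # TEXT 帮我写一首诗
print(ctx.get("isgroup"), ctx.get("receiver", "unknown"))   # False unknown
print("session_id" in ctx)                                  # True
```
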
/channel/channel_factory.py:
--------------------------------------------------------------------------------
1 | """
2 | channel factory
3 | """
4 | from common import const
5 | from .channel import Channel
6 |
7 |
8 | def create_channel(channel_type) -> Channel:
9 | """
10 | create a channel instance
11 | :param channel_type: channel type code
12 | :return: channel instance
13 | """
14 | ch = Channel()
15 | if channel_type == "wx":
16 | from channel.wechat.wechat_channel import WechatChannel
17 | ch = WechatChannel()
18 | elif channel_type == "wxy":
19 | from channel.wechat.wechaty_channel import WechatyChannel
20 | ch = WechatyChannel()
21 | elif channel_type == "wcf":
22 | from channel.wechat.wcf_channel import WechatfChannel
23 | ch = WechatfChannel()
24 | elif channel_type == "terminal":
25 | from channel.terminal.terminal_channel import TerminalChannel
26 | ch = TerminalChannel()
27 | elif channel_type == 'web':
28 | from channel.web.web_channel import WebChannel
29 | ch = WebChannel()
30 | elif channel_type == "wechatmp":
31 | from channel.wechatmp.wechatmp_channel import WechatMPChannel
32 | ch = WechatMPChannel(passive_reply=True)
33 | elif channel_type == "wechatmp_service":
34 | from channel.wechatmp.wechatmp_channel import WechatMPChannel
35 | ch = WechatMPChannel(passive_reply=False)
36 | elif channel_type == "wechatcom_app":
37 | from channel.wechatcom.wechatcomapp_channel import WechatComAppChannel
38 | ch = WechatComAppChannel()
39 | elif channel_type == "wework":
40 | from channel.wework.wework_channel import WeworkChannel
41 | ch = WeworkChannel()
42 | elif channel_type == const.FEISHU:
43 | from channel.feishu.feishu_channel import FeiShuChanel
44 | ch = FeiShuChanel()
45 | elif channel_type == const.DINGTALK:
46 | from channel.dingtalk.dingtalk_channel import DingTalkChanel
47 | ch = DingTalkChanel()
48 | else:
49 |         raise RuntimeError("unsupported channel_type: {}".format(channel_type))
50 | ch.channel_type = channel_type
51 | return ch
52 |
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | import os
4 | import signal
5 | import sys
6 | import time
7 |
8 | from channel import channel_factory
9 | from common import const
10 | from config import load_config
11 | from plugins import *
12 | import threading
13 |
14 |
15 | def sigterm_handler_wrap(_signo):
16 | old_handler = signal.getsignal(_signo)
17 |
18 | def func(_signo, _stack_frame):
19 | logger.info("signal {} received, exiting...".format(_signo))
20 | conf().save_user_datas()
21 | if callable(old_handler): # check old_handler
22 | return old_handler(_signo, _stack_frame)
23 | sys.exit(0)
24 |
25 | signal.signal(_signo, func)
26 |
27 |
28 | def start_channel(channel_name: str):
29 | channel = channel_factory.create_channel(channel_name)
30 |     if channel_name in ["wx", "wxy", "terminal", "wechatmp", "web", "wechatmp_service", "wechatcom_app", "wework",
31 | const.FEISHU, const.DINGTALK]:
32 | PluginManager().load_plugins()
33 |
34 | if conf().get("use_linkai"):
35 | try:
36 | from common import linkai_client
37 | threading.Thread(target=linkai_client.start, args=(channel,)).start()
38 | except Exception as e:
39 |             logger.warn("[LinkAI] client start failed: {}".format(e))
40 | channel.startup()
41 |
42 |
43 | def run():
44 | try:
45 | # load config
46 | load_config()
47 | # ctrl + c
48 | sigterm_handler_wrap(signal.SIGINT)
49 | # kill signal
50 | sigterm_handler_wrap(signal.SIGTERM)
51 |
52 | # create channel
53 | channel_name = conf().get("channel_type", "wx")
54 |
55 | if "--cmd" in sys.argv:
56 | channel_name = "terminal"
57 |
58 | if channel_name == "wxy":
59 | os.environ["WECHATY_LOG"] = "warn"
60 |
61 | start_channel(channel_name)
62 |
63 | while True:
64 | time.sleep(1)
65 | except Exception as e:
66 | logger.error("App startup failed!")
67 | logger.exception(e)
68 |
69 |
70 | if __name__ == "__main__":
71 | run()
72 |
--------------------------------------------------------------------------------
/bot/baidu/baidu_wenxin_session.py:
--------------------------------------------------------------------------------
1 | from bot.session_manager import Session
2 | from common.log import logger
3 |
4 | """
5 | e.g. [
6 | {"role": "user", "content": "Who won the world series in 2020?"},
7 | {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
8 | {"role": "user", "content": "Where was it played?"}
9 | ]
10 | """
11 |
12 |
13 | class BaiduWenxinSession(Session):
14 | def __init__(self, session_id, system_prompt=None, model="gpt-3.5-turbo"):
15 | super().__init__(session_id, system_prompt)
16 | self.model = model
17 | # 百度文心不支持system prompt
18 | # self.reset()
19 |
20 | def discard_exceeding(self, max_tokens, cur_tokens=None):
21 | precise = True
22 | try:
23 | cur_tokens = self.calc_tokens()
24 | except Exception as e:
25 | precise = False
26 | if cur_tokens is None:
27 | raise e
28 | logger.debug("Exception when counting tokens precisely for query: {}".format(e))
29 | while cur_tokens > max_tokens:
30 | if len(self.messages) >= 2:
31 | self.messages.pop(0)
32 | self.messages.pop(0)
33 | else:
34 | logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens, len(self.messages)))
35 | break
36 | if precise:
37 | cur_tokens = self.calc_tokens()
38 | else:
39 | cur_tokens = cur_tokens - max_tokens
40 | return cur_tokens
41 |
42 | def calc_tokens(self):
43 | return num_tokens_from_messages(self.messages, self.model)
44 |
45 |
46 | def num_tokens_from_messages(messages, model):
47 | """Returns the number of tokens used by a list of messages."""
48 | tokens = 0
49 | for msg in messages:
50 | # 官方token计算规则暂不明确: "大约为 token数为 "中文字 + 其他语种单词数 x 1.3"
51 | # 这里先直接根据字数粗略估算吧,暂不影响正常使用,仅在判断是否丢弃历史会话的时候会有偏差
52 | tokens += len(msg["content"])
53 | return tokens
54 |
--------------------------------------------------------------------------------
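A quick check of the rough token estimate above: it simply sums the character counts of all message contents.

```python
from bot.baidu.baidu_wenxin_session import num_tokens_from_messages

msgs = [
    {"role": "user", "content": "你好呀"},
    {"role": "assistant", "content": "Hello!"},
]
print(num_tokens_from_messages(msgs, model="wenxin"))  # 3 + 6 = 9
```
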
/docker/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | # build prefix
5 | CHATGPT_ON_WECHAT_PREFIX=${CHATGPT_ON_WECHAT_PREFIX:-""}
6 | # path to config.json
7 | CHATGPT_ON_WECHAT_CONFIG_PATH=${CHATGPT_ON_WECHAT_CONFIG_PATH:-""}
8 | # execution command line
9 | CHATGPT_ON_WECHAT_EXEC=${CHATGPT_ON_WECHAT_EXEC:-""}
10 |
11 | # use environment variables to pass parameters
12 | # if you have not defined environment variables, set them below
13 | # export OPEN_AI_API_KEY=${OPEN_AI_API_KEY:-'YOUR API KEY'}
14 | # export OPEN_AI_PROXY=${OPEN_AI_PROXY:-""}
15 | # export SINGLE_CHAT_PREFIX=${SINGLE_CHAT_PREFIX:-'["bot", "@bot"]'}
16 | # export SINGLE_CHAT_REPLY_PREFIX=${SINGLE_CHAT_REPLY_PREFIX:-'"[bot] "'}
17 | # export GROUP_CHAT_PREFIX=${GROUP_CHAT_PREFIX:-'["@bot"]'}
18 | # export GROUP_NAME_WHITE_LIST=${GROUP_NAME_WHITE_LIST:-'["ChatGPT测试群", "ChatGPT测试群2"]'}
19 | # export IMAGE_CREATE_PREFIX=${IMAGE_CREATE_PREFIX:-'["画", "看", "找"]'}
20 | # export CONVERSATION_MAX_TOKENS=${CONVERSATION_MAX_TOKENS:-"1000"}
21 | # export SPEECH_RECOGNITION=${SPEECH_RECOGNITION:-"False"}
22 | # export CHARACTER_DESC=${CHARACTER_DESC:-"你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。"}
23 | # export EXPIRES_IN_SECONDS=${EXPIRES_IN_SECONDS:-"3600"}
24 |
25 | # CHATGPT_ON_WECHAT_PREFIX is empty, use /app
26 | if [ "$CHATGPT_ON_WECHAT_PREFIX" == "" ] ; then
27 | CHATGPT_ON_WECHAT_PREFIX=/app
28 | fi
29 |
30 | # CHATGPT_ON_WECHAT_CONFIG_PATH is empty, use '/app/config.json'
31 | if [ "$CHATGPT_ON_WECHAT_CONFIG_PATH" == "" ] ; then
32 | CHATGPT_ON_WECHAT_CONFIG_PATH=$CHATGPT_ON_WECHAT_PREFIX/config.json
33 | fi
34 |
35 | # CHATGPT_ON_WECHAT_EXEC is empty, use 'python app.py'
36 | if [ "$CHATGPT_ON_WECHAT_EXEC" == "" ] ; then
37 | CHATGPT_ON_WECHAT_EXEC="python app.py"
38 | fi
39 |
40 | # modify content in config.json
41 | # if [ "$OPEN_AI_API_KEY" == "YOUR API KEY" ] || [ "$OPEN_AI_API_KEY" == "" ]; then
42 | # echo -e "\033[31m[Warning] You need to set OPEN_AI_API_KEY before running!\033[0m"
43 | # fi
44 |
45 |
46 | # go to prefix dir
47 | cd $CHATGPT_ON_WECHAT_PREFIX
49 | # execute
49 | $CHATGPT_ON_WECHAT_EXEC
50 |
51 |
52 |
--------------------------------------------------------------------------------
/plugins/plugin.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | from config import pconf, plugin_config, conf, write_plugin_config
4 | from common.log import logger
5 |
6 |
7 | class Plugin:
8 | def __init__(self):
9 | self.handlers = {}
10 |
11 | def load_config(self) -> dict:
12 | """
13 | 加载当前插件配置
14 | :return: 插件配置字典
15 | """
16 | # 优先获取 plugins/config.json 中的全局配置
17 | plugin_conf = pconf(self.name)
18 | if not plugin_conf:
19 | # 全局配置不存在,则获取插件目录下的配置
20 | plugin_config_path = os.path.join(self.path, "config.json")
21 | logger.debug(f"loading plugin config, plugin_config_path={plugin_config_path}, exist={os.path.exists(plugin_config_path)}")
22 | if os.path.exists(plugin_config_path):
23 | with open(plugin_config_path, "r", encoding="utf-8") as f:
24 | plugin_conf = json.load(f)
25 |
26 | # 写入全局配置内存
27 | write_plugin_config({self.name: plugin_conf})
28 | logger.debug(f"loading plugin config, plugin_name={self.name}, conf={plugin_conf}")
29 | return plugin_conf
30 |
31 | def save_config(self, config: dict):
32 | try:
33 | write_plugin_config({self.name: config})
34 | # 写入全局配置
35 | global_config_path = "./plugins/config.json"
36 | if os.path.exists(global_config_path):
37 | with open(global_config_path, "w", encoding='utf-8') as f:
38 | json.dump(plugin_config, f, indent=4, ensure_ascii=False)
39 | # 写入插件配置
40 | plugin_config_path = os.path.join(self.path, "config.json")
41 | if os.path.exists(plugin_config_path):
42 | with open(plugin_config_path, "w", encoding='utf-8') as f:
43 | json.dump(config, f, indent=4, ensure_ascii=False)
44 |
45 | except Exception as e:
46 | logger.warn("save plugin config failed: {}".format(e))
47 |
48 | def get_help_text(self, **kwargs):
49 | return "暂无帮助信息"
50 |
51 | def reload(self):
52 | pass
53 |
--------------------------------------------------------------------------------
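A minimal, illustrative plugin skeleton built on the base class above. In the real project `name`/`path` are filled in by the plugin registration machinery; they are set by hand here only to keep the sketch self-contained.

```python
import os

from plugins.event import Event, EventAction
from plugins.plugin import Plugin


class DemoPlugin(Plugin):
    name = "demo"                      # normally filled in by the plugin registration machinery
    path = os.path.dirname(__file__)

    def __init__(self):
        super().__init__()
        self.config = self.load_config() or {}   # plugins/config.json first, then the local config.json
        self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context

    def on_handle_context(self, e_context):
        e_context.action = EventAction.CONTINUE  # pass the message through untouched

    def get_help_text(self, **kwargs):
        return "demo plugin: a do-nothing skeleton showing config loading and handler registration"
```
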
/translate/baidu/baidu_translate.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import random
4 | from hashlib import md5
5 |
6 | import requests
7 |
8 | from config import conf
9 | from translate.translator import Translator
10 |
11 |
12 | class BaiduTranslator(Translator):
13 | def __init__(self) -> None:
14 | super().__init__()
15 | endpoint = "http://api.fanyi.baidu.com"
16 | path = "/api/trans/vip/translate"
17 | self.url = endpoint + path
18 | self.appid = conf().get("baidu_translate_app_id")
19 | self.appkey = conf().get("baidu_translate_app_key")
20 | if not self.appid or not self.appkey:
21 | raise Exception("baidu translate appid or appkey not set")
22 |
23 | # For list of language codes, please refer to `https://api.fanyi.baidu.com/doc/21`, need to convert to ISO 639-1 codes
24 | def translate(self, query: str, from_lang: str = "", to_lang: str = "en") -> str:
25 | if not from_lang:
26 |             from_lang = "auto"  # baidu supports auto detect
27 | salt = random.randint(32768, 65536)
28 | sign = self.make_md5("{}{}{}{}".format(self.appid, query, salt, self.appkey))
29 | headers = {"Content-Type": "application/x-www-form-urlencoded"}
30 | payload = {"appid": self.appid, "q": query, "from": from_lang, "to": to_lang, "salt": salt, "sign": sign}
31 |
32 | retry_cnt = 3
33 | while retry_cnt:
34 | r = requests.post(self.url, params=payload, headers=headers)
35 | result = r.json()
36 | errcode = result.get("error_code", "52000")
37 | if errcode != "52000":
38 | if errcode == "52001" or errcode == "52002":
39 | retry_cnt -= 1
40 | continue
41 | else:
42 | raise Exception(result["error_msg"])
43 | else:
44 | break
45 | text = "\n".join([item["dst"] for item in result["trans_result"]])
46 | return text
47 |
48 | def make_md5(self, s, encoding="utf-8"):
49 | return md5(s.encode(encoding)).hexdigest()
50 |
--------------------------------------------------------------------------------
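Usage sketch, assuming `baidu_translate_app_id` and `baidu_translate_app_key` are present in the project `config.json` and that the config has already been loaded (e.g. via `load_config()`):

```python
from translate.baidu.baidu_translate import BaiduTranslator

translator = BaiduTranslator()
print(translator.translate("今天天气不错", to_lang="en"))  # something like "The weather is nice today"
```
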
/bot/moonshot/moonshot_session.py:
--------------------------------------------------------------------------------
1 | from bot.session_manager import Session
2 | from common.log import logger
3 |
4 |
5 | class MoonshotSession(Session):
6 | def __init__(self, session_id, system_prompt=None, model="moonshot-v1-128k"):
7 | super().__init__(session_id, system_prompt)
8 | self.model = model
9 | self.reset()
10 |
11 | def discard_exceeding(self, max_tokens, cur_tokens=None):
12 | precise = True
13 | try:
14 | cur_tokens = self.calc_tokens()
15 | except Exception as e:
16 | precise = False
17 | if cur_tokens is None:
18 | raise e
19 | logger.debug("Exception when counting tokens precisely for query: {}".format(e))
20 | while cur_tokens > max_tokens:
21 | if len(self.messages) > 2:
22 | self.messages.pop(1)
23 | elif len(self.messages) == 2 and self.messages[1]["role"] == "assistant":
24 | self.messages.pop(1)
25 | if precise:
26 | cur_tokens = self.calc_tokens()
27 | else:
28 | cur_tokens = cur_tokens - max_tokens
29 | break
30 | elif len(self.messages) == 2 and self.messages[1]["role"] == "user":
31 | logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
32 | break
33 | else:
34 | logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens,
35 | len(self.messages)))
36 | break
37 | if precise:
38 | cur_tokens = self.calc_tokens()
39 | else:
40 | cur_tokens = cur_tokens - max_tokens
41 | return cur_tokens
42 |
43 | def calc_tokens(self):
44 | return num_tokens_from_messages(self.messages, self.model)
45 |
46 |
47 | def num_tokens_from_messages(messages, model):
48 | tokens = 0
49 | for msg in messages:
50 | tokens += len(msg["content"])
51 | return tokens
52 |
--------------------------------------------------------------------------------
/bot/modelscope/modelscope_session.py:
--------------------------------------------------------------------------------
1 | from bot.session_manager import Session
2 | from common.log import logger
3 |
4 |
5 | class ModelScopeSession(Session):
6 | def __init__(self, session_id, system_prompt=None, model="Qwen/Qwen2.5-7B-Instruct"):
7 | super().__init__(session_id, system_prompt)
8 | self.model = model
9 | self.reset()
10 |
11 | def discard_exceeding(self, max_tokens, cur_tokens=None):
12 | precise = True
13 | try:
14 | cur_tokens = self.calc_tokens()
15 | except Exception as e:
16 | precise = False
17 | if cur_tokens is None:
18 | raise e
19 | logger.debug("Exception when counting tokens precisely for query: {}".format(e))
20 | while cur_tokens > max_tokens:
21 | if len(self.messages) > 2:
22 | self.messages.pop(1)
23 | elif len(self.messages) == 2 and self.messages[1]["role"] == "assistant":
24 | self.messages.pop(1)
25 | if precise:
26 | cur_tokens = self.calc_tokens()
27 | else:
28 | cur_tokens = cur_tokens - max_tokens
29 | break
30 | elif len(self.messages) == 2 and self.messages[1]["role"] == "user":
31 | logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
32 | break
33 | else:
34 | logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens,
35 | len(self.messages)))
36 | break
37 | if precise:
38 | cur_tokens = self.calc_tokens()
39 | else:
40 | cur_tokens = cur_tokens - max_tokens
41 | return cur_tokens
42 |
43 | def calc_tokens(self):
44 | return num_tokens_from_messages(self.messages, self.model)
45 |
46 |
47 | def num_tokens_from_messages(messages, model):
48 | tokens = 0
49 | for msg in messages:
50 | tokens += len(msg["content"])
51 | return tokens
52 |
--------------------------------------------------------------------------------
/plugins/linkai/utils.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from common.log import logger
3 | from config import global_config
4 | from bridge.reply import Reply, ReplyType
5 | from plugins.event import EventContext, EventAction
6 | from config import conf
7 |
8 | class Util:
9 | @staticmethod
10 | def is_admin(e_context: EventContext) -> bool:
11 | """
12 | 判断消息是否由管理员用户发送
13 | :param e_context: 消息上下文
14 | :return: True: 是, False: 否
15 | """
16 | context = e_context["context"]
17 | if context["isgroup"]:
18 | actual_user_id = context.kwargs.get("msg").actual_user_id
19 | for admin_user in global_config["admin_users"]:
20 | if actual_user_id and actual_user_id in admin_user:
21 | return True
22 | return False
23 | else:
24 | return context["receiver"] in global_config["admin_users"]
25 |
26 | @staticmethod
27 | def set_reply_text(content: str, e_context: EventContext, level: ReplyType = ReplyType.ERROR):
28 | reply = Reply(level, content)
29 | e_context["reply"] = reply
30 | e_context.action = EventAction.BREAK_PASS
31 |
32 | @staticmethod
33 | def fetch_app_plugin(app_code: str, plugin_name: str) -> bool:
34 | try:
35 | headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}
36 | # do http request
37 | base_url = conf().get("linkai_api_base", "https://api.link-ai.tech")
38 | params = {"app_code": app_code}
39 | res = requests.get(url=base_url + "/v1/app/info", params=params, headers=headers, timeout=(5, 10))
40 | if res.status_code == 200:
41 | plugins = res.json().get("data").get("plugins")
42 | for plugin in plugins:
43 | if plugin.get("name") and plugin.get("name") == plugin_name:
44 | return True
45 | return False
46 | else:
47 | logger.warning(f"[LinkAI] find app info exception, res={res}")
48 | return False
49 | except Exception as e:
50 | return False
51 |
--------------------------------------------------------------------------------
/bot/dashscope/dashscope_session.py:
--------------------------------------------------------------------------------
1 | from bot.session_manager import Session
2 | from common.log import logger
3 |
4 |
5 | class DashscopeSession(Session):
6 | def __init__(self, session_id, system_prompt=None, model="qwen-turbo"):
7 |         super().__init__(session_id, system_prompt)
8 | self.reset()
9 |
10 | def discard_exceeding(self, max_tokens, cur_tokens=None):
11 | precise = True
12 | try:
13 | cur_tokens = self.calc_tokens()
14 | except Exception as e:
15 | precise = False
16 | if cur_tokens is None:
17 | raise e
18 | logger.debug("Exception when counting tokens precisely for query: {}".format(e))
19 | while cur_tokens > max_tokens:
20 | if len(self.messages) > 2:
21 | self.messages.pop(1)
22 | elif len(self.messages) == 2 and self.messages[1]["role"] == "assistant":
23 | self.messages.pop(1)
24 | if precise:
25 | cur_tokens = self.calc_tokens()
26 | else:
27 | cur_tokens = cur_tokens - max_tokens
28 | break
29 | elif len(self.messages) == 2 and self.messages[1]["role"] == "user":
30 | logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
31 | break
32 | else:
33 | logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens,
34 | len(self.messages)))
35 | break
36 | if precise:
37 | cur_tokens = self.calc_tokens()
38 | else:
39 | cur_tokens = cur_tokens - max_tokens
40 | return cur_tokens
41 |
42 | def calc_tokens(self):
43 | return num_tokens_from_messages(self.messages)
44 |
45 |
46 | def num_tokens_from_messages(messages):
47 | # 只是大概,具体计算规则:https://help.aliyun.com/zh/dashscope/developer-reference/token-api?spm=a2c4g.11186623.0.0.4d8b12b0BkP3K9
48 | tokens = 0
49 | for msg in messages:
50 | tokens += len(msg["content"])
51 | return tokens
52 |
--------------------------------------------------------------------------------
/bot/zhipuai/zhipu_ai_session.py:
--------------------------------------------------------------------------------
1 | from bot.session_manager import Session
2 | from common.log import logger
3 |
4 |
5 | class ZhipuAISession(Session):
6 | def __init__(self, session_id, system_prompt=None, model="glm-4"):
7 | super().__init__(session_id, system_prompt)
8 | self.model = model
9 | self.reset()
10 | if not system_prompt:
11 | logger.warn("[ZhiPu] `character_desc` can not be empty")
12 |
13 | def discard_exceeding(self, max_tokens, cur_tokens=None):
14 | precise = True
15 | try:
16 | cur_tokens = self.calc_tokens()
17 | except Exception as e:
18 | precise = False
19 | if cur_tokens is None:
20 | raise e
21 | logger.debug("Exception when counting tokens precisely for query: {}".format(e))
22 | while cur_tokens > max_tokens:
23 | if len(self.messages) > 2:
24 | self.messages.pop(1)
25 | elif len(self.messages) == 2 and self.messages[1]["role"] == "assistant":
26 | self.messages.pop(1)
27 | if precise:
28 | cur_tokens = self.calc_tokens()
29 | else:
30 | cur_tokens = cur_tokens - max_tokens
31 | break
32 | elif len(self.messages) == 2 and self.messages[1]["role"] == "user":
33 | logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
34 | break
35 | else:
36 | logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens,
37 | len(self.messages)))
38 | break
39 | if precise:
40 | cur_tokens = self.calc_tokens()
41 | else:
42 | cur_tokens = cur_tokens - max_tokens
43 | return cur_tokens
44 |
45 | def calc_tokens(self):
46 | return num_tokens_from_messages(self.messages, self.model)
47 |
48 |
49 | def num_tokens_from_messages(messages, model):
50 | tokens = 0
51 | for msg in messages:
52 | tokens += len(msg["content"])
53 | return tokens
54 |
--------------------------------------------------------------------------------
/channel/wechatcom/wechatcomapp_message.py:
--------------------------------------------------------------------------------
1 | from wechatpy.enterprise import WeChatClient
2 |
3 | from bridge.context import ContextType
4 | from channel.chat_message import ChatMessage
5 | from common.log import logger
6 | from common.tmp_dir import TmpDir
7 |
8 |
9 | class WechatComAppMessage(ChatMessage):
10 | def __init__(self, msg, client: WeChatClient, is_group=False):
11 | super().__init__(msg)
12 | self.msg_id = msg.id
13 | self.create_time = msg.time
14 | self.is_group = is_group
15 |
16 | if msg.type == "text":
17 | self.ctype = ContextType.TEXT
18 | self.content = msg.content
19 | elif msg.type == "voice":
20 | self.ctype = ContextType.VOICE
21 | self.content = TmpDir().path() + msg.media_id + "." + msg.format # content直接存临时目录路径
22 |
23 | def download_voice():
24 | # 如果响应状态码是200,则将响应内容写入本地文件
25 | response = client.media.download(msg.media_id)
26 | if response.status_code == 200:
27 | with open(self.content, "wb") as f:
28 | f.write(response.content)
29 | else:
30 | logger.info(f"[wechatcom] Failed to download voice file, {response.content}")
31 |
32 | self._prepare_fn = download_voice
33 | elif msg.type == "image":
34 | self.ctype = ContextType.IMAGE
35 | self.content = TmpDir().path() + msg.media_id + ".png" # content直接存临时目录路径
36 |
37 | def download_image():
38 | # 如果响应状态码是200,则将响应内容写入本地文件
39 | response = client.media.download(msg.media_id)
40 | if response.status_code == 200:
41 | with open(self.content, "wb") as f:
42 | f.write(response.content)
43 | else:
44 | logger.info(f"[wechatcom] Failed to download image file, {response.content}")
45 |
46 | self._prepare_fn = download_image
47 | else:
48 | raise NotImplementedError("Unsupported message type: Type:{} ".format(msg.type))
49 |
50 | self.from_user_id = msg.source
51 | self.to_user_id = msg.target
52 | self.other_user_id = msg.source
53 |
--------------------------------------------------------------------------------
/channel/wechat/wcf_message.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | """
4 | wechat channel message
5 | """
6 |
7 | from bridge.context import ContextType
8 | from channel.chat_message import ChatMessage
9 | from common.log import logger
10 | from wcferry import WxMsg
11 |
12 |
13 | class WechatfMessage(ChatMessage):
14 | """
15 | 微信消息封装类
16 | """
17 |
18 | def __init__(self, channel, wcf_msg: WxMsg, is_group=False):
19 | """
20 | 初始化消息对象
21 | :param wcf_msg: wcferry消息对象
22 | :param is_group: 是否是群消息
23 | """
24 | super().__init__(wcf_msg)
25 | self.msg_id = wcf_msg.id
26 | self.create_time = wcf_msg.ts # 使用消息时间戳
27 | self.is_group = is_group or wcf_msg._is_group
28 | self.wxid = channel.wxid
29 | self.name = channel.name
30 |
31 | # 解析消息类型
32 | if wcf_msg.is_text():
33 | self.ctype = ContextType.TEXT
34 | self.content = wcf_msg.content
35 | else:
36 | raise NotImplementedError(f"Unsupported message type: {wcf_msg.type}")
37 |
38 | # 设置发送者和接收者信息
39 | self.from_user_id = self.wxid if wcf_msg.sender == self.wxid else wcf_msg.sender
40 | self.from_user_nickname = self.name if wcf_msg.sender == self.wxid else channel.contact_cache.get_name_by_wxid(wcf_msg.sender)
41 | self.to_user_id = self.wxid
42 | self.to_user_nickname = self.name
43 | self.other_user_id = wcf_msg.sender
44 | self.other_user_nickname = channel.contact_cache.get_name_by_wxid(wcf_msg.sender)
45 |
46 | # 群消息特殊处理
47 | if self.is_group:
48 | self.other_user_id = wcf_msg.roomid
49 | self.other_user_nickname = channel.contact_cache.get_name_by_wxid(wcf_msg.roomid)
50 | self.actual_user_id = wcf_msg.sender
51 | self.actual_user_nickname = channel.wcf.get_alias_in_chatroom(wcf_msg.sender, wcf_msg.roomid)
52 | if not self.actual_user_nickname: # 群聊获取不到企微号成员昵称,这里尝试从联系人缓存去获取
53 | self.actual_user_nickname = channel.contact_cache.get_name_by_wxid(wcf_msg.sender)
54 | self.room_id = wcf_msg.roomid
55 | self.is_at = wcf_msg.is_at(self.wxid) # 是否被@当前登录用户
56 |
57 | # 判断是否是自己发送的消息
58 | self.my_msg = wcf_msg.from_self()
59 |
--------------------------------------------------------------------------------
/.github/workflows/deploy-image.yml:
--------------------------------------------------------------------------------
1 | # This workflow uses actions that are not certified by GitHub.
2 | # They are provided by a third-party and are governed by
3 | # separate terms of service, privacy policy, and support
4 | # documentation.
5 |
6 | # GitHub recommends pinning actions to a commit SHA.
7 | # To get a newer version, you will need to update the SHA.
8 | # You can also reference a tag or branch, but the action may change without warning.
9 |
10 | name: Create and publish a Docker image
11 |
12 | on:
13 | push:
14 | branches: ['master']
15 | create:
16 | env:
17 | REGISTRY: ghcr.io
18 | IMAGE_NAME: ${{ github.repository }}
19 |
20 | jobs:
21 | build-and-push-image:
22 | if: github.repository == 'zhayujie/chatgpt-on-wechat'
23 | runs-on: ubuntu-latest
24 | permissions:
25 | contents: read
26 | packages: write
27 |
28 | steps:
29 | - name: Checkout repository
30 | uses: actions/checkout@v3
31 |
32 | - name: Login to Docker Hub
33 | uses: docker/login-action@v2
34 | with:
35 | username: ${{ secrets.DOCKERHUB_USERNAME }}
36 | password: ${{ secrets.DOCKERHUB_TOKEN }}
37 |
38 | - name: Log in to the Container registry
39 | uses: docker/login-action@v2
40 | with:
41 | registry: ${{ env.REGISTRY }}
42 | username: ${{ github.actor }}
43 | password: ${{ secrets.GITHUB_TOKEN }}
44 |
45 | - name: Extract metadata (tags, labels) for Docker
46 | id: meta
47 | uses: docker/metadata-action@v4
48 | with:
49 | images: |
50 | ${{ env.IMAGE_NAME }}
51 | ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
52 |
53 | - name: Build and push Docker image
54 | uses: docker/build-push-action@v3
55 | with:
56 | context: .
57 | push: true
58 | file: ./docker/Dockerfile.latest
59 | tags: ${{ steps.meta.outputs.tags }}
60 | labels: ${{ steps.meta.outputs.labels }}
61 |
62 | - uses: actions/delete-package-versions@v4
63 | with:
64 | package-name: 'chatgpt-on-wechat'
65 | package-type: 'container'
66 | min-versions-to-keep: 10
67 | delete-only-untagged-versions: 'true'
68 | token: ${{ secrets.GITHUB_TOKEN }}
--------------------------------------------------------------------------------
/.github/workflows/deploy-image-arm.yml:
--------------------------------------------------------------------------------
1 | # This workflow uses actions that are not certified by GitHub.
2 | # They are provided by a third-party and are governed by
3 | # separate terms of service, privacy policy, and support
4 | # documentation.
5 |
6 | # GitHub recommends pinning actions to a commit SHA.
7 | # To get a newer version, you will need to update the SHA.
8 | # You can also reference a tag or branch, but the action may change without warning.
9 |
10 | name: Create and publish a Docker image
11 |
12 | on:
13 | push:
14 | branches: ['master']
15 | create:
16 | env:
17 | REGISTRY: ghcr.io
18 | IMAGE_NAME: ${{ github.repository }}
19 |
20 | jobs:
21 | build-and-push-image:
22 | if: github.repository == 'zhayujie/chatgpt-on-wechat'
23 | runs-on: ubuntu-latest
24 | permissions:
25 | contents: read
26 | packages: write
27 |
28 | steps:
29 | - name: Checkout repository
30 | uses: actions/checkout@v3
31 |
32 | - name: Set up QEMU
33 | uses: docker/setup-qemu-action@v1
34 |
35 | - name: Set up Docker Buildx
36 | id: buildx
37 | uses: docker/setup-buildx-action@v1
38 |
39 | - name: Available platforms
40 | run: echo ${{ steps.buildx.outputs.platforms }}
41 |
42 | - name: Log in to the Container registry
43 | uses: docker/login-action@v2
44 | with:
45 | registry: ${{ env.REGISTRY }}
46 | username: ${{ github.actor }}
47 | password: ${{ secrets.GITHUB_TOKEN }}
48 |
49 | - name: Extract metadata (tags, labels) for Docker
50 | id: meta
51 | uses: docker/metadata-action@v4
52 | with:
53 | images: |
54 | ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
55 |
56 | - name: Build and push Docker image
57 | uses: docker/build-push-action@v3
58 | with:
59 | context: .
60 | push: true
61 | file: ./docker/Dockerfile.latest
62 | platforms: linux/arm64
63 | tags: ${{ steps.meta.outputs.tags }}-arm64
64 | labels: ${{ steps.meta.outputs.labels }}
65 |
66 | - uses: actions/delete-package-versions@v4
67 | with:
68 | package-name: 'chatgpt-on-wechat'
69 | package-type: 'container'
70 | min-versions-to-keep: 10
71 | delete-only-untagged-versions: 'true'
72 | token: ${{ secrets.GITHUB_TOKEN }}
--------------------------------------------------------------------------------
/channel/wechatmp/wechatmp_message.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-#
2 |
3 | from bridge.context import ContextType
4 | from channel.chat_message import ChatMessage
5 | from common.log import logger
6 | from common.tmp_dir import TmpDir
7 |
8 |
9 | class WeChatMPMessage(ChatMessage):
10 | def __init__(self, msg, client=None):
11 | super().__init__(msg)
12 | self.msg_id = msg.id
13 | self.create_time = msg.time
14 | self.is_group = False
15 |
16 | if msg.type == "text":
17 | self.ctype = ContextType.TEXT
18 | self.content = msg.content
19 | elif msg.type == "voice":
20 |             if msg.recognition is None:
21 | self.ctype = ContextType.VOICE
22 | self.content = TmpDir().path() + msg.media_id + "." + msg.format # content直接存临时目录路径
23 |
24 | def download_voice():
25 | # 如果响应状态码是200,则将响应内容写入本地文件
26 | response = client.media.download(msg.media_id)
27 | if response.status_code == 200:
28 | with open(self.content, "wb") as f:
29 | f.write(response.content)
30 | else:
31 | logger.info(f"[wechatmp] Failed to download voice file, {response.content}")
32 |
33 | self._prepare_fn = download_voice
34 | else:
35 | self.ctype = ContextType.TEXT
36 | self.content = msg.recognition
37 | elif msg.type == "image":
38 | self.ctype = ContextType.IMAGE
39 | self.content = TmpDir().path() + msg.media_id + ".png" # content直接存临时目录路径
40 |
41 | def download_image():
42 | # 如果响应状态码是200,则将响应内容写入本地文件
43 | response = client.media.download(msg.media_id)
44 | if response.status_code == 200:
45 | with open(self.content, "wb") as f:
46 | f.write(response.content)
47 | else:
48 | logger.info(f"[wechatmp] Failed to download image file, {response.content}")
49 |
50 | self._prepare_fn = download_image
51 | else:
52 | raise NotImplementedError("Unsupported message type: Type:{} ".format(msg.type))
53 |
54 | self.from_user_id = msg.source
55 | self.to_user_id = msg.target
56 | self.other_user_id = msg.source
57 |
--------------------------------------------------------------------------------
/channel/wechatcom/README.md:
--------------------------------------------------------------------------------
1 | # 企业微信应用号channel
2 |
3 | 企业微信官方提供了客服、应用等API,本channel使用的是企业微信的自建应用API的能力。
4 |
5 | 因为未来可能还会开发客服能力,所以本channel的类型名叫作`wechatcom_app`。
6 |
7 | `wechatcom_app` channel支持插件系统和图片声音交互等能力,除了无法加入群聊,作为个人使用的私人助理已绰绰有余。
8 |
9 | ## 开始之前
10 |
11 | - 在企业中确认自己拥有在企业内自建应用的权限。
12 | - 如果没有权限或者是个人用户,也可创建未认证的企业。操作方式:登录手机企业微信,选择`创建/加入企业`来创建企业,类型请选择企业,企业名称可随意填写。
13 | 未认证的企业有100人的服务人数上限,其他功能与认证企业没有差异。
14 |
15 | 本channel需安装的依赖与公众号一致,需要安装`wechatpy`和`web.py`,它们包含在`requirements-optional.txt`中。
16 |
17 | 此外,如果你是`Linux`系统,除了`ffmpeg`还需要安装`amr`编码器,否则会出现找不到编码器的错误,无法正常使用语音功能。
18 |
19 | - Ubuntu/Debian
20 |
21 | ```bash
22 | apt-get install libavcodec-extra
23 | ```
24 |
25 | - Alpine
26 |
27 | 需自行编译`ffmpeg`,在编译参数里加入`amr`编码器的支持
28 |
29 | ## 使用方法
30 |
31 | 1.查看企业ID
32 |
33 | - 扫码登陆[企业微信后台](https://work.weixin.qq.com)
34 | - 选择`我的企业`,点击`企业信息`,记住该`企业ID`
35 |
36 | 2.创建自建应用
37 |
38 | - 选择应用管理, 在自建区选创建应用来创建企业自建应用
39 | - 上传应用logo,填写应用名称等项
40 | - 创建应用后进入应用详情页面,记住`AgentId`和`Secret`
41 |
42 | 3.配置应用
43 |
44 | - 在详情页点击`企业可信IP`的配置(没看到可以不管),填入你服务器的公网IP,如果不知道可以先不填
45 | - 点击`接收消息`下的启用API接收消息
46 | - `URL`填写格式为`http://url:port/wxcomapp`,`port`是程序监听的端口,默认是9898
47 | 如果是未认证的企业,url可直接使用服务器的IP。如果是认证企业,需要使用备案的域名,可使用二级域名。
48 | - `Token`可随意填写,停留在这个页面
49 | - 在程序根目录`config.json`中增加配置(**去掉注释**),`wechatcomapp_aes_key`是当前页面的`EncodingAESKey`
50 |
51 | ```python
52 | "channel_type": "wechatcom_app",
53 | "wechatcom_corp_id": "", # 企业微信公司的corpID
54 | "wechatcomapp_token": "", # 企业微信app的token
55 | "wechatcomapp_port": 9898, # 企业微信app的服务端口, 不需要端口转发
56 | "wechatcomapp_secret": "", # 企业微信app的secret
57 | "wechatcomapp_agent_id": "", # 企业微信app的agent_id
58 | "wechatcomapp_aes_key": "", # 企业微信app的aes_key
59 | ```
60 |
61 | - 运行程序,在页面中点击保存,保存成功说明验证成功
62 |
63 | 4.连接个人微信
64 |
65 | 选择`我的企业`,点击`微信插件`,下面有个邀请关注的二维码。微信扫码后,即可在微信中看到对应企业,在这里你便可以和机器人沟通。
66 |
67 | 向机器人发送消息,如果日志里出现报错:
68 |
69 | ```bash
70 | Error code: 60020, message: "not allow to access from your ip, ...from ip: xx.xx.xx.xx"
71 | ```
72 |
73 | 意思是IP不可信,需要参考上一步的`企业可信IP`配置,把这里的IP加进去。
74 |
75 | ~~### Railway部署方式~~(2023-06-08已失效)
76 |
77 | ~~公众号不能在`Railway`上部署,但企业微信应用[可以](https://railway.app/template/-FHS--?referralCode=RC3znh)!~~
78 |
79 | ~~填写配置后,将部署完成后的网址```**.railway.app/wxcomapp```,填写在上一步的URL中。发送信息后观察日志,把报错的IP加入到可信IP。(每次重启后都需要加入可信IP)~~
80 |
81 | ## 测试体验
82 |
83 | AIGC开放社区中已经部署了多个可免费使用的Bot,扫描下方的二维码会自动邀请你来体验。
84 |
85 |
86 |
--------------------------------------------------------------------------------
/channel/wechatmp/wechatmp_client.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import time
3 |
4 | from wechatpy.client import WeChatClient
5 | from wechatpy.exceptions import APILimitedException
6 |
7 | from channel.wechatmp.common import *
8 | from common.log import logger
9 |
10 |
11 | class WechatMPClient(WeChatClient):
12 | def __init__(self, appid, secret, access_token=None, session=None, timeout=None, auto_retry=True):
13 | super(WechatMPClient, self).__init__(appid, secret, access_token, session, timeout, auto_retry)
14 | self.fetch_access_token_lock = threading.Lock()
15 | self.clear_quota_lock = threading.Lock()
16 | self.last_clear_quota_time = -1
17 |
18 | def clear_quota(self):
19 | return self.post("clear_quota", data={"appid": self.appid})
20 |
21 | def clear_quota_v2(self):
22 | return self.post("clear_quota/v2", params={"appid": self.appid, "appsecret": self.secret})
23 |
24 | def fetch_access_token(self): # 重载父类方法,加锁避免多线程重复获取access_token
25 | with self.fetch_access_token_lock:
26 | access_token = self.session.get(self.access_token_key)
27 | if access_token:
28 | if not self.expires_at:
29 | return access_token
30 | timestamp = time.time()
31 | if self.expires_at - timestamp > 60:
32 | return access_token
33 | return super().fetch_access_token()
34 |
35 | def _request(self, method, url_or_endpoint, **kwargs): # 重载父类方法,遇到API限流时,清除quota后重试
36 | try:
37 | return super()._request(method, url_or_endpoint, **kwargs)
38 | except APILimitedException as e:
39 |             logger.error("[wechatmp] API quota has been used up. {}".format(e))
40 | if self.last_clear_quota_time == -1 or time.time() - self.last_clear_quota_time > 60:
41 | with self.clear_quota_lock:
42 | if self.last_clear_quota_time == -1 or time.time() - self.last_clear_quota_time > 60:
43 | self.last_clear_quota_time = time.time()
44 | response = self.clear_quota_v2()
45 |                         logger.debug("[wechatmp] API quota has been cleared, {}".format(response))
46 | return super()._request(method, url_or_endpoint, **kwargs)
47 | else:
48 |                 logger.error("[wechatmp] last clear quota time is {}, less than 60s, skip clear quota".format(self.last_clear_quota_time))
49 | raise e
50 |
--------------------------------------------------------------------------------
/common/utils.py:
--------------------------------------------------------------------------------
1 | import io
2 | import os
3 | import re
4 | from urllib.parse import urlparse
5 | from PIL import Image
6 | from common.log import logger
7 |
8 | def fsize(file):
9 | if isinstance(file, io.BytesIO):
10 | return file.getbuffer().nbytes
11 | elif isinstance(file, str):
12 | return os.path.getsize(file)
13 | elif hasattr(file, "seek") and hasattr(file, "tell"):
14 | pos = file.tell()
15 | file.seek(0, os.SEEK_END)
16 | size = file.tell()
17 | file.seek(pos)
18 | return size
19 | else:
20 | raise TypeError("Unsupported type")
21 |
22 |
23 | def compress_imgfile(file, max_size):
24 | if fsize(file) <= max_size:
25 | return file
26 | file.seek(0)
27 | img = Image.open(file)
28 | rgb_image = img.convert("RGB")
29 | quality = 95
30 | while True:
31 | out_buf = io.BytesIO()
32 | rgb_image.save(out_buf, "JPEG", quality=quality)
33 | if fsize(out_buf) <= max_size:
34 | return out_buf
35 | quality -= 5
36 |
37 |
38 | def split_string_by_utf8_length(string, max_length, max_split=0):
39 | encoded = string.encode("utf-8")
40 | start, end = 0, 0
41 | result = []
42 | while end < len(encoded):
43 | if max_split > 0 and len(result) >= max_split:
44 | result.append(encoded[start:].decode("utf-8"))
45 | break
46 | end = min(start + max_length, len(encoded))
47 | # If the current byte is not the start byte of a UTF-8 sequence, move back until a start byte is found
48 | while end < len(encoded) and (encoded[end] & 0b11000000) == 0b10000000:
49 | end -= 1
50 | result.append(encoded[start:end].decode("utf-8"))
51 | start = end
52 | return result
53 |
54 |
55 | def get_path_suffix(path):
56 | path = urlparse(path).path
57 | return os.path.splitext(path)[-1].lstrip('.')
58 |
59 |
60 | def convert_webp_to_png(webp_image):
61 | from PIL import Image
62 | try:
63 | webp_image.seek(0)
64 | img = Image.open(webp_image).convert("RGBA")
65 | png_image = io.BytesIO()
66 | img.save(png_image, format="PNG")
67 | png_image.seek(0)
68 | return png_image
69 | except Exception as e:
70 | logger.error(f"Failed to convert WEBP to PNG: {e}")
71 | raise
72 |
73 |
74 | def remove_markdown_symbol(text: str):
75 | # Strip markdown formatting; for now only the ** bold markers are removed
76 | if not text:
77 | return text
78 | return re.sub(r'\*\*(.*?)\*\*', r'\1', text)
79 |
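A quick usage sketch of the helper above (illustrative only; the sample text and length are arbitrary): `split_string_by_utf8_length` cuts on UTF-8 character boundaries, so a multi-byte character is never split in half.

```python
# Illustrative usage, assuming the module is importable as common.utils.
from common.utils import split_string_by_utf8_length

text = "你好, world"             # "你好" is 6 bytes in UTF-8, ", world" is 7 bytes
chunks = split_string_by_utf8_length(text, 7)
print(chunks)                    # ['你好,', ' world'] -- no chunk breaks a character
```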
--------------------------------------------------------------------------------
/common/sorted_dict.py:
--------------------------------------------------------------------------------
1 | import heapq
2 |
3 |
4 | class SortedDict(dict):
5 | def __init__(self, sort_func=lambda k, v: k, init_dict=None, reverse=False):
6 | if init_dict is None:
7 | init_dict = []
8 | if isinstance(init_dict, dict):
9 | init_dict = init_dict.items()
10 | self.sort_func = sort_func
11 | self.sorted_keys = None
12 | self.reverse = reverse
13 | self.heap = []
14 | for k, v in init_dict:
15 | self[k] = v
16 |
17 | def __setitem__(self, key, value):
18 | if key in self:
19 | super().__setitem__(key, value)
20 | for i, (priority, k) in enumerate(self.heap):
21 | if k == key:
22 | self.heap[i] = (self.sort_func(key, value), key)
23 | heapq.heapify(self.heap)
24 | break
25 | self.sorted_keys = None
26 | else:
27 | super().__setitem__(key, value)
28 | heapq.heappush(self.heap, (self.sort_func(key, value), key))
29 | self.sorted_keys = None
30 |
31 | def __delitem__(self, key):
32 | super().__delitem__(key)
33 | for i, (priority, k) in enumerate(self.heap):
34 | if k == key:
35 | del self.heap[i]
36 | heapq.heapify(self.heap)
37 | break
38 | self.sorted_keys = None
39 |
40 | def keys(self):
41 | if self.sorted_keys is None:
42 | self.sorted_keys = [k for _, k in sorted(self.heap, reverse=self.reverse)]
43 | return self.sorted_keys
44 |
45 | def items(self):
46 | if self.sorted_keys is None:
47 | self.sorted_keys = [k for _, k in sorted(self.heap, reverse=self.reverse)]
48 | sorted_items = [(k, self[k]) for k in self.sorted_keys]
49 | return sorted_items
50 |
51 | def _update_heap(self, key):
52 | for i, (priority, k) in enumerate(self.heap):
53 | if k == key:
54 | new_priority = self.sort_func(key, self[key])
55 | if new_priority != priority:
56 | self.heap[i] = (new_priority, key)
57 | heapq.heapify(self.heap)
58 | self.sorted_keys = None
59 | break
60 |
61 | def __iter__(self):
62 | return iter(self.keys())
63 |
64 | def __repr__(self):
65 | return f"{type(self).__name__}({dict(self)}, sort_func={self.sort_func.__name__}, reverse={self.reverse})"
66 |
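A minimal usage sketch of SortedDict (the sample keys and priorities are made up): keys are ordered by the value returned by sort_func, and reverse=True puts the largest first.

```python
# Illustrative: order plugin names by priority, highest first.
from common.sorted_dict import SortedDict

priorities = SortedDict(sort_func=lambda k, v: v, reverse=True)
priorities["hello"] = 10
priorities["godcmd"] = 999
priorities["banwords"] = 100

print(list(priorities.keys()))   # ['godcmd', 'banwords', 'hello']
print(priorities.items())        # [('godcmd', 999), ('banwords', 100), ('hello', 10)]
```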
--------------------------------------------------------------------------------
/lib/itchat/returnvalues.py:
--------------------------------------------------------------------------------
1 | #coding=utf8
2 | TRANSLATE = 'Chinese'
3 |
4 | class ReturnValue(dict):
5 | ''' turn return value of itchat into a boolean value
6 | for requests:
7 | ..code::python
8 |
9 | import requests
10 | r = requests.get('http://httpbin.org/get')
11 | print(ReturnValue(rawResponse=r))
12 |
13 | for normal dict:
14 | ..code::python
15 |
16 | returnDict = {
17 | 'BaseResponse': {
18 | 'Ret': 0,
19 | 'ErrMsg': 'My error msg', }, }
20 | print(ReturnValue(returnDict))
21 | '''
22 | def __init__(self, returnValueDict={}, rawResponse=None):
23 | if rawResponse:
24 | try:
25 | returnValueDict = rawResponse.json()
26 | except ValueError:
27 | returnValueDict = {
28 | 'BaseResponse': {
29 | 'Ret': -1004,
30 | 'ErrMsg': 'Unexpected return value', },
31 | 'Data': rawResponse.content, }
32 | for k, v in returnValueDict.items():
33 | self[k] = v
34 | if not 'BaseResponse' in self:
35 | self['BaseResponse'] = {
36 | 'ErrMsg': 'no BaseResponse in raw response',
37 | 'Ret': -1000, }
38 | if TRANSLATE:
39 | self['BaseResponse']['RawMsg'] = self['BaseResponse'].get('ErrMsg', '')
40 | self['BaseResponse']['ErrMsg'] = \
41 | TRANSLATION[TRANSLATE].get(
42 | self['BaseResponse'].get('Ret', '')) \
43 | or self['BaseResponse'].get('ErrMsg', u'No ErrMsg')
44 | self['BaseResponse']['RawMsg'] = \
45 | self['BaseResponse']['RawMsg'] or self['BaseResponse']['ErrMsg']
46 | def __nonzero__(self):
47 | return self['BaseResponse'].get('Ret') == 0
48 | def __bool__(self):
49 | return self.__nonzero__()
50 | def __str__(self):
51 | return '{%s}' % ', '.join(
52 | ['%s: %s' % (repr(k),repr(v)) for k,v in self.items()])
53 | def __repr__(self):
54 | return '<ItchatReturnValue: %s>' % self.__str__()
55 |
56 | TRANSLATION = {
57 | 'Chinese': {
58 | -1000: u'返回值不带BaseResponse',
59 | -1001: u'无法找到对应的成员',
60 | -1002: u'文件位置错误',
61 | -1003: u'服务器拒绝连接',
62 | -1004: u'服务器返回异常值',
63 | -1005: u'参数错误',
64 | -1006: u'无效操作',
65 | 0: u'请求成功',
66 | },
67 | }
68 |
--------------------------------------------------------------------------------
/channel/chat_message.py:
--------------------------------------------------------------------------------
1 | """
2 | This class represents a chat message and provides a unified wrapper around itchat and wechaty messages.
3 |
4 | Fill in the required fields (8 for group chats, 6 for one-on-one chats) to plug into ChatChannel with plugin support; see TerminalChannel for a minimal reference.
5 |
6 | ChatMessage
7 | msg_id: message id (required)
8 | create_time: message creation time
9 |
10 | ctype: message type : ContextType (required)
11 | content: message content; for voice/image messages this is the file path (required)
12 |
13 | from_user_id: sender id (required)
14 | from_user_nickname: sender nickname
15 | to_user_id: receiver id (required)
16 | to_user_nickname: receiver nickname
17 |
18 | other_user_id: id of the other party; the receiver id if you are the sender, the sender id if you are the receiver, and always the group id for group messages (required)
19 | other_user_nickname: same as above, but the nickname
20 |
21 | is_group: whether this is a group message (required for group chats)
22 | is_at: whether the bot was @-mentioned
23 |
24 | - (group messages usually have an actual sender, i.e. the id and nickname of a group member; the following fields exist only for group messages)
25 | actual_user_id: actual sender id (required for group chats)
26 | actual_user_nickname: actual sender nickname
27 | self_display_name: our own display name; when a group alias is set, this field holds the group alias
28 |
29 | _prepare_fn: preparation function used to prepare the message content, e.g. downloading an image
30 | _prepared: whether the preparation function has already been called
31 | _rawmsg: the raw message object
32 |
33 | """
34 |
35 |
36 | class ChatMessage(object):
37 | msg_id = None
38 | create_time = None
39 |
40 | ctype = None
41 | content = None
42 |
43 | from_user_id = None
44 | from_user_nickname = None
45 | to_user_id = None
46 | to_user_nickname = None
47 | other_user_id = None
48 | other_user_nickname = None
49 | my_msg = False
50 | self_display_name = None
51 |
52 | is_group = False
53 | is_at = False
54 | actual_user_id = None
55 | actual_user_nickname = None
56 | at_list = None
57 |
58 | _prepare_fn = None
59 | _prepared = False
60 | _rawmsg = None
61 |
62 | def __init__(self, _rawmsg):
63 | self._rawmsg = _rawmsg
64 |
65 | def prepare(self):
66 | if self._prepare_fn and not self._prepared:
67 | self._prepared = True
68 | self._prepare_fn()
69 |
70 | def __str__(self):
71 | return "ChatMessage: id={}, create_time={}, ctype={}, content={}, from_user_id={}, from_user_nickname={}, to_user_id={}, to_user_nickname={}, other_user_id={}, other_user_nickname={}, is_group={}, is_at={}, actual_user_id={}, actual_user_nickname={}, at_list={}".format(
72 | self.msg_id,
73 | self.create_time,
74 | self.ctype,
75 | self.content,
76 | self.from_user_id,
77 | self.from_user_nickname,
78 | self.to_user_id,
79 | self.to_user_nickname,
80 | self.other_user_id,
81 | self.other_user_nickname,
82 | self.is_group,
83 | self.is_at,
84 | self.actual_user_id,
85 | self.actual_user_nickname,
86 | self.at_list
87 | )
88 |
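As a rough sketch of the contract documented above (the DummyMessage class and its field values are hypothetical; TerminalMessage in channel/terminal/terminal_channel.py is the real minimal reference), a one-on-one channel only has to fill the six always-required fields:

```python
# Hypothetical minimal message wrapper for a one-on-one channel.
from bridge.context import ContextType
from channel.chat_message import ChatMessage


class DummyMessage(ChatMessage):
    def __init__(self, raw: dict):
        super().__init__(raw)                 # keeps the raw message in self._rawmsg
        self.msg_id = raw["id"]               # required
        self.ctype = ContextType.TEXT         # required
        self.content = raw["text"]            # required (a file path for voice/image)
        self.from_user_id = raw["sender"]     # required
        self.to_user_id = "bot"               # required
        self.other_user_id = raw["sender"]    # required: the other party in a 1:1 chat
```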
--------------------------------------------------------------------------------
/bot/ali/ali_qwen_session.py:
--------------------------------------------------------------------------------
1 | from bot.session_manager import Session
2 | from common.log import logger
3 |
4 | """
5 | e.g.
6 | [
7 | {"role": "system", "content": "You are a helpful assistant."},
8 | {"role": "user", "content": "Who won the world series in 2020?"},
9 | {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
10 | {"role": "user", "content": "Where was it played?"}
11 | ]
12 | """
13 |
14 | class AliQwenSession(Session):
15 | def __init__(self, session_id, system_prompt=None, model="qianwen"):
16 | super().__init__(session_id, system_prompt)
17 | self.model = model
18 | self.reset()
19 |
20 | def discard_exceeding(self, max_tokens, cur_tokens=None):
21 | precise = True
22 | try:
23 | cur_tokens = self.calc_tokens()
24 | except Exception as e:
25 | precise = False
26 | if cur_tokens is None:
27 | raise e
28 | logger.debug("Exception when counting tokens precisely for query: {}".format(e))
29 | while cur_tokens > max_tokens:
30 | if len(self.messages) > 2:
31 | self.messages.pop(1)
32 | elif len(self.messages) == 2 and self.messages[1]["role"] == "assistant":
33 | self.messages.pop(1)
34 | if precise:
35 | cur_tokens = self.calc_tokens()
36 | else:
37 | cur_tokens = cur_tokens - max_tokens
38 | break
39 | elif len(self.messages) == 2 and self.messages[1]["role"] == "user":
40 | logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
41 | break
42 | else:
43 | logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens, len(self.messages)))
44 | break
45 | if precise:
46 | cur_tokens = self.calc_tokens()
47 | else:
48 | cur_tokens = cur_tokens - max_tokens
49 | return cur_tokens
50 |
51 | def calc_tokens(self):
52 | return num_tokens_from_messages(self.messages, self.model)
53 |
54 | def num_tokens_from_messages(messages, model):
55 | """Returns the number of tokens used by a list of messages."""
56 | # Official token counting rule: "For Chinese text, one token usually corresponds to one Chinese character; for English text, one token usually corresponds to 3-4 letters or one word."
57 | # See the documentation for details: https://help.aliyun.com/document_detail/2586397.html
58 | # For now the token count is roughly estimated from the string length, which does not affect normal use.
59 | tokens = 0
60 | for msg in messages:
61 | tokens += len(msg["content"])
62 | return tokens
63 |
--------------------------------------------------------------------------------
/bot/bot_factory.py:
--------------------------------------------------------------------------------
1 | """
2 | channel factory
3 | """
4 | from common import const
5 |
6 |
7 | def create_bot(bot_type):
8 | """
9 | create a bot_type instance
10 | :param bot_type: bot type code
11 | :return: bot instance
12 | """
13 | if bot_type == const.BAIDU:
14 | # Replaced Baidu Unit with the Baidu Wenxin (ERNIE) chat API
15 | # from bot.baidu.baidu_unit_bot import BaiduUnitBot
16 | # return BaiduUnitBot()
17 | from bot.baidu.baidu_wenxin import BaiduWenxinBot
18 | return BaiduWenxinBot()
19 |
20 | elif bot_type == const.CHATGPT:
21 | # ChatGPT web API
22 | from bot.chatgpt.chat_gpt_bot import ChatGPTBot
23 | return ChatGPTBot()
24 |
25 | elif bot_type == const.OPEN_AI:
26 | # Official OpenAI conversation model API
27 | from bot.openai.open_ai_bot import OpenAIBot
28 | return OpenAIBot()
29 |
30 | elif bot_type == const.CHATGPTONAZURE:
31 | # Azure chatgpt service https://azure.microsoft.com/en-in/products/cognitive-services/openai-service/
32 | from bot.chatgpt.chat_gpt_bot import AzureChatGPTBot
33 | return AzureChatGPTBot()
34 |
35 | elif bot_type == const.XUNFEI:
36 | from bot.xunfei.xunfei_spark_bot import XunFeiBot
37 | return XunFeiBot()
38 |
39 | elif bot_type == const.LINKAI:
40 | from bot.linkai.link_ai_bot import LinkAIBot
41 | return LinkAIBot()
42 |
43 | elif bot_type == const.CLAUDEAI:
44 | from bot.claude.claude_ai_bot import ClaudeAIBot
45 | return ClaudeAIBot()
46 | elif bot_type == const.CLAUDEAPI:
47 | from bot.claudeapi.claude_api_bot import ClaudeAPIBot
48 | return ClaudeAPIBot()
49 | elif bot_type == const.QWEN:
50 | from bot.ali.ali_qwen_bot import AliQwenBot
51 | return AliQwenBot()
52 | elif bot_type == const.QWEN_DASHSCOPE:
53 | from bot.dashscope.dashscope_bot import DashscopeBot
54 | return DashscopeBot()
55 | elif bot_type == const.GEMINI:
56 | from bot.gemini.google_gemini_bot import GoogleGeminiBot
57 | return GoogleGeminiBot()
58 |
59 | elif bot_type == const.ZHIPU_AI:
60 | from bot.zhipuai.zhipuai_bot import ZHIPUAIBot
61 | return ZHIPUAIBot()
62 |
63 | elif bot_type == const.MOONSHOT:
64 | from bot.moonshot.moonshot_bot import MoonshotBot
65 | return MoonshotBot()
66 |
67 | elif bot_type == const.MiniMax:
68 | from bot.minimax.minimax_bot import MinimaxBot
69 | return MinimaxBot()
70 |
71 | elif bot_type == const.MODELSCOPE:
72 | from bot.modelscope.modelscope_bot import ModelScopeBot
73 | return ModelScopeBot()
74 |
75 |
76 | raise RuntimeError("Unknown bot_type: {}".format(bot_type))
77 |
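A minimal usage sketch of the factory (assuming the project config has been loaded; the type code below is just one of the constants handled above):

```python
# Illustrative: the factory maps a type code from common.const to a bot instance.
from bot.bot_factory import create_bot
from common import const

bot = create_bot(const.OPEN_AI)   # returns an OpenAIBot
# any type code not matched by a branch above falls through and raises RuntimeError
```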
--------------------------------------------------------------------------------
/voice/pytts/pytts_voice.py:
--------------------------------------------------------------------------------
1 | """
2 | pytts voice service (offline)
3 | """
4 |
5 | import os
6 | import sys
7 | import time
8 |
9 | import pyttsx3
10 |
11 | from bridge.reply import Reply, ReplyType
12 | from common.log import logger
13 | from common.tmp_dir import TmpDir
14 | from voice.voice import Voice
15 |
16 |
17 | class PyttsVoice(Voice):
18 | engine = pyttsx3.init()
19 |
20 | def __init__(self):
21 | # speech rate
22 | self.engine.setProperty("rate", 125)
23 | # volume
24 | self.engine.setProperty("volume", 1.0)
25 | if sys.platform == "win32":
26 | for voice in self.engine.getProperty("voices"):
27 | if "Chinese" in voice.name:
28 | self.engine.setProperty("voice", voice.id)
29 | else:
30 | self.engine.setProperty("voice", "zh")
31 | # Once the espeak problem is fixed, use runAndWait() and remove this startLoop()
32 | # TODO: check whether this works on win32
33 | self.engine.startLoop(useDriverLoop=False)
34 |
35 | def textToVoice(self, text):
36 | try:
37 | # Avoid the same filename under multithreading
38 | wavFileName = "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".wav"
39 | wavFile = TmpDir().path() + wavFileName
40 | logger.info("[Pytts] textToVoice text={} voice file name={}".format(text, wavFile))
41 |
42 | self.engine.save_to_file(text, wavFile)
43 |
44 | if sys.platform == "win32":
45 | self.engine.runAndWait()
46 | else:
47 | # On Ubuntu, runAndWait() does not really wait until the file is created.
48 | # It returns once the task queue is empty, even though the task is still running in a coroutine.
49 | # Calling runAndWait() together with time.sleep() twice makes it hang, so do not use it here.
50 | # If you want to fix this upstream, add self._proxy.setBusy(True) at the beginning of save_to_file, around line 127 in espeak.py.
51 | # self.engine.runAndWait()
52 |
53 | # Until espeak fixes this, we iterate the generator and control the waiting ourselves.
54 | # This is not the canonical way to use it; for example, it also cannot wait if the file already exists.
55 | self.engine.iterate()
56 | while self.engine.isBusy() or wavFileName not in os.listdir(TmpDir().path()):
57 | time.sleep(0.1)
58 |
59 | reply = Reply(ReplyType.VOICE, wavFile)
60 |
61 | except Exception as e:
62 | reply = Reply(ReplyType.ERROR, str(e))
63 | finally:
64 | return reply
65 |
--------------------------------------------------------------------------------
/channel/feishu/feishu_message.py:
--------------------------------------------------------------------------------
1 | from bridge.context import ContextType
2 | from channel.chat_message import ChatMessage
3 | import json
4 | import requests
5 | from common.log import logger
6 | from common.tmp_dir import TmpDir
7 | from common import utils
8 |
9 |
10 | class FeishuMessage(ChatMessage):
11 | def __init__(self, event: dict, is_group=False, access_token=None):
12 | super().__init__(event)
13 | msg = event.get("message")
14 | sender = event.get("sender")
15 | self.access_token = access_token
16 | self.msg_id = msg.get("message_id")
17 | self.create_time = msg.get("create_time")
18 | self.is_group = is_group
19 | msg_type = msg.get("message_type")
20 |
21 | if msg_type == "text":
22 | self.ctype = ContextType.TEXT
23 | content = json.loads(msg.get('content'))
24 | self.content = content.get("text").strip()
25 | elif msg_type == "file":
26 | self.ctype = ContextType.FILE
27 | content = json.loads(msg.get("content"))
28 | file_key = content.get("file_key")
29 | file_name = content.get("file_name")
30 |
31 | self.content = TmpDir().path() + file_key + "." + utils.get_path_suffix(file_name)
32 |
33 | def _download_file():
34 | # If the response status code is 200, write the response body to a local file
35 | url = f"https://open.feishu.cn/open-apis/im/v1/messages/{self.msg_id}/resources/{file_key}"
36 | headers = {
37 | "Authorization": "Bearer " + access_token,
38 | }
39 | params = {
40 | "type": "file"
41 | }
42 | response = requests.get(url=url, headers=headers, params=params)
43 | if response.status_code == 200:
44 | with open(self.content, "wb") as f:
45 | f.write(response.content)
46 | else:
47 | logger.info(f"[FeiShu] Failed to download file, key={file_key}, res={response.text}")
48 | self._prepare_fn = _download_file
49 | else:
50 | raise NotImplementedError("Unsupported message type: Type:{} ".format(msg_type))
51 |
52 | self.from_user_id = sender.get("sender_id").get("open_id")
53 | self.to_user_id = event.get("app_id")
54 | if is_group:
55 | # group chat
56 | self.other_user_id = msg.get("chat_id")
57 | self.actual_user_id = self.from_user_id
58 | self.content = self.content.replace("@_user_1", "").strip()
59 | self.actual_user_nickname = ""
60 | else:
61 | # one-on-one chat
62 | self.other_user_id = self.from_user_id
63 | self.actual_user_id = self.from_user_id
64 |
--------------------------------------------------------------------------------
/voice/openai/openai_voice.py:
--------------------------------------------------------------------------------
1 | """
2 | openai voice service
3 | """
4 | import json
5 |
6 | import openai
7 |
8 | from bridge.reply import Reply, ReplyType
9 | from common.log import logger
10 | from config import conf
11 | from voice.voice import Voice
12 | import requests
13 | from common import const
14 | import datetime, random
15 |
16 | class OpenaiVoice(Voice):
17 | def __init__(self):
18 | openai.api_key = conf().get("open_ai_api_key")
19 |
20 | def voiceToText(self, voice_file):
21 | logger.debug("[Openai] voice file name={}".format(voice_file))
22 | try:
23 | file = open(voice_file, "rb")
24 | api_base = conf().get("open_ai_api_base") or "https://api.openai.com/v1"
25 | url = f'{api_base}/audio/transcriptions'
26 | headers = {
27 | 'Authorization': 'Bearer ' + conf().get("open_ai_api_key"),
28 | # 'Content-Type': 'multipart/form-data' # adding this header causes an error, reason unknown
29 | }
30 | files = {
31 | "file": file,
32 | }
33 | data = {
34 | "model": "whisper-1",
35 | }
36 | response = requests.post(url, headers=headers, files=files, data=data)
37 | response_data = response.json()
38 | text = response_data['text']
39 | reply = Reply(ReplyType.TEXT, text)
40 | logger.info("[Openai] voiceToText text={} voice file name={}".format(text, voice_file))
41 | except Exception as e:
42 | reply = Reply(ReplyType.ERROR, "我暂时还无法听清您的语音,请稍后再试吧~")
43 | finally:
44 | return reply
45 |
46 |
47 | def textToVoice(self, text):
48 | try:
49 | api_base = conf().get("open_ai_api_base") or "https://api.openai.com/v1"
50 | url = f'{api_base}/audio/speech'
51 | headers = {
52 | 'Authorization': 'Bearer ' + conf().get("open_ai_api_key"),
53 | 'Content-Type': 'application/json'
54 | }
55 | data = {
56 | 'model': conf().get("text_to_voice_model") or const.TTS_1,
57 | 'input': text,
58 | 'voice': conf().get("tts_voice_id") or "alloy"
59 | }
60 | response = requests.post(url, headers=headers, json=data)
61 | file_name = "tmp/" + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + str(random.randint(0, 1000)) + ".mp3"
62 | logger.debug(f"[OPENAI] text_to_Voice file_name={file_name}, input={text}")
63 | with open(file_name, 'wb') as f:
64 | f.write(response.content)
65 | logger.info(f"[OPENAI] text_to_Voice success")
66 | reply = Reply(ReplyType.VOICE, file_name)
67 | except Exception as e:
68 | logger.error(e)
69 | reply = Reply(ReplyType.ERROR, "遇到了一点小问题,请稍后再问我吧")
70 | return reply
71 |
--------------------------------------------------------------------------------
/bot/openai/open_ai_session.py:
--------------------------------------------------------------------------------
1 | from bot.session_manager import Session
2 | from common.log import logger
3 |
4 |
5 | class OpenAISession(Session):
6 | def __init__(self, session_id, system_prompt=None, model="text-davinci-003"):
7 | super().__init__(session_id, system_prompt)
8 | self.model = model
9 | self.reset()
10 |
11 | def __str__(self):
12 | # Build the prompt for the completion model
13 | """
14 | e.g. Q: xxx
15 | A: xxx
16 | Q: xxx
17 | """
18 | prompt = ""
19 | for item in self.messages:
20 | if item["role"] == "system":
21 | prompt += item["content"] + "<|endoftext|>\n\n\n"
22 | elif item["role"] == "user":
23 | prompt += "Q: " + item["content"] + "\n"
24 | elif item["role"] == "assistant":
25 | prompt += "\n\nA: " + item["content"] + "<|endoftext|>\n"
26 |
27 | if len(self.messages) > 0 and self.messages[-1]["role"] == "user":
28 | prompt += "A: "
29 | return prompt
30 |
31 | def discard_exceeding(self, max_tokens, cur_tokens=None):
32 | precise = True
33 | try:
34 | cur_tokens = self.calc_tokens()
35 | except Exception as e:
36 | precise = False
37 | if cur_tokens is None:
38 | raise e
39 | logger.debug("Exception when counting tokens precisely for query: {}".format(e))
40 | while cur_tokens > max_tokens:
41 | if len(self.messages) > 1:
42 | self.messages.pop(0)
43 | elif len(self.messages) == 1 and self.messages[0]["role"] == "assistant":
44 | self.messages.pop(0)
45 | if precise:
46 | cur_tokens = self.calc_tokens()
47 | else:
48 | cur_tokens = len(str(self))
49 | break
50 | elif len(self.messages) == 1 and self.messages[0]["role"] == "user":
51 | logger.warn("user question exceed max_tokens. total_tokens={}".format(cur_tokens))
52 | break
53 | else:
54 | logger.debug("max_tokens={}, total_tokens={}, len(conversation)={}".format(max_tokens, cur_tokens, len(self.messages)))
55 | break
56 | if precise:
57 | cur_tokens = self.calc_tokens()
58 | else:
59 | cur_tokens = len(str(self))
60 | return cur_tokens
61 |
62 | def calc_tokens(self):
63 | return num_tokens_from_string(str(self), self.model)
64 |
65 |
66 | # refer to https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
67 | def num_tokens_from_string(string: str, model: str) -> int:
68 | """Returns the number of tokens in a text string."""
69 | import tiktoken
70 |
71 | encoding = tiktoken.encoding_for_model(model)
72 | num_tokens = len(encoding.encode(string, disallowed_special=()))
73 | return num_tokens
74 |
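A small usage sketch of the token counter above (requires the tiktoken package; the exact count depends on the encoding tiktoken selects for the model, so the number shown is only indicative):

```python
# Illustrative: count tokens the same way OpenAISession.calc_tokens() does.
from bot.openai.open_ai_session import num_tokens_from_string

n = num_tokens_from_string("Q: hello\nA: ", "text-davinci-003")
print(n)  # a small integer, roughly 6-8 tokens for this prompt
```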
--------------------------------------------------------------------------------
/bot/minimax/minimax_session.py:
--------------------------------------------------------------------------------
1 | from bot.session_manager import Session
2 | from common.log import logger
3 |
4 | """
5 | e.g.
6 | [
7 | {"role": "system", "content": "You are a helpful assistant."},
8 | {"role": "user", "content": "Who won the world series in 2020?"},
9 | {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
10 | {"role": "user", "content": "Where was it played?"}
11 | ]
12 | """
13 |
14 |
15 | class MinimaxSession(Session):
16 | def __init__(self, session_id, system_prompt=None, model="minimax"):
17 | super().__init__(session_id, system_prompt)
18 | self.model = model
19 | # self.reset()
20 |
21 | def add_query(self, query):
22 | user_item = {"sender_type": "USER", "sender_name": self.session_id, "text": query}
23 | self.messages.append(user_item)
24 |
25 | def add_reply(self, reply):
26 | assistant_item = {"sender_type": "BOT", "sender_name": "MM智能助理", "text": reply}
27 | self.messages.append(assistant_item)
28 |
29 | def discard_exceeding(self, max_tokens, cur_tokens=None):
30 | precise = True
31 | try:
32 | cur_tokens = self.calc_tokens()
33 | except Exception as e:
34 | precise = False
35 | if cur_tokens is None:
36 | raise e
37 | logger.debug("Exception when counting tokens precisely for query: {}".format(e))
38 | while cur_tokens > max_tokens:
39 | if len(self.messages) > 2:
40 | self.messages.pop(1)
41 | elif len(self.messages) == 2 and self.messages[1]["sender_type"] == "BOT":
42 | self.messages.pop(1)
43 | if precise:
44 | cur_tokens = self.calc_tokens()
45 | else:
46 | cur_tokens = cur_tokens - max_tokens
47 | break
48 | elif len(self.messages) == 2 and self.messages[1]["sender_type"] == "USER":
49 | logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
50 | break
51 | else:
52 | logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens, len(self.messages)))
53 | break
54 | if precise:
55 | cur_tokens = self.calc_tokens()
56 | else:
57 | cur_tokens = cur_tokens - max_tokens
58 | return cur_tokens
59 |
60 | def calc_tokens(self):
61 | return num_tokens_from_messages(self.messages, self.model)
62 |
63 |
64 | def num_tokens_from_messages(messages, model):
65 | """Returns the number of tokens used by a list of messages."""
66 | # Official token counting rule: "For Chinese text, one token usually corresponds to one Chinese character; for English text, one token usually corresponds to 3-4 letters or one word."
67 | # See the documentation for details: https://help.aliyun.com/document_detail/2586397.html
68 | # For now the token count is roughly estimated from the string length, which does not affect normal use.
69 | tokens = 0
70 | for msg in messages:
71 | tokens += len(msg["text"])
72 | return tokens
73 |
--------------------------------------------------------------------------------
/channel/terminal/terminal_channel.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | from bridge.context import *
4 | from bridge.reply import Reply, ReplyType
5 | from channel.chat_channel import ChatChannel, check_prefix
6 | from channel.chat_message import ChatMessage
7 | from common.log import logger
8 | from config import conf
9 |
10 |
11 | class TerminalMessage(ChatMessage):
12 | def __init__(
13 | self,
14 | msg_id,
15 | content,
16 | ctype=ContextType.TEXT,
17 | from_user_id="User",
18 | to_user_id="Chatgpt",
19 | other_user_id="Chatgpt",
20 | ):
21 | self.msg_id = msg_id
22 | self.ctype = ctype
23 | self.content = content
24 | self.from_user_id = from_user_id
25 | self.to_user_id = to_user_id
26 | self.other_user_id = other_user_id
27 |
28 |
29 | class TerminalChannel(ChatChannel):
30 | NOT_SUPPORT_REPLYTYPE = [ReplyType.VOICE]
31 |
32 | def send(self, reply: Reply, context: Context):
33 | print("\nBot:")
34 | if reply.type == ReplyType.IMAGE:
35 | from PIL import Image
36 |
37 | image_storage = reply.content
38 | image_storage.seek(0)
39 | img = Image.open(image_storage)
40 | print("")
41 | img.show()
42 | elif reply.type == ReplyType.IMAGE_URL: # download the image from the network
43 | import io
44 |
45 | import requests
46 | from PIL import Image
47 |
48 | img_url = reply.content
49 | pic_res = requests.get(img_url, stream=True)
50 | image_storage = io.BytesIO()
51 | for block in pic_res.iter_content(1024):
52 | image_storage.write(block)
53 | image_storage.seek(0)
54 | img = Image.open(image_storage)
55 | print(img_url)
56 | img.show()
57 | else:
58 | print(reply.content)
59 | print("\nUser:", end="")
60 | sys.stdout.flush()
61 | return
62 |
63 | def startup(self):
64 | context = Context()
65 | logger.setLevel("WARN")
66 | print("\nPlease input your question:\nUser:", end="")
67 | sys.stdout.flush()
68 | msg_id = 0
69 | while True:
70 | try:
71 | prompt = self.get_input()
72 | except KeyboardInterrupt:
73 | print("\nExiting...")
74 | sys.exit()
75 | msg_id += 1
76 | trigger_prefixs = conf().get("single_chat_prefix", [""])
77 | if check_prefix(prompt, trigger_prefixs) is None:
78 | prompt = trigger_prefixs[0] + prompt # prepend the trigger prefix to messages that did not include one
79 |
80 | context = self._compose_context(ContextType.TEXT, prompt, msg=TerminalMessage(msg_id, prompt))
81 | context["isgroup"] = False
82 | if context:
83 | self.produce(context)
84 | else:
85 | raise Exception("context is None")
86 |
87 | def get_input(self):
88 | """
89 | Multi-line input function
90 | """
91 | sys.stdout.flush()
92 | line = input()
93 | return line
94 |
--------------------------------------------------------------------------------
/channel/dingtalk/dingtalk_message.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import requests
4 | from dingtalk_stream import ChatbotMessage
5 |
6 | from bridge.context import ContextType
7 | from channel.chat_message import ChatMessage
8 | # -*- coding=utf-8 -*-
9 | from common.log import logger
10 | from common.tmp_dir import TmpDir
11 |
12 |
13 | class DingTalkMessage(ChatMessage):
14 | def __init__(self, event: ChatbotMessage, image_download_handler):
15 | super().__init__(event)
16 | self.image_download_handler = image_download_handler
17 | self.msg_id = event.message_id
18 | self.message_type = event.message_type
19 | self.incoming_message = event
20 | self.sender_staff_id = event.sender_staff_id
21 | self.other_user_id = event.conversation_id
22 | self.create_time = event.create_at
23 | self.image_content = event.image_content
24 | self.rich_text_content = event.rich_text_content
25 | if event.conversation_type == "1":
26 | self.is_group = False
27 | else:
28 | self.is_group = True
29 |
30 | if self.message_type == "text":
31 | self.ctype = ContextType.TEXT
32 |
33 | self.content = event.text.content.strip()
34 | elif self.message_type == "audio":
35 | # DingTalk recognizes voice messages natively, so the recognized text is extracted here and handled as a text message
36 | self.content = event.extensions['content']['recognition'].strip()
37 | self.ctype = ContextType.TEXT
38 | elif (self.message_type == 'picture') or (self.message_type == 'richText'):
39 | self.ctype = ContextType.IMAGE
40 | # handle DingTalk picture or rich-text messages
41 | image_list = event.get_image_list()
42 | if len(image_list) > 0:
43 | download_code = image_list[0]
44 | download_url = image_download_handler.get_image_download_url(download_code)
45 | self.content = download_image_file(download_url, TmpDir().path())
46 | else:
47 | logger.debug(f"[Dingtalk] messageType :{self.message_type} , imageList isEmpty")
48 |
49 | if self.is_group:
50 | self.from_user_id = event.conversation_id
51 | self.actual_user_id = event.sender_id
52 | self.is_at = True
53 | else:
54 | self.from_user_id = event.sender_id
55 | self.actual_user_id = event.sender_id
56 | self.to_user_id = event.chatbot_user_id
57 | self.other_user_nickname = event.conversation_title
58 |
59 |
60 | def download_image_file(image_url, temp_dir):
61 | headers = {
62 | 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
63 | }
64 | # proxy settings (optional)
65 | # self.proxies
66 | # , proxies=self.proxies
67 | response = requests.get(image_url, headers=headers, stream=True, timeout=60 * 5)
68 | if response.status_code == 200:
69 |
70 | # build the file name
71 | file_name = image_url.split("/")[-1].split("?")[0]
72 |
73 | # create the temp directory if it does not exist
74 | if not os.path.exists(temp_dir):
75 | os.makedirs(temp_dir)
76 |
77 | # save the file into the temp directory
78 | file_path = os.path.join(temp_dir, file_name)
79 | with open(file_path, 'wb') as file:
80 | file.write(response.content)
81 | return file_path
82 | else:
83 | logger.info(f"[Dingtalk] Failed to download image file, {response.content}")
84 | return None
85 |
--------------------------------------------------------------------------------
/channel/wechat/wechaty_message.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import re
3 |
4 | from wechaty import MessageType
5 | from wechaty.user import Message
6 |
7 | from bridge.context import ContextType
8 | from channel.chat_message import ChatMessage
9 | from common.log import logger
10 | from common.tmp_dir import TmpDir
11 |
12 |
13 | class aobject(object):
14 | """Inheriting this class allows you to define an async __init__.
15 |
16 | So you can create objects by doing something like `await MyClass(params)`
17 | """
18 |
19 | async def __new__(cls, *a, **kw):
20 | instance = super().__new__(cls)
21 | await instance.__init__(*a, **kw)
22 | return instance
23 |
24 | async def __init__(self):
25 | pass
26 |
27 |
28 | class WechatyMessage(ChatMessage, aobject):
29 | async def __init__(self, wechaty_msg: Message):
30 | super().__init__(wechaty_msg)
31 |
32 | room = wechaty_msg.room()
33 |
34 | self.msg_id = wechaty_msg.message_id
35 | self.create_time = wechaty_msg.payload.timestamp
36 | self.is_group = room is not None
37 |
38 | if wechaty_msg.type() == MessageType.MESSAGE_TYPE_TEXT:
39 | self.ctype = ContextType.TEXT
40 | self.content = wechaty_msg.text()
41 | elif wechaty_msg.type() == MessageType.MESSAGE_TYPE_AUDIO:
42 | self.ctype = ContextType.VOICE
43 | voice_file = await wechaty_msg.to_file_box()
44 | self.content = TmpDir().path() + voice_file.name # content stores the path in the temp directory directly
45 |
46 | def func():
47 | loop = asyncio.get_event_loop()
48 | asyncio.run_coroutine_threadsafe(voice_file.to_file(self.content), loop).result()
49 |
50 | self._prepare_fn = func
51 |
52 | else:
53 | raise NotImplementedError("Unsupported message type: {}".format(wechaty_msg.type()))
54 |
55 | from_contact = wechaty_msg.talker() # get the sender of the message
56 | self.from_user_id = from_contact.contact_id
57 | self.from_user_nickname = from_contact.name
58 |
59 | # The meaning of from and to in group chats differs between wechaty and itchat
60 | # wechaty: from is the actual sender of the message, to is the room the message belongs to
61 | # itchat: if you send a group message, from/to are yourself and the group; if someone else sends one, from/to are the group and yourself
62 | # The difference does not affect the logic: in groups we only use 1. from to tell whether we sent the message ourselves, and 2. actual_user_id to identify the actual sender
63 |
64 | if self.is_group:
65 | self.to_user_id = room.room_id
66 | self.to_user_nickname = await room.topic()
67 | else:
68 | to_contact = wechaty_msg.to()
69 | self.to_user_id = to_contact.contact_id
70 | self.to_user_nickname = to_contact.name
71 |
72 | if self.is_group or wechaty_msg.is_self(): # for group messages other_user is the group; for direct messages we sent ourselves, it is the other party
73 | self.other_user_id = self.to_user_id
74 | self.other_user_nickname = self.to_user_nickname
75 | else:
76 | self.other_user_id = self.from_user_id
77 | self.other_user_nickname = self.from_user_nickname
78 |
79 | if self.is_group: # in wechaty group chats, the actual sender is from_user
80 | self.is_at = await wechaty_msg.mention_self()
81 | if not self.is_at: # copy-pasted messages sometimes do not count as an @mention even though the content contains @xxx; handle that case here
82 | name = wechaty_msg.wechaty.user_self().name
83 | pattern = f"@{re.escape(name)}(\u2005|\u0020)"
84 | if re.search(pattern, self.content):
85 | logger.debug(f"wechaty message {self.msg_id} include at")
86 | self.is_at = True
87 |
88 | self.actual_user_id = self.from_user_id
89 | self.actual_user_nickname = self.from_user_nickname
90 |
--------------------------------------------------------------------------------
/channel/wechatmp/active_reply.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import web
4 | from wechatpy import parse_message
5 | from wechatpy.replies import create_reply
6 |
7 | from bridge.context import *
8 | from bridge.reply import *
9 | from channel.wechatmp.common import *
10 | from channel.wechatmp.wechatmp_channel import WechatMPChannel
11 | from channel.wechatmp.wechatmp_message import WeChatMPMessage
12 | from common.log import logger
13 | from config import conf, subscribe_msg
14 |
15 |
16 | # This class is instantiated once per query
17 | class Query:
18 | def GET(self):
19 | return verify_server(web.input())
20 |
21 | def POST(self):
22 | # Make sure to reuse the instance that was created first; @singleton takes care of that.
23 | try:
24 | args = web.input()
25 | verify_server(args)
26 | channel = WechatMPChannel()
27 | message = web.data()
28 | encrypt_func = lambda x: x
29 | if args.get("encrypt_type") == "aes":
30 | logger.debug("[wechatmp] Receive encrypted post data:\n" + message.decode("utf-8"))
31 | if not channel.crypto:
32 | raise Exception("Crypto not initialized, Please set wechatmp_aes_key in config.json")
33 | message = channel.crypto.decrypt_message(message, args.msg_signature, args.timestamp, args.nonce)
34 | encrypt_func = lambda x: channel.crypto.encrypt_message(x, args.nonce, args.timestamp)
35 | else:
36 | logger.debug("[wechatmp] Receive post data:\n" + message.decode("utf-8"))
37 | msg = parse_message(message)
38 | if msg.type in ["text", "voice", "image"]:
39 | wechatmp_msg = WeChatMPMessage(msg, client=channel.client)
40 | from_user = wechatmp_msg.from_user_id
41 | content = wechatmp_msg.content
42 | message_id = wechatmp_msg.msg_id
43 |
44 | logger.info(
45 | "[wechatmp] {}:{} Receive post query {} {}: {}".format(
46 | web.ctx.env.get("REMOTE_ADDR"),
47 | web.ctx.env.get("REMOTE_PORT"),
48 | from_user,
49 | message_id,
50 | content,
51 | )
52 | )
53 | if msg.type == "voice" and wechatmp_msg.ctype == ContextType.TEXT and conf().get("voice_reply_voice", False):
54 | context = channel._compose_context(wechatmp_msg.ctype, content, isgroup=False, desire_rtype=ReplyType.VOICE, msg=wechatmp_msg)
55 | else:
56 | context = channel._compose_context(wechatmp_msg.ctype, content, isgroup=False, msg=wechatmp_msg)
57 | if context:
58 | channel.produce(context)
59 | # The reply will be sent by channel.send() in another thread
60 | return "success"
61 | elif msg.type == "event":
62 | logger.info("[wechatmp] Event {} from {}".format(msg.event, msg.source))
63 | if msg.event in ["subscribe", "subscribe_scan"]:
64 | reply_text = subscribe_msg()
65 | if reply_text:
66 | replyPost = create_reply(reply_text, msg)
67 | return encrypt_func(replyPost.render())
68 | else:
69 | return "success"
70 | else:
71 | logger.info("暂且不处理")
72 | return "success"
73 | except Exception as exc:
74 | logger.exception(exc)
75 | return exc
76 |
--------------------------------------------------------------------------------
/lib/itchat/__init__.py:
--------------------------------------------------------------------------------
1 | from .core import Core
2 | from .config import VERSION, ASYNC_COMPONENTS
3 | from .log import set_logging
4 |
5 | if ASYNC_COMPONENTS:
6 | from .async_components import load_components
7 | else:
8 | from .components import load_components
9 |
10 |
11 | __version__ = VERSION
12 |
13 |
14 | instanceList = []
15 |
16 | def load_async_itchat() -> Core:
17 | """load async-based itchat instance
18 |
19 | Returns:
20 | Core: the abstract interface of itchat
21 | """
22 | from .async_components import load_components
23 | load_components(Core)
24 | return Core()
25 |
26 |
27 | def load_sync_itchat() -> Core:
28 | """load sync-based itchat instance
29 |
30 | Returns:
31 | Core: the abstract interface of itchat
32 | """
33 | from .components import load_components
34 | load_components(Core)
35 | return Core()
36 |
37 |
38 | if ASYNC_COMPONENTS:
39 | instance = load_async_itchat()
40 | else:
41 | instance = load_sync_itchat()
42 |
43 |
44 | instanceList = [instance]
45 |
46 | # I really want to use sys.modules[__name__] = originInstance
47 | # but it makes auto-fill a real mess, so forgive me for my following **
48 | # actually it took me less than 30 seconds, god bless Uganda
49 |
50 | # components.login
51 | login = instance.login
52 | get_QRuuid = instance.get_QRuuid
53 | get_QR = instance.get_QR
54 | check_login = instance.check_login
55 | web_init = instance.web_init
56 | show_mobile_login = instance.show_mobile_login
57 | start_receiving = instance.start_receiving
58 | get_msg = instance.get_msg
59 | logout = instance.logout
60 | # components.contact
61 | update_chatroom = instance.update_chatroom
62 | update_friend = instance.update_friend
63 | get_contact = instance.get_contact
64 | get_friends = instance.get_friends
65 | get_chatrooms = instance.get_chatrooms
66 | get_mps = instance.get_mps
67 | set_alias = instance.set_alias
68 | set_pinned = instance.set_pinned
69 | accept_friend = instance.accept_friend
70 | get_head_img = instance.get_head_img
71 | create_chatroom = instance.create_chatroom
72 | set_chatroom_name = instance.set_chatroom_name
73 | delete_member_from_chatroom = instance.delete_member_from_chatroom
74 | add_member_into_chatroom = instance.add_member_into_chatroom
75 | # components.messages
76 | send_raw_msg = instance.send_raw_msg
77 | send_msg = instance.send_msg
78 | upload_file = instance.upload_file
79 | send_file = instance.send_file
80 | send_image = instance.send_image
81 | send_video = instance.send_video
82 | send = instance.send
83 | revoke = instance.revoke
84 | # components.hotreload
85 | dump_login_status = instance.dump_login_status
86 | load_login_status = instance.load_login_status
87 | # components.register
88 | auto_login = instance.auto_login
89 | configured_reply = instance.configured_reply
90 | msg_register = instance.msg_register
91 | run = instance.run
92 | # other functions
93 | search_friends = instance.search_friends
94 | search_chatrooms = instance.search_chatrooms
95 | search_mps = instance.search_mps
96 | set_logging = set_logging
97 |
--------------------------------------------------------------------------------
/bot/session_manager.py:
--------------------------------------------------------------------------------
1 | from common.expired_dict import ExpiredDict
2 | from common.log import logger
3 | from config import conf
4 |
5 |
6 | class Session(object):
7 | def __init__(self, session_id, system_prompt=None):
8 | self.session_id = session_id
9 | self.messages = []
10 | if system_prompt is None:
11 | self.system_prompt = conf().get("character_desc", "")
12 | else:
13 | self.system_prompt = system_prompt
14 |
15 | # reset the session
16 | def reset(self):
17 | system_item = {"role": "system", "content": self.system_prompt}
18 | self.messages = [system_item]
19 |
20 | def set_system_prompt(self, system_prompt):
21 | self.system_prompt = system_prompt
22 | self.reset()
23 |
24 | def add_query(self, query):
25 | user_item = {"role": "user", "content": query}
26 | self.messages.append(user_item)
27 |
28 | def add_reply(self, reply):
29 | assistant_item = {"role": "assistant", "content": reply}
30 | self.messages.append(assistant_item)
31 |
32 | def discard_exceeding(self, max_tokens=None, cur_tokens=None):
33 | raise NotImplementedError
34 |
35 | def calc_tokens(self):
36 | raise NotImplementedError
37 |
38 |
39 | class SessionManager(object):
40 | def __init__(self, sessioncls, **session_args):
41 | if conf().get("expires_in_seconds"):
42 | sessions = ExpiredDict(conf().get("expires_in_seconds"))
43 | else:
44 | sessions = dict()
45 | self.sessions = sessions
46 | self.sessioncls = sessioncls
47 | self.session_args = session_args
48 |
49 | def build_session(self, session_id, system_prompt=None):
50 | """
51 | If session_id is not in sessions, create a new session and add it to sessions.
52 | If system_prompt is not None, update the session's system_prompt and reset the session.
53 | """
54 | if session_id is None:
55 | return self.sessioncls(session_id, system_prompt, **self.session_args)
56 |
57 | if session_id not in self.sessions:
58 | self.sessions[session_id] = self.sessioncls(session_id, system_prompt, **self.session_args)
59 | elif system_prompt is not None: # if a new system_prompt is provided, update it and reset the session
60 | self.sessions[session_id].set_system_prompt(system_prompt)
61 | session = self.sessions[session_id]
62 | return session
63 |
64 | def session_query(self, query, session_id):
65 | session = self.build_session(session_id)
66 | session.add_query(query)
67 | try:
68 | max_tokens = conf().get("conversation_max_tokens", 1000)
69 | total_tokens = session.discard_exceeding(max_tokens, None)
70 | logger.debug("prompt tokens used={}".format(total_tokens))
71 | except Exception as e:
72 | logger.warning("Exception when counting tokens precisely for prompt: {}".format(str(e)))
73 | return session
74 |
75 | def session_reply(self, reply, session_id, total_tokens=None):
76 | session = self.build_session(session_id)
77 | session.add_reply(reply)
78 | try:
79 | max_tokens = conf().get("conversation_max_tokens", 1000)
80 | tokens_cnt = session.discard_exceeding(max_tokens, total_tokens)
81 | logger.debug("raw total_tokens={}, savesession tokens={}".format(total_tokens, tokens_cnt))
82 | except Exception as e:
83 | logger.warning("Exception when counting tokens precisely for session: {}".format(str(e)))
84 | return session
85 |
86 | def clear_session(self, session_id):
87 | if session_id in self.sessions:
88 | del self.sessions[session_id]
89 |
90 | def clear_all_session(self):
91 | self.sessions.clear()
92 |
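A rough usage sketch of the manager (assuming the global config has been loaded; OpenAISession is one of the Session subclasses shipped under bot/, and the session id is arbitrary):

```python
# Illustrative: keep one OpenAISession per chat id and feed it queries and replies.
from bot.openai.open_ai_session import OpenAISession
from bot.session_manager import SessionManager

manager = SessionManager(OpenAISession, model="text-davinci-003")

session = manager.session_query("Hello!", session_id="user_123")  # appends the user message
print(str(session))                                               # prompt that would be sent to the model
manager.session_reply("Hi, how can I help?", "user_123")          # appends the assistant reply
manager.clear_session("user_123")                                 # drop the conversation state
```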
--------------------------------------------------------------------------------
/voice/baidu/baidu_voice.py:
--------------------------------------------------------------------------------
1 | """
2 | baidu voice service
3 | """
4 | import json
5 | import os
6 | import time
7 |
8 | from aip import AipSpeech
9 |
10 | from bridge.reply import Reply, ReplyType
11 | from common.log import logger
12 | from common.tmp_dir import TmpDir
13 | from config import conf
14 | from voice.audio_convert import get_pcm_from_wav
15 | from voice.voice import Voice
16 |
17 | """
18 | Baidu speech recognition API.
19 | dev_pid:
20 | - 1936: Mandarin, far-field
21 | - 1536: Mandarin (with simple English recognition)
22 | - 1537: Mandarin (Chinese only)
23 | - 1737: English
24 | - 1637: Cantonese
25 | - 1837: Sichuan dialect
26 | To use this module, first register a developer account at yuyin.baidu.com,
27 | then create a new application and obtain the API Key and Secret Key from "查看key" (view key) in the application management page.
28 | Then fill these two values into config.json, together with app_id and dev_pid.
29 | """
30 |
31 |
32 | class BaiduVoice(Voice):
33 | def __init__(self):
34 | try:
35 | curdir = os.path.dirname(__file__)
36 | config_path = os.path.join(curdir, "config.json")
37 | bconf = None
38 | if not os.path.exists(config_path): # if there is no config file, create a local one
39 | bconf = {"lang": "zh", "ctp": 1, "spd": 5, "pit": 5, "vol": 5, "per": 0}
40 | with open(config_path, "w") as fw:
41 | json.dump(bconf, fw, indent=4)
42 | else:
43 | with open(config_path, "r") as fr:
44 | bconf = json.load(fr)
45 |
46 | self.app_id = str(conf().get("baidu_app_id"))
47 | self.api_key = str(conf().get("baidu_api_key"))
48 | self.secret_key = str(conf().get("baidu_secret_key"))
49 | self.dev_id = conf().get("baidu_dev_pid")
50 | self.lang = bconf["lang"]
51 | self.ctp = bconf["ctp"]
52 | self.spd = bconf["spd"]
53 | self.pit = bconf["pit"]
54 | self.vol = bconf["vol"]
55 | self.per = bconf["per"]
56 |
57 | self.client = AipSpeech(self.app_id, self.api_key, self.secret_key)
58 | except Exception as e:
59 | logger.warn("BaiduVoice init failed: %s, ignore " % e)
60 |
61 | def voiceToText(self, voice_file):
62 | # recognize a local audio file
63 | logger.debug("[Baidu] voice file name={}".format(voice_file))
64 | pcm = get_pcm_from_wav(voice_file)
65 | res = self.client.asr(pcm, "pcm", 16000, {"dev_pid": self.dev_id})
66 | if res["err_no"] == 0:
67 | logger.info("百度语音识别到了:{}".format(res["result"]))
68 | text = "".join(res["result"])
69 | reply = Reply(ReplyType.TEXT, text)
70 | else:
71 | logger.info("百度语音识别出错了: {}".format(res["err_msg"]))
72 | if res["err_msg"] == "request pv too much":
73 | logger.info(" 出现这个原因很可能是你的百度语音服务调用量超出限制,或未开通付费")
74 | reply = Reply(ReplyType.ERROR, "百度语音识别出错了;{0}".format(res["err_msg"]))
75 | return reply
76 |
77 | def textToVoice(self, text):
78 | result = self.client.synthesis(
79 | text,
80 | self.lang,
81 | self.ctp,
82 | {"spd": self.spd, "pit": self.pit, "vol": self.vol, "per": self.per},
83 | )
84 | if not isinstance(result, dict):
85 | # Avoid the same filename under multithreading
86 | fileName = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3"
87 | with open(fileName, "wb") as f:
88 | f.write(result)
89 | logger.info("[Baidu] textToVoice text={} voice file name={}".format(text, fileName))
90 | reply = Reply(ReplyType.VOICE, fileName)
91 | else:
92 | logger.error("[Baidu] textToVoice error={}".format(result))
93 | reply = Reply(ReplyType.ERROR, "抱歉,语音合成失败")
94 | return reply
95 |
--------------------------------------------------------------------------------
/voice/linkai/linkai_voice.py:
--------------------------------------------------------------------------------
1 | """
2 | linkai voice service
3 | """
4 | import random
5 | import requests
6 | from voice import audio_convert
7 | from bridge.reply import Reply, ReplyType
8 | from common.log import logger
9 | from config import conf
10 | from voice.voice import Voice
11 | from common import const
12 | import os
13 | import datetime
14 |
15 | class LinkAIVoice(Voice):
16 | def __init__(self):
17 | pass
18 |
19 | def voiceToText(self, voice_file):
20 | logger.debug("[LinkVoice] voice file name={}".format(voice_file))
21 | try:
22 | url = conf().get("linkai_api_base", "https://api.link-ai.tech") + "/v1/audio/transcriptions"
23 | headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}
24 | model = None
25 | if not conf().get("text_to_voice") or conf().get("voice_to_text") == "openai":
26 | model = const.WHISPER_1
27 | if voice_file.endswith(".amr"):
28 | try:
29 | mp3_file = os.path.splitext(voice_file)[0] + ".mp3"
30 | audio_convert.any_to_mp3(voice_file, mp3_file)
31 | voice_file = mp3_file
32 | except Exception as e:
33 | logger.warn(f"[LinkVoice] amr file transfer failed, directly send amr voice file: {format(e)}")
34 | file = open(voice_file, "rb")
35 | file_body = {
36 | "file": file
37 | }
38 | data = {
39 | "model": model
40 | }
41 | res = requests.post(url, files=file_body, headers=headers, data=data, timeout=(5, 60))
42 | if res.status_code == 200:
43 | text = res.json().get("text")
44 | else:
45 | res_json = res.json()
46 | logger.error(f"[LinkVoice] voiceToText error, status_code={res.status_code}, msg={res_json.get('message')}")
47 | return None
48 | reply = Reply(ReplyType.TEXT, text)
49 | logger.info(f"[LinkVoice] voiceToText success, text={text}, file name={voice_file}")
50 | except Exception as e:
51 | logger.error(e)
52 | return None
53 | return reply
54 |
55 | def textToVoice(self, text):
56 | try:
57 | url = conf().get("linkai_api_base", "https://api.link-ai.tech") + "/v1/audio/speech"
58 | headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}
59 | model = const.TTS_1
60 | if not conf().get("text_to_voice") or conf().get("text_to_voice") in ["openai", const.TTS_1, const.TTS_1_HD]:
61 | model = conf().get("text_to_voice_model") or const.TTS_1
62 | data = {
63 | "model": model,
64 | "input": text,
65 | "voice": conf().get("tts_voice_id"),
66 | "app_code": conf().get("linkai_app_code")
67 | }
68 | res = requests.post(url, headers=headers, json=data, timeout=(5, 120))
69 | if res.status_code == 200:
70 | tmp_file_name = "tmp/" + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + str(random.randint(0, 1000)) + ".mp3"
71 | with open(tmp_file_name, 'wb') as f:
72 | f.write(res.content)
73 | reply = Reply(ReplyType.VOICE, tmp_file_name)
74 | logger.info(f"[LinkVoice] textToVoice success, input={text}, model={model}, voice_id={data.get('voice')}")
75 | return reply
76 | else:
77 | res_json = res.json()
78 | logger.error(f"[LinkVoice] textToVoice error, status_code={res.status_code}, msg={res_json.get('message')}")
79 | return None
80 | except Exception as e:
81 | logger.error(e)
82 | # reply = Reply(ReplyType.ERROR, "遇到了一点小问题,请稍后再问我吧")
83 | return None
84 |
--------------------------------------------------------------------------------
/voice/xunfei/xunfei_voice.py:
--------------------------------------------------------------------------------
1 | #####################################################################
2 | # xunfei voice service
3 | # Auth: njnuko
4 | # Email: njnuko@163.com
5 | #
6 | # To use this module, first register a developer account at xfyun.cn,
7 | # then create a new application; the APPID, API Key and Secret Key are shown next to the speech recognition or speech synthesis page in the application console.
8 | # Then fill these three values into config.json.
9 | #
10 | # Configuration example:
11 | # {
12 | # "APPID":"xxx71xxx",
13 | # "APIKey":"xxxx69058exxxxxx", # APIKey from the speech synthesis or dictation console at xfyun.cn
14 | # "APISecret":"xxxx697f0xxxxxx", # APISecret from the speech synthesis or dictation console at xfyun.cn
15 | # "BusinessArgsTTS":{"aue": "lame", "sfl": 1, "auf": "audio/L16;rate=16000", "vcn": "xiaoyan", "tte": "utf8"}, # speech synthesis parameters, see the xfyun.cn documentation
16 | # "BusinessArgsASR":{"domain": "iat", "language": "zh_cn", "accent": "mandarin", "vad_eos":10000, "dwa": "wpgs"} # speech dictation parameters, see the xfyun.cn documentation
17 | # }
18 | #####################################################################
19 |
20 | import json
21 | import os
22 | import time
23 |
24 | from bridge.reply import Reply, ReplyType
25 | from common.log import logger
26 | from common.tmp_dir import TmpDir
27 | from config import conf
28 | from voice.voice import Voice
29 | from .xunfei_asr import xunfei_asr
30 | from .xunfei_tts import xunfei_tts
31 | from voice.audio_convert import any_to_mp3
32 | import shutil
33 | from pydub import AudioSegment
34 |
35 |
36 | class XunfeiVoice(Voice):
37 | def __init__(self):
38 | try:
39 | curdir = os.path.dirname(__file__)
40 | config_path = os.path.join(curdir, "config.json")
41 | conf = None
42 | with open(config_path, "r") as fr:
43 | conf = json.load(fr)
44 | print(conf)
45 | self.APPID = str(conf.get("APPID"))
46 | self.APIKey = str(conf.get("APIKey"))
47 | self.APISecret = str(conf.get("APISecret"))
48 | self.BusinessArgsTTS = conf.get("BusinessArgsTTS")
49 | self.BusinessArgsASR= conf.get("BusinessArgsASR")
50 |
51 | except Exception as e:
52 | logger.warn("XunfeiVoice init failed: %s, ignore " % e)
53 |
54 | def voiceToText(self, voice_file):
55 | # recognize a local audio file
56 | try:
57 | logger.debug("[Xunfei] voice file name={}".format(voice_file))
58 | #print("voice_file===========",voice_file)
59 | #print("voice_file_type===========",type(voice_file))
60 | #mp3_name, file_extension = os.path.splitext(voice_file)
61 | #mp3_file = mp3_name + ".mp3"
62 | #pcm_data=get_pcm_from_wav(voice_file)
63 | #mp3_name, file_extension = os.path.splitext(voice_file)
64 | #AudioSegment.from_wav(voice_file).export(mp3_file, format="mp3")
65 | #shutil.copy2(voice_file, 'tmp/test1.wav')
66 | #shutil.copy2(mp3_file, 'tmp/test1.mp3')
67 | #print("voice and mp3 file",voice_file,mp3_file)
68 | text = xunfei_asr(self.APPID,self.APISecret,self.APIKey,self.BusinessArgsASR,voice_file)
69 | logger.info("讯飞语音识别到了: {}".format(text))
70 | reply = Reply(ReplyType.TEXT, text)
71 | except Exception as e:
72 | logger.warn("XunfeiVoice voiceToText failed: %s" % e)
73 | reply = Reply(ReplyType.ERROR, "讯飞语音识别出错了;{0}".format(e))
74 | return reply
75 |
76 | def textToVoice(self, text):
77 | try:
78 | # Avoid the same filename under multithreading
79 | fileName = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3"
80 | return_file = xunfei_tts(self.APPID,self.APIKey,self.APISecret,self.BusinessArgsTTS,text,fileName)
81 | logger.info("[Xunfei] textToVoice text={} voice file name={}".format(text, fileName))
82 | reply = Reply(ReplyType.VOICE, fileName)
83 | except Exception as e:
84 | logger.error("[Xunfei] textToVoice error={}".format(fileName))
85 | reply = Reply(ReplyType.ERROR, "抱歉,讯飞语音合成失败")
86 | return reply
87 |
--------------------------------------------------------------------------------
/voice/ali/ali_voice.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Author: chazzjimel
4 | Email: chazzjimel@gmail.com
5 | wechat:cheung-z-x
6 |
7 | Description:
8 | ali voice service
9 |
10 | """
11 | import json
12 | import os
13 | import re
14 | import time
15 |
16 | from bridge.reply import Reply, ReplyType
17 | from common.log import logger
18 | from voice.audio_convert import get_pcm_from_wav
19 | from voice.voice import Voice
20 | from voice.ali.ali_api import AliyunTokenGenerator, speech_to_text_aliyun, text_to_speech_aliyun
21 | from config import conf
22 |
23 |
24 | class AliVoice(Voice):
25 | def __init__(self):
26 | """
27 | Initialize the AliVoice class and load the required settings from the config file.
28 | """
29 | try:
30 | curdir = os.path.dirname(__file__)
31 | config_path = os.path.join(curdir, "config.json")
32 | with open(config_path, "r") as fr:
33 | config = json.load(fr)
34 | self.token = None
35 | self.token_expire_time = 0
36 | # By default, reuse the Aliyun Qwen access_key and access_secret
37 | self.api_url_voice_to_text = config.get("api_url_voice_to_text")
38 | self.api_url_text_to_voice = config.get("api_url_text_to_voice")
39 | self.app_key = config.get("app_key")
40 | self.access_key_id = conf().get("qwen_access_key_id") or config.get("access_key_id")
41 | self.access_key_secret = conf().get("qwen_access_key_secret") or config.get("access_key_secret")
42 | except Exception as e:
43 | logger.warn("AliVoice init failed: %s, ignore " % e)
44 |
45 | def textToVoice(self, text):
46 | """
47 | Convert text into a voice file.
48 |
49 | :param text: the text to convert.
50 | :return: a Reply object containing the generated voice file or an error message.
51 | """
52 | # Strip characters that are not Chinese, English, or basic punctuation
53 | text = re.sub(r'[^\u4e00-\u9fa5\u3040-\u30FF\uAC00-\uD7AFa-zA-Z0-9'
54 | r'äöüÄÖÜáéíóúÁÉÍÓÚàèìòùÀÈÌÒÙâêîôûÂÊÎÔÛçÇñÑ,。!?,.]', '', text)
55 | # get a valid token
56 | token_id = self.get_valid_token()
57 | fileName = text_to_speech_aliyun(self.api_url_text_to_voice, text, self.app_key, token_id)
58 | if fileName:
59 | logger.info("[Ali] textToVoice text={} voice file name={}".format(text, fileName))
60 | reply = Reply(ReplyType.VOICE, fileName)
61 | else:
62 | reply = Reply(ReplyType.ERROR, "抱歉,语音合成失败")
63 | return reply
64 |
65 | def voiceToText(self, voice_file):
66 | """
67 | 将语音文件转换为文本。
68 |
69 | :param voice_file: 要转换的语音文件。
70 | :return: 返回一个Reply对象,其中包含转换得到的文本或错误信息。
71 | """
72 | # 提取有效的token
73 | token_id = self.get_valid_token()
74 | logger.debug("[Ali] voice file name={}".format(voice_file))
75 | pcm = get_pcm_from_wav(voice_file)
76 | text = speech_to_text_aliyun(self.api_url_voice_to_text, pcm, self.app_key, token_id)
77 | if text:
78 | logger.info("[Ali] VoicetoText = {}".format(text))
79 | reply = Reply(ReplyType.TEXT, text)
80 | else:
81 | reply = Reply(ReplyType.ERROR, "抱歉,语音识别失败")
82 | return reply
83 |
84 | def get_valid_token(self):
85 | """
86 | 获取有效的阿里云token。
87 |
88 | :return: 返回有效的token字符串。
89 | """
90 | current_time = time.time()
91 | if self.token is None or current_time >= self.token_expire_time:
92 | get_token = AliyunTokenGenerator(self.access_key_id, self.access_key_secret)
93 | token_str = get_token.get_token()
94 | token_data = json.loads(token_str)
95 | self.token = token_data["Token"]["Id"]
96 | # 将过期时间减少一小段时间(例如5分钟),以避免在边界条件下的过期
97 | self.token_expire_time = token_data["Token"]["ExpireTime"] - 300
98 | logger.debug(f"新获取的阿里云token:{self.token}")
99 | else:
100 | logger.debug("使用缓存的token")
101 | return self.token
102 |
--------------------------------------------------------------------------------
/channel/wechatmp/README.md:
--------------------------------------------------------------------------------
1 | # 微信公众号channel
2 |
3 | 鉴于个人微信号在服务器上通过itchat登录有封号风险,这里新增了微信公众号channel,提供无风险的服务。
4 | 目前支持订阅号和服务号两种类型的公众号,它们都支持文本交互,语音和图片输入。其中个人主体的微信订阅号由于无法通过微信认证,存在回复时间限制,每天的图片和声音回复次数也有限制。
5 |
6 | ## 使用方法(订阅号,服务号类似)
7 |
8 | 在开始部署前,你需要一个拥有公网IP的服务器,以提供微信服务器和我们自己服务器的连接。或者你需要进行内网穿透,否则微信服务器无法将消息发送给我们的服务器。
9 |
10 | 此外,需要在我们的服务器上安装python的web框架web.py和wechatpy。
11 | 以ubuntu为例(在ubuntu 22.04上测试):
12 | ```
13 | pip3 install web.py
14 | pip3 install wechatpy
15 | ```
16 |
17 | 然后在[微信公众平台](https://mp.weixin.qq.com)注册一个自己的公众号,类型选择订阅号,主体为个人即可。
18 |
19 | 然后根据[接入指南](https://developers.weixin.qq.com/doc/offiaccount/Basic_Information/Access_Overview.html)的说明,在[微信公众平台](https://mp.weixin.qq.com)的“设置与开发”-“基本配置”-“服务器配置”中填写服务器地址`URL`和令牌`Token`。`URL`填写格式为`http://url/wx`,可使用IP(成功几率看脸),`Token`是你自己编的一个特定的令牌。消息加解密方式如果选择了需要加密的模式,需要在配置中填写`wechatmp_aes_key`。
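
服务器验证的原理大致如下(仅为示意代码,项目中已实现,无需自行添加):微信服务器会带着 `signature`、`timestamp`、`nonce`、`echostr` 请求你填写的 URL,服务端把 `Token`、`timestamp`、`nonce` 按字典序排序拼接后取 sha1,与 `signature` 一致即验证通过并原样返回 `echostr`。

```
import hashlib

def check_signature(token, signature, timestamp, nonce):
    """微信服务器配置校验:三者排序拼接后取sha1,与signature比对。"""
    raw = "".join(sorted([token, timestamp, nonce]))
    return hashlib.sha1(raw.encode("utf-8")).hexdigest() == signature
```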
20 |
21 | 相关的服务器验证代码已经写好,你不需要再添加任何代码。你只需要在本项目根目录的`config.json`中添加
22 | ```
23 | "channel_type": "wechatmp",     # 如果通过了微信认证,将"wechatmp"替换为"wechatmp_service",可极大地优化使用体验
24 | "wechatmp_token": "xxxx", # 微信公众平台的Token
25 | "wechatmp_port": 8080, # 微信公众平台的端口,需要端口转发到80或443
26 | "wechatmp_app_id": "xxxx", # 微信公众平台的appID
27 | "wechatmp_app_secret": "xxxx", # 微信公众平台的appsecret
28 | "wechatmp_aes_key": "", # 微信公众平台的EncodingAESKey,加密模式需要
29 | "single_chat_prefix": [""], # 推荐设置,任意对话都可以触发回复,不添加前缀
30 | "single_chat_reply_prefix": "", # 推荐设置,回复不设置前缀
31 | "plugin_trigger_prefix": "&", # 推荐设置,在手机微信客户端中,$%^等符号与中文连在一起时会自动显示一段较大的间隔,用户体验不好。请不要使用管理员指令前缀"#",这会造成未知问题。
32 | ```
33 | 然后运行`python3 app.py`启动web服务器。这里会默认监听8080端口,但是微信公众号的服务器配置只支持80/443端口,有两种方法来解决这个问题。第一个是推荐的方法,使用端口转发命令将80端口转发到8080端口:
34 | ```
35 | sudo iptables -t nat -A PREROUTING -p tcp --dport 80 -j REDIRECT --to-port 8080
36 | sudo iptables-save > /etc/iptables/rules.v4
37 | ```
38 | 第二个方法是让python程序直接监听80端口,在配置文件中设置`"wechatmp_port": 80` ,在linux上需要使用`sudo python3 app.py`启动程序。然而这会导致一系列环境和权限问题,因此不是推荐的方法。
39 |
40 | 443端口同理,注意需要支持SSL,也就是https的访问,在`wechatmp_channel.py`中需要修改相应的证书路径。
41 |
42 | 程序启动并监听端口后,在刚才的“服务器配置”中点击`提交`即可验证你的服务器。
43 | 随后在[微信公众平台](https://mp.weixin.qq.com)启用服务器,关闭手动填写规则的自动回复,即可实现ChatGPT的自动回复。
44 |
45 | 之后需要在公众号开发信息下将本机IP加入到IP白名单。
46 |
47 | 不然在启用后,发送语音、图片等消息可能会遇到如下报错:
48 | ```
49 | 'errcode': 40164, 'errmsg': 'invalid ip xx.xx.xx.xx not in whitelist rid
50 | ```
51 |
52 |
53 | ## 个人微信公众号的限制
54 | 由于个人微信公众号不能通过微信认证,所以没有客服接口,公众号无法主动发出消息,只能被动回复。而微信官方对被动回复有5秒的时间限制,最多重试2次,因此最多只有15秒的自动回复时间窗口。如果问题比较复杂或者我们的服务器比较忙,ChatGPT的回答就没办法及时回复给用户。为了解决这个问题,这里做了回答缓存:在回复超时后,你需要再次主动发送任意文字(例如1)来尝试拿到缓存的回答。为了优化使用体验,目前设置了两分钟(120秒)的timeout,用户至多在两分钟后即可查询到回复或者错误原因。
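
回答缓存的思路大致如下(极简示意,并非本项目的实际实现,`save_answer`、`fetch_cached_answer` 等名称均为说明用途的假设):回答生成后先写入带时间戳的字典,用户再次发消息时取走,超过 120 秒视为过期。

```
import time

REPLY_TIMEOUT = 120  # 秒,对应上文两分钟的 timeout
reply_cache = {}     # user_id -> (写入时间, 回答文本)

def save_answer(user_id, answer):
    """回答生成后写入缓存,等待用户下一次发消息时取走。"""
    reply_cache[user_id] = (time.time(), answer)

def fetch_cached_answer(user_id):
    """用户再次发送任意文字时调用:命中且未过期则返回回答,否则返回 None。"""
    item = reply_cache.pop(user_id, None)
    if item is None:
        return None
    created_at, answer = item
    if time.time() - created_at > REPLY_TIMEOUT:
        return None  # 已过期,丢弃
    return answer
```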
55 |
56 | 另外,微信官方对自动回复的长度也有限制,因此这里会将ChatGPT的回答拆分成多段后依次回复。
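
拆分的思路大致如下(示意代码,`max_len` 的取值仅为假设,实际以微信官方的长度限制为准):

```
def split_reply(text, max_len=600):
    """把长回复按 max_len 个字符拆成多段。"""
    return [text[i:i + max_len] for i in range(0, len(text), max_len)]

# 用法示意
for i, seg in enumerate(split_reply("很长的回答" * 500), start=1):
    print("第{}段,长度{}".format(i, len(seg)))
```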
57 |
58 | ## 私有api_key
59 | 公共api有访问频率限制(免费账号每分钟最多3次ChatGPT的API调用),这在服务多人的时候会遇到问题。因此这里多加了一个设置私有api_key的功能。目前通过godcmd插件的命令来设置私有api_key。
60 |
61 | ## 语音输入
62 | 利用微信自带的语音识别功能,提供语音输入能力。需要在公众号管理页面的“设置与开发”->“接口权限”页面开启“接收语音识别结果”。
63 |
64 | ## 语音回复
65 | 请在配置文件中添加以下词条:
66 | ```
67 | "voice_reply_voice": true,
68 | ```
69 | 这样公众号将会用语音回复语音消息,实现语音对话。
70 |
71 | 默认的语音合成引擎是`google`,它是免费使用的。
72 |
73 | 如果要选择其他的语音合成引擎,请添加以下配置项:
74 | ```
75 | "text_to_voice": "pytts"
76 | ```
77 |
78 | pytts是本地的语音合成引擎。此外还支持baidu、azure等引擎,需要自行配置相关的依赖和key。
79 |
80 | 如果使用pytts,在ubuntu上需要安装如下依赖:
81 | ```
82 | sudo apt update
83 | sudo apt install espeak
84 | sudo apt install ffmpeg
85 | python3 -m pip install pyttsx3
86 | ```
87 | 不太建议开启pytts语音回复,因为它是离线本地计算,算得慢会拖垮服务器,且声音不好听。
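
如果想先单独验证 pyttsx3 依赖是否安装成功,可以参考下面的最小示意(与项目内的封装无关,输出文件名为假设,实际生成的音频格式取决于本地后端):

```
import pyttsx3

engine = pyttsx3.init()
engine.save_to_file("你好,这是一条测试语音。", "tmp/tts-test.wav")
engine.runAndWait()  # 阻塞直到合成完成
```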
88 |
89 | ## 图片回复
90 | 现在认证公众号和非认证公众号都可以实现图片和语音回复。但是非认证公众号使用了永久素材接口,每天有1000次的调用上限(每个月有10次重置机会,程序中已设定遇到上限会自动重置),且永久素材库存也有上限。因此对于非认证公众号,我们会在回复图片或者语音消息后的10秒内从永久素材库存中删除该素材。
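
"回复后延时删除永久素材"的大致思路如下(示意代码,`upload_permanent_material` / `delete_permanent_material` 均为假设的占位函数,实际应调用微信永久素材相关接口):

```
import threading

def upload_permanent_material(file_path):
    """占位:调用永久素材上传接口,返回 media_id。"""
    raise NotImplementedError

def delete_permanent_material(media_id):
    """占位:调用永久素材删除接口。"""
    raise NotImplementedError

def reply_and_schedule_delete(file_path, delay_seconds=10):
    media_id = upload_permanent_material(file_path)
    # ……此处把 media_id 作为图片/语音回复发送给用户……
    # 回复发出后延时删除,避免永久素材库存达到上限
    threading.Timer(delay_seconds, delete_permanent_material, args=(media_id,)).start()
```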
91 |
92 | ## 测试
93 | 目前在`RoboStyle`这个公众号上进行了测试(基于[wechatmp分支](https://github.com/JS00000/chatgpt-on-wechat/tree/wechatmp)),感兴趣的可以关注并体验。开启了godcmd, Banwords, role, dungeon, finish这五个插件,其他的插件还没有详尽测试。百度的接口暂未测试。[wechatmp-stable分支](https://github.com/JS00000/chatgpt-on-wechat/tree/wechatmp-stable)是较稳定的上个版本,但也缺少最新的功能支持。
94 |
95 | ## TODO
96 | - [x] 语音输入
97 | - [x] 图片输入
98 | - [x] 使用临时素材接口提供认证公众号的图片和语音回复
99 | - [x] 使用永久素材接口提供未认证公众号的图片和语音回复
100 | - [ ] 高并发支持
101 |
--------------------------------------------------------------------------------
/plugins/banwords/banwords.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | import json
4 | import os
5 |
6 | import plugins
7 | from bridge.context import ContextType
8 | from bridge.reply import Reply, ReplyType
9 | from common.log import logger
10 | from plugins import *
11 |
12 | from .lib.WordsSearch import WordsSearch
13 |
14 |
15 | @plugins.register(
16 | name="Banwords",
17 | desire_priority=100,
18 | hidden=True,
19 | desc="判断消息中是否有敏感词、决定是否回复。",
20 | version="1.0",
21 | author="lanvent",
22 | )
23 | class Banwords(Plugin):
24 | def __init__(self):
25 | super().__init__()
26 | try:
27 | # load config
28 | conf = super().load_config()
29 | curdir = os.path.dirname(__file__)
30 |             if not conf:
31 |                 # 配置不存在则使用默认配置,本地没有 config.json 时再写入一份
32 |                 conf = {"action": "ignore"}
33 |                 config_path = os.path.join(curdir, "config.json")
34 |                 if not os.path.exists(config_path):
35 |                     with open(config_path, "w") as f:
36 |                         json.dump(conf, f, indent=4)
37 |
38 | self.searchr = WordsSearch()
39 | self.action = conf["action"]
40 | banwords_path = os.path.join(curdir, "banwords.txt")
41 | with open(banwords_path, "r", encoding="utf-8") as f:
42 | words = []
43 | for line in f:
44 | word = line.strip()
45 | if word:
46 | words.append(word)
47 | self.searchr.SetKeywords(words)
48 | self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context
49 | if conf.get("reply_filter", True):
50 | self.handlers[Event.ON_DECORATE_REPLY] = self.on_decorate_reply
51 | self.reply_action = conf.get("reply_action", "ignore")
52 | logger.info("[Banwords] inited")
53 | except Exception as e:
54 | logger.warn("[Banwords] init failed, ignore or see https://github.com/zhayujie/chatgpt-on-wechat/tree/master/plugins/banwords .")
55 | raise e
56 |
57 | def on_handle_context(self, e_context: EventContext):
58 | if e_context["context"].type not in [
59 | ContextType.TEXT,
60 | ContextType.IMAGE_CREATE,
61 | ]:
62 | return
63 |
64 | content = e_context["context"].content
65 | logger.debug("[Banwords] on_handle_context. content: %s" % content)
66 | if self.action == "ignore":
67 | f = self.searchr.FindFirst(content)
68 | if f:
69 | logger.info("[Banwords] %s in message" % f["Keyword"])
70 | e_context.action = EventAction.BREAK_PASS
71 | return
72 | elif self.action == "replace":
73 | if self.searchr.ContainsAny(content):
74 | reply = Reply(ReplyType.INFO, "发言中包含敏感词,请重试: \n" + self.searchr.Replace(content))
75 | e_context["reply"] = reply
76 | e_context.action = EventAction.BREAK_PASS
77 | return
78 |
79 | def on_decorate_reply(self, e_context: EventContext):
80 | if e_context["reply"].type not in [ReplyType.TEXT]:
81 | return
82 |
83 | reply = e_context["reply"]
84 | content = reply.content
85 | if self.reply_action == "ignore":
86 | f = self.searchr.FindFirst(content)
87 | if f:
88 | logger.info("[Banwords] %s in reply" % f["Keyword"])
89 | e_context["reply"] = None
90 | e_context.action = EventAction.BREAK_PASS
91 | return
92 | elif self.reply_action == "replace":
93 | if self.searchr.ContainsAny(content):
94 | reply = Reply(ReplyType.INFO, "已替换回复中的敏感词: \n" + self.searchr.Replace(content))
95 | e_context["reply"] = reply
96 | e_context.action = EventAction.CONTINUE
97 | return
98 |
99 | def get_help_text(self, **kwargs):
100 | return "过滤消息中的敏感词。"
101 |
--------------------------------------------------------------------------------
/plugins/linkai/README.md:
--------------------------------------------------------------------------------
1 | ## 插件说明
2 |
3 | 基于 LinkAI 提供的知识库、Midjourney绘画、文档对话等能力对机器人的功能进行增强。平台地址: https://link-ai.tech/console
4 |
5 | ## 插件配置
6 |
7 | 将 `plugins/linkai` 目录下的 `config.json.template` 配置模板复制为最终生效的 `config.json`。(如果未配置则默认使用 `config.json.template` 模板中的配置,但功能默认关闭,如有需要可通过指令开启)。
8 |
9 | 以下是插件配置项说明:
10 |
11 | ```bash
12 | {
13 | "group_app_map": { # 群聊 和 应用编码 的映射关系
14 | "测试群名称1": "default", # 表示在名称为 "测试群名称1" 的群聊中将使用app_code 为 default 的应用
15 | "测试群名称2": "Kv2fXJcH"
16 | },
17 | "midjourney": {
18 | "enabled": true, # midjourney 绘画开关
19 | "auto_translate": true, # 是否自动将提示词翻译为英文
20 | "img_proxy": true, # 是否对生成的图片使用代理,如果你是国外服务器,将这一项设置为false会获得更快的生成速度
21 | "max_tasks": 3, # 支持同时提交的总任务个数
22 | "max_tasks_per_user": 1, # 支持单个用户同时提交的任务个数
23 | "use_image_create_prefix": true # 是否使用全局的绘画触发词,如果开启将同时支持由`config.json`中的 image_create_prefix 配置触发
24 | },
25 | "summary": {
26 | "enabled": true, # 文档总结和对话功能开关
27 | "group_enabled": true, # 是否支持群聊开启
28 | "max_file_size": 5000, # 文件的大小限制,单位KB,默认为5M,超过该大小直接忽略
29 | "type": ["FILE", "SHARING", "IMAGE"] # 支持总结的类型,分别表示 文件、分享链接、图片,其中文件和链接默认打开,图片默认关闭
30 | }
31 | }
32 | ```
33 |
34 | 根目录 `config.json` 中配置,`API_KEY` 在 [控制台](https://link-ai.tech/console/interface) 中创建并复制过来:
35 |
36 | ```bash
37 | "linkai_api_key": "Link_xxxxxxxxx"
38 | ```
39 |
40 | 注意:
41 |
42 | - 配置项中 `group_app_map` 部分是用于映射群聊与LinkAI平台上的应用, `midjourney` 部分是 mj 画图的配置,`summary` 部分是文档总结及对话功能的配置。三部分的配置相互独立,可按需开启
43 | - 实际 `config.json` 配置中应保证json格式,不应携带 '#' 及后面的注释
44 | - 如果是`docker`部署,可通过映射 `plugins/config.json` 到容器中来完成插件配置,参考[文档](https://github.com/zhayujie/chatgpt-on-wechat#3-%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8)
45 |
46 | ## 插件使用
47 |
48 | > 使用插件中的知识库管理功能需要首先开启`linkai`对话,依赖全局 `config.json` 中的 `use_linkai` 和 `linkai_api_key` 配置;而midjourney绘画和summary文档总结对话功能则只需填写 `linkai_api_key` 配置,无论 `use_linkai` 开启与否均可使用。具体可参考 [详细文档](https://link-ai.tech/platform/link-app/wechat)。
49 |
50 | 完成配置后运行项目,会自动运行插件,输入 `#help linkai` 可查看插件功能。
51 |
52 | ### 1.知识库管理功能
53 |
54 | 提供在不同群聊使用不同应用的功能。可以在上述 `group_app_map` 配置中固定映射关系,也可以通过指令在群中快速完成切换。
55 |
56 | 应用切换指令需要首先完成管理员 (`godcmd`) 插件的认证,然后按以下格式输入:
57 |
58 | `$linkai app {app_code}`
59 |
60 | 例如输入 `$linkai app Kv2fXJcH`,即将当前群聊与 app_code为 Kv2fXJcH 的应用绑定。
61 |
62 | 另外,还可以通过 `$linkai close` 来一键关闭linkai对话,此时就会使用默认的openai接口;同理,发送 `$linkai open` 可以再次开启。
63 |
64 | ### 2.Midjourney绘画功能
65 |
66 | 若未配置 `plugins/linkai/config.json`,画图功能默认关闭,可使用 `$mj open` 指令基于默认配置开启mj画图。
67 |
68 | 指令格式:
69 |
70 | ```
71 | - 图片生成: $mj 描述词1, 描述词2..
72 | - 图片放大: $mju 图片ID 图片序号
73 | - 图片变换: $mjv 图片ID 图片序号
74 | - 重置: $mjr 图片ID
75 | ```
76 |
77 | 例如:
78 |
79 | ```
80 | "$mj a little cat, white --ar 9:16"
81 | "$mju 1105592717188272288 2"
82 | "$mjv 11055927171882 2"
83 | "$mjr 11055927171882"
84 | ```
85 |
86 | 注意事项:
87 | 1. 使用 `$mj open` 和 `$mj close` 指令可以快速打开和关闭绘图功能
88 | 2. 海外环境部署请将 `img_proxy` 设置为 `false`
89 | 3. 开启 `use_image_create_prefix` 配置后可直接复用全局画图触发词,以"画"开头便可以生成图片。
90 | 4. 提示词内容中包含敏感词或者参数格式错误可能导致绘画失败,生成失败不消耗积分
91 | 5. 若未收到图片,可能有两种原因:一种是已生成图片但微信发送失败,可以在后台日志查看有没有获取到图片url,一般是受到了wx限制,可以稍后重试或更换账号尝试;另一种是图片提示词存在疑似违规,mj不会直接提示错误,但会在画图后删掉原图导致程序无法获取,这种情况不消耗积分。
92 |
93 | ### 3.文档总结对话功能
94 |
95 | #### 配置
96 |
97 | 该功能依赖 LinkAI的知识库及对话功能,需要在项目根目录的config.json中设置 `linkai_api_key`, 同时根据上述插件配置说明,在插件config.json添加 `summary` 部分的配置,设置 `enabled` 为 true。
98 |
99 | 如果不想创建 `plugins/linkai/config.json` 配置,可以直接通过 `$linkai sum open` 指令开启该功能。
100 |
101 | 也可以通过私聊(全局 `config.json` 中的 `linkai_app_code`)或者群聊绑定(通过`group_app_map`参数配置)的应用来开启该功能:在LinkAI平台 [应用配置](https://link-ai.tech/console/factory) 里添加并开启**内容总结**插件。
102 |
103 | #### 使用
104 |
105 | 功能开启后,向机器人发送 **文件**、 **分享链接卡片**、**图片** 即可生成摘要,进一步可以与文件或链接的内容进行多轮对话。如果需要关闭某种类型的内容总结,设置 `summary`配置中的type字段即可。
106 |
107 | #### 限制
108 |
109 | 1. 文件目前支持 `txt`, `docx`, `pdf`, `md`, `csv` 格式,文件大小由 `max_file_size` 限制,最大不超过15M,字数最多可支持百万字。但不建议上传字数过多的文件,一是token消耗过大,二是摘要很难覆盖到全部内容,只能通过多轮对话来了解细节。
110 | 2. 分享链接目前仅支持公众号文章,后续会支持更多文章类型及视频链接等
111 | 3. 总结及对话的费用与 LinkAI 3.5-4K 模型的计费方式相同,按文档内容的tokens进行计算
112 |
--------------------------------------------------------------------------------
/plugins/keyword/keyword.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | import json
4 | import os
5 | import requests
6 | import plugins
7 | from bridge.context import ContextType
8 | from bridge.reply import Reply, ReplyType
9 | from common.log import logger
10 | from plugins import *
11 |
12 |
13 | @plugins.register(
14 | name="Keyword",
15 | desire_priority=900,
16 | hidden=True,
17 | desc="关键词匹配过滤",
18 | version="0.1",
19 | author="fengyege.top",
20 | )
21 | class Keyword(Plugin):
22 | def __init__(self):
23 | super().__init__()
24 | try:
25 | curdir = os.path.dirname(__file__)
26 | config_path = os.path.join(curdir, "config.json")
27 | conf = None
28 | if not os.path.exists(config_path):
29 | logger.debug(f"[keyword]不存在配置文件{config_path}")
30 | conf = {"keyword": {}}
31 | with open(config_path, "w", encoding="utf-8") as f:
32 | json.dump(conf, f, indent=4)
33 | else:
34 | logger.debug(f"[keyword]加载配置文件{config_path}")
35 | with open(config_path, "r", encoding="utf-8") as f:
36 | conf = json.load(f)
37 | # 加载关键词
38 | self.keyword = conf["keyword"]
39 |
40 | logger.info("[keyword] {}".format(self.keyword))
41 | self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context
42 | logger.info("[keyword] inited.")
43 | except Exception as e:
44 | logger.warn("[keyword] init failed, ignore or see https://github.com/zhayujie/chatgpt-on-wechat/tree/master/plugins/keyword .")
45 | raise e
46 |
47 | def on_handle_context(self, e_context: EventContext):
48 | if e_context["context"].type != ContextType.TEXT:
49 | return
50 |
51 | content = e_context["context"].content.strip()
52 | logger.debug("[keyword] on_handle_context. content: %s" % content)
53 | if content in self.keyword:
54 | logger.info(f"[keyword] 匹配到关键字【{content}】")
55 | reply_text = self.keyword[content]
56 |
57 | # 判断匹配内容的类型
58 | if (reply_text.startswith("http://") or reply_text.startswith("https://")) and any(reply_text.endswith(ext) for ext in [".jpg", ".webp", ".jpeg", ".png", ".gif", ".img"]):
59 |                 # 如果是以 http:// 或 https:// 开头,且以 ".jpg", ".webp", ".jpeg", ".png", ".gif", ".img" 结尾,则认为是图片 URL。
60 | reply = Reply()
61 | reply.type = ReplyType.IMAGE_URL
62 | reply.content = reply_text
63 |
64 |             elif (reply_text.startswith("http://") or reply_text.startswith("https://")) and any(reply_text.endswith(ext) for ext in [".pdf", ".doc", ".docx", ".xls", ".xlsx", ".zip", ".rar"]):
65 |                 # 如果是以 http:// 或 https:// 开头,且以 ".pdf", ".doc", ".docx", ".xls", ".xlsx", ".zip", ".rar" 结尾,则下载文件到tmp目录并发送给用户
66 | file_path = "tmp"
67 | if not os.path.exists(file_path):
68 | os.makedirs(file_path)
69 | file_name = reply_text.split("/")[-1] # 获取文件名
70 | file_path = os.path.join(file_path, file_name)
71 | response = requests.get(reply_text)
72 | with open(file_path, "wb") as f:
73 | f.write(response.content)
74 | #channel/wechat/wechat_channel.py和channel/wechat_channel.py中缺少ReplyType.FILE类型。
75 | reply = Reply()
76 | reply.type = ReplyType.FILE
77 | reply.content = file_path
78 |
79 | elif (reply_text.startswith("http://") or reply_text.startswith("https://")) and any(reply_text.endswith(ext) for ext in [".mp4"]):
80 | # 如果是以 http:// 或 https:// 开头,且".mp4"结尾,则下载视频到tmp目录并发送给用户
81 | reply = Reply()
82 | reply.type = ReplyType.VIDEO_URL
83 | reply.content = reply_text
84 |
85 | else:
86 | # 否则认为是普通文本
87 | reply = Reply()
88 | reply.type = ReplyType.TEXT
89 | reply.content = reply_text
90 |
91 | e_context["reply"] = reply
92 | e_context.action = EventAction.BREAK_PASS # 事件结束,并跳过处理context的默认逻辑
93 |
94 | def get_help_text(self, **kwargs):
95 | help_text = "关键词过滤"
96 | return help_text
97 |
--------------------------------------------------------------------------------
/plugins/linkai/summary.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from config import conf
3 | from common.log import logger
4 | import os
5 | import html
6 |
7 |
8 | class LinkSummary:
9 | def __init__(self):
10 | pass
11 |
12 | def summary_file(self, file_path: str, app_code: str):
13 | file_body = {
14 | "file": open(file_path, "rb"),
15 | "name": file_path.split("/")[-1]
16 | }
17 | body = {
18 | "app_code": app_code
19 | }
20 | url = self.base_url() + "/v1/summary/file"
21 | logger.info(f"[LinkSum] file summary, app_code={app_code}")
22 | res = requests.post(url, headers=self.headers(), files=file_body, data=body, timeout=(5, 300))
23 | return self._parse_summary_res(res)
24 |
25 | def summary_url(self, url: str, app_code: str):
26 | url = html.unescape(url)
27 | body = {
28 | "url": url,
29 | "app_code": app_code
30 | }
31 | logger.info(f"[LinkSum] url summary, app_code={app_code}")
32 | res = requests.post(url=self.base_url() + "/v1/summary/url", headers=self.headers(), json=body, timeout=(5, 180))
33 | return self._parse_summary_res(res)
34 |
35 | def summary_chat(self, summary_id: str):
36 | body = {
37 | "summary_id": summary_id
38 | }
39 | res = requests.post(url=self.base_url() + "/v1/summary/chat", headers=self.headers(), json=body, timeout=(5, 180))
40 | if res.status_code == 200:
41 | res = res.json()
42 | logger.debug(f"[LinkSum] chat open, res={res}")
43 | if res.get("code") == 200:
44 | data = res.get("data")
45 | return {
46 | "questions": data.get("questions"),
47 | "file_id": data.get("file_id")
48 | }
49 | else:
50 | res_json = res.json()
51 | logger.error(f"[LinkSum] summary error, status_code={res.status_code}, msg={res_json.get('message')}")
52 | return None
53 |
54 | def _parse_summary_res(self, res):
55 | if res.status_code == 200:
56 | res = res.json()
57 | logger.debug(f"[LinkSum] summary result, res={res}")
58 | if res.get("code") == 200:
59 | data = res.get("data")
60 | return {
61 | "summary": data.get("summary"),
62 | "summary_id": data.get("summary_id")
63 | }
64 | else:
65 | res_json = res.json()
66 | logger.error(f"[LinkSum] summary error, status_code={res.status_code}, msg={res_json.get('message')}")
67 | return None
68 |
69 | def base_url(self):
70 | return conf().get("linkai_api_base", "https://api.link-ai.tech")
71 |
72 | def headers(self):
73 | return {"Authorization": "Bearer " + conf().get("linkai_api_key")}
74 |
75 | def check_file(self, file_path: str, sum_config: dict) -> bool:
76 | file_size = os.path.getsize(file_path) // 1000
77 |
78 | if (sum_config.get("max_file_size") and file_size > sum_config.get("max_file_size")) or file_size > 15000:
79 | logger.warn(f"[LinkSum] file size exceeds limit, No processing, file_size={file_size}KB")
80 | return False
81 |
82 | suffix = file_path.split(".")[-1]
83 | support_list = ["txt", "csv", "docx", "pdf", "md", "jpg", "jpeg", "png"]
84 | if suffix not in support_list:
85 | logger.warn(f"[LinkSum] unsupported file, suffix={suffix}, support_list={support_list}")
86 | return False
87 |
88 | return True
89 |
90 | def check_url(self, url: str):
91 | if not url:
92 | return False
93 | support_list = ["http://mp.weixin.qq.com", "https://mp.weixin.qq.com"]
94 | black_support_list = ["https://mp.weixin.qq.com/mp/waerrpage"]
95 | for black_url_prefix in black_support_list:
96 | if url.strip().startswith(black_url_prefix):
97 | logger.warn(f"[LinkSum] unsupported url, no need to process, url={url}")
98 | return False
99 | for support_url in support_list:
100 | if url.strip().startswith(support_url):
101 | return True
102 | return False
103 |
--------------------------------------------------------------------------------
/lib/itchat/components/hotreload.py:
--------------------------------------------------------------------------------
1 | import pickle, os
2 | import logging
3 |
4 | import requests
5 |
6 | from ..config import VERSION
7 | from ..returnvalues import ReturnValue
8 | from ..storage import templates
9 | from .contact import update_local_chatrooms, update_local_friends
10 | from .messages import produce_msg
11 |
12 | logger = logging.getLogger('itchat')
13 |
14 | def load_hotreload(core):
15 | core.dump_login_status = dump_login_status
16 | core.load_login_status = load_login_status
17 |
18 | def dump_login_status(self, fileDir=None):
19 | fileDir = fileDir or self.hotReloadDir
20 | try:
21 | with open(fileDir, 'w') as f:
22 | f.write('itchat - DELETE THIS')
23 | os.remove(fileDir)
24 | except:
25 | raise Exception('Incorrect fileDir')
26 | status = {
27 | 'version' : VERSION,
28 | 'loginInfo' : self.loginInfo,
29 | 'cookies' : self.s.cookies.get_dict(),
30 | 'storage' : self.storageClass.dumps()}
31 | with open(fileDir, 'wb') as f:
32 | pickle.dump(status, f)
33 | logger.debug('Dump login status for hot reload successfully.')
34 |
35 | def load_login_status(self, fileDir,
36 | loginCallback=None, exitCallback=None):
37 | try:
38 | with open(fileDir, 'rb') as f:
39 | j = pickle.load(f)
40 | except Exception as e:
41 | logger.debug('No such file, loading login status failed.')
42 | return ReturnValue({'BaseResponse': {
43 | 'ErrMsg': 'No such file, loading login status failed.',
44 | 'Ret': -1002, }})
45 |
46 | if j.get('version', '') != VERSION:
47 | logger.debug(('you have updated itchat from %s to %s, ' +
48 | 'so cached status is ignored') % (
49 | j.get('version', 'old version'), VERSION))
50 | return ReturnValue({'BaseResponse': {
51 | 'ErrMsg': 'cached status ignored because of version',
52 | 'Ret': -1005, }})
53 | self.loginInfo = j['loginInfo']
54 | self.loginInfo['User'] = templates.User(self.loginInfo['User'])
55 | self.loginInfo['User'].core = self
56 | self.s.cookies = requests.utils.cookiejar_from_dict(j['cookies'])
57 | self.storageClass.loads(j['storage'])
58 | try:
59 | msgList, contactList = self.get_msg()
60 | except:
61 | msgList = contactList = None
62 | if (msgList or contactList) is None:
63 | self.logout()
64 | load_last_login_status(self.s, j['cookies'])
65 | logger.debug('server refused, loading login status failed.')
66 | return ReturnValue({'BaseResponse': {
67 | 'ErrMsg': 'server refused, loading login status failed.',
68 | 'Ret': -1003, }})
69 | else:
70 | if contactList:
71 | for contact in contactList:
72 | if '@@' in contact['UserName']:
73 | update_local_chatrooms(self, [contact])
74 | else:
75 | update_local_friends(self, [contact])
76 | if msgList:
77 | msgList = produce_msg(self, msgList)
78 | for msg in msgList: self.msgList.put(msg)
79 | self.start_receiving(exitCallback)
80 | logger.debug('loading login status succeeded.')
81 | if hasattr(loginCallback, '__call__'):
82 | loginCallback()
83 | return ReturnValue({'BaseResponse': {
84 | 'ErrMsg': 'loading login status succeeded.',
85 | 'Ret': 0, }})
86 |
87 | def load_last_login_status(session, cookiesDict):
88 | try:
89 | session.cookies = requests.utils.cookiejar_from_dict({
90 | 'webwxuvid': cookiesDict['webwxuvid'],
91 | 'webwx_auth_ticket': cookiesDict['webwx_auth_ticket'],
92 | 'login_frequency': '2',
93 | 'last_wxuin': cookiesDict['wxuin'],
94 | 'wxloadtime': cookiesDict['wxloadtime'] + '_expired',
95 | 'wxpluginkey': cookiesDict['wxloadtime'],
96 | 'wxuin': cookiesDict['wxuin'],
97 | 'mm_lang': 'zh_CN',
98 | 'MM_WX_NOTIFY_STATE': '1',
99 | 'MM_WX_SOUND_STATE': '1', })
100 | except:
101 | logger.info('Load status for push login failed, we may have experienced a cookies change.')
102 | logger.info('If you are using the newest version of itchat, you may report a bug.')
103 |
--------------------------------------------------------------------------------
/lib/itchat/async_components/hotreload.py:
--------------------------------------------------------------------------------
1 | import pickle, os
2 | import logging
3 |
4 | import requests # type: ignore
5 |
6 | from ..config import VERSION
7 | from ..returnvalues import ReturnValue
8 | from ..storage import templates
9 | from .contact import update_local_chatrooms, update_local_friends
10 | from .messages import produce_msg
11 |
12 | logger = logging.getLogger('itchat')
13 |
14 | def load_hotreload(core):
15 | core.dump_login_status = dump_login_status
16 | core.load_login_status = load_login_status
17 |
18 | async def dump_login_status(self, fileDir=None):
19 | fileDir = fileDir or self.hotReloadDir
20 | try:
21 | with open(fileDir, 'w') as f:
22 | f.write('itchat - DELETE THIS')
23 | os.remove(fileDir)
24 | except:
25 | raise Exception('Incorrect fileDir')
26 | status = {
27 | 'version' : VERSION,
28 | 'loginInfo' : self.loginInfo,
29 | 'cookies' : self.s.cookies.get_dict(),
30 | 'storage' : self.storageClass.dumps()}
31 | with open(fileDir, 'wb') as f:
32 | pickle.dump(status, f)
33 | logger.debug('Dump login status for hot reload successfully.')
34 |
35 | async def load_login_status(self, fileDir,
36 | loginCallback=None, exitCallback=None):
37 | try:
38 | with open(fileDir, 'rb') as f:
39 | j = pickle.load(f)
40 | except Exception as e:
41 | logger.debug('No such file, loading login status failed.')
42 | return ReturnValue({'BaseResponse': {
43 | 'ErrMsg': 'No such file, loading login status failed.',
44 | 'Ret': -1002, }})
45 |
46 | if j.get('version', '') != VERSION:
47 | logger.debug(('you have updated itchat from %s to %s, ' +
48 | 'so cached status is ignored') % (
49 | j.get('version', 'old version'), VERSION))
50 | return ReturnValue({'BaseResponse': {
51 | 'ErrMsg': 'cached status ignored because of version',
52 | 'Ret': -1005, }})
53 | self.loginInfo = j['loginInfo']
54 | self.loginInfo['User'] = templates.User(self.loginInfo['User'])
55 | self.loginInfo['User'].core = self
56 | self.s.cookies = requests.utils.cookiejar_from_dict(j['cookies'])
57 | self.storageClass.loads(j['storage'])
58 | try:
59 | msgList, contactList = self.get_msg()
60 | except:
61 | msgList = contactList = None
62 | if (msgList or contactList) is None:
63 | self.logout()
64 | await load_last_login_status(self.s, j['cookies'])
65 | logger.debug('server refused, loading login status failed.')
66 | return ReturnValue({'BaseResponse': {
67 | 'ErrMsg': 'server refused, loading login status failed.',
68 | 'Ret': -1003, }})
69 | else:
70 | if contactList:
71 | for contact in contactList:
72 | if '@@' in contact['UserName']:
73 | update_local_chatrooms(self, [contact])
74 | else:
75 | update_local_friends(self, [contact])
76 | if msgList:
77 | msgList = produce_msg(self, msgList)
78 | for msg in msgList: self.msgList.put(msg)
79 | await self.start_receiving(exitCallback)
80 | logger.debug('loading login status succeeded.')
81 | if hasattr(loginCallback, '__call__'):
82 | await loginCallback(self.storageClass.userName)
83 | return ReturnValue({'BaseResponse': {
84 | 'ErrMsg': 'loading login status succeeded.',
85 | 'Ret': 0, }})
86 |
87 | async def load_last_login_status(session, cookiesDict):
88 | try:
89 | session.cookies = requests.utils.cookiejar_from_dict({
90 | 'webwxuvid': cookiesDict['webwxuvid'],
91 | 'webwx_auth_ticket': cookiesDict['webwx_auth_ticket'],
92 | 'login_frequency': '2',
93 | 'last_wxuin': cookiesDict['wxuin'],
94 | 'wxloadtime': cookiesDict['wxloadtime'] + '_expired',
95 | 'wxpluginkey': cookiesDict['wxloadtime'],
96 | 'wxuin': cookiesDict['wxuin'],
97 | 'mm_lang': 'zh_CN',
98 | 'MM_WX_NOTIFY_STATE': '1',
99 | 'MM_WX_SOUND_STATE': '1', })
100 | except:
101 | logger.info('Load status for push login failed, we may have experienced a cookies change.')
102 | logger.info('If you are using the newest version of itchat, you may report a bug.')
103 |
--------------------------------------------------------------------------------
/lib/itchat/components/register.py:
--------------------------------------------------------------------------------
1 | import logging, traceback, sys, threading
2 | try:
3 | import Queue
4 | except ImportError:
5 | import queue as Queue
6 |
7 | from ..log import set_logging
8 | from ..utils import test_connect
9 | from ..storage import templates
10 |
11 | logger = logging.getLogger('itchat')
12 |
13 | def load_register(core):
14 | core.auto_login = auto_login
15 | core.configured_reply = configured_reply
16 | core.msg_register = msg_register
17 | core.run = run
18 |
19 | def auto_login(self, hotReload=False, statusStorageDir='itchat.pkl',
20 | enableCmdQR=False, picDir=None, qrCallback=None,
21 | loginCallback=None, exitCallback=None):
22 | if not test_connect():
23 | logger.info("You can't get access to internet or wechat domain, so exit.")
24 | sys.exit()
25 | self.useHotReload = hotReload
26 | self.hotReloadDir = statusStorageDir
27 | if hotReload:
28 | rval=self.load_login_status(statusStorageDir,
29 | loginCallback=loginCallback, exitCallback=exitCallback)
30 | if rval:
31 | return
32 | logger.error('Hot reload failed, logging in normally, error={}'.format(rval))
33 | self.logout()
34 | self.login(enableCmdQR=enableCmdQR, picDir=picDir, qrCallback=qrCallback,
35 | loginCallback=loginCallback, exitCallback=exitCallback)
36 | self.dump_login_status(statusStorageDir)
37 | else:
38 | self.login(enableCmdQR=enableCmdQR, picDir=picDir, qrCallback=qrCallback,
39 | loginCallback=loginCallback, exitCallback=exitCallback)
40 |
41 | def configured_reply(self):
42 | ''' determine the type of message and reply if its method is defined
43 | however, I use a strange way to determine whether a msg is from massive platform
44 | I haven't found a better solution here
45 | The main problem I'm worrying about is the mismatching of new friends added on phone
46 | If you have any good idea, pleeeease report an issue. I will be more than grateful.
47 | '''
48 | try:
49 | msg = self.msgList.get(timeout=1)
50 | except Queue.Empty:
51 | pass
52 | else:
53 | if isinstance(msg['User'], templates.User):
54 | replyFn = self.functionDict['FriendChat'].get(msg['Type'])
55 | elif isinstance(msg['User'], templates.MassivePlatform):
56 | replyFn = self.functionDict['MpChat'].get(msg['Type'])
57 |         else:
58 |             replyFn = self.functionDict['GroupChat'].get(msg['Type']) if isinstance(msg['User'], templates.Chatroom) else None
59 | if replyFn is None:
60 | r = None
61 | else:
62 | try:
63 | r = replyFn(msg)
64 | if r is not None:
65 | self.send(r, msg.get('FromUserName'))
66 | except:
67 | logger.warning(traceback.format_exc())
68 |
69 | def msg_register(self, msgType, isFriendChat=False, isGroupChat=False, isMpChat=False):
70 | ''' a decorator constructor
71 | return a specific decorator based on information given '''
72 | if not (isinstance(msgType, list) or isinstance(msgType, tuple)):
73 | msgType = [msgType]
74 | def _msg_register(fn):
75 | for _msgType in msgType:
76 | if isFriendChat:
77 | self.functionDict['FriendChat'][_msgType] = fn
78 | if isGroupChat:
79 | self.functionDict['GroupChat'][_msgType] = fn
80 | if isMpChat:
81 | self.functionDict['MpChat'][_msgType] = fn
82 | if not any((isFriendChat, isGroupChat, isMpChat)):
83 | self.functionDict['FriendChat'][_msgType] = fn
84 | return fn
85 | return _msg_register
86 |
87 | def run(self, debug=False, blockThread=True):
88 | logger.info('Start auto replying.')
89 | if debug:
90 | set_logging(loggingLevel=logging.DEBUG)
91 | def reply_fn():
92 | try:
93 | while self.alive:
94 | self.configured_reply()
95 | except KeyboardInterrupt:
96 | if self.useHotReload:
97 | self.dump_login_status()
98 | self.alive = False
99 | logger.debug('itchat received an ^C and exit.')
100 | logger.info('Bye~')
101 | if blockThread:
102 | reply_fn()
103 | else:
104 | replyThread = threading.Thread(target=reply_fn)
105 |         replyThread.daemon = True
106 | replyThread.start()
107 |
--------------------------------------------------------------------------------
/voice/audio_convert.py:
--------------------------------------------------------------------------------
1 | import shutil
2 | import wave
3 |
4 | from common.log import logger
5 |
6 | try:
7 | import pysilk
8 | except ImportError:
9 | logger.debug("import pysilk failed, wechaty voice message will not be supported.")
10 |
11 | from pydub import AudioSegment
12 |
13 | sil_supports = [8000, 12000, 16000, 24000, 32000, 44100, 48000] # slk转wav时,支持的采样率
14 |
15 |
16 | def find_closest_sil_supports(sample_rate):
17 | """
18 | 找到最接近的支持的采样率
19 | """
20 | if sample_rate in sil_supports:
21 | return sample_rate
22 | closest = 0
23 | mindiff = 9999999
24 | for rate in sil_supports:
25 | diff = abs(rate - sample_rate)
26 | if diff < mindiff:
27 | closest = rate
28 | mindiff = diff
29 | return closest
30 |
31 |
32 | def get_pcm_from_wav(wav_path):
33 | """
34 | 从 wav 文件中读取 pcm
35 |
36 | :param wav_path: wav 文件路径
37 | :returns: pcm 数据
38 | """
39 | wav = wave.open(wav_path, "rb")
40 | return wav.readframes(wav.getnframes())
41 |
42 |
43 | def any_to_mp3(any_path, mp3_path):
44 | """
45 | 把任意格式转成mp3文件
46 | """
47 | if any_path.endswith(".mp3"):
48 | shutil.copy2(any_path, mp3_path)
49 | return
50 | if any_path.endswith(".sil") or any_path.endswith(".silk") or any_path.endswith(".slk"):
51 |         sil_to_wav(any_path, any_path)  # 原地把 silk 解码为 wav 数据
52 |         # 解码后的音频仍写在 any_path,下面直接读取它并导出为 mp3
53 | audio = AudioSegment.from_file(any_path)
54 | audio.export(mp3_path, format="mp3")
55 |
56 |
57 | def any_to_wav(any_path, wav_path):
58 | """
59 | 把任意格式转成wav文件
60 | """
61 | if any_path.endswith(".wav"):
62 | shutil.copy2(any_path, wav_path)
63 | return
64 | if any_path.endswith(".sil") or any_path.endswith(".silk") or any_path.endswith(".slk"):
65 | return sil_to_wav(any_path, wav_path)
66 | audio = AudioSegment.from_file(any_path)
67 |     audio = audio.set_frame_rate(8000)  # 百度语音转写支持8000采样率, pcm_s16le, 单通道语音识别
68 |     audio = audio.set_channels(1)  # pydub 的 set_* 方法返回新对象,必须重新赋值才会生效
69 | audio.export(wav_path, format="wav", codec='pcm_s16le')
70 |
71 |
72 | def any_to_sil(any_path, sil_path):
73 | """
74 | 把任意格式转成sil文件
75 | """
76 | if any_path.endswith(".sil") or any_path.endswith(".silk") or any_path.endswith(".slk"):
77 | shutil.copy2(any_path, sil_path)
78 | return 10000
79 | audio = AudioSegment.from_file(any_path)
80 | rate = find_closest_sil_supports(audio.frame_rate)
81 | # Convert to PCM_s16
82 | pcm_s16 = audio.set_sample_width(2)
83 | pcm_s16 = pcm_s16.set_frame_rate(rate)
84 | wav_data = pcm_s16.raw_data
85 | silk_data = pysilk.encode(wav_data, data_rate=rate, sample_rate=rate)
86 | with open(sil_path, "wb") as f:
87 | f.write(silk_data)
88 | return audio.duration_seconds * 1000
89 |
90 |
91 | def any_to_amr(any_path, amr_path):
92 | """
93 | 把任意格式转成amr文件
94 | """
95 | if any_path.endswith(".amr"):
96 | shutil.copy2(any_path, amr_path)
97 | return
98 | if any_path.endswith(".sil") or any_path.endswith(".silk") or any_path.endswith(".slk"):
99 | raise NotImplementedError("Not support file type: {}".format(any_path))
100 | audio = AudioSegment.from_file(any_path)
101 | audio = audio.set_frame_rate(8000) # only support 8000
102 | audio.export(amr_path, format="amr")
103 | return audio.duration_seconds * 1000
104 |
105 |
106 | def sil_to_wav(silk_path, wav_path, rate: int = 24000):
107 | """
108 | silk 文件转 wav
109 | """
110 | wav_data = pysilk.decode_file(silk_path, to_wav=True, sample_rate=rate)
111 | with open(wav_path, "wb") as f:
112 | f.write(wav_data)
113 |
114 |
115 | def split_audio(file_path, max_segment_length_ms=60000):
116 | """
117 | 分割音频文件
118 | """
119 | audio = AudioSegment.from_file(file_path)
120 | audio_length_ms = len(audio)
121 | if audio_length_ms <= max_segment_length_ms:
122 | return audio_length_ms, [file_path]
123 | segments = []
124 | for start_ms in range(0, audio_length_ms, max_segment_length_ms):
125 | end_ms = min(audio_length_ms, start_ms + max_segment_length_ms)
126 | segment = audio[start_ms:end_ms]
127 | segments.append(segment)
128 | file_prefix = file_path[: file_path.rindex(".")]
129 | format = file_path[file_path.rindex(".") + 1 :]
130 | files = []
131 | for i, segment in enumerate(segments):
132 | path = f"{file_prefix}_{i+1}" + f".{format}"
133 | segment.export(path, format=format)
134 | files.append(path)
135 | return audio_length_ms, files
136 |
--------------------------------------------------------------------------------
/plugins/dungeon/dungeon.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | import plugins
4 | from bridge.bridge import Bridge
5 | from bridge.context import ContextType
6 | from bridge.reply import Reply, ReplyType
7 | from common import const
8 | from common.expired_dict import ExpiredDict
9 | from common.log import logger
10 | from config import conf
11 | from plugins import *
12 |
13 |
14 | # https://github.com/bupticybee/ChineseAiDungeonChatGPT
15 | class StoryTeller:
16 | def __init__(self, bot, sessionid, story):
17 | self.bot = bot
18 | self.sessionid = sessionid
19 | bot.sessions.clear_session(sessionid)
20 | self.first_interact = True
21 | self.story = story
22 |
23 | def reset(self):
24 | self.bot.sessions.clear_session(self.sessionid)
25 | self.first_interact = True
26 |
27 | def action(self, user_action):
28 |         if not user_action.endswith("。"):
29 | user_action = user_action + "。"
30 | if self.first_interact:
31 | prompt = (
32 | """现在来充当一个文字冒险游戏,描述时候注意节奏,不要太快,仔细描述各个人物的心情和周边环境。一次只需写四到六句话。
33 | 开头是,"""
34 | + self.story
35 | + " "
36 | + user_action
37 | )
38 | self.first_interact = False
39 | else:
40 | prompt = """继续,一次只需要续写四到六句话,总共就只讲5分钟内发生的事情。""" + user_action
41 | return prompt
42 |
43 |
44 | @plugins.register(
45 | name="Dungeon",
46 | desire_priority=0,
47 | namecn="文字冒险",
48 | desc="A plugin to play dungeon game",
49 | version="1.0",
50 | author="lanvent",
51 | )
52 | class Dungeon(Plugin):
53 | def __init__(self):
54 | super().__init__()
55 | self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context
56 | logger.info("[Dungeon] inited")
57 | # 目前没有设计session过期事件,这里先暂时使用过期字典
58 | if conf().get("expires_in_seconds"):
59 | self.games = ExpiredDict(conf().get("expires_in_seconds"))
60 | else:
61 | self.games = dict()
62 |
63 | def on_handle_context(self, e_context: EventContext):
64 | if e_context["context"].type != ContextType.TEXT:
65 | return
66 | bottype = Bridge().get_bot_type("chat")
67 | if bottype not in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI]:
68 | return
69 | bot = Bridge().get_bot("chat")
70 | content = e_context["context"].content[:]
71 | clist = e_context["context"].content.split(maxsplit=1)
72 | sessionid = e_context["context"]["session_id"]
73 | logger.debug("[Dungeon] on_handle_context. content: %s" % clist)
74 | trigger_prefix = conf().get("plugin_trigger_prefix", "$")
75 | if clist[0] == f"{trigger_prefix}停止冒险":
76 | if sessionid in self.games:
77 | self.games[sessionid].reset()
78 | del self.games[sessionid]
79 | reply = Reply(ReplyType.INFO, "冒险结束!")
80 | e_context["reply"] = reply
81 | e_context.action = EventAction.BREAK_PASS
82 | elif clist[0] == f"{trigger_prefix}开始冒险" or sessionid in self.games:
83 | if sessionid not in self.games or clist[0] == f"{trigger_prefix}开始冒险":
84 | if len(clist) > 1:
85 | story = clist[1]
86 | else:
87 | story = "你在树林里冒险,指不定会从哪里蹦出来一些奇怪的东西,你握紧手上的手枪,希望这次冒险能够找到一些值钱的东西,你往树林深处走去。"
88 | self.games[sessionid] = StoryTeller(bot, sessionid, story)
89 | reply = Reply(ReplyType.INFO, "冒险开始,你可以输入任意内容,让故事继续下去。故事背景是:" + story)
90 | e_context["reply"] = reply
91 | e_context.action = EventAction.BREAK_PASS # 事件结束,并跳过处理context的默认逻辑
92 | else:
93 | prompt = self.games[sessionid].action(content)
94 | e_context["context"].type = ContextType.TEXT
95 | e_context["context"].content = prompt
96 | e_context.action = EventAction.BREAK # 事件结束,不跳过处理context的默认逻辑
97 |
98 | def get_help_text(self, **kwargs):
99 | help_text = "可以和机器人一起玩文字冒险游戏。\n"
100 |         if not kwargs.get("verbose"):
101 | return help_text
102 | trigger_prefix = conf().get("plugin_trigger_prefix", "$")
103 | help_text = f"{trigger_prefix}开始冒险 " + "背景故事: 开始一个基于{背景故事}的文字冒险,之后你的所有消息会协助完善这个故事。\n" + f"{trigger_prefix}停止冒险: 结束游戏。\n"
104 |         if kwargs.get("verbose"):
105 | help_text += f"\n命令例子: '{trigger_prefix}开始冒险 你在树林里冒险,指不定会从哪里蹦出来一些奇怪的东西,你握紧手上的手枪,希望这次冒险能够找到一些值钱的东西,你往树林深处走去。'"
106 | return help_text
107 |
--------------------------------------------------------------------------------