├── plugins
│   ├── banwords
│   │   ├── .gitignore
│   │   ├── __init__.py
│   │   ├── banwords.txt.template
│   │   ├── config.json.template
│   │   ├── README.md
│   │   └── banwords.py
│   ├── role
│   │   ├── __init__.py
│   │   └── README.md
│   ├── tool
│   │   ├── __init__.py
│   │   ├── config.json.template
│   │   └── README.md
│   ├── bdunit
│   │   ├── __init__.py
│   │   ├── config.json.template
│   │   └── README.md
│   ├── finish
│   │   ├── __init__.py
│   │   └── finish.py
│   ├── godcmd
│   │   ├── __init__.py
│   │   ├── config.json.template
│   │   └── README.md
│   ├── hello
│   │   ├── __init__.py
│   │   └── hello.py
│   ├── dungeon
│   │   ├── __init__.py
│   │   ├── README.md
│   │   └── dungeon.py
│   ├── keyword
│   │   ├── __init__.py
│   │   ├── config.json.template
│   │   ├── test-keyword.png
│   │   ├── README.md
│   │   └── keyword.py
│   ├── plugin.py
│   ├── __init__.py
│   ├── source.json
│   └── event.py
├── docker
│   ├── sample-chatgpt-on-wechat
│   │   ├── Name
│   │   ├── .env
│   │   └── Makefile
│   ├── build.latest.sh
│   ├── chatgpt-on-wechat-voice-reply
│   │   ├── Dockerfile.alpine
│   │   ├── Dockerfile.debian
│   │   ├── docker-compose.yaml
│   │   └── entrypoint.sh
│   ├── build.debian.sh
│   ├── build.alpine.sh
│   ├── docker-compose.yaml
│   ├── Dockerfile.alpine.latest
│   ├── Dockerfile.latest
│   ├── Dockerfile.alpine
│   ├── Dockerfile.debian
│   └── entrypoint.sh
├── Dockerfile
├── docs
│   └── images
│       ├── planet.jpg
│       ├── aigcopen.png
│       ├── contact.jpg
│       ├── group-chat-sample.jpg
│       ├── single-chat-sample.jpg
│       └── image-create-sample.jpg
├── voice
│   ├── baidu
│   │   ├── config.json.template
│   │   ├── README.md
│   │   └── baidu_voice.py
│   ├── voice.py
│   ├── azure
│   │   ├── config.json.template
│   │   └── azure_voice.py
│   ├── factory.py
│   ├── openai
│   │   └── openai_voice.py
│   ├── google
│   │   └── google_voice.py
│   ├── pytts
│   │   └── pytts_voice.py
│   └── audio_convert.py
├── requirements.txt
├── common
│   ├── const.py
│   ├── singleton.py
│   ├── tmp_dir.py
│   ├── package_manager.py
│   ├── log.py
│   ├── dequeue.py
│   ├── expired_dict.py
│   ├── token_bucket.py
│   ├── utils.py
│   ├── time_check.py
│   └── sorted_dict.py
├── pyproject.toml
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── config.yml
│   │   ├── 2.feature.yml
│   │   └── 1.bug.yml
│   └── workflows
│       └── deploy-image.yml
├── translate
│   ├── factory.py
│   ├── translator.py
│   └── baidu
│       └── baidu_translate.py
├── .flake8
├── scripts
│   ├── tout.sh
│   ├── shutdown.sh
│   └── start.sh
├── nixpacks.toml
├── lib
│   └── itchat
│       ├── components
│       │   ├── __init__.py
│       │   ├── hotreload.py
│       │   └── register.py
│       ├── async_components
│       │   ├── __init__.py
│       │   ├── hotreload.py
│       │   └── register.py
│       ├── content.py
│       ├── storage
│       │   ├── messagequeue.py
│       │   └── __init__.py
│       ├── config.py
│       ├── log.py
│       ├── returnvalues.py
│       ├── __init__.py
│       └── utils.py
├── bot
│   ├── bot.py
│   ├── bot_factory.py
│   ├── baidu
│   │   └── baidu_unit_bot.py
│   ├── openai
│   │   ├── open_ai_image.py
│   │   ├── open_ai_session.py
│   │   └── open_ai_bot.py
│   ├── session_manager.py
│   ├── chatgpt
│   │   └── chat_gpt_session.py
│   └── linkai
│       └── link_ai_bot.py
├── .gitignore
├── bridge
│   ├── reply.py
│   ├── context.py
│   └── bridge.py
├── requirements-optional.txt
├── channel
│   ├── wechatmp
│   │   ├── common.py
│   │   ├── wechatmp_message.py
│   │   ├── wechatmp_client.py
│   │   ├── active_reply.py
│   │   └── README.md
│   ├── wechatcom
│   │   ├── wechatcomapp_client.py
│   │   ├── wechatcomapp_message.py
│   │   └── README.md
│   ├── channel_factory.py
│   ├── channel.py
│   ├── chat_message.py
│   ├── terminal
│   │   └── terminal_channel.py
│   └── wechat
│       ├── wechaty_message.py
│       ├── wechat_message.py
│       └── wechaty_channel.py
├── .pre-commit-config.yaml
├── config-template.json
├── LICENSE
└── app.py
/plugins/banwords/.gitignore:
--------------------------------------------------------------------------------
1 | banwords.txt
--------------------------------------------------------------------------------
/plugins/role/__init__.py:
--------------------------------------------------------------------------------
1 | from .role import *
2 |
--------------------------------------------------------------------------------
/plugins/tool/__init__.py:
--------------------------------------------------------------------------------
1 | from .tool import *
2 |
--------------------------------------------------------------------------------
/plugins/bdunit/__init__.py:
--------------------------------------------------------------------------------
1 | from .bdunit import *
2 |
--------------------------------------------------------------------------------
/plugins/finish/__init__.py:
--------------------------------------------------------------------------------
1 | from .finish import *
2 |
--------------------------------------------------------------------------------
/plugins/godcmd/__init__.py:
--------------------------------------------------------------------------------
1 | from .godcmd import *
2 |
--------------------------------------------------------------------------------
/plugins/hello/__init__.py:
--------------------------------------------------------------------------------
1 | from .hello import *
2 |
--------------------------------------------------------------------------------
/plugins/banwords/__init__.py:
--------------------------------------------------------------------------------
1 | from .banwords import *
2 |
--------------------------------------------------------------------------------
/plugins/dungeon/__init__.py:
--------------------------------------------------------------------------------
1 | from .dungeon import *
2 |
--------------------------------------------------------------------------------
/plugins/keyword/__init__.py:
--------------------------------------------------------------------------------
1 | from .keyword import *
2 |
--------------------------------------------------------------------------------
/docker/sample-chatgpt-on-wechat/Name:
--------------------------------------------------------------------------------
1 | zhayujie/chatgpt-on-wechat
2 |
--------------------------------------------------------------------------------
/plugins/banwords/banwords.txt.template:
--------------------------------------------------------------------------------
1 | nipples
2 | pennis
3 | 法轮功
--------------------------------------------------------------------------------
/plugins/godcmd/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "password": "",
3 | "admin_users": []
4 | }
5 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ghcr.io/zhayujie/chatgpt-on-wechat:latest
2 |
3 | ENTRYPOINT ["/entrypoint.sh"]
--------------------------------------------------------------------------------
/plugins/keyword/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "keyword": {
3 | "关键字匹配": "测试成功"
4 | }
5 | }
6 |
--------------------------------------------------------------------------------
/docs/images/planet.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuliangXiu/chatgpt-on-wechat/HEAD/docs/images/planet.jpg
--------------------------------------------------------------------------------
/docs/images/aigcopen.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuliangXiu/chatgpt-on-wechat/HEAD/docs/images/aigcopen.png
--------------------------------------------------------------------------------
/docs/images/contact.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuliangXiu/chatgpt-on-wechat/HEAD/docs/images/contact.jpg
--------------------------------------------------------------------------------
/plugins/bdunit/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "service_id": "s...",
3 | "api_key": "",
4 | "secret_key": ""
5 | }
6 |
--------------------------------------------------------------------------------
/plugins/keyword/test-keyword.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuliangXiu/chatgpt-on-wechat/HEAD/plugins/keyword/test-keyword.png
--------------------------------------------------------------------------------
/docs/images/group-chat-sample.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuliangXiu/chatgpt-on-wechat/HEAD/docs/images/group-chat-sample.jpg
--------------------------------------------------------------------------------
/docs/images/single-chat-sample.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuliangXiu/chatgpt-on-wechat/HEAD/docs/images/single-chat-sample.jpg
--------------------------------------------------------------------------------
/docs/images/image-create-sample.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YuliangXiu/chatgpt-on-wechat/HEAD/docs/images/image-create-sample.jpg
--------------------------------------------------------------------------------
/plugins/banwords/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "action": "replace",
3 | "reply_filter": true,
4 | "reply_action": "ignore"
5 | }
6 |
--------------------------------------------------------------------------------
/voice/baidu/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "lang": "zh",
3 | "ctp": 1,
4 | "spd": 5,
5 | "pit": 5,
6 | "vol": 5,
7 | "per": 0
8 | }
9 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | openai==0.27.2
2 | HTMLParser>=0.0.2
3 | PyQRCode>=1.2.1
4 | qrcode>=7.4.2
5 | requests>=2.28.2
6 | chardet>=5.1.0
7 | Pillow
8 | pre-commit
--------------------------------------------------------------------------------
/plugins/dungeon/README.md:
--------------------------------------------------------------------------------
1 | A chat plugin for playing a dungeon adventure game. Triggers:
2 |
3 | - `$开始冒险 <背景故事>` - start a dungeon game with <背景故事> as the background story; if omitted, a default story is used. Every message you send afterwards helps the AI continue the story.
4 | - `$停止冒险` - stop the dungeon game and return to the normal AI.
5 |
--------------------------------------------------------------------------------
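
To make the trigger format above concrete, here is a minimal, hypothetical parsing sketch; the actual logic lives in plugins/dungeon/dungeon.py (not reproduced in this dump), and the default story string here is only a placeholder.

```python
# Hypothetical parsing sketch for the dungeon triggers described in the README above.
DEFAULT_STORY = "你在一个黑暗的地牢中醒来……"  # placeholder default background story


def parse_dungeon_command(content: str):
    """Return ("start", story), ("stop", None) or (None, None) for other messages."""
    if content.startswith("$开始冒险"):
        story = content[len("$开始冒险"):].strip() or DEFAULT_STORY
        return "start", story
    if content.strip() == "$停止冒险":
        return "stop", None
    return None, None


print(parse_dungeon_command("$开始冒险 被遗忘的矿井"))  # ('start', '被遗忘的矿井')
print(parse_dungeon_command("$停止冒险"))              # ('stop', None)
```
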
/plugins/plugin.py:
--------------------------------------------------------------------------------
1 | class Plugin:
2 | def __init__(self):
3 | self.handlers = {}
4 |
5 | def get_help_text(self, **kwargs):
6 | return "暂无帮助信息"
7 |
--------------------------------------------------------------------------------
/common/const.py:
--------------------------------------------------------------------------------
1 | # bot_type
2 | OPEN_AI = "openAI"
3 | CHATGPT = "chatGPT"
4 | BAIDU = "baidu"
5 | CHATGPTONAZURE = "chatGPTOnAzure"
6 | LINKAI = "linkai"
7 |
8 | VERSION = "1.3.0"
9 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.black]
2 | line-length = 176
3 | target-version = ['py37']
4 | include = '\.pyi?$'
5 | extend-exclude = '.+/(dist|.venv|venv|build|lib)/.+'
6 |
7 | [tool.isort]
8 | profile = "black"
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: false
2 | contact_links:
3 | - name: 知识星球
4 | url: https://public.zsxq.com/groups/88885848842852.html
5 | about: 如果你想了解更多项目细节,并与开发者们交流更多关于AI技术的实践,欢迎加入星球
6 |
7 |
--------------------------------------------------------------------------------
/translate/factory.py:
--------------------------------------------------------------------------------
1 | def create_translator(translator_type):
2 |     if translator_type == "baidu":
3 | from translate.baidu.baidu_translate import BaiduTranslator
4 |
5 | return BaiduTranslator()
6 | raise RuntimeError
7 |
--------------------------------------------------------------------------------
/plugins/keyword/README.md:
--------------------------------------------------------------------------------
1 | # Purpose
2 | Match keywords and reply with preset content.
3 |
4 | # Tested scenario
5 | So far it has been used with a WeChat Official Account.
6 |
7 | # Usage
8 | 1. Copy `config.json.template` to `config.json`
9 | 2. Add the keyword/reply pairs you want to match under the `keyword` field
10 | 3. Restart the program to verify
11 |
12 | # Result
13 | ![](test-keyword.png)
--------------------------------------------------------------------------------
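
A minimal sketch of the lookup the plugin performs, assuming `config.json` follows the template above; the exact-match behaviour is an assumption for illustration, and the real handler is plugins/keyword/keyword.py.

```python
import json

# Load the keyword/reply mapping from the plugin's config.json (see the template above).
with open("plugins/keyword/config.json", encoding="utf-8") as f:
    keyword_map = json.load(f).get("keyword", {})


def match_keyword(content: str):
    # Exact-match lookup (an assumption for this sketch): reply only when the whole
    # message equals a configured keyword, otherwise return None.
    return keyword_map.get(content.strip())


print(match_keyword("关键字匹配"))  # -> "测试成功" with the template config
```
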
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | max-line-length = 176
3 | select = E303,W293,W291,W292,E305,E231,E302
4 | exclude =
5 | .tox,
6 | __pycache__,
7 | *.pyc,
8 | .env
9 | venv/*
10 | .venv/*
11 | reports/*
12 | dist/*
13 | lib/*
--------------------------------------------------------------------------------
/docker/build.latest.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | unset KUBECONFIG
4 |
5 | cd .. && docker build -f docker/Dockerfile.latest \
6 | -t zhayujie/chatgpt-on-wechat .
7 |
8 | docker tag zhayujie/chatgpt-on-wechat zhayujie/chatgpt-on-wechat:$(date +%y%m%d)
--------------------------------------------------------------------------------
/common/singleton.py:
--------------------------------------------------------------------------------
1 | def singleton(cls):
2 | instances = {}
3 |
4 | def get_instance(*args, **kwargs):
5 | if cls not in instances:
6 | instances[cls] = cls(*args, **kwargs)
7 | return instances[cls]
8 |
9 | return get_instance
10 |
--------------------------------------------------------------------------------
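
A quick usage sketch of the decorator above; the `AppState` class is hypothetical and only illustrates that repeated construction returns the cached instance.

```python
from common.singleton import singleton


@singleton
class AppState:
    """Hypothetical example class; any decorated class behaves the same way."""

    def __init__(self):
        self.counter = 0


# The decorator caches one instance per decorated class, so both names refer to it.
a = AppState()
b = AppState()
a.counter += 1
assert a is b and b.counter == 1
```
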
/plugins/tool/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "tools": [
3 | "python",
4 | "url-get",
5 | "terminal",
6 | "meteo-weather"
7 | ],
8 | "kwargs": {
9 | "top_k_results": 2,
10 | "no_default": false,
11 | "model_name": "gpt-3.5-turbo"
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/plugins/__init__.py:
--------------------------------------------------------------------------------
1 | from .event import *
2 | from .plugin import *
3 | from .plugin_manager import PluginManager
4 |
5 | instance = PluginManager()
6 |
7 | register = instance.register
8 | # load_plugins = instance.load_plugins
9 | # emit_event = instance.emit_event
10 |
--------------------------------------------------------------------------------
/scripts/tout.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # tail the log output
3 |
4 | cd `dirname $0`/..
5 | export BASE_DIR=`pwd`
6 | echo $BASE_DIR
7 |
8 | # check the nohup.out log output file
9 | if [ ! -f "${BASE_DIR}/nohup.out" ]; then
10 | echo "No file ${BASE_DIR}/nohup.out"
11 | exit -1;
12 | fi
13 |
14 | tail -f "${BASE_DIR}/nohup.out"
15 |
--------------------------------------------------------------------------------
/nixpacks.toml:
--------------------------------------------------------------------------------
1 | [phases.setup]
2 | nixPkgs = ['python310']
3 | cmds = ['apt-get update','apt-get install -y --no-install-recommends ffmpeg espeak libavcodec-extra']
4 | [phases.install]
5 | cmds = ['python -m venv /opt/venv && . /opt/venv/bin/activate && pip install -r requirements.txt && pip install -r requirements-optional.txt']
6 | [start]
7 | cmd = "python ./app.py"
--------------------------------------------------------------------------------
/lib/itchat/components/__init__.py:
--------------------------------------------------------------------------------
1 | from .contact import load_contact
2 | from .hotreload import load_hotreload
3 | from .login import load_login
4 | from .messages import load_messages
5 | from .register import load_register
6 |
7 | def load_components(core):
8 | load_contact(core)
9 | load_hotreload(core)
10 | load_login(core)
11 | load_messages(core)
12 | load_register(core)
13 |
--------------------------------------------------------------------------------
/lib/itchat/async_components/__init__.py:
--------------------------------------------------------------------------------
1 | from .contact import load_contact
2 | from .hotreload import load_hotreload
3 | from .login import load_login
4 | from .messages import load_messages
5 | from .register import load_register
6 |
7 | def load_components(core):
8 | load_contact(core)
9 | load_hotreload(core)
10 | load_login(core)
11 | load_messages(core)
12 | load_register(core)
13 |
--------------------------------------------------------------------------------
/translate/translator.py:
--------------------------------------------------------------------------------
1 | """
2 | Translation service abstract class
3 | """
4 |
5 |
6 | class Translator(object):
7 | # please use https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes to specify language
8 | def translate(self, query: str, from_lang: str = "", to_lang: str = "en") -> str:
9 | """
10 | Translate text from one language to another
11 | """
12 | raise NotImplementedError
13 |
--------------------------------------------------------------------------------
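
For illustration, a minimal subclass satisfying the interface above (a hypothetical pass-through translator; the real implementation is translate/baidu/baidu_translate.py).

```python
from translate.translator import Translator


class EchoTranslator(Translator):
    """Hypothetical translator that returns the query unchanged, useful for testing."""

    def translate(self, query: str, from_lang: str = "", to_lang: str = "en") -> str:
        # A real implementation would call a translation API here.
        return query


if __name__ == "__main__":
    print(EchoTranslator().translate("你好", from_lang="zh", to_lang="en"))
```
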
/voice/voice.py:
--------------------------------------------------------------------------------
1 | """
2 | Voice service abstract class
3 | """
4 |
5 |
6 | class Voice(object):
7 | def voiceToText(self, voice_file):
8 | """
9 | Send voice to voice service and get text
10 | """
11 | raise NotImplementedError
12 |
13 | def textToVoice(self, text):
14 | """
15 | Send text to voice service and get voice
16 | """
17 | raise NotImplementedError
18 |
--------------------------------------------------------------------------------
/bot/bot.py:
--------------------------------------------------------------------------------
1 | """
2 | Auto-reply chat robot abstract class
3 | """
4 |
5 |
6 | from bridge.context import Context
7 | from bridge.reply import Reply
8 |
9 |
10 | class Bot(object):
11 | def reply(self, query, context: Context = None) -> Reply:
12 | """
13 | bot auto-reply content
14 |         :param query: received message
15 | :return: reply content
16 | """
17 | raise NotImplementedError
18 |
--------------------------------------------------------------------------------
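
A minimal sketch of the contract above (a hypothetical EchoBot, not part of the repo); concrete bots such as bot/openai/open_ai_bot.py implement `reply` in the same shape.

```python
from bot.bot import Bot
from bridge.context import Context
from bridge.reply import Reply, ReplyType


class EchoBot(Bot):
    """Hypothetical bot that echoes the query back as a text reply."""

    def reply(self, query, context: Context = None) -> Reply:
        # Concrete bots call a model here; this sketch just wraps the query in a Reply.
        return Reply(ReplyType.TEXT, query)
```
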
/lib/itchat/content.py:
--------------------------------------------------------------------------------
1 | TEXT = 'Text'
2 | MAP = 'Map'
3 | CARD = 'Card'
4 | NOTE = 'Note'
5 | SHARING = 'Sharing'
6 | PICTURE = 'Picture'
7 | RECORDING = VOICE = 'Recording'
8 | ATTACHMENT = 'Attachment'
9 | VIDEO = 'Video'
10 | FRIENDS = 'Friends'
11 | SYSTEM = 'System'
12 |
13 | INCOME_MSG = [TEXT, MAP, CARD, NOTE, SHARING, PICTURE,
14 | RECORDING, VOICE, ATTACHMENT, VIDEO, FRIENDS, SYSTEM]
15 |
--------------------------------------------------------------------------------
/plugins/godcmd/README.md:
--------------------------------------------------------------------------------
1 | ## Description
2 |
3 | Command plugin.
4 |
5 | ## Usage
6 |
7 | Copy `config.json.template` to `config.json` and set the value of `password` to your passphrase.
8 |
9 | If no password is configured, a temporary password for the current run is printed to the console log; watch for a line in the following format.
10 |
11 | ```
12 | [INFO][2023-04-06 23:53:47][godcmd.py:165] - [Godcmd] 因未设置口令,本次的临时口令为0971。
13 | ```
14 |
15 | In a private chat, use the `#auth` command with the password to authenticate as an administrator. For the full command list, send `#help`:
16 |
17 | `#auth <口令>` - administrator authentication; only possible in a private chat.
18 | `#help` - print the help text; **whether you are an administrator** and whether you are in a group chat affect what is shown.
19 |
--------------------------------------------------------------------------------
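
An illustrative sketch of the password bootstrap and `#auth` check described above; the actual implementation is plugins/godcmd/godcmd.py, and details such as the exact log format are simplified here.

```python
import json
import random

# Read the plugin config (see config.json.template above).
with open("plugins/godcmd/config.json", encoding="utf-8") as f:
    config = json.load(f)

# If no password is configured, fall back to a temporary 4-digit code for this run,
# as the README describes.
password = config.get("password") or "%04d" % random.randint(0, 9999)
if not config.get("password"):
    print("[Godcmd] no password configured, temporary password for this run: %s" % password)


def auth(command: str) -> bool:
    # "#auth <password>" grants admin rights, and only in a private chat.
    parts = command.split()
    return len(parts) == 2 and parts[0] == "#auth" and parts[1] == password
```
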
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .idea
3 | .vscode
4 | .wechaty/
5 | __pycache__/
6 | venv*
7 | *.pyc
8 | config.json
9 | QR.png
10 | nohup.out
11 | tmp
12 | plugins.json
13 | itchat.pkl
14 | *.log
15 | user_datas.pkl
16 | chatgpt_tool_hub/
17 | plugins/**/
18 | !plugins/bdunit
19 | !plugins/dungeon
20 | !plugins/finish
21 | !plugins/godcmd
22 | !plugins/tool
23 | !plugins/banwords
24 | !plugins/banwords/**/
25 | !plugins/hello
26 | !plugins/role
27 | !plugins/keyword
--------------------------------------------------------------------------------
/scripts/shutdown.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # stop the service
4 | cd `dirname $0`/..
5 | export BASE_DIR=`pwd`
6 | pid=`ps ax | grep -i app.py | grep "${BASE_DIR}" | grep python3 | grep -v grep | awk '{print $1}'`
7 | if [ -z "$pid" ] ; then
8 | echo "No chatgpt-on-wechat running."
9 | exit -1;
10 | fi
11 |
12 | echo "The chatgpt-on-wechat(${pid}) is running..."
13 |
14 | kill ${pid}
15 |
16 | echo "Send shutdown request to chatgpt-on-wechat(${pid}) OK"
17 |
--------------------------------------------------------------------------------
/plugins/source.json:
--------------------------------------------------------------------------------
1 | {
2 | "repo": {
3 | "sdwebui": {
4 | "url": "https://github.com/lanvent/plugin_sdwebui.git",
5 | "desc": "利用stable-diffusion画图的插件"
6 | },
7 | "replicate": {
8 | "url": "https://github.com/lanvent/plugin_replicate.git",
9 | "desc": "利用replicate api画图的插件"
10 | },
11 | "summary": {
12 | "url": "https://github.com/lanvent/plugin_summary.git",
13 | "desc": "总结聊天记录的插件"
14 | }
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/scripts/start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # run chatgpt-on-wechat in the background
3 |
4 | cd `dirname $0`/..
5 | export BASE_DIR=`pwd`
6 | echo $BASE_DIR
7 |
8 | # check the nohup.out log output file
9 | if [ ! -f "${BASE_DIR}/nohup.out" ]; then
10 | touch "${BASE_DIR}/nohup.out"
11 | echo "create file ${BASE_DIR}/nohup.out"
12 | fi
13 |
14 | nohup python3 "${BASE_DIR}/app.py" & tail -f "${BASE_DIR}/nohup.out"
15 |
16 | echo "Chat_on_webchat is starting,you can check the ${BASE_DIR}/nohup.out"
17 |
--------------------------------------------------------------------------------
/common/tmp_dir.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pathlib
3 |
4 | from config import conf
5 |
6 |
7 | class TmpDir(object):
8 | """A temporary directory that is deleted when the object is destroyed."""
9 |
10 | tmpFilePath = pathlib.Path("./tmp/")
11 |
12 | def __init__(self):
13 | pathExists = os.path.exists(self.tmpFilePath)
14 | if not pathExists:
15 | os.makedirs(self.tmpFilePath)
16 |
17 | def path(self):
18 | return str(self.tmpFilePath) + "/"
19 |
--------------------------------------------------------------------------------
/voice/azure/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "speech_synthesis_voice_name": "zh-CN-XiaoxiaoNeural",
3 | "auto_detect": true,
4 | "speech_synthesis_zh": "zh-CN-YunxiNeural",
5 | "speech_synthesis_en": "en-US-JacobNeural",
6 | "speech_synthesis_ja": "ja-JP-AoiNeural",
7 | "speech_synthesis_ko": "ko-KR-SoonBokNeural",
8 | "speech_synthesis_de": "de-DE-LouisaNeural",
9 | "speech_synthesis_fr": "fr-FR-BrigitteNeural",
10 | "speech_synthesis_es": "es-ES-LaiaNeural",
11 | "speech_recognition_language": "zh-CN"
12 | }
13 |
--------------------------------------------------------------------------------
/docker/chatgpt-on-wechat-voice-reply/Dockerfile.alpine:
--------------------------------------------------------------------------------
1 | FROM zhayujie/chatgpt-on-wechat:alpine
2 |
3 | LABEL maintainer="foo@bar.com"
4 | ARG TZ='Asia/Shanghai'
5 |
6 | USER root
7 |
8 | RUN apk add --no-cache \
9 | ffmpeg \
10 | espeak \
11 | && pip install --no-cache \
12 | baidu-aip \
13 | chardet \
14 | SpeechRecognition
15 |
16 | # replace entrypoint
17 | ADD ./entrypoint.sh /entrypoint.sh
18 |
19 | RUN chmod +x /entrypoint.sh
20 |
21 | USER noroot
22 |
23 | ENTRYPOINT ["/entrypoint.sh"]
--------------------------------------------------------------------------------
/bridge/reply.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | from enum import Enum
4 |
5 |
6 | class ReplyType(Enum):
7 |     TEXT = 1  # text
8 |     VOICE = 2  # audio file
9 |     IMAGE = 3  # image file
10 |     IMAGE_URL = 4  # image URL
11 |
12 | INFO = 9
13 | ERROR = 10
14 |
15 | def __str__(self):
16 | return self.name
17 |
18 |
19 | class Reply:
20 | def __init__(self, type: ReplyType = None, content=None):
21 | self.type = type
22 | self.content = content
23 |
24 | def __str__(self):
25 | return "Reply(type={}, content={})".format(self.type, self.content)
26 |
--------------------------------------------------------------------------------
/docker/chatgpt-on-wechat-voice-reply/Dockerfile.debian:
--------------------------------------------------------------------------------
1 | FROM zhayujie/chatgpt-on-wechat:debian
2 |
3 | LABEL maintainer="foo@bar.com"
4 | ARG TZ='Asia/Shanghai'
5 |
6 | USER root
7 |
8 | RUN apt-get update \
9 | && apt-get install -y --no-install-recommends \
10 | ffmpeg \
11 | espeak \
12 | && pip install --no-cache \
13 | baidu-aip \
14 | chardet \
15 | SpeechRecognition
16 |
17 | # replace entrypoint
18 | ADD ./entrypoint.sh /entrypoint.sh
19 |
20 | RUN chmod +x /entrypoint.sh
21 |
22 | USER noroot
23 |
24 | ENTRYPOINT ["/entrypoint.sh"]
25 |
--------------------------------------------------------------------------------
/docker/sample-chatgpt-on-wechat/.env:
--------------------------------------------------------------------------------
1 | OPEN_AI_API_KEY=YOUR API KEY
2 | OPEN_AI_PROXY=
3 | SINGLE_CHAT_PREFIX=["bot", "@bot"]
4 | SINGLE_CHAT_REPLY_PREFIX="[bot] "
5 | GROUP_CHAT_PREFIX=["@bot"]
6 | GROUP_NAME_WHITE_LIST=["ChatGPT测试群", "ChatGPT测试群2"]
7 | IMAGE_CREATE_PREFIX=["画", "看", "找"]
8 | CONVERSATION_MAX_TOKENS=1000
9 | SPEECH_RECOGNITION=false
10 | CHARACTER_DESC=你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。
11 | EXPIRES_IN_SECONDS=3600
12 |
13 | # Optional
14 | #CHATGPT_ON_WECHAT_PREFIX=/app
15 | #CHATGPT_ON_WECHAT_CONFIG_PATH=/app/config.json
16 | #CHATGPT_ON_WECHAT_EXEC=python app.py
--------------------------------------------------------------------------------
/docker/build.debian.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # fetch latest release tag
4 | CHATGPT_ON_WECHAT_TAG=`curl -sL "https://api.github.com/repos/zhayujie/chatgpt-on-wechat/releases/latest" | \
5 | grep '"tag_name":' | \
6 | sed -E 's/.*"([^"]+)".*/\1/'`
7 |
8 | # build image
9 | docker build -f Dockerfile.debian \
10 | --build-arg CHATGPT_ON_WECHAT_VER=$CHATGPT_ON_WECHAT_TAG \
11 | -t zhayujie/chatgpt-on-wechat .
12 |
13 | # tag image
14 | docker tag zhayujie/chatgpt-on-wechat zhayujie/chatgpt-on-wechat:debian
15 | docker tag zhayujie/chatgpt-on-wechat zhayujie/chatgpt-on-wechat:$CHATGPT_ON_WECHAT_TAG-debian
--------------------------------------------------------------------------------
/docker/sample-chatgpt-on-wechat/Makefile:
--------------------------------------------------------------------------------
1 | IMG:=`cat Name`
2 | MOUNT:=
3 | PORT_MAP:=
4 | DOTENV:=.env
5 | CONTAINER_NAME:=sample-chatgpt-on-wechat
6 |
7 | echo:
8 | echo $(IMG)
9 |
10 | run_d:
11 | docker rm $(CONTAINER_NAME) || echo
12 | docker run -dt --name $(CONTAINER_NAME) $(PORT_MAP) \
13 | --env-file=$(DOTENV) \
14 | $(MOUNT) $(IMG)
15 |
16 | run_i:
17 | docker rm $(CONTAINER_NAME) || echo
18 | docker run -it --name $(CONTAINER_NAME) $(PORT_MAP) \
19 | --env-file=$(DOTENV) \
20 | $(MOUNT) $(IMG)
21 |
22 | stop:
23 | docker stop $(CONTAINER_NAME)
24 |
25 | rm: stop
26 | docker rm $(CONTAINER_NAME)
27 |
--------------------------------------------------------------------------------
/docker/build.alpine.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # fetch latest release tag
4 | CHATGPT_ON_WECHAT_TAG=`curl -sL "https://api.github.com/repos/zhayujie/chatgpt-on-wechat/releases/latest" | \
5 | grep '"tag_name":' | \
6 | sed -E 's/.*"([^"]+)".*/\1/'`
7 |
8 | # build image
9 | docker build -f Dockerfile.alpine \
10 | --build-arg CHATGPT_ON_WECHAT_VER=$CHATGPT_ON_WECHAT_TAG \
11 | -t zhayujie/chatgpt-on-wechat .
12 |
13 | # tag image
14 | docker tag zhayujie/chatgpt-on-wechat zhayujie/chatgpt-on-wechat:alpine
15 | docker tag zhayujie/chatgpt-on-wechat zhayujie/chatgpt-on-wechat:$CHATGPT_ON_WECHAT_TAG-alpine
16 |
--------------------------------------------------------------------------------
/plugins/banwords/README.md:
--------------------------------------------------------------------------------
1 |
2 | ## Description
3 |
4 | A simple banned-words plugin. It does not support word segmentation yet; import your own word list into `banwords.txt` in the plugin folder, one word per line. A reference word list is [1](https://github.com/cjh0613/tencent-sensitive-words/blob/main/sensitive_words_lines.txt).
5 |
6 | Before use, copy `config.json.template` to `config.json` and adjust it as needed.
7 |
8 | The plugin currently supports two actions for incoming messages:
9 |
10 | - `ignore` : ignore the message.
11 | - `replace` : replace the banned words in the message with "*" and reply that the message violated the rules.
12 |
13 | ```json
14 | "action": "replace",
15 | "reply_filter": true,
16 | "reply_action": "ignore"
17 | ```
18 |
19 | In the configuration above:
20 |
21 | - `action`: default action for user messages
22 | - `reply_filter`: whether ChatGPT replies are also filtered for banned words
23 | - `reply_action`: default action for replies when reply filtering is enabled
24 |
25 | ## Credits
26 |
27 | The search implementation comes from https://github.com/toolgood/ToolGood.Words
--------------------------------------------------------------------------------
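
A simplified sketch of the two actions described above. The shipped plugin uses a trie-based matcher adapted from ToolGood.Words; this version is a plain substring scan for illustration only.

```python
# Load the word list (one banned word per line, see banwords.txt.template above).
with open("plugins/banwords/banwords.txt", encoding="utf-8") as f:
    banwords = [line.strip() for line in f if line.strip()]


def filter_text(text: str, action: str = "replace"):
    """Return the filtered text, or None when action is "ignore" and a banned word is hit."""
    hits = [w for w in banwords if w in text]
    if not hits:
        return text
    if action == "ignore":
        return None  # drop the message entirely
    for w in hits:
        text = text.replace(w, "*" * len(w))  # mask each banned word with asterisks
    return text
```
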
/requirements-optional.txt:
--------------------------------------------------------------------------------
1 | tiktoken>=0.3.2 # openai calculate token
2 |
3 | #voice
4 | pydub>=0.25.1 # need ffmpeg
5 | SpeechRecognition # google speech to text
6 | gTTS>=2.3.1 # google text to speech
7 | pyttsx3>=2.90 # pyttsx3 text to speech
8 | baidu_aip>=4.16.10 # baidu voice
9 | azure-cognitiveservices-speech # azure voice
10 | numpy<=1.24.2
11 | langid # language detect
12 |
13 | #install plugin
14 | dulwich
15 |
16 | # wechaty
17 | wechaty>=0.10.7
18 | wechaty_puppet>=0.4.23
19 | pysilk_mod>=1.6.0 # needed by send voice
20 |
21 | # wechatmp wechatcom
22 | web.py
23 | wechatpy
24 |
25 | # chatgpt-tool-hub plugin
26 |
27 | --extra-index-url https://pypi.python.org/simple
28 | chatgpt_tool_hub==0.4.4
--------------------------------------------------------------------------------
/plugins/bdunit/README.md:
--------------------------------------------------------------------------------
1 | ## Description
2 |
3 | Smart dialogue via Baidu UNIT.
4 |
5 | - 1. Problem it solves: commands ChatGPT cannot handle are passed to Baidu UNIT, e.g. weather, date/time, and arithmetic.
6 | - 2. Time questions: what time is it now, what is today's date
7 | - 3. Weather questions: what will the weather be like in Guangzhou tomorrow, will it rain in Shenzhen this weekend
8 | - 4. Arithmetic questions: 23+45=?, 100-23=?, what is 35 in binary?
9 |
10 | ## Usage
11 |
12 | ### Get an API key
13 |
14 | Create an application on the Baidu UNIT website and apply for a Baidu bot; pre-trained models can be imported into your own application.
15 |
16 | See https://ai.baidu.com/unit/home#/home?track=61fe1b0d3407ce3face1d92cb5c291087095fc10c8377aaf and apply on the AI platform at https://console.bce.baidu.com/ai
17 |
18 | ### Configuration file
19 |
20 | Copy `config.json.template` in this folder to `config.json`.
21 |
22 | Fill in the API Key and Secret Key of the application obtained from the Baidu UNIT website.
23 |
24 | ``` json
25 | {
26 | "service_id": "s...", # "bot ID"
27 | "api_key": "",
28 | "secret_key": ""
29 | }
30 | ```
--------------------------------------------------------------------------------
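
A rough sketch of the token request and chat call, modelled on bot/baidu/baidu_unit_bot.py later in this dump; the payload fields other than `service_id` are simplified placeholders, not the plugin's exact request.

```python
import json

import requests

# Read the credentials from the plugin config (see config.json.template above).
with open("plugins/bdunit/config.json", encoding="utf-8") as f:
    cfg = json.load(f)

# Exchange the API key / secret key for an access token.
token_url = (
    "https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials"
    "&client_id=" + cfg["api_key"] + "&client_secret=" + cfg["secret_key"]
)
access_token = requests.get(token_url).json()["access_token"]

# Call the UNIT v3 chat endpoint with a sample query.
chat_url = "https://aip.baidubce.com/rpc/2.0/unit/service/v3/chat?access_token=" + access_token
payload = {
    "version": "3.0",
    "service_id": cfg["service_id"],
    "session_id": "",
    "log_id": "1",
    "request": {"terminal_id": "88888", "query": "明天广州天气怎么样"},
}
print(requests.post(chat_url, json=payload).json())
```
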
/docker/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '2.0'
2 | services:
3 | chatgpt-on-wechat:
4 | build:
5 | context: ./
6 | dockerfile: Dockerfile.alpine
7 | image: zhayujie/chatgpt-on-wechat
8 | container_name: sample-chatgpt-on-wechat
9 | environment:
10 | OPEN_AI_API_KEY: 'YOUR API KEY'
11 | OPEN_AI_PROXY: ''
12 | SINGLE_CHAT_PREFIX: '["bot", "@bot"]'
13 | SINGLE_CHAT_REPLY_PREFIX: '"[bot] "'
14 | GROUP_CHAT_PREFIX: '["@bot"]'
15 | GROUP_NAME_WHITE_LIST: '["ChatGPT测试群", "ChatGPT测试群2"]'
16 | IMAGE_CREATE_PREFIX: '["画", "看", "找"]'
17 | CONVERSATION_MAX_TOKENS: 1000
18 | SPEECH_RECOGNITION: "False"
19 | CHARACTER_DESC: '你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。'
20 | EXPIRES_IN_SECONDS: 3600
--------------------------------------------------------------------------------
/channel/wechatmp/common.py:
--------------------------------------------------------------------------------
1 | import web
2 | from wechatpy.crypto import WeChatCrypto
3 | from wechatpy.exceptions import InvalidSignatureException
4 | from wechatpy.utils import check_signature
5 |
6 | from config import conf
7 |
8 | MAX_UTF8_LEN = 2048
9 |
10 |
11 | class WeChatAPIException(Exception):
12 | pass
13 |
14 |
15 | def verify_server(data):
16 | try:
17 | signature = data.signature
18 | timestamp = data.timestamp
19 | nonce = data.nonce
20 | echostr = data.get("echostr", None)
21 |         token = conf().get("wechatmp_token")  # fill in according to the Basic Configuration page of the Official Account Platform
22 | check_signature(token, signature, timestamp, nonce)
23 | return echostr
24 | except InvalidSignatureException:
25 | raise web.Forbidden("Invalid signature")
26 | except Exception as e:
27 | raise web.Forbidden(str(e))
28 |
--------------------------------------------------------------------------------
/common/package_manager.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import pip
4 | from pip._internal import main as pipmain
5 |
6 | from common.log import _reset_logger, logger
7 |
8 |
9 | def install(package):
10 | pipmain(["install", package])
11 |
12 |
13 | def install_requirements(file):
14 | pipmain(["install", "-r", file, "--upgrade"])
15 | _reset_logger(logger)
16 |
17 |
18 | def check_dulwich():
19 | needwait = False
20 | for i in range(2):
21 | if needwait:
22 | time.sleep(3)
23 | needwait = False
24 | try:
25 | import dulwich
26 |
27 | return
28 | except ImportError:
29 | try:
30 | install("dulwich")
31 | except:
32 | needwait = True
33 | try:
34 | import dulwich
35 | except ImportError:
36 | raise ImportError("Unable to import dulwich")
37 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/2.feature.yml:
--------------------------------------------------------------------------------
1 | name: Feature request 🚀
2 | description: 提出你对项目的新想法或建议。
3 | labels: ['status: needs check']
4 | body:
5 | - type: markdown
6 | attributes:
7 | value: |
8 | 请在上方的`title`中填写简略总结,谢谢❤️。
9 | - type: checkboxes
10 | attributes:
11 | label: ⚠️ 搜索是否存在类似issue
12 | description: >
13 | 请在 [历史issue](https://github.com/zhayujie/chatgpt-on-wechat/issues) 中清空输入框,搜索关键词查找是否存在相似issue。
14 | options:
15 | - label: 我已经搜索过issues和disscussions,没有发现相似issue
16 | required: true
17 | - type: textarea
18 | attributes:
19 | label: 总结
20 | description: 描述feature的功能。
21 | - type: textarea
22 | attributes:
23 | label: 举例
24 | description: 提供聊天示例,草图或相关网址。
25 | - type: textarea
26 | attributes:
27 | label: 动机
28 | description: 描述你提出该feature的动机,比如没有这项feature对你的使用造成了怎样的影响。 请提供更详细的场景描述,这可能会帮助我们发现并提出更好的解决方案。
--------------------------------------------------------------------------------
/voice/factory.py:
--------------------------------------------------------------------------------
1 | """
2 | voice factory
3 | """
4 |
5 |
6 | def create_voice(voice_type):
7 | """
8 | create a voice instance
9 | :param voice_type: voice type code
10 | :return: voice instance
11 | """
12 | if voice_type == "baidu":
13 | from voice.baidu.baidu_voice import BaiduVoice
14 |
15 | return BaiduVoice()
16 | elif voice_type == "google":
17 | from voice.google.google_voice import GoogleVoice
18 |
19 | return GoogleVoice()
20 | elif voice_type == "openai":
21 | from voice.openai.openai_voice import OpenaiVoice
22 |
23 | return OpenaiVoice()
24 | elif voice_type == "pytts":
25 | from voice.pytts.pytts_voice import PyttsVoice
26 |
27 | return PyttsVoice()
28 | elif voice_type == "azure":
29 | from voice.azure.azure_voice import AzureVoice
30 |
31 | return AzureVoice()
32 | raise RuntimeError
33 |
--------------------------------------------------------------------------------
/docker/Dockerfile.alpine.latest:
--------------------------------------------------------------------------------
1 | FROM python:3.10-alpine
2 |
3 | LABEL maintainer="foo@bar.com"
4 | ARG TZ='Asia/Shanghai'
5 |
6 | ARG CHATGPT_ON_WECHAT_VER
7 |
8 | ENV BUILD_PREFIX=/app
9 |
10 | ADD . ${BUILD_PREFIX}
11 |
12 | RUN apk add --no-cache bash ffmpeg espeak \
13 | && cd ${BUILD_PREFIX} \
14 | && cp config-template.json config.json \
15 | && /usr/local/bin/python -m pip install --no-cache --upgrade pip \
16 | && pip install --no-cache -r requirements.txt --extra-index-url https://alpine-wheels.github.io/index\
17 | && pip install --no-cache -r requirements-optional.txt --extra-index-url https://alpine-wheels.github.io/index
18 |
19 | WORKDIR ${BUILD_PREFIX}
20 |
21 | ADD docker/entrypoint.sh /entrypoint.sh
22 |
23 | RUN chmod +x /entrypoint.sh \
24 | && adduser -D -h /home/noroot -u 1000 -s /bin/bash noroot \
25 | && chown -R noroot:noroot ${BUILD_PREFIX}
26 |
27 | USER noroot
28 |
29 | ENTRYPOINT ["/entrypoint.sh"]
30 |
--------------------------------------------------------------------------------
/channel/wechatcom/wechatcomapp_client.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import time
3 |
4 | from wechatpy.enterprise import WeChatClient
5 |
6 |
7 | class WechatComAppClient(WeChatClient):
8 | def __init__(self, corp_id, secret, access_token=None, session=None, timeout=None, auto_retry=True):
9 | super(WechatComAppClient, self).__init__(corp_id, secret, access_token, session, timeout, auto_retry)
10 | self.fetch_access_token_lock = threading.Lock()
11 |
12 | def fetch_access_token(self): # 重载父类方法,加锁避免多线程重复获取access_token
13 | with self.fetch_access_token_lock:
14 | access_token = self.session.get(self.access_token_key)
15 | if access_token:
16 | if not self.expires_at:
17 | return access_token
18 | timestamp = time.time()
19 | if self.expires_at - timestamp > 60:
20 | return access_token
21 | return super().fetch_access_token()
22 |
--------------------------------------------------------------------------------
/voice/openai/openai_voice.py:
--------------------------------------------------------------------------------
1 | """
2 | openai voice service
3 | """
4 | import json
5 |
6 | import openai
7 |
8 | from bridge.reply import Reply, ReplyType
9 | from common.log import logger
10 | from config import conf
11 | from voice.voice import Voice
12 |
13 |
14 | class OpenaiVoice(Voice):
15 | def __init__(self):
16 | openai.api_key = conf().get("open_ai_api_key")
17 |
18 | def voiceToText(self, voice_file):
19 | logger.debug("[Openai] voice file name={}".format(voice_file))
20 | try:
21 | file = open(voice_file, "rb")
22 | result = openai.Audio.transcribe("whisper-1", file)
23 | text = result["text"]
24 | reply = Reply(ReplyType.TEXT, text)
25 | logger.info("[Openai] voiceToText text={} voice file name={}".format(text, voice_file))
26 | except Exception as e:
27 | reply = Reply(ReplyType.ERROR, str(e))
28 | finally:
29 | return reply
30 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/pre-commit/pre-commit-hooks
3 | rev: v4.4.0
4 | hooks:
5 | - id: fix-byte-order-marker
6 | - id: check-case-conflict
7 | - id: check-merge-conflict
8 | - id: debug-statements
9 | - id: pretty-format-json
10 | types: [text]
11 | files: \.json(.template)?$
12 | args: [ --autofix , --no-ensure-ascii, --indent=2, --no-sort-keys]
13 | - id: trailing-whitespace
14 | exclude: '(\/|^)lib\/'
15 | args: [ --markdown-linebreak-ext=md ]
16 | - repo: https://github.com/PyCQA/isort
17 | rev: 5.12.0
18 | hooks:
19 | - id: isort
20 | exclude: '(\/|^)lib\/'
21 | - repo: https://github.com/psf/black
22 | rev: 23.3.0
23 | hooks:
24 | - id: black
25 | exclude: '(\/|^)lib\/'
26 | - repo: https://github.com/PyCQA/flake8
27 | rev: 6.0.0
28 | hooks:
29 | - id: flake8
30 | exclude: '(\/|^)lib\/'
--------------------------------------------------------------------------------
/docker/chatgpt-on-wechat-voice-reply/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '2.0'
2 | services:
3 | chatgpt-on-wechat:
4 | build:
5 | context: ./
6 | dockerfile: Dockerfile.alpine
7 | image: zhayujie/chatgpt-on-wechat-voice-reply
8 | container_name: chatgpt-on-wechat-voice-reply
9 | environment:
10 | OPEN_AI_API_KEY: 'YOUR API KEY'
11 | OPEN_AI_PROXY: ''
12 | SINGLE_CHAT_PREFIX: '["bot", "@bot"]'
13 | SINGLE_CHAT_REPLY_PREFIX: '"[bot] "'
14 | GROUP_CHAT_PREFIX: '["@bot"]'
15 | GROUP_NAME_WHITE_LIST: '["ChatGPT测试群", "ChatGPT测试群2"]'
16 | IMAGE_CREATE_PREFIX: '["画", "看", "找"]'
17 | CONVERSATION_MAX_TOKENS: 1000
18 | SPEECH_RECOGNITION: 'true'
19 | CHARACTER_DESC: '你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。'
20 | EXPIRES_IN_SECONDS: 3600
21 | VOICE_REPLY_VOICE: 'true'
22 | BAIDU_APP_ID: 'YOUR BAIDU APP ID'
23 | BAIDU_API_KEY: 'YOUR BAIDU API KEY'
24 | BAIDU_SECRET_KEY: 'YOUR BAIDU SERVICE KEY'
--------------------------------------------------------------------------------
/config-template.json:
--------------------------------------------------------------------------------
1 | {
2 | "open_ai_api_key": "YOUR API KEY",
3 | "model": "gpt-3.5-turbo",
4 | "proxy": "",
5 | "single_chat_prefix": [
6 | "bot",
7 | "@bot"
8 | ],
9 | "single_chat_reply_prefix": "[bot] ",
10 | "group_chat_prefix": [
11 | "@bot"
12 | ],
13 | "group_name_white_list": [
14 | "ChatGPT测试群",
15 | "ChatGPT测试群2"
16 | ],
17 | "group_chat_in_one_session": [
18 | "ChatGPT测试群"
19 | ],
20 | "image_create_prefix": [
21 | "画",
22 | "看",
23 | "找"
24 | ],
25 | "speech_recognition": false,
26 | "group_speech_recognition": false,
27 | "voice_reply_voice": false,
28 | "conversation_max_tokens": 1000,
29 | "expires_in_seconds": 3600,
30 | "character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。",
31 | "subscribe_msg": "感谢您的关注!\n这里是ChatGPT,可以自由对话。\n支持语音对话。\n支持图片输入。\n支持图片输出,画字开头的消息将按要求创作图片。\n支持tool、角色扮演和文字冒险等丰富的插件。\n输入{trigger_prefix}#help 查看详细指令。",
32 | "use_linkai": false,
33 | "linkai_api_key": "",
34 | "linkai_app_code": ""
35 | }
36 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2022 zhayujie
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy
4 | of this software and associated documentation files (the "Software"), to deal
5 | in the Software without restriction, including without limitation the rights
6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 | copies of the Software, and to permit persons to whom the Software is
8 | furnished to do so, subject to the following conditions:
9 |
10 | The above copyright notice and this permission notice shall be included in all
11 | copies or substantial portions of the Software.
12 |
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 | SOFTWARE.
--------------------------------------------------------------------------------
/common/log.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import sys
3 |
4 |
5 | def _reset_logger(log):
6 | for handler in log.handlers:
7 | handler.close()
8 | log.removeHandler(handler)
9 | del handler
10 | log.handlers.clear()
11 | log.propagate = False
12 | console_handle = logging.StreamHandler(sys.stdout)
13 | console_handle.setFormatter(
14 | logging.Formatter(
15 | "[%(levelname)s][%(asctime)s][%(filename)s:%(lineno)d] - %(message)s",
16 | datefmt="%Y-%m-%d %H:%M:%S",
17 | )
18 | )
19 | file_handle = logging.FileHandler("run.log", encoding="utf-8")
20 | file_handle.setFormatter(
21 | logging.Formatter(
22 | "[%(levelname)s][%(asctime)s][%(filename)s:%(lineno)d] - %(message)s",
23 | datefmt="%Y-%m-%d %H:%M:%S",
24 | )
25 | )
26 | log.addHandler(file_handle)
27 | log.addHandler(console_handle)
28 |
29 |
30 | def _get_logger():
31 | log = logging.getLogger("log")
32 | _reset_logger(log)
33 | log.setLevel(logging.INFO)
34 | return log
35 |
36 |
37 | # global logger handle
38 | logger = _get_logger()
39 |
--------------------------------------------------------------------------------
/lib/itchat/storage/messagequeue.py:
--------------------------------------------------------------------------------
1 | import logging
2 | try:
3 | import Queue as queue
4 | except ImportError:
5 | import queue
6 |
7 | from .templates import AttributeDict
8 |
9 | logger = logging.getLogger('itchat')
10 |
11 | class Queue(queue.Queue):
12 | def put(self, message):
13 | queue.Queue.put(self, Message(message))
14 |
15 | class Message(AttributeDict):
16 | def download(self, fileName):
17 | if hasattr(self.text, '__call__'):
18 | return self.text(fileName)
19 | else:
20 | return b''
21 | def __getitem__(self, value):
22 | if value in ('isAdmin', 'isAt'):
23 | v = value[0].upper() + value[1:] # ''[1:] == ''
24 | logger.debug('%s is expired in 1.3.0, use %s instead.' % (value, v))
25 | value = v
26 | return super(Message, self).__getitem__(value)
27 | def __str__(self):
28 | return '{%s}' % ', '.join(
29 | ['%s: %s' % (repr(k),repr(v)) for k,v in self.items()])
30 | def __repr__(self):
31 | return '<%s: %s>' % (self.__class__.__name__.split('.')[-1],
32 | self.__str__())
33 |
--------------------------------------------------------------------------------
/docker/Dockerfile.latest:
--------------------------------------------------------------------------------
1 | FROM python:3.10-slim
2 |
3 | LABEL maintainer="foo@bar.com"
4 | ARG TZ='Asia/Shanghai'
5 |
6 | ARG CHATGPT_ON_WECHAT_VER
7 |
8 | RUN echo /etc/apt/sources.list
9 | # RUN sed -i 's/deb.debian.org/mirrors.tuna.tsinghua.edu.cn/g' /etc/apt/sources.list
10 | ENV BUILD_PREFIX=/app
11 |
12 | ADD . ${BUILD_PREFIX}
13 |
14 | RUN apt-get update \
15 | &&apt-get install -y --no-install-recommends bash ffmpeg espeak libavcodec-extra\
16 | && cd ${BUILD_PREFIX} \
17 | && cp config-template.json config.json \
18 | && /usr/local/bin/python -m pip install --no-cache --upgrade pip \
19 | && pip install --no-cache -r requirements.txt \
20 | && pip install --no-cache -r requirements-optional.txt \
21 | && pip install azure-cognitiveservices-speech
22 |
23 | WORKDIR ${BUILD_PREFIX}
24 |
25 | ADD docker/entrypoint.sh /entrypoint.sh
26 |
27 | RUN chmod +x /entrypoint.sh \
28 | && mkdir -p /home/noroot \
29 | && groupadd -r noroot \
30 | && useradd -r -g noroot -s /bin/bash -d /home/noroot noroot \
31 | && chown -R noroot:noroot /home/noroot ${BUILD_PREFIX} /usr/local/lib
32 |
33 | USER noroot
34 |
35 | ENTRYPOINT ["/entrypoint.sh"]
--------------------------------------------------------------------------------
/bot/bot_factory.py:
--------------------------------------------------------------------------------
1 | """
2 | bot factory
3 | """
4 | from common import const
5 |
6 |
7 | def create_bot(bot_type):
8 | """
9 | create a bot_type instance
10 | :param bot_type: bot type code
11 | :return: bot instance
12 | """
13 | if bot_type == const.BAIDU:
14 |         # Baidu UNIT dialogue API
15 | from bot.baidu.baidu_unit_bot import BaiduUnitBot
16 |
17 | return BaiduUnitBot()
18 |
19 | elif bot_type == const.CHATGPT:
20 |         # ChatGPT web-style API
21 | from bot.chatgpt.chat_gpt_bot import ChatGPTBot
22 |
23 | return ChatGPTBot()
24 |
25 | elif bot_type == const.OPEN_AI:
26 |         # official OpenAI completion API
27 | from bot.openai.open_ai_bot import OpenAIBot
28 |
29 | return OpenAIBot()
30 |
31 | elif bot_type == const.CHATGPTONAZURE:
32 | # Azure chatgpt service https://azure.microsoft.com/en-in/products/cognitive-services/openai-service/
33 | from bot.chatgpt.chat_gpt_bot import AzureChatGPTBot
34 |
35 | return AzureChatGPTBot()
36 |
37 | elif bot_type == const.LINKAI:
38 | from bot.linkai.link_ai_bot import LinkAIBot
39 | return LinkAIBot()
40 |
41 | raise RuntimeError
42 |
--------------------------------------------------------------------------------
/plugins/role/README.md:
--------------------------------------------------------------------------------
1 | A chat plugin that lets the bot play a specified role. Triggers:
2 |
3 | - `$角色/$role help/帮助` - print the list of currently supported roles.
4 | - `$角色/$role <角色名>` - have the AI play the given role; the role name supports fuzzy matching.
5 | - `$停止扮演` - stop role playing.
6 |
7 | To add custom roles, edit `roles/roles.json`.
8 |
9 | (Most prompts come from https://github.com/rockbenben/ChatGPT-Shortcut/blob/main/src/data/users.tsx)
10 |
11 | An example entry:
12 | ```json
13 | {
14 | "title": "写作助理",
15 | "description": "As a writing improvement assistant, your task is to improve the spelling, grammar, clarity, concision, and overall readability of the text I provided, while breaking down long sentences, reducing repetition, and providing suggestions for improvement. Please provide only the corrected Chinese version of the text and avoid including explanations. Please treat every message I send later as text content.",
16 | "descn": "作为一名中文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性,同时分解长句,减少重复,并提供改进建议。请只提供文本的更正版本,避免包括解释。请把我之后的每一条消息都当作文本内容。",
17 | "wrapper": "内容是:\n\"%s\"",
18 | "remark": "最常使用的角色,用于优化文本的语法、清晰度和简洁度,提高可读性。"
19 | }
20 | ```
21 |
22 | - `title`: the role name.
23 | - `description`: the English prompt, used when the role is triggered with `$role`.
24 | - `descn`: the Chinese prompt, used when the role is triggered with `$角色`.
25 | - `wrapper`: wraps each user message; adds emphasis and helps keep replies on topic.
26 | - `remark`: a short description of the role, shown in the help text.
27 |
--------------------------------------------------------------------------------
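
An illustrative sketch of loading `roles/roles.json` and fuzzy-matching a role title with difflib; both the file layout (a plain list of entries shaped like the example above) and the matching strategy are assumptions, since the plugin source is not included in this dump.

```python
import difflib
import json

# Assumed layout: roles.json is a list of entries shaped like the example above.
with open("plugins/role/roles/roles.json", encoding="utf-8") as f:
    roles = json.load(f)

titles = [r["title"] for r in roles]


def find_role(name: str):
    # Fuzzy match the requested name against the known titles.
    hit = difflib.get_close_matches(name, titles, n=1, cutoff=0.4)
    return next((r for r in roles if r["title"] == hit[0]), None) if hit else None


role = find_role("写作")
if role:
    # "descn" would be used for the $角色 trigger, "description" for $role.
    print(role["remark"])
```
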
/channel/channel_factory.py:
--------------------------------------------------------------------------------
1 | """
2 | channel factory
3 | """
4 |
5 |
6 | def create_channel(channel_type):
7 | """
8 | create a channel instance
9 | :param channel_type: channel type code
10 | :return: channel instance
11 | """
12 | if channel_type == "wx":
13 | from channel.wechat.wechat_channel import WechatChannel
14 |
15 | return WechatChannel()
16 | elif channel_type == "wxy":
17 | from channel.wechat.wechaty_channel import WechatyChannel
18 |
19 | return WechatyChannel()
20 | elif channel_type == "terminal":
21 | from channel.terminal.terminal_channel import TerminalChannel
22 |
23 | return TerminalChannel()
24 | elif channel_type == "wechatmp":
25 | from channel.wechatmp.wechatmp_channel import WechatMPChannel
26 |
27 | return WechatMPChannel(passive_reply=True)
28 | elif channel_type == "wechatmp_service":
29 | from channel.wechatmp.wechatmp_channel import WechatMPChannel
30 |
31 | return WechatMPChannel(passive_reply=False)
32 | elif channel_type == "wechatcom_app":
33 | from channel.wechatcom.wechatcomapp_channel import WechatComAppChannel
34 |
35 | return WechatComAppChannel()
36 | raise RuntimeError
37 |
--------------------------------------------------------------------------------
/channel/channel.py:
--------------------------------------------------------------------------------
1 | """
2 | Message sending channel abstract class
3 | """
4 |
5 | from bridge.bridge import Bridge
6 | from bridge.context import Context
7 | from bridge.reply import *
8 |
9 |
10 | class Channel(object):
11 | NOT_SUPPORT_REPLYTYPE = [ReplyType.VOICE, ReplyType.IMAGE]
12 |
13 | def startup(self):
14 | """
15 | init channel
16 | """
17 | raise NotImplementedError
18 |
19 | def handle_text(self, msg):
20 | """
21 | process received msg
22 | :param msg: message object
23 | """
24 | raise NotImplementedError
25 |
26 |     # unified send method; each Channel implements it and dispatches on the reply's type field
27 | def send(self, reply: Reply, context: Context):
28 | """
29 | send message to user
30 | :param msg: message content
31 | :param receiver: receiver channel account
32 | :return:
33 | """
34 | raise NotImplementedError
35 |
36 | def build_reply_content(self, query, context: Context = None) -> Reply:
37 | return Bridge().fetch_reply_content(query, context)
38 |
39 | def build_voice_to_text(self, voice_file) -> Reply:
40 | return Bridge().fetch_voice_to_text(voice_file)
41 |
42 | def build_text_to_voice(self, text) -> Reply:
43 | return Bridge().fetch_text_to_voice(text)
44 |
--------------------------------------------------------------------------------
/common/dequeue.py:
--------------------------------------------------------------------------------
1 | from queue import Full, Queue
2 | from time import monotonic as time
3 |
4 |
5 | # add implementation of putleft to Queue
6 | class Dequeue(Queue):
7 | def putleft(self, item, block=True, timeout=None):
8 | with self.not_full:
9 | if self.maxsize > 0:
10 | if not block:
11 | if self._qsize() >= self.maxsize:
12 | raise Full
13 | elif timeout is None:
14 | while self._qsize() >= self.maxsize:
15 | self.not_full.wait()
16 | elif timeout < 0:
17 | raise ValueError("'timeout' must be a non-negative number")
18 | else:
19 | endtime = time() + timeout
20 | while self._qsize() >= self.maxsize:
21 | remaining = endtime - time()
22 | if remaining <= 0.0:
23 | raise Full
24 | self.not_full.wait(remaining)
25 | self._putleft(item)
26 | self.unfinished_tasks += 1
27 | self.not_empty.notify()
28 |
29 | def putleft_nowait(self, item):
30 | return self.putleft(item, block=False)
31 |
32 | def _putleft(self, item):
33 | self.queue.appendleft(item)
34 |
--------------------------------------------------------------------------------
/common/expired_dict.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 |
3 |
4 | class ExpiredDict(dict):
5 | def __init__(self, expires_in_seconds):
6 | super().__init__()
7 | self.expires_in_seconds = expires_in_seconds
8 |
9 | def __getitem__(self, key):
10 | value, expiry_time = super().__getitem__(key)
11 | if datetime.now() > expiry_time:
12 | del self[key]
13 | raise KeyError("expired {}".format(key))
14 | self.__setitem__(key, value)
15 | return value
16 |
17 | def __setitem__(self, key, value):
18 | expiry_time = datetime.now() + timedelta(seconds=self.expires_in_seconds)
19 | super().__setitem__(key, (value, expiry_time))
20 |
21 | def get(self, key, default=None):
22 | try:
23 | return self[key]
24 | except KeyError:
25 | return default
26 |
27 | def __contains__(self, key):
28 | try:
29 | self[key]
30 | return True
31 | except KeyError:
32 | return False
33 |
34 | def keys(self):
35 | keys = list(super().keys())
36 | return [key for key in keys if key in self]
37 |
38 | def items(self):
39 | return [(key, self[key]) for key in self.keys()]
40 |
41 | def __iter__(self):
42 | return self.keys().__iter__()
43 |
--------------------------------------------------------------------------------
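
A quick usage sketch of the class above; note that every successful read refreshes the entry's expiry, because `__getitem__` re-stores the value.

```python
import time

from common.expired_dict import ExpiredDict

cache = ExpiredDict(expires_in_seconds=1)
cache["session"] = {"messages": []}
print("session" in cache)    # True while the entry is fresh (and the read refreshes it)
time.sleep(1.1)
print(cache.get("session"))  # None: the entry expired and was removed on access
```
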
/plugins/finish/finish.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | import plugins
4 | from bridge.context import ContextType
5 | from bridge.reply import Reply, ReplyType
6 | from common.log import logger
7 | from config import conf
8 | from plugins import *
9 |
10 |
11 | @plugins.register(
12 | name="Finish",
13 | desire_priority=-999,
14 | hidden=True,
15 | desc="A plugin that check unknown command",
16 | version="1.0",
17 | author="js00000",
18 | )
19 | class Finish(Plugin):
20 | def __init__(self):
21 | super().__init__()
22 | self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context
23 | logger.info("[Finish] inited")
24 |
25 | def on_handle_context(self, e_context: EventContext):
26 | if e_context["context"].type != ContextType.TEXT:
27 | return
28 |
29 | content = e_context["context"].content
30 | logger.debug("[Finish] on_handle_context. content: %s" % content)
31 | trigger_prefix = conf().get("plugin_trigger_prefix", "$")
32 | if content.startswith(trigger_prefix):
33 | reply = Reply()
34 | reply.type = ReplyType.ERROR
35 | reply.content = "未知插件命令\n查看插件命令列表请输入#help 插件名\n"
36 | e_context["reply"] = reply
37 | e_context.action = EventAction.BREAK_PASS # 事件结束,并跳过处理context的默认逻辑
38 |
39 | def get_help_text(self, **kwargs):
40 | return ""
41 |
--------------------------------------------------------------------------------
/lib/itchat/config.py:
--------------------------------------------------------------------------------
1 | import os, platform
2 |
3 | VERSION = '1.5.0.dev'
4 |
5 | # use this envrionment to initialize the async & sync componment
6 | ASYNC_COMPONENTS = os.environ.get('ITCHAT_UOS_ASYNC', False)
7 |
8 | BASE_URL = 'https://login.weixin.qq.com'
9 | OS = platform.system() # Windows, Linux, Darwin
10 | DIR = os.getcwd()
11 | DEFAULT_QR = 'QR.png'
12 | TIMEOUT = (10, 60)
13 |
14 | USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36'
15 |
16 | UOS_PATCH_CLIENT_VERSION = '2.0.0'
17 | UOS_PATCH_EXTSPAM = 'Go8FCIkFEokFCggwMDAwMDAwMRAGGvAESySibk50w5Wb3uTl2c2h64jVVrV7gNs06GFlWplHQbY/5FfiO++1yH4ykCyNPWKXmco+wfQzK5R98D3so7rJ5LmGFvBLjGceleySrc3SOf2Pc1gVehzJgODeS0lDL3/I/0S2SSE98YgKleq6Uqx6ndTy9yaL9qFxJL7eiA/R3SEfTaW1SBoSITIu+EEkXff+Pv8NHOk7N57rcGk1w0ZzRrQDkXTOXFN2iHYIzAAZPIOY45Lsh+A4slpgnDiaOvRtlQYCt97nmPLuTipOJ8Qc5pM7ZsOsAPPrCQL7nK0I7aPrFDF0q4ziUUKettzW8MrAaiVfmbD1/VkmLNVqqZVvBCtRblXb5FHmtS8FxnqCzYP4WFvz3T0TcrOqwLX1M/DQvcHaGGw0B0y4bZMs7lVScGBFxMj3vbFi2SRKbKhaitxHfYHAOAa0X7/MSS0RNAjdwoyGHeOepXOKY+h3iHeqCvgOH6LOifdHf/1aaZNwSkGotYnYScW8Yx63LnSwba7+hESrtPa/huRmB9KWvMCKbDThL/nne14hnL277EDCSocPu3rOSYjuB9gKSOdVmWsj9Dxb/iZIe+S6AiG29Esm+/eUacSba0k8wn5HhHg9d4tIcixrxveflc8vi2/wNQGVFNsGO6tB5WF0xf/plngOvQ1/ivGV/C1Qpdhzznh0ExAVJ6dwzNg7qIEBaw+BzTJTUuRcPk92Sn6QDn2Pu3mpONaEumacjW4w6ipPnPw+g2TfywJjeEcpSZaP4Q3YV5HG8D6UjWA4GSkBKculWpdCMadx0usMomsSS/74QgpYqcPkmamB4nVv1JxczYITIqItIKjD35IGKAUwAA=='
18 |
--------------------------------------------------------------------------------
/common/token_bucket.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import time
3 |
4 |
5 | class TokenBucket:
6 | def __init__(self, tpm, timeout=None):
7 | self.capacity = int(tpm) # 令牌桶容量
8 | self.tokens = 0 # 初始令牌数为0
9 | self.rate = int(tpm) / 60 # 令牌每秒生成速率
10 | self.timeout = timeout # 等待令牌超时时间
11 | self.cond = threading.Condition() # 条件变量
12 | self.is_running = True
13 | # 开启令牌生成线程
14 | threading.Thread(target=self._generate_tokens).start()
15 |
16 | def _generate_tokens(self):
17 | """生成令牌"""
18 | while self.is_running:
19 | with self.cond:
20 | if self.tokens < self.capacity:
21 | self.tokens += 1
22 | self.cond.notify() # 通知获取令牌的线程
23 | time.sleep(1 / self.rate)
24 |
25 | def get_token(self):
26 | """获取令牌"""
27 | with self.cond:
28 | while self.tokens <= 0:
29 | flag = self.cond.wait(self.timeout)
30 | if not flag: # 超时
31 | return False
32 | self.tokens -= 1
33 | return True
34 |
35 | def close(self):
36 | self.is_running = False
37 |
38 |
39 | if __name__ == "__main__":
40 | token_bucket = TokenBucket(20, None) # 创建一个每分钟生产20个tokens的令牌桶
41 | # token_bucket = TokenBucket(20, 0.1)
42 | for i in range(3):
43 | if token_bucket.get_token():
44 | print(f"第{i+1}次请求成功")
45 | token_bucket.close()
46 |
--------------------------------------------------------------------------------
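A minimal usage sketch for `common/token_bucket.py` above (not part of the repository): gate outbound API calls with a bucket that refills at `tpm` tokens per minute, giving up if no token arrives within the timeout. `do_request` is a hypothetical placeholder for the real call.

```python
from common.token_bucket import TokenBucket

bucket = TokenBucket(20, timeout=0.5)  # ~20 calls per minute, wait up to 0.5s for a token

def rate_limited_call(payload):
    if not bucket.get_token():  # False means the wait timed out
        return None             # the caller decides whether to retry later
    return do_request(payload)  # do_request is hypothetical, stands in for the real API call
```

Since the refill thread is not a daemon, remember to call `bucket.close()` on shutdown, as the `__main__` block above does.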
/bot/baidu/baidu_unit_bot.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | import requests
4 |
5 | from bot.bot import Bot
6 | from bridge.reply import Reply, ReplyType
7 |
8 |
9 | # Baidu Unit对话接口 (可用, 但能力较弱)
10 | class BaiduUnitBot(Bot):
11 | def reply(self, query, context=None):
12 | token = self.get_token()
13 | url = "https://aip.baidubce.com/rpc/2.0/unit/service/v3/chat?access_token=" + token
14 | post_data = (
15 | '{"version":"3.0","service_id":"S73177","session_id":"","log_id":"7758521","skill_ids":["1221886"],"request":{"terminal_id":"88888","query":"'
16 | + query
17 | + '", "hyper_params": {"chat_custom_bot_profile": 1}}}'
18 | )
19 | print(post_data)
20 | headers = {"content-type": "application/x-www-form-urlencoded"}
21 | response = requests.post(url, data=post_data.encode(), headers=headers)
22 | if response:
23 | reply = Reply(
24 | ReplyType.TEXT,
25 | response.json()["result"]["context"]["SYS_PRESUMED_HIST"][1],
26 | )
27 | return reply
28 |
29 | def get_token(self):
30 | access_key = "YOUR_ACCESS_KEY"
31 | secret_key = "YOUR_SECRET_KEY"
32 | host = "https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=" + access_key + "&client_secret=" + secret_key
33 | response = requests.get(host)
34 | if response:
35 | print(response.json())
36 | return response.json()["access_token"]
37 |
--------------------------------------------------------------------------------
/lib/itchat/log.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | class LogSystem(object):
4 | handlerList = []
5 | showOnCmd = True
6 | loggingLevel = logging.INFO
7 | loggingFile = None
8 | def __init__(self):
9 | self.logger = logging.getLogger('itchat')
10 | self.logger.addHandler(logging.NullHandler())
11 | self.logger.setLevel(self.loggingLevel)
12 | self.cmdHandler = logging.StreamHandler()
13 | self.fileHandler = None
14 | self.logger.addHandler(self.cmdHandler)
15 | def set_logging(self, showOnCmd=True, loggingFile=None,
16 | loggingLevel=logging.INFO):
17 | if showOnCmd != self.showOnCmd:
18 | if showOnCmd:
19 | self.logger.addHandler(self.cmdHandler)
20 | else:
21 | self.logger.removeHandler(self.cmdHandler)
22 | self.showOnCmd = showOnCmd
23 | if loggingFile != self.loggingFile:
24 | if self.loggingFile is not None: # clear old fileHandler
25 | self.logger.removeHandler(self.fileHandler)
26 | self.fileHandler.close()
27 | if loggingFile is not None: # add new fileHandler
28 | self.fileHandler = logging.FileHandler(loggingFile)
29 | self.logger.addHandler(self.fileHandler)
30 | self.loggingFile = loggingFile
31 | if loggingLevel != self.loggingLevel:
32 | self.logger.setLevel(loggingLevel)
33 | self.loggingLevel = loggingLevel
34 |
35 | ls = LogSystem()
36 | set_logging = ls.set_logging
37 |
--------------------------------------------------------------------------------
/docker/Dockerfile.alpine:
--------------------------------------------------------------------------------
1 | FROM python:3.10-alpine
2 |
3 | LABEL maintainer="foo@bar.com"
4 | ARG TZ='Asia/Shanghai'
5 |
6 | ARG CHATGPT_ON_WECHAT_VER
7 |
8 | ENV BUILD_PREFIX=/app
9 |
10 | RUN apk add --no-cache \
11 | bash \
12 | curl \
13 | wget \
14 | && export BUILD_GITHUB_TAG=${CHATGPT_ON_WECHAT_VER:-`curl -sL "https://api.github.com/repos/zhayujie/chatgpt-on-wechat/releases/latest" | \
15 | grep '"tag_name":' | \
16 | sed -E 's/.*"([^"]+)".*/\1/'`} \
17 | && wget -t 3 -T 30 -nv -O chatgpt-on-wechat-${BUILD_GITHUB_TAG}.tar.gz \
18 | https://github.com/zhayujie/chatgpt-on-wechat/archive/refs/tags/${BUILD_GITHUB_TAG}.tar.gz \
19 | && tar -xzf chatgpt-on-wechat-${BUILD_GITHUB_TAG}.tar.gz \
20 | && mv chatgpt-on-wechat-${BUILD_GITHUB_TAG} ${BUILD_PREFIX} \
21 | && rm chatgpt-on-wechat-${BUILD_GITHUB_TAG}.tar.gz \
22 | && cd ${BUILD_PREFIX} \
23 | && cp config-template.json ${BUILD_PREFIX}/config.json \
24 | && /usr/local/bin/python -m pip install --no-cache --upgrade pip \
25 |     && pip install --no-cache -r requirements.txt --extra-index-url https://alpine-wheels.github.io/index \
26 |     && pip install --no-cache -r requirements-optional.txt --extra-index-url https://alpine-wheels.github.io/index \
27 | && apk del curl wget
28 |
29 | WORKDIR ${BUILD_PREFIX}
30 |
31 | ADD ./entrypoint.sh /entrypoint.sh
32 |
33 | RUN chmod +x /entrypoint.sh \
34 | && adduser -D -h /home/noroot -u 1000 -s /bin/bash noroot \
35 | && chown -R noroot:noroot ${BUILD_PREFIX}
36 |
37 | USER noroot
38 |
39 | ENTRYPOINT ["/entrypoint.sh"]
40 |
--------------------------------------------------------------------------------
/plugins/event.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | from enum import Enum
4 |
5 |
6 | class Event(Enum):
7 | ON_RECEIVE_MESSAGE = 1 # 收到消息
8 | """
9 | e_context = { "channel": 消息channel, "context" : 本次消息的context}
10 | """
11 |
12 | ON_HANDLE_CONTEXT = 2 # 处理消息前
13 | """
14 | e_context = { "channel": 消息channel, "context" : 本次消息的context, "reply" : 目前的回复,初始为空 }
15 | """
16 |
17 | ON_DECORATE_REPLY = 3 # 得到回复后准备装饰
18 | """
19 | e_context = { "channel": 消息channel, "context" : 本次消息的context, "reply" : 目前的回复 }
20 | """
21 |
22 | ON_SEND_REPLY = 4 # 发送回复前
23 | """
24 | e_context = { "channel": 消息channel, "context" : 本次消息的context, "reply" : 目前的回复 }
25 | """
26 |
27 | # AFTER_SEND_REPLY = 5 # 发送回复后
28 |
29 |
30 | class EventAction(Enum):
31 | CONTINUE = 1 # 事件未结束,继续交给下个插件处理,如果没有下个插件,则交付给默认的事件处理逻辑
32 | BREAK = 2 # 事件结束,不再给下个插件处理,交付给默认的事件处理逻辑
33 | BREAK_PASS = 3 # 事件结束,不再给下个插件处理,不交付给默认的事件处理逻辑
34 |
35 |
36 | class EventContext:
37 |     def __init__(self, event, econtext=None):
38 |         self.event = event
39 |         self.econtext = econtext if econtext is not None else {}  # avoid sharing a mutable default dict across instances
40 | self.action = EventAction.CONTINUE
41 |
42 | def __getitem__(self, key):
43 | return self.econtext[key]
44 |
45 | def __setitem__(self, key, value):
46 | self.econtext[key] = value
47 |
48 | def __delitem__(self, key):
49 | del self.econtext[key]
50 |
51 | def is_pass(self):
52 | return self.action == EventAction.BREAK_PASS
53 |
54 | def is_break(self):
55 | return self.action == EventAction.BREAK or self.action == EventAction.BREAK_PASS
56 |
--------------------------------------------------------------------------------
/docker/Dockerfile.debian:
--------------------------------------------------------------------------------
1 | FROM python:3.10
2 |
3 | LABEL maintainer="foo@bar.com"
4 | ARG TZ='Asia/Shanghai'
5 |
6 | ARG CHATGPT_ON_WECHAT_VER
7 |
8 | ENV BUILD_PREFIX=/app
9 |
10 | RUN apt-get update \
11 | && apt-get install -y --no-install-recommends \
12 | wget \
13 | curl \
14 | && rm -rf /var/lib/apt/lists/* \
15 | && export BUILD_GITHUB_TAG=${CHATGPT_ON_WECHAT_VER:-`curl -sL "https://api.github.com/repos/zhayujie/chatgpt-on-wechat/releases/latest" | \
16 | grep '"tag_name":' | \
17 | sed -E 's/.*"([^"]+)".*/\1/'`} \
18 | && wget -t 3 -T 30 -nv -O chatgpt-on-wechat-${BUILD_GITHUB_TAG}.tar.gz \
19 | https://github.com/zhayujie/chatgpt-on-wechat/archive/refs/tags/${BUILD_GITHUB_TAG}.tar.gz \
20 | && tar -xzf chatgpt-on-wechat-${BUILD_GITHUB_TAG}.tar.gz \
21 | && mv chatgpt-on-wechat-${BUILD_GITHUB_TAG} ${BUILD_PREFIX} \
22 | && rm chatgpt-on-wechat-${BUILD_GITHUB_TAG}.tar.gz \
23 | && cd ${BUILD_PREFIX} \
24 | && cp config-template.json ${BUILD_PREFIX}/config.json \
25 | && /usr/local/bin/python -m pip install --no-cache --upgrade pip \
26 | && pip install --no-cache -r requirements.txt \
27 | && pip install --no-cache -r requirements-optional.txt
28 |
29 | WORKDIR ${BUILD_PREFIX}
30 |
31 | ADD ./entrypoint.sh /entrypoint.sh
32 |
33 | RUN chmod +x /entrypoint.sh \
34 | && mkdir -p /home/noroot \
35 | && groupadd -r noroot \
36 | && useradd -r -g noroot -s /bin/bash -d /home/noroot noroot \
37 | && chown -R noroot:noroot /home/noroot ${BUILD_PREFIX} /usr/local/lib
38 |
39 | USER noroot
40 |
41 | ENTRYPOINT ["/entrypoint.sh"]
42 |
--------------------------------------------------------------------------------
/common/utils.py:
--------------------------------------------------------------------------------
1 | import io
2 | import os
3 |
4 | from PIL import Image
5 |
6 |
7 | def fsize(file):
8 | if isinstance(file, io.BytesIO):
9 | return file.getbuffer().nbytes
10 | elif isinstance(file, str):
11 | return os.path.getsize(file)
12 | elif hasattr(file, "seek") and hasattr(file, "tell"):
13 | pos = file.tell()
14 | file.seek(0, os.SEEK_END)
15 | size = file.tell()
16 | file.seek(pos)
17 | return size
18 | else:
19 | raise TypeError("Unsupported type")
20 |
21 |
22 | def compress_imgfile(file, max_size):
23 | if fsize(file) <= max_size:
24 | return file
25 | file.seek(0)
26 | img = Image.open(file)
27 | rgb_image = img.convert("RGB")
28 | quality = 95
29 | while True:
30 | out_buf = io.BytesIO()
31 | rgb_image.save(out_buf, "JPEG", quality=quality)
32 | if fsize(out_buf) <= max_size:
33 | return out_buf
34 | quality -= 5
35 |
36 |
37 | def split_string_by_utf8_length(string, max_length, max_split=0):
38 | encoded = string.encode("utf-8")
39 | start, end = 0, 0
40 | result = []
41 | while end < len(encoded):
42 | if max_split > 0 and len(result) >= max_split:
43 | result.append(encoded[start:].decode("utf-8"))
44 | break
45 | end = min(start + max_length, len(encoded))
46 | # 如果当前字节不是 UTF-8 编码的开始字节,则向前查找直到找到开始字节为止
47 | while end < len(encoded) and (encoded[end] & 0b11000000) == 0b10000000:
48 | end -= 1
49 | result.append(encoded[start:end].decode("utf-8"))
50 | start = end
51 | return result
52 |
--------------------------------------------------------------------------------
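A minimal sketch for `common/utils.py` above (not part of the repository), showing how `split_string_by_utf8_length` cuts on byte length without splitting a multi-byte character.

```python
from common.utils import split_string_by_utf8_length

# Each of these characters is 3 bytes in UTF-8 (12 bytes total); a 7-byte limit
# would land inside the third character, so the split backs up to a character boundary.
parts = split_string_by_utf8_length("你好世界", max_length=7)
print(parts)  # ['你好', '世界']
```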
/app.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | import os
4 | import signal
5 | import sys
6 |
7 | from channel import channel_factory
8 | from common.log import logger
9 | from config import conf, load_config
10 | from plugins import *
11 |
12 |
13 | def sigterm_handler_wrap(_signo):
14 | old_handler = signal.getsignal(_signo)
15 |
16 | def func(_signo, _stack_frame):
17 | logger.info("signal {} received, exiting...".format(_signo))
18 | conf().save_user_datas()
19 | if callable(old_handler): # check old_handler
20 | return old_handler(_signo, _stack_frame)
21 | sys.exit(0)
22 |
23 | signal.signal(_signo, func)
24 |
25 |
26 | def run():
27 | try:
28 | # load config
29 | load_config()
30 | # ctrl + c
31 | sigterm_handler_wrap(signal.SIGINT)
32 | # kill signal
33 | sigterm_handler_wrap(signal.SIGTERM)
34 |
35 | # create channel
36 | channel_name = conf().get("channel_type", "wx")
37 |
38 | if "--cmd" in sys.argv:
39 | channel_name = "terminal"
40 |
41 | if channel_name == "wxy":
42 | os.environ["WECHATY_LOG"] = "warn"
43 | # os.environ['WECHATY_PUPPET_SERVICE_ENDPOINT'] = '127.0.0.1:9001'
44 |
45 | channel = channel_factory.create_channel(channel_name)
46 | if channel_name in ["wx", "wxy", "terminal", "wechatmp", "wechatmp_service", "wechatcom_app"]:
47 | PluginManager().load_plugins()
48 |
49 | # startup channel
50 | channel.startup()
51 | except Exception as e:
52 | logger.error("App startup failed!")
53 | logger.exception(e)
54 |
55 |
56 | if __name__ == "__main__":
57 | run()
58 |
--------------------------------------------------------------------------------
/bot/openai/open_ai_image.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import openai
4 | import openai.error
5 |
6 | from common.log import logger
7 | from common.token_bucket import TokenBucket
8 | from config import conf
9 |
10 |
11 | # OPENAI提供的画图接口
12 | class OpenAIImage(object):
13 | def __init__(self):
14 | openai.api_key = conf().get("open_ai_api_key")
15 | if conf().get("rate_limit_dalle"):
16 | self.tb4dalle = TokenBucket(conf().get("rate_limit_dalle", 50))
17 |
18 | def create_img(self, query, retry_count=0, api_key=None):
19 | try:
20 | if conf().get("rate_limit_dalle") and not self.tb4dalle.get_token():
21 | return False, "请求太快了,请休息一下再问我吧"
22 | logger.info("[OPEN_AI] image_query={}".format(query))
23 | response = openai.Image.create(
24 | api_key=api_key,
25 | prompt=query, # 图片描述
26 | n=1, # 每次生成图片的数量
27 | size=conf().get("image_create_size", "256x256"), # 图片大小,可选有 256x256, 512x512, 1024x1024
28 | )
29 | image_url = response["data"][0]["url"]
30 | logger.info("[OPEN_AI] image_url={}".format(image_url))
31 | return True, image_url
32 | except openai.error.RateLimitError as e:
33 | logger.warn(e)
34 | if retry_count < 1:
35 | time.sleep(5)
36 | logger.warn("[OPEN_AI] ImgCreate RateLimit exceed, 第{}次重试".format(retry_count + 1))
37 | return self.create_img(query, retry_count + 1)
38 | else:
39 | return False, "提问太快啦,请休息一下再问我吧"
40 | except Exception as e:
41 | logger.exception(e)
42 | return False, str(e)
43 |
--------------------------------------------------------------------------------
/voice/baidu/README.md:
--------------------------------------------------------------------------------
1 | ## 说明
2 | 百度语音识别与合成参数说明
3 | 百度语音依赖,经常会出现问题,可能就是缺少依赖:
4 | pip install baidu-aip
5 | pip install pydub
6 | pip install pysilk
7 | 还有ffmpeg,不同系统安装方式不同
8 |
9 | 系统中收到的语音文件为mp3格式(wx)或者sil格式(wxy),如果要识别需要转换为pcm格式,转换后的文件为16k采样率,单声道,16bit的pcm文件
10 | 发送时又需要(wx)转换为mp3格式,转换后的文件为16k采样率,单声道,16bit的pcm文件,(wxy)转换为sil格式,还要计算声音长度,发送时需要带上声音长度
11 | 这些事情都在audio_convert.py中封装了,直接调用即可
12 |
13 |
14 | 参数说明
15 | 识别参数
16 | https://ai.baidu.com/ai-doc/SPEECH/Vk38lxily
17 | 合成参数
18 | https://ai.baidu.com/ai-doc/SPEECH/Gk38y8lzk
19 |
20 | ## 使用说明
21 | 分两个地方配置
22 |
23 | 1、对于def voiceToText(self, filename)函数中调用的百度语音识别API,中接口调用asr(参数)这个配置见CHATGPT-ON-WECHAT工程目录下的`config.json`文件和config.py文件。
24 | 参数 可需 描述
25 | app_id 必填 应用的APPID
26 | api_key 必填 应用的APIKey
27 | secret_key 必填 应用的SecretKey
28 | dev_pid 必填 语言选择,填写语言对应的dev_pid值
29 |
30 | 2、对于def textToVoice(self, text)函数中调用的百度语音合成API,中接口调用synthesis(参数)在本目录下的`config.json`文件中进行配置。
31 | 参数 可需 描述
32 | tex 必填 合成的文本,使用UTF-8编码,请注意文本长度必须小于1024字节
33 | lan 必填 固定值zh。语言选择,目前只有中英文混合模式,填写固定值zh
34 | spd 选填 语速,取值0-15,默认为5中语速
35 | pit 选填 音调,取值0-15,默认为5中语调
36 | vol 选填 音量,取值0-15,默认为5中音量(取值为0时为音量最小值,并非为无声)
37 | per(基础音库) 选填 度小宇=1,度小美=0,度逍遥(基础)=3,度丫丫=4
38 | per(精品音库) 选填 度逍遥(精品)=5003,度小鹿=5118,度博文=106,度小童=110,度小萌=111,度米朵=103,度小娇=5
39 | aue 选填 3为mp3格式(默认); 4为pcm-16k;5为pcm-8k;6为wav(内容同pcm-16k); 注意aue=4或者6是语音识别要求的格式,但是音频内容不是语音识别要求的自然人发音,所以识别效果会受影响。
40 |
41 | 关于per参数的说明,注意您购买的哪个音库,就填写哪个音库的参数,否则会报错。如果您购买的是基础音库,那么per参数只能填写0到4,如果您购买的是精品音库,那么per参数只能填写5003,5118,106,110,111,103,5其他的都会报错。
42 | ### 配置文件
43 |
44 | 将文件夹中`config.json.template`复制为`config.json`。
45 |
46 | ``` json
47 | {
48 | "lang": "zh",
49 | "ctp": 1,
50 | "spd": 5,
51 | "pit": 5,
52 | "vol": 5,
53 | "per": 0
54 | }
55 | ```
--------------------------------------------------------------------------------
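A minimal sketch related to `voice/baidu/README.md` above (not part of the repository), assuming `pydub` and `ffmpeg` are installed as the README suggests: convert a received mp3 into the 16kHz, mono, 16-bit PCM wav that the README says recognition expects. In the project this kind of conversion is wrapped by `voice/audio_convert.py`; the file names below are only placeholders.

```python
from pydub import AudioSegment

def mp3_to_pcm_wav(src_path: str, dst_path: str) -> None:
    audio = AudioSegment.from_file(src_path, format="mp3")
    # 16k sample rate, single channel, 2 bytes per sample (16-bit PCM)
    audio = audio.set_frame_rate(16000).set_channels(1).set_sample_width(2)
    audio.export(dst_path, format="wav")

mp3_to_pcm_wav("voice.mp3", "voice.wav")  # hypothetical file names
```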
/voice/google/google_voice.py:
--------------------------------------------------------------------------------
1 | """
2 | google voice service
3 | """
4 |
5 | import time
6 |
7 | import speech_recognition
8 | from gtts import gTTS
9 |
10 | from bridge.reply import Reply, ReplyType
11 | from common.log import logger
12 | from common.tmp_dir import TmpDir
13 | from voice.voice import Voice
14 |
15 |
16 | class GoogleVoice(Voice):
17 | recognizer = speech_recognition.Recognizer()
18 |
19 | def __init__(self):
20 | pass
21 |
22 | def voiceToText(self, voice_file):
23 | with speech_recognition.AudioFile(voice_file) as source:
24 | audio = self.recognizer.record(source)
25 | try:
26 | text = self.recognizer.recognize_google(audio, language="zh-CN")
27 | logger.info("[Google] voiceToText text={} voice file name={}".format(text, voice_file))
28 | reply = Reply(ReplyType.TEXT, text)
29 | except speech_recognition.UnknownValueError:
30 | reply = Reply(ReplyType.ERROR, "抱歉,我听不懂")
31 | except speech_recognition.RequestError as e:
32 | reply = Reply(ReplyType.ERROR, "抱歉,无法连接到 Google 语音识别服务;{0}".format(e))
33 | finally:
34 | return reply
35 |
36 | def textToVoice(self, text):
37 | try:
38 | # Avoid the same filename under multithreading
39 | mp3File = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3"
40 | tts = gTTS(text=text, lang="zh")
41 | tts.save(mp3File)
42 | logger.info("[Google] textToVoice text={} voice file name={}".format(text, mp3File))
43 | reply = Reply(ReplyType.VOICE, mp3File)
44 | except Exception as e:
45 | reply = Reply(ReplyType.ERROR, str(e))
46 | finally:
47 | return reply
48 |
--------------------------------------------------------------------------------
/bridge/context.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | from enum import Enum
4 |
5 |
6 | class ContextType(Enum):
7 | TEXT = 1 # 文本消息
8 | VOICE = 2 # 音频消息
9 | IMAGE = 3 # 图片消息
10 | IMAGE_CREATE = 10 # 创建图片命令
11 | JOIN_GROUP = 20 # 加入群聊
12 | PATPAT = 21 # 拍了拍
13 |
14 | def __str__(self):
15 | return self.name
16 |
17 |
18 | class Context:
19 |     def __init__(self, type: ContextType = None, content=None, kwargs=None):
20 |         self.type = type
21 |         self.content = content
22 |         self.kwargs = kwargs if kwargs is not None else {}  # avoid sharing a mutable default dict across instances
23 |
24 | def __contains__(self, key):
25 | if key == "type":
26 | return self.type is not None
27 | elif key == "content":
28 | return self.content is not None
29 | else:
30 | return key in self.kwargs
31 |
32 | def __getitem__(self, key):
33 | if key == "type":
34 | return self.type
35 | elif key == "content":
36 | return self.content
37 | else:
38 | return self.kwargs[key]
39 |
40 | def get(self, key, default=None):
41 | try:
42 | return self[key]
43 | except KeyError:
44 | return default
45 |
46 | def __setitem__(self, key, value):
47 | if key == "type":
48 | self.type = value
49 | elif key == "content":
50 | self.content = value
51 | else:
52 | self.kwargs[key] = value
53 |
54 | def __delitem__(self, key):
55 | if key == "type":
56 | self.type = None
57 | elif key == "content":
58 | self.content = None
59 | else:
60 | del self.kwargs[key]
61 |
62 | def __str__(self):
63 | return "Context(type={}, content={}, kwargs={})".format(self.type, self.content, self.kwargs)
64 |
--------------------------------------------------------------------------------
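A minimal sketch for `bridge/context.py` above (not part of the repository): `type` and `content` are first-class attributes, while any other key falls through to `kwargs`, which is what the channels and plugins elsewhere in this dump rely on. The key names below are only illustrative.

```python
from bridge.context import Context, ContextType

ctx = Context(ContextType.TEXT, "hello", kwargs={"isgroup": False, "session_id": "user_1"})
ctx["origin_ctype"] = ContextType.TEXT       # extra keys are stored in ctx.kwargs
print(ctx["type"], ctx.get("content"), ctx.get("isgroup"))
print("session_id" in ctx)                   # True: __contains__ falls back to kwargs
print(ctx)                                   # uses the __str__ defined above
```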
/common/time_check.py:
--------------------------------------------------------------------------------
1 | import hashlib
2 | import re
3 | import time
4 |
5 | import config
6 | from common.log import logger
7 |
8 |
9 | def time_checker(f):
10 | def _time_checker(self, *args, **kwargs):
11 | _config = config.conf()
12 | chat_time_module = _config.get("chat_time_module", False)
13 | if chat_time_module:
14 | chat_start_time = _config.get("chat_start_time", "00:00")
15 |             chat_stop_time = _config.get("chat_stop_time", "24:00")
16 | time_regex = re.compile(r"^([01]?[0-9]|2[0-4])(:)([0-5][0-9])$") # 时间匹配,包含24:00
17 |
18 |             starttime_format_check = time_regex.match(chat_start_time)  # 检查开始时间格式
19 |             stoptime_format_check = time_regex.match(chat_stop_time)  # 检查停止时间格式
20 |             chat_time_check = chat_start_time < chat_stop_time  # 确定启动时间<停止时间
21 |
22 | # 时间格式检查
23 | if not (starttime_format_check and stoptime_format_check and chat_time_check):
24 |                 logger.warn("时间格式不正确,请在config.json中修改您的CHAT_START_TIME/CHAT_STOP_TIME,否则可能会影响您正常使用,开始({})-结束({})".format(chat_start_time, chat_stop_time))
25 | if chat_start_time > "23:59":
26 | logger.error("启动时间可能存在问题,请修改!")
27 |
28 | # 服务时间检查
29 | now_time = time.strftime("%H:%M", time.localtime())
30 |             if chat_start_time <= now_time <= chat_stop_time:  # 服务时间内,正常返回回答
31 | f(self, *args, **kwargs)
32 | return None
33 | else:
34 | if args[0]["Content"] == "#更新配置": # 不在服务时间内也可以更新配置
35 | f(self, *args, **kwargs)
36 | else:
37 | logger.info("非服务时间内,不接受访问")
38 | return None
39 | else:
40 | f(self, *args, **kwargs) # 未开启时间模块则直接回答
41 |
42 | return _time_checker
43 |
--------------------------------------------------------------------------------
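A minimal sketch for `common/time_check.py` above (not part of the repository), assuming the global config has been loaded: the decorator expects the wrapped method's first positional argument to support `["Content"]`, and silently drops calls outside the configured `chat_start_time`/`chat_stop_time` window. The class and message dict below are hypothetical.

```python
from common.time_check import time_checker

class DemoHandler:
    @time_checker
    def handle(self, msg):
        print("handling:", msg["Content"])

DemoHandler().handle({"Content": "hi"})  # runs only inside service hours (or when chat_time_module is off)
```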
/.github/workflows/deploy-image.yml:
--------------------------------------------------------------------------------
1 | # This workflow uses actions that are not certified by GitHub.
2 | # They are provided by a third-party and are governed by
3 | # separate terms of service, privacy policy, and support
4 | # documentation.
5 |
6 | # GitHub recommends pinning actions to a commit SHA.
7 | # To get a newer version, you will need to update the SHA.
8 | # You can also reference a tag or branch, but the action may change without warning.
9 |
10 | name: Create and publish a Docker image
11 |
12 | on:
13 | push:
14 | branches: ['master']
15 | create:
16 | env:
17 | REGISTRY: ghcr.io
18 | IMAGE_NAME: ${{ github.repository }}
19 |
20 | jobs:
21 | build-and-push-image:
22 | runs-on: ubuntu-latest
23 | permissions:
24 | contents: read
25 | packages: write
26 |
27 | steps:
28 | - name: Checkout repository
29 | uses: actions/checkout@v3
30 |
31 | - name: Log in to the Container registry
32 | uses: docker/login-action@v2
33 | with:
34 | registry: ${{ env.REGISTRY }}
35 | username: ${{ github.actor }}
36 | password: ${{ secrets.GITHUB_TOKEN }}
37 |
38 | - name: Extract metadata (tags, labels) for Docker
39 | id: meta
40 | uses: docker/metadata-action@v4
41 | with:
42 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
43 |
44 | - name: Build and push Docker image
45 | uses: docker/build-push-action@v3
46 | with:
47 | context: .
48 | push: true
49 | file: ./docker/Dockerfile.latest
50 | tags: ${{ steps.meta.outputs.tags }}
51 | labels: ${{ steps.meta.outputs.labels }}
52 |
53 | - uses: actions/delete-package-versions@v4
54 | with:
55 | package-name: 'chatgpt-on-wechat'
56 | package-type: 'container'
57 | min-versions-to-keep: 10
58 | delete-only-untagged-versions: 'true'
59 | token: ${{ secrets.GITHUB_TOKEN }}
--------------------------------------------------------------------------------
/docker/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | # build prefix
5 | CHATGPT_ON_WECHAT_PREFIX=${CHATGPT_ON_WECHAT_PREFIX:-""}
6 | # path to config.json
7 | CHATGPT_ON_WECHAT_CONFIG_PATH=${CHATGPT_ON_WECHAT_CONFIG_PATH:-""}
8 | # execution command line
9 | CHATGPT_ON_WECHAT_EXEC=${CHATGPT_ON_WECHAT_EXEC:-""}
10 |
11 | # use environment variables to pass parameters
12 | # if you have not defined environment variables, set them below
13 | # export OPEN_AI_API_KEY=${OPEN_AI_API_KEY:-'YOUR API KEY'}
14 | # export OPEN_AI_PROXY=${OPEN_AI_PROXY:-""}
15 | # export SINGLE_CHAT_PREFIX=${SINGLE_CHAT_PREFIX:-'["bot", "@bot"]'}
16 | # export SINGLE_CHAT_REPLY_PREFIX=${SINGLE_CHAT_REPLY_PREFIX:-'"[bot] "'}
17 | # export GROUP_CHAT_PREFIX=${GROUP_CHAT_PREFIX:-'["@bot"]'}
18 | # export GROUP_NAME_WHITE_LIST=${GROUP_NAME_WHITE_LIST:-'["ChatGPT测试群", "ChatGPT测试群2"]'}
19 | # export IMAGE_CREATE_PREFIX=${IMAGE_CREATE_PREFIX:-'["画", "看", "找"]'}
20 | # export CONVERSATION_MAX_TOKENS=${CONVERSATION_MAX_TOKENS:-"1000"}
21 | # export SPEECH_RECOGNITION=${SPEECH_RECOGNITION:-"False"}
22 | # export CHARACTER_DESC=${CHARACTER_DESC:-"你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。"}
23 | # export EXPIRES_IN_SECONDS=${EXPIRES_IN_SECONDS:-"3600"}
24 |
25 | # CHATGPT_ON_WECHAT_PREFIX is empty, use /app
26 | if [ "$CHATGPT_ON_WECHAT_PREFIX" == "" ] ; then
27 | CHATGPT_ON_WECHAT_PREFIX=/app
28 | fi
29 |
30 | # CHATGPT_ON_WECHAT_CONFIG_PATH is empty, use '/app/config.json'
31 | if [ "$CHATGPT_ON_WECHAT_CONFIG_PATH" == "" ] ; then
32 | CHATGPT_ON_WECHAT_CONFIG_PATH=$CHATGPT_ON_WECHAT_PREFIX/config.json
33 | fi
34 |
35 | # CHATGPT_ON_WECHAT_EXEC is empty, use 'python app.py'
36 | if [ "$CHATGPT_ON_WECHAT_EXEC" == "" ] ; then
37 | CHATGPT_ON_WECHAT_EXEC="python app.py"
38 | fi
39 |
40 | # modify content in config.json
41 | # if [ "$OPEN_AI_API_KEY" == "YOUR API KEY" ] || [ "$OPEN_AI_API_KEY" == "" ]; then
42 | # echo -e "\033[31m[Warning] You need to set OPEN_AI_API_KEY before running!\033[0m"
43 | # fi
44 |
45 |
46 | # go to prefix dir
47 | cd $CHATGPT_ON_WECHAT_PREFIX
48 | # execute
49 | $CHATGPT_ON_WECHAT_EXEC
50 |
51 |
52 |
--------------------------------------------------------------------------------
/translate/baidu/baidu_translate.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import random
4 | from hashlib import md5
5 |
6 | import requests
7 |
8 | from config import conf
9 | from translate.translator import Translator
10 |
11 |
12 | class BaiduTranslator(Translator):
13 | def __init__(self) -> None:
14 | super().__init__()
15 | endpoint = "http://api.fanyi.baidu.com"
16 | path = "/api/trans/vip/translate"
17 | self.url = endpoint + path
18 | self.appid = conf().get("baidu_translate_app_id")
19 | self.appkey = conf().get("baidu_translate_app_key")
20 | if not self.appid or not self.appkey:
21 | raise Exception("baidu translate appid or appkey not set")
22 |
23 |     # For the list of language codes, refer to `https://api.fanyi.baidu.com/doc/21`; they need to be converted to ISO 639-1 codes
24 | def translate(self, query: str, from_lang: str = "", to_lang: str = "en") -> str:
25 | if not from_lang:
26 |             from_lang = "auto"  # baidu supports auto detection
27 | salt = random.randint(32768, 65536)
28 | sign = self.make_md5("{}{}{}{}".format(self.appid, query, salt, self.appkey))
29 | headers = {"Content-Type": "application/x-www-form-urlencoded"}
30 | payload = {"appid": self.appid, "q": query, "from": from_lang, "to": to_lang, "salt": salt, "sign": sign}
31 |
32 | retry_cnt = 3
33 | while retry_cnt:
34 | r = requests.post(self.url, params=payload, headers=headers)
35 | result = r.json()
36 | errcode = result.get("error_code", "52000")
37 | if errcode != "52000":
38 | if errcode == "52001" or errcode == "52002":
39 | retry_cnt -= 1
40 | continue
41 | else:
42 | raise Exception(result["error_msg"])
43 | else:
44 | break
45 | text = "\n".join([item["dst"] for item in result["trans_result"]])
46 | return text
47 |
48 | def make_md5(self, s, encoding="utf-8"):
49 | return md5(s.encode(encoding)).hexdigest()
50 |
--------------------------------------------------------------------------------
/channel/wechatcom/wechatcomapp_message.py:
--------------------------------------------------------------------------------
1 | from wechatpy.enterprise import WeChatClient
2 |
3 | from bridge.context import ContextType
4 | from channel.chat_message import ChatMessage
5 | from common.log import logger
6 | from common.tmp_dir import TmpDir
7 |
8 |
9 | class WechatComAppMessage(ChatMessage):
10 | def __init__(self, msg, client: WeChatClient, is_group=False):
11 | super().__init__(msg)
12 | self.msg_id = msg.id
13 | self.create_time = msg.time
14 | self.is_group = is_group
15 |
16 | if msg.type == "text":
17 | self.ctype = ContextType.TEXT
18 | self.content = msg.content
19 | elif msg.type == "voice":
20 | self.ctype = ContextType.VOICE
21 | self.content = TmpDir().path() + msg.media_id + "." + msg.format # content直接存临时目录路径
22 |
23 | def download_voice():
24 | # 如果响应状态码是200,则将响应内容写入本地文件
25 | response = client.media.download(msg.media_id)
26 | if response.status_code == 200:
27 | with open(self.content, "wb") as f:
28 | f.write(response.content)
29 | else:
30 | logger.info(f"[wechatcom] Failed to download voice file, {response.content}")
31 |
32 | self._prepare_fn = download_voice
33 | elif msg.type == "image":
34 | self.ctype = ContextType.IMAGE
35 | self.content = TmpDir().path() + msg.media_id + ".png" # content直接存临时目录路径
36 |
37 | def download_image():
38 | # 如果响应状态码是200,则将响应内容写入本地文件
39 | response = client.media.download(msg.media_id)
40 | if response.status_code == 200:
41 | with open(self.content, "wb") as f:
42 | f.write(response.content)
43 | else:
44 | logger.info(f"[wechatcom] Failed to download image file, {response.content}")
45 |
46 | self._prepare_fn = download_image
47 | else:
48 | raise NotImplementedError("Unsupported message type: Type:{} ".format(msg.type))
49 |
50 | self.from_user_id = msg.source
51 | self.to_user_id = msg.target
52 | self.other_user_id = msg.source
53 |
--------------------------------------------------------------------------------
/channel/chat_message.py:
--------------------------------------------------------------------------------
1 | """
2 | 本类表示聊天消息,用于对itchat和wechaty的消息进行统一的封装。
3 |
4 | 填好必填项(非群聊6个,群聊8个),即可接入ChatChannel,并支持插件,参考TerminalChannel
5 |
6 | ChatMessage
7 | msg_id: 消息id (必填)
8 | create_time: 消息创建时间
9 |
10 | ctype: 消息类型 : ContextType (必填)
11 | content: 消息内容, 如果是声音/图片,这里是文件路径 (必填)
12 |
13 | from_user_id: 发送者id (必填)
14 | from_user_nickname: 发送者昵称
15 | to_user_id: 接收者id (必填)
16 | to_user_nickname: 接收者昵称
17 |
18 | other_user_id: 对方的id,如果你是发送者,那这个就是接收者id,如果你是接收者,那这个就是发送者id,如果是群消息,那这一直是群id (必填)
19 | other_user_nickname: 同上
20 |
21 | is_group: 是否是群消息 (群聊必填)
22 | is_at: 是否被at
23 |
24 | - (群消息时,一般会存在实际发送者,是群内某个成员的id和昵称,下列项仅在群消息时存在)
25 | actual_user_id: 实际发送者id (群聊必填)
26 | actual_user_nickname:实际发送者昵称
27 |
28 |
29 |
30 |
31 | _prepare_fn: 准备函数,用于准备消息的内容,比如下载图片等,
32 | _prepared: 是否已经调用过准备函数
33 | _rawmsg: 原始消息对象
34 |
35 | """
36 |
37 |
38 | class ChatMessage(object):
39 | msg_id = None
40 | create_time = None
41 |
42 | ctype = None
43 | content = None
44 |
45 | from_user_id = None
46 | from_user_nickname = None
47 | to_user_id = None
48 | to_user_nickname = None
49 | other_user_id = None
50 | other_user_nickname = None
51 |
52 | is_group = False
53 | is_at = False
54 | actual_user_id = None
55 | actual_user_nickname = None
56 |
57 | _prepare_fn = None
58 | _prepared = False
59 | _rawmsg = None
60 |
61 | def __init__(self, _rawmsg):
62 | self._rawmsg = _rawmsg
63 |
64 | def prepare(self):
65 | if self._prepare_fn and not self._prepared:
66 | self._prepared = True
67 | self._prepare_fn()
68 |
69 | def __str__(self):
70 | return "ChatMessage: id={}, create_time={}, ctype={}, content={}, from_user_id={}, from_user_nickname={}, to_user_id={}, to_user_nickname={}, other_user_id={}, other_user_nickname={}, is_group={}, is_at={}, actual_user_id={}, actual_user_nickname={}".format(
71 | self.msg_id,
72 | self.create_time,
73 | self.ctype,
74 | self.content,
75 | self.from_user_id,
76 | self.from_user_nickname,
77 | self.to_user_id,
78 | self.to_user_nickname,
79 | self.other_user_id,
80 | self.other_user_nickname,
81 | self.is_group,
82 | self.is_at,
83 | self.actual_user_id,
84 | self.actual_user_nickname,
85 | )
86 |
--------------------------------------------------------------------------------
/channel/wechatmp/wechatmp_message.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-#
2 |
3 | from bridge.context import ContextType
4 | from channel.chat_message import ChatMessage
5 | from common.log import logger
6 | from common.tmp_dir import TmpDir
7 |
8 |
9 | class WeChatMPMessage(ChatMessage):
10 | def __init__(self, msg, client=None):
11 | super().__init__(msg)
12 | self.msg_id = msg.id
13 | self.create_time = msg.time
14 | self.is_group = False
15 |
16 | if msg.type == "text":
17 | self.ctype = ContextType.TEXT
18 | self.content = msg.content
19 | elif msg.type == "voice":
20 |             if msg.recognition is None:
21 | self.ctype = ContextType.VOICE
22 | self.content = TmpDir().path() + msg.media_id + "." + msg.format # content直接存临时目录路径
23 |
24 | def download_voice():
25 | # 如果响应状态码是200,则将响应内容写入本地文件
26 | response = client.media.download(msg.media_id)
27 | if response.status_code == 200:
28 | with open(self.content, "wb") as f:
29 | f.write(response.content)
30 | else:
31 | logger.info(f"[wechatmp] Failed to download voice file, {response.content}")
32 |
33 | self._prepare_fn = download_voice
34 | else:
35 | self.ctype = ContextType.TEXT
36 | self.content = msg.recognition
37 | elif msg.type == "image":
38 | self.ctype = ContextType.IMAGE
39 | self.content = TmpDir().path() + msg.media_id + ".png" # content直接存临时目录路径
40 |
41 | def download_image():
42 | # 如果响应状态码是200,则将响应内容写入本地文件
43 | response = client.media.download(msg.media_id)
44 | if response.status_code == 200:
45 | with open(self.content, "wb") as f:
46 | f.write(response.content)
47 | else:
48 | logger.info(f"[wechatmp] Failed to download image file, {response.content}")
49 |
50 | self._prepare_fn = download_image
51 | else:
52 | raise NotImplementedError("Unsupported message type: Type:{} ".format(msg.type))
53 |
54 | self.from_user_id = msg.source
55 | self.to_user_id = msg.target
56 | self.other_user_id = msg.source
57 |
--------------------------------------------------------------------------------
/channel/wechatcom/README.md:
--------------------------------------------------------------------------------
1 | # 企业微信应用号channel
2 |
3 | 企业微信官方提供了客服、应用等API,本channel使用的是企业微信的自建应用API的能力。
4 |
5 | 因为未来可能还会开发客服能力,所以本channel的类型名叫作`wechatcom_app`。
6 |
7 | `wechatcom_app` channel支持插件系统和图片声音交互等能力,除了无法加入群聊,作为个人使用的私人助理已绰绰有余。
8 |
9 | ## 开始之前
10 |
11 | - 在企业中确认自己拥有在企业内自建应用的权限。
12 | - 如果没有权限或者是个人用户,也可创建未认证的企业。操作方式:登录手机企业微信,选择`创建/加入企业`来创建企业,类型请选择企业,企业名称可随意填写。
13 | 未认证的企业有100人的服务人数上限,其他功能与认证企业没有差异。
14 |
15 | 本channel需安装的依赖与公众号一致,需要安装`wechatpy`和`web.py`,它们包含在`requirements-optional.txt`中。
16 |
17 | 此外,如果你是`Linux`系统,除了`ffmpeg`还需要安装`amr`编码器,否则会出现找不到编码器的错误,无法正常使用语音功能。
18 |
19 | - Ubuntu/Debian
20 |
21 | ```bash
22 | apt-get install libavcodec-extra
23 | ```
24 |
25 | - Alpine
26 |
27 | 需自行编译`ffmpeg`,在编译参数里加入`amr`编码器的支持
28 |
29 | ## 使用方法
30 |
31 | 1.查看企业ID
32 |
33 | - 扫码登陆[企业微信后台](https://work.weixin.qq.com)
34 | - 选择`我的企业`,点击`企业信息`,记住该`企业ID`
35 |
36 | 2.创建自建应用
37 |
38 | - 选择应用管理, 在自建区选创建应用来创建企业自建应用
39 | - 上传应用logo,填写应用名称等项
40 | - 创建应用后进入应用详情页面,记住`AgentId`和`Secret`
41 |
42 | 3.配置应用
43 |
44 | - 在详情页点击`企业可信IP`的配置(没看到可以不管),填入你服务器的公网IP,如果不知道可以先不填
45 | - 点击`接收消息`下的启用API接收消息
46 | - `URL`填写格式为`http://url:port/wxcomapp`,`port`是程序监听的端口,默认是9898
47 | 如果是未认证的企业,url可直接使用服务器的IP。如果是认证企业,需要使用备案的域名,可使用二级域名。
48 | - `Token`可随意填写,停留在这个页面
49 | - 在程序根目录`config.json`中增加配置(**去掉注释**),`wechatcomapp_aes_key`是当前页面的`EncodingAESKey`
50 |
51 | ```python
52 | "channel_type": "wechatcom_app",
53 | "wechatcom_corp_id": "", # 企业微信公司的corpID
54 | "wechatcomapp_token": "", # 企业微信app的token
55 | "wechatcomapp_port": 9898, # 企业微信app的服务端口, 不需要端口转发
56 | "wechatcomapp_secret": "", # 企业微信app的secret
57 | "wechatcomapp_agent_id": "", # 企业微信app的agent_id
58 | "wechatcomapp_aes_key": "", # 企业微信app的aes_key
59 | ```
60 |
61 | - 运行程序,在页面中点击保存,保存成功说明验证成功
62 |
63 | 4.连接个人微信
64 |
65 | 选择`我的企业`,点击`微信插件`,下面有个邀请关注的二维码。微信扫码后,即可在微信中看到对应企业,在这里你便可以和机器人沟通。
66 |
67 | 向机器人发送消息,如果日志里出现报错:
68 |
69 | ```bash
70 | Error code: 60020, message: "not allow to access from your ip, ...from ip: xx.xx.xx.xx"
71 | ```
72 |
73 | 意思是IP不可信,需要参考上一步的`企业可信IP`配置,把这里的IP加进去。
74 |
75 | ~~### Railway部署方式~~(2023-06-08已失效)
76 |
77 | ~~公众号不能在`Railway`上部署,但企业微信应用[可以](https://railway.app/template/-FHS--?referralCode=RC3znh)!~~
78 |
79 | ~~填写配置后,将部署完成后的网址```**.railway.app/wxcomapp```,填写在上一步的URL中。发送信息后观察日志,把报错的IP加入到可信IP。(每次重启后都需要加入可信IP)~~
80 |
81 | ## 测试体验
82 |
83 | AIGC开放社区中已经部署了多个可免费使用的Bot,扫描下方的二维码会自动邀请你来体验。
84 |
85 |
86 |
--------------------------------------------------------------------------------
/plugins/keyword/keyword.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | import json
4 | import os
5 |
6 | import plugins
7 | from bridge.context import ContextType
8 | from bridge.reply import Reply, ReplyType
9 | from common.log import logger
10 | from plugins import *
11 |
12 |
13 | @plugins.register(
14 | name="Keyword",
15 | desire_priority=900,
16 | hidden=True,
17 | desc="关键词匹配过滤",
18 | version="0.1",
19 | author="fengyege.top",
20 | )
21 | class Keyword(Plugin):
22 | def __init__(self):
23 | super().__init__()
24 | try:
25 | curdir = os.path.dirname(__file__)
26 | config_path = os.path.join(curdir, "config.json")
27 | conf = None
28 | if not os.path.exists(config_path):
29 | logger.debug(f"[keyword]不存在配置文件{config_path}")
30 | conf = {"keyword": {}}
31 | with open(config_path, "w", encoding="utf-8") as f:
32 | json.dump(conf, f, indent=4)
33 | else:
34 | logger.debug(f"[keyword]加载配置文件{config_path}")
35 | with open(config_path, "r", encoding="utf-8") as f:
36 | conf = json.load(f)
37 | # 加载关键词
38 | self.keyword = conf["keyword"]
39 |
40 | logger.info("[keyword] {}".format(self.keyword))
41 | self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context
42 | logger.info("[keyword] inited.")
43 | except Exception as e:
44 | logger.warn("[keyword] init failed, ignore or see https://github.com/zhayujie/chatgpt-on-wechat/tree/master/plugins/keyword .")
45 | raise e
46 |
47 | def on_handle_context(self, e_context: EventContext):
48 | if e_context["context"].type != ContextType.TEXT:
49 | return
50 |
51 | content = e_context["context"].content.strip()
52 | logger.debug("[keyword] on_handle_context. content: %s" % content)
53 | if content in self.keyword:
54 | logger.debug(f"[keyword] 匹配到关键字【{content}】")
55 | reply_text = self.keyword[content]
56 |
57 | reply = Reply()
58 | reply.type = ReplyType.TEXT
59 | reply.content = reply_text
60 | e_context["reply"] = reply
61 | e_context.action = EventAction.BREAK_PASS # 事件结束,并跳过处理context的默认逻辑
62 |
63 | def get_help_text(self, **kwargs):
64 | help_text = "关键词过滤"
65 | return help_text
66 |
--------------------------------------------------------------------------------
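A hedged sketch for `plugins/keyword/keyword.py` above (not part of the repository): the plugin expects `plugins/keyword/config.json` to hold a flat mapping from exact message text to the reply that should be sent back. The entries below are only illustrative.

```python
import json

config = {
    "keyword": {
        "help": "Type #help to see the command list",  # exact-match trigger -> fixed reply
        "官网": "https://example.com",
    }
}
with open("plugins/keyword/config.json", "w", encoding="utf-8") as f:
    json.dump(config, f, ensure_ascii=False, indent=4)
```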
/channel/wechatmp/wechatmp_client.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import time
3 |
4 | from wechatpy.client import WeChatClient
5 | from wechatpy.exceptions import APILimitedException
6 |
7 | from channel.wechatmp.common import *
8 | from common.log import logger
9 |
10 |
11 | class WechatMPClient(WeChatClient):
12 | def __init__(self, appid, secret, access_token=None, session=None, timeout=None, auto_retry=True):
13 | super(WechatMPClient, self).__init__(appid, secret, access_token, session, timeout, auto_retry)
14 | self.fetch_access_token_lock = threading.Lock()
15 | self.clear_quota_lock = threading.Lock()
16 | self.last_clear_quota_time = -1
17 |
18 | def clear_quota(self):
19 | return self.post("clear_quota", data={"appid": self.appid})
20 |
21 | def clear_quota_v2(self):
22 | return self.post("clear_quota/v2", params={"appid": self.appid, "appsecret": self.secret})
23 |
24 | def fetch_access_token(self): # 重载父类方法,加锁避免多线程重复获取access_token
25 | with self.fetch_access_token_lock:
26 | access_token = self.session.get(self.access_token_key)
27 | if access_token:
28 | if not self.expires_at:
29 | return access_token
30 | timestamp = time.time()
31 | if self.expires_at - timestamp > 60:
32 | return access_token
33 | return super().fetch_access_token()
34 |
35 | def _request(self, method, url_or_endpoint, **kwargs): # 重载父类方法,遇到API限流时,清除quota后重试
36 | try:
37 | return super()._request(method, url_or_endpoint, **kwargs)
38 | except APILimitedException as e:
39 |             logger.error("[wechatmp] API quota has been used up. {}".format(e))
40 | if self.last_clear_quota_time == -1 or time.time() - self.last_clear_quota_time > 60:
41 | with self.clear_quota_lock:
42 | if self.last_clear_quota_time == -1 or time.time() - self.last_clear_quota_time > 60:
43 | self.last_clear_quota_time = time.time()
44 | response = self.clear_quota_v2()
45 |                         logger.debug("[wechatmp] API quota has been cleared, {}".format(response))
46 | return super()._request(method, url_or_endpoint, **kwargs)
47 | else:
48 |                 logger.error("[wechatmp] last clear quota time is {}, less than 60s, skip clearing quota".format(self.last_clear_quota_time))
49 | raise e
50 |
--------------------------------------------------------------------------------
/bridge/bridge.py:
--------------------------------------------------------------------------------
1 | from bot.bot_factory import create_bot
2 | from bridge.context import Context
3 | from bridge.reply import Reply
4 | from common import const
5 | from common.log import logger
6 | from common.singleton import singleton
7 | from config import conf
8 | from translate.factory import create_translator
9 | from voice.factory import create_voice
10 |
11 |
12 | @singleton
13 | class Bridge(object):
14 | def __init__(self):
15 | self.btype = {
16 | "chat": const.CHATGPT,
17 | "voice_to_text": conf().get("voice_to_text", "openai"),
18 | "text_to_voice": conf().get("text_to_voice", "google"),
19 | "translate": conf().get("translate", "baidu"),
20 | }
21 | model_type = conf().get("model")
22 | if model_type in ["text-davinci-003"]:
23 | self.btype["chat"] = const.OPEN_AI
24 | if conf().get("use_azure_chatgpt", False):
25 | self.btype["chat"] = const.CHATGPTONAZURE
26 | if conf().get("use_linkai") and conf().get("linkai_api_key"):
27 | self.btype["chat"] = const.LINKAI
28 | self.bots = {}
29 |
30 | def get_bot(self, typename):
31 | if self.bots.get(typename) is None:
32 | logger.info("create bot {} for {}".format(self.btype[typename], typename))
33 | if typename == "text_to_voice":
34 | self.bots[typename] = create_voice(self.btype[typename])
35 | elif typename == "voice_to_text":
36 | self.bots[typename] = create_voice(self.btype[typename])
37 | elif typename == "chat":
38 | self.bots[typename] = create_bot(self.btype[typename])
39 | elif typename == "translate":
40 | self.bots[typename] = create_translator(self.btype[typename])
41 | return self.bots[typename]
42 |
43 | def get_bot_type(self, typename):
44 | return self.btype[typename]
45 |
46 | def fetch_reply_content(self, query, context: Context) -> Reply:
47 | return self.get_bot("chat").reply(query, context)
48 |
49 | def fetch_voice_to_text(self, voiceFile) -> Reply:
50 | return self.get_bot("voice_to_text").voiceToText(voiceFile)
51 |
52 | def fetch_text_to_voice(self, text) -> Reply:
53 | return self.get_bot("text_to_voice").textToVoice(text)
54 |
55 | def fetch_translate(self, text, from_lang="", to_lang="en") -> Reply:
56 | return self.get_bot("translate").translate(text, from_lang, to_lang)
57 |
--------------------------------------------------------------------------------
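A minimal sketch for `bridge/bridge.py` above (not part of the repository), assuming the global config has been loaded (see `load_config()` in app.py) and the relevant API keys are set: the singleton lazily creates one bot per capability and routes each call to it.

```python
from bridge.bridge import Bridge
from bridge.context import Context, ContextType

bridge = Bridge()                                     # @singleton: always the same instance
context = Context(ContextType.TEXT, "hello", kwargs={"session_id": "demo"})
reply = bridge.fetch_reply_content("hello", context)  # dispatched to the configured chat bot
print(reply.type, reply.content)
```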
/common/sorted_dict.py:
--------------------------------------------------------------------------------
1 | import heapq
2 |
3 |
4 | class SortedDict(dict):
5 | def __init__(self, sort_func=lambda k, v: k, init_dict=None, reverse=False):
6 | if init_dict is None:
7 | init_dict = []
8 | if isinstance(init_dict, dict):
9 | init_dict = init_dict.items()
10 | self.sort_func = sort_func
11 | self.sorted_keys = None
12 | self.reverse = reverse
13 | self.heap = []
14 | for k, v in init_dict:
15 | self[k] = v
16 |
17 | def __setitem__(self, key, value):
18 | if key in self:
19 | super().__setitem__(key, value)
20 | for i, (priority, k) in enumerate(self.heap):
21 | if k == key:
22 | self.heap[i] = (self.sort_func(key, value), key)
23 | heapq.heapify(self.heap)
24 | break
25 | self.sorted_keys = None
26 | else:
27 | super().__setitem__(key, value)
28 | heapq.heappush(self.heap, (self.sort_func(key, value), key))
29 | self.sorted_keys = None
30 |
31 | def __delitem__(self, key):
32 | super().__delitem__(key)
33 | for i, (priority, k) in enumerate(self.heap):
34 | if k == key:
35 | del self.heap[i]
36 | heapq.heapify(self.heap)
37 | break
38 | self.sorted_keys = None
39 |
40 | def keys(self):
41 | if self.sorted_keys is None:
42 | self.sorted_keys = [k for _, k in sorted(self.heap, reverse=self.reverse)]
43 | return self.sorted_keys
44 |
45 | def items(self):
46 | if self.sorted_keys is None:
47 | self.sorted_keys = [k for _, k in sorted(self.heap, reverse=self.reverse)]
48 | sorted_items = [(k, self[k]) for k in self.sorted_keys]
49 | return sorted_items
50 |
51 | def _update_heap(self, key):
52 | for i, (priority, k) in enumerate(self.heap):
53 | if k == key:
54 | new_priority = self.sort_func(key, self[key])
55 | if new_priority != priority:
56 | self.heap[i] = (new_priority, key)
57 | heapq.heapify(self.heap)
58 | self.sorted_keys = None
59 | break
60 |
61 | def __iter__(self):
62 | return iter(self.keys())
63 |
64 | def __repr__(self):
65 | return f"{type(self).__name__}({dict(self)}, sort_func={self.sort_func.__name__}, reverse={self.reverse})"
66 |
--------------------------------------------------------------------------------
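A minimal sketch for `common/sorted_dict.py` above (not part of the repository): sort entries by value rather than key, largest first, which is the shape a priority registry (for example, plugin priorities) would use.

```python
from common.sorted_dict import SortedDict

d = SortedDict(sort_func=lambda k, v: v, reverse=True)
d["hello"] = 10
d["keyword"] = 900
d["finish"] = -999
print(d.keys())   # ['keyword', 'hello', 'finish'] -- ordered by value, descending
print(d.items())  # [('keyword', 900), ('hello', 10), ('finish', -999)]
```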
/lib/itchat/returnvalues.py:
--------------------------------------------------------------------------------
1 | #coding=utf8
2 | TRANSLATE = 'Chinese'
3 |
4 | class ReturnValue(dict):
5 | ''' turn return value of itchat into a boolean value
6 | for requests:
7 | ..code::python
8 |
9 | import requests
10 | r = requests.get('http://httpbin.org/get')
11 |             print(ReturnValue(rawResponse=r))
12 |
13 | for normal dict:
14 | ..code::python
15 |
16 | returnDict = {
17 | 'BaseResponse': {
18 | 'Ret': 0,
19 | 'ErrMsg': 'My error msg', }, }
20 | print(ReturnValue(returnDict))
21 | '''
22 | def __init__(self, returnValueDict={}, rawResponse=None):
23 | if rawResponse:
24 | try:
25 | returnValueDict = rawResponse.json()
26 | except ValueError:
27 | returnValueDict = {
28 | 'BaseResponse': {
29 | 'Ret': -1004,
30 | 'ErrMsg': 'Unexpected return value', },
31 | 'Data': rawResponse.content, }
32 | for k, v in returnValueDict.items():
33 | self[k] = v
34 | if not 'BaseResponse' in self:
35 | self['BaseResponse'] = {
36 | 'ErrMsg': 'no BaseResponse in raw response',
37 | 'Ret': -1000, }
38 | if TRANSLATE:
39 | self['BaseResponse']['RawMsg'] = self['BaseResponse'].get('ErrMsg', '')
40 | self['BaseResponse']['ErrMsg'] = \
41 | TRANSLATION[TRANSLATE].get(
42 | self['BaseResponse'].get('Ret', '')) \
43 | or self['BaseResponse'].get('ErrMsg', u'No ErrMsg')
44 | self['BaseResponse']['RawMsg'] = \
45 | self['BaseResponse']['RawMsg'] or self['BaseResponse']['ErrMsg']
46 | def __nonzero__(self):
47 | return self['BaseResponse'].get('Ret') == 0
48 | def __bool__(self):
49 | return self.__nonzero__()
50 | def __str__(self):
51 | return '{%s}' % ', '.join(
52 | ['%s: %s' % (repr(k),repr(v)) for k,v in self.items()])
53 | def __repr__(self):
54 |         return '<ItchatReturnValue: %s>' % self.__str__()
55 |
56 | TRANSLATION = {
57 | 'Chinese': {
58 | -1000: u'返回值不带BaseResponse',
59 | -1001: u'无法找到对应的成员',
60 | -1002: u'文件位置错误',
61 | -1003: u'服务器拒绝连接',
62 | -1004: u'服务器返回异常值',
63 | -1005: u'参数错误',
64 | -1006: u'无效操作',
65 | 0: u'请求成功',
66 | },
67 | }
68 |
--------------------------------------------------------------------------------
/voice/pytts/pytts_voice.py:
--------------------------------------------------------------------------------
1 | """
2 | pytts voice service (offline)
3 | """
4 |
5 | import os
6 | import sys
7 | import time
8 |
9 | import pyttsx3
10 |
11 | from bridge.reply import Reply, ReplyType
12 | from common.log import logger
13 | from common.tmp_dir import TmpDir
14 | from voice.voice import Voice
15 |
16 |
17 | class PyttsVoice(Voice):
18 | engine = pyttsx3.init()
19 |
20 | def __init__(self):
21 | # 语速
22 | self.engine.setProperty("rate", 125)
23 | # 音量
24 | self.engine.setProperty("volume", 1.0)
25 | if sys.platform == "win32":
26 | for voice in self.engine.getProperty("voices"):
27 | if "Chinese" in voice.name:
28 | self.engine.setProperty("voice", voice.id)
29 | else:
30 | self.engine.setProperty("voice", "zh")
31 |         # Once the espeak problem is fixed, use runAndWait() and remove this startLoop()
32 |         # TODO: check whether this works on win32
33 | self.engine.startLoop(useDriverLoop=False)
34 |
35 | def textToVoice(self, text):
36 | try:
37 | # Avoid the same filename under multithreading
38 | wavFileName = "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".wav"
39 | wavFile = TmpDir().path() + wavFileName
40 | logger.info("[Pytts] textToVoice text={} voice file name={}".format(text, wavFile))
41 |
42 | self.engine.save_to_file(text, wavFile)
43 |
44 | if sys.platform == "win32":
45 | self.engine.runAndWait()
46 | else:
47 |                 # On Ubuntu, runAndWait() does not really wait until the file is created.
48 |                 # It returns once the task queue is empty, but the task is still running in a coroutine.
49 |                 # And if you call runAndWait() and time.sleep() twice, it will get stuck, so do not use it here.
50 |                 # If you want to fix this, add self._proxy.setBusy(True) at line 127 of espeak.py, at the beginning of save_to_file.
51 |                 # self.engine.runAndWait()
52 |
53 |                 # Until espeak fixes this problem, we iterate the generator and control the waiting ourselves.
54 |                 # But this is not the canonical way to use it; for example, it cannot wait if the file already exists.
55 | self.engine.iterate()
56 | while self.engine.isBusy() or wavFileName not in os.listdir(TmpDir().path()):
57 | time.sleep(0.1)
58 |
59 | reply = Reply(ReplyType.VOICE, wavFile)
60 |
61 | except Exception as e:
62 | reply = Reply(ReplyType.ERROR, str(e))
63 | finally:
64 | return reply
65 |
--------------------------------------------------------------------------------
/bot/openai/open_ai_session.py:
--------------------------------------------------------------------------------
1 | from bot.session_manager import Session
2 | from common.log import logger
3 |
4 |
5 | class OpenAISession(Session):
6 | def __init__(self, session_id, system_prompt=None, model="text-davinci-003"):
7 | super().__init__(session_id, system_prompt)
8 | self.model = model
9 | self.reset()
10 |
11 | def __str__(self):
12 | # 构造对话模型的输入
13 | """
14 | e.g. Q: xxx
15 | A: xxx
16 | Q: xxx
17 | """
18 | prompt = ""
19 | for item in self.messages:
20 | if item["role"] == "system":
21 | prompt += item["content"] + "<|endoftext|>\n\n\n"
22 | elif item["role"] == "user":
23 | prompt += "Q: " + item["content"] + "\n"
24 | elif item["role"] == "assistant":
25 | prompt += "\n\nA: " + item["content"] + "<|endoftext|>\n"
26 |
27 | if len(self.messages) > 0 and self.messages[-1]["role"] == "user":
28 | prompt += "A: "
29 | return prompt
30 |
31 | def discard_exceeding(self, max_tokens, cur_tokens=None):
32 | precise = True
33 | try:
34 | cur_tokens = self.calc_tokens()
35 | except Exception as e:
36 | precise = False
37 | if cur_tokens is None:
38 | raise e
39 | logger.debug("Exception when counting tokens precisely for query: {}".format(e))
40 | while cur_tokens > max_tokens:
41 | if len(self.messages) > 1:
42 | self.messages.pop(0)
43 | elif len(self.messages) == 1 and self.messages[0]["role"] == "assistant":
44 | self.messages.pop(0)
45 | if precise:
46 | cur_tokens = self.calc_tokens()
47 | else:
48 | cur_tokens = len(str(self))
49 | break
50 | elif len(self.messages) == 1 and self.messages[0]["role"] == "user":
51 | logger.warn("user question exceed max_tokens. total_tokens={}".format(cur_tokens))
52 | break
53 | else:
54 | logger.debug("max_tokens={}, total_tokens={}, len(conversation)={}".format(max_tokens, cur_tokens, len(self.messages)))
55 | break
56 | if precise:
57 | cur_tokens = self.calc_tokens()
58 | else:
59 | cur_tokens = len(str(self))
60 | return cur_tokens
61 |
62 | def calc_tokens(self):
63 | return num_tokens_from_string(str(self), self.model)
64 |
65 |
66 | # refer to https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
67 | def num_tokens_from_string(string: str, model: str) -> int:
68 | """Returns the number of tokens in a text string."""
69 | import tiktoken
70 |
71 | encoding = tiktoken.encoding_for_model(model)
72 | num_tokens = len(encoding.encode(string, disallowed_special=()))
73 | return num_tokens
74 |
--------------------------------------------------------------------------------
/plugins/hello/hello.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | import plugins
4 | from bridge.context import ContextType
5 | from bridge.reply import Reply, ReplyType
6 | from channel.chat_message import ChatMessage
7 | from common.log import logger
8 | from plugins import *
9 |
10 |
11 | @plugins.register(
12 | name="Hello",
13 | desire_priority=-1,
14 | hidden=True,
15 | desc="A simple plugin that says hello",
16 | version="0.1",
17 | author="lanvent",
18 | )
19 | class Hello(Plugin):
20 | def __init__(self):
21 | super().__init__()
22 | self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context
23 | logger.info("[Hello] inited")
24 |
25 | def on_handle_context(self, e_context: EventContext):
26 | if e_context["context"].type not in [
27 | ContextType.TEXT,
28 | ContextType.JOIN_GROUP,
29 | ContextType.PATPAT,
30 | ]:
31 | return
32 |
33 | if e_context["context"].type == ContextType.JOIN_GROUP:
34 | e_context["context"].type = ContextType.TEXT
35 | msg: ChatMessage = e_context["context"]["msg"]
36 | e_context["context"].content = f'请你随机使用一种风格说一句问候语来欢迎新用户"{msg.actual_user_nickname}"加入群聊。'
37 | e_context.action = EventAction.BREAK # 事件结束,进入默认处理逻辑
38 | return
39 |
40 | if e_context["context"].type == ContextType.PATPAT:
41 | e_context["context"].type = ContextType.TEXT
42 | msg: ChatMessage = e_context["context"]["msg"]
43 | e_context["context"].content = f"请你随机使用一种风格介绍你自己,并告诉用户输入#help可以查看帮助信息。"
44 | e_context.action = EventAction.BREAK # 事件结束,进入默认处理逻辑
45 | return
46 |
47 | content = e_context["context"].content
48 | logger.debug("[Hello] on_handle_context. content: %s" % content)
49 | if content == "Hello":
50 | reply = Reply()
51 | reply.type = ReplyType.TEXT
52 | msg: ChatMessage = e_context["context"]["msg"]
53 | if e_context["context"]["isgroup"]:
54 | reply.content = f"Hello, {msg.actual_user_nickname} from {msg.from_user_nickname}"
55 | else:
56 | reply.content = f"Hello, {msg.from_user_nickname}"
57 | e_context["reply"] = reply
58 | e_context.action = EventAction.BREAK_PASS # 事件结束,并跳过处理context的默认逻辑
59 |
60 | if content == "Hi":
61 | reply = Reply()
62 | reply.type = ReplyType.TEXT
63 | reply.content = "Hi"
64 | e_context["reply"] = reply
65 | e_context.action = EventAction.BREAK # 事件结束,进入默认处理逻辑,一般会覆写reply
66 |
67 | if content == "End":
68 | # 如果是文本消息"End",将请求转换成"IMAGE_CREATE",并将content设置为"The World"
69 | e_context["context"].type = ContextType.IMAGE_CREATE
70 | content = "The World"
71 | e_context.action = EventAction.CONTINUE # 事件继续,交付给下个插件或默认逻辑
72 |
73 | def get_help_text(self, **kwargs):
74 | help_text = "输入Hello,我会回复你的名字\n输入End,我会回复你世界的图片\n"
75 | return help_text
76 |
--------------------------------------------------------------------------------
/channel/terminal/terminal_channel.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | from bridge.context import *
4 | from bridge.reply import Reply, ReplyType
5 | from channel.chat_channel import ChatChannel, check_prefix
6 | from channel.chat_message import ChatMessage
7 | from common.log import logger
8 | from config import conf
9 |
10 |
11 | class TerminalMessage(ChatMessage):
12 | def __init__(
13 | self,
14 | msg_id,
15 | content,
16 | ctype=ContextType.TEXT,
17 | from_user_id="User",
18 | to_user_id="Chatgpt",
19 | other_user_id="Chatgpt",
20 | ):
21 | self.msg_id = msg_id
22 | self.ctype = ctype
23 | self.content = content
24 | self.from_user_id = from_user_id
25 | self.to_user_id = to_user_id
26 | self.other_user_id = other_user_id
27 |
28 |
29 | class TerminalChannel(ChatChannel):
30 | NOT_SUPPORT_REPLYTYPE = [ReplyType.VOICE]
31 |
32 | def send(self, reply: Reply, context: Context):
33 | print("\nBot:")
34 | if reply.type == ReplyType.IMAGE:
35 | from PIL import Image
36 |
37 | image_storage = reply.content
38 | image_storage.seek(0)
39 | img = Image.open(image_storage)
40 | print("")
41 | img.show()
42 | elif reply.type == ReplyType.IMAGE_URL: # 从网络下载图片
43 | import io
44 |
45 | import requests
46 | from PIL import Image
47 |
48 | img_url = reply.content
49 | pic_res = requests.get(img_url, stream=True)
50 | image_storage = io.BytesIO()
51 | for block in pic_res.iter_content(1024):
52 | image_storage.write(block)
53 | image_storage.seek(0)
54 | img = Image.open(image_storage)
55 | print(img_url)
56 | img.show()
57 | else:
58 | print(reply.content)
59 | print("\nUser:", end="")
60 | sys.stdout.flush()
61 | return
62 |
63 | def startup(self):
64 | context = Context()
65 | logger.setLevel("WARN")
66 | print("\nPlease input your question:\nUser:", end="")
67 | sys.stdout.flush()
68 | msg_id = 0
69 | while True:
70 | try:
71 | prompt = self.get_input()
72 | except KeyboardInterrupt:
73 | print("\nExiting...")
74 | sys.exit()
75 | msg_id += 1
76 | trigger_prefixs = conf().get("single_chat_prefix", [""])
77 | if check_prefix(prompt, trigger_prefixs) is None:
78 | prompt = trigger_prefixs[0] + prompt # 给没触发的消息加上触发前缀
79 |
80 | context = self._compose_context(ContextType.TEXT, prompt, msg=TerminalMessage(msg_id, prompt))
81 | if context:
82 | self.produce(context)
83 | else:
84 | raise Exception("context is None")
85 |
86 | def get_input(self):
87 | """
88 | Multi-line input function
89 | """
90 | sys.stdout.flush()
91 | line = input()
92 | return line
93 |
--------------------------------------------------------------------------------
/channel/wechat/wechaty_message.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import re
3 |
4 | from wechaty import MessageType
5 | from wechaty.user import Message
6 |
7 | from bridge.context import ContextType
8 | from channel.chat_message import ChatMessage
9 | from common.log import logger
10 | from common.tmp_dir import TmpDir
11 |
12 |
13 | class aobject(object):
14 | """Inheriting this class allows you to define an async __init__.
15 |
16 | So you can create objects by doing something like `await MyClass(params)`
17 | """
18 |
19 | async def __new__(cls, *a, **kw):
20 | instance = super().__new__(cls)
21 | await instance.__init__(*a, **kw)
22 | return instance
23 |
24 | async def __init__(self):
25 | pass
26 |
27 |
28 | class WechatyMessage(ChatMessage, aobject):
29 | async def __init__(self, wechaty_msg: Message):
30 | super().__init__(wechaty_msg)
31 |
32 | room = wechaty_msg.room()
33 |
34 | self.msg_id = wechaty_msg.message_id
35 | self.create_time = wechaty_msg.payload.timestamp
36 | self.is_group = room is not None
37 |
38 | if wechaty_msg.type() == MessageType.MESSAGE_TYPE_TEXT:
39 | self.ctype = ContextType.TEXT
40 | self.content = wechaty_msg.text()
41 | elif wechaty_msg.type() == MessageType.MESSAGE_TYPE_AUDIO:
42 | self.ctype = ContextType.VOICE
43 | voice_file = await wechaty_msg.to_file_box()
44 | self.content = TmpDir().path() + voice_file.name # content直接存临时目录路径
45 |
46 | def func():
47 | loop = asyncio.get_event_loop()
48 | asyncio.run_coroutine_threadsafe(voice_file.to_file(self.content), loop).result()
49 |
50 | self._prepare_fn = func
51 |
52 | else:
53 | raise NotImplementedError("Unsupported message type: {}".format(wechaty_msg.type()))
54 |
55 | from_contact = wechaty_msg.talker() # 获取消息的发送者
56 | self.from_user_id = from_contact.contact_id
57 | self.from_user_nickname = from_contact.name
58 |
59 | # group中的from和to,wechaty跟itchat含义不一样
60 |         # wechaty: from是消息实际发送者, to:所在群
61 | # itchat: 如果是你发送群消息,from和to是你自己和所在群,如果是别人发群消息,from和to是所在群和你自己
62 | # 但这个差别不影响逻辑,group中只使用到:1.用from来判断是否是自己发的,2.actual_user_id来判断实际发送用户
63 |
64 | if self.is_group:
65 | self.to_user_id = room.room_id
66 | self.to_user_nickname = await room.topic()
67 | else:
68 | to_contact = wechaty_msg.to()
69 | self.to_user_id = to_contact.contact_id
70 | self.to_user_nickname = to_contact.name
71 |
72 | if self.is_group or wechaty_msg.is_self(): # 如果是群消息,other_user设置为群,如果是私聊消息,而且自己发的,就设置成对方。
73 | self.other_user_id = self.to_user_id
74 | self.other_user_nickname = self.to_user_nickname
75 | else:
76 | self.other_user_id = self.from_user_id
77 | self.other_user_nickname = self.from_user_nickname
78 |
79 | if self.is_group: # wechaty群聊中,实际发送用户就是from_user
80 | self.is_at = await wechaty_msg.mention_self()
81 | if not self.is_at: # 有时候复制粘贴的消息,不算做@,但是内容里面会有@xxx,这里做一下兼容
82 | name = wechaty_msg.wechaty.user_self().name
83 | pattern = f"@{re.escape(name)}(\u2005|\u0020)"
84 | if re.search(pattern, self.content):
85 | logger.debug(f"wechaty message {self.msg_id} include at")
86 | self.is_at = True
87 |
88 | self.actual_user_id = self.from_user_id
89 | self.actual_user_nickname = self.from_user_nickname
90 |
--------------------------------------------------------------------------------
/channel/wechatmp/active_reply.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import web
4 | from wechatpy import parse_message
5 | from wechatpy.replies import create_reply
6 |
7 | from bridge.context import *
8 | from bridge.reply import *
9 | from channel.wechatmp.common import *
10 | from channel.wechatmp.wechatmp_channel import WechatMPChannel
11 | from channel.wechatmp.wechatmp_message import WeChatMPMessage
12 | from common.log import logger
13 | from config import conf, subscribe_msg
14 |
15 |
16 | # This class is instantiated once per query
17 | class Query:
18 | def GET(self):
19 | return verify_server(web.input())
20 |
21 | def POST(self):
22 |         # Make sure to return the instance that was first created; @singleton will do that.
23 | try:
24 | args = web.input()
25 | verify_server(args)
26 | channel = WechatMPChannel()
27 | message = web.data()
28 | encrypt_func = lambda x: x
29 | if args.get("encrypt_type") == "aes":
30 | logger.debug("[wechatmp] Receive encrypted post data:\n" + message.decode("utf-8"))
31 | if not channel.crypto:
32 | raise Exception("Crypto not initialized, Please set wechatmp_aes_key in config.json")
33 | message = channel.crypto.decrypt_message(message, args.msg_signature, args.timestamp, args.nonce)
34 | encrypt_func = lambda x: channel.crypto.encrypt_message(x, args.nonce, args.timestamp)
35 | else:
36 | logger.debug("[wechatmp] Receive post data:\n" + message.decode("utf-8"))
37 | msg = parse_message(message)
38 | if msg.type in ["text", "voice", "image"]:
39 | wechatmp_msg = WeChatMPMessage(msg, client=channel.client)
40 | from_user = wechatmp_msg.from_user_id
41 | content = wechatmp_msg.content
42 | message_id = wechatmp_msg.msg_id
43 |
44 | logger.info(
45 | "[wechatmp] {}:{} Receive post query {} {}: {}".format(
46 | web.ctx.env.get("REMOTE_ADDR"),
47 | web.ctx.env.get("REMOTE_PORT"),
48 | from_user,
49 | message_id,
50 | content,
51 | )
52 | )
53 | if msg.type == "voice" and wechatmp_msg.ctype == ContextType.TEXT and conf().get("voice_reply_voice", False):
54 | context = channel._compose_context(wechatmp_msg.ctype, content, isgroup=False, desire_rtype=ReplyType.VOICE, msg=wechatmp_msg)
55 | else:
56 | context = channel._compose_context(wechatmp_msg.ctype, content, isgroup=False, msg=wechatmp_msg)
57 | if context:
58 | channel.produce(context)
59 | # The reply will be sent by channel.send() in another thread
60 | return "success"
61 | elif msg.type == "event":
62 | logger.info("[wechatmp] Event {} from {}".format(msg.event, msg.source))
63 | if msg.event in ["subscribe", "subscribe_scan"]:
64 | reply_text = subscribe_msg()
65 | if reply_text:
66 | replyPost = create_reply(reply_text, msg)
67 | return encrypt_func(replyPost.render())
68 | else:
69 | return "success"
70 | else:
71 | logger.info("暂且不处理")
72 | return "success"
73 | except Exception as exc:
74 | logger.exception(exc)
75 | return exc
76 |
--------------------------------------------------------------------------------
/lib/itchat/__init__.py:
--------------------------------------------------------------------------------
1 | from .core import Core
2 | from .config import VERSION, ASYNC_COMPONENTS
3 | from .log import set_logging
4 |
5 | if ASYNC_COMPONENTS:
6 | from .async_components import load_components
7 | else:
8 | from .components import load_components
9 |
10 |
11 | __version__ = VERSION
12 |
13 |
14 | instanceList = []
15 |
16 | def load_async_itchat() -> Core:
17 | """load async-based itchat instance
18 |
19 | Returns:
20 | Core: the abstract interface of itchat
21 | """
22 | from .async_components import load_components
23 | load_components(Core)
24 | return Core()
25 |
26 |
27 | def load_sync_itchat() -> Core:
28 | """load sync-based itchat instance
29 |
30 | Returns:
31 | Core: the abstract interface of itchat
32 | """
33 | from .components import load_components
34 | load_components(Core)
35 | return Core()
36 |
37 |
38 | if ASYNC_COMPONENTS:
39 | instance = load_async_itchat()
40 | else:
41 | instance = load_sync_itchat()
42 |
43 |
44 | instanceList = [instance]
45 |
46 | # I really want to use sys.modules[__name__] = originInstance
47 | # but it makes auto-fill a real mess, so forgive me for my following **
48 | # actually it took me less than 30 seconds, god bless Uganda
49 |
50 | # components.login
51 | login = instance.login
52 | get_QRuuid = instance.get_QRuuid
53 | get_QR = instance.get_QR
54 | check_login = instance.check_login
55 | web_init = instance.web_init
56 | show_mobile_login = instance.show_mobile_login
57 | start_receiving = instance.start_receiving
58 | get_msg = instance.get_msg
59 | logout = instance.logout
60 | # components.contact
61 | update_chatroom = instance.update_chatroom
62 | update_friend = instance.update_friend
63 | get_contact = instance.get_contact
64 | get_friends = instance.get_friends
65 | get_chatrooms = instance.get_chatrooms
66 | get_mps = instance.get_mps
67 | set_alias = instance.set_alias
68 | set_pinned = instance.set_pinned
69 | accept_friend = instance.accept_friend
70 | get_head_img = instance.get_head_img
71 | create_chatroom = instance.create_chatroom
72 | set_chatroom_name = instance.set_chatroom_name
73 | delete_member_from_chatroom = instance.delete_member_from_chatroom
74 | add_member_into_chatroom = instance.add_member_into_chatroom
75 | # components.messages
76 | send_raw_msg = instance.send_raw_msg
77 | send_msg = instance.send_msg
78 | upload_file = instance.upload_file
79 | send_file = instance.send_file
80 | send_image = instance.send_image
81 | send_video = instance.send_video
82 | send = instance.send
83 | revoke = instance.revoke
84 | # components.hotreload
85 | dump_login_status = instance.dump_login_status
86 | load_login_status = instance.load_login_status
87 | # components.register
88 | auto_login = instance.auto_login
89 | configured_reply = instance.configured_reply
90 | msg_register = instance.msg_register
91 | run = instance.run
92 | # other functions
93 | search_friends = instance.search_friends
94 | search_chatrooms = instance.search_chatrooms
95 | search_mps = instance.search_mps
96 | set_logging = set_logging
97 |
--------------------------------------------------------------------------------
/bot/session_manager.py:
--------------------------------------------------------------------------------
1 | from common.expired_dict import ExpiredDict
2 | from common.log import logger
3 | from config import conf
4 |
5 |
6 | class Session(object):
7 | def __init__(self, session_id, system_prompt=None):
8 | self.session_id = session_id
9 | self.messages = []
10 | if system_prompt is None:
11 | self.system_prompt = conf().get("character_desc", "")
12 | else:
13 | self.system_prompt = system_prompt
14 |
15 | # 重置会话
16 | def reset(self):
17 | system_item = {"role": "system", "content": self.system_prompt}
18 | self.messages = [system_item]
19 |
20 | def set_system_prompt(self, system_prompt):
21 | self.system_prompt = system_prompt
22 | self.reset()
23 |
24 | def add_query(self, query):
25 | user_item = {"role": "user", "content": query}
26 | self.messages.append(user_item)
27 |
28 | def add_reply(self, reply):
29 | assistant_item = {"role": "assistant", "content": reply}
30 | self.messages.append(assistant_item)
31 |
32 | def discard_exceeding(self, max_tokens=None, cur_tokens=None):
33 | raise NotImplementedError
34 |
35 | def calc_tokens(self):
36 | raise NotImplementedError
37 |
38 |
39 | class SessionManager(object):
40 | def __init__(self, sessioncls, **session_args):
41 | if conf().get("expires_in_seconds"):
42 | sessions = ExpiredDict(conf().get("expires_in_seconds"))
43 | else:
44 | sessions = dict()
45 | self.sessions = sessions
46 | self.sessioncls = sessioncls
47 | self.session_args = session_args
48 |
49 | def build_session(self, session_id, system_prompt=None):
50 | """
51 | 如果session_id不在sessions中,创建一个新的session并添加到sessions中
52 |         如果system_prompt不为空,会更新session的system_prompt并重置session
53 | """
54 | if session_id is None:
55 | return self.sessioncls(session_id, system_prompt, **self.session_args)
56 |
57 | if session_id not in self.sessions:
58 | self.sessions[session_id] = self.sessioncls(session_id, system_prompt, **self.session_args)
59 | elif system_prompt is not None: # 如果有新的system_prompt,更新并重置session
60 | self.sessions[session_id].set_system_prompt(system_prompt)
61 | session = self.sessions[session_id]
62 | return session
63 |
64 | def session_query(self, query, session_id):
65 | session = self.build_session(session_id)
66 | session.add_query(query)
67 | try:
68 | max_tokens = conf().get("conversation_max_tokens", 1000)
69 | total_tokens = session.discard_exceeding(max_tokens, None)
70 | logger.debug("prompt tokens used={}".format(total_tokens))
71 | except Exception as e:
72 | logger.debug("Exception when counting tokens precisely for prompt: {}".format(str(e)))
73 | return session
74 |
75 | def session_reply(self, reply, session_id, total_tokens=None):
76 | session = self.build_session(session_id)
77 | session.add_reply(reply)
78 | try:
79 | max_tokens = conf().get("conversation_max_tokens", 1000)
80 | tokens_cnt = session.discard_exceeding(max_tokens, total_tokens)
81 | logger.debug("raw total_tokens={}, savesession tokens={}".format(total_tokens, tokens_cnt))
82 | except Exception as e:
83 | logger.debug("Exception when counting tokens precisely for session: {}".format(str(e)))
84 | return session
85 |
86 | def clear_session(self, session_id):
87 | if session_id in self.sessions:
88 | del self.sessions[session_id]
89 |
90 | def clear_all_session(self):
91 | self.sessions.clear()
92 |
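# 用法示意(假设性示例,并非本文件原有代码;ChatGPTSession 定义见 bot/chatgpt/chat_gpt_session.py):
#   manager = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo")
#   session = manager.session_query("你好", session_id="user_123")   # 组装带system_prompt的上下文
#   ...调用模型拿到回复 reply 与 total_tokens 后...
#   manager.session_reply(reply, "user_123", total_tokens)           # 写回会话并裁剪超长历史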
--------------------------------------------------------------------------------
/voice/baidu/baidu_voice.py:
--------------------------------------------------------------------------------
1 | """
2 | baidu voice service
3 | """
4 | import json
5 | import os
6 | import time
7 |
8 | from aip import AipSpeech
9 |
10 | from bridge.reply import Reply, ReplyType
11 | from common.log import logger
12 | from common.tmp_dir import TmpDir
13 | from config import conf
14 | from voice.audio_convert import get_pcm_from_wav
15 | from voice.voice import Voice
16 |
17 | """
18 | 百度的语音识别API.
19 | dev_pid:
20 | - 1936: 普通话远场
21 | - 1536:普通话(支持简单的英文识别)
22 | - 1537:普通话(纯中文识别)
23 | - 1737:英语
24 | - 1637:粤语
25 | - 1837:四川话
26 | 要使用本模块, 首先到 yuyin.baidu.com 注册一个开发者账号,
27 | 之后创建一个新应用, 然后在应用管理的"查看key"中获得 API Key 和 Secret Key
28 | 然后在 config.json 中填入这两个值, 以及 app_id, dev_pid
29 | """
30 |
31 |
32 | class BaiduVoice(Voice):
33 | def __init__(self):
34 | try:
35 | curdir = os.path.dirname(__file__)
36 | config_path = os.path.join(curdir, "config.json")
37 | bconf = None
38 | if not os.path.exists(config_path): # 如果没有配置文件,创建本地配置文件
39 | bconf = {"lang": "zh", "ctp": 1, "spd": 5, "pit": 5, "vol": 5, "per": 0}
40 | with open(config_path, "w") as fw:
41 | json.dump(bconf, fw, indent=4)
42 | else:
43 | with open(config_path, "r") as fr:
44 | bconf = json.load(fr)
45 |
46 | self.app_id = str(conf().get("baidu_app_id"))
47 | self.api_key = str(conf().get("baidu_api_key"))
48 | self.secret_key = str(conf().get("baidu_secret_key"))
49 | self.dev_id = conf().get("baidu_dev_pid")
50 | self.lang = bconf["lang"]
51 | self.ctp = bconf["ctp"]
52 | self.spd = bconf["spd"]
53 | self.pit = bconf["pit"]
54 | self.vol = bconf["vol"]
55 | self.per = bconf["per"]
56 |
57 | self.client = AipSpeech(self.app_id, self.api_key, self.secret_key)
58 | except Exception as e:
59 | logger.warn("BaiduVoice init failed: %s, ignore " % e)
60 |
61 | def voiceToText(self, voice_file):
62 | # 识别本地文件
63 | logger.debug("[Baidu] voice file name={}".format(voice_file))
64 | pcm = get_pcm_from_wav(voice_file)
65 | res = self.client.asr(pcm, "pcm", 16000, {"dev_pid": self.dev_id})
66 | if res["err_no"] == 0:
67 | logger.info("百度语音识别到了:{}".format(res["result"]))
68 | text = "".join(res["result"])
69 | reply = Reply(ReplyType.TEXT, text)
70 | else:
71 | logger.info("百度语音识别出错了: {}".format(res["err_msg"]))
72 | if res["err_msg"] == "request pv too much":
73 | logger.info(" 出现这个原因很可能是你的百度语音服务调用量超出限制,或未开通付费")
74 | reply = Reply(ReplyType.ERROR, "百度语音识别出错了;{0}".format(res["err_msg"]))
75 | return reply
76 |
77 | def textToVoice(self, text):
78 | result = self.client.synthesis(
79 | text,
80 | self.lang,
81 | self.ctp,
82 | {"spd": self.spd, "pit": self.pit, "vol": self.vol, "per": self.per},
83 | )
84 | if not isinstance(result, dict):
85 | # Avoid the same filename under multithreading
86 | fileName = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3"
87 | with open(fileName, "wb") as f:
88 | f.write(result)
89 | logger.info("[Baidu] textToVoice text={} voice file name={}".format(text, fileName))
90 | reply = Reply(ReplyType.VOICE, fileName)
91 | else:
92 | logger.error("[Baidu] textToVoice error={}".format(result))
93 | reply = Reply(ReplyType.ERROR, "抱歉,语音合成失败")
94 | return reply
95 |
--------------------------------------------------------------------------------
/bot/chatgpt/chat_gpt_session.py:
--------------------------------------------------------------------------------
1 | from bot.session_manager import Session
2 | from common.log import logger
3 |
4 | """
5 | e.g. [
6 | {"role": "system", "content": "You are a helpful assistant."},
7 | {"role": "user", "content": "Who won the world series in 2020?"},
8 | {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
9 | {"role": "user", "content": "Where was it played?"}
10 | ]
11 | """
12 |
13 |
14 | class ChatGPTSession(Session):
15 | def __init__(self, session_id, system_prompt=None, model="gpt-3.5-turbo"):
16 | super().__init__(session_id, system_prompt)
17 | self.model = model
18 | self.reset()
19 |
20 | def discard_exceeding(self, max_tokens, cur_tokens=None):
21 | precise = True
22 | try:
23 | cur_tokens = self.calc_tokens()
24 | except Exception as e:
25 | precise = False
26 | if cur_tokens is None:
27 | raise e
28 | logger.debug("Exception when counting tokens precisely for query: {}".format(e))
29 | while cur_tokens > max_tokens:
30 | if len(self.messages) > 2:
31 | self.messages.pop(1)
32 | elif len(self.messages) == 2 and self.messages[1]["role"] == "assistant":
33 | self.messages.pop(1)
34 | if precise:
35 | cur_tokens = self.calc_tokens()
36 | else:
37 | cur_tokens = cur_tokens - max_tokens
38 | break
39 | elif len(self.messages) == 2 and self.messages[1]["role"] == "user":
40 | logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
41 | break
42 | else:
43 | logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens, len(self.messages)))
44 | break
45 | if precise:
46 | cur_tokens = self.calc_tokens()
47 | else:
48 | cur_tokens = cur_tokens - max_tokens
49 | return cur_tokens
50 |
51 | def calc_tokens(self):
52 | return num_tokens_from_messages(self.messages, self.model)
53 |
54 |
55 | # refer to https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
56 | def num_tokens_from_messages(messages, model):
57 | """Returns the number of tokens used by a list of messages."""
58 | import tiktoken
59 |
60 | if model in ["gpt-3.5-turbo-0301", "gpt-35-turbo"]:
61 | return num_tokens_from_messages(messages, model="gpt-3.5-turbo")
62 | elif model in ["gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0613", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613"]:
63 | return num_tokens_from_messages(messages, model="gpt-4")
64 |
65 | try:
66 | encoding = tiktoken.encoding_for_model(model)
67 | except KeyError:
68 | logger.debug("Warning: model not found. Using cl100k_base encoding.")
69 | encoding = tiktoken.get_encoding("cl100k_base")
70 | if model == "gpt-3.5-turbo":
71 | tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
72 | tokens_per_name = -1 # if there's a name, the role is omitted
73 | elif model == "gpt-4":
74 | tokens_per_message = 3
75 | tokens_per_name = 1
76 | else:
77 | logger.warn(f"num_tokens_from_messages() is not implemented for model {model}. Returning num tokens assuming gpt-3.5-turbo.")
78 | return num_tokens_from_messages(messages, model="gpt-3.5-turbo")
79 | num_tokens = 0
80 | for message in messages:
81 | num_tokens += tokens_per_message
82 | for key, value in message.items():
83 | num_tokens += len(encoding.encode(value))
84 | if key == "name":
85 | num_tokens += tokens_per_name
86 | num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
87 | return num_tokens
88 |
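# 用法示意(假设性示例,并非本文件原有代码):
#   messages = [{"role": "system", "content": "You are a helpful assistant."},
#               {"role": "user", "content": "你好"}]
#   num_tokens_from_messages(messages, model="gpt-3.5-turbo")  # 估算这组消息将消耗的prompt token数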
--------------------------------------------------------------------------------
/channel/wechatmp/README.md:
--------------------------------------------------------------------------------
1 | # 微信公众号channel
2 |
3 | 鉴于个人微信号在服务器上通过itchat登录有封号风险,这里新增了微信公众号channel,提供无风险的服务。
4 | 目前支持订阅号和服务号两种类型的公众号,它们都支持文本交互,语音和图片输入。其中个人主体的微信订阅号由于无法通过微信认证,存在回复时间限制,每天的图片和声音回复次数也有限制。
5 |
6 | ## 使用方法(订阅号,服务号类似)
7 |
8 | 在开始部署前,你需要一个拥有公网IP的服务器,以提供微信服务器和我们自己服务器的连接。或者你需要进行内网穿透,否则微信服务器无法将消息发送给我们的服务器。
9 |
10 | 此外,需要在我们的服务器上安装python的web框架web.py和wechatpy。
11 | 以ubuntu为例(在ubuntu 22.04上测试):
12 | ```
13 | pip3 install web.py
14 | pip3 install wechatpy
15 | ```
16 |
17 | 然后在[微信公众平台](https://mp.weixin.qq.com)注册一个自己的公众号,类型选择订阅号,主体为个人即可。
18 |
19 | 然后根据[接入指南](https://developers.weixin.qq.com/doc/offiaccount/Basic_Information/Access_Overview.html)的说明,在[微信公众平台](https://mp.weixin.qq.com)的“设置与开发”-“基本配置”-“服务器配置”中填写服务器地址`URL`和令牌`Token`。`URL`填写格式为`http://url/wx`,可使用IP(成功几率看脸),`Token`是你自己编的一个特定的令牌。消息加解密方式如果选择了需要加密的模式,需要在配置中填写`wechatmp_aes_key`。
20 |
21 | 相关的服务器验证代码已经写好,你不需要再添加任何代码。你只需要在本项目根目录的`config.json`中添加
22 | ```
23 | "channel_type": "wechatmp", # 如果通过了微信认证,将"wechatmp"替换为"wechatmp_service",可极大地优化使用体验
24 | "wechatmp_token": "xxxx", # 微信公众平台的Token
25 | "wechatmp_port": 8080, # 微信公众平台的端口,需要端口转发到80或443
26 | "wechatmp_app_id": "xxxx", # 微信公众平台的appID
27 | "wechatmp_app_secret": "xxxx", # 微信公众平台的appsecret
28 | "wechatmp_aes_key": "", # 微信公众平台的EncodingAESKey,加密模式需要
29 | "single_chat_prefix": [""], # 推荐设置,任意对话都可以触发回复,不添加前缀
30 | "single_chat_reply_prefix": "", # 推荐设置,回复不设置前缀
31 | "plugin_trigger_prefix": "&", # 推荐设置,在手机微信客户端中,$%^等符号与中文连在一起时会自动显示一段较大的间隔,用户体验不好。请不要使用管理员指令前缀"#",这会造成未知问题。
32 | ```
33 | 然后运行`python3 app.py`启动web服务器。这里会默认监听8080端口,但是微信公众号的服务器配置只支持80/443端口,有两种方法来解决这个问题。第一个是推荐的方法,使用端口转发命令将80端口转发到8080端口:
34 | ```
35 | sudo iptables -t nat -A PREROUTING -p tcp --dport 80 -j REDIRECT --to-port 8080
36 | sudo iptables-save > /etc/iptables/rules.v4
37 | ```
38 | 第二个方法是让python程序直接监听80端口,在配置文件中设置`"wechatmp_port": 80` ,在linux上需要使用`sudo python3 app.py`启动程序。然而这会导致一系列环境和权限问题,因此不是推荐的方法。
39 |
40 | 443端口同理,注意需要支持SSL,也就是https的访问,在`wechatmp_channel.py`中需要修改相应的证书路径。
41 |
42 | 程序启动并监听端口后,在刚才的“服务器配置”中点击`提交`即可验证你的服务器。
43 | 随后在[微信公众平台](https://mp.weixin.qq.com)启用服务器,关闭手动填写规则的自动回复,即可实现ChatGPT的自动回复。
44 |
45 | 之后需要在公众号开发信息下将本机IP加入到IP白名单。
46 |
47 | 不然在启用后,发送语音、图片等消息可能会遇到如下报错:
48 | ```
49 | 'errcode': 40164, 'errmsg': 'invalid ip xx.xx.xx.xx not in whitelist rid
50 | ```
51 |
52 |
53 | ## 个人微信公众号的限制
54 | 由于个人微信公众号不能通过微信认证,所以没有客服接口,因此公众号无法主动发出消息,只能被动回复。而微信官方对被动回复有5秒的时间限制,最多重试2次,因此最多只有15秒的自动回复时间窗口。如果问题比较复杂或者我们的服务器比较忙,ChatGPT的回答就没办法及时回复给用户。为了解决这个问题,这里做了回答缓存,它需要你在回复超时后,再次主动发送任意文字(例如1)来尝试取回缓存的回答。为了优化使用体验,目前设置了两分钟(120秒)的timeout,用户至多在两分钟后即可查询到回复或者错误原因。
55 |
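回答缓存的思路可以用下面的极简示意来理解(假设性示例,函数名与细节并非 wechatmp_channel.py 的真实实现;ExpiredDict 是本项目 common/expired_dict.py 提供的带过期时间的字典):

```python
from common.expired_dict import ExpiredDict

reply_cache = ExpiredDict(120)  # 与上文两分钟的timeout对应,过期条目会自动清理

def on_answer_ready(user_id, answer):
    # 回答生成完成后写入缓存;若5秒(含重试共约15秒)内没有生成完,被动回复会先行超时
    reply_cache[user_id] = answer

def on_followup(user_id):
    # 用户再次发送任意文字(例如1)时,尝试取出缓存的回答
    if user_id in reply_cache:
        return reply_cache[user_id]
    return "回答仍在生成中,请稍后再发送任意文字重试"
```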
56 | 另外,由于微信官方的限制,自动回复有长度限制。因此这里将ChatGPT的回答进行了拆分,以满足限制。
57 |
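拆分的思路大致如下(假设性示意,具体阈值与实现并非项目实际代码):

```python
def split_reply(text, limit=600):
    """把长回复按limit个字符切成多段,逐段发送以满足公众号的回复长度限制。"""
    return [text[i:i + limit] for i in range(0, len(text), limit)]

# 用法示意:for part in split_reply(answer): 逐段回复 part
```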
58 | ## 私有api_key
59 | 公共api有访问频率限制(免费账号每分钟最多3次ChatGPT的API调用),这在服务多人的时候会遇到问题。因此这里多加了一个设置私有api_key的功能。目前通过godcmd插件的命令来设置私有api_key。
60 |
61 | ## 语音输入
62 | 利用微信自带的语音识别功能,提供语音输入能力。需要在公众号管理页面的“设置与开发”->“接口权限”页面开启“接收语音识别结果”。
63 |
64 | ## 语音回复
65 | 请在配置文件中添加以下词条:
66 | ```
67 | "voice_reply_voice": true,
68 | ```
69 | 这样公众号将会用语音回复语音消息,实现语音对话。
70 |
71 | 默认的语音合成引擎是`google`,它是免费使用的。
72 |
73 | 如果要选择其他的语音合成引擎,请添加以下配置项:
74 | ```
75 | "text_to_voice": "pytts"
76 | ```
77 |
78 | pytts是本地的语音合成引擎。还支持baidu,azure,这些你需要自行配置相关的依赖和key。
79 |
80 | 如果使用pytts,在ubuntu上需要安装如下依赖:
81 | ```
82 | sudo apt update
83 | sudo apt install espeak
84 | sudo apt install ffmpeg
85 | python3 -m pip install pyttsx3
86 | ```
87 | 不是很建议开启pytts语音回复,因为它是离线本地计算,算的慢会拖垮服务器,且声音不好听。
88 |
89 | ## 图片回复
90 | 现在认证公众号和非认证公众号都可以实现图片和语音回复。但是非认证公众号使用了永久素材接口,每天有1000次的调用上限(每个月有10次重置机会,程序中已设定遇到上限会自动重置),且永久素材库存也有上限。因此对于非认证公众号,我们会在回复图片或者语音消息后的10秒内从永久素材库存内删除该素材。
91 |
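"回复后延时删除素材"的思路大致如下(假设性示意,并非项目实际代码;`client.material.delete` 为 wechatpy 的永久素材删除接口,此处仅作演示):

```python
import threading

def reply_media_and_cleanup(client, media_id, send_func, delay=10):
    """先用永久素材media_id完成回复,delay秒后删除该素材,避免永久素材库存达到上限。"""
    send_func(media_id)
    threading.Timer(delay, client.material.delete, args=(media_id,)).start()
```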
92 | ## 测试
93 | 目前在`RoboStyle`这个公众号上进行了测试(基于[wechatmp分支](https://github.com/JS00000/chatgpt-on-wechat/tree/wechatmp)),感兴趣的可以关注并体验。开启了godcmd, Banwords, role, dungeon, finish这五个插件,其他的插件还没有详尽测试。百度的接口暂未测试。[wechatmp-stable分支](https://github.com/JS00000/chatgpt-on-wechat/tree/wechatmp-stable)是较稳定的上个版本,但也缺少最新的功能支持。
94 |
95 | ## TODO
96 | - [x] 语音输入
97 | - [x] 图片输入
98 | - [x] 使用临时素材接口提供认证公众号的图片和语音回复
99 | - [x] 使用永久素材接口提供未认证公众号的图片和语音回复
100 | - [ ] 高并发支持
101 |
--------------------------------------------------------------------------------
/channel/wechat/wechat_message.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | from bridge.context import ContextType
4 | from channel.chat_message import ChatMessage
5 | from common.log import logger
6 | from common.tmp_dir import TmpDir
7 | from lib import itchat
8 | from lib.itchat.content import *
9 |
10 |
11 | class WechatMessage(ChatMessage):
12 | def __init__(self, itchat_msg, is_group=False):
13 | super().__init__(itchat_msg)
14 | self.msg_id = itchat_msg["MsgId"]
15 | self.create_time = itchat_msg["CreateTime"]
16 | self.is_group = is_group
17 |
18 | if itchat_msg["Type"] == TEXT:
19 | self.ctype = ContextType.TEXT
20 | self.content = itchat_msg["Text"]
21 | elif itchat_msg["Type"] == VOICE:
22 | self.ctype = ContextType.VOICE
23 | self.content = TmpDir().path() + itchat_msg["FileName"] # content直接存临时目录路径
24 | self._prepare_fn = lambda: itchat_msg.download(self.content)
25 | elif itchat_msg["Type"] == PICTURE and itchat_msg["MsgType"] == 3:
26 | self.ctype = ContextType.IMAGE
27 | self.content = TmpDir().path() + itchat_msg["FileName"] # content直接存临时目录路径
28 | self._prepare_fn = lambda: itchat_msg.download(self.content)
29 | elif itchat_msg["Type"] == NOTE and itchat_msg["MsgType"] == 10000:
30 | if is_group and ("加入群聊" in itchat_msg["Content"] or "加入了群聊" in itchat_msg["Content"]):
31 | self.ctype = ContextType.JOIN_GROUP
32 | self.content = itchat_msg["Content"]
33 | # 这里只能得到nickname, actual_user_id还是机器人的id
34 | if "加入了群聊" in itchat_msg["Content"]:
35 | self.actual_user_nickname = re.findall(r"\"(.*?)\"", itchat_msg["Content"])[-1]
36 | elif "加入群聊" in itchat_msg["Content"]:
37 | self.actual_user_nickname = re.findall(r"\"(.*?)\"", itchat_msg["Content"])[0]
38 | elif "拍了拍我" in itchat_msg["Content"]:
39 | self.ctype = ContextType.PATPAT
40 | self.content = itchat_msg["Content"]
41 | if is_group:
42 | self.actual_user_nickname = re.findall(r"\"(.*?)\"", itchat_msg["Content"])[0]
43 | else:
44 | raise NotImplementedError("Unsupported note message: " + itchat_msg["Content"])
45 | else:
46 | raise NotImplementedError("Unsupported message type: Type:{} MsgType:{}".format(itchat_msg["Type"], itchat_msg["MsgType"]))
47 |
48 | self.from_user_id = itchat_msg["FromUserName"]
49 | self.to_user_id = itchat_msg["ToUserName"]
50 |
51 | user_id = itchat.instance.storageClass.userName
52 | nickname = itchat.instance.storageClass.nickName
53 |
54 | # 虽然from_user_id和to_user_id用的少,但是为了保持一致性,还是要填充一下
55 | # 以下很繁琐,一句话总结:能填的都填了。
56 | if self.from_user_id == user_id:
57 | self.from_user_nickname = nickname
58 | if self.to_user_id == user_id:
59 | self.to_user_nickname = nickname
60 | try: # 陌生人时候, 'User'字段可能不存在
61 | self.other_user_id = itchat_msg["User"]["UserName"]
62 | self.other_user_nickname = itchat_msg["User"]["NickName"]
63 | if self.other_user_id == self.from_user_id:
64 | self.from_user_nickname = self.other_user_nickname
65 | if self.other_user_id == self.to_user_id:
66 | self.to_user_nickname = self.other_user_nickname
67 | except KeyError as e: # 处理偶尔没有对方信息的情况
68 | logger.warn("[WX]get other_user_id failed: " + str(e))
69 | if self.from_user_id == user_id:
70 | self.other_user_id = self.to_user_id
71 | else:
72 | self.other_user_id = self.from_user_id
73 |
74 | if self.is_group:
75 | self.is_at = itchat_msg["IsAt"]
76 | self.actual_user_id = itchat_msg["ActualUserName"]
77 | if self.ctype not in [ContextType.JOIN_GROUP, ContextType.PATPAT]:
78 | self.actual_user_nickname = itchat_msg["ActualNickName"]
79 |
--------------------------------------------------------------------------------
/plugins/banwords/banwords.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | import json
4 | import os
5 |
6 | import plugins
7 | from bridge.context import ContextType
8 | from bridge.reply import Reply, ReplyType
9 | from common.log import logger
10 | from plugins import *
11 |
12 | from .lib.WordsSearch import WordsSearch
13 |
14 |
15 | @plugins.register(
16 | name="Banwords",
17 | desire_priority=100,
18 | hidden=True,
19 | desc="判断消息中是否有敏感词、决定是否回复。",
20 | version="1.0",
21 | author="lanvent",
22 | )
23 | class Banwords(Plugin):
24 | def __init__(self):
25 | super().__init__()
26 | try:
27 | curdir = os.path.dirname(__file__)
28 | config_path = os.path.join(curdir, "config.json")
29 | conf = None
30 | if not os.path.exists(config_path):
31 | conf = {"action": "ignore"}
32 | with open(config_path, "w") as f:
33 | json.dump(conf, f, indent=4)
34 | else:
35 | with open(config_path, "r") as f:
36 | conf = json.load(f)
37 | self.searchr = WordsSearch()
38 | self.action = conf["action"]
39 | banwords_path = os.path.join(curdir, "banwords.txt")
40 | with open(banwords_path, "r", encoding="utf-8") as f:
41 | words = []
42 | for line in f:
43 | word = line.strip()
44 | if word:
45 | words.append(word)
46 | self.searchr.SetKeywords(words)
47 | self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context
48 | if conf.get("reply_filter", True):
49 | self.handlers[Event.ON_DECORATE_REPLY] = self.on_decorate_reply
50 | self.reply_action = conf.get("reply_action", "ignore")
51 | logger.info("[Banwords] inited")
52 | except Exception as e:
53 | logger.warn("[Banwords] init failed, ignore or see https://github.com/zhayujie/chatgpt-on-wechat/tree/master/plugins/banwords .")
54 | raise e
55 |
56 | def on_handle_context(self, e_context: EventContext):
57 | if e_context["context"].type not in [
58 | ContextType.TEXT,
59 | ContextType.IMAGE_CREATE,
60 | ]:
61 | return
62 |
63 | content = e_context["context"].content
64 | logger.debug("[Banwords] on_handle_context. content: %s" % content)
65 | if self.action == "ignore":
66 | f = self.searchr.FindFirst(content)
67 | if f:
68 | logger.info("[Banwords] %s in message" % f["Keyword"])
69 | e_context.action = EventAction.BREAK_PASS
70 | return
71 | elif self.action == "replace":
72 | if self.searchr.ContainsAny(content):
73 | reply = Reply(ReplyType.INFO, "发言中包含敏感词,请重试: \n" + self.searchr.Replace(content))
74 | e_context["reply"] = reply
75 | e_context.action = EventAction.BREAK_PASS
76 | return
77 |
78 | def on_decorate_reply(self, e_context: EventContext):
79 | if e_context["reply"].type not in [ReplyType.TEXT]:
80 | return
81 |
82 | reply = e_context["reply"]
83 | content = reply.content
84 | if self.reply_action == "ignore":
85 | f = self.searchr.FindFirst(content)
86 | if f:
87 | logger.info("[Banwords] %s in reply" % f["Keyword"])
88 | e_context["reply"] = None
89 | e_context.action = EventAction.BREAK_PASS
90 | return
91 | elif self.reply_action == "replace":
92 | if self.searchr.ContainsAny(content):
93 | reply = Reply(ReplyType.INFO, "已替换回复中的敏感词: \n" + self.searchr.Replace(content))
94 | e_context["reply"] = reply
95 | e_context.action = EventAction.CONTINUE
96 | return
97 |
98 | def get_help_text(self, **kwargs):
99 | return "过滤消息中的敏感词。"
100 |
--------------------------------------------------------------------------------
/lib/itchat/components/hotreload.py:
--------------------------------------------------------------------------------
1 | import pickle, os
2 | import logging
3 |
4 | import requests
5 |
6 | from ..config import VERSION
7 | from ..returnvalues import ReturnValue
8 | from ..storage import templates
9 | from .contact import update_local_chatrooms, update_local_friends
10 | from .messages import produce_msg
11 |
12 | logger = logging.getLogger('itchat')
13 |
14 | def load_hotreload(core):
15 | core.dump_login_status = dump_login_status
16 | core.load_login_status = load_login_status
17 |
18 | def dump_login_status(self, fileDir=None):
19 | fileDir = fileDir or self.hotReloadDir
20 | try:
21 | with open(fileDir, 'w') as f:
22 | f.write('itchat - DELETE THIS')
23 | os.remove(fileDir)
24 | except:
25 | raise Exception('Incorrect fileDir')
26 | status = {
27 | 'version' : VERSION,
28 | 'loginInfo' : self.loginInfo,
29 | 'cookies' : self.s.cookies.get_dict(),
30 | 'storage' : self.storageClass.dumps()}
31 | with open(fileDir, 'wb') as f:
32 | pickle.dump(status, f)
33 | logger.debug('Dump login status for hot reload successfully.')
34 |
35 | def load_login_status(self, fileDir,
36 | loginCallback=None, exitCallback=None):
37 | try:
38 | with open(fileDir, 'rb') as f:
39 | j = pickle.load(f)
40 | except Exception as e:
41 | logger.debug('No such file, loading login status failed.')
42 | return ReturnValue({'BaseResponse': {
43 | 'ErrMsg': 'No such file, loading login status failed.',
44 | 'Ret': -1002, }})
45 |
46 | if j.get('version', '') != VERSION:
47 | logger.debug(('you have updated itchat from %s to %s, ' +
48 | 'so cached status is ignored') % (
49 | j.get('version', 'old version'), VERSION))
50 | return ReturnValue({'BaseResponse': {
51 | 'ErrMsg': 'cached status ignored because of version',
52 | 'Ret': -1005, }})
53 | self.loginInfo = j['loginInfo']
54 | self.loginInfo['User'] = templates.User(self.loginInfo['User'])
55 | self.loginInfo['User'].core = self
56 | self.s.cookies = requests.utils.cookiejar_from_dict(j['cookies'])
57 | self.storageClass.loads(j['storage'])
58 | try:
59 | msgList, contactList = self.get_msg()
60 | except:
61 | msgList = contactList = None
62 | if (msgList or contactList) is None:
63 | self.logout()
64 | load_last_login_status(self.s, j['cookies'])
65 | logger.debug('server refused, loading login status failed.')
66 | return ReturnValue({'BaseResponse': {
67 | 'ErrMsg': 'server refused, loading login status failed.',
68 | 'Ret': -1003, }})
69 | else:
70 | if contactList:
71 | for contact in contactList:
72 | if '@@' in contact['UserName']:
73 | update_local_chatrooms(self, [contact])
74 | else:
75 | update_local_friends(self, [contact])
76 | if msgList:
77 | msgList = produce_msg(self, msgList)
78 | for msg in msgList: self.msgList.put(msg)
79 | self.start_receiving(exitCallback)
80 | logger.debug('loading login status succeeded.')
81 | if hasattr(loginCallback, '__call__'):
82 | loginCallback()
83 | return ReturnValue({'BaseResponse': {
84 | 'ErrMsg': 'loading login status succeeded.',
85 | 'Ret': 0, }})
86 |
87 | def load_last_login_status(session, cookiesDict):
88 | try:
89 | session.cookies = requests.utils.cookiejar_from_dict({
90 | 'webwxuvid': cookiesDict['webwxuvid'],
91 | 'webwx_auth_ticket': cookiesDict['webwx_auth_ticket'],
92 | 'login_frequency': '2',
93 | 'last_wxuin': cookiesDict['wxuin'],
94 | 'wxloadtime': cookiesDict['wxloadtime'] + '_expired',
95 | 'wxpluginkey': cookiesDict['wxloadtime'],
96 | 'wxuin': cookiesDict['wxuin'],
97 | 'mm_lang': 'zh_CN',
98 | 'MM_WX_NOTIFY_STATE': '1',
99 | 'MM_WX_SOUND_STATE': '1', })
100 | except:
101 | logger.info('Load status for push login failed, we may have experienced a cookies change.')
102 | logger.info('If you are using the newest version of itchat, you may report a bug.')
103 |
--------------------------------------------------------------------------------
/lib/itchat/async_components/hotreload.py:
--------------------------------------------------------------------------------
1 | import pickle, os
2 | import logging
3 |
4 | import requests # type: ignore
5 |
6 | from ..config import VERSION
7 | from ..returnvalues import ReturnValue
8 | from ..storage import templates
9 | from .contact import update_local_chatrooms, update_local_friends
10 | from .messages import produce_msg
11 |
12 | logger = logging.getLogger('itchat')
13 |
14 | def load_hotreload(core):
15 | core.dump_login_status = dump_login_status
16 | core.load_login_status = load_login_status
17 |
18 | async def dump_login_status(self, fileDir=None):
19 | fileDir = fileDir or self.hotReloadDir
20 | try:
21 | with open(fileDir, 'w') as f:
22 | f.write('itchat - DELETE THIS')
23 | os.remove(fileDir)
24 | except:
25 | raise Exception('Incorrect fileDir')
26 | status = {
27 | 'version' : VERSION,
28 | 'loginInfo' : self.loginInfo,
29 | 'cookies' : self.s.cookies.get_dict(),
30 | 'storage' : self.storageClass.dumps()}
31 | with open(fileDir, 'wb') as f:
32 | pickle.dump(status, f)
33 | logger.debug('Dump login status for hot reload successfully.')
34 |
35 | async def load_login_status(self, fileDir,
36 | loginCallback=None, exitCallback=None):
37 | try:
38 | with open(fileDir, 'rb') as f:
39 | j = pickle.load(f)
40 | except Exception as e:
41 | logger.debug('No such file, loading login status failed.')
42 | return ReturnValue({'BaseResponse': {
43 | 'ErrMsg': 'No such file, loading login status failed.',
44 | 'Ret': -1002, }})
45 |
46 | if j.get('version', '') != VERSION:
47 | logger.debug(('you have updated itchat from %s to %s, ' +
48 | 'so cached status is ignored') % (
49 | j.get('version', 'old version'), VERSION))
50 | return ReturnValue({'BaseResponse': {
51 | 'ErrMsg': 'cached status ignored because of version',
52 | 'Ret': -1005, }})
53 | self.loginInfo = j['loginInfo']
54 | self.loginInfo['User'] = templates.User(self.loginInfo['User'])
55 | self.loginInfo['User'].core = self
56 | self.s.cookies = requests.utils.cookiejar_from_dict(j['cookies'])
57 | self.storageClass.loads(j['storage'])
58 | try:
59 | msgList, contactList = self.get_msg()
60 | except:
61 | msgList = contactList = None
62 | if (msgList or contactList) is None:
63 | self.logout()
64 | await load_last_login_status(self.s, j['cookies'])
65 | logger.debug('server refused, loading login status failed.')
66 | return ReturnValue({'BaseResponse': {
67 | 'ErrMsg': 'server refused, loading login status failed.',
68 | 'Ret': -1003, }})
69 | else:
70 | if contactList:
71 | for contact in contactList:
72 | if '@@' in contact['UserName']:
73 | update_local_chatrooms(self, [contact])
74 | else:
75 | update_local_friends(self, [contact])
76 | if msgList:
77 | msgList = produce_msg(self, msgList)
78 | for msg in msgList: self.msgList.put(msg)
79 | await self.start_receiving(exitCallback)
80 | logger.debug('loading login status succeeded.')
81 | if hasattr(loginCallback, '__call__'):
82 | await loginCallback(self.storageClass.userName)
83 | return ReturnValue({'BaseResponse': {
84 | 'ErrMsg': 'loading login status succeeded.',
85 | 'Ret': 0, }})
86 |
87 | async def load_last_login_status(session, cookiesDict):
88 | try:
89 | session.cookies = requests.utils.cookiejar_from_dict({
90 | 'webwxuvid': cookiesDict['webwxuvid'],
91 | 'webwx_auth_ticket': cookiesDict['webwx_auth_ticket'],
92 | 'login_frequency': '2',
93 | 'last_wxuin': cookiesDict['wxuin'],
94 | 'wxloadtime': cookiesDict['wxloadtime'] + '_expired',
95 | 'wxpluginkey': cookiesDict['wxloadtime'],
96 | 'wxuin': cookiesDict['wxuin'],
97 | 'mm_lang': 'zh_CN',
98 | 'MM_WX_NOTIFY_STATE': '1',
99 | 'MM_WX_SOUND_STATE': '1', })
100 | except:
101 | logger.info('Load status for push login failed, we may have experienced a cookies change.')
102 | logger.info('If you are using the newest version of itchat, you may report a bug.')
103 |
--------------------------------------------------------------------------------
/voice/audio_convert.py:
--------------------------------------------------------------------------------
1 | import shutil
2 | import wave
3 |
4 | from common.log import logger
5 |
6 | try:
7 | import pysilk
8 | except ImportError:
9 | logger.warn("import pysilk failed, wechaty voice message will not be supported.")
10 |
11 | from pydub import AudioSegment
12 |
13 | sil_supports = [8000, 12000, 16000, 24000, 32000, 44100, 48000] # slk转wav时,支持的采样率
14 |
15 |
16 | def find_closest_sil_supports(sample_rate):
17 | """
18 | 找到最接近的支持的采样率
19 | """
20 | if sample_rate in sil_supports:
21 | return sample_rate
22 | closest = 0
23 | mindiff = 9999999
24 | for rate in sil_supports:
25 | diff = abs(rate - sample_rate)
26 | if diff < mindiff:
27 | closest = rate
28 | mindiff = diff
29 | return closest
30 |
31 |
32 | def get_pcm_from_wav(wav_path):
33 | """
34 | 从 wav 文件中读取 pcm
35 |
36 | :param wav_path: wav 文件路径
37 | :returns: pcm 数据
38 | """
39 | wav = wave.open(wav_path, "rb")
40 | return wav.readframes(wav.getnframes())
41 |
42 |
43 | def any_to_mp3(any_path, mp3_path):
44 | """
45 | 把任意格式转成mp3文件
46 | """
47 | if any_path.endswith(".mp3"):
48 | shutil.copy2(any_path, mp3_path)
49 | return
50 |     if any_path.endswith(".sil") or any_path.endswith(".silk") or any_path.endswith(".slk"):
51 |         # 先就地把silk解码成wav数据(覆盖原文件),再交给pydub转成mp3
52 |         sil_to_wav(any_path, any_path)
53 | audio = AudioSegment.from_file(any_path)
54 | audio.export(mp3_path, format="mp3")
55 |
56 |
57 | def any_to_wav(any_path, wav_path):
58 | """
59 | 把任意格式转成wav文件
60 | """
61 | if any_path.endswith(".wav"):
62 | shutil.copy2(any_path, wav_path)
63 | return
64 | if any_path.endswith(".sil") or any_path.endswith(".silk") or any_path.endswith(".slk"):
65 | return sil_to_wav(any_path, wav_path)
66 | audio = AudioSegment.from_file(any_path)
67 | audio.export(wav_path, format="wav")
68 |
69 |
70 | def any_to_sil(any_path, sil_path):
71 | """
72 | 把任意格式转成sil文件
73 | """
74 | if any_path.endswith(".sil") or any_path.endswith(".silk") or any_path.endswith(".slk"):
75 | shutil.copy2(any_path, sil_path)
76 | return 10000
77 | audio = AudioSegment.from_file(any_path)
78 | rate = find_closest_sil_supports(audio.frame_rate)
79 | # Convert to PCM_s16
80 | pcm_s16 = audio.set_sample_width(2)
81 | pcm_s16 = pcm_s16.set_frame_rate(rate)
82 | wav_data = pcm_s16.raw_data
83 | silk_data = pysilk.encode(wav_data, data_rate=rate, sample_rate=rate)
84 | with open(sil_path, "wb") as f:
85 | f.write(silk_data)
86 | return audio.duration_seconds * 1000
87 |
88 |
89 | def any_to_amr(any_path, amr_path):
90 | """
91 | 把任意格式转成amr文件
92 | """
93 | if any_path.endswith(".amr"):
94 | shutil.copy2(any_path, amr_path)
95 | return
96 | if any_path.endswith(".sil") or any_path.endswith(".silk") or any_path.endswith(".slk"):
97 | raise NotImplementedError("Not support file type: {}".format(any_path))
98 | audio = AudioSegment.from_file(any_path)
99 | audio = audio.set_frame_rate(8000) # only support 8000
100 | audio.export(amr_path, format="amr")
101 | return audio.duration_seconds * 1000
102 |
103 |
104 | def sil_to_wav(silk_path, wav_path, rate: int = 24000):
105 | """
106 | silk 文件转 wav
107 | """
108 | wav_data = pysilk.decode_file(silk_path, to_wav=True, sample_rate=rate)
109 | with open(wav_path, "wb") as f:
110 | f.write(wav_data)
111 |
112 |
113 | def split_audio(file_path, max_segment_length_ms=60000):
114 | """
115 | 分割音频文件
116 | """
117 | audio = AudioSegment.from_file(file_path)
118 | audio_length_ms = len(audio)
119 | if audio_length_ms <= max_segment_length_ms:
120 | return audio_length_ms, [file_path]
121 | segments = []
122 | for start_ms in range(0, audio_length_ms, max_segment_length_ms):
123 | end_ms = min(audio_length_ms, start_ms + max_segment_length_ms)
124 | segment = audio[start_ms:end_ms]
125 | segments.append(segment)
126 | file_prefix = file_path[: file_path.rindex(".")]
127 | format = file_path[file_path.rindex(".") + 1 :]
128 | files = []
129 | for i, segment in enumerate(segments):
130 | path = f"{file_prefix}_{i+1}" + f".{format}"
131 | segment.export(path, format=format)
132 | files.append(path)
133 | return audio_length_ms, files
134 |
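# 用法示意(假设性示例,并非本文件原有代码):
#   duration_ms, files = split_audio("/tmp/reply.mp3", 60 * 1000)  # 超过60秒的音频会被切成多段文件
#   any_to_sil("/tmp/reply.mp3", "/tmp/reply.sil")                 # 返回音频时长(毫秒),供发送语音时使用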
--------------------------------------------------------------------------------
/lib/itchat/components/register.py:
--------------------------------------------------------------------------------
1 | import logging, traceback, sys, threading
2 | try:
3 | import Queue
4 | except ImportError:
5 | import queue as Queue
6 |
7 | from ..log import set_logging
8 | from ..utils import test_connect
9 | from ..storage import templates
10 |
11 | logger = logging.getLogger('itchat')
12 |
13 | def load_register(core):
14 | core.auto_login = auto_login
15 | core.configured_reply = configured_reply
16 | core.msg_register = msg_register
17 | core.run = run
18 |
19 | def auto_login(self, hotReload=False, statusStorageDir='itchat.pkl',
20 | enableCmdQR=False, picDir=None, qrCallback=None,
21 | loginCallback=None, exitCallback=None):
22 | if not test_connect():
23 | logger.info("You can't get access to internet or wechat domain, so exit.")
24 | sys.exit()
25 | self.useHotReload = hotReload
26 | self.hotReloadDir = statusStorageDir
27 | if hotReload:
28 | rval=self.load_login_status(statusStorageDir,
29 | loginCallback=loginCallback, exitCallback=exitCallback)
30 | if rval:
31 | return
32 | logger.error('Hot reload failed, logging in normally, error={}'.format(rval))
33 | self.logout()
34 | self.login(enableCmdQR=enableCmdQR, picDir=picDir, qrCallback=qrCallback,
35 | loginCallback=loginCallback, exitCallback=exitCallback)
36 | self.dump_login_status(statusStorageDir)
37 | else:
38 | self.login(enableCmdQR=enableCmdQR, picDir=picDir, qrCallback=qrCallback,
39 | loginCallback=loginCallback, exitCallback=exitCallback)
40 |
41 | def configured_reply(self):
42 | ''' determine the type of message and reply if its method is defined
43 | however, I use a strange way to determine whether a msg is from massive platform
44 | I haven't found a better solution here
45 | The main problem I'm worrying about is the mismatching of new friends added on phone
46 | If you have any good idea, pleeeease report an issue. I will be more than grateful.
47 | '''
48 | try:
49 | msg = self.msgList.get(timeout=1)
50 | except Queue.Empty:
51 | pass
52 | else:
53 | if isinstance(msg['User'], templates.User):
54 | replyFn = self.functionDict['FriendChat'].get(msg['Type'])
55 | elif isinstance(msg['User'], templates.MassivePlatform):
56 | replyFn = self.functionDict['MpChat'].get(msg['Type'])
57 | elif isinstance(msg['User'], templates.Chatroom):
58 | replyFn = self.functionDict['GroupChat'].get(msg['Type'])
59 | if replyFn is None:
60 | r = None
61 | else:
62 | try:
63 | r = replyFn(msg)
64 | if r is not None:
65 | self.send(r, msg.get('FromUserName'))
66 | except:
67 | logger.warning(traceback.format_exc())
68 |
69 | def msg_register(self, msgType, isFriendChat=False, isGroupChat=False, isMpChat=False):
70 | ''' a decorator constructor
71 | return a specific decorator based on information given '''
72 | if not (isinstance(msgType, list) or isinstance(msgType, tuple)):
73 | msgType = [msgType]
74 | def _msg_register(fn):
75 | for _msgType in msgType:
76 | if isFriendChat:
77 | self.functionDict['FriendChat'][_msgType] = fn
78 | if isGroupChat:
79 | self.functionDict['GroupChat'][_msgType] = fn
80 | if isMpChat:
81 | self.functionDict['MpChat'][_msgType] = fn
82 | if not any((isFriendChat, isGroupChat, isMpChat)):
83 | self.functionDict['FriendChat'][_msgType] = fn
84 | return fn
85 | return _msg_register
86 |
87 | def run(self, debug=False, blockThread=True):
88 | logger.info('Start auto replying.')
89 | if debug:
90 | set_logging(loggingLevel=logging.DEBUG)
91 | def reply_fn():
92 | try:
93 | while self.alive:
94 | self.configured_reply()
95 | except KeyboardInterrupt:
96 | if self.useHotReload:
97 | self.dump_login_status()
98 | self.alive = False
99 | logger.debug('itchat received an ^C and exit.')
100 | logger.info('Bye~')
101 | if blockThread:
102 | reply_fn()
103 | else:
104 | replyThread = threading.Thread(target=reply_fn)
105 | replyThread.setDaemon(True)
106 | replyThread.start()
107 |
--------------------------------------------------------------------------------
/plugins/dungeon/dungeon.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | import plugins
4 | from bridge.bridge import Bridge
5 | from bridge.context import ContextType
6 | from bridge.reply import Reply, ReplyType
7 | from common import const
8 | from common.expired_dict import ExpiredDict
9 | from common.log import logger
10 | from config import conf
11 | from plugins import *
12 |
13 |
14 | # https://github.com/bupticybee/ChineseAiDungeonChatGPT
15 | class StoryTeller:
16 | def __init__(self, bot, sessionid, story):
17 | self.bot = bot
18 | self.sessionid = sessionid
19 | bot.sessions.clear_session(sessionid)
20 | self.first_interact = True
21 | self.story = story
22 |
23 | def reset(self):
24 | self.bot.sessions.clear_session(self.sessionid)
25 | self.first_interact = True
26 |
27 | def action(self, user_action):
28 | if user_action[-1] != "。":
29 | user_action = user_action + "。"
30 | if self.first_interact:
31 | prompt = (
32 | """现在来充当一个文字冒险游戏,描述时候注意节奏,不要太快,仔细描述各个人物的心情和周边环境。一次只需写四到六句话。
33 | 开头是,"""
34 | + self.story
35 | + " "
36 | + user_action
37 | )
38 | self.first_interact = False
39 | else:
40 | prompt = """继续,一次只需要续写四到六句话,总共就只讲5分钟内发生的事情。""" + user_action
41 | return prompt
42 |
43 |
44 | @plugins.register(
45 | name="Dungeon",
46 | desire_priority=0,
47 | namecn="文字冒险",
48 | desc="A plugin to play dungeon game",
49 | version="1.0",
50 | author="lanvent",
51 | )
52 | class Dungeon(Plugin):
53 | def __init__(self):
54 | super().__init__()
55 | self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context
56 | logger.info("[Dungeon] inited")
57 | # 目前没有设计session过期事件,这里先暂时使用过期字典
58 | if conf().get("expires_in_seconds"):
59 | self.games = ExpiredDict(conf().get("expires_in_seconds"))
60 | else:
61 | self.games = dict()
62 |
63 | def on_handle_context(self, e_context: EventContext):
64 | if e_context["context"].type != ContextType.TEXT:
65 | return
66 | bottype = Bridge().get_bot_type("chat")
67 | if bottype not in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI]:
68 | return
69 | bot = Bridge().get_bot("chat")
70 | content = e_context["context"].content[:]
71 | clist = e_context["context"].content.split(maxsplit=1)
72 | sessionid = e_context["context"]["session_id"]
73 | logger.debug("[Dungeon] on_handle_context. content: %s" % clist)
74 | trigger_prefix = conf().get("plugin_trigger_prefix", "$")
75 | if clist[0] == f"{trigger_prefix}停止冒险":
76 | if sessionid in self.games:
77 | self.games[sessionid].reset()
78 | del self.games[sessionid]
79 | reply = Reply(ReplyType.INFO, "冒险结束!")
80 | e_context["reply"] = reply
81 | e_context.action = EventAction.BREAK_PASS
82 | elif clist[0] == f"{trigger_prefix}开始冒险" or sessionid in self.games:
83 | if sessionid not in self.games or clist[0] == f"{trigger_prefix}开始冒险":
84 | if len(clist) > 1:
85 | story = clist[1]
86 | else:
87 | story = "你在树林里冒险,指不定会从哪里蹦出来一些奇怪的东西,你握紧手上的手枪,希望这次冒险能够找到一些值钱的东西,你往树林深处走去。"
88 | self.games[sessionid] = StoryTeller(bot, sessionid, story)
89 | reply = Reply(ReplyType.INFO, "冒险开始,你可以输入任意内容,让故事继续下去。故事背景是:" + story)
90 | e_context["reply"] = reply
91 | e_context.action = EventAction.BREAK_PASS # 事件结束,并跳过处理context的默认逻辑
92 | else:
93 | prompt = self.games[sessionid].action(content)
94 | e_context["context"].type = ContextType.TEXT
95 | e_context["context"].content = prompt
96 | e_context.action = EventAction.BREAK # 事件结束,不跳过处理context的默认逻辑
97 |
98 | def get_help_text(self, **kwargs):
99 | help_text = "可以和机器人一起玩文字冒险游戏。\n"
100 | if kwargs.get("verbose") != True:
101 | return help_text
102 | trigger_prefix = conf().get("plugin_trigger_prefix", "$")
103 | help_text = f"{trigger_prefix}开始冒险 " + "背景故事: 开始一个基于{背景故事}的文字冒险,之后你的所有消息会协助完善这个故事。\n" + f"{trigger_prefix}停止冒险: 结束游戏。\n"
104 | if kwargs.get("verbose") == True:
105 | help_text += f"\n命令例子: '{trigger_prefix}开始冒险 你在树林里冒险,指不定会从哪里蹦出来一些奇怪的东西,你握紧手上的手枪,希望这次冒险能够找到一些值钱的东西,你往树林深处走去。'"
106 | return help_text
107 |
--------------------------------------------------------------------------------
/plugins/tool/README.md:
--------------------------------------------------------------------------------
1 | ## 插件描述
2 | 一个能让chatgpt联网、搜索、进行数字运算的插件,为chatgpt赋予强大且丰富的扩展能力
3 | 使用说明(默认trigger_prefix为$):
4 | ```text
5 | #help tool: 查看tool帮助信息,可查看已加载工具列表
6 | $tool 命令: 根据给出的{命令}使用一些可用工具尽力为你得到结果。
7 | $tool reset: 重置工具。
8 | ```
9 | ### 本插件所有工具同步存放至专用仓库:[chatgpt-tool-hub](https://github.com/goldfishh/chatgpt-tool-hub)
10 |
11 |
12 | ## 使用说明
13 | 使用该插件后将默认使用4个工具, 无需额外配置长期生效:
14 | ### 1. python
15 | ###### python解释器,使用它来解释执行python指令,可以配合你想要chatgpt生成的代码输出结果或执行事务
16 |
17 | ### 2. 访问网页的工具汇总(默认url-get)
18 |
19 | #### 2.1 url-get
20 | ###### 往往用来获取某个网站具体内容,结果可能会被反爬策略影响
21 |
22 | #### 2.2 browser
23 | ###### 浏览器,功能与2.1类似,但能更好地模拟真实浏览行为,不容易被识别为爬虫而影响获取网站内容
24 |
25 | > 注1:url-get默认配置、browser需额外配置,browser依赖google-chrome,你需要提前安装好
26 |
27 | > 注2:当检测到长文本时会进入summary tool总结长文本,tokens可能会大量消耗!
28 |
29 | 这是debian端安装google-chrome教程,其他系统请自行查找
30 | > https://www.linuxjournal.com/content/how-can-you-install-google-browser-debian
31 |
32 | ### 3. terminal
33 | ###### 在你运行的电脑里执行shell命令,可以配合你想要chatgpt生成的代码使用,给予自然语言控制手段
34 |
35 | > terminal调优记录:https://github.com/zhayujie/chatgpt-on-wechat/issues/776#issue-1659347640
36 |
37 | ### 4. meteo-weather
38 | ###### 回答你有关天气的询问, 需要获取时间、地点上下文信息,本工具使用了[meteo open api](https://open-meteo.com/)
39 | 注:该工具需要较高的对话技巧,不保证你问的任何问题均能得到满意的回复
40 |
41 | > meteo调优记录:https://github.com/zhayujie/chatgpt-on-wechat/issues/776#issuecomment-1500771334
42 |
43 | ## 使用本插件对话(prompt)技巧
44 | ### 1. 有指引的询问
45 | #### 例如:
46 | - 总结这个链接的内容 https://github.com/goldfishh/chatgpt-tool-hub
47 | - 使用Terminal执行curl cip.cc
48 | - 使用python查询今天日期
49 |
50 | ### 2. 使用搜索引擎工具
51 | - 如果有搜索工具,chatgpt就能补全你未传达清楚的上下文信息;比如chatgpt不知道你的地理位置、现在的时间等,因此无法直接查询天气
52 |
53 | ## 其他工具
54 |
55 | ### 5. wikipedia
56 | ###### 可以回答你想要知道确切的人事物
57 |
58 | ### 6. news 新闻类工具集合
59 |
60 | > news更新:0.4版本对新闻类工具做了整合,配置文件只要加入`news`一个工具名就会自动加载所有新闻类工具
61 |
62 | #### 6.1. news-api *
63 | ###### 从全球 80,000 多个信息源中获取当前和历史新闻文章
64 |
65 | #### 6.2. morning-news *
66 | ###### 每日60秒早报,每天凌晨一点更新,本工具使用了[alapi-每日60秒早报](https://alapi.cn/api/view/93)
67 |
68 | ```text
69 | 可配置参数:
70 | 1. morning_news_use_llm: 是否使用LLM润色结果,默认false(可能会慢)
71 | ```
72 |
73 | > 该tool每天返回内容相同
74 |
75 | #### 6.3. finance-news
76 | ###### 获取实时的金融财政新闻
77 |
78 | > 该工具需要解决browser tool 的google-chrome依赖安装
79 |
80 |
81 |
82 | ### 7. bing-search *
83 | ###### bing搜索引擎,从此你不用再烦恼搜索要用哪些关键词
84 |
85 | ### 8. wolfram-alpha *
86 | ###### 知识搜索引擎、科学问答系统,常用于专业学科计算
87 |
88 | ### 9. google-search *
89 | ###### google搜索引擎,申请流程较bing-search繁琐
90 |
91 | ### 10. arxiv
92 | ###### 用于查找论文
93 |
94 | ```text
95 | 可配置参数:
96 | 1. arxiv_summary: 是否使用总结工具,默认true, 当为false时会直接返回论文的标题、作者、发布时间、摘要、分类、备注、pdf链接等内容
97 | ```
98 |
99 | > 0.4.2更新,例子:帮我找一篇吴恩达写的论文
100 |
101 | ### 11. summary
102 | ###### 总结工具,该工具必须输入一个本地文件的绝对路径
103 |
104 | > 该工具目前是和其他工具配合使用,暂未测试单独使用效果
105 |
106 | ### 12. image2text
107 | ###### 将图片转换成文字,底层调用imageCaption模型,该工具必须输入一个本地文件的绝对路径
108 |
109 | ### 13. searxng-search *
110 | ###### 一个私有化的搜索引擎工具
111 |
112 | > 安装教程:https://docs.searxng.org/admin/installation.html
113 |
114 | ---
115 |
116 | ###### 注1:带*工具需要获取api-key才能使用(在config.json内的kwargs添加项),部分工具需要外网支持
117 | ## [工具的api申请方法](https://github.com/goldfishh/chatgpt-tool-hub/blob/master/docs/apply_optional_tool.md)
118 |
119 | ## config.json 配置说明
120 | ###### 默认工具无需配置,其它工具需手动配置,以增加morning-news和bing-search两个工具为例:
121 | ```json
122 | {
123 | "tools": ["bing-search", "news", "你想要添加的其他工具"], // 填入你想用到的额外工具名,这里加入了工具"bing-search"和工具"news"(news工具会自动加载morning-news、finance-news等子工具)
124 | "kwargs": {
125 | "debug": true, // 当你遇到问题求助时,需要配置
126 | "request_timeout": 120, // openai接口超时时间
127 | "no_default": false, // 是否不使用默认的4个工具
128 | "bing_subscription_key": "4871f273a4804743",//带*工具需要申请api-key,这里填入了工具bing-search对应的api,api_name参考前述`工具的api申请方法`
129 | "morning_news_api_key": "5w1kjNh9VQlUc",// 这里填入了morning-news对应的api,
130 | }
131 | }
132 |
133 | ```
134 | 注:config.json文件非必须,未创建仍可使用本tool;带*工具需在kwargs填入对应api-key键值对
135 | - `tools`:本插件初始化时加载的工具, 上述一级标题即是对应工具名称,带*工具必须在kwargs中配置相应api-key
136 | - `kwargs`:工具执行时的配置,一般在这里存放**api-key**,或环境配置
137 | - `debug`: 输出chatgpt-tool-hub额外信息用于调试
138 | - `request_timeout`: 访问openai接口的超时时间,默认与wechat-on-chatgpt配置一致,可单独配置
139 | - `no_default`: 用于配置默认加载4个工具的行为,如果为true则仅使用tools列表工具,不加载默认工具
140 | - `top_k_results`: 控制所有有关搜索的工具返回条目数,数字越高则参考信息越多,但无用信息可能干扰判断,该值一般为2
141 | - `model_name`: 用于控制tool插件底层使用的llm模型,目前暂未测试3.5以外的模型,一般保持默认
142 |
143 | ---
144 |
145 | ## Notes
146 | - Strongly recommended: apply for a search tool to pair with the plugin; bing-search is the suggested choice
147 | - Although I deliberately add some restrictions, please do not use this plugin to do anything that harms others; find out ahead of time whether certain content may violate relevant regulations, and set up filtering in advance
148 | - If you run into a problem with this plugin, set debug to true and ask again with no prior context; if the problem persists, open an issue at [chatgpt-tool-hub](https://github.com/goldfishh/chatgpt-tool-hub) and paste the log in it. I cannot handle problems that cannot be reproduced
149 | - Stars & spreading the word are welcome; PRs are appreciated if you are able to contribute
150 |
--------------------------------------------------------------------------------
/docker/chatgpt-on-wechat-voice-reply/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | # build prefix
5 | CHATGPT_ON_WECHAT_PREFIX=${CHATGPT_ON_WECHAT_PREFIX:-""}
6 | # path to config.json
7 | CHATGPT_ON_WECHAT_CONFIG_PATH=${CHATGPT_ON_WECHAT_CONFIG_PATH:-""}
8 | # execution command line
9 | CHATGPT_ON_WECHAT_EXEC=${CHATGPT_ON_WECHAT_EXEC:-""}
10 |
11 | OPEN_AI_API_KEY=${OPEN_AI_API_KEY:-""}
12 | OPEN_AI_PROXY=${OPEN_AI_PROXY:-""}
13 | SINGLE_CHAT_PREFIX=${SINGLE_CHAT_PREFIX:-""}
14 | SINGLE_CHAT_REPLY_PREFIX=${SINGLE_CHAT_REPLY_PREFIX:-""}
15 | GROUP_CHAT_PREFIX=${GROUP_CHAT_PREFIX:-""}
16 | GROUP_NAME_WHITE_LIST=${GROUP_NAME_WHITE_LIST:-""}
17 | IMAGE_CREATE_PREFIX=${IMAGE_CREATE_PREFIX:-""}
18 | CONVERSATION_MAX_TOKENS=${CONVERSATION_MAX_TOKENS:-""}
19 | SPEECH_RECOGNITION=${SPEECH_RECOGNITION:-""}
20 | CHARACTER_DESC=${CHARACTER_DESC:-""}
21 | EXPIRES_IN_SECONDS=${EXPIRES_IN_SECONDS:-""}
22 |
23 | VOICE_REPLY_VOICE=${VOICE_REPLY_VOICE:-""}
24 | BAIDU_APP_ID=${BAIDU_APP_ID:-""}
25 | BAIDU_API_KEY=${BAIDU_API_KEY:-""}
26 | BAIDU_SECRET_KEY=${BAIDU_SECRET_KEY:-""}
27 |
28 | # if CHATGPT_ON_WECHAT_PREFIX is empty, default to /app
29 | if [ "$CHATGPT_ON_WECHAT_PREFIX" == "" ] ; then
30 | CHATGPT_ON_WECHAT_PREFIX=/app
31 | fi
32 |
33 | # if CHATGPT_ON_WECHAT_CONFIG_PATH is empty, default to '/app/config.json'
34 | if [ "$CHATGPT_ON_WECHAT_CONFIG_PATH" == "" ] ; then
35 | CHATGPT_ON_WECHAT_CONFIG_PATH=$CHATGPT_ON_WECHAT_PREFIX/config.json
36 | fi
37 |
38 | # if CHATGPT_ON_WECHAT_EXEC is empty, default to 'python app.py'
39 | if [ "$CHATGPT_ON_WECHAT_EXEC" == "" ] ; then
40 | CHATGPT_ON_WECHAT_EXEC="python app.py"
41 | fi
42 |
43 | # modify content in config.json
44 | if [ "$OPEN_AI_API_KEY" != "" ] ; then
45 | sed -i "s/\"open_ai_api_key\".*,$/\"open_ai_api_key\": \"$OPEN_AI_API_KEY\",/" $CHATGPT_ON_WECHAT_CONFIG_PATH
46 | else
47 | echo -e "\033[31m[Warning] You need to set OPEN_AI_API_KEY before running!\033[0m"
48 | fi
49 |
50 | # use http_proxy as default
51 | if [ "$HTTP_PROXY" != "" ] ; then
52 | sed -i "s/\"proxy\".*,$/\"proxy\": \"$HTTP_PROXY\",/" $CHATGPT_ON_WECHAT_CONFIG_PATH
53 | fi
54 |
55 | if [ "$OPEN_AI_PROXY" != "" ] ; then
56 | sed -i "s/\"proxy\".*,$/\"proxy\": \"$OPEN_AI_PROXY\",/" $CHATGPT_ON_WECHAT_CONFIG_PATH
57 | fi
58 |
59 | if [ "$SINGLE_CHAT_PREFIX" != "" ] ; then
60 | sed -i "s/\"single_chat_prefix\".*,$/\"single_chat_prefix\": $SINGLE_CHAT_PREFIX,/" $CHATGPT_ON_WECHAT_CONFIG_PATH
61 | fi
62 |
63 | if [ "$SINGLE_CHAT_REPLY_PREFIX" != "" ] ; then
64 | sed -i "s/\"single_chat_reply_prefix\".*,$/\"single_chat_reply_prefix\": $SINGLE_CHAT_REPLY_PREFIX,/" $CHATGPT_ON_WECHAT_CONFIG_PATH
65 | fi
66 |
67 | if [ "$GROUP_CHAT_PREFIX" != "" ] ; then
68 | sed -i "s/\"group_chat_prefix\".*,$/\"group_chat_prefix\": $GROUP_CHAT_PREFIX,/" $CHATGPT_ON_WECHAT_CONFIG_PATH
69 | fi
70 |
71 | if [ "$GROUP_NAME_WHITE_LIST" != "" ] ; then
72 | sed -i "s/\"group_name_white_list\".*,$/\"group_name_white_list\": $GROUP_NAME_WHITE_LIST,/" $CHATGPT_ON_WECHAT_CONFIG_PATH
73 | fi
74 |
75 | if [ "$IMAGE_CREATE_PREFIX" != "" ] ; then
76 | sed -i "s/\"image_create_prefix\".*,$/\"image_create_prefix\": $IMAGE_CREATE_PREFIX,/" $CHATGPT_ON_WECHAT_CONFIG_PATH
77 | fi
78 |
79 | if [ "$CONVERSATION_MAX_TOKENS" != "" ] ; then
80 | sed -i "s/\"conversation_max_tokens\".*,$/\"conversation_max_tokens\": $CONVERSATION_MAX_TOKENS,/" $CHATGPT_ON_WECHAT_CONFIG_PATH
81 | fi
82 |
83 | if [ "$SPEECH_RECOGNITION" != "" ] ; then
84 | sed -i "s/\"speech_recognition\".*,$/\"speech_recognition\": $SPEECH_RECOGNITION,/" $CHATGPT_ON_WECHAT_CONFIG_PATH
85 | fi
86 |
87 | if [ "$CHARACTER_DESC" != "" ] ; then
88 | sed -i "s/\"character_desc\".*,$/\"character_desc\": \"$CHARACTER_DESC\",/" $CHATGPT_ON_WECHAT_CONFIG_PATH
89 | fi
90 |
91 | if [ "$EXPIRES_IN_SECONDS" != "" ] ; then
92 | sed -i "s/\"expires_in_seconds\".*$/\"expires_in_seconds\": $EXPIRES_IN_SECONDS/" $CHATGPT_ON_WECHAT_CONFIG_PATH
93 | fi
94 |
95 | # append optional keys to config.json
96 | if [ "$BAIDU_SECRET_KEY" != "" ] ; then
97 | sed -i "1a \ \ \"baidu_secret_key\": \"$BAIDU_SECRET_KEY\"," $CHATGPT_ON_WECHAT_CONFIG_PATH
98 | fi
99 |
100 | if [ "$BAIDU_API_KEY" != "" ] ; then
101 | sed -i "1a \ \ \"baidu_api_key\": \"$BAIDU_API_KEY\"," $CHATGPT_ON_WECHAT_CONFIG_PATH
102 | fi
103 |
104 | if [ "$BAIDU_APP_ID" != "" ] ; then
105 | sed -i "1a \ \ \"baidu_app_id\": \"$BAIDU_APP_ID\"," $CHATGPT_ON_WECHAT_CONFIG_PATH
106 | fi
107 |
108 | if [ "$VOICE_REPLY_VOICE" != "" ] ; then
109 | sed -i "1a \ \ \"voice_reply_voice\": $VOICE_REPLY_VOICE," $CHATGPT_ON_WECHAT_CONFIG_PATH
110 | fi
111 |
112 | # go to prefix dir
113 | cd $CHATGPT_ON_WECHAT_PREFIX
114 | # execute the configured command
115 | $CHATGPT_ON_WECHAT_EXEC
116 |
117 |
118 |
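Usage sketch: the script above only rewrites keys that already exist in config.json (via sed) or appends the baidu/voice keys, so it is normally driven entirely by environment variables. A minimal example, assuming an image built from the Dockerfiles in this directory (the image name and all values are placeholders):

```bash
docker run -d --name chatgpt-on-wechat-voice-reply \
  -e OPEN_AI_API_KEY="sk-your-key" \
  -e SPEECH_RECOGNITION=true \
  -e VOICE_REPLY_VOICE=true \
  -e BAIDU_APP_ID="your-baidu-app-id" \
  -e BAIDU_API_KEY="your-baidu-api-key" \
  -e BAIDU_SECRET_KEY="your-baidu-secret-key" \
  chatgpt-on-wechat-voice-reply:latest
```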
--------------------------------------------------------------------------------
/bot/linkai/link_ai_bot.py:
--------------------------------------------------------------------------------
1 | # access LinkAI knowledge base platform
2 | # docs: https://link-ai.tech/platform/link-app/wechat
3 |
4 | import time
5 |
6 | import requests
7 |
8 | from bot.bot import Bot
9 | from bot.chatgpt.chat_gpt_session import ChatGPTSession
10 | from bot.openai.open_ai_image import OpenAIImage
11 | from bot.session_manager import SessionManager
12 | from bridge.context import Context, ContextType
13 | from bridge.reply import Reply, ReplyType
14 | from common.log import logger
15 | from config import conf
16 |
17 |
18 | class LinkAIBot(Bot, OpenAIImage):
19 | # authentication failed
20 | AUTH_FAILED_CODE = 401
21 | NO_QUOTA_CODE = 406
22 |
23 | def __init__(self):
24 | super().__init__()
25 | self.base_url = "https://api.link-ai.chat/v1"
26 | self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo")
27 |
28 | def reply(self, query, context: Context = None) -> Reply:
29 | if context.type == ContextType.TEXT:
30 | return self._chat(query, context)
31 | elif context.type == ContextType.IMAGE_CREATE:
32 | ok, retstring = self.create_img(query, 0)
33 | reply = None
34 | if ok:
35 | reply = Reply(ReplyType.IMAGE_URL, retstring)
36 | else:
37 | reply = Reply(ReplyType.ERROR, retstring)
38 | return reply
39 | else:
40 | reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
41 | return reply
42 |
43 | def _chat(self, query, context, retry_count=0):
44 | if retry_count >= 2:
45 |             # give up after 2 retries
46 | logger.warn("[LINKAI] failed after maximum number of retry times")
47 | return Reply(ReplyType.ERROR, "请再问我一次吧")
48 |
49 | try:
50 | # load config
51 | if context.get("generate_breaked_by"):
52 | logger.info(f"[LINKAI] won't set appcode because a plugin ({context['generate_breaked_by']}) affected the context")
53 | app_code = None
54 | else:
55 | app_code = conf().get("linkai_app_code")
56 | linkai_api_key = conf().get("linkai_api_key")
57 |
58 | session_id = context["session_id"]
59 |
60 | session = self.sessions.session_query(query, session_id)
61 |
62 | # remove system message
63 | if app_code and session.messages[0].get("role") == "system":
64 | session.messages.pop(0)
65 |
66 | logger.info(f"[LINKAI] query={query}, app_code={app_code}")
67 |
68 | body = {
69 | "appCode": app_code,
70 | "messages": session.messages,
71 |                 "model": conf().get("model") or "gpt-3.5-turbo",  # name of the chat model
72 |                 "temperature": conf().get("temperature"),
73 |                 "top_p": conf().get("top_p", 1),
74 |                 "frequency_penalty": conf().get("frequency_penalty", 0.0),  # in [-2,2]; larger values favour more varied content
75 |                 "presence_penalty": conf().get("presence_penalty", 0.0),  # in [-2,2]; larger values favour more varied content
76 | }
77 | headers = {"Authorization": "Bearer " + linkai_api_key}
78 |
79 | # do http request
80 | res = requests.post(url=self.base_url + "/chat/completion", json=body, headers=headers).json()
81 |
82 | if not res or not res["success"]:
83 | if res.get("code") == self.AUTH_FAILED_CODE:
84 | logger.exception(f"[LINKAI] please check your linkai_api_key, res={res}")
85 | return Reply(ReplyType.ERROR, "请再问我一次吧")
86 |
87 | elif res.get("code") == self.NO_QUOTA_CODE:
88 | logger.exception(f"[LINKAI] please check your account quota, https://chat.link-ai.tech/console/account")
89 | return Reply(ReplyType.ERROR, "提问太快啦,请休息一下再问我吧")
90 |
91 | else:
92 | # retry
93 | time.sleep(2)
94 | logger.warn(f"[LINKAI] do retry, times={retry_count}")
95 | return self._chat(query, context, retry_count + 1)
96 |
97 |             # request succeeded
98 | reply_content = res["data"]["content"]
99 | logger.info(f"[LINKAI] reply={reply_content}")
100 | self.sessions.session_reply(reply_content, session_id)
101 | return Reply(ReplyType.TEXT, reply_content)
102 |
103 | except Exception as e:
104 | logger.exception(e)
105 | # retry
106 | time.sleep(2)
107 | logger.warn(f"[LINKAI] do retry, times={retry_count}")
108 | return self._chat(query, context, retry_count + 1)
109 |
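For reference, a minimal config.json sketch covering the keys this bot reads through conf() (values are placeholders; model falls back to gpt-3.5-turbo when unset):

```json
{
  "linkai_api_key": "your-linkai-api-key",
  "linkai_app_code": "your-linkai-app-code",
  "model": "gpt-3.5-turbo"
}
```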
--------------------------------------------------------------------------------
/voice/azure/azure_voice.py:
--------------------------------------------------------------------------------
1 | """
2 | azure voice service
3 | """
4 | import json
5 | import os
6 | import time
7 |
8 | import azure.cognitiveservices.speech as speechsdk
9 | from langid import classify
10 |
11 | from bridge.reply import Reply, ReplyType
12 | from common.log import logger
13 | from common.tmp_dir import TmpDir
14 | from config import conf
15 | from voice.voice import Voice
16 |
17 | """
18 | Azure voice
19 | The config file in the project root must contain azure_voice_api_key and azure_voice_region
20 |
21 | Browse the available voices: https://speech.microsoft.com/portal/voicegallery
22 |
23 | """
24 |
25 |
26 | class AzureVoice(Voice):
27 | def __init__(self):
28 | try:
29 | curdir = os.path.dirname(__file__)
30 | config_path = os.path.join(curdir, "config.json")
31 | config = None
32 |             if not os.path.exists(config_path):  # if there is no config file, create one locally
33 |                 config = {
34 |                     "speech_synthesis_voice_name": "zh-CN-XiaoxiaoNeural",  # default voice when the language cannot be detected
35 |                     "auto_detect": True,  # whether to auto-detect the language
36 | "speech_synthesis_zh": "zh-CN-XiaozhenNeural",
37 | "speech_synthesis_en": "en-US-JacobNeural",
38 | "speech_synthesis_ja": "ja-JP-AoiNeural",
39 | "speech_synthesis_ko": "ko-KR-SoonBokNeural",
40 | "speech_synthesis_de": "de-DE-LouisaNeural",
41 | "speech_synthesis_fr": "fr-FR-BrigitteNeural",
42 | "speech_synthesis_es": "es-ES-LaiaNeural",
43 | "speech_recognition_language": "zh-CN",
44 | }
45 | with open(config_path, "w") as fw:
46 | json.dump(config, fw, indent=4)
47 | else:
48 | with open(config_path, "r") as fr:
49 | config = json.load(fr)
50 | self.config = config
51 | self.api_key = conf().get("azure_voice_api_key")
52 | self.api_region = conf().get("azure_voice_region")
53 | self.speech_config = speechsdk.SpeechConfig(subscription=self.api_key, region=self.api_region)
54 | self.speech_config.speech_synthesis_voice_name = self.config["speech_synthesis_voice_name"]
55 | self.speech_config.speech_recognition_language = self.config["speech_recognition_language"]
56 | except Exception as e:
57 | logger.warn("AzureVoice init failed: %s, ignore " % e)
58 |
59 | def voiceToText(self, voice_file):
60 | audio_config = speechsdk.AudioConfig(filename=voice_file)
61 | speech_recognizer = speechsdk.SpeechRecognizer(speech_config=self.speech_config, audio_config=audio_config)
62 | result = speech_recognizer.recognize_once()
63 | if result.reason == speechsdk.ResultReason.RecognizedSpeech:
64 | logger.info("[Azure] voiceToText voice file name={} text={}".format(voice_file, result.text))
65 | reply = Reply(ReplyType.TEXT, result.text)
66 | else:
67 | cancel_details = result.cancellation_details
68 | logger.error("[Azure] voiceToText error, result={}, errordetails={}".format(result, cancel_details.error_details))
69 | reply = Reply(ReplyType.ERROR, "抱歉,语音识别失败")
70 | return reply
71 |
72 | def textToVoice(self, text):
73 | if self.config.get("auto_detect"):
74 | lang = classify(text)[0]
75 | key = "speech_synthesis_" + lang
76 | if key in self.config:
77 | logger.info("[Azure] textToVoice auto detect language={}, voice={}".format(lang, self.config[key]))
78 | self.speech_config.speech_synthesis_voice_name = self.config[key]
79 | else:
80 | self.speech_config.speech_synthesis_voice_name = self.config["speech_synthesis_voice_name"]
81 | else:
82 | self.speech_config.speech_synthesis_voice_name = self.config["speech_synthesis_voice_name"]
83 | # Avoid the same filename under multithreading
84 | fileName = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".wav"
85 | audio_config = speechsdk.AudioConfig(filename=fileName)
86 | speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=self.speech_config, audio_config=audio_config)
87 | result = speech_synthesizer.speak_text(text)
88 | if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
89 | logger.info("[Azure] textToVoice text={} voice file name={}".format(text, fileName))
90 | reply = Reply(ReplyType.VOICE, fileName)
91 | else:
92 | cancel_details = result.cancellation_details
93 | logger.error("[Azure] textToVoice error, result={}, errordetails={}".format(result, cancel_details.error_details))
94 | reply = Reply(ReplyType.ERROR, "抱歉,语音合成失败")
95 | return reply
96 |
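As the module docstring notes, the main config file must provide the Azure Speech credentials. A minimal sketch of those two entries (the region value is only an example):

```json
{
  "azure_voice_api_key": "your-azure-speech-key",
  "azure_voice_region": "eastasia"
}
```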
--------------------------------------------------------------------------------
/lib/itchat/async_components/register.py:
--------------------------------------------------------------------------------
1 | import logging, traceback, sys, threading
2 | try:
3 | import Queue
4 | except ImportError:
5 | import queue as Queue # type: ignore
6 |
7 | from ..log import set_logging
8 | from ..utils import test_connect
9 | from ..storage import templates
10 |
11 | logger = logging.getLogger('itchat')
12 |
13 | def load_register(core):
14 | core.auto_login = auto_login
15 | core.configured_reply = configured_reply
16 | core.msg_register = msg_register
17 | core.run = run
18 |
19 | async def auto_login(self, EventScanPayload=None,ScanStatus=None,event_stream=None,
20 | hotReload=True, statusStorageDir='itchat.pkl',
21 | enableCmdQR=False, picDir=None, qrCallback=None,
22 | loginCallback=None, exitCallback=None):
23 | if not test_connect():
24 | logger.info("You can't get access to internet or wechat domain, so exit.")
25 | sys.exit()
26 | self.useHotReload = hotReload
27 | self.hotReloadDir = statusStorageDir
28 | if hotReload:
29 | if await self.load_login_status(statusStorageDir,
30 | loginCallback=loginCallback, exitCallback=exitCallback):
31 | return
32 | await self.login(enableCmdQR=enableCmdQR, picDir=picDir, qrCallback=qrCallback, EventScanPayload=EventScanPayload, ScanStatus=ScanStatus, event_stream=event_stream,
33 | loginCallback=loginCallback, exitCallback=exitCallback)
34 | await self.dump_login_status(statusStorageDir)
35 | else:
36 | await self.login(enableCmdQR=enableCmdQR, picDir=picDir, qrCallback=qrCallback, EventScanPayload=EventScanPayload, ScanStatus=ScanStatus, event_stream=event_stream,
37 | loginCallback=loginCallback, exitCallback=exitCallback)
38 |
39 | async def configured_reply(self, event_stream, payload, message_container):
40 | ''' determine the type of message and reply if its method is defined
41 | however, I use a strange way to determine whether a msg is from massive platform
42 | I haven't found a better solution here
43 | The main problem I'm worrying about is the mismatching of new friends added on phone
44 | If you have any good idea, pleeeease report an issue. I will be more than grateful.
45 | '''
46 | try:
47 | msg = self.msgList.get(timeout=1)
48 | if 'MsgId' in msg.keys():
49 | message_container[msg['MsgId']] = msg
50 | except Queue.Empty:
51 | pass
52 | else:
53 | if isinstance(msg['User'], templates.User):
54 | replyFn = self.functionDict['FriendChat'].get(msg['Type'])
55 | elif isinstance(msg['User'], templates.MassivePlatform):
56 | replyFn = self.functionDict['MpChat'].get(msg['Type'])
57 | elif isinstance(msg['User'], templates.Chatroom):
58 | replyFn = self.functionDict['GroupChat'].get(msg['Type'])
59 | if replyFn is None:
60 | r = None
61 | else:
62 | try:
63 | r = await replyFn(msg)
64 | if r is not None:
65 | await self.send(r, msg.get('FromUserName'))
66 | except:
67 | logger.warning(traceback.format_exc())
68 |
69 | def msg_register(self, msgType, isFriendChat=False, isGroupChat=False, isMpChat=False):
70 | ''' a decorator constructor
71 | return a specific decorator based on information given '''
72 | if not (isinstance(msgType, list) or isinstance(msgType, tuple)):
73 | msgType = [msgType]
74 | def _msg_register(fn):
75 | for _msgType in msgType:
76 | if isFriendChat:
77 | self.functionDict['FriendChat'][_msgType] = fn
78 | if isGroupChat:
79 | self.functionDict['GroupChat'][_msgType] = fn
80 | if isMpChat:
81 | self.functionDict['MpChat'][_msgType] = fn
82 | if not any((isFriendChat, isGroupChat, isMpChat)):
83 | self.functionDict['FriendChat'][_msgType] = fn
84 | return fn
85 | return _msg_register
86 |
87 | async def run(self, debug=False, blockThread=True):
88 | logger.info('Start auto replying.')
89 | if debug:
90 | set_logging(loggingLevel=logging.DEBUG)
91 | async def reply_fn():
92 | try:
93 | while self.alive:
94 | await self.configured_reply()
95 | except KeyboardInterrupt:
96 | if self.useHotReload:
97 | await self.dump_login_status()
98 | self.alive = False
99 | logger.debug('itchat received an ^C and exit.')
100 | logger.info('Bye~')
101 | if blockThread:
102 | await reply_fn()
103 | else:
104 | replyThread = threading.Thread(target=reply_fn)
105 | replyThread.setDaemon(True)
106 | replyThread.start()
107 |
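A minimal sketch of the registration pattern that msg_register enables, written against the standard synchronous itchat interface for brevity (this async fork awaits the registered handler, and its auto_login/run are coroutines, so the exact entry points differ):

```python
# Register a text handler and start the reply loop (standard itchat usage).
import itchat
from itchat.content import TEXT

@itchat.msg_register(TEXT, isFriendChat=True)
def text_reply(msg):
    # echo the received text back to the sender
    return msg["Text"]

if __name__ == "__main__":
    itchat.auto_login(hotReload=True)  # scan the QR code on first login
    itchat.run()
```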
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/1.bug.yml:
--------------------------------------------------------------------------------
1 | name: Bug report 🐛
2 | description: 项目运行中遇到的Bug或问题。
3 | labels: ['status: needs check']
4 | body:
5 | - type: markdown
6 | attributes:
7 | value: |
8 | ### ⚠️ 前置确认
9 | 1. 网络能够访问openai接口
10 | 2. python 已安装:版本在 3.7 ~ 3.10 之间
11 | 3. `git pull` 拉取最新代码
12 | 4. 执行`pip3 install -r requirements.txt`,检查依赖是否满足
13 | 5. 拓展功能请执行`pip3 install -r requirements-optional.txt`,检查依赖是否满足
14 | 6. [FAQS](https://github.com/zhayujie/chatgpt-on-wechat/wiki/FAQs) 中无类似问题
15 | - type: checkboxes
16 | attributes:
17 | label: 前置确认
18 | options:
19 | - label: 我确认我运行的是最新版本的代码,并且安装了所需的依赖,在[FAQS](https://github.com/zhayujie/chatgpt-on-wechat/wiki/FAQs)中也未找到类似问题。
20 | required: true
21 | - type: checkboxes
22 | attributes:
23 | label: ⚠️ 搜索issues中是否已存在类似问题
24 | description: >
25 | 请在 [历史issue](https://github.com/zhayujie/chatgpt-on-wechat/issues) 中清空输入框,搜索你的问题
26 | 或相关日志的关键词来查找是否存在类似问题。
27 | options:
28 |           - label: 我已经搜索过issues和discussions,没有跟我遇到的问题相关的issue
29 | required: true
30 | - type: markdown
31 | attributes:
32 | value: |
33 | 请在上方的`title`中填写你对你所遇到问题的简略总结,这将帮助其他人更好的找到相似问题,谢谢❤️。
34 | - type: dropdown
35 | attributes:
36 | label: 操作系统类型?
37 | description: >
38 | 请选择你运行程序的操作系统类型。
39 | options:
40 | - Windows
41 | - Linux
42 | - MacOS
43 | - Docker
44 | - Railway
45 | - Windows Subsystem for Linux (WSL)
46 | - Other (请在问题中说明)
47 | validations:
48 | required: true
49 | - type: dropdown
50 | attributes:
51 | label: 运行的python版本是?
52 | description: |
53 | 请选择你运行程序的`python`版本。
54 | 注意:在`python 3.7`中,有部分可选依赖无法安装。
55 | 经过长时间的观察,我们认为`python 3.8`是兼容性最好的版本。
56 | `python 3.7`~`python 3.10`以外版本的issue,将视情况直接关闭。
57 | options:
58 | - python 3.7
59 | - python 3.8
60 | - python 3.9
61 | - python 3.10
62 | - other
63 | validations:
64 | required: true
65 | - type: dropdown
66 | attributes:
67 | label: 使用的chatgpt-on-wechat版本是?
68 | description: |
69 | 请确保你使用的是 [releases](https://github.com/zhayujie/chatgpt-on-wechat/releases) 中的最新版本。
70 | 如果你使用git, 请使用`git branch`命令来查看分支。
71 | options:
72 | - Latest Release
73 | - Master (branch)
74 | validations:
75 | required: true
76 | - type: dropdown
77 | attributes:
78 | label: 运行的`channel`类型是?
79 | description: |
80 | 请确保你正确配置了该`channel`所需的配置项,所有可选的配置项都写在了[该文件中](https://github.com/zhayujie/chatgpt-on-wechat/blob/master/config.py),请将所需配置项填写在根目录下的`config.json`文件中。
81 | options:
82 | - wx(个人微信, itchat)
83 | - wxy(个人微信, wechaty)
84 | - wechatmp(公众号, 订阅号)
85 | - wechatmp_service(公众号, 服务号)
86 | - terminal
87 | - other
88 | validations:
89 | required: true
90 | - type: textarea
91 | attributes:
92 | label: 复现步骤 🕹
93 | description: |
94 | **⚠️ 不能复现将会关闭issue.**
95 | - type: textarea
96 | attributes:
97 | label: 问题描述 😯
98 | description: 详细描述出现的问题,或提供有关截图。
99 | - type: textarea
100 | attributes:
101 | label: 终端日志 📒
102 | description: |
103 | 在此处粘贴终端日志,可在主目录下`run.log`文件中找到,这会帮助我们更好的分析问题,注意隐去你的API key。
104 | 如果在配置文件中加入`"debug": true`,打印出的日志会更有帮助。
105 |
106 |
107 | 示例
108 | ```log
109 | [DEBUG][2023-04-16 00:23:22][plugin_manager.py:157] - Plugin SUMMARY triggered by event Event.ON_HANDLE_CONTEXT
110 | [DEBUG][2023-04-16 00:23:22][main.py:221] - [Summary] on_handle_context. content: $总结前100条消息
111 | [DEBUG][2023-04-16 00:23:24][main.py:240] - [Summary] limit: 100, duration: -1 seconds
112 | [ERROR][2023-04-16 00:23:24][chat_channel.py:244] - Worker return exception: name 'start_date' is not defined
113 | Traceback (most recent call last):
114 | File "C:\ProgramData\Anaconda3\lib\concurrent\futures\thread.py", line 57, in run
115 | result = self.fn(*self.args, **self.kwargs)
116 | File "D:\project\chatgpt-on-wechat\channel\chat_channel.py", line 132, in _handle
117 | reply = self._generate_reply(context)
118 | File "D:\project\chatgpt-on-wechat\channel\chat_channel.py", line 142, in _generate_reply
119 | e_context = PluginManager().emit_event(EventContext(Event.ON_HANDLE_CONTEXT, {
120 | File "D:\project\chatgpt-on-wechat\plugins\plugin_manager.py", line 159, in emit_event
121 | instance.handlers[e_context.event](e_context, *args, **kwargs)
122 | File "D:\project\chatgpt-on-wechat\plugins\summary\main.py", line 255, in on_handle_context
123 | records = self._get_records(session_id, start_time, limit)
124 | File "D:\project\chatgpt-on-wechat\plugins\summary\main.py", line 96, in _get_records
125 | c.execute("SELECT * FROM chat_records WHERE sessionid=? and timestamp>? ORDER BY timestamp DESC LIMIT ?", (session_id, start_date, limit))
126 | NameError: name 'start_date' is not defined
127 | [INFO][2023-04-16 00:23:36][app.py:14] - signal 2 received, exiting...
128 | ```
129 |
130 | value: |
131 | ```log
132 | <此处粘贴终端日志>
133 | ```
--------------------------------------------------------------------------------
/lib/itchat/storage/__init__.py:
--------------------------------------------------------------------------------
1 | import os, time, copy
2 | from threading import Lock
3 |
4 | from .messagequeue import Queue
5 | from .templates import (
6 | ContactList, AbstractUserDict, User,
7 | MassivePlatform, Chatroom, ChatroomMember)
8 |
9 | def contact_change(fn):
10 | def _contact_change(core, *args, **kwargs):
11 | with core.storageClass.updateLock:
12 | return fn(core, *args, **kwargs)
13 | return _contact_change
14 |
15 | class Storage(object):
16 | def __init__(self, core):
17 | self.userName = None
18 | self.nickName = None
19 | self.updateLock = Lock()
20 | self.memberList = ContactList()
21 | self.mpList = ContactList()
22 | self.chatroomList = ContactList()
23 | self.msgList = Queue(-1)
24 | self.lastInputUserName = None
25 | self.memberList.set_default_value(contactClass=User)
26 | self.memberList.core = core
27 | self.mpList.set_default_value(contactClass=MassivePlatform)
28 | self.mpList.core = core
29 | self.chatroomList.set_default_value(contactClass=Chatroom)
30 | self.chatroomList.core = core
31 | def dumps(self):
32 | return {
33 | 'userName' : self.userName,
34 | 'nickName' : self.nickName,
35 | 'memberList' : self.memberList,
36 | 'mpList' : self.mpList,
37 | 'chatroomList' : self.chatroomList,
38 | 'lastInputUserName' : self.lastInputUserName, }
39 | def loads(self, j):
40 | self.userName = j.get('userName', None)
41 | self.nickName = j.get('nickName', None)
42 | del self.memberList[:]
43 | for i in j.get('memberList', []):
44 | self.memberList.append(i)
45 | del self.mpList[:]
46 | for i in j.get('mpList', []):
47 | self.mpList.append(i)
48 | del self.chatroomList[:]
49 | for i in j.get('chatroomList', []):
50 | self.chatroomList.append(i)
51 | # I tried to solve everything in pickle
52 | # but this way is easier and more storage-saving
53 | for chatroom in self.chatroomList:
54 | if 'MemberList' in chatroom:
55 | for member in chatroom['MemberList']:
56 | member.core = chatroom.core
57 | member.chatroom = chatroom
58 | if 'Self' in chatroom:
59 | chatroom['Self'].core = chatroom.core
60 | chatroom['Self'].chatroom = chatroom
61 | self.lastInputUserName = j.get('lastInputUserName', None)
62 | def search_friends(self, name=None, userName=None, remarkName=None, nickName=None,
63 | wechatAccount=None):
64 | with self.updateLock:
65 | if (name or userName or remarkName or nickName or wechatAccount) is None:
66 | return copy.deepcopy(self.memberList[0]) # my own account
67 | elif userName: # return the only userName match
68 | for m in self.memberList:
69 | if m['UserName'] == userName:
70 | return copy.deepcopy(m)
71 | else:
72 | matchDict = {
73 | 'RemarkName' : remarkName,
74 | 'NickName' : nickName,
75 | 'Alias' : wechatAccount, }
76 | for k in ('RemarkName', 'NickName', 'Alias'):
77 | if matchDict[k] is None:
78 | del matchDict[k]
79 | if name: # select based on name
80 | contact = []
81 | for m in self.memberList:
82 | if any([m.get(k) == name for k in ('RemarkName', 'NickName', 'Alias')]):
83 | contact.append(m)
84 | else:
85 | contact = self.memberList[:]
86 | if matchDict: # select again based on matchDict
87 | friendList = []
88 | for m in contact:
89 | if all([m.get(k) == v for k, v in matchDict.items()]):
90 | friendList.append(m)
91 | return copy.deepcopy(friendList)
92 | else:
93 | return copy.deepcopy(contact)
94 | def search_chatrooms(self, name=None, userName=None):
95 | with self.updateLock:
96 | if userName is not None:
97 | for m in self.chatroomList:
98 | if m['UserName'] == userName:
99 | return copy.deepcopy(m)
100 | elif name is not None:
101 | matchList = []
102 | for m in self.chatroomList:
103 | if name in m['NickName']:
104 | matchList.append(copy.deepcopy(m))
105 | return matchList
106 | def search_mps(self, name=None, userName=None):
107 | with self.updateLock:
108 | if userName is not None:
109 | for m in self.mpList:
110 | if m['UserName'] == userName:
111 | return copy.deepcopy(m)
112 | elif name is not None:
113 | matchList = []
114 | for m in self.mpList:
115 | if name in m['NickName']:
116 | matchList.append(copy.deepcopy(m))
117 | return matchList
118 |
--------------------------------------------------------------------------------
/channel/wechat/wechaty_channel.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | """
4 | wechaty channel
5 | Python Wechaty - https://github.com/wechaty/python-wechaty
6 | """
7 | import asyncio
8 | import base64
9 | import os
10 | import time
11 |
12 | from wechaty import Contact, Wechaty
13 | from wechaty.user import Message
14 | from wechaty_puppet import FileBox
15 |
16 | from bridge.context import *
17 | from bridge.context import Context
18 | from bridge.reply import *
19 | from channel.chat_channel import ChatChannel
20 | from channel.wechat.wechaty_message import WechatyMessage
21 | from common.log import logger
22 | from common.singleton import singleton
23 | from config import conf
24 |
25 | try:
26 | from voice.audio_convert import any_to_sil
27 | except Exception as e:
28 | pass
29 |
30 |
31 | @singleton
32 | class WechatyChannel(ChatChannel):
33 | NOT_SUPPORT_REPLYTYPE = []
34 |
35 | def __init__(self):
36 | super().__init__()
37 |
38 | def startup(self):
39 | config = conf()
40 | token = config.get("wechaty_puppet_service_token")
41 | os.environ["WECHATY_PUPPET_SERVICE_TOKEN"] = token
42 | asyncio.run(self.main())
43 |
44 | async def main(self):
45 | loop = asyncio.get_event_loop()
46 |         # pass the asyncio loop into the handler thread pool
47 | self.handler_pool._initializer = lambda: asyncio.set_event_loop(loop)
48 | self.bot = Wechaty()
49 | self.bot.on("login", self.on_login)
50 | self.bot.on("message", self.on_message)
51 | await self.bot.start()
52 |
53 | async def on_login(self, contact: Contact):
54 | self.user_id = contact.contact_id
55 | self.name = contact.name
56 | logger.info("[WX] login user={}".format(contact))
57 |
58 |     # Unified send method; each Channel implements its own, dispatching on reply.type to send different kinds of messages
59 | def send(self, reply: Reply, context: Context):
60 | receiver_id = context["receiver"]
61 | loop = asyncio.get_event_loop()
62 | if context["isgroup"]:
63 | receiver = asyncio.run_coroutine_threadsafe(self.bot.Room.find(receiver_id), loop).result()
64 | else:
65 | receiver = asyncio.run_coroutine_threadsafe(self.bot.Contact.find(receiver_id), loop).result()
66 | msg = None
67 | if reply.type == ReplyType.TEXT:
68 | msg = reply.content
69 | asyncio.run_coroutine_threadsafe(receiver.say(msg), loop).result()
70 | logger.info("[WX] sendMsg={}, receiver={}".format(reply, receiver))
71 | elif reply.type == ReplyType.ERROR or reply.type == ReplyType.INFO:
72 | msg = reply.content
73 | asyncio.run_coroutine_threadsafe(receiver.say(msg), loop).result()
74 | logger.info("[WX] sendMsg={}, receiver={}".format(reply, receiver))
75 | elif reply.type == ReplyType.VOICE:
76 | voiceLength = None
77 | file_path = reply.content
78 | sil_file = os.path.splitext(file_path)[0] + ".sil"
79 | voiceLength = int(any_to_sil(file_path, sil_file))
80 | if voiceLength >= 60000:
81 | voiceLength = 60000
82 | logger.info("[WX] voice too long, length={}, set to 60s".format(voiceLength))
83 |             # send the voice message
84 | t = int(time.time())
85 | msg = FileBox.from_file(sil_file, name=str(t) + ".sil")
86 | if voiceLength is not None:
87 | msg.metadata["voiceLength"] = voiceLength
88 | asyncio.run_coroutine_threadsafe(receiver.say(msg), loop).result()
89 | try:
90 | os.remove(file_path)
91 | if sil_file != file_path:
92 | os.remove(sil_file)
93 | except Exception as e:
94 | pass
95 | logger.info("[WX] sendVoice={}, receiver={}".format(reply.content, receiver))
96 |         elif reply.type == ReplyType.IMAGE_URL:  # download the image from a URL
97 | img_url = reply.content
98 | t = int(time.time())
99 | msg = FileBox.from_url(url=img_url, name=str(t) + ".png")
100 | asyncio.run_coroutine_threadsafe(receiver.say(msg), loop).result()
101 | logger.info("[WX] sendImage url={}, receiver={}".format(img_url, receiver))
102 |         elif reply.type == ReplyType.IMAGE:  # read the image from a file object
103 | image_storage = reply.content
104 | image_storage.seek(0)
105 | t = int(time.time())
106 | msg = FileBox.from_base64(base64.b64encode(image_storage.read()), str(t) + ".png")
107 | asyncio.run_coroutine_threadsafe(receiver.say(msg), loop).result()
108 | logger.info("[WX] sendImage, receiver={}".format(receiver))
109 |
110 | async def on_message(self, msg: Message):
111 | """
112 | listen for message event
113 | """
114 | try:
115 | cmsg = await WechatyMessage(msg)
116 | except NotImplementedError as e:
117 | logger.debug("[WX] {}".format(e))
118 | return
119 | except Exception as e:
120 | logger.exception("[WX] {}".format(e))
121 | return
122 | logger.debug("[WX] message:{}".format(cmsg))
123 |         room = msg.room()  # the chatroom this message came from; None if it is not a group chat
124 | isgroup = room is not None
125 | ctype = cmsg.ctype
126 | context = self._compose_context(ctype, cmsg.content, isgroup=isgroup, msg=cmsg)
127 | if context:
128 | logger.info("[WX] receiveMsg={}, context={}".format(cmsg, context))
129 | self.produce(context)
130 |
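For reference, a minimal config.json sketch for running this channel (the channel_type entry follows the wxy naming used elsewhere in this repo and is an assumption here; the token value is a placeholder):

```json
{
  "channel_type": "wxy",
  "wechaty_puppet_service_token": "your-puppet-service-token"
}
```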
--------------------------------------------------------------------------------
/bot/openai/open_ai_bot.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | import time
4 |
5 | import openai
6 | import openai.error
7 |
8 | from bot.bot import Bot
9 | from bot.openai.open_ai_image import OpenAIImage
10 | from bot.openai.open_ai_session import OpenAISession
11 | from bot.session_manager import SessionManager
12 | from bridge.context import ContextType
13 | from bridge.reply import Reply, ReplyType
14 | from common.log import logger
15 | from config import conf
16 |
17 | user_session = dict()
18 |
19 |
20 | # OpenAI completion model API (working)
21 | class OpenAIBot(Bot, OpenAIImage):
22 | def __init__(self):
23 | super().__init__()
24 | openai.api_key = conf().get("open_ai_api_key")
25 | if conf().get("open_ai_api_base"):
26 | openai.api_base = conf().get("open_ai_api_base")
27 | proxy = conf().get("proxy")
28 | if proxy:
29 | openai.proxy = proxy
30 |
31 | self.sessions = SessionManager(OpenAISession, model=conf().get("model") or "text-davinci-003")
32 | self.args = {
33 |             "model": conf().get("model") or "text-davinci-003",  # name of the completion model
34 |             "temperature": conf().get("temperature", 0.9),  # in [0,1]; larger values make replies less deterministic
35 |             "max_tokens": 1200,  # maximum number of tokens in the reply
36 |             "top_p": 1,
37 |             "frequency_penalty": conf().get("frequency_penalty", 0.0),  # in [-2,2]; larger values favour more varied content
38 |             "presence_penalty": conf().get("presence_penalty", 0.0),  # in [-2,2]; larger values favour more varied content
39 |             "request_timeout": conf().get("request_timeout", None),  # request timeout; the openai api default is 600, hard questions usually need longer
40 |             "timeout": conf().get("request_timeout", None),  # retry window; retries happen automatically within this time
41 | "stop": ["\n\n\n"],
42 | }
43 |
44 | def reply(self, query, context=None):
45 | # acquire reply content
46 | if context and context.type:
47 | if context.type == ContextType.TEXT:
48 | logger.info("[OPEN_AI] query={}".format(query))
49 | session_id = context["session_id"]
50 | reply = None
51 | if query == "#清除记忆":
52 | self.sessions.clear_session(session_id)
53 | reply = Reply(ReplyType.INFO, "记忆已清除")
54 | elif query == "#清除所有":
55 | self.sessions.clear_all_session()
56 | reply = Reply(ReplyType.INFO, "所有人记忆已清除")
57 | else:
58 | session = self.sessions.session_query(query, session_id)
59 | result = self.reply_text(session)
60 | total_tokens, completion_tokens, reply_content = (
61 | result["total_tokens"],
62 | result["completion_tokens"],
63 | result["content"],
64 | )
65 | logger.debug(
66 | "[OPEN_AI] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(str(session), session_id, reply_content, completion_tokens)
67 | )
68 |
69 | if total_tokens == 0:
70 | reply = Reply(ReplyType.ERROR, reply_content)
71 | else:
72 | self.sessions.session_reply(reply_content, session_id, total_tokens)
73 | reply = Reply(ReplyType.TEXT, reply_content)
74 | return reply
75 | elif context.type == ContextType.IMAGE_CREATE:
76 | ok, retstring = self.create_img(query, 0)
77 | reply = None
78 | if ok:
79 | reply = Reply(ReplyType.IMAGE_URL, retstring)
80 | else:
81 | reply = Reply(ReplyType.ERROR, retstring)
82 | return reply
83 |
84 | def reply_text(self, session: OpenAISession, retry_count=0):
85 | try:
86 | response = openai.Completion.create(prompt=str(session), **self.args)
87 | res_content = response.choices[0]["text"].strip().replace("<|endoftext|>", "")
88 | total_tokens = response["usage"]["total_tokens"]
89 | completion_tokens = response["usage"]["completion_tokens"]
90 | logger.info("[OPEN_AI] reply={}".format(res_content))
91 | return {
92 | "total_tokens": total_tokens,
93 | "completion_tokens": completion_tokens,
94 | "content": res_content,
95 | }
96 | except Exception as e:
97 | need_retry = retry_count < 2
98 | result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
99 | if isinstance(e, openai.error.RateLimitError):
100 | logger.warn("[OPEN_AI] RateLimitError: {}".format(e))
101 | result["content"] = "提问太快啦,请休息一下再问我吧"
102 | if need_retry:
103 | time.sleep(20)
104 | elif isinstance(e, openai.error.Timeout):
105 | logger.warn("[OPEN_AI] Timeout: {}".format(e))
106 | result["content"] = "我没有收到你的消息"
107 | if need_retry:
108 | time.sleep(5)
109 | elif isinstance(e, openai.error.APIConnectionError):
110 | logger.warn("[OPEN_AI] APIConnectionError: {}".format(e))
111 | need_retry = False
112 | result["content"] = "我连接不到你的网络"
113 | else:
114 | logger.warn("[OPEN_AI] Exception: {}".format(e))
115 | need_retry = False
116 | self.sessions.clear_session(session.session_id)
117 |
118 | if need_retry:
119 |             logger.warn("[OPEN_AI] retry attempt {}".format(retry_count + 1))
120 | return self.reply_text(session, retry_count + 1)
121 | else:
122 | return result
123 |
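For reference, a minimal config.json sketch covering the keys this bot reads through conf() (values are illustrative; open_ai_api_base and proxy are optional and omitted):

```json
{
  "open_ai_api_key": "sk-your-key",
  "model": "text-davinci-003",
  "temperature": 0.9,
  "frequency_penalty": 0.0,
  "presence_penalty": 0.0,
  "request_timeout": 120
}
```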
--------------------------------------------------------------------------------
/lib/itchat/utils.py:
--------------------------------------------------------------------------------
1 | import re, os, sys, subprocess, copy, traceback, logging
2 |
3 | try:
4 | from HTMLParser import HTMLParser
5 | except ImportError:
6 | from html.parser import HTMLParser
7 | try:
8 | from urllib import quote as _quote
9 | quote = lambda n: _quote(n.encode('utf8', 'replace'))
10 | except ImportError:
11 | from urllib.parse import quote
12 |
13 | import requests
14 |
15 | from . import config
16 |
17 | logger = logging.getLogger('itchat')
18 |
19 | emojiRegex = re.compile(r'<span class="emoji emoji(.{1,10})"></span>')
20 | htmlParser = HTMLParser()
21 | if not hasattr(htmlParser, 'unescape'):
22 | import html
23 | htmlParser.unescape = html.unescape
24 | # FIX Python 3.9 HTMLParser.unescape is removed. See https://docs.python.org/3.9/whatsnew/3.9.html
25 | try:
26 | b = u'\u2588'
27 | sys.stdout.write(b + '\r')
28 | sys.stdout.flush()
29 | except UnicodeEncodeError:
30 | BLOCK = 'MM'
31 | else:
32 | BLOCK = b
33 | friendInfoTemplate = {}
34 | for k in ('UserName', 'City', 'DisplayName', 'PYQuanPin', 'RemarkPYInitial', 'Province',
35 | 'KeyWord', 'RemarkName', 'PYInitial', 'EncryChatRoomId', 'Alias', 'Signature',
36 | 'NickName', 'RemarkPYQuanPin', 'HeadImgUrl'):
37 | friendInfoTemplate[k] = ''
38 | for k in ('UniFriend', 'Sex', 'AppAccountFlag', 'VerifyFlag', 'ChatRoomId', 'HideInputBarFlag',
39 | 'AttrStatus', 'SnsFlag', 'MemberCount', 'OwnerUin', 'ContactFlag', 'Uin',
40 | 'StarFriend', 'Statues'):
41 | friendInfoTemplate[k] = 0
42 | friendInfoTemplate['MemberList'] = []
43 |
44 | def clear_screen():
45 | os.system('cls' if config.OS == 'Windows' else 'clear')
46 |
47 | def emoji_formatter(d, k):
48 |     ''' _emoji_debugger is for bugs about emoji match caused by wechat backstage
49 | like :face with tears of joy: will be replaced with :cat face with tears of joy:
50 | '''
51 | def _emoji_debugger(d, k):
52 |         s = d[k].replace('<span class="emoji emoji1f450"></span',
53 |             '<span class="emoji emoji1f450"></span>') # fix missing bug
54 |         def __fix_miss_match(m):
55 |             return '<span class="emoji emoji%s"></span>' % ({
56 | '1f63c': '1f601', '1f639': '1f602', '1f63a': '1f603',
57 | '1f4ab': '1f616', '1f64d': '1f614', '1f63b': '1f60d',
58 | '1f63d': '1f618', '1f64e': '1f621', '1f63f': '1f622',
59 | }.get(m.group(1), m.group(1)))
60 | return emojiRegex.sub(__fix_miss_match, s)
61 | def _emoji_formatter(m):
62 | s = m.group(1)
63 | if len(s) == 6:
64 | return ('\\U%s\\U%s'%(s[:2].rjust(8, '0'), s[2:].rjust(8, '0'))
65 | ).encode('utf8').decode('unicode-escape', 'replace')
66 | elif len(s) == 10:
67 | return ('\\U%s\\U%s'%(s[:5].rjust(8, '0'), s[5:].rjust(8, '0'))
68 | ).encode('utf8').decode('unicode-escape', 'replace')
69 | else:
70 | return ('\\U%s'%m.group(1).rjust(8, '0')
71 | ).encode('utf8').decode('unicode-escape', 'replace')
72 | d[k] = _emoji_debugger(d, k)
73 | d[k] = emojiRegex.sub(_emoji_formatter, d[k])
74 |
75 | def msg_formatter(d, k):
76 | emoji_formatter(d, k)
77 |     d[k] = d[k].replace('<br/>', '\n')
78 | d[k] = htmlParser.unescape(d[k])
79 |
80 | def check_file(fileDir):
81 | try:
82 | with open(fileDir):
83 | pass
84 | return True
85 | except:
86 | return False
87 |
88 | def print_qr(fileDir):
89 | if config.OS == 'Darwin':
90 | subprocess.call(['open', fileDir])
91 | elif config.OS == 'Linux':
92 | subprocess.call(['xdg-open', fileDir])
93 | else:
94 | os.startfile(fileDir)
95 |
96 | def print_cmd_qr(qrText, white=BLOCK, black=' ', enableCmdQR=True):
97 | blockCount = int(enableCmdQR)
98 | if abs(blockCount) == 0:
99 | blockCount = 1
100 | white *= abs(blockCount)
101 | if blockCount < 0:
102 | white, black = black, white
103 | sys.stdout.write(' '*50 + '\r')
104 | sys.stdout.flush()
105 | qr = qrText.replace('0', white).replace('1', black)
106 | sys.stdout.write(qr)
107 | sys.stdout.flush()
108 |
109 | def struct_friend_info(knownInfo):
110 | member = copy.deepcopy(friendInfoTemplate)
111 | for k, v in copy.deepcopy(knownInfo).items(): member[k] = v
112 | return member
113 |
114 | def search_dict_list(l, key, value):
115 | ''' Search a list of dict
116 | * return dict with specific value & key '''
117 | for i in l:
118 | if i.get(key) == value:
119 | return i
120 |
121 | def print_line(msg, oneLine = False):
122 | if oneLine:
123 | sys.stdout.write(' '*40 + '\r')
124 | sys.stdout.flush()
125 | else:
126 | sys.stdout.write('\n')
127 | sys.stdout.write(msg.encode(sys.stdin.encoding or 'utf8', 'replace'
128 | ).decode(sys.stdin.encoding or 'utf8', 'replace'))
129 | sys.stdout.flush()
130 |
131 | def test_connect(retryTime=5):
132 | for i in range(retryTime):
133 | try:
134 | r = requests.get(config.BASE_URL)
135 | return True
136 | except:
137 | if i == retryTime - 1:
138 | logger.error(traceback.format_exc())
139 | return False
140 |
141 | def contact_deep_copy(core, contact):
142 | with core.storageClass.updateLock:
143 | return copy.deepcopy(contact)
144 |
145 | def get_image_postfix(data):
146 | data = data[:20]
147 | if b'GIF' in data:
148 | return 'gif'
149 | elif b'PNG' in data:
150 | return 'png'
151 | elif b'JFIF' in data:
152 | return 'jpg'
153 | return ''
154 |
155 | def update_info_dict(oldInfoDict, newInfoDict):
156 | ''' only normal values will be updated here
157 | because newInfoDict is normal dict, so it's not necessary to consider templates
158 | '''
159 | for k, v in newInfoDict.items():
160 | if any((isinstance(v, t) for t in (tuple, list, dict))):
161 | pass # these values will be updated somewhere else
162 | elif oldInfoDict.get(k) is None or v not in (None, '', '0', 0):
163 | oldInfoDict[k] = v
--------------------------------------------------------------------------------