├── .flake8
├── .github
├── ISSUE_TEMPLATE
│ ├── 1.bug.yml
│ └── 2.feature.yml
└── workflows
│ ├── deploy-image-arm.yml
│ └── deploy-image.yml
├── .gitignore
├── .pre-commit-config.yaml
├── Dockerfile
├── LICENSE
├── README.md
├── app.py
├── appui.py
├── bot
├── ali
│ ├── ali_qwen_bot.py
│ └── ali_qwen_session.py
├── baidu
│ ├── baidu_unit_bot.py
│ ├── baidu_wenxin.py
│ └── baidu_wenxin_session.py
├── bot.py
├── bot_factory.py
├── bytedance
│ └── bytedance_coze_bot.py
├── chatgpt
│ ├── chat_gpt_bot.py
│ └── chat_gpt_session.py
├── claude
│ ├── claude_ai_bot.py
│ └── claude_ai_session.py
├── claudeapi
│ └── claude_api_bot.py
├── dashscope
│ ├── dashscope_bot.py
│ └── dashscope_session.py
├── dify
│ ├── dify_bot.py
│ └── dify_session.py
├── gemini
│ └── google_gemini_bot.py
├── linkai
│ └── link_ai_bot.py
├── minimax
│ ├── minimax_bot.py
│ └── minimax_session.py
├── moonshot
│ ├── moonshot_bot.py
│ └── moonshot_session.py
├── openai
│ ├── open_ai_bot.py
│ ├── open_ai_image.py
│ ├── open_ai_session.py
│ └── open_ai_vision.py
├── session_manager.py
├── xunfei
│ └── xunfei_spark_bot.py
└── zhipuai
│ ├── zhipu_ai_image.py
│ ├── zhipu_ai_session.py
│ └── zhipuai_bot.py
├── bridge
├── bridge.py
├── context.py
└── reply.py
├── channel
├── channel.py
├── channel_factory.py
├── chat_channel.py
├── chat_message.py
├── dingtalk
│ ├── dingtalk_channel.py
│ └── dingtalk_message.py
├── feishu
│ ├── feishu_channel.py
│ └── feishu_message.py
├── terminal
│ └── terminal_channel.py
├── wechat
│ ├── wechat_channel.py
│ ├── wechat_message.py
│ ├── wechaty_channel.py
│ └── wechaty_message.py
├── wechatcom
│ ├── README.md
│ ├── wechatcomapp_channel.py
│ ├── wechatcomapp_client.py
│ └── wechatcomapp_message.py
├── wechatcs
│ ├── README.md
│ ├── wechatcomservice_channel.py
│ ├── wechatcomservice_client.py
│ └── wechatcomservice_message.py
├── wechatmp
│ ├── README.md
│ ├── active_reply.py
│ ├── common.py
│ ├── passive_reply.py
│ ├── wechatmp_channel.py
│ ├── wechatmp_client.py
│ └── wechatmp_message.py
└── wework
│ ├── run.py
│ ├── wework_channel.py
│ └── wework_message.py
├── common
├── const.py
├── dequeue.py
├── expired_dict.py
├── linkai_client.py
├── log.py
├── memory.py
├── package_manager.py
├── singleton.py
├── sorted_dict.py
├── time_check.py
├── tmp_dir.py
├── token_bucket.py
└── utils.py
├── config-template.json
├── config.py
├── docker
├── Dockerfile.latest
├── build.latest.sh
├── docker-compose.yml
└── entrypoint.sh
├── docs
├── audios
│ └── chengdu-disney.mp3
├── images
│ ├── image1.jpg
│ ├── image2.jpg
│ ├── image4.jpg
│ ├── image5.jpg
│ ├── image6.jpg
│ ├── plugin-suno-1.jpg
│ ├── plugin-suno-2.jpg
│ ├── supportme.jpg
│ ├── wechat.jpg
│ └── wework.jpg
└── version
│ └── old-version.md
├── dsl
└── chat-workflow.yml
├── lib
├── dify
│ └── dify_client.py
└── itchat
│ ├── LICENSE
│ ├── __init__.py
│ ├── async_components
│ ├── __init__.py
│ ├── contact.py
│ ├── hotreload.py
│ ├── login.py
│ ├── messages.py
│ └── register.py
│ ├── components
│ ├── __init__.py
│ ├── contact.py
│ ├── hotreload.py
│ ├── login.py
│ ├── messages.py
│ └── register.py
│ ├── config.py
│ ├── content.py
│ ├── core.py
│ ├── log.py
│ ├── returnvalues.py
│ ├── storage
│ ├── __init__.py
│ ├── messagequeue.py
│ └── templates.py
│ └── utils.py
├── nixpacks.toml
├── plugins
├── README.md
├── __init__.py
├── banwords
│ ├── .gitignore
│ ├── README.md
│ ├── __init__.py
│ ├── banwords.py
│ ├── banwords.txt.template
│ ├── config.json.template
│ └── lib
│ │ └── WordsSearch.py
├── bdunit
│ ├── README.md
│ ├── __init__.py
│ ├── bdunit.py
│ └── config.json.template
├── config.json.template
├── dungeon
│ ├── README.md
│ ├── __init__.py
│ └── dungeon.py
├── event.py
├── finish
│ ├── __init__.py
│ └── finish.py
├── godcmd
│ ├── README.md
│ ├── __init__.py
│ ├── config.json.template
│ └── godcmd.py
├── hello
│ ├── README.md
│ ├── __init__.py
│ ├── config.json.template
│ └── hello.py
├── jina_sum
│ ├── LICENSE
│ ├── README.md
│ ├── __init__.py
│ ├── config.json.template
│ ├── docs
│ │ └── images
│ │ │ ├── csdn.jpg
│ │ │ ├── red.jpg
│ │ │ └── wechat_mp.jpg
│ └── jina_sum.py
├── keyword
│ ├── README.md
│ ├── __init__.py
│ ├── config.json.template
│ ├── keyword.py
│ ├── test-keyword-more_replies.png
│ └── test-keyword.png
├── linkai
│ ├── README.md
│ ├── __init__.py
│ ├── config.json.template
│ ├── linkai.py
│ ├── midjourney.py
│ ├── summary.py
│ └── utils.py
├── plugin.py
├── plugin_manager.py
├── role
│ ├── README.md
│ ├── __init__.py
│ ├── role.py
│ └── roles.json
├── source.json
└── tool
│ ├── README.md
│ ├── __init__.py
│ ├── config.json.template
│ └── tool.py
├── pyproject.toml
├── requirements-optional.txt
├── requirements.txt
├── res
├── 111.png
├── 222.png
├── 333.png
├── 5555.png
└── qr-白话AGI-视频号-二维码.JPG
├── scripts
├── shutdown.sh
├── start.sh
└── tout.sh
├── start.sh
├── stop.sh
├── tail_log.sh
├── translate
├── baidu
│ └── baidu_translate.py
├── factory.py
└── translator.py
├── voice
├── ali
│ ├── ali_api.py
│ ├── ali_voice.py
│ └── config.json.template
├── audio_convert.py
├── azure
│ ├── azure_voice.py
│ └── config.json.template
├── baidu
│ ├── README.md
│ ├── baidu_voice.py
│ └── config.json.template
├── edge
│ └── edge_voice.py
├── elevent
│ └── elevent_voice.py
├── factory.py
├── google
│ └── google_voice.py
├── linkai
│ └── linkai_voice.py
├── openai
│ └── openai_voice.py
├── pytts
│ └── pytts_voice.py
├── voice.py
└── xunfei
│ ├── config.json.template
│ ├── xunfei_asr.py
│ ├── xunfei_tts.py
│ └── xunfei_voice.py
└── web_ui.py
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | max-line-length = 176
3 | select = E303,W293,W291,W292,E305,E231,E302
4 | exclude =
5 | .tox,
6 | __pycache__,
7 | *.pyc,
8 | .env
9 | venv/*
10 | .venv/*
11 | reports/*
12 | dist/*
13 | lib/*
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/2.feature.yml:
--------------------------------------------------------------------------------
1 | name: Feature request 🚀
2 | description: 提出你对项目的新想法或建议。
3 | labels: ['status: needs check']
4 | body:
5 | - type: markdown
6 | attributes:
7 | value: |
8 | 请在上方的`title`中填写简略总结,谢谢❤️。
9 | - type: checkboxes
10 | attributes:
11 | label: ⚠️ 搜索是否存在类似issue
12 | description: >
13 | 请在 [历史issue](https://github.com/zhayujie/chatgpt-on-wechat/issues) 中清空输入框,搜索关键词查找是否存在相似issue。
14 | options:
15 | - label: 我已经搜索过issues和disscussions,没有发现相似issue
16 | required: true
17 | - type: textarea
18 | attributes:
19 | label: 总结
20 | description: 描述feature的功能。
21 | - type: textarea
22 | attributes:
23 | label: 举例
24 | description: 提供聊天示例,草图或相关网址。
25 | - type: textarea
26 | attributes:
27 | label: 动机
28 | description: 描述你提出该feature的动机,比如没有这项feature对你的使用造成了怎样的影响。 请提供更详细的场景描述,这可能会帮助我们发现并提出更好的解决方案。
--------------------------------------------------------------------------------
/.github/workflows/deploy-image-arm.yml:
--------------------------------------------------------------------------------
1 | # This workflow uses actions that are not certified by GitHub.
2 | # They are provided by a third-party and are governed by
3 | # separate terms of service, privacy policy, and support
4 | # documentation.
5 |
6 | # GitHub recommends pinning actions to a commit SHA.
7 | # To get a newer version, you will need to update the SHA.
8 | # You can also reference a tag or branch, but the action may change without warning.
9 |
10 | name: Create and publish a Docker image
11 |
12 | on:
13 | push:
14 | branches: ['master']
15 | create:
16 | env:
17 | REGISTRY: ghcr.io
18 | IMAGE_NAME: ${{ github.repository }}
19 |
20 | jobs:
21 | build-and-push-image:
22 | if: github.repository == 'zhayujie/chatgpt-on-wechat'
23 | runs-on: ubuntu-latest
24 | permissions:
25 | contents: read
26 | packages: write
27 |
28 | steps:
29 | - name: Checkout repository
30 | uses: actions/checkout@v3
31 |
32 | - name: Set up QEMU
33 | uses: docker/setup-qemu-action@v1
34 |
35 | - name: Set up Docker Buildx
36 | id: buildx
37 | uses: docker/setup-buildx-action@v1
38 |
39 | - name: Available platforms
40 | run: echo ${{ steps.buildx.outputs.platforms }}
41 |
42 | - name: Log in to the Container registry
43 | uses: docker/login-action@v2
44 | with:
45 | registry: ${{ env.REGISTRY }}
46 | username: ${{ github.actor }}
47 | password: ${{ secrets.GITHUB_TOKEN }}
48 |
49 | - name: Extract metadata (tags, labels) for Docker
50 | id: meta
51 | uses: docker/metadata-action@v4
52 | with:
53 | images: |
54 | ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
55 |
56 | - name: Build and push Docker image
57 | uses: docker/build-push-action@v3
58 | with:
59 | context: .
60 | push: true
61 | file: ./docker/Dockerfile.latest
62 | platforms: linux/arm64
63 | tags: ${{ steps.meta.outputs.tags }}-arm64
64 | labels: ${{ steps.meta.outputs.labels }}
65 |
66 | - uses: actions/delete-package-versions@v4
67 | with:
68 | package-name: 'chatgpt-on-wechat'
69 | package-type: 'container'
70 | min-versions-to-keep: 10
71 | delete-only-untagged-versions: 'true'
72 | token: ${{ secrets.GITHUB_TOKEN }}
--------------------------------------------------------------------------------
/.github/workflows/deploy-image.yml:
--------------------------------------------------------------------------------
1 | # This workflow uses actions that are not certified by GitHub.
2 | # They are provided by a third-party and are governed by
3 | # separate terms of service, privacy policy, and support
4 | # documentation.
5 |
6 | # GitHub recommends pinning actions to a commit SHA.
7 | # To get a newer version, you will need to update the SHA.
8 | # You can also reference a tag or branch, but the action may change without warning.
9 |
10 | name: Create and publish a Docker image
11 |
12 | on:
13 | push:
14 | branches: ['master']
15 | create:
16 | env:
17 | REGISTRY: ghcr.io
18 | IMAGE_NAME: ${{ github.repository }}
19 |
20 | jobs:
21 | build-and-push-image:
22 | if: github.repository == 'zhayujie/chatgpt-on-wechat'
23 | runs-on: ubuntu-latest
24 | permissions:
25 | contents: read
26 | packages: write
27 |
28 | steps:
29 | - name: Checkout repository
30 | uses: actions/checkout@v3
31 |
32 | - name: Login to Docker Hub
33 | uses: docker/login-action@v2
34 | with:
35 | username: ${{ secrets.DOCKERHUB_USERNAME }}
36 | password: ${{ secrets.DOCKERHUB_TOKEN }}
37 |
38 | - name: Log in to the Container registry
39 | uses: docker/login-action@v2
40 | with:
41 | registry: ${{ env.REGISTRY }}
42 | username: ${{ github.actor }}
43 | password: ${{ secrets.GITHUB_TOKEN }}
44 |
45 | - name: Extract metadata (tags, labels) for Docker
46 | id: meta
47 | uses: docker/metadata-action@v4
48 | with:
49 | images: |
50 | ${{ env.IMAGE_NAME }}
51 | ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
52 |
53 | - name: Build and push Docker image
54 | uses: docker/build-push-action@v3
55 | with:
56 | context: .
57 | push: true
58 | file: ./docker/Dockerfile.latest
59 | tags: ${{ steps.meta.outputs.tags }}
60 | labels: ${{ steps.meta.outputs.labels }}
61 |
62 | - uses: actions/delete-package-versions@v4
63 | with:
64 | package-name: 'chatgpt-on-wechat'
65 | package-type: 'container'
66 | min-versions-to-keep: 10
67 | delete-only-untagged-versions: 'true'
68 | token: ${{ secrets.GITHUB_TOKEN }}
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .idea
3 | .vscode
4 | .venv
5 | .vs
6 | .wechaty/
7 | __pycache__/
8 | venv*
9 | *.pyc
10 | config.json
11 | QR.png
12 | nohup.out
13 | tmp
14 | plugins.json
15 | itchat.pkl
16 | *.log
17 | user_datas.pkl
18 | chatgpt_tool_hub/
19 | plugins/**/
20 | !plugins/bdunit
21 | !plugins/dungeon
22 | !plugins/finish
23 | !plugins/godcmd
24 | !plugins/tool
25 | !plugins/banwords
26 | !plugins/banwords/**/
27 | plugins/banwords/__pycache__
28 | plugins/banwords/lib/__pycache__
29 | !plugins/hello
30 | !plugins/role
31 | !plugins/keyword
32 | !plugins/linkai
33 | !plugins/jina_sum
34 | !plugins/jina_sum/**/
35 | client_config.json
36 | # qrcode
37 | wx_qrcode.png
38 |
39 | # auto coder
40 | .auto-coder
41 | actions
42 | output.txt
43 |
44 | # test bot
45 | test_dify.py
46 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/pre-commit/pre-commit-hooks
3 | rev: v4.4.0
4 | hooks:
5 | - id: fix-byte-order-marker
6 | - id: check-case-conflict
7 | - id: check-merge-conflict
8 | - id: debug-statements
9 | - id: pretty-format-json
10 | types: [text]
11 | files: \.json(.template)?$
12 | args: [ --autofix , --no-ensure-ascii, --indent=2, --no-sort-keys]
13 | - id: trailing-whitespace
14 | exclude: '(\/|^)lib\/'
15 | args: [ --markdown-linebreak-ext=md ]
16 | - repo: https://github.com/PyCQA/isort
17 | rev: 5.12.0
18 | hooks:
19 | - id: isort
20 | exclude: '(\/|^)lib\/'
21 | - repo: https://github.com/psf/black
22 | rev: 23.3.0
23 | hooks:
24 | - id: black
25 | exclude: '(\/|^)lib\/'
26 | - repo: https://github.com/PyCQA/flake8
27 | rev: 6.0.0
28 | hooks:
29 | - id: flake8
30 | exclude: '(\/|^)lib\/'
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM hanfangyuan4396/dify-on-wechat:latest
2 |
3 | ENTRYPOINT ["/entrypoint.sh"]
4 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2022 zhayujie
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy
4 | of this software and associated documentation files (the "Software"), to deal
5 | in the Software without restriction, including without limitation the rights
6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 | copies of the Software, and to permit persons to whom the Software is
8 | furnished to do so, subject to the following conditions:
9 |
10 | The above copyright notice and this permission notice shall be included in all
11 | copies or substantial portions of the Software.
12 |
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 | SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | #### ---------------------------------------------------------------------
2 | ✅ 该项目已停止维护,使用微信机器人请关注我的新项目(技术更合规,使用更简单) ✅ :https://github.com/ImGoodBai/GoodWeBot
3 | #### ---------------------------------------------------------------------
4 |
5 |
6 | # 简介
7 |
> 基于 https://github.com/hanfangyuan4396/dify-on-wechat 二次打包,想要获得原始的体验请访问原项目。
9 | ----------------------------------------
10 | **(最近官方暴力风控,使用一定注意防封号)**
11 | ----------------------------------------
12 |
13 | 增加功能如下:
14 |
15 | - ✅ **coze API支持:** 主要增加了图形化界面方便配置coze botid和token;
16 | - ✅ **一键运行:** 已支持一键打包为exe文件,下载后可直接使用,无需再配置python运行环境;
17 | - ✅ **只支持windows:** 只在windows11上测试过没问题。
18 |
19 | # 四步开启coze+微信机器人聊天
20 | ### 第一步:
21 | 进入release或者直接点下面地址下载一键包:
22 | https://github.com/ImGoodBai/onewebot2/releases/download/02/onewebot2.zip
23 | ### 第二步:
24 | 解压zip包后,双击运行app-ui.exe
25 |
26 |
27 |
28 | ### 第三步:
在输入框中填入coze平台的botid和token后,点击运行;(需提前在coze.cn注册并配置好bot获取id和token)
30 |
31 |
32 |
33 | ### 第四步:
用微信(有封号风险,建议小号测试)扫描登录,此时手机微信顶部会显示 *Web微信已登录* 字样
35 |
36 |
37 |
38 | ### 现在
39 | 如果看到类似下面信息,说明配置完成,开始微信对话体验AI聊天吧。
40 |
41 |
42 |
43 | ## 声明
44 |
45 | 1. 本项目遵循 [MIT开源协议](/LICENSE),仅用于技术研究和学习,使用本项目时需遵守所在地法律法规、相关政策以及企业章程,禁止用于任何违法或侵犯他人权益的行为
46 | 2. 境内使用该项目时,请使用国内厂商的大模型服务,并进行必要的内容安全审核及过滤
47 | 3. 本项目主要接入协同办公平台,推荐使用公众号、企微自建应用、钉钉、飞书等接入通道,其他通道为历史产物已不维护
48 | 4. 任何个人、团队和企业,无论以何种方式使用该项目、对何对象提供服务,所产生的一切后果,本项目均不承担任何责任
49 |
50 | ## 关注我了解更多搞钱项目
51 |
52 |
53 |
54 |
55 |
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | import os
4 | import signal
5 | import sys
6 | import time
7 |
8 | from channel import channel_factory
9 | from common import const
10 | from config import load_config
11 | from plugins import *
12 | import threading
13 |
14 |
def sigterm_handler_wrap(_signo):
    """Install a handler for *_signo* that saves user data before exiting.

    Any previously registered callable handler is chained after saving,
    so externally installed handlers keep working.
    """
    previous = signal.getsignal(_signo)

    def _handler(signo, stack_frame):
        logger.info("signal {} received, exiting...".format(signo))
        # Persist user data before the process goes away.
        conf().save_user_datas()
        if callable(previous):  # delegate to the prior handler when present
            return previous(signo, stack_frame)
        sys.exit(0)

    signal.signal(_signo, _handler)
26 |
27 |
def start_channel(channel_name: str):
    """Create the channel by name, load plugins when supported, and start it.

    :param channel_name: channel type code (e.g. "wx", "terminal", "wechatmp")
    """
    channel = channel_factory.create_channel(channel_name)
    # Plugins are only enabled for the interactive chat channels.
    if channel_name in ["wx", "wxy", "terminal", "wechatmp", "wechatmp_service", "wechatcom_app", "wework",
                        "wechatcom_service", const.FEISHU, const.DINGTALK]:
        PluginManager().load_plugins()

    if conf().get("use_linkai"):
        # Best-effort: a LinkAI client failure must not block channel startup,
        # but it should not be silently swallowed either (the old code did
        # `except Exception: pass`, hiding misconfiguration).
        try:
            from common import linkai_client
            threading.Thread(target=linkai_client.start, args=(channel,)).start()
        except Exception as e:
            logger.error("[LinkAI] client start failed: {}".format(e))
    channel.startup()
41 |
42 |
def run():
    """Entry point: load config, install signal handlers, start the channel."""
    try:
        load_config()
        # Save user data on Ctrl-C and on a kill signal.
        sigterm_handler_wrap(signal.SIGINT)
        sigterm_handler_wrap(signal.SIGTERM)

        # Decide which channel to start; "--cmd" forces the terminal channel.
        channel_name = conf().get("channel_type", "wx")
        if "--cmd" in sys.argv:
            channel_name = "terminal"
        if channel_name == "wxy":
            os.environ["WECHATY_LOG"] = "warn"

        start_channel(channel_name)

        # Keep the main thread alive while channel threads do the work.
        while True:
            time.sleep(1)
    except Exception as e:
        logger.error("App startup failed!")
        logger.exception(e)


if __name__ == "__main__":
    run()
72 |
--------------------------------------------------------------------------------
/bot/ali/ali_qwen_session.py:
--------------------------------------------------------------------------------
1 | from bot.session_manager import Session
2 | from common.log import logger
3 |
4 | """
5 | e.g.
6 | [
7 | {"role": "system", "content": "You are a helpful assistant."},
8 | {"role": "user", "content": "Who won the world series in 2020?"},
9 | {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
10 | {"role": "user", "content": "Where was it played?"}
11 | ]
12 | """
13 |
class AliQwenSession(Session):
    """Chat session for Ali Qwen ("qianwen") models.

    Holds the message history and trims it when the rough token estimate
    exceeds the configured budget.
    """

    def __init__(self, session_id, system_prompt=None, model="qianwen"):
        super().__init__(session_id, system_prompt)
        self.model = model
        # Seed the history with the system prompt (base-class reset).
        self.reset()

    def discard_exceeding(self, max_tokens, cur_tokens=None):
        """Drop oldest non-system messages until the estimate fits max_tokens.

        :param max_tokens: token budget for the whole history
        :param cur_tokens: fallback count used when precise counting fails
        :return: token count after trimming (approximate when counting failed)
        """
        precise = True
        try:
            cur_tokens = self.calc_tokens()
        except Exception as e:
            precise = False
            if cur_tokens is None:
                raise e
            logger.debug("Exception when counting tokens precisely for query: {}".format(e))
        while cur_tokens > max_tokens:
            if len(self.messages) > 2:
                # Keep messages[0] (system prompt); drop the oldest other message.
                self.messages.pop(1)
            elif len(self.messages) == 2 and self.messages[1]["role"] == "assistant":
                self.messages.pop(1)
                if precise:
                    cur_tokens = self.calc_tokens()
                else:
                    cur_tokens = cur_tokens - max_tokens
                break
            elif len(self.messages) == 2 and self.messages[1]["role"] == "user":
                # A single user message alone exceeds the budget; nothing to drop.
                logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
                break
            else:
                logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens, len(self.messages)))
                break
        if precise:
            cur_tokens = self.calc_tokens()
        else:
            cur_tokens = cur_tokens - max_tokens
        return cur_tokens

    def calc_tokens(self):
        # Delegates to the module-level length-based estimate below.
        return num_tokens_from_messages(self.messages, self.model)
53 |
def num_tokens_from_messages(messages, model):
    """Estimate the token count of *messages* for the given Qwen *model*.

    Aliyun's official rule ("roughly one token per Chinese character; one
    token per 3-4 English letters or one word") is approximated here by
    plain string length, which is good enough for history trimming.
    See https://help.aliyun.com/document_detail/2586397.html
    """
    return sum(len(message["content"]) for message in messages)
63 |
--------------------------------------------------------------------------------
/bot/baidu/baidu_unit_bot.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | import requests
4 |
5 | from bot.bot import Bot
6 | from bridge.reply import Reply, ReplyType
7 |
8 |
9 | # Baidu Unit对话接口 (可用, 但能力较弱)
class BaiduUnitBot(Bot):
    """Baidu UNIT dialogue bot (usable, but fairly weak)."""

    def reply(self, query, context=None):
        """Send *query* to the Baidu UNIT chat API and wrap the answer.

        :param query: user query text
        :param context: unused, kept for Bot interface compatibility
        :return: Reply with the presumed-history answer, or None on HTTP failure
        """
        import json  # stdlib; local import keeps the module import block untouched

        token = self.get_token()
        url = "https://aip.baidubce.com/rpc/2.0/unit/service/v3/chat?access_token=" + token
        # Build the payload with json.dumps so quotes/backslashes in the user
        # query are escaped correctly — the old string concatenation produced
        # invalid JSON for any query containing a double quote.
        payload = {
            "version": "3.0",
            "service_id": "S73177",
            "session_id": "",
            "log_id": "7758521",
            "skill_ids": ["1221886"],
            "request": {
                "terminal_id": "88888",
                "query": query,
                "hyper_params": {"chat_custom_bot_profile": 1},
            },
        }
        post_data = json.dumps(payload, ensure_ascii=False)
        print(post_data)
        headers = {"content-type": "application/x-www-form-urlencoded"}
        response = requests.post(url, data=post_data.encode(), headers=headers)
        if response:
            reply = Reply(
                ReplyType.TEXT,
                response.json()["result"]["context"]["SYS_PRESUMED_HIST"][1],
            )
            return reply

    def get_token(self):
        """Fetch an OAuth access token; the keys must be filled in by the user."""
        access_key = "YOUR_ACCESS_KEY"
        secret_key = "YOUR_SECRET_KEY"
        host = "https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=" + access_key + "&client_secret=" + secret_key
        response = requests.get(host)
        if response:
            print(response.json())
            return response.json()["access_token"]
37 |
--------------------------------------------------------------------------------
/bot/baidu/baidu_wenxin_session.py:
--------------------------------------------------------------------------------
1 | from bot.session_manager import Session
2 | from common.log import logger
3 |
4 | """
5 | e.g. [
6 | {"role": "user", "content": "Who won the world series in 2020?"},
7 | {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
8 | {"role": "user", "content": "Where was it played?"}
9 | ]
10 | """
11 |
12 |
class BaiduWenxinSession(Session):
    """Session for Baidu Wenxin; tokens are estimated by character count."""

    def __init__(self, session_id, system_prompt=None, model="gpt-3.5-turbo"):
        super().__init__(session_id, system_prompt)
        self.model = model
        # Baidu Wenxin has no system-prompt support, so the history is not
        # reset with a system message here.
        # self.reset()

    def discard_exceeding(self, max_tokens, cur_tokens=None):
        """Drop the oldest user/assistant pairs until the estimate fits.

        :param max_tokens: token budget for the whole history
        :param cur_tokens: fallback count used when precise counting fails
        :return: token count after trimming (approximate when counting failed)
        """
        precise = True
        try:
            cur_tokens = self.calc_tokens()
        except Exception as e:
            precise = False
            if cur_tokens is None:
                raise e
            logger.debug("Exception when counting tokens precisely for query: {}".format(e))
        while cur_tokens > max_tokens:
            if len(self.messages) < 2:
                logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens, len(self.messages)))
                break
            # Remove the oldest exchange (one user + one assistant message).
            del self.messages[0:2]
            if precise:
                cur_tokens = self.calc_tokens()
            else:
                cur_tokens = cur_tokens - max_tokens
        return cur_tokens

    def calc_tokens(self):
        """Estimated token usage of the whole message history."""
        return num_tokens_from_messages(self.messages, self.model)
44 |
45 |
def num_tokens_from_messages(messages, model):
    """Roughly estimate the token usage of *messages* for a Wenxin *model*.

    Baidu's official rule is vague (≈ Chinese characters + 1.3 x words in
    other languages), so a plain character count is used. The estimate only
    affects when old history gets discarded, not normal operation.
    """
    return sum(len(m["content"]) for m in messages)
54 |
--------------------------------------------------------------------------------
/bot/bot.py:
--------------------------------------------------------------------------------
1 | """
2 | Auto-replay chat robot abstract class
3 | """
4 |
5 |
6 | from bridge.context import Context
7 | from bridge.reply import Reply
8 |
9 |
class Bot(object):
    def reply(self, query, context: Context = None) -> Reply:
        """
        bot auto-reply content
        :param query: received message text
        :param context: optional Context carrying message metadata
        :return: reply content
        """
        raise NotImplementedError
18 |
--------------------------------------------------------------------------------
/bot/bot_factory.py:
--------------------------------------------------------------------------------
1 | """
2 | channel factory
3 | """
4 | from common import const
5 |
6 |
def create_bot(bot_type):
    """
    create a bot_type instance
    :param bot_type: bot type code (see common.const)
    :return: bot instance
    :raises RuntimeError: if bot_type is not a supported type code
    """
    # Bots are imported lazily so only the selected backend's dependencies
    # need to be installed.
    if bot_type == const.BAIDU:
        # Baidu Wenxin replaces the old Baidu Unit dialogue interface
        # from bot.baidu.baidu_unit_bot import BaiduUnitBot
        # return BaiduUnitBot()
        from bot.baidu.baidu_wenxin import BaiduWenxinBot
        return BaiduWenxinBot()

    elif bot_type == const.CHATGPT:
        # ChatGPT web interface
        from bot.chatgpt.chat_gpt_bot import ChatGPTBot
        return ChatGPTBot()

    elif bot_type == const.OPEN_AI:
        # OpenAI official completion API
        from bot.openai.open_ai_bot import OpenAIBot
        return OpenAIBot()

    elif bot_type == const.CHATGPTONAZURE:
        # Azure chatgpt service https://azure.microsoft.com/en-in/products/cognitive-services/openai-service/
        from bot.chatgpt.chat_gpt_bot import AzureChatGPTBot
        return AzureChatGPTBot()

    elif bot_type == const.XUNFEI:
        from bot.xunfei.xunfei_spark_bot import XunFeiBot
        return XunFeiBot()

    elif bot_type == const.LINKAI:
        from bot.linkai.link_ai_bot import LinkAIBot
        return LinkAIBot()

    elif bot_type == const.CLAUDEAI:
        from bot.claude.claude_ai_bot import ClaudeAIBot
        return ClaudeAIBot()

    elif bot_type == const.CLAUDEAPI:
        from bot.claudeapi.claude_api_bot import ClaudeAPIBot
        return ClaudeAPIBot()

    elif bot_type == const.QWEN:
        from bot.ali.ali_qwen_bot import AliQwenBot
        return AliQwenBot()

    elif bot_type == const.QWEN_DASHSCOPE:
        from bot.dashscope.dashscope_bot import DashscopeBot
        return DashscopeBot()

    elif bot_type == const.GEMINI:
        from bot.gemini.google_gemini_bot import GoogleGeminiBot
        return GoogleGeminiBot()

    elif bot_type == const.DIFY:
        from bot.dify.dify_bot import DifyBot
        return DifyBot()

    elif bot_type == const.ZHIPU_AI:
        from bot.zhipuai.zhipuai_bot import ZHIPUAIBot
        return ZHIPUAIBot()

    elif bot_type == const.COZE:
        from bot.bytedance.bytedance_coze_bot import ByteDanceCozeBot
        return ByteDanceCozeBot()

    elif bot_type == const.MOONSHOT:
        from bot.moonshot.moonshot_bot import MoonshotBot
        return MoonshotBot()

    elif bot_type == const.MiniMax:
        from bot.minimax.minimax_bot import MinimaxBot
        return MinimaxBot()

    # A bare `raise RuntimeError` gave callers no clue what went wrong.
    raise RuntimeError(f"Unsupported bot type: {bot_type}")
81 |
--------------------------------------------------------------------------------
/bot/claude/claude_ai_session.py:
--------------------------------------------------------------------------------
1 | from bot.session_manager import Session
2 |
3 |
class ClaudeAiSession(Session):
    """Session for the reverse-engineered Claude web bot."""

    def __init__(self, session_id, system_prompt=None, model="claude"):
        super().__init__(session_id, system_prompt)
        self.model = model
        # The reverse-engineered Claude API has no role-prompt support, so
        # the session is intentionally not reset with a system message.
        # self.reset()
10 |
--------------------------------------------------------------------------------
/bot/dashscope/dashscope_session.py:
--------------------------------------------------------------------------------
1 | from bot.session_manager import Session
2 | from common.log import logger
3 |
4 |
class DashscopeSession(Session):
    """Session for Dashscope (Qwen) bots; tokens estimated by char count."""

    def __init__(self, session_id, system_prompt=None, model="qwen-turbo"):
        # Forward system_prompt to the base class and keep the model: the
        # old code dropped both, so a configured character prompt was
        # silently ignored (unlike the sibling AliQwenSession).
        super().__init__(session_id, system_prompt)
        self.model = model
        self.reset()

    def discard_exceeding(self, max_tokens, cur_tokens=None):
        """Drop oldest non-system messages until the estimate fits max_tokens.

        :param max_tokens: token budget for the whole history
        :param cur_tokens: fallback count used when precise counting fails
        :return: token count after trimming (approximate when counting failed)
        """
        precise = True
        try:
            cur_tokens = self.calc_tokens()
        except Exception as e:
            precise = False
            if cur_tokens is None:
                raise e
            logger.debug("Exception when counting tokens precisely for query: {}".format(e))
        while cur_tokens > max_tokens:
            if len(self.messages) > 2:
                # Keep messages[0] (system prompt); drop the oldest other one.
                self.messages.pop(1)
            elif len(self.messages) == 2 and self.messages[1]["role"] == "assistant":
                self.messages.pop(1)
                if precise:
                    cur_tokens = self.calc_tokens()
                else:
                    cur_tokens = cur_tokens - max_tokens
                break
            elif len(self.messages) == 2 and self.messages[1]["role"] == "user":
                logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
                break
            else:
                logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens,
                                                                                       len(self.messages)))
                break
        if precise:
            cur_tokens = self.calc_tokens()
        else:
            cur_tokens = cur_tokens - max_tokens
        return cur_tokens

    def calc_tokens(self):
        """Estimated token usage of the current message history."""
        return num_tokens_from_messages(self.messages)
44 |
45 |
def num_tokens_from_messages(messages):
    """Rough token estimate by character count.

    Only an approximation; the exact rules are documented at
    https://help.aliyun.com/zh/dashscope/developer-reference/token-api?spm=a2c4g.11186623.0.0.4d8b12b0BkP3K9
    """
    return sum(len(m["content"]) for m in messages)
52 |
--------------------------------------------------------------------------------
/bot/dify/dify_session.py:
--------------------------------------------------------------------------------
1 | from common.expired_dict import ExpiredDict
2 | from config import conf
3 |
4 |
class DifySession(object):
    """Tracks one Dify conversation for a chat session."""

    def __init__(self, session_id: str, user: str, conversation_id: str = ''):
        self.__session_id = session_id
        self.__user = user
        self.__conversation_id = conversation_id
        self.__user_message_counter = 0

    def get_session_id(self):
        """Channel-level session identifier."""
        return self.__session_id

    def get_user(self):
        """User identifier sent to the Dify API."""
        return self.__user

    def get_conversation_id(self):
        """Current Dify conversation id ('' means a fresh conversation)."""
        return self.__conversation_id

    def set_conversation_id(self, conversation_id):
        self.__conversation_id = conversation_id

    def count_user_message(self):
        """Count one user message; reset the conversation once the cap is hit."""
        max_messages = conf().get("dify_conversation_max_messages", 5)
        if max_messages <= 0:
            # A non-positive cap disables the limit entirely.
            return
        if self.__user_message_counter >= max_messages:
            self.__user_message_counter = 0
            # FIXME: Dify cannot limit history length server-side yet, so the
            # conversation is cleared after N messages. There is no sliding
            # window, so history vanishes abruptly at the boundary.
            self.__conversation_id = ''

        self.__user_message_counter += 1
34 |
class DifySessionManager(object):
    """Caches session objects per session id, with optional expiry."""

    def __init__(self, sessioncls, **session_kwargs):
        # Use an expiring dict when a session TTL is configured.
        if conf().get("expires_in_seconds"):
            self.sessions = ExpiredDict(conf().get("expires_in_seconds"))
        else:
            self.sessions = dict()
        self.sessioncls = sessioncls
        self.session_kwargs = session_kwargs

    def _build_session(self, session_id: str, user: str):
        """Return the cached session for *session_id*, creating it if absent.

        A None session_id yields a throwaway session that is never cached.
        """
        if session_id is None:
            return self.sessioncls(session_id, user)
        if session_id not in self.sessions:
            self.sessions[session_id] = self.sessioncls(session_id, user)
        return self.sessions[session_id]

    def get_session(self, session_id, user):
        return self._build_session(session_id, user)

    def clear_session(self, session_id):
        """Drop a single cached session, if present."""
        if session_id in self.sessions:
            del self.sessions[session_id]

    def clear_all_session(self):
        """Drop every cached session."""
        self.sessions.clear()
67 |
--------------------------------------------------------------------------------
/bot/gemini/google_gemini_bot.py:
--------------------------------------------------------------------------------
1 | """
2 | Google gemini bot
3 |
4 | @author zhayujie
5 | @Date 2023/12/15
6 | """
7 | # encoding:utf-8
8 |
9 | from bot.bot import Bot
10 | import google.generativeai as genai
11 | from bot.session_manager import SessionManager
12 | from bridge.context import ContextType, Context
13 | from bridge.reply import Reply, ReplyType
14 | from common.log import logger
15 | from config import conf
16 | from bot.baidu.baidu_wenxin_session import BaiduWenxinSession
17 |
18 |
19 | # OpenAI对话模型API (可用)
# Google Gemini chat bot
class GoogleGeminiBot(Bot):
    """Bot implementation backed by Google's Gemini generative models."""

    def __init__(self):
        super().__init__()
        self.api_key = conf().get("gemini_api_key")
        # Reuse the Wenxin session type purely for its token accounting.
        self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("model") or "gpt-3.5-turbo")
        self.model = conf().get("model") or "gemini-pro"
        if self.model == "gemini":
            self.model = "gemini-pro"

    def reply(self, query, context: Context = None) -> Reply:
        """Produce a text reply for a text query; any failure yields an ERROR reply."""
        try:
            if context.type != ContextType.TEXT:
                logger.warn(f"[Gemini] Unsupported message type, type={context.type}")
                return Reply(ReplyType.TEXT, None)
            logger.info(f"[Gemini] query={query}")
            session_id = context["session_id"]
            session = self.sessions.session_query(query, session_id)
            history = self.filter_messages(session.messages)
            gemini_messages = self._convert_to_gemini_messages(history)
            genai.configure(api_key=self.api_key)
            generator = genai.GenerativeModel(self.model)
            reply_text = generator.generate_content(gemini_messages).text
            self.sessions.session_reply(reply_text, session_id)
            logger.info(f"[Gemini] reply={reply_text}")
            return Reply(ReplyType.TEXT, reply_text)
        except Exception as e:
            logger.error("[Gemini] fetch reply error, may contain unsafe content")
            logger.error(e)
            return Reply(ReplyType.ERROR, "invoke [Gemini] api failed!")

    def _convert_to_gemini_messages(self, messages: list):
        """Map OpenAI-style role/content dicts to Gemini role/parts dicts."""
        role_map = {"user": "user", "assistant": "model"}
        converted = []
        for msg in messages:
            gemini_role = role_map.get(msg.get("role"))
            if gemini_role is None:
                # Drop system/unknown roles; Gemini only accepts user/model.
                continue
            converted.append({
                "role": gemini_role,
                "parts": [{"text": msg.get("content")}],
            })
        return converted

    @staticmethod
    def filter_messages(messages: list):
        """Walk the history backwards, keeping messages that alternate
        user/assistant starting from the most recent user turn, and return
        them in chronological order."""
        kept = []
        expected = "user"
        for message in reversed(messages or []):
            if message.get("role") != expected:
                continue
            kept.append(message)
            expected = "assistant" if expected == "user" else "user"
        kept.reverse()
        return kept
82 |
--------------------------------------------------------------------------------
/bot/minimax/minimax_session.py:
--------------------------------------------------------------------------------
1 | from bot.session_manager import Session
2 | from common.log import logger
3 |
4 | """
5 | e.g.
6 | [
7 | {"role": "system", "content": "You are a helpful assistant."},
8 | {"role": "user", "content": "Who won the world series in 2020?"},
9 | {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
10 | {"role": "user", "content": "Where was it played?"}
11 | ]
12 | """
13 |
14 |
class MinimaxSession(Session):
    """Session for the MiniMax chat API.

    MiniMax messages use {"sender_type", "sender_name", "text"} items instead
    of the OpenAI-style {"role", "content"} pairs, so add_query/add_reply are
    overridden accordingly.
    """

    def __init__(self, session_id, system_prompt=None, model="minimax"):
        super().__init__(session_id, system_prompt)
        self.model = model
        # NOTE(review): reset() is deliberately left disabled here — it would
        # insert an OpenAI-style {"role": "system"} item that does not match
        # the MiniMax message schema above; confirm against the bot code.
        # self.reset()

    def add_query(self, query):
        # User turns carry the session id as the sender name.
        user_item = {"sender_type": "USER", "sender_name": self.session_id, "text": query}
        self.messages.append(user_item)

    def add_reply(self, reply):
        assistant_item = {"sender_type": "BOT", "sender_name": "MM智能助理", "text": reply}
        self.messages.append(assistant_item)

    def discard_exceeding(self, max_tokens, cur_tokens=None):
        """Drop the oldest turns until the token estimate fits max_tokens.

        Returns the (possibly estimated) token count after trimming. The
        counting error is re-raised only when no fallback cur_tokens was
        supplied by the caller.
        """
        precise = True
        try:
            cur_tokens = self.calc_tokens()
        except Exception as e:
            precise = False
            if cur_tokens is None:
                raise e
            logger.debug("Exception when counting tokens precisely for query: {}".format(e))
        while cur_tokens > max_tokens:
            if len(self.messages) > 2:
                # Keep the first item; drop the oldest of the rest.
                self.messages.pop(1)
            elif len(self.messages) == 2 and self.messages[1]["sender_type"] == "BOT":
                self.messages.pop(1)
                if precise:
                    cur_tokens = self.calc_tokens()
                else:
                    cur_tokens = cur_tokens - max_tokens
                break
            elif len(self.messages) == 2 and self.messages[1]["sender_type"] == "USER":
                logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
                break
            else:
                logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens, len(self.messages)))
                break
            if precise:
                cur_tokens = self.calc_tokens()
            else:
                # Rough fallback when exact counting is unavailable.
                cur_tokens = cur_tokens - max_tokens
        return cur_tokens

    def calc_tokens(self):
        # Character-count based estimate; see num_tokens_from_messages below.
        return num_tokens_from_messages(self.messages, self.model)
62 |
63 |
def num_tokens_from_messages(messages, model):
    """Roughly estimate the number of tokens used by a list of messages.

    Official rule of thumb: one token per Chinese character, and roughly one
    token per 3-4 English letters or one word; see
    https://help.aliyun.com/document_detail/2586397.html.
    The plain character count is a coarse estimate but good enough in practice.
    """
    return sum(len(msg["text"]) for msg in messages)
73 |
--------------------------------------------------------------------------------
/bot/moonshot/moonshot_session.py:
--------------------------------------------------------------------------------
1 | from bot.session_manager import Session
2 | from common.log import logger
3 |
4 |
class MoonshotSession(Session):
    """OpenAI-style chat session for Moonshot models."""

    def __init__(self, session_id, system_prompt=None, model="moonshot-v1-128k"):
        super().__init__(session_id, system_prompt)
        self.model = model
        self.reset()

    def discard_exceeding(self, max_tokens, cur_tokens=None):
        """Trim the oldest turns until the token estimate fits max_tokens.

        Returns the resulting token count; re-raises the counting error only
        when no fallback cur_tokens value was provided.
        """
        precise = True
        try:
            cur_tokens = self.calc_tokens()
        except Exception as err:
            precise = False
            if cur_tokens is None:
                raise err
            logger.debug("Exception when counting tokens precisely for query: {}".format(err))
        while cur_tokens > max_tokens:
            remaining = len(self.messages)
            if remaining > 2:
                # Keep the system item; drop the oldest turn and re-check.
                self.messages.pop(1)
            elif remaining == 2 and self.messages[1]["role"] == "assistant":
                self.messages.pop(1)
                cur_tokens = self.calc_tokens() if precise else cur_tokens - max_tokens
                break
            elif remaining == 2 and self.messages[1]["role"] == "user":
                logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
                break
            else:
                logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens, remaining))
                break
            cur_tokens = self.calc_tokens() if precise else cur_tokens - max_tokens
        return cur_tokens

    def calc_tokens(self):
        # Character-count based estimate; see num_tokens_from_messages below.
        return num_tokens_from_messages(self.messages, self.model)
45 |
46 |
def num_tokens_from_messages(messages, model):
    """Approximate token usage as the total character count of all contents."""
    return sum(len(msg["content"]) for msg in messages)
52 |
--------------------------------------------------------------------------------
/bot/openai/open_ai_image.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import openai
4 | import openai.error
5 | from bridge.reply import Reply, ReplyType
6 |
7 | from common.log import logger
8 | from common.token_bucket import TokenBucket
9 | from config import conf
10 |
11 |
12 | # OPENAI提供的画图接口
class OpenAIImage(object):
    """Image generation through OpenAI's DALL-E endpoint."""

    def __init__(self):
        openai.api_base = conf().get("open_ai_api_base")
        openai.api_key = conf().get("open_ai_api_key")
        # Optional rate limiting for image requests.
        if conf().get("rate_limit_dalle"):
            self.tb4dalle = TokenBucket(conf().get("rate_limit_dalle", 50))

    def create_img(self, query, retry_count=0, api_key=None, context=None):
        """Generate an image for `query`.

        Args:
            query: the image description prompt.
            retry_count: internal counter for rate-limit retries.
            api_key: per-call API key override; None uses the global key.
            context: pass the message context if the DALL-E 3 revised_prompt
                should be sent back to the user.

        Returns:
            (True, image_url) on success, (False, error_message) otherwise.
        """
        try:
            if conf().get("rate_limit_dalle") and not self.tb4dalle.get_token():
                return False, "请求太快了,请休息一下再问我吧"
            logger.info("[OPEN_AI] image_query={}".format(query))
            response = openai.Image.create(
                api_key=api_key,
                prompt=query,  # image description
                n=1,  # number of images per request
                model=conf().get("text_to_image") or "dall-e-2",
                # size=conf().get("image_create_size", "256x256"),  # 256x256 / 512x512 / 1024x1024
            )
            self.send_revised_prompt(context, response["data"][0].get("revised_prompt", ""), query)
            image_url = response["data"][0]["url"]
            logger.info("[OPEN_AI] image_url={}".format(image_url))
            return True, image_url
        except openai.error.RateLimitError as e:
            logger.warn(e)
            if retry_count < 1:
                time.sleep(5)
                logger.warn("[OPEN_AI] ImgCreate RateLimit exceed, 第{}次重试".format(retry_count + 1))
                # Bug fix: propagate api_key on retry — it was previously
                # dropped, silently falling back to the global key.
                return self.create_img(query, retry_count + 1, api_key=api_key, context=context)
            else:
                return False, "画图出现问题,请休息一下再问我吧"
        except Exception as e:
            logger.exception(e)
            return False, "画图出现问题,请休息一下再问我吧"

    def send_revised_prompt(self, context, revised_prompt, query):
        """Best-effort: send DALL-E 3's revised prompt back through the channel."""
        if not context or not revised_prompt:
            return
        try:
            channel = context.get("channel")
            reply = Reply(ReplyType.TEXT, f"revised_prompt:\n{revised_prompt}\n\n- - - - - - - - - - - -\n🎨 Dall-E画图:{query}")
            channel.send(reply, context)
        except Exception as e:
            logger.error(e)
--------------------------------------------------------------------------------
/bot/openai/open_ai_session.py:
--------------------------------------------------------------------------------
1 | from bot.session_manager import Session
2 | from common.log import logger
3 |
4 |
class OpenAISession(Session):
    """Session for OpenAI completion-style models (default text-davinci-003).

    Unlike the chat sessions, the whole history is rendered into one Q/A
    prompt string by __str__, and tokens are counted on that rendered string.
    """

    def __init__(self, session_id, system_prompt=None, model="text-davinci-003"):
        super().__init__(session_id, system_prompt)
        self.model = model
        self.reset()

    def __str__(self):
        # Build the input prompt for the completion model
        """
        e.g. Q: xxx
        A: xxx
        Q: xxx
        """
        prompt = ""
        for item in self.messages:
            if item["role"] == "system":
                prompt += item["content"] + "<|endoftext|>\n\n\n"
            elif item["role"] == "user":
                prompt += "Q: " + item["content"] + "\n"
            elif item["role"] == "assistant":
                prompt += "\n\nA: " + item["content"] + "<|endoftext|>\n"

        # When the last turn is a user question, cue the model with "A: ".
        if len(self.messages) > 0 and self.messages[-1]["role"] == "user":
            prompt += "A: "
        return prompt

    def discard_exceeding(self, max_tokens, cur_tokens=None):
        """Drop the oldest messages until the rendered prompt fits max_tokens.

        Returns the resulting token count (estimated from the prompt length
        when precise counting fails); re-raises the counting error only when
        no fallback cur_tokens was supplied.
        """
        precise = True
        try:
            cur_tokens = self.calc_tokens()
        except Exception as e:
            precise = False
            if cur_tokens is None:
                raise e
            logger.debug("Exception when counting tokens precisely for query: {}".format(e))
        while cur_tokens > max_tokens:
            if len(self.messages) > 1:
                # NOTE(review): this pops index 0, so the system prompt placed
                # at index 0 by reset() is discarded first — unlike the chat
                # sessions which pop index 1; confirm this is intended for the
                # completion-style prompt.
                self.messages.pop(0)
            elif len(self.messages) == 1 and self.messages[0]["role"] == "assistant":
                self.messages.pop(0)
                if precise:
                    cur_tokens = self.calc_tokens()
                else:
                    # Rough fallback: length of the rendered prompt.
                    cur_tokens = len(str(self))
                break
            elif len(self.messages) == 1 and self.messages[0]["role"] == "user":
                logger.warn("user question exceed max_tokens. total_tokens={}".format(cur_tokens))
                break
            else:
                logger.debug("max_tokens={}, total_tokens={}, len(conversation)={}".format(max_tokens, cur_tokens, len(self.messages)))
                break
            if precise:
                cur_tokens = self.calc_tokens()
            else:
                cur_tokens = len(str(self))
        return cur_tokens

    def calc_tokens(self):
        # Token count of the fully-rendered prompt string.
        return num_tokens_from_string(str(self), self.model)
64 |
65 |
66 | # refer to https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
def num_tokens_from_string(string: str, model: str) -> int:
    """Count the tokens in `string` using the tokenizer for `model`."""
    # Imported lazily so tiktoken is only required when this path is used.
    import tiktoken

    encoder = tiktoken.encoding_for_model(model)
    return len(encoder.encode(string, disallowed_special=()))
74 |
--------------------------------------------------------------------------------
/bot/openai/open_ai_vision.py:
--------------------------------------------------------------------------------
1 | import base64
2 |
3 | import requests
4 |
5 | from common.log import logger
6 | from common import const, utils, memory
7 | from config import conf
8 |
9 | # OPENAI提供的图像识别接口
class OpenAIVision(object):
    """Image understanding via OpenAI's GPT-4 Vision chat endpoint."""

    def do_vision_completion_if_need(self, session_id: str, query: str):
        """Run a vision completion when the user has a cached image.

        Returns a dict with token usage and content, or None when there is no
        cached image or image recognition is disabled.
        """
        img_cache = memory.USER_IMAGE_CACHE.get(session_id)
        if not img_cache or not conf().get("image_recognition"):
            return None
        response, err = self.vision_completion(query, img_cache)
        if err:
            return {"completion_tokens": 0, "content": f"识别图片异常, {err}"}
        # Consume the cached image once it has been used for a completion.
        memory.USER_IMAGE_CACHE[session_id] = None
        usage = response["usage"]
        return {
            "total_tokens": usage["total_tokens"],
            "completion_tokens": usage["completion_tokens"],
            "content": response['choices'][0]["message"]["content"],
        }

    def vision_completion(self, query: str, img_cache: dict):
        """POST a chat completion including the cached image.

        Returns (response_json, None) on HTTP 200, else (None, error_text).
        """
        msg = img_cache.get("msg")
        path = img_cache.get("path")
        msg.prepare()
        logger.info(f"[CHATGPT] query with images, path={path}")
        payload = {
            "model": const.GPT4_VISION_PREVIEW,
            "messages": self.build_vision_msg(query, path),
            "temperature": conf().get("temperature"),
            "top_p": conf().get("top_p", 1),
            "frequency_penalty": conf().get("frequency_penalty", 0.0),  # in [-2, 2]; higher favours novel content
            "presence_penalty": conf().get("presence_penalty", 0.0),  # in [-2, 2]; higher favours novel content
        }
        headers = {"Authorization": "Bearer " + conf().get("open_ai_api_key", "")}
        base_url = conf().get("open_ai_api_base", "https://api.openai.com/v1")
        res = requests.post(
            url=base_url + "/chat/completions",
            json=payload,
            headers=headers,
            timeout=conf().get("request_timeout", 180),
        )
        if res.status_code != 200:
            logger.error(f"[CHATGPT] vision completion, status_code={res.status_code}, response={res.text}")
            return None, res.text
        return res.json(), None

    def build_vision_msg(self, query: str, path: str):
        """Build the vision message list, embedding the image as a base64 data URL."""
        suffix = utils.get_path_suffix(path)
        with open(path, "rb") as file:
            base64_str = base64.b64encode(file.read()).decode('utf-8')
        text_part = {"type": "text", "text": query}
        image_part = {
            "type": "image_url",
            "image_url": {"url": f"data:image/{suffix};base64,{base64_str}"},
        }
        return [{"role": "user", "content": [text_part, image_part]}]
69 |
--------------------------------------------------------------------------------
/bot/session_manager.py:
--------------------------------------------------------------------------------
1 | from common.expired_dict import ExpiredDict
2 | from common.log import logger
3 | from config import conf
4 |
5 |
class Session(object):
    """Base chat session: the message history plus a system prompt, keyed by
    one session_id. Token accounting is delegated to subclasses."""

    def __init__(self, session_id, system_prompt=None):
        self.session_id = session_id
        self.messages = []
        # Fall back to the configured character description when no explicit
        # system prompt is supplied.
        self.system_prompt = conf().get("character_desc", "") if system_prompt is None else system_prompt

    def reset(self):
        """Clear the history, keeping only the system prompt."""
        self.messages = [{"role": "system", "content": self.system_prompt}]

    def set_system_prompt(self, system_prompt):
        """Replace the system prompt and restart the conversation."""
        self.system_prompt = system_prompt
        self.reset()

    def add_query(self, query):
        """Append a user turn to the history."""
        self.messages.append({"role": "user", "content": query})

    def add_reply(self, reply):
        """Append an assistant turn to the history."""
        self.messages.append({"role": "assistant", "content": reply})

    def discard_exceeding(self, max_tokens=None, cur_tokens=None):
        """Trim history to fit max_tokens; implemented by subclasses."""
        raise NotImplementedError

    def calc_tokens(self):
        """Count tokens in the history; implemented by subclasses."""
        raise NotImplementedError
37 |
38 |
class SessionManager(object):
    """Creates and caches Session objects keyed by session_id."""

    def __init__(self, sessioncls, **session_args):
        # With a configured TTL, idle sessions expire automatically.
        expires = conf().get("expires_in_seconds")
        self.sessions = ExpiredDict(expires) if expires else dict()
        self.sessioncls = sessioncls
        self.session_args = session_args

    def build_session(self, session_id, system_prompt=None):
        """Fetch the cached session for session_id, creating it on first use.

        A non-None system_prompt on an existing session replaces its prompt
        and resets it; a None session_id returns a transient, uncached session.
        """
        if session_id is None:
            return self.sessioncls(session_id, system_prompt, **self.session_args)

        if session_id not in self.sessions:
            self.sessions[session_id] = self.sessioncls(session_id, system_prompt, **self.session_args)
        elif system_prompt is not None:
            # A fresh system prompt updates and resets the existing session.
            self.sessions[session_id].set_system_prompt(system_prompt)
        return self.sessions[session_id]

    def session_query(self, query, session_id):
        """Append a user query to the session and trim it to the token budget."""
        session = self.build_session(session_id)
        session.add_query(query)
        try:
            max_tokens = conf().get("conversation_max_tokens", 1000)
            total_tokens = session.discard_exceeding(max_tokens, None)
            logger.debug("prompt tokens used={}".format(total_tokens))
        except Exception as e:
            logger.warning("Exception when counting tokens precisely for prompt: {}".format(str(e)))
        return session

    def session_reply(self, reply, session_id, total_tokens=None):
        """Append an assistant reply to the session and trim to the budget."""
        session = self.build_session(session_id)
        session.add_reply(reply)
        try:
            max_tokens = conf().get("conversation_max_tokens", 1000)
            tokens_cnt = session.discard_exceeding(max_tokens, total_tokens)
            logger.debug("raw total_tokens={}, savesession tokens={}".format(total_tokens, tokens_cnt))
        except Exception as e:
            logger.warning("Exception when counting tokens precisely for session: {}".format(str(e)))
        return session

    def clear_session(self, session_id):
        """Forget one cached session, if present."""
        if session_id in self.sessions:
            del self.sessions[session_id]

    def clear_all_session(self):
        """Forget every cached session."""
        self.sessions.clear()
92 |
--------------------------------------------------------------------------------
/bot/zhipuai/zhipu_ai_image.py:
--------------------------------------------------------------------------------
1 | from common.log import logger
2 | from config import conf
3 |
4 |
5 | # ZhipuAI提供的画图接口
6 |
class ZhipuAIImage(object):
    """Image generation via ZhipuAI's CogView models."""

    def __init__(self):
        from zhipuai import ZhipuAI
        self.client = ZhipuAI(api_key=conf().get("zhipu_ai_api_key"))
        # Mirror OpenAIImage: create a token bucket when rate limiting is on.
        if conf().get("rate_limit_dalle"):
            from common.token_bucket import TokenBucket
            self.tb4dalle = TokenBucket(conf().get("rate_limit_dalle", 50))

    def create_img(self, query, retry_count=0, api_key=None, api_base=None):
        """Generate an image for `query`.

        Returns (True, image_url) on success, (False, error_message) otherwise.
        """
        try:
            # Bug fix: previously EVERY request was rejected as soon as
            # `rate_limit_dalle` was configured (the token bucket was never
            # consulted). Now a request is only rejected when the bucket is
            # actually exhausted, matching OpenAIImage.
            if conf().get("rate_limit_dalle") and not self.tb4dalle.get_token():
                return False, "请求太快了,请休息一下再问我吧"
            logger.info("[ZHIPU_AI] image_query={}".format(query))
            response = self.client.images.generations(
                prompt=query,
                n=1,  # number of images per request
                model=conf().get("text_to_image") or "cogview-3",
                size=conf().get("image_create_size", "1024x1024"),  # 256x256 / 512x512 / 1024x1024
                quality="standard",
            )
            image_url = response.data[0].url
            logger.info("[ZHIPU_AI] image_url={}".format(image_url))
            return True, image_url
        except Exception as e:
            logger.exception(e)
            return False, "画图出现问题,请休息一下再问我吧"
30 |
--------------------------------------------------------------------------------
/bot/zhipuai/zhipu_ai_session.py:
--------------------------------------------------------------------------------
1 | from bot.session_manager import Session
2 | from common.log import logger
3 |
4 |
class ZhipuAISession(Session):
    """OpenAI-style chat session for ZhipuAI GLM models."""

    def __init__(self, session_id, system_prompt=None, model="glm-4"):
        super().__init__(session_id, system_prompt)
        self.model = model
        self.reset()
        # GLM expects a non-empty system/character description.
        if not system_prompt:
            logger.warn("[ZhiPu] `character_desc` can not be empty")

    def discard_exceeding(self, max_tokens, cur_tokens=None):
        """Trim the oldest turns until the token estimate fits max_tokens.

        Returns the resulting token count; re-raises the counting error only
        when no fallback cur_tokens value was provided.
        """
        precise = True
        try:
            cur_tokens = self.calc_tokens()
        except Exception as exc:
            precise = False
            if cur_tokens is None:
                raise exc
            logger.debug("Exception when counting tokens precisely for query: {}".format(exc))
        while cur_tokens > max_tokens:
            num_messages = len(self.messages)
            if num_messages > 2:
                # Keep the system item; drop the oldest turn and re-check.
                self.messages.pop(1)
            elif num_messages == 2 and self.messages[1]["role"] == "assistant":
                self.messages.pop(1)
                cur_tokens = self.calc_tokens() if precise else cur_tokens - max_tokens
                break
            elif num_messages == 2 and self.messages[1]["role"] == "user":
                logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
                break
            else:
                logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens, num_messages))
                break
            cur_tokens = self.calc_tokens() if precise else cur_tokens - max_tokens
        return cur_tokens

    def calc_tokens(self):
        # Character-count based estimate; see num_tokens_from_messages below.
        return num_tokens_from_messages(self.messages, self.model)
47 |
48 |
def num_tokens_from_messages(messages, model):
    """Approximate token usage as the total character count of all contents."""
    return sum(len(msg["content"]) for msg in messages)
54 |
--------------------------------------------------------------------------------
/bridge/context.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | from enum import Enum
4 |
5 |
class ContextType(Enum):
    """Kinds of inbound message/context handled by the channels."""

    TEXT = 1  # plain text message
    VOICE = 2  # audio message
    IMAGE = 3  # picture message
    FILE = 4  # file message
    VIDEO = 5  # video message
    SHARING = 6  # shared link / card

    IMAGE_CREATE = 10  # command: create an image
    ACCEPT_FRIEND = 19  # friend request accepted
    JOIN_GROUP = 20  # member joined a group
    PATPAT = 21  # "pat-pat" nudge
    FUNCTION = 22  # function call
    EXIT_GROUP = 23  # member left a group

    def __str__(self):
        return self.name
24 |
25 |
class Context:
    """Carries one inbound message's type, content, and per-message metadata.

    Extra metadata lives in `kwargs` and is exposed through the mapping-style
    dunder methods, so `context["session_id"]` works alongside
    `context.type` / `context.content`.
    """

    def __init__(self, type: "ContextType" = None, content=None, kwargs=None):
        self.type = type
        self.content = content
        # Bug fix: the previous `kwargs=dict()` default was a shared mutable
        # default — every Context created without an explicit kwargs argument
        # aliased the SAME dict, leaking metadata across messages.
        self.kwargs = kwargs if kwargs is not None else {}

    def __contains__(self, key):
        if key == "type":
            return self.type is not None
        elif key == "content":
            return self.content is not None
        else:
            return key in self.kwargs

    def __getitem__(self, key):
        if key == "type":
            return self.type
        elif key == "content":
            return self.content
        else:
            return self.kwargs[key]

    def get(self, key, default=None):
        """Dict-style get: return `default` instead of raising KeyError."""
        try:
            return self[key]
        except KeyError:
            return default

    def __setitem__(self, key, value):
        if key == "type":
            self.type = value
        elif key == "content":
            self.content = value
        else:
            self.kwargs[key] = value

    def __delitem__(self, key):
        # type/content are reset to None rather than removed; metadata keys
        # are actually deleted from kwargs.
        if key == "type":
            self.type = None
        elif key == "content":
            self.content = None
        else:
            del self.kwargs[key]

    def __str__(self):
        return "Context(type={}, content={}, kwargs={})".format(self.type, self.content, self.kwargs)
72 |
--------------------------------------------------------------------------------
/bridge/reply.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | from enum import Enum
4 |
5 |
class ReplyType(Enum):
    """Kinds of outbound reply a channel can send."""

    TEXT = 1  # plain text
    VOICE = 2  # audio file
    IMAGE = 3  # image file
    IMAGE_URL = 4  # image by URL
    VIDEO_URL = 5  # video by URL
    FILE = 6  # file
    CARD = 7  # WeChat contact card (ntchat only)
    INVITE_ROOM = 8  # invite a friend into a group
    INFO = 9
    ERROR = 10
    TEXT_ = 11  # forced text
    VIDEO = 12
    MINIAPP = 13  # mini program
    ACCEPT_FRIEND = 19  # accept a friend request

    def __str__(self):
        return self.name
24 |
25 |
class Reply:
    """A single outbound reply: its type plus the payload to send."""

    def __init__(self, type: ReplyType = None, content=None):
        self.type = type
        self.content = content

    def __str__(self):
        return f"Reply(type={self.type}, content={self.content})"
33 |
--------------------------------------------------------------------------------
/channel/channel.py:
--------------------------------------------------------------------------------
1 | """
2 | Message sending channel abstract class
3 | """
4 |
5 | from bridge.bridge import Bridge
6 | from bridge.context import Context
7 | from bridge.reply import *
8 |
9 |
class Channel(object):
    """Abstract base for all message channels.

    A channel receives platform messages and sends Reply objects back; the
    build_* helpers delegate bot work to a Bridge instance.
    """

    # Type code assigned by the channel factory (e.g. "wx", "terminal").
    channel_type = ""
    # Reply types this channel does not support natively.
    NOT_SUPPORT_REPLYTYPE = [ReplyType.VOICE, ReplyType.IMAGE]

    def startup(self):
        """
        Initialize the channel; must be implemented by subclasses.
        """
        raise NotImplementedError

    def handle_text(self, msg):
        """
        Process a received message.
        :param msg: message object
        """
        raise NotImplementedError

    # Unified send entry point: each Channel implements it and dispatches on
    # reply.type to deliver the different kinds of messages.
    def send(self, reply: Reply, context: Context):
        """
        Send a reply to the user.
        :param reply: the Reply to deliver
        :param context: context of the originating message (addressing info)
        :return:
        """
        raise NotImplementedError

    def build_reply_content(self, query, context: Context = None) -> Reply:
        """Fetch a bot reply for `query` via the Bridge."""
        return Bridge().fetch_reply_content(query, context)

    def build_voice_to_text(self, voice_file) -> Reply:
        """Transcribe a voice file via the Bridge."""
        return Bridge().fetch_voice_to_text(voice_file)

    def build_text_to_voice(self, text) -> Reply:
        """Synthesize speech for `text` via the Bridge."""
        return Bridge().fetch_text_to_voice(text)
45 |
--------------------------------------------------------------------------------
/channel/channel_factory.py:
--------------------------------------------------------------------------------
1 | """
2 | channel factory
3 | """
4 | from common import const
5 | from .channel import Channel
6 |
7 |
def create_channel(channel_type) -> Channel:
    """
    Create a channel instance for the given type code.

    Channel modules are imported lazily per branch so optional dependencies
    are only required for the channel that is actually selected.

    :param channel_type: channel type code
    :return: channel instance
    :raises RuntimeError: when the channel type is unknown
    """
    if channel_type == "wx":
        from channel.wechat.wechat_channel import WechatChannel
        ch = WechatChannel()
    elif channel_type == "wxy":
        from channel.wechat.wechaty_channel import WechatyChannel
        ch = WechatyChannel()
    elif channel_type == "terminal":
        from channel.terminal.terminal_channel import TerminalChannel
        ch = TerminalChannel()
    elif channel_type == "wechatmp":
        from channel.wechatmp.wechatmp_channel import WechatMPChannel
        ch = WechatMPChannel(passive_reply=True)
    elif channel_type == "wechatmp_service":
        from channel.wechatmp.wechatmp_channel import WechatMPChannel
        ch = WechatMPChannel(passive_reply=False)
    elif channel_type == "wechatcom_app":
        from channel.wechatcom.wechatcomapp_channel import WechatComAppChannel
        ch = WechatComAppChannel()
    elif channel_type == "wechatcom_service":
        from channel.wechatcs.wechatcomservice_channel import WechatComServiceChannel
        ch = WechatComServiceChannel()
    elif channel_type == "wework":
        from channel.wework.wework_channel import WeworkChannel
        ch = WeworkChannel()
    elif channel_type == const.FEISHU:
        from channel.feishu.feishu_channel import FeiShuChanel
        ch = FeiShuChanel()
    elif channel_type == const.DINGTALK:
        from channel.dingtalk.dingtalk_channel import DingTalkChanel
        ch = DingTalkChanel()
    else:
        # Fail loudly with the offending value instead of a bare RuntimeError;
        # the dead `ch = Channel()` pre-initialization was also removed since
        # every branch either assigns ch or raises.
        raise RuntimeError(f"unsupported channel type: {channel_type}")
    ch.channel_type = channel_type
    return ch
49 |
--------------------------------------------------------------------------------
/channel/chat_message.py:
--------------------------------------------------------------------------------
1 | """
2 | 本类表示聊天消息,用于对itchat和wechaty的消息进行统一的封装。
3 |
4 | 填好必填项(群聊6个,非群聊8个),即可接入ChatChannel,并支持插件,参考TerminalChannel
5 |
6 | ChatMessage
7 | msg_id: 消息id (必填)
8 | create_time: 消息创建时间
9 |
10 | ctype: 消息类型 : ContextType (必填)
11 | content: 消息内容, 如果是声音/图片,这里是文件路径 (必填)
12 |
13 | from_user_id: 发送者id (必填)
14 | from_user_nickname: 发送者昵称
15 | to_user_id: 接收者id (必填)
16 | to_user_nickname: 接收者昵称
17 |
18 | other_user_id: 对方的id,如果你是发送者,那这个就是接收者id,如果你是接收者,那这个就是发送者id,如果是群消息,那这一直是群id (必填)
19 | other_user_nickname: 同上
20 |
21 | is_group: 是否是群消息 (群聊必填)
22 | is_at: 是否被at
23 |
24 | - (群消息时,一般会存在实际发送者,是群内某个成员的id和昵称,下列项仅在群消息时存在)
25 | actual_user_id: 实际发送者id (群聊必填)
26 | actual_user_nickname:实际发送者昵称
27 | self_display_name: 自身的展示名,设置群昵称时,该字段表示群昵称
28 |
29 | _prepare_fn: 准备函数,用于准备消息的内容,比如下载图片等,
30 | _prepared: 是否已经调用过准备函数
31 | _rawmsg: 原始消息对象
32 |
33 | """
34 |
35 |
class ChatMessage(object):
    """Unified wrapper around a platform message (itchat, wechaty, ...).

    The module docstring describes each field; subclasses must populate the
    required ones for ChatChannel and plugins to work.
    """

    # identity / timing
    msg_id = None
    create_time = None

    # payload
    ctype = None
    content = None

    # participants
    from_user_id = None
    from_user_nickname = None
    to_user_id = None
    to_user_nickname = None
    other_user_id = None
    other_user_nickname = None
    my_msg = False
    self_display_name = None

    # group-chat specifics
    is_group = False
    is_at = False
    actual_user_id = None
    actual_user_nickname = None
    at_list = None

    # lazy content preparation (e.g. downloading media)
    _prepare_fn = None
    _prepared = False
    _rawmsg = None

    def __init__(self, _rawmsg):
        self._rawmsg = _rawmsg

    def prepare(self):
        """Run the prepare hook at most once (e.g. to download media)."""
        if self._prepare_fn and not self._prepared:
            self._prepared = True
            self._prepare_fn()

    def __str__(self):
        fields = (
            self.msg_id,
            self.create_time,
            self.ctype,
            self.content,
            self.from_user_id,
            self.from_user_nickname,
            self.to_user_id,
            self.to_user_nickname,
            self.other_user_id,
            self.other_user_nickname,
            self.is_group,
            self.is_at,
            self.actual_user_id,
            self.actual_user_nickname,
            self.at_list,
        )
        return "ChatMessage: id={}, create_time={}, ctype={}, content={}, from_user_id={}, from_user_nickname={}, to_user_id={}, to_user_nickname={}, other_user_id={}, other_user_nickname={}, is_group={}, is_at={}, actual_user_id={}, actual_user_nickname={}, at_list={}".format(*fields)
88 |
--------------------------------------------------------------------------------
/channel/dingtalk/dingtalk_message.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import requests
4 | from dingtalk_stream import ChatbotMessage
5 |
6 | from bridge.context import ContextType
7 | from channel.chat_message import ChatMessage
8 | # -*- coding=utf-8 -*-
9 | from common.log import logger
10 | from common.tmp_dir import TmpDir
11 |
12 |
class DingTalkMessage(ChatMessage):
    """ChatMessage adapter for DingTalk stream-mode chatbot events."""

    def __init__(self, event: ChatbotMessage, image_download_handler):
        super().__init__(event)
        # Handler used to resolve image download codes into download URLs.
        self.image_download_handler = image_download_handler
        self.msg_id = event.message_id
        self.message_type = event.message_type
        self.incoming_message = event
        self.sender_staff_id = event.sender_staff_id
        self.other_user_id = event.conversation_id
        self.create_time = event.create_at
        self.image_content = event.image_content
        self.rich_text_content = event.rich_text_content
        # conversation_type "1" is a one-on-one chat; other codes are treated
        # as group chats — TODO confirm the full set of codes.
        if event.conversation_type == "1":
            self.is_group = False
        else:
            self.is_group = True

        if self.message_type == "text":
            self.ctype = ContextType.TEXT

            self.content = event.text.content.strip()
        elif self.message_type == "audio":
            # DingTalk recognizes speech server-side, so the recognized text is
            # extracted here and handled as a plain text message.
            self.content = event.extensions['content']['recognition'].strip()
            self.ctype = ContextType.TEXT
        elif (self.message_type == 'picture') or (self.message_type == 'richText'):
            self.ctype = ContextType.IMAGE
            # Picture / rich-text messages: download the first image to a temp
            # file and use its local path as the content.
            image_list = event.get_image_list()
            if len(image_list) > 0:
                download_code = image_list[0]
                download_url = image_download_handler.get_image_download_url(download_code)
                self.content = download_image_file(download_url, TmpDir().path())
            else:
                logger.debug(f"[Dingtalk] messageType :{self.message_type} , imageList isEmpty")

        if self.is_group:
            self.from_user_id = event.conversation_id
            self.actual_user_id = event.sender_id
            # NOTE(review): assumes group callbacks only fire on @-mentions,
            # so is_at is unconditionally True — confirm.
            self.is_at = True
        else:
            self.from_user_id = event.sender_id
            self.actual_user_id = event.sender_id
        self.to_user_id = event.chatbot_user_id
        self.other_user_nickname = event.conversation_title
58 |
59 |
def download_image_file(image_url, temp_dir):
    """Download an image from ``image_url`` into ``temp_dir``.

    Returns:
        The local file path on success, or None when the HTTP status is not
        200 (the failure is logged rather than raised).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
    }
    # Proxy support could be wired in here via the `proxies` argument.
    response = requests.get(image_url, headers=headers, stream=True, timeout=60 * 5)
    if response.status_code == 200:

        # Derive a file name from the URL path, dropping any query string.
        # NOTE(review): a URL ending in "/" yields an empty name — confirm
        # upstream URLs always carry a file name component.
        file_name = image_url.split("/")[-1].split("?")[0]

        # exist_ok avoids the check-then-create race of exists()+makedirs().
        os.makedirs(temp_dir, exist_ok=True)

        # Stream the body to disk in chunks instead of buffering the whole
        # image in memory (the request was opened with stream=True).
        file_path = os.path.join(temp_dir, file_name)
        with open(file_path, 'wb') as file:
            for chunk in response.iter_content(chunk_size=8192):
                file.write(chunk)
        return file_path
    else:
        logger.info(f"[Dingtalk] Failed to download image file, {response.content}")
        return None
85 |
--------------------------------------------------------------------------------
/channel/feishu/feishu_message.py:
--------------------------------------------------------------------------------
1 | from bridge.context import ContextType
2 | from channel.chat_message import ChatMessage
3 | import json
4 | import requests
5 | from common.log import logger
6 | from common.tmp_dir import TmpDir
7 | from common import utils
8 |
9 |
class FeishuMessage(ChatMessage):
    """Wraps a Feishu event dict in the common ChatMessage interface.

    Supports text and file messages; file bodies are fetched lazily through
    the ``_prepare_fn`` hook using the supplied tenant access token.
    """

    def __init__(self, event: dict, is_group=False, access_token=None):
        super().__init__(event)
        msg = event.get("message")
        sender = event.get("sender")
        self.access_token = access_token
        self.msg_id = msg.get("message_id")
        self.create_time = msg.get("create_time")
        self.is_group = is_group
        msg_type = msg.get("message_type")

        if msg_type == "text":
            self.ctype = ContextType.TEXT
            self.content = json.loads(msg.get("content")).get("text").strip()
        elif msg_type == "file":
            self.ctype = ContextType.FILE
            payload = json.loads(msg.get("content"))
            file_key = payload.get("file_key")
            suffix = utils.get_path_suffix(payload.get("file_name"))
            self.content = TmpDir().path() + file_key + "." + suffix

            def _download_file():
                # Runs only when the message is actually prepared; writes the
                # file body to self.content on a 200 response.
                url = f"https://open.feishu.cn/open-apis/im/v1/messages/{self.msg_id}/resources/{file_key}"
                headers = {
                    "Authorization": "Bearer " + access_token,
                }
                params = {
                    "type": "file"
                }
                response = requests.get(url=url, headers=headers, params=params)
                if response.status_code == 200:
                    with open(self.content, "wb") as f:
                        f.write(response.content)
                else:
                    logger.info(f"[FeiShu] Failed to download file, key={file_key}, res={response.text}")

            self._prepare_fn = _download_file
        else:
            raise NotImplementedError("Unsupported message type: Type:{} ".format(msg_type))

        self.from_user_id = sender.get("sender_id").get("open_id")
        self.to_user_id = event.get("app_id")
        if is_group:
            # Group chat: strip the @-mention placeholder Feishu injects.
            self.other_user_id = msg.get("chat_id")
            self.actual_user_id = self.from_user_id
            self.content = self.content.replace("@_user_1", "").strip()
            self.actual_user_nickname = ""
        else:
            # Direct chat: the peer is simply the sender.
            self.other_user_id = self.from_user_id
            self.actual_user_id = self.from_user_id
64 |
--------------------------------------------------------------------------------
/channel/terminal/terminal_channel.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | from bridge.context import *
4 | from bridge.reply import Reply, ReplyType
5 | from channel.chat_channel import ChatChannel, check_prefix
6 | from channel.chat_message import ChatMessage
7 | from common.log import logger
8 | from config import conf
9 |
10 |
class TerminalMessage(ChatMessage):
    """Minimal in-memory ChatMessage used by the terminal channel."""

    def __init__(
        self,
        msg_id,
        content,
        ctype=ContextType.TEXT,
        from_user_id="User",
        to_user_id="Chatgpt",
        other_user_id="Chatgpt",
    ):
        # Plain attribute assignment only; the base initializer is not
        # invoked, matching how the terminal channel has always built these.
        self.msg_id, self.ctype, self.content = msg_id, ctype, content
        self.from_user_id, self.to_user_id = from_user_id, to_user_id
        self.other_user_id = other_user_id
27 |
28 |
class TerminalChannel(ChatChannel):
    """Interactive REPL channel: reads questions from stdin, prints replies."""

    NOT_SUPPORT_REPLYTYPE = [ReplyType.VOICE]

    def send(self, reply: Reply, context: Context):
        """Render a reply to stdout; image replies are opened in a viewer."""
        print("\nBot:")
        if reply.type == ReplyType.IMAGE:
            from PIL import Image

            image_storage = reply.content
            image_storage.seek(0)
            img = Image.open(image_storage)
            print("")
            img.show()
        elif reply.type == ReplyType.IMAGE_URL:  # download the image from the network
            import io

            import requests
            from PIL import Image

            img_url = reply.content
            pic_res = requests.get(img_url, stream=True)
            image_storage = io.BytesIO()
            for block in pic_res.iter_content(1024):
                image_storage.write(block)
            image_storage.seek(0)
            img = Image.open(image_storage)
            print(img_url)
            img.show()
        else:
            print(reply.content)
        print("\nUser:", end="")
        sys.stdout.flush()
        return

    def startup(self):
        """Main loop: read a line, wrap it in a Context, hand it to the bot."""
        logger.setLevel("WARN")
        print("\nPlease input your question:\nUser:", end="")
        sys.stdout.flush()
        msg_id = 0
        while True:
            try:
                prompt = self.get_input()
            except KeyboardInterrupt:
                print("\nExiting...")
                sys.exit()
            msg_id += 1
            trigger_prefixs = conf().get("single_chat_prefix", [""])
            if check_prefix(prompt, trigger_prefixs) is None:
                # Prepend the trigger prefix when the input did not carry one.
                prompt = trigger_prefixs[0] + prompt

            context = self._compose_context(ContextType.TEXT, prompt, msg=TerminalMessage(msg_id, prompt))
            # Bug fix: _compose_context may return None; the original
            # subscripted it (context["isgroup"] = False) before the None
            # check, raising TypeError instead of the intended Exception.
            if context:
                context["isgroup"] = False
                self.produce(context)
            else:
                raise Exception("context is None")

    def get_input(self):
        """
        Multi-line input function
        """
        sys.stdout.flush()
        line = input()
        return line
94 |
--------------------------------------------------------------------------------
/channel/wechat/wechaty_message.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import re
3 |
4 | from wechaty import MessageType
5 | from wechaty.user import Message
6 |
7 | from bridge.context import ContextType
8 | from channel.chat_message import ChatMessage
9 | from common.log import logger
10 | from common.tmp_dir import TmpDir
11 |
12 |
class aobject(object):
    """Base class that enables an asynchronous ``__init__``.

    Subclasses may declare ``async def __init__`` and be constructed with
    ``await MyClass(params)``: the async ``__new__`` allocates the instance,
    awaits its initializer, and hands the fully-initialized instance back.
    """

    async def __new__(cls, *args, **kwargs):
        obj = super().__new__(cls)
        await obj.__init__(*args, **kwargs)
        return obj

    async def __init__(self):
        pass
26 |
27 |
class WechatyMessage(ChatMessage, aobject):
    """ChatMessage adapter for wechaty; constructed asynchronously via
    ``await WechatyMessage(msg)`` (see the ``aobject`` base class).
    """

    async def __init__(self, wechaty_msg: Message):
        super().__init__(wechaty_msg)

        room = wechaty_msg.room()

        self.msg_id = wechaty_msg.message_id
        self.create_time = wechaty_msg.payload.timestamp
        self.is_group = room is not None

        if wechaty_msg.type() == MessageType.MESSAGE_TYPE_TEXT:
            self.ctype = ContextType.TEXT
            self.content = wechaty_msg.text()
        elif wechaty_msg.type() == MessageType.MESSAGE_TYPE_AUDIO:
            self.ctype = ContextType.VOICE
            voice_file = await wechaty_msg.to_file_box()
            self.content = TmpDir().path() + voice_file.name  # content holds the temp-dir file path

            def func():
                # Lazy download on prepare: schedule the coroutine on the main
                # event loop and block until it finishes, so the hook can be
                # called from a non-async worker thread.
                loop = asyncio.get_event_loop()
                asyncio.run_coroutine_threadsafe(voice_file.to_file(self.content), loop).result()

            self._prepare_fn = func

        else:
            raise NotImplementedError("Unsupported message type: {}".format(wechaty_msg.type()))

        from_contact = wechaty_msg.talker()  # the actual sender of the message
        self.from_user_id = from_contact.contact_id
        self.from_user_nickname = from_contact.name

        # Group from/to semantics differ between wechaty and itchat:
        # wechaty: `from` is the actual sender, `to` is the room.
        # itchat: for your own group messages from/to are you and the room;
        #   for others' messages from/to are the room and you.
        # The difference is harmless here: group logic only uses (1) `from`
        # to detect self-sent messages and (2) actual_user_id for the sender.

        if self.is_group:
            self.to_user_id = room.room_id
            self.to_user_nickname = await room.topic()
        else:
            to_contact = wechaty_msg.to()
            self.to_user_id = to_contact.contact_id
            self.to_user_nickname = to_contact.name

        if self.is_group or wechaty_msg.is_self():  # group: other_user is the room; self-sent DM: other_user is the peer
            self.other_user_id = self.to_user_id
            self.other_user_nickname = self.to_user_nickname
        else:
            self.other_user_id = self.from_user_id
            self.other_user_nickname = self.from_user_nickname

        if self.is_group:  # in wechaty group chats, the actual sender is from_user
            self.is_at = await wechaty_msg.mention_self()
            if not self.is_at:  # copy-pasted "@xxx" text is not a real mention; detect it for compatibility
                name = wechaty_msg.wechaty.user_self().name
                pattern = f"@{re.escape(name)}(\u2005|\u0020)"
                if re.search(pattern, self.content):
                    logger.debug(f"wechaty message {self.msg_id} include at")
                    self.is_at = True

            self.actual_user_id = self.from_user_id
            self.actual_user_nickname = self.from_user_nickname
90 |
--------------------------------------------------------------------------------
/channel/wechatcom/README.md:
--------------------------------------------------------------------------------
1 | # 企业微信应用号channel
2 |
3 | 企业微信官方提供了客服、应用等API,本channel使用的是企业微信的自建应用API的能力。
4 |
5 | 因为未来可能还会开发客服能力,所以本channel的类型名叫作`wechatcom_app`。
6 |
7 | `wechatcom_app` channel支持插件系统和图片声音交互等能力,除了无法加入群聊,作为个人使用的私人助理已绰绰有余。
8 |
9 | ## 开始之前
10 |
11 | - 在企业中确认自己拥有在企业内自建应用的权限。
12 | - 如果没有权限或者是个人用户,也可创建未认证的企业。操作方式:登录手机企业微信,选择`创建/加入企业`来创建企业,类型请选择企业,企业名称可随意填写。
13 | 未认证的企业有100人的服务人数上限,其他功能与认证企业没有差异。
14 |
15 | 本channel需安装的依赖与公众号一致,需要安装`wechatpy`和`web.py`,它们包含在`requirements-optional.txt`中。
16 |
17 | 此外,如果你是`Linux`系统,除了`ffmpeg`还需要安装`amr`编码器,否则会出现找不到编码器的错误,无法正常使用语音功能。
18 |
19 | - Ubuntu/Debian
20 |
21 | ```bash
22 | apt-get install libavcodec-extra
23 | ```
24 |
25 | - Alpine
26 |
27 | 需自行编译`ffmpeg`,在编译参数里加入`amr`编码器的支持
28 |
29 | ## 使用方法
30 |
31 | 1.查看企业ID
32 |
33 | - 扫码登陆[企业微信后台](https://work.weixin.qq.com)
34 | - 选择`我的企业`,点击`企业信息`,记住该`企业ID`
35 |
36 | 2.创建自建应用
37 |
38 | - 选择应用管理, 在自建区选创建应用来创建企业自建应用
39 | - 上传应用logo,填写应用名称等项
- 创建应用后进入应用详情页面,记住`AgentId`和`Secret`
41 |
42 | 3.配置应用
43 |
44 | - 在详情页点击`企业可信IP`的配置(没看到可以不管),填入你服务器的公网IP,如果不知道可以先不填
45 | - 点击`接收消息`下的启用API接收消息
46 | - `URL`填写格式为`http://url:port/wxcomapp`,`port`是程序监听的端口,默认是9898
47 | 如果是未认证的企业,url可直接使用服务器的IP。如果是认证企业,需要使用备案的域名,可使用二级域名。
48 | - `Token`可随意填写,停留在这个页面
- 在程序根目录`config.json`中增加配置(**去掉注释**),`wechatcomapp_aes_key`是当前页面的`EncodingAESKey`
50 |
51 | ```python
52 | "channel_type": "wechatcom_app",
53 | "wechatcom_corp_id": "", # 企业微信公司的corpID
54 | "wechatcomapp_token": "", # 企业微信app的token
55 | "wechatcomapp_port": 9898, # 企业微信app的服务端口, 不需要端口转发
56 | "wechatcomapp_secret": "", # 企业微信app的secret
57 | "wechatcomapp_agent_id": "", # 企业微信app的agent_id
58 | "wechatcomapp_aes_key": "", # 企业微信app的aes_key
59 | ```
60 |
61 | - 运行程序,在页面中点击保存,保存成功说明验证成功
62 |
63 | 4.连接个人微信
64 |
65 | 选择`我的企业`,点击`微信插件`,下面有个邀请关注的二维码。微信扫码后,即可在微信中看到对应企业,在这里你便可以和机器人沟通。
66 |
67 | 向机器人发送消息,如果日志里出现报错:
68 |
69 | ```bash
70 | Error code: 60020, message: "not allow to access from your ip, ...from ip: xx.xx.xx.xx"
71 | ```
72 |
73 | 意思是IP不可信,需要参考上一步的`企业可信IP`配置,把这里的IP加进去。
74 |
75 | ~~### Railway部署方式~~(2023-06-08已失效)
76 |
77 | ~~公众号不能在`Railway`上部署,但企业微信应用[可以](https://railway.app/template/-FHS--?referralCode=RC3znh)!~~
78 |
79 | ~~填写配置后,将部署完成后的网址```**.railway.app/wxcomapp```,填写在上一步的URL中。发送信息后观察日志,把报错的IP加入到可信IP。(每次重启后都需要加入可信IP)~~
80 |
81 | ## 测试体验
82 |
83 | AIGC开放社区中已经部署了多个可免费使用的Bot,扫描下方的二维码会自动邀请你来体验。
84 |
85 |
86 |
--------------------------------------------------------------------------------
/channel/wechatcom/wechatcomapp_client.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import time
3 |
4 | from wechatpy.enterprise import WeChatClient
5 |
6 |
class WechatComAppClient(WeChatClient):
    """WeChatClient subclass whose access-token refresh is serialized by a
    lock so concurrent threads do not fetch the token redundantly."""

    def __init__(self, corp_id, secret, access_token=None, session=None, timeout=None, auto_retry=True):
        super().__init__(corp_id, secret, access_token, session, timeout, auto_retry)
        self.fetch_access_token_lock = threading.Lock()

    def fetch_access_token(self):
        """Override: return the cached token while it is still fresh,
        otherwise refresh it under the lock."""
        with self.fetch_access_token_lock:
            cached = self.session.get(self.access_token_key)
            # Reuse the cached token when it never expires or still has more
            # than 60 seconds of validity left.
            if cached and (not self.expires_at or self.expires_at - time.time() > 60):
                return cached
            return super().fetch_access_token()
22 |
--------------------------------------------------------------------------------
/channel/wechatcom/wechatcomapp_message.py:
--------------------------------------------------------------------------------
1 | from wechatpy.enterprise import WeChatClient
2 |
3 | from bridge.context import ContextType
4 | from channel.chat_message import ChatMessage
5 | from common.log import logger
6 | from common.tmp_dir import TmpDir
7 |
8 |
class WechatComAppMessage(ChatMessage):
    """ChatMessage adapter for WeChat Work (enterprise) app messages.

    Media payloads are not fetched eagerly: ``content`` holds the destination
    path and ``_prepare_fn`` downloads the media on demand via ``client``.
    """

    def __init__(self, msg, client: WeChatClient, is_group=False):
        super().__init__(msg)
        self.msg_id = msg.id
        self.create_time = msg.time
        self.is_group = is_group

        def _media_downloader(kind):
            # Shared lazy downloader for voice/image media (the two closures
            # were previously duplicated verbatim); writes to self.content.
            def download():
                response = client.media.download(msg.media_id)
                if response.status_code == 200:
                    with open(self.content, "wb") as f:
                        f.write(response.content)
                else:
                    logger.info(f"[wechatcom] Failed to download {kind} file, {response.content}")

            return download

        if msg.type == "text":
            self.ctype = ContextType.TEXT
            self.content = msg.content
        elif msg.type == "voice":
            self.ctype = ContextType.VOICE
            self.content = TmpDir().path() + msg.media_id + "." + msg.format  # content holds the temp-dir file path
            self._prepare_fn = _media_downloader("voice")
        elif msg.type == "image":
            self.ctype = ContextType.IMAGE
            # NOTE(review): the media is saved with a .png suffix regardless of
            # its actual format — confirm downstream consumers tolerate this.
            self.content = TmpDir().path() + msg.media_id + ".png"  # content holds the temp-dir file path
            self._prepare_fn = _media_downloader("image")
        else:
            raise NotImplementedError("Unsupported message type: Type:{} ".format(msg.type))

        self.from_user_id = msg.source
        self.to_user_id = msg.target
        self.other_user_id = msg.source
53 |
--------------------------------------------------------------------------------
/channel/wechatcs/README.md:
--------------------------------------------------------------------------------
1 | # 企业微信客服号channel
2 |
3 | 企业微信官方提供了客服、应用等API,本channel将原本的企业微信应用修改为企业客服应用,目前还没做兼容,此channel仅能在企业微信客服应用中回复,原本的应用不会回复。
4 |
5 | 本channel的类型名叫作`wechatcom_service`,用于企业微信客服应用,此channel大部分配置流程和`wechatcom_app`一样。
6 |
7 | 同样的,`wechatcom_service` channel支持插件系统和图片声音交互等能力,除了无法加入群聊,作为个人使用的私人助理已绰绰有余。
8 |
9 | ## 开始之前
10 |
11 | - 在企业中确认自己拥有在企业内自建应用的权限。
12 | - 如果没有权限或者是个人用户,也可创建未认证的企业。操作方式:登录手机企业微信,选择`创建/加入企业`来创建企业,类型请选择企业,企业名称可随意填写。
13 | 未认证的企业有100人的服务人数上限,其他功能与认证企业没有差异。
14 |
15 | 本channel需安装的依赖与公众号一致,需要安装`wechatpy`和`web.py`,它们包含在`requirements-optional.txt`中。
16 |
17 | 此外,如果你是`Linux`系统,除了`ffmpeg`还需要安装`amr`编码器,否则会出现找不到编码器的错误,无法正常使用语音功能。
18 |
19 | - Ubuntu/Debian
20 |
21 | ```bash
22 | apt-get install libavcodec-extra
23 | ```
24 |
25 | - Alpine
26 |
27 | 需自行编译`ffmpeg`,在编译参数里加入`amr`编码器的支持
28 |
29 | ## 使用方法
30 |
31 | 1.查看企业ID
32 |
33 | - 扫码登陆[企业微信后台](https://work.weixin.qq.com)
34 | - 选择`我的企业`,点击`企业信息`,记住该`企业ID`
35 |
36 | 2.创建自建应用
37 |
38 | - 选择应用管理, 在自建区选创建应用来创建企业自建应用
39 | - 上传应用logo,填写应用名称等项
- 创建应用后进入应用详情页面,记住`AgentId`和`Secret`
41 |
42 | 3.配置应用
43 |
44 | - 在详情页点击`企业可信IP`的配置(没看到可以不管),填入你服务器的公网IP,如果不知道可以先不填
45 | - 点击`接收消息`下的启用API接收消息
46 | - `URL`填写格式为`http://url:port/wxcomapp`,`port`是程序监听的端口,默认是9898
47 | 如果是未认证的企业,url可直接使用服务器的IP。如果是认证企业,需要使用备案的域名,可使用二级域名。
48 | - `Token`可随意填写,停留在这个页面
- 在程序根目录`config.json`中增加配置(**去掉注释**),`wechatcomapp_aes_key`是当前页面的`EncodingAESKey`
50 |
51 | ```python
52 | "channel_type": "wechatcom_service",
53 | "wechatcom_corp_id": "", # 企业微信公司的corpID
54 | "wechatcomapp_token": "", # 企业微信app的token
55 | "wechatcomapp_port": 9898, # 企业微信app的服务端口, 不需要端口转发
56 | "wechatcomapp_secret": "", # 企业微信app的secret
57 | "wechatcomapp_agent_id": "", # 企业微信app的agent_id
58 | "wechatcomapp_aes_key": "", # 企业微信app的aes_key
59 | ```
60 |
61 | - 运行程序,在页面中点击保存,保存成功说明验证成功
62 |
63 | 4.将微信客服接入程序
64 |
65 | - 点击应用中的`微信客服`,在`客服账号`栏下创建一个新的账号,输入名称后点击创建。
66 | - 点击`微信客服`应用详情中的`API`(默认状态下是折叠的),点击`可调用接口的应用`栏中的`修改`,勾选刚才创建的应用并点击确认。
67 | - 点击`微信客服`应用详情中的`API`(默认状态下是折叠的),点击`可调用接口的应用`栏中的`前往配置`。点击`操作选项`中的`更换客服账号`,将之前配置好的应用指定你刚才创建的客服号。
68 | - 顺利的情况下,回到`微信客服`应用详情中,点击客服账号,生成二维码让用户扫码即可跟AI客服对话。
69 |
70 | 向机器人发送消息,如果日志里出现报错:
71 |
72 | ```bash
73 | Error code: 60020, message: "not allow to access from your ip, ...from ip: xx.xx.xx.xx"
74 | ```
75 |
76 | 意思是IP不可信,需要参考上一步的`企业可信IP`配置,把这里的IP加进去。
77 |
78 |
79 |
80 |
--------------------------------------------------------------------------------
/channel/wechatcs/wechatcomservice_client.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import time
3 | import requests
4 | import time
5 | from wechatpy.enterprise import WeChatClient
6 | from config import conf
7 |
8 |
class WeChatTokenManager:
    """Caches the WeChat Work access token and refreshes it when it is
    within 60 seconds of expiry."""

    def __init__(self):
        self.access_token = None  # cached token string, None until first fetch
        self.expires_at = 0  # epoch seconds after which the token is stale

    def get_token(self):
        """Return a valid access token, fetching a fresh one when needed.

        Raises:
            Exception: when the gettoken API does not return an access_token.
        """
        current_time = time.time()
        if self.access_token and self.expires_at - current_time > 60:
            return self.access_token

        corpid = conf().get("wechatcom_corp_id")
        corpsecret = conf().get("wechatcomapp_secret")
        url = f"https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid={corpid}&corpsecret={corpsecret}"

        response = requests.get(url).json()
        if 'access_token' in response:
            self.access_token = response['access_token']
            # Expire one minute early so a token is never used at the edge.
            self.expires_at = current_time + response['expires_in'] - 60
            # Security fix: the original printed the raw access token to
            # stdout; a credential must not be leaked into console output.
            return self.access_token
        else:
            raise Exception("Failed to retrieve access token")
31 |
32 |
33 | # class WechatComAppClient(WeChatClient):
34 | # def __init__(self, corp_id, secret, access_token=None, session=None, timeout=None, auto_retry=True):
35 | # super(WechatComAppClient, self).__init__(corp_id, secret, access_token, session, timeout, auto_retry)
36 | # self.fetch_access_token_lock = threading.Lock()
37 | #
38 | # def fetch_access_token(self): # 重载父类方法,加锁避免多线程重复获取access_token
39 | # with self.fetch_access_token_lock:
40 | # access_token = self.session.get(self.access_token_key)
41 | # if access_token:
42 | # if not self.expires_at:
43 | # return access_token
44 | # timestamp = time.time()
45 | # if self.expires_at - timestamp > 60:
46 | # return access_token
47 | # return super().fetch_access_token()
48 |
49 |
class WechatComServiceClient(WeChatClient):
    """WeChatClient subclass that sources its access token from the local
    WeChatTokenManager instead of wechatpy's built-in session cache."""

    def __init__(self, corp_id, secret, access_token=None, session=None, timeout=None, auto_retry=True):
        super(WechatComServiceClient, self).__init__(corp_id, secret, access_token, session, timeout, auto_retry)
        self.token_manager = WeChatTokenManager()
        self.fetch_access_token_lock = threading.Lock()

    def fetch_access_token(self):
        # Override: serialize refreshes with a lock and delegate to the token
        # manager's cache/refresh logic.
        with self.fetch_access_token_lock:
            return self.token_manager.get_token()
59 |
60 |
--------------------------------------------------------------------------------
/channel/wechatcs/wechatcomservice_message.py:
--------------------------------------------------------------------------------
1 | from wechatpy.enterprise import WeChatClient
2 |
3 | from bridge.context import ContextType
4 | from channel.chat_message import ChatMessage
5 | from common.log import logger
6 | from common.tmp_dir import TmpDir
7 |
8 |
class WechatComServiceMessage(ChatMessage):
    """ChatMessage adapter for WeChat Work customer-service (kf) events.

    ``msg`` is the raw dict from the kf message-sync API; media payloads are
    downloaded lazily through ``client`` via the ``_prepare_fn`` hook.
    """

    def __init__(self, msg, client: WeChatClient = None, is_group=False):
        # NOTE(review): unlike the sibling message adapters, the ChatMessage
        # base initializer is not called here — confirm no base setup is needed.
        self.is_group = is_group
        self.msg_id = msg['msgid']
        self.external_userid = msg['external_userid']
        self.create_time = msg['send_time']
        self.origin = msg['origin']
        self.msgtype = msg['msgtype']
        self.open_kfid = msg['open_kfid']

        if self.msgtype == "text":
            self.content = msg['text']['content']
            self.ctype = ContextType.TEXT
        elif self.msgtype == "image":
            self.ctype = ContextType.IMAGE
            # Image message handling: content holds the local target path.
            self.content = TmpDir().path() + msg.get("image", {}).get("media_id", "") + "." + 'jpg'  # assumes the image format is jpg
 
            def download_image():
                # Lazily download the image media to self.content.
                response = client.media.download(msg.get("image", {}).get("media_id", ""))
                if response.status_code == 200:
                    with open(self.content, "wb") as f:
                        f.write(response.content)
                else:
                    logger.info(f"[wechatcom_copy] Failed to download image file, {response.content}")

            # download_image()
            self._prepare_fn = download_image
        elif self.msgtype == "voice":
            self.ctype = ContextType.VOICE
            self.content = TmpDir().path() + msg.get("voice", {}).get("media_id", "") + "." + 'mp3'  # content holds the temp-dir file path

            def download_voice():
                # Write the response body to a local file on a 200 status.
                response = client.media.download(msg.get("voice", {}).get("media_id", ""))
                if response.status_code == 200:
                    with open(self.content, "wb") as f:
                        f.write(response.content)
                else:
                    logger.info(f"[wechatcom_copy] Failed to download voice file, {response.content}")

            # download_voice()
            self._prepare_fn = download_voice
        # More message types can be handled here as needed.
        # NOTE(review): unsupported msgtypes leave self.content/ctype unset.
        self.from_user_id = self.external_userid
        self.to_user_id = self.open_kfid
        self.other_user_id = self.external_userid
57 |
--------------------------------------------------------------------------------
/channel/wechatmp/README.md:
--------------------------------------------------------------------------------
1 | # 微信公众号channel
2 |
3 | 鉴于个人微信号在服务器上通过itchat登录有封号风险,这里新增了微信公众号channel,提供无风险的服务。
4 | 目前支持订阅号和服务号两种类型的公众号,它们都支持文本交互,语音和图片输入。其中个人主体的微信订阅号由于无法通过微信认证,存在回复时间限制,每天的图片和声音回复次数也有限制。
5 |
6 | ## 使用方法(订阅号,服务号类似)
7 |
8 | 在开始部署前,你需要一个拥有公网IP的服务器,以提供微信服务器和我们自己服务器的连接。或者你需要进行内网穿透,否则微信服务器无法将消息发送给我们的服务器。
9 |
10 | 此外,需要在我们的服务器上安装python的web框架web.py和wechatpy。
11 | 以ubuntu为例(在ubuntu 22.04上测试):
12 | ```
13 | pip3 install web.py
14 | pip3 install wechatpy
15 | ```
16 |
17 | 然后在[微信公众平台](https://mp.weixin.qq.com)注册一个自己的公众号,类型选择订阅号,主体为个人即可。
18 |
19 | 然后根据[接入指南](https://developers.weixin.qq.com/doc/offiaccount/Basic_Information/Access_Overview.html)的说明,在[微信公众平台](https://mp.weixin.qq.com)的“设置与开发”-“基本配置”-“服务器配置”中填写服务器地址`URL`和令牌`Token`。`URL`填写格式为`http://url/wx`,可使用IP(成功几率看脸),`Token`是你自己编的一个特定的令牌。消息加解密方式如果选择了需要加密的模式,需要在配置中填写`wechatmp_aes_key`。
20 |
21 | 相关的服务器验证代码已经写好,你不需要再添加任何代码。你只需要在本项目根目录的`config.json`中添加
22 | ```
23 | "channel_type": "wechatmp", # 如果通过了微信认证,将"wechatmp"替换为"wechatmp_service",可极大的优化使用体验
24 | "wechatmp_token": "xxxx", # 微信公众平台的Token
25 | "wechatmp_port": 8080, # 微信公众平台的端口,需要端口转发到80或443
26 | "wechatmp_app_id": "xxxx", # 微信公众平台的appID
27 | "wechatmp_app_secret": "xxxx", # 微信公众平台的appsecret
28 | "wechatmp_aes_key": "", # 微信公众平台的EncodingAESKey,加密模式需要
29 | "single_chat_prefix": [""], # 推荐设置,任意对话都可以触发回复,不添加前缀
30 | "single_chat_reply_prefix": "", # 推荐设置,回复不设置前缀
31 | "plugin_trigger_prefix": "&", # 推荐设置,在手机微信客户端中,$%^等符号与中文连在一起时会自动显示一段较大的间隔,用户体验不好。请不要使用管理员指令前缀"#",这会造成未知问题。
32 | ```
33 | 然后运行`python3 app.py`启动web服务器。这里会默认监听8080端口,但是微信公众号的服务器配置只支持80/443端口,有两种方法来解决这个问题。第一个是推荐的方法,使用端口转发命令将80端口转发到8080端口:
34 | ```
35 | sudo iptables -t nat -A PREROUTING -p tcp --dport 80 -j REDIRECT --to-port 8080
36 | sudo iptables-save > /etc/iptables/rules.v4
37 | ```
38 | 第二个方法是让python程序直接监听80端口,在配置文件中设置`"wechatmp_port": 80` ,在linux上需要使用`sudo python3 app.py`启动程序。然而这会导致一系列环境和权限问题,因此不是推荐的方法。
39 |
40 | 443端口同理,注意需要支持SSL,也就是https的访问,在`wechatmp_channel.py`中需要修改相应的证书路径。
41 |
42 | 程序启动并监听端口后,在刚才的“服务器配置”中点击`提交`即可验证你的服务器。
43 | 随后在[微信公众平台](https://mp.weixin.qq.com)启用服务器,关闭手动填写规则的自动回复,即可实现ChatGPT的自动回复。
44 |
45 | 之后需要在公众号开发信息下将本机IP加入到IP白名单。
46 |
47 | 不然在启用后,发送语音、图片等消息可能会遇到如下报错:
48 | ```
49 | 'errcode': 40164, 'errmsg': 'invalid ip xx.xx.xx.xx not in whitelist rid
50 | ```
51 |
52 |
53 | ## 个人微信公众号的限制
由于个人微信公众号不能通过微信认证,所以没有客服接口,因此公众号无法主动发出消息,只能被动回复。而微信官方对被动回复有5秒的时间限制,最多重试2次,因此最多只有15秒的自动回复时间窗口。因此如果问题比较复杂或者我们的服务器比较忙,ChatGPT的回答就没办法及时回复给用户。为了解决这个问题,这里做了回答缓存,它需要你在回复超时后,再次主动发送任意文字(例如1)来尝试拿到回答缓存。为了优化使用体验,目前设置了两分钟(120秒)的timeout,用户在至多两分钟后即可得到查询到回复或者错误原因。
55 |
56 | 另外,由于微信官方的限制,自动回复有长度限制。因此这里将ChatGPT的回答进行了拆分,以满足限制。
57 |
58 | ## 私有api_key
59 | 公共api有访问频率限制(免费账号每分钟最多3次ChatGPT的API调用),这在服务多人的时候会遇到问题。因此这里多加了一个设置私有api_key的功能。目前通过godcmd插件的命令来设置私有api_key。
60 |
61 | ## 语音输入
62 | 利用微信自带的语音识别功能,提供语音输入能力。需要在公众号管理页面的“设置与开发”->“接口权限”页面开启“接收语音识别结果”。
63 |
64 | ## 语音回复
65 | 请在配置文件中添加以下词条:
66 | ```
67 | "voice_reply_voice": true,
68 | ```
69 | 这样公众号将会用语音回复语音消息,实现语音对话。
70 |
71 | 默认的语音合成引擎是`google`,它是免费使用的。
72 |
73 | 如果要选择其他的语音合成引擎,请添加以下配置项:
74 | ```
75 | "text_to_voice": "pytts"
76 | ```
77 |
78 | pytts是本地的语音合成引擎。还支持baidu,azure,这些你需要自行配置相关的依赖和key。
79 |
80 | 如果使用pytts,在ubuntu上需要安装如下依赖:
81 | ```
82 | sudo apt update
83 | sudo apt install espeak
84 | sudo apt install ffmpeg
85 | python3 -m pip install pyttsx3
86 | ```
87 | 不是很建议开启pytts语音回复,因为它是离线本地计算,算的慢会拖垮服务器,且声音不好听。
88 |
89 | ## 图片回复
90 | 现在认证公众号和非认证公众号都可以实现的图片和语音回复。但是非认证公众号使用了永久素材接口,每天有1000次的调用上限(每个月有10次重置机会,程序中已设定遇到上限会自动重置),且永久素材库存也有上限。因此对于非认证公众号,我们会在回复图片或者语音消息后的10秒内从永久素材库存内删除该素材。
91 |
92 | ## 测试
93 | 目前在`RoboStyle`这个公众号上进行了测试(基于[wechatmp分支](https://github.com/JS00000/chatgpt-on-wechat/tree/wechatmp)),感兴趣的可以关注并体验。开启了godcmd, Banwords, role, dungeon, finish这五个插件,其他的插件还没有详尽测试。百度的接口暂未测试。[wechatmp-stable分支](https://github.com/JS00000/chatgpt-on-wechat/tree/wechatmp-stable)是较稳定的上个版本,但也缺少最新的功能支持。
94 |
95 | ## TODO
96 | - [x] 语音输入
97 | - [x] 图片输入
98 | - [x] 使用临时素材接口提供认证公众号的图片和语音回复
99 | - [x] 使用永久素材接口提供未认证公众号的图片和语音回复
100 | - [ ] 高并发支持
101 |
--------------------------------------------------------------------------------
/channel/wechatmp/active_reply.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import web
4 | from wechatpy import parse_message
5 | from wechatpy.replies import create_reply
6 |
7 | from bridge.context import *
8 | from bridge.reply import *
9 | from channel.wechatmp.common import *
10 | from channel.wechatmp.wechatmp_channel import WechatMPChannel
11 | from channel.wechatmp.wechatmp_message import WeChatMPMessage
12 | from common.log import logger
13 | from config import conf, subscribe_msg
14 |
15 |
16 | # This class is instantiated once per query
class Query:
    """web.py handler for the WeChat MP callback endpoint (active-reply
    mode): GET answers server verification, POST ingests user messages."""

    def GET(self):
        return verify_server(web.input())

    def POST(self):
        # Make sure to return the instance that first created, @singleton will do that.
        try:
            params = web.input()
            verify_server(params)
            channel = WechatMPChannel()
            raw_data = web.data()

            def passthrough(x):
                return x

            encrypt_func = passthrough
            if params.get("encrypt_type") == "aes":
                logger.debug("[wechatmp] Receive encrypted post data:\n" + raw_data.decode("utf-8"))
                if not channel.crypto:
                    raise Exception("Crypto not initialized, Please set wechatmp_aes_key in config.json")
                raw_data = channel.crypto.decrypt_message(raw_data, params.msg_signature, params.timestamp, params.nonce)

                def encrypt_func(x):
                    return channel.crypto.encrypt_message(x, params.nonce, params.timestamp)
            else:
                logger.debug("[wechatmp] Receive post data:\n" + raw_data.decode("utf-8"))

            msg = parse_message(raw_data)
            if msg.type in ["text", "voice", "image"]:
                mp_msg = WeChatMPMessage(msg, client=channel.client)
                logger.info(
                    "[wechatmp] {}:{} Receive post query {} {}: {}".format(
                        web.ctx.env.get("REMOTE_ADDR"),
                        web.ctx.env.get("REMOTE_PORT"),
                        mp_msg.from_user_id,
                        mp_msg.msg_id,
                        mp_msg.content,
                    )
                )
                # A recognized voice message gets a voice reply when enabled.
                want_voice_reply = (
                    msg.type == "voice"
                    and mp_msg.ctype == ContextType.TEXT
                    and conf().get("voice_reply_voice", False)
                )
                if want_voice_reply:
                    context = channel._compose_context(mp_msg.ctype, mp_msg.content, isgroup=False, desire_rtype=ReplyType.VOICE, msg=mp_msg)
                else:
                    context = channel._compose_context(mp_msg.ctype, mp_msg.content, isgroup=False, msg=mp_msg)
                if context:
                    channel.produce(context)
                # The reply will be sent by channel.send() in another thread
                return "success"
            if msg.type == "event":
                logger.info("[wechatmp] Event {} from {}".format(msg.event, msg.source))
                if msg.event in ["subscribe", "subscribe_scan"]:
                    reply_text = subscribe_msg()
                    if reply_text:
                        reply_post = create_reply(reply_text, msg)
                        return encrypt_func(reply_post.render())
                return "success"
            logger.info("暂且不处理")
            return "success"
        except Exception as exc:
            logger.exception(exc)
            return exc
76 |
--------------------------------------------------------------------------------
/channel/wechatmp/common.py:
--------------------------------------------------------------------------------
1 | import web
2 | from wechatpy.crypto import WeChatCrypto
3 | from wechatpy.exceptions import InvalidSignatureException
4 | from wechatpy.utils import check_signature
5 |
6 | from config import conf
7 |
8 | MAX_UTF8_LEN = 2048
9 |
10 |
class WeChatAPIException(Exception):
    """Raised for errors returned by the WeChat MP API."""
    pass
13 |
14 |
def verify_server(data):
    """Validate a WeChat server-verification request.

    Checks the request signature against the configured ``wechatmp_token``
    and returns the ``echostr`` to echo back (None when absent). Raises
    ``web.Forbidden`` on any signature or parameter problem.
    """
    try:
        # Token as configured on the MP platform's basic-configuration page.
        token = conf().get("wechatmp_token")
        check_signature(token, data.signature, data.timestamp, data.nonce)
        return data.get("echostr", None)
    except InvalidSignatureException:
        raise web.Forbidden("Invalid signature")
    except Exception as e:
        raise web.Forbidden(str(e))
28 |
--------------------------------------------------------------------------------
/channel/wechatmp/wechatmp_client.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import time
3 |
4 | from wechatpy.client import WeChatClient
5 | from wechatpy.exceptions import APILimitedException
6 |
7 | from channel.wechatmp.common import *
8 | from common.log import logger
9 |
10 |
class WechatMPClient(WeChatClient):
    """WeChatClient with thread-safe token refresh and automatic quota
    clearing when the MP API rate limit is hit."""

    def __init__(self, appid, secret, access_token=None, session=None, timeout=None, auto_retry=True):
        super(WechatMPClient, self).__init__(appid, secret, access_token, session, timeout, auto_retry)
        self.fetch_access_token_lock = threading.Lock()
        self.clear_quota_lock = threading.Lock()
        self.last_clear_quota_time = -1  # epoch seconds of the last quota clear; -1 means never

    def clear_quota(self):
        """Reset the API call quota (legacy endpoint, appid only)."""
        return self.post("clear_quota", data={"appid": self.appid})

    def clear_quota_v2(self):
        """Reset the API call quota via the v2 endpoint (appid + appsecret)."""
        return self.post("clear_quota/v2", params={"appid": self.appid, "appsecret": self.secret})

    def fetch_access_token(self):  # override: lock so concurrent threads don't refresh the token twice
        with self.fetch_access_token_lock:
            access_token = self.session.get(self.access_token_key)
            if access_token:
                if not self.expires_at:
                    return access_token
                timestamp = time.time()
                if self.expires_at - timestamp > 60:
                    return access_token
            return super().fetch_access_token()

    def _request(self, method, url_or_endpoint, **kwargs):  # override: on API rate-limit, clear the quota (at most once per 60s) and retry
        try:
            return super()._request(method, url_or_endpoint, **kwargs)
        except APILimitedException as e:
            # Typo fixes: "quata" -> "quota", "cleard" -> "cleared" in logs.
            logger.error("[wechatmp] API quota has been used up. {}".format(e))
            if self.last_clear_quota_time == -1 or time.time() - self.last_clear_quota_time > 60:
                with self.clear_quota_lock:
                    # Double-checked inside the lock so only one thread clears.
                    if self.last_clear_quota_time == -1 or time.time() - self.last_clear_quota_time > 60:
                        self.last_clear_quota_time = time.time()
                        response = self.clear_quota_v2()
                        logger.debug("[wechatmp] API quota has been cleared, {}".format(response))
                    return super()._request(method, url_or_endpoint, **kwargs)
            else:
                # Bug fix: the original log call had a "{}" placeholder but no
                # .format() argument, so it logged the literal braces.
                logger.error("[wechatmp] last clear quota time is {}, less than 60s, skip clear quota".format(self.last_clear_quota_time))
                raise e
50 |
--------------------------------------------------------------------------------
/channel/wechatmp/wechatmp_message.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-#
2 |
3 | from bridge.context import ContextType
4 | from channel.chat_message import ChatMessage
5 | from common.log import logger
6 | from common.tmp_dir import TmpDir
7 |
8 |
class WeChatMPMessage(ChatMessage):
    """Adapts a wechatpy Official Account message to the channel-agnostic ChatMessage.

    Supports text, voice and image messages; voice/image media is downloaded
    lazily to a temp-file path stored in ``content`` via ``_prepare_fn``.
    Raises NotImplementedError for unsupported message types.
    """

    def __init__(self, msg, client=None):
        super().__init__(msg)
        self.msg_id = msg.id
        self.create_time = msg.time
        self.is_group = False  # official accounts only deliver private chats

        if msg.type == "text":
            self.ctype = ContextType.TEXT
            self.content = msg.content
        elif msg.type == "voice":
            if msg.recognition is None:  # fixed: was `== None`
                self.ctype = ContextType.VOICE
                # content holds the local temp-file path of the voice media
                self.content = TmpDir().path() + msg.media_id + "." + msg.format

                def download_voice():
                    # write the media to the local file when the download succeeds
                    response = client.media.download(msg.media_id)
                    if response.status_code == 200:
                        with open(self.content, "wb") as f:
                            f.write(response.content)
                    else:
                        logger.info(f"[wechatmp] Failed to download voice file, {response.content}")

                self._prepare_fn = download_voice
            else:
                # the platform already transcribed the voice message; treat as text
                self.ctype = ContextType.TEXT
                self.content = msg.recognition
        elif msg.type == "image":
            self.ctype = ContextType.IMAGE
            # content holds the local temp-file path of the image
            self.content = TmpDir().path() + msg.media_id + ".png"

            def download_image():
                # write the media to the local file when the download succeeds
                response = client.media.download(msg.media_id)
                if response.status_code == 200:
                    with open(self.content, "wb") as f:
                        f.write(response.content)
                else:
                    logger.info(f"[wechatmp] Failed to download image file, {response.content}")

            self._prepare_fn = download_image
        else:
            raise NotImplementedError("Unsupported message type: Type:{} ".format(msg.type))

        self.from_user_id = msg.source
        self.to_user_id = msg.target
        self.other_user_id = msg.source
57 |
--------------------------------------------------------------------------------
/channel/wework/run.py:
--------------------------------------------------------------------------------
import os
import time
# silence the ntwork library logger; must be set before importing ntwork
os.environ['ntwork_LOG'] = "ERROR"
import ntwork

# shared WeWork client instance for this process
wework = ntwork.WeWork()


def forever():
    """Block the main thread forever; on Ctrl-C, shut down ntwork and exit hard."""
    try:
        while True:
            time.sleep(0.1)
    except KeyboardInterrupt:
        ntwork.exit_()
        # os._exit skips cleanup handlers on purpose: ntwork is already torn down
        os._exit(0)
16 |
17 |
18 |
--------------------------------------------------------------------------------
/common/const.py:
--------------------------------------------------------------------------------
# bot_type
OPEN_AI = "openAI"
CHATGPT = "chatGPT"
BAIDU = "baidu"  # Baidu ERNIE (wenxin) model
XUNFEI = "xunfei"
CHATGPTONAZURE = "chatGPTOnAzure"
LINKAI = "linkai"
GEMINI = "gemini"  # gemini-1.0-pro (was defined twice; merged)
DIFY = "dify"
ZHIPU_AI = "glm-4"
COZE = "coze"
CLAUDEAI = "claude"  # legacy cookie-based Claude model
CLAUDEAPI = "claudeAPI"  # Claude via the official API
QWEN = "qwen"  # legacy Tongyi Qianwen model
QWEN_DASHSCOPE = "dashscope"  # new Tongyi SDK and API key
MOONSHOT = "moonshot"
MiniMax = "minimax"


# model
CLAUDE3 = "claude-3-opus-20240229"
GPT35 = "gpt-3.5-turbo"
GPT35_0125 = "gpt-3.5-turbo-0125"
GPT35_1106 = "gpt-3.5-turbo-1106"
GPT4_TURBO = "gpt-4-turbo"
GPT4_TURBO_PREVIEW = "gpt-4-turbo-preview"
GPT4_TURBO_04_09 = "gpt-4-turbo-2024-04-09"
GPT4_TURBO_01_25 = "gpt-4-0125-preview"
GPT4_TURBO_11_06 = "gpt-4-1106-preview"
GPT4_VISION_PREVIEW = "gpt-4-vision-preview"
GPT_4o = "gpt-4o"
GPT_4O = GPT_4o  # alias kept for backward compatibility (was an independent duplicate)
GPT_4O_MINI = "gpt-4o-mini"
GPT_4o_MINI = GPT_4O_MINI  # alias kept for backward compatibility (was an independent duplicate)
GPT4 = "gpt-4"
GPT4_32k = "gpt-4-32k"
GPT4_06_13 = "gpt-4-0613"
GPT4_32k_06_13 = "gpt-4-32k-0613"

WHISPER_1 = "whisper-1"
TTS_1 = "tts-1"
TTS_1_HD = "tts-1-hd"


WEN_XIN = "wenxin"
WEN_XIN_4 = "wenxin-4"

QWEN_TURBO = "qwen-turbo"
QWEN_PLUS = "qwen-plus"
QWEN_MAX = "qwen-max"

LINKAI_35 = "linkai-3.5"
LINKAI_4_TURBO = "linkai-4-turbo"
LINKAI_4o = "linkai-4o"

GEMINI_PRO = "gemini-1.0-pro"
GEMINI_15_flash = "gemini-1.5-flash"
GEMINI_15_PRO = "gemini-1.5-pro"

# all model names accepted by the "model" config option
MODEL_LIST = [
    GPT35, GPT35_0125, GPT35_1106, "gpt-3.5-turbo-16k",
    GPT_4o, GPT_4o_MINI, GPT4_TURBO, GPT4_TURBO_PREVIEW, GPT4_TURBO_01_25, GPT4_TURBO_11_06, GPT4, GPT4_32k, GPT4_06_13, GPT4_32k_06_13,
    WEN_XIN, WEN_XIN_4,
    XUNFEI, ZHIPU_AI, MOONSHOT, MiniMax,
    GEMINI, GEMINI_PRO, GEMINI_15_flash, GEMINI_15_PRO,
    "claude", "claude-3-haiku", "claude-3-sonnet", "claude-3-opus", "claude-3-opus-20240229", "claude-3.5-sonnet",
    "moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k",
    QWEN, QWEN_TURBO, QWEN_PLUS, QWEN_MAX,
    LINKAI_35, LINKAI_4_TURBO, LINKAI_4o,
    DIFY, COZE
]

# channel
FEISHU = "feishu"
DINGTALK = "dingtalk"
77 |
--------------------------------------------------------------------------------
/common/dequeue.py:
--------------------------------------------------------------------------------
1 | from queue import Full, Queue
2 | from time import monotonic as time
3 |
4 |
5 | # add implementation of putleft to Queue
class Dequeue(Queue):
    """A Queue that can also insert items at the head of the line.

    ``putleft`` mirrors ``Queue.put``'s blocking/timeout contract but places
    the item at the front, so it is consumed before everything else.
    """

    def putleft(self, item, block=True, timeout=None):
        """Insert *item* at the head of the queue.

        Raises Full on a non-blocking or timed-out attempt, and ValueError
        for a negative timeout — exactly like ``Queue.put``.
        """
        with self.not_full:
            if self.maxsize > 0:
                self._wait_for_space(block, timeout)
            self._putleft(item)
            self.unfinished_tasks += 1
            self.not_empty.notify()

    def _wait_for_space(self, block, timeout):
        # Caller must already hold self.not_full.
        if not block:
            if self._qsize() >= self.maxsize:
                raise Full
        elif timeout is None:
            while self._qsize() >= self.maxsize:
                self.not_full.wait()
        elif timeout < 0:
            raise ValueError("'timeout' must be a non-negative number")
        else:
            deadline = time() + timeout
            while self._qsize() >= self.maxsize:
                remaining = deadline - time()
                if remaining <= 0.0:
                    raise Full
                self.not_full.wait(remaining)

    def putleft_nowait(self, item):
        """Non-blocking putleft; raises Full when the queue is at capacity."""
        return self.putleft(item, block=False)

    def _putleft(self, item):
        # The underlying storage is a deque, so a head insert is O(1).
        self.queue.appendleft(item)
34 |
--------------------------------------------------------------------------------
/common/expired_dict.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 |
3 |
class ExpiredDict(dict):
    """Dict whose entries expire after a fixed TTL.

    Each value is stored together with its expiry timestamp. Every successful
    read refreshes the entry's TTL (sliding expiration); expired entries are
    purged lazily when they are next accessed.
    """

    def __init__(self, expires_in_seconds):
        super().__init__()
        # a falsy TTL (None/0) falls back to one hour
        self.expires_in_seconds = expires_in_seconds if expires_in_seconds else 3600

    def __getitem__(self, key):
        value, deadline = super().__getitem__(key)
        if datetime.now() > deadline:
            del self[key]
            raise KeyError(f"expired {key}")
        # sliding expiration: reading an entry restarts its TTL
        self[key] = value
        return value

    def __setitem__(self, key, value):
        deadline = datetime.now() + timedelta(seconds=self.expires_in_seconds)
        super().__setitem__(key, (value, deadline))

    def get(self, key, default=None):
        """Return the live value for *key*, or *default* if missing/expired."""
        try:
            return self[key]
        except KeyError:
            return default

    def __contains__(self, key):
        try:
            self[key]
        except KeyError:
            return False
        return True

    def keys(self):
        """Return the keys of all non-expired entries (purging stale ones)."""
        return [k for k in list(super().keys()) if k in self]

    def items(self):
        """Return (key, value) pairs for all non-expired entries."""
        return [(k, self[k]) for k in self.keys()]

    def __iter__(self):
        return iter(self.keys())
43 |
--------------------------------------------------------------------------------
/common/log.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import sys
3 |
4 |
5 | def _reset_logger(log):
6 | for handler in log.handlers:
7 | handler.close()
8 | log.removeHandler(handler)
9 | del handler
10 | log.handlers.clear()
11 | log.propagate = False
12 | console_handle = logging.StreamHandler(sys.stdout)
13 | console_handle.setFormatter(
14 | logging.Formatter(
15 | "[%(levelname)s][%(asctime)s][%(filename)s:%(lineno)d] - %(message)s",
16 | datefmt="%Y-%m-%d %H:%M:%S",
17 | )
18 | )
19 | file_handle = logging.FileHandler("run.log", encoding="utf-8")
20 | file_handle.setFormatter(
21 | logging.Formatter(
22 | "[%(levelname)s][%(asctime)s][%(filename)s:%(lineno)d] - %(message)s",
23 | datefmt="%Y-%m-%d %H:%M:%S",
24 | )
25 | )
26 | log.addHandler(file_handle)
27 | log.addHandler(console_handle)
28 |
29 |
def _get_logger():
    """Create the shared application logger: named "log", INFO level,
    with handlers (re)installed by _reset_logger."""
    app_log = logging.getLogger("log")
    _reset_logger(app_log)
    app_log.setLevel(logging.INFO)
    return app_log


# shared log handle for the whole application
logger = _get_logger()
39 |
--------------------------------------------------------------------------------
/common/memory.py:
--------------------------------------------------------------------------------
1 | from common.expired_dict import ExpiredDict
2 |
USER_IMAGE_CACHE = ExpiredDict(60 * 3)  # per-user image cache; entries expire after 3 minutes
--------------------------------------------------------------------------------
/common/package_manager.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import pip
4 | from pip._internal import main as pipmain
5 |
6 | from common.log import _reset_logger, logger
7 |
8 |
def install(package):
    """Install a single pip package into the current environment."""
    pipmain(["install", package])
11 |
12 |
def install_requirements(file):
    """Install/upgrade every package listed in *file* (a requirements.txt),
    then re-attach our log handlers since pip may reconfigure logging."""
    pipmain(["install", "-r", file, "--upgrade"])
    _reset_logger(logger)
16 |
17 |
def check_dulwich():
    """Ensure the dulwich package is importable, installing it if needed.

    Makes up to two attempts (sleeping 3 seconds before a retry after a
    failed install) and raises ImportError if dulwich is still unavailable.
    """
    needwait = False
    for _ in range(2):
        if needwait:
            time.sleep(3)
            needwait = False
        try:
            import dulwich

            return
        except ImportError:
            try:
                install("dulwich")
            except Exception:  # fixed: was a bare `except:` that also swallowed SystemExit
                needwait = True
    # final attempt after both install tries
    try:
        import dulwich
    except ImportError:
        raise ImportError("Unable to import dulwich")
37 |
--------------------------------------------------------------------------------
/common/singleton.py:
--------------------------------------------------------------------------------
def singleton(cls):
    """Class decorator: every call returns one shared instance of *cls*.

    The first call constructs the instance (forwarding its arguments);
    subsequent calls ignore their arguments and return the cached object.
    """
    _instances = {}

    def wrapper(*args, **kwargs):
        try:
            return _instances[cls]
        except KeyError:
            _instances[cls] = cls(*args, **kwargs)
            return _instances[cls]

    return wrapper
10 |
--------------------------------------------------------------------------------
/common/sorted_dict.py:
--------------------------------------------------------------------------------
1 | import heapq
2 |
3 |
class SortedDict(dict):
    """Dict that iterates its keys in an order defined by sort_func.

    A heap of (priority, key) pairs mirrors the dict contents; the ordered
    key list is computed lazily and cached until the next mutation.
    """

    def __init__(self, sort_func=lambda k, v: k, init_dict=None, reverse=False):
        if init_dict is None:
            init_dict = []
        if isinstance(init_dict, dict):
            init_dict = init_dict.items()
        self.sort_func = sort_func  # maps (key, value) -> sort priority
        self.sorted_keys = None     # cached ordering; None means stale
        self.reverse = reverse
        self.heap = []              # (priority, key) pairs mirroring the dict
        for k, v in init_dict:
            self[k] = v

    def __setitem__(self, key, value):
        if key in self:
            super().__setitem__(key, value)
            # refresh the key's priority inside the heap
            for idx, (_, k) in enumerate(self.heap):
                if k == key:
                    self.heap[idx] = (self.sort_func(key, value), key)
                    heapq.heapify(self.heap)
                    break
        else:
            super().__setitem__(key, value)
            heapq.heappush(self.heap, (self.sort_func(key, value), key))
        self.sorted_keys = None

    def __delitem__(self, key):
        super().__delitem__(key)
        for idx, (_, k) in enumerate(self.heap):
            if k == key:
                del self.heap[idx]
                heapq.heapify(self.heap)
                break
        self.sorted_keys = None

    def _ordered_keys(self):
        # recompute and cache the ordering when stale
        if self.sorted_keys is None:
            self.sorted_keys = [k for _, k in sorted(self.heap, reverse=self.reverse)]
        return self.sorted_keys

    def keys(self):
        """Return the keys sorted by priority (cached until the next mutation)."""
        return self._ordered_keys()

    def items(self):
        """Return (key, value) pairs in priority order."""
        return [(k, self[k]) for k in self._ordered_keys()]

    def _update_heap(self, key):
        # re-prioritize a single key after its value changed out-of-band
        for idx, (priority, k) in enumerate(self.heap):
            if k == key:
                fresh = self.sort_func(key, self[key])
                if fresh != priority:
                    self.heap[idx] = (fresh, key)
                    heapq.heapify(self.heap)
                    self.sorted_keys = None
                break

    def __iter__(self):
        return iter(self.keys())

    def __repr__(self):
        return f"{type(self).__name__}({dict(self)}, sort_func={self.sort_func.__name__}, reverse={self.reverse})"
66 |
--------------------------------------------------------------------------------
/common/time_check.py:
--------------------------------------------------------------------------------
1 | import re
2 | import time
3 | import config
4 | from common.log import logger
5 |
6 |
def time_checker(f):
    """Decorator gating handler *f* by the configured service hours.

    When chat_time_module is enabled, *f* only runs between chat_start_time
    and chat_stop_time (a stop time earlier than the start time means the
    window crosses midnight). Messages ending with "#reconf" / "#更新配置"
    always pass so the window can be reconfigured off-hours.

    Fixes: (1) the regex accepts "24:00" — the documented default — but
    time.strptime("%H:%M") rejects it, crashing the handler; it is now
    normalized to "23:59". (2) f's return value is now propagated instead
    of being silently dropped (None is still returned when access is denied).
    """

    def _time_checker(self, *args, **kwargs):
        _config = config.conf()
        chat_time_module = _config.get("chat_time_module", False)

        if not chat_time_module:
            # time gating disabled: always run the handler
            return f(self, *args, **kwargs)

        chat_start_time = _config.get("chat_start_time", "00:00")
        chat_stop_time = _config.get("chat_stop_time", "24:00")

        time_regex = re.compile(r"^([01]?[0-9]|2[0-4])(:)([0-5][0-9])$")
        if not (time_regex.match(chat_start_time) and time_regex.match(chat_stop_time)):
            logger.warning("时间格式不正确,请在config.json中修改CHAT_START_TIME/CHAT_STOP_TIME。")
            return None

        # strptime("%H:%M") rejects "24:00"; treat it as the end of the day
        if chat_start_time == "24:00":
            chat_start_time = "23:59"
        if chat_stop_time == "24:00":
            chat_stop_time = "23:59"

        now_time = time.strptime(time.strftime("%H:%M"), "%H:%M")
        chat_start_time = time.strptime(chat_start_time, "%H:%M")
        chat_stop_time = time.strptime(chat_stop_time, "%H:%M")
        # stop < start: the service window crosses midnight
        if chat_stop_time < chat_start_time and (chat_start_time <= now_time or now_time <= chat_stop_time):
            return f(self, *args, **kwargs)
        # start < stop: plain same-day window
        if chat_start_time < chat_stop_time and chat_start_time <= now_time <= chat_stop_time:
            return f(self, *args, **kwargs)
        # outside service hours: only let reconfiguration commands through
        pattern = re.compile(r"^.*#(?:reconf|更新配置)$")
        if args and pattern.match(args[0].content):
            return f(self, *args, **kwargs)
        logger.info("非服务时间内,不接受访问")
        return None

    return _time_checker
43 |
--------------------------------------------------------------------------------
/common/tmp_dir.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pathlib
3 |
4 | from config import conf
5 |
6 |
class TmpDir(object):
    """Ensures the shared ./tmp/ working directory exists and exposes its path.

    Note: despite the original docstring, nothing ever deletes the directory;
    it is a process-wide scratch area for downloaded media.
    """

    tmpFilePath = pathlib.Path("./tmp/")

    def __init__(self):
        # exist_ok avoids the check-then-create race when several threads
        # construct TmpDir at the same time
        os.makedirs(self.tmpFilePath, exist_ok=True)

    def path(self):
        """Return the directory path as a string ending with '/'."""
        return str(self.tmpFilePath) + "/"
19 |
--------------------------------------------------------------------------------
/common/token_bucket.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import time
3 |
4 |
class TokenBucket:
    """Simple token-bucket rate limiter fed by a background thread.

    Args:
        tpm: tokens generated per minute (also the bucket capacity).
        timeout: max seconds get_token() waits for a token; None waits forever.
    """

    def __init__(self, tpm, timeout=None):
        self.capacity = int(tpm)  # bucket capacity
        self.tokens = 0  # the bucket starts empty
        self.rate = int(tpm) / 60  # tokens generated per second (tpm must be >= 1)
        self.timeout = timeout  # wait timeout for get_token
        self.cond = threading.Condition()  # guards self.tokens
        self.is_running = True
        # daemon thread: an un-closed bucket must not keep the process alive
        threading.Thread(target=self._generate_tokens, daemon=True).start()

    def _generate_tokens(self):
        """Background loop: add one token every 1/rate seconds, up to capacity."""
        while self.is_running:
            with self.cond:
                if self.tokens < self.capacity:
                    self.tokens += 1
                self.cond.notify()  # wake one waiting consumer
            # sleep OUTSIDE the lock (the original slept while holding it,
            # blocking consumers for the whole interval)
            time.sleep(1 / self.rate)

    def get_token(self):
        """Take one token, waiting up to self.timeout seconds.

        Returns True on success, False when the wait timed out.
        """
        with self.cond:
            while self.tokens <= 0:
                flag = self.cond.wait(self.timeout)
                if not flag:  # timed out
                    return False
            self.tokens -= 1
            return True

    def close(self):
        """Stop the generator thread (it exits after its current sleep)."""
        self.is_running = False
37 |
38 |
if __name__ == "__main__":
    # demo: a bucket producing 20 tokens per minute, waiting indefinitely
    token_bucket = TokenBucket(20, None)
    # token_bucket = TokenBucket(20, 0.1)
    for i in range(3):
        if token_bucket.get_token():
            print(f"第{i+1}次请求成功")
    token_bucket.close()
46 |
--------------------------------------------------------------------------------
/config-template.json:
--------------------------------------------------------------------------------
1 | {
2 | "dify_api_base": "https://api.dify.ai/v1",
3 | "dify_api_key": "app-xxx",
4 | "dify_app_type": "chatbot",
5 | "channel_type": "wx",
6 | "model": "dify",
7 | "single_chat_prefix": [""],
8 | "single_chat_reply_prefix": "",
9 | "group_chat_prefix": ["@bot"],
10 | "group_name_white_list": ["ALL_GROUP"],
11 | "accept_friend_commands": ["加好友"],
12 | "group_exit_msg": "退群聊通知文案",
13 | "accept_friend_msg": "通过好友发送的提示语"
14 | }
15 |
--------------------------------------------------------------------------------
/docker/Dockerfile.latest:
--------------------------------------------------------------------------------
# Release image for dify-on-wechat, based on slim Debian with Python 3.10
FROM python:3.10-slim-bullseye

LABEL maintainer="i@hanfangyuan.cn"
ARG TZ='Asia/Shanghai'

ENV BUILD_PREFIX=/app

# copy the whole repository into the image
ADD . ${BUILD_PREFIX}

# install audio/codec system packages and all Python dependencies in one layer
RUN apt-get update \
&&apt-get install -y --no-install-recommends bash ffmpeg espeak libavcodec-extra\
&& cd ${BUILD_PREFIX} \
&& cp config-template.json config.json \
&& /usr/local/bin/python -m pip install --no-cache --upgrade pip \
&& pip install --no-cache -r requirements.txt \
&& pip install --no-cache -r requirements-optional.txt \
&& pip install azure-cognitiveservices-speech

WORKDIR ${BUILD_PREFIX}

ADD docker/entrypoint.sh /entrypoint.sh

# run as an unprivileged user; grant it the app dir and the python lib dir
RUN chmod +x /entrypoint.sh \
&& mkdir -p /home/noroot \
&& groupadd -r noroot \
&& useradd -r -g noroot -s /bin/bash -d /home/noroot noroot \
&& chown -R noroot:noroot /home/noroot ${BUILD_PREFIX} /usr/local/lib

USER noroot

ENTRYPOINT ["/entrypoint.sh"]
32 |
--------------------------------------------------------------------------------
/docker/build.latest.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | unset KUBECONFIG
4 |
5 | cd .. && docker build -f docker/Dockerfile.latest \
6 | -t hanfangyuan/dify-on-wechat .
7 |
8 | docker tag hanfangyuan/dify-on-wechat hanfangyuan/dify-on-wechat:$(date +%y%m%d)
9 |
--------------------------------------------------------------------------------
/docker/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2.0'
2 | services:
3 | dify-on-wechat:
4 | # 对镜像地址的说明
5 | # docker hub 仓库国内无法访问, 可以使用ACR阿里云容器仓库,但由于我使用的是免费个人版仓库不保证可用性
6 |
7 | # 对tag的说明
8 | # tag为 master 表示是主分支的镜像,最新版本,功能不稳定
9 | # tag为 latest 表示是最新的release镜像,相对稳定(个人没有精力全面测试)
10 | # tag为 arm64 表示镜像为arm64架构镜像,其他为amd64架构镜像
11 |
12 | # image: hanfangyuan/dify-on-wechat:latest # docker hub
13 | # image: hanfangyuan/dify-on-wechat:arm64 # docker hub arm64
14 | image: registry.cn-hangzhou.aliyuncs.com/hanfangyuan/dify-on-wechat:latest # acr
15 | # image: registry.cn-hangzhou.aliyuncs.com/hanfangyuan/dify-on-wechat:arm64 # acr arm64
16 | restart: always
17 | pull_policy: always
18 | container_name: dify-on-wechat
19 | security_opt:
20 | - seccomp:unconfined
21 | environment:
22 | DIFY_API_BASE: 'https://api.dify.ai/v1'
23 | DIFY_API_KEY: 'app-xx'
24 | DIFY_APP_TYPE: 'chatbot' # dify助手类型 chatbot(对应聊天助手)/agent(对应Agent)/workflow(对应工作流),默认为chatbot
25 | # DIFY_CONVERSATION_MAX_MESSAGES: '5' # dify目前不支持设置历史消息长度,暂时使用超过最大消息数清空会话的策略,缺点是没有滑动窗口,会突然丢失历史消息,当设置的值小于等于0,则不限制历史消息长度
26 | EXPIRES_IN_SECONDS: '3600' # dify会话过期时间,单位秒,默认3600秒
27 | MODEL: 'dify'
28 | SINGLE_CHAT_PREFIX: '[""]' # 私聊前缀,配置示例 '["bot", "ai"]'
29 | SINGLE_CHAT_REPLY_PREFIX: '""' # 单聊回复前缀,配置示例 '"bot"' 只支持单个字符串,请勿配置成列表形式
30 | GROUP_CHAT_PREFIX: '["@bot"]'
31 | GROUP_NAME_WHITE_LIST: '["ALL_GROUP"]'
32 | TZ: 'Asia/Shanghai'
      # web_ui 相关配置,只支持wx channel_type生效,默认不开启,取消注释下方配置可开启web_ui功能
34 | # DIFY_ON_WECHAT_EXEC: 'python web_ui.py'
35 | # WEB_UI_PORT: '7860'
36 | # WEB_UI_USERNAME: 'dow'
37 | # WEB_UI_PASSWORD: 'dify-on-wechat' # 务必保证修改默认的用户名和密码
38 | # 图片理解功能
39 | # IMAGE_RECOGNITION: 'false' # 是否开启图片理解功能,默认为false
40 | ports:
41 | - "7860:7860"
42 |
--------------------------------------------------------------------------------
/docker/entrypoint.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -e

# build prefix
DIFY_ON_WECHAT_PREFIX=${DIFY_ON_WECHAT_PREFIX:-""}
# path to config.json
DIFY_ON_WECHAT_CONFIG_PATH=${DIFY_ON_WECHAT_CONFIG_PATH:-""}
# execution command line
DIFY_ON_WECHAT_EXEC=${DIFY_ON_WECHAT_EXEC:-""}

# use environment variables to pass parameters
# if you have not defined environment variables, set them below
# export OPEN_AI_API_KEY=${OPEN_AI_API_KEY:-'YOUR API KEY'}
# export OPEN_AI_PROXY=${OPEN_AI_PROXY:-""}
# export SINGLE_CHAT_PREFIX=${SINGLE_CHAT_PREFIX:-'["bot", "@bot"]'}
# export SINGLE_CHAT_REPLY_PREFIX=${SINGLE_CHAT_REPLY_PREFIX:-'"[bot] "'}
# export GROUP_CHAT_PREFIX=${GROUP_CHAT_PREFIX:-'["@bot"]'}
# export GROUP_NAME_WHITE_LIST=${GROUP_NAME_WHITE_LIST:-'["ChatGPT测试群", "ChatGPT测试群2"]'}
# export IMAGE_CREATE_PREFIX=${IMAGE_CREATE_PREFIX:-'["画", "看", "找"]'}
# export CONVERSATION_MAX_TOKENS=${CONVERSATION_MAX_TOKENS:-"1000"}
# export SPEECH_RECOGNITION=${SPEECH_RECOGNITION:-"False"}
# export CHARACTER_DESC=${CHARACTER_DESC:-"你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。"}
# export EXPIRES_IN_SECONDS=${EXPIRES_IN_SECONDS:-"3600"}

# DIFY_ON_WECHAT_PREFIX is empty, use /app
if [ "$DIFY_ON_WECHAT_PREFIX" == "" ] ; then
    DIFY_ON_WECHAT_PREFIX=/app
fi

# DIFY_ON_WECHAT_CONFIG_PATH is empty, use '/app/config.json'
if [ "$DIFY_ON_WECHAT_CONFIG_PATH" == "" ] ; then
    DIFY_ON_WECHAT_CONFIG_PATH=$DIFY_ON_WECHAT_PREFIX/config.json
fi

# DIFY_ON_WECHAT_EXEC is empty, use 'python app.py'
if [ "$DIFY_ON_WECHAT_EXEC" == "" ] ; then
    DIFY_ON_WECHAT_EXEC="python app.py"
fi

# modify content in config.json
# if [ "$OPEN_AI_API_KEY" == "YOUR API KEY" ] || [ "$OPEN_AI_API_KEY" == "" ]; then
#     echo -e "\033[31m[Warning] You need to set OPEN_AI_API_KEY before running!\033[0m"
# fi


# go to prefix dir
cd $DIFY_ON_WECHAT_PREFIX
# execute
$DIFY_ON_WECHAT_EXEC
50 |
--------------------------------------------------------------------------------
/docs/audios/chengdu-disney.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImGoodBai/onewebot2/08c0b445b8dae3153cc2b858377630b11d6562e0/docs/audios/chengdu-disney.mp3
--------------------------------------------------------------------------------
/docs/images/image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImGoodBai/onewebot2/08c0b445b8dae3153cc2b858377630b11d6562e0/docs/images/image1.jpg
--------------------------------------------------------------------------------
/docs/images/image2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImGoodBai/onewebot2/08c0b445b8dae3153cc2b858377630b11d6562e0/docs/images/image2.jpg
--------------------------------------------------------------------------------
/docs/images/image4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImGoodBai/onewebot2/08c0b445b8dae3153cc2b858377630b11d6562e0/docs/images/image4.jpg
--------------------------------------------------------------------------------
/docs/images/image5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImGoodBai/onewebot2/08c0b445b8dae3153cc2b858377630b11d6562e0/docs/images/image5.jpg
--------------------------------------------------------------------------------
/docs/images/image6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImGoodBai/onewebot2/08c0b445b8dae3153cc2b858377630b11d6562e0/docs/images/image6.jpg
--------------------------------------------------------------------------------
/docs/images/plugin-suno-1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImGoodBai/onewebot2/08c0b445b8dae3153cc2b858377630b11d6562e0/docs/images/plugin-suno-1.jpg
--------------------------------------------------------------------------------
/docs/images/plugin-suno-2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImGoodBai/onewebot2/08c0b445b8dae3153cc2b858377630b11d6562e0/docs/images/plugin-suno-2.jpg
--------------------------------------------------------------------------------
/docs/images/supportme.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImGoodBai/onewebot2/08c0b445b8dae3153cc2b858377630b11d6562e0/docs/images/supportme.jpg
--------------------------------------------------------------------------------
/docs/images/wechat.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImGoodBai/onewebot2/08c0b445b8dae3153cc2b858377630b11d6562e0/docs/images/wechat.jpg
--------------------------------------------------------------------------------
/docs/images/wework.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImGoodBai/onewebot2/08c0b445b8dae3153cc2b858377630b11d6562e0/docs/images/wework.jpg
--------------------------------------------------------------------------------
/docs/version/old-version.md:
--------------------------------------------------------------------------------
1 | ## 归档更新日志
2 |
3 | 2023.04.26: 支持企业微信应用号部署,兼容插件,并支持语音图片交互,私人助理理想选择,使用文档。(contributed by @lanvent in #944)
4 |
5 | 2023.04.05: 支持微信公众号部署,兼容插件,并支持语音图片交互,使用文档。(contributed by @JS00000 in #686)
6 |
7 | 2023.04.05: 增加能让ChatGPT使用工具的tool插件,使用文档。工具相关issue可反馈至chatgpt-tool-hub。(contributed by @goldfishh in #663)
8 |
9 | 2023.03.25: 支持插件化开发,目前已实现 多角色切换、文字冒险游戏、管理员指令、Stable Diffusion等插件,使用参考 #578。(contributed by @lanvent in #565)
10 |
11 | 2023.03.09: 基于 whisper API(后续已接入更多的语音API服务) 实现对语音消息的解析和回复,添加配置项 "speech_recognition":true 即可启用,使用参考 #415。(contributed by wanggang1987 in #385)
12 |
13 | 2023.02.09: 扫码登录存在账号限制风险,请谨慎使用,参考#58
--------------------------------------------------------------------------------
/lib/dify/dify_client.py:
--------------------------------------------------------------------------------
1 | import requests
2 |
3 |
class DifyClient:
    """Thin HTTP wrapper around the Dify REST API (bearer-token auth)."""

    def __init__(self, api_key, base_url: str = 'https://api.dify.ai/v1'):
        self.api_key = api_key
        self.base_url = base_url

    def _send_request(self, method, endpoint, json=None, params=None, stream=False):
        """Issue a JSON request against the API; returns the raw Response."""
        return requests.request(
            method,
            f"{self.base_url}{endpoint}",
            json=json,
            params=params,
            headers={
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json",
            },
            stream=stream,
        )

    def _send_request_with_files(self, method, endpoint, data, files):
        """Issue a multipart request (requests sets the content type itself)."""
        return requests.request(
            method,
            f"{self.base_url}{endpoint}",
            data=data,
            headers={"Authorization": f"Bearer {self.api_key}"},
            files=files,
        )

    def message_feedback(self, message_id, rating, user):
        """Submit a like/dislike rating for a message on behalf of *user*."""
        payload = {"rating": rating, "user": user}
        return self._send_request("POST", f"/messages/{message_id}/feedbacks", payload)

    def get_application_parameters(self, user):
        """Fetch the application's input-form parameters."""
        return self._send_request("GET", "/parameters", params={"user": user})

    def file_upload(self, user, files):
        """Upload files for later use in messages."""
        return self._send_request_with_files("POST", "/files/upload", data={"user": user}, files=files)
46 |
47 |
class CompletionClient(DifyClient):
    """Client for Dify completion (text-generation) applications."""

    def create_completion_message(self, inputs, response_mode, user, files=None):
        """Create a completion message; the response streams when
        response_mode == "streaming"."""
        payload = {
            "inputs": inputs,
            "response_mode": response_mode,
            "user": user,
            "files": files,
        }
        return self._send_request(
            "POST",
            "/completion-messages",
            payload,
            stream=(response_mode == "streaming"),
        )
58 |
59 |
class ChatClient(DifyClient):
    """Client for Dify chat applications: messages and conversations."""

    def create_chat_message(self, inputs, query, user, response_mode="blocking", conversation_id=None, files=None):
        """Send a chat message; conversation_id continues an existing
        conversation, and "streaming" response_mode streams the reply."""
        payload = {
            "inputs": inputs,
            "query": query,
            "user": user,
            "response_mode": response_mode,
            "files": files,
        }
        if conversation_id:
            payload["conversation_id"] = conversation_id
        return self._send_request(
            "POST",
            "/chat-messages",
            payload,
            stream=(response_mode == "streaming"),
        )

    def get_conversation_messages(self, user, conversation_id=None, first_id=None, limit=None):
        """List a conversation's messages with optional paging filters."""
        params = {"user": user}
        if conversation_id:
            params["conversation_id"] = conversation_id
        if first_id:
            params["first_id"] = first_id
        if limit:
            params["limit"] = limit
        return self._send_request("GET", "/messages", params=params)

    def get_conversations(self, user, last_id=None, limit=None, pinned=None):
        """List the user's conversations (None-valued params are dropped by requests)."""
        query = {"user": user, "last_id": last_id, "limit": limit, "pinned": pinned}
        return self._send_request("GET", "/conversations", params=query)

    def rename_conversation(self, conversation_id, name, user):
        """Rename a conversation."""
        payload = {"name": name, "user": user}
        return self._send_request("POST", f"/conversations/{conversation_id}/name", payload)
94 |
--------------------------------------------------------------------------------
/lib/itchat/LICENSE:
--------------------------------------------------------------------------------
1 | **The MIT License (MIT)**
2 |
3 | Copyright (c) 2017 LittleCoder ([littlecodersh@Github](https://github.com/littlecodersh))
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
6 |
7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
8 |
9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
10 |
--------------------------------------------------------------------------------
/lib/itchat/__init__.py:
--------------------------------------------------------------------------------
1 | from .core import Core
2 | from .config import VERSION, ASYNC_COMPONENTS
3 | from .log import set_logging
4 |
5 | if ASYNC_COMPONENTS:
6 | from .async_components import load_components
7 | else:
8 | from .components import load_components
9 |
10 |
# Re-export the package version declared in config.py.
__version__ = VERSION


# Populated below with the module-level Core instance.
instanceList = []
15 |
def load_async_itchat() -> Core:
    """Build a fresh Core wired with the async component set.

    Returns:
        Core: the abstract interface of itchat
    """
    from .async_components import load_components
    load_components(Core)
    core = Core()
    return core
25 |
26 |
def load_sync_itchat() -> Core:
    """Build a fresh Core wired with the synchronous component set.

    Returns:
        Core: the abstract interface of itchat
    """
    from .components import load_components
    load_components(Core)
    core = Core()
    return core
36 |
37 |
# Choose the component flavour once at import time; mirrors the
# ASYNC_COMPONENTS environment switch read in config.py.
if ASYNC_COMPONENTS:
    instance = load_async_itchat()
else:
    instance = load_sync_itchat()


instanceList = [instance]

# I really want to use sys.modules[__name__] = originInstance
# but it makes auto-fill a real mess, so forgive me for my following **
# actually it took me less than 30 seconds, god bless Uganda

# The names below re-export the default instance's bound methods so that
# `import itchat; itchat.send(...)` works without touching `instance`.

# components.login
login = instance.login
get_QRuuid = instance.get_QRuuid
get_QR = instance.get_QR
check_login = instance.check_login
web_init = instance.web_init
show_mobile_login = instance.show_mobile_login
start_receiving = instance.start_receiving
get_msg = instance.get_msg
logout = instance.logout
# components.contact
update_chatroom = instance.update_chatroom
update_friend = instance.update_friend
get_contact = instance.get_contact
get_friends = instance.get_friends
get_chatrooms = instance.get_chatrooms
get_mps = instance.get_mps
set_alias = instance.set_alias
set_pinned = instance.set_pinned
accept_friend = instance.accept_friend
get_head_img = instance.get_head_img
create_chatroom = instance.create_chatroom
set_chatroom_name = instance.set_chatroom_name
delete_member_from_chatroom = instance.delete_member_from_chatroom
add_member_into_chatroom = instance.add_member_into_chatroom
# components.messages
send_raw_msg = instance.send_raw_msg
send_msg = instance.send_msg
upload_file = instance.upload_file
send_file = instance.send_file
send_image = instance.send_image
send_video = instance.send_video
send = instance.send
revoke = instance.revoke
# components.hotreload
dump_login_status = instance.dump_login_status
load_login_status = instance.load_login_status
# components.register
auto_login = instance.auto_login
configured_reply = instance.configured_reply
msg_register = instance.msg_register
run = instance.run
# other functions
search_friends = instance.search_friends
search_chatrooms = instance.search_chatrooms
search_mps = instance.search_mps
# no-op self-assignment; keeps the name exported from this module as well
set_logging = set_logging
97 |
--------------------------------------------------------------------------------
/lib/itchat/async_components/__init__.py:
--------------------------------------------------------------------------------
1 | from .contact import load_contact
2 | from .hotreload import load_hotreload
3 | from .login import load_login
4 | from .messages import load_messages
5 | from .register import load_register
6 |
def load_components(core):
    """Attach every async component group (contact, hotreload, login,
    messages, register) to the given Core class/instance."""
    for attach in (load_contact, load_hotreload, load_login,
                   load_messages, load_register):
        attach(core)
13 |
--------------------------------------------------------------------------------
/lib/itchat/components/__init__.py:
--------------------------------------------------------------------------------
1 | from .contact import load_contact
2 | from .hotreload import load_hotreload
3 | from .login import load_login
4 | from .messages import load_messages
5 | from .register import load_register
6 |
def load_components(core):
    """Attach every synchronous component group (contact, hotreload, login,
    messages, register) to the given Core class/instance."""
    for attach in (load_contact, load_hotreload, load_login,
                   load_messages, load_register):
        attach(core)
13 |
--------------------------------------------------------------------------------
/lib/itchat/components/hotreload.py:
--------------------------------------------------------------------------------
1 | import pickle, os
2 | import logging
3 |
4 | import requests
5 |
6 | from ..config import VERSION
7 | from ..returnvalues import ReturnValue
8 | from ..storage import templates
9 | from .contact import update_local_chatrooms, update_local_friends
10 | from .messages import produce_msg
11 |
12 | logger = logging.getLogger('itchat')
13 |
def load_hotreload(core):
    # Bind the hot-reload entry points (dump/load of login state) onto core.
    core.dump_login_status = dump_login_status
    core.load_login_status = load_login_status
17 |
def dump_login_status(self, fileDir=None):
    """Pickle the current login state (loginInfo, cookies, storage) to fileDir.

    Args:
        fileDir: target path; defaults to self.hotReloadDir.

    Raises:
        Exception('Incorrect fileDir') when the path cannot be written.
    """
    fileDir = fileDir or self.hotReloadDir
    # Probe that fileDir is writable before trusting it with the real dump.
    try:
        with open(fileDir, 'w') as f:
            f.write('itchat - DELETE THIS')
        os.remove(fileDir)
    except (OSError, TypeError) as e:
        # Narrowed from a bare `except:` so unrelated errors (e.g.
        # KeyboardInterrupt) propagate; chain the cause for debuggability.
        raise Exception('Incorrect fileDir') from e
    status = {
        'version' : VERSION,
        'loginInfo' : self.loginInfo,
        'cookies' : self.s.cookies.get_dict(),
        'storage' : self.storageClass.dumps()}
    with open(fileDir, 'wb') as f:
        pickle.dump(status, f)
    logger.debug('Dump login status for hot reload successfully.')
34 |
def load_login_status(self, fileDir,
        loginCallback=None, exitCallback=None):
    """Restore a previously dumped login session from the pickle at fileDir.

    Returns a ReturnValue whose BaseResponse.Ret is 0 on success, -1002 if the
    file is missing/unreadable, -1005 on a version mismatch, -1003 if the
    server rejects the cached session.
    """
    # NOTE: pickle.load on the local hot-reload cache; only files this
    # process wrote via dump_login_status should ever be loaded here.
    try:
        with open(fileDir, 'rb') as f:
            j = pickle.load(f)
    except Exception as e:
        logger.debug('No such file, loading login status failed.')
        return ReturnValue({'BaseResponse': {
            'ErrMsg': 'No such file, loading login status failed.',
            'Ret': -1002, }})

    # A dump written by a different itchat version is ignored entirely.
    if j.get('version', '') != VERSION:
        logger.debug(('you have updated itchat from %s to %s, ' +
            'so cached status is ignored') % (
            j.get('version', 'old version'), VERSION))
        return ReturnValue({'BaseResponse': {
            'ErrMsg': 'cached status ignored because of version',
            'Ret': -1005, }})
    # Rehydrate login info, cookies and contact storage from the dump.
    self.loginInfo = j['loginInfo']
    self.loginInfo['User'] = templates.User(self.loginInfo['User'])
    self.loginInfo['User'].core = self
    self.s.cookies = requests.utils.cookiejar_from_dict(j['cookies'])
    self.storageClass.loads(j['storage'])
    # Probe the server: a successful get_msg proves the session is alive.
    try:
        msgList, contactList = self.get_msg()
    except:
        msgList = contactList = None
    # NOTE(review): this is True only when the first truthy of the pair (or
    # contactList if both are falsy) is literally None — quirky but kept as-is.
    if (msgList or contactList) is None:
        self.logout()
        # Seed cookies that let the server offer push (re-)login next time.
        load_last_login_status(self.s, j['cookies'])
        logger.debug('server refused, loading login status failed.')
        return ReturnValue({'BaseResponse': {
            'ErrMsg': 'server refused, loading login status failed.',
            'Ret': -1003, }})
    else:
        if contactList:
            for contact in contactList:
                # '@@' in UserName marks a chatroom; otherwise a friend.
                if '@@' in contact['UserName']:
                    update_local_chatrooms(self, [contact])
                else:
                    update_local_friends(self, [contact])
        if msgList:
            msgList = produce_msg(self, msgList)
            for msg in msgList: self.msgList.put(msg)
        self.start_receiving(exitCallback)
        logger.debug('loading login status succeeded.')
        if hasattr(loginCallback, '__call__'):
            loginCallback()
        return ReturnValue({'BaseResponse': {
            'ErrMsg': 'loading login status succeeded.',
            'Ret': 0, }})
86 |
def load_last_login_status(session, cookiesDict):
    """Prime `session` with cookies that let the server offer push login.

    Rebuilds the cookie jar from a previously dumped cookie dict, marking the
    old load as expired so the web client shows the one-click login screen.
    Failures are logged and swallowed (best-effort by design).
    """
    try:
        session.cookies = requests.utils.cookiejar_from_dict({
            'webwxuvid': cookiesDict['webwxuvid'],
            'webwx_auth_ticket': cookiesDict['webwx_auth_ticket'],
            'login_frequency': '2',
            'last_wxuin': cookiesDict['wxuin'],
            'wxloadtime': cookiesDict['wxloadtime'] + '_expired',
            'wxpluginkey': cookiesDict['wxloadtime'],
            'wxuin': cookiesDict['wxuin'],
            'mm_lang': 'zh_CN',
            'MM_WX_NOTIFY_STATE': '1',
            'MM_WX_SOUND_STATE': '1', })
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; a missing cookie key (KeyError) lands here.
        logger.info('Load status for push login failed, we may have experienced a cookies change.')
        logger.info('If you are using the newest version of itchat, you may report a bug.')
103 |
--------------------------------------------------------------------------------
/lib/itchat/config.py:
--------------------------------------------------------------------------------
import os, platform

# Package version; also embedded in hot-reload dumps for compatibility checks.
VERSION = '1.5.0.dev'

# Use this environment variable to choose the async or sync component set.
# NOTE(review): os.environ.get returns a *string* whenever the variable is
# set, so ANY non-empty value -- including "0" or "false" -- enables async
# mode; confirm this is intended before relying on it.
ASYNC_COMPONENTS = os.environ.get('ITCHAT_UOS_ASYNC', False)

BASE_URL = 'https://login.weixin.qq.com'
OS = platform.system()  # Windows, Linux, Darwin
DIR = os.getcwd()
DEFAULT_QR = 'QR.png'  # default filename for the login QR code image
TIMEOUT = (10, 60)  # (connect, read) timeouts in seconds for requests

USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36'

# UOS desktop-client patch values sent during login to bypass web-login bans.
UOS_PATCH_CLIENT_VERSION = '2.0.0'
UOS_PATCH_EXTSPAM = 'Go8FCIkFEokFCggwMDAwMDAwMRAGGvAESySibk50w5Wb3uTl2c2h64jVVrV7gNs06GFlWplHQbY/5FfiO++1yH4ykCyNPWKXmco+wfQzK5R98D3so7rJ5LmGFvBLjGceleySrc3SOf2Pc1gVehzJgODeS0lDL3/I/0S2SSE98YgKleq6Uqx6ndTy9yaL9qFxJL7eiA/R3SEfTaW1SBoSITIu+EEkXff+Pv8NHOk7N57rcGk1w0ZzRrQDkXTOXFN2iHYIzAAZPIOY45Lsh+A4slpgnDiaOvRtlQYCt97nmPLuTipOJ8Qc5pM7ZsOsAPPrCQL7nK0I7aPrFDF0q4ziUUKettzW8MrAaiVfmbD1/VkmLNVqqZVvBCtRblXb5FHmtS8FxnqCzYP4WFvz3T0TcrOqwLX1M/DQvcHaGGw0B0y4bZMs7lVScGBFxMj3vbFi2SRKbKhaitxHfYHAOAa0X7/MSS0RNAjdwoyGHeOepXOKY+h3iHeqCvgOH6LOifdHf/1aaZNwSkGotYnYScW8Yx63LnSwba7+hESrtPa/huRmB9KWvMCKbDThL/nne14hnL277EDCSocPu3rOSYjuB9gKSOdVmWsj9Dxb/iZIe+S6AiG29Esm+/eUacSba0k8wn5HhHg9d4tIcixrxveflc8vi2/wNQGVFNsGO6tB5WF0xf/plngOvQ1/ivGV/C1Qpdhzznh0ExAVJ6dwzNg7qIEBaw+BzTJTUuRcPk92Sn6QDn2Pu3mpONaEumacjW4w6ipPnPw+g2TfywJjeEcpSZaP4Q3YV5HG8D6UjWA4GSkBKculWpdCMadx0usMomsSS/74QgpYqcPkmamB4nVv1JxczYITIqItIKjD35IGKAUwAA=='
18 |
--------------------------------------------------------------------------------
/lib/itchat/content.py:
--------------------------------------------------------------------------------
# Message type constants accepted by itchat's msg_register and used to tag
# incoming messages.
TEXT = 'Text'
MAP = 'Map'
CARD = 'Card'
NOTE = 'Note'
SHARING = 'Sharing'
PICTURE = 'Picture'
RECORDING = VOICE = 'Recording'  # two names for one value, kept for compat
ATTACHMENT = 'Attachment'
VIDEO = 'Video'
FRIENDS = 'Friends'
SYSTEM = 'System'

# All types an incoming message may carry.  RECORDING and VOICE are the same
# string, so the list holds a duplicate entry -- harmless for `in` tests.
INCOME_MSG = [TEXT, MAP, CARD, NOTE, SHARING, PICTURE,
              RECORDING, VOICE, ATTACHMENT, VIDEO, FRIENDS, SYSTEM]
15 |
--------------------------------------------------------------------------------
/lib/itchat/log.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
class LogSystem(object):
    """Owns the 'itchat' logger plus its command-line and file handlers."""

    # Class-level defaults; set_logging shadows them with instance attributes.
    handlerList = []
    showOnCmd = True
    loggingLevel = logging.INFO
    loggingFile = None

    def __init__(self):
        self.logger = logging.getLogger('itchat')
        self.logger.addHandler(logging.NullHandler())
        self.logger.setLevel(self.loggingLevel)
        self.cmdHandler = logging.StreamHandler()
        self.fileHandler = None
        self.logger.addHandler(self.cmdHandler)

    def set_logging(self, showOnCmd=True, loggingFile=None,
            loggingLevel=logging.INFO):
        """Reconfigure console output, the optional log file, and the level.

        Args:
            showOnCmd: attach/detach the stderr StreamHandler.
            loggingFile: path of a log file, or None to disable file logging.
            loggingLevel: logger level, e.g. logging.DEBUG.
        """
        if showOnCmd != self.showOnCmd:
            if showOnCmd:
                self.logger.addHandler(self.cmdHandler)
            else:
                self.logger.removeHandler(self.cmdHandler)
            self.showOnCmd = showOnCmd
        if loggingFile != self.loggingFile:
            if self.loggingFile is not None:  # clear old fileHandler
                self.logger.removeHandler(self.fileHandler)
                self.fileHandler.close()
                # fix: drop the reference to the closed handler so later
                # calls never see (or re-remove) a stale, unusable handler
                self.fileHandler = None
            if loggingFile is not None:  # add new fileHandler
                self.fileHandler = logging.FileHandler(loggingFile)
                self.logger.addHandler(self.fileHandler)
            self.loggingFile = loggingFile
        if loggingLevel != self.loggingLevel:
            self.logger.setLevel(loggingLevel)
            self.loggingLevel = loggingLevel
34 |
# Module-level singleton; set_logging is re-exported through itchat/__init__.
ls = LogSystem()
set_logging = ls.set_logging
37 |
--------------------------------------------------------------------------------
/lib/itchat/returnvalues.py:
--------------------------------------------------------------------------------
#coding=utf8
# Language used to translate BaseResponse error messages; falsy disables it.
TRANSLATE = 'Chinese'

class ReturnValue(dict):
    ''' turn return value of itchat into a boolean value
    for requests:
    ..code::python

        import requests
        r = requests.get('http://httpbin.org/get')
        print(ReturnValue(rawResponse=r)

    for normal dict:
    ..code::python

        returnDict = {
            'BaseResponse': {
                'Ret': 0,
                'ErrMsg': 'My error msg', }, }
        print(ReturnValue(returnDict))
    '''
    def __init__(self, returnValueDict=None, rawResponse=None):
        # fix: `returnValueDict={}` was a mutable default argument; use a
        # None sentinel instead (behavior is unchanged for all callers).
        if returnValueDict is None:
            returnValueDict = {}
        if rawResponse:
            try:
                returnValueDict = rawResponse.json()
            except ValueError:
                # Non-JSON body: synthesize an error response, keep raw bytes.
                returnValueDict = {
                    'BaseResponse': {
                        'Ret': -1004,
                        'ErrMsg': 'Unexpected return value', },
                    'Data': rawResponse.content, }
        for k, v in returnValueDict.items():
            self[k] = v
        if 'BaseResponse' not in self:
            self['BaseResponse'] = {
                'ErrMsg': 'no BaseResponse in raw response',
                'Ret': -1000, }
        if TRANSLATE:
            # Keep the original message in RawMsg, translate ErrMsg by Ret.
            self['BaseResponse']['RawMsg'] = self['BaseResponse'].get('ErrMsg', '')
            self['BaseResponse']['ErrMsg'] = \
                TRANSLATION[TRANSLATE].get(
                self['BaseResponse'].get('Ret', '')) \
                or self['BaseResponse'].get('ErrMsg', u'No ErrMsg')
            self['BaseResponse']['RawMsg'] = \
                self['BaseResponse']['RawMsg'] or self['BaseResponse']['ErrMsg']
    def __nonzero__(self):
        # Python 2 truthiness hook: Ret == 0 means success.
        return self['BaseResponse'].get('Ret') == 0
    def __bool__(self):
        return self.__nonzero__()
    def __str__(self):
        return '{%s}' % ', '.join(
            ['%s: %s' % (repr(k),repr(v)) for k,v in self.items()])
    def __repr__(self):
        # fix: the format string was empty ('' % ...), which raised
        # TypeError on every repr(); restored the upstream itchat format.
        return '<ItchatReturnValue: %s>' % self.__str__()

# Ret-code -> localized message tables used when TRANSLATE is set.
TRANSLATION = {
    'Chinese': {
        -1000: u'返回值不带BaseResponse',
        -1001: u'无法找到对应的成员',
        -1002: u'文件位置错误',
        -1003: u'服务器拒绝连接',
        -1004: u'服务器返回异常值',
        -1005: u'参数错误',
        -1006: u'无效操作',
        0: u'请求成功',
    },
}
68 |
--------------------------------------------------------------------------------
/lib/itchat/storage/messagequeue.py:
--------------------------------------------------------------------------------
1 | import logging
2 | try:
3 | import Queue as queue
4 | except ImportError:
5 | import queue
6 |
7 | from .templates import AttributeDict
8 |
9 | logger = logging.getLogger('itchat')
10 |
class Queue(queue.Queue):
    # FIFO queue that wraps every enqueued item in a Message.
    def put(self, message):
        # Coerce plain dicts to Message so consumers get attribute access.
        # (Explicit base-class call is kept: Queue.Queue is a classic class
        # on Python 2, where super() would fail.)
        queue.Queue.put(self, Message(message))
14 |
class Message(AttributeDict):
    """A single incoming message with attribute-style access."""

    def download(self, fileName):
        # For media messages, self.text holds a downloader callable.
        downloader = self.text
        if hasattr(downloader, '__call__'):
            return downloader(fileName)
        return b''

    def __getitem__(self, value):
        # Transparently map the pre-1.3.0 key spellings to the new ones.
        if value in ('isAdmin', 'isAt'):
            renamed = value[0].upper() + value[1:]  # ''[1:] == ''
            logger.debug('%s is expired in 1.3.0, use %s instead.' % (value, renamed))
            value = renamed
        return super(Message, self).__getitem__(value)

    def __str__(self):
        pairs = ['%s: %s' % (repr(k), repr(v)) for k, v in self.items()]
        return '{%s}' % ', '.join(pairs)

    def __repr__(self):
        shortName = self.__class__.__name__.split('.')[-1]
        return '<%s: %s>' % (shortName, self.__str__())
33 |
--------------------------------------------------------------------------------
/nixpacks.toml:
--------------------------------------------------------------------------------
1 | [phases.setup]
2 | nixPkgs = ['python310']
3 | cmds = ['apt-get update','apt-get install -y --no-install-recommends ffmpeg espeak libavcodec-extra']
4 | [phases.install]
5 | cmds = ['python -m venv /opt/venv && . /opt/venv/bin/activate && pip install -r requirements.txt && pip install -r requirements-optional.txt']
6 | [start]
7 | cmd = "python ./app.py"
--------------------------------------------------------------------------------
/plugins/__init__.py:
--------------------------------------------------------------------------------
1 | from .event import *
2 | from .plugin import *
3 | from .plugin_manager import PluginManager
4 |
# Shared PluginManager singleton used across the whole process.
instance = PluginManager()

# Re-exported so plugins can write `@plugins.register(...)`.
register = instance.register
# load_plugins = instance.load_plugins
# emit_event = instance.emit_event
10 |
--------------------------------------------------------------------------------
/plugins/banwords/.gitignore:
--------------------------------------------------------------------------------
1 | banwords.txt
--------------------------------------------------------------------------------
/plugins/banwords/README.md:
--------------------------------------------------------------------------------
1 |
2 | ## 插件描述
3 |
4 | 简易的敏感词插件,暂不支持分词,请自行导入词库到插件文件夹中的`banwords.txt`,每行一个词,一个参考词库是[1](https://github.com/cjh0613/tencent-sensitive-words/blob/main/sensitive_words_lines.txt)。
5 |
6 | 使用前将`config.json.template`复制为`config.json`,并自行配置。
7 |
8 | 目前插件对消息的默认处理行为有如下两种:
9 |
10 | - `ignore` : 无视这条消息。
11 | - `replace` : 将消息中的敏感词替换成"*",并回复违规。
12 |
13 | ```json
14 | "action": "replace",
15 | "reply_filter": true,
16 | "reply_action": "ignore"
17 | ```
18 |
19 | 在以上配置项中:
20 |
21 | - `action`: 对用户消息的默认处理行为
22 | - `reply_filter`: 是否对ChatGPT的回复也进行敏感词过滤
23 | - `reply_action`: 如果开启了回复过滤,对回复的默认处理行为
24 |
25 | ## 致谢
26 |
27 | 搜索功能实现来自https://github.com/toolgood/ToolGood.Words
--------------------------------------------------------------------------------
/plugins/banwords/__init__.py:
--------------------------------------------------------------------------------
1 | from .banwords import *
2 |
--------------------------------------------------------------------------------
/plugins/banwords/banwords.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | import json
4 | import os
5 |
6 | import plugins
7 | from bridge.context import ContextType
8 | from bridge.reply import Reply, ReplyType
9 | from common.log import logger
10 | from plugins import *
11 |
12 | from .lib.WordsSearch import WordsSearch
13 |
14 |
@plugins.register(
    name="Banwords",
    desire_priority=100,
    hidden=True,
    desc="判断消息中是否有敏感词、决定是否回复。",
    version="1.0",
    author="lanvent",
)
class Banwords(Plugin):
    """Filter banned words from user messages and, optionally, bot replies."""

    def __init__(self):
        super().__init__()
        try:
            # load config
            conf = super().load_config()
            curdir = os.path.dirname(__file__)
            if not conf:
                # Fall back to the default config; persist it if no file
                # exists yet.  (fix: previously `conf` stayed None when
                # load_config() was falsy but config.json already existed,
                # which crashed on conf["action"] below.)
                conf = {"action": "ignore"}
                config_path = os.path.join(curdir, "config.json")
                if not os.path.exists(config_path):
                    with open(config_path, "w") as f:
                        json.dump(conf, f, indent=4)

            self.searchr = WordsSearch()
            self.action = conf["action"]
            banwords_path = os.path.join(curdir, "banwords.txt")
            with open(banwords_path, "r", encoding="utf-8") as f:
                # One keyword per line; blank lines are skipped.
                words = [line.strip() for line in f if line.strip()]
            self.searchr.SetKeywords(words)
            self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context
            if conf.get("reply_filter", True):
                # Also filter the model's replies before they are sent.
                self.handlers[Event.ON_DECORATE_REPLY] = self.on_decorate_reply
                self.reply_action = conf.get("reply_action", "ignore")
            logger.info("[Banwords] inited")
        except Exception as e:
            # logger.warn is a deprecated alias of warning
            logger.warning("[Banwords] init failed, ignore or see https://github.com/zhayujie/chatgpt-on-wechat/tree/master/plugins/banwords .")
            raise e

    def on_handle_context(self, e_context: EventContext):
        """Drop or sanitize user input that contains a banned word."""
        if e_context["context"].type not in [
            ContextType.TEXT,
            ContextType.IMAGE_CREATE,
        ]:
            return

        content = e_context["context"].content
        logger.debug("[Banwords] on_handle_context. content: %s" % content)
        if self.action == "ignore":
            f = self.searchr.FindFirst(content)
            if f:
                logger.info("[Banwords] %s in message" % f["Keyword"])
                # Swallow the message entirely.
                e_context.action = EventAction.BREAK_PASS
                return
        elif self.action == "replace":
            if self.searchr.ContainsAny(content):
                reply = Reply(ReplyType.INFO, "发言中包含敏感词,请重试: \n" + self.searchr.Replace(content))
                e_context["reply"] = reply
                e_context.action = EventAction.BREAK_PASS
                return

    def on_decorate_reply(self, e_context: EventContext):
        """Apply the same filtering to the generated reply text."""
        if e_context["reply"].type not in [ReplyType.TEXT]:
            return

        reply = e_context["reply"]
        content = reply.content
        if self.reply_action == "ignore":
            f = self.searchr.FindFirst(content)
            if f:
                logger.info("[Banwords] %s in reply" % f["Keyword"])
                # Drop the reply; nothing is sent to the user.
                e_context["reply"] = None
                e_context.action = EventAction.BREAK_PASS
                return
        elif self.reply_action == "replace":
            if self.searchr.ContainsAny(content):
                reply = Reply(ReplyType.INFO, "已替换回复中的敏感词: \n" + self.searchr.Replace(content))
                e_context["reply"] = reply
                # Let later plugins still see the sanitized reply.
                e_context.action = EventAction.CONTINUE
                return

    def get_help_text(self, **kwargs):
        return "过滤消息中的敏感词。"
101 |
--------------------------------------------------------------------------------
/plugins/banwords/banwords.txt.template:
--------------------------------------------------------------------------------
1 | nipples
2 | pennis
3 | 法轮功
--------------------------------------------------------------------------------
/plugins/banwords/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "action": "replace",
3 | "reply_filter": true,
4 | "reply_action": "ignore"
5 | }
6 |
--------------------------------------------------------------------------------
/plugins/bdunit/README.md:
--------------------------------------------------------------------------------
1 | ## 插件说明
2 |
3 | 利用百度UNIT实现智能对话
4 |
5 | - 1.解决问题:chatgpt无法处理的指令,交给百度UNIT处理如:天气,日期时间,数学运算等
6 | - 2.如问时间:现在几点钟,今天几号
7 | - 3.如问天气:明天广州天气怎么样,这个周末深圳会不会下雨
8 | - 4.如问数学运算:23+45=多少,100-23=多少,35转化为二进制是多少?
9 |
10 | ## 使用说明
11 |
12 | ### 获取apikey
13 |
14 | 在百度UNIT官网上自己创建应用,申请百度机器人,可以把预先训练好的模型导入到自己的应用中,
15 |
详见 https://ai.baidu.com/unit/home#/home?track=61fe1b0d3407ce3face1d92cb5c291087095fc10c8377aaf ,或在 https://console.bce.baidu.com/ai 平台上申请
17 |
18 | ### 配置文件
19 |
20 | 将文件夹中`config.json.template`复制为`config.json`。
21 |
22 | 在其中填写百度UNIT官网上获取应用的API Key和Secret Key
23 |
24 | ``` json
25 | {
26 | "service_id": "s...", #"机器人ID"
27 | "api_key": "",
28 | "secret_key": ""
29 | }
30 | ```
--------------------------------------------------------------------------------
/plugins/bdunit/__init__.py:
--------------------------------------------------------------------------------
1 | from .bdunit import *
2 |
--------------------------------------------------------------------------------
/plugins/bdunit/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "service_id": "s...",
3 | "api_key": "",
4 | "secret_key": ""
5 | }
6 |
--------------------------------------------------------------------------------
/plugins/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "godcmd": {
3 | "password": "",
4 | "admin_users": []
5 | },
6 | "banwords": {
7 | "action": "replace",
8 | "reply_filter": true,
9 | "reply_action": "ignore"
10 | },
11 | "tool": {
12 | "tools": [
13 | "url-get",
14 | "meteo-weather"
15 | ],
16 | "kwargs": {
17 | "top_k_results": 2,
18 | "no_default": false,
19 | "model_name": "gpt-3.5-turbo"
20 | }
21 | },
22 | "linkai": {
23 | "group_app_map": {
24 | "测试群1": "default",
25 | "测试群2": "Kv2fXJcH"
26 | },
27 | "midjourney": {
28 | "enabled": true,
29 | "auto_translate": true,
30 | "img_proxy": true,
31 | "max_tasks": 3,
32 | "max_tasks_per_user": 1,
33 | "use_image_create_prefix": true
34 | },
35 | "summary": {
36 | "enabled": true,
37 | "group_enabled": true,
38 | "max_file_size": 5000,
39 | "type": ["FILE", "SHARING"]
40 | }
41 | },
42 | "hello": {
43 | "group_welc_fixed_msg": {
44 | "群聊1": "群聊1的固定欢迎语",
45 | "群聊2": "群聊2的固定欢迎语"
46 | },
47 | "group_welc_prompt": "请你随机使用一种风格说一句问候语来欢迎新用户\"{nickname}\"加入群聊。",
48 |
49 | "group_exit_prompt": "请你随机使用一种风格跟其他群用户说他违反规则\"{nickname}\"退出群聊。",
50 |
51 | "patpat_prompt": "请你随机使用一种风格介绍你自己,并告诉用户输入#help可以查看帮助信息。",
52 |
53 | "use_character_desc": false
54 | },
55 | "Apilot": {
56 | "alapi_token": "xxx",
57 | "morning_news_text_enabled": false
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
/plugins/dungeon/README.md:
--------------------------------------------------------------------------------
1 | 玩地牢游戏的聊天插件,触发方法如下:
2 |
3 | - `$开始冒险 <背景故事>` - 以<背景故事>开始一个地牢游戏,不填写会使用默认背景故事。之后聊天中你的所有消息会帮助ai完善这个故事。
4 | - `$停止冒险` - 停止一个地牢游戏,回归正常的ai。
5 |
--------------------------------------------------------------------------------
/plugins/dungeon/__init__.py:
--------------------------------------------------------------------------------
1 | from .dungeon import *
2 |
--------------------------------------------------------------------------------
/plugins/event.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | from enum import Enum
4 |
5 |
class Event(Enum):
    # Plugin lifecycle events emitted by the chat-channel pipeline.
    ON_RECEIVE_MESSAGE = 1  # a message has been received
    """
    e_context = { "channel": 消息channel, "context" : 本次消息的context}
    """

    ON_HANDLE_CONTEXT = 2  # before the message context is handled
    """
    e_context = { "channel": 消息channel, "context" : 本次消息的context, "reply" : 目前的回复,初始为空 }
    """

    ON_DECORATE_REPLY = 3  # a reply was produced and is about to be decorated
    """
    e_context = { "channel": 消息channel, "context" : 本次消息的context, "reply" : 目前的回复 }
    """

    ON_SEND_REPLY = 4  # right before the reply is sent
    """
    e_context = { "channel": 消息channel, "context" : 本次消息的context, "reply" : 目前的回复 }
    """

    # AFTER_SEND_REPLY = 5  # after the reply has been sent
29 |
class EventAction(Enum):
    # Event not finished: pass to the next plugin, or to the default handler
    # if no plugin is left.
    CONTINUE = 1
    # Event finished: skip remaining plugins, hand over to the default handler.
    BREAK = 2
    # Event finished: skip remaining plugins AND the default handler.
    BREAK_PASS = 3


class EventContext:
    """Mutable bag of state passed through plugin event handlers."""

    def __init__(self, event, econtext=None):
        # fix: the previous `econtext=dict()` default was a single shared
        # mutable object -- every EventContext created without an explicit
        # dict aliased (and leaked state into) the same one.
        self.event = event
        self.econtext = econtext if econtext is not None else dict()
        self.action = EventAction.CONTINUE

    def __getitem__(self, key):
        return self.econtext[key]

    def __setitem__(self, key, value):
        self.econtext[key] = value

    def __delitem__(self, key):
        del self.econtext[key]

    def is_pass(self):
        """True when default handling should be skipped entirely."""
        return self.action == EventAction.BREAK_PASS

    def is_break(self):
        """True when no further plugin should see this event."""
        return self.action == EventAction.BREAK or self.action == EventAction.BREAK_PASS
56 |
--------------------------------------------------------------------------------
/plugins/finish/__init__.py:
--------------------------------------------------------------------------------
1 | from .finish import *
2 |
--------------------------------------------------------------------------------
/plugins/finish/finish.py:
--------------------------------------------------------------------------------
1 | # encoding:utf-8
2 |
3 | import plugins
4 | from bridge.context import ContextType
5 | from bridge.reply import Reply, ReplyType
6 | from common.log import logger
7 | from config import conf
8 | from plugins import *
9 |
10 |
@plugins.register(
    name="Finish",
    desire_priority=-999,
    hidden=True,
    desc="A plugin that check unknown command",
    version="1.0",
    author="js00000",
)
class Finish(Plugin):
    """Catch-all plugin that rejects unrecognized plugin commands."""

    def __init__(self):
        super().__init__()
        self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context
        logger.info("[Finish] inited")

    def on_handle_context(self, e_context: EventContext):
        """Reply with an error when text starts with the plugin trigger prefix."""
        context = e_context["context"]
        if context.type != ContextType.TEXT:
            return

        content = context.content
        logger.debug("[Finish] on_handle_context. content: %s" % content)
        prefix = conf().get("plugin_trigger_prefix", "$")
        if not content.startswith(prefix):
            return
        reply = Reply()
        reply.type = ReplyType.ERROR
        reply.content = "未知插件命令\n查看插件命令列表请输入#help 插件名\n"
        e_context["reply"] = reply
        # End the event and skip the default context handling.
        e_context.action = EventAction.BREAK_PASS

    def get_help_text(self, **kwargs):
        return ""
41 |
--------------------------------------------------------------------------------
/plugins/godcmd/README.md:
--------------------------------------------------------------------------------
1 | ## 插件说明
2 |
3 | 指令插件
4 |
5 | ## 插件使用
6 |
7 | 将`config.json.template`复制为`config.json`,并修改其中`password`的值为口令。
8 |
9 | 如果没有设置命令,在命令行日志中会打印出本次的临时口令,请注意观察,打印格式如下。
10 |
11 | ```
12 | [INFO][2023-04-06 23:53:47][godcmd.py:165] - [Godcmd] 因未设置口令,本次的临时口令为0971。
13 | ```
14 |
15 | 在私聊中可使用`#auth`指令,输入口令进行管理员认证。更多详细指令请输入`#help`查看帮助文档:
16 |
17 | `#auth <口令>` - 管理员认证,仅可在私聊时认证。
18 | `#help` - 输出帮助文档,**是否是管理员**和是否是在群聊中会影响帮助文档的输出内容。
19 |
--------------------------------------------------------------------------------
/plugins/godcmd/__init__.py:
--------------------------------------------------------------------------------
1 | from .godcmd import *
2 |
--------------------------------------------------------------------------------
/plugins/godcmd/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "password": "",
3 | "admin_users": []
4 | }
5 |
--------------------------------------------------------------------------------
/plugins/hello/README.md:
--------------------------------------------------------------------------------
1 | ## 插件说明
2 |
3 | 可以根据需求设置入群欢迎、群聊拍一拍、退群等消息的自定义提示词,也支持为每个群设置对应的固定欢迎语。
4 |
5 | 该插件也是用户根据需求开发自定义插件的示例插件,参考[插件开发说明](https://github.com/zhayujie/chatgpt-on-wechat/tree/master/plugins)
6 |
7 | ## 插件配置
8 |
9 | 将 `plugins/hello` 目录下的 `config.json.template` 配置模板复制为最终生效的 `config.json`。 (如果未配置则会默认使用`config.json.template`模板中配置)。
10 |
11 | 以下是插件配置项说明:
12 |
13 | ```bash
14 | {
15 | "group_welc_fixed_msg": { ## 这里可以为特定群里配置特定的固定欢迎语
16 | "群聊1": "群聊1的固定欢迎语",
17 | "群聊2": "群聊2的固定欢迎语"
18 | },
19 |
20 | "group_welc_prompt": "请你随机使用一种风格说一句问候语来欢迎新用户\"{nickname}\"加入群聊。", ## 群聊随机欢迎语的提示词
21 |
22 | "group_exit_prompt": "请你随机使用一种风格跟其他群用户说他违反规则\"{nickname}\"退出群聊。", ## 移出群聊的提示词
23 |
24 | "patpat_prompt": "请你随机使用一种风格介绍你自己,并告诉用户输入#help可以查看帮助信息。", ## 群内拍一拍的提示词
25 |
26 | "use_character_desc": false ## 是否在Hello插件中使用LinkAI应用的系统设定
27 | }
28 | ```
29 |
30 |
31 | 注意:
32 |
33 | - 设置全局的用户进群固定欢迎语,可以在***项目根目录下***的`config.json`文件里,可以添加参数`"group_welcome_msg": "" `,参考 [#1482](https://github.com/zhayujie/chatgpt-on-wechat/pull/1482)
34 | - 为每个群设置固定的欢迎语,可以在`"group_welc_fixed_msg": {}`配置群聊名和对应的固定欢迎语,优先级高于全局固定欢迎语
35 | - 如果没有配置以上两个参数,则使用随机欢迎语,如需设定风格,语言等,修改`"group_welc_prompt": `即可
36 | - 如果使用LinkAI的服务,想在随机欢迎中结合LinkAI应用的设定,配置`"use_character_desc": true `
37 | - 实际 `config.json` 配置中应保证json格式,不应携带 '#' 及后面的注释
38 | - 如果是`docker`部署,可通过映射 `plugins/config.json` 到容器中来完成插件配置,参考[文档](https://github.com/zhayujie/chatgpt-on-wechat#3-%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8)
39 |
40 |
41 |
42 |
--------------------------------------------------------------------------------
/plugins/hello/__init__.py:
--------------------------------------------------------------------------------
1 | from .hello import *
2 |
--------------------------------------------------------------------------------
/plugins/hello/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "group_welc_fixed_msg": {
3 | "群聊1": "群聊1的固定欢迎语",
4 | "群聊2": "群聊2的固定欢迎语"
5 | },
6 |
7 | "group_welc_prompt": "请你随机使用一种风格说一句问候语来欢迎新用户\"{nickname}\"加入群聊。",
8 |
9 | "group_exit_prompt": "请你随机使用一种风格跟其他群用户说他违反规则\"{nickname}\"退出群聊。",
10 |
11 | "patpat_prompt": "请你随机使用一种风格介绍你自己,并告诉用户输入#help可以查看帮助信息。",
12 |
13 | "use_character_desc": false
14 | }
--------------------------------------------------------------------------------
/plugins/jina_sum/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Han Fangyuan
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/plugins/jina_sum/README.md:
--------------------------------------------------------------------------------
1 | # jina_sum
2 | ChatGPT on WeChat项目插件, 使用jina reader和ChatGPT总结网页链接内容
3 |
4 | 支持总结公众号、小红书、csdn等分享卡片链接(有的卡片链接会触发验证,一般直链没有此问题)
5 |
6 | 
7 | 
8 | 
9 |
10 | config.json 配置说明
11 | ```bash
12 | {
13 | "jina_reader_base": "https://r.jina.ai", # jina reader链接,默认为https://r.jina.ai
14 | "open_ai_api_base": "https://api.openai.com/v1", # chatgpt chat url
15 | "open_ai_api_key": "sk-xxx", # chatgpt api key
16 | "open_ai_model": "gpt-3.5-turbo", # chatgpt model
17 | "max_words": 8000, # 网页链接内容的最大字数,防止超过最大输入token,使用字符串长度简单计数
18 | "white_url_list": [], # url白名单, 列表为空时不做限制,黑名单优先级大于白名单,即当一个url既在白名单又在黑名单时,黑名单生效
19 | "black_url_list": ["https://support.weixin.qq.com", "https://channels-aladin.wxqcloud.qq.com"], # url黑名单,排除不支持总结的视频号等链接
20 | "prompt": "我需要对下面的文本进行总结,总结输出包括以下三个部分:\n📖 一句话总结\n🔑 关键要点,用数字序号列出3-5个文章的核心内容\n🏷 标签: #xx #xx\n请使用emoji让你的表达更生动。" # 链接内容总结提示词
21 | }
22 | ```
23 |
--------------------------------------------------------------------------------
/plugins/jina_sum/__init__.py:
--------------------------------------------------------------------------------
1 | from .jina_sum import *
2 |
--------------------------------------------------------------------------------
/plugins/jina_sum/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "jina_reader_base": "https://r.jina.ai",
3 | "open_ai_api_base": "https://api.openai.com/v1",
4 | "open_ai_api_key": "sk-xxx",
5 | "open_ai_model": "gpt-3.5-turbo",
6 | "max_words": 8000,
7 | "white_url_list": [],
8 | "black_url_list": ["https://support.weixin.qq.com", "https://channels-aladin.wxqcloud.qq.com"],
9 | "prompt": "我需要对下面的文本进行总结,总结输出包括以下三个部分:\n📖 一句话总结\n🔑 关键要点,用数字序号列出3-5个文章的核心内容\n🏷 标签: #xx #xx\n请使用emoji让你的表达更生动。"
10 | }
11 |
--------------------------------------------------------------------------------
/plugins/jina_sum/docs/images/csdn.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImGoodBai/onewebot2/08c0b445b8dae3153cc2b858377630b11d6562e0/plugins/jina_sum/docs/images/csdn.jpg
--------------------------------------------------------------------------------
/plugins/jina_sum/docs/images/red.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImGoodBai/onewebot2/08c0b445b8dae3153cc2b858377630b11d6562e0/plugins/jina_sum/docs/images/red.jpg
--------------------------------------------------------------------------------
/plugins/jina_sum/docs/images/wechat_mp.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImGoodBai/onewebot2/08c0b445b8dae3153cc2b858377630b11d6562e0/plugins/jina_sum/docs/images/wechat_mp.jpg
--------------------------------------------------------------------------------
/plugins/keyword/README.md:
--------------------------------------------------------------------------------
1 | # 目的
2 | 关键字匹配并回复
3 |
4 | # 试用场景
5 | 目前是在微信公众号下面使用过。
6 |
7 | # 使用步骤
8 | 1. 复制 `config.json.template` 为 `config.json`
9 | 2. 在关键字 `keyword` 新增需要关键字匹配的内容
10 | 3. 重启程序做验证
11 |
12 | # 验证结果
13 | 
14 |
15 | # 功能优化
16 | 1. 优化关键字匹配的方式,之前是匹配关键词一一对应,现在可以支持单个关键词匹配多个回复(随机选择一个回复)。
--------------------------------------------------------------------------------
/plugins/keyword/__init__.py:
--------------------------------------------------------------------------------
1 | from .keyword import *
2 |
--------------------------------------------------------------------------------
/plugins/keyword/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "keyword": {
3 | "关键字匹配": "测试成功",
4 | "单关键词匹配多个回复": [
5 | "测试成功",
6 | "测试失败",
7 | "http://www.baidu.com/1.jpg",
8 | "http://www.google.com/2.mp4"
9 | ]
10 | }
11 | }
12 |
--------------------------------------------------------------------------------
/plugins/keyword/test-keyword-more_replies.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImGoodBai/onewebot2/08c0b445b8dae3153cc2b858377630b11d6562e0/plugins/keyword/test-keyword-more_replies.png
--------------------------------------------------------------------------------
/plugins/keyword/test-keyword.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImGoodBai/onewebot2/08c0b445b8dae3153cc2b858377630b11d6562e0/plugins/keyword/test-keyword.png
--------------------------------------------------------------------------------
/plugins/linkai/README.md:
--------------------------------------------------------------------------------
1 | ## 插件说明
2 |
3 | 基于 LinkAI 提供的知识库、Midjourney绘画、文档对话等能力对机器人的功能进行增强。平台地址: https://link-ai.tech/console
4 |
5 | ## 插件配置
6 |
7 | 将 `plugins/linkai` 目录下的 `config.json.template` 配置模板复制为最终生效的 `config.json`。 (如果未配置则会默认使用`config.json.template`模板中配置,但功能默认关闭,需要可通过指令进行开启)。
8 |
9 | 以下是插件配置项说明:
10 |
11 | ```bash
12 | {
13 | "group_app_map": { # 群聊 和 应用编码 的映射关系
14 | "测试群名称1": "default", # 表示在名称为 "测试群名称1" 的群聊中将使用app_code 为 default 的应用
15 | "测试群名称2": "Kv2fXJcH"
16 | },
17 | "midjourney": {
18 | "enabled": true, # midjourney 绘画开关
19 | "auto_translate": true, # 是否自动将提示词翻译为英文
20 | "img_proxy": true, # 是否对生成的图片使用代理,如果你是国外服务器,将这一项设置为false会获得更快的生成速度
21 | "max_tasks": 3, # 支持同时提交的总任务个数
22 | "max_tasks_per_user": 1, # 支持单个用户同时提交的任务个数
23 | "use_image_create_prefix": true # 是否使用全局的绘画触发词,如果开启将同时支持由`config.json`中的 image_create_prefix 配置触发
24 | },
25 | "summary": {
26 | "enabled": true, # 文档总结和对话功能开关
27 | "group_enabled": true, # 是否支持群聊开启
28 | "max_file_size": 5000, # 文件的大小限制,单位KB,默认为5M,超过该大小直接忽略
29 | "type": ["FILE", "SHARING", "IMAGE"] # 支持总结的类型,分别表示 文件、分享链接、图片,其中文件和链接默认打开,图片默认关闭
30 | }
31 | }
32 | ```
33 |
34 | 根目录 `config.json` 中配置,`API_KEY` 在 [控制台](https://link-ai.tech/console/interface) 中创建并复制过来:
35 |
36 | ```bash
37 | "linkai_api_key": "Link_xxxxxxxxx"
38 | ```
39 |
40 | 注意:
41 |
42 | - 配置项中 `group_app_map` 部分是用于映射群聊与LinkAI平台上的应用, `midjourney` 部分是 mj 画图的配置,`summary` 部分是文档总结及对话功能的配置。三部分的配置相互独立,可按需开启
43 | - 实际 `config.json` 配置中应保证json格式,不应携带 '#' 及后面的注释
44 | - 如果是`docker`部署,可通过映射 `plugins/config.json` 到容器中来完成插件配置,参考[文档](https://github.com/zhayujie/chatgpt-on-wechat#3-%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8)
45 |
46 | ## 插件使用
47 |
48 | > 使用插件中的知识库管理功能需要首先开启`linkai`对话,依赖全局 `config.json` 中的 `use_linkai` 和 `linkai_api_key` 配置;而midjourney绘画 和 summary文档总结对话功能则只需填写 `linkai_api_key` 配置,`use_linkai` 无论是否关闭均可使用。具体可参考 [详细文档](https://link-ai.tech/platform/link-app/wechat)。
49 |
50 | 完成配置后运行项目,会自动运行插件,输入 `#help linkai` 可查看插件功能。
51 |
52 | ### 1.知识库管理功能
53 |
54 | 提供在不同群聊使用不同应用的功能。可以在上述 `group_app_map` 配置中固定映射关系,也可以通过指令在群中快速完成切换。
55 |
56 | 应用切换指令需要首先完成管理员 (`godcmd`) 插件的认证,然后按以下格式输入:
57 |
58 | `$linkai app {app_code}`
59 |
60 | 例如输入 `$linkai app Kv2fXJcH`,即将当前群聊与 app_code为 Kv2fXJcH 的应用绑定。
61 |
62 | 另外,还可以通过 `$linkai close` 来一键关闭linkai对话,此时就会使用默认的openai接口;同理,发送 `$linkai open` 可以再次开启。
63 |
64 | ### 2.Midjourney绘画功能
65 |
66 | 若未配置 `plugins/linkai/config.json`,默认会关闭画图功能,直接使用 `$mj open` 可基于默认配置直接使用mj画图。
67 |
68 | 指令格式:
69 |
70 | ```
71 | - 图片生成: $mj 描述词1, 描述词2..
72 | - 图片放大: $mju 图片ID 图片序号
73 | - 图片变换: $mjv 图片ID 图片序号
74 | - 重置: $mjr 图片ID
75 | ```
76 |
77 | 例如:
78 |
79 | ```
80 | "$mj a little cat, white --ar 9:16"
81 | "$mju 1105592717188272288 2"
82 | "$mjv 11055927171882 2"
83 | "$mjr 11055927171882"
84 | ```
85 |
86 | 注意事项:
87 | 1. 使用 `$mj open` 和 `$mj close` 指令可以快速打开和关闭绘图功能
88 | 2. 海外环境部署请将 `img_proxy` 设置为 `false`
89 | 3. 开启 `use_image_create_prefix` 配置后可直接复用全局画图触发词,以"画"开头便可以生成图片。
90 | 4. 提示词内容中包含敏感词或者参数格式错误可能导致绘画失败,生成失败不消耗积分
91 | 5. 若未收到图片可能有两种可能,一种是收到了图片但微信发送失败,可以在后台日志查看有没有获取到图片url,一般原因是受到了wx限制,可以稍后重试或更换账号尝试;另一种情况是图片提示词存在疑似违规,mj不会直接提示错误但会在画图后删掉原图导致程序无法获取,这种情况不消耗积分。
92 |
93 | ### 3.文档总结对话功能
94 |
95 | #### 配置
96 |
97 | 该功能依赖 LinkAI的知识库及对话功能,需要在项目根目录的config.json中设置 `linkai_api_key`, 同时根据上述插件配置说明,在插件config.json添加 `summary` 部分的配置,设置 `enabled` 为 true。
98 |
99 | 如果不想创建 `plugins/linkai/config.json` 配置,可以直接通过 `$linkai sum open` 指令开启该功能。
100 |
101 | #### 使用
102 |
103 | 功能开启后,向机器人发送 **文件**、 **分享链接卡片**、**图片** 即可生成摘要,进一步可以与文件或链接的内容进行多轮对话。如果需要关闭某种类型的内容总结,设置 `summary`配置中的type字段即可。
104 |
105 | #### 限制
106 |
107 | 1. 文件目前 支持 `txt`, `docx`, `pdf`, `md`, `csv`格式,文件大小由 `max_file_size` 限制,最大不超过15M,文件字数最多可支持百万字的文件。但不建议上传字数过多的文件,一是token消耗过大,二是摘要很难覆盖到全部内容,只能通过多轮对话来了解细节。
108 | 2. 分享链接 目前仅支持 公众号文章,后续会支持更多文章类型及视频链接等
109 | 3. 总结及对话的 费用与 LinkAI 3.5-4K 模型的计费方式相同,按文档内容的tokens进行计算
110 |
--------------------------------------------------------------------------------
/plugins/linkai/__init__.py:
--------------------------------------------------------------------------------
1 | from .linkai import *
2 |
--------------------------------------------------------------------------------
/plugins/linkai/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "group_app_map": {
3 | "测试群名1": "default",
4 | "测试群名2": "Kv2fXJcH"
5 | },
6 | "midjourney": {
7 | "enabled": true,
8 | "auto_translate": true,
9 | "img_proxy": true,
10 | "max_tasks": 3,
11 | "max_tasks_per_user": 1,
12 | "use_image_create_prefix": true
13 | },
14 | "summary": {
15 | "enabled": true,
16 | "group_enabled": true,
17 | "max_file_size": 5000,
18 | "type": ["FILE", "SHARING"]
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/plugins/linkai/summary.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from config import conf
3 | from common.log import logger
4 | import os
5 | import html
6 |
7 |
class LinkSummary:
    """Client for the LinkAI summary API: summarize local files and web links,
    and open follow-up chat sessions over a produced summary."""

    def __init__(self):
        pass

    def summary_file(self, file_path: str):
        """Upload a local file and request its summary.

        :param file_path: path of the file to summarize
        :return: dict with "summary" and "summary_id", or None on failure
        """
        url = self.base_url() + "/v1/summary/file"
        # context manager closes the file handle even if the request raises
        # (the original code opened the file and never closed it)
        with open(file_path, "rb") as f:
            file_body = {
                "file": f,
                "name": file_path.split("/")[-1],
            }
            res = requests.post(url, headers=self.headers(), files=file_body, timeout=(5, 300))
        return self._parse_summary_res(res)

    def summary_url(self, url: str):
        """Request a summary of a web page.

        :param url: page url; may contain HTML entities (e.g. &amp;) when
            extracted from a share card, hence the unescape
        :return: dict with "summary" and "summary_id", or None on failure
        """
        url = html.unescape(url)
        body = {
            "url": url
        }
        res = requests.post(url=self.base_url() + "/v1/summary/url", headers=self.headers(), json=body, timeout=(5, 180))
        return self._parse_summary_res(res)

    def summary_chat(self, summary_id: str):
        """Open a chat session over a previously created summary.

        :param summary_id: id returned by summary_file/summary_url
        :return: dict with "questions" and "file_id", or None on failure
        """
        body = {
            "summary_id": summary_id
        }
        res = requests.post(url=self.base_url() + "/v1/summary/chat", headers=self.headers(), json=body, timeout=(5, 180))
        if res.status_code == 200:
            res = res.json()
            logger.debug(f"[LinkSum] chat open, res={res}")
            if res.get("code") == 200:
                data = res.get("data")
                return {
                    "questions": data.get("questions"),
                    "file_id": data.get("file_id")
                }
        else:
            res_json = res.json()
            logger.error(f"[LinkSum] summary error, status_code={res.status_code}, msg={res_json.get('message')}")
        return None

    def _parse_summary_res(self, res):
        """Parse a summary API response.

        :param res: requests Response from a summary endpoint
        :return: dict with "summary" and "summary_id" on success, else None
        """
        if res.status_code == 200:
            res = res.json()
            logger.debug(f"[LinkSum] url summary, res={res}")
            if res.get("code") == 200:
                data = res.get("data")
                return {
                    "summary": data.get("summary"),
                    "summary_id": data.get("summary_id")
                }
        else:
            res_json = res.json()
            logger.error(f"[LinkSum] summary error, status_code={res.status_code}, msg={res_json.get('message')}")
        return None

    def base_url(self):
        """Return the LinkAI API base url (configurable via linkai_api_base)."""
        return conf().get("linkai_api_base", "https://api.link-ai.tech")

    def headers(self):
        """Return auth headers built from the configured linkai_api_key."""
        return {"Authorization": "Bearer " + conf().get("linkai_api_key")}

    def check_file(self, file_path: str, sum_config: dict) -> bool:
        """Check whether a file is eligible for summarization.

        :param file_path: path of the candidate file
        :param sum_config: summary plugin config; may define "max_file_size" (KB)
        :return: True when the size and extension are acceptable
        """
        file_size = os.path.getsize(file_path) // 1000  # size in KB

        # honor the configured limit, with a 15MB hard cap
        if (sum_config.get("max_file_size") and file_size > sum_config.get("max_file_size")) or file_size > 15000:
            logger.warn(f"[LinkSum] file size exceeds limit, No processing, file_size={file_size}KB")
            return False

        # compare extensions case-insensitively so "PDF" works like "pdf"
        suffix = file_path.split(".")[-1].lower()
        support_list = ["txt", "csv", "docx", "pdf", "md", "jpg", "jpeg", "png"]
        if suffix not in support_list:
            logger.warn(f"[LinkSum] unsupported file, suffix={suffix}, support_list={support_list}")
            return False

        return True

    def check_url(self, url: str):
        """Check whether a url is eligible for summarization.

        Only WeChat MP article links are supported; known error pages are excluded.

        :param url: the candidate url
        :return: True when the url should be summarized
        """
        if not url:
            return False
        support_list = ["http://mp.weixin.qq.com", "https://mp.weixin.qq.com"]
        black_support_list = ["https://mp.weixin.qq.com/mp/waerrpage"]
        for black_url_prefix in black_support_list:
            if url.strip().startswith(black_url_prefix):
                logger.warn(f"[LinkSum] unsupported url, no need to process, url={url}")
                return False
        for support_url in support_list:
            if url.strip().startswith(support_url):
                return True
        return False
97 |
--------------------------------------------------------------------------------
/plugins/linkai/utils.py:
--------------------------------------------------------------------------------
1 | from config import global_config
2 | from bridge.reply import Reply, ReplyType
3 | from plugins.event import EventContext, EventAction
4 |
5 |
class Util:
    @staticmethod
    def is_admin(e_context: EventContext) -> bool:
        """
        Check whether the incoming message was sent by an admin user.
        :param e_context: the event context carrying the message
        :return: True when the sender is an admin, False otherwise
        """
        context = e_context["context"]
        if not context["isgroup"]:
            # private chat: the receiver field identifies the sender's session
            return context["receiver"] in global_config["admin_users"]
        sender_id = context.kwargs.get("msg").actual_user_id
        if not sender_id:
            return False
        # group chat: match the actual sender against each configured admin entry
        return any(sender_id in admin_entry for admin_entry in global_config["admin_users"])

    @staticmethod
    def set_reply_text(content: str, e_context: EventContext, level: ReplyType = ReplyType.ERROR):
        """Attach an immediate reply to the event and stop further propagation."""
        e_context["reply"] = Reply(level, content)
        e_context.action = EventAction.BREAK_PASS
29 |
--------------------------------------------------------------------------------
/plugins/plugin.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | from config import pconf, plugin_config, conf
4 | from common.log import logger
5 |
6 |
class Plugin:
    """Base class for plugins: provides config loading/saving and an event-handler registry."""

    def __init__(self):
        # event type -> handler callback, populated by subclasses
        self.handlers = {}

    def load_config(self) -> dict:
        """
        Load this plugin's configuration.

        The plugin's section in the global plugins/config.json (via pconf) takes
        precedence; when absent, fall back to the config.json inside the plugin's
        own directory. The result is cached into the in-memory global config.
        :return: the configuration dict (may be None when nothing is found)
        """
        conf_data = pconf(self.name)
        if not conf_data:
            # no global section for this plugin: try its local config file
            local_path = os.path.join(self.path, "config.json")
            logger.debug(f"loading plugin config, plugin_config_path={local_path}, exist={os.path.exists(local_path)}")
            if os.path.exists(local_path):
                with open(local_path, "r", encoding="utf-8") as f:
                    conf_data = json.load(f)

        # cache into the shared in-memory plugin config
        plugin_config[self.name] = conf_data
        logger.debug(f"loading plugin config, plugin_name={self.name}, conf={conf_data}")
        return conf_data

    def save_config(self, config: dict):
        """
        Persist the plugin configuration: always update the in-memory copy, and
        write the global and plugin-local config files when they already exist.
        Failures are logged and swallowed.
        """
        try:
            plugin_config[self.name] = config
            # global plugins/config.json (written only if it already exists)
            global_path = "./plugins/config.json"
            if os.path.exists(global_path):
                with open(global_path, "w", encoding="utf-8") as fw:
                    json.dump(plugin_config, fw, indent=4, ensure_ascii=False)
            # plugin-local config.json (written only if it already exists)
            local_path = os.path.join(self.path, "config.json")
            if os.path.exists(local_path):
                with open(local_path, "w", encoding="utf-8") as fw:
                    json.dump(config, fw, indent=4, ensure_ascii=False)
        except Exception as e:
            logger.warn("save plugin config failed: {}".format(e))

    def get_help_text(self, **kwargs):
        """Return the help text shown by the #help command; subclasses should override."""
        return "暂无帮助信息"

    def reload(self):
        """Hook for reloading plugin state; subclasses may override."""
        pass
53 |
--------------------------------------------------------------------------------
/plugins/role/README.md:
--------------------------------------------------------------------------------
1 | 用于让Bot扮演指定角色的聊天插件,触发方法如下:
2 |
3 | - `$角色/$role help/帮助` - 打印目前支持的角色列表。
4 | - `$角色/$role <角色名>` - 让AI扮演该角色,角色名支持模糊匹配。
5 | - `$停止扮演` - 停止角色扮演。
6 |
7 | 添加自定义角色请在`roles/roles.json`中添加。
8 |
9 | (大部分prompt来自https://github.com/rockbenben/ChatGPT-Shortcut/blob/main/src/data/users.tsx)
10 |
11 | 以下为例子:
12 | ```json
13 | {
14 | "title": "写作助理",
15 | "description": "As a writing improvement assistant, your task is to improve the spelling, grammar, clarity, concision, and overall readability of the text I provided, while breaking down long sentences, reducing repetition, and providing suggestions for improvement. Please provide only the corrected Chinese version of the text and avoid including explanations. Please treat every message I send later as text content.",
16 | "descn": "作为一名中文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性,同时分解长句,减少重复,并提供改进建议。请只提供文本的更正版本,避免包括解释。请把我之后的每一条消息都当作文本内容。",
17 | "wrapper": "内容是:\n\"%s\"",
18 | "remark": "最常使用的角色,用于优化文本的语法、清晰度和简洁度,提高可读性。"
19 | }
20 | ```
21 |
22 | - `title`: 角色名。
23 | - `description`: 使用`$role`触发时,使用英语prompt。
24 | - `descn`: 使用`$角色`触发时,使用中文prompt。
25 | - `wrapper`: 用于包装用户消息,可起到强调作用,避免回复离题。
26 | - `remark`: 简短描述该角色,在打印帮助文档时显示。
27 |
--------------------------------------------------------------------------------
/plugins/role/__init__.py:
--------------------------------------------------------------------------------
1 | from .role import *
2 |
--------------------------------------------------------------------------------
/plugins/source.json:
--------------------------------------------------------------------------------
1 | {
2 | "repo": {
3 | "sdwebui": {
4 | "url": "https://github.com/lanvent/plugin_sdwebui.git",
5 | "desc": "利用stable-diffusion画图的插件"
6 | },
7 | "replicate": {
8 | "url": "https://github.com/lanvent/plugin_replicate.git",
9 | "desc": "利用replicate api画图的插件"
10 | },
11 | "summary": {
12 | "url": "https://github.com/lanvent/plugin_summary.git",
13 | "desc": "总结聊天记录的插件"
14 | },
15 | "timetask": {
16 | "url": "https://github.com/haikerapples/timetask.git",
17 | "desc": "一款定时任务系统的插件"
18 | },
19 | "Apilot": {
20 | "url": "https://github.com/6vision/Apilot.git",
21 | "desc": "通过api直接查询早报、热榜、快递、天气等实用信息的插件"
22 | },
23 | "GroupInvitation": {
24 | "url": "https://github.com/dfldylan/GroupInvitation.git",
25 | "desc": "根据特定关键词自动邀请用户加入指定的群聊"
26 | },
27 | "send_msg": {
28 | "url": "https://github.com/Isaac20231231/send_msg.git",
29 | "desc": "手动发送微信通知消息插件,通过api发送消息/微信命令发送消息,支持个人/群聊"
30 | },
31 | "pictureChange": {
32 | "url": "https://github.com/Yanyutin753/pictureChange.git",
33 | "desc": "1. 支持百度AI和Stable Diffusion WebUI进行图像处理,提供多种模型选择,支持图生图、文生图自定义模板。2. 支持Suno音乐AI可将图像和文字转为音乐。3. 支持自定义模型进行文件、图片总结功能。4. 支持管理员控制群聊内容与参数和功能改变。"
34 | },
35 | "Blackroom": {
36 | "url": "https://github.com/dividduang/blackroom.git",
37 | "desc": "小黑屋插件,被拉进小黑屋的人将不能使用@bot的功能的插件"
38 | },
39 | "midjourney": {
40 | "url": "https://github.com/baojingyu/midjourney.git",
41 | "desc": "利用midjourney实现ai绘图的的插件"
42 | },
43 | "solitaire": {
44 | "url": "https://github.com/Wang-zhechao/solitaire.git",
45 | "desc": "机器人微信接龙插件"
46 | },
47 | "HighSpeedTicket": {
48 | "url": "https://github.com/He0607/HighSpeedTicket.git",
49 | "desc": "高铁(火车)票查询插件"
50 | }
51 | }
52 | }
53 |
--------------------------------------------------------------------------------
/plugins/tool/__init__.py:
--------------------------------------------------------------------------------
1 | from .tool import *
2 |
--------------------------------------------------------------------------------
/plugins/tool/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "tools": [
3 | "url-get",
4 | "meteo"
5 | ],
6 | "kwargs": {
7 | "debug": false,
8 | "no_default": false,
9 | "model_name": "gpt-3.5-turbo"
10 | }
11 | }
12 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.black]
2 | line-length = 176
3 | target-version = ['py37']
4 | include = '\.pyi?$'
5 | extend-exclude = '.+/(dist|.venv|venv|build|lib)/.+'
6 |
7 | [tool.isort]
8 | profile = "black"
--------------------------------------------------------------------------------
/requirements-optional.txt:
--------------------------------------------------------------------------------
1 | tiktoken>=0.3.2 # openai calculate token
2 |
3 | #voice
4 | pydub>=0.25.1 # need ffmpeg
5 | SpeechRecognition # google speech to text
6 | gTTS>=2.3.1 # google text to speech
7 | pyttsx3>=2.90 # pytsx text to speech
8 | baidu_aip>=4.16.10 # baidu voice
9 | azure-cognitiveservices-speech # azure voice
10 | edge-tts # edge-tts
11 | numpy<=1.24.2
12 | langid # language detect
13 | elevenlabs==1.0.3 # elevenlabs TTS
14 |
15 | #install plugin
16 | dulwich
17 |
18 | # wechatmp && wechatcom
19 | web.py
20 | wechatpy
21 |
22 | # chatgpt-tool-hub plugin
23 | chatgpt_tool_hub==0.5.0
24 |
25 | # xunfei spark
26 | websocket-client==1.2.0
27 |
28 | # claude bot
29 | curl_cffi
30 | # claude API
31 | anthropic
32 |
33 | # tongyi qwen
34 | broadscope_bailian
35 |
36 | # google
37 | google-generativeai
38 |
39 | # dingtalk
40 | dingtalk_stream
41 |
42 | # zhipuai
43 | zhipuai>=2.0.1
44 |
45 | # webui
46 | gradio==4.37.2
47 | gradio_client==1.0.2
48 | # tongyi qwen new sdk
49 | dashscope
50 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | openai==0.27.8
2 | HTMLParser>=0.0.2
3 | PyQRCode>=1.2.1
4 | qrcode>=7.4.2
5 | requests>=2.28.2
6 | chardet>=5.1.0
7 | Pillow
8 | pre-commit
9 | web.py
10 | linkai>=0.0.6.0
11 |
--------------------------------------------------------------------------------
/res/111.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImGoodBai/onewebot2/08c0b445b8dae3153cc2b858377630b11d6562e0/res/111.png
--------------------------------------------------------------------------------
/res/222.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImGoodBai/onewebot2/08c0b445b8dae3153cc2b858377630b11d6562e0/res/222.png
--------------------------------------------------------------------------------
/res/333.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImGoodBai/onewebot2/08c0b445b8dae3153cc2b858377630b11d6562e0/res/333.png
--------------------------------------------------------------------------------
/res/5555.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImGoodBai/onewebot2/08c0b445b8dae3153cc2b858377630b11d6562e0/res/5555.png
--------------------------------------------------------------------------------
/res/qr-白话AGI-视频号-二维码.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ImGoodBai/onewebot2/08c0b445b8dae3153cc2b858377630b11d6562e0/res/qr-白话AGI-视频号-二维码.JPG
--------------------------------------------------------------------------------
/scripts/shutdown.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Stop the running chatgpt-on-wechat service started from this repo.
cd "$(dirname "$0")"/..
export BASE_DIR=$(pwd)
pid=$(ps ax | grep -i app.py | grep "${BASE_DIR}" | grep python3 | grep -v grep | awk '{print $1}')
if [ -z "$pid" ] ; then
    echo "No chatgpt-on-wechat running."
    # exit codes must be 0-255; "exit -1" is non-portable
    exit 1
fi

echo "The chatgpt-on-wechat(${pid}) is running..."

kill ${pid}

echo "Send shutdown request to chatgpt-on-wechat(${pid}) OK"
17 |
--------------------------------------------------------------------------------
/scripts/start.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Run chatgpt-on-wechat in the background and follow its log output.

cd "$(dirname "$0")"/..
export BASE_DIR=$(pwd)
echo "$BASE_DIR"

# check the nohup.out log output file
if [ ! -f "${BASE_DIR}/nohup.out" ]; then
  touch "${BASE_DIR}/nohup.out"
  echo "create file ${BASE_DIR}/nohup.out"
fi

# print the status message before tail, since "tail -f" blocks and the
# message would otherwise only appear after tail is interrupted
echo "Chat_on_webchat is starting,you can check the ${BASE_DIR}/nohup.out"

nohup python3 "${BASE_DIR}/app.py" & tail -f "${BASE_DIR}/nohup.out"
17 |
--------------------------------------------------------------------------------
/scripts/tout.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Follow the service log (nohup.out).

cd "$(dirname "$0")"/..
export BASE_DIR=$(pwd)
echo "$BASE_DIR"

# check the nohup.out log output file
if [ ! -f "${BASE_DIR}/nohup.out" ]; then
  echo "No file ${BASE_DIR}/nohup.out"
  # exit codes must be 0-255; "exit -1" is non-portable
  exit 1
fi

tail -f "${BASE_DIR}/nohup.out"
15 |
--------------------------------------------------------------------------------
/start.sh:
--------------------------------------------------------------------------------
# Restart the bot: kill any running instance, then relaunch in the background.
# -r prevents xargs from invoking kill with no arguments when nothing matches.
ps -ef | grep app.py | grep -v grep | awk '{print $2}' | xargs -r kill
nohup /home/hfy/miniconda3/envs/gptac_venv/bin/python -u app.py >> wechat_robot.log 2>&1 &
3 |
4 |
--------------------------------------------------------------------------------
/stop.sh:
--------------------------------------------------------------------------------
# Kill any running bot instance. -r prevents xargs from invoking kill with
# no arguments when nothing matches.
ps -ef | grep app.py | grep -v grep | awk '{print $2}' | xargs -r kill
2 |
3 |
--------------------------------------------------------------------------------
/tail_log.sh:
--------------------------------------------------------------------------------
# Follow the bot log, starting from the last 20 lines.
# Options are placed before the file operand for POSIX compatibility.
tail -n 20 -f wechat_robot.log
2 |
3 |
--------------------------------------------------------------------------------
/translate/baidu/baidu_translate.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import random
4 | from hashlib import md5
5 |
6 | import requests
7 |
8 | from config import conf
9 | from translate.translator import Translator
10 |
11 |
class BaiduTranslator(Translator):
    """Translator backed by the Baidu Fanyi open API.

    Requires `baidu_translate_app_id` and `baidu_translate_app_key` in the
    global config.
    """

    def __init__(self) -> None:
        super().__init__()
        endpoint = "http://api.fanyi.baidu.com"
        path = "/api/trans/vip/translate"
        self.url = endpoint + path
        self.appid = conf().get("baidu_translate_app_id")
        self.appkey = conf().get("baidu_translate_app_key")
        if not self.appid or not self.appkey:
            raise Exception("baidu translate appid or appkey not set")

    # For list of language codes, please refer to `https://api.fanyi.baidu.com/doc/21`, need to convert to ISO 639-1 codes
    def translate(self, query: str, from_lang: str = "", to_lang: str = "en") -> str:
        """Translate `query` from `from_lang` to `to_lang`.

        :param query: text to translate
        :param from_lang: source language code; empty means auto-detect
        :param to_lang: target language code (default English)
        :return: translated text, one line per input segment
        :raises Exception: on a non-retryable API error, or when retries are exhausted
        """
        if not from_lang:
            from_lang = "auto"  # baidu supports auto detect
        salt = random.randint(32768, 65536)
        sign = self.make_md5("{}{}{}{}".format(self.appid, query, salt, self.appkey))
        headers = {"Content-Type": "application/x-www-form-urlencoded"}
        payload = {"appid": self.appid, "q": query, "from": from_lang, "to": to_lang, "salt": salt, "sign": sign}

        retry_cnt = 3
        while retry_cnt:
            r = requests.post(self.url, params=payload, headers=headers, timeout=(5, 30))
            result = r.json()
            errcode = result.get("error_code", "52000")  # "52000" means success
            if errcode != "52000":
                if errcode == "52001" or errcode == "52002":
                    # 52001/52002 are transient (timeout / system error): retry
                    retry_cnt -= 1
                    continue
                else:
                    raise Exception(result["error_msg"])
            else:
                break
        else:
            # previously this fell through and crashed with a KeyError on
            # result["trans_result"]; raise a clear error instead
            raise Exception("baidu translate failed after retries: {}".format(result.get("error_msg")))
        text = "\n".join([item["dst"] for item in result["trans_result"]])
        return text

    def make_md5(self, s, encoding="utf-8"):
        """Return the hex MD5 digest of `s` (used to sign API requests)."""
        return md5(s.encode(encoding)).hexdigest()
50 |
--------------------------------------------------------------------------------
/translate/factory.py:
--------------------------------------------------------------------------------
def create_translator(voice_type):
    """Create a translator instance for the given provider type.

    :param voice_type: translator provider identifier (currently only "baidu")
    :return: a Translator implementation
    :raises RuntimeError: when the provider type is not supported
    """
    if voice_type == "baidu":
        from translate.baidu.baidu_translate import BaiduTranslator

        return BaiduTranslator()
    # include the offending value so misconfiguration is easy to diagnose
    raise RuntimeError("unsupported translator type: {}".format(voice_type))
7 |
--------------------------------------------------------------------------------
/translate/translator.py:
--------------------------------------------------------------------------------
1 | """
2 | Translation service abstract class
3 | """
4 |
5 |
class Translator(object):
    # please use https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes to specify language
    def translate(self, query: str, from_lang: str = "", to_lang: str = "en") -> str:
        """
        Translate text from one language to another.

        :param query: text to translate
        :param from_lang: ISO 639-1 source language code; an empty string lets the
            implementation auto-detect the source language
        :param to_lang: ISO 639-1 target language code (defaults to English)
        :return: the translated text
        :raises NotImplementedError: concrete translators must override this method
        """
        raise NotImplementedError
13 |
--------------------------------------------------------------------------------
/voice/ali/ali_voice.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Author: chazzjimel
4 | Email: chazzjimel@gmail.com
5 | wechat:cheung-z-x
6 |
7 | Description:
8 | ali voice service
9 |
10 | """
11 | import json
12 | import os
13 | import re
14 | import time
15 |
16 | from bridge.reply import Reply, ReplyType
17 | from common.log import logger
18 | from voice.audio_convert import get_pcm_from_wav
19 | from voice.voice import Voice
20 | from voice.ali.ali_api import AliyunTokenGenerator, speech_to_text_aliyun, text_to_speech_aliyun
21 | from config import conf
22 |
23 |
class AliVoice(Voice):
    def __init__(self):
        """
        Initialize the AliVoice service from the plugin-local config.json.
        Any failure during initialization is logged and ignored, which leaves
        the instance only partially initialized.
        """
        try:
            curdir = os.path.dirname(__file__)
            config_path = os.path.join(curdir, "config.json")
            with open(config_path, "r") as fr:
                config = json.load(fr)
            self.token = None
            self.token_expire_time = 0
            # reuse the Aliyun Qwen access_key / access_secret from the global
            # config by default, falling back to the plugin-local values
            self.api_url_voice_to_text = config.get("api_url_voice_to_text")
            self.api_url_text_to_voice = config.get("api_url_text_to_voice")
            self.app_key = config.get("app_key")
            self.access_key_id = conf().get("qwen_access_key_id") or config.get("access_key_id")
            self.access_key_secret = conf().get("qwen_access_key_secret") or config.get("access_key_secret")
        except Exception as e:
            logger.warn("AliVoice init failed: %s, ignore " % e)

    def textToVoice(self, text):
        """
        Convert text to a speech file via the Aliyun TTS API.

        :param text: the text to synthesize.
        :return: a Reply carrying the generated voice file name, or an ERROR reply on failure.
        """
        # strip everything except CJK/kana/hangul, Latin letters, digits,
        # common European accented letters and basic punctuation
        text = re.sub(r'[^\u4e00-\u9fa5\u3040-\u30FF\uAC00-\uD7AFa-zA-Z0-9'
                      r'äöüÄÖÜáéíóúÁÉÍÓÚàèìòùÀÈÌÒÙâêîôûÂÊÎÔÛçÇñÑ,。!?,.]', '', text)
        # obtain a valid (possibly cached) API token
        token_id = self.get_valid_token()
        fileName = text_to_speech_aliyun(self.api_url_text_to_voice, text, self.app_key, token_id)
        if fileName:
            logger.info("[Ali] textToVoice text={} voice file name={}".format(text, fileName))
            reply = Reply(ReplyType.VOICE, fileName)
        else:
            reply = Reply(ReplyType.ERROR, "抱歉,语音合成失败")
        return reply

    def voiceToText(self, voice_file):
        """
        Convert a speech file to text via the Aliyun ASR API.

        :param voice_file: the voice file (WAV) to transcribe.
        :return: a Reply carrying the recognized text, or an ERROR reply on failure.
        """
        # obtain a valid (possibly cached) API token
        token_id = self.get_valid_token()
        logger.debug("[Ali] voice file name={}".format(voice_file))
        pcm = get_pcm_from_wav(voice_file)
        text = speech_to_text_aliyun(self.api_url_voice_to_text, pcm, self.app_key, token_id)
        if text:
            logger.info("[Ali] VoicetoText = {}".format(text))
            reply = Reply(ReplyType.TEXT, text)
        else:
            reply = Reply(ReplyType.ERROR, "抱歉,语音识别失败")
        return reply

    def get_valid_token(self):
        """
        Return a valid Aliyun API token, refreshing it when missing or expired.

        :return: the token string.
        """
        current_time = time.time()
        if self.token is None or current_time >= self.token_expire_time:
            get_token = AliyunTokenGenerator(self.access_key_id, self.access_key_secret)
            token_str = get_token.get_token()
            token_data = json.loads(token_str)
            self.token = token_data["Token"]["Id"]
            # renew 5 minutes (300s) before the reported expiry to avoid
            # using a token that expires mid-request
            self.token_expire_time = token_data["Token"]["ExpireTime"] - 300
            logger.debug(f"新获取的阿里云token:{self.token}")
        else:
            logger.debug("使用缓存的token")
        return self.token
102 |
--------------------------------------------------------------------------------
/voice/ali/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "api_url_text_to_voice": "https://nls-gateway-cn-shanghai.aliyuncs.com/stream/v1/tts",
3 | "api_url_voice_to_text": "https://nls-gateway.cn-shanghai.aliyuncs.com/stream/v1/asr",
4 | "app_key": "",
5 | "access_key_id": "",
6 | "access_key_secret": ""
7 | }
--------------------------------------------------------------------------------
/voice/azure/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "speech_synthesis_voice_name": "zh-CN-XiaoxiaoNeural",
3 | "auto_detect": true,
4 | "speech_synthesis_zh": "zh-CN-YunxiNeural",
5 | "speech_synthesis_en": "en-US-JacobNeural",
6 | "speech_synthesis_ja": "ja-JP-AoiNeural",
7 | "speech_synthesis_ko": "ko-KR-SoonBokNeural",
8 | "speech_synthesis_de": "de-DE-LouisaNeural",
9 | "speech_synthesis_fr": "fr-FR-BrigitteNeural",
10 | "speech_synthesis_es": "es-ES-LaiaNeural",
11 | "speech_recognition_language": "zh-CN"
12 | }
13 |
--------------------------------------------------------------------------------
/voice/baidu/README.md:
--------------------------------------------------------------------------------
1 | ## 说明
2 | 百度语音识别与合成参数说明
3 | 百度语音依赖,经常会出现问题,可能就是缺少依赖:
4 | pip install baidu-aip
5 | pip install pydub
6 | pip install pysilk
7 | 还有ffmpeg,不同系统安装方式不同
8 |
9 | 系统中收到的语音文件为mp3格式(wx)或者sil格式(wxy),如果要识别需要转换为pcm格式,转换后的文件为16k采样率,单声道,16bit的pcm文件
10 | 发送时又需要(wx)转换为mp3格式,转换后的文件为16k采样率,单声道,16bit的pcm文件,(wxy)转换为sil格式,还要计算声音长度,发送时需要带上声音长度
11 | 这些事情都在audio_convert.py中封装了,直接调用即可
12 |
13 |
14 | 参数说明
15 | 识别参数
16 | https://ai.baidu.com/ai-doc/SPEECH/Vk38lxily
17 | 合成参数
18 | https://ai.baidu.com/ai-doc/SPEECH/Gk38y8lzk
19 |
20 | ## 使用说明
21 | 分两个地方配置
22 |
1、对于def voiceToText(self, filename)函数中调用的百度语音识别API,其中接口调用asr(参数)的配置见CHATGPT-ON-WECHAT工程目录下的`config.json`文件和config.py文件。
24 | 参数 可需 描述
25 | app_id 必填 应用的APPID
26 | api_key 必填 应用的APIKey
27 | secret_key 必填 应用的SecretKey
28 | dev_pid 必填 语言选择,填写语言对应的dev_pid值
29 |
2、对于def textToVoice(self, text)函数中调用的百度语音合成API,其中接口调用synthesis(参数)的参数在本目录下的`config.json`文件中进行配置。
31 | 参数 可需 描述
32 | tex 必填 合成的文本,使用UTF-8编码,请注意文本长度必须小于1024字节
33 | lan 必填 固定值zh。语言选择,目前只有中英文混合模式,填写固定值zh
34 | spd 选填 语速,取值0-15,默认为5中语速
35 | pit 选填 音调,取值0-15,默认为5中语调
36 | vol 选填 音量,取值0-15,默认为5中音量(取值为0时为音量最小值,并非为无声)
37 | per(基础音库) 选填 度小宇=1,度小美=0,度逍遥(基础)=3,度丫丫=4
38 | per(精品音库) 选填 度逍遥(精品)=5003,度小鹿=5118,度博文=106,度小童=110,度小萌=111,度米朵=103,度小娇=5
39 | aue 选填 3为mp3格式(默认); 4为pcm-16k;5为pcm-8k;6为wav(内容同pcm-16k); 注意aue=4或者6是语音识别要求的格式,但是音频内容不是语音识别要求的自然人发音,所以识别效果会受影响。
40 |
41 | 关于per参数的说明,注意您购买的哪个音库,就填写哪个音库的参数,否则会报错。如果您购买的是基础音库,那么per参数只能填写0到4,如果您购买的是精品音库,那么per参数只能填写5003,5118,106,110,111,103,5其他的都会报错。
42 | ### 配置文件
43 |
44 | 将文件夹中`config.json.template`复制为`config.json`。
45 |
46 | ``` json
47 | {
48 | "lang": "zh",
49 | "ctp": 1,
50 | "spd": 5,
51 | "pit": 5,
52 | "vol": 5,
53 | "per": 0
54 | }
55 | ```
--------------------------------------------------------------------------------
/voice/baidu/baidu_voice.py:
--------------------------------------------------------------------------------
1 | """
2 | baidu voice service
3 | """
4 | import json
5 | import os
6 | import time
7 |
8 | from aip import AipSpeech
9 |
10 | from bridge.reply import Reply, ReplyType
11 | from common.log import logger
12 | from common.tmp_dir import TmpDir
13 | from config import conf
14 | from voice.audio_convert import get_pcm_from_wav
15 | from voice.voice import Voice
16 |
17 | """
18 | 百度的语音识别API.
19 | dev_pid:
20 | - 1936: 普通话远场
21 | - 1536:普通话(支持简单的英文识别)
22 | - 1537:普通话(纯中文识别)
23 | - 1737:英语
24 | - 1637:粤语
25 | - 1837:四川话
26 | 要使用本模块, 首先到 yuyin.baidu.com 注册一个开发者账号,
27 | 之后创建一个新应用, 然后在应用管理的"查看key"中获得 API Key 和 Secret Key
28 | 然后在 config.json 中填入这两个值, 以及 app_id, dev_pid
29 | """
30 |
31 |
class BaiduVoice(Voice):
    def __init__(self):
        """Load Baidu speech settings and build the AipSpeech client.

        Credentials come from the global config; synthesis parameters come
        from the local ``config.json``, which is created with defaults when
        it does not exist yet.
        """
        try:
            config_path = os.path.join(os.path.dirname(__file__), "config.json")
            if os.path.exists(config_path):
                with open(config_path, "r") as fr:
                    bconf = json.load(fr)
            else:
                # No local config yet: persist one with the default TTS params.
                bconf = {"lang": "zh", "ctp": 1, "spd": 5, "pit": 5, "vol": 5, "per": 0}
                with open(config_path, "w") as fw:
                    json.dump(bconf, fw, indent=4)

            self.app_id = str(conf().get("baidu_app_id"))
            self.api_key = str(conf().get("baidu_api_key"))
            self.secret_key = str(conf().get("baidu_secret_key"))
            self.dev_id = conf().get("baidu_dev_pid")
            self.lang = bconf["lang"]
            self.ctp = bconf["ctp"]
            self.spd = bconf["spd"]
            self.pit = bconf["pit"]
            self.vol = bconf["vol"]
            self.per = bconf["per"]

            self.client = AipSpeech(self.app_id, self.api_key, self.secret_key)
        except Exception as e:
            logger.warn("BaiduVoice init failed: %s, ignore " % e)

    def voiceToText(self, voice_file):
        """Recognize a local wav file via Baidu ASR.

        :param voice_file: path of the wav file to recognize.
        :return: TEXT Reply on success, ERROR Reply otherwise.
        """
        logger.debug("[Baidu] voice file name={}".format(voice_file))
        pcm_data = get_pcm_from_wav(voice_file)
        res = self.client.asr(pcm_data, "pcm", 16000, {"dev_pid": self.dev_id})
        if res["err_no"] != 0:
            logger.info("百度语音识别出错了: {}".format(res["err_msg"]))
            if res["err_msg"] == "request pv too much":
                logger.info(" 出现这个原因很可能是你的百度语音服务调用量超出限制,或未开通付费")
            return Reply(ReplyType.ERROR, "百度语音识别出错了;{0}".format(res["err_msg"]))
        logger.info("百度语音识别到了:{}".format(res["result"]))
        return Reply(ReplyType.TEXT, "".join(res["result"]))

    def textToVoice(self, text):
        """Synthesize *text* via Baidu TTS.

        :return: VOICE Reply with the mp3 path, or ERROR Reply on failure.
        """
        result = self.client.synthesis(
            text,
            self.lang,
            self.ctp,
            {"spd": self.spd, "pit": self.pit, "vol": self.vol, "per": self.per},
        )
        if isinstance(result, dict):
            # The SDK returns a dict payload on failure, raw audio bytes on success.
            logger.error("[Baidu] textToVoice error={}".format(result))
            return Reply(ReplyType.ERROR, "抱歉,语音合成失败")
        # Timestamp + text hash keeps file names unique under multithreading.
        file_name = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3"
        with open(file_name, "wb") as f:
            f.write(result)
        logger.info("[Baidu] textToVoice text={} voice file name={}".format(text, file_name))
        return Reply(ReplyType.VOICE, file_name)
95 |
--------------------------------------------------------------------------------
/voice/baidu/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "lang": "zh",
3 | "ctp": 1,
4 | "spd": 5,
5 | "pit": 5,
6 | "vol": 5,
7 | "per": 0
8 | }
9 |
--------------------------------------------------------------------------------
/voice/edge/edge_voice.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import edge_tts
4 | import asyncio
5 |
6 | from bridge.reply import Reply, ReplyType
7 | from common.log import logger
8 | from common.tmp_dir import TmpDir
9 | from voice.voice import Voice
10 |
11 |
12 | class EdgeVoice(Voice):
13 |
14 | def __init__(self):
15 | '''
16 | # 普通话
17 | zh-CN-XiaoxiaoNeural
18 | zh-CN-XiaoyiNeural
19 | zh-CN-YunjianNeural
20 | zh-CN-YunxiNeural
21 | zh-CN-YunxiaNeural
22 | zh-CN-YunyangNeural
23 | # 地方口音
24 | zh-CN-liaoning-XiaobeiNeural
25 | zh-CN-shaanxi-XiaoniNeural
26 | # 粤语
27 | zh-HK-HiuGaaiNeural
28 | zh-HK-HiuMaanNeural
29 | zh-HK-WanLungNeural
30 | # 湾湾腔
31 | zh-TW-HsiaoChenNeural
32 | zh-TW-HsiaoYuNeural
33 | zh-TW-YunJheNeural
34 | '''
35 | self.voice = "zh-CN-YunjianNeural"
36 |
37 | def voiceToText(self, voice_file):
38 | pass
39 |
40 | async def gen_voice(self, text, fileName):
41 | communicate = edge_tts.Communicate(text, self.voice)
42 | await communicate.save(fileName)
43 |
44 | def textToVoice(self, text):
45 | fileName = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3"
46 |
47 | asyncio.run(self.gen_voice(text, fileName))
48 |
49 | logger.info("[EdgeTTS] textToVoice text={} voice file name={}".format(text, fileName))
50 | return Reply(ReplyType.VOICE, fileName)
51 |
--------------------------------------------------------------------------------
/voice/elevent/elevent_voice.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | from elevenlabs.client import ElevenLabs
4 | from elevenlabs import save
5 | from bridge.reply import Reply, ReplyType
6 | from common.log import logger
7 | from common.tmp_dir import TmpDir
8 | from voice.voice import Voice
9 | from config import conf
10 |
# Module-level client: the API key and voice id are read from the global
# config once at import time, so changing them requires a process restart.
XI_API_KEY = conf().get("xi_api_key")
client = ElevenLabs(api_key=XI_API_KEY)
name = conf().get("xi_voice_id")
14 |
class ElevenLabsVoice(Voice):
    """Text-to-speech backed by the ElevenLabs API (no speech recognition)."""

    def __init__(self):
        pass

    def voiceToText(self, voice_file):
        # Not supported by this backend.
        pass

    def textToVoice(self, text):
        """Synthesize *text* with the configured voice and return a VOICE Reply."""
        audio = client.generate(text=text, voice=name, model='eleven_multilingual_v2')
        # Timestamp + text hash keeps file names unique under multithreading.
        file_name = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3"
        save(audio, file_name)
        logger.info("[ElevenLabs] textToVoice text={} voice file name={}".format(text, file_name))
        return Reply(ReplyType.VOICE, file_name)
--------------------------------------------------------------------------------
/voice/factory.py:
--------------------------------------------------------------------------------
1 | """
2 | voice factory
3 | """
4 |
5 |
def create_voice(voice_type):
    """
    Create a voice service instance.

    Imports are performed lazily inside each branch so that only the
    selected backend's dependencies need to be installed.

    :param voice_type: voice type code (e.g. "baidu", "google", "openai")
    :return: voice instance
    :raises RuntimeError: if ``voice_type`` is not a known backend
    """
    if voice_type == "baidu":
        from voice.baidu.baidu_voice import BaiduVoice

        return BaiduVoice()
    elif voice_type == "google":
        from voice.google.google_voice import GoogleVoice

        return GoogleVoice()
    elif voice_type == "openai":
        from voice.openai.openai_voice import OpenaiVoice

        return OpenaiVoice()
    elif voice_type == "pytts":
        from voice.pytts.pytts_voice import PyttsVoice

        return PyttsVoice()
    elif voice_type == "azure":
        from voice.azure.azure_voice import AzureVoice

        return AzureVoice()
    elif voice_type == "elevenlabs":
        from voice.elevent.elevent_voice import ElevenLabsVoice

        return ElevenLabsVoice()
    elif voice_type == "linkai":
        from voice.linkai.linkai_voice import LinkAIVoice

        return LinkAIVoice()
    elif voice_type == "ali":
        from voice.ali.ali_voice import AliVoice

        return AliVoice()
    elif voice_type == "edge":
        from voice.edge.edge_voice import EdgeVoice

        return EdgeVoice()
    elif voice_type == "xunfei":
        from voice.xunfei.xunfei_voice import XunfeiVoice

        return XunfeiVoice()
    # Original raised a bare RuntimeError class with no message; include the
    # offending value so misconfiguration is diagnosable from the traceback.
    raise RuntimeError("unknown voice_type: {}".format(voice_type))
54 |
--------------------------------------------------------------------------------
/voice/google/google_voice.py:
--------------------------------------------------------------------------------
1 | """
2 | google voice service
3 | """
4 |
5 | import time
6 |
7 | import speech_recognition
8 | from gtts import gTTS
9 |
10 | from bridge.reply import Reply, ReplyType
11 | from common.log import logger
12 | from common.tmp_dir import TmpDir
13 | from voice.voice import Voice
14 |
15 |
class GoogleVoice(Voice):
    """Voice service backed by Google speech recognition and gTTS."""

    recognizer = speech_recognition.Recognizer()

    def __init__(self):
        pass

    def voiceToText(self, voice_file):
        """Transcribe a local audio file with Google speech recognition.

        :param voice_file: path of a file readable by speech_recognition.
        :return: TEXT Reply on success, ERROR Reply on recognition failure.
        """
        with speech_recognition.AudioFile(voice_file) as source:
            audio = self.recognizer.record(source)
        try:
            text = self.recognizer.recognize_google(audio, language="zh-CN")
            logger.info("[Google] voiceToText text={} voice file name={}".format(text, voice_file))
            reply = Reply(ReplyType.TEXT, text)
        except speech_recognition.UnknownValueError:
            reply = Reply(ReplyType.ERROR, "抱歉,我听不懂")
        except speech_recognition.RequestError as e:
            reply = Reply(ReplyType.ERROR, "抱歉,无法连接到 Google 语音识别服务;{0}".format(e))
        # Fixed: the original returned from ``finally``, which swallows any
        # in-flight exception and raises NameError on unbound ``reply``.
        return reply

    def textToVoice(self, text):
        """Synthesize *text* to an mp3 via gTTS; ERROR Reply on failure."""
        try:
            # Timestamp + text hash avoids filename collisions across threads.
            mp3_file = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3"
            tts = gTTS(text=text, lang="zh")
            tts.save(mp3_file)
            logger.info("[Google] textToVoice text={} voice file name={}".format(text, mp3_file))
            reply = Reply(ReplyType.VOICE, mp3_file)
        except Exception as e:
            reply = Reply(ReplyType.ERROR, str(e))
        # Fixed: return moved out of ``finally`` (would swallow BaseException).
        return reply
48 |
--------------------------------------------------------------------------------
/voice/linkai/linkai_voice.py:
--------------------------------------------------------------------------------
1 | """
2 | google voice service
3 | """
4 | import random
5 | import requests
6 | from voice import audio_convert
7 | from bridge.reply import Reply, ReplyType
8 | from common.log import logger
9 | from config import conf
10 | from voice.voice import Voice
11 | from common import const
12 | import os
13 | import datetime
14 |
class LinkAIVoice(Voice):
    """Voice service backed by the LinkAI audio endpoints."""

    def __init__(self):
        pass

    def voiceToText(self, voice_file):
        """Transcribe *voice_file* via the LinkAI transcription API.

        amr files are converted to mp3 first when possible (best-effort).
        :return: TEXT Reply on success, None on any failure.
        """
        logger.debug("[LinkVoice] voice file name={}".format(voice_file))
        try:
            url = conf().get("linkai_api_base", "https://api.link-ai.tech") + "/v1/audio/transcriptions"
            headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}
            model = None
            if not conf().get("text_to_voice") or conf().get("voice_to_text") == "openai":
                model = const.WHISPER_1
            if voice_file.endswith(".amr"):
                try:
                    mp3_file = os.path.splitext(voice_file)[0] + ".mp3"
                    audio_convert.any_to_mp3(voice_file, mp3_file)
                    voice_file = mp3_file
                except Exception as e:
                    logger.warn(f"[LinkVoice] amr file transfer failed, directly send amr voice file: {format(e)}")
            # Fixed: use ``with`` so the upload handle is always closed
            # (the original leaked the file object).
            with open(voice_file, "rb") as file:
                file_body = {
                    "file": file
                }
                data = {
                    "model": model
                }
                res = requests.post(url, files=file_body, headers=headers, data=data, timeout=(5, 60))
            if res.status_code == 200:
                text = res.json().get("text")
            else:
                res_json = res.json()
                logger.error(f"[LinkVoice] voiceToText error, status_code={res.status_code}, msg={res_json.get('message')}")
                return None
            reply = Reply(ReplyType.TEXT, text)
            logger.info(f"[LinkVoice] voiceToText success, text={text}, file name={voice_file}")
        except Exception as e:
            logger.error(e)
            return None
        return reply

    def textToVoice(self, text):
        """Synthesize *text* via the LinkAI speech API.

        :return: VOICE Reply with the mp3 path, or None on failure.
        """
        try:
            url = conf().get("linkai_api_base", "https://api.link-ai.tech") + "/v1/audio/speech"
            headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}
            model = const.TTS_1
            if not conf().get("text_to_voice") or conf().get("text_to_voice") in ["openai", const.TTS_1, const.TTS_1_HD]:
                model = conf().get("text_to_voice_model") or const.TTS_1
            data = {
                "model": model,
                "input": text,
                "voice": conf().get("tts_voice_id"),
                "app_code": conf().get("linkai_app_code")
            }
            res = requests.post(url, headers=headers, json=data, timeout=(5, 120))
            if res.status_code == 200:
                # Timestamp + random suffix keeps temp file names unique.
                tmp_file_name = "tmp/" + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + str(random.randint(0, 1000)) + ".mp3"
                with open(tmp_file_name, 'wb') as f:
                    f.write(res.content)
                reply = Reply(ReplyType.VOICE, tmp_file_name)
                logger.info(f"[LinkVoice] textToVoice success, input={text}, model={model}, voice_id={data.get('voice')}")
                return reply
            else:
                res_json = res.json()
                logger.error(f"[LinkVoice] textToVoice error, status_code={res.status_code}, msg={res_json.get('message')}")
                return None
        except Exception as e:
            logger.error(e)
            return None
84 |
--------------------------------------------------------------------------------
/voice/openai/openai_voice.py:
--------------------------------------------------------------------------------
1 | """
2 | google voice service
3 | """
4 | import json
5 |
6 | import openai
7 |
8 | from bridge.reply import Reply, ReplyType
9 | from common.log import logger
10 | from config import conf
11 | from voice.voice import Voice
12 | import requests
13 | from common import const
14 | import datetime, random
15 |
class OpenaiVoice(Voice):
    """Voice service backed by the OpenAI audio endpoints."""

    def __init__(self):
        openai.api_key = conf().get("open_ai_api_key")
        openai.api_base = conf().get("open_ai_api_base") or "https://api.openai.com/v1"

    def voiceToText(self, voice_file):
        """Transcribe *voice_file* with whisper-1.

        :return: TEXT Reply on success, ERROR Reply on any failure.
        """
        logger.debug("[Openai] voice file name={}".format(voice_file))
        try:
            api_base = conf().get("open_ai_api_base") or "https://api.openai.com/v1"
            url = f'{api_base}/audio/transcriptions'
            headers = {
                'Authorization': 'Bearer ' + conf().get("open_ai_api_key"),
                # Do NOT set Content-Type here: requests must generate the
                # multipart boundary itself or the upload fails.
            }
            data = {
                "model": "whisper-1",
            }
            # Fixed: use ``with`` so the upload handle is always closed
            # (the original leaked the file object).
            with open(voice_file, "rb") as file:
                files = {
                    "file": file,
                }
                response = requests.post(url, headers=headers, files=files, data=data)
            text = response.json()['text']
            reply = Reply(ReplyType.TEXT, text)
            logger.info("[Openai] voiceToText text={} voice file name={}".format(text, voice_file))
        except Exception as e:
            # Fixed: log the failure instead of silently swallowing it.
            logger.error(e)
            reply = Reply(ReplyType.ERROR, "我暂时还无法听清您的语音,请稍后再试吧~")
        # Fixed: return moved out of ``finally`` (would swallow exceptions).
        return reply

    def textToVoice(self, text):
        """Synthesize *text* via the OpenAI TTS endpoint.

        :return: VOICE Reply with the mp3 path, or ERROR Reply on failure.
        """
        try:
            api_base = conf().get("open_ai_api_base") or "https://api.openai.com/v1"
            url = f'{api_base}/audio/speech'
            headers = {
                'Authorization': 'Bearer ' + conf().get("open_ai_api_key"),
                'Content-Type': 'application/json'
            }
            data = {
                'model': conf().get("text_to_voice_model") or const.TTS_1,
                'input': text,
                'voice': conf().get("tts_voice_id") or "alloy"
            }
            response = requests.post(url, headers=headers, json=data)
            # Timestamp + random suffix keeps file names unique.
            file_name = "tmp/" + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + str(random.randint(0, 1000)) + ".mp3"
            logger.debug(f"[OPENAI] text_to_Voice file_name={file_name}, input={text}")
            with open(file_name, 'wb') as f:
                f.write(response.content)
            logger.info(f"[OPENAI] text_to_Voice success")
            reply = Reply(ReplyType.VOICE, file_name)
        except Exception as e:
            logger.error(e)
            reply = Reply(ReplyType.ERROR, "遇到了一点小问题,请稍后再问我吧")
        return reply
72 |
--------------------------------------------------------------------------------
/voice/pytts/pytts_voice.py:
--------------------------------------------------------------------------------
1 | """
2 | pytts voice service (offline)
3 | """
4 |
5 | import os
6 | import sys
7 | import time
8 |
9 | import pyttsx3
10 |
11 | from bridge.reply import Reply, ReplyType
12 | from common.log import logger
13 | from common.tmp_dir import TmpDir
14 | from voice.voice import Voice
15 |
16 |
class PyttsVoice(Voice):
    """Offline text-to-speech via pyttsx3 (no speech recognition)."""

    engine = pyttsx3.init()

    def __init__(self):
        # Speech rate.
        self.engine.setProperty("rate", 125)
        # Volume.
        self.engine.setProperty("volume", 1.0)
        if sys.platform == "win32":
            for voice in self.engine.getProperty("voices"):
                if "Chinese" in voice.name:
                    self.engine.setProperty("voice", voice.id)
        else:
            self.engine.setProperty("voice", "zh")
            # If the problem of espeak is fixed, using runAndWait() and remove this startLoop()
            # TODO: check if this is work on win32
            self.engine.startLoop(useDriverLoop=False)

    def textToVoice(self, text):
        """Synthesize *text* to a wav file; ERROR Reply on failure."""
        try:
            # Timestamp + text hash avoids filename collisions across threads.
            wavFileName = "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".wav"
            wavFile = TmpDir().path() + wavFileName
            logger.info("[Pytts] textToVoice text={} voice file name={}".format(text, wavFile))

            self.engine.save_to_file(text, wavFile)

            if sys.platform == "win32":
                self.engine.runAndWait()
            else:
                # On Linux, runAndWait() returns once the task queue is empty
                # even though synthesis still runs in a coroutine, and calling
                # it repeatedly can deadlock. Until espeak fixes this, drive
                # the engine manually and poll until the file actually exists.
                self.engine.iterate()
                while self.engine.isBusy() or wavFileName not in os.listdir(TmpDir().path()):
                    time.sleep(0.1)

            reply = Reply(ReplyType.VOICE, wavFile)

        except Exception as e:
            reply = Reply(ReplyType.ERROR, str(e))
        # Fixed: return moved out of ``finally`` — a return there would also
        # swallow BaseException (e.g. KeyboardInterrupt) raised in the try.
        return reply
65 |
--------------------------------------------------------------------------------
/voice/voice.py:
--------------------------------------------------------------------------------
1 | """
2 | Voice service abstract class
3 | """
4 |
5 |
class Voice(object):
    """Abstract base class for speech services."""

    def voiceToText(self, voice_file):
        """Convert the given voice file into text via the backing service."""
        raise NotImplementedError

    def textToVoice(self, text):
        """Convert the given text into a voice file via the backing service."""
        raise NotImplementedError
18 |
--------------------------------------------------------------------------------
/voice/xunfei/config.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "APPID":"xxx71xxx",
3 | "APIKey":"xxxx69058exxxxxx",
4 | "APISecret":"xxxx697f0xxxxxx",
5 | "BusinessArgsTTS":{"aue": "lame", "sfl": 1, "auf": "audio/L16;rate=16000", "vcn": "xiaoyan", "tte": "utf8"},
6 | "BusinessArgsASR":{"domain": "iat", "language": "zh_cn", "accent": "mandarin", "vad_eos":10000, "dwa": "wpgs"}
7 | }
8 |
--------------------------------------------------------------------------------
/voice/xunfei/xunfei_voice.py:
--------------------------------------------------------------------------------
1 | #####################################################################
2 | # xunfei voice service
3 | # Auth: njnuko
4 | # Email: njnuko@163.com
5 | #
6 | # 要使用本模块, 首先到 xfyun.cn 注册一个开发者账号,
7 | # 之后创建一个新应用, 然后在应用管理的语音识别或者语音合同右边可以查看APPID API Key 和 Secret Key
8 | # 然后在 config.json 中填入这三个值
9 | #
10 | # 配置说明:
11 | # {
12 | # "APPID":"xxx71xxx",
13 | # "APIKey":"xxxx69058exxxxxx", #讯飞xfyun.cn控制台语音合成或者听写界面的APIKey
#    "APISecret":"xxxx697f0xxxxxx",			#讯飞xfyun.cn控制台语音合成或者听写界面的APISecret
15 | # "BusinessArgsTTS":{"aue": "lame", "sfl": 1, "auf": "audio/L16;rate=16000", "vcn": "xiaoyan", "tte": "utf8"}, #语音合成的参数,具体可以参考xfyun.cn的文档
16 | # "BusinessArgsASR":{"domain": "iat", "language": "zh_cn", "accent": "mandarin", "vad_eos":10000, "dwa": "wpgs"} #语音听写的参数,具体可以参考xfyun.cn的文档
17 | # }
18 | #####################################################################
19 |
20 | import json
21 | import os
22 | import time
23 |
24 | from bridge.reply import Reply, ReplyType
25 | from common.log import logger
26 | from common.tmp_dir import TmpDir
27 | from config import conf
28 | from voice.voice import Voice
29 | from .xunfei_asr import xunfei_asr
30 | from .xunfei_tts import xunfei_tts
31 | from voice.audio_convert import any_to_mp3
32 | import shutil
33 | from pydub import AudioSegment
34 |
35 |
class XunfeiVoice(Voice):
    """Voice service backed by Xunfei (iFlytek) ASR and TTS websocket APIs."""

    def __init__(self):
        """Load APPID/APIKey/APISecret and business args from config.json."""
        try:
            curdir = os.path.dirname(__file__)
            config_path = os.path.join(curdir, "config.json")
            # Fixed: the original bound the parsed config to ``conf``, shadowing
            # the imported config helper, and left a debug print() in place.
            with open(config_path, "r") as fr:
                config = json.load(fr)
            self.APPID = str(config.get("APPID"))
            self.APIKey = str(config.get("APIKey"))
            self.APISecret = str(config.get("APISecret"))
            self.BusinessArgsTTS = config.get("BusinessArgsTTS")
            self.BusinessArgsASR = config.get("BusinessArgsASR")

        except Exception as e:
            logger.warn("XunfeiVoice init failed: %s, ignore " % e)

    def voiceToText(self, voice_file):
        """Recognize a local voice file via Xunfei ASR.

        :return: TEXT Reply on success, ERROR Reply on failure.
        """
        try:
            logger.debug("[Xunfei] voice file name={}".format(voice_file))
            text = xunfei_asr(self.APPID, self.APISecret, self.APIKey, self.BusinessArgsASR, voice_file)
            logger.info("讯飞语音识别到了: {}".format(text))
            reply = Reply(ReplyType.TEXT, text)
        except Exception as e:
            # Fixed: the original logged a copy-pasted "init failed" message
            # and returned an unformatted "{0}" placeholder to the user.
            logger.warn("XunfeiVoice voiceToText failed: %s" % e)
            reply = Reply(ReplyType.ERROR, "讯飞语音识别出错了;{0}".format(e))
        return reply

    def textToVoice(self, text):
        """Synthesize *text* via Xunfei TTS; ERROR Reply on failure."""
        try:
            # Timestamp + text hash avoids filename collisions across threads.
            fileName = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".mp3"
            xunfei_tts(self.APPID, self.APIKey, self.APISecret, self.BusinessArgsTTS, text, fileName)
            logger.info("[Xunfei] textToVoice text={} voice file name={}".format(text, fileName))
            reply = Reply(ReplyType.VOICE, fileName)
        except Exception as e:
            # Fixed: log the actual exception (the original logged the file name).
            logger.error("[Xunfei] textToVoice error={}".format(e))
            reply = Reply(ReplyType.ERROR, "抱歉,讯飞语音合成失败")
        return reply
87 |
--------------------------------------------------------------------------------