├── assets └── .gitkeep ├── .github ├── ISSUE_TEMPLATE │ ├── config.yml │ ├── feature_request.yml │ └── bug_report.yml ├── workflows │ └── ruff.yml ├── pull_request_template.md └── release.yml ├── utils ├── gen.py ├── print.py ├── package.py ├── check.py ├── log.py ├── const.py ├── query_key.py ├── serialize.py └── api.py ├── .gitignore ├── requirements.txt ├── common ├── singleton.py ├── context.py ├── reply.py ├── expired_dict.py ├── emitter.py └── session.py ├── .pre-commit-config.yaml ├── plugins ├── source.json ├── __init__.py ├── plugin.py ├── built_in.py ├── event.py ├── README.md └── manager.py ├── bot ├── azure_chatgpt.py ├── bot.py ├── litellm.py └── chatgpt.py ├── channel ├── channel.py ├── message.py └── wechat.py ├── app.py ├── config.py ├── config.template.json ├── LICENSE ├── pyproject.toml ├── README_ZH.md └── README.md /assets/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | -------------------------------------------------------------------------------- /utils/gen.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | def gen_id(): 4 | return time.strftime("%Y%m%d%H%M%S") -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | .DS_Store 3 | __pycache__ 4 | config.json 5 | assets/*.png 6 | assets/*.mp4 7 | .ruff_cache 8 | plugins/**/ 9 | litellm_uuid.txt -------------------------------------------------------------------------------- /.github/workflows/ruff.yml: -------------------------------------------------------------------------------- 1 | name: Ruff 2 | on: [push, pull_request] 3 | jobs: 4 | ruff: 5 | runs-on: ubuntu-latest 6 | steps: 7 | - uses: actions/checkout@v3 8 | - uses: chartboost/ruff-action@v1 9 | -------------------------------------------------------------------------------- /utils/print.py: -------------------------------------------------------------------------------- 1 | from pyfiglet import Figlet 2 | from termcolor import cprint 3 | 4 | 5 | def color_print(text, color="green"): 6 | content = Figlet(font="slant", width=2000).renderText(text) 7 | cprint(content, color) 8 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | beautifulsoup4==4.11.1 2 | lxml==4.9.2 3 | pyfiglet==0.8.post1 4 | requests==2.28.1 5 | termcolor==2.1.1 6 | websocket_client==1.5.1 7 | openai==0.27.8 8 | pydantic==2.1.1 9 | dulwich==0.21.5 10 | litellm==0.1.365 -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ⚠️ **Notice**: Apply a label to this pull request in one the following three labels: 👉 2 | 3 | - `bug`: Fix something isn't working 4 | - `enhancement`: Enhance existing features 5 | - `feature`: Add brand new features 6 | -------------------------------------------------------------------------------- /.github/release.yml: -------------------------------------------------------------------------------- 1 | changelog: 2 | categories: 3 | - title: Features 🎉 4 | labels: 5 | - feature 6 | - title: Enhancements ⚡️ 7 | labels: 8 | - enhancement 9 | - title: Fixed bugs 🛠 10 | labels: 11 | - bug 12 | -------------------------------------------------------------------------------- /common/singleton.py: -------------------------------------------------------------------------------- 1 | def singleton(cls): 2 | instances = {} 3 | 4 | def get_instance(*args, **kwargs): 5 | if cls not in instances: 6 | instances[cls] = cls(*args, **kwargs) 7 | return instances[cls] 8 | 9 | return get_instance 10 | -------------------------------------------------------------------------------- /utils/package.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import sys 3 | 4 | 5 | def install(package): 6 | subprocess.check_call([sys.executable, "-m", "pip", "install", package]) 7 | 8 | 9 | def install_file(file): 10 | subprocess.check_call([sys.executable, "-m", "pip", "install", "-r", file]) 11 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/psf/black 3 | rev: 23.3.0 4 | hooks: 5 | - id: black 6 | language_version: python3.10 7 | - repo: https://github.com/astral-sh/ruff-pre-commit 8 | # Ruff version. 9 | rev: v0.0.275 10 | hooks: 11 | - id: ruff 12 | -------------------------------------------------------------------------------- /utils/check.py: -------------------------------------------------------------------------------- 1 | def check_prefix(content, prefix_list): 2 | for prefix in prefix_list: 3 | if content.startswith(prefix): 4 | return prefix 5 | return None 6 | 7 | 8 | def is_wx_account(id): 9 | if id is None: 10 | return False 11 | return not id.lower().startswith("gh_") 12 | -------------------------------------------------------------------------------- /plugins/source.json: -------------------------------------------------------------------------------- 1 | { 2 | "tiktok": { 3 | "repo": "https://github.com/iuiaoin/plugin_tiktok.git", 4 | "desc": "A plugin show you short videos with pretty girls" 5 | }, 6 | "weather": { 7 | "repo": "https://github.com/iuiaoin/plugin_weather.git", 8 | "desc": "A plugin that offers you daily weather report for free" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /bot/azure_chatgpt.py: -------------------------------------------------------------------------------- 1 | import openai 2 | from bot.chatgpt import ChatGPTBot 3 | from config import conf 4 | 5 | 6 | class AzureChatGPTBot(ChatGPTBot): 7 | def __init__(self): 8 | super().__init__() 9 | openai.api_type = "azure" 10 | openai.api_version = "2023-06-01-preview" 11 | self.args["deployment_id"] = conf().get("azure_deployment_id") 12 | -------------------------------------------------------------------------------- /plugins/__init__.py: -------------------------------------------------------------------------------- 1 | from .event import EventType, EventAction, Event 2 | from .manager import PluginManager 3 | from .plugin import Plugin 4 | from utils.log import logger 5 | from common.reply import Reply, ReplyType 6 | 7 | __all__ = [ 8 | "EventType", 9 | "EventAction", 10 | "Event", 11 | "PluginManager", 12 | "Plugin", 13 | "logger", 14 | "Reply", 15 | "ReplyType", 16 | ] 17 | 18 | register = PluginManager().register 19 | -------------------------------------------------------------------------------- /utils/log.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import sys 3 | 4 | 5 | def _get_logger(): 6 | log = logging.getLogger("log") 7 | log.setLevel(logging.INFO) 8 | console_handle = logging.StreamHandler(sys.stdout) 9 | console_handle.setFormatter( 10 | logging.Formatter( 11 | "[%(levelname)s][%(asctime)s][%(filename)s:%(lineno)d] - %(message)s", datefmt="%Y-%m-%d %H:%M:%S" 12 | ) 13 | ) 14 | log.addHandler(console_handle) 15 | return log 16 | 17 | 18 | # log handler 19 | logger = _get_logger() 20 | -------------------------------------------------------------------------------- /common/context.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | from enum import Enum 3 | from config import conf 4 | 5 | 6 | class ContextType(Enum): 7 | CREATE_TEXT = 1 8 | CREATE_IMAGE = 2 9 | 10 | def __str__(self): 11 | return self.name 12 | 13 | 14 | class Context(BaseModel): 15 | session_id: str = None 16 | type: ContextType = ContextType.CREATE_TEXT 17 | query: str = None 18 | system_prompt: str = None 19 | 20 | def __init__(self): 21 | super().__init__() 22 | self.system_prompt = conf().get("role_desc") 23 | -------------------------------------------------------------------------------- /common/reply.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from pydantic import BaseModel 3 | 4 | 5 | class ReplyType(Enum): 6 | TEXT = 1 7 | IMAGE = 2 8 | VIDEO = 3 9 | 10 | def __str__(self): 11 | return self.name 12 | 13 | 14 | class Reply(BaseModel): 15 | type: ReplyType = None 16 | content: str = None 17 | 18 | def __init__(self, type: ReplyType, content: str): 19 | super().__init__() 20 | self.type = type 21 | self.content = content 22 | 23 | def __str__(self): 24 | return f"Reply(type={self.type}, content={self.content})" 25 | -------------------------------------------------------------------------------- /channel/channel.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | 3 | 4 | class Channel(ABC): 5 | @abstractmethod 6 | def handle_message(self, msg): 7 | pass 8 | 9 | @abstractmethod 10 | def handle_group(self, msg): 11 | pass 12 | 13 | @abstractmethod 14 | def handle_single(self, msg): 15 | pass 16 | 17 | @abstractmethod 18 | def decorate_reply(self, reply, msg): 19 | pass 20 | 21 | @abstractmethod 22 | def handle_reply(self, msg, context): 23 | pass 24 | 25 | @abstractmethod 26 | def send(self, reply, msg): 27 | pass 28 | -------------------------------------------------------------------------------- /app.py: -------------------------------------------------------------------------------- 1 | from channel.wechat import WeChatChannel 2 | from config import load_config 3 | from utils.log import logger 4 | from utils.print import color_print 5 | from plugins.manager import PluginManager 6 | 7 | 8 | if __name__ == "__main__": 9 | try: 10 | # load config 11 | load_config() 12 | 13 | # print banner 14 | color_print("WeChat GPTBot") 15 | 16 | # load plugins 17 | PluginManager().load_plugins() 18 | 19 | # start wechat channel 20 | WeChatChannel().startup() 21 | except Exception as e: 22 | logger.error("App startup failed!") 23 | logger.exception(e) 24 | -------------------------------------------------------------------------------- /config.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | from utils.log import logger 4 | 5 | config = {} 6 | 7 | 8 | def load_config(): 9 | global config 10 | config_path = "config.json" 11 | if not os.path.exists(config_path): 12 | raise Exception("Config file is not exist, please create config.json according to config.template.json") 13 | 14 | config_str = read_file(config_path) 15 | # deserialize json string to dict 16 | config = json.loads(config_str) 17 | logger.info(f"Load config: {config}") 18 | 19 | 20 | def read_file(path): 21 | with open(path, mode="r", encoding="utf-8") as f: 22 | return f.read() 23 | 24 | 25 | def conf(): 26 | return config 27 | -------------------------------------------------------------------------------- /utils/const.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | # server 4 | IP = "127.0.0.1" 5 | PORT = "5555" 6 | SERVER = f"ws://{IP}:{PORT}" 7 | 8 | # "SUCCSESSED" should be a typo in the hook server😂 9 | SUCCESS = "SUCCSESSED" 10 | 11 | 12 | class MessageType(Enum): 13 | RECV_TXT_MSG = 1 14 | RECV_PIC_MSG = 3 15 | RECV_TXT_CITE_MSG = 49 16 | PIC_MSG = 500 17 | AT_MSG = 550 18 | TXT_MSG = 555 19 | USER_LIST = 5000 20 | GET_USER_LIST_SUCCESS = 5001 21 | GET_USER_LIST_FAIL = 5002 22 | ATTACH_FILE = 5003 23 | HEART_BEAT = 5005 24 | CHATROOM_MEMBER = 5010 25 | CHATROOM_MEMBER_NICK = 5020 26 | PERSONAL_INFO = 6500 27 | PERSONAL_DETAIL = 6550 28 | DEBUG_SWITCH = 6000 29 | DESTROY_ALL = 9999 30 | JOIN_ROOM = 10000 31 | -------------------------------------------------------------------------------- /plugins/plugin.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from plugins.event import Event 3 | 4 | 5 | class Plugin(ABC): 6 | name = None 7 | 8 | def __init__(self, config: dict): 9 | super().__init__() 10 | if self.name is None: 11 | raise NotImplementedError("Plugin name is not defined") 12 | self.config = config 13 | 14 | @abstractmethod 15 | def did_receive_message(self, event: Event): 16 | pass 17 | 18 | @abstractmethod 19 | def will_generate_reply(self, event: Event): 20 | pass 21 | 22 | @abstractmethod 23 | def will_decorate_reply(self, event: Event): 24 | pass 25 | 26 | @abstractmethod 27 | def will_send_reply(self, event: Event): 28 | pass 29 | 30 | @abstractmethod 31 | def help(self, **kwargs) -> str: 32 | return "No help docs" 33 | -------------------------------------------------------------------------------- /common/expired_dict.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime, timedelta 2 | 3 | 4 | class ExpiredDict(dict): 5 | def __init__(self, expired_duration): 6 | super().__init__() 7 | self.expired_duration = expired_duration 8 | 9 | def __getitem__(self, key): 10 | value, expired_time = super().__getitem__(key) 11 | if datetime.now() > expired_time: 12 | del self[key] 13 | raise KeyError(f"expired {key}") 14 | self.__setitem__(key, value) 15 | return value 16 | 17 | def __setitem__(self, key, value): 18 | expired_time = datetime.now() + timedelta(seconds=self.expired_duration) 19 | super().__setitem__(key, (value, expired_time)) 20 | 21 | def get(self, key, default=None): 22 | try: 23 | return self[key] 24 | except KeyError: 25 | return default 26 | -------------------------------------------------------------------------------- /config.template.json: -------------------------------------------------------------------------------- 1 | { 2 | "openai_api_key": "YOUR API SECRET KEY", 3 | "model": "gpt-3.5-turbo", 4 | "use_azure_chatgpt": false, 5 | "azure_deployment_id": "", 6 | "role_desc": "You are a helpful assistant.", 7 | "session_expired_duration": 3600, 8 | "max_tokens": 1000, 9 | "temperature": 0.9, 10 | "proxy": "", 11 | "openai_api_base": "", 12 | "create_image_prefix": ["draw", "paint", "imagine"], 13 | "create_image_model": "dall-e-3", 14 | "create_image_size": "1024x1024", 15 | "create_image_style": "vivid", 16 | "create_image_quality": "hd", 17 | "clear_current_session_command": "#clear session", 18 | "clear_all_sessions_command": "#clear all sessions", 19 | "chat_group_session_independent": false, 20 | "single_chat_prefix": ["bot", "@bot"], 21 | "query_key_command": "#query key", 22 | "recent_days": 5, 23 | "plugins": [{ "name": "tiktok", "command": "#tiktok" }], 24 | "openai_sensitive_id": "" 25 | } 26 | -------------------------------------------------------------------------------- /bot/bot.py: -------------------------------------------------------------------------------- 1 | import litellm 2 | from common.context import Context 3 | from config import conf 4 | from common.singleton import singleton 5 | from common.reply import Reply 6 | 7 | 8 | @singleton 9 | class Bot: 10 | def __init__(self): 11 | use_azure_chatgpt = conf().get("use_azure_chatgpt", False) 12 | model = conf().get("model", "gpt-3.5-turbo") 13 | if use_azure_chatgpt: 14 | from bot.azure_chatgpt import AzureChatGPTBot 15 | 16 | self.bot = AzureChatGPTBot() 17 | 18 | elif model in litellm.open_ai_chat_completion_models: 19 | from bot.chatgpt import ChatGPTBot 20 | 21 | self.bot = ChatGPTBot() 22 | else: 23 | # see litellm supported models here: 24 | # https://litellm.readthedocs.io/en/latest/supported/ 25 | from bot.litellm import LiteLLMChatGPTBot 26 | 27 | self.bot = LiteLLMChatGPTBot() 28 | 29 | def reply(self, context: Context) -> Reply: 30 | return self.bot.reply(context) 31 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.yml: -------------------------------------------------------------------------------- 1 | name: Feature request 🎉 2 | description: Propose your new idea or suggestion for the project 3 | title: "[Feat]: " 4 | labels: ["feature"] 5 | body: 6 | - type: checkboxes 7 | attributes: 8 | label: Search for answers in existing issues 9 | description: Please check existing issues [here](https://github.com/iuiaoin/wechat-gptbot/issues?q=is%3Aissue) 10 | options: 11 | - label: I have searched issues, there is no similar issue related 12 | required: true 13 | - type: textarea 14 | id: description 15 | attributes: 16 | label: Feature description 17 | description: Describe the feature in detail, or provide relevant samples 18 | placeholder: I have a proposal that enable wechat-gptbot to... 19 | - type: textarea 20 | id: motivation 21 | attributes: 22 | label: Motivation 23 | description: Summarize your motivation and it may help us discover and come up with better solutions 24 | placeholder: Without this feature, I cannot... 25 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Declan 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /utils/query_key.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from config import conf 3 | 4 | 5 | class QueryKey(object): 6 | def get_key(): 7 | api_base = conf().get("openai_api_base") 8 | if api_base: 9 | api_base = api_base 10 | else: 11 | api_base = "https://api.openai.com/v1" 12 | subscription_url = api_base + "/dashboard/billing/credit_grants" 13 | headers = { 14 | "Authorization": "Bearer " + conf().get("openai_sensitive_id"), 15 | "Content-Type": "application/json", 16 | } 17 | subscription_response = requests.get(subscription_url, headers=headers) 18 | if subscription_response.status_code == 200: 19 | data = subscription_response.json() 20 | total_granted = data.get("total_granted") 21 | total_used = data.get("total_used") 22 | total_available = data.get("total_available") 23 | else: 24 | return subscription_response.text 25 | 26 | return ( 27 | f"## Total:\t{total_granted:.2f}$ \n" 28 | f"## Used:\t{total_used:.2f}$ \n" 29 | f"## Available:\t{total_available:.2f}$ \n" 30 | ) 31 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.ruff] 2 | # Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default. 3 | select = ["E", "F"] 4 | ignore = ["E501"] 5 | 6 | # Allow autofix for all enabled rules (when `--fix`) is provided. 7 | fixable = ["A", "B", "C", "D", "E", "F", "G", "I", "N", "Q", "S", "T", "W", "ANN", "ARG", "BLE", "COM", "DJ", "DTZ", "EM", "ERA", "EXE", "FBT", "ICN", "INP", "ISC", "NPY", "PD", "PGH", "PIE", "PL", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "SIM", "SLF", "TCH", "TID", "TRY", "UP", "YTT"] 8 | unfixable = [] 9 | 10 | # Exclude a variety of commonly ignored directories. 11 | exclude = [ 12 | ".bzr", 13 | ".direnv", 14 | ".eggs", 15 | ".git", 16 | ".git-rewrite", 17 | ".hg", 18 | ".mypy_cache", 19 | ".nox", 20 | ".pants.d", 21 | ".pytype", 22 | ".ruff_cache", 23 | ".svn", 24 | ".tox", 25 | ".venv", 26 | "__pypackages__", 27 | "_build", 28 | "buck-out", 29 | "build", 30 | "dist", 31 | "node_modules", 32 | "venv", 33 | ] 34 | per-file-ignores = {} 35 | 36 | # Same as Black. 37 | line-length = 88 38 | 39 | # Allow unused variables when underscore-prefixed. 40 | dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$" 41 | 42 | # Assume Python 3.10. 43 | target-version = "py310" 44 | 45 | [tool.ruff.mccabe] 46 | # Unlike Flake8, default to a complexity level of 10. 47 | max-complexity = 10 48 | -------------------------------------------------------------------------------- /common/emitter.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from typing import Callable 3 | 4 | 5 | class Emitter: 6 | def __init__(self): 7 | self.__events__ = {} 8 | 9 | # subscribe event 10 | def on(self, type: Enum, fn: Callable) -> None: 11 | if type not in self.__events__: 12 | self.__events__[type] = [] 13 | if not self.has(type, fn): 14 | self.__events__[type].append(fn) 15 | 16 | # unsubscribe event 17 | def off(self, type: Enum, fn: Callable) -> None: 18 | listeners = self.__events__.get(type) 19 | if listeners is not None and len(listeners) > 0: 20 | listeners.remove(fn) 21 | 22 | # check if the function has subscribed the event 23 | def has(self, type: Enum, fn: Callable) -> bool: 24 | listeners = self.__events__.get(type) 25 | if listeners is None or len(listeners) == 0: 26 | return False 27 | return fn in listeners 28 | 29 | # emit event 30 | def emit(self, type: Enum, *args, **kwargs) -> None: 31 | listeners = self.__events__.get(type) 32 | if listeners is not None and len(listeners) > 0: 33 | for fn in listeners: 34 | fn(*args, **kwargs) 35 | 36 | # subscribe event and unsubscribe after once 37 | def once(self, type: Enum, fn: Callable) -> None: 38 | def once_fn(*args, **kwargs): 39 | fn(*args, **kwargs) 40 | self.off(type, once_fn) 41 | 42 | self.on(type, once_fn) 43 | -------------------------------------------------------------------------------- /channel/message.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | from utils.api import get_sender_name 3 | 4 | 5 | class Message(BaseModel): 6 | room_id: str = None 7 | sender_id: str = None 8 | sender_name: str = None 9 | receiver_id: str = None 10 | receiver_name: str = None 11 | content: str = None 12 | type: int = None # MessageType value 13 | is_group: bool = False 14 | is_at: bool = False 15 | create_time: str = None 16 | _raw_msg: dict = None 17 | 18 | def __init__(self, msg, info): 19 | super().__init__() 20 | self._raw_msg = msg 21 | self.receiver_id = info["wx_id"] 22 | self.receiver_name = info["wx_name"] 23 | self.content = msg["content"].strip() 24 | self.type = msg["type"] 25 | self.create_time = msg["time"] 26 | if "@chatroom" in msg["wxid"]: 27 | self.is_group = True 28 | self.room_id = msg["wxid"] 29 | self.sender_id = msg["id1"] 30 | self.is_at = f"@{self.receiver_name}" in self.content 31 | else: 32 | self.is_group = False 33 | self.sender_id = msg["wxid"] 34 | self.sender_name = get_sender_name(self.room_id, self.sender_id) 35 | 36 | def __str__(self): 37 | return f"Message(room_id={self.room_id}, sender_id={self.sender_id}, sender_name={self.sender_name}, receiver_id={self.receiver_id}, receiver_name={self.receiver_name}, content={self.content}, type={self.type}, is_group={self.is_group}, create_time={self.create_time}, is_at={self.is_at})" 38 | -------------------------------------------------------------------------------- /plugins/built_in.py: -------------------------------------------------------------------------------- 1 | from config import conf 2 | from common.singleton import singleton 3 | from common.session import Session 4 | from common.reply import Reply, ReplyType 5 | from plugins.event import Event 6 | from utils.query_key import QueryKey 7 | 8 | 9 | @singleton 10 | class Cmd: 11 | def __init__(self, plugins: dict): 12 | self.config = conf() 13 | self.plugins = plugins 14 | 15 | def will_generate_reply(self, event: Event): 16 | query = event.context.query 17 | session_id = event.context.session_id 18 | if query == self.config.get("clear_current_session_command", "#clear session"): 19 | Session.clear_session(session_id) 20 | event.reply = Reply(ReplyType.TEXT, "The session has been cleared") 21 | event.bypass() 22 | elif query == self.config.get( 23 | "clear_all_sessions_command", "#clear all sessions" 24 | ): 25 | Session.clear_all_session() 26 | event.reply = Reply(ReplyType.TEXT, "All sessions have been cleared") 27 | event.bypass() 28 | elif query == self.config.get("query_key_command", "#query key"): 29 | event.reply = Reply(ReplyType.TEXT, QueryKey.get_key()) 30 | event.bypass() 31 | elif query.startswith("#help "): 32 | plugin_name = query.split(" ")[1] 33 | reply_text = f"No plugin named {plugin_name}" 34 | for name in self.plugins: 35 | if name == plugin_name: 36 | reply_text = self.plugins[name].help() 37 | break 38 | event.reply = Reply(ReplyType.TEXT, reply_text) 39 | event.bypass() 40 | -------------------------------------------------------------------------------- /utils/serialize.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import json 4 | import requests 5 | from channel.message import Message 6 | from utils.log import logger 7 | from utils.const import MessageType 8 | from utils.gen import gen_id 9 | 10 | 11 | def serialize_img(image_url: str) -> str: 12 | return serialize_file(image_url, "png") 13 | 14 | 15 | def serialize_video(video_url: str) -> str: 16 | return serialize_file(video_url, "mp4") 17 | 18 | 19 | def serialize_file(file_url: str, suffix: str) -> str: 20 | try: 21 | # download file 22 | path = os.path.abspath("./assets") 23 | file_name = int(time.time() * 1000) 24 | response = requests.get(file_url, stream=True) 25 | response.raise_for_status() # Raise exception if invalid response 26 | 27 | with open(f"{path}\\{file_name}.{suffix}", "wb+") as f: 28 | for chunk in response.iter_content(chunk_size=8192): 29 | if chunk: # filter out keep-alive new chunks 30 | f.write(chunk) 31 | f.close() 32 | img_path = os.path.abspath(f"{path}\\{file_name}.{suffix}").replace( 33 | "\\", "\\\\" 34 | ) 35 | return img_path 36 | except Exception as e: 37 | logger.error(f"[Download File Error]: {e}") 38 | 39 | 40 | def serialize_text(text: str, msg: Message) -> str: 41 | msg_type = MessageType.AT_MSG.value if msg.is_group else MessageType.TXT_MSG.value 42 | msg = { 43 | "id": gen_id(), 44 | "type": msg_type, 45 | "roomid": msg.room_id or "null", 46 | "wxid": msg.sender_id or "null", 47 | "content": text, 48 | "nickname": msg.sender_name or "null", 49 | "ext": "null", 50 | } 51 | return json.dumps(msg) 52 | -------------------------------------------------------------------------------- /plugins/event.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from pydantic import BaseModel 3 | from common.context import Context 4 | from common.reply import Reply 5 | from channel.message import Message 6 | from channel.channel import Channel 7 | 8 | 9 | class EventType(Enum): 10 | DID_RECEIVE_MESSAGE = 1 # receive message 11 | WILL_GENERATE_REPLY = 2 # generate reply 12 | WILL_DECORATE_REPLY = 3 # decorate reply 13 | WILL_SEND_REPLY = 4 # send reply 14 | 15 | def __str__(self): 16 | return self.name 17 | 18 | 19 | class EventAction(Enum): 20 | PROCEED = 1 # proceed the plugin chain 21 | STOP = 2 # stop the plugin chain 22 | BYPASS = 3 # bypass the plugin chain and default logic 23 | 24 | 25 | class Event(BaseModel): 26 | class Config: 27 | arbitrary_types_allowed = True 28 | 29 | type: EventType = None 30 | channel: Channel = None 31 | message: Message = None 32 | context: Context = None 33 | reply: Reply = None 34 | action: EventAction = EventAction.PROCEED 35 | 36 | def __init__(self, type: EventType, data: dict): 37 | super().__init__() 38 | self.type = type 39 | self.channel = data.get("channel") 40 | self.message = data.get("message") 41 | self.context = data.get("context") 42 | self.reply = data.get("reply") 43 | 44 | def proceed(self): 45 | self.action = EventAction.PROCEED 46 | 47 | def stop(self): 48 | self.action = EventAction.STOP 49 | 50 | def bypass(self): 51 | self.action = EventAction.BYPASS 52 | 53 | @property 54 | def is_proceed(self) -> bool: 55 | return self.action == EventAction.PROCEED 56 | 57 | @property 58 | def is_stop(self) -> bool: 59 | return self.action == EventAction.STOP 60 | 61 | @property 62 | def is_bypass(self) -> bool: 63 | return self.action == EventAction.BYPASS 64 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.yml: -------------------------------------------------------------------------------- 1 | name: Bug Report 🐞 2 | description: File a bug report about the project 3 | title: "[Bug]: " 4 | labels: ["bug"] 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: | 9 | #### Confirm in advance: 10 | 1. Have `git pull` the latest code 11 | 2. Python installed, version between 3.8.X~3.10.X 12 | 3. Dependencies installed via `pip install -r requirements.txt` 13 | 4. No missing configuration in `config.json` 14 | 5. No network issue(able to connect to openai server) 15 | - type: checkboxes 16 | attributes: 17 | label: Search for answers in existing issues 18 | description: Please check existing issues [here](https://github.com/iuiaoin/wechat-gptbot/issues?q=is%3Aissue) 19 | options: 20 | - label: I have searched issues, there is no issue related to the problem I encountered 21 | required: true 22 | - type: dropdown 23 | id: python-version 24 | attributes: 25 | label: Python version 26 | description: What version of python are you running? 27 | options: 28 | - python 3.8 29 | - python 3.9 30 | - python 3.10 31 | - other 32 | validations: 33 | required: true 34 | - type: textarea 35 | id: description 36 | attributes: 37 | label: Issue description 38 | description: Describe the problem in detail, or provide relevant screenshots 39 | placeholder: A bug happened! 40 | - type: textarea 41 | id: repro-steps 42 | attributes: 43 | label: Repro steps 44 | description: Summarize the steps that can repro your issue 45 | placeholder: | 46 | 1. 47 | 2. 48 | 3. 49 | - type: textarea 50 | id: logs 51 | attributes: 52 | label: Relevant log output 53 | description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks. 54 | render: shell 55 | -------------------------------------------------------------------------------- /common/session.py: -------------------------------------------------------------------------------- 1 | from common.expired_dict import ExpiredDict 2 | from config import conf 3 | from common.context import Context 4 | 5 | 6 | class Session(object): 7 | all_sessions = ExpiredDict(conf().get("session_expired_duration") or 3600) 8 | 9 | @staticmethod 10 | def build_session_query(context: Context): 11 | """ 12 | build query with conversation history 13 | e.g. [ 14 | {"role": "system", "content": "You are a helpful assistant."}, 15 | {"role": "user", "content": "Who won the world series in 2020?"}, 16 | {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."}, 17 | {"role": "user", "content": "Where was it played?"} 18 | ] 19 | :param query: query content 20 | :param session_id: session id 21 | :return: query content with conversaction 22 | """ 23 | session = Session.all_sessions.get(context.session_id, []) 24 | if len(session) == 0: 25 | system_item = {"role": "system", "content": context.system_prompt} 26 | session.append(system_item) 27 | Session.all_sessions[context.session_id] = session 28 | user_item = {"role": "user", "content": context.query} 29 | session.append(user_item) 30 | return session 31 | 32 | @staticmethod 33 | def save_session(answer, session_id, total_tokens): 34 | max_tokens = conf().get("max_tokens") 35 | session = Session.all_sessions.get(session_id) 36 | if session: 37 | # append conversation 38 | gpt_item = {"role": "assistant", "content": answer} 39 | session.append(gpt_item) 40 | 41 | # discard exceed limit conversation 42 | Session.discard_exceed_conversation(session, max_tokens, total_tokens) 43 | 44 | @staticmethod 45 | def discard_exceed_conversation(session, max_tokens, total_tokens): 46 | dec_tokens = int(total_tokens) 47 | while dec_tokens > max_tokens: 48 | # pop first conversation 49 | if len(session) > 3: 50 | session.pop(1) 51 | session.pop(1) 52 | else: 53 | break 54 | dec_tokens = dec_tokens - max_tokens 55 | 56 | @staticmethod 57 | def clear_session(session_id): 58 | Session.all_sessions[session_id] = [] 59 | 60 | @staticmethod 61 | def clear_all_session(): 62 | Session.all_sessions.clear() 63 | -------------------------------------------------------------------------------- /bot/litellm.py: -------------------------------------------------------------------------------- 1 | from bot.chatgpt import ChatGPTBot 2 | import openai 3 | import litellm 4 | from litellm import completion 5 | from utils.log import logger 6 | from config import conf 7 | 8 | 9 | class LiteLLMChatGPTBot(ChatGPTBot): 10 | def __init__(self): 11 | api_key = conf().get("openai_api_key") 12 | model = conf().get("model", "gpt-3.5-turbo") 13 | api_base = conf().get("openai_api_base") 14 | proxy = conf().get("proxy") 15 | 16 | if model in litellm.cohere_models: 17 | litellm.cohere_key = api_key 18 | elif model in litellm.anthropic_models: 19 | litellm.anthropic_key = api_key 20 | else: 21 | litellm.openai_key = api_key 22 | 23 | if api_base: 24 | litellm.api_base = api_base 25 | if proxy: 26 | openai.proxy = proxy 27 | self.name = self.__class__.__name__ 28 | self.args = { 29 | "model": model, 30 | "temperature": conf().get("temperature"), 31 | } 32 | 33 | def reply_text(self, session): 34 | try: 35 | response = completion( 36 | messages=session, 37 | top_p=1.0, 38 | frequency_penalty=0.0, 39 | presence_penalty=0.0, 40 | **self.args, 41 | ) 42 | return { 43 | "total_tokens": response["usage"]["total_tokens"], 44 | "completion_tokens": response["usage"]["completion_tokens"], 45 | "content": response.choices[0]["message"]["content"], 46 | } 47 | except Exception as e: 48 | result = {"completion_tokens": 0, "content": "Please ask me again"} 49 | if isinstance(e, openai.error.RateLimitError): 50 | logger.warn(f"[{self.name}] RateLimitError: {e}") 51 | result["content"] = "Ask too frequently, please try again in 20s" 52 | elif isinstance(e, openai.error.APIConnectionError): 53 | logger.warn(f"[{self.name}] APIConnectionError: {e}") 54 | result[ 55 | "content" 56 | ] = "I cannot connect the server, please check the network and try again" 57 | elif isinstance(e, openai.error.Timeout): 58 | logger.warn(f"[{self.name}] Timeout: {e}") 59 | result["content"] = "I didn't receive your message, please try again" 60 | elif isinstance(e, openai.error.APIError): 61 | logger.warn(f"[{self.name}] APIError: {e}") 62 | else: 63 | logger.exception(f"[{self.name}] Exception: {e}") 64 | return result 65 | -------------------------------------------------------------------------------- /utils/api.py: -------------------------------------------------------------------------------- 1 | from utils import const 2 | from utils.gen import gen_id 3 | import requests 4 | import json 5 | from utils.log import logger 6 | from utils.const import MessageType 7 | 8 | 9 | def fetch(path, data): 10 | base_data = { 11 | "id": gen_id(), 12 | "type": "null", 13 | "roomid": "null", 14 | "wxid": "null", 15 | "content": "null", 16 | "nickname": "null", 17 | "ext": "null", 18 | } 19 | base_data.update(data) 20 | url = f"http://{const.IP}:{const.PORT}/{path}" 21 | response = requests.post(url, json={"para": base_data}, timeout=5) 22 | return response.json() 23 | 24 | 25 | def get_personal_info(): 26 | path = "/api/get_personal_info" 27 | data = { 28 | "type": MessageType.PERSONAL_INFO.value, 29 | "content": "op:personal info", 30 | } 31 | try: 32 | response = fetch(path, data) 33 | content = json.loads(response["content"]) 34 | logger.info( 35 | f""" 36 | wechat login info: 37 | 38 | nickName: {content['wx_name']} 39 | account: {content['wx_code']} 40 | wechatId: {content['wx_id']} 41 | startTime: {response['time']} 42 | """ 43 | ) 44 | return content 45 | except Exception as e: 46 | logger.error("Get personal info failed!") 47 | logger.exception(e) 48 | 49 | 50 | # get sender's nickname in group chat 51 | def get_sender_name(room_id, sender_id): 52 | path = "api/getmembernick" 53 | data = { 54 | "type": MessageType.CHATROOM_MEMBER_NICK.value, 55 | "wxid": sender_id, 56 | "roomid": room_id or "null", 57 | } 58 | response = fetch(path, data) 59 | return json.loads(response["content"])["nick"] 60 | 61 | 62 | def send_txt(msg, wx_id): 63 | path = "api/sendtxtmsg" 64 | data = { 65 | "type": MessageType.TXT_MSG.value, 66 | "content": msg, 67 | "wxid": wx_id, 68 | } 69 | response = fetch(path, data) 70 | if response["status"] == const.SUCCESS: 71 | logger.info("text sent successfully") 72 | else: 73 | logger.error(f"[Server Error]: {response.text}") 74 | 75 | 76 | def send_image(img_path, wx_id): 77 | path = "api/sendpic" 78 | data = { 79 | "type": MessageType.PIC_MSG.value, 80 | "content": img_path, 81 | "wxid": wx_id, 82 | } 83 | response = fetch(path, data) 84 | if response["status"] == const.SUCCESS: 85 | logger.info("image sent successfully") 86 | else: 87 | logger.error(f"[Server Error]: {response.text}") 88 | 89 | 90 | def send_file(file_path, wx_id): 91 | path = "api/sendattatch" 92 | data = { 93 | "type": MessageType.ATTACH_FILE.value, 94 | "content": file_path, 95 | "wxid": wx_id, 96 | } 97 | response = fetch(path, data) 98 | if response["status"] == const.SUCCESS: 99 | logger.info("file sent successfully") 100 | else: 101 | logger.error(f"[Server Error]: {response.text}") 102 | -------------------------------------------------------------------------------- /plugins/README.md: -------------------------------------------------------------------------------- 1 | ## Plugin System 2 | 3 | Support personalized plugin extensions, you can easily integrate the functions you want 4 | 5 | ## Usage 6 | 7 | 1. Find available plugins [here](source.json), and pick the plugin you want 8 | 2. Add them in your `config.json` like this: 9 | 10 | ```bash 11 | "plugins": [ 12 | { 13 | "name": "tiktok", // plugin name, this is required 14 | "command": "#tiktok" // plugin configs, you can find related docs in the plugin repo 15 | ... 16 | } 17 | ] 18 | ``` 19 | 20 | And then you are done, nothing else to do! Just play with your plugins! 21 | 22 | ## Contributing 23 | 24 | ### 1. Create your plugin repo 25 | 26 | > Maintain the plugin in your own independent GitHub repository. 27 | 28 | Take this [sample plugin](https://github.com/iuiaoin/plugin_tiktok) as an example, your plugin repo should be like: 29 | 30 | ``` 31 | tiktok 32 | ├──.gitignore 33 | ├── __init__.py 34 | ├── tiktok.py 35 | ├── requirements.txt 36 | └── README.md 37 | ``` 38 | 39 | ### 2. Write plugin class 40 | 41 | Use `@register` decorator to register plugin and extend basic `Plugin` class 42 | 43 | ```python 44 | @register 45 | class TikTok(Plugin): 46 | name = "tiktok" # name is required and must be the same as in source.json 47 | ``` 48 | 49 | ### 3. Implement abstract methods 50 | 51 | There're four abstract methods in `Plugin` class need to be implemented, and the three hook methods will take the `Event` object as the parameter 52 | 53 | - `did_receive_message`: will be called as soon as received the message, its Event contains `channel` and `message` 54 | - `will_generate_reply`: will be called before the reply is generated, its Event contains `channel`, `message` and `context` 55 | - `will_decorate_reply`: will be called before decorate the reply, its Event contains `channel`, `message`, `context` and `reply` 56 | - `will_send_reply`: will be called before sending the reply, its Event contains `channel`, `message`, `context` and decorated `reply` 57 | - `help`: will be used to show help docs to users by `#help ` command 58 | 59 | You can modify the `context` and `reply` to change the default behavior, and call `event action method` to decide whether to continue the plugin chain or whether to execute the default logic. 60 | 61 | - `event.proceed()`: proceed the plugin chain 62 | - `event.stop()`: stop the plugin chain 63 | - `event.bypass()`: bypass the plugin chain and default logic 64 | 65 | Here's an example: 66 | 67 | ```python 68 | def did_receive_message(self, event: Event): 69 | pass 70 | 71 | def will_generate_reply(self, event: Event): 72 | query = event.context.query 73 | if query == self.config.get("command"): # instance will get plugin configs when inits 74 | event.reply = self.reply() # modify the reply 75 | event.bypass() # bypass the plugin chain and default logic 76 | 77 | def will_decorate_reply(self, event: Event): 78 | pass 79 | 80 | def will_send_reply(self, event: Event): 81 | pass 82 | 83 | def help(self, **kwargs) -> str: 84 | return "Use the command #tiktok(or whatever you like set with command field in the config) to get a wonderful video" 85 | ``` 86 | 87 | ### 4. Test your plugin 88 | 89 | Run and test the plugin to make sure it works as you expected 90 | 91 | ### 5. Add to source.json 92 | 93 | After testing, you can add your plugin to [source.json](source.json). When the app starts, it will automatically check the plugin configured in config.json and also refer to source.json to install it. 94 | 95 | ```json 96 | { 97 | "tiktok": { 98 | "repo": "https://github.com/iuiaoin/plugin_tiktok.git", 99 | "desc": "A plugin show you short videos with pretty girls" 100 | } 101 | } 102 | ``` 103 | -------------------------------------------------------------------------------- /bot/chatgpt.py: -------------------------------------------------------------------------------- 1 | import openai 2 | from config import conf 3 | from utils.log import logger 4 | from common.session import Session 5 | from common.reply import Reply, ReplyType 6 | from common.context import ContextType, Context 7 | 8 | 9 | class ChatGPTBot: 10 | def __init__(self): 11 | openai.api_key = conf().get("openai_api_key") 12 | api_base = conf().get("openai_api_base") 13 | proxy = conf().get("proxy") 14 | if api_base: 15 | openai.api_base = api_base 16 | if proxy: 17 | openai.proxy = proxy 18 | self.name = self.__class__.__name__ 19 | self.args = { 20 | "model": conf().get("model"), 21 | "temperature": conf().get("temperature"), 22 | } 23 | 24 | def reply(self, context: Context) -> Reply: 25 | query = context.query 26 | logger.info(f"[{self.name}] Query={query}") 27 | if context.type == ContextType.CREATE_IMAGE: 28 | return self.reply_img(query) 29 | else: 30 | session_id = context.session_id 31 | session = Session.build_session_query(context) 32 | response = self.reply_text(session) 33 | logger.info(f"[{self.name}] Response={response['content']}") 34 | if response["completion_tokens"] > 0: 35 | Session.save_session( 36 | response["content"], session_id, response["total_tokens"] 37 | ) 38 | return Reply(ReplyType.TEXT, response["content"]) 39 | 40 | def reply_img(self, query) -> Reply: 41 | create_image_size = conf().get("create_image_size", "512x512") 42 | create_image_model = conf().get("create_image_model", "dall-e-3") 43 | create_image_style = conf().get("create_image_style", "vivid") 44 | create_image_quality = conf().get("create_image_quality", "standard") 45 | 46 | try: 47 | response = openai.Image.create(prompt=query, model=create_image_model, n=1, size=create_image_size, 48 | style=create_image_style, quality=create_image_quality) 49 | image_url = response["data"][0]["url"] 50 | logger.info(f"[{self.name}] Image={image_url}") 51 | return Reply(ReplyType.IMAGE, image_url) 52 | except Exception as e: 53 | logger.error(f"[{self.name}] Create image failed: {e}") 54 | return Reply(ReplyType.TEXT, "Image created failed") 55 | 56 | def reply_text(self, session): 57 | try: 58 | response = openai.ChatCompletion.create( 59 | messages=session, 60 | top_p=1.0, 61 | frequency_penalty=0.0, 62 | presence_penalty=0.0, 63 | **self.args, 64 | ) 65 | return { 66 | "total_tokens": response["usage"]["total_tokens"], 67 | "completion_tokens": response["usage"]["completion_tokens"], 68 | "content": response.choices[0]["message"]["content"], 69 | } 70 | except Exception as e: 71 | result = {"completion_tokens": 0, "content": "Please ask me again"} 72 | if isinstance(e, openai.error.RateLimitError): 73 | logger.warn(f"[{self.name}] RateLimitError: {e}") 74 | result["content"] = "Ask too frequently, please try again in 20s" 75 | elif isinstance(e, openai.error.APIConnectionError): 76 | logger.warn(f"[{self.name}] APIConnectionError: {e}") 77 | result[ 78 | "content" 79 | ] = "I cannot connect the server, please check the network and try again" 80 | elif isinstance(e, openai.error.Timeout): 81 | logger.warn(f"[{self.name}] Timeout: {e}") 82 | result["content"] = "I didn't receive your message, please try again" 83 | elif isinstance(e, openai.error.APIError): 84 | logger.warn(f"[{self.name}] APIError: {e}") 85 | else: 86 | logger.exception(f"[{self.name}] Exception: {e}") 87 | return result 88 | -------------------------------------------------------------------------------- /plugins/manager.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | import os 4 | import importlib 5 | from common.singleton import singleton 6 | from config import conf 7 | from utils.log import logger 8 | from typing import Set 9 | from dulwich import porcelain 10 | from utils.package import install_file 11 | from plugins.plugin import Plugin 12 | from common.emitter import Emitter 13 | from plugins.event import Event, EventType 14 | from plugins.built_in import Cmd 15 | 16 | 17 | @singleton 18 | class PluginManager(Emitter): 19 | def __init__(self): 20 | super().__init__() 21 | self._plugins = {} 22 | self._configs = {} 23 | self.built_in(self._plugins) 24 | 25 | def register(self, cls: Plugin): 26 | name = cls.name 27 | config = self._configs.get(name) 28 | self._plugins[name] = cls(config) 29 | return cls 30 | 31 | def load_plugins(self): 32 | new_plugins = self.check_plugins() 33 | failed_plugins = self.install_plugins(new_plugins) 34 | all_plugins = conf().get("plugins") or [] 35 | plugins = [ 36 | plugin for plugin in all_plugins if plugin["name"] not in failed_plugins 37 | ] 38 | self.import_plugins(plugins) 39 | self.activate_plugins(plugins) 40 | 41 | def check_plugins(self) -> Set[str]: 42 | logger.info("Checking plugins...") 43 | plugins = conf().get("plugins") or [] 44 | existed_plugins = self.get_existed() 45 | new_plugins = set() 46 | for plugin in plugins: 47 | if plugin["name"] not in existed_plugins: 48 | new_plugins.add(plugin["name"]) 49 | return new_plugins 50 | 51 | def install_plugins(self, plugins: Set[str]) -> Set[str]: 52 | failed_plugins = set() 53 | if len(plugins) == 0: 54 | logger.info("All plugins are installed") 55 | return failed_plugins 56 | else: 57 | logger.info(f"Installing plugins: {plugins}") 58 | source = dict() 59 | try: 60 | with open("./plugins/source.json", "r", encoding="utf-8") as f: 61 | source = json.load(f) 62 | except Exception as e: 63 | logger.error(f"Invalid plugin source: {e}") 64 | return plugins 65 | for plugin_name in plugins: 66 | if plugin_name in source: 67 | repo = source[plugin_name]["repo"] 68 | match = re.match( 69 | r"^(https?:\/\/|git@)([^\/:]+)[\/:]([^\/:]+)\/(.+).git$", repo 70 | ) 71 | if not match: 72 | failed_plugins.add(plugin_name) 73 | logger.error(f"Invalid repo: {repo}") 74 | else: 75 | try: 76 | dirname = os.path.join("./plugins", plugin_name) 77 | porcelain.clone(repo, dirname, checkout=True) 78 | dependency_path = os.path.join(dirname, "requirements.txt") 79 | if os.path.exists(dependency_path): 80 | logger.info( 81 | f"Installing dependencies for {plugin_name}" 82 | ) 83 | install_file(dependency_path) 84 | logger.info(f"Install plugin {plugin_name} successfully") 85 | except Exception as e: 86 | failed_plugins.add(plugin_name) 87 | logger.error(f"Fail to install plugin {plugin_name}: {e}") 88 | else: 89 | failed_plugins.add(plugin_name) 90 | logger.error(f"Plugin {plugin_name} is not found in source.json") 91 | return failed_plugins 92 | 93 | def get_existed(self) -> Set[str]: 94 | plugins_dir = os.path.abspath("./plugins") 95 | existed_plugins = set() 96 | for plugin_name in os.listdir(plugins_dir): 97 | plugin_path = os.path.join(plugins_dir, plugin_name) 98 | if os.path.isdir(plugin_path): 99 | # detect __init__.py in the plugin directory 100 | module_path = os.path.join(plugin_path, "__init__.py") 101 | if os.path.isfile(module_path): 102 | existed_plugins.add(plugin_name) 103 | return existed_plugins 104 | 105 | def import_plugins(self, plugins: list) -> None: 106 | for plugin in plugins: 107 | try: 108 | self._configs[plugin["name"]] = plugin 109 | importlib.import_module(f"plugins.{plugin['name']}") 110 | except Exception as e: 111 | logger.exception(f"Failed to load plugin {plugin['name']}: {e}") 112 | 113 | def activate_plugins(self, plugins: list) -> None: 114 | for plugin in plugins: 115 | instance = self._plugins.get(plugin["name"]) 116 | if instance is not None: 117 | self.on(EventType.DID_RECEIVE_MESSAGE, instance.did_receive_message) 118 | self.on(EventType.WILL_GENERATE_REPLY, instance.will_generate_reply) 119 | self.on(EventType.WILL_DECORATE_REPLY, instance.will_decorate_reply) 120 | self.on(EventType.WILL_SEND_REPLY, instance.will_send_reply) 121 | 122 | def emit(self, event: Event) -> Event: 123 | listeners = self.__events__.get(event.type) 124 | if listeners is not None and len(listeners) > 0: 125 | for fn in listeners: 126 | if event.is_proceed: 127 | fn(event) 128 | else: 129 | break 130 | return event 131 | 132 | def built_in(self, plugins: dict): 133 | self.on(EventType.WILL_GENERATE_REPLY, Cmd(plugins).will_generate_reply) 134 | -------------------------------------------------------------------------------- /README_ZH.md: -------------------------------------------------------------------------------- 1 |

欢迎使用 wechat-gptbot 👋

2 |
3 | 4 |
5 |

6 | Version 7 | 8 | License: MIT 9 | 10 | 11 | Python Version 15 | 16 | 17 | litellm 21 | 22 |

23 | 24 | > 基于 ChatGPT 的微信机器人,无风险且非常稳定! 🚀 25 | > [English](README.md) | 中文文档 26 | 27 | ## 🎤 简介 28 | 29 | > 我在使用基于 `itchat` 和 `wechaty` 的聊天机器人时,经常会遇到扫码登录账号被限制的风险。参考 [#158](https://github.com/AutumnWhj/ChatGPT-wechat-bot/issues/158). 有没有安全的方法来使用微信机器人呢? 在这里~ 30 | 31 | ## 🌟 特性 32 | 33 | - [x] **非常稳定:** 基于 windows hook 实现,不用担心微信账号被限制的风险 34 | - [x] **基础对话:** 私聊及群聊的消息智能回复,支持多轮会话上下文记忆,支持 GPT-3,GPT-3.5,GPT-4, Claude-2, Claude Instant-1, Command Nightly, Palm models 和其他在 [litellm](https://litellm.readthedocs.io/en/latest/supported/) 中的模型 35 | - [x] **图片生成:** 支持图片生成, 目前暂时只支持 Dell-E 模型 36 | - [x] **灵活配置:** 支持 prompt 设置, proxy, 命令设置等. 37 | - [x] **插件系统:** 支持个性化插件扩展,您可以轻松集成您想要的功能 38 | 39 | ## 📝 更新日志 40 | 41 | > **2023.07.13:** 引入`插件系统`,让 gptbot 拥有更多可能性,且易于扩展 [#46](https://github.com/iuiaoin/wechat-gptbot/pull/46). 这是第一个好玩的插件: [tiktok](https://github.com/iuiaoin/plugin_tiktok), 赶快来尝试一下吧! 另请参阅此处的[文档](plugins/README.md)来了解用法和如何贡献~ 42 | 43 | ## 🚀 快速开始 44 | 45 | ### 环境 46 | 47 | 支持 Windows 系统(以后可能会基于 [sandbox](https://github.com/huan/docker-wechat) 支持 Linux) 同时需要安装 `Python` 48 | 49 | > 建议 Python 版本在 3.8.X~3.10.X 之间, 推荐 3.10 版本 50 | 51 | #### 1. 克隆项目 52 | 53 | ```bash 54 | git clone https://github.com/iuiaoin/wechat-gptbot && cd wechat-gptbot 55 | ``` 56 | 57 | #### 2. 安装依赖 58 | 59 | ```bash 60 | pip install -r requirements.txt 61 | ``` 62 | 63 | ### 配置 64 | 65 | 配置文件的模板在根目录的 `config.template.json` 中,需复制该模板创建最终生效的 `config.json` 文件 66 | 67 | ```bash 68 | cp config.template.json config.json 69 | ``` 70 | 71 | 在 `config.json` 中填入配置,以下是对默认配置的说明,可根据需要进行自定义修改: 72 | 73 | ```bash 74 | { 75 | "openai_api_key": "YOUR API SECRET KEY", # 填入你的 OpenAI API Key 76 | "model": "gpt-3.5-turbo", # 要使用的模型 ID, 支持 gpt-3.5-turbo, gpt-4, gpt-4-32k 等 77 | "use_azure_chatgpt": false, # 是否使用 Azure OpenAI API 78 | "azure_deployment_id": "", # Azure 模型部署名称 79 | "role_desc": "You are a helpful assistant.", # 角色描述, 作为系统 prompt 80 | "session_expired_duration": 3600, # 对话记忆的保留时长 81 | "max_tokens": 1000, # 对话记忆字符的最大 token 数量 82 | "temperature": 0.9, # 在 0 到 2 之间. 更高的数值会使 chatGPT 的输出更加随机, 而较低的数值会使其更加稳定 83 | "proxy": "127.0.0.1:3000", # 代理客户端的ip和端口 84 | "openai_api_base": "", # openai 服务使用的 api url 85 | "create_image_size": "256x256", # Dall-E 图片大小, 支持 256x256, 512x512, 1024x1024 86 | "create_image_prefix": ["draw", "paint", "imagine"], # 开启图片回复的前缀 87 | "clear_current_session_command": "#clear session", # 清楚当前对话记忆 88 | "clear_all_sessions_command": "#clear all sessions", # 清楚所有对话记忆 89 | "chat_group_session_independent": false, # 群聊中的用户会话上下文是否是各自独立的 90 | "single_chat_prefix": ["bot", "@bot"], # 在私聊中以“bot”或“@bot”开始对话以触发机器人,如果你想让bot一直处于激活状态,请将其留空 91 | "group_chat_reply_prefix": "", # 群聊中的回复前缀, 可用来区分机器人/真人 92 | "group_chat_reply_suffix": "", # 群聊中的回复后缀, \n 可换行 93 | "single_chat_reply_prefix": "", # 私聊中的回复前缀, 可用来区分机器人/真人 94 | "single_chat_reply_suffix": "", # 私聊中的回复后缀, \n 可换行 95 | "query_key_command": "#query key", # 查询 api key 使用情况 96 | "recent_days": 5 # 查询最近的天 97 | "plugins": [{ "name": , other configs }]# 添加你喜爱的插件 98 | "openai_sensitive_id": "" # 查询api key时使用 99 | } 100 | ``` 101 | 102 | openai_sensitive_id获取:登录https://platform.openai.com/overview页面,按F12找到如下值,维护到配置中 103 | ![image](https://github.com/maq917/wechat-gptbot/assets/126306230/36b146dd-649f-4b91-9905-32875f3455b2) 104 | 105 | 106 | 107 | ### 运行 108 | 109 | #### 1. 准备 110 | 111 | > 我们需要特定的微信版本和 dll 来使 windows hook 正常生效。 112 | 113 | 1. 从 [release](https://github.com/iuiaoin/wechat-gptbot/releases/tag/v1.0.0) 中下载相关文件 114 | 2. 安装 WeChatSetup 3.2.1.121 版本并且登录 115 | 3. 运行微信 dll 注入器 116 | 4. 选择 3.2.1.121-LTS.dll 并且 点击 `注入dll`, 如果成功的话你将会看到: "成功注入: 3.2.1.121-LTS.dll" 117 | 118 | #### 2. 运行命令 119 | 120 | ```bash 121 | python app.py 122 | ``` 123 | 124 | 125 | 126 | 噹噹! 享受你的探索之旅吧~ 127 | 128 | ## ✨ 慷慨支持者 129 | 130 | > 非常感谢您的支持, 这将是我最大的动力! 131 | 132 | 133 | 134 | 135 | 136 | ## 🤝 为项目添砖加瓦 137 | 138 | 欢迎提出 Contributions, issues 与 feature requests!
随时查看 [issues page](https://github.com/iuiaoin/wechat-gptbot/issues). 139 | 140 | ## 🙏 感谢支持 141 | 142 | 如果你喜欢这个项目的话,请为它点上一颗 ⭐️ 143 | 144 | ## 📢 声明 145 | 146 | WeChatSetup 安装包来自于 [wechat-windows-versions](https://github.com/tom-snow/wechat-windows-versions/releases), 微信 dll 注入器来自于 [wechat-bot](https://github.com/cixingguangming55555/wechat-bot), 所以你可以放心使用它。还要感谢两个 repo 的所有者的贡献。 147 | 148 | ## 💖 赞助 149 | 150 | > 在 **[爱发电](https://afdian.net/a/declan)** 上成为赞助者. 你的名字将会被特别列在慷慨支持者下~ 151 | 152 | 153 | 154 | 155 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

Welcome to wechat-gptbot 👋

2 |
3 | 4 |
5 |

6 | Version 7 | 8 | License: MIT 9 | 10 | 11 | Python Version 15 | 16 | 17 | litellm 21 | 22 |

23 | 24 | > A wechat robot based on ChatGPT with no risk, very stable! 🚀 25 | > English | [中文文档](README_ZH.md) 26 | 27 | ## 🎤 Introduction 28 | 29 | > When I use bots based on `itchat` and `wechaty`, I often encounter the risk of account restrictions when scanning codes to log in. Refer to [#158](https://github.com/AutumnWhj/ChatGPT-wechat-bot/issues/158). Is there a safe way to use wechat bots? Here it is~ 30 | 31 | ## 🌟 Features 32 | 33 | - [x] **Extremely Stable:** Implement based on windows hook, no worry about risk of wechat account restriction 34 | - [x] **Basic Conversation:** Smart reply for private chat and group chat, support multiple rounds of session context memory, support GPT-3, GPT-3.5, GPT-4, Claude-2, Claude Instant-1, Command Nightly, Palm models and other models in [litellm](https://litellm.readthedocs.io/en/latest/supported/) 35 | - [x] **Image Generation:** Support image generation, Dell-E only model for now 36 | - [x] **Flexible Configuration:** Support prompt settings, proxy, command settings and etc. 37 | - [x] **Plugin System:** Support personalized plugin extensions, you can easily integrate the functions you want 38 | 39 | ## 📝 Changelog 40 | 41 | > **2023.07.13:** Introduce `plugin system` to make gptbot have more possibilities and easy to expand [#46](https://github.com/iuiaoin/wechat-gptbot/pull/46). Here's the first interesting plugin: [tiktok](https://github.com/iuiaoin/plugin_tiktok), try it and have fun! Also refer to [docs](plugins/README.md) to learn the usage and how to contribute~ 42 | 43 | ## 🚀 Getting Start 44 | 45 | ### Environment 46 | 47 | Support Windows system(probably support Linux in the future based on [sandbox](https://github.com/huan/docker-wechat)) and `Python` needs to be installed at the same time 48 | 49 | > It is recommended that the Python version be between 3.8.X~3.10.X, version 3.10 is perfect 50 | 51 | #### 1. Clone repo 52 | 53 | ```bash 54 | git clone https://github.com/iuiaoin/wechat-gptbot && cd wechat-gptbot 55 | ``` 56 | 57 | #### 2. Install dependencies 58 | 59 | ```bash 60 | pip install -r requirements.txt 61 | ``` 62 | 63 | ### Configuration 64 | 65 | `config.template.json` in the root directory contains the configs template, you need to copy the template to create the final effective `config.json` 66 | 67 | ```bash 68 | cp config.template.json config.json 69 | ``` 70 | 71 | Then fill in the configuration in `config.json`, the following is the description of the default configuration, which can be customized according to the needs: 72 | 73 | ```bash 74 | { 75 | "openai_api_key": "YOUR API SECRET KEY", # Fill in your OpenAI API Key 76 | "model": "gpt-3.5-turbo", # ID of the model to use, support gpt-3.5-turbo, gpt-4, gpt-4-32k etc. 77 | "use_azure_chatgpt": false, # Whether use Azure OpenAI API 78 | "azure_deployment_id": "", # Azure model deployment name 79 | "role_desc": "You are a helpful assistant.", # Role description as system prompt 80 | "session_expired_duration": 3600, # Session memory kept duration 81 | "max_tokens": 1000, # Max tokens of characters for session memory 82 | "temperature": 0.9, # Between 0 and 2. Higher values make the output more random, while lower values more focused 83 | "proxy": "127.0.0.1:3000", # Proxy client ip and port 84 | "openai_api_base": "", # api url used by openai service 85 | "create_image_size": "256x256", # Dall-E image size, support 256x256, 512x512, 1024x1024 86 | "create_image_prefix": ["draw", "paint", "imagine"], # Text prefix for image generation 87 | "clear_current_session_command": "#clear session", # Clear current session 88 | "clear_all_sessions_command": "#clear all sessions", # Clear all sessions 89 | "chat_group_session_independent": false, # Whether sessions of users are independent in chat group 90 | "single_chat_prefix": ["bot", "@bot"], # Start conversation with "bot" or "@bot" in single chat to trigger the bot, leave it empty if you wanna make the bot active all the time 91 | "group_chat_reply_prefix": "", # Reply prefix in group chat 92 | "group_chat_reply_suffix": "", # Reply suffix in group chat 93 | "single_chat_reply_prefix": "", # Reply prefix in single chat 94 | "single_chat_reply_suffix": "", # Reply suffix in single chat 95 | "query_key_command": "#query key" # Querying the usage of the api key 96 | "recent_days": 5 # The usage in days 97 | "plugins": [{ "name": , other configs }]# Add the your favorite plugins 98 | } 99 | ``` 100 | 101 | ### Running 102 | 103 | #### 1. Prepare 104 | 105 | > We need the specific wechat version and dll to make windows hook work. 106 | 107 | 1. Download assets from the [release](https://github.com/iuiaoin/wechat-gptbot/releases/tag/v1.0.0) 108 | 2. Install WeChatSetup-3.2.1.121.exe and login 109 | 3. Run the wechat-dll-injectorV1.0.3.exe 110 | 4. Select 3.2.1.121-LTS.dll and click `inject dll`, you will see "Successfully injected: 3.2.1.121-LTS.dll" 111 | 112 | #### 2. Run command 113 | 114 | ```bash 115 | python app.py 116 | ``` 117 | 118 | 119 | 120 | Voilà! Enjoy your exploring journey~ 121 | 122 | ## ✨ Generous Backers 123 | 124 | > Thank you very much for your support, it will be my biggest motivation! 125 | 126 | 127 | 128 | 129 | 130 | ## 🤝 Contributing 131 | 132 | Contributions, issues and feature requests are welcome!
Feel free to 133 | check [issues page](https://github.com/iuiaoin/wechat-gptbot/issues). 134 | 135 | ## 🙏 Show your support 136 | 137 | Give a ⭐️ if you like this project! 138 | 139 | ## 📢 Announcements 140 | 141 | The WeChatSetup is coming from [wechat-windows-versions](https://github.com/tom-snow/wechat-windows-versions/releases) and wechat-dll-injector from [wechat-bot](https://github.com/cixingguangming55555/wechat-bot), so you can use it without concern. Also thanks the two repo's owners for their contributions. 142 | 143 | ## 💖 Sponsor 144 | 145 | > Become a Sponsor on **[AFDIAN](https://afdian.net/a/declan)**. Your name will be specifically listed under Generous Backers~ 146 | 147 | 148 | 149 | 150 | -------------------------------------------------------------------------------- /channel/wechat.py: -------------------------------------------------------------------------------- 1 | import json 2 | import warnings 3 | import websocket 4 | from bs4 import BeautifulSoup 5 | import requests 6 | from utils.log import logger 7 | from utils import const 8 | import os 9 | from bot.bot import Bot 10 | from common.singleton import singleton 11 | from config import conf 12 | from utils.check import check_prefix, is_wx_account 13 | from common.reply import ReplyType, Reply 14 | from channel.message import Message 15 | from utils.api import get_personal_info, send_image, send_file 16 | from utils.const import MessageType 17 | from utils.serialize import serialize_img, serialize_text, serialize_video 18 | from plugins.manager import PluginManager 19 | from common.context import ContextType, Context 20 | from plugins.event import EventType, Event 21 | from channel.channel import Channel 22 | 23 | 24 | @singleton 25 | class WeChatChannel(Channel): 26 | def __init__(self): 27 | requests.packages.urllib3.disable_warnings() 28 | warnings.filterwarnings("ignore") 29 | os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1" 30 | self.personal_info = get_personal_info() 31 | self.ws = websocket.WebSocketApp( 32 | const.SERVER, 33 | on_open=self.on_open, 34 | on_message=self.on_message, 35 | on_error=self.on_error, 36 | on_close=self.on_close, 37 | ) 38 | 39 | def startup(self): 40 | logger.info("App startup successfully!") 41 | self.ws.run_forever() 42 | 43 | def on_message(self, ws, message): 44 | raw_msg = json.loads(message) 45 | msg_type = raw_msg["type"] 46 | handlers = { 47 | MessageType.AT_MSG.value: self.handle_message, 48 | MessageType.TXT_MSG.value: self.handle_message, 49 | MessageType.PIC_MSG.value: self.handle_message, 50 | MessageType.RECV_PIC_MSG.value: self.handle_message, 51 | MessageType.RECV_TXT_MSG.value: self.handle_message, 52 | MessageType.RECV_TXT_CITE_MSG.value: self.handle_cite_message, 53 | MessageType.HEART_BEAT.value: self.noop, 54 | } 55 | handlers.get(msg_type, logger.info)(raw_msg) 56 | 57 | def noop(self, raw_msg): 58 | pass 59 | 60 | def handle_cite_message(self, raw_msg): 61 | xml_msg = ( 62 | raw_msg["content"]["content"] 63 | .replace("&", "&") 64 | .replace("<", "<") 65 | .replace(">", ">") 66 | ) 67 | soup = BeautifulSoup(xml_msg, "lxml") 68 | cooked_msg = { 69 | "content": soup.select_one("title").text, 70 | "id": raw_msg["id"], 71 | "id1": raw_msg["content"]["id2"], 72 | "id2": "", 73 | "id3": "", 74 | "srvid": raw_msg["srvid"], 75 | "time": raw_msg["time"], 76 | "type": raw_msg["type"], 77 | "wxid": raw_msg["content"]["id1"], 78 | } 79 | self.handle_message(cooked_msg) 80 | 81 | def handle_message(self, raw_msg): 82 | if "wxid" not in raw_msg and raw_msg["status"] == const.SUCCESS: 83 | logger.info("message sent successfully") 84 | return 85 | # ignore message sent by self 86 | if raw_msg["id2"] == self.personal_info["wx_id"]: 87 | logger.info("message sent by self, ignore") 88 | return 89 | msg = Message(raw_msg, self.personal_info) 90 | logger.info(f"message received: {msg}") 91 | e = PluginManager().emit( 92 | Event(EventType.DID_RECEIVE_MESSAGE, {"channel": self, "message": msg}) 93 | ) 94 | if e.is_bypass: 95 | return self.send(e.reply, e.message) 96 | if e.message.is_group: 97 | self.handle_group(e.message) 98 | else: 99 | self.handle_single(e.message) 100 | 101 | def handle_group(self, msg: Message): 102 | session_independent = conf().get("chat_group_session_independent") 103 | context = Context() 104 | context.session_id = msg.sender_id if session_independent else msg.room_id 105 | if msg.is_at: 106 | query = msg.content.replace(f"@{msg.receiver_name}", "", 1).strip() 107 | context.query = query 108 | create_image_prefix = conf().get("create_image_prefix") 109 | match_prefix = check_prefix(query, create_image_prefix) 110 | if match_prefix: 111 | context.type = ContextType.CREATE_IMAGE 112 | self.handle_reply(msg, context) 113 | 114 | def handle_single(self, msg: Message): 115 | # ignore message sent by public/subscription account 116 | if not is_wx_account(msg.sender_id): 117 | logger.info("message sent by public/subscription account, ignore") 118 | return 119 | context = Context() 120 | context.session_id = msg.sender_id 121 | query = msg.content 122 | single_chat_prefix = conf().get("single_chat_prefix") 123 | if single_chat_prefix is not None and len(single_chat_prefix) > 0: 124 | match_chat_prefix = check_prefix(query, single_chat_prefix) 125 | if match_chat_prefix is not None: 126 | query = query.replace(match_chat_prefix, "", 1).strip() 127 | else: 128 | logger.info("your message is not start with single_chat_prefix, ignore") 129 | return 130 | context.query = query 131 | create_image_prefix = conf().get("create_image_prefix") 132 | match_image_prefix = check_prefix(query, create_image_prefix) 133 | if match_image_prefix: 134 | context.type = ContextType.CREATE_IMAGE 135 | self.handle_reply(msg, context) 136 | 137 | def decorate_reply(self, reply: Reply, msg: Message) -> Reply: 138 | if reply.type == ReplyType.TEXT: 139 | group_chat_reply_prefix = conf().get("group_chat_reply_prefix", "") 140 | group_chat_reply_suffix = conf().get("group_chat_reply_suffix", "") 141 | single_chat_reply_prefix = conf().get("single_chat_reply_prefix", "") 142 | single_chat_reply_suffix = conf().get("single_chat_reply_suffix", "") 143 | reply_text = reply.content 144 | if msg.is_group: 145 | reply_text = ( 146 | group_chat_reply_prefix + reply_text + group_chat_reply_suffix 147 | ) 148 | else: 149 | reply_text = ( 150 | single_chat_reply_prefix + reply_text + single_chat_reply_suffix 151 | ) 152 | reply.content = reply_text 153 | return reply 154 | 155 | def handle_reply(self, msg: Message, context: Context): 156 | e1 = PluginManager().emit( 157 | Event( 158 | EventType.WILL_GENERATE_REPLY, 159 | {"channel": self, "message": msg, "context": context}, 160 | ) 161 | ) 162 | if e1.is_bypass: 163 | return self.send(e1.reply, e1.message) 164 | 165 | rawReply = Bot().reply(e1.context) 166 | 167 | e2 = PluginManager().emit( 168 | Event( 169 | EventType.WILL_DECORATE_REPLY, 170 | { 171 | "channel": self, 172 | "message": e1.message, 173 | "context": e1.context, 174 | "reply": rawReply, 175 | }, 176 | ) 177 | ) 178 | if e2.is_bypass: 179 | return self.send(e2.reply, e2.message) 180 | 181 | reply = self.decorate_reply(rawReply, msg) 182 | 183 | e3 = PluginManager().emit( 184 | Event( 185 | EventType.WILL_SEND_REPLY, 186 | { 187 | "channel": self, 188 | "message": e2.message, 189 | "context": e2.context, 190 | "reply": reply, 191 | }, 192 | ) 193 | ) 194 | self.send(e3.reply, e3.message) 195 | 196 | def send(self, reply: Reply, msg: Message): 197 | if reply is None: 198 | return 199 | if reply.type == ReplyType.IMAGE: 200 | img_path = serialize_img(reply.content) 201 | wx_id = msg.room_id if msg.is_group else msg.sender_id 202 | send_image(img_path, wx_id) 203 | elif reply.type == ReplyType.VIDEO: 204 | file_path = serialize_video(reply.content) 205 | wx_id = msg.room_id if msg.is_group else msg.sender_id 206 | send_file(file_path, wx_id) 207 | else: 208 | reply_msg = serialize_text(reply.content, msg) 209 | self.ws.send(reply_msg) 210 | 211 | def on_open(self, ws): 212 | logger.info("[Websocket] connected") 213 | 214 | def on_close(self, ws): 215 | logger.info("[Websocket] disconnected") 216 | 217 | def on_error(self, ws, error): 218 | logger.error(f"[Websocket] Error: {error}") 219 | --------------------------------------------------------------------------------