├── .gitignore ├── requirements.txt ├── .github └── ISSUE_TEMPLATE │ ├── feature_request.md │ └── bug_report.md ├── base ├── chatglm │ ├── __init__.py │ ├── README.MD │ ├── base.json │ ├── tool_registry.py │ ├── comfyUI_api.py │ └── code_kernel.py ├── func_xinghuo_web.py ├── func_tigerbot.py ├── func_news.py ├── func_report_reminder.py ├── func_chengyu.py ├── func_chatgpt.py └── func_chatglm.py ├── constants.py ├── LICENSE ├── configuration.py ├── main.py ├── config.yaml.template ├── job_mgmt.py ├── README.MD └── robot.py /.gitignore: -------------------------------------------------------------------------------- 1 | .* 2 | !.gitignore 3 | !.github/ 4 | 5 | *pyc 6 | __pycache__ 7 | 8 | logs/ 9 | *.log 10 | *.log.* 11 | 12 | config.yaml 13 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | chinese_calendar 2 | lxml 3 | openai==0.27.6 4 | pandas 5 | pyyaml 6 | requests 7 | schedule 8 | pyhandytools 9 | sparkdesk-api==1.3.0 10 | wcferry>=39.0.5.0 11 | websocket 12 | pillow 13 | jupyter_client 14 | zhdate 15 | ipykernel 16 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: 请求添加新功能 3 | about: 提出一个关于本项目新功能 / 新特性的建议 4 | title: "[\U0001F4A1SUG] 一句话描述你希望新增的功能或特性" 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | **你希望添加的功能是否与某个问题相关?** 11 | 关于这个问题的简洁清晰的描述,例如,当 [...] 时,我总是很沮丧。 12 | 13 | **描述你希望的解决方案** 14 | 关于解决方案的简洁清晰的描述。 15 | 16 | **描述你考虑的替代方案** 17 | 关于你考虑的,能实现这个功能的其他替代方案的简洁清晰的描述。 18 | 19 | **其他** 20 | 你可以添加其他任何的资料、链接或者屏幕截图,以帮助我们理解这个新功能。 21 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug 报告 3 | about: 提交一份 bug 报告,帮助 WeChatRobot 变得更好 4 | title: "[\U0001F41BBUG] 用一句话描述您的问题。" 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **描述这个 bug** 11 | 对 bug 作一个清晰简明的描述: 12 | - 想做什么? 13 | - 现在怎么做? 14 | - 遇到什么问题? 15 | 16 | **使用环境(请补全下列信息):** 17 | - 操作系统:【如 Windows 7, Windows 10, Windows Server 2008 等】 18 | - 操作系统版本:【32 位或 64 位】 19 | - Python 版本 【如 3.7.9 32 位,3.8.15 64 位 等】 20 | 21 | **屏幕截图** 22 | 添加屏幕截图以帮助解释您的问题。(可选) 23 | -------------------------------------------------------------------------------- /base/chatglm/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | 4 | class UnsupportedPythonVersionError(Exception): 5 | def __init__(self, error_msg: str): 6 | super().__init__(error_msg) 7 | 8 | 9 | python_version_info = sys.version_info 10 | if not sys.version_info >= (3, 9): 11 | msg = "当前Python版本: " + ".".join(map(str, python_version_info[:3])) + (', 需要python版本 >= 3.9, 前往下载: ' 12 | 'https://www.python.org/downloads/') 13 | raise UnsupportedPythonVersionError(msg) -------------------------------------------------------------------------------- /constants.py: -------------------------------------------------------------------------------- 1 | from enum import IntEnum, unique 2 | 3 | 4 | @unique 5 | class ChatType(IntEnum): 6 | # UnKnown = 0 # 未知, 即未设置 7 | TIGER_BOT = 1 # TigerBot 8 | CHATGPT = 2 # ChatGPT 9 | XINGHUO_WEB = 3 # 讯飞星火 10 | CHATGLM = 4 # ChatGLM 11 | 12 | @staticmethod 13 | def is_in_chat_types(chat_type: int) -> bool: 14 | if chat_type in [ChatType.TIGER_BOT.value, ChatType.CHATGPT.value, 15 | ChatType.XINGHUO_WEB.value, ChatType.CHATGLM.value]: 16 | return True 17 | return False 18 | 19 | @staticmethod 20 | def help_hint() -> str: 21 | return str({member.value: member.name for member in ChatType}).replace('{', '').replace('}', '') 22 | 23 | -------------------------------------------------------------------------------- /base/func_xinghuo_web.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | from sparkdesk_web.core import SparkWeb 4 | 5 | 6 | class XinghuoWeb: 7 | def __init__(self, xhconf=None) -> None: 8 | 9 | self._sparkWeb = SparkWeb( 10 | cookie=xhconf["cookie"], 11 | fd=xhconf["fd"], 12 | GtToken=xhconf["GtToken"], 13 | ) 14 | self._chat = self._sparkWeb.create_continuous_chat() 15 | # 如果有提示词 16 | if xhconf["prompt"]: 17 | self._chat.chat(xhconf["prompt"]) 18 | 19 | def __repr__(self): 20 | return 'XinghuoWeb' 21 | 22 | @staticmethod 23 | def value_check(conf: dict) -> bool: 24 | if conf: 25 | return all(conf.values()) 26 | return False 27 | 28 | def get_answer(self, msg: str, sender: str = None) -> str: 29 | answer = self._chat.chat(msg) 30 | return answer 31 | 32 | 33 | if __name__ == "__main__": 34 | from configuration import Config 35 | c = Config() 36 | xinghuo = XinghuoWeb(c.XINGHUO_WEB) 37 | rsp = xinghuo.get_answer("你还活着?") 38 | print(rsp) 39 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Changhua 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /configuration.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | 4 | import logging.config 5 | import os 6 | import shutil 7 | 8 | import yaml 9 | 10 | 11 | class Config(object): 12 | def __init__(self) -> None: 13 | self.reload() 14 | 15 | def _load_config(self) -> dict: 16 | pwd = os.path.dirname(os.path.abspath(__file__)) 17 | try: 18 | with open(f"{pwd}/config.yaml", "rb") as fp: 19 | yconfig = yaml.safe_load(fp) 20 | except FileNotFoundError: 21 | shutil.copyfile(f"{pwd}/config.yaml.template", f"{pwd}/config.yaml") 22 | with open(f"{pwd}/config.yaml", "rb") as fp: 23 | yconfig = yaml.safe_load(fp) 24 | 25 | return yconfig 26 | 27 | def reload(self) -> None: 28 | yconfig = self._load_config() 29 | logging.config.dictConfig(yconfig["logging"]) 30 | self.GROUPS = yconfig["groups"]["enable"] 31 | self.NEWS = yconfig["news"]["receivers"] 32 | self.REPORT_REMINDERS = yconfig["report_reminder"]["receivers"] 33 | 34 | self.CHATGPT = yconfig.get("chatgpt", {}) 35 | self.TIGERBOT = yconfig.get("tigerbot", {}) 36 | self.XINGHUO_WEB = yconfig.get("xinghuo_web", {}) 37 | self.CHATGLM = yconfig.get("chatglm", {}) 38 | -------------------------------------------------------------------------------- /base/func_tigerbot.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | 4 | import logging 5 | 6 | import requests 7 | from random import randint 8 | 9 | 10 | class TigerBot: 11 | def __init__(self, tbconf=None) -> None: 12 | self.LOG = logging.getLogger(__file__) 13 | self.tburl = "https://api.tigerbot.com/bot-service/ai_service/gpt" 14 | self.tbheaders = {"Authorization": "Bearer " + tbconf["key"]} 15 | self.tbmodel = tbconf["model"] 16 | self.fallback = ["滚", "快滚", "赶紧滚"] 17 | 18 | def __repr__(self): 19 | return 'TigerBot' 20 | 21 | @staticmethod 22 | def value_check(conf: dict) -> bool: 23 | if conf: 24 | return all(conf.values()) 25 | return False 26 | 27 | def get_answer(self, msg: str, sender: str = None) -> str: 28 | payload = { 29 | "text": msg, 30 | "modelVersion": self.tbmodel 31 | } 32 | rsp = "" 33 | try: 34 | rsp = requests.post(self.tburl, headers=self.tbheaders, json=payload).json() 35 | rsp = rsp["data"]["result"][0] 36 | except Exception as e: 37 | self.LOG.error(f"{e}: {payload}\n{rsp}") 38 | idx = randint(0, len(self.fallback) - 1) 39 | rsp = self.fallback[idx] 40 | 41 | return rsp 42 | 43 | 44 | if __name__ == "__main__": 45 | from configuration import Config 46 | c = Config() 47 | tbot = TigerBot(c.TIGERBOT) 48 | rsp = tbot.get_answer("你还活着?") 49 | print(rsp) 50 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | 4 | import signal 5 | from argparse import ArgumentParser 6 | 7 | from base.func_report_reminder import ReportReminder 8 | from configuration import Config 9 | from constants import ChatType 10 | from robot import Robot 11 | from wcferry import Wcf 12 | 13 | 14 | def weather_report(robot: Robot) -> None: 15 | """模拟发送天气预报 16 | """ 17 | 18 | # 获取接收人 19 | receivers = ["filehelper"] 20 | 21 | # 获取天气,需要自己实现,可以参考 https://gitee.com/lch0821/WeatherScrapy 获取天气。 22 | report = "这就是获取到的天气情况了" 23 | 24 | for r in receivers: 25 | robot.sendTextMsg(report, r) 26 | # robot.sendTextMsg(report, r, "notify@all") # 发送消息并@所有人 27 | 28 | 29 | def main(chat_type: int): 30 | config = Config() 31 | wcf = Wcf(debug=True) 32 | 33 | def handler(sig, frame): 34 | wcf.cleanup() # 退出前清理环境 35 | exit(0) 36 | 37 | signal.signal(signal.SIGINT, handler) 38 | 39 | robot = Robot(config, wcf, chat_type) 40 | robot.LOG.info("正在启动机器人···") 41 | 42 | # 机器人启动发送测试消息 43 | robot.sendTextMsg("机器人启动成功!", "filehelper") 44 | 45 | # 接收消息 46 | # robot.enableRecvMsg() # 可能会丢消息? 47 | robot.enableReceivingMsg() # 加队列 48 | 49 | # 每天 7 点发送天气预报 50 | robot.onEveryTime("07:00", weather_report, robot=robot) 51 | 52 | # 每天 7:30 发送新闻 53 | robot.onEveryTime("07:30", robot.newsReport) 54 | 55 | # 每天 16:30 提醒发日报周报月报 56 | robot.onEveryTime("16:30", ReportReminder.remind, robot=robot) 57 | 58 | # 让机器人一直跑 59 | robot.keepRunningAndBlockProcess() 60 | 61 | 62 | if __name__ == "__main__": 63 | parser = ArgumentParser() 64 | parser.add_argument('-c', type=int, default=0, help=f'选择模型参数序号: {ChatType.help_hint()}') 65 | args = parser.parse_args().c 66 | main(args) 67 | -------------------------------------------------------------------------------- /base/func_news.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | 4 | import json 5 | import re 6 | import logging 7 | import time 8 | from datetime import datetime 9 | 10 | import requests 11 | from lxml import etree 12 | 13 | 14 | class News(object): 15 | def __init__(self) -> None: 16 | self.LOG = logging.getLogger(__name__) 17 | self.week = {0: "周一", 1: "周二", 2: "周三", 3: "周四", 4: "周五", 5: "周六", 6: "周日"} 18 | self.headers = { 19 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/110.0"} 20 | 21 | def get_important_news(self): 22 | url = "https://www.cls.cn/api/sw?app=CailianpressWeb&os=web&sv=7.7.5" 23 | data = {"type": "telegram", "keyword": "你需要知道的隔夜全球要闻", "page": 0, 24 | "rn": 1, "os": "web", "sv": "7.7.5", "app": "CailianpressWeb"} 25 | try: 26 | rsp = requests.post(url=url, headers=self.headers, data=data) 27 | data = json.loads(rsp.text)["data"]["telegram"]["data"][0] 28 | news = data["descr"] 29 | timestamp = data["time"] 30 | ts = time.localtime(timestamp) 31 | weekday_news = datetime(*ts[:6]).weekday() 32 | except Exception as e: 33 | self.LOG.error(e) 34 | return "" 35 | 36 | weekday_now = datetime.now().weekday() 37 | if weekday_news != weekday_now: 38 | return "" # 旧闻,观察发现周二~周六早晨6点半左右发布 39 | 40 | fmt_time = time.strftime("%Y年%m月%d日", ts) 41 | 42 | news = re.sub(r"(\d{1,2}、)", r"\n\1", news) 43 | fmt_news = "".join(etree.HTML(news).xpath(" // text()")) 44 | fmt_news = re.sub(r"周[一|二|三|四|五|六|日]你需要知道的", r"", fmt_news) 45 | 46 | return f"{fmt_time} {self.week[weekday_news]}\n{fmt_news}" 47 | 48 | 49 | if __name__ == "__main__": 50 | news = News() 51 | print(news.get_important_news()) 52 | -------------------------------------------------------------------------------- /base/chatglm/README.MD: -------------------------------------------------------------------------------- 1 | # ChatGLM3 集成使用说明 2 | 3 | 1. 需要取消配置中 chatglm 的注释, 并配置对应信息,使用 [ChatGLM3](https://github.com/THUDM/ChatGLM3), 启用最新版 ChatGLM3 根目录下 openai_api.py 获取 api 地址: 4 | ```yaml 5 | # 如果要使用 chatglm,取消下面的注释并填写相关内容 6 | chatglm: 7 | key: sk-012345678901234567890123456789012345678901234567 # 根据需要自己做key校验 8 | api: http://localhost:8000/v1 # 根据自己的chatglm地址修改 9 | proxy: # 如果你在国内,你可能需要魔法,大概长这样:http://域名或者IP地址:端口号 10 | prompt: 你是智能聊天机器人,你叫小薇 # 根据需要对角色进行设定 11 | file_path: F:/Pictures/temp #设定生成图片和代码使用的文件夹路径 12 | ``` 13 | 14 | 2. 修改 chatglm/tool_registry.py 工具里面的一下配置,comfyUI 地址或者根据需要自己配置一些工具,函数名上需要加 @register_tool, 函数里面需要叫'''函数描述''',参数需要用 Annotated[str,'',True] 修饰,分别是类型,参数说明,是否必填,再加 ->加上对应的返回类型 15 | ```python 16 | @register_tool 17 | def get_confyui_image(prompt: Annotated[str, '要生成图片的提示词,注意必须是英文', True]) -> dict: 18 | ''' 19 | 生成图片 20 | ''' 21 | with open("func_chatglm\\base.json", "r", encoding="utf-8") as f: 22 | data2 = json.load(f) 23 | data2['prompt']['3']['inputs']['seed'] = ''.join( 24 | random.sample('123456789012345678901234567890', 14)) 25 | # 模型名称 26 | data2['prompt']['4']['inputs']['ckpt_name'] = 'chilloutmix_NiPrunedFp32Fix.safetensors' 27 | data2['prompt']['6']['inputs']['text'] = prompt # 正向提示词 28 | # data2['prompt']['7']['inputs']['text']='' #反向提示词 29 | cfui = ComfyUIApi(server_address="127.0.0.1:8188") # 根据自己comfyUI地址修改 30 | images = cfui.get_images(data2['prompt']) 31 | return {'res': images[0]['image'], 'res_type': 'image', 'filename': images[0]['filename']} 32 | 33 | ``` 34 | 35 | 3. 使用 Code Interpreter 还需要安装 Jupyter 内核,默认名称叫 chatglm3: 36 | ``` 37 | ipython kernel install --name chatglm3 --user 38 | ``` 39 | 40 | 如果名称需要自定义,可以配置系统环境变量:IPYKERNEL 或者修改 chatglm/code_kernel.py 41 | ``` 42 | IPYKERNEL = os.environ.get('IPYKERNEL', 'chatglm3') 43 | ``` 44 | 45 | 4. 启动后,发送 #帮助 可以查看 模式和常用指令 46 | -------------------------------------------------------------------------------- /config.yaml.template: -------------------------------------------------------------------------------- 1 | logging: 2 | version: 1 3 | disable_existing_loggers: False 4 | 5 | formatters: 6 | simple: 7 | format: "%(asctime)s %(message)s" 8 | datefmt: "%Y-%m-%d %H:%M:%S" 9 | error: 10 | format: "%(asctime)s %(name)s %(levelname)s %(filename)s::%(funcName)s[%(lineno)d]:%(message)s" 11 | 12 | handlers: 13 | console: 14 | class: logging.StreamHandler 15 | level: INFO 16 | formatter: simple 17 | stream: ext://sys.stdout 18 | 19 | info_file_handler: 20 | class: logging.handlers.RotatingFileHandler 21 | level: INFO 22 | formatter: simple 23 | filename: wx_info.log 24 | maxBytes: 10485760 # 10MB 25 | backupCount: 20 26 | encoding: utf8 27 | 28 | error_file_handler: 29 | class: logging.handlers.RotatingFileHandler 30 | level: ERROR 31 | formatter: error 32 | filename: wx_error.log 33 | maxBytes: 10485760 # 10MB 34 | backupCount: 20 35 | encoding: utf8 36 | 37 | root: 38 | level: INFO 39 | handlers: [console, info_file_handler, error_file_handler] 40 | 41 | groups: 42 | enable: [] # 允许响应的群 roomId,大概长这样:2xxxxxxxxx3@chatroom 43 | 44 | news: 45 | receivers: [] # 定时新闻接收人(roomid 或者 wxid) 46 | 47 | report_reminder: 48 | receivers: [] # 定时日报周报月报提醒(roomid 或者 wxid) 49 | 50 | chatgpt: # -----chatgpt配置这行不填----- 51 | key: # 填写你 ChatGPT 的 key 52 | api: https://api.openai.com/v1 # 如果你不知道这是干嘛的,就不要改 53 | proxy: # 如果你在国内,你可能需要魔法,大概长这样:http://域名或者IP地址:端口号 54 | prompt: 你是智能聊天机器人,你叫wcferry # 根据需要对角色进行设定 55 | 56 | chatglm: # -----chatglm配置这行不填----- 57 | key: sk-012345678901234567890123456789012345678901234567 # 这个应该不用动 58 | api: http://localhost:8000/v1 # 根据自己的chatglm地址修改 59 | proxy: # 如果你在国内,你可能需要魔法,大概长这样:http://域名或者IP地址:端口号 60 | prompt: 你是智能聊天机器人,你叫小薇 # 根据需要对角色进行设定 61 | file_path: F:/Pictures/temp #设定生成图片和代码使用的文件夹路径 62 | 63 | tigerbot: # -----tigerbot配置这行不填----- 64 | key: # key 65 | model: # tigerbot-7b-sft 66 | 67 | xinghuo_web: # -----讯飞星火web模式api配置这行不填 抓取方式详见文档:https://www.bilibili.com/read/cv27066577----- 68 | cookie: # cookie 69 | fd: # fd 70 | GtToken: # GtToken 71 | prompt: 你是智能聊天机器人,你叫wcferry。请用这个角色回答我的问题 # 根据需要对角色进行设定 72 | -------------------------------------------------------------------------------- /base/chatglm/base.json: -------------------------------------------------------------------------------- 1 | { 2 | "prompt": { 3 | "3": { 4 | "inputs": { 5 | "seed": 1000573256060686, 6 | "steps": 20, 7 | "cfg": 8, 8 | "sampler_name": "euler", 9 | "scheduler": "normal", 10 | "denoise": 1, 11 | "model": [ 12 | "4", 13 | 0 14 | ], 15 | "positive": [ 16 | "6", 17 | 0 18 | ], 19 | "negative": [ 20 | "7", 21 | 0 22 | ], 23 | "latent_image": [ 24 | "5", 25 | 0 26 | ] 27 | }, 28 | "class_type": "KSampler" 29 | }, 30 | "4": { 31 | "inputs": { 32 | "ckpt_name": "(修复)512-inpainting-ema.safetensors" 33 | }, 34 | "class_type": "CheckpointLoaderSimple" 35 | }, 36 | "5": { 37 | "inputs": { 38 | "width": 512, 39 | "height": 512, 40 | "batch_size": 1 41 | }, 42 | "class_type": "EmptyLatentImage" 43 | }, 44 | "6": { 45 | "inputs": { 46 | "text": "beautiful scenery nature glass bottle landscape, , purple galaxy bottle,dress, ", 47 | "clip": [ 48 | "4", 49 | 1 50 | ] 51 | }, 52 | "class_type": "CLIPTextEncode" 53 | }, 54 | "7": { 55 | "inputs": { 56 | "text": "text, watermark", 57 | "clip": [ 58 | "4", 59 | 1 60 | ] 61 | }, 62 | "class_type": "CLIPTextEncode" 63 | }, 64 | "8": { 65 | "inputs": { 66 | "samples": [ 67 | "3", 68 | 0 69 | ], 70 | "vae": [ 71 | "4", 72 | 2 73 | ] 74 | }, 75 | "class_type": "VAEDecode" 76 | }, 77 | "9": { 78 | "inputs": { 79 | "filename_prefix": "ComfyUI", 80 | "images": [ 81 | "8", 82 | 0 83 | ] 84 | }, 85 | "class_type": "SaveImage" 86 | } 87 | } 88 | } -------------------------------------------------------------------------------- /base/func_report_reminder.py: -------------------------------------------------------------------------------- 1 | import calendar 2 | import datetime 3 | 4 | from chinese_calendar import is_workday 5 | from robot import Robot 6 | 7 | 8 | class ReportReminder: 9 | 10 | @staticmethod 11 | def remind(robot: Robot) -> None: 12 | 13 | receivers = robot.config.REPORT_REMINDERS 14 | if not receivers: 15 | receivers = ["filehelper"] 16 | # 日报周报月报提醒 17 | for receiver in receivers: 18 | today = datetime.datetime.now().date() 19 | # 如果是非工作日 20 | if not is_workday(today): 21 | robot.sendTextMsg("休息日快乐", receiver) 22 | # 如果是工作日 23 | if is_workday(today): 24 | robot.sendTextMsg("该发日报啦", receiver) 25 | # 如果是本周最后一个工作日 26 | if ReportReminder.last_work_day_of_week(today) == today: 27 | robot.sendTextMsg("该发周报啦", receiver) 28 | # 如果本日是本月最后一整周的最后一个工作日: 29 | if ReportReminder.last_work_friday_of_month(today) == today: 30 | robot.sendTextMsg("该发月报啦", receiver) 31 | 32 | # 计算本月最后一个周的最后一个工作日 33 | @staticmethod 34 | def last_work_friday_of_month(d: datetime.date) -> datetime.date: 35 | days_in_month = calendar.monthrange(d.year, d.month)[1] 36 | weekday = calendar.weekday(d.year, d.month, days_in_month) 37 | if weekday == 4: 38 | last_friday_of_month = datetime.date( 39 | d.year, d.month, days_in_month) 40 | else: 41 | if weekday >= 5: 42 | last_friday_of_month = datetime.date(d.year, d.month, days_in_month) - \ 43 | datetime.timedelta(days=(weekday - 4)) 44 | else: 45 | last_friday_of_month = datetime.date(d.year, d.month, days_in_month) - \ 46 | datetime.timedelta(days=(weekday + 3)) 47 | while not is_workday(last_friday_of_month): 48 | last_friday_of_month = last_friday_of_month - datetime.timedelta(days=1) 49 | return last_friday_of_month 50 | 51 | # 计算本周最后一个工作日 52 | @staticmethod 53 | def last_work_day_of_week(d: datetime.date) -> datetime.date: 54 | weekday = calendar.weekday(d.year, d.month, d.day) 55 | last_work_day_of_week = datetime.date( 56 | d.year, d.month, d.day) + datetime.timedelta(days=(6 - weekday)) 57 | 58 | while not is_workday(last_work_day_of_week): 59 | last_work_day_of_week = last_work_day_of_week - \ 60 | datetime.timedelta(days=1) 61 | return last_work_day_of_week 62 | -------------------------------------------------------------------------------- /base/func_chengyu.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import os 4 | import random 5 | 6 | import pandas as pd 7 | 8 | 9 | class Chengyu(object): 10 | def __init__(self) -> None: 11 | root = os.path.dirname(os.path.abspath(__file__)) 12 | self.df = pd.read_csv(f"{root}/chengyu.csv", delimiter="\t") 13 | self.cys, self.zis, self.yins = self._build_data() 14 | 15 | def _build_data(self): 16 | df = self.df.copy() 17 | df["shouzi"] = df["chengyu"].apply(lambda x: x[0]) 18 | df["mozi"] = df["chengyu"].apply(lambda x: x[-1]) 19 | 20 | df["shouyin"] = df["pingyin"].apply(lambda x: x.split(" ")[0]) 21 | df["moyin"] = df["pingyin"].apply(lambda x: x.split(" ")[-1]) 22 | 23 | cys = dict(zip(df["chengyu"], df["moyin"])) 24 | zis = df.groupby("shouzi").agg({"chengyu": set})["chengyu"].to_dict() 25 | yins = df.groupby("shouyin").agg({"chengyu": set})["chengyu"].to_dict() 26 | 27 | return cys, zis, yins 28 | 29 | def isChengyu(self, cy: str) -> bool: 30 | return self.cys.get(cy, None) is not None 31 | 32 | def getNext(self, cy: str, tongyin: bool = True) -> str: 33 | """获取下一个成语 34 | cy: 当前成语 35 | tongyin: 是否允许同音字 36 | """ 37 | zi = cy[-1] 38 | ansers = list(self.zis.get(zi, {})) 39 | try: 40 | ansers.remove(cy) # 移除当前成语 41 | except Exception as e: 42 | pass # Just ignore... 43 | 44 | if ansers: 45 | return random.choice(ansers) 46 | 47 | # 如果找不到同字,允许同音 48 | if tongyin: 49 | yin = self.cys.get(cy) 50 | ansers = list(self.yins.get(yin, {})) 51 | 52 | try: 53 | ansers.remove(cy) # 移除当前成语 54 | except Exception as e: 55 | pass # Just ignore... 56 | 57 | if ansers: 58 | return random.choice(ansers) 59 | 60 | return None 61 | 62 | def getMeaning(self, cy: str) -> str: 63 | ress = self.df[self.df["chengyu"] == cy].to_dict(orient="records") 64 | if ress: 65 | res = ress[0] 66 | rsp = res["chengyu"] + "\n" + res["pingyin"] + "\n" + res["jieshi"] 67 | if res["chuchu"] and res["chuchu"] != "无": 68 | rsp += "\n出处:" + res["chuchu"] 69 | if res["lizi"] and res["lizi"] != "无": 70 | rsp += "\n例子:" + res["lizi"] 71 | return rsp 72 | return None 73 | 74 | 75 | cy = Chengyu() 76 | 77 | if __name__ == "__main__": 78 | answer = cy.getNext("便宜行事") 79 | print(answer) 80 | -------------------------------------------------------------------------------- /job_mgmt.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import time 4 | from typing import Any, Callable 5 | 6 | import schedule 7 | 8 | 9 | class Job(object): 10 | def __init__(self) -> None: 11 | pass 12 | 13 | def onEverySeconds(self, seconds: int, task: Callable[..., Any], *args, **kwargs) -> None: 14 | """ 15 | 每 seconds 秒执行 16 | :param seconds: 间隔,秒 17 | :param task: 定时执行的方法 18 | :return: None 19 | """ 20 | schedule.every(seconds).seconds.do(task, *args, **kwargs) 21 | 22 | def onEveryMinutes(self, minutes: int, task: Callable[..., Any], *args, **kwargs) -> None: 23 | """ 24 | 每 minutes 分钟执行 25 | :param minutes: 间隔,分钟 26 | :param task: 定时执行的方法 27 | :return: None 28 | """ 29 | schedule.every(minutes).minutes.do(task, *args, **kwargs) 30 | 31 | def onEveryHours(self, hours: int, task: Callable[..., Any], *args, **kwargs) -> None: 32 | """ 33 | 每 hours 小时执行 34 | :param hours: 间隔,小时 35 | :param task: 定时执行的方法 36 | :return: None 37 | """ 38 | schedule.every(hours).hours.do(task, *args, **kwargs) 39 | 40 | def onEveryDays(self, days: int, task: Callable[..., Any], *args, **kwargs) -> None: 41 | """ 42 | 每 days 天执行 43 | :param days: 间隔,天 44 | :param task: 定时执行的方法 45 | :return: None 46 | """ 47 | schedule.every(days).days.do(task, *args, **kwargs) 48 | 49 | def onEveryTime(self, times: int, task: Callable[..., Any], *args, **kwargs) -> None: 50 | """ 51 | 每天定时执行 52 | :param times: 时间字符串列表,格式: 53 | - For daily jobs -> HH:MM:SS or HH:MM 54 | - For hourly jobs -> MM:SS or :MM 55 | - For minute jobs -> :SS 56 | :param task: 定时执行的方法 57 | :return: None 58 | 59 | 例子: times=["10:30", "10:45", "11:00"] 60 | """ 61 | if not isinstance(times, list): 62 | times = [times] 63 | 64 | for t in times: 65 | schedule.every(1).days.at(t).do(task, *args, **kwargs) 66 | 67 | def runPendingJobs(self) -> None: 68 | schedule.run_pending() 69 | 70 | 71 | if __name__ == "__main__": 72 | def printStr(s): 73 | print(s) 74 | 75 | job = Job() 76 | job.onEverySeconds(59, printStr, "onEverySeconds 59") 77 | job.onEveryMinutes(59, printStr, "onEveryMinutes 59") 78 | job.onEveryHours(23, printStr, "onEveryHours 23") 79 | job.onEveryDays(1, printStr, "onEveryDays 1") 80 | job.onEveryTime("23:59", printStr, "onEveryTime 23:59") 81 | 82 | while True: 83 | job.runPendingJobs() 84 | time.sleep(1) 85 | -------------------------------------------------------------------------------- /base/func_chatgpt.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | 4 | from datetime import datetime 5 | 6 | import openai 7 | 8 | 9 | class ChatGPT: 10 | 11 | def __init__(self, conf: dict) -> None: 12 | openai.api_key = conf["key"] 13 | # 自己搭建或第三方代理的接口 14 | openai.api_base = conf["api"] 15 | proxy = conf["proxy"] 16 | if proxy: 17 | openai.proxy = {"http": proxy, "https": proxy} 18 | self.conversation_list = {} 19 | self.system_content_msg = {"role": "system", "content": conf["prompt"]} 20 | 21 | def __repr__(self): 22 | return 'ChatGPT' 23 | 24 | @staticmethod 25 | def value_check(conf: dict) -> bool: 26 | if conf: 27 | if conf.get("key") and conf.get("api") and conf.get("prompt"): 28 | return True 29 | return False 30 | 31 | def get_answer(self, question: str, wxid: str) -> str: 32 | # wxid或者roomid,个人时为微信id,群消息时为群id 33 | self.updateMessage(wxid, question, "user") 34 | 35 | try: 36 | ret = openai.ChatCompletion.create( 37 | model="gpt-3.5-turbo", 38 | messages=self.conversation_list[wxid], 39 | temperature=0.2 40 | ) 41 | 42 | rsp = ret["choices"][0]["message"]["content"] 43 | rsp = rsp[2:] if rsp.startswith("\n\n") else rsp 44 | rsp = rsp.replace("\n\n", "\n") 45 | self.updateMessage(wxid, rsp, "assistant") 46 | except openai.error.AuthenticationError as e3: 47 | rsp = "OpenAI API 认证失败,请检查 API 密钥是否正确" 48 | except openai.error.APIConnectionError as e2: 49 | rsp = "无法连接到 OpenAI API,请检查网络连接" 50 | except openai.error.APIError as e1: 51 | rsp = "OpenAI API 返回了错误:" + str(e1) 52 | except Exception as e0: 53 | rsp = "发生未知错误:" + str(e0) 54 | 55 | return rsp 56 | 57 | def updateMessage(self, wxid: str, question: str, role: str) -> None: 58 | now_time = str(datetime.now().strftime("%Y-%m-%d %H:%M:%S")) 59 | 60 | time_mk = "当需要回答时间时请直接参考回复:" 61 | # 初始化聊天记录,组装系统信息 62 | if wxid not in self.conversation_list.keys(): 63 | question_ = [ 64 | self.system_content_msg, 65 | {"role": "system", "content": "" + time_mk + now_time} 66 | ] 67 | self.conversation_list[wxid] = question_ 68 | 69 | # 当前问题 70 | content_question_ = {"role": role, "content": question} 71 | self.conversation_list[wxid].append(content_question_) 72 | 73 | for cont in self.conversation_list[wxid]: 74 | if cont["role"] != "system": 75 | continue 76 | if cont["content"].startswith(time_mk): 77 | cont["content"] = time_mk + now_time 78 | 79 | # 只存储10条记录,超过滚动清除 80 | i = len(self.conversation_list[wxid]) 81 | if i > 10: 82 | print("滚动清除微信记录:" + wxid) 83 | # 删除多余的记录,倒着删,且跳过第一个的系统消息 84 | del self.conversation_list[wxid][1] 85 | 86 | 87 | if __name__ == "__main__": 88 | from configuration import Config 89 | config = Config().CHATGPT 90 | if not config: 91 | exit(0) 92 | 93 | chat = ChatGPT(config) 94 | 95 | while True: 96 | q = input(">>> ") 97 | try: 98 | time_start = datetime.now() # 记录开始时间 99 | print(chat.get_answer(q, "wxid")) 100 | time_end = datetime.now() # 记录结束时间 101 | 102 | print(f"{round((time_end - time_start).total_seconds(), 2)}s") # 计算的时间差为程序的执行时间,单位为秒/s 103 | except Exception as e: 104 | print(e) 105 | -------------------------------------------------------------------------------- /README.MD: -------------------------------------------------------------------------------- 1 | # WeChatRobot 2 | 一个基于 [WeChatFerry](https://github.com/lich0821/WeChatFerry) 的微信机器人示例。 3 | 4 | |[📖 文档](https://wechatferry.readthedocs.io/)|[📺 视频教程](https://mp.weixin.qq.com/s/APdjGyZ2hllXxyG_sNCfXQ)|[🙋 FAQ](https://mp.weixin.qq.com/s/5Xle4X_OcxLoIX9PJjYCmQ)|[🚨【微信机器人】沙雕行为合集](https://mp.weixin.qq.com/s/mc8O5iuhy46X4Bgqs80E8g)| 5 | |:-:|:-:|:-:|:-:| 6 | 7 | |![碲矿](https://s2.loli.net/2023/09/25/fub5VAPSa8srwyM.jpg)|![赞赏](https://s2.loli.net/2023/09/25/gkh9uWZVOxzNPAX.jpg)| 8 | |:-:|:-:| 9 | |后台回复 `WeChatFerry` 加群交流|如果你觉得有用| 10 | 11 | ## Quick Start 12 | 0. 遇到问题先看看上面的文档、教程、FAQ 和【微信机器人】沙雕行为合集。 13 | 1. 安装 Python>=3.9,例如 [3.10.11](https://www.python.org/ftp/python/3.10.11/python-3.10.11-amd64.exe) 14 | 2. 安装微信 `3.9.2.23`,下载地址在 [这里](https://github.com/lich0821/WeChatFerry/releases/download/v39.0.0/WeChatSetup-3.9.2.23.exe);也可以从 [WeChatSetup](https://gitee.com/lch0821/WeChatSetup) 找到。 15 | 3. 克隆项目 16 | ```sh 17 | git clone https://github.com/lich0821/WeChatRobot.git 18 | 19 | # 如果网络原因打不开,可以科学上网,或者使用gitee 20 | git clone https://gitee.com/lch0821/WeChatRobot.git 21 | ``` 22 | 23 | 如果觉得克隆复杂,也可以直接下载[最新版](https://github.com/lich0821/WeChatRobot/releases/latest) (打不开试试[这个](https://gitee.com/lch0821/WeChatRobot/releases/latest)) 到本地解压。 24 | 25 | 4. 安装依赖 26 | ```sh 27 | # 升级 pip 28 | python -m pip install -U pip 29 | # 安装必要依赖 30 | pip install -r requirements.txt 31 | ``` 32 | 33 | 5. 运行 34 | 35 | 我们需要运行两次 main.py 第一次是生成配置文件 config.yaml, 第二次是真正跑你的机器人。 36 | 直接运行程序会自动拉起微信,如果微信未打开,会自动打开微信;如果版本不对,也会有提示;其他报错,请进群交流。 37 | 38 | 下面代码为第一次运行:第一次运行 main.py 会在 WeChatRobot 目录下生成一个 config.yaml 文件,参照修改配置进行修改。 39 | 40 | 其中 chatgpt、tigerbot、chatglm 和 xinghuo_web 是四种模型的配置信息,你需要配置它们的参数,不知道的可以加群交流。 41 | 42 | ```sh 43 | python main.py 44 | 45 | # 需要停止按 Ctrl+C 46 | ``` 47 | 48 | 启动之后,可以正常接收消息但不会响应群消息。参考下方 [修改配置](#config) 进行配置,以便响应特定群聊。 49 | 50 | 下面代码为第二次运行:你可以通过命令行参数选择模型,默认是不选择,这样你配置了什么参数就跑什么模型。正因如此你需要配置前面所说四种模型中的至少一种(当然也可以都配置,想跑那个模型就选什么参数), 然后就可以开始使用你的机器人了。 51 | ```sh 52 | python main.py 53 | 54 | # 需要停止按 Ctrl+C 55 | ``` 56 | 57 | 如果你配置了多个模型(不需要将其他配置注释或者移除),下面的内容才对你有帮助否则略过,通过 python main.py -h 通过参数可以选择要跑的模型。 58 | ```sh 59 | # 查看帮助 60 | python main.py -h 61 | #optional arguments: 62 | # -h, --help show this help message and exit 63 | # -c C, --chat_model C 选择要使用的AI模型,默认不选择,可选参数:1. tigerbot 模型 2. chatgpt 模型 3. 讯飞星火模型 4. chatglm 模型 64 | ``` 65 | 66 | ```sh 67 | # 例: 我想运行选择chatgpt的机器人 68 | python main.py -c 2 69 | 70 | # 需要停止按 Ctrl+C 71 | ``` 72 | 73 | > python main.py -c C 其中参数 C 可选择如下所示 74 | >> 1. tigerbot 模型 75 | >> 2. chatgpt 模型 76 | >> 3. 讯飞星火模型 77 | >> 4. chatglm 模型 78 | 79 | 6. 停止 80 | 81 | 不要那么粗暴,温柔点儿; 82 | 不要直接关闭窗口,温柔点儿。 83 | 84 | 输入:`Ctrl+C`。否则,会出现各种奇怪问题。 85 | 86 | ### 修改配置 87 | ℹ️ *修改配置后,需要重新启动,以便让配置生效。* 88 | 89 | 配置文件 `config.yaml` 是运行程序后自动从模板复制过来的,功能默认关闭。 90 | 91 | #### 响应被 @ 消息 92 | 为了响应群聊消息,需要添加相应的 `roomId`。 93 | 94 | 第一次运行的时候,可以在手机上往需要响应的群里发消息,打印的消息中方括号里的就是;多个群用 `,` 分隔。 95 | ```yaml 96 | groups: 97 | enable: [] # 允许响应的群 roomId,大概长这样:2xxxxxxxxx3@chatroom, 多个群用 `,` 分隔 98 | ``` 99 | 100 | #### 配置 AI 模型 101 | 为了使用 AI 模型,需要对相应模型并进行配置。 102 | 103 | 使用 ChatGLM 见注意事项 [README.MD](base/chatglm/README.MD) 104 | 105 | ```yaml 106 | chatgpt: # -----chatgpt配置这行不填----- 107 | key: # 填写你 ChatGPT 的 key 108 | api: https://api.openai.com/v1 # 如果你不知道这是干嘛的,就不要改 109 | proxy: # 如果你在国内,你可能需要魔法,大概长这样:http://域名或者IP地址:端口号 110 | prompt: 你是智能聊天机器人,你叫 wcferry # 根据需要对角色进行设定 111 | 112 | chatglm: # -----chatglm配置这行不填----- 113 | key: sk-012345678901234567890123456789012345678901234567 # 这个应该不用动 114 | api: http://localhost:8000/v1 # 根据自己的chatglm地址修改 115 | proxy: # 如果你在国内,你可能需要魔法,大概长这样:http://域名或者IP地址:端口号 116 | prompt: 你是智能聊天机器人,你叫小薇 # 根据需要对角色进行设定 117 | file_path: C:/Pictures/temp #设定生成图片和代码使用的文件夹路径 118 | 119 | tigerbot: # -----tigerbot配置这行不填----- 120 | key: # key 121 | model: # tigerbot-7b-sft 122 | 123 | # 抓取方式详见文档:https://www.bilibili.com/read/cv27066577 124 | xinghuo_web: # -----讯飞星火web模式api配置这行不填----- 125 | cookie: # cookie 126 | fd: # fd 127 | GtToken: # GtToken 128 | prompt: 你是智能聊天机器人,你叫 wcferry。请用这个角色回答我的问题 # 根据需要对角色进行设定 129 | ``` 130 | 131 | ## HTTP 132 | 如需要使用 HTTP 接口,请参考 [wcfhttp](https://wechatferry.readthedocs.io/zh/latest/?badge=latest)。 133 | 134 | [![PyPi](https://img.shields.io/pypi/v/wcfhttp.svg)](https://pypi.python.org/pypi/wcfhttp) [![Downloads](https://static.pepy.tech/badge/wcfhttp)](https://pypi.python.org/pypi/wcfhttp) [![Documentation Status](https://readthedocs.org/projects/wechatferry/badge/?version=latest)](https://wechatferry.readthedocs.io/zh/latest/?badge=latest) 135 | -------------------------------------------------------------------------------- /base/chatglm/tool_registry.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | import json 3 | import random 4 | import re 5 | import traceback 6 | from copy import deepcopy 7 | from datetime import datetime 8 | from types import GenericAlias 9 | from typing import Annotated, get_origin 10 | 11 | from base.chatglm.comfyUI_api import ComfyUIApi 12 | from base.func_news import News 13 | from zhdate import ZhDate 14 | 15 | _TOOL_HOOKS = {} 16 | _TOOL_DESCRIPTIONS = {} 17 | 18 | 19 | def extract_code(text: str) -> str: 20 | pattern = r'```([^\n]*)\n(.*?)```' 21 | matches = re.findall(pattern, text, re.DOTALL) 22 | return matches[-1][1] 23 | 24 | 25 | def register_tool(func: callable): 26 | tool_name = func.__name__ 27 | tool_description = inspect.getdoc(func).strip() 28 | python_params = inspect.signature(func).parameters 29 | tool_params = [] 30 | for name, param in python_params.items(): 31 | annotation = param.annotation 32 | if annotation is inspect.Parameter.empty: 33 | raise TypeError(f"Parameter `{name}` missing type annotation") 34 | if get_origin(annotation) != Annotated: 35 | raise TypeError( 36 | f"Annotation type for `{name}` must be typing.Annotated") 37 | 38 | typ, (description, required) = annotation.__origin__, annotation.__metadata__ 39 | typ: str = str(typ) if isinstance(typ, GenericAlias) else typ.__name__ 40 | if not isinstance(description, str): 41 | raise TypeError(f"Description for `{name}` must be a string") 42 | if not isinstance(required, bool): 43 | raise TypeError(f"Required for `{name}` must be a bool") 44 | 45 | tool_params.append({ 46 | "name": name, 47 | "description": description, 48 | "type": typ, 49 | "required": required 50 | }) 51 | tool_def = { 52 | "name": tool_name, 53 | "description": tool_description, 54 | "params": tool_params 55 | } 56 | 57 | # print("[registered tool] " + pformat(tool_def)) 58 | _TOOL_HOOKS[tool_name] = func 59 | _TOOL_DESCRIPTIONS[tool_name] = tool_def 60 | 61 | return func 62 | 63 | 64 | def dispatch_tool(tool_name: str, tool_params: dict) -> str: 65 | if tool_name not in _TOOL_HOOKS: 66 | return f"Tool `{tool_name}` not found. Please use a provided tool." 67 | tool_call = _TOOL_HOOKS[tool_name] 68 | try: 69 | ret = tool_call(**tool_params) 70 | except BaseException: 71 | ret = traceback.format_exc() 72 | return ret 73 | 74 | 75 | def get_tools() -> dict: 76 | return deepcopy(_TOOL_DESCRIPTIONS) 77 | 78 | # Tool Definitions 79 | 80 | # @register_tool 81 | # def random_number_generator( 82 | # seed: Annotated[int, 'The random seed used by the generator', True], 83 | # range: Annotated[tuple[int, int], 'The range of the generated numbers', True], 84 | # ) -> int: 85 | # """ 86 | # Generates a random number x, s.t. range[0] <= x < range[1] 87 | # """ 88 | # if not isinstance(seed, int): 89 | # raise TypeError("Seed must be an integer") 90 | # if not isinstance(range, tuple): 91 | # raise TypeError("Range must be a tuple") 92 | # if not isinstance(range[0], int) or not isinstance(range[1], int): 93 | # raise TypeError("Range must be a tuple of integers") 94 | 95 | # import random 96 | # return random.Random(seed).randint(*range) 97 | 98 | 99 | @register_tool 100 | def get_weather( 101 | city_name: Annotated[str, 'The name of the city to be queried', True], 102 | ) -> str: 103 | """ 104 | Get the current weather for `city_name` 105 | """ 106 | if not isinstance(city_name, str): 107 | raise TypeError("City name must be a string") 108 | 109 | key_selection = { 110 | "current_condition": ["temp_C", "FeelsLikeC", "humidity", "weatherDesc", "observation_time"], 111 | } 112 | import requests 113 | try: 114 | resp = requests.get(f"https://wttr.in/{city_name}?format=j1") 115 | resp.raise_for_status() 116 | resp = resp.json() 117 | ret = {k: {_v: resp[k][0][_v] for _v in v} 118 | for k, v in key_selection.items()} 119 | except BaseException: 120 | import traceback 121 | ret = "Error encountered while fetching weather data!\n" + traceback.format_exc() 122 | 123 | return str(ret) 124 | 125 | 126 | @register_tool 127 | def get_confyui_image(prompt: Annotated[str, '要生成图片的提示词,注意必须是英文', True]) -> dict: 128 | ''' 129 | 生成图片 130 | ''' 131 | with open("chatglm\\base.json", "r", encoding="utf-8") as f: 132 | data2 = json.load(f) 133 | data2['prompt']['3']['inputs']['seed'] = ''.join( 134 | random.sample('123456789012345678901234567890', 14)) 135 | # 模型名称 136 | data2['prompt']['4']['inputs']['ckpt_name'] = 'chilloutmix_NiPrunedFp32Fix.safetensors' 137 | data2['prompt']['6']['inputs']['text'] = prompt # 正向提示词 138 | # data2['prompt']['7']['inputs']['text']='' #反向提示词 139 | cfui = ComfyUIApi(server_address="127.0.0.1:8188") # 根据自己comfyUI地址修改 140 | images = cfui.get_images(data2['prompt']) 141 | return {'res': images[0]['image'], 'res_type': 'image', 'filename': images[0]['filename']} 142 | 143 | 144 | @register_tool 145 | def get_news() -> str: 146 | ''' 147 | 获取最新新闻 148 | ''' 149 | news = News() 150 | return news.get_important_news() 151 | 152 | 153 | @register_tool 154 | def get_time() -> str: 155 | ''' 156 | 获取当前日期,时间,农历日期,星期几 157 | ''' 158 | time = datetime.now() 159 | date2 = ZhDate.from_datetime(time) 160 | week_list = ["星期一", "星期二", "星期三", "星期四", "星期五", "星期六", "星期日"] 161 | 162 | return '{} {} {}'.format(time.strftime("%Y年%m月%d日 %H:%M:%S"), week_list[time.weekday()], '农历:' + date2.chinese()) 163 | 164 | 165 | if __name__ == "__main__": 166 | print(dispatch_tool("get_weather", {"city_name": "beijing"})) 167 | print(get_tools()) 168 | -------------------------------------------------------------------------------- /base/chatglm/comfyUI_api.py: -------------------------------------------------------------------------------- 1 | # This is an example that uses the websockets api to know when a prompt execution is done 2 | # Once the prompt execution is done it downloads the images using the /history endpoint 3 | 4 | import io 5 | import json 6 | import random 7 | import urllib 8 | import uuid 9 | 10 | import requests 11 | # NOTE: websocket-client (https://github.com/websocket-client/websocket-client) 12 | import websocket 13 | from PIL import Image 14 | 15 | 16 | class ComfyUIApi(): 17 | def __init__(self, server_address="127.0.0.1:8188"): 18 | self.server_address = server_address 19 | self.client_id = str(uuid.uuid4()) 20 | self.ws = websocket.WebSocket() 21 | self.ws.connect( 22 | "ws://{}/ws?clientId={}".format(server_address, self.client_id)) 23 | 24 | def queue_prompt(self, prompt): 25 | p = {"prompt": prompt, "client_id": self.client_id} 26 | data = json.dumps(p).encode('utf-8') 27 | req = requests.post( 28 | "http://{}/prompt".format(self.server_address), data=data) 29 | print(req.text) 30 | return json.loads(req.text) 31 | 32 | def get_image(self, filename, subfolder, folder_type): 33 | data = {"filename": filename, 34 | "subfolder": subfolder, "type": folder_type} 35 | url_values = urllib.parse.urlencode(data) 36 | with requests.get("http://{}/view?{}".format(self.server_address, url_values)) as response: 37 | image = Image.open(io.BytesIO(response.content)) 38 | return image 39 | 40 | def get_image_url(self, filename, subfolder, folder_type): 41 | data = {"filename": filename, 42 | "subfolder": subfolder, "type": folder_type} 43 | url_values = urllib.parse.urlencode(data) 44 | return "http://{}/view?{}".format(self.server_address, url_values) 45 | 46 | def get_history(self, prompt_id): 47 | with requests.get("http://{}/history/{}".format(self.server_address, prompt_id)) as response: 48 | return json.loads(response.text) 49 | 50 | def get_images(self, prompt, isUrl=False): 51 | prompt_id = self.queue_prompt(prompt)['prompt_id'] 52 | output_images = [] 53 | while True: 54 | out = self.ws.recv() 55 | if isinstance(out, str): 56 | message = json.loads(out) 57 | if message['type'] == 'executing': 58 | data = message['data'] 59 | if data['node'] is None and data['prompt_id'] == prompt_id: 60 | break # Execution is done 61 | else: 62 | continue # previews are binary data 63 | 64 | history = self.get_history(prompt_id)[prompt_id] 65 | for o in history['outputs']: 66 | for node_id in history['outputs']: 67 | node_output = history['outputs'][node_id] 68 | if 'images' in node_output: 69 | for image in node_output['images']: 70 | image_data = self.get_image_url(image['filename'], image['subfolder'], image['type']) if isUrl else self.get_image( 71 | image['filename'], image['subfolder'], image['type']) 72 | image['image'] = image_data 73 | output_images.append(image) 74 | 75 | return output_images 76 | 77 | 78 | prompt_text = """ 79 | { 80 | "3": { 81 | "class_type": "KSampler", 82 | "inputs": { 83 | "cfg": 8, 84 | "denoise": 1, 85 | "latent_image": [ 86 | "5", 87 | 0 88 | ], 89 | "model": [ 90 | "4", 91 | 0 92 | ], 93 | "negative": [ 94 | "7", 95 | 0 96 | ], 97 | "positive": [ 98 | "6", 99 | 0 100 | ], 101 | "sampler_name": "euler", 102 | "scheduler": "normal", 103 | "seed": 8566257, 104 | "steps": 20 105 | } 106 | }, 107 | "4": { 108 | "class_type": "CheckpointLoaderSimple", 109 | "inputs": { 110 | "ckpt_name": "chilloutmix_NiPrunedFp32Fix.safetensors" 111 | } 112 | }, 113 | "5": { 114 | "class_type": "EmptyLatentImage", 115 | "inputs": { 116 | "batch_size": 1, 117 | "height": 512, 118 | "width": 512 119 | } 120 | }, 121 | "6": { 122 | "class_type": "CLIPTextEncode", 123 | "inputs": { 124 | "clip": [ 125 | "4", 126 | 1 127 | ], 128 | "text": "masterpiece best quality girl" 129 | } 130 | }, 131 | "7": { 132 | "class_type": "CLIPTextEncode", 133 | "inputs": { 134 | "clip": [ 135 | "4", 136 | 1 137 | ], 138 | "text": "bad hands" 139 | } 140 | }, 141 | "8": { 142 | "class_type": "VAEDecode", 143 | "inputs": { 144 | "samples": [ 145 | "3", 146 | 0 147 | ], 148 | "vae": [ 149 | "4", 150 | 2 151 | ] 152 | } 153 | }, 154 | "9": { 155 | "class_type": "SaveImage", 156 | "inputs": { 157 | "filename_prefix": "ComfyUI", 158 | "images": [ 159 | "8", 160 | 0 161 | ] 162 | } 163 | } 164 | } 165 | """ 166 | if __name__ == '__main__': 167 | prompt = json.loads(prompt_text) 168 | # set the text prompt for our positive CLIPTextEncode 169 | prompt["6"]["inputs"]["text"] = "masterpiece best quality man" 170 | 171 | # set the seed for our KSampler node 172 | prompt["3"]["inputs"]["seed"] = ''.join( 173 | random.sample('123456789012345678901234567890', 14)) 174 | 175 | cfui = ComfyUIApi() 176 | images = cfui.get_images(prompt) 177 | 178 | # Commented out code to display the output images: 179 | 180 | for node_id in images: 181 | for image_data in images[node_id]: 182 | import io 183 | 184 | from PIL import Image 185 | image = Image.open(io.BytesIO(image_data)) 186 | image.show() 187 | -------------------------------------------------------------------------------- /base/chatglm/code_kernel.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import os 3 | import queue 4 | import re 5 | from io import BytesIO 6 | from subprocess import PIPE 7 | from typing import Optional, Union 8 | 9 | import jupyter_client 10 | from PIL import Image 11 | 12 | IPYKERNEL = os.environ.get('IPYKERNEL', 'chatglm3') 13 | 14 | 15 | class CodeKernel(object): 16 | def __init__(self, 17 | kernel_name='kernel', 18 | kernel_id=None, 19 | kernel_config_path="", 20 | python_path=None, 21 | ipython_path=None, 22 | init_file_path="./startup.py", 23 | verbose=1): 24 | 25 | self.kernel_name = kernel_name 26 | self.kernel_id = kernel_id 27 | self.kernel_config_path = kernel_config_path 28 | self.python_path = python_path 29 | self.ipython_path = ipython_path 30 | self.init_file_path = init_file_path 31 | self.verbose = verbose 32 | 33 | if python_path is None and ipython_path is None: 34 | env = None 35 | else: 36 | env = {"PATH": self.python_path + ":$PATH", 37 | "PYTHONPATH": self.python_path} 38 | 39 | # Initialize the backend kernel 40 | self.kernel_manager = jupyter_client.KernelManager(kernel_name=IPYKERNEL, 41 | connection_file=self.kernel_config_path, 42 | exec_files=[ 43 | self.init_file_path], 44 | env=env) 45 | if self.kernel_config_path: 46 | self.kernel_manager.load_connection_file() 47 | self.kernel_manager.start_kernel(stdout=PIPE, stderr=PIPE) 48 | print("Backend kernel started with the configuration: {}".format( 49 | self.kernel_config_path)) 50 | else: 51 | self.kernel_manager.start_kernel(stdout=PIPE, stderr=PIPE) 52 | print("Backend kernel started with the configuration: {}".format( 53 | self.kernel_manager.connection_file)) 54 | 55 | if verbose: 56 | print(self.kernel_manager.get_connection_info()) 57 | 58 | # Initialize the code kernel 59 | self.kernel = self.kernel_manager.blocking_client() 60 | # self.kernel.load_connection_file() 61 | self.kernel.start_channels() 62 | print("Code kernel started.") 63 | 64 | def execute(self, code): 65 | self.kernel.execute(code) 66 | try: 67 | shell_msg = self.kernel.get_shell_msg(timeout=40) 68 | io_msg_content = self.kernel.get_iopub_msg(timeout=40)['content'] 69 | while True: 70 | msg_out = io_msg_content 71 | # Poll the message 72 | try: 73 | io_msg_content = self.kernel.get_iopub_msg(timeout=40)[ 74 | 'content'] 75 | if 'execution_state' in io_msg_content and io_msg_content['execution_state'] == 'idle': 76 | break 77 | except queue.Empty: 78 | break 79 | 80 | return shell_msg, msg_out 81 | except Exception as e: 82 | print(e) 83 | return None 84 | 85 | def execute_interactive(self, code, verbose=False): 86 | shell_msg = self.kernel.execute_interactive(code) 87 | if shell_msg is queue.Empty: 88 | if verbose: 89 | print("Timeout waiting for shell message.") 90 | self.check_msg(shell_msg, verbose=verbose) 91 | 92 | return shell_msg 93 | 94 | def inspect(self, code, verbose=False): 95 | msg_id = self.kernel.inspect(code) 96 | shell_msg = self.kernel.get_shell_msg(timeout=30) 97 | if shell_msg is queue.Empty: 98 | if verbose: 99 | print("Timeout waiting for shell message.") 100 | self.check_msg(shell_msg, verbose=verbose) 101 | 102 | return shell_msg 103 | 104 | def get_error_msg(self, msg, verbose=False) -> Optional[str]: 105 | if msg['content']['status'] == 'error': 106 | try: 107 | error_msg = msg['content']['traceback'] 108 | except BaseException: 109 | try: 110 | error_msg = msg['content']['traceback'][-1].strip() 111 | except BaseException: 112 | error_msg = "Traceback Error" 113 | if verbose: 114 | print("Error: ", error_msg) 115 | return error_msg 116 | return None 117 | 118 | def check_msg(self, msg, verbose=False): 119 | status = msg['content']['status'] 120 | if status == 'ok': 121 | if verbose: 122 | print("Execution succeeded.") 123 | elif status == 'error': 124 | for line in msg['content']['traceback']: 125 | if verbose: 126 | print(line) 127 | 128 | def shutdown(self): 129 | # Shutdown the backend kernel 130 | self.kernel_manager.shutdown_kernel() 131 | print("Backend kernel shutdown.") 132 | # Shutdown the code kernel 133 | self.kernel.shutdown() 134 | print("Code kernel shutdown.") 135 | 136 | def restart(self): 137 | # Restart the backend kernel 138 | self.kernel_manager.restart_kernel() 139 | # print("Backend kernel restarted.") 140 | 141 | def interrupt(self): 142 | # Interrupt the backend kernel 143 | self.kernel_manager.interrupt_kernel() 144 | # print("Backend kernel interrupted.") 145 | 146 | def is_alive(self): 147 | return self.kernel.is_alive() 148 | 149 | 150 | def b64_2_img(data): 151 | buff = BytesIO(base64.b64decode(data)) 152 | return Image.open(buff) 153 | 154 | 155 | def clean_ansi_codes(input_string): 156 | ansi_escape = re.compile(r'(\x9B|\x1B\[|\u001b\[)[0-?]*[ -/]*[@-~]') 157 | return ansi_escape.sub('', input_string) 158 | 159 | 160 | def execute(code, kernel: CodeKernel) -> tuple[str, Union[str, Image.Image]]: 161 | res = "" 162 | res_type = None 163 | code = code.replace("<|observation|>", "") 164 | code = code.replace("<|assistant|>interpreter", "") 165 | code = code.replace("<|assistant|>", "") 166 | code = code.replace("<|user|>", "") 167 | code = code.replace("<|system|>", "") 168 | msg, output = kernel.execute(code) 169 | 170 | if msg['metadata']['status'] == "timeout": 171 | return res_type, 'Timed out' 172 | elif msg['metadata']['status'] == 'error': 173 | return res_type, clean_ansi_codes('\n'.join(kernel.get_error_msg(msg, verbose=True))) 174 | 175 | if 'text' in output: 176 | res_type = "text" 177 | res = output['text'] 178 | elif 'data' in output: 179 | for key in output['data']: 180 | if 'image/png' in key: 181 | res_type = "image" 182 | res = output['data'][key] 183 | break 184 | elif 'text/plain' in key: 185 | res_type = "text" 186 | res = output['data'][key] 187 | 188 | if res_type == "image": 189 | return res_type, b64_2_img(res) 190 | elif res_type == "text" or res_type == "traceback": 191 | res = res 192 | 193 | return res_type, res 194 | 195 | 196 | def extract_code(text: str) -> str: 197 | pattern = r'```([^\n]*)\n(.*?)```' 198 | matches = re.findall(pattern, text, re.DOTALL) 199 | return matches[-1][1] 200 | -------------------------------------------------------------------------------- /base/func_chatglm.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | 4 | import json 5 | import os 6 | import random 7 | from datetime import datetime 8 | from typing import Optional 9 | 10 | import openai 11 | from base.chatglm.code_kernel import CodeKernel, execute 12 | from base.chatglm.tool_registry import dispatch_tool, extract_code, get_tools 13 | from wcferry import Wcf 14 | 15 | functions = get_tools() 16 | 17 | 18 | class ChatGLM: 19 | 20 | def __init__(self, config={}, wcf: Optional[Wcf] = None, max_retry=5) -> None: 21 | openai.api_key = config.get("key", "empty") 22 | # 自己搭建或第三方代理的接口 23 | openai.api_base = config["api"] 24 | proxy = config.get("proxy") 25 | if proxy: 26 | openai.proxy = {"http": proxy, "https": proxy} 27 | self.conversation_list = {} 28 | self.chat_type = {} 29 | self.max_retry = max_retry 30 | self.wcf = wcf 31 | self.filePath = config["file_path"] 32 | self.kernel = CodeKernel() 33 | self.system_content_msg = {"chat": [{"role": "system", "content": config["prompt"]}], 34 | "tool": [{"role": "system", "content": "Answer the following questions as best as you can. You have access to the following tools:"}], 35 | "code": [{"role": "system", "content": "你是一位智能AI助手,你叫ChatGLM,你连接着一台电脑,但请注意不能联网。在使用Python解决任务时,你可以运行代码并得到结果,如果运行结果有错误,你需要尽可能对代码进行改进。你可以处理用户上传到电脑上的文件,文件默认存储路径是{}。".format(self.filePath)}]} 36 | 37 | def __repr__(self): 38 | return 'ChatGLM' 39 | 40 | @staticmethod 41 | def value_check(conf: dict) -> bool: 42 | if conf: 43 | if conf.get("api") and conf.get("prompt") and conf.get("file_path"): 44 | return True 45 | return False 46 | 47 | def get_answer(self, question: str, wxid: str) -> str: 48 | # wxid或者roomid,个人时为微信id,群消息时为群id 49 | if '#帮助' == question: 50 | return '本助手有三种模式,#聊天模式 = #1 ,#工具模式 = #2 ,#代码模式 = #3 , #清除模式会话 = #4 , #清除全部会话 = #5 可用发送#对应模式 或者 #编号 进行切换' 51 | elif '#聊天模式' == question or '#1' == question: 52 | self.chat_type[wxid] = 'chat' 53 | return '已切换#聊天模式' 54 | elif '#工具模式' == question or '#2' == question: 55 | self.chat_type[wxid] = 'tool' 56 | return '已切换#工具模式 \n工具有:查看天气,日期,新闻,comfyUI文生图。例如:\n帮我生成一张小鸟的图片,提示词必须是英文' 57 | elif '#代码模式' == question or '#3' == question: 58 | self.chat_type[wxid] = 'code' 59 | return '已切换#代码模式 \n代码模式可以用于写python代码,例如:\n用python画一个爱心' 60 | elif '#清除模式会话' == question or '#4' == question: 61 | self.conversation_list[wxid][self.chat_type[wxid] 62 | ] = self.system_content_msg[self.chat_type[wxid]] 63 | return '已清除' 64 | elif '#清除全部会话' == question or '#5' == question: 65 | self.conversation_list[wxid] = self.system_content_msg 66 | return '已清除' 67 | 68 | self.updateMessage(wxid, question, "user") 69 | 70 | try: 71 | params = dict(model="chatglm3", temperature=1.0, 72 | messages=self.conversation_list[wxid][self.chat_type[wxid]], stream=False) 73 | if 'tool' == self.chat_type[wxid]: 74 | params["functions"] = functions 75 | response = openai.ChatCompletion.create(**params) 76 | for _ in range(self.max_retry): 77 | if response.choices[0].message.get("function_call"): 78 | function_call = response.choices[0].message.function_call 79 | print( 80 | f"Function Call Response: {function_call.to_dict_recursive()}") 81 | 82 | function_args = json.loads(function_call.arguments) 83 | observation = dispatch_tool( 84 | function_call.name, function_args) 85 | if isinstance(observation, dict): 86 | res_type = observation['res_type'] if 'res_type' in observation else 'text' 87 | res = observation['res'] if 'res_type' in observation else str( 88 | observation) 89 | if res_type == 'image': 90 | filename = observation['filename'] 91 | filePath = os.path.join(self.filePath, filename) 92 | res.save(filePath) 93 | self.wcf and self.wcf.send_image(filePath, wxid) 94 | tool_response = '[Image]' if res_type == 'image' else res 95 | else: 96 | tool_response = observation if isinstance( 97 | observation, str) else str(observation) 98 | print(f"Tool Call Response: {tool_response}") 99 | 100 | params["messages"].append(response.choices[0].message) 101 | params["messages"].append( 102 | { 103 | "role": "function", 104 | "name": function_call.name, 105 | "content": tool_response, # 调用函数返回结果 106 | } 107 | ) 108 | self.updateMessage(wxid, tool_response, "function") 109 | response = openai.ChatCompletion.create(**params) 110 | elif response.choices[0].message.content.find('interpreter') != -1: 111 | output_text = response.choices[0].message.content 112 | code = extract_code(output_text) 113 | self.wcf and self.wcf.send_text('代码如下:\n' + code, wxid) 114 | self.wcf and self.wcf.send_text('执行代码...', wxid) 115 | try: 116 | res_type, res = execute(code, self.kernel) 117 | except Exception as e: 118 | rsp = f'代码执行错误: {e}' 119 | break 120 | if res_type == 'image': 121 | filename = '{}.png'.format(''.join(random.sample( 122 | 'abcdefghijklmnopqrstuvwxyz1234567890', 8))) 123 | filePath = os.path.join(self.filePath, filename) 124 | res.save(filePath) 125 | self.wcf and self.wcf.send_image(filePath, wxid) 126 | else: 127 | self.wcf and self.wcf.send_text("执行结果:\n" + res, wxid) 128 | tool_response = '[Image]' if res_type == 'image' else res 129 | print("Received:", res_type, res) 130 | params["messages"].append(response.choices[0].message) 131 | params["messages"].append( 132 | { 133 | "role": "function", 134 | "name": "interpreter", 135 | "content": tool_response, # 调用函数返回结果 136 | } 137 | ) 138 | self.updateMessage(wxid, tool_response, "function") 139 | response = openai.ChatCompletion.create(**params) 140 | else: 141 | rsp = response.choices[0].message.content 142 | break 143 | 144 | self.updateMessage(wxid, rsp, "assistant") 145 | except Exception as e0: 146 | rsp = "发生未知错误:" + str(e0) 147 | 148 | return rsp 149 | 150 | def updateMessage(self, wxid: str, question: str, role: str) -> None: 151 | now_time = str(datetime.now().strftime("%Y-%m-%d %H:%M:%S")) 152 | 153 | # 初始化聊天记录,组装系统信息 154 | if wxid not in self.conversation_list.keys(): 155 | self.conversation_list[wxid] = self.system_content_msg 156 | if wxid not in self.chat_type.keys(): 157 | self.chat_type[wxid] = 'chat' 158 | 159 | # 当前问题 160 | content_question_ = {"role": role, "content": question} 161 | self.conversation_list[wxid][self.chat_type[wxid]].append( 162 | content_question_) 163 | 164 | # 只存储10条记录,超过滚动清除 165 | i = len(self.conversation_list[wxid][self.chat_type[wxid]]) 166 | if i > 10: 167 | print("滚动清除微信记录:" + wxid) 168 | # 删除多余的记录,倒着删,且跳过第一个的系统消息 169 | del self.conversation_list[wxid][self.chat_type[wxid]][1] 170 | 171 | 172 | if __name__ == "__main__": 173 | from configuration import Config 174 | config = Config().CHATGLM 175 | if not config: 176 | exit(0) 177 | 178 | chat = ChatGLM(config) 179 | 180 | while True: 181 | q = input(">>> ") 182 | try: 183 | time_start = datetime.now() # 记录开始时间 184 | print(chat.get_answer(q, "wxid")) 185 | time_end = datetime.now() # 记录结束时间 186 | 187 | # 计算的时间差为程序的执行时间,单位为秒/s 188 | print(f"{round((time_end - time_start).total_seconds(), 2)}s") 189 | except Exception as e: 190 | print(e) 191 | -------------------------------------------------------------------------------- /robot.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import logging 4 | import re 5 | import time 6 | import xml.etree.ElementTree as ET 7 | from queue import Empty 8 | from threading import Thread 9 | 10 | from base.func_chatglm import ChatGLM 11 | from base.func_chatgpt import ChatGPT 12 | from base.func_chengyu import cy 13 | from base.func_news import News 14 | from base.func_tigerbot import TigerBot 15 | from base.func_xinghuo_web import XinghuoWeb 16 | from configuration import Config 17 | from constants import ChatType 18 | from job_mgmt import Job 19 | from wcferry import Wcf, WxMsg 20 | 21 | 22 | class Robot(Job): 23 | """个性化自己的机器人 24 | """ 25 | 26 | def __init__(self, config: Config, wcf: Wcf, chat_type: int) -> None: 27 | self.wcf = wcf 28 | self.config = config 29 | self.LOG = logging.getLogger("Robot") 30 | self.wxid = self.wcf.get_self_wxid() 31 | self.allContacts = self.getAllContacts() 32 | 33 | if ChatType.is_in_chat_types(chat_type): 34 | if chat_type == ChatType.TIGER_BOT.value and TigerBot.value_check(self.config.TIGERBOT): 35 | self.chat = TigerBot(self.config.TIGERBOT) 36 | elif chat_type == ChatType.CHATGPT.value and ChatGPT.value_check(self.config.CHATGPT): 37 | self.chat = ChatGPT(self.config.CHATGPT) 38 | elif chat_type == ChatType.XINGHUO_WEB.value and XinghuoWeb.value_check(self.config.XINGHUO_WEB): 39 | self.chat = XinghuoWeb(self.config.XINGHUO_WEB) 40 | elif chat_type == ChatType.CHATGLM.value and ChatGLM.value_check(self.config.CHATGLM): 41 | self.chat = ChatGLM(self.config.CHATGLM) 42 | else: 43 | self.LOG.warning("未配置模型") 44 | self.chat = None 45 | else: 46 | if TigerBot.value_check(self.config.TIGERBOT): 47 | self.chat = TigerBot(self.config.TIGERBOT) 48 | elif ChatGPT.value_check(self.config.CHATGPT): 49 | self.chat = ChatGPT(self.config.CHATGPT) 50 | elif XinghuoWeb.value_check(self.config.XINGHUO_WEB): 51 | self.chat = XinghuoWeb(self.config.XINGHUO_WEB) 52 | elif ChatGLM.value_check(self.config.CHATGLM): 53 | self.chat = ChatGLM(self.config.CHATGLM) 54 | else: 55 | self.LOG.warning("未配置模型") 56 | self.chat = None 57 | 58 | self.LOG.info(f"已选择: {self.chat}") 59 | 60 | @staticmethod 61 | def value_check(args: dict) -> bool: 62 | if args: 63 | return all(value is not None for key, value in args.items() if key != 'proxy') 64 | return False 65 | 66 | def toAt(self, msg: WxMsg) -> bool: 67 | """处理被 @ 消息 68 | :param msg: 微信消息结构 69 | :return: 处理状态,`True` 成功,`False` 失败 70 | """ 71 | return self.toChitchat(msg) 72 | 73 | def toChengyu(self, msg: WxMsg) -> bool: 74 | """ 75 | 处理成语查询/接龙消息 76 | :param msg: 微信消息结构 77 | :return: 处理状态,`True` 成功,`False` 失败 78 | """ 79 | status = False 80 | texts = re.findall(r"^([#|?|?])(.*)$", msg.content) 81 | # [('#', '天天向上')] 82 | if texts: 83 | flag = texts[0][0] 84 | text = texts[0][1] 85 | if flag == "#": # 接龙 86 | if cy.isChengyu(text): 87 | rsp = cy.getNext(text) 88 | if rsp: 89 | self.sendTextMsg(rsp, msg.roomid) 90 | status = True 91 | elif flag in ["?", "?"]: # 查词 92 | if cy.isChengyu(text): 93 | rsp = cy.getMeaning(text) 94 | if rsp: 95 | self.sendTextMsg(rsp, msg.roomid) 96 | status = True 97 | 98 | return status 99 | 100 | def toChitchat(self, msg: WxMsg) -> bool: 101 | """闲聊,接入 ChatGPT 102 | """ 103 | if not self.chat: # 没接 ChatGPT,固定回复 104 | rsp = "你@我干嘛?" 105 | else: # 接了 ChatGPT,智能回复 106 | q = re.sub(r"@.*?[\u2005|\s]", "", msg.content).replace(" ", "") 107 | rsp = self.chat.get_answer(q, (msg.roomid if msg.from_group() else msg.sender)) 108 | 109 | if rsp: 110 | if msg.from_group(): 111 | self.sendTextMsg(rsp, msg.roomid, msg.sender) 112 | else: 113 | self.sendTextMsg(rsp, msg.sender) 114 | 115 | return True 116 | else: 117 | self.LOG.error(f"无法从 ChatGPT 获得答案") 118 | return False 119 | 120 | def processMsg(self, msg: WxMsg) -> None: 121 | """当接收到消息的时候,会调用本方法。如果不实现本方法,则打印原始消息。 122 | 此处可进行自定义发送的内容,如通过 msg.content 关键字自动获取当前天气信息,并发送到对应的群组@发送者 123 | 群号:msg.roomid 微信ID:msg.sender 消息内容:msg.content 124 | content = "xx天气信息为:" 125 | receivers = msg.roomid 126 | self.sendTextMsg(content, receivers, msg.sender) 127 | """ 128 | 129 | # 群聊消息 130 | if msg.from_group(): 131 | # 如果在群里被 @ 132 | if msg.roomid not in self.config.GROUPS: # 不在配置的响应的群列表里,忽略 133 | return 134 | 135 | if msg.is_at(self.wxid): # 被@ 136 | self.toAt(msg) 137 | 138 | else: # 其他消息 139 | self.toChengyu(msg) 140 | 141 | return # 处理完群聊信息,后面就不需要处理了 142 | 143 | # 非群聊信息,按消息类型进行处理 144 | if msg.type == 37: # 好友请求 145 | self.autoAcceptFriendRequest(msg) 146 | 147 | elif msg.type == 10000: # 系统信息 148 | self.sayHiToNewFriend(msg) 149 | 150 | elif msg.type == 0x01: # 文本消息 151 | # 让配置加载更灵活,自己可以更新配置。也可以利用定时任务更新。 152 | if msg.from_self(): 153 | if msg.content == "^更新$": 154 | self.config.reload() 155 | self.LOG.info("已更新") 156 | else: 157 | self.toChitchat(msg) # 闲聊 158 | 159 | def onMsg(self, msg: WxMsg) -> int: 160 | try: 161 | self.LOG.info(msg) # 打印信息 162 | self.processMsg(msg) 163 | except Exception as e: 164 | self.LOG.error(e) 165 | 166 | return 0 167 | 168 | def enableRecvMsg(self) -> None: 169 | self.wcf.enable_recv_msg(self.onMsg) 170 | 171 | def enableReceivingMsg(self) -> None: 172 | def innerProcessMsg(wcf: Wcf): 173 | while wcf.is_receiving_msg(): 174 | try: 175 | msg = wcf.get_msg() 176 | self.LOG.info(msg) 177 | self.processMsg(msg) 178 | except Empty: 179 | continue # Empty message 180 | except Exception as e: 181 | self.LOG.error(f"Receiving message error: {e}") 182 | 183 | self.wcf.enable_receiving_msg() 184 | Thread(target=innerProcessMsg, name="GetMessage", args=(self.wcf,), daemon=True).start() 185 | 186 | def sendTextMsg(self, msg: str, receiver: str, at_list: str = "") -> None: 187 | """ 发送消息 188 | :param msg: 消息字符串 189 | :param receiver: 接收人wxid或者群id 190 | :param at_list: 要@的wxid, @所有人的wxid为:notify@all 191 | """ 192 | # msg 中需要有 @ 名单中一样数量的 @ 193 | ats = "" 194 | if at_list: 195 | if at_list == "notify@all": # @所有人 196 | ats = " @所有人" 197 | else: 198 | wxids = at_list.split(",") 199 | for wxid in wxids: 200 | # 根据 wxid 查找群昵称 201 | ats += f" @{self.wcf.get_alias_in_chatroom(wxid, receiver)}" 202 | 203 | # {msg}{ats} 表示要发送的消息内容后面紧跟@,例如 北京天气情况为:xxx @张三 204 | if ats == "": 205 | self.LOG.info(f"To {receiver}: {msg}") 206 | self.wcf.send_text(f"{msg}", receiver, at_list) 207 | else: 208 | self.LOG.info(f"To {receiver}: {ats}\r{msg}") 209 | self.wcf.send_text(f"{ats}\n\n{msg}", receiver, at_list) 210 | 211 | def getAllContacts(self) -> dict: 212 | """ 213 | 获取联系人(包括好友、公众号、服务号、群成员……) 214 | 格式: {"wxid": "NickName"} 215 | """ 216 | contacts = self.wcf.query_sql("MicroMsg.db", "SELECT UserName, NickName FROM Contact;") 217 | return {contact["UserName"]: contact["NickName"] for contact in contacts} 218 | 219 | def keepRunningAndBlockProcess(self) -> None: 220 | """ 221 | 保持机器人运行,不让进程退出 222 | """ 223 | while True: 224 | self.runPendingJobs() 225 | time.sleep(1) 226 | 227 | def autoAcceptFriendRequest(self, msg: WxMsg) -> None: 228 | try: 229 | xml = ET.fromstring(msg.content) 230 | v3 = xml.attrib["encryptusername"] 231 | v4 = xml.attrib["ticket"] 232 | scene = int(xml.attrib["scene"]) 233 | self.wcf.accept_new_friend(v3, v4, scene) 234 | 235 | except Exception as e: 236 | self.LOG.error(f"同意好友出错:{e}") 237 | 238 | def sayHiToNewFriend(self, msg: WxMsg) -> None: 239 | nickName = re.findall(r"你已添加了(.*),现在可以开始聊天了。", msg.content) 240 | if nickName: 241 | # 添加了好友,更新好友列表 242 | self.allContacts[msg.sender] = nickName[0] 243 | self.sendTextMsg(f"Hi {nickName[0]},我自动通过了你的好友请求。", msg.sender) 244 | 245 | def newsReport(self) -> None: 246 | receivers = self.config.NEWS 247 | if not receivers: 248 | return 249 | 250 | news = News().get_important_news() 251 | for r in receivers: 252 | self.sendTextMsg(news, r) 253 | --------------------------------------------------------------------------------