├── .gitignore ├── LICENSE ├── README.md ├── cache.py ├── config.py ├── consumer.py ├── deploy.dockerfile ├── img ├── callback_config.png ├── deploy.png ├── group.jpg ├── single.jpg └── wechat.jpg ├── install.sh ├── log.py ├── main.py ├── openim ├── __init__.py └── send_msg.py ├── requirements.txt ├── robot ├── __init__.py ├── chatgpt.py └── robot.py ├── server.py └── utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Dependency directories (remove the comment below to include it) 15 | # vendor/ 16 | 17 | # Byte-compiled / optimized / DLL files 18 | __pycache__/ 19 | *.py[cod] 20 | *$py.class 21 | 22 | # C extensions 23 | *.so 24 | 25 | # Distribution / packaging 26 | .Python 27 | build/ 28 | develop-eggs/ 29 | dist/ 30 | downloads/ 31 | eggs/ 32 | .eggs/ 33 | lib/ 34 | lib64/ 35 | parts/ 36 | sdist/ 37 | var/ 38 | wheels/ 39 | share/python-wheels/ 40 | *.egg-info/ 41 | .installed.cfg 42 | *.egg 43 | MANIFEST 44 | 45 | # PyInstaller 46 | # Usually these files are written by a python script from a template 47 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
48 | *.manifest 49 | *.spec 50 | 51 | # Installer logs 52 | pip-log.txt 53 | pip-delete-this-directory.txt 54 | 55 | # Unit test / coverage reports 56 | htmlcov/ 57 | .tox/ 58 | .nox/ 59 | .coverage 60 | .coverage.* 61 | .cache 62 | nosetests.xml 63 | coverage.xml 64 | *.cover 65 | *.py,cover 66 | .hypothesis/ 67 | .pytest_cache/ 68 | cover/ 69 | 70 | # Translations 71 | *.mo 72 | *.pot 73 | 74 | # Django stuff: 75 | *.log 76 | local_settings.py 77 | db.sqlite3 78 | db.sqlite3-journal 79 | 80 | # Flask stuff: 81 | instance/ 82 | .webassets-cache 83 | 84 | # Scrapy stuff: 85 | .scrapy 86 | 87 | # Sphinx documentation 88 | docs/_build/ 89 | 90 | # PyBuilder 91 | .pybuilder/ 92 | target/ 93 | 94 | # Jupyter Notebook 95 | .ipynb_checkpoints 96 | 97 | # IPython 98 | profile_default/ 99 | ipython_config.py 100 | 101 | # pyenv 102 | # For a library or package, you might want to ignore these files since the code is 103 | # intended to run in multiple environments; otherwise, check them in: 104 | # .python-version 105 | 106 | # pipenv 107 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 108 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 109 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 110 | # install all needed dependencies. 111 | #Pipfile.lock 112 | 113 | # poetry 114 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 115 | # This is especially recommended for binary packages to ensure reproducibility, and is more 116 | # commonly ignored for libraries. 117 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 118 | #poetry.lock 119 | 120 | # pdm 121 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
122 | #pdm.lock 123 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 124 | # in version control. 125 | # https://pdm.fming.dev/#use-with-ide 126 | .pdm.toml 127 | 128 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 129 | __pypackages__/ 130 | 131 | # Celery stuff 132 | celerybeat-schedule 133 | celerybeat.pid 134 | 135 | # SageMath parsed files 136 | *.sage.py 137 | 138 | # Environments 139 | .env 140 | .venv 141 | env/ 142 | venv/ 143 | ENV/ 144 | env.bak/ 145 | venv.bak/ 146 | 147 | # Spyder project settings 148 | .spyderproject 149 | .spyproject 150 | 151 | # Rope project settings 152 | .ropeproject 153 | 154 | # mkdocs documentation 155 | /site 156 | 157 | # mypy 158 | .mypy_cache/ 159 | .dmypy.json 160 | dmypy.json 161 | 162 | # Pyre type checker 163 | .pyre/ 164 | 165 | # pytype static type analyzer 166 | .pytype/ 167 | 168 | # Cython debug symbols 169 | cython_debug/ 170 | 171 | # PyCharm 172 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 173 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 174 | # and can be added to the global gitignore or merged into this file. For a more nuclear 175 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
176 | #.idea/ 177 | 178 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 OpenIM Corporation 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ChatGPT 2 | 扫码进群交流 3 | ![](https://github.com/EthanForAi/ChatGPT/blob/main/img/wechat.jpg) 4 | ## 功能介绍 5 | 6 | 由于ChatGPT只提供了单纯的api,提供完整服务需要额外的开发工作。OpenIM把实时推送、消息记录、会话隔离、上下文管理、多端同步等强大的工程能力赋予了ChatGPT,协助开发者打造真正的聊天机器人 7 | 8 | ## 部署OpenIM 9 | 10 | 1. 
使用该机器人需要先部署openIM服务器 [open-im-server部署文档](https://doc.rentsoft.cn/#/v2/validation/all)

1.1 项目clone

```
git clone https://github.com/OpenIMSDK/Open-IM-Server.git --recursive;
```

1.2 初始化安装

```
cd Open-IM-Server; chmod +x install_im_server.sh; ./install_im_server.sh;
```

1.3 检查服务

```
cd script;./docker_check_service.sh
```

![](https://github.com/EthanForAi/ChatGPT/blob/main/docs/docker_success.png)

2. 设置callback
该机器人使用了openIM的回调功能,关于该功能具体查看openIM官网的第三方回调说明文档。[第三方回调官方文档](https://doc.rentsoft.cn/#/callback/callback)

config/config.yaml

```
callback:
  callbackUrl : "http://127.0.0.1:8080/callback"
  callbackBeforeSendSingleMsg:
    enable: true
  callbackBeforeSendGroupMsg:
    enable: true
```

3. 重启

```
docker-compose down; docker-compose up -d
```

## 部署callback打通OpenIM和ChatGPT

### docker部署(推荐)

最新镜像:openim/chat_gpt:v0.0.1

```
docker run --name open_im_chat_gpt --net=host openim/chat_gpt:v0.0.1 python3 main.py --admin_id openIM123456 --api_key {{openai key}} --secret {{secret}} --im_api_url http://127.0.0.1:10002 --robot_user_id {{your robot id}} --host 127.0.0.1 --port 8080 --redis_addr 127.0.0.1:16379 --redis_pwd openIM123
```

### 源码部署

部署版本需要python3.9(需要安装好python包管理工具pip3)

安装命令: pip3 install -r requirements.txt

运行命令

```
python3 ./main.py --admin_id openIM123456 --api_key {{openai key}} --secret {{secret}} --im_api_url http://127.0.0.1:10002 --robot_user_id {{your robot id}} --host 127.0.0.1 --port 8080 --redis_addr 127.0.0.1:16379 --redis_pwd openIM123
```

### 启动参数详解

| 参数 | 详解 |
| ------------- | ------------------------------------------------------------ |
| admin_id | openIM管理员的userID,
config.yaml文件manager.appManagerUid中的一个,默认为openIM123456 | 79 | | api_key | openai的密钥,自行获取 | 80 | | secret | openIM系统的密钥secret,.env中的PASSWORD,默认为openIM123 | 81 | | im_api_url | im消息推送api,如果单机部署则默认为http://127.0.0.1:10002 | 82 | | robot_user_id | 机器人userID,需先手动注册,英文字母和数字组成,注意不能和其他userID重复。 | 83 | | host | im消息callback ip(单机部署默认为127.0.0.1,和openIM config.yaml的callback配置一样) | 84 | | port | im消息callback 端口(默认8080,和openIM config.yaml的callback配置一样) | 85 | | redis_addr | 保存会话上下文使用redis,redis的地址, 单机部署默认为127.0.0.1:16379 | 86 | | redis_pwd | redis_pwd 密码, 单机部署默认为openIM123 | 87 | 88 | 89 | ## 部署验证和效果演示 90 | 91 | 部署成功验证![avatar](https://github.com/EthanForAi/ChatGPT/blob/main/img/deploy.png) 92 | 93 | 单聊效果演示![avatar](https://github.com/EthanForAi/ChatGPT/blob/main/img/single.jpg) 94 | 95 | 96 | 群聊效果演示![avatar](https://github.com/EthanForAi/ChatGPT/blob/main/img/group.jpg) 97 | 98 | -------------------------------------------------------------------------------- /cache.py: -------------------------------------------------------------------------------- 1 | from redis import asyncio as aioredis 2 | 3 | import config 4 | 5 | class RedisClient: 6 | def __init__(self) -> None: 7 | self.client = None 8 | self.key = "gpt_cache_" 9 | 10 | async def init_redis(self, addr, secret): 11 | l = addr.split(":") 12 | client = await aioredis.Redis(host=l[0], port=l[1], db=config.db, password=secret) 13 | self.client = client 14 | # client 15 | 16 | def get_key(self, user_id): 17 | return self.key+user_id 18 | 19 | def get_group_key(self, user_id, group_id): 20 | return self.key+group_id+"_"+user_id 21 | 22 | redis_client = RedisClient() 23 | -------------------------------------------------------------------------------- /config.py: -------------------------------------------------------------------------------- 1 | api_key = "" 2 | robot_user_id = "" 3 | admin_id = "" 4 | secret = "" 5 | im_api_url = "" 6 | redis_addr = "" 7 | redis_pwd = "" 8 | db = 10 9 | 10 | host = "" 11 | port = "" 12 | 
-------------------------------------------------------------------------------- /consumer.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import re 3 | import server 4 | 5 | from openim import Open_im_api 6 | import config 7 | import robot 8 | import json 9 | 10 | import cache 11 | 12 | import log 13 | 14 | async def start_consumer(): 15 | consumer = Consumer() 16 | await consumer.init_open_im_api() 17 | asyncio.create_task(consumer.run_group()) 18 | asyncio.create_task(consumer.run_single()) 19 | 20 | class Consumer: 21 | def __init__(self): 22 | self.open_im_api = Open_im_api(secret=config.secret, admin_id=config.admin_id, base_url=config.im_api_url) 23 | self.chat_gpt = robot.Chat_gpt() 24 | 25 | async def init_open_im_api(self): 26 | token = await self.open_im_api.get_admin_token(config.admin_id) 27 | self.open_im_api.token = token 28 | log.info("","token is {}".format(token)) 29 | 30 | async def run_single(self): 31 | while True: 32 | msg = await server.single_chat_queue.get() 33 | asyncio.create_task(self.consume_single_chat(msg)) 34 | 35 | async def run_group(self): 36 | while True: 37 | msg = await server.group_chat_queue.get() 38 | asyncio.create_task(self.consume_group_chat(msg)) 39 | 40 | async def handle_msg(self, key, content): 41 | historys = await cache.redis_client.client.lrange(key, 0, 4) 42 | # 新会话 43 | if len(historys) == 0: 44 | gpt_resp = await self.chat_gpt.ask_chat_gpt(content) 45 | else: 46 | s = list() 47 | for i in historys: 48 | s.append(i.decode("utf-8")) 49 | gpt_resp = await self.chat_gpt.ask_chat_gpt_context(content, s) 50 | if len(gpt_resp) > 0: 51 | if gpt_resp[0] == "?" 
or gpt_resp[0] == "?": 52 | gpt_resp = gpt_resp[1:] 53 | gpt_resp = gpt_resp.strip() 54 | await cache.redis_client.client.lpush(key, content, gpt_resp) 55 | await cache.redis_client.client.expire(key, 60*60*24) 56 | return gpt_resp 57 | 58 | # single 59 | async def single(self, operation_id, user_id, content): 60 | gpt_resp = await self.handle_msg(cache.redis_client.get_key(user_id), content) 61 | log.info(operation_id, "gpt resp success") 62 | await self.open_im_api.send_msg(recv_id=user_id, text=gpt_resp) 63 | 64 | # group 65 | async def group(self, operation_id, user_id, group_id, content, session_type, sender_nickname): 66 | gpt_resp = await self.handle_msg(cache.redis_client.get_group_key(user_id, group_id), content) 67 | log.info(operation_id, "gpt resp success") 68 | await self.open_im_api.send_at_msg(group_id=group_id, text=gpt_resp, at_user_id=user_id, session_type=session_type, sender_nickname=sender_nickname) 69 | 70 | 71 | # recvID is gpt 72 | async def consume_single_chat(self, msg): 73 | content = msg.get("content") 74 | send_id = msg.get("sendID") 75 | operation_id = msg.get("operationID") 76 | try: 77 | await self.single(operation_id, send_id, content) 78 | except Exception as e: 79 | log.error(operation_id, "single chatgpt failed {}".format(e)) 80 | try: 81 | await self.open_im_api.send_msg(recv_id=send_id, text=str(e)) 82 | except Exception as e2: 83 | log.error(operation_id, "send error msg failed "+e2) 84 | 85 | async def consume_group_chat(self, msg): 86 | content = msg.get("content") 87 | text = "" 88 | if content: 89 | content = json.loads(content) 90 | text = content.get("text") 91 | reg = re.compile(r'@\S+\s?') 92 | text = reg.sub("", text) 93 | 94 | group_id = msg.get("groupID") 95 | session_type = msg.get("sessionType") 96 | send_id = msg.get("sendID") 97 | operation_id = msg.get("operationID") 98 | sender_nickname = msg.get("senderNickname") 99 | if text == "": 100 | return 101 | for i in text: 102 | if i != " ": 103 | break 104 | else: 
105 | return 106 | 107 | try: 108 | await self.group(operation_id, send_id, group_id, text, session_type, sender_nickname) 109 | except Exception as e: 110 | log.error(operation_id, "chatgpt in group error {}".format(e)) 111 | try: 112 | await self.open_im_api.send_at_msg(group_id=group_id, text=str(e), at_user_id=send_id, session_type=session_type, sender_nickname=sender_nickname) 113 | except Exception as e2: 114 | log.error(operation_id, "send error msg failed, error {}".format(e2)) 115 | -------------------------------------------------------------------------------- /deploy.dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.9 2 | # Set the working directory to /app 3 | WORKDIR /robot 4 | 5 | RUN apt-get update && apt-get install -y \ 6 | build-essential \ 7 | libssl-dev \ 8 | libffi-dev \ 9 | python3-dev \ 10 | python3-pip 11 | 12 | 13 | # Copy the current directory contents into the container at /app 14 | COPY . /robot 15 | 16 | # Install any needed packages specified in requirements.txt 17 | RUN pip3 install --no-cache-dir -r requirements.txt 18 | 19 | CMD ["python3", "main.py"] -------------------------------------------------------------------------------- /img/callback_config.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EthanForAi/ChatGPT/fece2097da1825c8ef9dce6c147127ddf76205f0/img/callback_config.png -------------------------------------------------------------------------------- /img/deploy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EthanForAi/ChatGPT/fece2097da1825c8ef9dce6c147127ddf76205f0/img/deploy.png -------------------------------------------------------------------------------- /img/group.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/EthanForAi/ChatGPT/fece2097da1825c8ef9dce6c147127ddf76205f0/img/group.jpg -------------------------------------------------------------------------------- /img/single.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EthanForAi/ChatGPT/fece2097da1825c8ef9dce6c147127ddf76205f0/img/single.jpg -------------------------------------------------------------------------------- /img/wechat.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EthanForAi/ChatGPT/fece2097da1825c8ef9dce6c147127ddf76205f0/img/wechat.jpg -------------------------------------------------------------------------------- /install.sh: -------------------------------------------------------------------------------- 1 | python3 --version 2 | pip install -r requirements.txt 3 | -------------------------------------------------------------------------------- /log.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from datetime import datetime 3 | 4 | log = logging.getLogger() 5 | stream_handler = logging.StreamHandler() 6 | stream_handler.setLevel(logging.INFO) 7 | log.setLevel(logging.INFO) 8 | log.addHandler(stream_handler) 9 | 10 | def info(operation_id, text): 11 | log.info("{} [INFO] [operationID:{}] [{}]".format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), operation_id, text)) 12 | 13 | def warn(operation_id, text): 14 | log.warn("{} [INFO] [operationID:{}] [{}]".format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), operation_id, text)) 15 | 16 | def debug(operation_id, text): 17 | log.debug("{} [INFO] [operationID:{}] [{}]".format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), operation_id, text)) 18 | 19 | def error(operation_id, text): 20 | log.error("{} [INFO] [operationID:{}] [{}]".format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), operation_id, text)) 21 | 
-------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import argparse 3 | import sys 4 | import openai 5 | 6 | # import uvloop 7 | # asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) 8 | from server import start_server 9 | import config 10 | import consumer 11 | import cache 12 | import log 13 | 14 | def parse_arguments(argv): 15 | parser = argparse.ArgumentParser() 16 | parser.add_argument('--admin_id', type=str, default=config.admin_id, help='openIM adminID') 17 | parser.add_argument('--api_key', type=str, default=config.api_key, help="chatgpt's apiKey") 18 | parser.add_argument('--secret', type=str, default=config.secret, help='openIM secret') 19 | parser.add_argument('--im_api_url', type=str, default=config.im_api_url, help='openIM api url') 20 | parser.add_argument('--robot_user_id',type=str, default=config.robot_user_id, help='robot userID in openIM') 21 | parser.add_argument("--redis_addr", type=str, default=config.host, help="redis addr") 22 | parser.add_argument("--redis_pwd", type=str, default=config.host, help="redis pwd") 23 | parser.add_argument('--host', type=str, default =config.host, help='robot server listen host') 24 | parser.add_argument('--port', type=int, default =config.port, help='robot server listen port') 25 | args = parser.parse_args(argv) 26 | if args.admin_id: 27 | config.admin_id = args.admin_id 28 | if args.api_key: 29 | config.api_key = args.api_key 30 | if args.secret: 31 | config.secret = args.secret 32 | if args.im_api_url: 33 | config.im_api_url = args.im_api_url 34 | if args.robot_user_id: 35 | config.robot_user_id = args.robot_user_id 36 | if args.redis_addr: 37 | config.redis_addr = args.redis_addr 38 | if args.redis_pwd: 39 | config.redis_pwd = args.redis_pwd 40 | if args.host: 41 | config.host = args.host 42 | if args.port: 43 | config.port = args.port 44 | if args.api_key: 45 | 
config.api_key = args.api_key 46 | openai.api_key = config.api_key 47 | openai.api_base = "https://api.openai.com/v2" 48 | # openai. 49 | return args 50 | 51 | async def main(): 52 | args = parse_arguments(sys.argv[1:]) 53 | await cache.redis_client.init_redis(args.redis_addr, args.redis_pwd) 54 | asyncio.create_task(start_server(config.host, config.port)) 55 | asyncio.create_task(consumer.start_consumer()) 56 | 57 | if __name__ == "__main__": 58 | log.info("","start robot callback server") 59 | loop = asyncio.get_event_loop() 60 | loop.create_task(main()) 61 | loop.run_forever() -------------------------------------------------------------------------------- /openim/__init__.py: -------------------------------------------------------------------------------- 1 | from .send_msg import * -------------------------------------------------------------------------------- /openim/send_msg.py: -------------------------------------------------------------------------------- 1 | import string 2 | import json 3 | import aiohttp 4 | 5 | import config 6 | import utils 7 | 8 | class Open_im_api: 9 | def __init__(self, secret, admin_id, base_url) -> None: 10 | self.secret = secret 11 | self.admin_id = admin_id 12 | self.base_url = base_url 13 | self.token = "" 14 | 15 | async def request(self, route, data): 16 | headers = {"token": self.token} 17 | resp = await self.make_post_request(self.base_url+route, data=data, headers=headers) 18 | if resp.get("errCode") != 0: 19 | raise Exception("errCode is {}, errMsg is{}".format(resp.get("errCode"), resp.get("errMsg"))) 20 | return resp 21 | 22 | async def make_post_request(self, url, data, headers): 23 | async with aiohttp.ClientSession() as session: 24 | data = json.dumps(data) 25 | async with session.post(url, data=data, headers=headers) as response: 26 | return await response.json() 27 | 28 | async def get_admin_token(self, admin_id) -> string: 29 | data = { 30 | "secret": config.secret, 31 | "platform": 1, 32 | "userID": admin_id, 
33 | "operationID": "robot get token" 34 | } 35 | resp = await self.request("/auth/user_token", data=data) 36 | return resp["data"]["token"] 37 | 38 | @utils.async_retry(num_retries=3, delay=0.1) 39 | async def send_msg(self, recv_id, text): 40 | msg = { 41 | "operationID": "chatgptoperationid", 42 | "sendID": config.robot_user_id, 43 | "recvID": recv_id, 44 | "senderPlatformID": 1, 45 | "content": { 46 | "text": text 47 | }, 48 | "contentType": 101, 49 | "sessionType": 1, 50 | "isOnlineOnly": False 51 | } 52 | await self.request("/msg/manage_send_msg", msg) 53 | 54 | @utils.async_retry(num_retries=3, delay=0.1) 55 | async def send_at_msg(self, group_id, at_user_id, text, session_type, sender_nickname): 56 | msg = { 57 | "operationID": "chatgptoperationid", 58 | "sendID": config.robot_user_id, 59 | "groupID": group_id, 60 | "senderPlatformID": 1, 61 | "senderNickName": "ChatGPT", 62 | "content": { 63 | "text": "@{} \n".format(at_user_id)+text, 64 | "atUserList": [at_user_id], 65 | "atUsersInfo": [ 66 | {"atUserID": at_user_id, "groupNickname": sender_nickname} 67 | ], 68 | "isAtSelf": False, 69 | }, 70 | "contentType": 106, 71 | "sessionType": session_type, 72 | "isOnlineOnly": False 73 | } 74 | print(msg) 75 | await self.request("/msg/manage_send_msg", msg) 76 | 77 | 78 | async def get_user_info(self, user_id): 79 | msg = { 80 | 81 | } -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EthanForAi/ChatGPT/fece2097da1825c8ef9dce6c147127ddf76205f0/requirements.txt -------------------------------------------------------------------------------- /robot/__init__.py: -------------------------------------------------------------------------------- 1 | from .robot import * 2 | from .chatgpt import * -------------------------------------------------------------------------------- /robot/chatgpt.py: 
-------------------------------------------------------------------------------- 1 | import openai 2 | 3 | from robot import robot 4 | 5 | engine = "gpt-3.5-turbo-0301" 6 | davinci_engine = "text-davinci-003" 7 | 8 | class Chat_gpt(robot): 9 | def __init__(self) -> None: 10 | super().__init__() 11 | self.request_timeout = 60 12 | 13 | # @utils.async_retry(num_retries=3, delay=0.1) 14 | async def ask_chat_gpt(self, question): 15 | prompt = question 16 | completions = await openai.Completion.acreate( 17 | model=engine, 18 | # engine=engine, 19 | prompt=prompt, 20 | max_tokens=1024, 21 | n=1, 22 | temperature=0.5, 23 | request_timeout=self.request_timeout, 24 | ) 25 | return completions.choices[0].text 26 | 27 | # @utils.async_retry(num_retries=3, delay=0.1) 28 | async def ask_chat_gpt_context(self, question, args): 29 | args.reverse() 30 | args.append(question) 31 | prompt = '\n'.join(args)[-1000:] 32 | completions = await openai.Completion.acreate( 33 | model=engine, 34 | # engine=engine, 35 | prompt=prompt, 36 | max_tokens=2500, 37 | n=1, 38 | temperature=0.5, 39 | request_timeout=self.request_timeout, 40 | ) 41 | return completions.choices[0].text.strip() 42 | -------------------------------------------------------------------------------- /robot/robot.py: -------------------------------------------------------------------------------- 1 | 2 | class robot: 3 | def __init__(self) -> None: 4 | pass 5 | 6 | -------------------------------------------------------------------------------- /server.py: -------------------------------------------------------------------------------- 1 | from aiohttp import web 2 | import asyncio 3 | import json 4 | 5 | import log 6 | import config 7 | 8 | 9 | app = web.Application() 10 | single_chat_queue = asyncio.Queue() 11 | group_chat_queue = asyncio.Queue() 12 | 13 | resp = { 14 | "actionCode": 0, 15 | "errCode": 0, 16 | "errMsg": "", 17 | "operationID": "", 18 | } 19 | 20 | class Server(web.View): 21 | async def post(self): 22 | 
body = await self.request.json() 23 | body = dict(body) 24 | operation_id = body.get("operationID") 25 | recv_id = body.get("recvID") 26 | send_id = body.get("sendID") 27 | content_type = body.get("contentType") 28 | callback_command = body.get("callbackCommand") 29 | 30 | group_id = body.get("groupID") 31 | log.info(operation_id, body) 32 | if (callback_command=="callbackBeforeSendSingleMsgCommand" or callback_command == "callbackBeforeSendGroupMsgCommand") and send_id != config.robot_user_id: 33 | 34 | if group_id and content_type == 106: 35 | content = body.get("content") 36 | at_content = json.loads(content) 37 | at_user_list = at_content.get("atUserList") 38 | if config.robot_user_id in at_user_list: 39 | log.info(operation_id, "recv group msg") 40 | await group_chat_queue.put(body) 41 | if recv_id == config.robot_user_id and content_type == 101: 42 | log.info(operation_id, "recv single msg") 43 | await single_chat_queue.put(body) 44 | return web.json_response(resp) 45 | 46 | app.router.add_view("/callback", Server) 47 | 48 | def start_server(host, port): 49 | web_server = web._run_app(app, host=host, port=port) 50 | return web_server 51 | 52 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | def async_retry(num_retries=3, exceptions=(Exception,), delay=0.1): 4 | def decorator(func): 5 | async def wrapper(*args, **kwargs): 6 | for i in range(num_retries): 7 | try: 8 | result = await func(*args, **kwargs) 9 | return result 10 | except exceptions as e: 11 | if i == num_retries - 1: 12 | raise e 13 | await asyncio.sleep(delay) 14 | return wrapper 15 | return decorator --------------------------------------------------------------------------------