├── LICENSE ├── MIGPT.py ├── README.md ├── V3.py ├── miaccount.py ├── minaservice.py ├── requirements.txt └── 效果.png /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Afool4U 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /MIGPT.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import asyncio 3 | import json 4 | import os 5 | import subprocess 6 | from http.cookies import SimpleCookie 7 | from pathlib import Path 8 | import threading 9 | import time 10 | from aiohttp import ClientSession 11 | from minaservice import MiNAService 12 | from miaccount import MiAccount 13 | from requests.utils import cookiejar_from_dict 14 | from V3 import Chatbot 15 | 16 | LATEST_ASK_API = "https://userprofile.mina.mi.com/device_profile/v2/conversation?source=dialogu&hardware={hardware}×tamp={timestamp}&limit=2" 17 | COOKIE_TEMPLATE = "deviceId={device_id}; serviceToken={service_token}; userId={user_id}" 18 | 19 | HARDWARE_COMMAND_DICT = { 20 | "LX06": "5-1", # 小爱音箱Pro(黑色) 21 | "L05B": "5-3", # 小爱音箱Play 22 | "S12A": "5-1", # 小爱音箱 23 | "LX01": "5-1", # 小爱音箱mini 24 | "L06A": "5-1", # 小爱音箱 25 | "LX04": "5-1", # 小爱触屏音箱 26 | "L05C": "5-3", # 小爱音箱Play增强版 27 | "L17A": "7-3", # 小爱音箱Sound Pro 28 | "X08E": "7-3", # 红米小爱触屏音箱Pro 29 | "LX05A": "5-1", # 小爱音箱遥控版(黑色) 30 | "LX5A": "5-1", # 小爱音箱遥控版(黑色) 31 | # add more here 32 | } 33 | MI_USER = "你的小米账号" # 小米账号(手机号) 34 | MI_PASS = "你的小米账号密码" # 小米账号密码 35 | OPENAI_API_KEY = "你的API KEY" # openai的api key 36 | SOUND_TYPE = "你的音箱型号" # 音箱型号 37 | 38 | # 检查必要的数据 39 | if MI_USER == "你的小米账号": 40 | raise ValueError("请先在MIGPT.py中填写小米账号!") 41 | if MI_PASS == "你的小米账号密码": 42 | raise ValueError("请先在MIGPT.py中填写小米账号密码!") 43 | if OPENAI_API_KEY == "你的API KEY": 44 | raise ValueError("请先在MIGPT.py中填写openai的api key!") 45 | if SOUND_TYPE == "你的音箱型号": 46 | raise ValueError("请先在MIGPT.py中填写音箱型号!") 47 | if SOUND_TYPE not in HARDWARE_COMMAND_DICT: 48 | raise ValueError("{}不在型号列表中!请检查型号是否正确。".format(SOUND_TYPE)) 49 | 50 | SWITCH = True # 是否开启chatgpt回答 51 | PROMPT = "请用100字以内回答,第一句一定不要超过10个汉字或5个单词,并且请快速生成前几句话" # 限制回答字数在100以内 52 | 53 | loop = asyncio.get_event_loop() 54 | 55 | 56 | ### HELP FUNCTION ### 57 | def parse_cookie_string(cookie_string): 58 | cookie = SimpleCookie() 59 | cookie.load(cookie_string) 60 | cookies_dict = {} 61 | 
cookiejar = None 62 | for k, m in cookie.items(): 63 | cookies_dict[k] = m.value 64 | cookiejar = cookiejar_from_dict(cookies_dict, cookiejar=None, overwrite=True) 65 | return cookiejar 66 | 67 | 68 | class MiGPT: 69 | def __init__( 70 | self, 71 | hardware=SOUND_TYPE, 72 | use_command=False, 73 | ): 74 | self.mi_token_home = os.path.join(Path.home(), "." + MI_USER + ".mi.token") 75 | self.hardware = hardware 76 | self.cookie_string = "" 77 | self.last_timestamp = 0 # timestamp last call mi speaker 78 | self.session = None 79 | self.chatbot = None # a little slow to init we move it after xiaomi init 80 | self.user_id = "" 81 | self.device_id = "" 82 | self.service_token = "" 83 | self.cookie = "" 84 | self.use_command = use_command 85 | self.tts_command = HARDWARE_COMMAND_DICT.get(hardware, "5-1") 86 | self.conversation_id = None 87 | self.parent_id = None 88 | self.miboy_account = None 89 | self.mina_service = None 90 | 91 | async def init_all_data(self, session): 92 | await self.login_miboy(session) 93 | await self._init_data_hardware() 94 | with open(self.mi_token_home) as f: 95 | user_data = json.loads(f.read()) 96 | self.user_id = user_data.get("userId") 97 | self.service_token = user_data.get("micoapi")[1] 98 | self._init_cookie() 99 | await self._init_first_data_and_chatbot() 100 | 101 | async def login_miboy(self, session): 102 | self.session = session 103 | self.account = MiAccount( 104 | session, 105 | MI_USER, 106 | MI_PASS, 107 | str(self.mi_token_home), 108 | ) 109 | # Forced login to refresh token 110 | await self.account.login("micoapi") 111 | self.mina_service = MiNAService(self.account) 112 | 113 | async def _init_data_hardware(self): 114 | if self.cookie: 115 | # cookie does not need init 116 | return 117 | hardware_data = await self.mina_service.device_list() 118 | for h in hardware_data: 119 | if h.get("hardware", "") == self.hardware: 120 | self.device_id = h.get("deviceID") 121 | break 122 | else: 123 | raise Exception(f"we have no hardware: {self.hardware} please check") 124 | 125 | def _init_cookie(self): 126 | if self.cookie: 127 | self.cookie = parse_cookie_string(self.cookie) 128 | else: 129 | self.cookie_string = COOKIE_TEMPLATE.format( 130 | device_id=self.device_id, 131 | service_token=self.service_token, 132 | user_id=self.user_id, 133 | ) 134 | self.cookie = parse_cookie_string(self.cookie_string) 135 | 136 | async def _init_first_data_and_chatbot(self): 137 | data = await self.get_latest_ask_from_xiaoai() 138 | self.last_timestamp, self.last_record = self.get_last_timestamp_and_record(data) 139 | self.chatbot = Chatbot(api_key=OPENAI_API_KEY) 140 | 141 | async def get_latest_ask_from_xiaoai(self): 142 | r = await self.session.get( 143 | LATEST_ASK_API.format( 144 | hardware=self.hardware, timestamp=str(int(time.time() * 1000)) 145 | ), 146 | cookies=parse_cookie_string(self.cookie), 147 | ) 148 | return await r.json() 149 | 150 | def get_last_timestamp_and_record(self, data): 151 | if d := data.get("data"): 152 | records = json.loads(d).get("records") 153 | if not records: 154 | return 0, None 155 | last_record = records[0] 156 | timestamp = last_record.get("time") 157 | return timestamp, last_record 158 | 159 | async def do_tts(self, value): 160 | if not self.use_command: 161 | try: 162 | await self.mina_service.text_to_speech(self.device_id, value) 163 | except: 164 | # do nothing is ok 165 | pass 166 | else: 167 | subprocess.check_output(["micli", self.tts_command, value]) 168 | 169 | async def get_if_xiaoai_is_playing(self): 170 | playing_info = await 
self.mina_service.player_get_status(self.device_id) 171 | # WTF xiaomi api 172 | is_playing = ( 173 | json.loads(playing_info.get("data", {}).get("info", "{}")).get("status", -1) 174 | == 1 175 | ) 176 | return is_playing 177 | 178 | async def stop_if_xiaoai_is_playing(self): 179 | is_playing = await self.get_if_xiaoai_is_playing() 180 | if is_playing: 181 | # stop it 182 | await self.mina_service.player_pause(self.device_id) 183 | 184 | async def check_new_query(self, session): 185 | try: 186 | r = await self.get_latest_ask_from_xiaoai() 187 | except Exception: 188 | # we try to init all again 189 | await self.init_all_data(session) 190 | r = await self.get_latest_ask_from_xiaoai() 191 | new_timestamp, last_record = self.get_last_timestamp_and_record(r) 192 | if new_timestamp > self.last_timestamp: 193 | return new_timestamp, last_record.get("query", "") 194 | return False, None 195 | 196 | async def run_forever(self): 197 | global SWITCH 198 | print("正在运行 MiGPT, 请用\"打开/关闭高级对话\"控制对话模式。") 199 | async with ClientSession() as session: 200 | await self.init_all_data(session) 201 | while True: 202 | try: 203 | r = await self.get_latest_ask_from_xiaoai() 204 | except Exception: 205 | # we try to init all again 206 | await self.init_all_data(session) 207 | r = await self.get_latest_ask_from_xiaoai() 208 | new_timestamp, last_record = self.get_last_timestamp_and_record(r) 209 | if new_timestamp > self.last_timestamp: 210 | self.last_timestamp = new_timestamp 211 | query = last_record.get("query", "") 212 | if query.startswith('闭嘴') or query.startswith('停止'): # 反悔操作 213 | await self.stop_if_xiaoai_is_playing() 214 | continue 215 | if query.startswith('打开高级对话') or query.startswith('开启高级对话'): 216 | SWITCH = True 217 | print("\033[1;32m高级对话已开启\033[0m") 218 | await self.do_tts("高级对话已开启") 219 | continue 220 | if query.startswith('关闭高级对话'): 221 | SWITCH = False 222 | print("\033[1;32m高级对话已关闭\033[0m") 223 | await self.do_tts("高级对话已关闭") 224 | continue 225 | if SWITCH: 226 | commas = 0 227 | wait_times = 3 228 | await self.stop_if_xiaoai_is_playing() 229 | query = f"{query},{PROMPT}" 230 | try: 231 | print( 232 | "以下是小爱的回答: ", 233 | last_record.get("answers")[0] 234 | .get("tts", {}) 235 | .get("text").strip(), 236 | ) 237 | except: 238 | print("小爱没回") 239 | print("以下是GPT的回答: ", end="") 240 | lock = threading.Lock() 241 | stop_event = threading.Event() 242 | thread = threading.Thread(target=self.chatbot.ask_stream, args=(query, lock, stop_event)) 243 | thread.start() 244 | while 1: 245 | success = lock.acquire(blocking=False) 246 | if success: # 如果成功获取锁 247 | try: 248 | this_sentence = self.chatbot.sentence # 获取句子(目前的) 249 | if this_sentence == "" and not thread.is_alive(): 250 | break 251 | is_a_sentence = False 252 | for x in ((",", "。", "?", "!", ";", ",", ".", "?", "!", ";") 253 | if commas <= wait_times else ("。", "?", "!", ";", ".", "?", "!", ";")): 254 | pos = this_sentence.rfind(x) 255 | if pos != -1: 256 | is_a_sentence = True 257 | # 取出完整的句组,剩下的放回去 258 | self.chatbot.sentence = this_sentence[pos + 1:] 259 | this_sentence = this_sentence[:pos + 1] 260 | break 261 | finally: 262 | lock.release() 263 | else: 264 | time.sleep(0.01) 265 | continue 266 | if not is_a_sentence: 267 | time.sleep(0.01) 268 | continue 269 | if not await self.get_if_xiaoai_is_playing(): 270 | if commas <= wait_times: 271 | commas += sum([1 for x in this_sentence if 272 | x in {",", "。", "?", "!", ";", ",", ".", "?", "!", ";"}]) + 1 273 | await self.do_tts(this_sentence) 274 | while await self.get_if_xiaoai_is_playing() and 
not \
275 |                                     (await self.check_new_query(session))[0]:
276 |                                 await asyncio.sleep(0.1)
277 |                             time_stamp, query = await self.check_new_query(session)
278 |                             if time_stamp:
279 |                                 stop_event.set()
280 |                                 while True:
281 |                                     success = lock.acquire(blocking=False)
282 |                                     if success:
283 |                                         try:
284 |                                             self.chatbot.sentence = ""
285 |                                         finally:
286 |                                             lock.release()
287 |                                         break
288 |                                 await self.stop_if_xiaoai_is_playing()
289 |                                 await self.do_tts('')  # use an empty utterance to interrupt current playback
290 |                                 if not self.chatbot.has_printed:
291 |                                     print()
292 |                                 if query.startswith('闭嘴'):
293 |                                     self.last_timestamp = time_stamp
294 |                                     # print colored info
295 |                                     print('\033[1;34m' + 'INFO: ' + '\033[0m', end='')
296 |                                     print('\033[1;31m' + 'ChatGPT暂停回答' + '\033[0m')
297 |                                 else:
298 |                                     print('\033[1;34m' + 'INFO: ' + '\033[0m', end='')
299 |                                     print('\033[1;33m' + '有新的问答,ChatGPT停止当前回答' + '\033[0m')
300 |                                 break
301 | 
302 | 
303 | if __name__ == "__main__":
304 |     miboy = MiGPT()
305 |     asyncio.run(miboy.run_forever())
306 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # MIGPT
2 | A low-latency MIGPT based on streaming API conversations
3 | 
4 | ## Quick introduction
5 | This project uses the native streaming mode of the official OpenAI API, so there is no waiting: the reply starts right away!
6 | 
7 | On the author's laptop, the measured time from question to answer is under 1.5 seconds (which of course also depends on your proxy and network quality).
8 | 
9 | _* Watch the 2.5-minute demo video via [this link](https://v.douyin.com/Sg8rMrJ/)_
10 | 
11 | ![image](https://github.com/Afool4U/MIGPT/blob/main/%E6%95%88%E6%9E%9C.png)
12 | 
13 | _Note: because this project relies on streaming playback, the LX04, L05B and L05C models are not supported for now. If your speaker is one of these models, please use [xiaogpt](https://github.com/yihong0618/xiaogpt) instead._
14 | 
15 | ## Details
16 | ### Project description
17 | 
18 | This project integrates ChatGPT with XiaoAI (小爱同学) to build an innovative smart-home control solution. Through efficient API calls and streaming dialogue, it provides fast, natural device control and interaction. Core features include streaming conversation over the official OpenAI API, real-time device status monitoring and control, and automatic LLM invocation based on a fine-tuned BERT model with dynamic quantization or on TF-IDF features with an SVM (_this feature has not been uploaded yet; if you need it, join the QQ group: 622695590_).
19 | 
20 | ### Main work
21 | 
22 | - **Integrating ChatGPT with XiaoAI**: efficient API calls give a seamless connection to ChatGPT for instant, accurate conversational interaction, while XiaoAI's device-control abilities extend how the smart home can be operated.
23 | - **Streaming dialogue processing**: a producer-consumer streaming pipeline with a sentence-splitting algorithm lets the speaker start responding before the full reply has arrived, cutting average waiting time by roughly 80% compared with similar projects and greatly improving interaction efficiency and user satisfaction (a simplified sketch of the idea is given in the appendix below).
24 | - **Self-developed LLM-invocation algorithm**: deep-learning and machine-learning techniques replace manually switching the model on and off with automatic LLM invocation, adding the LLM's capabilities without interfering with home control, weather queries and similar operations, for truly seamless integration. Two models are offered: a fine-tuned BERT classifier with 8-bit dynamic quantization, and a TF-IDF + SVM classifier. Trained on a self-collected dataset, both reach over 90% accuracy (an illustrative sketch of the TF-IDF + SVM variant is also given in the appendix below).
25 | - **User-experience optimization**: the design pays close attention to user experience, handling concurrent requests with an asynchronous programming model and improving comfort through dynamic prompts and timely feedback.
26 | 
27 | ### Project highlights
28 | 
29 | - **Balancing real-time behavior and efficiency**: keeping the dialogue real-time while optimizing the algorithm to reduce processing delay is a key technical point of this project; the streaming sentence-splitting algorithm solves this and delivers fast responses.
30 | - **LLM-invocation algorithm**: conventional integrations cannot use the native XiaoAI model and the GPT model at the same time; you have to pick one, which makes long-term real-world deployment impractical. Using NLP techniques and a self-collected dataset, a deep-learning model and a machine-learning model were trained so that the LLM is invoked automatically instead of being toggled by hand, adding the LLM's capabilities without affecting home control, weather queries and other operations, for truly seamless integration.
31 | 
32 | ## Usage
33 | _Three steps:_
34 | 
35 | ### Step 1
36 | 
37 | Run pip install -r requirements.txt in the project directory to install the dependencies.
38 | If you have no C++ build environment, installing tiktoken fails with: distutils.errors.DistutilsPlatformError: Microsoft Visual C++ 14.0 or greater is required. Get it with "Microsoft C++ Build Tools".
39 | 
40 | Workaround:
41 | 
42 | (1) Run pip debug --verbose locally to see which wheel tags your platform supports, then download the matching tiktoken wheel from [this index](https://pypi.tuna.tsinghua.edu.cn/simple/tiktoken/).
43 | 
44 | (2) In the directory containing the wheel, run pip install "full wheel filename including the extension". Note: do not rename the original wheel file.
45 | 
46 | ### Step 2
47 | Fill in your Xiaomi account, password, [API Key](https://platform.openai.com/account/api-keys) and speaker model in [MIGPT.py](https://github.com/Afool4U/MIGPT/blob/main/MIGPT.py).
48 | 
49 | ### Step 3
50 | With a working proxy for the OpenAI API, just run [MIGPT.py](https://github.com/Afool4U/MIGPT/blob/main/MIGPT.py).
51 | 
52 | ## Tips
53 | 
54 | 1. While the program is running, say "打开高级对话" / "关闭高级对话" (open / close advanced dialogue) to switch ChatGPT on or off.
55 | 2. While ChatGPT is answering, say "闭嘴" (shut up) or "停止" (stop) to cut the answer short.
56 | 3. You can also interrupt ChatGPT's answer at any time simply by asking a new question.
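
## Appendix: illustrative sketches

The sketches in this appendix are simplified illustrations only; they are not part of the project's source files.

### Streaming sentence splitting (sketch)

The streaming sentence splitting described above works roughly as follows in MIGPT.py and V3.py: `Chatbot.ask_stream` runs in a producer thread and appends each streamed delta to a shared text buffer under a lock, while the main loop repeatedly cuts the buffer at the last pause mark (any comma-level punctuation for the first few chunks, then only sentence-ending punctuation) and hands each cut piece to the speaker's TTS. The snippet below is a minimal, self-contained sketch of that idea under those assumptions; the helper names, the fake token stream and the `print` call standing in for `do_tts()` are placeholders for illustration, not the project's actual interface.

```python
import threading
import time

# Early on, cut at any pause mark so audio starts quickly; later, cut only at
# sentence-ending marks to avoid choppy speech.
ANY_PAUSE = ",。?!;,.?!;"
SENTENCE_END = "。?!;.?!;"

buffer = ""                 # shared text buffer: producer appends, consumer cuts
lock = threading.Lock()
done = threading.Event()


def producer(chunks):
    """Simulate the streaming API by appending each delta to the shared buffer."""
    global buffer
    for chunk in chunks:
        with lock:
            buffer += chunk
        time.sleep(0.05)    # pretend network latency between deltas
    done.set()


def consumer(max_quick_cuts=3):
    """Repeatedly cut the buffer at the last suitable punctuation mark and 'speak' it."""
    global buffer
    cuts = 0
    while True:
        with lock:
            marks = ANY_PAUSE if cuts < max_quick_cuts else SENTENCE_END
            pos = max(buffer.rfind(m) for m in marks)
            if pos == -1 and done.is_set():
                sentence, buffer = buffer, ""          # flush whatever is left
                finished = True
            else:
                finished = False
                if pos != -1:
                    sentence, buffer = buffer[:pos + 1], buffer[pos + 1:]
                else:
                    sentence = ""
        if sentence:
            cuts += 1
            print("TTS>", sentence)                    # the real project calls do_tts() here
        if finished:
            break
        if not sentence:
            time.sleep(0.01)


fake_stream = ["你好,", "我是", "一段", "流式", "返回的", "回答。", "后面的", "内容会", "按整句", "播报。"]
thread = threading.Thread(target=producer, args=(fake_stream,))
thread.start()
consumer()
thread.join()
```

Compared with buffering the whole reply, this produces the first TTS call as soon as the first comma arrives, which is where the latency saving described above comes from.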
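### Query classifier for automatic LLM invocation (sketch)

The automatic LLM-invocation classifier described in the "Main work" and "Project highlights" sections is not included in this repository. Purely as an illustration of what the "TF-IDF features + SVM" variant could look like, here is a tiny sketch using scikit-learn; scikit-learn is an assumed extra dependency (it is not in requirements.txt), and the labelled queries below are made-up examples, not the project's dataset.

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC

# Label 0: let the native XiaoAI skills handle it (device control, weather, ...).
# Label 1: route the query to ChatGPT.
train_queries = [
    "打开客厅的灯", "把空调调到26度", "今天天气怎么样", "定一个明天七点的闹钟",
    "给我讲一个关于太空旅行的故事", "帮我写一封道歉邮件", "解释一下什么是量子计算", "用三句话总结一下三国演义",
]
train_labels = [0, 0, 0, 0, 1, 1, 1, 1]

# Character n-grams avoid the need for a Chinese word segmenter.
classifier = make_pipeline(
    TfidfVectorizer(analyzer="char_wb", ngram_range=(1, 3)),
    LinearSVC(),
)
classifier.fit(train_queries, train_labels)


def should_use_gpt(query: str) -> bool:
    """Return True if the query looks like an open-ended request for the LLM."""
    return bool(classifier.predict([query])[0])


if __name__ == "__main__":
    for q in ["把卧室的灯关掉", "帮我想一个生日祝福"]:
        print(q, "->", "ChatGPT" if should_use_gpt(q) else "XiaoAI")
```

In a real deployment such a classifier would sit in front of the `SWITCH` flag in MIGPT.py and decide per query whether to forward it to ChatGPT, which is the behaviour the README describes.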
57 | 
58 | ## Acknowledgements
59 | 
60 | - @[yihong0618](https://github.com/yihong0618)'s [xiaogpt](https://github.com/yihong0618/xiaogpt)
61 | - @[acheong08](https://github.com/acheong08)'s [ChatGPT](https://github.com/acheong08/ChatGPT)
62 | - @[Yonsm](https://github.com/Yonsm)'s [MiService](https://github.com/Yonsm/MiService)
63 | 
64 | ## Contact
65 | 
66 | Contact the author via QQ: 2312163474
67 | 
--------------------------------------------------------------------------------
/V3.py:
--------------------------------------------------------------------------------
1 | """
2 | A simple wrapper for the official ChatGPT API
3 | """
4 | import json
5 | import requests
6 | import tiktoken
7 | 
8 | 
9 | class Chatbot:
10 |     """
11 |     Official ChatGPT API
12 |     """
13 | 
14 |     def __init__(
15 |         self,
16 |         api_key: str,
17 |         engine: str = "gpt-3.5-turbo",
18 |         proxy: str = None,
19 |         max_tokens: int = 3000,
20 |         temperature: float = 0.5,
21 |         top_p: float = 1.0,
22 |         presence_penalty: float = 0.0,
23 |         frequency_penalty: float = 0.0,
24 |         reply_count: int = 1,
25 |         system_prompt: str = "You are ChatGPT, a large language model trained by OpenAI. Respond conversationally",
26 |     ) -> None:
27 |         """
28 |         Initialize Chatbot with API key (from https://platform.openai.com/account/api-keys)
29 |         """
30 |         self.engine = engine
31 |         self.session = requests.Session()
32 |         self.api_key = api_key
33 |         self.proxy = proxy
34 | 
35 |         self.system_prompt = system_prompt
36 |         self.max_tokens = max_tokens
37 |         self.temperature = temperature
38 |         self.top_p = top_p
39 |         self.presence_penalty = presence_penalty
40 |         self.frequency_penalty = frequency_penalty
41 |         self.reply_count = reply_count
42 | 
43 |         self.sentence = ""
44 |         self.temp = ""
45 |         self.has_printed = False
46 | 
47 |         if self.proxy:
48 |             proxies = {
49 |                 "http": self.proxy,
50 |                 "https": self.proxy,
51 |             }
52 |             self.session.proxies = proxies
53 |         self.conversation: dict = {
54 |             "default": [
55 |                 {
56 |                     "role": "system",
57 |                     "content": system_prompt,
58 |                 },
59 |             ],
60 |         }
61 |         if max_tokens > 4000:
62 |             raise Exception("Max tokens cannot be greater than 4000")
63 | 
64 |         if self.get_token_count("default") > self.max_tokens:
65 |             raise Exception("System prompt is too long")
66 | 
67 |     def add_to_conversation(
68 |         self,
69 |         message: str,
70 |         role: str,
71 |         convo_id: str = "default",
72 |     ) -> None:
73 |         """
74 |         Add a message to the conversation
75 |         """
76 |         self.conversation[convo_id].append({"role": role, "content": message})
77 | 
78 |     def __truncate_conversation(self, convo_id: str = "default") -> None:
79 |         """
80 |         Truncate the conversation
81 |         """
82 |         while True:
83 |             if (
84 |                 self.get_token_count(convo_id) > self.max_tokens
85 |                 and len(self.conversation[convo_id]) > 1
86 |             ):
87 |                 # Don't remove the first message
88 |                 self.conversation[convo_id].pop(1)
89 |             else:
90 |                 break
91 | 
92 |     # https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
93 |     def get_token_count(self, convo_id: str = "default") -> int:
94 |         """
95 |         Get token count
96 |         """
97 |         if self.engine not in ["gpt-3.5-turbo", "gpt-3.5-turbo-0301"]:
98 |             raise NotImplementedError(f"Unsupported engine {self.engine}")
99 | 
100 |         encoding = tiktoken.encoding_for_model(self.engine)
101 | 
102 |         num_tokens = 0
103 |         for message in self.conversation[convo_id]:
104 |             # every message follows {role/name}\n{content}\n
105 |             num_tokens += 4
106 |             for key, value in message.items():
107 |                 num_tokens += len(encoding.encode(value))
108 |                 if key == "name":  # if there's a
name, the role is omitted 109 | num_tokens += -1 # role is always required and always 1 token 110 | num_tokens += 2 # every reply is primed with assistant 111 | return num_tokens 112 | 113 | def get_max_tokens(self, convo_id: str) -> int: 114 | """ 115 | Get max tokens 116 | """ 117 | return self.max_tokens - self.get_token_count(convo_id) 118 | 119 | def ask_stream( 120 | self, 121 | prompt: str, 122 | lock=None, 123 | stop_event=None, 124 | role: str = "user", 125 | convo_id: str = "default", 126 | ) -> None: 127 | """ 128 | Ask a question 129 | """ 130 | self.has_printed = False 131 | # Make conversation if it doesn't exist 132 | if convo_id not in self.conversation: 133 | self.reset(convo_id=convo_id, system_prompt=self.system_prompt) 134 | self.add_to_conversation(prompt, "user", convo_id=convo_id) 135 | self.__truncate_conversation(convo_id=convo_id) 136 | # Get response 137 | response = self.session.post( 138 | "https://api.openai.com/v1/chat/completions", 139 | headers={"Authorization": f"Bearer {self.api_key}"}, 140 | json={ 141 | "model": self.engine, 142 | "messages": self.conversation[convo_id], 143 | "stream": True, 144 | "temperature": self.temperature, 145 | "top_p": self.top_p, 146 | "presence_penalty": 147 | self.presence_penalty, 148 | "frequency_penalty": 149 | self.frequency_penalty, 150 | "n": self.reply_count, 151 | "user": role, 152 | "max_tokens": self.get_max_tokens(convo_id=convo_id), 153 | }, 154 | stream=True, 155 | ) 156 | if response.status_code != 200: 157 | raise Exception( 158 | f"Error: {response.status_code} {response.reason} {response.text}", 159 | ) 160 | response_role: str = None 161 | full_response: str = "" 162 | 163 | for line in response.iter_lines(): 164 | if stop_event.is_set(): 165 | self.temp = "" 166 | return 167 | if not line: 168 | continue 169 | # Remove "data: " 170 | line = line.decode("utf-8")[6:] 171 | if line == "[DONE]": 172 | break 173 | resp: dict = json.loads(line) 174 | choices = resp.get("choices") 175 | if not choices: 176 | continue 177 | delta = choices[0].get("delta") 178 | if not delta: 179 | continue 180 | if "role" in delta: 181 | response_role = delta["role"] 182 | if "content" in delta: 183 | content = delta["content"] # .replace(" ", "") # (只能用于汉语发音严重不连续的情况,英文会导致空格丢失) 184 | success = lock.acquire(blocking=False) 185 | if success: 186 | try: 187 | self.sentence += self.temp + content # 确保每次别的线程都能拿到完整的句子,但是会保留前面的句子 188 | self.temp = "" 189 | finally: 190 | lock.release() 191 | else: 192 | self.temp += content 193 | print(content, end="") 194 | full_response += content 195 | print() 196 | self.has_printed = True 197 | self.add_to_conversation(full_response, response_role, convo_id=convo_id) 198 | 199 | def rollback(self, n: int = 1, convo_id: str = "default") -> None: 200 | """ 201 | Rollback the conversation 202 | """ 203 | for _ in range(n): 204 | self.conversation[convo_id].pop() 205 | 206 | def reset(self, convo_id: str = "default", system_prompt: str = None) -> None: 207 | """ 208 | Reset the conversation 209 | """ 210 | self.conversation[convo_id] = [ 211 | {"role": "system", "content": system_prompt or self.system_prompt}, 212 | ] 213 | 214 | def save(self, file: str, *convo_ids: str) -> bool: 215 | """ 216 | Save the conversation to a JSON file 217 | """ 218 | try: 219 | with open(file, "w", encoding="utf-8") as f: 220 | if convo_ids: 221 | json.dump({k: self.conversation[k] for k in convo_ids}, f, indent=2) 222 | else: 223 | json.dump(self.conversation, f, indent=2) 224 | except (FileNotFoundError, KeyError): 225 
| return False 226 | return True 227 | # print(f"Error: {file} could not be created") 228 | 229 | def load(self, file: str, *convo_ids: str) -> bool: 230 | """ 231 | Load the conversation from a JSON file 232 | """ 233 | try: 234 | with open(file, encoding="utf-8") as f: 235 | if convo_ids: 236 | convos = json.load(f) 237 | self.conversation.update({k: convos[k] for k in convo_ids}) 238 | else: 239 | self.conversation = json.load(f) 240 | except (FileNotFoundError, KeyError, json.decoder.JSONDecodeError): 241 | return False 242 | return True 243 | 244 | def load_config(self, file: str, no_api_key: bool = False) -> bool: 245 | """ 246 | Load the configuration from a JSON file 247 | """ 248 | try: 249 | with open(file, encoding="utf-8") as f: 250 | config = json.load(f) 251 | if config is not None: 252 | self.api_key = config.get("api_key") or self.api_key 253 | if self.api_key is None: 254 | # Make sure the API key is set 255 | raise Exception("Error: API key is not set") 256 | self.engine = config.get("engine") or self.engine 257 | self.temperature = config.get("temperature") or self.temperature 258 | self.top_p = config.get("top_p") or self.top_p 259 | self.presence_penalty = ( 260 | config.get("presence_penalty") or self.presence_penalty 261 | ) 262 | self.frequency_penalty = ( 263 | config.get("frequency_penalty") or self.frequency_penalty 264 | ) 265 | self.reply_count = config.get("reply_count") or self.reply_count 266 | self.max_tokens = config.get("max_tokens") or self.max_tokens 267 | 268 | if config.get("system_prompt") is not None: 269 | self.system_prompt = ( 270 | config.get("system_prompt") or self.system_prompt 271 | ) 272 | self.reset(system_prompt=self.system_prompt) 273 | 274 | if config.get("proxy") is not None: 275 | self.proxy = config.get("proxy") or self.proxy 276 | proxies = { 277 | "http": self.proxy, 278 | "https": self.proxy, 279 | } 280 | self.session.proxies = proxies 281 | except (FileNotFoundError, KeyError, json.decoder.JSONDecodeError): 282 | return False 283 | return True 284 | -------------------------------------------------------------------------------- /miaccount.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import hashlib 3 | import json 4 | import logging 5 | import os 6 | import random 7 | import string 8 | from urllib import parse 9 | from aiohttp import ClientSession 10 | 11 | _LOGGER = logging.getLogger(__package__) 12 | 13 | 14 | def get_random(length): 15 | return "".join(random.sample(string.ascii_letters + string.digits, length)) 16 | 17 | 18 | class MiTokenStore: 19 | def __init__(self, token_path): 20 | self.token_path = token_path 21 | 22 | def load_token(self): 23 | if os.path.isfile(self.token_path): 24 | try: 25 | with open(self.token_path) as f: 26 | return json.load(f) 27 | except Exception: 28 | _LOGGER.exception("Exception on load token from %s", self.token_path) 29 | return None 30 | 31 | def save_token(self, token=None): 32 | if token: 33 | try: 34 | with open(self.token_path, "w") as f: 35 | json.dump(token, f, indent=2) 36 | except Exception: 37 | _LOGGER.exception("Exception on save token to %s", self.token_path) 38 | elif os.path.isfile(self.token_path): 39 | os.remove(self.token_path) 40 | 41 | 42 | class MiAccount: 43 | def __init__(self, session: ClientSession, username, password, token_store=None): 44 | self.session = session 45 | self.username = username 46 | self.password = password 47 | self.token_store = ( 48 | MiTokenStore(token_store) if 
isinstance(token_store, str) else token_store 49 | ) 50 | self.token = token_store is not None and self.token_store.load_token() 51 | 52 | async def login(self, sid): 53 | if not self.token: 54 | self.token = {"deviceId": get_random(16).upper()} 55 | try: 56 | resp = await self._serviceLogin(f"serviceLogin?sid={sid}&_json=true") 57 | if resp["code"] != 0: 58 | data = { 59 | "_json": "true", 60 | "qs": resp["qs"], 61 | "sid": resp["sid"], 62 | "_sign": resp["_sign"], 63 | "callback": resp["callback"], 64 | "user": self.username, 65 | "hash": hashlib.md5(self.password.encode()).hexdigest().upper(), 66 | } 67 | resp = await self._serviceLogin("serviceLoginAuth2", data) 68 | if resp["code"] != 0: 69 | raise Exception(resp) 70 | 71 | self.token["userId"] = resp["userId"] 72 | self.token["passToken"] = resp["passToken"] 73 | 74 | serviceToken = await self._securityTokenService( 75 | resp["location"], resp["nonce"], resp["ssecurity"] 76 | ) 77 | self.token[sid] = (resp["ssecurity"], serviceToken) 78 | if self.token_store: 79 | self.token_store.save_token(self.token) 80 | return True 81 | 82 | except Exception as e: 83 | self.token = None 84 | if self.token_store: 85 | self.token_store.save_token() 86 | _LOGGER.exception("Exception on login %s: %s", self.username, e) 87 | return False 88 | 89 | async def _serviceLogin(self, uri, data=None): 90 | headers = { 91 | "User-Agent": "APP/com.xiaomi.mihome APPV/6.0.103 iosPassportSDK/3.9.0 iOS/14.4 miHSTS" 92 | } 93 | cookies = {"sdkVersion": "3.9", "deviceId": self.token["deviceId"]} 94 | if "passToken" in self.token: 95 | cookies["userId"] = self.token["userId"] 96 | cookies["passToken"] = self.token["passToken"] 97 | url = "https://account.xiaomi.com/pass/" + uri 98 | async with self.session.request( 99 | "GET" if data is None else "POST", 100 | url, 101 | data=data, 102 | cookies=cookies, 103 | headers=headers, 104 | ssl = False, 105 | ) as r: 106 | raw = await r.read() 107 | resp = json.loads(raw[11:]) 108 | _LOGGER.debug("%s: %s", uri, resp) 109 | return resp 110 | 111 | async def _securityTokenService(self, location, nonce, ssecurity): 112 | nsec = "nonce=" + str(nonce) + "&" + ssecurity 113 | clientSign = base64.b64encode(hashlib.sha1(nsec.encode()).digest()).decode() 114 | async with self.session.get( 115 | location + "&clientSign=" + parse.quote(clientSign) 116 | ) as r: 117 | serviceToken = r.cookies["serviceToken"].value 118 | if not serviceToken: 119 | raise Exception(await r.text()) 120 | return serviceToken 121 | 122 | async def mi_request(self, sid, url, data, headers, relogin=True): 123 | if (self.token and sid in self.token) or await self.login(sid): # Ensure login 124 | cookies = { 125 | "userId": self.token["userId"], 126 | "serviceToken": self.token[sid][1], 127 | } 128 | content = data(self.token, cookies) if callable(data) else data 129 | method = "GET" if data is None else "POST" 130 | _LOGGER.info("%s %s", url, content) 131 | async with self.session.request( 132 | method, url, data=content, cookies=cookies, headers=headers 133 | ) as r: 134 | status = r.status 135 | if status == 200: 136 | resp = await r.json(content_type=None) 137 | code = resp["code"] 138 | if code == 0: 139 | return resp 140 | if "auth" in resp.get("message", "").lower(): 141 | status = 401 142 | else: 143 | resp = await r.text() 144 | if status == 401 and relogin: 145 | _LOGGER.warn("Auth error on request %s %s, relogin...", url, resp) 146 | self.token = None # Auth error, reset login 147 | return await self.mi_request(sid, url, data, headers, False) 148 | 
else: 149 | resp = "Login failed" 150 | raise Exception(f"Error {url}: {resp}") 151 | -------------------------------------------------------------------------------- /minaservice.py: -------------------------------------------------------------------------------- 1 | import json 2 | from miaccount import MiAccount, get_random 3 | 4 | import logging 5 | 6 | _LOGGER = logging.getLogger(__package__) 7 | 8 | 9 | class MiNAService: 10 | def __init__(self, account: MiAccount): 11 | self.account = account 12 | 13 | async def mina_request(self, uri, data=None): 14 | requestId = "app_ios_" + get_random(30) 15 | if data is not None: 16 | data["requestId"] = requestId 17 | else: 18 | uri += "&requestId=" + requestId 19 | headers = { 20 | "User-Agent": "MiHome/6.0.103 (com.xiaomi.mihome; build:6.0.103.1; iOS 14.4.0) Alamofire/6.0.103 MICO/iOSApp/appStore/6.0.103" 21 | } 22 | return await self.account.mi_request( 23 | "micoapi", "https://api2.mina.mi.com" + uri, data, headers 24 | ) 25 | 26 | async def device_list(self, master=0): 27 | result = await self.mina_request("/admin/v2/device_list?master=" + str(master)) 28 | return result.get("data") if result else None 29 | 30 | async def ubus_request(self, deviceId, method, path, message): 31 | message = json.dumps(message) 32 | result = await self.mina_request( 33 | "/remote/ubus", 34 | {"deviceId": deviceId, "message": message, "method": method, "path": path}, 35 | ) 36 | return result 37 | 38 | async def text_to_speech(self, deviceId, text): 39 | return await self.ubus_request( 40 | deviceId, "text_to_speech", "mibrain", {"text": text} 41 | ) 42 | 43 | async def player_set_volume(self, deviceId, volume): 44 | return await self.ubus_request( 45 | deviceId, 46 | "player_set_volume", 47 | "mediaplayer", 48 | {"volume": volume, "media": "app_ios"}, 49 | ) 50 | 51 | async def player_pause(self, deviceId): 52 | return await self.ubus_request( 53 | deviceId, 54 | "player_play_operation", 55 | "mediaplayer", 56 | {"action": "pause", "media": "app_ios"}, 57 | ) 58 | 59 | async def player_play(self, deviceId): 60 | return await self.ubus_request( 61 | deviceId, 62 | "player_play_operation", 63 | "mediaplayer", 64 | {"action": "play", "media": "app_ios"}, 65 | ) 66 | 67 | async def player_get_status(self, deviceId): 68 | return await self.ubus_request( 69 | deviceId, 70 | "player_get_play_status", 71 | "mediaplayer", 72 | {"media": "app_ios"}, 73 | ) 74 | 75 | async def play_by_url(self, deviceId, url): 76 | return await self.ubus_request( 77 | deviceId, 78 | "player_play_url", 79 | "mediaplayer", 80 | {"url": url, "type": 1, "media": "app_ios"}, 81 | ) 82 | 83 | async def send_message(self, devices, devno, message, volume=None): # -1/0/1... 
84 | result = False 85 | for i in range(0, len(devices)): 86 | if ( 87 | devno == -1 88 | or devno != i + 1 89 | or devices[i]["capabilities"].get("yunduantts") 90 | ): 91 | _LOGGER.debug( 92 | "Send to devno=%d index=%d: %s", devno, i, message or volume 93 | ) 94 | deviceId = devices[i]["deviceID"] 95 | result = ( 96 | True 97 | if volume is None 98 | else await self.player_set_volume(deviceId, volume) 99 | ) 100 | if result and message: 101 | result = await self.text_to_speech(deviceId, message) 102 | if not result: 103 | _LOGGER.error("Send failed: %s", message or volume) 104 | if devno != -1 or not result: 105 | break 106 | return result 107 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | aiohttp 2 | requests 3 | urllib3==1.25.11 4 | tiktoken -------------------------------------------------------------------------------- /效果.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Afool4U/MIGPT/5abf0f3b18875f010a656fc00982c5abc1b8a482/效果.png --------------------------------------------------------------------------------