├── .gitignore ├── README.md ├── demo ├── config.py ├── echo_client.py ├── echo_server.py ├── file_receiver.py ├── file_sender.py ├── http_proxy.py ├── http_proxy_backend.py ├── http_proxy_frontend.py ├── picture.jpg └── utils.py └── qdp ├── __init__.py └── utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | # ----- Python ----- 2 | 3 | # Byte-compiled / optimized / DLL files 4 | __pycache__/ 5 | *.py[cod] 6 | *$py.class 7 | 8 | # C extensions 9 | *.so 10 | 11 | # Distribution / packaging 12 | .Python 13 | build/ 14 | develop-eggs/ 15 | dist/ 16 | downloads/ 17 | eggs/ 18 | .eggs/ 19 | lib/ 20 | lib64/ 21 | parts/ 22 | sdist/ 23 | var/ 24 | wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | .hypothesis/ 50 | .pytest_cache/ 51 | 52 | # Translations 53 | *.mo 54 | *.pot 55 | 56 | # Django stuff: 57 | *.log 58 | local_settings.py 59 | db.sqlite3 60 | 61 | # Flask stuff: 62 | instance/ 63 | .webassets-cache 64 | 65 | # Scrapy stuff: 66 | .scrapy 67 | 68 | # Sphinx documentation 69 | docs/_build/ 70 | 71 | # PyBuilder 72 | target/ 73 | 74 | # Jupyter Notebook 75 | .ipynb_checkpoints 76 | 77 | # pyenv 78 | .python-version 79 | 80 | # celery beat schedule file 81 | celerybeat-schedule 82 | 83 | # SageMath parsed files 84 | *.sage.py 85 | 86 | # Environments 87 | .env 88 | .venv 89 | env/ 90 | venv/ 91 | ENV/ 92 | env.bak/ 93 | venv.bak/ 94 | 95 | # Spyder project settings 96 | .spyderproject 97 | .spyproject 98 | 99 | # Rope project settings 100 | .ropeproject 101 | 102 | # mkdocs documentation 103 | /site 104 | 105 | # mypy 106 | .mypy_cache/ 107 | 108 | # ----- Project ----- 109 | 110 | .idea 111 | .vscode 112 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # QDP (QQ-based Datagram Protocol) 2 | 3 | QDP is a transport protocol built on top of QQ messages. It can carry arbitrary binary data through QQ messages and handles fragmentation and reassembly automatically. 4 | 5 | Currently, QDP is just a proof of concept. The transfer rate is very low: according to limited tests, it is around 20~30 Kbps (at 5 msg/s). 6 | 7 | In QDP, a QQ number plays the role of an IP address. A "packet" is similar to a UDP packet, and a "fragment" is similar to an IP fragment. 8 | 9 | This repo contains a rough implementation of QDP in Python, using "[酷Q](https://cqp.cc)" and "[CQHTTP](https://cqhttp.cc)" as the underlying QQ bot framework. You can check the [demo](demo) directory to see how the interfaces are used.
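For a rough idea of the interface, here is a minimal client sketch distilled from [demo/echo_client.py](demo/echo_client.py); the QQ numbers and WebSocket URLs are placeholders, and it assumes a peer like [demo/echo_server.py](demo/echo_server.py) is bound at port 12345 on the other QQ number.

```python
import asyncio

import qdp

# Placeholder QQ numbers and CQHTTP WebSocket URLs; substitute your own.
qdp.init({
    10001: 'ws://127.0.0.1:6701',  # the peer running the echo server
    10002: 'ws://127.0.0.1:6702',  # this client
})


async def main():
    sock = qdp.Socket()
    await sock.bind((10002, None))  # None lets QDP pick a random port
    await sock.sendto(b'hello', (10001, 12345))  # address = (QQ number, port)
    data, addr = await sock.recvfrom()
    print(data, addr)
    await sock.close()


asyncio.run(main())
```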
10 | 11 | To run the demos in this repo, you need to configure CQHTTP as follows: 12 | 13 | ```ini 14 | use_ws = true 15 | post_message_format = string 16 | enable_rate_limited_actions = true 17 | rate_limit_interval = 200 18 | ``` 19 | -------------------------------------------------------------------------------- /demo/config.py: -------------------------------------------------------------------------------- 1 | CQHTTP_WS_URL_MAPPING = { 2 | 3281334718: 'ws://vm101.zt.rclab.tk:6701', 3 | 2474763007: 'ws://vm101.zt.rclab.tk:6702', 4 | } 5 | -------------------------------------------------------------------------------- /demo/echo_client.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import sys 4 | 5 | import qdp 6 | from demo import config 7 | 8 | logging.basicConfig(stream=sys.stdout, level=logging.INFO) 9 | 10 | qdp.init(config.CQHTTP_WS_URL_MAPPING) 11 | logging.info('QDP service initialized') 12 | 13 | 14 | async def main(): 15 | sock = qdp.Socket() 16 | await sock.bind((2474763007, None)) 17 | logging.info('QDP socket created') 18 | 19 | addr = (3281334718, 12345) 20 | for text in ['你好', 'hello', '123']: 21 | await sock.sendto(text.encode('utf-8'), addr) 22 | logging.info(f'Sent to {addr[0]}:{addr[1]}, data: {text}') 23 | data, _ = await sock.recvfrom() 24 | text_recv = data.decode('utf-8') 25 | logging.info(f'Received: {text_recv}') 26 | 27 | await sock.close() 28 | 29 | 30 | asyncio.run(main()) 31 | -------------------------------------------------------------------------------- /demo/echo_server.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import sys 4 | 5 | import qdp 6 | from demo import config 7 | 8 | logging.basicConfig(stream=sys.stdout, level=logging.INFO) 9 | 10 | qdp.init(config.CQHTTP_WS_URL_MAPPING) 11 | logging.info('QDP service initialized') 12 | 13 | 14 | async def main(): 15 | sock = qdp.Socket() 16 | await sock.bind((3281334718, 12345)) 17 | logging.info('QDP socket created') 18 | 19 | while True: 20 | data, addr = await sock.recvfrom() 21 | text = data.decode('utf-8') 22 | logging.info(f'Received from {addr[0]}:{addr[1]}, data: {text}') 23 | text_send = 'Hello, %s!'
% text 24 | await sock.sendto(text_send.encode('utf-8'), addr) 25 | logging.info(f'Sent to {addr[0]}:{addr[1]}, data: {text_send}') 26 | 27 | 28 | asyncio.run(main()) 29 | -------------------------------------------------------------------------------- /demo/file_receiver.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import sys 4 | import time 5 | 6 | import qdp 7 | from demo import config 8 | 9 | logging.basicConfig(stream=sys.stdout, level=logging.INFO) 10 | 11 | qdp.init(config.CQHTTP_WS_URL_MAPPING) 12 | logging.info('QDP service initialized') 13 | 14 | 15 | async def main(): 16 | sock = qdp.Socket() 17 | await sock.bind((3281334718, 10000)) 18 | logging.info('QDP socket created') 19 | 20 | while True: 21 | data, addr = await sock.recvfrom() 22 | logging.info(f'Receive end: {time.time()}') 23 | logging.info(f'Received from {addr[0]}:{addr[1]}, ' 24 | f'data length: {len(data)}') 25 | with open('picture_received.jpg', 'wb') as f: 26 | f.write(data) 27 | await sock.sendto(b'OK', addr) 28 | 29 | 30 | asyncio.run(main()) 31 | -------------------------------------------------------------------------------- /demo/file_sender.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import sys 4 | import time 5 | 6 | import qdp 7 | from demo import config 8 | 9 | logging.basicConfig(stream=sys.stdout, level=logging.INFO) 10 | 11 | qdp.init(config.CQHTTP_WS_URL_MAPPING) 12 | logging.info('QDP service initialized') 13 | 14 | 15 | async def main(): 16 | sock = qdp.Socket() 17 | await sock.bind((2474763007, None)) 18 | logging.info('QDP socket created') 19 | 20 | addr = (3281334718, 10000) 21 | with open('picture.jpg', 'rb') as f: 22 | data = f.read() 23 | logging.info(f'Send begin: {time.time()}') 24 | await sock.sendto(data, addr) 25 | logging.info(f'Send end: {time.time()}') 26 | recv, _ = await sock.recvfrom() 27 | logging.info(recv.decode('utf-8')) 28 | await sock.close() 29 | 30 | 31 | asyncio.run(main()) 32 | -------------------------------------------------------------------------------- /demo/http_proxy.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is a standard HTTP proxy implementation. 
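It forwards ordinary request methods (GET, POST, etc.) to the origin server and handles CONNECT by relaying raw bytes in both directions.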
3 | 4 | See also https://imququ.com/post/web-proxy.html 5 | """ 6 | 7 | import asyncio 8 | from urllib.parse import urlparse 9 | 10 | from demo.utils import CaseInsensitiveDict 11 | 12 | 13 | def split_host_port(host_port): 14 | try: 15 | host, port = host_port.rsplit(':', maxsplit=1) 16 | except ValueError: 17 | host = host_port 18 | port = 80 19 | return host, port 20 | 21 | 22 | async def pipe(reader, writer): 23 | try: 24 | while True: 25 | data = await reader.read(1024) 26 | writer.write(data) 27 | await writer.drain() 28 | if reader.at_eof(): 29 | break 30 | except ConnectionError: 31 | pass 32 | 33 | 34 | async def pipe_2way(reader1, writer1, reader2, writer2): 35 | task1 = asyncio.create_task(pipe(reader1, writer1)) 36 | task2 = asyncio.create_task(pipe(reader2, writer2)) 37 | await asyncio.wait([task1, task2]) 38 | 39 | 40 | class RequestHandler: 41 | def __init__(self, 42 | reader: asyncio.StreamReader, 43 | writer: asyncio.StreamWriter): 44 | self.reader = reader 45 | self.writer = writer 46 | self.method = None 47 | self.path = None 48 | self.version = None 49 | self.headers = None 50 | 51 | async def handle(self): 52 | request_line = (await self.reader.readline()).decode().rstrip() 53 | self.method, self.path, self.version, *remained = request_line.split() 54 | if remained: 55 | await self.write_status(400) 56 | return 57 | 58 | self.headers = CaseInsensitiveDict() 59 | while True: 60 | header_line = (await self.reader.readline()).decode().strip() 61 | if not header_line: 62 | # empty line indicating the end of headers 63 | break 64 | else: 65 | key, value = header_line.split(':', maxsplit=1) 66 | self.headers[key.rstrip()] = value.lstrip() 67 | 68 | self.method = self.method.upper() 69 | print(f'{self.method} {self.path}') 70 | 71 | method = self.method.lower() 72 | method_handler = getattr(self, method, None) 73 | if method_handler: 74 | try: 75 | await method_handler() 76 | except ConnectionError: 77 | print(f'{self.method} {self.path} FAILED') 78 | else: 79 | await self.write_status(405) 80 | return 81 | 82 | async def write_status(self, status, reason=None): 83 | if reason is None: 84 | reason = { 85 | 200: 'OK', 86 | 400: 'Bad Request', 87 | 405: 'Method Not Allowed', 88 | }.get(status) 89 | self.writer.write(f'{self.version} {status} {reason}\r\n'.encode()) 90 | await self.writer.drain() 91 | 92 | async def write(self, data): 93 | if isinstance(data, str): 94 | data = data.encode('utf-8') 95 | try: 96 | self.writer.write(data) 97 | await self.writer.drain() 98 | except ConnectionError: 99 | pass 100 | 101 | def __getattr__(self, item): 102 | if item not in ('get', 'post', 'put', 'patch', 'delete', 'head'): 103 | return None 104 | 105 | return self.normal_method 106 | 107 | async def normal_method(self): 108 | url = self.path 109 | p = urlparse(url) 110 | host, port = split_host_port(p.netloc) 111 | path = p.path 112 | if p.params: 113 | path += f';{p.params}' 114 | if p.query: 115 | path += f'?{p.query}' 116 | if p.fragment: 117 | path += f'#{p.fragment}' 118 | 119 | svr_rd, svr_wt = await asyncio.open_connection(host, port) 120 | 121 | svr_wt.write(f'{self.method} {path} {self.version}\r\n'.encode()) 122 | await svr_wt.drain() 123 | for key, value in self.headers.items(): 124 | svr_wt.write(f'{key}: {value}\r\n'.encode()) 125 | await svr_wt.drain() 126 | svr_wt.write(b'\r\n') 127 | await svr_wt.drain() 128 | 129 | await asyncio.wait_for(pipe_2way(self.reader, svr_wt, 130 | svr_rd, self.writer), 131 | timeout=20) 132 | svr_wt.close() 133 | 134 | async def connect(self): 
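        # CONNECT (used for HTTPS): open a raw TCP connection to the target, reply 200 to the client, then relay bytes both ways as an opaque tunnel.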
135 | host, port = split_host_port(self.path) 136 | 137 | svr_rd, svr_wt = await asyncio.open_connection(host, port) 138 | 139 | await self.write_status(200, 'Connection Established') 140 | await self.write('\r\n') 141 | await asyncio.wait_for(pipe_2way(self.reader, svr_wt, 142 | svr_rd, self.writer), 143 | timeout=20) 144 | svr_wt.close() 145 | 146 | 147 | async def handler(reader: asyncio.StreamReader, 148 | writer: asyncio.StreamWriter): 149 | h = RequestHandler(reader, writer) 150 | try: 151 | await h.handle() 152 | except Exception: 153 | pass 154 | finally: 155 | writer.close() 156 | 157 | 158 | async def main(): 159 | server = await asyncio.start_server(handler, '127.0.0.1', 8080) 160 | async with server: 161 | await server.serve_forever() 162 | 163 | 164 | if __name__ == '__main__': 165 | asyncio.run(main()) 166 | -------------------------------------------------------------------------------- /demo/http_proxy_backend.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | import base64 4 | 5 | import qdp 6 | from demo import config 7 | 8 | QDP_SERVER_QQ = 3281334718 9 | QDP_SERVER_PORT = 20000 10 | QDP_SERVER_ADDR = QDP_SERVER_QQ, QDP_SERVER_PORT 11 | 12 | qdp.init(config.CQHTTP_WS_URL_MAPPING) 13 | sock = qdp.Socket() 14 | 15 | cmd_queues = {} 16 | 17 | 18 | class CommandBridge: 19 | @classmethod 20 | async def send(cls, addr, id_, method, params=None): 21 | if params is None: 22 | params = {} 23 | cmd = { 24 | "id": id_, 25 | 'method': method, 26 | 'params': params, 27 | } 28 | data = json.dumps(cmd, ensure_ascii=False).encode('utf-8') 29 | await sock.sendto(data, addr) 30 | 31 | 32 | async def pipe_b2f(reader, addr, id_): 33 | try: 34 | while True: 35 | data = await reader.read(1024) 36 | print(f'{id_}, TRANSFER b2f:', data) 37 | await CommandBridge.send(addr, id_, 'transfer', { 38 | 'data': base64.b64encode(data).decode() 39 | }) 40 | if reader.at_eof(): 41 | break 42 | except ConnectionError: 43 | pass 44 | 45 | 46 | async def real_handle(addr, connect_cmd, cmd_queue): 47 | id_ = connect_cmd['id'] 48 | 49 | host = connect_cmd['params']['host'] 50 | port = connect_cmd['params']['port'] 51 | print(f'{id_}, CONNECT {host}:{port}') 52 | 53 | try: 54 | reader, writer = await asyncio.open_connection(host, port) 55 | 56 | asyncio.create_task(pipe_b2f(reader, addr, id_)) 57 | 58 | while True: 59 | cmd = await cmd_queue.get() 60 | print(cmd) 61 | 62 | if cmd['method'] == 'close': 63 | print(f'{id_}, CLOSE') 64 | return 65 | 66 | if cmd['method'] == 'transfer': 67 | data = base64.b64decode(cmd['params']['data']) 68 | print(f'{id_}, TRANSFER f2b:', data) 69 | writer.write(data) 70 | await writer.drain() 71 | except Exception: 72 | pass 73 | finally: 74 | await CommandBridge.send(addr, id_, 'close') 75 | 76 | 77 | async def handle(addr, connect_cmd, cmd_queue): 78 | await asyncio.wait_for( 79 | asyncio.ensure_future(real_handle(addr, connect_cmd, cmd_queue)), 80 | timeout=300 81 | ) 82 | 83 | 84 | async def main(): 85 | await sock.bind(QDP_SERVER_ADDR) 86 | 87 | while True: 88 | data, addr = await sock.recvfrom() 89 | try: 90 | cmd = json.loads(data.decode('utf-8')) 91 | except json.JSONDecodeError: 92 | continue 93 | 94 | if cmd['method'] == 'connect': 95 | cmd_queue = asyncio.Queue() 96 | cmd_queues[cmd['id']] = cmd_queue 97 | asyncio.ensure_future(handle(addr, cmd, cmd_queue)) 98 | elif cmd['id'] in cmd_queues: 99 | cmd_queues[cmd['id']].put_nowait(cmd) 100 | if cmd['method'] == 'close': 101 | del 
cmd_queues[cmd['id']] 102 | 103 | 104 | asyncio.run(main()) 105 | -------------------------------------------------------------------------------- /demo/http_proxy_frontend.py: -------------------------------------------------------------------------------- 1 | import json 2 | import uuid 3 | import base64 4 | 5 | import qdp 6 | from demo import config 7 | from demo.http_proxy import * 8 | 9 | QDP_CLIENT_QQ = 2474763007 10 | QDP_SERVER_QQ = 3281334718 11 | QDP_SERVER_PORT = 20000 12 | QDP_SERVER_ADDR = QDP_SERVER_QQ, QDP_SERVER_PORT 13 | 14 | qdp.init(config.CQHTTP_WS_URL_MAPPING) 15 | sock = qdp.Socket() 16 | 17 | 18 | class CommandBridge: 19 | _futures = {} # key: seq, value: asyncio.Future 20 | 21 | @classmethod 22 | async def send(cls, id_, method, params=None): 23 | if params is None: 24 | params = {} 25 | cmd = { 26 | 'id': id_, 27 | 'method': method, 28 | 'params': params, 29 | } 30 | data = json.dumps(cmd, ensure_ascii=False).encode('utf-8') 31 | await sock.sendto(data, QDP_SERVER_ADDR) 32 | 33 | @classmethod 34 | async def listen(cls): 35 | while True: 36 | data, addr = await sock.recvfrom() 37 | if addr != QDP_SERVER_ADDR: 38 | continue 39 | 40 | cmd = json.loads(data.decode('utf-8')) 41 | cls._add_received(cmd) 42 | 43 | @classmethod 44 | def _add_received(cls, command): 45 | future = cls._futures.get(command.get('id')) 46 | if future: 47 | future.set_result(command) 48 | 49 | @classmethod 50 | async def receive(cls, id_): 51 | future = asyncio.get_event_loop().create_future() 52 | cls._futures[id_] = future 53 | try: 54 | return await asyncio.wait_for(future, 300) 55 | finally: 56 | # don't forget to remove the future object 57 | del cls._futures[id_] 58 | 59 | 60 | async def pipe_f2b(reader, id_): 61 | try: 62 | while True: 63 | data = await reader.read(1024) 64 | print(f'{id_}, TRANSFER f2b:', data) 65 | await CommandBridge.send(id_, 'transfer', { 66 | 'data': base64.b64encode(data).decode() 67 | }) 68 | if reader.at_eof(): 69 | break 70 | except ConnectionError: 71 | pass 72 | finally: 73 | await CommandBridge.send(id_, 'close') 74 | 75 | 76 | async def pipe_b2f(id_, writer): 77 | try: 78 | while True: 79 | cmd = await CommandBridge.receive(id_) 80 | print(cmd) 81 | if cmd['method'] == 'close': 82 | print(f'{id_}, CLOSE') 83 | break 84 | 85 | if cmd['method'] == 'transfer': 86 | data = base64.b64decode(cmd['params']['data']) 87 | print(f'{id_}, TRANSFER b2f:', data) 88 | writer.write(data) 89 | await writer.drain() 90 | except ConnectionError: 91 | pass 92 | 93 | 94 | async def pipe_2way(id_, reader, writer): 95 | task1 = asyncio.create_task(pipe_f2b(reader, id_)) 96 | task2 = asyncio.create_task(pipe_b2f(id_, writer)) 97 | await asyncio.wait([task1, task2]) 98 | 99 | 100 | class MyRequestHandler(RequestHandler): 101 | async def normal_method(self): 102 | id_ = str(uuid.uuid4()) 103 | 104 | url = self.path 105 | p = urlparse(url) 106 | host, port = split_host_port(p.netloc) 107 | path = p.path 108 | if p.params: 109 | path += f';{p.params}' 110 | if p.query: 111 | path += f'?{p.query}' 112 | if p.fragment: 113 | path += f'#{p.fragment}' 114 | 115 | await CommandBridge.send(id_, 'connect', {'host': host, 'port': port}) 116 | 117 | head = f'{self.method} {path} {self.version}\r\n'.encode() 118 | for key, value in self.headers.items(): 119 | head += f'{key}: {value}\r\n'.encode() 120 | head += b'\r\n' 121 | await CommandBridge.send(id_, 'transfer', { 122 | 'data': base64.b64encode(head).decode() 123 | }) 124 | 125 | await asyncio.wait_for(pipe_2way(id_, self.reader, 
self.writer), 126 | timeout=300) 127 | 128 | async def connect(self): 129 | id_ = str(uuid.uuid4()) 130 | host, port = split_host_port(self.path) 131 | await CommandBridge.send(id_, 'connect', {'host': host, 'port': port}) 132 | 133 | await self.write_status(200, 'Connection Established') 134 | await self.write('\r\n') 135 | await asyncio.wait_for(pipe_2way(id_, self.reader, self.writer), 136 | timeout=300) 137 | 138 | 139 | async def handler(reader: asyncio.StreamReader, 140 | writer: asyncio.StreamWriter): 141 | h = MyRequestHandler(reader, writer) 142 | try: 143 | await h.handle() 144 | except Exception: 145 | pass 146 | finally: 147 | writer.close() 148 | 149 | 150 | async def main(): 151 | await sock.bind((QDP_CLIENT_QQ, None)) 152 | asyncio.create_task(CommandBridge.listen()) 153 | 154 | server = await asyncio.start_server(handler, '127.0.0.1', 8080) 155 | async with server: 156 | await server.serve_forever() 157 | 158 | 159 | if __name__ == '__main__': 160 | asyncio.run(main()) 161 | -------------------------------------------------------------------------------- /demo/picture.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stdrc/qdp/4cc4e704a6995a15ae4443a5b6af21032183d730/demo/picture.jpg -------------------------------------------------------------------------------- /demo/utils.py: -------------------------------------------------------------------------------- 1 | import collections 2 | 3 | 4 | class CaseInsensitiveDict(collections.MutableMapping): 5 | """ 6 | From https://github.com/requests/requests/blob/v1.2.3/requests/structures.py#L37 7 | """ 8 | 9 | def __init__(self, data=None, **kwargs): 10 | self._store = dict() 11 | if data is None: 12 | data = {} 13 | self.update(data, **kwargs) 14 | 15 | def __setitem__(self, key, value): 16 | # Use the lowercased key for lookups, but store the actual 17 | # key alongside the value. 
18 | self._store[key.lower()] = (key, value) 19 | 20 | def __getitem__(self, key): 21 | return self._store[key.lower()][1] 22 | 23 | def __delitem__(self, key): 24 | del self._store[key.lower()] 25 | 26 | def __iter__(self): 27 | return (casedkey for casedkey, mappedvalue in self._store.values()) 28 | 29 | def __len__(self): 30 | return len(self._store) 31 | 32 | def lower_items(self): 33 | """Like iteritems(), but with all lowercase keys.""" 34 | return ( 35 | (lowerkey, keyval[1]) 36 | for (lowerkey, keyval) 37 | in self._store.items() 38 | ) 39 | 40 | def __eq__(self, other): 41 | if isinstance(other, collections.Mapping): 42 | other = CaseInsensitiveDict(other) 43 | else: 44 | return NotImplemented 45 | # Compare insensitively 46 | return dict(self.lower_items()) == dict(other.lower_items()) 47 | 48 | # Copy is required 49 | def copy(self): 50 | return CaseInsensitiveDict(self._store.values()) 51 | 52 | def __repr__(self): 53 | return '%s(%r)' % (self.__class__.__name__, dict(self.items())) 54 | -------------------------------------------------------------------------------- /qdp/__init__.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import base64 3 | import json 4 | import random 5 | import struct 6 | from collections import defaultdict 7 | from dataclasses import dataclass 8 | from typing import Optional, Dict, Tuple, ClassVar 9 | 10 | import websockets 11 | 12 | from qdp import utils 13 | 14 | MAGIC_PREFIX = '<<<42>>>~' 15 | MTU = int(4500 * 3 / 4) # bytes, 3/4 is because we will send base64 16 | MAX_FRAGMENT_SIZE = MTU - len(MAGIC_PREFIX.encode('ascii')) 17 | 18 | _qq_ws_url_mapping: Dict[int, str] = {} 19 | 20 | 21 | def init(mapping: Dict[int, str]): 22 | global _qq_ws_url_mapping 23 | _qq_ws_url_mapping = mapping.copy() 24 | 25 | 26 | class Socket: 27 | def __init__(self): 28 | self.addr = None 29 | self.cqhttp_ws_url = None 30 | self.cqhttp_ws = None 31 | 32 | # elem type: Tuple[src_qq, dst_qq, packet] 33 | self.recv_queue = None 34 | 35 | async def bind(self, addr: Tuple[int, Optional[int]]): 36 | qq, port = addr 37 | 38 | ws_url = _qq_ws_url_mapping.get(qq) 39 | if not ws_url: 40 | raise LookupError('there is no such qq that can be bound') 41 | 42 | if port is None: 43 | while port is None or (qq, port) in _socket_registry: 44 | port = random.randint(50000, 65535) 45 | else: 46 | if (qq, port) in _socket_registry: 47 | raise RuntimeError('the port specified is in use') 48 | 49 | self.addr = qq, port 50 | self.cqhttp_ws_url = ws_url 51 | await self._connect_cqhttp_ws() 52 | self.recv_queue = asyncio.Queue() 53 | asyncio.create_task(self._listen_cqhttp_event()) 54 | 55 | _socket_registry[self.addr] = self 56 | 57 | async def close(self): 58 | if self.cqhttp_ws: 59 | await self.cqhttp_ws.close() 60 | self.cqhttp_ws = None 61 | if self.addr in _socket_registry: 62 | del _socket_registry[self.addr] 63 | self.addr = None 64 | self.recv_queue = None 65 | 66 | async def sendto(self, data: bytes, addr: Tuple[int, int]): 67 | packet = Packet(src_port=self.addr[1], 68 | dst_port=addr[1], 69 | data=data) 70 | packet_data = packet.to_bytes() 71 | packet_id = random.randint(1, 65535) 72 | 73 | fragments = list() 74 | sequence = 1 75 | while len(packet_data) > Fragment.MAX_DATA_SIZE: 76 | fragments.append(Fragment( 77 | packet_id=packet_id, 78 | more_fragment=True, 79 | sequence=sequence, 80 | data=packet_data[:Fragment.MAX_DATA_SIZE] 81 | )) 82 | sequence += 1 83 | packet_data = packet_data[Fragment.MAX_DATA_SIZE:] 84 | 
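        # Whatever data remains becomes the final fragment, with more_fragment=False so the receiver knows the packet is complete.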
fragments.append(Fragment( 85 | packet_id=packet_id, 86 | more_fragment=False, 87 | sequence=sequence, 88 | data=packet_data 89 | )) 90 | 91 | if self.cqhttp_ws: 92 | for fragment in fragments: 93 | fragment_data = fragment.to_bytes() 94 | msg = base64.b64encode(fragment_data).decode('ascii') 95 | await self.cqhttp_ws.send(json.dumps({ 96 | 'action': 'send_private_msg_rate_limited', 97 | 'params': { 98 | 'user_id': addr[0], 99 | 'message': MAGIC_PREFIX + msg 100 | } 101 | })) 102 | 103 | async def recvfrom(self) -> Tuple[bytes, Tuple[int, int]]: 104 | src_qq, _, packet = await self.recv_queue.get() 105 | return packet.data, (src_qq, packet.src_port) 106 | 107 | async def _connect_cqhttp_ws(self): 108 | self.cqhttp_ws = await websockets.connect(self.cqhttp_ws_url) 109 | 110 | async def _listen_cqhttp_event(self): 111 | buffer = defaultdict(set) # key: packet id, value: Set[fragment] 112 | while True: 113 | try: 114 | payload = json.loads(await self.cqhttp_ws.recv()) 115 | if payload['post_type'] != 'message' or \ 116 | payload['message_type'] != 'private': 117 | continue 118 | 119 | src_qq = payload['user_id'] 120 | dst_qq = payload['self_id'] 121 | msg = payload['message'] 122 | 123 | if dst_qq != self.addr[0] or not msg.startswith(MAGIC_PREFIX): 124 | continue 125 | 126 | msg = msg[len(MAGIC_PREFIX):] 127 | fragment_data = base64.b64decode(msg.encode('ascii')) 128 | fragment = Fragment.from_bytes(fragment_data) 129 | 130 | if fragment.more_fragment: 131 | buffer[fragment.packet_id].add(fragment) 132 | else: 133 | # the packet is fully received 134 | fragment_set = buffer[fragment.packet_id] 135 | del buffer[fragment.packet_id] 136 | 137 | fragment_set.add(fragment) 138 | fragments = sorted(fragment_set, key=lambda f: f.sequence) 139 | 140 | if fragments[-1].sequence != len(fragments): 141 | # some fragments are missing, so drop the packet 142 | continue 143 | 144 | # construct the packet from fragments 145 | packet_data = b'' 146 | for f in fragments: 147 | packet_data += f.data 148 | packet = Packet.from_bytes(packet_data) 149 | 150 | if packet.dst_port != self.addr[1]: 151 | # it's not for this socket 152 | continue 153 | 154 | await self.recv_queue.put((src_qq, dst_qq, packet)) 155 | except (json.JSONDecodeError, KeyError): 156 | continue 157 | except websockets.exceptions.ConnectionClosed as e: 158 | if e.code != 1000: 159 | await self._connect_cqhttp_ws() 160 | else: 161 | break 162 | 163 | 164 | _socket_registry: Dict[Tuple[int, int], Socket] = {} 165 | 166 | 167 | @dataclass(frozen=True) 168 | class Packet: 169 | """ 170 | Packet is similar to a UDP packet. 171 | """ 172 | src_port: int # 16 bit 173 | dst_port: int # 16 bit 174 | data: bytes # variable length 175 | 176 | _HEAD_FMT: ClassVar[str] = '>HH' 177 | 178 | def __repr__(self): 179 | return f'Packet(src_port={self.src_port}, dst_port={self.dst_port}, ' \ 180 | f'data={self.data})' 181 | 182 | def __hash__(self) -> int: 183 | return super().__hash__() 184 | 185 | def to_bytes(self): 186 | return struct.pack(self._HEAD_FMT, 187 | self.src_port, self.dst_port) + self.data 188 | 189 | @classmethod 190 | def from_bytes(cls, data: bytes): 191 | (src_port, dst_port), data = utils.unpack_data(cls._HEAD_FMT, data) 192 | return cls(src_port, dst_port, data) 193 | 194 | 195 | @dataclass(frozen=True) 196 | class Fragment: 197 | """ 198 | Fragment is similar to an IP fragment. 199 | 200 | There are no "src_qq" and "dst_qq" fields because 201 | the two values can be obtained from CQHTTP's event data.
202 | """ 203 | packet_id: int # 16 bit 204 | more_fragment: bool # 1 bit 205 | sequence: int # 15 bit 206 | data: bytes # variable length 207 | 208 | _HEAD_FMT: ClassVar[str] = '>HH' 209 | MAX_DATA_SIZE: ClassVar[int] = MAX_FRAGMENT_SIZE - \ 210 | struct.calcsize(_HEAD_FMT) 211 | 212 | def __repr__(self): 213 | return f'Fragment(' \ 214 | f'packet_id={self.packet_id}, ' \ 215 | f'more_fragment={self.more_fragment}, ' \ 216 | f'sequence={self.sequence}, ' \ 217 | f'data={self.data})' 218 | 219 | def to_bytes(self): 220 | return struct.pack( 221 | self._HEAD_FMT, 222 | self.packet_id, 223 | (int(self.more_fragment) << 15) + self.sequence 224 | ) + self.data 225 | 226 | @classmethod 227 | def from_bytes(cls, data: bytes): 228 | (packet_id, tmp), data = utils.unpack_data(cls._HEAD_FMT, data) 229 | more_fragment, sequence = bool(tmp >> 15), tmp & 0x7FFF 230 | return cls(packet_id, more_fragment, sequence, data) 231 | -------------------------------------------------------------------------------- /qdp/utils.py: -------------------------------------------------------------------------------- 1 | import struct 2 | from typing import Tuple, Any 3 | 4 | 5 | def unpack_data(fmt: str, 6 | data_with_head: bytes) -> Tuple[Tuple[Any, ...], bytes]: 7 | head_size = struct.calcsize(fmt) 8 | return (struct.unpack(fmt, data_with_head[:head_size]), 9 | data_with_head[head_size:]) 10 | --------------------------------------------------------------------------------
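Not part of the repo, but as a quick illustration of the fragment header layout defined in qdp/__init__.py (a big-endian 16-bit packet id, followed by a 1-bit more_fragment flag packed together with a 15-bit sequence number), the packing logic round-trips like this:

```python
import struct

# Pack a header the same way Fragment.to_bytes does: '>HH' is two
# big-endian unsigned 16-bit fields.
packet_id, more_fragment, sequence = 42, True, 3
head = struct.pack('>HH', packet_id, (int(more_fragment) << 15) + sequence)

# Unpack it the same way Fragment.from_bytes does.
pid, tmp = struct.unpack('>HH', head)
assert (pid, bool(tmp >> 15), tmp & 0x7FFF) == (42, True, 3)
```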