├── danmaku
    ├── paramgen
    │   ├── __init__.py
    │   ├── enc.py
    │   ├── arcparam.py
    │   └── liveparam.py
    ├── tars
    │   ├── tars
    │   │   ├── __init__.py
    │   │   ├── QueryF.tars
    │   │   └── EndpointF.tars
    │   ├── exception.py
    │   ├── __init__.py
    │   ├── core.py
    │   ├── EndpointF.py
    │   ├── __logger.py
    │   ├── __tup.py
    │   ├── __packet.py
    │   ├── __async.py
    │   ├── __util.py
    │   ├── __TimeoutQueue.py
    │   ├── QueryF.py
    │   ├── __servantproxy.py
    │   ├── __rpc.py
    │   ├── __trans.py
    │   ├── __tars.py
    │   └── __adapterproxy.py
    ├── twitch.py
    ├── log.py
    ├── douyu.py
    ├── __init__.py
    ├── huya.py
    ├── bilibili.py
    └── youtube.py
├── example.py
├── README.md
├── setup.py
├── .gitignore
└── LICENSE

/danmaku/paramgen/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/danmaku/tars/tars/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/danmaku/tars/tars/QueryF.tars:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/THMonster/danmaku/HEAD/danmaku/tars/tars/QueryF.tars
--------------------------------------------------------------------------------
/danmaku/tars/tars/EndpointF.tars:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/THMonster/danmaku/HEAD/danmaku/tars/tars/EndpointF.tars
--------------------------------------------------------------------------------
/example.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import danmaku
3 | import sys
4 | 
5 | 
6 | async def printer(q):
7 |     while True:
8 |         m = await q.get()
9 |         print(m)
10 | 
11 | 
12 | async def main():
13 |     q = asyncio.Queue()
14 |     dmc = danmaku.DanmakuClient(sys.argv[1], q)
15 |     asyncio.create_task(printer(q))
16 |     await dmc.start()
17 | 
18 | 
19 | asyncio.run(main())
20 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # danmaku
2 | A danmaku (live comment) library for live-streaming sites, based on aiohttp (WIP)
3 | 
4 | Currently supports Douyu, Huya and Bilibili
5 | 
6 | Thanks to [danmu](https://github.com/littlecodersh/danmu)
7 | 
8 | ## Usage
9 | 
10 | ```python3
11 | import asyncio
12 | import danmaku
13 | 
14 | async def printer(q):
15 |     while True:
16 |         m = await q.get()
17 |         print(m)
18 | 
19 | 
20 | async def main():
21 |     q = asyncio.Queue()
22 |     dmc = danmaku.DanmakuClient('https://douyu.com/9999', q)
23 |     asyncio.create_task(printer(q))
24 |     await dmc.start()
25 | 
26 | asyncio.run(main())
27 | ```
28 | 
--------------------------------------------------------------------------------
/danmaku/paramgen/enc.py:
--------------------------------------------------------------------------------
1 | def vn(val):
2 |     if val < 0:
3 |         raise ValueError
4 |     buf = b""
5 |     while val >> 7:
6 |         m = val & 0xFF | 0x80
7 |         buf += m.to_bytes(1, "big")
8 |         val >>= 7
9 |     buf += val.to_bytes(1, "big")
10 |     return buf
11 | 
12 | 
13 | def tp(a, b, ary):
14 |     return vn((b << 3) | a) + ary
15 | 
16 | 
17 | def rs(a, ary):
18 |     if isinstance(ary, str):
19 |         ary = ary.encode()
20 |     return tp(2, a, vn(len(ary)) + ary)
21 | 
22 | 
23 | def nm(a, ary):
24 |     return tp(0, a, vn(ary))
25 | 
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import setuptools
2 | 
3 | with open("README.md", "r") as fh:
4 |     long_description = fh.read()
5 | 
6 | setuptools.setup(
7 |     name="danmaku",
8 |     version="0.3.3",
9 |     author="IsoaSFlus",
10 |     author_email="me@isoasflus.com",
11 |     description="A python package for getting danmaku of some streaming sites",
12 |     long_description=long_description,
13 |     long_description_content_type="text/markdown",
14 |     url="https://github.com/IsoaSFlus/danmaku",
15 |     packages=setuptools.find_packages(),
16 |     install_requires=[
17 |         "aiohttp",
18 |     ],
19 |     classifiers=[
20 |         "Programming Language :: Python :: 3",
21 |         "License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
22 |     ],
23 |     python_requires=">=3.7",
24 | )
25 | 
--------------------------------------------------------------------------------
/danmaku/paramgen/arcparam.py:
--------------------------------------------------------------------------------
1 | from . import enc
2 | from base64 import urlsafe_b64encode as b64enc
3 | from urllib.parse import quote
4 | 
5 | 
6 | def _header(video_id, channel_id) -> str:
7 |     S1_3 = enc.rs(1, video_id)
8 |     S1_5 = enc.rs(1, channel_id) + enc.rs(2, video_id)
9 |     S1 = enc.rs(3, S1_3) + enc.rs(5, S1_5)
10 |     S3 = enc.rs(48687757, enc.rs(1, video_id))
11 |     header_replay = enc.rs(1, S1) + enc.rs(3, S3) + enc.nm(4, 1)
12 |     return b64enc(header_replay)
13 | 
14 | 
15 | def _build(video_id, seektime, topchat_only, channel_id) -> str:
16 |     chattype = 4 if topchat_only else 1
17 |     if seektime < 0:
18 |         seektime = 0
19 |     timestamp = int(seektime * 1000000)
20 |     header = enc.rs(3, _header(video_id, channel_id))
21 |     timestamp = enc.nm(5, timestamp)
22 |     s6 = enc.nm(6, 0)
23 |     s7 = enc.nm(7, 0)
24 |     s8 = enc.nm(8, 0)
25 |     s9 = enc.nm(9, 4)
26 |     s10 = enc.rs(10, enc.nm(4, 0))
27 |     chattype = enc.rs(14, enc.nm(1, 4))
28 |     s15 = enc.nm(15, 0)
29 |     entity = b"".join((header, timestamp, s6, s7, s8, s9, s10, chattype, s15))
30 |     continuation = enc.rs(156074452, entity)
31 |     return quote(b64enc(continuation).decode())
32 | 
33 | 
34 | def getparam(video_id, seektime=0, topchat_only=False, channel_id="") -> str:
35 |     """
36 |     Parameter
37 |     ---------
38 |     seektime : int
39 |         unit:seconds
40 |         start position of fetching chat data.
41 | topchat_only : bool 42 | if True, fetch only 'top chat' 43 | """ 44 | return _build(video_id, seektime, topchat_only, channel_id) 45 | -------------------------------------------------------------------------------- /danmaku/twitch.py: -------------------------------------------------------------------------------- 1 | import json, re, select, random, traceback 2 | from struct import pack, unpack 3 | 4 | import asyncio, aiohttp, zlib 5 | 6 | 7 | class Twitch: 8 | heartbeat = "PING" 9 | 10 | async def get_ws_info(url): 11 | reg_datas = [] 12 | room_id = re.search(r"/([^/?]+)[^/]*$", url).group(1) 13 | 14 | reg_datas.append("CAP REQ :twitch.tv/tags twitch.tv/commands twitch.tv/membership") 15 | reg_datas.append("PASS SCHMOOPIIE") 16 | nick = f"justinfan{int(8e4 * random.random() + 1e3)}" 17 | reg_datas.append(f"NICK {nick}") 18 | reg_datas.append(f"USER {nick} 8 * :{nick}") 19 | reg_datas.append(f"JOIN #{room_id}") 20 | 21 | return "wss://irc-ws.chat.twitch.tv", reg_datas 22 | 23 | def decode_msg(data): 24 | # print(data) 25 | # print('----------------') 26 | msgs = [] 27 | for d in data.splitlines(): 28 | try: 29 | msg = {} 30 | msg["name"] = re.search(r"display-name=([^;]+);", d).group(1) 31 | msg["content"] = re.search(r"PRIVMSG [^:]+:(.+)", d).group(1) 32 | # msg['msg_type'] = {'dgb': 'gift', 'chatmsg': 'danmaku', 33 | # 'uenter': 'enter'}.get(msg['type'], 'other') 34 | msg["msg_type"] = "danmaku" 35 | c = re.search(r"color=#([a-zA-Z0-9]{6});", d) 36 | msg["color"] = "ffffff" if c == None else c.group(1).lower() 37 | msgs.append(msg) 38 | except Exception as e: 39 | # traceback.print_exc() 40 | pass 41 | return msgs 42 | -------------------------------------------------------------------------------- /danmaku/log.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | 4 | class LogSystem(object): 5 | handlerList = [] 6 | showOnCmd = True 7 | loggingLevel = logging.INFO 8 | loggingFile = None 9 | 10 | def __init__(self): 11 | self.cmdHandler = None 12 | for handler in logging.getLogger().handlers: 13 | if "StreamHandler" in str(handler): 14 | self.cmdHandler = handler 15 | if self.cmdHandler is None: 16 | self.cmdHandler = logging.StreamHandler() 17 | logging.getLogger().addHandler(self.cmdHandler) 18 | self.logger = logging.getLogger("danmu") 19 | self.logger.addHandler(logging.NullHandler()) 20 | self.logger.setLevel(self.loggingLevel) 21 | self.fileHandler = None 22 | 23 | def set_logging(self, showOnCmd=True, loggingFile=None, loggingLevel=logging.INFO): 24 | if showOnCmd != self.showOnCmd: 25 | if showOnCmd: 26 | logging.getLogger().addHandler(self.cmdHandler) 27 | else: 28 | logging.getLogger().removeHandler(self.cmdHandler) 29 | self.showOnCmd = showOnCmd 30 | if loggingFile != self.loggingFile: 31 | if self.loggingFile is not None: # clear old fileHandler 32 | self.logger.removeHandler(self.fileHandler) 33 | self.fileHandler.close() 34 | if loggingFile is not None: # add new fileHandler 35 | self.fileHandler = logging.FileHandler(loggingFile) 36 | self.logger.addHandler(self.fileHandler) 37 | self.loggingFile = loggingFile 38 | if loggingLevel != self.loggingLevel: 39 | self.logger.setLevel(loggingLevel) 40 | self.loggingLevel = loggingLevel 41 | 42 | 43 | ls = LogSystem() 44 | set_logging = ls.set_logging 45 | -------------------------------------------------------------------------------- /danmaku/douyu.py: -------------------------------------------------------------------------------- 1 | import json, re, select, 
random 2 | from struct import pack, unpack 3 | import asyncio, aiohttp 4 | 5 | color_tab = { 6 | "2": "1e87f0", 7 | "3": "7ac84b", 8 | "4": "ff7f00", 9 | "6": "ff69b4", 10 | "5": "9b39f4", 11 | "1": "ff0000", 12 | } 13 | 14 | 15 | class Douyu: 16 | heartbeat = b"\x14\x00\x00\x00\x14\x00\x00\x00\xb1\x02\x00\x00\x74\x79\x70\x65\x40\x3d\x6d\x72\x6b\x6c\x2f\x00" 17 | 18 | async def get_ws_info(url): 19 | reg_datas = [] 20 | room_id = url.split("/")[-1] 21 | data = f"type@=loginreq/roomid@={room_id}/" 22 | s = pack("i", 9 + len(data)) * 2 23 | s += b"\xb1\x02\x00\x00" # 689 24 | s += data.encode("ascii") + b"\x00" 25 | reg_datas.append(s) 26 | data = f"type@=joingroup/rid@={room_id}/gid@=-9999/" 27 | s = pack("i", 9 + len(data)) * 2 28 | s += b"\xb1\x02\x00\x00" # 689 29 | s += data.encode("ascii") + b"\x00" 30 | reg_datas.append(s) 31 | return "wss://danmuproxy.douyu.com:8506/", reg_datas 32 | 33 | def decode_msg(data): 34 | msgs = [] 35 | for msg in re.findall(b"(type@=.*?)\x00", data): 36 | try: 37 | msg = msg.replace(b"@=", b'":"').replace(b"/", b'","') 38 | msg = msg.replace(b"@A", b"@").replace(b"@S", b"/") 39 | msg = json.loads((b'{"' + msg[:-2] + b"}").decode("utf8", "ignore")) 40 | msg["name"] = msg.get("nn", "") 41 | msg["content"] = msg.get("txt", "") 42 | msg["msg_type"] = {"dgb": "gift", "chatmsg": "danmaku", "uenter": "enter"}.get( 43 | msg["type"], "other" 44 | ) 45 | msg["color"] = color_tab.get(msg.get("col", "-1"), "ffffff") 46 | msgs.append(msg) 47 | except Exception as e: 48 | pass 49 | return msgs 50 | -------------------------------------------------------------------------------- /danmaku/tars/exception.py: -------------------------------------------------------------------------------- 1 | # Tencent is pleased to support the open source community by making Tars available. 2 | # 3 | # Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved. 4 | # 5 | # Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 6 | # in compliance with the License. You may obtain a copy of the License at 7 | # 8 | # https://opensource.org/licenses/BSD-3-Clause 9 | # 10 | # Unless required by applicable law or agreed to in writing, software distributed 11 | # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 12 | # CONDITIONS OF ANY KIND, either express or implied. See the License for the 13 | # specific language governing permissions and limitations under the License. 
14 | # 15 | 16 | 17 | class TarsException(Exception): 18 | pass 19 | 20 | 21 | class TarsTarsDecodeRequireNotExist(TarsException): 22 | pass 23 | 24 | 25 | class TarsTarsDecodeMismatch(TarsException): 26 | pass 27 | 28 | 29 | class TarsTarsDecodeInvalidValue(TarsException): 30 | pass 31 | 32 | 33 | class TarsTarsUnsupportType(TarsException): 34 | pass 35 | 36 | 37 | class TarsNetConnectException(TarsException): 38 | pass 39 | 40 | 41 | class TarsNetConnectLostException(TarsException): 42 | pass 43 | 44 | 45 | class TarsNetSocketException(TarsException): 46 | pass 47 | 48 | 49 | class TarsProxyDecodeException(TarsException): 50 | pass 51 | 52 | 53 | class TarsProxyEncodeException(TarsException): 54 | pass 55 | 56 | 57 | class TarsServerEncodeException(TarsException): 58 | pass 59 | 60 | 61 | class TarsServerDecodeException(TarsException): 62 | pass 63 | 64 | 65 | class TarsServerNoFuncException(TarsException): 66 | pass 67 | 68 | 69 | class TarsServerNoServantException(TarsException): 70 | pass 71 | 72 | 73 | class TarsServerQueueTimeoutException(TarsException): 74 | pass 75 | 76 | 77 | class TarsServerUnknownException(TarsException): 78 | pass 79 | 80 | 81 | class TarsSyncCallTimeoutException(TarsException): 82 | pass 83 | 84 | 85 | class TarsRegistryException(TarsException): 86 | pass 87 | 88 | 89 | class TarsServerResetGridException(TarsException): 90 | pass 91 | -------------------------------------------------------------------------------- /danmaku/tars/__init__.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | 5 | # Tencent is pleased to support the open source community by making Tars available. 6 | # 7 | # Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved. 8 | # 9 | # Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 10 | # in compliance with the License. You may obtain a copy of the License at 11 | # 12 | # https://opensource.org/licenses/BSD-3-Clause 13 | # 14 | # Unless required by applicable law or agreed to in writing, software distributed 15 | # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 16 | # CONDITIONS OF ANY KIND, either express or implied. See the License for the 17 | # specific language governing permissions and limitations under the License. 
18 | # 19 | 20 | __version__ = "0.0.1" 21 | 22 | from .__util import util 23 | from .__tars import TarsInputStream 24 | from .__tars import TarsOutputStream 25 | from .__tup import TarsUniPacket 26 | 27 | 28 | class tarscore: 29 | class TarsInputStream(TarsInputStream): 30 | pass 31 | 32 | class TarsOutputStream(TarsOutputStream): 33 | pass 34 | 35 | class TarsUniPacket(TarsUniPacket): 36 | pass 37 | 38 | class boolean(util.boolean): 39 | pass 40 | 41 | class int8(util.int8): 42 | pass 43 | 44 | class uint8(util.uint8): 45 | pass 46 | 47 | class int16(util.int16): 48 | pass 49 | 50 | class uint16(util.uint16): 51 | pass 52 | 53 | class int32(util.int32): 54 | pass 55 | 56 | class uint32(util.uint32): 57 | pass 58 | 59 | class int64(util.int64): 60 | pass 61 | 62 | class float(util.float): 63 | pass 64 | 65 | class double(util.double): 66 | pass 67 | 68 | class bytes(util.bytes): 69 | pass 70 | 71 | class string(util.string): 72 | pass 73 | 74 | class struct(util.struct): 75 | pass 76 | 77 | @staticmethod 78 | def mapclass(ktype, vtype): 79 | return util.mapclass(ktype, vtype) 80 | 81 | @staticmethod 82 | def vctclass(vtype): 83 | return util.vectorclass(vtype) 84 | 85 | @staticmethod 86 | def printHex(buff): 87 | util.printHex(buff) 88 | -------------------------------------------------------------------------------- /danmaku/tars/core.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | 5 | # Tencent is pleased to support the open source community by making Tars available. 6 | # 7 | # Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved. 8 | # 9 | # Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 10 | # in compliance with the License. You may obtain a copy of the License at 11 | # 12 | # https://opensource.org/licenses/BSD-3-Clause 13 | # 14 | # Unless required by applicable law or agreed to in writing, software distributed 15 | # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 16 | # CONDITIONS OF ANY KIND, either express or implied. See the License for the 17 | # specific language governing permissions and limitations under the License. 
18 | # 19 | 20 | __version__ = "0.0.1" 21 | 22 | from __util import util 23 | from __tars import TarsInputStream 24 | from __tars import TarsOutputStream 25 | from __tup import TarsUniPacket 26 | 27 | 28 | class tarscore: 29 | class TarsInputStream(TarsInputStream): 30 | pass 31 | 32 | class TarsOutputStream(TarsOutputStream): 33 | pass 34 | 35 | class TarsUniPacket(TarsUniPacket): 36 | pass 37 | 38 | class boolean(util.boolean): 39 | pass 40 | 41 | class int8(util.int8): 42 | pass 43 | 44 | class uint8(util.uint8): 45 | pass 46 | 47 | class int16(util.int16): 48 | pass 49 | 50 | class uint16(util.uint16): 51 | pass 52 | 53 | class int32(util.int32): 54 | pass 55 | 56 | class uint32(util.uint32): 57 | pass 58 | 59 | class int64(util.int64): 60 | pass 61 | 62 | class float(util.float): 63 | pass 64 | 65 | class double(util.double): 66 | pass 67 | 68 | class bytes(util.bytes): 69 | pass 70 | 71 | class string(util.string): 72 | pass 73 | 74 | class struct(util.struct): 75 | pass 76 | 77 | @staticmethod 78 | def mapclass(ktype, vtype): 79 | return util.mapclass(ktype, vtype) 80 | 81 | @staticmethod 82 | def vctclass(vtype): 83 | return util.vectorclass(vtype) 84 | 85 | @staticmethod 86 | def printHex(buff): 87 | util.printHex(buff) 88 | 89 | 90 | # 被用户引用 91 | from __util import configParse 92 | from __rpc import Communicator 93 | from exception import * 94 | from __logger import tarsLogger 95 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | dmc.py 131 | -------------------------------------------------------------------------------- /danmaku/paramgen/liveparam.py: -------------------------------------------------------------------------------- 1 | import random 2 | import time 3 | from . import enc 4 | from base64 import urlsafe_b64encode as b64enc 5 | from urllib.parse import quote 6 | 7 | 8 | def _header(video_id, channel_id) -> str: 9 | S1_3 = enc.rs(1, video_id) 10 | S1_5 = enc.rs(1, channel_id) + enc.rs(2, video_id) 11 | S1 = enc.rs(3, S1_3) + enc.rs(5, S1_5) 12 | S3 = enc.rs(48687757, enc.rs(1, video_id)) 13 | header_replay = enc.rs(1, S1) + enc.rs(3, S3) + enc.nm(4, 1) 14 | return b64enc(header_replay) 15 | 16 | 17 | def _build(video_id, channel_id, ts1, ts2, ts3, ts4, ts5, topchat_only) -> str: 18 | chattype = 4 if topchat_only else 1 19 | 20 | b1 = enc.nm(1, 0) 21 | b2 = enc.nm(2, 0) 22 | b3 = enc.nm(3, 0) 23 | b4 = enc.nm(4, 0) 24 | b7 = enc.rs(7, "") 25 | b8 = enc.nm(8, 0) 26 | b9 = enc.rs(9, "") 27 | timestamp2 = enc.nm(10, ts2) 28 | b11 = enc.nm(11, 3) 29 | b15 = enc.nm(15, 0) 30 | 31 | header = enc.rs(3, _header(video_id, channel_id)) 32 | timestamp1 = enc.nm(5, ts1) 33 | s6 = enc.nm(6, 0) 34 | s7 = enc.nm(7, 0) 35 | s8 = enc.nm(8, 1) 36 | body = enc.rs(9, b"".join((b1, b2, b3, b4, b7, b8, b9, timestamp2, b11, b15))) 37 | timestamp3 = enc.nm(10, ts3) 38 | timestamp4 = enc.nm(11, ts4) 39 | s13 = enc.nm(13, chattype) 40 | chattype = enc.rs(16, enc.nm(1, chattype)) 41 | s17 = enc.nm(17, 0) 42 | str19 = enc.rs(19, enc.nm(1, 0)) 43 | timestamp5 = enc.nm(20, ts5) 44 | entity = b"".join( 45 | ( 46 | header, 47 | timestamp1, 48 | s6, 49 | s7, 50 | s8, 51 | body, 52 | timestamp3, 53 | timestamp4, 54 | s13, 55 | chattype, 56 | s17, 57 | str19, 58 | timestamp5, 59 | ) 60 | ) 61 | continuation = enc.rs(119693434, entity) 62 | return quote(b64enc(continuation).decode()) 63 | 64 | 65 | def _times(past_sec): 66 | n = int(time.time()) 67 | _ts1 = n - random.uniform(0, 1 * 3) 68 | _ts2 = n - random.uniform(0.01, 0.99) 69 | _ts3 = n - past_sec + random.uniform(0, 1) 70 | _ts4 = n - random.uniform(10 * 60, 60 * 60) 71 | _ts5 = n - random.uniform(0.01, 0.99) 72 | return list(map(lambda x: int(x * 1000000), [_ts1, _ts2, _ts3, _ts4, _ts5])) 73 | 74 | 75 | def getparam(video_id, channel_id, past_sec=0, topchat_only=False) -> str: 76 | """ 77 | Parameter 78 | --------- 79 | past_sec : int 80 | seconds to load past chat data 81 | topchat_only : bool 82 | if True, fetch only 'top chat' 83 | """ 84 | return _build(video_id, channel_id, *_times(past_sec), topchat_only) 85 | -------------------------------------------------------------------------------- /danmaku/tars/EndpointF.py: -------------------------------------------------------------------------------- 1 | # Tencent is pleased to support the open source community by making Tars available. 2 | # 3 | # Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved. 
4 | # 5 | # Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 6 | # in compliance with the License. You may obtain a copy of the License at 7 | # 8 | # https://opensource.org/licenses/BSD-3-Clause 9 | # 10 | # Unless required by applicable law or agreed to in writing, software distributed 11 | # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 12 | # CONDITIONS OF ANY KIND, either express or implied. See the License for the 13 | # specific language governing permissions and limitations under the License. 14 | # 15 | 16 | from core import tarscore 17 | 18 | 19 | class EndpointF(tarscore.struct): 20 | __tars_class__ = "register.EndpointF" 21 | 22 | def __init__(self): 23 | self.host = "" 24 | self.port = 0 25 | self.timeout = 0 26 | self.istcp = 0 27 | self.grid = 0 28 | self.groupworkid = 0 29 | self.grouprealid = 0 30 | self.setId = "" 31 | self.qos = 0 32 | self.bakFlag = 0 33 | self.weight = 0 34 | self.weightType = 0 35 | 36 | @staticmethod 37 | def writeTo(oos, value): 38 | oos.write(tarscore.string, 0, value.host) 39 | oos.write(tarscore.int32, 1, value.port) 40 | oos.write(tarscore.int32, 2, value.timeout) 41 | oos.write(tarscore.int32, 3, value.istcp) 42 | oos.write(tarscore.int32, 4, value.grid) 43 | oos.write(tarscore.int32, 5, value.groupworkid) 44 | oos.write(tarscore.int32, 6, value.grouprealid) 45 | oos.write(tarscore.string, 7, value.setId) 46 | oos.write(tarscore.int32, 8, value.qos) 47 | oos.write(tarscore.int32, 9, value.bakFlag) 48 | oos.write(tarscore.int32, 11, value.weight) 49 | oos.write(tarscore.int32, 12, value.weightType) 50 | 51 | @staticmethod 52 | def readFrom(ios): 53 | value = EndpointF() 54 | value.host = ios.read(tarscore.string, 0, True, value.host) 55 | value.port = ios.read(tarscore.int32, 1, True, value.port) 56 | value.timeout = ios.read(tarscore.int32, 2, True, value.timeout) 57 | value.istcp = ios.read(tarscore.int32, 3, True, value.istcp) 58 | value.grid = ios.read(tarscore.int32, 4, True, value.grid) 59 | value.groupworkid = ios.read(tarscore.int32, 5, False, value.groupworkid) 60 | value.grouprealid = ios.read(tarscore.int32, 6, False, value.grouprealid) 61 | value.setId = ios.read(tarscore.string, 7, False, value.setId) 62 | value.qos = ios.read(tarscore.int32, 8, False, value.qos) 63 | value.bakFlag = ios.read(tarscore.int32, 9, False, value.bakFlag) 64 | value.weight = ios.read(tarscore.int32, 11, False, value.weight) 65 | value.weightType = ios.read(tarscore.int32, 12, False, value.weightType) 66 | return value 67 | -------------------------------------------------------------------------------- /danmaku/__init__.py: -------------------------------------------------------------------------------- 1 | import re, asyncio, aiohttp 2 | 3 | from .youtube import Youtube 4 | from .twitch import Twitch 5 | from .bilibili import Bilibili 6 | from .douyu import Douyu 7 | from .huya import Huya 8 | 9 | __all__ = ["DanmakuClient"] 10 | 11 | 12 | class DanmakuClient: 13 | def __init__(self, url, q, **kargs): 14 | self.__url = "" 15 | self.__site = None 16 | self.__usite = None 17 | self.__hs = None 18 | self.__ws = None 19 | self.__stop = False 20 | self.__dm_queue = q 21 | self.__link_status = True 22 | self.__extra_data = kargs 23 | if "http://" == url[:7] or "https://" == url[:8]: 24 | self.__url = url 25 | else: 26 | self.__url = "http://" + url 27 | for u, s in { 28 | "douyu.com": Douyu, 29 | "live.bilibili.com": Bilibili, 30 | "twitch.tv": Twitch, 31 | "huya.com": Huya, 32 | }.items(): 
33 | if re.match(r"^(?:http[s]?://)?.*?%s/(.+?)$" % u, url): 34 | self.__site = s 35 | break 36 | if self.__site == None: 37 | for u, s in {"youtube.com/channel": Youtube, "youtube.com/watch": Youtube}.items(): 38 | if re.match(r"^(?:http[s]?://)?.*?%s(.+?)$" % u, url): 39 | self.__usite = s 40 | if self.__usite == None: 41 | raise Exception("Invalid link!") 42 | self.__hs = aiohttp.ClientSession() 43 | 44 | async def init_ws(self): 45 | ws_url, reg_datas = await self.__site.get_ws_info(self.__url) 46 | self.__ws = await self.__hs.ws_connect(ws_url) 47 | for reg_data in reg_datas: 48 | if type(reg_data) == str: 49 | await self.__ws.send_str(reg_data) 50 | else: 51 | await self.__ws.send_bytes(reg_data) 52 | 53 | async def heartbeats(self): 54 | while self.__stop != True: 55 | # print('heartbeat') 56 | await asyncio.sleep(20) 57 | try: 58 | if type(self.__site.heartbeat) == str: 59 | await self.__ws.send_str(self.__site.heartbeat) 60 | else: 61 | await self.__ws.send_bytes(self.__site.heartbeat) 62 | except: 63 | pass 64 | 65 | async def fetch_danmaku(self): 66 | while self.__stop != True: 67 | async for msg in self.__ws: 68 | # self.__link_status = True 69 | ms = self.__site.decode_msg(msg.data) 70 | for m in ms: 71 | await self.__dm_queue.put(m) 72 | if self.__stop != True: 73 | await asyncio.sleep(1) 74 | await self.init_ws() 75 | await asyncio.sleep(1) 76 | 77 | async def start(self): 78 | if self.__site != None: 79 | await self.init_ws() 80 | await asyncio.gather( 81 | self.heartbeats(), 82 | self.fetch_danmaku(), 83 | ) 84 | else: 85 | await self.__usite.run(self.__url, self.__dm_queue, self.__hs, **self.__extra_data) 86 | 87 | async def stop(self): 88 | self.__stop = True 89 | if self.__site != None: 90 | await self.__hs.close() 91 | else: 92 | await self.__usite.stop() 93 | await self.__hs.close() 94 | -------------------------------------------------------------------------------- /danmaku/tars/__logger.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # filename: __logger.py 5 | 6 | # Tencent is pleased to support the open source community by making Tars available. 7 | # 8 | # Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved. 9 | # 10 | # Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 11 | # in compliance with the License. You may obtain a copy of the License at 12 | # 13 | # https://opensource.org/licenses/BSD-3-Clause 14 | # 15 | # Unless required by applicable law or agreed to in writing, software distributed 16 | # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 17 | # CONDITIONS OF ANY KIND, either express or implied. See the License for the 18 | # specific language governing permissions and limitations under the License. 
19 | # 20 | 21 | """ 22 | @version: 0.01 23 | @brief: 日志模块 24 | """ 25 | 26 | # 仅用于调试 27 | 28 | import logging 29 | from logging.handlers import RotatingFileHandler 30 | import os 31 | import re 32 | 33 | tarsLogger = logging.getLogger("TARS client") 34 | strToLoggingLevel = { 35 | "critical": logging.CRITICAL, 36 | "error": logging.ERROR, 37 | "warn": logging.WARNING, 38 | "info": logging.INFO, 39 | "debug": logging.DEBUG, 40 | "none": logging.NOTSET, 41 | } 42 | # console = logging.StreamHandler() 43 | # console.setLevel(logging.DEBUG) 44 | # filelog = logging.FileHandler('tars.log') 45 | # filelog.setLevel(logging.DEBUG) 46 | # formatter = logging.Formatter('%(asctime)s | %(levelname)8s | [%(name)s] %(message)s', '%Y-%m-%d %H:%M:%S') 47 | # console.setFormatter(formatter) 48 | # filelog.setFormatter(formatter) 49 | # tarsLogger.addHandler(console) 50 | # tarsLogger.addHandler(filelog) 51 | # tarsLogger.setLevel(logging.DEBUG) 52 | # tarsLogger.setLevel(logging.INFO) 53 | # tarsLogger.setLevel(logging.ERROR) 54 | 55 | 56 | def createLogFile(filename): 57 | if filename.endswith("/"): 58 | raise ValueError("The logfile is a dir not a file") 59 | if os.path.exists(filename) and os.path.isfile(filename): 60 | pass 61 | else: 62 | fileComposition = str.split(filename, "/") 63 | print(fileComposition) 64 | currentFile = "" 65 | for item in fileComposition: 66 | if item == fileComposition[-1]: 67 | currentFile += item 68 | if not os.path.exists(currentFile) or not os.path.isfile(currentFile): 69 | while True: 70 | try: 71 | os.mknod(currentFile) 72 | break 73 | except OSError as msg: 74 | errno = re.findall(r"\d+", str(msg)) 75 | if len(errno) > 0 and errno[0] == "17": 76 | currentFile += ".log" 77 | continue 78 | break 79 | currentFile += item + "/" 80 | if not os.path.exists(currentFile): 81 | os.mkdir(currentFile) 82 | 83 | 84 | def initLog(logpath, logsize, lognum, loglevel): 85 | createLogFile(logpath) 86 | handler = RotatingFileHandler(filename=logpath, maxBytes=logsize, backupCount=lognum) 87 | formatter = logging.Formatter( 88 | "%(asctime)s | %(levelname)6s | [%(filename)18s:%(lineno)4d] | [%(thread)d] %(message)s", 89 | "%Y-%m-%d %H:%M:%S", 90 | ) 91 | handler.setFormatter(formatter) 92 | tarsLogger.addHandler(handler) 93 | if loglevel in strToLoggingLevel: 94 | tarsLogger.setLevel(strToLoggingLevel[loglevel]) 95 | else: 96 | tarsLogger.setLevel(strToLoggingLevel["error"]) 97 | 98 | 99 | if __name__ == "__main__": 100 | tarsLogger.debug("debug log") 101 | tarsLogger.info("info log") 102 | tarsLogger.warning("warning log") 103 | tarsLogger.error("error log") 104 | tarsLogger.critical("critical log") 105 | -------------------------------------------------------------------------------- /danmaku/tars/__tup.py: -------------------------------------------------------------------------------- 1 | # Tencent is pleased to support the open source community by making Tars available. 2 | # 3 | # Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved. 4 | # 5 | # Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 6 | # in compliance with the License. You may obtain a copy of the License at 7 | # 8 | # https://opensource.org/licenses/BSD-3-Clause 9 | # 10 | # Unless required by applicable law or agreed to in writing, software distributed 11 | # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 12 | # CONDITIONS OF ANY KIND, either express or implied. 
See the License for the 13 | # specific language governing permissions and limitations under the License. 14 | # 15 | 16 | import struct 17 | import string 18 | from .__util import util 19 | from .__tars import TarsOutputStream 20 | from .__tars import TarsInputStream 21 | from .__packet import RequestPacket 22 | 23 | 24 | class TarsUniPacket(object): 25 | def __init__(self): 26 | self.__mapa = util.mapclass(util.string, util.bytes) 27 | self.__mapv = util.mapclass(util.string, self.__mapa) 28 | self.__buffer = self.__mapv() 29 | self.__code = RequestPacket() 30 | 31 | # @property 32 | # def version(self): 33 | # return self.__code.iVersion 34 | 35 | # @version.setter 36 | # def version(self, value): 37 | # self.__code.iVersion = value 38 | 39 | @property 40 | def servant(self): 41 | return self.__code.sServantName 42 | 43 | @servant.setter 44 | def servant(self, value): 45 | self.__code.sServantName = value 46 | 47 | @property 48 | def func(self): 49 | return self.__code.sFuncName 50 | 51 | @func.setter 52 | def func(self, value): 53 | self.__code.sFuncName = value 54 | 55 | @property 56 | def requestid(self): 57 | return self.__code.iRequestId 58 | 59 | @requestid.setter 60 | def requestid(self, value): 61 | self.__code.iRequestId = value 62 | 63 | @property 64 | def result_code(self): 65 | if ("STATUS_RESULT_CODE" in self.__code.status) == False: 66 | return 0 67 | 68 | return string.atoi(self.__code.status["STATUS_RESULT_CODE"]) 69 | 70 | @property 71 | def result_desc(self): 72 | if ("STATUS_RESULT_DESC" in self.__code.status) == False: 73 | return "" 74 | 75 | return self.__code.status["STATUS_RESULT_DESC"] 76 | 77 | def put(self, vtype, name, value): 78 | oos = TarsOutputStream() 79 | oos.write(vtype, 0, value) 80 | self.__buffer[name] = {vtype.__tars_class__: oos.getBuffer()} 81 | 82 | def get(self, vtype, name): 83 | if (name in self.__buffer) == False: 84 | raise Exception("UniAttribute not found key:%s,type:%s" % (name, vtype.__tars_class__)) 85 | 86 | t = self.__buffer[name] 87 | if (vtype.__tars_class__ in t) == False: 88 | raise Exception("UniAttribute not found type:" + vtype.__tars_class__) 89 | 90 | o = TarsInputStream(t[vtype.__tars_class__]) 91 | return o.read(vtype, 0, True) 92 | 93 | def encode(self): 94 | oos = TarsOutputStream() 95 | oos.write(self.__mapv, 0, self.__buffer) 96 | 97 | self.__code.iVersion = 2 98 | self.__code.sBuffer = oos.getBuffer() 99 | 100 | sos = TarsOutputStream() 101 | RequestPacket.writeTo(sos, self.__code) 102 | 103 | return struct.pack("!i", 4 + len(sos.getBuffer())) + sos.getBuffer() 104 | 105 | def decode(self, buf): 106 | ois = TarsInputStream(buf[4:]) 107 | self.__code = RequestPacket.readFrom(ois) 108 | 109 | sis = TarsInputStream(self.__code.sBuffer) 110 | self.__buffer = sis.read(self.__mapv, 0, True) 111 | 112 | def clear(self): 113 | self.__code.__init__() 114 | 115 | def haskey(self, name): 116 | return name in self.__buffer 117 | -------------------------------------------------------------------------------- /danmaku/huya.py: -------------------------------------------------------------------------------- 1 | import json, re, select, random 2 | from struct import pack, unpack 3 | 4 | import asyncio, aiohttp 5 | 6 | from .tars import tarscore 7 | 8 | 9 | class Huya: 10 | heartbeat = 
b"\x00\x03\x1d\x00\x00\x69\x00\x00\x00\x69\x10\x03\x2c\x3c\x4c\x56\x08\x6f\x6e\x6c\x69\x6e\x65\x75\x69\x66\x0f\x4f\x6e\x55\x73\x65\x72\x48\x65\x61\x72\x74\x42\x65\x61\x74\x7d\x00\x00\x3c\x08\x00\x01\x06\x04\x74\x52\x65\x71\x1d\x00\x00\x2f\x0a\x0a\x0c\x16\x00\x26\x00\x36\x07\x61\x64\x72\x5f\x77\x61\x70\x46\x00\x0b\x12\x03\xae\xf0\x0f\x22\x03\xae\xf0\x0f\x3c\x42\x6d\x52\x02\x60\x5c\x60\x01\x7c\x82\x00\x0b\xb0\x1f\x9c\xac\x0b\x8c\x98\x0c\xa8\x0c" 11 | 12 | async def get_ws_info(url): 13 | reg_datas = [] 14 | url = "https://m.huya.com/" + url.split("/")[-1] 15 | headers = { 16 | "user-agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Mobile Safari/537.36" 17 | } 18 | async with aiohttp.ClientSession() as session: 19 | async with session.get(url, headers=headers) as resp: 20 | room_page = await resp.text() 21 | # print(room_page) 22 | m = re.search(r"window.HNF_GLOBAL_INIT *= *(\{.+?\})\s*", room_page, re.MULTILINE) 23 | j = json.loads(m.group(1)) 24 | ayyuid = j["roomInfo"]["tProfileInfo"]["lUid"] 25 | # tid = j["roomInfo"]["tLiveInfo"]["tLiveStreamInfo"]["vStreamInfo"]["value"][0]["lChannelId"] 26 | # sid = j["roomInfo"]["tLiveInfo"]["tLiveStreamInfo"]["vStreamInfo"]["value"][0]["lSubChannelId"] 27 | 28 | # print(ayyuid) 29 | # print(tid) 30 | # print(sid) 31 | 32 | # a = tarscore.string 33 | 34 | l = tarscore.vctclass(tarscore.string)() 35 | l.append(f"live:{ayyuid}") 36 | l.append(f"chat:{ayyuid}") 37 | oos = tarscore.TarsOutputStream() 38 | oos.write(tarscore.vctclass(tarscore.string), 0, l) 39 | oos.write(tarscore.string, 1, "") 40 | 41 | # oos.write(tarscore.int64, 0, int(ayyuid)) 42 | # oos.write(tarscore.boolean, 1, True) # Anonymous 43 | # oos.write(tarscore.string, 2, "") # sGuid 44 | # oos.write(tarscore.string, 3, "") 45 | # oos.write(tarscore.int64, 4, int(tid)) 46 | # oos.write(tarscore.int64, 5, int(sid)) 47 | # oos.write(tarscore.int64, 6, 0) 48 | # oos.write(tarscore.int64, 7, 0) 49 | 50 | 51 | wscmd = tarscore.TarsOutputStream() 52 | wscmd.write(tarscore.int32, 0, 16) 53 | # wscmd.write(tarscore.int32, 0, 1) 54 | wscmd.write(tarscore.bytes, 1, oos.getBuffer()) 55 | 56 | reg_datas.append(wscmd.getBuffer()) 57 | return "wss://cdnws.api.huya.com/", reg_datas 58 | 59 | def decode_msg(data): 60 | class user(tarscore.struct): 61 | def readFrom(ios): 62 | return ios.read(tarscore.string, 2, False).decode("utf8") 63 | 64 | class dcolor(tarscore.struct): 65 | def readFrom(ios): 66 | return ios.read(tarscore.int32, 0, False) 67 | 68 | name = "" 69 | content = "" 70 | msgs = [] 71 | ios = tarscore.TarsInputStream(data) 72 | 73 | if ios.read(tarscore.int32, 0, False) == 7: 74 | ios = tarscore.TarsInputStream(ios.read(tarscore.bytes, 1, False)) 75 | if ios.read(tarscore.int64, 1, False) == 1400: 76 | ios = tarscore.TarsInputStream(ios.read(tarscore.bytes, 2, False)) 77 | name = ios.read(user, 0, False) # username 78 | content = ios.read(tarscore.string, 3, False).decode("utf8") # content 79 | color = ios.read(dcolor, 6, False) # danmaku color 80 | if color == -1: 81 | color = 16777215 82 | 83 | if name != "": 84 | msg = {"name": name, "color": f"{color:06x}", "content": content, "msg_type": "danmaku"} 85 | else: 86 | msg = {"name": "", "content": "", "msg_type": "other"} 87 | msgs.append(msg) 88 | return msgs 89 | -------------------------------------------------------------------------------- /danmaku/bilibili.py: -------------------------------------------------------------------------------- 1 | import json, re, 
select, random, traceback 2 | from struct import pack, unpack 3 | 4 | import asyncio, aiohttp, zlib 5 | 6 | 7 | class Bilibili: 8 | heartbeat = b"\x00\x00\x00\x1f\x00\x10\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\x5b\x6f\x62\x6a\x65\x63\x74\x20\x4f\x62\x6a\x65\x63\x74\x5d" 9 | 10 | async def get_ws_info(url): 11 | url = "https://api.live.bilibili.com/room/v1/Room/room_init?id=" + url.split("/")[-1] 12 | reg_datas = [] 13 | async with aiohttp.ClientSession() as session: 14 | async with session.get(url) as resp: 15 | room_json = json.loads(await resp.text()) 16 | room_id = room_json["data"]["room_id"] 17 | data = json.dumps( 18 | {"roomid": room_id, "uid": int(1e14 + 2e14 * random.random()), "protover": 2}, 19 | separators=(",", ":"), 20 | ).encode("ascii") 21 | data = ( 22 | pack(">i", len(data) + 16) 23 | + b"\x00\x10\x00\x01" 24 | + pack(">i", 7) 25 | + pack(">i", 1) 26 | + data 27 | ) 28 | reg_datas.append(data) 29 | 30 | return "wss://broadcastlv.chat.bilibili.com/sub", reg_datas 31 | 32 | def decode_msg(data): 33 | dm_list_compressed = [] 34 | dm_list = [] 35 | ops = [] 36 | msgs = [] 37 | # print(data) 38 | while True: 39 | try: 40 | packetLen, headerLen, ver, op, seq = unpack("!IHHII", data[0:16]) 41 | except Exception as e: 42 | break 43 | if len(data) < packetLen: 44 | break 45 | if ver == 1 or ver == 0: 46 | ops.append(op) 47 | dm_list.append(data[16:packetLen]) 48 | elif ver == 2: 49 | dm_list_compressed.append(data[16:packetLen]) 50 | if len(data) == packetLen: 51 | data = b"" 52 | break 53 | else: 54 | data = data[packetLen:] 55 | 56 | for dm in dm_list_compressed: 57 | d = zlib.decompress(dm) 58 | while True: 59 | try: 60 | packetLen, headerLen, ver, op, seq = unpack("!IHHII", d[0:16]) 61 | except Exception as e: 62 | break 63 | if len(d) < packetLen: 64 | break 65 | ops.append(op) 66 | dm_list.append(d[16:packetLen]) 67 | if len(d) == packetLen: 68 | d = b"" 69 | break 70 | else: 71 | d = d[packetLen:] 72 | 73 | for i, d in enumerate(dm_list): 74 | try: 75 | msg = {} 76 | if ops[i] == 5: 77 | j = json.loads(d) 78 | msg["msg_type"] = { 79 | "SEND_GIFT": "gift", 80 | "DANMU_MSG": "danmaku", 81 | "WELCOME": "enter", 82 | "NOTICE_MSG": "broadcast", 83 | }.get(j.get("cmd"), "other") 84 | if msg["msg_type"] == "danmaku": 85 | msg["name"] = j.get("info", ["", "", ["", ""]])[2][1] or j.get( 86 | "data", {} 87 | ).get("uname", "") 88 | msg["content"] = j.get("info", ["", ""])[1] 89 | msg["color"] = f"{j.get('info', [[0, 0, 0, 16777215]])[0][3]:06x}" 90 | elif msg["msg_type"] == "broadcast": 91 | msg["type"] = j.get("msg_type", 0) 92 | msg["roomid"] = j.get("real_roomid", 0) 93 | msg["content"] = j.get("msg_common", "none") 94 | msg["raw"] = j 95 | else: 96 | msg["content"] = j 97 | else: 98 | msg = {"name": "", "content": d, "msg_type": "other"} 99 | msgs.append(msg) 100 | except Exception as e: 101 | # traceback.print_exc() 102 | # print(e) 103 | pass 104 | 105 | return msgs 106 | -------------------------------------------------------------------------------- /danmaku/tars/__packet.py: -------------------------------------------------------------------------------- 1 | # Tencent is pleased to support the open source community by making Tars available. 2 | # 3 | # Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved. 4 | # 5 | # Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 6 | # in compliance with the License. 
You may obtain a copy of the License at 7 | # 8 | # https://opensource.org/licenses/BSD-3-Clause 9 | # 10 | # Unless required by applicable law or agreed to in writing, software distributed 11 | # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 12 | # CONDITIONS OF ANY KIND, either express or implied. See the License for the 13 | # specific language governing permissions and limitations under the License. 14 | # 15 | 16 | 17 | from .__util import util 18 | 19 | 20 | class RequestPacket(util.struct): 21 | mapcls_context = util.mapclass(util.string, util.string) 22 | mapcls_status = util.mapclass(util.string, util.string) 23 | 24 | def __init__(self): 25 | self.iVersion = 0 26 | self.cPacketType = 0 27 | self.iMessageType = 0 28 | self.iRequestId = 0 29 | self.sServantName = "" 30 | self.sFuncName = "" 31 | self.sBuffer = bytes() 32 | self.iTimeout = 0 33 | self.context = RequestPacket.mapcls_context() 34 | self.status = RequestPacket.mapcls_status() 35 | 36 | @staticmethod 37 | def writeTo(oos, value): 38 | oos.write(util.int16, 1, value.iVersion) 39 | oos.write(util.int8, 2, value.cPacketType) 40 | oos.write(util.int32, 3, value.iMessageType) 41 | oos.write(util.int32, 4, value.iRequestId) 42 | oos.write(util.string, 5, value.sServantName) 43 | oos.write(util.string, 6, value.sFuncName) 44 | oos.write(util.bytes, 7, value.sBuffer) 45 | oos.write(util.int32, 8, value.iTimeout) 46 | oos.write(RequestPacket.mapcls_context, 9, value.context) 47 | oos.write(RequestPacket.mapcls_status, 10, value.status) 48 | 49 | @staticmethod 50 | def readFrom(ios): 51 | value = RequestPacket() 52 | value.iVersion = ios.read(util.int16, 1, True, 0) 53 | print(("iVersion = %d" % value.iVersion)) 54 | value.cPacketType = ios.read(util.int8, 2, True, 0) 55 | print(("cPackerType = %d" % value.cPacketType)) 56 | value.iMessageType = ios.read(util.int32, 3, True, 0) 57 | print(("iMessageType = %d" % value.iMessageType)) 58 | value.iRequestId = ios.read(util.int32, 4, True, 0) 59 | print(("iRequestId = %d" % value.iRequestId)) 60 | value.sServantName = ios.read(util.string, 5, True, "22222222") 61 | value.sFuncName = ios.read(util.string, 6, True, "") 62 | value.sBuffer = ios.read(util.bytes, 7, True, value.sBuffer) 63 | value.iTimeout = ios.read(util.int32, 8, True, 0) 64 | value.context = ios.read(RequestPacket.mapcls_context, 9, True, value.context) 65 | value.status = ios.read(RequestPacket.mapcls_status, 10, True, value.status) 66 | return value 67 | 68 | 69 | class ResponsePacket(util.struct): 70 | __tars_class__ = "tars.RpcMessage.ResponsePacket" 71 | mapcls_status = util.mapclass(util.string, util.string) 72 | 73 | def __init__(self): 74 | self.iVersion = 0 75 | self.cPacketType = 0 76 | self.iRequestId = 0 77 | self.iMessageType = 0 78 | self.iRet = 0 79 | self.sBuffer = bytes() 80 | self.status = RequestPacket.mapcls_status() 81 | 82 | @staticmethod 83 | def writeTo(oos, value): 84 | oos.write(util.int16, 1, value.iVersion) 85 | oos.write(util.int8, 2, value.cPacketType) 86 | oos.write(util.int32, 3, value.iRequestId) 87 | oos.write(util.int32, 4, value.iMessageType) 88 | oos.write(util.int32, 5, value.iRet) 89 | oos.write(util.bytes, 6, value.sBuffer) 90 | oos.write(value.mapcls_status, 7, value.status) 91 | 92 | @staticmethod 93 | def readFrom(ios): 94 | value = ResponsePacket() 95 | value.iVersion = ios.read(util.int16, 1, True) 96 | value.cPacketType = ios.read(util.int8, 2, True) 97 | value.iRequestId = ios.read(util.int32, 3, True) 98 | value.iMessageType = 
ios.read(util.int32, 4, True) 99 | value.iRet = ios.read(util.int32, 5, True) 100 | value.sBuffer = ios.read(util.bytes, 6, True) 101 | value.status = ios.read(value.mapcls_status, 7, True) 102 | return value 103 | -------------------------------------------------------------------------------- /danmaku/youtube.py: -------------------------------------------------------------------------------- 1 | import json, re, select, random, traceback, urllib, datetime, base64 2 | import asyncio, aiohttp 3 | 4 | # The core codes for YouTube support are basically from taizan-hokuto/pytchat 5 | 6 | headers = { 7 | "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36", 8 | } 9 | 10 | 11 | class Youtube: 12 | q = None 13 | url = "" 14 | vid = "" 15 | ctn = "" 16 | client = None 17 | stop = False 18 | 19 | @classmethod 20 | async def run(cls, url, q, client, **kargs): 21 | from .paramgen import liveparam 22 | 23 | cls.q = q 24 | cls.url = url 25 | cls.client = client 26 | cls.stop = False 27 | cls.key = "eW91dHViZWkvdjEvbGl2ZV9jaGF0L2dldF9saXZlX2NoYXQ/a2V5PUFJemFTeUFPX0ZKMlNscVU4UTRTVEVITEdDaWx3X1k5XzExcWNXOA==" 28 | await cls.get_url() 29 | while cls.stop == False: 30 | try: 31 | await cls.get_room_info() 32 | cls.ctn = liveparam.getparam(cls.vid, cls.cid, 1) 33 | await cls.get_chat() 34 | except: 35 | traceback.print_exc() 36 | await asyncio.sleep(1) 37 | 38 | @classmethod 39 | async def stop(cls): 40 | cls.stop == True 41 | 42 | @classmethod 43 | async def get_url(cls): 44 | a = re.search(r"youtube.com/channel/([^/?]+)", cls.url) 45 | try: 46 | cid = a.group(1) 47 | cls.cid = cid 48 | cls.url = f"https://www.youtube.com/channel/{cid}/videos" 49 | except: 50 | a = re.search(r"youtube.com/watch\?v=([^/?]+)", cls.url) 51 | async with cls.client.request( 52 | "get", f"https://www.youtube.com/embed/{a.group(1)}" 53 | ) as resp: 54 | b = re.search(r'\\"channelId\\":\\"(.{24})\\"', await resp.text()) 55 | cls.cid = b.group(1) 56 | cls.url = f"https://www.youtube.com/channel/{cls.cid}/videos" 57 | 58 | @classmethod 59 | async def get_room_info(cls): 60 | async with cls.client.request("get", cls.url) as resp: 61 | t = re.search( 62 | r'"gridVideoRenderer"((.(?!"gridVideoRenderer"))(?!"style":"UPCOMING"))+"label":"(LIVE|LIVE NOW|PREMIERING NOW)"([\s\S](?!"style":"UPCOMING"))+?("gridVideoRenderer"|)', 63 | await resp.text(), 64 | ).group(0) 65 | cls.vid = re.search(r'"gridVideoRenderer".+?"videoId":"(.+?)"', t).group(1) 66 | # print(cls.vid) 67 | 68 | @classmethod 69 | async def get_chat_single(cls): 70 | msgs = [] 71 | data = { 72 | "context": { 73 | "client": { 74 | "visitorData": "", 75 | "userAgent": headers["user-agent"], 76 | "clientName": "WEB", 77 | "clientVersion": "".join( 78 | ( 79 | "2.", 80 | (datetime.datetime.today() - datetime.timedelta(days=1)).strftime( 81 | "%Y%m%d" 82 | ), 83 | ".01.00", 84 | ) 85 | ), 86 | }, 87 | }, 88 | "continuation": cls.ctn, 89 | } 90 | u = f'https://www.youtube.com/{base64.b64decode(cls.key).decode("utf-8")}' 91 | async with cls.client.request("post", u, headers=headers, json=data) as resp: 92 | # print(await resp.text()) 93 | j = await resp.json() 94 | j = j["continuationContents"] 95 | cont = j["liveChatContinuation"]["continuations"][0] 96 | if cont is None: 97 | raise Exception("No Continuation") 98 | metadata = ( 99 | cont.get("invalidationContinuationData") 100 | or cont.get("timedContinuationData") 101 | or cont.get("reloadContinuationData") 102 | or 
cont.get("liveChatReplayContinuationData") 103 | ) 104 | cls.ctn = metadata["continuation"] 105 | # print(j['liveChatContinuation'].get('actions')) 106 | for action in j["liveChatContinuation"].get("actions", []): 107 | try: 108 | renderer = action["addChatItemAction"]["item"]["liveChatTextMessageRenderer"] 109 | msg = {} 110 | msg["name"] = renderer["authorName"]["simpleText"] 111 | message = "" 112 | runs = renderer["message"].get("runs") 113 | for r in runs: 114 | if r.get("emoji"): 115 | message += r["emoji"].get("shortcuts", [""])[0] 116 | else: 117 | message += r.get("text", "") 118 | msg["content"] = message 119 | msg["msg_type"] = "danmaku" 120 | msgs.append(msg) 121 | except: 122 | pass 123 | 124 | return msgs 125 | 126 | @classmethod 127 | async def get_chat(cls): 128 | while cls.stop == False: 129 | ms = await cls.get_chat_single() 130 | if len(ms) != 0: 131 | interval = 1 / len(ms) 132 | else: 133 | await asyncio.sleep(1) 134 | for m in ms: 135 | await cls.q.put(m) 136 | await asyncio.sleep(interval) 137 | -------------------------------------------------------------------------------- /danmaku/tars/__async.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # filename: __rpc.py 4 | 5 | # Tencent is pleased to support the open source community by making Tars available. 6 | # 7 | # Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved. 8 | # 9 | # Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 10 | # in compliance with the License. You may obtain a copy of the License at 11 | # 12 | # https://opensource.org/licenses/BSD-3-Clause 13 | # 14 | # Unless required by applicable law or agreed to in writing, software distributed 15 | # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 16 | # CONDITIONS OF ANY KIND, either express or implied. See the License for the 17 | # specific language governing permissions and limitations under the License. 
18 | # 19 | 20 | """ 21 | @version: 0.01 22 | @brief: 异步rpc实现 23 | """ 24 | 25 | import threading 26 | import Queue 27 | from __logger import tarsLogger 28 | from __packet import ResponsePacket 29 | from __servantproxy import ServantProxy 30 | 31 | 32 | class AsyncProcThread: 33 | """ 34 | @brief: 异步调用线程管理类 35 | """ 36 | 37 | def __init__(self): 38 | tarsLogger.debug("AsyncProcThread:__init__") 39 | self.__initialize = False 40 | self.__runners = [] 41 | self.__queue = None 42 | self.__nrunner = 0 43 | self.__popTimeout = 0.1 44 | 45 | def __del__(self): 46 | tarsLogger.debug("AsyncProcThread:__del__") 47 | 48 | def initialize(self, nrunner=3): 49 | """ 50 | @brief: 使用AsyncProcThread前必须先调用此函数 51 | @param nrunner: 异步线程个数 52 | @type nrunner: int 53 | @return: None 54 | @rtype: None 55 | """ 56 | tarsLogger.debug("AsyncProcThread:initialize") 57 | if self.__initialize: 58 | return 59 | self.__nrunner = nrunner 60 | self.__queue = Queue.Queue() 61 | self.__initialize = True 62 | 63 | def terminate(self): 64 | """ 65 | @brief: 关闭所有异步线程 66 | @return: None 67 | @rtype: None 68 | """ 69 | tarsLogger.debug("AsyncProcThread:terminate") 70 | 71 | for runner in self.__runners: 72 | runner.terminate() 73 | 74 | for runner in self.__runners: 75 | runner.join() 76 | self.__runners = [] 77 | 78 | def put(self, reqmsg): 79 | """ 80 | @brief: 处理数据入队列 81 | @param reqmsg: 待处理数据 82 | @type reqmsg: ReqMessage 83 | @return: None 84 | @rtype: None 85 | """ 86 | tarsLogger.debug("AsyncProcThread:put") 87 | # 异步请求超时 88 | if not reqmsg.response: 89 | reqmsg.response = ResponsePacket() 90 | reqmsg.response.iVerson = reqmsg.request.iVerson 91 | reqmsg.response.cPacketType = reqmsg.request.cPacketType 92 | reqmsg.response.iRequestId = reqmsg.request.iRequestId 93 | reqmsg.response.iRet = ServantProxy.TARSASYNCCALLTIMEOUT 94 | 95 | self.__queue.put(reqmsg) 96 | 97 | def pop(self): 98 | """ 99 | @brief: 处理数据出队列 100 | @return: ReqMessage 101 | @rtype: ReqMessage 102 | """ 103 | # tarsLogger.debug('AsyncProcThread:pop') 104 | ret = None 105 | try: 106 | ret = self.__queue.get(True, self.__popTimeout) 107 | except Queue.Empty: 108 | pass 109 | return ret 110 | 111 | def start(self): 112 | """ 113 | @brief: 启动异步线程 114 | @return: None 115 | @rtype: None 116 | """ 117 | tarsLogger.debug("AsyncProcThread:start") 118 | for i in xrange(self.__nrunner): 119 | runner = AsyncProcThreadRunner() 120 | runner.initialize(self) 121 | runner.start() 122 | self.__runners.append(runner) 123 | 124 | 125 | class AsyncProcThreadRunner(threading.Thread): 126 | """ 127 | @brief: 异步调用线程 128 | """ 129 | 130 | def __init__(self): 131 | tarsLogger.debug("AsyncProcThreadRunner:__init__") 132 | super(AsyncProcThreadRunner, self).__init__() 133 | # threading.Thread.__init__(self) 134 | self.__terminate = False 135 | self.__initialize = False 136 | self.__procQueue = None 137 | 138 | def __del__(self): 139 | tarsLogger.debug("AsyncProcThreadRunner:__del__") 140 | 141 | def initialize(self, queue): 142 | """ 143 | @brief: 使用AsyncProcThreadRunner前必须调用此函数 144 | @param queue: 有pop()的类,用于提取待处理数据 145 | @type queue: AsyncProcThread 146 | @return: None 147 | @rtype: None 148 | """ 149 | tarsLogger.debug("AsyncProcThreadRunner:initialize") 150 | self.__procQueue = queue 151 | 152 | def terminate(self): 153 | """ 154 | @brief: 关闭线程 155 | @return: None 156 | @rtype: None 157 | """ 158 | tarsLogger.debug("AsyncProcThreadRunner:terminate") 159 | self.__terminate = True 160 | 161 | def run(self): 162 | """ 163 | @brief: 线程启动函数,执行异步调用 164 | """ 165 | 
tarsLogger.debug("AsyncProcThreadRunner:run") 166 | while not self.__terminate: 167 | if self.__terminate: 168 | break 169 | reqmsg = self.__procQueue.pop() 170 | if not reqmsg or not reqmsg.callback: 171 | continue 172 | 173 | if reqmsg.adapter: 174 | succ = reqmsg.response.iRet == ServantProxy.TARSSERVERSUCCESS 175 | reqmsg.adapter.finishInvoke(succ) 176 | 177 | try: 178 | reqmsg.callback.onDispatch(reqmsg) 179 | except Exception, msg: 180 | tarsLogger.error("AsyncProcThread excepttion: %s", msg) 181 | 182 | tarsLogger.debug("AsyncProcThreadRunner:run finished") 183 | 184 | 185 | class ServantProxyCallback(object): 186 | """ 187 | @brief: 异步回调对象基类 188 | """ 189 | 190 | def __init__(self): 191 | tarsLogger.debug("ServantProxyCallback:__init__") 192 | 193 | def onDispatch(reqmsg): 194 | """ 195 | @brief: 分配响应报文到对应的回调函数 196 | @param queue: 有pop()的类,用于提取待处理数据 197 | @type queue: AsyncProcThread 198 | @return: None 199 | @rtype: None 200 | """ 201 | raise NotImplementedError() 202 | -------------------------------------------------------------------------------- /danmaku/tars/__util.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # Tencent is pleased to support the open source community by making Tars available. 5 | # 6 | # Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved. 7 | # 8 | # Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 9 | # in compliance with the License. You may obtain a copy of the License at 10 | # 11 | # https://opensource.org/licenses/BSD-3-Clause 12 | # 13 | # Unless required by applicable law or agreed to in writing, software distributed 14 | # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 15 | # CONDITIONS OF ANY KIND, either express or implied. See the License for the 16 | # specific language governing permissions and limitations under the License. 
17 | # 18 | 19 | 20 | import sys 21 | from threading import Lock 22 | import hashlib 23 | from xml.etree import cElementTree as ET 24 | from .exception import TarsException 25 | 26 | 27 | class util: 28 | @staticmethod 29 | def printHex(buff): 30 | count = 0 31 | for c in buff: 32 | sys.stdout.write("0X%02X " % ord(c)) 33 | count += 1 34 | if count % 16 == 0: 35 | sys.stdout.write("\n") 36 | sys.stdout.write("\n") 37 | sys.stdout.flush() 38 | 39 | @staticmethod 40 | def mapclass(ktype, vtype): 41 | class mapklass(dict): 42 | def size(self): 43 | return len(self) 44 | 45 | setattr(mapklass, "__tars_index__", 8) 46 | setattr( 47 | mapklass, 48 | "__tars_class__", 49 | "map<" + ktype.__tars_class__ + "," + vtype.__tars_class__ + ">", 50 | ) 51 | setattr(mapklass, "ktype", ktype) 52 | setattr(mapklass, "vtype", vtype) 53 | return mapklass 54 | 55 | @staticmethod 56 | def vectorclass(vtype): 57 | class klass(list): 58 | def size(self): 59 | return len(self) 60 | 61 | setattr(klass, "__tars_index__", 9) 62 | setattr(klass, "__tars_class__", "list<" + vtype.__tars_class__ + ">") 63 | setattr(klass, "vtype", vtype) 64 | return klass 65 | 66 | class boolean: 67 | __tars_index__ = 999 68 | __tars_class__ = "bool" 69 | 70 | class int8: 71 | __tars_index__ = 0 72 | __tars_class__ = "char" 73 | 74 | class uint8: 75 | __tars_index__ = 1 76 | __tars_class__ = "short" 77 | 78 | class int16: 79 | __tars_index__ = 1 80 | __tars_class__ = "short" 81 | 82 | class uint16: 83 | __tars_index__ = 2 84 | __tars_class__ = "int32" 85 | 86 | class int32: 87 | __tars_index__ = 2 88 | __tars_class__ = "int32" 89 | 90 | class uint32: 91 | __tars_index__ = 3 92 | __tars_class__ = "int64" 93 | 94 | class int64: 95 | __tars_index__ = 3 96 | __tars_class__ = "int64" 97 | 98 | class float: 99 | __tars_index__ = 4 100 | __tars_class__ = "float" 101 | 102 | class double: 103 | __tars_index__ = 5 104 | __tars_class__ = "double" 105 | 106 | class bytes: 107 | __tars_index__ = 13 108 | __tars_class__ = "list" 109 | 110 | class string: 111 | __tars_index__ = 67 112 | __tars_class__ = "string" 113 | 114 | class struct: 115 | __tars_index__ = 1011 116 | 117 | 118 | def xml2dict(node, dic={}): 119 | """ 120 | @brief: 将xml解析树转成字典 121 | @param node: 树的根节点 122 | @type node: cElementTree.Element 123 | @param dic: 存储信息的字典 124 | @type dic: dict 125 | @return: 转换好的字典 126 | @rtype: dict 127 | """ 128 | dic[node.tag] = ndic = {} 129 | [xml2dict(child, ndic) for child in node.getchildren() if child != node] 130 | ndic.update( 131 | [list(map(str.strip, exp.split("=")[:2])) for exp in node.text.splitlines() if "=" in exp] 132 | ) 133 | return dic 134 | 135 | 136 | def configParse(filename): 137 | """ 138 | @brief: 解析tars配置文件 139 | @param filename: 文件名 140 | @type filename: str 141 | @return: 解析出来的配置信息 142 | @rtype: dict 143 | """ 144 | tree = ET.parse(filename) 145 | return xml2dict(tree.getroot()) 146 | 147 | 148 | class NewLock(object): 149 | def __init__(self): 150 | self.__count = 0 151 | self.__lock = Lock() 152 | self.__lockForCount = Lock() 153 | pass 154 | 155 | def newAcquire(self): 156 | self.__lockForCount.acquire() 157 | self.__count += 1 158 | if self.__count == 1: 159 | self.__lock.acquire() 160 | self.__lockForCount.release() 161 | pass 162 | 163 | def newRelease(self): 164 | self.__lockForCount.acquire() 165 | self.__count -= 1 166 | if self.__count == 0: 167 | self.__lock.release() 168 | self.__lockForCount.release() 169 | 170 | 171 | class LockGuard(object): 172 | def __init__(self, newLock): 173 | self.__newLock = newLock 174 
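# LockGuard acquires on construction and releases in __del__, so correct unlocking
# relies on CPython's prompt, reference-counting destruction of the local that holds
# the guard (e.g. "lock = LockGuard(self.__lock)" in TimeoutQueue below).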
| self.__newLock.newAcquire() 175 | 176 | def __del__(self): 177 | self.__newLock.newRelease() 178 | 179 | 180 | class ConsistentHashNew(object): 181 | def __init__(self, nodes=None, nodeNumber=3): 182 | """ 183 | :param nodes: 服务器的节点的epstr列表 184 | :param n_number: 一个节点对应的虚拟节点数量 185 | :return: 186 | """ 187 | self.__nodes = nodes 188 | self.__nodeNumber = nodeNumber # 每一个节点对应多少个虚拟节点,这里默认是3个 189 | self.__nodeDict = dict() # 用于记录虚拟节点的hash值与服务器epstr的对应关系 190 | self.__sortListForKey = [] # 用于存放所有的虚拟节点的hash值,这里需要保持排序,以找出对应的服务器 191 | if nodes: 192 | for node in nodes: 193 | self.addNode(node) 194 | 195 | @property 196 | def nodes(self): 197 | return self.__nodes 198 | 199 | @nodes.setter 200 | def nodes(self, value): 201 | self.__nodes = value 202 | 203 | def addNode(self, node): 204 | """ 205 | 添加node,首先要根据虚拟节点的数目,创建所有的虚拟节点,并将其与对应的node对应起来 206 | 当然还需要将虚拟节点的hash值放到排序的里面 207 | 这里在添加了节点之后,需要保持虚拟节点hash值的顺序 208 | :param node: 209 | :return: 210 | """ 211 | for i in range(self.__nodeNumber): 212 | nodeStr = "%s%s" % (node, i) 213 | key = self.__genKey(nodeStr) 214 | self.__nodeDict[key] = node 215 | self.__sortListForKey.append(key) 216 | self.__sortListForKey.sort() 217 | 218 | def removeNode(self, node): 219 | """ 220 | 这里一个节点的退出,需要将这个节点的所有的虚拟节点都删除 221 | :param node: 222 | :return: 223 | """ 224 | for i in range(self.__nodeNumber): 225 | nodeStr = "%s%s" % (node, i) 226 | key = self.__genKey(nodeStr) 227 | del self.__nodeDict[key] 228 | self.__sortListForKey.remove(key) 229 | 230 | def getNode(self, key): 231 | """ 232 | 返回这个字符串应该对应的node,这里先求出字符串的hash值,然后找到第一个小于等于的虚拟节点,然后返回node 233 | 如果hash值大于所有的节点,那么用第一个虚拟节点 234 | :param : hashNum or keyStr 235 | :return: 236 | """ 237 | keyStr = "" 238 | if isinstance(key, int): 239 | keyStr = "the keyStr is %d" % key 240 | elif isinstance(key, type("a")): 241 | keyStr = key 242 | else: 243 | raise TarsException("the hash code has wrong type") 244 | if self.__sortListForKey: 245 | key = self.__genKey(keyStr) 246 | for keyItem in self.__sortListForKey: 247 | if key <= keyItem: 248 | return self.__nodeDict[keyItem] 249 | return self.__nodeDict[self.__sortListForKey[0]] 250 | else: 251 | return None 252 | 253 | def __genKey(self, keyStr): 254 | """ 255 | 通过key,返回当前key的hash值,这里采用md5 256 | :param key: 257 | :return: 258 | """ 259 | md5Str = hashlib.md5(keyStr).hexdigest() 260 | return int(md5Str, 16) 261 | -------------------------------------------------------------------------------- /danmaku/tars/__TimeoutQueue.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # filename: __timeQueue.py 4 | 5 | # Tencent is pleased to support the open source community by making Tars available. 6 | # 7 | # Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved. 8 | # 9 | # Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 10 | # in compliance with the License. You may obtain a copy of the License at 11 | # 12 | # https://opensource.org/licenses/BSD-3-Clause 13 | # 14 | # Unless required by applicable law or agreed to in writing, software distributed 15 | # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 16 | # CONDITIONS OF ANY KIND, either express or implied. See the License for the 17 | # specific language governing permissions and limitations under the License. 
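# A minimal, self-contained sketch of the consistent-hashing ring that
# ConsistentHashNew in __util.py above implements (names here are illustrative only):
#
# import bisect
# import hashlib
#
# def ring_hash(s):
#     return int(hashlib.md5(s.encode()).hexdigest(), 16)
#
# class TinyRing(object):
#     def __init__(self, nodes, replicas=3):
#         # one (hash, node) pair per virtual node, kept sorted like __sortListForKey
#         self._ring = sorted(
#             (ring_hash("%s%s" % (node, i)), node) for node in nodes for i in range(replicas)
#         )
#
#     def get(self, key):
#         # first virtual node whose hash is >= the key's hash, wrapping to the start
#         hashes = [h for h, _ in self._ring]
#         idx = bisect.bisect_left(hashes, ring_hash(key)) % len(self._ring)
#         return self._ring[idx][1]
#
# TinyRing(["10.0.0.1:9000", "10.0.0.2:9000"]).get("room-1234")  # maps the key to one node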
18 | # 19 | 20 | """ 21 | @version: 0.01 22 | @brief: 请求响应报文和超时队列 23 | """ 24 | 25 | import threading 26 | import time 27 | import struct 28 | 29 | from .__logger import tarsLogger 30 | from .__tars import TarsInputStream 31 | from .__tars import TarsOutputStream 32 | from .__packet import RequestPacket 33 | from .__packet import ResponsePacket 34 | from .__util import NewLock, LockGuard 35 | 36 | 37 | class ReqMessage: 38 | """ 39 | @brief: 请求响应报文,保存一个请求响应所需要的数据 40 | """ 41 | 42 | SYNC_CALL = 1 43 | ASYNC_CALL = 2 44 | ONE_WAY = 3 45 | 46 | def __init__(self): 47 | self.type = ReqMessage.SYNC_CALL 48 | self.servant = None 49 | self.lock = None 50 | self.adapter = None 51 | self.request = None 52 | self.response = None 53 | self.callback = None 54 | self.begtime = None 55 | self.endtime = None 56 | self.isHash = False 57 | self.isConHash = False 58 | self.hashCode = 0 59 | 60 | def packReq(self): 61 | """ 62 | @brief: 序列化请求报文 63 | @return: 序列化后的请求报文 64 | @rtype: str 65 | """ 66 | if not self.request: 67 | return "" 68 | oos = TarsOutputStream() 69 | RequestPacket.writeTo(oos, self.request) 70 | reqpkt = oos.getBuffer() 71 | plen = len(reqpkt) + 4 72 | reqpkt = struct.pack("!i", plen) + reqpkt 73 | return reqpkt 74 | 75 | @staticmethod 76 | def unpackRspList(buf): 77 | """ 78 | @brief: 解码响应报文 79 | @param buf: 多个序列化后的响应报文数据 80 | @type buf: str 81 | @return: 解码出来的响应报文和解码的buffer长度 82 | @rtype: rsplist: 装有ResponsePacket的list 83 | unpacklen: int 84 | """ 85 | rsplist = [] 86 | if not buf: 87 | return rsplist 88 | 89 | unpacklen = 0 90 | buf = buffer(buf) 91 | while True: 92 | if len(buf) - unpacklen < 4: 93 | break 94 | packsize = buf[unpacklen : unpacklen + 4] 95 | (packsize,) = struct.unpack_from("!i", packsize) 96 | if len(buf) < unpacklen + packsize: 97 | break 98 | 99 | ios = TarsInputStream(buf[unpacklen + 4 : unpacklen + packsize]) 100 | rsp = ResponsePacket.readFrom(ios) 101 | rsplist.append(rsp) 102 | unpacklen += packsize 103 | 104 | return rsplist, unpacklen 105 | 106 | 107 | # 超时队列,加锁,线程安全 108 | 109 | 110 | class TimeoutQueue: 111 | """ 112 | @brief: 超时队列,加锁,线程安全 113 | 可以像队列一样FIFO,也可以像字典一样按key取item 114 | @todo: 限制队列长度 115 | """ 116 | 117 | def __init__(self, timeout=3): 118 | self.__uniqId = 0 119 | # self.__lock = threading.Lock() 120 | self.__lock = NewLock() 121 | self.__data = {} 122 | self.__queue = [] 123 | self.__timeout = timeout 124 | 125 | def getTimeout(self): 126 | """ 127 | @brief: 获取超时时间,单位为s 128 | @return: 超时时间 129 | @rtype: float 130 | """ 131 | return self.__timeout 132 | 133 | def setTimeout(self, timeout): 134 | """ 135 | @brief: 设置超时时间,单位为s 136 | @param timeout: 超时时间 137 | @type timeout: float 138 | @return: None 139 | @rtype: None 140 | """ 141 | self.__timeout = timeout 142 | 143 | def size(self): 144 | """ 145 | @brief: 获取队列长度 146 | @return: 队列长度 147 | @rtype: int 148 | """ 149 | # self.__lock.acquire() 150 | lock = LockGuard(self.__lock) 151 | ret = len(self.__data) 152 | # self.__lock.release() 153 | return ret 154 | 155 | def generateId(self): 156 | """ 157 | @brief: 生成唯一id,0 < id < 2 ** 32 158 | @return: id 159 | @rtype: int 160 | """ 161 | # self.__lock.acquire() 162 | lock = LockGuard(self.__lock) 163 | ret = self.__uniqId 164 | ret = (ret + 1) % 0x7FFFFFFF 165 | while ret <= 0: 166 | ret = (ret + 1) % 0x7FFFFFFF 167 | self.__uniqId = ret 168 | # self.__lock.release() 169 | return ret 170 | 171 | def pop(self, uniqId=0, erase=True): 172 | """ 173 | @brief: 弹出item 174 | @param uniqId: item的id,如果为0,按FIFO弹出 175 | @type uniqId: int 176 | @param erase: 
弹出后是否从字典里删除item 177 | @type erase: bool 178 | @return: item 179 | @rtype: any type 180 | """ 181 | ret = None 182 | 183 | # self.__lock.acquire() 184 | lock = LockGuard(self.__lock) 185 | 186 | if not uniqId: 187 | if len(self.__queue): 188 | uniqId = self.__queue.pop(0) 189 | if uniqId: 190 | if erase: 191 | ret = self.__data.pop(uniqId, None) 192 | else: 193 | ret = self.__data.get(uniqId, None) 194 | 195 | # self.__lock.release() 196 | 197 | return ret[0] if ret else None 198 | 199 | def push(self, item, uniqId): 200 | """ 201 | @brief: 数据入队列,如果队列已经有了uniqId,插入失败 202 | @param item: 插入的数据 203 | @type item: any type 204 | @return: 插入是否成功 205 | @rtype: bool 206 | """ 207 | begtime = time.time() 208 | ret = True 209 | # self.__lock.acquire() 210 | lock = LockGuard(self.__lock) 211 | 212 | if uniqId in self.__data: 213 | ret = False 214 | else: 215 | self.__data[uniqId] = [item, begtime] 216 | self.__queue.append(uniqId) 217 | # self.__lock.release() 218 | return ret 219 | 220 | def peek(self, uniqId): 221 | """ 222 | @brief: 根据uniqId获取item,不会删除item 223 | @param uniqId: item的id 224 | @type uniqId: int 225 | @return: item 226 | @rtype: any type 227 | """ 228 | # self.__lock.acquire() 229 | lock = LockGuard(self.__lock) 230 | 231 | ret = self.__data.get(uniqId, None) 232 | # self.__lock.release() 233 | if not ret: 234 | return None 235 | return ret[0] 236 | 237 | def timeout(self): 238 | """ 239 | @brief: 检测是否有item超时,如果有就删除 240 | @return: None 241 | @rtype: None 242 | """ 243 | endtime = time.time() 244 | # self.__lock.acquire() 245 | lock = LockGuard(self.__lock) 246 | 247 | # 处理异常情况,防止死锁 248 | try: 249 | new_data = {} 250 | for uniqId, item in self.__data.items(): 251 | if endtime - item[1] < self.__timeout: 252 | new_data[uniqId] = item 253 | else: 254 | tarsLogger.debug("TimeoutQueue:timeout remove id : %d" % uniqId) 255 | self.__data = new_data 256 | finally: 257 | # self.__lock.release() 258 | pass 259 | 260 | 261 | class QueueTimeout(threading.Thread): 262 | """ 263 | 超时线程,定时触发超时事件 264 | """ 265 | 266 | def __init__(self, timeout=0.1): 267 | # threading.Thread.__init__(self) 268 | tarsLogger.debug("QueueTimeout:__init__") 269 | super(QueueTimeout, self).__init__() 270 | self.timeout = timeout 271 | self.__terminate = False 272 | self.__handler = None 273 | self.__lock = threading.Condition() 274 | 275 | def terminate(self): 276 | tarsLogger.debug("QueueTimeout:terminate") 277 | self.__terminate = True 278 | self.__lock.acquire() 279 | self.__lock.notifyAll() 280 | self.__lock.release() 281 | 282 | def setHandler(self, handler): 283 | self.__handler = handler 284 | 285 | def run(self): 286 | while not self.__terminate: 287 | try: 288 | self.__lock.acquire() 289 | self.__lock.wait(self.timeout) 290 | self.__lock.release() 291 | if self.__terminate: 292 | break 293 | self.__handler() 294 | except Exception as msg: 295 | tarsLogger.error("QueueTimeout:run exception : %s", msg) 296 | 297 | tarsLogger.debug("QueueTimeout:run finished") 298 | 299 | 300 | if __name__ == "__main__": 301 | pass 302 | -------------------------------------------------------------------------------- /danmaku/tars/QueryF.py: -------------------------------------------------------------------------------- 1 | # Tencent is pleased to support the open source community by making Tars available. 2 | # 3 | # Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved. 4 | # 5 | # Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 6 | # in compliance with the License. 
You may obtain a copy of the License at 7 | # 8 | # https://opensource.org/licenses/BSD-3-Clause 9 | # 10 | # Unless required by applicable law or agreed to in writing, software distributed 11 | # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 12 | # CONDITIONS OF ANY KIND, either express or implied. See the License for the 13 | # specific language governing permissions and limitations under the License. 14 | # 15 | 16 | # from tars.core import tarscore; 17 | # from tars.core import ServantProxy; 18 | # from tars.core import ServantProxyCallback; 19 | # from com.qq.register.EndpointF import *; 20 | from .__init__ import tarscore 21 | from .__servantproxy import ServantProxy 22 | from .__async import ServantProxyCallback 23 | from .EndpointF import EndpointF 24 | 25 | import time 26 | 27 | # proxy for client 28 | 29 | 30 | class QueryFProxy(ServantProxy): 31 | def findObjectById(self, id, context=ServantProxy.mapcls_context()): 32 | oos = tarscore.TarsOutputStream() 33 | oos.write(tarscore.string, 1, id) 34 | 35 | rsp = self.tars_invoke( 36 | ServantProxy.TARSNORMAL, "findObjectById", oos.getBuffer(), context, None 37 | ) 38 | 39 | ios = tarscore.TarsInputStream(rsp.sBuffer) 40 | ret = ios.read(tarscore.vctclass(EndpointF), 0, True) 41 | 42 | return ret 43 | 44 | def async_findObjectById(self, callback, id, context=ServantProxy.mapcls_context()): 45 | oos = tarscore.TarsOutputStream() 46 | oos.write(tarscore.string, 1, id) 47 | 48 | self.tars_invoke_async( 49 | ServantProxy.TARSNORMAL, "findObjectById", oos.getBuffer(), context, None, callback 50 | ) 51 | 52 | def findObjectById4Any(self, id, context=ServantProxy.mapcls_context()): 53 | oos = tarscore.TarsOutputStream() 54 | oos.write(tarscore.string, 1, id) 55 | 56 | rsp = self.tars_invoke( 57 | ServantProxy.TARSNORMAL, "findObjectById4Any", oos.getBuffer(), context, None 58 | ) 59 | 60 | ios = tarscore.TarsInputStream(rsp.sBuffer) 61 | ret = ios.read(tarscore.int32, 0, True) 62 | activeEp = ios.read(tarscore.vctclass(EndpointF), 2, True) 63 | inactiveEp = ios.read(tarscore.vctclass(EndpointF), 3, True) 64 | 65 | return (ret, activeEp, inactiveEp) 66 | 67 | def async_findObjectById4Any(self, callback, id, context=ServantProxy.mapcls_context()): 68 | oos = tarscore.TarsOutputStream() 69 | oos.write(tarscore.string, 1, id) 70 | 71 | self.tars_invoke_async( 72 | ServantProxy.TARSNORMAL, "findObjectById4Any", oos.getBuffer(), context, None, callback 73 | ) 74 | 75 | def findObjectById4All(self, id, context=ServantProxy.mapcls_context()): 76 | oos = tarscore.TarsOutputStream() 77 | oos.write(tarscore.string, 1, id) 78 | 79 | rsp = self.tars_invoke( 80 | ServantProxy.TARSNORMAL, "findObjectById4All", oos.getBuffer(), context, None 81 | ) 82 | 83 | ios = tarscore.TarsInputStream(rsp.sBuffer) 84 | ret = ios.read(tarscore.int32, 0, True) 85 | activeEp = ios.read(tarscore.vctclass(EndpointF), 2, True) 86 | inactiveEp = ios.read(tarscore.vctclass(EndpointF), 3, True) 87 | 88 | return (ret, activeEp, inactiveEp) 89 | 90 | def async_findObjectById4All(self, callback, id, context=ServantProxy.mapcls_context()): 91 | oos = tarscore.TarsOutputStream() 92 | oos.write(tarscore.string, 1, id) 93 | 94 | self.tars_invoke_async( 95 | ServantProxy.TARSNORMAL, "findObjectById4All", oos.getBuffer(), context, None, callback 96 | ) 97 | 98 | def findObjectByIdInSameGroup(self, id, context=ServantProxy.mapcls_context()): 99 | oos = tarscore.TarsOutputStream() 100 | oos.write(tarscore.string, 1, id) 101 | rsp = self.tars_invoke( 102 | 
ServantProxy.TARSNORMAL, "findObjectByIdInSameGroup", oos.getBuffer(), context, None 103 | ) 104 | 105 | startDecodeTime = time.time() 106 | ios = tarscore.TarsInputStream(rsp.sBuffer) 107 | ret = ios.read(tarscore.int32, 0, True) 108 | activeEp = ios.read(tarscore.vctclass(EndpointF), 2, True) 109 | inactiveEp = ios.read(tarscore.vctclass(EndpointF), 3, True) 110 | endDecodeTime = time.time() 111 | return (ret, activeEp, inactiveEp, (endDecodeTime - startDecodeTime)) 112 | 113 | def async_findObjectByIdInSameGroup(self, callback, id, context=ServantProxy.mapcls_context()): 114 | oos = tarscore.TarsOutputStream() 115 | oos.write(tarscore.string, 1, id) 116 | 117 | self.tars_invoke_async( 118 | ServantProxy.TARSNORMAL, 119 | "findObjectByIdInSameGroup", 120 | oos.getBuffer(), 121 | context, 122 | None, 123 | callback, 124 | ) 125 | 126 | def findObjectByIdInSameStation(self, id, sStation, context=ServantProxy.mapcls_context()): 127 | oos = tarscore.TarsOutputStream() 128 | oos.write(tarscore.string, 1, id) 129 | oos.write(tarscore.string, 2, sStation) 130 | 131 | rsp = self.tars_invoke( 132 | ServantProxy.TARSNORMAL, "findObjectByIdInSameStation", oos.getBuffer(), context, None 133 | ) 134 | 135 | ios = tarscore.TarsInputStream(rsp.sBuffer) 136 | ret = ios.read(tarscore.int32, 0, True) 137 | activeEp = ios.read(tarscore.vctclass(EndpointF), 3, True) 138 | inactiveEp = ios.read(tarscore.vctclass(EndpointF), 4, True) 139 | 140 | return (ret, activeEp, inactiveEp) 141 | 142 | def async_findObjectByIdInSameStation( 143 | self, callback, id, sStation, context=ServantProxy.mapcls_context() 144 | ): 145 | oos = tarscore.TarsOutputStream() 146 | oos.write(tarscore.string, 1, id) 147 | oos.write(tarscore.string, 2, sStation) 148 | 149 | self.tars_invoke_async( 150 | ServantProxy.TARSNORMAL, 151 | "findObjectByIdInSameStation", 152 | oos.getBuffer(), 153 | context, 154 | None, 155 | callback, 156 | ) 157 | 158 | def findObjectByIdInSameSet(self, id, setId, context=ServantProxy.mapcls_context()): 159 | oos = tarscore.TarsOutputStream() 160 | oos.write(tarscore.string, 1, id) 161 | oos.write(tarscore.string, 2, setId) 162 | 163 | rsp = self.tars_invoke( 164 | ServantProxy.TARSNORMAL, "findObjectByIdInSameSet", oos.getBuffer(), context, None 165 | ) 166 | 167 | ios = tarscore.TarsInputStream(rsp.sBuffer) 168 | ret = ios.read(tarscore.int32, 0, True) 169 | activeEp = ios.read(tarscore.vctclass(EndpointF), 3, True) 170 | inactiveEp = ios.read(tarscore.vctclass(EndpointF), 4, True) 171 | 172 | return (ret, activeEp, inactiveEp) 173 | 174 | def async_findObjectByIdInSameSet( 175 | self, callback, id, setId, context=ServantProxy.mapcls_context() 176 | ): 177 | oos = tarscore.TarsOutputStream() 178 | oos.write(tarscore.string, 1, id) 179 | oos.write(tarscore.string, 2, setId) 180 | 181 | self.tars_invoke_async( 182 | ServantProxy.TARSNORMAL, 183 | "findObjectByIdInSameSet", 184 | oos.getBuffer(), 185 | context, 186 | None, 187 | callback, 188 | ) 189 | 190 | 191 | # ======================================================== 192 | # callback of async proxy for client 193 | # ======================================================== 194 | class QueryFPrxCallback(ServantProxyCallback): 195 | def __init__(self): 196 | ServantProxyCallback.__init__(self) 197 | self.callback_map = { 198 | "findObjectById": self.__invoke_findObjectById, 199 | "findObjectById4Any": self.__invoke_findObjectById4Any, 200 | "findObjectById4All": self.__invoke_findObjectById4All, 201 | "findObjectByIdInSameGroup": 
self.__invoke_findObjectByIdInSameGroup, 202 | "findObjectByIdInSameStation": self.__invoke_findObjectByIdInSameStation, 203 | "findObjectByIdInSameSet": self.__invoke_findObjectByIdInSameSet, 204 | } 205 | 206 | def callback_findObjectById(self, ret): 207 | raise NotImplementedError() 208 | 209 | def callback_findObjectById_exception(self, ret): 210 | raise NotImplementedError() 211 | 212 | def callback_findObjectById4Any(self, ret, activeEp, inactiveEp): 213 | raise NotImplementedError() 214 | 215 | def callback_findObjectById4Any_exception(self, ret): 216 | raise NotImplementedError() 217 | 218 | def callback_findObjectById4All(self, ret, activeEp, inactiveEp): 219 | raise NotImplementedError() 220 | 221 | def callback_findObjectById4All_exception(self, ret): 222 | raise NotImplementedError() 223 | 224 | def callback_findObjectByIdInSameGroup(self, ret, activeEp, inactiveEp): 225 | raise NotImplementedError() 226 | 227 | def callback_findObjectByIdInSameGroup_exception(self, ret): 228 | raise NotImplementedError() 229 | 230 | def callback_findObjectByIdInSameStation(self, ret, activeEp, inactiveEp): 231 | raise NotImplementedError() 232 | 233 | def callback_findObjectByIdInSameStation_exception(self, ret): 234 | raise NotImplementedError() 235 | 236 | def callback_findObjectByIdInSameSet(self, ret, activeEp, inactiveEp): 237 | raise NotImplementedError() 238 | 239 | def callback_findObjectByIdInSameSet_exception(self, ret): 240 | raise NotImplementedError() 241 | 242 | def __invoke_findObjectById(self, reqmsg): 243 | rsp = reqmsg.response 244 | if rsp.iRet != ServantProxy.TARSSERVERSUCCESS: 245 | self.callback_findObjectById_exception(rsp.iRet) 246 | return rsp.iRet 247 | ios = tarscore.TarsInputStream(rsp.sBuffer) 248 | ret = ios.read(tarscore.vctclass(EndpointF), 0, True) 249 | self.callback_findObjectById(ret) 250 | 251 | def __invoke_findObjectById4Any(self, reqmsg): 252 | rsp = reqmsg.response 253 | if rsp.iRet != ServantProxy.TARSSERVERSUCCESS: 254 | self.callback_findObjectById4Any_exception(rsp.iRet) 255 | return rsp.iRet 256 | ios = tarscore.TarsInputStream(rsp.sBuffer) 257 | ret = ios.read(tarscore.int32, 0, True) 258 | activeEp = ios.read(tarscore.vctclass(EndpointF), 2, True) 259 | inactiveEp = ios.read(tarscore.vctclass(EndpointF), 3, True) 260 | self.callback_findObjectById4Any(ret, activeEp, inactiveEp) 261 | 262 | def __invoke_findObjectById4All(self, reqmsg): 263 | rsp = reqmsg.response 264 | if rsp.iRet != ServantProxy.TARSSERVERSUCCESS: 265 | self.callback_findObjectById4All_exception(rsp.iRet) 266 | return rsp.iRet 267 | ios = tarscore.TarsInputStream(rsp.sBuffer) 268 | ret = ios.read(tarscore.int32, 0, True) 269 | activeEp = ios.read(tarscore.vctclass(EndpointF), 2, True) 270 | inactiveEp = ios.read(tarscore.vctclass(EndpointF), 3, True) 271 | self.callback_findObjectById4All(ret, activeEp, inactiveEp) 272 | 273 | def __invoke_findObjectByIdInSameGroup(self, reqmsg): 274 | rsp = reqmsg.response 275 | if rsp.iRet != ServantProxy.TARSSERVERSUCCESS: 276 | self.callback_findObjectByIdInSameGroup_exception(rsp.iRet) 277 | return rsp.iRet 278 | ios = tarscore.TarsInputStream(rsp.sBuffer) 279 | ret = ios.read(tarscore.int32, 0, True) 280 | activeEp = ios.read(tarscore.vctclass(EndpointF), 2, True) 281 | inactiveEp = ios.read(tarscore.vctclass(EndpointF), 3, True) 282 | self.callback_findObjectByIdInSameGroup(ret, activeEp, inactiveEp) 283 | 284 | def __invoke_findObjectByIdInSameStation(self, reqmsg): 285 | rsp = reqmsg.response 286 | if rsp.iRet != 
ServantProxy.TARSSERVERSUCCESS: 287 | self.callback_findObjectByIdInSameStation_exception(rsp.iRet) 288 | return rsp.iRet 289 | ios = tarscore.TarsInputStream(rsp.sBuffer) 290 | ret = ios.read(tarscore.int32, 0, True) 291 | activeEp = ios.read(tarscore.vctclass(EndpointF), 3, True) 292 | inactiveEp = ios.read(tarscore.vctclass(EndpointF), 4, True) 293 | self.callback_findObjectByIdInSameStation(ret, activeEp, inactiveEp) 294 | 295 | def __invoke_findObjectByIdInSameSet(self, reqmsg): 296 | rsp = reqmsg.response 297 | if rsp.iRet != ServantProxy.TARSSERVERSUCCESS: 298 | self.callback_findObjectByIdInSameSet_exception(rsp.iRet) 299 | return rsp.iRet 300 | ios = tarscore.TarsInputStream(rsp.sBuffer) 301 | ret = ios.read(tarscore.int32, 0, True) 302 | activeEp = ios.read(tarscore.vctclass(EndpointF), 3, True) 303 | inactiveEp = ios.read(tarscore.vctclass(EndpointF), 4, True) 304 | self.callback_findObjectByIdInSameSet(ret, activeEp, inactiveEp) 305 | 306 | def onDispatch(self, reqmsg): 307 | self.callback_map[reqmsg.request.sFuncName](reqmsg) 308 | -------------------------------------------------------------------------------- /danmaku/tars/__servantproxy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # filename: __servantproxy.py 4 | 5 | # Tencent is pleased to support the open source community by making Tars available. 6 | # 7 | # Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved. 8 | # 9 | # Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 10 | # in compliance with the License. You may obtain a copy of the License at 11 | # 12 | # https://opensource.org/licenses/BSD-3-Clause 13 | # 14 | # Unless required by applicable law or agreed to in writing, software distributed 15 | # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 16 | # CONDITIONS OF ANY KIND, either express or implied. See the License for the 17 | # specific language governing permissions and limitations under the License. 
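# A minimal sketch (not part of QueryF.py above) of how its async callback pattern is
# meant to be used: subclass QueryFPrxCallback and override the callbacks you need.
# The proxy variable and object name in the commented call are hypothetical.
#
# class EndpointPrinter(QueryFPrxCallback):
#     def callback_findObjectById(self, ret):
#         print("endpoints:", ret)
#
#     def callback_findObjectById_exception(self, ret):
#         print("findObjectById failed, iRet =", ret)
#
# queryPrx.async_findObjectById(EndpointPrinter(), "SomeApp.SomeServer.SomeObj")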
18 | # 19 | 20 | 21 | """ 22 | @version: 0.01 23 | @brief: rpc抽离出servantproxy 24 | """ 25 | import threading 26 | import time 27 | 28 | from __logger import tarsLogger 29 | from __util import util 30 | from __packet import RequestPacket 31 | 32 | # from __packet import ResponsePacket 33 | from __TimeoutQueue import ReqMessage 34 | import exception 35 | from exception import TarsException 36 | 37 | 38 | class ServantProxy(object): 39 | """ 40 | @brief: 1、远程对象的本地代理 41 | 2、同名servant在一个通信器中最多只有一个实例 42 | 3、防止和用户在Tars中定义的函数名冲突,接口以tars_开头 43 | """ 44 | 45 | # 服务器响应的错误码 46 | TARSSERVERSUCCESS = 0 # 服务器端处理成功 47 | TARSSERVERDECODEERR = -1 # 服务器端解码异常 48 | TARSSERVERENCODEERR = -2 # 服务器端编码异常 49 | TARSSERVERNOFUNCERR = -3 # 服务器端没有该函数 50 | TARSSERVERNOSERVANTERR = -4 # 服务器端五该Servant对象 51 | TARSSERVERRESETGRID = -5 # 服务器端灰度状态不一致 52 | TARSSERVERQUEUETIMEOUT = -6 # 服务器队列超过限制 53 | TARSASYNCCALLTIMEOUT = -7 # 异步调用超时 54 | TARSPROXYCONNECTERR = -8 # proxy链接异常 55 | TARSSERVERUNKNOWNERR = -99 # 服务器端未知异常 56 | 57 | TARSVERSION = 1 58 | TUPVERSION = 2 59 | TUPVERSION2 = 3 60 | 61 | TARSNORMAL = 0 62 | TARSONEWAY = 1 63 | 64 | TARSMESSAGETYPENULL = 0 65 | TARSMESSAGETYPEHASH = 1 66 | TARSMESSAGETYPEGRID = 2 67 | TARSMESSAGETYPEDYED = 4 68 | TARSMESSAGETYPESAMPLE = 8 69 | TARSMESSAGETYPEASYNC = 16 70 | 71 | mapcls_context = util.mapclass(util.string, util.string) 72 | 73 | def __init__(self): 74 | tarsLogger.debug("ServantProxy:__init__") 75 | self.__reactor = None 76 | self.__object = None 77 | self.__initialize = False 78 | 79 | def __del__(self): 80 | tarsLogger.debug("ServantProxy:__del__") 81 | 82 | def _initialize(self, reactor, obj): 83 | """ 84 | @brief: 初始化函数,需要调用才能使用ServantProxy 85 | @param reactor: 网络管理的reactor实例 86 | @type reactor: FDReactor 87 | @return: None 88 | @rtype: None 89 | """ 90 | tarsLogger.debug("ServantProxy:_initialize") 91 | 92 | assert reactor and obj 93 | if self.__initialize: 94 | return 95 | self.__reactor = reactor 96 | self.__object = obj 97 | self.__initialize = True 98 | 99 | def _terminate(self): 100 | """ 101 | @brief: 不再使用ServantProxy时调用,会释放相应资源 102 | @return: None 103 | @rtype: None 104 | """ 105 | tarsLogger.debug("ServantProxy:_terminate") 106 | self.__object = None 107 | self.__reactor = None 108 | self.__initialize = False 109 | 110 | def tars_name(self): 111 | """ 112 | @brief: 获取ServantProxy的名字 113 | @return: ServantProxy的名字 114 | @rtype: str 115 | """ 116 | return self.__object.name() 117 | 118 | def tars_timeout(self): 119 | """ 120 | @brief: 获取超时时间,单位是ms 121 | @return: 超时时间 122 | @rtype: int 123 | """ 124 | # 默认的为3S = ObjectProxy.DEFAULT_TIMEOUT 125 | return int(self.__timeout() * 1000) 126 | 127 | def tars_ping(self): 128 | pass 129 | 130 | # def tars_initialize(self): 131 | # pass 132 | 133 | # def tars_terminate(self): 134 | # pass 135 | 136 | def tars_invoke(self, cPacketType, sFuncName, sBuffer, context, status): 137 | """ 138 | @brief: TARS协议同步方法调用 139 | @param cPacketType: 请求包类型 140 | @type cPacketType: int 141 | @param sFuncName: 调用函数名 142 | @type sFuncName: str 143 | @param sBuffer: 序列化后的发送参数 144 | @type sBuffer: str 145 | @param context: 上下文件信息 146 | @type context: ServantProxy.mapcls_context 147 | @param status: 状态信息 148 | @type status: 149 | @return: 响应报文 150 | @rtype: ResponsePacket 151 | """ 152 | tarsLogger.debug("ServantProxy:tars_invoke, func: %s", sFuncName) 153 | req = RequestPacket() 154 | req.iVersion = ServantProxy.TARSVERSION 155 | req.cPacketType = cPacketType 156 | req.iMessageType = ServantProxy.TARSMESSAGETYPENULL 157 | req.iRequestId = 0 158 
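# The ReqMessage assembled below carries a threading.Condition (reqmsg.lock); for a
# synchronous call, __invoke() waits on that condition until the network thread
# signals the response through _finished() or the wait times out.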
| req.sServantName = self.tars_name() 159 | req.sFuncName = sFuncName 160 | req.sBuffer = sBuffer 161 | req.iTimeout = self.tars_timeout() 162 | 163 | reqmsg = ReqMessage() 164 | reqmsg.type = ReqMessage.SYNC_CALL 165 | reqmsg.servant = self 166 | reqmsg.lock = threading.Condition() 167 | reqmsg.request = req 168 | reqmsg.begtime = time.time() 169 | # # test 170 | reqmsg.isHash = True 171 | reqmsg.isConHash = True 172 | reqmsg.hashCode = 123456 173 | 174 | rsp = None 175 | try: 176 | rsp = self.__invoke(reqmsg) 177 | except exception.TarsSyncCallTimeoutException: 178 | if reqmsg.adapter: 179 | reqmsg.adapter.finishInvoke(True) 180 | raise 181 | except TarsException: 182 | raise 183 | except: 184 | raise TarsException("ServantProxy::tars_invoke excpetion") 185 | 186 | if reqmsg.adapter: 187 | reqmsg.adapter.finishInvoke(False) 188 | 189 | return rsp 190 | 191 | def tars_invoke_async(self, cPacketType, sFuncName, sBuffer, context, status, callback): 192 | """ 193 | @brief: TARS协议同步方法调用 194 | @param cPacketType: 请求包类型 195 | @type cPacketType: int 196 | @param sFuncName: 调用函数名 197 | @type sFuncName: str 198 | @param sBuffer: 序列化后的发送参数 199 | @type sBuffer: str 200 | @param context: 上下文件信息 201 | @type context: ServantProxy.mapcls_context 202 | @param status: 状态信息 203 | @type status: 204 | @param callback: 异步调用回调对象 205 | @type callback: ServantProxyCallback的子类 206 | @return: 响应报文 207 | @rtype: ResponsePacket 208 | """ 209 | tarsLogger.debug("ServantProxy:tars_invoke") 210 | req = RequestPacket() 211 | req.iVersion = ServantProxy.TARSVERSION 212 | req.cPacketType = cPacketType if callback else ServantProxy.TARSONEWAY 213 | req.iMessageType = ServantProxy.TARSMESSAGETYPENULL 214 | req.iRequestId = 0 215 | req.sServantName = self.tars_name() 216 | req.sFuncName = sFuncName 217 | req.sBuffer = sBuffer 218 | req.iTimeout = self.tars_timeout() 219 | 220 | reqmsg = ReqMessage() 221 | reqmsg.type = ReqMessage.ASYNC_CALL if callback else ReqMessage.ONE_WAY 222 | reqmsg.callback = callback 223 | reqmsg.servant = self 224 | reqmsg.request = req 225 | reqmsg.begtime = time.time() 226 | 227 | rsp = None 228 | try: 229 | rsp = self.__invoke(reqmsg) 230 | except TarsException: 231 | raise 232 | except Exception: 233 | raise TarsException("ServantProxy::tars_invoke excpetion") 234 | 235 | if reqmsg.adapter: 236 | reqmsg.adapter.finishInvoke(False) 237 | 238 | return rsp 239 | 240 | def __timeout(self): 241 | """ 242 | @brief: 获取超时时间,单位是s 243 | @return: 超时时间 244 | @rtype: float 245 | """ 246 | return self.__object.timeout() 247 | 248 | def __invoke(self, reqmsg): 249 | """ 250 | @brief: 远程过程调用 251 | @param reqmsg: 请求数据 252 | @type reqmsg: ReqMessage 253 | @return: 调用成功或失败 254 | @rtype: bool 255 | """ 256 | tarsLogger.debug("ServantProxy:invoke, func: %s", reqmsg.request.sFuncName) 257 | ret = self.__object.invoke(reqmsg) 258 | if ret == -2: 259 | errmsg = ( 260 | "ServantProxy::invoke fail, no valid servant," 261 | + " servant name : %s, function name : %s" 262 | % ( 263 | reqmsg.request.sServantName, 264 | reqmsg.request.sFuncName, 265 | ) 266 | ) 267 | raise TarsException(errmsg) 268 | if ret == -1: 269 | errmsg = ( 270 | "ServantProxy::invoke connect fail," 271 | + " servant name : %s, function name : %s, adapter : %s" 272 | % ( 273 | reqmsg.request.sServantName, 274 | reqmsg.request.sFuncName, 275 | reqmsg.adapter.getEndPointInfo(), 276 | ) 277 | ) 278 | raise TarsException(errmsg) 279 | elif ret != 0: 280 | errmsg = ( 281 | "ServantProxy::invoke unknown fail, " 282 | + "Servant name : %s, function name : %s" 
283 | % ( 284 | reqmsg.request.sServantName, 285 | reqmsg.request.sFuncName, 286 | ) 287 | ) 288 | raise TarsException(errmsg) 289 | 290 | if reqmsg.type == ReqMessage.SYNC_CALL: 291 | reqmsg.lock.acquire() 292 | reqmsg.lock.wait(self.__timeout()) 293 | reqmsg.lock.release() 294 | 295 | if not reqmsg.response: 296 | errmsg = ( 297 | "ServantProxy::invoke timeout: %d, servant name" 298 | ": %s, adapter: %s, request id: %d" 299 | % ( 300 | self.tars_timeout(), 301 | self.tars_name(), 302 | reqmsg.adapter.trans().getEndPointInfo(), 303 | reqmsg.request.iRequestId, 304 | ) 305 | ) 306 | raise exception.TarsSyncCallTimeoutException(errmsg) 307 | elif reqmsg.response.iRet == ServantProxy.TARSSERVERSUCCESS: 308 | return reqmsg.response 309 | else: 310 | errmsg = "servant name: %s, function name: %s" % ( 311 | self.tars_name(), 312 | reqmsg.request.sFuncName, 313 | ) 314 | self.tarsRaiseException(reqmsg.response.iRet, errmsg) 315 | 316 | def _finished(self, reqmsg): 317 | """ 318 | @brief: 通知远程过程调用线程响应报文到了 319 | @param reqmsg: 请求响应报文 320 | @type reqmsg: ReqMessage 321 | @return: 函数执行成功或失败 322 | @rtype: bool 323 | """ 324 | tarsLogger.debug("ServantProxy:finished") 325 | if not reqmsg.lock: 326 | return False 327 | reqmsg.lock.acquire() 328 | reqmsg.lock.notifyAll() 329 | reqmsg.lock.release() 330 | return True 331 | 332 | def tarsRaiseException(self, errno, desc): 333 | """ 334 | @brief: 服务器调用失败,根据服务端给的错误码抛出异常 335 | @param errno: 错误码 336 | @type errno: int 337 | @param desc: 错误描述 338 | @type desc: str 339 | @return: 没有返回值,函数会抛出异常 340 | @rtype: 341 | """ 342 | if errno == ServantProxy.TARSSERVERSUCCESS: 343 | return 344 | 345 | elif errno == ServantProxy.TARSSERVERDECODEERR: 346 | raise exception.TarsServerDecodeException( 347 | "server decode exception: errno: %d, msg: %s" % (errno, desc) 348 | ) 349 | 350 | elif errno == ServantProxy.TARSSERVERENCODEERR: 351 | raise exception.TarsServerEncodeException( 352 | "server encode exception: errno: %d, msg: %s" % (errno, desc) 353 | ) 354 | 355 | elif errno == ServantProxy.TARSSERVERNOFUNCERR: 356 | raise exception.TarsServerNoFuncException( 357 | "server function mismatch exception: errno: %d, msg: %s" % (errno, desc) 358 | ) 359 | 360 | elif errno == ServantProxy.TARSSERVERNOSERVANTERR: 361 | raise exception.TarsServerNoServantException( 362 | "server servant mismatch exception: errno: %d, msg: %s" % (errno, desc) 363 | ) 364 | 365 | elif errno == ServantProxy.TARSSERVERRESETGRID: 366 | raise exception.TarsServerResetGridException( 367 | "server reset grid exception: errno: %d, msg: %s" % (errno, desc) 368 | ) 369 | 370 | elif errno == ServantProxy.TARSSERVERQUEUETIMEOUT: 371 | raise exception.TarsServerQueueTimeoutException( 372 | "server queue timeout exception: errno: %d, msg: %s" % (errno, desc) 373 | ) 374 | 375 | elif errno == ServantProxy.TARSPROXYCONNECTERR: 376 | raise exception.TarsServerQueueTimeoutException( 377 | "server connection lost: errno: %d, msg: %s" % (errno, desc) 378 | ) 379 | 380 | else: 381 | raise exception.TarsServerUnknownException( 382 | "server unknown exception: errno: %d, msg: %s" % (errno, desc) 383 | ) 384 | -------------------------------------------------------------------------------- /danmaku/tars/__rpc.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # filename: __rpc.py 4 | 5 | # Tencent is pleased to support the open source community by making Tars available. 
6 | # 7 | # Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved. 8 | # 9 | # Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 10 | # in compliance with the License. You may obtain a copy of the License at 11 | # 12 | # https://opensource.org/licenses/BSD-3-Clause 13 | # 14 | # Unless required by applicable law or agreed to in writing, software distributed 15 | # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 16 | # CONDITIONS OF ANY KIND, either express or implied. See the License for the 17 | # specific language governing permissions and limitations under the License. 18 | # 19 | 20 | 21 | """ 22 | @version: 0.01 23 | @brief: rpc调用逻辑实现 24 | """ 25 | 26 | import time 27 | import argparse 28 | 29 | from .__logger import tarsLogger 30 | from .__logger import initLog 31 | from .__trans import EndPointInfo 32 | from .__TimeoutQueue import TimeoutQueue 33 | from .__TimeoutQueue import QueueTimeout 34 | from .__trans import FDReactor 35 | from .__adapterproxy import AdapterProxyManager 36 | from .__servantproxy import ServantProxy 37 | from .exception import TarsException 38 | from .__async import AsyncProcThread 39 | 40 | 41 | class Communicator: 42 | """ 43 | @brief: 通讯器,创建和维护ServantProxy、ObjectProxy、FDReactor线程和超时线程 44 | """ 45 | 46 | default_config = { 47 | "tars": { 48 | "application": { 49 | "client": { 50 | "async-invoke-timeout": 20000, 51 | "asyncthread": 0, 52 | "locator": "", 53 | "loglevel": "error", 54 | "logpath": "tars.log", 55 | "logsize": 15728640, 56 | "lognum": 0, 57 | "refresh-endpoint-interval": 60000, 58 | "sync-invoke-timeout": 5000, 59 | } 60 | } 61 | } 62 | } 63 | 64 | def __init__(self, config={}): 65 | tarsLogger.debug("Communicator:__init__") 66 | self.__terminate = False 67 | self.__initialize = False 68 | self.__objects = {} 69 | self.__servants = {} 70 | self.__reactor = None 71 | self.__qTimeout = None 72 | self.__asyncProc = None 73 | self.__config = Communicator.default_config.copy() 74 | self.__config.update(config) 75 | self.initialize() 76 | 77 | def __del__(self): 78 | tarsLogger.debug("Communicator:__del__") 79 | 80 | def initialize(self): 81 | """ 82 | @brief: 使用通讯器前必须先调用此函数 83 | """ 84 | tarsLogger.debug("Communicator:initialize") 85 | if self.__initialize: 86 | return 87 | logpath = self.getProperty("logpath") 88 | logsize = self.getProperty("logsize", int) 89 | lognum = self.getProperty("lognum", int) 90 | loglevel = self.getProperty("loglevel") 91 | initLog(logpath, logsize, lognum, loglevel) 92 | 93 | self.__reactor = FDReactor() 94 | self.__reactor.initialize() 95 | self.__reactor.start() 96 | 97 | self.__qTimeout = QueueTimeout() 98 | self.__qTimeout.setHandler(self.handleTimeout) 99 | self.__qTimeout.start() 100 | 101 | async_num = self.getProperty("asyncthread", int) 102 | self.__asyncProc = AsyncProcThread() 103 | self.__asyncProc.initialize(async_num) 104 | self.__asyncProc.start() 105 | 106 | self.__initialize = True 107 | 108 | def terminate(self): 109 | """ 110 | @brief: 不再使用通讯器需调用此函数释放资源 111 | """ 112 | tarsLogger.debug("Communicator:terminate") 113 | 114 | if not self.__initialize: 115 | return 116 | 117 | self.__reactor.terminate() 118 | self.__qTimeout.terminate() 119 | self.__asyncProc.terminate() 120 | 121 | for objName in self.__servants: 122 | self.__servants[objName]._terminate() 123 | 124 | for objName in self.__objects: 125 | self.__objects[objName].terminate() 126 | 127 | self.__objects = {} 128 | self.__servants = {} 129 | self.__reactor = 
None 130 | self.__initialize = False 131 | 132 | def parseConnAddr(self, connAddr): 133 | """ 134 | @brief: 解析connAddr字符串 135 | @param connAddr: 连接地址 136 | @type connAddr: str 137 | @return: 解析结果 138 | @rtype: dict, key是str,val里name是str, 139 | timeout是float,endpoint是EndPointInfo的list 140 | """ 141 | tarsLogger.debug("Communicator:parseConnAddr") 142 | connAddr = connAddr.strip() 143 | connInfo = {"name": "", "timeout": -1, "endpoint": []} 144 | if "@" not in connAddr: 145 | connInfo["name"] = connAddr 146 | return connInfo 147 | 148 | try: 149 | tks = connAddr.split("@") 150 | connInfo["name"] = tks[0] 151 | tks = tks[1].lower().split(":") 152 | parser = argparse.ArgumentParser(add_help=False) 153 | parser.add_argument("-h") 154 | parser.add_argument("-p") 155 | parser.add_argument("-t") 156 | for tk in tks: 157 | argv = tk.split() 158 | if argv[0] != "tcp": 159 | raise TarsException("unsupport transmission protocal : %s" % connInfo["name"]) 160 | mes = parser.parse_args(argv[1:]) 161 | try: 162 | ip = mes.h if mes.h is not None else "" 163 | port = int(mes.p) if mes.p is not None else "-1" 164 | timeout = int(mes.t) if mes.t is not None else "-1" 165 | connInfo["endpoint"].append(EndPointInfo(ip, port, timeout)) 166 | except Exception: 167 | raise TarsException("Unrecognized option : %s" % mes) 168 | except TarsException: 169 | raise 170 | 171 | except Exception as exp: 172 | raise TarsException(exp) 173 | 174 | return connInfo 175 | 176 | def getReactor(self): 177 | """ 178 | @brief: 获取reactor 179 | """ 180 | return self.__reactor 181 | 182 | def getAsyncProc(self): 183 | """ 184 | @brief: 获取asyncProc 185 | """ 186 | return self.__asyncProc 187 | 188 | def getProperty(self, name, dt_type=str): 189 | """ 190 | @brief: 获取配置 191 | @param name: 配置名称 192 | @type name: str 193 | @param dt_type: 数据类型 194 | @type name: type 195 | @return: 配置内容 196 | @rtype: str 197 | """ 198 | try: 199 | ret = self.__config["tars"]["application"]["client"][name] 200 | ret = dt_type(ret) 201 | except: 202 | ret = Communicator.default_config["tars"]["application"]["client"][name] 203 | 204 | return ret 205 | 206 | def setProperty(self, name, value): 207 | """ 208 | @brief: 修改配置 209 | @param name: 配置名称 210 | @type propertys: str 211 | @param value: 配置内容 212 | @type propertys: str 213 | @return: 设置是否成功 214 | @rtype: bool 215 | """ 216 | try: 217 | self.__config["tars"]["application"]["client"][name] = value 218 | return True 219 | except: 220 | return False 221 | 222 | def setPropertys(self, propertys): 223 | """ 224 | @brief: 修改配置 225 | @param propertys: 配置集合 226 | @type propertys: map, key type: str, value type: str 227 | @return: 无 228 | @rtype: None 229 | """ 230 | pass 231 | 232 | def updateConfig(self): 233 | """ 234 | @brief: 重新设置配置 235 | """ 236 | 237 | def stringToProxy(self, servantProxy, connAddr): 238 | """ 239 | @brief: 初始化ServantProxy 240 | @param connAddr: 服务器地址信息 241 | @type connAddr: str 242 | @param servant: servant proxy 243 | @type servant: ServantProxy子类 244 | @return: 无 245 | @rtype: None 246 | @note: 如果connAddr的ServantObj连接过,返回连接过的ServantProxy 247 | 如果没有连接过,用参数servant初始化,返回servant 248 | """ 249 | tarsLogger.debug("Communicator:stringToProxy") 250 | 251 | connInfo = self.parseConnAddr(connAddr) 252 | objName = connInfo["name"] 253 | if objName in self.__servants: 254 | return self.__servants[objName] 255 | 256 | objectPrx = ObjectProxy() 257 | objectPrx.initialize(self, connInfo) 258 | 259 | servantPrx = servantProxy() 260 | servantPrx._initialize(self.__reactor, objectPrx) 261 | 
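# Cache both proxies under the object name so a later stringToProxy() call for the
# same object reuses this ServantProxy instead of building a new ObjectProxy.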
self.__objects[objName] = objectPrx 262 | self.__servants[objName] = servantPrx 263 | return servantPrx 264 | 265 | def handleTimeout(self): 266 | """ 267 | @brief: 处理超时事件 268 | @return: 无 269 | @rtype: None 270 | """ 271 | # tarsLogger.debug('Communicator:handleTimeout') 272 | for obj in self.__objects.values(): 273 | obj.handleQueueTimeout() 274 | 275 | 276 | class ObjectProxy: 277 | """ 278 | @brief: 一个object name在一个Communicator里有一个objectproxy 279 | 管理收发的消息队列 280 | """ 281 | 282 | DEFAULT_TIMEOUT = 3.0 283 | 284 | def __init__(self): 285 | tarsLogger.debug("ObjectProxy:__init__") 286 | self.__name = "" 287 | self.__timeout = ObjectProxy.DEFAULT_TIMEOUT 288 | self.__comm = None 289 | self.__epi = None 290 | self.__adpmanager = None 291 | self.__timeoutQueue = None 292 | # self.__adapter = None 293 | self.__initialize = False 294 | 295 | def __del__(self): 296 | tarsLogger.debug("ObjectProxy:__del__") 297 | 298 | def initialize(self, comm, connInfo): 299 | """ 300 | @brief: 初始化,使用ObjectProxy前必须调用 301 | @param comm: 通讯器 302 | @type comm: Communicator 303 | @param connInfo: 连接信息 304 | @type comm: dict 305 | @return: None 306 | @rtype: None 307 | """ 308 | if self.__initialize: 309 | return 310 | tarsLogger.debug("ObjectProxy:initialize") 311 | self.__comm = comm 312 | # async-invoke-timeout来设置队列时间 313 | async_timeout = self.__comm.getProperty("async-invoke-timeout", float) / 1000 314 | self.__timeoutQueue = TimeoutQueue(async_timeout) 315 | 316 | self.__name = connInfo["name"] 317 | 318 | self.__timeout = self.__comm.getProperty("sync-invoke-timeout", float) / 1000 319 | 320 | # 通过Communicator的配置设置超时 321 | # 不再通过连接信息的-t来设置 322 | # if connInfo['timeout'] != -1: 323 | # self.__timeout = connInfo['timeout'] 324 | eplist = connInfo["endpoint"] 325 | 326 | self.__adpmanager = AdapterProxyManager() 327 | self.__adpmanager.initialize(comm, self, eplist) 328 | 329 | self.__initialize = True 330 | 331 | def terminate(self): 332 | """ 333 | @brief: 回收资源,不再使用ObjectProxy时调用 334 | @return: None 335 | @rtype: None 336 | """ 337 | tarsLogger.debug("ObjectProxy:terminate") 338 | self.__timeoutQueue = None 339 | self.__adpmanager.terminate() 340 | self.__initialize = False 341 | 342 | def name(self): 343 | """ 344 | @brief: 获取object name 345 | @return: object name 346 | @rtype: str 347 | """ 348 | return self.__name 349 | 350 | # def setTimeout(self, timeout): 351 | # ''' 352 | # @brief: 设置超时 353 | # @param timeout: 超时时间,单位为s 354 | # @type timeout: float 355 | # @return: None 356 | # @rtype: None 357 | # ''' 358 | # self.__timeout = timeout 359 | # self.__timeoutQueue.setTimeout(timeout) 360 | 361 | def timeout(self): 362 | """ 363 | @brief: 获取超时时间 364 | @return: 超时时间,单位为s 365 | @rtype: float 366 | """ 367 | return self.__timeout 368 | 369 | def getTimeoutQueue(self): 370 | """ 371 | @brief: 获取超时队列 372 | @return: 超时队列 373 | @rtype: TimeoutQueue 374 | """ 375 | return self.__timeoutQueue 376 | 377 | def handleQueueTimeout(self): 378 | """ 379 | @brief: 超时事件发生时处理超时事务 380 | @return: None 381 | @rtype: None 382 | """ 383 | # tarsLogger.debug('ObjectProxy:handleQueueTimeout') 384 | self.__timeoutQueue.timeout() 385 | 386 | def invoke(self, reqmsg): 387 | """ 388 | @brief: 远程过程调用 389 | @param reqmsg: 请求响应报文 390 | @type reqmsg: ReqMessage 391 | @return: 错误码 392 | @rtype: 393 | """ 394 | tarsLogger.debug( 395 | "ObjectProxy:invoke, objname: %s, func: %s", self.__name, reqmsg.request.sFuncName 396 | ) 397 | # 负载均衡 398 | # adapter = self.__adpmanager.getNextValidProxy() 399 | adapter = 
self.__adpmanager.selectAdapterProxy(reqmsg) 400 | if not adapter: 401 | tarsLogger.error("invoke %s, select adapter proxy return None", self.__name) 402 | return -2 403 | 404 | adapter.checkActive(True) 405 | reqmsg.adapter = adapter 406 | return adapter.invoke(reqmsg) 407 | 408 | # 弹出请求报文 409 | def popRequest(self): 410 | """ 411 | @brief: 返回消息队列里的请求响应报文,FIFO 412 | 不删除TimeoutQueue里的数据,响应时要用 413 | @return: 请求响应报文 414 | @rtype: ReqMessage 415 | """ 416 | return self.__timeoutQueue.pop(erase=False) 417 | 418 | 419 | if __name__ == "__main__": 420 | connAddr = "apptest.lightServer.lightServantObj@tcp -h 10.130.64.220 -p 10001 -t 10000" 421 | connAddr = "MTT.BookMarksUnifyServer.BookMarksUnifyObj@tcp -h 172.17.149.77 -t 60000 -p 10023" 422 | comm = Communicator() 423 | comm.initialize() 424 | servant = ServantProxy() 425 | servant = comm.stringToProxy(ServantProxy, connAddr) 426 | print(servant.tars_timeout()) 427 | try: 428 | rsp = servant.tars_invoke( 429 | ServantProxy.TARSNORMAL, "test", "", ServantProxy.mapcls_context(), None 430 | ) 431 | print("Servant invoke success, request id: %d, iRet: %d" % (rsp.iRequestId, rsp.iRet)) 432 | except Exception as msg: 433 | print(msg) 434 | finally: 435 | servant._terminate() 436 | time.sleep(2) 437 | print("app closing ...") 438 | comm.terminate() 439 | time.sleep(2) 440 | print("app closed") 441 | -------------------------------------------------------------------------------- /danmaku/tars/__trans.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # filename: __trans.py 4 | 5 | # Tencent is pleased to support the open source community by making Tars available. 6 | # 7 | # Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved. 8 | # 9 | # Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 10 | # in compliance with the License. You may obtain a copy of the License at 11 | # 12 | # https://opensource.org/licenses/BSD-3-Clause 13 | # 14 | # Unless required by applicable law or agreed to in writing, software distributed 15 | # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 16 | # CONDITIONS OF ANY KIND, either express or implied. See the License for the 17 | # specific language governing permissions and limitations under the License. 
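# Illustrative note on the address format used by parseConnAddr()/stringToProxy() in
# __rpc.py above: a TARS object address has the form
#   "App.Server.Obj@tcp -h <ip> -p <port> -t <timeout-ms>"
# and parses into {"name": "App.Server.Obj", "timeout": -1, "endpoint": [EndPointInfo, ...]};
# a bare "App.Server.Obj" with no "@" yields only the name and an empty endpoint list.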
18 | # 19 | 20 | 21 | """ 22 | @version: 0.01 23 | @brief: 网络相关模块 24 | """ 25 | 26 | import socket 27 | import select 28 | import errno 29 | import threading 30 | 31 | from .__logger import tarsLogger 32 | from .__TimeoutQueue import ReqMessage 33 | 34 | 35 | class EndPointInfo: 36 | """ 37 | @brief: 保存每个连接端口的信息 38 | """ 39 | 40 | SOCK_TCP = "TCP" 41 | SOCK_UDP = "UDP" 42 | 43 | def __init__(self, ip="", port=0, timeout=-1, weight=0, weightType=0, connType=SOCK_TCP): 44 | self.__ip = ip 45 | self.__port = port 46 | self.__timeout = timeout 47 | self.__connType = connType 48 | self.__weightType = weightType 49 | self.__weight = weight 50 | 51 | def getIp(self): 52 | return self.__ip 53 | 54 | def getPort(self): 55 | return self.__port 56 | 57 | def getConnType(self): 58 | """ 59 | @return: 传输层连接类型 60 | @rtype: EndPointInfo.SOCK_TCP 或 EndPointInfo.SOCK_UDP 61 | """ 62 | return self.__connType 63 | 64 | def getWeightType(self): 65 | return self.__weightType 66 | 67 | def getWeight(self): 68 | return self.__weight 69 | 70 | def __str__(self): 71 | return "%s %s:%s %d:%d" % ( 72 | self.__connType, 73 | self.__ip, 74 | self.__port, 75 | self.__weightType, 76 | self.__weight, 77 | ) 78 | 79 | 80 | class Transceiver: 81 | """ 82 | @brief: 网络传输基类,提供网络send/recv接口 83 | """ 84 | 85 | CONNECTED = 0 86 | CONNECTING = 1 87 | UNCONNECTED = 2 88 | 89 | def __init__(self, endPointInfo): 90 | tarsLogger.debug("Transceiver:__init__, %s", endPointInfo) 91 | self.__epi = endPointInfo 92 | self.__sock = None 93 | self.__connStatus = Transceiver.UNCONNECTED 94 | self.__connFailed = False 95 | # 这两个变量要给子类用,不能用name mangling隐藏 96 | self._sendBuff = "" 97 | self._recvBuf = "" 98 | 99 | def __del__(self): 100 | tarsLogger.debug("Transceiver:__del__") 101 | self.close() 102 | 103 | def getSock(self): 104 | """ 105 | @return: socket对象 106 | @rtype: socket.socket 107 | """ 108 | return self.__sock 109 | 110 | def getFd(self): 111 | """ 112 | @brief: 获取socket的文件描述符 113 | @return: 如果self.__sock没有建立返回-1 114 | @rtype: int 115 | """ 116 | if self.__sock: 117 | return self.__sock.fileno() 118 | else: 119 | return -1 120 | 121 | def getEndPointInfo(self): 122 | """ 123 | @return: 端口信息 124 | @rtype: EndPointInfo 125 | """ 126 | return self.__epi 127 | 128 | def isValid(self): 129 | """ 130 | @return: 是否创建了socket 131 | @rtype: bool 132 | """ 133 | return self.__sock is not None 134 | 135 | def hasConnected(self): 136 | """ 137 | @return: 是否连接上了 138 | @rtype: bool 139 | """ 140 | return self.isValid() and self.__connStatus == Transceiver.CONNECTED 141 | 142 | def isConnFailed(self): 143 | """ 144 | @return: 是否连接失败 145 | @rtype: bool 146 | """ 147 | return self.__connFailed 148 | 149 | def isConnecting(self): 150 | """ 151 | @return: 是否正在连接 152 | @rtype: bool 153 | """ 154 | return self.isValid() and self.__connStatus == Transceiver.CONNECTING 155 | 156 | def setConnFailed(self): 157 | """ 158 | @brief: 设置为连接失败 159 | @return: None 160 | @rtype: None 161 | """ 162 | self.__connFailed = True 163 | self.__connStatus = Transceiver.UNCONNECTED 164 | 165 | def setConnected(self): 166 | """ 167 | @brief: 设置为连接完 168 | @return: None 169 | @rtype: None 170 | """ 171 | self.__connFailed = False 172 | self.__connStatus = Transceiver.CONNECTED 173 | 174 | def close(self): 175 | """ 176 | @brief: 关闭连接 177 | @return: None 178 | @rtype: None 179 | @note: 多次调用不会有问题 180 | """ 181 | tarsLogger.debug("Transceiver:close") 182 | if not self.isValid(): 183 | return 184 | self.__sock.close() 185 | self.__sock = None 186 | self.__connStatus = 
Transceiver.UNCONNECTED 187 | self.__connFailed = False 188 | self._sendBuff = "" 189 | self._recvBuf = "" 190 | tarsLogger.info("trans close : %s" % self.__epi) 191 | 192 | def writeToSendBuf(self, msg): 193 | """ 194 | @brief: 把数据添加到send buffer里 195 | @param msg: 发送的数据 196 | @type msg: str 197 | @return: None 198 | @rtype: None 199 | @note: 没有加锁,多线程调用会有race conditions 200 | """ 201 | self._sendBuff += msg 202 | 203 | def recv(self, bufsize, flag=0): 204 | raise NotImplementedError() 205 | 206 | def send(self, buf, flag=0): 207 | raise NotImplementedError() 208 | 209 | def doResponse(self): 210 | raise NotImplementedError() 211 | 212 | def doRequest(self): 213 | """ 214 | @brief: 将请求数据发送出去 215 | @return: 发送的字节数 216 | @rtype: int 217 | """ 218 | tarsLogger.debug("Transceiver:doRequest") 219 | if not self.isValid(): 220 | return -1 221 | 222 | nbytes = 0 223 | buf = buffer(self._sendBuff) 224 | while True: 225 | if not buf: 226 | break 227 | ret = self.send(buf[nbytes:]) 228 | if ret > 0: 229 | nbytes += ret 230 | else: 231 | break 232 | 233 | # 发送前面的字节后将后面的字节拷贝上来 234 | self._sendBuff = buf[nbytes:] 235 | return nbytes 236 | 237 | def reInit(self): 238 | """ 239 | @brief: 初始化socket,并连接服务器 240 | @return: 成功返回0,失败返回-1 241 | @rtype: int 242 | """ 243 | tarsLogger.debug("Transceiver:reInit") 244 | assert self.isValid() is False 245 | if self.__epi.getConnType() != EndPointInfo.SOCK_TCP: 246 | return -1 247 | try: 248 | self.__sock = socket.socket() 249 | self.__sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 250 | self.__sock.setblocking(0) 251 | self.__sock.connect((self.__epi.getIp(), self.__epi.getPort())) 252 | self.__connStatus = Transceiver.CONNECTED 253 | except socket.error as msg: 254 | if msg.errno == errno.EINPROGRESS: 255 | self.__connStatus = Transceiver.CONNECTING 256 | else: 257 | tarsLogger.info("reInit, %s, faild!, %s", self.__epi, msg) 258 | self.__sock = None 259 | return -1 260 | tarsLogger.info("reInit, connect: %s, fd: %d", self.__epi, self.getFd()) 261 | return 0 262 | 263 | 264 | class TcpTransceiver(Transceiver): 265 | """ 266 | @brief: TCP传输实现 267 | """ 268 | 269 | def send(self, buf, flag=0): 270 | """ 271 | @brief: 实现tcp的发送 272 | @param buf: 发送的数据 273 | @type buf: str 274 | @param flag: 发送标志 275 | @param flag: int 276 | @return: 发送字节数 277 | @rtype: int 278 | """ 279 | tarsLogger.debug("TcpTransceiver:send") 280 | if not self.isValid(): 281 | return -1 282 | 283 | nbytes = 0 284 | try: 285 | nbytes = self.getSock().send(buf, flag) 286 | tarsLogger.info( 287 | "tcp send, fd: %d, %s, len: %d", self.getFd(), self.getEndPointInfo(), nbytes 288 | ) 289 | except socket.error as msg: 290 | if msg.errno != errno.EAGAIN: 291 | tarsLogger.error( 292 | "tcp send, fd: %d, %s, fail!, %s, close", 293 | self.getFd(), 294 | self.getEndPointInfo(), 295 | msg, 296 | ) 297 | self.close() 298 | return 0 299 | return nbytes 300 | 301 | def recv(self, bufsize, flag=0): 302 | """ 303 | @brief: 实现tcp的recv 304 | @param bufsize: 接收大小 305 | @type bufsize: int 306 | @param flag: 接收标志 307 | @param flag: int 308 | @return: 接收的内容,接收出错返回None 309 | @rtype: str 310 | """ 311 | tarsLogger.debug("TcpTransceiver:recv") 312 | assert self.isValid() 313 | 314 | buf = "" 315 | try: 316 | buf = self.getSock().recv(bufsize, flag) 317 | if len(buf) == 0: 318 | tarsLogger.info( 319 | "tcp recv, fd: %d, %s, recv 0 bytes, close", 320 | self.getFd(), 321 | self.getEndPointInfo(), 322 | ) 323 | self.close() 324 | return None 325 | except socket.error as msg: 326 | if msg.errno != errno.EAGAIN: 327 | 
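# errno.EAGAIN on this non-blocking socket only means no data is available yet; any
# other socket error is logged below and the transport is closed.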
tarsLogger.info( 328 | "tcp recv, fd: %d, %s, faild!, %s, close", 329 | self.getFd(), 330 | self.getEndPointInfo(), 331 | msg, 332 | ) 333 | self.close() 334 | return None 335 | 336 | tarsLogger.info( 337 | "tcp recv, fd: %d, %s, nbytes: %d", self.getFd(), self.getEndPointInfo(), len(buf) 338 | ) 339 | return buf 340 | 341 | def doResponse(self): 342 | """ 343 | @brief: 处理接收的数据 344 | @return: 返回响应报文的列表,如果出错返回None 345 | @rtype: list: ResponsePacket 346 | """ 347 | tarsLogger.debug("TcpTransceiver:doResponse") 348 | if not self.isValid(): 349 | return None 350 | 351 | bufs = [self._recvBuf] 352 | while True: 353 | buf = self.recv(8292) 354 | if not buf: 355 | break 356 | bufs.append(buf) 357 | self._recvBuf = "".join(bufs) 358 | tarsLogger.info("tcp doResponse, fd: %d, recvbuf: %d", self.getFd(), len(self._recvBuf)) 359 | 360 | if not self._recvBuf: 361 | return None 362 | 363 | rsplist = None 364 | try: 365 | rsplist, bufsize = ReqMessage.unpackRspList(self._recvBuf) 366 | self._recvBuf = self._recvBuf[bufsize:] 367 | except Exception as msg: 368 | tarsLogger.error( 369 | "tcp doResponse, fd: %d, %s, tcp recv unpack error: %s", 370 | self.getFd(), 371 | self.getEndPointInfo(), 372 | msg, 373 | ) 374 | self.close() 375 | 376 | return rsplist 377 | 378 | 379 | class FDReactor(threading.Thread): 380 | """ 381 | @brief: 监听FD事件并解发注册的handle 382 | """ 383 | 384 | def __init__(self): 385 | tarsLogger.debug("FDReactor:__init__") 386 | # threading.Thread.__init__(self) 387 | super(FDReactor, self).__init__() 388 | self.__terminate = False 389 | self.__ep = None 390 | self.__shutdown = None 391 | # {fd : adapterproxy} 392 | self.__adapterTab = {} 393 | 394 | def __del__(self): 395 | tarsLogger.debug("FDReactor:__del__") 396 | self.__ep.close() 397 | self.__shutdown.close() 398 | self.__ep = None 399 | self.__shutdown = None 400 | 401 | def initialize(self): 402 | """ 403 | @brief: 初始化,使用FDReactor前必须调用 404 | @return: None 405 | @rtype: None 406 | """ 407 | tarsLogger.debug("FDReactor:initialize") 408 | self.__ep = select.epoll() 409 | self.__shutdown = socket.socket() 410 | self.__ep.register(self.__shutdown.fileno(), select.EPOLLET | select.EPOLLIN) 411 | tarsLogger.debug("FDReactor init, shutdown fd : %d", self.__shutdown.fileno()) 412 | 413 | def terminate(self): 414 | """ 415 | @brief: 结束FDReactor的线程 416 | @return: None 417 | @rtype: None 418 | """ 419 | tarsLogger.debug("FDReactor:terminate") 420 | self.__terminate = True 421 | self.__ep.modify(self.__shutdown.fileno(), select.EPOLLOUT) 422 | self.__adapterTab = {} 423 | 424 | def handle(self, adapter, events): 425 | """ 426 | @brief: 处理epoll事件 427 | @param adapter: 事件对应的adapter 428 | @type adapter: AdapterProxy 429 | @param events: epoll事件 430 | @param events: int 431 | @return: None 432 | @rtype: None 433 | """ 434 | tarsLogger.debug("FDReactor:handle events : %d", events) 435 | assert adapter 436 | 437 | try: 438 | if events == 0: 439 | return 440 | 441 | if events & (select.EPOLLERR | select.EPOLLHUP): 442 | tarsLogger.debug( 443 | "FDReactor::handle EPOLLERR or EPOLLHUP: %s", adapter.trans().getEndPointInfo() 444 | ) 445 | adapter.trans().close() 446 | return 447 | 448 | if adapter.shouldCloseTrans(): 449 | tarsLogger.debug( 450 | "FDReactor::handle should close trans: %s", adapter.trans().getEndPointInfo() 451 | ) 452 | adapter.setCloseTrans(False) 453 | adapter.trans().close() 454 | return 455 | 456 | if adapter.trans().isConnecting(): 457 | if not adapter.finishConnect(): 458 | return 459 | 460 | if events & select.EPOLLIN: 461 | 
self.handleInput(adapter) 462 | 463 | if events & select.EPOLLOUT: 464 | self.handleOutput(adapter) 465 | 466 | except Exception as msg: 467 | tarsLogger.error("FDReactor handle exception: %s", msg) 468 | 469 | def handleExcept(self): 470 | pass 471 | 472 | def handleInput(self, adapter): 473 | """ 474 | @brief: 处理接收事件 475 | @param adapter: 事件对应的adapter 476 | @type adapter: AdapterProxy 477 | @return: None 478 | @rtype: None 479 | """ 480 | 481 | tarsLogger.debug("FDReactor:handleInput") 482 | if not adapter.trans().isValid(): 483 | return 484 | 485 | rsplist = adapter.trans().doResponse() 486 | if not rsplist: 487 | return 488 | for rsp in rsplist: 489 | adapter.finished(rsp) 490 | 491 | def handleOutput(self, adapter): 492 | """ 493 | @brief: 处理发送事件 494 | @param adapter: 事件对应的adapter 495 | @type adapter: AdapterProxy 496 | @return: None 497 | @rtype: None 498 | """ 499 | tarsLogger.debug("FDReactor:handleOutput") 500 | if not adapter.trans().isValid(): 501 | return 502 | while adapter.trans().doRequest() >= 0 and adapter.sendRequest(): 503 | pass 504 | 505 | def notify(self, adapter): 506 | """ 507 | @brief: 更新adapter对应的fd的epoll状态 508 | @return: None 509 | @rtype: None 510 | @note: FDReactor使用的epoll是EPOLLET模式,同一事件只通知一次 511 | 希望某一事件再次通知需调用此函数 512 | """ 513 | tarsLogger.debug("FDReactor:notify") 514 | fd = adapter.trans().getFd() 515 | if fd != -1: 516 | self.__ep.modify(fd, select.EPOLLET | select.EPOLLOUT | select.EPOLLIN) 517 | 518 | def registerAdapter(self, adapter, events): 519 | """ 520 | @brief: 注册adapter 521 | @param adapter: 收发事件处理类 522 | @type adapter: AdapterProxy 523 | @param events: 注册事件 524 | @type events: int 525 | @return: None 526 | @rtype: None 527 | """ 528 | tarsLogger.debug("FDReactor:registerAdapter events : %d", events) 529 | events |= select.EPOLLET 530 | try: 531 | self.__ep.unregister(adapter.trans().getFd()) 532 | except: 533 | pass 534 | self.__ep.register(adapter.trans().getFd(), events) 535 | self.__adapterTab[adapter.trans().getFd()] = adapter 536 | 537 | def unregisterAdapter(self, adapter): 538 | """ 539 | @brief: 注销adapter 540 | @param adapter: 收发事件处理类 541 | @type adapter: AdapterProxy 542 | @return: None 543 | @rtype: None 544 | """ 545 | tarsLogger.debug("FDReactor:registerAdapter") 546 | self.__ep.unregister(adapter.trans().getFd()) 547 | self.__adapterTab.pop(adapter.trans().getFd(), None) 548 | 549 | def run(self): 550 | """ 551 | @brief: 线程启动函数,循环监听网络事件 552 | """ 553 | tarsLogger.debug("FDReactor:run") 554 | 555 | while not self.__terminate: 556 | try: 557 | eplist = self.__ep.poll(1) 558 | if eplist: 559 | tarsLogger.debug( 560 | "FDReactor run get eplist : %s, terminate : %s", 561 | str(eplist), 562 | self.__terminate, 563 | ) 564 | if self.__terminate: 565 | tarsLogger.debug("FDReactor terminate") 566 | break 567 | for fd, events in eplist: 568 | adapter = self.__adapterTab.get(fd, None) 569 | if not adapter: 570 | continue 571 | self.handle(adapter, events) 572 | except Exception as msg: 573 | tarsLogger.error("FDReactor run exception: %s", msg) 574 | 575 | tarsLogger.debug("FDReactor:run finished") 576 | 577 | 578 | if __name__ == "__main__": 579 | print("hello world") 580 | epi = EndPointInfo("127.0.0.1", 1313) 581 | print(epi) 582 | trans = TcpTransceiver(epi) 583 | print(trans.getSock()) 584 | print(trans.getFd()) 585 | print(trans.reInit()) 586 | print(trans.isConnecting()) 587 | print(trans.hasConnected()) 588 | buf = "hello world" 589 | print(trans.send(buf)) 590 | buf = trans.recv(1024) 591 | print(buf) 592 | trans.close() 593 | 
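A note on the `__main__` demo above: it still hands `send()` a `str` payload and leaves `_sendBuff`/`_recvBuf` initialized as empty strings, which are Python 2 leftovers; on Python 3 the socket layer only accepts `bytes`. Below is a minimal, hedged sketch of exercising `EndPointInfo` and `TcpTransceiver` directly with bytes. It assumes a TCP echo server is already listening on 127.0.0.1:1313 (the address the demo uses), and the `select`-based wait plus the `SO_ERROR` check are not calls this module makes for you — they stand in for what `FDReactor` and `AdapterProxy.finishConnect()` (see `__adapterproxy.py` further down) normally do.

```python
# Sketch only: drive TcpTransceiver by hand, without FDReactor.
# Assumes an echo server on 127.0.0.1:1313; imports an internal,
# underscore-prefixed module, so treat this as illustration, not public API.
import select
import socket

from danmaku.tars.__trans import EndPointInfo, TcpTransceiver

epi = EndPointInfo("127.0.0.1", 1313)
trans = TcpTransceiver(epi)

if trans.reInit() == 0:  # non-blocking connect: state is CONNECTING or CONNECTED
    # Wait for writability, then confirm the connect the same way
    # AdapterProxy.finishConnect() does.
    select.select([], [trans.getFd()], [], 5.0)
    if trans.getSock().getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) == 0:
        trans.setConnected()

    if trans.hasConnected():
        trans.send(b"hello world")              # bytes, not str, on Python 3
        select.select([trans.getFd()], [], [], 5.0)
        print(trans.recv(1024))                 # received bytes, or None if the peer closed / errored

trans.close()
```

In the library proper these calls are never made by hand: `FDReactor` registers each connection's fd with edge-triggered epoll and dispatches readiness events to an `AdapterProxy`, which is also why `notify()` must be called again whenever new data is queued — with `EPOLLET` each readiness change is reported only once.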
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 2, June 1991 3 | 4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc., 5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 6 | Everyone is permitted to copy and distribute verbatim copies 7 | of this license document, but changing it is not allowed. 8 | 9 | Preamble 10 | 11 | The licenses for most software are designed to take away your 12 | freedom to share and change it. By contrast, the GNU General Public 13 | License is intended to guarantee your freedom to share and change free 14 | software--to make sure the software is free for all its users. This 15 | General Public License applies to most of the Free Software 16 | Foundation's software and to any other program whose authors commit to 17 | using it. (Some other Free Software Foundation software is covered by 18 | the GNU Lesser General Public License instead.) You can apply it to 19 | your programs, too. 20 | 21 | When we speak of free software, we are referring to freedom, not 22 | price. Our General Public Licenses are designed to make sure that you 23 | have the freedom to distribute copies of free software (and charge for 24 | this service if you wish), that you receive source code or can get it 25 | if you want it, that you can change the software or use pieces of it 26 | in new free programs; and that you know you can do these things. 27 | 28 | To protect your rights, we need to make restrictions that forbid 29 | anyone to deny you these rights or to ask you to surrender the rights. 30 | These restrictions translate to certain responsibilities for you if you 31 | distribute copies of the software, or if you modify it. 32 | 33 | For example, if you distribute copies of such a program, whether 34 | gratis or for a fee, you must give the recipients all the rights that 35 | you have. You must make sure that they, too, receive or can get the 36 | source code. And you must show them these terms so they know their 37 | rights. 38 | 39 | We protect your rights with two steps: (1) copyright the software, and 40 | (2) offer you this license which gives you legal permission to copy, 41 | distribute and/or modify the software. 42 | 43 | Also, for each author's protection and ours, we want to make certain 44 | that everyone understands that there is no warranty for this free 45 | software. If the software is modified by someone else and passed on, we 46 | want its recipients to know that what they have is not the original, so 47 | that any problems introduced by others will not reflect on the original 48 | authors' reputations. 49 | 50 | Finally, any free program is threatened constantly by software 51 | patents. We wish to avoid the danger that redistributors of a free 52 | program will individually obtain patent licenses, in effect making the 53 | program proprietary. To prevent this, we have made it clear that any 54 | patent must be licensed for everyone's free use or not licensed at all. 55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | GNU GENERAL PUBLIC LICENSE 60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 61 | 62 | 0. This License applies to any program or other work which contains 63 | a notice placed by the copyright holder saying it may be distributed 64 | under the terms of this General Public License. 
The "Program", below, 65 | refers to any such program or work, and a "work based on the Program" 66 | means either the Program or any derivative work under copyright law: 67 | that is to say, a work containing the Program or a portion of it, 68 | either verbatim or with modifications and/or translated into another 69 | language. (Hereinafter, translation is included without limitation in 70 | the term "modification".) Each licensee is addressed as "you". 71 | 72 | Activities other than copying, distribution and modification are not 73 | covered by this License; they are outside its scope. The act of 74 | running the Program is not restricted, and the output from the Program 75 | is covered only if its contents constitute a work based on the 76 | Program (independent of having been made by running the Program). 77 | Whether that is true depends on what the Program does. 78 | 79 | 1. You may copy and distribute verbatim copies of the Program's 80 | source code as you receive it, in any medium, provided that you 81 | conspicuously and appropriately publish on each copy an appropriate 82 | copyright notice and disclaimer of warranty; keep intact all the 83 | notices that refer to this License and to the absence of any warranty; 84 | and give any other recipients of the Program a copy of this License 85 | along with the Program. 86 | 87 | You may charge a fee for the physical act of transferring a copy, and 88 | you may at your option offer warranty protection in exchange for a fee. 89 | 90 | 2. You may modify your copy or copies of the Program or any portion 91 | of it, thus forming a work based on the Program, and copy and 92 | distribute such modifications or work under the terms of Section 1 93 | above, provided that you also meet all of these conditions: 94 | 95 | a) You must cause the modified files to carry prominent notices 96 | stating that you changed the files and the date of any change. 97 | 98 | b) You must cause any work that you distribute or publish, that in 99 | whole or in part contains or is derived from the Program or any 100 | part thereof, to be licensed as a whole at no charge to all third 101 | parties under the terms of this License. 102 | 103 | c) If the modified program normally reads commands interactively 104 | when run, you must cause it, when started running for such 105 | interactive use in the most ordinary way, to print or display an 106 | announcement including an appropriate copyright notice and a 107 | notice that there is no warranty (or else, saying that you provide 108 | a warranty) and that users may redistribute the program under 109 | these conditions, and telling the user how to view a copy of this 110 | License. (Exception: if the Program itself is interactive but 111 | does not normally print such an announcement, your work based on 112 | the Program is not required to print an announcement.) 113 | 114 | These requirements apply to the modified work as a whole. If 115 | identifiable sections of that work are not derived from the Program, 116 | and can be reasonably considered independent and separate works in 117 | themselves, then this License, and its terms, do not apply to those 118 | sections when you distribute them as separate works. 
But when you 119 | distribute the same sections as part of a whole which is a work based 120 | on the Program, the distribution of the whole must be on the terms of 121 | this License, whose permissions for other licensees extend to the 122 | entire whole, and thus to each and every part regardless of who wrote it. 123 | 124 | Thus, it is not the intent of this section to claim rights or contest 125 | your rights to work written entirely by you; rather, the intent is to 126 | exercise the right to control the distribution of derivative or 127 | collective works based on the Program. 128 | 129 | In addition, mere aggregation of another work not based on the Program 130 | with the Program (or with a work based on the Program) on a volume of 131 | a storage or distribution medium does not bring the other work under 132 | the scope of this License. 133 | 134 | 3. You may copy and distribute the Program (or a work based on it, 135 | under Section 2) in object code or executable form under the terms of 136 | Sections 1 and 2 above provided that you also do one of the following: 137 | 138 | a) Accompany it with the complete corresponding machine-readable 139 | source code, which must be distributed under the terms of Sections 140 | 1 and 2 above on a medium customarily used for software interchange; or, 141 | 142 | b) Accompany it with a written offer, valid for at least three 143 | years, to give any third party, for a charge no more than your 144 | cost of physically performing source distribution, a complete 145 | machine-readable copy of the corresponding source code, to be 146 | distributed under the terms of Sections 1 and 2 above on a medium 147 | customarily used for software interchange; or, 148 | 149 | c) Accompany it with the information you received as to the offer 150 | to distribute corresponding source code. (This alternative is 151 | allowed only for noncommercial distribution and only if you 152 | received the program in object code or executable form with such 153 | an offer, in accord with Subsection b above.) 154 | 155 | The source code for a work means the preferred form of the work for 156 | making modifications to it. For an executable work, complete source 157 | code means all the source code for all modules it contains, plus any 158 | associated interface definition files, plus the scripts used to 159 | control compilation and installation of the executable. However, as a 160 | special exception, the source code distributed need not include 161 | anything that is normally distributed (in either source or binary 162 | form) with the major components (compiler, kernel, and so on) of the 163 | operating system on which the executable runs, unless that component 164 | itself accompanies the executable. 165 | 166 | If distribution of executable or object code is made by offering 167 | access to copy from a designated place, then offering equivalent 168 | access to copy the source code from the same place counts as 169 | distribution of the source code, even though third parties are not 170 | compelled to copy the source along with the object code. 171 | 172 | 4. You may not copy, modify, sublicense, or distribute the Program 173 | except as expressly provided under this License. Any attempt 174 | otherwise to copy, modify, sublicense or distribute the Program is 175 | void, and will automatically terminate your rights under this License. 
176 | However, parties who have received copies, or rights, from you under 177 | this License will not have their licenses terminated so long as such 178 | parties remain in full compliance. 179 | 180 | 5. You are not required to accept this License, since you have not 181 | signed it. However, nothing else grants you permission to modify or 182 | distribute the Program or its derivative works. These actions are 183 | prohibited by law if you do not accept this License. Therefore, by 184 | modifying or distributing the Program (or any work based on the 185 | Program), you indicate your acceptance of this License to do so, and 186 | all its terms and conditions for copying, distributing or modifying 187 | the Program or works based on it. 188 | 189 | 6. Each time you redistribute the Program (or any work based on the 190 | Program), the recipient automatically receives a license from the 191 | original licensor to copy, distribute or modify the Program subject to 192 | these terms and conditions. You may not impose any further 193 | restrictions on the recipients' exercise of the rights granted herein. 194 | You are not responsible for enforcing compliance by third parties to 195 | this License. 196 | 197 | 7. If, as a consequence of a court judgment or allegation of patent 198 | infringement or for any other reason (not limited to patent issues), 199 | conditions are imposed on you (whether by court order, agreement or 200 | otherwise) that contradict the conditions of this License, they do not 201 | excuse you from the conditions of this License. If you cannot 202 | distribute so as to satisfy simultaneously your obligations under this 203 | License and any other pertinent obligations, then as a consequence you 204 | may not distribute the Program at all. For example, if a patent 205 | license would not permit royalty-free redistribution of the Program by 206 | all those who receive copies directly or indirectly through you, then 207 | the only way you could satisfy both it and this License would be to 208 | refrain entirely from distribution of the Program. 209 | 210 | If any portion of this section is held invalid or unenforceable under 211 | any particular circumstance, the balance of the section is intended to 212 | apply and the section as a whole is intended to apply in other 213 | circumstances. 214 | 215 | It is not the purpose of this section to induce you to infringe any 216 | patents or other property right claims or to contest validity of any 217 | such claims; this section has the sole purpose of protecting the 218 | integrity of the free software distribution system, which is 219 | implemented by public license practices. Many people have made 220 | generous contributions to the wide range of software distributed 221 | through that system in reliance on consistent application of that 222 | system; it is up to the author/donor to decide if he or she is willing 223 | to distribute software through any other system and a licensee cannot 224 | impose that choice. 225 | 226 | This section is intended to make thoroughly clear what is believed to 227 | be a consequence of the rest of this License. 228 | 229 | 8. 
If the distribution and/or use of the Program is restricted in 230 | certain countries either by patents or by copyrighted interfaces, the 231 | original copyright holder who places the Program under this License 232 | may add an explicit geographical distribution limitation excluding 233 | those countries, so that distribution is permitted only in or among 234 | countries not thus excluded. In such case, this License incorporates 235 | the limitation as if written in the body of this License. 236 | 237 | 9. The Free Software Foundation may publish revised and/or new versions 238 | of the General Public License from time to time. Such new versions will 239 | be similar in spirit to the present version, but may differ in detail to 240 | address new problems or concerns. 241 | 242 | Each version is given a distinguishing version number. If the Program 243 | specifies a version number of this License which applies to it and "any 244 | later version", you have the option of following the terms and conditions 245 | either of that version or of any later version published by the Free 246 | Software Foundation. If the Program does not specify a version number of 247 | this License, you may choose any version ever published by the Free Software 248 | Foundation. 249 | 250 | 10. If you wish to incorporate parts of the Program into other free 251 | programs whose distribution conditions are different, write to the author 252 | to ask for permission. For software which is copyrighted by the Free 253 | Software Foundation, write to the Free Software Foundation; we sometimes 254 | make exceptions for this. Our decision will be guided by the two goals 255 | of preserving the free status of all derivatives of our free software and 256 | of promoting the sharing and reuse of software generally. 257 | 258 | NO WARRANTY 259 | 260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY 261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN 262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED 264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS 266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE 267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, 268 | REPAIR OR CORRECTION. 269 | 270 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR 272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING 274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED 275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY 276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER 277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE 278 | POSSIBILITY OF SUCH DAMAGES. 279 | 280 | END OF TERMS AND CONDITIONS 281 | 282 | How to Apply These Terms to Your New Programs 283 | 284 | If you develop a new program, and you want it to be of the greatest 285 | possible use to the public, the best way to achieve this is to make it 286 | free software which everyone can redistribute and change under these terms. 
287 | 288 | To do so, attach the following notices to the program. It is safest 289 | to attach them to the start of each source file to most effectively 290 | convey the exclusion of warranty; and each file should have at least 291 | the "copyright" line and a pointer to where the full notice is found. 292 | 293 | 294 | Copyright (C) 295 | 296 | This program is free software; you can redistribute it and/or modify 297 | it under the terms of the GNU General Public License as published by 298 | the Free Software Foundation; either version 2 of the License, or 299 | (at your option) any later version. 300 | 301 | This program is distributed in the hope that it will be useful, 302 | but WITHOUT ANY WARRANTY; without even the implied warranty of 303 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 304 | GNU General Public License for more details. 305 | 306 | You should have received a copy of the GNU General Public License along 307 | with this program; if not, write to the Free Software Foundation, Inc., 308 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 309 | 310 | Also add information on how to contact you by electronic and paper mail. 311 | 312 | If the program is interactive, make it output a short notice like this 313 | when it starts in an interactive mode: 314 | 315 | Gnomovision version 69, Copyright (C) year name of author 316 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 317 | This is free software, and you are welcome to redistribute it 318 | under certain conditions; type `show c' for details. 319 | 320 | The hypothetical commands `show w' and `show c' should show the appropriate 321 | parts of the General Public License. Of course, the commands you use may 322 | be called something other than `show w' and `show c'; they could even be 323 | mouse-clicks or menu items--whatever suits your program. 324 | 325 | You should also get your employer (if you work as a programmer) or your 326 | school, if any, to sign a "copyright disclaimer" for the program, if 327 | necessary. Here is a sample; alter the names: 328 | 329 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program 330 | `Gnomovision' (which makes passes at compilers) written by James Hacker. 331 | 332 | , 1 April 1989 333 | Ty Coon, President of Vice 334 | 335 | This General Public License does not permit incorporating your program into 336 | proprietary programs. If your program is a subroutine library, you may 337 | consider it more useful to permit linking proprietary applications with the 338 | library. If this is what you want to do, use the GNU Lesser General 339 | Public License instead of this License. 340 | -------------------------------------------------------------------------------- /danmaku/tars/__tars.py: -------------------------------------------------------------------------------- 1 | # Tencent is pleased to support the open source community by making Tars available. 2 | # 3 | # Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved. 4 | # 5 | # Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 6 | # in compliance with the License. You may obtain a copy of the License at 7 | # 8 | # https://opensource.org/licenses/BSD-3-Clause 9 | # 10 | # Unless required by applicable law or agreed to in writing, software distributed 11 | # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 12 | # CONDITIONS OF ANY KIND, either express or implied. 
See the License for the 13 | # specific language governing permissions and limitations under the License. 14 | # 15 | 16 | import struct 17 | from .__util import util 18 | from .exception import * 19 | 20 | 21 | class BinBuffer: 22 | def __init__(self, buff=bytes()): 23 | self.buffer = buff 24 | self.position = 0 25 | 26 | def writeBuf(self, buff): 27 | self.buffer += buff 28 | 29 | def getBuffer(self): 30 | return self.buffer 31 | 32 | def length(self): 33 | return len(self.buffer) 34 | 35 | 36 | class DataHead: 37 | EN_INT8 = 0 38 | EN_INT16 = 1 39 | EN_INT32 = 2 40 | EN_INT64 = 3 41 | EN_FLOAT = 4 42 | EN_DOUBLE = 5 43 | EN_STRING1 = 6 44 | EN_STRING4 = 7 45 | EN_MAP = 8 46 | EN_LIST = 9 47 | EN_STRUCTBEGIN = 10 48 | EN_STRUCTEND = 11 49 | EN_ZERO = 12 50 | EN_BYTES = 13 51 | 52 | @staticmethod 53 | def writeTo(buff, tag, vtype): 54 | if tag < 15: 55 | helper = (tag << 4) | vtype 56 | buff.writeBuf(struct.pack("!B", helper)) 57 | else: 58 | helper = (0xF0 | vtype) << 8 | tag 59 | buff.writeBuf(struct.pack("!H", helper)) 60 | 61 | 62 | class TarsOutputStream(object): 63 | def __init__(self): 64 | self.__buffer = BinBuffer() 65 | 66 | def __writeBoolean(self, tag, value): 67 | self.__writeInt8(tag, int(value)) 68 | 69 | def __writeInt8(self, tag, value): 70 | if value == 0: 71 | DataHead.writeTo(self.__buffer, tag, DataHead.EN_ZERO) 72 | else: 73 | DataHead.writeTo(self.__buffer, tag, DataHead.EN_INT8) 74 | self.__buffer.writeBuf(struct.pack("!b", value)) 75 | 76 | def __writeInt16(self, tag, value): 77 | if value >= -128 and value <= 127: 78 | self.__writeInt8(tag, value) 79 | else: 80 | DataHead.writeTo(self.__buffer, tag, DataHead.EN_INT16) 81 | self.__buffer.writeBuf(struct.pack("!h", value)) 82 | 83 | def __writeInt32(self, tag, value): 84 | if value >= -32768 and value <= 32767: 85 | self.__writeInt16(tag, value) 86 | else: 87 | DataHead.writeTo(self.__buffer, tag, DataHead.EN_INT32) 88 | self.__buffer.writeBuf(struct.pack("!i", value)) 89 | 90 | def __writeInt64(self, tag, value): 91 | if value >= (-2147483648) and value <= 2147483647: 92 | self.__writeInt32(tag, value) 93 | else: 94 | DataHead.writeTo(self.__buffer, tag, DataHead.EN_INT64) 95 | self.__buffer.writeBuf(struct.pack("!q", value)) 96 | 97 | def __writeFloat(self, tag, value): 98 | DataHead.writeTo(self.__buffer, tag, DataHead.EN_FLOAT) 99 | self.__buffer.writeBuf(struct.pack("!f", value)) 100 | 101 | def __writeDouble(self, tag, value): 102 | DataHead.writeTo(self.__buffer, tag, DataHead.EN_DOUBLE) 103 | self.__buffer.writeBuf(struct.pack("!d", value)) 104 | 105 | def __writeString(self, tag, value): 106 | length = len(value) 107 | if length <= 255: 108 | DataHead.writeTo(self.__buffer, tag, DataHead.EN_STRING1) 109 | self.__buffer.writeBuf(struct.pack("!B", length)) 110 | self.__buffer.writeBuf(str.encode(value)) 111 | else: 112 | DataHead.writeTo(self.__buffer, tag, DataHead.EN_STRING4) 113 | self.__buffer.writeBuf(struct.pack("!I", length)) 114 | self.__buffer.writeBuf(str.encode(value)) 115 | 116 | def __writeBytes(self, tag, value): 117 | DataHead.writeTo(self.__buffer, tag, DataHead.EN_BYTES) 118 | DataHead.writeTo(self.__buffer, 0, DataHead.EN_INT8) 119 | length = len(value) 120 | self.__writeInt32(0, length) 121 | self.__buffer.buffer += value 122 | self.__buffer.position += length 123 | 124 | def __writeMap(self, coder, tag, value): 125 | DataHead.writeTo(self.__buffer, tag, DataHead.EN_MAP) 126 | self.__writeInt32(0, len(value)) 127 | for key in value: 128 | self.write(coder.ktype, 0, key) 129 | 
self.write(coder.vtype, 1, value.get(key)) 130 | 131 | def __writeVector(self, coder, tag, value): 132 | DataHead.writeTo(self.__buffer, tag, DataHead.EN_LIST) 133 | n = len(value) 134 | self.__writeInt32(0, n) 135 | for i in range(0, n): 136 | self.write(value.vtype, 0, value[i]) 137 | 138 | def __writeStruct(self, coder, tag, value): 139 | DataHead.writeTo(self.__buffer, tag, DataHead.EN_STRUCTBEGIN) 140 | value.writeTo(self, value) 141 | DataHead.writeTo(self.__buffer, 0, DataHead.EN_STRUCTEND) 142 | 143 | def write(self, coder, tag, value): 144 | if coder.__tars_index__ == 999: 145 | self.__writeBoolean(tag, value) 146 | elif coder.__tars_index__ == 0: 147 | self.__writeInt8(tag, value) 148 | elif coder.__tars_index__ == 1: 149 | self.__writeInt16(tag, value) 150 | elif coder.__tars_index__ == 2: 151 | self.__writeInt32(tag, value) 152 | elif coder.__tars_index__ == 3: 153 | self.__writeInt64(tag, value) 154 | elif coder.__tars_index__ == 4: 155 | self.__writeFloat(tag, value) 156 | elif coder.__tars_index__ == 5: 157 | self.__writeDouble(tag, value) 158 | elif coder.__tars_index__ == 13: 159 | self.__writeBytes(tag, value) 160 | elif coder.__tars_index__ == 67: 161 | self.__writeString(tag, value) 162 | elif coder.__tars_index__ == 8: 163 | self.__writeMap(coder, tag, value) 164 | elif coder.__tars_index__ == 9: 165 | self.__writeVector(coder, tag, value) 166 | elif coder.__tars_index__ == 1011: 167 | self.__writeStruct(coder, tag, value) 168 | else: 169 | raise TarsTarsUnsupportType("tars unsupport data type:" % coder.__tars_index__) 170 | 171 | def getBuffer(self): 172 | return self.__buffer.getBuffer() 173 | 174 | def printHex(self): 175 | util.printHex(self.__buffer.getBuffer()) 176 | 177 | 178 | class TarsInputStream(object): 179 | def __init__(self, buff): 180 | self.__buffer = BinBuffer(buff) 181 | 182 | def __peekFrom(self): 183 | (helper,) = struct.unpack_from("!B", self.__buffer.buffer, self.__buffer.position) 184 | t = (helper & 0xF0) >> 4 185 | p = helper & 0x0F 186 | l = 1 187 | if t >= 15: 188 | l = 2 189 | (t,) = struct.unpack_from("!B", self.__buffer.buffer, self.__buffer.position + 1) 190 | return (t, p, l) 191 | 192 | def __readFrom(self): 193 | t, p, l = self.__peekFrom() 194 | self.__buffer.position += l 195 | return (t, p, l) 196 | 197 | def __skipToStructEnd(self): 198 | t, p, l = self.__readFrom() 199 | while p != DataHead.EN_STRUCTEND: 200 | self.__skipField(p) 201 | t, p, l = self.__readFrom() 202 | 203 | def __skipField(self, p): 204 | if p == DataHead.EN_INT8: 205 | self.__buffer.position += 1 206 | elif p == DataHead.EN_INT16: 207 | self.__buffer.position += 2 208 | elif p == DataHead.EN_INT32: 209 | self.__buffer.position += 4 210 | elif p == DataHead.EN_INT64: 211 | self.__buffer.position += 8 212 | elif p == DataHead.EN_FLOAT: 213 | self.__buffer.position += 4 214 | elif p == DataHead.EN_DOUBLE: 215 | self.__buffer.position += 8 216 | elif p == DataHead.EN_STRING1: 217 | (length,) = struct.unpack_from("!B", self.__buffer.buffer, self.__buffer.position) 218 | self.__buffer.position += length + 1 219 | elif p == DataHead.EN_STRING4: 220 | (length,) = struct.unpack_from("!i", self.__buffer.buffer, self.__buffer.position) 221 | self.__buffer.position += length + 4 222 | elif p == DataHead.EN_MAP: 223 | size = self.__readInt32(0, True) 224 | for i in range(0, size * 2): 225 | ti, pi, li = self.__readFrom() 226 | self.__skipField(pi) 227 | elif p == DataHead.EN_LIST: 228 | size = self.__readInt32(0, True) 229 | for i in range(0, size): 230 | ti, pi, li = 
self.__readFrom() 231 | self.__skipField(pi) 232 | elif p == DataHead.EN_BYTES: 233 | ti, pi, li = self.__readFrom() 234 | if pi != DataHead.EN_INT8: 235 | raise TarsTarsDecodeInvalidValue( 236 | "skipField with invalid type, type value: %d, %d." % (p, pi) 237 | ) 238 | size = self.__readInt32(0, True) 239 | self.__buffer.position += size 240 | elif p == DataHead.EN_STRUCTBEGIN: 241 | self.__skipToStructEnd() 242 | elif p == DataHead.EN_STRUCTEND: 243 | pass 244 | # self.__buffer.position += length + 1; 245 | elif p == DataHead.EN_ZERO: 246 | pass 247 | # self.__buffer.position += length + 1; 248 | else: 249 | raise TarsTarsDecodeMismatch("skipField with invalid type, type value:%d" % p) 250 | 251 | def __skipToTag(self, tag): 252 | length = self.__buffer.length() 253 | while self.__buffer.position < length: 254 | t, p, l = self.__peekFrom() 255 | if tag <= t or p == DataHead.EN_STRUCTEND: 256 | return False if (p == DataHead.EN_STRUCTEND) else (t == tag) 257 | 258 | self.__buffer.position += l 259 | self.__skipField(p) 260 | return False 261 | 262 | def __readBoolean(self, tag, require, default=None): 263 | v = self.__readInt8(tag, require) 264 | if v is None: 265 | return default 266 | else: 267 | return v != 0 268 | 269 | def __readInt8(self, tag, require, default=None): 270 | if self.__skipToTag(tag): 271 | t, p, l = self.__readFrom() 272 | if p == DataHead.EN_ZERO: 273 | return 0 274 | elif p == DataHead.EN_INT8: 275 | (value,) = struct.unpack_from("!b", self.__buffer.buffer, self.__buffer.position) 276 | self.__buffer.position += 1 277 | return value 278 | else: 279 | raise TarsTarsDecodeMismatch( 280 | "read 'Char' type mismatch, tag: %d , get type: %d." % (tag, p) 281 | ) 282 | elif require: 283 | raise TarsTarsDecodeRequireNotExist("require field not exist, tag: %d" % tag) 284 | return default 285 | 286 | def __readInt16(self, tag, require, default=None): 287 | if self.__skipToTag(tag): 288 | t, p, l = self.__readFrom() 289 | if p == DataHead.EN_ZERO: 290 | return 0 291 | elif p == DataHead.EN_INT8: 292 | (value,) = struct.unpack_from("!b", self.__buffer.buffer, self.__buffer.position) 293 | self.__buffer.position += 1 294 | return value 295 | elif p == DataHead.EN_INT16: 296 | (value,) = struct.unpack_from("!h", self.__buffer.buffer, self.__buffer.position) 297 | self.__buffer.position += 2 298 | return value 299 | else: 300 | raise TarsTarsDecodeMismatch( 301 | "read 'Short' type mismatch, tag: %d , get type: %d." % (tag, p) 302 | ) 303 | elif require: 304 | raise TarsTarsDecodeRequireNotExist("require field not exist, tag: %d" % tag) 305 | return default 306 | 307 | def __readInt32(self, tag, require, default=None): 308 | if self.__skipToTag(tag): 309 | t, p, l = self.__readFrom() 310 | if p == DataHead.EN_ZERO: 311 | return 0 312 | elif p == DataHead.EN_INT8: 313 | (value,) = struct.unpack_from("!b", self.__buffer.buffer, self.__buffer.position) 314 | self.__buffer.position += 1 315 | return value 316 | elif p == DataHead.EN_INT16: 317 | (value,) = struct.unpack_from("!h", self.__buffer.buffer, self.__buffer.position) 318 | self.__buffer.position += 2 319 | return value 320 | elif p == DataHead.EN_INT32: 321 | (value,) = struct.unpack_from("!i", self.__buffer.buffer, self.__buffer.position) 322 | self.__buffer.position += 4 323 | return value 324 | else: 325 | raise TarsTarsDecodeMismatch( 326 | "read 'Int32' type mismatch, tag: %d, get type: %d." 
% (tag, p) 327 | ) 328 | elif require: 329 | raise TarsTarsDecodeRequireNotExist("require field not exist, tag: %d" % tag) 330 | return default 331 | 332 | def __readInt64(self, tag, require, default=None): 333 | if self.__skipToTag(tag): 334 | t, p, l = self.__readFrom() 335 | if p == DataHead.EN_ZERO: 336 | return 0 337 | elif p == DataHead.EN_INT8: 338 | (value,) = struct.unpack_from("!b", self.__buffer.buffer, self.__buffer.position) 339 | self.__buffer.position += 1 340 | return value 341 | elif p == DataHead.EN_INT16: 342 | (value,) = struct.unpack_from("!h", self.__buffer.buffer, self.__buffer.position) 343 | self.__buffer.position += 2 344 | return value 345 | elif p == DataHead.EN_INT32: 346 | (value,) = struct.unpack_from("!i", self.__buffer.buffer, self.__buffer.position) 347 | self.__buffer.position += 4 348 | return value 349 | elif p == DataHead.EN_INT64: 350 | (value,) = struct.unpack_from("!q", self.__buffer.buffer, self.__buffer.position) 351 | self.__buffer.position += 8 352 | return value 353 | else: 354 | raise TarsTarsDecodeMismatch( 355 | "read 'Int64' type mismatch, tag: %d, get type: %d." % (tag, p) 356 | ) 357 | elif require: 358 | raise TarsTarsDecodeRequireNotExist("require field not exist, tag: %d" % tag) 359 | return default 360 | 361 | def __readString(self, tag, require, default=None): 362 | if self.__skipToTag(tag): 363 | t, p, l = self.__readFrom() 364 | if p == DataHead.EN_STRING1: 365 | (length,) = struct.unpack_from("!B", self.__buffer.buffer, self.__buffer.position) 366 | self.__buffer.position += 1 367 | (value,) = struct.unpack_from( 368 | str(length) + "s", self.__buffer.buffer, self.__buffer.position 369 | ) 370 | self.__buffer.position += length 371 | return value 372 | elif p == DataHead.EN_STRING4: 373 | (length,) = struct.unpack_from("!i", self.__buffer.buffer, self.__buffer.position) 374 | self.__buffer.position += 4 375 | (value,) = struct.unpack_from( 376 | str(length) + "s", self.__buffer.buffer, self.__buffer.position 377 | ) 378 | self.__buffer.position += length 379 | return value 380 | else: 381 | raise TarsTarsDecodeMismatch( 382 | "read 'string' type mismatch, tag: %d, get type: %d." % (tag, p) 383 | ) 384 | elif require: 385 | raise TarsTarsDecodeRequireNotExist("require field not exist, tag: %d" % tag) 386 | return default 387 | 388 | def __readBytes(self, tag, require, default=None): 389 | if self.__skipToTag(tag): 390 | t, p, l = self.__readFrom() 391 | if p == DataHead.EN_BYTES: 392 | ti, pi, li = self.__readFrom() 393 | if pi != DataHead.EN_INT8: 394 | raise TarsTarsDecodeMismatch( 395 | "type mismatch, tag: %d, type: %d, %d" % (tag, p, pi) 396 | ) 397 | size = self.__readInt32(0, True) 398 | (value,) = struct.unpack_from( 399 | str(size) + "s", self.__buffer.buffer, self.__buffer.position 400 | ) 401 | self.__buffer.position += size 402 | return value 403 | else: 404 | raise TarsTarsDecodeMismatch("type mismatch, tag: %d, type: %d" % (tag, p)) 405 | elif require: 406 | raise TarsTarsDecodeRequireNotExist("require field not exist, tag: %d" % tag) 407 | return default 408 | 409 | def __readFloat(self, tag, require, default=None): 410 | if self.__skipToTag(tag): 411 | t, p, l = self.__readFrom() 412 | if p == DataHead.EN_ZERO: 413 | return 0 414 | elif p == DataHead.EN_FLOAT: 415 | (value,) = struct.unpack_from("!f", self.__buffer.buffer, self.__buffer.position) 416 | self.__buffer.position += 4 417 | return value 418 | else: 419 | raise TarsTarsDecodeMismatch( 420 | "read 'Float' type mismatch, tag: %d, get type: %d." 
% (tag, p) 421 | ) 422 | elif require: 423 | raise TarsTarsDecodeRequireNotExist("require field not exist, tag: %d" % tag) 424 | return default 425 | 426 | def __readDouble(self, tag, require, default=None): 427 | if self.__skipToTag(tag): 428 | t, p, l = self.__readFrom() 429 | if p == DataHead.EN_ZERO: 430 | return 0 431 | elif p == DataHead.EN_FLOAT: 432 | (value,) = struct.unpack_from("!f", self.__buffer.buffer, self.__buffer.position) 433 | self.__buffer.position += 4 434 | return value 435 | elif p == DataHead.EN_DOUBLE: 436 | (value,) = struct.unpack_from("!d", self.__buffer.buffer, self.__buffer.position) 437 | self.__buffer.position += 8 438 | return value 439 | else: 440 | raise TarsTarsDecodeMismatch( 441 | "read 'Double' type mismatch, tag: %d, get type: %d." % (tag, p) 442 | ) 443 | elif require: 444 | raise TarsTarsDecodeRequireNotExist("require field not exist, tag: %d" % tag) 445 | return default 446 | 447 | def __readStruct(self, coder, tag, require, default=None): 448 | if self.__skipToTag(tag): 449 | t, p, l = self.__readFrom() 450 | if p != DataHead.EN_STRUCTBEGIN: 451 | raise TarsTarsDecodeMismatch( 452 | "read 'struct' type mismatch, tag: %d, get type: %d." % (tag, p) 453 | ) 454 | value = coder.readFrom(self) 455 | self.__skipToStructEnd() 456 | return value 457 | elif require: 458 | raise TarsTarsDecodeRequireNotExist("require field not exist, tag: %d" % tag) 459 | return default 460 | 461 | def __readMap(self, coder, tag, require, default=None): 462 | if self.__skipToTag(tag): 463 | t, p, l = self.__readFrom() 464 | if p == DataHead.EN_MAP: 465 | size = self.__readInt32(0, True) 466 | omap = coder() 467 | for i in range(0, size): 468 | k = self.read(coder.ktype, 0, True) 469 | v = self.read(coder.vtype, 1, True) 470 | omap[k] = v 471 | return omap 472 | else: 473 | raise TarsTarsDecodeMismatch( 474 | "read 'map' type mismatch, tag: %d, get type: %d." % (tag, p) 475 | ) 476 | elif require: 477 | raise TarsTarsDecodeRequireNotExist("require field not exist, tag: %d" % tag) 478 | return default 479 | 480 | def __readVector(self, coder, tag, require, default=None): 481 | if self.__skipToTag(tag): 482 | t, p, l = self.__readFrom() 483 | if p == DataHead.EN_LIST: 484 | size = self.__readInt32(0, True) 485 | value = coder() 486 | for i in range(0, size): 487 | k = self.read(coder.vtype, 0, True) 488 | value.append(k) 489 | return value 490 | else: 491 | raise TarsTarsDecodeMismatch( 492 | "read 'vector' type mismatch, tag: %d, get type: %d." 
% (tag, p) 493 | ) 494 | elif require: 495 | raise TarsTarsDecodeRequireNotExist("require field not exist, tag: %d" % tag) 496 | return default 497 | 498 | def read(self, coder, tag, require, default=None): 499 | if coder.__tars_index__ == 999: 500 | return self.__readBoolean(tag, require, default) 501 | elif coder.__tars_index__ == 0: 502 | return self.__readInt8(tag, require, default) 503 | elif coder.__tars_index__ == 1: 504 | return self.__readInt16(tag, require, default) 505 | elif coder.__tars_index__ == 2: 506 | return self.__readInt32(tag, require, default) 507 | elif coder.__tars_index__ == 3: 508 | return self.__readInt64(tag, require, default) 509 | elif coder.__tars_index__ == 4: 510 | return self.__readFloat(tag, require, default) 511 | elif coder.__tars_index__ == 5: 512 | return self.__readDouble(tag, require, default) 513 | elif coder.__tars_index__ == 13: 514 | return self.__readBytes(tag, require, default) 515 | elif coder.__tars_index__ == 67: 516 | return self.__readString(tag, require, default) 517 | elif coder.__tars_index__ == 8: 518 | return self.__readMap(coder, tag, require, default) 519 | elif coder.__tars_index__ == 9: 520 | return self.__readVector(coder, tag, require, default) 521 | elif coder.__tars_index__ == 1011: 522 | return self.__readStruct(coder, tag, require, default) 523 | else: 524 | raise TarsTarsUnsupportType("tars unsupport data type:" % coder.__tars_index__) 525 | 526 | def printHex(self): 527 | util.printHex(self.__buffer.buffer) 528 | -------------------------------------------------------------------------------- /danmaku/tars/__adapterproxy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # filename: __adapterproxymanager.py_compiler 4 | 5 | # Tencent is pleased to support the open source community by making Tars available. 6 | # 7 | # Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved. 8 | # 9 | # Licensed under the BSD 3-Clause License (the "License"); you may not use this file except 10 | # in compliance with the License. You may obtain a copy of the License at 11 | # 12 | # https://opensource.org/licenses/BSD-3-Clause 13 | # 14 | # Unless required by applicable law or agreed to in writing, software distributed 15 | # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 16 | # CONDITIONS OF ANY KIND, either express or implied. See the License for the 17 | # specific language governing permissions and limitations under the License. 18 | # 19 | 20 | """ 21 | @version: 0.01 22 | @brief: 将rpc部分中的adapterproxymanager抽离出来,实现不同的负载均衡 23 | """ 24 | 25 | from enum import Enum 26 | import random 27 | import socket 28 | import select 29 | import os 30 | import time 31 | 32 | 33 | from .__util import LockGuard, NewLock, ConsistentHashNew 34 | from .__trans import EndPointInfo 35 | from .__logger import tarsLogger 36 | from . 
import exception 37 | from .__trans import TcpTransceiver 38 | from .__TimeoutQueue import ReqMessage 39 | from .exception import TarsException 40 | 41 | 42 | # 因为循环import的问题只能放这里,不能放文件开始处 43 | from .QueryF import QueryFProxy 44 | from .QueryF import QueryFPrxCallback 45 | 46 | 47 | class AdapterProxy: 48 | """ 49 | @brief: 每一个Adapter管理一个服务端端口的连接,数据收发 50 | """ 51 | 52 | def __init__(self): 53 | tarsLogger.debug("AdapterProxy:__init__") 54 | self.__closeTrans = False 55 | self.__trans = None 56 | self.__object = None 57 | self.__reactor = None 58 | self.__lock = None 59 | self.__asyncProc = None 60 | self.__activeStateInReg = True 61 | 62 | @property 63 | def activatestateinreg(self): 64 | return self.__activeStateInReg 65 | 66 | @activatestateinreg.setter 67 | def activatestateinreg(self, value): 68 | self.__activeStateInReg = value 69 | 70 | def __del__(self): 71 | tarsLogger.debug("AdapterProxy:__del__") 72 | 73 | def initialize(self, endPointInfo, objectProxy, reactor, asyncProc): 74 | """ 75 | @brief: 初始化 76 | @param endPointInfo: 连接对端信息 77 | @type endPointInfo: EndPointInfo 78 | @type objectProxy: ObjectProxy 79 | @type reactor: FDReactor 80 | @type asyncProc: AsyncProcThread 81 | """ 82 | tarsLogger.debug("AdapterProxy:initialize") 83 | self.__closeTrans = False 84 | self.__trans = TcpTransceiver(endPointInfo) 85 | self.__object = objectProxy 86 | self.__reactor = reactor 87 | # self.__lock = threading.Lock() 88 | self.__lock = NewLock() 89 | self.__asyncProc = asyncProc 90 | 91 | def terminate(self): 92 | """ 93 | @brief: 关闭 94 | """ 95 | tarsLogger.debug("AdapterProxy:terminate") 96 | self.setCloseTrans(True) 97 | 98 | def trans(self): 99 | """ 100 | @brief: 获取传输类 101 | @return: 负责网络传输的trans 102 | @rtype: Transceiver 103 | """ 104 | return self.__trans 105 | 106 | def invoke(self, reqmsg): 107 | """ 108 | @brief: 远程过程调用处理方法 109 | @param reqmsg: 请求响应报文 110 | @type reqmsg: ReqMessage 111 | @return: 错误码:0表示成功,-1表示连接失败 112 | @rtype: int 113 | """ 114 | tarsLogger.debug("AdapterProxy:invoke") 115 | assert self.__trans 116 | 117 | if not self.__trans.hasConnected() and not self.__trans.isConnecting: 118 | # -1表示连接失败 119 | return -1 120 | 121 | reqmsg.request.iRequestId = self.__object.getTimeoutQueue().generateId() 122 | self.__object.getTimeoutQueue().push(reqmsg, reqmsg.request.iRequestId) 123 | 124 | self.__reactor.notify(self) 125 | 126 | return 0 127 | 128 | def finished(self, rsp): 129 | """ 130 | @brief: 远程过程调用返回处理 131 | @param rsp: 响应报文 132 | @type rsp: ResponsePacket 133 | @return: 函数是否执行成功 134 | @rtype: bool 135 | """ 136 | tarsLogger.debug("AdapterProxy:finished") 137 | reqmsg = self.__object.getTimeoutQueue().pop(rsp.iRequestId) 138 | if not reqmsg: 139 | tarsLogger.error( 140 | "finished, can not get ReqMessage, may be timeout, id: %d", rsp.iRequestId 141 | ) 142 | return False 143 | 144 | reqmsg.response = rsp 145 | if reqmsg.type == ReqMessage.SYNC_CALL: 146 | return reqmsg.servant._finished(reqmsg) 147 | elif reqmsg.callback: 148 | self.__asyncProc.put(reqmsg) 149 | return True 150 | 151 | tarsLogger.error( 152 | "finished, adapter proxy finish fail, id: %d, ret: %d", rsp.iRequestId, rsp.iRet 153 | ) 154 | return False 155 | 156 | # 检测连接是否失败,失效时重连 157 | def checkActive(self, forceConnect=False): 158 | """ 159 | @brief: 检测连接是否失效 160 | @param forceConnect: 是否强制发起连接,为true时不对状态进行判断就发起连接 161 | @type forceConnect: bool 162 | @return: 连接是否有效 163 | @rtype: bool 164 | """ 165 | tarsLogger.debug("AdapterProxy:checkActive") 166 | # self.__lock.acquire() 167 | lock = 
LockGuard(self.__lock) 168 | tarsLogger.info( 169 | "checkActive, %s, forceConnect: %s", self.__trans.getEndPointInfo(), forceConnect 170 | ) 171 | 172 | if not self.__trans.isConnecting() and not self.__trans.hasConnected(): 173 | self.doReconnect() 174 | 175 | # self.__lock.release() 176 | return self.__trans.isConnecting() or self.__trans.hasConnected() 177 | 178 | def doReconnect(self): 179 | """ 180 | @brief: 重新发起连接 181 | @return: None 182 | @rtype: None 183 | """ 184 | tarsLogger.debug("AdapterProxy:doReconnect") 185 | assert self.__trans 186 | 187 | self.__trans.reInit() 188 | tarsLogger.info( 189 | "doReconnect, connect: %s, fd:%d", self.__trans.getEndPointInfo(), self.__trans.getFd() 190 | ) 191 | 192 | self.__reactor.registerAdapter(self, select.EPOLLIN | select.EPOLLOUT) 193 | 194 | def sendRequest(self): 195 | """ 196 | @brief: 把队列中的请求放到Transceiver的发送缓存里 197 | @return: 放入缓存的数据长度 198 | @rtype: int 199 | """ 200 | tarsLogger.debug("AdapterProxy:sendRequest") 201 | if not self.__trans.hasConnected(): 202 | return False 203 | 204 | reqmsg = self.__object.popRequest() 205 | blen = 0 206 | while reqmsg: 207 | reqmsg.adapter = self 208 | buf = reqmsg.packReq() 209 | self.__trans.writeToSendBuf(buf) 210 | tarsLogger.info("sendRequest, id: %d, len: %d", reqmsg.request.iRequestId, len(buf)) 211 | blen += len(buf) 212 | # 合并一次发送的包 最大合并至8k 提高异步时客户端效率? 213 | if self.__trans.getEndPointInfo().getConnType() == EndPointInfo.SOCK_UDP or blen > 8192: 214 | break 215 | reqmsg = self.__object.popRequest() 216 | 217 | return blen 218 | 219 | def finishConnect(self): 220 | """ 221 | @brief: 使用的非阻塞socket连接不能立刻判断是否连接成功, 222 | 在epoll响应后调用此函数处理connect结束后的操作 223 | @return: 是否连接成功 224 | @rtype: bool 225 | """ 226 | tarsLogger.debug("AdapterProxy:finishConnect") 227 | success = True 228 | errmsg = "" 229 | try: 230 | ret = self.__trans.getSock().getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) 231 | if ret: 232 | success = False 233 | errmsg = os.strerror(ret) 234 | except Exception as msg: 235 | errmsg = msg 236 | success = False 237 | 238 | if not success: 239 | self.__reactor.unregisterAdapter(self, socket.EPOLLIN | socket.EPOLLOUT) 240 | self.__trans.close() 241 | self.__trans.setConnFailed() 242 | tarsLogger.error( 243 | "AdapterProxy finishConnect, exception: %s, error: %s", 244 | self.__trans.getEndPointInfo(), 245 | errmsg, 246 | ) 247 | return False 248 | self.__trans.setConnected() 249 | self.__reactor.notify(self) 250 | tarsLogger.info( 251 | "AdapterProxy finishConnect, connect %s success", self.__trans.getEndPointInfo() 252 | ) 253 | return True 254 | 255 | def finishInvoke(self, isTimeout): 256 | pass 257 | 258 | # 弹出请求报文 259 | def popRequest(self): 260 | pass 261 | 262 | def shouldCloseTrans(self): 263 | """ 264 | @brief: 是否设置关闭连接 265 | @return: 关闭连接的flag的值 266 | @rtype: bool 267 | """ 268 | return self.__closeTrans 269 | 270 | def setCloseTrans(self, closeTrans): 271 | """ 272 | @brief: 设置关闭连接flag的值 273 | @param closeTrans: 是否关闭连接 274 | @type closeTrans: bool 275 | @return: None 276 | @rtype: None 277 | """ 278 | self.__closeTrans = closeTrans 279 | 280 | 281 | class QueryRegisterCallback(QueryFPrxCallback): 282 | def __init__(self, adpManager): 283 | self.__adpManager = adpManager 284 | super(QueryRegisterCallback, self).__init__() 285 | # QueryFPrxCallback.__init__(self) 286 | 287 | def callback_findObjectById4All(self, ret, activeEp, inactiveEp): 288 | eplist = [ 289 | EndPointInfo(x.host, x.port, x.timeout, x.weight, x.weightType) 290 | for x in activeEp 291 | if ret == 0 and x.istcp 292 
| ] 293 | ieplist = [ 294 | EndPointInfo(x.host, x.port, x.timeout, x.weight, x.weightType) 295 | for x in inactiveEp 296 | if ret == 0 and x.istcp 297 | ] 298 | self.__adpManager.setEndpoints(eplist, ieplist) 299 | 300 | def callback_findObjectById4All_exception(self, ret): 301 | tarsLogger.error("callback_findObjectById4All_exception ret: %d", ret) 302 | 303 | 304 | class EndpointWeightType(Enum): 305 | E_LOOP = 0 306 | E_STATIC_WEIGHT = 1 307 | 308 | 309 | class AdapterProxyManager: 310 | """ 311 | @brief: 管理Adapter 312 | """ 313 | 314 | def __init__(self): 315 | tarsLogger.debug("AdapterProxyManager:__init__") 316 | self.__comm = None 317 | self.__object = None 318 | # __adps的key=str(EndPointInfo) value=[EndPointInfo, AdapterProxy, cnt] 319 | # cnt是访问次数 320 | self.__adps = {} 321 | self.__iadps = {} 322 | self.__newLock = None 323 | self.__isDirectProxy = True 324 | self.__lastFreshTime = 0 325 | self.__queryRegisterCallback = QueryRegisterCallback(self) 326 | self.__regAdapterProxyDict = {} 327 | self.__lastConHashPrxList = [] 328 | self.__consistentHashWeight = None 329 | self.__weightType = EndpointWeightType.E_LOOP 330 | self.__update = True 331 | self.__lastWeightedProxyData = {} 332 | 333 | def initialize(self, comm, objectProxy, eplist): 334 | """ 335 | @brief: 初始化 336 | """ 337 | tarsLogger.debug("AdapterProxyManager:initialize") 338 | self.__comm = comm 339 | self.__object = objectProxy 340 | self.__newLock = NewLock() 341 | 342 | self.__isDirectProxy = len(eplist) > 0 343 | if self.__isDirectProxy: 344 | self.setEndpoints(eplist, {}) 345 | else: 346 | self.refreshEndpoints() 347 | 348 | def terminate(self): 349 | """ 350 | @brief: 释放资源 351 | """ 352 | tarsLogger.debug("AdapterProxyManager:terminate") 353 | # self.__lock.acquire() 354 | lock = LockGuard(self.__newLock) 355 | for ep, epinfo in self.__adps.items(): 356 | epinfo[1].terminate() 357 | self.__adps = {} 358 | self.__lock.release() 359 | 360 | def refreshEndpoints(self): 361 | """ 362 | @brief: 刷新服务器列表 363 | @return: 新的服务列表 364 | @rtype: EndPointInfo列表 365 | """ 366 | tarsLogger.debug("AdapterProxyManager:refreshEndpoints") 367 | if self.__isDirectProxy: 368 | return 369 | 370 | interval = self.__comm.getProperty("refresh-endpoint-interval", float) / 1000 371 | locator = self.__comm.getProperty("locator") 372 | 373 | if "@" not in locator: 374 | raise exception.TarsRegistryException("locator is not valid: " + locator) 375 | 376 | now = time.time() 377 | last = self.__lastFreshTime 378 | epSize = len(self.__adps) 379 | if last + interval < now or (epSize <= 0 and last + 2 < now): 380 | queryFPrx = self.__comm.stringToProxy(QueryFProxy, locator) 381 | # 首次访问是同步调用,之后访问是异步调用 382 | if epSize == 0 or last == 0: 383 | ret, activeEps, inactiveEps = queryFPrx.findObjectById4All(self.__object.name()) 384 | # 目前只支持TCP 385 | eplist = [ 386 | EndPointInfo(x.host, x.port, x.timeout, x.weight, x.weightType) 387 | for x in activeEps 388 | if ret == 0 and x.istcp 389 | ] 390 | ieplist = [ 391 | EndPointInfo(x.host, x.port, x.timeout, x.weight, x.weightType) 392 | for x in inactiveEps 393 | if ret == 0 and x.istcp 394 | ] 395 | self.setEndpoints(eplist, ieplist) 396 | else: 397 | queryFPrx.async_findObjectById4All( 398 | self.__queryRegisterCallback, self.__object.name() 399 | ) 400 | self.__lastFreshTime = now 401 | 402 | def getEndpoints(self): 403 | """ 404 | @brief: 获取可用服务列表 如果启用分组,只返回同分组的服务端ip 405 | @return: 获取节点列表 406 | @rtype: EndPointInfo列表 407 | """ 408 | tarsLogger.debug("AdapterProxyManager:getEndpoints") 409 | # 
410 |         lock = LockGuard(self.__newLock)
411 |         ret = [x[1][0] for x in list(self.__adps.items())]
412 |         # self.__lock.release()
413 |
414 |         return ret
415 |
416 |     def setEndpoints(self, eplist, ieplist):
417 |         """
418 |         @brief: set the server endpoint information
419 |         @param eplist: list of active callee nodes
420 |         @param ieplist: list of inactive callee nodes
421 |         """
422 |         tarsLogger.debug("AdapterProxyManager:setEndpoints")
423 |         adps = {}
424 |         iadps = {}
425 |         comm = self.__comm
426 |         isNeedNotify = False
427 |         # self.__lock.acquire()
428 |         lock = LockGuard(self.__newLock)
429 |         isStartStatic = True
430 |
431 |         for ep in eplist:
432 |             if ep.getWeightType() == 0:
433 |                 isStartStatic = False
434 |             epstr = str(ep)
435 |             if epstr in self.__adps:
436 |                 adps[epstr] = self.__adps[epstr]
437 |                 continue
438 |             isNeedNotify = True
439 |             self.__update = True
440 |             adapter = AdapterProxy()
441 |             adapter.initialize(ep, self.__object, comm.getReactor(), comm.getAsyncProc())
442 |             adapter.activatestateinreg = True
443 |             adps[epstr] = [ep, adapter, 0]
444 |         self.__adps, adps = adps, self.__adps
445 |
446 |         for iep in ieplist:
447 |             iepstr = str(iep)
448 |             if iepstr in self.__iadps:
449 |                 iadps[iepstr] = self.__iadps[iepstr]
450 |                 continue
451 |             isNeedNotify = True
452 |             adapter = AdapterProxy()
453 |             adapter.initialize(iep, self.__object, comm.getReactor(), comm.getAsyncProc())
454 |             adapter.activatestateinreg = False
455 |             iadps[iepstr] = [iep, adapter, 0]
456 |         self.__iadps, iadps = iadps, self.__iadps
457 |
458 |         if isStartStatic:
459 |             self.__weightType = EndpointWeightType.E_STATIC_WEIGHT
460 |         else:
461 |             self.__weightType = EndpointWeightType.E_LOOP
462 |
463 |         # self.__lock.release()
464 |         if isNeedNotify:
465 |             self.__notifyEndpoints(self.__adps, self.__iadps)
466 |         # close connections that are no longer in the endpoint list
467 |         for ep in adps:
468 |             if ep not in self.__adps:
469 |                 adps[ep][1].terminate()
470 |
471 |     def __notifyEndpoints(self, actives, inactives):
472 |         # self.__lock.acquire()
473 |         lock = LockGuard(self.__newLock)
474 |         self.__regAdapterProxyDict.clear()
475 |         self.__regAdapterProxyDict.update(actives)
476 |         self.__regAdapterProxyDict.update(inactives)
477 |         # self.__lock.release()
478 |
479 |     def __getNextValidProxy(self):
480 |         """
481 |         @brief: refresh the local cache; entries for servers that went offline must be removed
482 |         @return:
483 |         @rtype: list of EndPointInfo
484 |         @todo: improve the load-balancing algorithm
485 |         """
486 |         tarsLogger.debug("AdapterProxyManager:getNextValidProxy")
487 |         lock = LockGuard(self.__newLock)
488 |         if len(self.__adps) == 0:
489 |             raise TarsException("the activate adapter proxy is empty")
490 |
491 |         sortedActivateAdp = sorted(list(self.__adps.items()), key=lambda item: item[1][2])
492 |         # self.refreshEndpoints()
493 |         # self.__lock.acquire()
494 |         sortedActivateAdpSize = len(sortedActivateAdp)
495 |
496 |         while sortedActivateAdpSize != 0:
497 |             if sortedActivateAdp[0][1][1].checkActive():
498 |                 self.__adps[sortedActivateAdp[0][0]][2] += 1
499 |                 # returns the AdapterProxy
500 |                 return self.__adps[sortedActivateAdp[0][0]][1]
501 |             sortedActivateAdp.pop(0)
502 |             sortedActivateAdpSize -= 1
503 |         # randomly reconnect one available node
504 |         adpPrx = list(self.__adps.items())[random.randint(0, len(self.__adps) - 1)][1][1]
505 |         adpPrx.checkActive()
506 |         return None
507 |         # self.__lock.release()
508 |
509 |     def __getHashProxy(self, reqmsg):
510 |         if self.__weightType == EndpointWeightType.E_LOOP:
511 |             if reqmsg.isConHash:
512 |                 return self.__getConHashProxyForNormal(reqmsg.hashCode)
513 |             else:
514 |                 return self.__getHashProxyForNormal(reqmsg.hashCode)
515 |         else:
516 |             if reqmsg.isConHash:
517 |                 return self.__getConHashProxyForWeight(reqmsg.hashCode)
518 |             else:
519 |                 return self.__getHashProxyForWeight(reqmsg.hashCode)
520 |
521 |     def __getHashProxyForNormal(self, hashCode):
522 |         tarsLogger.debug("AdapterProxyManager:getHashProxyForNormal")
523 |         # self.__lock.acquire()
524 |         lock = LockGuard(self.__newLock)
525 |         regAdapterProxyList = sorted(
526 |             list(self.__regAdapterProxyDict.items()), key=lambda item: item[0]
527 |         )
528 |
529 |         allPrxSize = len(regAdapterProxyList)
530 |         if allPrxSize == 0:
531 |             raise TarsException("the adapter proxy is empty")
532 |         hashNum = hashCode % allPrxSize
533 |
534 |         if (
535 |             regAdapterProxyList[hashNum][1][1].activatestateinreg
536 |             and regAdapterProxyList[hashNum][1][1].checkActive()
537 |         ):
538 |             epstr = regAdapterProxyList[hashNum][0]
539 |             self.__regAdapterProxyDict[epstr][2] += 1
540 |             if epstr in self.__adps:
541 |                 self.__adps[epstr][2] += 1
542 |             elif epstr in self.__iadps:
543 |                 self.__iadps[epstr][2] += 1
544 |             return self.__regAdapterProxyDict[epstr][1]
545 |         else:
546 |             if len(self.__adps) == 0:
547 |                 raise TarsException("the activate adapter proxy is empty")
548 |             activeProxyList = list(self.__adps.items())
549 |             actPrxSize = len(activeProxyList)
550 |             while actPrxSize != 0:
551 |                 hashNum = hashCode % actPrxSize
552 |                 if activeProxyList[hashNum][1][1].checkActive():
553 |                     self.__adps[activeProxyList[hashNum][0]][2] += 1
554 |                     return self.__adps[activeProxyList[hashNum][0]][1]
555 |                 activeProxyList.pop(hashNum)
556 |                 actPrxSize -= 1
557 |             # randomly reconnect one available node
558 |             adpPrx = list(self.__adps.items())[random.randint(0, len(self.__adps) - 1)][1][1]
559 |             adpPrx.checkActive()
560 |             return None
561 |
562 |     def __getConHashProxyForNormal(self, hashCode):
563 |         tarsLogger.debug("AdapterProxyManager:getConHashProxyForNormal")
564 |         lock = LockGuard(self.__newLock)
565 |         if len(self.__regAdapterProxyDict) == 0:
566 |             raise TarsException("the adapter proxy is empty")
567 |         if self.__consistentHashWeight is None or self.__checkConHashChange(
568 |             self.__lastConHashPrxList
569 |         ):
570 |             self.__updateConHashProxyWeighted()
571 |
572 |         if len(self.__consistentHashWeight.nodes) > 0:
573 |             conHashIndex = self.__consistentHashWeight.getNode(hashCode)
574 |             if (
575 |                 conHashIndex in self.__regAdapterProxyDict
576 |                 and self.__regAdapterProxyDict[conHashIndex][1].activatestateinreg
577 |                 and self.__regAdapterProxyDict[conHashIndex][1].checkActive()
578 |             ):
579 |                 self.__regAdapterProxyDict[conHashIndex][2] += 1
580 |                 if conHashIndex in self.__adps:
581 |                     self.__adps[conHashIndex][2] += 1
582 |                 elif conHashIndex in self.__iadps:
583 |                     self.__iadps[conHashIndex][2] += 1
584 |                 return self.__regAdapterProxyDict[conHashIndex][1]
585 |             else:
586 |                 if len(self.__adps) == 0:
587 |                     raise TarsException("the activate adapter proxy is empty")
588 |                 activeProxyList = list(self.__adps.items())
589 |                 actPrxSize = len(activeProxyList)
590 |                 while actPrxSize != 0:
591 |                     hashNum = hashCode % actPrxSize
592 |                     if activeProxyList[hashNum][1][1].checkActive():
593 |                         self.__adps[activeProxyList[hashNum][0]][2] += 1
594 |                         return self.__adps[activeProxyList[hashNum][0]][1]
595 |                     activeProxyList.pop(hashNum)
596 |                     actPrxSize -= 1
597 |                 # randomly reconnect one available node
598 |                 adpPrx = list(self.__adps.items())[random.randint(0, len(self.__adps) - 1)][1][1]
599 |                 adpPrx.checkActive()
600 |                 return None
601 |             pass
602 |         else:
603 |             return self.__getHashProxyForNormal(hashCode)
604 |
605 |     def __getHashProxyForWeight(self, hashCode):
606 |         return None
607 |         pass
608 |
609 |     def __getConHashProxyForWeight(self, hashCode):
610 |         return None
611 |         pass
612 |
613 |     def __checkConHashChange(self, lastConHashPrxList):
614 |         tarsLogger.debug("AdapterProxyManager:checkConHashChange")
615 |         lock = LockGuard(self.__newLock)
616 |         if len(lastConHashPrxList) != len(self.__regAdapterProxyDict):
617 |             return True
618 |         regAdapterProxyList = sorted(
619 |             list(self.__regAdapterProxyDict.items()), key=lambda item: item[0]
620 |         )
621 |         regAdapterProxyListSize = len(regAdapterProxyList)
622 |         for index in range(regAdapterProxyListSize):
623 |             if lastConHashPrxList[index][0] != regAdapterProxyList[index][0]:
624 |                 return True
625 |         return False
626 |
627 |     def __updateConHashProxyWeighted(self):
628 |         tarsLogger.debug("AdapterProxyManager:updateConHashProxyWeighted")
629 |         lock = LockGuard(self.__newLock)
630 |         if len(self.__regAdapterProxyDict) == 0:
631 |             raise TarsException("the adapter proxy is empty")
632 |         self.__lastConHashPrxList = sorted(
633 |             list(self.__regAdapterProxyDict.items()), key=lambda item: item[0]
634 |         )
635 |         nodes = []
636 |         for var in self.__lastConHashPrxList:
637 |             nodes.append(var[0])
638 |         if self.__consistentHashWeight is None:
639 |             self.__consistentHashWeight = ConsistentHashNew(nodes)
640 |         else:
641 |             theOldActiveNodes = [var for var in nodes if var in self.__consistentHashWeight.nodes]
642 |
643 |             theOldInactiveNodes = [
644 |                 var for var in self.__consistentHashWeight.nodes if var not in theOldActiveNodes
645 |             ]
646 |             for var in theOldInactiveNodes:
647 |                 self.__consistentHashWeight.removeNode(var)
648 |
649 |             theNewActiveNodes = [var for var in nodes if var not in theOldActiveNodes]
650 |             for var in theNewActiveNodes:
651 |                 self.__consistentHashWeight.addNode(var)
652 |
653 |             self.__consistentHashWeight.nodes = nodes
654 |         pass
655 |
656 |     def __getWeightedProxy(self):
657 |         tarsLogger.debug("AdapterProxyManager:getWeightedProxy")
658 |         lock = LockGuard(self.__newLock)
659 |         if len(self.__adps) == 0:
660 |             raise TarsException("the activate adapter proxy is empty")
661 |
662 |         if self.__update is True:
663 |             self.__lastWeightedProxyData.clear()
664 |             weightedProxyData = {}
665 |             minWeight = (list(self.__adps.items())[0][1][0]).getWeight()
666 |             for item in list(self.__adps.items()):
667 |                 weight = item[1][0].getWeight()
668 |                 weightedProxyData[item[0]] = weight
669 |                 if minWeight > weight:
670 |                     minWeight = weight
671 |
672 |             if minWeight <= 0:
673 |                 addWeight = -minWeight + 1
674 |                 for key in weightedProxyData:
675 |                     weightedProxyData[key] += addWeight
676 |
677 |             self.__update = False
678 |             self.__lastWeightedProxyData = weightedProxyData
679 |
680 |         weightedProxyData = self.__lastWeightedProxyData
681 |         while len(weightedProxyData) > 0:
682 |             total = sum(weightedProxyData.values())
683 |             rand = random.randint(1, total)
684 |             temp = 0
685 |             for item in list(weightedProxyData.items()):
686 |                 temp += item[1]
687 |                 if rand <= temp:
688 |                     if self.__adps[item[0]][1].checkActive():
689 |                         self.__adps[item[0]][2] += 1
690 |                         return self.__adps[item[0]][1]
691 |                     else:
692 |                         weightedProxyData.pop(item[0])
693 |                         break
694 |         # no active node is available
695 |         # randomly reconnect one available node
696 |         adpPrx = list(self.__adps.items())[random.randint(0, len(self.__adps) - 1)][1][1]
697 |         adpPrx.checkActive()
698 |         return None
699 |
700 |     def selectAdapterProxy(self, reqmsg):
701 |         """
702 |         @brief: refresh the local cache (entries for offline servers are removed) and return an AdapterProxy chosen by the configured strategy
703 |         @param: reqmsg: request message
704 |         @type reqmsg: ReqMessage
705 |         @return:
706 |         @rtype: list of EndPointInfo
707 |         @todo: improve the load-balancing algorithm
708 |         """
709 |         tarsLogger.debug("AdapterProxyManager:selectAdapterProxy")
710 |         self.refreshEndpoints()
711 |         if reqmsg.isHash:
712 |             return self.__getHashProxy(reqmsg)
713 |         else:
714 |             if self.__weightType == EndpointWeightType.E_STATIC_WEIGHT:
715 |                 return self.__getWeightedProxy()
716 |             else:
717 |                 return self.__getNextValidProxy()
718 |
--------------------------------------------------------------------------------