├── .github └── workflows │ └── docker-release.yml ├── .gitignore ├── Dockerfile ├── README.md ├── example ├── tcpclient.py ├── tcpserver.py ├── udpclient.py └── udpserver.py ├── setup.py └── sevent ├── __init__.py ├── buffer.py ├── cbuffer.c ├── coroutines ├── __init__.py ├── chain.py ├── dns.py ├── event.py ├── future.py ├── loop.py ├── pipe.py ├── tcp.py └── udp.py ├── dns.py ├── errors.py ├── event.py ├── helpers ├── __init__.py ├── __main__.py ├── arproxy.py ├── proxy2proxy.py ├── redirect2proxy.py ├── simple_proxy.py ├── tcp2proxy.py ├── tcp_forward.py ├── tcp_reverse.py └── utils.py ├── impl ├── __init__.py ├── epoll_loop.py ├── kqueue_loop.py └── select_loop.py ├── loop.py ├── pipe.py ├── sslsocket ├── __init__.py └── tcp.py ├── tcp.py ├── udp.py ├── utils.py ├── waker.py └── win32util.py /.github/workflows/docker-release.yml: -------------------------------------------------------------------------------- 1 | name: Docker Release 2 | 3 | on: 4 | release: 5 | types: [ published, edited ] 6 | 7 | workflow_dispatch: 8 | inputs: 9 | no_cache: 10 | type: boolean 11 | description: 'Build from scratch, without using cached layers' 12 | 13 | env: 14 | IMAGE_NAME: sevent 15 | DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USERNAME }}/sevent 16 | 17 | jobs: 18 | build: 19 | runs-on: ubuntu-latest 20 | steps: 21 | - name: Checkout repository 22 | uses: actions/checkout@v3 23 | 24 | - name: Log in to Docker hub 25 | uses: docker/login-action@v2 26 | with: 27 | username: ${{ secrets.DOCKER_USERNAME }} 28 | password: ${{ secrets.DOCKER_PASSWORD }} 29 | 30 | - name: Set up Docker Buildx 31 | uses: docker/setup-buildx-action@v2 32 | 33 | # slashes are not allowed in image tags, but can appear in git branch or tag names 34 | - id: sanitize_tag 35 | name: Sanitize image tag 36 | run: echo tag=${raw_tag//\//-} >> $GITHUB_OUTPUT 37 | env: 38 | raw_tag: ${{ github.ref_name }} 39 | 40 | - id: build 41 | name: Build image 42 | uses: docker/build-push-action@v3 43 | with: 44 | build-args: 
BUILD_TYPE=release 45 | load: true # save to docker images 46 | # push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555 47 | tags: > 48 | ${{ env.IMAGE_NAME }}, 49 | ${{ env.DEPLOY_IMAGE_NAME }}:latest, 50 | ${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }} 51 | 52 | # cache layers in GitHub Actions cache to speed up builds 53 | cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=docker-release 54 | cache-to: type=gha,scope=docker-release,mode=max 55 | 56 | - name: Push image to Docker Hub 57 | run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }} -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | .idea -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.10-slim 2 | 3 | MAINTAINER snower sujian199@gmail.com 4 | 5 | WORKDIR /root 6 | 7 | RUN apt-get update && apt-get install -y ca-certificates git gcc python3-dev 8 | 9 | RUN python -m pip install --upgrade pip && \ 10 | pip install git+https://github.com/snower/sevent.git#egg=sevent 11 | 12 | CMD ["/usr/local/bin/python3", "-m", "sevent.helpers", "-h"] -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # sevent 2 | 3 | A high-performance event loop library for Python.
4 | 5 | # Examples 6 | 7 | ### Simple Http Client 8 | 9 | ```python 10 | import sevent 11 | 12 | def on_data(s, data): 13 | print(data.decode("utf-8")) 14 | 15 | s = sevent.tcp.Socket() 16 | s.on_data(on_data) 17 | s.on_close(lambda s: sevent.current().stop()) 18 | s.connect(('www.google.com', 80)) 19 | s.write(b'GET / HTTP/1.1\r\nHost: www.google.com\r\nConnection: Close\r\nUser-Agent: curl/7.58.0\r\nAccept: */*\r\n\r\n') 20 | 21 | sevent.instance().start() 22 | ``` 23 | 24 | ```python 25 | import sevent 26 | 27 | async def http_test(): 28 | s = sevent.tcp.Socket() 29 | await s.connectof(('www.google.com', 80)) 30 | await s.send(b'GET / HTTP/1.1\r\nHost: www.google.com\r\nConnection: Close\r\nUser-Agent: curl/7.58.0\r\nAccept: */*\r\n\r\n') 31 | 32 | data = b'' 33 | while True: 34 | try: 35 | data += (await s.recv()).read() 36 | except sevent.tcp.SocketClosed: 37 | break 38 | print(data.decode("utf-8")) 39 | await s.closeof() 40 | 41 | sevent.run(http_test) 42 | ``` 43 | 44 | ### Simple TCP Port Forward 45 | 46 | ```python 47 | import sys 48 | import sevent 49 | 50 | def on_connection(server, conn): 51 | pconn = sevent.tcp.Socket() 52 | pconn.connect((sys.argv[2], int(sys.argv[3]))) 53 | conn.link(pconn) 54 | 55 | server = sevent.tcp.Server() 56 | server.on_connection(on_connection) 57 | server.listen(("0.0.0.0", int(sys.argv[1]))) 58 | sevent.instance().start() 59 | ``` 60 | 61 | ```python 62 | import sys 63 | import sevent 64 | 65 | async def tcp_port_forward_server(): 66 | server = sevent.tcp.Server() 67 | server.listen(("0.0.0.0", int(sys.argv[1]))) 68 | 69 | while True: 70 | conn = await server.accept() 71 | pconn = sevent.tcp.Socket() 72 | pconn.connect((sys.argv[2], int(sys.argv[3]))) 73 | conn.link(pconn) 74 | 75 | sevent.run(tcp_port_forward_server) 76 | ``` 77 | 78 | # License 79 | 80 | sevent uses the MIT license, see LICENSE file for the details. 
-------------------------------------------------------------------------------- /example/tcpclient.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import sevent 3 | import logging 4 | 5 | logging.basicConfig(level=logging.DEBUG) 6 | 7 | loop = sevent.instance() 8 | 9 | 10 | def on_connect(s): 11 | print('on_connect') 12 | s.write(b'GET / HTTP/1.0\r\nHost: www.baidu.com\r\nConnection: Close\r\n\r\n') 13 | 14 | 15 | def on_data(s, data): 16 | print('on_data') 17 | sys.stdout.write(data) 18 | 19 | 20 | def on_end(s): 21 | print('on_end') 22 | 23 | 24 | def on_close(s): 25 | print('on_close') 26 | loop.stop() 27 | 28 | 29 | def on_error(s, e): 30 | print('on_error') 31 | print(e) 32 | 33 | 34 | s = sevent.tcp.Socket() 35 | s.on('connect', on_connect) 36 | s.on('data', on_data) 37 | s.on('end', on_end) 38 | s.on('close', on_close) 39 | s.on('error', on_error) 40 | s.connect(('www.baidu.com', 80)) 41 | 42 | loop.start() 43 | -------------------------------------------------------------------------------- /example/tcpserver.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import sevent 3 | 4 | loop = sevent.instance() 5 | 6 | 7 | def on_connection(s, conn): 8 | print('on_connection') 9 | conn.on('data', on_data) 10 | conn.on('end', on_end) 11 | conn.on('close', on_close) 12 | conn.on('error', on_error) 13 | 14 | 15 | def on_data(s, data): 16 | print('on_data') 17 | sys.stdout.write(data) 18 | s.write(b'HTTP/1.0 200 OK\r\nHost: 127.0.0.1\r\nConnection: Close\r\n\r\nHello world!\r\n') 19 | s.end() 20 | 21 | 22 | def on_end(s): 23 | print('on_end') 24 | 25 | 26 | def on_close(s): 27 | print('on_close') 28 | 29 | 30 | def on_error(s, e): 31 | print('on_error') 32 | print(e) 33 | 34 | 35 | s = sevent.tcp.Server() 36 | s.on('connection', on_connection) 37 | s.on('error', on_error) 38 | s.listen(('0.0.0.0', 8080)) 39 | 40 | loop.start() 41 | 
-------------------------------------------------------------------------------- /example/udpclient.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 15/1/4 3 | # create by: snower 4 | 5 | import sevent 6 | 7 | loop = sevent.instance() 8 | 9 | def on_data(s, address, data): 10 | print(address, data) 11 | 12 | def start(): 13 | socket = sevent.udp.Socket() 14 | socket.write((b"hello world!", ("127.0.0.1", 20000))) 15 | socket.on("data", on_data) 16 | 17 | loop.add_async(start) 18 | loop.start() -------------------------------------------------------------------------------- /example/udpserver.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 15/1/4 3 | # create by: snower 4 | 5 | import sevent 6 | 7 | loop = sevent.instance() 8 | 9 | def on_data(s, address, data): 10 | print(address, data) 11 | s.write((b"hello world!", address)) 12 | 13 | server = sevent.udp.Server() 14 | server.bind(("0.0.0.0", 20000)) 15 | server.on("data", on_data) 16 | loop.start() -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | import sys 5 | import platform 6 | from setuptools import setup, Extension 7 | 8 | if platform.python_implementation() == "CPython": 9 | if platform.system() != 'Windows': 10 | ext_modules = [Extension('sevent.cbuffer', sources=['sevent/cbuffer.c'])] 11 | else: 12 | if sys.version_info[0] >= 3: 13 | ext_modules = [Extension('sevent.cbuffer', sources=['sevent/cbuffer.c'], libraries=["ws2_32"])] 14 | else: 15 | ext_modules = [] 16 | else: 17 | ext_modules = [] 18 | 19 | if os.path.exists("README.md"): 20 | if sys.version_info[0] >= 3: 21 | with open("README.md", encoding="utf-8") as fp: 22 | long_description = fp.read() 23 | else: 24 | with open("README.md") as fp: 
25 | long_description = fp.read() 26 | else: 27 | long_description = '' 28 | 29 | setup( 30 | name='sevent', 31 | version='0.4.30', 32 | packages=['sevent', 'sevent.impl', 'sevent.coroutines', 'sevent.sslsocket', 'sevent.helpers'], 33 | ext_modules=ext_modules, 34 | package_data={ 35 | '': ['README.md'], 36 | }, 37 | install_requires=[ 38 | 'dnslib>=0.9.7', 39 | 'greenlet>=0.4.2', 40 | ], 41 | author='snower', 42 | author_email='sujian199@gmail.com', 43 | url='https://github.com/snower/sevent', 44 | license='MIT', 45 | description='lightweight event loop', 46 | long_description=long_description, 47 | long_description_content_type="text/markdown", 48 | ) 49 | -------------------------------------------------------------------------------- /sevent/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | version = '0.4.30' 4 | version_info = (0, 4, 30) 5 | 6 | from .utils import is_py3, set_logger 7 | from .loop import instance, current 8 | from .event import EventEmitter 9 | from . import tcp 10 | from . import udp 11 | from . import pipe 12 | from .buffer import Buffer 13 | from .dns import DNSResolver 14 | from . import sslsocket as ssl 15 | from . import errors 16 | 17 | if is_py3: 18 | from .coroutines.future import Future 19 | from .coroutines.chain import Chain 20 | from . 
import loop 21 | 22 | 23 | def run(callback, *args, **kwargs): 24 | return loop.instance().run(callback, *args, **kwargs) 25 | 26 | 27 | def go(callback, *args, **kwargs): 28 | if not loop._mul_ioloop: 29 | return loop._ioloop.go(callback, *args, **kwargs) 30 | return loop._thread_local._sevent_ioloop.go(callback, *args, **kwargs) 31 | 32 | 33 | def sleep(seconds): 34 | if not loop._mul_ioloop: 35 | return loop._ioloop.sleep(seconds) 36 | return loop._thread_local._sevent_ioloop.sleep(seconds) 37 | -------------------------------------------------------------------------------- /sevent/buffer.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 15/1/27 3 | # create by: snower 4 | 5 | import os 6 | import time 7 | from collections import deque 8 | from .event import EventEmitter 9 | from .loop import current 10 | from .utils import get_logger 11 | 12 | try: 13 | RECV_BUFFER_SIZE = int(os.environ.get("SEVENT_RECV_BUFFER_SIZE", 0)) 14 | except: 15 | RECV_BUFFER_SIZE = 0 16 | 17 | try: 18 | MAX_BUFFER_SIZE = int(os.environ.get("SEVENT_MAX_BUFFER_SIZE", 4 * 1024 * 1024)) 19 | except: 20 | MAX_BUFFER_SIZE = 4 * 1024 * 1024 21 | 22 | try: 23 | BUFFER_DRAIN_RATE = min(max(float(os.environ.get("SEVENT_BUFFER_DRAIN_RATE", 0.5)), 0.1), 0.9) 24 | except: 25 | BUFFER_DRAIN_RATE = 0.5 26 | 27 | try: 28 | RECV_COUNT = int(os.environ.get("SEVENT_RECV_COUNT", 0)) 29 | except: 30 | RECV_COUNT = 0 31 | 32 | try: 33 | SEND_COUNT = int(os.environ.get("SEVENT_SEND_COUNT", 0)) 34 | except: 35 | SEND_COUNT = 0 36 | 37 | try: 38 | if not os.environ.get("SEVENT_NOUSE_CBUFFER", False): 39 | from . 
import cbuffer 40 | BaseBuffer = cbuffer.Buffer 41 | if RECV_BUFFER_SIZE: 42 | try: 43 | cbuffer.socket_set_recv_size(RECV_BUFFER_SIZE) 44 | except: pass 45 | if RECV_COUNT: 46 | try: 47 | cbuffer.socket_set_recv_count(RECV_COUNT) 48 | except: pass 49 | if SEND_COUNT: 50 | try: 51 | cbuffer.socket_set_send_count(SEND_COUNT) 52 | except: pass 53 | else: 54 | cbuffer = None 55 | except ImportError: 56 | get_logger().warning("cbuffer is not supported") 57 | cbuffer = None 58 | 59 | RECV_BUFFER_SIZE = RECV_BUFFER_SIZE or 8 * 1024 - 64 60 | 61 | if cbuffer is None: 62 | class BaseBuffer(object): 63 | def __init__(self): 64 | self._buffer = b'' 65 | self._buffer_odata = None 66 | self._buffer_len = 0 67 | self._buffers = deque() 68 | self._buffers_odata = deque() 69 | self._len = 0 70 | self._buffer_index = 0 71 | 72 | def join(self): 73 | if self._buffer_index > 0 or self._buffer_len - self._buffer_index < self._len: 74 | if self._buffer_index < self._buffer_len: 75 | self._buffers.appendleft(self._buffer[self._buffer_index:]) 76 | if len(self._buffers) > 1: 77 | data = b"".join(self._buffers) 78 | self._buffers.clear() 79 | else: 80 | data = self._buffers.popleft() 81 | if self._buffers_odata: 82 | self._buffer_odata = self._buffers_odata[-1] 83 | self._buffers_odata.clear() 84 | self._buffer = data 85 | self._buffer_len = len(data) 86 | self._buffer_index = 0 87 | return self._buffer 88 | 89 | def write(self, data, odata = None): 90 | if self._buffer_len > 0: 91 | self._buffers.append(data) 92 | self._buffers_odata.append(odata) 93 | self._len += len(data) 94 | else: 95 | self._buffer = data 96 | self._buffer_odata = odata 97 | self._buffer_len = len(data) 98 | self._len += self._buffer_len 99 | return self 100 | 101 | def read(self, size=-1): 102 | if size < 0: 103 | if self._buffer_len - self._buffer_index < self._len: 104 | self.join() 105 | buffer = self._buffer 106 | elif self._buffer_index > 0: 107 | buffer = self._buffer[self._buffer_index:] 108 | else: 109 | 
buffer = self._buffer 110 | buffer_odata = self._buffer_odata 111 | self._buffer_index, self._buffer_len, self._buffer, self._buffer_odata, self._len = 0, 0, b'', None, 0 112 | return (buffer, buffer_odata) if buffer_odata else buffer 113 | 114 | if self._len < size: 115 | return b"" 116 | 117 | if self._buffer_len - self._buffer_index < size: 118 | self.join() 119 | data = self._buffer[:size] 120 | self._buffer_index = size 121 | else: 122 | data = self._buffer[self._buffer_index: self._buffer_index + size] 123 | self._buffer_index += size 124 | buffer_odata = self._buffer_odata 125 | self._len -= size 126 | 127 | if self._buffer_index >= self._buffer_len: 128 | if self._len > 0: 129 | self._buffer = self._buffers.popleft() 130 | self._buffer_odata = self._buffers_odata.popleft() 131 | self._buffer_index, self._buffer_len = 0, len(self._buffer) 132 | else: 133 | self._buffer_index, self._buffer_len, self._buffer, self._buffer_odata = 0, 0, b'', None 134 | return (data, buffer_odata) if buffer_odata else data 135 | 136 | def next(self): 137 | if self._buffer_index > 0: 138 | data = self._buffer[self._buffer_index:] 139 | self._len -= self._buffer_len - self._buffer_index 140 | else: 141 | data = self._buffer 142 | self._len -= self._buffer_len 143 | buffer_odata = self._buffer_odata 144 | 145 | if self._len > 0: 146 | self._buffer = self._buffers.popleft() 147 | self._buffer_odata = self._buffers_odata.popleft() 148 | self._buffer_index, self._buffer_len = 0, len(self._buffer) 149 | else: 150 | self._buffer_index, self._buffer_len, self._buffer, self._buffer_odata = 0, 0, b'', None 151 | return (data, buffer_odata) if buffer_odata else data 152 | 153 | def extend(self, o): 154 | if not isinstance(o, BaseBuffer): 155 | raise TypeError('not Buffer') 156 | 157 | while o: 158 | data = o.next() 159 | if isinstance(data, tuple): 160 | self.write(*data) 161 | else: 162 | self.write(data) 163 | 164 | def fetch(self, o, size=-1): 165 | if not isinstance(o, BaseBuffer): 166 
| raise TypeError('not Buffer') 167 | 168 | data = o.read(size) 169 | self.write(data) 170 | return len(data) 171 | 172 | def copyfrom(self, o, size=-1): 173 | if not isinstance(o, BaseBuffer): 174 | raise TypeError('not Buffer') 175 | 176 | if size < 0: 177 | data = o.join() 178 | else: 179 | data = o.join()[:size] 180 | self.write(data) 181 | return len(data) 182 | 183 | def clear(self): 184 | self._buffer = b'' 185 | self._buffer_odata = None 186 | self._buffer_len = 0 187 | self._buffers = deque() 188 | self._buffers_odata = deque() 189 | self._len = 0 190 | self._buffer_index = 0 191 | 192 | def head(self): 193 | if not self._buffer_odata: 194 | return self._buffer 195 | return (self._buffer, self._buffer_odata) 196 | 197 | def head_data(self): 198 | return self._buffer_odata 199 | 200 | def last(self): 201 | if not self._buffers: 202 | return self.head() 203 | 204 | if not self._buffers_odata[-1]: 205 | return self._buffers[-1] 206 | return (self._buffers[-1], self._buffers_odata[-1]) 207 | 208 | def last_data(self): 209 | if not self._buffers_odata: 210 | return self.head_data() 211 | return self._buffers_odata[-1] 212 | 213 | def __len__(self): 214 | return self._len 215 | 216 | def __str__(self): 217 | buffer = self.join() 218 | return buffer.__str__() 219 | 220 | def __nonzero__(self): 221 | return self._len > 0 222 | 223 | def __getitem__(self, index): 224 | buffer = self.join() 225 | return (buffer.__getitem__(index), self._buffer_odata) if self._buffer_odata else buffer.__getitem__(index) 226 | 227 | def __hash__(self): 228 | buffer = self.join() 229 | return buffer.__hash__() 230 | 231 | 232 | class Buffer(EventEmitter, BaseBuffer): 233 | def __init__(self, max_buffer_size=None): 234 | EventEmitter.__init__(self) 235 | BaseBuffer.__init__(self) 236 | 237 | self._loop = current() 238 | self._full = False 239 | self._drain_size = int(max_buffer_size or MAX_BUFFER_SIZE) 240 | self._regain_size = int(self._drain_size * BUFFER_DRAIN_RATE) 241 | 
self._drain_time = time.time() 242 | self._regain_time = time.time() 243 | 244 | @property 245 | def full(self): 246 | return self._full 247 | 248 | def on_drain(self, callback): 249 | self.on("drain", callback) 250 | 251 | def on_regain(self, callback): 252 | self.on("regain", callback) 253 | 254 | def once_drain(self, callback): 255 | self.once("drain", callback) 256 | 257 | def once_regain(self, callback): 258 | self.once("regain", callback) 259 | 260 | def _do_drain(self): 261 | self._full = True 262 | self._drain_time = time.time() 263 | try: 264 | self.emit_drain(self) 265 | except Exception as e: 266 | get_logger().exception("buffer emit drain error:%s", e) 267 | do_drain = _do_drain 268 | 269 | def _do_regain(self): 270 | self._full = False 271 | self._regain_time = time.time() 272 | try: 273 | self.emit_regain(self) 274 | except Exception as e: 275 | get_logger().exception("buffer emit regain error:%s", e) 276 | do_regain = _do_regain 277 | 278 | def write(self, data, odata=None): 279 | if odata is None: 280 | BaseBuffer.write(self, data) 281 | else: 282 | BaseBuffer.write(self, data, odata) 283 | 284 | if self._len > self._drain_size and not self._full: 285 | self.do_drain() 286 | return self 287 | 288 | def extend(self, o): 289 | BaseBuffer.extend(self, o) 290 | 291 | if o._full and o._len < o._regain_size: 292 | o.do_regain() 293 | 294 | if self._len > self._drain_size and not self._full: 295 | self.do_drain() 296 | return o 297 | 298 | def fetch(self, o, size=-1): 299 | r = BaseBuffer.fetch(self, o, size) 300 | 301 | if o._full and o._len < o._regain_size: 302 | o.do_regain() 303 | 304 | if self._len > self._drain_size and not self._full: 305 | self.do_drain() 306 | return r 307 | 308 | def copyfrom(self, o, size=-1): 309 | r = BaseBuffer.copyfrom(self, o, size) 310 | 311 | if self._len > self._drain_size and not self._full: 312 | self.do_drain() 313 | return r 314 | 315 | def read(self, size=-1): 316 | data = BaseBuffer.read(self, size) 317 | 318 | if 
self._full and self._len < self._regain_size: 319 | self.do_regain() 320 | return data 321 | 322 | def next(self): 323 | data = BaseBuffer.next(self) 324 | 325 | if self._full and self._len < self._regain_size: 326 | self.do_regain() 327 | return data 328 | 329 | def clear(self): 330 | BaseBuffer.clear(self) 331 | 332 | if self._full and self._len < self._regain_size: 333 | self.do_regain() 334 | 335 | def link(self, o): 336 | self_do_drain, self_do_regain = self.do_drain, self.do_regain 337 | o_do_drain, o_do_regain = o.do_drain, o.do_regain 338 | 339 | def do_drain(): 340 | self_do_drain() 341 | o_do_drain() 342 | 343 | def do_regain(): 344 | if self._len >= self._regain_size or o._len >= o._regain_size: 345 | return 346 | self_do_regain() 347 | o_do_regain() 348 | 349 | self.do_drain, self.do_regain = do_drain, do_regain 350 | o.do_drain, o.do_regain = do_drain, do_regain 351 | if (self._full and not o._full) or (not self._full and o._full): 352 | do_drain() 353 | return self 354 | 355 | def close(self): 356 | self.do_drain = self._do_drain 357 | self.do_regain = self._do_regain 358 | self.remove_all_listeners() 359 | 360 | def decode(self, *args, **kwargs): 361 | data = self.join() 362 | return data.decode(*args, **kwargs) 363 | 364 | def items(self): 365 | return self._buffers 366 | 367 | def __getitem__(self, item): 368 | data = self.join() 369 | return data.__getitem__(item) 370 | 371 | def __iter__(self): 372 | data = self.join() 373 | return iter(data) 374 | 375 | def __contains__(self, item): 376 | data = self.join() 377 | 378 | return data.__contains__(item) 379 | 380 | def __add__(self, other): 381 | data = self.join() 382 | return data.__add__(other) 383 | 384 | def __bytes__(self): 385 | return self.join() 386 | 387 | def __cmp__(self, other): 388 | data = self.join() 389 | return data.__cmp__(other) 390 | 391 | def __eq__(self, other): 392 | data = self.join() 393 | return data == other 394 | 395 | def __gt__(self, other): 396 | data = self.join() 
397 | return data > other 398 | 399 | def __lt__(self, other): 400 | data = self.join() 401 | return data < other 402 | 403 | def __ge__(self, other): 404 | data = self.join() 405 | return data >= other 406 | 407 | def __le__(self, other): 408 | data = self.join() 409 | return data <= other 410 | 411 | def __ne__(self, other): 412 | data = self.join() 413 | return data != other 414 | -------------------------------------------------------------------------------- /sevent/coroutines/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 2020/5/8 3 | # create by: snower -------------------------------------------------------------------------------- /sevent/coroutines/chain.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 2021/2/1 3 | # create by: snower 4 | 5 | import greenlet 6 | from collections import deque 7 | from ..errors import ChainClosed 8 | from ..loop import current 9 | 10 | class Chain(object): 11 | def __init__(self, size=1): 12 | self._size = size 13 | self._queue = deque() 14 | self._queue_size = 0 15 | self._closed = False 16 | self._send_waiters = deque() 17 | self._recv_waiters = deque() 18 | 19 | async def send(self, value): 20 | if self._closed: 21 | raise ChainClosed() 22 | 23 | if self._recv_waiters: 24 | current().add_async(self._recv_waiters.popleft().switch, value) 25 | return 26 | 27 | self._queue.append(value) 28 | self._queue_size += 1 29 | if self._queue_size <= self._size: 30 | return 31 | 32 | child_gr = greenlet.getcurrent() 33 | main = child_gr.parent 34 | assert main is not None, "must be running in async func" 35 | self._send_waiters.append(child_gr) 36 | main.switch() 37 | 38 | async def recv(self): 39 | if self._closed: 40 | raise ChainClosed() 41 | 42 | if self._queue_size > 0: 43 | if self._send_waiters: 44 | current().add_async(self._send_waiters.popleft().switch) 45 | self._queue_size 
-= 1 46 | return self._queue.popleft() 47 | 48 | child_gr = greenlet.getcurrent() 49 | main = child_gr.parent 50 | assert main is not None, "must be running in async func" 51 | self._recv_waiters.append(child_gr) 52 | return main.switch() 53 | 54 | def close(self): 55 | if self._closed: 56 | return 57 | 58 | self._closed = True 59 | while self._send_waiters: 60 | current().add_async(self._send_waiters.popleft().throw, ChainClosed()) 61 | while self._recv_waiters: 62 | current().add_async(self._recv_waiters.popleft().throw, ChainClosed()) 63 | self._queue_size = 0 64 | self._queue.clear() 65 | 66 | async def closeof(self): 67 | self.close() 68 | 69 | def __del__(self): 70 | self.close() -------------------------------------------------------------------------------- /sevent/coroutines/dns.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 2020/5/8 3 | # create by: snower 4 | 5 | import greenlet 6 | 7 | 8 | def warp_coroutine(BaseDNSResolver): 9 | class DNSResolver(BaseDNSResolver): 10 | async def gethostbyname(self, hostname, timeout=None): 11 | child_gr = greenlet.getcurrent() 12 | main = child_gr.parent 13 | assert main is not None, "must be running in async func" 14 | self.resolve(hostname, lambda hostname, ip: child_gr.switch(ip), timeout) 15 | return main.switch() 16 | 17 | return DNSResolver 18 | -------------------------------------------------------------------------------- /sevent/coroutines/event.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 2020/5/8 3 | # create by: snower 4 | 5 | import greenlet 6 | from ..utils import get_logger 7 | 8 | 9 | def warp_coroutine(BaseEventEmitter): 10 | class EventEmitter(BaseEventEmitter): 11 | def on(self, event_name, callback): 12 | if callback.__code__.co_flags & 0x80 == 0: 13 | return BaseEventEmitter.on(self, event_name, callback) 14 | 15 | def run_async_fuc(*args, **kwargs): 16 
| def run(): 17 | try: 18 | g = callback(*args, **kwargs) 19 | g.send(None) 20 | while True: 21 | g.send(None) 22 | except StopIteration: 23 | pass 24 | except Exception as e: 25 | if isinstance(e, (KeyboardInterrupt, SystemError)): 26 | raise e 27 | get_logger().exception("error when calling callback:%s", e) 28 | child_gr = greenlet.greenlet(run) 29 | return child_gr.switch() 30 | BaseEventEmitter.on(self, event_name, run_async_fuc) 31 | 32 | def once(self, event_name, callback): 33 | if callback.__code__.co_flags & 0x80 == 0: 34 | return BaseEventEmitter.once(self, event_name, callback) 35 | 36 | def run_async_fuc(*args, **kwargs): 37 | def run(): 38 | try: 39 | g = callback(*args, **kwargs) 40 | g.send(None) 41 | while True: 42 | g.send(None) 43 | except StopIteration: 44 | pass 45 | except Exception as e: 46 | if isinstance(e, (KeyboardInterrupt, SystemError)): 47 | raise e 48 | get_logger().exception("error when calling callback:%s", e) 49 | child_gr = greenlet.greenlet(run) 50 | return child_gr.switch() 51 | BaseEventEmitter.once(self, event_name, run_async_fuc) 52 | 53 | return EventEmitter 54 | -------------------------------------------------------------------------------- /sevent/coroutines/future.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 2020/5/10 3 | # create by: snower 4 | 5 | import greenlet 6 | from ..utils import get_logger 7 | 8 | _PENDING = 'PENDING' 9 | _CANCELLED = 'CANCELLED' 10 | _FINISHED = 'FINISHED' 11 | 12 | 13 | class CancelledError(Exception): pass 14 | 15 | 16 | class InvalidStateError(Exception): pass 17 | 18 | 19 | class Future(object): 20 | _state = _PENDING 21 | _result = None 22 | _exception = None 23 | _source_traceback = None 24 | _log_traceback = False 25 | 26 | def __init__(self): 27 | self._callbacks = [] 28 | 29 | def __repr__(self): 30 | return '<%s %s>' % (self.__class__.__name__, self._callbacks) 31 | 32 | def cancel(self): 33 | 
self._log_traceback = False 34 | if self._state != _PENDING: 35 | return False 36 | self._state = _CANCELLED 37 | self._schedule_callbacks() 38 | return True 39 | 40 | def _schedule_callbacks(self): 41 | callbacks = self._callbacks[:] 42 | if not callbacks: 43 | return 44 | 45 | self._callbacks[:] = [] 46 | for callback in callbacks: 47 | try: 48 | callback(self) 49 | except Exception as e: 50 | get_logger().exception("future schedule callback error:%s", e) 51 | 52 | def cancelled(self): 53 | return self._state == _CANCELLED 54 | 55 | def done(self): 56 | return self._state != _PENDING 57 | 58 | def result(self): 59 | if self._state == _CANCELLED: 60 | raise CancelledError 61 | if self._state != _FINISHED: 62 | raise InvalidStateError('Result is not ready.') 63 | self._log_traceback = False 64 | if self._exception is not None: 65 | raise self._exception 66 | return self._result 67 | 68 | def exception(self): 69 | if self._state == _CANCELLED: 70 | raise CancelledError 71 | if self._state != _FINISHED: 72 | raise InvalidStateError('Exception is not set.') 73 | self._log_traceback = False 74 | return self._exception 75 | 76 | def add_done_callback(self, fn): 77 | if self._state != _PENDING: 78 | fn(self) 79 | else: 80 | self._callbacks.append(fn) 81 | 82 | def remove_done_callback(self, fn): 83 | filtered_callbacks = [f for f in self._callbacks if f != fn] 84 | removed_count = len(self._callbacks) - len(filtered_callbacks) 85 | if removed_count: 86 | self._callbacks[:] = filtered_callbacks 87 | return removed_count 88 | 89 | def set_result(self, result): 90 | if self._state != _PENDING: 91 | raise InvalidStateError('{}: {!r}'.format(self._state, self)) 92 | self._result = result 93 | self._state = _FINISHED 94 | self._schedule_callbacks() 95 | 96 | def set_exception(self, exception): 97 | if self._state != _PENDING: 98 | raise InvalidStateError('{}: {!r}'.format(self._state, self)) 99 | if isinstance(exception, type): 100 | exception = exception() 101 | if 
type(exception) is StopIteration: 102 | raise TypeError("StopIteration interacts badly with generators " 103 | "and cannot be raised into a Future") 104 | self._exception = exception 105 | self._state = _FINISHED 106 | self._schedule_callbacks() 107 | 108 | def __next__(self): 109 | if not self.done(): 110 | child_gr = greenlet.getcurrent() 111 | main = child_gr.parent 112 | assert main is not None, "must be running in async func" 113 | self._callbacks.append(lambda future: child_gr.switch()) 114 | main.switch() 115 | 116 | result = self.result() 117 | e = StopIteration() 118 | e.value = result 119 | raise e 120 | 121 | def __iter__(self): 122 | return self 123 | 124 | __await__ = __iter__ 125 | -------------------------------------------------------------------------------- /sevent/coroutines/loop.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 2020/5/8 3 | # create by: snower 4 | 5 | import types 6 | import greenlet 7 | from ..utils import get_logger 8 | 9 | 10 | def warp_coroutine(BaseIOLoop): 11 | class IOLoop(BaseIOLoop): 12 | def call_async(self, callback, *args, **kwargs): 13 | if isinstance(callback, types.CoroutineType): 14 | def run_coroutine_fuc(*args, **kwargs): 15 | def run_coroutine(): 16 | try: 17 | callback.send(None) 18 | while True: 19 | callback.send(None) 20 | except StopIteration: 21 | return 22 | except Exception as e: 23 | if isinstance(e, (KeyboardInterrupt, SystemError)): 24 | raise e 25 | get_logger().exception("loop callback error:%s", e) 26 | 27 | child_gr = greenlet.greenlet(run_coroutine) 28 | return child_gr.switch() 29 | return self._handlers.append((run_coroutine_fuc, args, kwargs)) 30 | 31 | if callback.__code__.co_flags & 0x80 == 0: 32 | return self._handlers.append((callback, args, kwargs)) 33 | 34 | def run_async_fuc(*args, **kwargs): 35 | def run_async(): 36 | try: 37 | g = callback(*args, **kwargs) 38 | g.send(None) 39 | while True: 40 | g.send(None) 41 | 
except StopIteration: 42 | return 43 | except Exception as e: 44 | if isinstance(e, (KeyboardInterrupt, SystemError)): 45 | raise e 46 | get_logger().exception("loop callback error:%s", e) 47 | child_gr = greenlet.greenlet(run_async) 48 | return child_gr.switch() 49 | return self._handlers.append((run_async_fuc, args, kwargs)) 50 | 51 | go = call_async 52 | 53 | async def sleep(self, seconds): 54 | child_gr = greenlet.getcurrent() 55 | main = child_gr.parent 56 | assert main is not None, "must be running in async func" 57 | self.add_timeout(seconds, child_gr.switch) 58 | return main.switch() 59 | 60 | def run(self, callback, *args, **kwargs): 61 | if isinstance(callback, types.CoroutineType): 62 | async def do_coroutine_run(): 63 | try: 64 | await callback 65 | finally: 66 | self.stop() 67 | self.call_async(do_coroutine_run) 68 | return self.start() 69 | 70 | if callback.__code__.co_flags & 0x80 == 0: 71 | def do_run(): 72 | try: 73 | callback(*args, **kwargs) 74 | finally: 75 | self.stop() 76 | self.add_async(do_run) 77 | return self.start() 78 | 79 | async def do_async_run(): 80 | try: 81 | await callback(*args, **kwargs) 82 | finally: 83 | self.stop() 84 | self.call_async(do_async_run) 85 | return self.start() 86 | 87 | return IOLoop 88 | -------------------------------------------------------------------------------- /sevent/coroutines/pipe.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 2020/5/8 3 | # create by: snower 4 | 5 | import greenlet 6 | from ..errors import SocketClosed 7 | from ..event import EventEmitter 8 | 9 | STATE_INITIALIZED = 0x01 10 | STATE_CONNECTING = 0x02 11 | STATE_STREAMING = 0x04 12 | STATE_LISTENING = 0x08 13 | STATE_CLOSING = 0x10 14 | STATE_CLOSED = 0x20 15 | 16 | 17 | def warp_coroutine(BaseSocket, BaseServer): 18 | class PipeSocket(BaseSocket): 19 | _connect_greenlet = None 20 | _send_greenlet = None 21 | _recv_greenlet = None 22 | _close_error_registed = False 
23 | _recv_size = 0 24 | 25 | def _on_close_handle(self, socket): 26 | if self._connect_greenlet is not None: 27 | EventEmitter.off(self, "connect", self._on_connect_handle) 28 | child_gr, self._connect_greenlet = self._connect_greenlet, None 29 | child_gr.throw(SocketClosed()) 30 | if self._send_greenlet is not None: 31 | EventEmitter.off(self, "drain", self._on_send_handle) 32 | child_gr, self._send_greenlet = self._send_greenlet, None 33 | child_gr.throw(SocketClosed()) 34 | if self._recv_greenlet is not None: 35 | EventEmitter.off(self, "data", self._on_recv_handle) 36 | child_gr, self._recv_greenlet = self._recv_greenlet, None 37 | child_gr.throw(SocketClosed()) 38 | 39 | def _on_error_handle(self, socket, e): 40 | if self._connect_greenlet is not None: 41 | EventEmitter.off(self, "connect", self._on_connect_handle) 42 | child_gr, self._connect_greenlet = self._connect_greenlet, None 43 | child_gr.throw(e) 44 | if self._send_greenlet is not None: 45 | EventEmitter.off(self, "drain", self._on_send_handle) 46 | if not self._events["drain"] and not self._events_once["drain"]: 47 | self._has_drain_event = False 48 | child_gr, self._send_greenlet = self._send_greenlet, None 49 | child_gr.throw(e) 50 | if self._recv_greenlet is not None: 51 | EventEmitter.off(self, "data", self._on_recv_handle) 52 | child_gr, self._recv_greenlet = self._recv_greenlet, None 53 | child_gr.throw(e) 54 | 55 | def _on_connect_handle(self, socket): 56 | if self._connect_greenlet is None: 57 | return 58 | EventEmitter.off(self, "connect", self._on_connect_handle) 59 | if self._close_error_registed: 60 | EventEmitter.off(self, "close", self._on_close_handle) 61 | EventEmitter.off(self, "error", self._on_error_handle) 62 | self._close_error_registed = False 63 | child_gr, self._connect_greenlet = self._connect_greenlet, None 64 | return child_gr.switch() 65 | 66 | def _on_send_handle(self, socket): 67 | if self._send_greenlet is None: 68 | return 69 | EventEmitter.off(self, "drain", 
self._on_send_handle) 70 | if not self._events["drain"] and not self._events_once["drain"]: 71 | self._has_drain_event = False 72 | child_gr, self._send_greenlet = self._send_greenlet, None 73 | return child_gr.switch() 74 | 75 | def _on_recv_handle(self, socket, buffer): 76 | if self._recv_greenlet is None: 77 | return 78 | if len(buffer) < self._recv_size: 79 | return 80 | EventEmitter.off(self, "data", self._on_recv_handle) 81 | child_gr, self._recv_greenlet = self._recv_greenlet, None 82 | return child_gr.switch(buffer) 83 | 84 | async def connectof(self, address, timeout=5): 85 | assert self._connect_greenlet is None, "already connecting" 86 | if self._state != STATE_INITIALIZED: 87 | if self._state == STATE_CLOSED: 88 | raise SocketClosed() 89 | return 90 | if not self._close_error_registed: 91 | EventEmitter.on(self, "close", self._on_close_handle) 92 | EventEmitter.on(self, "error", self._on_error_handle) 93 | self._close_error_registed = True 94 | 95 | self._connect_greenlet = greenlet.getcurrent() 96 | main = self._connect_greenlet.parent 97 | assert main is not None, "must be running in async func" 98 | 99 | EventEmitter.on(self, "connect", self._on_connect_handle) 100 | self.connect(address, timeout) 101 | return main.switch() 102 | 103 | async def send(self, data): 104 | assert self._send_greenlet is None, "already sending" 105 | if self._state == STATE_CLOSED: 106 | raise SocketClosed() 107 | if not self._close_error_registed: 108 | EventEmitter.on(self, "close", self._on_close_handle) 109 | EventEmitter.on(self, "error", self._on_error_handle) 110 | self._close_error_registed = True 111 | 112 | if self.write(data): 113 | return 114 | 115 | self._send_greenlet = greenlet.getcurrent() 116 | main = self._send_greenlet.parent 117 | assert main is not None, "must be running in async func" 118 | 119 | EventEmitter.on(self, "drain", self._on_send_handle) 120 | self._has_drain_event = True 121 | return main.switch() 122 | 123 | async def recv(self, size=0): 
124 | assert self._recv_greenlet is None, "already recving" 125 | if self._state == STATE_CLOSED: 126 | raise SocketClosed() 127 | if not self._close_error_registed: 128 | EventEmitter.on(self, "close", self._on_close_handle) 129 | EventEmitter.on(self, "error", self._on_error_handle) 130 | self._close_error_registed = True 131 | 132 | if self._rbuffers and len(self._rbuffers) >= size: 133 | return self._rbuffers 134 | 135 | self._recv_greenlet = greenlet.getcurrent() 136 | main = self._recv_greenlet.parent 137 | assert main is not None, "must be running in async func" 138 | 139 | EventEmitter.on(self, "data", self._on_recv_handle) 140 | self._recv_size = size 141 | return main.switch() 142 | 143 | async def closeof(self): 144 | if self._state == STATE_CLOSED: 145 | return 146 | 147 | child_gr = greenlet.getcurrent() 148 | main = child_gr.parent 149 | assert main is not None, "must be running in async func" 150 | 151 | EventEmitter.on(self, "close", lambda socket: child_gr.switch()) 152 | self.end() 153 | return main.switch() 154 | 155 | async def linkof(self, socket): 156 | assert self._connect_greenlet is None, "already connecting" 157 | assert self._send_greenlet is None, "already sending" 158 | assert self._recv_greenlet is None, "already recving" 159 | if self._state not in (STATE_STREAMING, STATE_CONNECTING): 160 | raise SocketClosed() 161 | if socket._state not in (STATE_STREAMING, STATE_CONNECTING): 162 | raise SocketClosed() 163 | if self._close_error_registed: 164 | EventEmitter.off(self, "close", self._on_close_handle) 165 | EventEmitter.off(self, "error", self._on_error_handle) 166 | self._close_error_registed = False 167 | if hasattr(socket, "_close_error_registed") and socket._close_error_registed: 168 | EventEmitter.off(socket, "close", socket._on_close_handle) 169 | EventEmitter.off(socket, "error", socket._on_error_handle) 170 | socket._close_error_registed = False 171 | 172 | child_gr = greenlet.getcurrent() 173 | main = child_gr.parent 174 | 
assert main is not None, "must be running in async func" 175 | 176 | def do_closed(s): 177 | if self._state != STATE_CLOSED: 178 | return 179 | if socket._state != STATE_CLOSED: 180 | return 181 | child_gr.switch() 182 | 183 | EventEmitter.on(self, "close", do_closed) 184 | EventEmitter.on(socket, "close", do_closed) 185 | BaseSocket.link(self, socket) 186 | return main.switch() 187 | 188 | async def join(self): 189 | if self._state == STATE_CLOSED: 190 | return 191 | 192 | child_gr = greenlet.getcurrent() 193 | main = child_gr.parent 194 | assert main is not None, "must be running in async func" 195 | 196 | EventEmitter.on(self, "close", lambda socket: child_gr.switch()) 197 | return main.switch() 198 | 199 | class PipeServer(BaseServer): 200 | _listen_greenlet = None 201 | _accept_greenlet = None 202 | _close_error_registed = False 203 | 204 | def _on_close_handle(self, socket): 205 | if self._listen_greenlet is not None: 206 | EventEmitter.off(self, "listen", self._on_listen_handle) 207 | child_gr, self._listen_greenlet = self._listen_greenlet, None 208 | child_gr.throw(SocketClosed()) 209 | if self._accept_greenlet is not None: 210 | EventEmitter.off(self, "connection", self._on_accept_handle) 211 | child_gr, self._accept_greenlet = self._accept_greenlet, None 212 | child_gr.throw(SocketClosed()) 213 | 214 | def _on_error_handle(self, socket, e): 215 | if self._listen_greenlet is not None: 216 | EventEmitter.off(self, "listen", self._on_listen_handle) 217 | child_gr, self._listen_greenlet = self._listen_greenlet, None 218 | child_gr.throw(e) 219 | if self._accept_greenlet is not None: 220 | EventEmitter.off(self, "connection", self._on_accept_handle) 221 | child_gr, self._accept_greenlet = self._accept_greenlet, None 222 | child_gr.throw(e) 223 | 224 | def _on_listen_handle(self, server): 225 | if self._listen_greenlet is None: 226 | return 227 | EventEmitter.off(self, "listen", self._on_listen_handle) 228 | child_gr, self._listen_greenlet = 
self._listen_greenlet, None 229 | return child_gr.switch() 230 | 231 | def _on_accept_handle(self, server, connection): 232 | if self._accept_greenlet is None: 233 | return 234 | EventEmitter.off(self, "connection", self._on_accept_handle) 235 | child_gr, self._accept_greenlet = self._accept_greenlet, None 236 | return child_gr.switch(connection) 237 | 238 | async def listenof(self, address, backlog=128): 239 | assert self._listen_greenlet is None, "already listening" 240 | if self._state != STATE_INITIALIZED: 241 | if self._state == STATE_CLOSED: 242 | raise SocketClosed() 243 | return 244 | if not self._close_error_registed: 245 | EventEmitter.on(self, "close", self._on_close_handle) 246 | EventEmitter.on(self, "error", self._on_error_handle) 247 | self._close_error_registed = True 248 | 249 | self._listen_greenlet = greenlet.getcurrent() 250 | main = self._listen_greenlet.parent 251 | assert main is not None, "must be running in async func" 252 | 253 | EventEmitter.on(self, "listen", self._on_listen_handle) 254 | self.listen(address, backlog) 255 | return main.switch() 256 | 257 | async def accept(self): 258 | assert self._accept_greenlet is None, "already accepting" 259 | if self._state == STATE_CLOSED: 260 | raise SocketClosed() 261 | if not self._close_error_registed: 262 | EventEmitter.on(self, "close", self._on_close_handle) 263 | EventEmitter.on(self, "error", self._on_error_handle) 264 | self._close_error_registed = True 265 | 266 | self._accept_greenlet = greenlet.getcurrent() 267 | main = self._accept_greenlet.parent 268 | assert main is not None, "must be running in async func" 269 | 270 | EventEmitter.on(self, "connection", self._on_accept_handle) 271 | return main.switch() 272 | 273 | async def closeof(self): 274 | if self._state == STATE_CLOSED: 275 | return 276 | 277 | child_gr = greenlet.getcurrent() 278 | main = child_gr.parent 279 | assert main is not None, "must be running in async func" 280 | 281 | EventEmitter.on(self, "close", lambda server: 
child_gr.switch()) 282 | self.close() 283 | return main.switch() 284 | 285 | async def join(self): 286 | if self._state == STATE_CLOSED: 287 | return 288 | 289 | child_gr = greenlet.getcurrent() 290 | main = child_gr.parent 291 | assert main is not None, "must be running in async func" 292 | 293 | EventEmitter.on(self, "close", lambda server: child_gr.switch()) 294 | return main.switch() 295 | 296 | return PipeSocket, PipeServer 297 | -------------------------------------------------------------------------------- /sevent/coroutines/tcp.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 2020/5/8 3 | # create by: snower 4 | 5 | import greenlet 6 | from ..errors import SocketClosed 7 | from ..event import EventEmitter 8 | 9 | STATE_INITIALIZED = 0x01 10 | STATE_CONNECTING = 0x02 11 | STATE_STREAMING = 0x04 12 | STATE_LISTENING = 0x08 13 | STATE_CLOSING = 0x10 14 | STATE_CLOSED = 0x20 15 | 16 | 17 | def warp_coroutine(BaseSocket, BaseServer, BaseWarpSocket, BaseWarpServer): 18 | class Socket(BaseSocket): 19 | _connect_greenlet = None 20 | _send_greenlet = None 21 | _recv_greenlet = None 22 | _close_error_registed = False 23 | _recv_size = 0 24 | 25 | def _on_close_handle(self, socket): 26 | if self._connect_greenlet is not None: 27 | EventEmitter.off(self, "connect", self._on_connect_handle) 28 | child_gr, self._connect_greenlet = self._connect_greenlet, None 29 | child_gr.throw(SocketClosed()) 30 | if self._send_greenlet is not None: 31 | EventEmitter.off(self, "drain", self._on_send_handle) 32 | child_gr, self._send_greenlet = self._send_greenlet, None 33 | child_gr.throw(SocketClosed()) 34 | if self._recv_greenlet is not None: 35 | EventEmitter.off(self, "data", self._on_recv_handle) 36 | child_gr, self._recv_greenlet = self._recv_greenlet, None 37 | child_gr.throw(SocketClosed()) 38 | 39 | def _on_error_handle(self, socket, e): 40 | if self._connect_greenlet is not None: 41 | 
EventEmitter.off(self, "connect", self._on_connect_handle) 42 | child_gr, self._connect_greenlet = self._connect_greenlet, None 43 | child_gr.throw(e) 44 | if self._send_greenlet is not None: 45 | EventEmitter.off(self, "drain", self._on_send_handle) 46 | if not self._events["drain"] and not self._events_once["drain"]: 47 | self._has_drain_event = False 48 | child_gr, self._send_greenlet = self._send_greenlet, None 49 | child_gr.throw(e) 50 | if self._recv_greenlet is not None: 51 | EventEmitter.off(self, "data", self._on_recv_handle) 52 | child_gr, self._recv_greenlet = self._recv_greenlet, None 53 | child_gr.throw(e) 54 | 55 | def _on_connect_handle(self, socket): 56 | if self._connect_greenlet is None: 57 | return 58 | EventEmitter.off(self, "connect", self._on_connect_handle) 59 | if self._close_error_registed: 60 | EventEmitter.off(self, "close", self._on_close_handle) 61 | EventEmitter.off(self, "error", self._on_error_handle) 62 | self._close_error_registed = False 63 | child_gr, self._connect_greenlet = self._connect_greenlet, None 64 | return child_gr.switch() 65 | 66 | def _on_send_handle(self, socket): 67 | if self._send_greenlet is None: 68 | return 69 | EventEmitter.off(self, "drain", self._on_send_handle) 70 | if not self._events["drain"] and not self._events_once["drain"]: 71 | self._has_drain_event = False 72 | child_gr, self._send_greenlet = self._send_greenlet, None 73 | return child_gr.switch() 74 | 75 | def _on_recv_handle(self, socket, buffer): 76 | if self._recv_greenlet is None: 77 | return 78 | if len(buffer) < self._recv_size: 79 | return 80 | EventEmitter.off(self, "data", self._on_recv_handle) 81 | child_gr, self._recv_greenlet = self._recv_greenlet, None 82 | return child_gr.switch(buffer) 83 | 84 | async def connectof(self, address, timeout=5): 85 | assert self._connect_greenlet is None, "already connecting" 86 | if self._state != STATE_INITIALIZED: 87 | if self._state == STATE_CLOSED: 88 | raise SocketClosed() 89 | return 90 | if not 
self._close_error_registed: 91 | EventEmitter.on(self, "close", self._on_close_handle) 92 | EventEmitter.on(self, "error", self._on_error_handle) 93 | self._close_error_registed = True 94 | 95 | self._connect_greenlet = greenlet.getcurrent() 96 | main = self._connect_greenlet.parent 97 | assert main is not None, "must be running in async func" 98 | 99 | EventEmitter.on(self, "connect", self._on_connect_handle) 100 | self.connect(address, timeout) 101 | return main.switch() 102 | 103 | async def send(self, data): 104 | assert self._send_greenlet is None, "already sending" 105 | if self._state == STATE_CLOSED: 106 | raise SocketClosed() 107 | if not self._close_error_registed: 108 | EventEmitter.on(self, "close", self._on_close_handle) 109 | EventEmitter.on(self, "error", self._on_error_handle) 110 | self._close_error_registed = True 111 | 112 | if self.write(data): 113 | return 114 | 115 | self._send_greenlet = greenlet.getcurrent() 116 | main = self._send_greenlet.parent 117 | assert main is not None, "must be running in async func" 118 | 119 | EventEmitter.on(self, "drain", self._on_send_handle) 120 | self._has_drain_event = True 121 | return main.switch() 122 | 123 | async def recv(self, size=0): 124 | assert self._recv_greenlet is None, "already recving" 125 | if self._state == STATE_CLOSED: 126 | raise SocketClosed() 127 | 128 | if self._rbuffers and len(self._rbuffers) >= size: 129 | return self._rbuffers 130 | 131 | if not self._close_error_registed: 132 | EventEmitter.on(self, "close", self._on_close_handle) 133 | EventEmitter.on(self, "error", self._on_error_handle) 134 | self._close_error_registed = True 135 | 136 | self._recv_greenlet = greenlet.getcurrent() 137 | main = self._recv_greenlet.parent 138 | assert main is not None, "must be running in async func" 139 | 140 | EventEmitter.on(self, "data", self._on_recv_handle) 141 | self._recv_size = size 142 | return main.switch() 143 | 144 | async def closeof(self): 145 | if self._state == STATE_CLOSED: 146 
| return 147 | 148 | child_gr = greenlet.getcurrent() 149 | main = child_gr.parent 150 | assert main is not None, "must be running in async func" 151 | 152 | EventEmitter.on(self, "close", lambda socket: child_gr.switch()) 153 | self.end() 154 | return main.switch() 155 | 156 | async def linkof(self, socket): 157 | assert self._connect_greenlet is None, "already connecting" 158 | assert self._send_greenlet is None, "already sending" 159 | assert self._recv_greenlet is None, "already recving" 160 | if self._state not in (STATE_STREAMING, STATE_CONNECTING): 161 | raise SocketClosed() 162 | if socket._state not in (STATE_STREAMING, STATE_CONNECTING): 163 | raise SocketClosed() 164 | if self._close_error_registed: 165 | EventEmitter.off(self, "close", self._on_close_handle) 166 | EventEmitter.off(self, "error", self._on_error_handle) 167 | self._close_error_registed = False 168 | if hasattr(socket, "_close_error_registed") and socket._close_error_registed: 169 | EventEmitter.off(socket, "close", socket._on_close_handle) 170 | EventEmitter.off(socket, "error", socket._on_error_handle) 171 | socket._close_error_registed = False 172 | 173 | child_gr = greenlet.getcurrent() 174 | main = child_gr.parent 175 | assert main is not None, "must be running in async func" 176 | 177 | def do_closed(s): 178 | if self._state != STATE_CLOSED: 179 | return 180 | if socket._state != STATE_CLOSED: 181 | return 182 | child_gr.switch() 183 | 184 | EventEmitter.on(self, "close", do_closed) 185 | EventEmitter.on(socket, "close", do_closed) 186 | BaseSocket.link(self, socket) 187 | return main.switch() 188 | 189 | async def join(self): 190 | if self._state == STATE_CLOSED: 191 | return 192 | 193 | child_gr = greenlet.getcurrent() 194 | main = child_gr.parent 195 | assert main is not None, "must be running in async func" 196 | 197 | EventEmitter.on(self, "close", lambda socket: child_gr.switch()) 198 | return main.switch() 199 | 200 | class Server(BaseServer): 201 | _listen_greenlet = None 202 
| _accept_greenlet = None 203 | _close_error_registed = False 204 | 205 | def _on_close_handle(self, socket): 206 | if self._listen_greenlet is not None: 207 | EventEmitter.off(self, "listen", self._on_listen_handle) 208 | child_gr, self._listen_greenlet = self._listen_greenlet, None 209 | child_gr.throw(SocketClosed()) 210 | if self._accept_greenlet is not None: 211 | EventEmitter.off(self, "connection", self._on_accept_handle) 212 | child_gr, self._accept_greenlet = self._accept_greenlet, None 213 | child_gr.throw(SocketClosed()) 214 | 215 | def _on_error_handle(self, socket, e): 216 | if self._listen_greenlet is not None: 217 | EventEmitter.off(self, "listen", self._on_listen_handle) 218 | child_gr, self._listen_greenlet = self._listen_greenlet, None 219 | child_gr.throw(e) 220 | if self._accept_greenlet is not None: 221 | EventEmitter.off(self, "connection", self._on_accept_handle) 222 | child_gr, self._accept_greenlet = self._accept_greenlet, None 223 | child_gr.throw(e) 224 | 225 | def _on_listen_handle(self, server): 226 | if self._listen_greenlet is None: 227 | return 228 | EventEmitter.off(self, "listen", self._on_listen_handle) 229 | child_gr, self._listen_greenlet = self._listen_greenlet, None 230 | return child_gr.switch() 231 | 232 | def _on_accept_handle(self, server, connection): 233 | if self._accept_greenlet is None: 234 | return 235 | EventEmitter.off(self, "connection", self._on_accept_handle) 236 | child_gr, self._accept_greenlet = self._accept_greenlet, None 237 | return child_gr.switch(connection) 238 | 239 | async def listenof(self, address, backlog=128): 240 | assert self._listen_greenlet is None, "already listening" 241 | if self._state != STATE_INITIALIZED: 242 | if self._state == STATE_CLOSED: 243 | raise SocketClosed() 244 | return 245 | if not self._close_error_registed: 246 | EventEmitter.on(self, "close", self._on_close_handle) 247 | EventEmitter.on(self, "error", self._on_error_handle) 248 | self._close_error_registed = True 249 | 
250 | self._listen_greenlet = greenlet.getcurrent() 251 | main = self._listen_greenlet.parent 252 | assert main is not None, "must be running in async func" 253 | 254 | EventEmitter.on(self, "listen", self._on_listen_handle) 255 | self.listen(address, backlog) 256 | return main.switch() 257 | 258 | async def accept(self): 259 | assert self._accept_greenlet is None, "already accepting" 260 | if self._state == STATE_CLOSED: 261 | raise SocketClosed() 262 | if not self._close_error_registed: 263 | EventEmitter.on(self, "close", self._on_close_handle) 264 | EventEmitter.on(self, "error", self._on_error_handle) 265 | self._close_error_registed = True 266 | 267 | self._accept_greenlet = greenlet.getcurrent() 268 | main = self._accept_greenlet.parent 269 | assert main is not None, "must be running in async func" 270 | 271 | EventEmitter.on(self, "connection", self._on_accept_handle) 272 | return main.switch() 273 | 274 | async def closeof(self): 275 | if self._state == STATE_CLOSED: 276 | return 277 | 278 | child_gr = greenlet.getcurrent() 279 | main = child_gr.parent 280 | assert main is not None, "must be running in async func" 281 | 282 | EventEmitter.on(self, "close", lambda server: child_gr.switch()) 283 | self.close() 284 | return main.switch() 285 | 286 | async def join(self): 287 | if self._state == STATE_CLOSED: 288 | return 289 | 290 | child_gr = greenlet.getcurrent() 291 | main = child_gr.parent 292 | assert main is not None, "must be running in async func" 293 | 294 | EventEmitter.on(self, "close", lambda server: child_gr.switch()) 295 | return main.switch() 296 | 297 | 298 | class WarpSocket(BaseWarpSocket, Socket): 299 | def __init__(self, socket=None, loop=None, dns_resolver=None, max_buffer_size=None): 300 | BaseWarpSocket.__init__(self, socket or Socket(loop=loop, dns_resolver=dns_resolver, 301 | max_buffer_size=max_buffer_size), 302 | loop, dns_resolver, max_buffer_size) 303 | 304 | 305 | class WarpServer(BaseWarpServer, Server): 306 | def __init__(self, 
socket=None, loop=None, dns_resolver=None): 307 | BaseWarpServer.__init__(self, socket or Server(loop=loop, dns_resolver=dns_resolver), loop, dns_resolver) 308 | 309 | def handshake(self, socket): 310 | self._loop.call_async(self.handshakeof, socket) 311 | 312 | async def handshakeof(self, socket): 313 | max_buffer_size = socket._max_buffer_size if hasattr(socket, "_max_buffer_size") else None 314 | self.emit_connection(self, WarpSocket(socket, loop=self._loop, max_buffer_size=max_buffer_size)) 315 | 316 | 317 | return Socket, Server, WarpSocket, WarpServer 318 | -------------------------------------------------------------------------------- /sevent/coroutines/udp.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 2020/5/8 3 | # create by: snower 4 | 5 | 6 | import greenlet 7 | from ..errors import SocketClosed 8 | from ..event import EventEmitter 9 | 10 | STATE_INITIALIZED = 0x01 11 | STATE_CONNECTING = 0x02 12 | STATE_STREAMING = 0x04 13 | STATE_BINDING = 0x08 14 | STATE_CLOSING = 0x10 15 | STATE_CLOSED = 0x20 16 | 17 | 18 | def warp_coroutine(BaseSocket, BaseServer): 19 | class Socket(BaseSocket): 20 | _send_greenlet = None 21 | _recv_greenlet = None 22 | _close_error_registed = False 23 | _recv_size = 0 24 | 25 | def _on_close_handle(self, socket): 26 | if self._send_greenlet is not None: 27 | EventEmitter.off(self, "drain", self._on_send_handle) 28 | child_gr, self._send_greenlet = self._send_greenlet, None 29 | child_gr.throw(SocketClosed()) 30 | if self._recv_greenlet is not None: 31 | EventEmitter.off(self, "data", self._on_recv_handle) 32 | child_gr, self._recv_greenlet = self._recv_greenlet, None 33 | child_gr.throw(SocketClosed()) 34 | 35 | def _on_error_handle(self, socket, e): 36 | if self._send_greenlet is not None: 37 | EventEmitter.off(self, "drain", self._on_send_handle) 38 | if not self._events["drain"] and not self._events_once["drain"]: 39 | self._has_drain_event = False 40 | 
child_gr, self._send_greenlet = self._send_greenlet, None 41 | child_gr.throw(e) 42 | if self._recv_greenlet is not None: 43 | EventEmitter.off(self, "data", self._on_recv_handle) 44 | child_gr, self._recv_greenlet = self._recv_greenlet, None 45 | child_gr.throw(e) 46 | 47 | def _on_send_handle(self, socket): 48 | if self._send_greenlet is None: 49 | return 50 | EventEmitter.off(self, "drain", self._on_send_handle) 51 | if not self._events["drain"] and not self._events_once["drain"]: 52 | self._has_drain_event = False 53 | child_gr, self._send_greenlet = self._send_greenlet, None 54 | return child_gr.switch() 55 | 56 | def _on_recv_handle(self, socket, buffer): 57 | if self._recv_greenlet is None: 58 | return 59 | if len(buffer) < self._recv_size: 60 | return 61 | EventEmitter.off(self, "data", self._on_recv_handle) 62 | child_gr, self._recv_greenlet = self._recv_greenlet, None 63 | return child_gr.switch(buffer) 64 | 65 | async def sendto(self, data): 66 | assert self._send_greenlet is None, "already sending" 67 | if self._state == STATE_CLOSED: 68 | raise SocketClosed() 69 | if not self._close_error_registed: 70 | EventEmitter.on(self, "close", self._on_close_handle) 71 | EventEmitter.on(self, "error", self._on_error_handle) 72 | self._close_error_registed = True 73 | 74 | if self.write(data): 75 | return 76 | 77 | self._send_greenlet = greenlet.getcurrent() 78 | main = self._send_greenlet.parent 79 | assert main is not None, "must be running in async func" 80 | 81 | EventEmitter.on(self, "drain", self._on_send_handle) 82 | self._has_drain_event = True 83 | return main.switch() 84 | 85 | async def recvfrom(self, size=0): 86 | assert self._recv_greenlet is None, "already recving" 87 | if self._state == STATE_CLOSED: 88 | raise SocketClosed() 89 | 90 | if self._rbuffers and len(self._rbuffers) >= size: 91 | return self._rbuffers 92 | 93 | if not self._close_error_registed: 94 | EventEmitter.on(self, "close", self._on_close_handle) 95 | EventEmitter.on(self, 
"error", self._on_error_handle) 96 | self._close_error_registed = True 97 | 98 | self._recv_greenlet = greenlet.getcurrent() 99 | main = self._recv_greenlet.parent 100 | assert main is not None, "must be running in async func" 101 | 102 | EventEmitter.on(self, "data", self._on_recv_handle) 103 | return main.switch() 104 | 105 | async def closeof(self): 106 | if self._state == STATE_CLOSED: 107 | return 108 | 109 | child_gr = greenlet.getcurrent() 110 | main = child_gr.parent 111 | assert main is not None, "must be running in async func" 112 | 113 | EventEmitter.on(self, "close", lambda socket: child_gr.switch()) 114 | self.end() 115 | return main.switch() 116 | 117 | @classmethod 118 | async def linkof(cls, socket, address, timeout=900): 119 | if hasattr(socket, "_send_greenlet"): 120 | assert socket._send_greenlet is None, "already sending" 121 | if hasattr(socket, "_recv_greenlet"): 122 | assert socket._recv_greenlet is None, "already recving" 123 | if socket._state == STATE_CLOSED: 124 | raise SocketClosed() 125 | if hasattr(socket, "_close_error_registed") and socket._close_error_registed: 126 | EventEmitter.off(socket, "close", socket._on_close_handle) 127 | EventEmitter.off(socket, "error", socket._on_error_handle) 128 | socket._close_error_registed = False 129 | 130 | child_gr = greenlet.getcurrent() 131 | main = child_gr.parent 132 | assert main is not None, "must be running in async func" 133 | 134 | def do_closed(s): 135 | if socket._state != STATE_CLOSED: 136 | return 137 | child_gr.switch() 138 | 139 | EventEmitter.on(socket, "close", do_closed) 140 | BaseSocket.link(socket, address, timeout) 141 | return main.switch() 142 | 143 | async def join(self): 144 | if self._state == STATE_CLOSED: 145 | return 146 | 147 | child_gr = greenlet.getcurrent() 148 | main = child_gr.parent 149 | assert main is not None, "must be running in async func" 150 | 151 | EventEmitter.on(self, "close", lambda socket: child_gr.switch()) 152 | return main.switch() 153 | 154 | 
class Server(BaseServer, Socket): 155 | _bind_greenlet = None 156 | 157 | def _on_close_handle(self, socket): 158 | if self._bind_greenlet is not None: 159 | EventEmitter.off(self, "bind", self._on_bind_handle) 160 | child_gr, self._bind_greenlet = self._bind_greenlet, None 161 | child_gr.throw(SocketClosed()) 162 | Socket._on_close_handle(self, socket) 163 | 164 | def _on_error_handle(self, socket, e): 165 | if self._bind_greenlet is not None: 166 | EventEmitter.off(self, "bind", self._on_bind_handle) 167 | child_gr, self._bind_greenlet = self._bind_greenlet, None 168 | child_gr.throw(e) 169 | Socket._on_error_handle(self, socket, e) 170 | 171 | def _on_bind_handle(self, server): 172 | if self._bind_greenlet is None: 173 | return 174 | EventEmitter.off(self, "bind", self._on_bind_handle) 175 | child_gr, self._bind_greenlet = self._bind_greenlet, None 176 | return child_gr.switch() 177 | 178 | async def bindof(self, address): 179 | assert self._bind_greenlet is None, "already binding" 180 | if self._state != STATE_INITIALIZED: 181 | if self._state == STATE_CLOSED: 182 | raise SocketClosed() 183 | return 184 | if not self._close_error_registed: 185 | EventEmitter.on(self, "close", self._on_close_handle) 186 | EventEmitter.on(self, "error", self._on_error_handle) 187 | self._close_error_registed = True 188 | 189 | self._bind_greenlet = greenlet.getcurrent() 190 | main = self._bind_greenlet.parent 191 | assert main is not None, "must be running in async func" 192 | 193 | EventEmitter.on(self, "bind", self._on_bind_handle) 194 | self.bind(address) 195 | return main.switch() 196 | 197 | return Socket, Server 198 | -------------------------------------------------------------------------------- /sevent/dns.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 15/8/6 3 | # create by: snower 4 | 5 | import sys 6 | import os 7 | import time 8 | import socket 9 | import dnslib 10 | from collections import 
defaultdict 11 | from .loop import instance 12 | from .event import EventEmitter 13 | from .utils import ensure_bytes, is_py3 14 | 15 | QTYPE_ANY = 255 16 | QTYPE_A = 1 17 | QTYPE_AAAA = 28 18 | QTYPE_CNAME = 5 19 | QTYPE_NS = 2 20 | QCLASS_IN = 1 21 | 22 | STATUS_OPENED = 0 23 | STATUS_CLOSED = 1 24 | 25 | 26 | class DNSCache(object): 27 | def __init__(self, loop, default_ttl=60): 28 | self._loop = loop or instance() 29 | self.default_ttl = default_ttl 30 | self._cache = defaultdict(list) 31 | self._last_resolve_time = time.time() 32 | 33 | def append(self, hostname, rrs): 34 | rrcs = [] 35 | now = time.time() 36 | for rrc in self._cache[hostname]: 37 | if rrc.ttl_expried_time <= now: 38 | continue 39 | rrcs.append(rrc) 40 | self._cache[hostname] = rrcs 41 | 42 | for rr in rrs: 43 | has_cache = False 44 | for rrc in self._cache[hostname]: 45 | if rr == rrc: 46 | has_cache = True 47 | break 48 | if has_cache: 49 | continue 50 | setattr(rr, "ttl_expried_time", time.time() + (rr.ttl or self.default_ttl)) 51 | self._cache[hostname].append(rr) 52 | 53 | if self._last_resolve_time - now >= 120: 54 | self._loop.add_async(self.resolve) 55 | self._last_resolve_time = now 56 | 57 | def get(self, hostname): 58 | if hostname not in self._cache or not self._cache[hostname]: 59 | return None, hostname 60 | 61 | now = time.time() 62 | if self._last_resolve_time - now >= 120: 63 | self._loop.add_async(self.resolve) 64 | self._last_resolve_time = now 65 | 66 | for rrc in self._cache[hostname]: 67 | if rrc.ttl_expried_time <= now: 68 | continue 69 | return str(rrc.rdata), hostname 70 | return None, hostname 71 | 72 | def remove(self, hostname): 73 | if hostname in self._cache: 74 | self._cache.pop(hostname) 75 | 76 | now = time.time() 77 | if self._last_resolve_time - now >= 120: 78 | self._loop.add_async(self.resolve) 79 | self._last_resolve_time = now 80 | 81 | def resolve(self): 82 | now = time.time() 83 | epried_hostnames = [] 84 | for hostname, rrs in self._cache.items(): 85 | 
rrcs = [] 86 | for rr in rrs: 87 | if rr.ttl_expried_time <= now: 88 | continue 89 | rrcs.append(rr) 90 | if rrcs: 91 | self._cache[hostname] = rrcs 92 | else: 93 | epried_hostnames.append(hostname) 94 | 95 | for hostname in epried_hostnames: 96 | self.remove(hostname) 97 | 98 | def clear(self): 99 | self._cache = defaultdict(list) 100 | 101 | def __getitem__(self, hostname): 102 | return self.get(hostname)[0] 103 | 104 | def __delitem__(self, hostname): 105 | return self.remove(hostname) 106 | 107 | def __contains__(self, hostname): 108 | return bool(self.get(hostname)[0]) 109 | 110 | 111 | class DnsQueryState(object): 112 | def __init__(self, hostname, v4server_count, v6server_count): 113 | self.hostname = hostname 114 | self.v4bv4_count = v4server_count 115 | self.v6bv6_count = v6server_count 116 | self.v6bv4_count = v4server_count 117 | self.v4bv6_count = v6server_count 118 | self.v4bv4_loading_count = 0 119 | self.v6bv6_loading_count = 0 120 | self.v6bv4_loading_count = 0 121 | self.v4bv6_loading_count = 0 122 | self.v4bv4_done_count = 0 123 | self.v6bv6_done_count = 0 124 | self.v6bv4_done_count = 0 125 | self.v4bv6_done_count = 0 126 | self.callbacks = [] 127 | 128 | def v4bv4_done(self): 129 | return self.v4bv4_done_count >= self.v4bv4_count 130 | 131 | def v6bv6_done(self): 132 | return self.v6bv6_done_count >= self.v6bv6_count 133 | 134 | def v6bv4_done(self): 135 | return self.v6bv4_done_count >= self.v6bv4_count 136 | 137 | def v4bv6_done(self): 138 | return self.v4bv6_done_count >= self.v4bv6_count 139 | 140 | def done(self): 141 | return self.v4bv4_done_count >= self.v4bv4_count and self.v6bv6_done_count >= self.v6bv6_count \ 142 | and self.v6bv4_done_count >= self.v6bv4_count and self.v4bv6_done_count >= self.v4bv6_count 143 | 144 | def append(self, callback): 145 | self.callbacks.append(callback) 146 | 147 | 148 | class DNSResolver(EventEmitter): 149 | _instance = None 150 | 151 | @classmethod 152 | def default(cls): 153 | if cls._instance is None: 
154 | cls._instance = cls() 155 | return cls._instance 156 | 157 | def __init__(self, loop=None, servers=None, hosts=None, resolve_timeout=None, resend_timeout=0.5): 158 | EventEmitter.__init__(self) 159 | 160 | self._loop = loop or instance() 161 | self._servers = [] 162 | self._server6s = [] 163 | self._hosts = hosts or {} 164 | 165 | self._cache = DNSCache(self._loop) 166 | self._queue = {} 167 | self._socket = None 168 | self._socket6 = None 169 | self._status = STATUS_OPENED 170 | 171 | if not servers: 172 | servers = self.parse_resolv() 173 | for server in servers: 174 | inet_type = self.is_ip(server) 175 | if inet_type == socket.AF_INET: 176 | self._servers.append(server) 177 | elif inet_type == socket.AF_INET6: 178 | self._server6s.append(server) 179 | 180 | if not hosts: 181 | self.parse_hosts() 182 | 183 | self._resolve_timeout = resolve_timeout if resolve_timeout else ((len(self._servers) + len(self._server6s)) 184 | * resend_timeout + 4) 185 | self._resend_timeout = resend_timeout 186 | 187 | def on_resolve(self, callback): 188 | self.on("resolve", callback) 189 | 190 | def off_resolve(self, callback): 191 | self.on("resolve", callback) 192 | 193 | def once_resolve(self, callback): 194 | self.once("resolve", callback) 195 | 196 | def noce_resolve(self, callback): 197 | self.once("resolve", callback) 198 | 199 | def create_socket(self): 200 | from .udp import Socket 201 | self._socket = Socket(self._loop) 202 | self._socket.on_data(self.on_data) 203 | self._socket.on_close(self.on_close) 204 | self._socket.on_error(lambda s, e: None) 205 | 206 | def create_socket6(self): 207 | from .udp import Socket 208 | self._socket6 = Socket(self._loop) 209 | self._socket6.on_data(self.on_data6) 210 | self._socket6.on_close(self.on_close) 211 | self._socket6.on_error(lambda s, e: None) 212 | 213 | def parse_resolv(self): 214 | try: 215 | servers = [server for server in str(os.environ.get("SEVENT_NAMESERVER", '')).split(",") 216 | if server and self.is_ip(server)] 217 
| if servers: 218 | return servers 219 | except: 220 | pass 221 | 222 | servers = [] 223 | etc_path = '/etc/resolv.conf' 224 | if 'WINDIR' in os.environ: 225 | etc_path = os.environ['WINDIR'] + '/system32/drivers/etc/resolv.conf' 226 | try: 227 | with open(etc_path, 'rb') as f: 228 | content = f.readlines() 229 | for line in content: 230 | line = line.strip() 231 | if not line or line[:1] == b'#' or not line.startswith(b'nameserver'): 232 | continue 233 | 234 | if is_py3 and type(line) != str: 235 | parts = line.decode("utf-8").split() 236 | else: 237 | parts = line.split() 238 | if len(parts) < 2: 239 | continue 240 | server = parts[1].strip() 241 | if not self.is_ip(server): 242 | continue 243 | servers.append(server) 244 | except IOError: 245 | pass 246 | if not servers: 247 | if sys.platform == "win32": 248 | try: 249 | from .win32util import get_dns_info 250 | servers = get_dns_info().nameservers 251 | if servers: 252 | return servers 253 | except: 254 | pass 255 | return servers or ['8.8.4.4', '8.8.8.8', '114.114.114.114'] 256 | 257 | def parse_hosts(self): 258 | etc_path = '/etc/hosts' 259 | if 'WINDIR' in os.environ: 260 | etc_path = os.environ['WINDIR'] + '/system32/drivers/etc/hosts' 261 | try: 262 | with open(etc_path, 'rb') as f: 263 | for line in f.readlines(): 264 | line = line.strip() 265 | if not line or line[:1] == b'#': 266 | continue 267 | 268 | if is_py3 and type(line) != str: 269 | parts = line.decode("utf-8").split() 270 | else: 271 | parts = line.split() 272 | if len(parts) < 2: 273 | continue 274 | ip = parts[0].strip() 275 | if not self.is_ip(ip): 276 | continue 277 | 278 | for i in range(1, len(parts)): 279 | hostname = parts[i].strip() 280 | if not hostname: 281 | continue 282 | self._hosts[ensure_bytes(hostname)] = ip 283 | except IOError: 284 | self._hosts[b'localhost'] = '127.0.0.1' 285 | 286 | def call_callback(self, hostname, ip): 287 | if hostname not in self._queue: 288 | return 289 | query_state = self._queue.pop(hostname) 290 | 
hostname = hostname.decode("utf-8") if is_py3 and type(hostname) != str else hostname 291 | for callback in query_state.callbacks: 292 | self._loop.add_async(callback, hostname, ip) 293 | self._loop.add_async(self.emit_resolve, self, hostname, ip) 294 | 295 | def on_data(self, socket, buffer): 296 | while buffer: 297 | data, address = buffer.next() 298 | try: 299 | answer = dnslib.DNSRecord.parse(data) 300 | hostname = b".".join(answer.q.qname.label) 301 | if hostname not in self._queue: 302 | continue 303 | rrs = [rr for rr in answer.rr if rr.rtype == answer.q.qtype] 304 | query_state = self._queue[hostname] 305 | if answer.q.qtype == 28: 306 | query_state.v6bv4_loading_count -= 1 307 | query_state.v6bv4_done_count += 1 308 | else: 309 | query_state.v4bv4_loading_count -= 1 310 | query_state.v4bv4_done_count += 1 311 | 312 | if rrs: 313 | self._cache.append(hostname, rrs) 314 | self.call_callback(hostname, str(rrs[0].rdata)) 315 | elif query_state.done(): 316 | self.call_callback(hostname, None) 317 | elif query_state.v4bv4_done() and query_state.v6bv4_loading_count <= 0 \ 318 | and query_state.v6bv4_done_count <= 0: 319 | self.send_req(hostname, query_state, 0, True) 320 | except Exception: 321 | pass 322 | 323 | def on_data6(self, socket, buffer): 324 | while buffer: 325 | data, address = buffer.next() 326 | try: 327 | answer = dnslib.DNSRecord.parse(data) 328 | hostname = b".".join(answer.q.qname.label) 329 | if hostname not in self._queue: 330 | continue 331 | rrs = [rr for rr in answer.rr if rr.rtype == answer.q.qtype] 332 | query_state = self._queue[hostname] 333 | if answer.q.qtype == 1: 334 | query_state.v4bv6_loading_count -= 1 335 | query_state.v4bv6_done_count += 1 336 | else: 337 | query_state.v6bv6_loading_count -= 1 338 | query_state.v6bv6_done_count += 1 339 | 340 | if rrs: 341 | self._cache.append(hostname, rrs) 342 | self.call_callback(hostname, str(rrs[0].rdata)) 343 | elif query_state.done(): 344 | self.call_callback(hostname, None) 345 | elif 
query_state.v6bv6_done() and query_state.v4bv6_loading_count <= 0 \ 346 | and query_state.v4bv6_done_count <= 0: 347 | self.send_req6(hostname, query_state, 0, True) 348 | except Exception: 349 | pass 350 | 351 | def send_req(self, hostname, query_state, server_index=0, is_query_v6=False): 352 | if not self._servers: 353 | return 354 | servers = self._servers 355 | 356 | question = dnslib.DNSRecord.question(hostname, 'AAAA' if is_query_v6 else 'A') 357 | if self._socket is None: 358 | self.create_socket() 359 | seq_data = bytes(question.pack()) 360 | for _ in range(3): 361 | if server_index >= len(servers): 362 | break 363 | if is_query_v6: 364 | query_state.v6bv4_loading_count += 1 365 | else: 366 | query_state.v4bv4_loading_count += 1 367 | self._socket.write((seq_data, (servers[server_index], 53))) 368 | server_index += 1 369 | 370 | def on_timeout(): 371 | if server_index >= len(servers): 372 | if hostname in self._queue and query_state.v6bv4_loading_count <= 0 \ 373 | and query_state.v6bv4_done_count <= 0: 374 | self.send_req(hostname, query_state, 0, True) 375 | return 376 | if hostname in self._queue: 377 | self.send_req(hostname, query_state, server_index, is_query_v6) 378 | if server_index >= len(servers) and is_query_v6: 379 | return 380 | self._loop.add_timeout(self._resend_timeout, on_timeout) 381 | 382 | def send_req6(self, hostname, query_state, server_index=0, is_query_v4=False): 383 | if not self._server6s: 384 | return 385 | servers = self._server6s 386 | 387 | question = dnslib.DNSRecord.question(hostname, 'A' if is_query_v4 else 'AAAA') 388 | if self._socket6 is None: 389 | self.create_socket6() 390 | seq_data = bytes(question.pack()) 391 | for _ in range(3): 392 | if server_index >= len(servers): 393 | break 394 | if is_query_v4: 395 | query_state.v4bv6_loading_count += 1 396 | else: 397 | query_state.v6bv6_loading_count += 1 398 | self._socket6.write((seq_data, (servers[server_index], 53))) 399 | server_index += 1 400 | 401 | def on_timeout(): 
402 | if server_index >= len(servers): 403 | if hostname in self._queue and query_state.v4bv6_loading_count <= 0 \ 404 | and query_state.v4bv6_done_count <= 0: 405 | self.send_req6(hostname, query_state, 0, True) 406 | return 407 | if hostname in self._queue: 408 | self.send_req6(hostname, query_state, server_index, is_query_v4) 409 | if server_index >= len(servers) and is_query_v4: 410 | return 411 | self._loop.add_timeout(self._resend_timeout, on_timeout) 412 | 413 | def resolve(self, hostname, callback, timeout=None): 414 | if self._status == STATUS_CLOSED: 415 | return callback(hostname.decode("utf-8") if is_py3 and type(hostname) != str else hostname, None) 416 | 417 | hostname = ensure_bytes(hostname) 418 | if not hostname: 419 | return callback(hostname.decode("utf-8") if is_py3 and type(hostname) != str else hostname, None) 420 | elif self.is_ip(hostname): 421 | return callback(hostname.decode("utf-8") if is_py3 and type(hostname) != str else hostname, hostname) 422 | elif hostname in self._hosts: 423 | return callback(hostname.decode("utf-8") if is_py3 and type(hostname) != str else hostname, self._hosts[hostname]) 424 | elif hostname in self._cache: 425 | return callback(hostname.decode("utf-8") if is_py3 and type(hostname) != str else hostname, self._cache[hostname]) 426 | else: 427 | try: 428 | if hostname not in self._queue: 429 | self._queue[hostname] = query_state = DnsQueryState(hostname, len(self._servers), len(self._server6s)) 430 | query_state.append(callback) 431 | self.send_req(hostname, query_state) 432 | self.send_req6(hostname, query_state) 433 | 434 | if hostname in self._queue: 435 | def on_timeout(): 436 | if hostname in self._queue: 437 | self.call_callback(hostname, None) 438 | self._loop.add_timeout(timeout or self._resolve_timeout, on_timeout) 439 | else: 440 | self._queue[hostname].append(callback) 441 | except Exception: 442 | self.call_callback(hostname, None) 443 | return False 444 | 445 | def flush(self): 446 | 
self._cache.clear() 447 | 448 | def on_close(self, socket): 449 | if self._status == STATUS_CLOSED: 450 | return 451 | 452 | if self._socket == socket: 453 | self._socket = None 454 | else: 455 | self._socket6 = None 456 | 457 | def close(self): 458 | if self._status == STATUS_CLOSED: 459 | return 460 | 461 | self._status = STATUS_CLOSED 462 | if self._socket: 463 | self._socket.close() 464 | if self._socket6: 465 | self._socket6.close() 466 | 467 | for hostname, query_state in self._queue.items(): 468 | for callback in query_state.callbacks: 469 | self._loop.add_async(callback, hostname, None) 470 | self._queue.clear() 471 | 472 | def is_ip(self, address): 473 | if is_py3 and type(address) != str: 474 | address = address.decode('utf8') 475 | 476 | try: 477 | socket.inet_pton(socket.AF_INET, address) 478 | return socket.AF_INET 479 | except (TypeError, ValueError, OSError, IOError): 480 | try: 481 | socket.inet_pton(socket.AF_INET6, address) 482 | return socket.AF_INET6 483 | except (TypeError, ValueError, OSError, IOError): 484 | return False 485 | 486 | 487 | if is_py3: 488 | from .coroutines.dns import warp_coroutine 489 | DNSResolver = warp_coroutine(DNSResolver) -------------------------------------------------------------------------------- /sevent/errors.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 2020/1/16 3 | # create by: snower 4 | 5 | 6 | class SeventException(Exception): 7 | pass 8 | 9 | 10 | class SocketClosed(SeventException): 11 | pass 12 | 13 | class ChainClosed(SeventException): 14 | pass 15 | 16 | class ResolveError(SeventException): 17 | pass 18 | 19 | 20 | class ConnectTimeout(SeventException): 21 | pass 22 | 23 | 24 | class AddressError(SeventException): 25 | pass 26 | 27 | 28 | class ConnectError(SeventException): 29 | def __init__(self, address, socket_error, *args, **kwargs): 30 | super(ConnectError, self).__init__(*args, **kwargs) 31 | 32 | self.address = address 33 
| self.socket_error = socket_error 34 | 35 | 36 | class SSLConnectError(ConnectError): 37 | pass 38 | 39 | 40 | class SSLSocketError(SeventException): 41 | pass -------------------------------------------------------------------------------- /sevent/event.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from collections import defaultdict 4 | from .utils import is_py3, get_logger 5 | 6 | 7 | def null_emit_callback(*args, **kwargs): 8 | return None 9 | 10 | 11 | class EventEmitter(object): 12 | def __init__(self): 13 | self._events = defaultdict(set) 14 | self._events_once = defaultdict(set) 15 | 16 | def on(self, event_name, callback): 17 | event_callbacks = self._events[event_name] 18 | event_callbacks.add(callback) 19 | 20 | if not self._events_once[event_name] and len(event_callbacks) == 1: 21 | setattr(self, "emit_" + event_name, callback) 22 | else: 23 | setattr(self, "emit_" + event_name, self.emit_callback(event_name)) 24 | 25 | def off(self, event_name, callback): 26 | event_callbacks = self._events[event_name] 27 | try: 28 | event_callbacks.remove(callback) 29 | except KeyError: 30 | pass 31 | 32 | if not event_callbacks: 33 | once_event_callbacks = self._events_once[event_name] 34 | if not once_event_callbacks: 35 | setattr(self, "emit_" + event_name, null_emit_callback) 36 | elif len(once_event_callbacks) == 1: 37 | callback = list(once_event_callbacks)[0] 38 | 39 | def emit_callback(*args, **kwargs): 40 | setattr(self, "emit_" + event_name, null_emit_callback) 41 | once_event_callbacks.clear() 42 | return callback(*args, **kwargs) 43 | 44 | setattr(self, "emit_" + event_name, emit_callback) 45 | else: 46 | setattr(self, "emit_" + event_name, self.emit_callback(event_name)) 47 | elif len(event_callbacks) == 1: 48 | once_event_callbacks = self._events_once[event_name] 49 | if not once_event_callbacks: 50 | setattr(self, "emit_" + event_name, list(event_callbacks)[0]) 51 | else: 52 | 
setattr(self, "emit_" + event_name, self.emit_callback(event_name)) 53 | else: 54 | setattr(self, "emit_" + event_name, self.emit_callback(event_name)) 55 | 56 | def once(self, event_name, callback): 57 | once_event_callbacks = self._events_once[event_name] 58 | once_event_callbacks.add(callback) 59 | 60 | if not self._events[event_name] and len(once_event_callbacks) == 1: 61 | def emit_callback(*args, **kwargs): 62 | setattr(self, "emit_" + event_name, null_emit_callback) 63 | once_event_callbacks.clear() 64 | return callback(*args, **kwargs) 65 | 66 | setattr(self, "emit_" + event_name, emit_callback) 67 | else: 68 | setattr(self, "emit_" + event_name, self.emit_callback(event_name)) 69 | 70 | def noce(self, event_name, callback): 71 | once_event_callbacks = self._events_once[event_name] 72 | try: 73 | once_event_callbacks.remove(callback) 74 | except KeyError: 75 | pass 76 | 77 | event_callbacks = self._events[event_name] 78 | if not event_callbacks: 79 | if not once_event_callbacks: 80 | setattr(self, "emit_" + event_name, null_emit_callback) 81 | elif len(once_event_callbacks) == 1: 82 | callback = list(once_event_callbacks)[0] 83 | 84 | def emit_callback(*args, **kwargs): 85 | setattr(self, "emit_" + event_name, null_emit_callback) 86 | once_event_callbacks.clear() 87 | return callback(*args, **kwargs) 88 | 89 | setattr(self, "emit_" + event_name, emit_callback) 90 | else: 91 | setattr(self, "emit_" + event_name, self.emit_callback(event_name)) 92 | elif len(event_callbacks) == 1: 93 | if not once_event_callbacks: 94 | setattr(self, "emit_" + event_name, list(event_callbacks)[0]) 95 | else: 96 | setattr(self, "emit_" + event_name, self.emit_callback(event_name)) 97 | else: 98 | setattr(self, "emit_" + event_name, self.emit_callback(event_name)) 99 | 100 | def remove_listener(self, event_name, callback): 101 | event_callbacks = self._events[event_name] 102 | try: 103 | event_callbacks.remove(callback) 104 | except KeyError: 105 | pass 106 | 107 | 
once_event_callbacks = self._events_once[event_name] 108 | try: 109 | once_event_callbacks.remove(callback) 110 | except KeyError: 111 | pass 112 | 113 | if not event_callbacks: 114 | if not once_event_callbacks: 115 | setattr(self, "emit_" + event_name, null_emit_callback) 116 | elif len(once_event_callbacks) == 1: 117 | callback = list(once_event_callbacks)[0] 118 | 119 | def emit_callback(*args, **kwargs): 120 | setattr(self, "emit_" + event_name, null_emit_callback) 121 | once_event_callbacks.clear() 122 | return callback(*args, **kwargs) 123 | 124 | setattr(self, "emit_" + event_name, emit_callback) 125 | else: 126 | setattr(self, "emit_" + event_name, self.emit_callback(event_name)) 127 | elif len(event_callbacks) == 1: 128 | if not once_event_callbacks: 129 | setattr(self, "emit_" + event_name, list(event_callbacks)[0]) 130 | else: 131 | setattr(self, "emit_" + event_name, self.emit_callback(event_name)) 132 | else: 133 | setattr(self, "emit_" + event_name, self.emit_callback(event_name)) 134 | 135 | def remove_all_listeners(self, event_name=None): 136 | if event_name is None: 137 | for event_name in set(list(self._events.keys()) + list(self._events_once.keys())): 138 | setattr(self, "emit_" + event_name, null_emit_callback) 139 | self._events = defaultdict(set) 140 | self._events_once = defaultdict(set) 141 | else: 142 | setattr(self, "emit_" + event_name, null_emit_callback) 143 | self._events[event_name] = set() 144 | self._events_once[event_name] = set() 145 | 146 | def emit_callback(self, event_name): 147 | def _(*args, **kwargs): 148 | for cb in self._events[event_name]: 149 | try: 150 | cb(*args, **kwargs) 151 | except Exception as e: 152 | if isinstance(e, (KeyboardInterrupt, SystemError)): 153 | raise e 154 | get_logger().exception('error when calling callback:%s',e) 155 | 156 | callbacks = self._events_once[event_name] 157 | if callbacks: 158 | self._events_once[event_name] = set() 159 | while callbacks: 160 | cb = callbacks.pop() 161 | try: 162 | 
cb(*args, **kwargs) 163 | except Exception as e: 164 | if isinstance(e, (KeyboardInterrupt, SystemError)): 165 | raise e 166 | get_logger().exception('error when calling callback:%s',e) 167 | return _ 168 | 169 | def emit(self, event_name, *args, **kwargs): 170 | for cb in self._events[event_name]: 171 | try: 172 | cb(*args, **kwargs) 173 | except Exception as e: 174 | if isinstance(e, (KeyboardInterrupt, SystemError)): 175 | raise e 176 | get_logger().exception('error when calling callback:%s', e) 177 | 178 | callbacks = self._events_once[event_name] 179 | if callbacks: 180 | self._events_once[event_name] = set() 181 | while callbacks: 182 | cb = callbacks.pop() 183 | try: 184 | cb(*args, **kwargs) 185 | except Exception as e: 186 | if isinstance(e, (KeyboardInterrupt, SystemError)): 187 | raise e 188 | get_logger().exception('error when calling callback:%s', e) 189 | 190 | def __getattr__(self, item): 191 | if item[:5] == "emit_": 192 | event_name = item[5:] 193 | event_callbacks = self._events[event_name] 194 | if not event_callbacks: 195 | once_event_callbacks = self._events_once[event_name] 196 | if not once_event_callbacks: 197 | setattr(self, "emit_" + event_name, null_emit_callback) 198 | return null_emit_callback 199 | if len(once_event_callbacks) == 1: 200 | callback = list(once_event_callbacks)[0] 201 | 202 | def emit_callback(*args, **kwargs): 203 | setattr(self, "emit_" + event_name, null_emit_callback) 204 | once_event_callbacks.clear() 205 | return callback(*args, **kwargs) 206 | 207 | setattr(self, "emit_" + event_name, emit_callback) 208 | return emit_callback 209 | callback = self.emit_callback(event_name) 210 | setattr(self, "emit_" + event_name, callback) 211 | return callback 212 | 213 | if len(event_callbacks) == 1: 214 | once_event_callbacks = self._events_once[event_name] 215 | if not once_event_callbacks: 216 | callback = list(event_callbacks)[0] 217 | setattr(self, "emit_" + event_name, callback) 218 | return callback 219 | callback = 
self.emit_callback(event_name) 220 | setattr(self, "emit_" + event_name, callback) 221 | return callback 222 | 223 | elif item[:3] == "on_": 224 | return lambda *args, **kwargs: self.on(item[3:], *args, **kwargs) 225 | return object.__getattribute__(self, item) 226 | 227 | 228 | if is_py3: 229 | from .coroutines.event import warp_coroutine 230 | EventEmitter = warp_coroutine(EventEmitter) 231 | -------------------------------------------------------------------------------- /sevent/helpers/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 2020/7/10 3 | # create by: snower -------------------------------------------------------------------------------- /sevent/helpers/__main__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 2021/2/1 3 | # create by: snower 4 | 5 | import sys 6 | import multiprocessing 7 | import logging 8 | import sevent 9 | from . import tcp_forward 10 | from . import simple_proxy 11 | from . import tcp2proxy 12 | from . import proxy2proxy 13 | from . import redirect2proxy 14 | from . import tcp_reverse 15 | from . import arproxy 16 | 17 | HEPERS = { 18 | "tcp_forward": tcp_forward, 19 | "simple_proxy": simple_proxy, 20 | "tcp2proxy": tcp2proxy, 21 | "proxy2proxy": proxy2proxy, 22 | "redirect2proxy": redirect2proxy, 23 | "tcp_reverse": tcp_reverse, 24 | "arproxy": arproxy, 25 | } 26 | 27 | def show_help_message(): 28 | print('usage: -m [HELPER_NAME] [ARGS]\r\n') 29 | print('simple sevent helpers \r\n') 30 | print("can use helpers:\r\n\r\n" + '\r\n'.join(["sevent.helpers." + name for name in HEPERS])) 31 | print('\r\n\r\n' + '*' * 64 + '\r\n') 32 | 33 | for name, helper in HEPERS.items(): 34 | sys.argv[0] = "-m sevent.helpers." 
+ name 35 | p = multiprocessing.Process(target=helper.main, args=(["-h"],)) 36 | p.start() 37 | p.join() 38 | print('\r\n\r\n' + '*' * 64 + '\r\n') 39 | 40 | if __name__ == "__main__": 41 | if "-h" in sys.argv: 42 | show_help_message() 43 | exit(0) 44 | 45 | args_helpers = [] 46 | for arg in sys.argv[1:]: 47 | if arg and arg[0] == "@" and arg[1:] in HEPERS: 48 | args_helpers.append((arg[1:], HEPERS[arg[1:]], [])) 49 | elif not args_helpers: 50 | continue 51 | else: 52 | args_helpers[-1][2].append(arg) 53 | 54 | if not args_helpers: 55 | show_help_message() 56 | exit(0) 57 | 58 | logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)1.1s %(message)s', 59 | datefmt='%Y-%m-%d %H:%M:%S', filemode='a+') 60 | for name, helper, argv in args_helpers: 61 | logging.info("start helper %s by %s", name, argv) 62 | helper.main(argv) 63 | try: 64 | sevent.instance().start() 65 | except KeyboardInterrupt: 66 | exit(0) -------------------------------------------------------------------------------- /sevent/helpers/arproxy.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 2020/7/10 3 | # create by: snower 4 | 5 | import re 6 | from sevent.helpers.tcp2proxy import * 7 | 8 | def do_rewrite_host(forward_host, forward_port, rewrite_hosts): 9 | def parse_rewrite_host(host_args, rewrite_host, rewrite_args): 10 | for arg in rewrite_args: 11 | if arg < 0 or arg >= len(host_args): 12 | continue 13 | rewrite_host = rewrite_host.replace("{" + str(arg) + "}", host_args[arg]) 14 | rewrite_host = rewrite_host.split(":") 15 | if len(rewrite_host) == 2: 16 | return rewrite_host[0], int(rewrite_host[1]) 17 | return rewrite_host[0], forward_port 18 | 19 | for host, (rewrite_host, rewrite_args) in rewrite_hosts: 20 | if isinstance(host, str): 21 | if host == "*": 22 | return parse_rewrite_host(["%s:%s" % (forward_host, forward_port)], rewrite_host, rewrite_args) 23 | if forward_port in (80, 443): 24 | if 
forward_host == host: 25 | return parse_rewrite_host([host], rewrite_host, rewrite_args) 26 | if ("%s:%s" % (forward_host, forward_port)) == host: 27 | return parse_rewrite_host([host], rewrite_host, rewrite_args) 28 | else: 29 | if forward_port in (80, 443): 30 | matched = host.match(forward_host) 31 | if matched: 32 | return parse_rewrite_host(matched.groups(), rewrite_host, rewrite_args) 33 | matched = host.match("%s:%s" % (forward_host, forward_port)) 34 | if matched: 35 | return parse_rewrite_host(matched.groups(), rewrite_host, rewrite_args) 36 | return forward_host, forward_port 37 | 38 | def check_allow_host(forward_host, forward_port, allow_hosts): 39 | for host in allow_hosts: 40 | if isinstance(host, str): 41 | if host == "*": 42 | return True 43 | if forward_port in (80, 443): 44 | if forward_host == host: 45 | return True 46 | if ("%s:%s" % (forward_host, forward_port)) == host: 47 | return True 48 | else: 49 | if forward_port in (80, 443): 50 | if host.match(forward_host): 51 | return True 52 | if host.match("%s:%s" % (forward_host, forward_port)): 53 | return True 54 | return False 55 | 56 | def check_noproxy_host(forward_host, forward_port, noproxy_hosts): 57 | for host in noproxy_hosts: 58 | if isinstance(host, str): 59 | if host == "*": 60 | return True 61 | if forward_port in (80, 443): 62 | if forward_host == host: 63 | return True 64 | if ("%s:%s" % (forward_host, forward_port)) == host: 65 | return True 66 | else: 67 | if forward_port in (80, 443): 68 | if host.match(forward_host): 69 | return True 70 | if host.match("%s:%s" % (forward_host, forward_port)): 71 | return True 72 | return False 73 | 74 | async def parse_http_forward(conn, rbuffer): 75 | while True: 76 | if b'\r\n\r\n' in rbuffer.join(): 77 | break 78 | rbuffer.write((await conn.recv()).read()) 79 | 80 | data = rbuffer.join() 81 | data = data[:data.index(b'\r\n\r\n')] 82 | for header in data.split(b'\r\n'): 83 | try: 84 | index = header.index(b':') 85 | except ValueError: 86 | 
continue 87 | if header[:index].decode("utf-8").strip().lower() != "host": 88 | continue 89 | host = header[index + 1:].decode("utf-8").strip().split(":") 90 | if len(host) != 2: 91 | return header[index + 1:].decode("utf-8").strip(), 80 92 | return host[0], int(host[1]) 93 | return "", 0 94 | 95 | async def parse_tls_forward(conn, rbuffer): 96 | head_data = (await conn.recv(43)).read(43) 97 | rbuffer.write(head_data) 98 | 99 | # session_id 100 | data = (await conn.recv(1)).read(1) 101 | rbuffer.write(data) 102 | if ord(data) > 0: 103 | data = (await conn.recv(ord(data))).read(ord(data)) 104 | rbuffer.write(data) 105 | 106 | # cipher suites 107 | data = (await conn.recv(2)).read(2) 108 | rbuffer.write(data) 109 | csl, = struct.unpack("!H", data) 110 | if csl > 0: 111 | data = (await conn.recv(csl)).read(csl) 112 | rbuffer.write(data) 113 | 114 | # compression methods 115 | data = (await conn.recv(1)).read(1) 116 | rbuffer.write(data) 117 | if ord(data) > 0: 118 | data = (await conn.recv(ord(data))).read(ord(data)) 119 | rbuffer.write(data) 120 | 121 | # estensions 122 | data = (await conn.recv(2)).read(2) 123 | rbuffer.write(data) 124 | el, = struct.unpack("!H", data) 125 | if el > 0: 126 | data = (await conn.recv(el)).read(el) 127 | rbuffer.write(data) 128 | 129 | i = 0 130 | while i < len(data): 131 | et_item, el_item = struct.unpack("!HH", data[i: i + 4]) 132 | if et_item != 0: 133 | i += el_item + 4 134 | continue 135 | 136 | sl, = struct.unpack("!H", data[i+4: i+6]) 137 | j, sdata = 0, data[i+6:i+6+sl] 138 | while j < len(sdata): 139 | st_item, sl_item = struct.unpack("!BH", sdata[j: j + 3]) 140 | if st_item != 0: 141 | j += sl_item + 3 142 | continue 143 | 144 | host = sdata[j + 3: j + 3 + sl_item].decode("utf-8").split(":") 145 | if len(host) != 2: 146 | return sdata[j + 3: j + 3 + sl_item].decode("utf-8"), 443 147 | return host[0], int(host[1]) 148 | return "", 0 149 | 150 | async def none_proxy(conns, conn, proxy_host, proxy_port, remote_host, remote_port, 
async def none_proxy(conns, conn, proxy_host, proxy_port, remote_host, remote_port,
                     status):
    """Forward `conn` directly to remote_host:remote_port without an uplink proxy.

    Opens a plain connection to the remote, counts bytes in both directions
    into `status` ("recv_len" client->remote, "send_len" remote->client) and
    links the two sockets until either side closes. Always closes both
    sockets and removes the connection from `conns` on exit.
    """
    start_time = time.time()
    # wrap writes so the idle-timeout checker can see traffic counters
    conn.write, pconn = warp_write(conn, status, "recv_len"), None

    try:
        conn.enable_nodelay()
        pconn = create_socket((remote_host, remote_port))
        await pconn.connectof((remote_host, remote_port))
        pconn.write = warp_write(pconn, status, "send_len")
        logging.info("none proxy connected %s:%d -> %s:%d", conn.address[0], conn.address[1], remote_host, remote_port)
        # pump bytes between the two sockets until one closes
        await pconn.linkof(conn)
    except sevent.errors.SocketClosed:
        # normal teardown path: either peer closed the link
        pass
    except Exception as e:
        logging.info("none proxy error %s:%d -> %s:%d %s %.2fms\r%s", conn.address[0], conn.address[1], remote_host, remote_port,
                     e, (time.time() - start_time) * 1000, traceback.format_exc())
        # return here skips the "closed" summary log below; cleanup still
        # runs via finally
        return
    finally:
        conn.close()
        if pconn: pconn.close()
        conns.pop(id(conn), None)

    logging.info("none proxy closed %s:%d -> %s:%d %s %s %.2fms", conn.address[0], conn.address[1], remote_host, remote_port,
                 format_data_len(status["send_len"]), format_data_len(status["recv_len"]), (time.time() - start_time) * 1000)
async def parse_forward(proxy_type, conns, conn, proxy_host, proxy_port,
                        default_forward_host, default_forward_port, default_forward_proxy_type,
                        allow_hosts, noproxy_hosts, rewrite_hosts, status):
    """Sniff the first bytes of `conn` to discover the intended target host,
    then tunnel the connection through the configured proxy type.

    TLS ClientHellos are detected by the record/handshake header bytes and
    parsed for SNI; plain HTTP requests are parsed for the Host header.
    Anything unrecognized falls back to the default forward target. All bytes
    consumed while sniffing are kept in `rbuffer` and pushed back into the
    connection's receive buffer so the upstream sees the full stream.
    """
    try:
        rbuffer = sevent.Buffer()
        # hard 5s cap on the sniffing phase: close the conn if parsing stalls
        timer = sevent.current().add_timeout(5, lambda: conn.close())
        try:
            data = (await conn.recv()).read()
            rbuffer.write(data)

            # TLS detection: content type 0x16 (handshake), version 3.x,
            # handshake type 0x01 (ClientHello), client version 3.x
            if len(data) < 11 or data[:2] != b'\x16\x03' or data[2] not in (1, 2, 3, 4) or data[5] != 0x01 \
                    or data[9] != 0x03 or data[10] not in (1, 2, 3, 4):
                # NOTE(review): "option" here presumably means the OPTIONS
                # method — confirm; OPTIONS requests only match via b'HTTP'
                if data[:data.index(b' ')].decode("utf-8").lower() in ("get", "post", "put", "delete", "head", "option") \
                        or b'HTTP' in data:
                    forward_host, forward_port = await parse_http_forward(conn, rbuffer)
                else:
                    forward_host, forward_port = "", 0
            else:
                # replay what we already consumed back into the recv buffer
                # so parse_tls_forward reads a contiguous handshake
                rbuffer.write(conn.buffer[0].read())
                conn.buffer[0].write(rbuffer.read())
                forward_host, forward_port = await parse_tls_forward(conn, rbuffer)
        except Exception:
            # any parse failure degrades to the default forward target
            forward_host, forward_port = "", 0
        finally:
            sevent.current().cancel_timeout(timer)

        if not forward_host or not forward_port:
            forward_host, forward_port, proxy_type = default_forward_host, default_forward_port, default_forward_proxy_type
        elif allow_hosts and allow_hosts[0] != "*" and not check_allow_host(forward_host, forward_port, allow_hosts):
            # sniffed host not in the allow list: treat like an unknown target
            forward_host, forward_port, proxy_type = default_forward_host, default_forward_port, default_forward_proxy_type
        if rewrite_hosts:
            forward_host, forward_port = do_rewrite_host(forward_host, forward_port, rewrite_hosts)
        if not forward_host or not forward_port:
            logging.info("%s proxy unknown closed %s:%d -> %s:%d", proxy_type, conn.address[0], conn.address[1],
                         proxy_host, proxy_port)
            conn.close()
            conns.pop(id(conn), None)
            return

        # push all sniffed bytes back so the chosen proxy relays them intact
        rbuffer.write(conn.buffer[0].read())
        conn.buffer[0].write(rbuffer.read())
        if proxy_type == "http":
            if check_noproxy_host(forward_host, forward_port, noproxy_hosts):
                proxy_type = "none"
                await none_proxy(conns, conn, proxy_host, proxy_port, forward_host, forward_port, status)
            else:
                await http_proxy(conns, conn, proxy_host, proxy_port, forward_host, forward_port, status)
        elif proxy_type == "socks5":
            if check_noproxy_host(forward_host, forward_port, noproxy_hosts):
                proxy_type = "none"
                await none_proxy(conns, conn, proxy_host, proxy_port, forward_host, forward_port, status)
            else:
                await socks5_proxy(conns, conn, proxy_host, proxy_port, forward_host, forward_port, status)
        else:
            await none_proxy(conns, conn, proxy_host, proxy_port, forward_host, forward_port, status)
    except Exception as e:
        logging.info("%s proxy error %s:%d -> %s:%d %s\r%s", proxy_type, conn.address[0], conn.address[1],
                     proxy_host, proxy_port, e, traceback.format_exc())
        # NOTE(review): conn is deregistered but not closed on this path —
        # presumably already closed by the failing sub-proxy; confirm
        conns.pop(id(conn), None)
async def tcp_accept(server, args):
    """Accept loop for the auto-sniffing proxy.

    Resolves the proxy / default-forward endpoints and compiles the allow,
    noproxy and rewrite host rules once, then dispatches every accepted
    connection to parse_forward. Host patterns containing "*" (other than a
    lone "*") are compiled to regexes with "*" meaning a non-greedy wildcard.
    """
    proxy_info = args.proxy_host.split(":")
    if len(proxy_info) == 1:
        if not proxy_info[0].isdigit():
            proxy_host, proxy_port = proxy_info[0], 8088
        else:
            proxy_host, proxy_port = "127.0.0.1", int(proxy_info[0])
    else:
        proxy_host, proxy_port = proxy_info[0], int(proxy_info[1])

    default_forward_info = args.default_forward_host.split(":")
    if not default_forward_info or not default_forward_info[0]:
        default_forward_host, default_forward_port = "", 0
    elif len(default_forward_info) == 1:
        if not default_forward_info[0].isdigit():
            default_forward_host, default_forward_port = default_forward_info[0], 8088
        else:
            default_forward_host, default_forward_port = "127.0.0.1", int(default_forward_info[0])
    else:
        default_forward_host, default_forward_port = default_forward_info[0], int(default_forward_info[1])

    allow_hosts = []
    for allow_host in (i for i in args.allow_hosts.split(",") if i.strip()):
        if "*" in allow_host and "*" != allow_host:
            # Fix: r"\." instead of "\." — the un-raw form is an invalid
            # escape sequence (DeprecationWarning now, SyntaxError in
            # future Python versions)
            allow_hosts.append(re.compile(allow_host.replace(".", r"\.").replace("*", ".+?")))
        else:
            allow_hosts.append(allow_host)
    noproxy_hosts = []
    for noproxy_host in (i for i in args.noproxy_hosts.split(",") if i.strip()):
        if "*" in noproxy_host and "*" != noproxy_host:
            noproxy_hosts.append(re.compile(noproxy_host.replace(".", r"\.").replace("*", ".+?")))
        else:
            noproxy_hosts.append(noproxy_host)
    rewrite_hosts = []
    for rewrite_host in (i for i in args.rewrite_hosts.split(",") if i.strip()):
        rewrite_host = [i for i in rewrite_host.split("=") if i.strip()]
        if len(rewrite_host) != 2:
            continue
        rewrite_hosts.append((
            re.compile("^" + rewrite_host[0].replace(".", r"\.").replace("*", "(.+?)") + "$")
            if "*" in rewrite_host[0] and "*" != rewrite_host[0] else rewrite_host[0],
            (
                rewrite_host[1],
                # capture-group placeholders such as {1} in the replacement
                tuple((int(i[1:-1]) for i in re.findall(r"(\{\d+?\})", rewrite_host[1])))
            )
        ))

    logging.info("use %s proxy %s:%d default forward to %s:%d", args.proxy_type, proxy_host, proxy_port,
                 default_forward_host, default_forward_port)
    conns = {}
    sevent.current().call_async(check_timeout, conns, args.timeout)
    while True:
        conn = await server.accept()
        status = {"recv_len": 0, "send_len": 0, "last_time": time.time(), "check_recv_len": 0, "check_send_len": 0}
        sevent.current().call_async(parse_forward, args.proxy_type, conns, conn, proxy_host, proxy_port,
                                    default_forward_host, default_forward_port, args.default_forward_proxy_type,
                                    allow_hosts, noproxy_hosts, rewrite_hosts, status)
        conns[id(conn)] = (conn, status)
def check_host(forward_host, allow_hosts):
    """Return True when forward_host matches any entry of allow_hosts.

    String entries match by exact equality or as a suffix of the host;
    every other entry is assumed to be a compiled regex and matched with
    its .match() method.
    """
    return any(
        (forward_host == rule or forward_host.endswith(rule))
        if isinstance(rule, str) else bool(rule.match(forward_host))
        for rule in allow_hosts
    )
async def socks5_proxy(proxy_host, proxy_port, remote_host, remote_port):
    """Open a connection to remote_host:remote_port through a SOCKS5 proxy.

    Performs the no-authentication handshake (RFC 1928) against
    proxy_host:proxy_port and issues a CONNECT request. Returns the
    connected socket on success, or None when the proxy closes the
    connection or rejects the handshake.
    """
    pconn = None
    try:
        pconn = create_socket((proxy_host, proxy_port))
        await pconn.connectof((proxy_host, proxy_port))
        # greeting: version 5, one auth method offered, "no authentication"
        await pconn.send(b"\x05\x01\x00")
        buffer = await pconn.recv()
        if buffer.read() != b'\x05\00':
            logging.info("protocol hello error")
            # Fix: close the half-open proxy connection instead of leaking
            # it (the original returned None with the socket still open)
            pconn.close()
            return None

        protocol_data = socks5_build_protocol(remote_host, remote_port)
        await pconn.send(protocol_data)
        buffer = await pconn.recv()
        if buffer.read(3) != b'\x05\x00\x00':
            logging.info("protocol error")
            pconn.close()
            return None
        if not socks5_read_protocol(buffer):
            logging.info("protocol error")
            pconn.close()
            return None
    except sevent.errors.SocketClosed:
        # peer went away mid-handshake: report "no connection"
        pconn = None
    except Exception:
        if pconn: pconn.close()
        raise  # bare raise preserves the original traceback
    return pconn
async def tcp_proxy(conns, conn, proxy_type, proxy_host,
                    proxy_port, noproxy_hosts, status):
    """Serve one client connection: detect its protocol (socks5 if the first
    byte is 0x05, otherwise http), resolve the requested target, then tunnel
    it either directly (noproxy match) or through the configured uplink proxy.
    Always closes both sockets and deregisters from `conns` on exit.
    """
    start_time = time.time()
    host, port, protocol = '', 0, ''
    # wrap writes so the idle-timeout checker can see traffic counters
    conn.write, pconn = warp_write(conn, status, "recv_len"), None
    try:
        conn.enable_nodelay()
        buffer = await conn.recv()
        if buffer[0] == 5:
            protocol = 'socks5'
            host, port, data = await socks5_protocol_parse(conn, buffer)
        else:
            protocol = 'http'
            host, port, data = await http_protocol_parse(conn, buffer)
        if not host or not port:
            logging.info("empty address")
            return

        logging.info("connected %s %s:%d -> %s %s:%d -> %s:%d", protocol, conn.address[0], conn.address[1],
                     proxy_type, proxy_host, proxy_port, host, port)
        if check_host(host, noproxy_hosts):
            # target is exempt from proxying: connect directly
            proxy_type = "none"
            pconn = await none_proxy(proxy_host, proxy_port, host, port)
        else:
            if proxy_type == "http":
                pconn = await http_proxy(proxy_host, proxy_port, host, port)
            else:
                pconn = await socks5_proxy(proxy_host, proxy_port, host, port)
        # NOTE(review): the helpers above return None on handshake failure,
        # which makes the next line raise AttributeError (handled by the
        # except below) — presumably intentional; confirm
        pconn.write = warp_write(pconn, status, "send_len")
        # NOTE(review): pconn is already connected at this point; this
        # second connectof looks like a sevent no-op on a connected
        # socket — confirm against sevent.tcp
        await pconn.connectof((host, port))
        if data:
            await pconn.send(data)
        await pconn.linkof(conn)
    except sevent.errors.SocketClosed:
        # normal teardown path: either peer closed the link
        pass
    except Exception as e:
        logging.info("error %s %s:%d -> %s %s:%d -> %s:%d %s %.2fms\r%s", protocol, conn.address[0], conn.address[1],
                     proxy_type, proxy_host, proxy_port, host, port, e,
                     (time.time() - start_time) * 1000, traceback.format_exc())
        return
    finally:
        conn.close()
        if pconn: pconn.close()
        conns.pop(id(conn), None)

    logging.info("closed %s %s:%d -> %s %s:%d -> %s:%d %s %s %.2fms", protocol, conn.address[0], conn.address[1],
                 proxy_type, proxy_host, proxy_port, host, port, format_data_len(status["send_len"]),
                 format_data_len(status["recv_len"]), (time.time() - start_time) * 1000)
async def check_timeout(conns, timeout):
    """Close connections that have been idle for `timeout` seconds.

    The sweep runs on a daemon thread so its time.sleep never blocks the
    event loop; closes are marshalled back onto the loop with
    add_async_safe. A timeout <= 0 disables the watchdog. The coroutine
    then parks forever on an unresolved Future.
    """
    def sweep_forever():
        while True:
            try:
                now = time.time()
                for conn_id, (conn, status) in list(conns.items()):
                    moved = (status['check_recv_len'] != status['recv_len']
                             or status['check_send_len'] != status['send_len'])
                    if moved:
                        # traffic since the last sweep: snapshot the
                        # counters and reset the idle clock
                        status["check_recv_len"] = status["recv_len"]
                        status["check_send_len"] = status["send_len"]
                        status['last_time'] = now
                    elif now - status['last_time'] >= timeout:
                        # close on the event-loop thread, not this one
                        sevent.current().add_async_safe(conn.close)
                        conns.pop(conn_id, None)
            finally:
                time.sleep(min(float(timeout) / 2.0, 30))

    if timeout > 0:
        watcher = threading.Thread(target=sweep_forever)
        watcher.daemon = True
        watcher.start()
        await sevent.Future()
def main(argv):
    """Parse command-line options, bind the listening server and run the
    accept loop on the sevent event loop."""
    cli = argparse.ArgumentParser(description='simple http and socks5 proxy forward to http or socks5 uplink proxy')
    cli.add_argument('-b', dest='bind', default="0.0.0.0", help='local bind host (default: 0.0.0.0)')
    cli.add_argument('-p', dest='port', default=8088, type=int, help='local bind port (default: 8088)')
    cli.add_argument('-t', dest='timeout', default=7200, type=int, help='no read/write timeout (default: 7200)')
    cli.add_argument('-T', dest='proxy_type', default="http", choices=("http", "socks5"),
                     help='proxy type (default: http)')
    cli.add_argument('-P', dest='proxy_host', default="127.0.0.1:8088",
                     help='proxy host, accept format [proxy_host:proxy_port] (default: 127.0.0.1:8088)')
    cli.add_argument('-N', dest='noproxy_hosts', default="*",
                     help='noproxy hosts, accept format [host,*host,host*] (default: *)')
    options = cli.parse_args(args=argv)
    config_signal()
    listener = create_server((options.bind, options.port))
    logging.info("listen server at %s:%d", options.bind, options.port)
    sevent.run(tcp_accept, listener, options)
async def http_proxy(proxy_host, proxy_port, remote_host, remote_port):
    """Open a tunnel to remote_host:remote_port via an HTTP CONNECT proxy.

    Sends a CONNECT request to proxy_host:proxy_port and checks for an
    "HTTP/1.1 200" status line. Returns the connected socket on success,
    or None when the proxy closes the connection or rejects the tunnel.
    """
    pconn = None
    try:
        pconn = create_socket((proxy_host, proxy_port))
        await pconn.connectof((proxy_host, proxy_port))

        protocol_data = http_build_protocol(remote_host, remote_port)
        await pconn.send(protocol_data)
        buffer = await pconn.recv()
        if buffer.read(12).lower() != b"http/1.1 200":
            logging.info("protocol error")
            # Fix: close the proxy connection on rejection instead of
            # leaking the open socket (the original returned None with
            # the socket still open)
            pconn.close()
            return None
        buffer.read()  # drain the remainder of the response headers
    except sevent.errors.SocketClosed:
        # peer went away mid-handshake: report "no connection"
        pconn = None
    except Exception:
        if pconn: pconn.close()
        raise  # bare raise preserves the original traceback
    return pconn
async def tcp_proxy(conns, conn, proxy_type, proxy_host, proxy_port, status):
    """Serve one iptables-redirected connection: recover the original
    destination from the socket, then tunnel it through the configured
    uplink proxy. Always closes both sockets and deregisters from `conns`.
    """
    start_time = time.time()
    # NOTE(review): `protocol` is never assigned in this function, so the
    # log lines below always print it as an empty string — confirm intent
    host, port, protocol = '', 0, ''
    # wrap writes so the idle-timeout checker can see traffic counters
    conn.write, pconn = warp_write(conn, status, "recv_len"), None
    try:
        conn.enable_nodelay()
        # option 80 is presumably SO_ORIGINAL_DST (Linux netfilter): a
        # 16-byte sockaddr_in holding the pre-REDIRECT destination — confirm
        address_data = conn.socket.getsockopt(socket.SOL_IP, 80, 16)
        # bytes 2:4 = port (big-endian), bytes 4:8 = IPv4 address
        host, port = socket.inet_ntoa(address_data[4:8]), struct.unpack(">H", address_data[2:4])[0]
        logging.info("connected %s %s:%d -> %s %s:%d -> %s:%d", protocol, conn.address[0], conn.address[1],
                     proxy_type, proxy_host, proxy_port, host, port)
        if proxy_type == "http":
            pconn = await http_proxy(proxy_host, proxy_port, host, port)
        else:
            pconn = await socks5_proxy(proxy_host, proxy_port, host, port)
        # NOTE(review): the helpers above return None on handshake failure,
        # making the next line raise AttributeError (caught below) — confirm
        pconn.write = warp_write(pconn, status, "send_len")
        # NOTE(review): pconn is already connected to the proxy; this second
        # connectof looks like a sevent no-op on a connected socket — confirm
        await pconn.connectof((host, port))
        await pconn.linkof(conn)
    except sevent.errors.SocketClosed:
        # normal teardown path: either peer closed the link
        pass
    except Exception as e:
        logging.info("error %s %s:%d -> %s %s:%d -> %s:%d %s %.2fms\r%s", protocol, conn.address[0], conn.address[1],
                     proxy_type, proxy_host, proxy_port, host, port, e,
                     (time.time() - start_time) * 1000, traceback.format_exc())
        return
    finally:
        conn.close()
        if pconn: pconn.close()
        conns.pop(id(conn), None)

    logging.info("closed %s %s:%d -> %s %s:%d -> %s:%d %s %s %.2fms", protocol, conn.address[0], conn.address[1],
                 proxy_type, proxy_host, proxy_port, host, port, format_data_len(status["send_len"]),
                 format_data_len(status["recv_len"]), (time.time() - start_time) * 1000)
async def tcp_accept(server, args):
    """Accept loop: resolve the uplink proxy endpoint once, then hand every
    accepted connection to tcp_proxy and register it for idle tracking."""
    # "host", "port" or "host:port" -> (host, port)
    parts = args.proxy_host.split(":")
    if len(parts) >= 2:
        proxy_host, proxy_port = parts[0], int(parts[1])
    elif parts[0].isdigit():
        proxy_host, proxy_port = "127.0.0.1", int(parts[0])
    else:
        proxy_host, proxy_port = parts[0], 8088

    logging.info("use %s proxy %s:%d", args.proxy_type, proxy_host, proxy_port)
    conns = {}
    sevent.current().call_async(check_timeout, conns, args.timeout)
    while True:
        conn = await server.accept()
        status = {"recv_len": 0, "send_len": 0, "last_time": time.time(), "check_recv_len": 0, "check_send_len": 0}
        sevent.current().call_async(tcp_proxy, conns, conn, args.proxy_type, proxy_host, proxy_port, status)
        conns[id(conn)] = (conn, status)
def http_protocol_parse_address(data, default_port=80):
    """Split a b"host[:port]" target into (host, port).

    The port separator is the LAST colon, so host specs containing more
    than one colon (e.g. bare IPv6-style targets such as b"::1:8080") keep
    their full host part — the original split on every colon and silently
    dropped the port for such inputs. When no numeric port is present the
    whole value becomes the host and default_port is used.
    """
    host, sep, port = data.rpartition(b":")
    if not sep or not port.isdigit():
        # no colon, or the suffix is not a port number: whole value is host
        return (data.decode("utf-8"), default_port)
    return (host.decode("utf-8"), int(port))
conn.send(b"".join([b'\x05\x00\x00\x01', socket.inet_aton('0.0.0.0'), struct.pack(">H", 0)])) 58 | return (socket.inet_ntoa(data[4:8]), 59 | struct.unpack('>H', data[8:10])[0], data[10:]) 60 | 61 | if data[3] == 4: 62 | await conn.send(b"".join([b'\x05\x00\x00\x01', socket.inet_aton('0.0.0.0'), struct.pack(">H", 0)])) 63 | return (socket.inet_ntop(socket.AF_INET6, data[4:20]), 64 | struct.unpack('>H', data[20:22])[0], data[22:]) 65 | 66 | elif data[3] == 3: 67 | host_len = data[4] + 5 68 | await conn.send(b"".join([b'\x05\x00\x00\x01', socket.inet_aton('0.0.0.0'), struct.pack(">H", 0)])) 69 | return (data[5: host_len].decode("utf-8"), 70 | struct.unpack('>H', data[host_len: host_len+2])[0], 71 | data[host_len+2:]) 72 | 73 | return ('', 0, None) 74 | 75 | async def tcp_proxy(conns, conn, status): 76 | start_time = time.time() 77 | host, port, protocol = '', 0, '' 78 | conn.write, pconn = warp_write(conn, status, "recv_len"), None 79 | try: 80 | conn.enable_nodelay() 81 | buffer = await conn.recv() 82 | if buffer[0] == 5: 83 | protocol = 'socks5' 84 | host, port, data = await socks5_protocol_parse(conn, buffer) 85 | else: 86 | protocol = 'http' 87 | host, port, data = await http_protocol_parse(conn, buffer) 88 | if not host or not port: 89 | logging.info("empty address") 90 | return 91 | 92 | logging.info("connected %s %s:%d", protocol, host, port) 93 | pconn = create_socket((host, port)) 94 | pconn.write = warp_write(pconn, status, "send_len") 95 | await pconn.connectof((host, port)) 96 | if data: 97 | await pconn.send(data) 98 | await pconn.linkof(conn) 99 | except sevent.errors.SocketClosed: 100 | pass 101 | except Exception as e: 102 | logging.info("error %s %s:%d %s %.2fms\r%s", protocol, host, port, e, 103 | (time.time() - start_time) * 1000, traceback.format_exc()) 104 | return 105 | finally: 106 | conn.close() 107 | if pconn: pconn.close() 108 | conns.pop(id(conn), None) 109 | 110 | logging.info("closed %s %s:%d %s %s %.2fms", protocol, host, port, 
async def check_timeout(conns, timeout):
    """Background watchdog that closes idle connections.

    A daemon thread polls `conns` (shared dict of id(conn) -> (conn,
    status)); a connection whose byte counters have not moved for
    `timeout` seconds is closed via the event loop. A timeout <= 0
    disables the watchdog.
    """
    def run_check():
        # runs on a plain thread: time.sleep here must not block the loop
        while True:
            try:
                now = time.time()
                for conn_id, (conn, status) in list(conns.items()):
                    if status['check_recv_len'] != status['recv_len'] or status['check_send_len'] != status['send_len']:
                        # traffic since last sweep: snapshot counters and
                        # reset the idle clock
                        status["check_recv_len"] = status["recv_len"]
                        status["check_send_len"] = status["send_len"]
                        status['last_time'] = now
                        continue

                    if now - status['last_time'] >= timeout:
                        # close on the event-loop thread, not this one
                        sevent.current().add_async_safe(conn.close)
                        conns.pop(conn_id, None)
            finally:
                # NOTE(review): no except clause — an exception above still
                # kills the thread after this sleep; presumably accepted
                time.sleep(min(float(timeout) / 2.0, 30))

    if timeout > 0:
        check_thread = threading.Thread(target=run_check)
        check_thread.daemon = True
        check_thread.start()
        await sevent.Future()
def socks5_build_protocol(remote_host, remote_port):
    """Build a SOCKS5 CONNECT request (RFC 1928) for remote_host:remote_port.

    Tries the IPv4 address type first, then IPv6, and falls back to the
    domain-name address type for anything that is not a literal IP address.
    remote_host may be str or bytes.
    """
    try:
        protocol_data = b"".join(
            [b"\x05\x01\x00\x01", socket.inet_aton(remote_host), struct.pack(">H", remote_port)])
    # Fix: narrow the bare except — inet_aton raises OSError for a
    # non-IPv4 literal and TypeError for bytes input; anything else
    # (e.g. struct errors on a bad port) should propagate
    except (OSError, TypeError):
        try:
            protocol_data = b"".join([b"\x05\x01\x00\x04", socket.inet_pton(socket.AF_INET6, remote_host),
                                      struct.pack(">H", remote_port)])
        except (OSError, TypeError):
            # not an IP literal at all: send as a domain name
            protocol_data = b"".join([b"\x05\x01\x00\x03", struct.pack(">B", len(remote_host)),
                                      bytes(remote_host, "utf-8") if isinstance(remote_host, str) else remote_host,
                                      struct.pack(">H", remote_port)])
    return protocol_data
async def socks5_proxy(conns, conn,
                       proxy_host, proxy_port, remote_host, remote_port, status):
    """Tunnel `conn` to remote_host:remote_port through a SOCKS5 proxy.

    Runs the no-auth handshake and CONNECT request against
    proxy_host:proxy_port, then links the client and proxy sockets until
    either side closes. Byte counters go into `status`; both sockets are
    closed and the connection removed from `conns` on every exit path.
    """
    start_time = time.time()
    # wrap writes so the idle-timeout checker can see traffic counters
    conn.write, pconn = warp_write(conn, status, "recv_len"), None

    try:
        conn.enable_nodelay()
        pconn = create_socket((proxy_host, proxy_port))
        await pconn.connectof((proxy_host, proxy_port))
        # greeting: version 5, one auth method offered, "no authentication"
        await pconn.send(b"\x05\x01\x00")
        buffer = await pconn.recv()
        if buffer.read() != b'\x05\00':
            logging.info("protocol hello error")
            return

        protocol_data = socks5_build_protocol(remote_host, remote_port)
        await pconn.send(protocol_data)
        buffer = await pconn.recv()
        # reply: version 5, status 0 (succeeded), reserved 0
        if buffer.read(3) != b'\x05\x00\x00':
            logging.info("protocol error")
            return
        # consume the bound-address part of the reply; False means a
        # malformed/unknown address type
        if not socks5_read_protocol(buffer):
            logging.info("protocol error")
            return
        pconn.write = warp_write(pconn, status, "send_len")
        logging.info("socks5 proxy connected %s:%d -> %s:%d -> %s:%d", conn.address[0], conn.address[1], proxy_host, proxy_port,
                     remote_host, remote_port)
        await pconn.linkof(conn)
    except sevent.errors.SocketClosed:
        # normal teardown path: either peer closed the link
        pass
    except Exception as e:
        logging.info("socks5 proxy error %s:%d -> %s:%d -> %s:%d %s %.2fms\r%s", conn.address[0], conn.address[1], proxy_host, proxy_port,
                     remote_host, remote_port, e, (time.time() - start_time) * 1000, traceback.format_exc())
        return
    finally:
        conn.close()
        if pconn: pconn.close()
        conns.pop(id(conn), None)

    logging.info("socks5 proxy closed %s:%d -> %s:%d -> %s:%d %s %s %.2fms", conn.address[0], conn.address[1], proxy_host, proxy_port,
                 remote_host, remote_port, format_data_len(status["send_len"]), format_data_len(status["recv_len"]), (time.time() - start_time) * 1000)
b"\r\nUser-Agent: sevent\r\nProxy-Connection: Keep-Alive\r\n\r\n" 92 | 93 | async def http_proxy(conns, conn, proxy_host, proxy_port, remote_host, remote_port, status): 94 | start_time = time.time() 95 | conn.write, pconn = warp_write(conn, status, "recv_len"), None 96 | 97 | try: 98 | conn.enable_nodelay() 99 | pconn = create_socket((proxy_host, proxy_port)) 100 | await pconn.connectof((proxy_host, proxy_port)) 101 | 102 | protocol_data = http_build_protocol(remote_host, remote_port) 103 | await pconn.send(protocol_data) 104 | buffer = await pconn.recv() 105 | if buffer.read(12).lower() != b"http/1.1 200": 106 | logging.info("protocol error") 107 | return 108 | buffer.read() 109 | pconn.write = warp_write(pconn, status, "send_len") 110 | logging.info("tcp2proxy connected %s:%d -> %s:%d -> %s:%d", conn.address[0], conn.address[1], proxy_host, proxy_port, 111 | remote_host, remote_port) 112 | await pconn.linkof(conn) 113 | except sevent.errors.SocketClosed: 114 | pass 115 | except Exception as e: 116 | logging.info("tcp2proxy error %s:%d -> %s:%d -> %s:%d %s %.2fms\r%s", conn.address[0], conn.address[1], proxy_host, proxy_port, 117 | remote_host, remote_port, e, (time.time() - start_time) * 1000, traceback.format_exc()) 118 | return 119 | finally: 120 | conn.close() 121 | if pconn: pconn.close() 122 | conns.pop(id(conn), None) 123 | 124 | logging.info("tcp2proxy closed %s:%d -> %s:%d -> %s:%d %s %s %.2fms", conn.address[0], conn.address[1], proxy_host, proxy_port, 125 | remote_host, remote_port, format_data_len(status["send_len"]), format_data_len(status["recv_len"]), 126 | (time.time() - start_time) * 1000) 127 | 128 | async def check_timeout(conns, timeout): 129 | def run_check(): 130 | while True: 131 | try: 132 | now = time.time() 133 | for conn_id, (conn, status) in list(conns.items()): 134 | if status['check_recv_len'] != status['recv_len'] or status['check_send_len'] != status['send_len']: 135 | status["check_recv_len"] = status["recv_len"] 136 | 
status["check_send_len"] = status["send_len"] 137 | status['last_time'] = now 138 | continue 139 | 140 | if now - status['last_time'] >= timeout: 141 | sevent.current().add_async_safe(conn.close) 142 | conns.pop(conn_id, None) 143 | finally: 144 | time.sleep(min(float(timeout) / 2.0, 30)) 145 | 146 | if timeout > 0: 147 | check_thread = threading.Thread(target=run_check) 148 | check_thread.daemon = True 149 | check_thread.start() 150 | await sevent.Future() 151 | 152 | async def tcp_accept(server, args): 153 | proxy_info = args.proxy_host.split(":") 154 | if len(proxy_info) == 1: 155 | if not proxy_info[0].isdigit(): 156 | proxy_host, proxy_port = proxy_info[0], 8088 157 | else: 158 | proxy_host, proxy_port = "127.0.0.1", int(proxy_info[0]) 159 | else: 160 | proxy_host, proxy_port = proxy_info[0], int(proxy_info[1]) 161 | 162 | forward_info = args.forward_host.split(":") 163 | if len(forward_info) == 1: 164 | if not forward_info[0].isdigit(): 165 | forward_host, forward_port = forward_info[0], 8088 166 | else: 167 | forward_host, forward_port = "127.0.0.1", int(forward_info[0]) 168 | else: 169 | forward_host, forward_port = forward_info[0], int(forward_info[1]) 170 | 171 | logging.info("use %s proxy %s:%d forward to %s:%d", args.proxy_type, proxy_host, proxy_port, forward_host, forward_port) 172 | conns = {} 173 | sevent.current().call_async(check_timeout, conns, args.timeout) 174 | while True: 175 | conn = await server.accept() 176 | status = {"recv_len": 0, "send_len": 0, "last_time": time.time(), "check_recv_len": 0, "check_send_len": 0} 177 | if args.proxy_type == "http": 178 | sevent.current().call_async(http_proxy, conns, conn, proxy_host, proxy_port, forward_host, forward_port, status) 179 | else: 180 | sevent.current().call_async(socks5_proxy, conns, conn, proxy_host, proxy_port, forward_host, forward_port, status) 181 | conns[id(conn)] = (conn, status) 182 | 183 | def main(argv): 184 | parser = argparse.ArgumentParser(description='forword tcp port to 
remote host from http or socks5 proxy') 185 | parser.add_argument('-b', dest='bind', default="0.0.0.0", help='local bind host (default: 0.0.0.0)') 186 | parser.add_argument('-p', dest='port', default=8088, type=int, help='local bind port (default: 8088)') 187 | parser.add_argument('-t', dest='timeout', default=7200, type=int, help='no read/write timeout (default: 7200)') 188 | parser.add_argument('-T', dest='proxy_type', default="http", choices=("http", "socks5"), help='proxy type (default: http)') 189 | parser.add_argument('-P', dest='proxy_host', default="127.0.0.1:8088", help='proxy host, accept format [proxy_host:proxy_port] (default: 127.0.0.1:8088)') 190 | parser.add_argument('-f', dest='forward_host', default="127.0.0.1:80", help='remote forward host , accept format [remote_host:remote_port] (default: 127.0.0.1:80)') 191 | args = parser.parse_args(args=argv) 192 | config_signal() 193 | server = create_server((args.bind, args.port)) 194 | logging.info("listen server at %s:%d", args.bind, args.port) 195 | sevent.current().call_async(tcp_accept, server, args) 196 | 197 | if __name__ == '__main__': 198 | logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)1.1s %(message)s', 199 | datefmt='%Y-%m-%d %H:%M:%S', filemode='a+') 200 | try: 201 | main(sys.argv[1:]) 202 | sevent.instance().start() 203 | except KeyboardInterrupt: 204 | exit(0) -------------------------------------------------------------------------------- /sevent/helpers/utils.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 2022/1/22 3 | # create by: snower 4 | 5 | import os 6 | import string 7 | import struct 8 | import signal 9 | import socket 10 | import sevent 11 | 12 | def config_signal(): 13 | signal.signal(signal.SIGINT, lambda signum, frame: sevent.current().stop()) 14 | signal.signal(signal.SIGTERM, lambda signum, frame: sevent.current().stop()) 15 | 16 | def get_address_environ(address, key): 17 | if 
address and isinstance(address, tuple): 18 | if isinstance(address[0], str): 19 | host_key = "".join([c if c in string.hexdigits else "_" for c in address[0]]).upper() 20 | if len(address) >= 2: 21 | value = os.environ.get("%s_%s_%s" % (key, host_key, address[1])) 22 | if value is not None: 23 | return value 24 | if len(address) >= 2: 25 | value = os.environ.get("%s_%s" % (key, address[1])) 26 | if value is not None: 27 | return value 28 | return os.environ.get(key) 29 | 30 | def create_server(address, *args, **kwargs): 31 | if "pipe" in address: 32 | server = sevent.pipe.PipeServer() 33 | else: 34 | ssl_certificate_file = get_address_environ(address, "SEVENT_HELPERS_SSL_CERTIFICATE_FILE") 35 | ssl_certificate_key_file = get_address_environ(address, "SEVENT_HELPERS_SSL_CERTIFICATE_KEY_FILE") 36 | if ssl_certificate_file and ssl_certificate_key_file: 37 | import ssl 38 | context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) 39 | context.load_cert_chain(certfile=ssl_certificate_file, keyfile=ssl_certificate_key_file) 40 | if get_address_environ(address, "SEVENT_HELPERS_SSL_INSECURE"): 41 | context.verify_mode = ssl.CERT_NONE 42 | server = sevent.ssl.SSLServer(context) 43 | else: 44 | server = sevent.tcp.Server() 45 | server.enable_reuseaddr() 46 | server.listen(address, *args, **kwargs) 47 | return server 48 | 49 | def create_socket(address): 50 | if "pipe" in address: 51 | if isinstance(address, (tuple, list)): 52 | pipe_address = "pipe#%s" % (address[1] if len(address) >= 2 else address[-1]) 53 | elif not isinstance(address, str): 54 | pipe_address = "pipe#%s" % address 55 | else: 56 | pipe_address = address 57 | if pipe_address in sevent.pipe.PipeServer._bind_servers: 58 | conn = sevent.pipe.PipeSocket() 59 | else: 60 | ssl_ca_file = get_address_environ(address, "SEVENT_HELPERS_SSL_CA_FILE") 61 | if ssl_ca_file: 62 | import ssl 63 | context = ssl.create_default_context() 64 | context.load_verify_locations(cafile=ssl_ca_file) 65 | if get_address_environ(address, 
"SEVENT_HELPERS_SSL_INSECURE"): 66 | context.check_hostname = False 67 | context.verify_mode = ssl.CERT_NONE 68 | conn = sevent.ssl.SSLSocket(context=context, server_hostname=address[0] if address and isinstance(address, tuple) else None) 69 | else: 70 | conn = sevent.tcp.Socket() 71 | else: 72 | ssl_ca_file = get_address_environ(address, "SEVENT_HELPERS_SSL_CA_FILE") 73 | if ssl_ca_file: 74 | import ssl 75 | context = ssl.create_default_context() 76 | if ssl_ca_file != "-": 77 | context.load_verify_locations(cafile=ssl_ca_file) 78 | if get_address_environ(address, "SEVENT_HELPERS_SSL_INSECURE"): 79 | context.check_hostname = False 80 | context.verify_mode = ssl.CERT_NONE 81 | conn = sevent.ssl.SSLSocket(context=context, server_hostname=address[0] if address and isinstance(address, tuple) else None) 82 | else: 83 | conn = sevent.tcp.Socket() 84 | conn.enable_nodelay() 85 | return conn 86 | 87 | def format_data_len(date_len): 88 | if date_len < 1024: 89 | return "%dB" % date_len 90 | elif date_len < 1024*1024: 91 | return "%.3fK" % (date_len/1024.0) 92 | elif date_len < 1024*1024*1024: 93 | return "%.3fM" % (date_len/(1024.0*1024.0)) 94 | elif date_len < 1024*1024*1024*1024: 95 | return "%.3fG" % (date_len/(1024.0*1024.0*1024.0)) 96 | return "%.3fT" % (date_len/(1024.0*1024.0*1024.0*1024.0)) 97 | 98 | def is_subnet(ip, subnet): 99 | try: 100 | ip = struct.unpack("!I", socket.inet_pton(socket.AF_INET, ip))[0] 101 | if isinstance(subnet[0], tuple) or isinstance(subnet[1], tuple): 102 | return False 103 | return (ip & subnet[1]) == (subnet[0] & subnet[1]) 104 | except: 105 | ip = (struct.unpack("!QQ", socket.inet_pton(socket.AF_INET6, ip))) 106 | if not isinstance(subnet[0], tuple) or len(subnet[0]) != 2 or not isinstance(subnet[1], tuple) or len(subnet[1]) != 2: 107 | return False 108 | return ((ip[0] & subnet[1][0]) == (subnet[0][0] & subnet[1][0])) and ((ip[1] & subnet[1][1]) == (subnet[0][1] & subnet[1][1])) 
-------------------------------------------------------------------------------- /sevent/impl/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- -------------------------------------------------------------------------------- /sevent/impl/epoll_loop.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import select 4 | from ..loop import IOLoop 5 | 6 | 7 | class EpollLoop(IOLoop): 8 | def __init__(self): 9 | super(EpollLoop, self).__init__() 10 | self._epoll = select.epoll() 11 | 12 | self._poll = self._epoll.poll 13 | self._add_fd = self._epoll.register 14 | self._remove_fd = self._epoll.unregister 15 | self._modify_fd = self._epoll.modify 16 | 17 | def _poll(self, timeout): 18 | return self._epoll.poll(timeout) 19 | 20 | def _add_fd(self, fd, mode): 21 | self._epoll.register(fd, mode) 22 | 23 | def _remove_fd(self, fd): 24 | self._epoll.unregister(fd) 25 | 26 | def _modify_fd(self, fd, mode): 27 | self._epoll.modify(fd, mode) 28 | -------------------------------------------------------------------------------- /sevent/impl/kqueue_loop.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import os 4 | import select 5 | from collections import defaultdict 6 | from ..loop import IOLoop, MODE_NULL, MODE_IN, MODE_OUT 7 | 8 | try: 9 | MAX_EVENTS = int(os.environ.get("SEVENT_KQUEUE_MAX_EVENTS", 1024)) 10 | except: 11 | MAX_EVENTS = 1024 12 | 13 | 14 | class KqueueLoop(IOLoop): 15 | def __init__(self): 16 | super(KqueueLoop, self).__init__() 17 | self._kqueue = select.kqueue() 18 | self._fds = {} 19 | 20 | def _control(self, fd, mode, flags): 21 | events = [] 22 | if mode & MODE_IN: 23 | events.append(select.kevent(fd, select.KQ_FILTER_READ, flags)) 24 | if mode & MODE_OUT: 25 | events.append(select.kevent(fd, select.KQ_FILTER_WRITE, flags)) 26 | for e in events: 27 | 
self._kqueue.control([e], 0) 28 | 29 | def _poll(self, timeout): 30 | if timeout < 0: 31 | timeout = None # kqueue behaviour 32 | events = self._kqueue.control(None, MAX_EVENTS, timeout) 33 | results = defaultdict(lambda: MODE_NULL) 34 | for e in events: 35 | fd = e.ident 36 | if e.filter == select.KQ_FILTER_READ: 37 | results[fd] |= MODE_IN 38 | elif e.filter == select.KQ_FILTER_WRITE: 39 | results[fd] |= MODE_OUT 40 | return results.items() 41 | 42 | def _add_fd(self, fd, mode): 43 | self._fds[fd] = mode 44 | self._control(fd, mode, select.KQ_EV_ADD) 45 | 46 | def _remove_fd(self, fd): 47 | self._control(fd, self._fds[fd], select.KQ_EV_DELETE) 48 | del self._fds[fd] 49 | 50 | def _modify_fd(self, fd, mode): 51 | self._remove_fd(fd) 52 | self._add_fd(fd, mode) 53 | -------------------------------------------------------------------------------- /sevent/impl/select_loop.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import select 4 | from collections import defaultdict 5 | from ..loop import IOLoop, MODE_NULL, MODE_IN, MODE_OUT, MODE_ERR 6 | 7 | 8 | class SelectLoop(IOLoop): 9 | def __init__(self): 10 | super(SelectLoop, self).__init__() 11 | self._r_list = set() 12 | self._w_list = set() 13 | self._x_list = set() 14 | 15 | def _poll(self, timeout): 16 | try: 17 | r, w, x = select.select(self._r_list, self._w_list, self._x_list, min(timeout, 2)) 18 | except Exception as e: 19 | if isinstance(e, (KeyboardInterrupt, SystemError)): 20 | raise e 21 | return [] 22 | results = defaultdict(lambda: MODE_NULL) 23 | for p in [(r, MODE_IN), (w, MODE_OUT), (x, MODE_ERR)]: 24 | for fd in p[0]: 25 | results[fd] |= p[1] 26 | return results.items() 27 | 28 | def _add_fd(self, fd, mode): 29 | if mode & MODE_IN: 30 | self._r_list.add(fd) 31 | if mode & MODE_OUT: 32 | self._w_list.add(fd) 33 | if mode & MODE_ERR: 34 | self._x_list.add(fd) 35 | 36 | def _remove_fd(self, fd): 37 | if fd in self._r_list: 38 | 
self._r_list.remove(fd) 39 | if fd in self._w_list: 40 | self._w_list.remove(fd) 41 | if fd in self._x_list: 42 | self._x_list.remove(fd) 43 | 44 | def _modify_fd(self, fd, mode): 45 | self._remove_fd(fd) 46 | self._add_fd(fd, mode) 47 | -------------------------------------------------------------------------------- /sevent/loop.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import select 4 | import time 5 | import heapq 6 | import threading 7 | from collections import defaultdict 8 | from .waker import Waker 9 | from .utils import is_py3, get_logger 10 | 11 | ''' You can only use instance(). Don't create a Loop() ''' 12 | 13 | _thread_local = threading.local() 14 | _thread_local._sevent_ioloop = None 15 | _ioloop_lock = threading.RLock() 16 | _ioloop_cls = None 17 | _ioloop = None 18 | _mul_ioloop = False 19 | 20 | 21 | def instance(): 22 | global _ioloop_cls, _ioloop, _mul_ioloop 23 | try: 24 | if _thread_local._sevent_ioloop is not None: 25 | return _thread_local._sevent_ioloop 26 | except AttributeError: 27 | pass 28 | 29 | with _ioloop_lock: 30 | try: 31 | if _thread_local._sevent_ioloop is not None: 32 | return _thread_local._sevent_ioloop 33 | except AttributeError: 34 | pass 35 | 36 | if 'epoll' in select.__dict__: 37 | from .impl import epoll_loop 38 | get_logger().debug('using epoll') 39 | _ioloop_cls = epoll_loop.EpollLoop 40 | elif 'kqueue' in select.__dict__: 41 | from .impl import kqueue_loop 42 | get_logger().debug('using kqueue') 43 | _ioloop_cls = kqueue_loop.KqueueLoop 44 | else: 45 | from .impl import select_loop 46 | get_logger().debug('using select') 47 | _ioloop_cls = select_loop.SelectLoop 48 | 49 | _thread_local._sevent_ioloop = _ioloop_cls() 50 | if _ioloop is None: 51 | _ioloop = _thread_local._sevent_ioloop 52 | else: 53 | _mul_ioloop = True 54 | return _thread_local._sevent_ioloop 55 | 56 | 57 | def current(): 58 | if not _mul_ioloop: 59 | return _ioloop 60 | try: 
61 | return _thread_local._sevent_ioloop 62 | except AttributeError: 63 | return _ioloop 64 | 65 | 66 | # these values are defined as the same as poll 67 | MODE_NULL = 0x00 68 | MODE_IN = 0x01 69 | MODE_OUT = 0x04 70 | MODE_ERR = 0x08 71 | MODE_HUP = 0x10 72 | MODE_NVAL = 0x20 73 | 74 | 75 | class TimeoutHandler(object): 76 | def __init__(self, callback, deadline, args, kwargs): 77 | '''deadline here is absolute timestamp''' 78 | self.callback = callback 79 | self.deadline = deadline 80 | self.args = args 81 | self.kwargs = kwargs 82 | self.canceled = False 83 | 84 | def __cmp__(self, other): 85 | return (self.deadline > other.deadline) - (self.deadline < other.deadline) 86 | 87 | def __eq__(self, other): 88 | return self.deadline == other.deadline 89 | 90 | def __gt__(self, other): 91 | return self.deadline > other.deadline 92 | 93 | def __lt__(self, other): 94 | return self.deadline < other.deadline 95 | 96 | def __ge__(self, other): 97 | return self.deadline >= other.deadline 98 | 99 | def __le__(self, other): 100 | return self.deadline <= other.deadline 101 | 102 | def __ne__(self, other): 103 | return self.deadline != other.deadline 104 | 105 | def __call__(self): 106 | self.callback(*self.args, **self.kwargs) 107 | 108 | 109 | class IOLoop(object): 110 | def __init__(self): 111 | self._handlers = [] 112 | self._run_handlers = [] 113 | self._timeout_handlers = [] 114 | self._fd_handlers = defaultdict(list) 115 | self._stopped = False 116 | self._waker = Waker() 117 | 118 | def _poll(self, timeout): 119 | raise NotImplementedError() 120 | 121 | def _add_fd(self, fd, mode): 122 | raise NotImplementedError() 123 | 124 | def _remove_fd(self, fd): 125 | raise NotImplementedError() 126 | 127 | def _modify_fd(self, fd, mode): 128 | raise NotImplementedError() 129 | 130 | def add_fd(self, fd, mode, callback): 131 | handlers = self._fd_handlers[fd] 132 | new_handlers = [] 133 | if not handlers: 134 | new_handlers.append((callback, fd, mode)) 135 | self._add_fd(fd, 
mode) 136 | else: 137 | new_mode = MODE_NULL 138 | for hcallback, hfd, hmode in handlers: 139 | if hcallback != callback: 140 | new_mode |= hmode 141 | new_handlers.append((hcallback, hfd, hmode)) 142 | new_handlers.append((callback, fd, mode)) 143 | new_mode |= mode 144 | self._modify_fd(fd, new_mode) 145 | self._fd_handlers[fd] = new_handlers 146 | return True 147 | 148 | def update_fd(self, fd, mode, callback): 149 | handlers = self._fd_handlers[fd] 150 | if not handlers: 151 | return False 152 | new_handlers = [] 153 | new_mode = MODE_NULL 154 | for hcallback, hfd, hmode in handlers: 155 | if hcallback == callback: 156 | new_mode |= mode 157 | new_handlers.append((hcallback, hfd, mode)) 158 | else: 159 | new_mode |= hmode 160 | new_handlers.append((hcallback, hfd, hmode)) 161 | self._modify_fd(fd, new_mode) 162 | self._fd_handlers[fd] = new_handlers 163 | return True 164 | 165 | def remove_fd(self, fd, callback): 166 | handlers = self._fd_handlers[fd] 167 | if not handlers: 168 | return False 169 | if len(handlers) == 1: 170 | if handlers[0][0] == callback: 171 | self._remove_fd(fd) 172 | del self._fd_handlers[fd] 173 | else: 174 | new_handlers = [] 175 | new_mode = MODE_NULL 176 | for hcallback, hfd, hmode in handlers: 177 | if hcallback != callback: 178 | new_mode |= hmode 179 | new_handlers.append((hcallback, hfd, hmode)) 180 | self._modify_fd(fd, new_mode) 181 | self._fd_handlers[fd] = new_handlers 182 | return True 183 | 184 | def clear_fd(self, fd): 185 | if fd in self._fd_handlers: 186 | if self._fd_handlers[fd]: 187 | del self._fd_handlers[fd] 188 | self._remove_fd(fd) 189 | return True 190 | del self._fd_handlers[fd] 191 | return False 192 | 193 | def start(self): 194 | self.add_fd(self._waker.fileno(), MODE_IN, self._waker.consume) 195 | 196 | while not self._stopped: 197 | timeout = 3600 198 | 199 | if self._timeout_handlers: 200 | cur_time = time.time() 201 | if self._timeout_handlers[0].deadline <= cur_time: 202 | while self._timeout_handlers: 203 
| handler = self._timeout_handlers[0] 204 | if handler.canceled: 205 | heapq.heappop(self._timeout_handlers) 206 | elif handler.deadline <= cur_time: 207 | heapq.heappop(self._timeout_handlers) 208 | try: 209 | handler.callback(*handler.args, **handler.kwargs) 210 | except Exception as e: 211 | if isinstance(e, (KeyboardInterrupt, SystemError)): 212 | raise e 213 | get_logger().exception("loop callback timeout error:%s", e) 214 | elif self._handlers: 215 | timeout = 0 216 | break 217 | else: 218 | timeout = self._timeout_handlers[0].deadline - cur_time 219 | break 220 | elif self._handlers: 221 | timeout = 0 222 | else: 223 | timeout = self._timeout_handlers[0].deadline - cur_time 224 | elif self._handlers: 225 | timeout = 0 226 | 227 | fds_ready = self._poll(timeout) 228 | for fd, mode in fds_ready: 229 | for hcallback, hfd, hmode in self._fd_handlers[fd]: 230 | if hmode & mode != 0: 231 | try: 232 | hcallback() 233 | except Exception as e: 234 | if isinstance(e, (KeyboardInterrupt, SystemError)): 235 | raise e 236 | get_logger().exception("loop callback error:%s", e) 237 | 238 | # call handlers without fd 239 | self._handlers, self._run_handlers = self._run_handlers, self._handlers 240 | for callback, args, kwargs in self._run_handlers: 241 | try: 242 | callback(*args, **kwargs) 243 | except Exception as e: 244 | if isinstance(e, (KeyboardInterrupt, SystemError)): 245 | raise e 246 | get_logger().exception("loop callback error:%s", e) 247 | self._run_handlers = [] 248 | 249 | def stop(self): 250 | self._stopped = True 251 | self._waker.wake() 252 | 253 | def add_async(self, callback, *args, **kwargs): 254 | self._handlers.append((callback, args, kwargs)) 255 | 256 | def add_async_safe(self, callback, *args, **kwargs): 257 | self._handlers.append((callback, args, kwargs)) 258 | self._waker.wake() 259 | 260 | def add_timeout(self, timeout, callback, *args, **kwargs): 261 | handler = TimeoutHandler(callback, time.time() + timeout, args, kwargs) 262 | 
heapq.heappush(self._timeout_handlers, handler) 263 | return handler 264 | 265 | def cancel_timeout(self, handler): 266 | if handler.__class__ == TimeoutHandler: 267 | handler.callback = None 268 | handler.args = None 269 | handler.kwargs = None 270 | handler.canceled = True 271 | else: 272 | try: 273 | self._timeout_handlers.remove(handler) 274 | heapq.heapify(self._timeout_handlers) 275 | except ValueError: 276 | pass 277 | 278 | while self._timeout_handlers: 279 | if not self._timeout_handlers[0].canceled: 280 | break 281 | heapq.heappop(self._timeout_handlers) 282 | 283 | def wakeup(self, *args, **kwargs): 284 | if args and callable(args[0]): 285 | self.add_async(args[0], *args[1:], **kwargs) 286 | self._waker.wake() 287 | 288 | 289 | if is_py3: 290 | from .coroutines.loop import warp_coroutine 291 | IOLoop = warp_coroutine(IOLoop) 292 | -------------------------------------------------------------------------------- /sevent/pipe.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 2022/1/21 3 | # create by: snower 4 | 5 | from .utils import is_py3, get_logger 6 | from .event import EventEmitter, null_emit_callback 7 | from .loop import instance 8 | from .buffer import Buffer, BaseBuffer, RECV_BUFFER_SIZE 9 | from .errors import SocketClosed 10 | 11 | STATE_INITIALIZED = 0x01 12 | STATE_CONNECTING = 0x02 13 | STATE_STREAMING = 0x04 14 | STATE_LISTENING = 0x08 15 | STATE_CLOSING = 0x10 16 | STATE_CLOSED = 0x20 17 | 18 | class PipeSocket(EventEmitter): 19 | MAX_BUFFER_SIZE = None 20 | RECV_BUFFER_SIZE = RECV_BUFFER_SIZE 21 | 22 | @classmethod 23 | def config(cls, max_buffer_size=None, recv_buffer_size=RECV_BUFFER_SIZE, **kwargs): 24 | cls.MAX_BUFFER_SIZE = max_buffer_size 25 | cls.RECV_BUFFER_SIZE = recv_buffer_size 26 | 27 | def __init__(self, loop=None, socket=None, address=None, max_buffer_size=None): 28 | EventEmitter.__init__(self) 29 | self._loop = loop or instance() 30 | self._socket = 
socket 31 | self._address = address 32 | self._max_buffer_size = max_buffer_size or self.MAX_BUFFER_SIZE 33 | self._rbuffers = Buffer(max_buffer_size=self._max_buffer_size) 34 | self._wbuffers = Buffer(max_buffer_size=self._max_buffer_size) 35 | self._state = STATE_STREAMING if socket else STATE_INITIALIZED 36 | self._has_drain_event = False 37 | self._reading = True 38 | self.ignore_write_closed_error = False 39 | 40 | @property 41 | def address(self): 42 | return self._address 43 | 44 | @property 45 | def socket(self): 46 | return self 47 | 48 | @property 49 | def buffer(self): 50 | return self._rbuffers, self._wbuffers 51 | 52 | def __del__(self): 53 | self.close() 54 | 55 | def on(self, event_name, callback): 56 | EventEmitter.on(self, event_name, callback) 57 | 58 | if event_name == "drain": 59 | self._has_drain_event = True 60 | 61 | def once(self, event_name, callback): 62 | EventEmitter.once(self, event_name, callback) 63 | 64 | if event_name == "drain": 65 | self._has_drain_event = True 66 | 67 | def off(self, event_name, callback): 68 | EventEmitter.off(self, event_name, callback) 69 | 70 | if not self._events[event_name] and not self._events_once[event_name]: 71 | if event_name == "drain": 72 | self._has_drain_event = False 73 | 74 | def noce(self, event_name, callback): 75 | EventEmitter.noce(self, event_name, callback) 76 | 77 | if not self._events[event_name] and not self._events_once[event_name]: 78 | if event_name == "drain": 79 | self._has_drain_event = False 80 | 81 | def remove_listener(self, event_name, callback): 82 | EventEmitter.remove_listener(self, event_name, callback) 83 | 84 | if not self._events[event_name] and not self._events_once[event_name]: 85 | if event_name == "drain": 86 | self._has_drain_event = False 87 | 88 | def on_connect(self, callback): 89 | self.on("connect", callback) 90 | 91 | def on_data(self, callback): 92 | self.on("data", callback) 93 | 94 | def on_end(self, callback): 95 | self.on("end", callback) 96 | 97 | def 
on_close(self, callback): 98 | self.on("close", callback) 99 | 100 | def on_error(self, callback): 101 | self.on("error", callback) 102 | 103 | def on_drain(self, callback): 104 | self.on("drain", callback) 105 | 106 | def off_connect(self, callback): 107 | self.on("connect", callback) 108 | 109 | def off_data(self, callback): 110 | self.on("data", callback) 111 | 112 | def off_end(self, callback): 113 | self.on("end", callback) 114 | 115 | def off_close(self, callback): 116 | self.on("close", callback) 117 | 118 | def off_error(self, callback): 119 | self.on("error", callback) 120 | 121 | def off_drain(self, callback): 122 | self.on("drain", callback) 123 | 124 | def once_connect(self, callback): 125 | self.once("connect", callback) 126 | 127 | def once_data(self, callback): 128 | self.once("data", callback) 129 | 130 | def once_end(self, callback): 131 | self.once("end", callback) 132 | 133 | def once_close(self, callback): 134 | self.once("close", callback) 135 | 136 | def once_error(self, callback): 137 | self.once("error", callback) 138 | 139 | def once_drain(self, callback): 140 | self.once("drain", callback) 141 | 142 | def noce_connect(self, callback): 143 | self.once("connect", callback) 144 | 145 | def noce_data(self, callback): 146 | self.once("data", callback) 147 | 148 | def noce_end(self, callback): 149 | self.once("end", callback) 150 | 151 | def noce_close(self, callback): 152 | self.once("close", callback) 153 | 154 | def noce_error(self, callback): 155 | self.once("error", callback) 156 | 157 | def noce_drain(self, callback): 158 | self.once("drain", callback) 159 | 160 | def enable_fast_open(self): 161 | pass 162 | 163 | @property 164 | def is_enable_fast_open(self): 165 | return False 166 | 167 | def enable_nodelay(self): 168 | pass 169 | 170 | @property 171 | def is_enable_nodelay(self): 172 | return True 173 | 174 | def end(self): 175 | if self._state not in (STATE_INITIALIZED, STATE_CONNECTING, STATE_STREAMING): 176 | return 177 | 178 | if 
self._state in (STATE_INITIALIZED, STATE_CONNECTING): 179 | self._loop.add_async(self.close) 180 | else: 181 | if self._wbuffers: 182 | self._state = STATE_CLOSING 183 | else: 184 | self._loop.add_async(self.close) 185 | 186 | def close(self): 187 | if self._state == STATE_CLOSED: 188 | return 189 | 190 | self._state = STATE_CLOSED 191 | def on_close(): 192 | try: 193 | self.emit_close(self) 194 | except Exception as e: 195 | get_logger().exception("tcp emit close error:%s", e) 196 | self.remove_all_listeners() 197 | self._rbuffers.close() 198 | self._wbuffers.close() 199 | self._rbuffers = None 200 | self._wbuffers = None 201 | self._loop.add_async(on_close) 202 | if self._socket: 203 | self._loop.add_async(self._socket.end) 204 | 205 | def _error(self, error): 206 | self._loop.add_async(self.emit_error, self, error) 207 | self._loop.add_async(self.close) 208 | if self.emit_error == null_emit_callback: 209 | get_logger().error("Pipe %s socket %s error: %s", self, self.socket, error) 210 | 211 | def connect(self, address, timeout=5): 212 | if self._state != STATE_INITIALIZED: 213 | if self._state == STATE_CLOSED: 214 | raise SocketClosed() 215 | return 216 | if isinstance(address, (tuple, list)): 217 | address = "pipe#%s" % (address[1] if len(address) >= 2 else address[-1]) 218 | elif not isinstance(address, str): 219 | address = "pipe#%s" % address 220 | self._socket = PipeServer._bind_servers[address]._accept_cb(self, address) 221 | self._address = (address, id(self)) 222 | self._state = STATE_STREAMING 223 | self._rbuffers.on("drain", lambda _: self.drain()) 224 | self._rbuffers.on("regain", lambda _: self.regain()) 225 | self._loop.add_async(self.emit_connect, self) 226 | 227 | def drain(self): 228 | if self._state in (STATE_STREAMING, STATE_CLOSING): 229 | self._reading = False 230 | 231 | def regain(self): 232 | if self._state in (STATE_STREAMING, STATE_CLOSING): 233 | self._reading = True 234 | 235 | BaseBuffer.extend(self._rbuffers, self._socket._wbuffers) 
236 | if self._rbuffers._len > self._rbuffers._drain_size and not self._rbuffers._full: 237 | self._rbuffers.do_drain() 238 | self._loop.add_async(self.emit_data, self, self._rbuffers) 239 | if self._socket._wbuffers._full and self._socket._wbuffers._len < self._socket._wbuffers._regain_size: 240 | self._socket._wbuffers.do_regain() 241 | if self._socket._has_drain_event: 242 | self._loop.add_async(self._socket.emit_drain, self._socket) 243 | if self._socket._state == STATE_CLOSING: 244 | self._socket.close() 245 | 246 | def write(self, data): 247 | if self._state != STATE_STREAMING: 248 | if self.ignore_write_closed_error: 249 | return False 250 | raise SocketClosed() 251 | 252 | if self._socket._reading: 253 | if data.__class__ == Buffer: 254 | BaseBuffer.extend(self._socket._rbuffers, data) 255 | else: 256 | BaseBuffer.write(self._socket._rbuffers, data) 257 | if self._socket._rbuffers._len > self._socket._rbuffers._drain_size and not self._socket._rbuffers._full: 258 | self._socket._rbuffers.do_drain() 259 | self._loop.add_async(self._socket.emit_data, self._socket, self._socket._rbuffers) 260 | if self._has_drain_event: 261 | self._loop.add_async(self.emit_drain, self) 262 | return True 263 | 264 | if data.__class__ == Buffer: 265 | BaseBuffer.extend(self._wbuffers, data) 266 | else: 267 | BaseBuffer.write(self._wbuffers, data) 268 | if self._wbuffers._len > self._wbuffers._drain_size and not self._wbuffers._full: 269 | self._wbuffers.do_drain() 270 | return False 271 | 272 | def link(self, socket): 273 | if self._state not in (STATE_STREAMING, STATE_CONNECTING): 274 | raise SocketClosed() 275 | if socket._state not in (STATE_STREAMING, STATE_CONNECTING): 276 | raise SocketClosed() 277 | 278 | self.ignore_write_closed_error = True 279 | socket.ignore_write_closed_error = True 280 | rbuffer, wbuffer = socket.buffer 281 | if self._state != STATE_STREAMING: 282 | if self.is_enable_fast_open and rbuffer: 283 | self.write(rbuffer) 284 | 285 | def on_connect(s): 286 
| self._wbuffers.link(rbuffer) 287 | if rbuffer: 288 | self.write(rbuffer) 289 | socket.on_data(lambda s, data: self.write(data)) 290 | 291 | self.on_connect(on_connect) 292 | 293 | else: 294 | self._wbuffers.link(rbuffer) 295 | if rbuffer: 296 | self.write(rbuffer) 297 | socket.on_data(lambda s, data: self.write(data)) 298 | 299 | if socket._state != STATE_STREAMING: 300 | if socket.is_enable_fast_open and self._rbuffers: 301 | socket.write(self._rbuffers) 302 | 303 | def on_pconnect(s): 304 | wbuffer.link(self._rbuffers) 305 | if self._rbuffers: 306 | socket.write(self._rbuffers) 307 | self.on_data(lambda s, data: socket.write(data)) 308 | 309 | socket.on_connect(on_pconnect) 310 | else: 311 | wbuffer.link(self._rbuffers) 312 | if self._rbuffers: 313 | socket.write(self._rbuffers) 314 | self.on_data(lambda s, data: socket.write(data)) 315 | 316 | self.on_close(lambda s: socket.end()) 317 | socket.on_close(lambda s: self.end()) 318 | 319 | 320 | class PipeServer(EventEmitter): 321 | _bind_servers = {} 322 | 323 | def __init__(self, loop=None): 324 | EventEmitter.__init__(self) 325 | self._loop = loop or instance() 326 | self._state = STATE_INITIALIZED 327 | self._address = None 328 | 329 | def __del__(self): 330 | self.close() 331 | 332 | def on_listen(self, callback): 333 | self.on("listen", callback) 334 | 335 | def on_connection(self, callback): 336 | self.on("connection", callback) 337 | 338 | def on_close(self, callback): 339 | self.on("close", callback) 340 | 341 | def on_error(self, callback): 342 | self.on("error", callback) 343 | 344 | def once_listen(self, callback): 345 | self.on("listen", callback) 346 | 347 | def once_connection(self, callback): 348 | self.on("connection", callback) 349 | 350 | def once_close(self, callback): 351 | self.on("close", callback) 352 | 353 | def once_error(self, callback): 354 | self.on("error", callback) 355 | 356 | def enable_fast_open(self): 357 | pass 358 | 359 | def enable_reuseaddr(self): 360 | pass 361 | 362 | def 
enable_nodelay(self): 363 | pass 364 | 365 | @property 366 | def is_enable_fast_open(self): 367 | return False 368 | 369 | @property 370 | def is_reuseaddr(self): 371 | return False 372 | 373 | @property 374 | def is_enable_nodelay(self): 375 | return True 376 | 377 | def listen(self, address, backlog=128): 378 | if self._state != STATE_INITIALIZED: 379 | if self._state == STATE_CLOSED: 380 | raise SocketClosed() 381 | return 382 | if isinstance(address, (tuple, list)): 383 | address = "pipe#%s" % (address[1] if len(address) >= 2 else address[-1]) 384 | elif not isinstance(address, str): 385 | address = "pipe#%s" % address 386 | PipeServer._bind_servers[address] = self 387 | self._address = address 388 | self._state = STATE_LISTENING 389 | 390 | def _accept_cb(self, socket, address): 391 | if self._state != STATE_LISTENING: 392 | return 393 | socket = PipeSocket(socket=socket, address=(address, id(socket))) 394 | self._loop.add_async(self.emit_connection, self, socket) 395 | return socket 396 | 397 | def _error(self, error): 398 | self._loop.add_async(self.emit_error, self, error) 399 | self.close() 400 | get_logger().error("server error: %s", error) 401 | 402 | def close(self): 403 | if self._state in (STATE_INITIALIZED, STATE_LISTENING): 404 | if self._address and self._address in PipeServer._bind_servers: 405 | PipeServer._bind_servers.pop(self._address) 406 | self._state = STATE_CLOSED 407 | 408 | def on_close(): 409 | try: 410 | self.emit_close(self) 411 | except Exception as e: 412 | get_logger().exception("tcp server emit close error:%s", e) 413 | self.remove_all_listeners() 414 | self._loop.add_async(on_close) 415 | 416 | 417 | if is_py3: 418 | from .coroutines.pipe import warp_coroutine 419 | PipeSocket, PipeServer = warp_coroutine(PipeSocket, PipeServer) -------------------------------------------------------------------------------- /sevent/sslsocket/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- 
coding: utf-8 -*-
# 2025/2/8
# create by: snower

from .tcp import SSLSocket, SSLServer
--------------------------------------------------------------------------------
/sevent/sslsocket/tcp.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# 2025/2/8
# create by: snower

import ssl
import time

from ..buffer import Buffer, BaseBuffer
from ..errors import SSLConnectError, SSLSocketError, SocketClosed, ConnectTimeout
from ..tcp import WarpSocket, WarpServer, STATE_INITIALIZED, STATE_CONNECTING, STATE_CLOSED


class SSLSocket(WarpSocket):
    """TLS layer over WarpSocket driven by memory BIOs.

    Plaintext flows through the usual read/write hooks while the wrapped
    socket carries the TLS records; ssl.MemoryBIO pairs shuttle bytes
    between the two.
    """

    # Process-wide lazily built default client context.
    _default_context = None

    @classmethod
    def load_default_context(cls):
        # Build the shared client-side context once; reused by every socket
        # constructed without an explicit context.
        if cls._default_context is None:
            cls._default_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
            cls._default_context.load_default_certs(ssl.Purpose.SERVER_AUTH)
        return cls._default_context

    def __init__(self, context=None, server_side=False, server_hostname=None, session=None, *args, **kwargs):
        WarpSocket.__init__(self, *args, **kwargs)

        self._context = context or self.load_default_context()
        self._server_side = server_side
        # True once the TLS handshake has completed.
        self._handshaked = False
        # Tri-state: None = shutdown not started, False = in progress, True = done.
        self._shutdowned = None
        # _incoming feeds ciphertext from the wire into the SSL object;
        # _outgoing collects ciphertext to be flushed to the wire.
        self._incoming = ssl.MemoryBIO()
        self._outgoing = ssl.MemoryBIO()
        self._ssl_bio = self._context.wrap_bio(self._incoming, self._outgoing, server_side, server_hostname, session)
        self._connect_timeout = 5
        self._connect_timestamp = 0
        self._handshake_callback = None
        self._handshake_timeout_handler = None
        self._shutdown_timeout_handler = None

    @property
    def context(self):
        return self._context

    @property
    def sslobj(self):
        return self._ssl_bio

    @property
    def session(self):
        if self._ssl_bio is not None:
            return self._ssl_bio.session
        return None

    @property
    def server_side(self):
        return self._server_side

    @property
    def server_hostname(self):
        if self._ssl_bio is not None:
            return self._ssl_bio.server_hostname
        return None

    def connect(self, address, timeout=5):
        """Connect the underlying socket; handshake starts in _do_connect."""
        if self._state != STATE_INITIALIZED:
            if self._state == STATE_CLOSED:
                raise SocketClosed()
            return
        # Remember when the connect started so the handshake gets whatever
        # time remains of the overall connect timeout.
        self._connect_timeout = timeout
        self._connect_timestamp = time.time()
        WarpSocket.connect(self, address, timeout)

    def close(self):
        """Close the socket, attempting a graceful TLS shutdown first."""
        if self._state == STATE_CLOSED or self._shutdowned is False:
            # Already closed, or a shutdown is already in flight.
            return
        if not self._handshaked or self._shutdowned is True:
            # Nothing TLS-level to unwind; close the transport directly.
            self._shutdowned = True
            WarpSocket.close(self)
            return

        def on_timeout_cb():
            self._shutdown_timeout_handler = None
            if self._shutdowned is True:
                return
            self._shutdowned = True
            self._error(ConnectTimeout("ssl shutdown time out %s" % str(self.address)))
        # Give the close_notify exchange up to 30 seconds before forcing an error.
        self._shutdown_timeout_handler = self._loop.add_timeout(30, on_timeout_cb)
        self._shutdowned = False
        self.do_shutdown()

    def _do_connect(self, socket):
        # Transport connected: run the handshake before surfacing the
        # connect event to user code.
        if self._state not in (STATE_INITIALIZED, STATE_CONNECTING):
            self._shutdowned = True
            WarpSocket.close(self)
            return
        if self._handshaked:
            WarpSocket._do_connect(self, socket)
            return
        # Handshake gets the remainder of the connect timeout, minimum 1s.
        timeout = max(1.0, self._connect_timeout - (time.time() - self._connect_timestamp))
        self.start_handshake(timeout, lambda _: self._loop.add_async(WarpSocket._do_connect, self, self._socket))

    def _do_close(self, socket):
        # Drop the BIOs/context and cancel any pending timers, then defer to
        # the base close handling. Idempotent via the None check.
        if self._ssl_bio is None or self._context is None:
            return
        self._incoming = None
        self._outgoing = None
        self._ssl_bio = None
        self._context = None
        self._shutdowned = True
        if self._handshake_timeout_handler:
            self._loop.cancel_timeout(self._handshake_timeout_handler)
            self._handshake_timeout_handler = None
        if self._shutdown_timeout_handler:
            self._loop.cancel_timeout(self._shutdown_timeout_handler)
            self._shutdown_timeout_handler = None
        WarpSocket._do_close(self, socket)

    def start_handshake(self, timeout, handshake_callback):
        """Kick off the TLS handshake; *handshake_callback* fires on success."""
        self._handshake_callback = handshake_callback

        def on_timeout_cb():
            self._handshake_timeout_handler = None
            if self._handshaked:
                return
            self._error(ConnectTimeout("ssl handshake time out %s" % str(self.address)))
        self._handshake_timeout_handler = self._loop.add_timeout(max(1, timeout), on_timeout_cb)
        self.do_handshake()

    def read(self, data):
        """Feed ciphertext from the wire into the TLS object and emit plaintext."""
        if self._state == STATE_CLOSED:
            return
        if data.__class__ == Buffer:
            self._incoming.write(data.read())
        else:
            self._incoming.write(data)
        # do_handshake() returns True while the handshake is still pending;
        # in that case the bytes were handshake records, not app data.
        if not self._handshaked and self.do_handshake():
            return

        try:
            last_data_len = self._rbuffers._len
            while self._incoming.pending:
                try:
                    chunk = self._ssl_bio.read(self._incoming.pending)
                    if not chunk:
                        break
                    BaseBuffer.write(self._rbuffers, chunk)
                except ssl.SSLWantReadError:
                    # Need more ciphertext; flush anything queued (e.g.
                    # renegotiation records) and wait for the next read.
                    if self._outgoing.pending:
                        self.flush()
                    break
                except ssl.SSLWantWriteError:
                    if self._outgoing.pending:
                        self.flush()
                except (ssl.SSLZeroReturnError, ssl.SSLEOFError):
                    # Clean TLS EOF from the peer.
                    break
            if last_data_len < self._rbuffers._len:
                # New plaintext arrived: drain bookkeeping + data event.
                if self._rbuffers._len > self._rbuffers._drain_size and not self._rbuffers._full:
                    self._rbuffers.do_drain()
                self._loop.add_async(self.emit_data, self, self._rbuffers)
        except Exception as e:
            self._shutdowned = True
            self._loop.add_async(self._error, SSLSocketError(str(e)))

    def write(self, data):
        """Encrypt *data* and queue it to the wire; buffered until handshaken."""
        if self._state == STATE_CLOSED:
            return False
        try:
            if not self._handshaked:
                # Handshake not done yet: stage plaintext in _wbuffers; it is
                # replayed through the SSL object in do_handshake().
                if data.__class__ == Buffer:
                    BaseBuffer.extend(self._wbuffers, data)
                    if data._full and data._len < data._regain_size:
                        data.do_regain()
                else:
                    BaseBuffer.write(self._wbuffers, data)
                if self._wbuffers._len > self._wbuffers._drain_size and not self._wbuffers._full:
                    self._wbuffers.do_drain()
                return False

            if data.__class__ == Buffer:
                data = data.read()
            self._ssl_bio.write(data)
        except Exception as e:
            self._shutdowned = True
            self._loop.add_async(self._error, SSLSocketError(str(e)))
        if self._outgoing.pending:
            return self.flush()
        return True

    def do_handshake(self):
        """Advance the handshake; returns False once completed, True otherwise."""
        while True:
            try:
                self._ssl_bio.do_handshake()
                if self._outgoing.pending:
                    self.flush()
                self._handshaked = True
                # Replay plaintext that was buffered while handshaking.
                if self._wbuffers:
                    self._ssl_bio.write(self._wbuffers.read())
                    if self._outgoing.pending:
                        self.flush()
                if self._handshake_timeout_handler:
                    self._loop.cancel_timeout(self._handshake_timeout_handler)
                    self._handshake_timeout_handler = None
                handshake_callback, self._handshake_callback = self._handshake_callback, None
                if handshake_callback is not None:
                    handshake_callback(self)
                return False
            except ssl.SSLWantReadError:
                # Waiting on peer handshake records; flush ours and yield.
                if self._outgoing.pending:
                    self.flush()
                break
            except ssl.SSLWantWriteError:
                if self._outgoing.pending:
                    self.flush()
            except Exception as e:
                self._shutdowned = True
                self._loop.add_async(self._error, SSLConnectError(self.address, e, "ssl handshake error %s %s" % (str(self.address), e)))
                break
        return True

    def do_shutdown(self):
        """Drive the close_notify exchange, then close the transport."""
        if self._shutdowned is True:
            return
        while True:
            try:
                if self._handshaked:
                    self._ssl_bio.unwrap()
                    if self._outgoing.pending:
                        self.flush()
                self._shutdowned = True
                WarpSocket.close(self)
                if self._shutdown_timeout_handler:
                    self._loop.cancel_timeout(self._shutdown_timeout_handler)
                    self._shutdown_timeout_handler = None
                break
            except
ssl.SSLWantReadError:
                # Peer's close_notify not received yet; send ours and let the
                # shutdown timeout (armed in close()) bound the wait.
                if self._outgoing.pending:
                    self.flush()
                break
            except ssl.SSLWantWriteError:
                if self._outgoing.pending:
                    self.flush()
            except Exception as e:
                self._shutdowned = True
                self._loop.add_async(self._error, SSLSocketError(str(e)))
                break

    def flush(self):
        """Push any TLS records queued in the outgoing BIO to the raw socket."""
        data = self._outgoing.read()
        if not data:
            return True
        return WarpSocket.write(self, data)


class SSLServer(WarpServer):
    """TLS-terminating server: each accepted socket is wrapped in SSLSocket."""

    @classmethod
    def create_server_context(cls):
        # Convenience factory for a server-side context with default certs.
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        context.load_default_certs(ssl.Purpose.CLIENT_AUTH)
        return context

    def __init__(self, context, *args, **kwargs):
        WarpServer.__init__(self, *args, **kwargs)

        self._context = context

    @property
    def context(self):
        return self._context

    def handshake(self, socket):
        # Wrap the accepted raw socket and only emit the connection event
        # once the TLS handshake completes (30s handshake timeout).
        max_buffer_size = socket._max_buffer_size if hasattr(socket, "_max_buffer_size") else None
        socket = SSLSocket(context=self._context, server_side=True, socket=socket, loop=self._loop, max_buffer_size=max_buffer_size)
        socket.start_handshake(30, lambda _: self._loop.add_async(self.emit_connection, self, socket))
--------------------------------------------------------------------------------
/sevent/utils.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# 18/5/25
# create by: snower

import sys
import logging

# Python 2/3 compatibility shims: text/bytes type aliases, an int check that
# covers `long` on py2, and a lazy range alias.
if sys.version_info[0] >= 3:
    is_py3 = True
    unicode_type = str
    byte_type = bytes

    def is_int(v):
        return v.__class__ == int

    iter_range = range
else:
    is_py3 = False
    unicode_type = unicode
    byte_type = str


    def is_int(v):
        return v.__class__ == int or v.__class__ == long

    iter_range = xrange


def ensure_bytes(s):
    # Encode text to UTF-8 bytes; pass non-text values through unchanged.
    if isinstance(s, unicode_type):
        return s.encode("utf-8")
    return s


def ensure_unicode(s):
    # Decode UTF-8 bytes to text; pass non-bytes values through unchanged.
    if isinstance(s, byte_type):
        return s.decode("utf-8")
    return s

# Module-level logger target; defaults to the logging module itself so that
# get_logger().error(...) works before set_logger() is called.
_logger = logging

def set_logger(logger):
    global _logger
    _logger = logger

def get_logger():
    return _logger
--------------------------------------------------------------------------------
/sevent/waker.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# 20/2/14
# create by: snower

import os
import socket
import threading


def set_close_exec(fd):
    # Mark fd close-on-exec so it does not leak into child processes.
    import fcntl
    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)


def _set_nonblocking(fd):
    import fcntl
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)


class PipeWaker(object):
    """Event-loop waker backed by a non-blocking self-pipe (POSIX only)."""

    def __init__(self):
        r, w = os.pipe()
        _set_nonblocking(r)
        _set_nonblocking(w)
        set_close_exec(r)
        set_close_exec(w)
        # Unbuffered binary wrappers so a single written byte is visible
        # to the poller immediately.
        self.reader = os.fdopen(r, "rb", 0)
        self.writer = os.fdopen(w, "wb", 0)
        self.lock = threading.Lock()

    def fileno(self):
        return self.reader.fileno()

    def wake(self):
        # May be called from any thread; errors (e.g. closed pipe) are
        # deliberately ignored — waking is best-effort.
        with self.lock:
            try:
                self.writer.write(b"x")
            except (IOError, ValueError):
                pass

    def consume(self):
        # Drain all pending wake bytes so the fd stops polling readable.
        try:
            while True:
                result = self.reader.read()
                if not result:
                    break
        except IOError:
            pass

    def close(self):
        self.reader.close()
        try:
            self.writer.close()
        except Exception:
            pass


class SocketWaker(object):
    """Event-loop waker backed by a loopback TCP socket pair (Windows fallback)."""

    def __init__(self):
        self.writer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

        # Connect the writer to a throwaway loopback listener, retrying a few
        # times because the ephemeral bind/connect can race.
        count = 0
        while 1:
            count += 1
            a =
socket.socket(socket.AF_INET, socket.SOCK_STREAM) 69 | a.bind(("127.0.0.1", 0)) 70 | a.listen(1) 71 | connect_address = a.getsockname() 72 | try: 73 | self.writer.connect(connect_address) 74 | break 75 | except socket.error: 76 | if count >= 10: 77 | a.close() 78 | self.writer.close() 79 | raise socket.error("Cannot bind trigger!") 80 | a.close() 81 | 82 | self.reader, addr = a.accept() 83 | self.reader.setblocking(0) 84 | self.writer.setblocking(0) 85 | a.close() 86 | self.lock = threading.Lock() 87 | 88 | def fileno(self): 89 | return self.reader.fileno() 90 | 91 | def wake(self): 92 | with self.lock: 93 | try: 94 | self.writer.send(b"x") 95 | except (IOError, socket.error, ValueError): 96 | pass 97 | 98 | def consume(self): 99 | try: 100 | while True: 101 | result = self.reader.recv(1024) 102 | if not result or len(result) < 1024: 103 | break 104 | except (IOError, socket.error): 105 | pass 106 | 107 | def close(self): 108 | self.reader.close() 109 | try: 110 | self.writer.close() 111 | except Exception: 112 | pass 113 | 114 | 115 | def Waker(): 116 | if os.name == 'nt': 117 | return SocketWaker() 118 | try: 119 | return PipeWaker() 120 | except: 121 | return SocketWaker() -------------------------------------------------------------------------------- /sevent/win32util.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 2023/5/10 3 | # create by: snower 4 | 5 | import sys 6 | 7 | if sys.platform == "win32": 8 | _prefer_wmi = True 9 | import winreg # pylint: disable=import-error 10 | # Keep pylint quiet on non-windows. 
11 | try: 12 | WindowsError is None # pylint: disable=used-before-assignment 13 | except KeyError: 14 | WindowsError = Exception 15 | 16 | try: 17 | import threading 18 | import pythoncom # pylint: disable=import-error 19 | import wmi # pylint: disable=import-error 20 | 21 | _have_wmi = True 22 | except Exception: 23 | _have_wmi = False 24 | 25 | class DnsInfo: 26 | def __init__(self): 27 | self.domain = None 28 | self.nameservers = [] 29 | self.search = [] 30 | 31 | if _have_wmi: 32 | 33 | class _WMIGetter(threading.Thread): 34 | def __init__(self): 35 | super().__init__() 36 | self.info = DnsInfo() 37 | 38 | def run(self): 39 | pythoncom.CoInitialize() 40 | try: 41 | system = wmi.WMI() 42 | for interface in system.Win32_NetworkAdapterConfiguration(): 43 | if interface.IPEnabled and interface.DNSDomain: 44 | self.info.domain = interface.DNSDomain 45 | self.info.nameservers = list(interface.DNSServerSearchOrder) 46 | if interface.DNSDomainSuffixSearchOrder: 47 | self.info.search = [ 48 | x for x in interface.DNSDomainSuffixSearchOrder 49 | ] 50 | break 51 | finally: 52 | pythoncom.CoUninitialize() 53 | 54 | def get(self): 55 | # We always run in a separate thread to avoid any issues with 56 | # the COM threading model. 57 | self.start() 58 | self.join() 59 | return self.info 60 | 61 | else: 62 | 63 | class _WMIGetter: # type: ignore 64 | pass 65 | 66 | class _RegistryGetter: 67 | def __init__(self): 68 | self.info = DnsInfo() 69 | 70 | def _determine_split_char(self, entry): 71 | # 72 | # The windows registry irritatingly changes the list element 73 | # delimiter in between ' ' and ',' (and vice-versa) in various 74 | # versions of windows. 75 | # 76 | if entry.find(" ") >= 0: 77 | split_char = " " 78 | elif entry.find(",") >= 0: 79 | split_char = "," 80 | else: 81 | # probably a singleton; treat as a space-separated list. 
82 | split_char = " " 83 | return split_char 84 | 85 | def _config_nameservers(self, nameservers): 86 | split_char = self._determine_split_char(nameservers) 87 | ns_list = nameservers.split(split_char) 88 | for ns in ns_list: 89 | if ns not in self.info.nameservers: 90 | self.info.nameservers.append(ns) 91 | 92 | def _config_search(self, search): 93 | split_char = self._determine_split_char(search) 94 | search_list = search.split(split_char) 95 | for s in search_list: 96 | if s not in self.info.search: 97 | self.info.search.append(s) 98 | 99 | def _config_fromkey(self, key, always_try_domain): 100 | try: 101 | servers, _ = winreg.QueryValueEx(key, "NameServer") 102 | except WindowsError: 103 | servers = None 104 | if servers: 105 | self._config_nameservers(servers) 106 | if servers or always_try_domain: 107 | try: 108 | dom, _ = winreg.QueryValueEx(key, "Domain") 109 | if dom: 110 | self.info.domain = dom 111 | except WindowsError: 112 | pass 113 | else: 114 | try: 115 | servers, _ = winreg.QueryValueEx(key, "DhcpNameServer") 116 | except WindowsError: 117 | servers = None 118 | if servers: 119 | self._config_nameservers(servers) 120 | try: 121 | dom, _ = winreg.QueryValueEx(key, "DhcpDomain") 122 | if dom: 123 | self.info.domain = dom 124 | except WindowsError: 125 | pass 126 | try: 127 | search, _ = winreg.QueryValueEx(key, "SearchList") 128 | except WindowsError: 129 | search = None 130 | if search is None: 131 | try: 132 | search, _ = winreg.QueryValueEx(key, "DhcpSearchList") 133 | except WindowsError: 134 | search = None 135 | if search: 136 | self._config_search(search) 137 | 138 | def _is_nic_enabled(self, lm, guid): 139 | # Look in the Windows Registry to determine whether the network 140 | # interface corresponding to the given guid is enabled. 141 | # 142 | # (Code contributed by Paul Marks, thanks!) 143 | # 144 | try: 145 | # This hard-coded location seems to be consistent, at least 146 | # from Windows 2000 through Vista. 
147 | connection_key = winreg.OpenKey( 148 | lm, 149 | r"SYSTEM\CurrentControlSet\Control\Network" 150 | r"\{4D36E972-E325-11CE-BFC1-08002BE10318}" 151 | r"\%s\Connection" % guid, 152 | ) 153 | 154 | try: 155 | # The PnpInstanceID points to a key inside Enum 156 | (pnp_id, ttype) = winreg.QueryValueEx( 157 | connection_key, "PnpInstanceID" 158 | ) 159 | 160 | if ttype != winreg.REG_SZ: 161 | raise ValueError # pragma: no cover 162 | 163 | device_key = winreg.OpenKey( 164 | lm, r"SYSTEM\CurrentControlSet\Enum\%s" % pnp_id 165 | ) 166 | 167 | try: 168 | # Get ConfigFlags for this device 169 | (flags, ttype) = winreg.QueryValueEx(device_key, "ConfigFlags") 170 | 171 | if ttype != winreg.REG_DWORD: 172 | raise ValueError # pragma: no cover 173 | 174 | # Based on experimentation, bit 0x1 indicates that the 175 | # device is disabled. 176 | # 177 | # XXXRTH I suspect we really want to & with 0x03 so 178 | # that CONFIGFLAGS_REMOVED devices are also ignored, 179 | # but we're shifting to WMI as ConfigFlags is not 180 | # supposed to be used. 
181 | return not flags & 0x1 182 | 183 | finally: 184 | device_key.Close() 185 | finally: 186 | connection_key.Close() 187 | except Exception: # pragma: no cover 188 | return False 189 | 190 | def get(self): 191 | """Extract resolver configuration from the Windows registry.""" 192 | 193 | lm = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) 194 | try: 195 | tcp_params = winreg.OpenKey( 196 | lm, r"SYSTEM\CurrentControlSet" r"\Services\Tcpip\Parameters" 197 | ) 198 | try: 199 | self._config_fromkey(tcp_params, True) 200 | finally: 201 | tcp_params.Close() 202 | interfaces = winreg.OpenKey( 203 | lm, 204 | r"SYSTEM\CurrentControlSet" 205 | r"\Services\Tcpip\Parameters" 206 | r"\Interfaces", 207 | ) 208 | try: 209 | i = 0 210 | while True: 211 | try: 212 | guid = winreg.EnumKey(interfaces, i) 213 | i += 1 214 | key = winreg.OpenKey(interfaces, guid) 215 | try: 216 | if not self._is_nic_enabled(lm, guid): 217 | continue 218 | self._config_fromkey(key, False) 219 | finally: 220 | key.Close() 221 | except EnvironmentError: 222 | break 223 | finally: 224 | interfaces.Close() 225 | finally: 226 | lm.Close() 227 | return self.info 228 | 229 | if _have_wmi and _prefer_wmi: 230 | _getter_class = _WMIGetter 231 | else: 232 | _getter_class = _RegistryGetter 233 | 234 | def get_dns_info(): 235 | """Extract resolver configuration.""" 236 | getter = _getter_class() 237 | return getter.get() --------------------------------------------------------------------------------