├── .gitignore ├── .travis.yml ├── LICENSE ├── MANIFEST.in ├── NOTICE ├── README.rst ├── THANKS ├── examples ├── buffered_channel.py ├── demo_channel.py ├── demo_goroutines.py ├── demo_polling.py ├── demo_select.py ├── demo_select_buffered.py ├── demo_signal.py └── demo_ticker.py ├── offset ├── __init__.py ├── core │ ├── __init__.py │ ├── chan.py │ ├── context.py │ ├── exc.py │ ├── kernel.py │ ├── proc.py │ ├── sigqueue.py │ ├── timer.py │ └── util.py ├── net │ ├── __init__.py │ ├── dial.py │ ├── exc.py │ ├── fd.py │ ├── fd_bsd.py │ ├── fd_epoll.py │ ├── fd_poll.py │ ├── fd_poll_base.py │ ├── fd_pollserver.py │ ├── fd_select.py │ ├── sock.py │ └── util.py ├── os │ ├── __init__.py │ ├── file.py │ └── signal.py ├── sync │ ├── __init__.py │ ├── atomic.py │ ├── cond.py │ ├── mutex.py │ ├── once.py │ ├── rwmutex.py │ ├── sema.py │ └── waitgroup.py ├── syscall │ ├── __init__.py │ ├── _socketio.py │ ├── fexec.py │ ├── proxy.py │ └── sysctl.py ├── time.py ├── util │ ├── __init__.py │ └── six.py └── version.py ├── requirements.txt ├── requirements_dev.txt ├── setup.py ├── tests ├── test_atomic.py ├── test_channel.py ├── test_core_timer.py ├── test_kernel.py ├── test_sync.py └── test_time.py └── tox.ini /.gitignore: -------------------------------------------------------------------------------- 1 | *.gem 2 | *.sw* 3 | *.pyc 4 | *.egg 5 | *#* 6 | build 7 | dist 8 | setuptools-* 9 | .svn/* 10 | .DS_Store 11 | *.so 12 | .Python 13 | distribute-0.6.8-py2.6.egg 14 | distribute-0.6.8.tar.gz 15 | offset.egg-info 16 | nohup.out 17 | .coverage 18 | doc/.sass-cache 19 | bin/ 20 | lib/ 21 | man/ 22 | include/ 23 | html/ 24 | 25 | 26 | __pycache__ 27 | .tox 28 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 2.7 3 | env: 4 | - TOX_ENV=py27 5 | - TOX_ENV=py32 6 | - TOX_ENV=py33 7 | - TOX_ENV=pypy 8 | 9 | install: 10 | - "[[ ${TOX_ENV} == pypy ]] && sudo add-apt-repository -y ppa:pypy/ppa || true" 11 | - "[[ ${TOX_ENV} == pypy ]] && sudo apt-get -y update && sudo apt-get -y install pypy || true" 12 | # This is required because we need to get rid of the Travis installed PyPy 13 | # or it'll take precedence over the PPA installed one. 14 | - "[[ ${TOX_ENV} == pypy ]] && sudo rm -rf /usr/local/pypy/bin || true" 15 | - pip install tox coveralls 16 | 17 | script: 18 | - tox -e $TOX_ENV 19 | 20 | after_success: 21 | - coveralls 22 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2013 (c) Benoît Chesneau 2 | 3 | Permission is hereby granted, free of charge, to any person 4 | obtaining a copy of this software and associated documentation 5 | files (the "Software"), to deal in the Software without 6 | restriction, including without limitation the rights to use, 7 | copy, modify, merge, publish, distribute, sublicense, and/or sell 8 | copies of the Software, and to permit persons to whom the 9 | Software is furnished to do so, subject to the following 10 | conditions: 11 | 12 | The above copyright notice and this permission notice shall be 13 | included in all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 16 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 17 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 18 | NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 19 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 20 | WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 | OTHER DEALINGS IN THE SOFTWARE. 23 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include NOTICE 2 | include LICENSE 3 | include README.rst 4 | include THANKS 5 | recursive-include examples * 6 | recursive-include tests * 7 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | offset 2 | ------ 3 | 4 | 2013 (c) Benoît Chesneau 5 | 6 | offset is available under the MIT License (see LICENSE). 7 | 8 | Third party 9 | ----------- 10 | 11 | offset.core.atomic under the MIT License 12 | ++++++++++++++++++++++++++++++++++++++++ 13 | 14 | Original source: https://github.com/dreid/atomiclong 15 | 16 | Copyright (c) 2013 David Reid 17 | 18 | Permission is hereby granted, free of charge, to any person obtaining a copy 19 | of this software and associated documentation files (the "Software"), to deal 20 | in the Software without restriction, including without limitation the rights 21 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 22 | copies of the Software, and to permit persons to whom the Software is 23 | furnished to do so, subject to the following conditions: 24 | 25 | The above copyright notice and this permission notice shall be included in 26 | all copies or substantial portions of the Software. 27 | 28 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 29 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 30 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 31 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 32 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 33 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 34 | 35 | 36 | offset.sync.mutex is based on the Golang implementation under the BSD License. 37 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Offset 2 | ====== 3 | 4 | *An offset is a small, virtually complete daughter plant that has been naturally 5 | and asexually produced on the mother plant.* 6 | 7 | Offset is a Python implementation of the Go concurrency model. Offset was introduced at the `Pycon APAC 2013 `_. 8 | 9 | .. image:: https://secure.travis-ci.org/benoitc/offset.png?branch=master 10 | :target: http://travis-ci.org/benoitc/offset 11 | -------------------------------------------------------------------------------- /THANKS: -------------------------------------------------------------------------------- 1 | Offset THANKS 2 | ============= 3 | 4 | A number of people have contributed to Offset by reporting problems, 5 | suggesting improvements of submitting changes. 
Some of these people are: 6 | 7 | Stéphane Wirtel 8 | -------------------------------------------------------------------------------- /examples/buffered_channel.py: -------------------------------------------------------------------------------- 1 | from offset import makechan, maintask, run 2 | 3 | 4 | @maintask 5 | def main(): 6 | c = makechan(2) 7 | c.send(1) 8 | c.send(2) 9 | print(c.recv()) 10 | print(c.recv()) 11 | 12 | 13 | if __name__ == "__main__": 14 | run() 15 | -------------------------------------------------------------------------------- /examples/demo_channel.py: -------------------------------------------------------------------------------- 1 | # example inspired from http://tour.golang.org/#66 2 | 3 | from offset import makechan, go, maintask, run 4 | 5 | def sum(a, c): 6 | s = 0 7 | for v in a: 8 | s += v 9 | c.send(s) 10 | 11 | @maintask 12 | def main(): 13 | a = [7, 2, 8, -9, 4, 0] 14 | 15 | c = makechan() 16 | go(sum, a[:int(len(a)/2)], c) 17 | go(sum, a[int(len(a)/2):], c) 18 | x, y = c.recv(), c.recv() 19 | 20 | print(x, y, x+y) 21 | 22 | if __name__ == "__main__": 23 | run() 24 | -------------------------------------------------------------------------------- /examples/demo_goroutines.py: -------------------------------------------------------------------------------- 1 | # inspired from http://tour.golang.org/#65 2 | 3 | 4 | from offset import go, maintask, run 5 | from offset import time 6 | 7 | def say(s): 8 | for i in range(5): 9 | time.sleep(100 * time.MILLISECOND) 10 | print(s) 11 | 12 | @maintask 13 | def main(): 14 | go(say, "world") 15 | say("hello") 16 | 17 | if __name__ == "__main__": 18 | run() 19 | -------------------------------------------------------------------------------- /examples/demo_polling.py: -------------------------------------------------------------------------------- 1 | from offset import go, maintask, run 2 | from offset.net import sock 3 | 4 | 5 | import signal 6 | from offset.core import kernel 7 | @maintask 8 | def main(): 9 | fd = sock.bind_socket("tcp", ('127.0.0.1', 0)) 10 | print(fd.name()) 11 | while True: 12 | fd1 = fd.accept() 13 | print("accepted %s" % fd1.name()) 14 | fd1.write(b"ok\n") 15 | fd1.close() 16 | 17 | run() 18 | -------------------------------------------------------------------------------- /examples/demo_select.py: -------------------------------------------------------------------------------- 1 | # demo inspired from http://tour.golang.org/#67 2 | 3 | from offset import makechan, select, go, run, maintask 4 | 5 | def fibonacci(c, quit): 6 | x, y = 0, 1 7 | while True: 8 | ret = select(c.if_send(x), quit.if_recv()) 9 | if ret == c.if_send(x): 10 | x, y = y, x+y 11 | elif ret == quit.if_recv(): 12 | print("quit") 13 | return 14 | 15 | @maintask 16 | def main(): 17 | c = makechan() 18 | quit = makechan() 19 | def f(): 20 | for i in range(10): 21 | print(c.recv()) 22 | 23 | quit.send(0) 24 | 25 | go(f) 26 | fibonacci(c, quit) 27 | 28 | if __name__ == "__main__": 29 | run() 30 | -------------------------------------------------------------------------------- /examples/demo_select_buffered.py: -------------------------------------------------------------------------------- 1 | from offset import * 2 | 3 | def test(c, quit): 4 | x = 0 5 | while True: 6 | ret = select(c.if_send(x), quit.if_recv()) 7 | if ret == c.if_send(x): 8 | x = x + 1 9 | elif ret == quit.if_recv(): 10 | print("quit") 11 | return 12 | 13 | @maintask 14 | def main(): 15 | c = makechan(5, label="c") 16 | quit = makechan(label="quit") 17 | def 
f(): 18 | for i in range(5): 19 | print(c.recv()) 20 | quit.send(0) 21 | 22 | go(f) 23 | test(c, quit) 24 | run() 25 | -------------------------------------------------------------------------------- /examples/demo_signal.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | from offset import makechan, run, maintask 6 | from offset import os 7 | from offset.os import signal 8 | import sys 9 | 10 | from offset.core.proc import current 11 | from offset.core.kernel import kernel 12 | 13 | @maintask 14 | def main(): 15 | print(current) 16 | c = makechan(1) 17 | signal.notify(c, os.SIGINT, os.SIGTERM, os.SIGQUIT) 18 | s = c.recv() 19 | print("got signal: %s" % s) 20 | print(kernel.runq) 21 | 22 | run() 23 | print("after run") 24 | print(kernel.running) 25 | -------------------------------------------------------------------------------- /examples/demo_ticker.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | from offset import run, maintask 6 | from offset.time import Ticker, SECOND 7 | 8 | 9 | @maintask 10 | def main(): 11 | ticker = Ticker(0.1 * SECOND) 12 | for i in range(3): 13 | print(ticker.c.recv()) 14 | ticker.stop() 15 | 16 | run() 17 | -------------------------------------------------------------------------------- /offset/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | # version info 6 | from .version import version_info, __version__ 7 | 8 | # scheduler functions 9 | from .core import go, run, gosched, maintask 10 | 11 | # channel functions 12 | from .core.chan import makechan, select, default 13 | 14 | # exceptions 15 | from .core.exc import PanicError 16 | -------------------------------------------------------------------------------- /offset/core/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | import functools 6 | 7 | from .context import Context, enter_syscall 8 | from .kernel import run 9 | from .chan import Channel, makechan, select, default 10 | from .exc import PanicError, ChannelError, KernelError 11 | 12 | 13 | def go(func, *args, **kwargs): 14 | """ starts the execution of a function call as an independent goroutine, 15 | within the same address space. 
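    A minimal usage sketch, adapted from examples/demo_goroutines.py in this
    repository::

        from offset import go, maintask, run

        def say(s):
            print(s)

        @maintask
        def main():
            go(say, "world")   # runs concurrently with main()
            say("hello")

        run()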
""" 16 | Context.instance().newproc(func, *args, **kwargs) 17 | 18 | def gosched(): 19 | """ force scheduling """ 20 | Context.instance().schedule() 21 | 22 | def maintask(func): 23 | Context.instance().newproc(func) 24 | return func 25 | 26 | def syscall(func): 27 | """ wrap a function to handle its result asynchronously 28 | 29 | This function is useful when you don't want to block the scheduler 30 | and execute the other goroutine while the function is processed 31 | """ 32 | 33 | ctx = Context.instance() 34 | 35 | @functools.wraps(func) 36 | def _wrapper(*args, **kwargs): 37 | # enter the functions in syscall 38 | 39 | ret = ctx.enter_syscall(func, *args, **kwargs) 40 | return ret 41 | return _wrapper 42 | -------------------------------------------------------------------------------- /offset/core/chan.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | from collections import deque 6 | import random 7 | 8 | from .context import Context 9 | from .exc import ChannelError 10 | from ..util import six 11 | from . import proc 12 | 13 | 14 | class bomb(object): 15 | def __init__(self, exp_type=None, exp_value=None, exp_traceback=None): 16 | self.type = exp_type 17 | self.value = exp_value 18 | self.traceback = exp_traceback 19 | 20 | def raise_(self): 21 | six.reraise(self.type, self.value, self.traceback) 22 | 23 | 24 | class SudoG(object): 25 | 26 | def __init__(self, g, elem): 27 | self.g = g 28 | self.elem = elem 29 | 30 | 31 | class scase(object): 32 | """ select case. 33 | 34 | op = 0 if recv, 1 if send, -1 if default 35 | """ 36 | 37 | def __init__(self, op, chan, elem=None): 38 | self.op = op 39 | self.ch = chan 40 | self.elem = elem 41 | self.sg = None 42 | self.ok = True 43 | self.value = None 44 | 45 | def __str__(self): 46 | if self.op == 0: 47 | cas_str = "recv" 48 | elif self.op == 1: 49 | cas_str = "send" 50 | else: 51 | cas_str = "default" 52 | 53 | return "scase:%s %s(%s)" % (str(self.ch), cas_str, 54 | str(self.elem)) 55 | 56 | @classmethod 57 | def recv(cls, chan): 58 | """ case recv 59 | 60 | in go: ``val <- elem`` 61 | """ 62 | return cls(0, chan) 63 | 64 | @classmethod 65 | def send(cls, chan, elem): 66 | """ case send 67 | - 68 | in go: ``chan <- elem`` 69 | """ 70 | return cls(1, chan, elem=elem) 71 | 72 | def __eq__(self, other): 73 | if other is None: 74 | return 75 | 76 | if self.elem is not None: 77 | return (self.ch == other.ch and self.op == other.op 78 | and self.elem == other.elem) 79 | 80 | return self.ch == other.ch and self.op == other.op 81 | 82 | def __ne__(self, other): 83 | if other is None: 84 | return 85 | 86 | if self.elem is not None: 87 | return not (self.ch == other.ch and self.op == other.op 88 | and self.elem == other.elem) 89 | 90 | return not(self.ch == other.ch and self.op == other.op) 91 | 92 | 93 | class CaseDefault(scase): 94 | 95 | def __init__(self): 96 | self.op = - 1 97 | self.chan = None 98 | self.elem = None 99 | self.ch = None 100 | self.value = None 101 | self.sg = None 102 | 103 | 104 | default = CaseDefault() 105 | 106 | 107 | class Channel(object): 108 | 109 | def __init__(self, size=None, label=None): 110 | self.size = size or 0 111 | 112 | self._buf = None 113 | if self.size > 0: 114 | self._buf = deque() 115 | 116 | self.closed = False 117 | self.label = label 118 | 119 | self.recvq = deque() # list of receive waiters 120 | self.sendq = deque() # list of send waiters 121 | 122 | def 
__str__(self): 123 | if self.label is not None: 124 | return "" % self.label 125 | return object.__str__(self) 126 | 127 | def close(self): 128 | self.closed = True 129 | 130 | # release all receivers 131 | while True: 132 | try: 133 | sg = self.recvq.popleft() 134 | except IndexError: 135 | break 136 | 137 | gp = sg.g 138 | gp.param = None 139 | gp.ready() 140 | 141 | # release all senders 142 | while True: 143 | try: 144 | sg = self.sendq.popleft() 145 | except IndexError: 146 | break 147 | 148 | gp = sg.g 149 | gp.param = None 150 | gp.ready() 151 | 152 | def open(self): 153 | self.closed = False 154 | 155 | def send(self, val): 156 | g = proc.current() 157 | 158 | if self.closed: 159 | raise ChannelError("send on a closed channel") 160 | 161 | if self.size > 0: 162 | # the buffer is full, wait until we can fill it 163 | while len(self._buf) >= self.size: 164 | mysg = SudoG(g, None) 165 | self.sendq.append(mysg) 166 | g.park() 167 | 168 | # fill the buffer 169 | self._buf.append(val) 170 | 171 | # eventually trigger a receiver 172 | sg = None 173 | try: 174 | sg = self.recvq.popleft() 175 | except IndexError: 176 | return 177 | 178 | if sg is not None: 179 | gp = sg.g 180 | gp.ready() 181 | 182 | else: 183 | sg = None 184 | # is the someone receiving? 185 | try: 186 | sg = self.recvq.popleft() 187 | except IndexError: 188 | pass 189 | 190 | if sg is not None: 191 | # yes, add the result and activate it 192 | gp = sg.g 193 | sg.elem = val 194 | gp.param = sg 195 | 196 | # activate the receive process 197 | gp.ready() 198 | return 199 | 200 | # noone is receiving, add the process to sendq and remove us from 201 | # the receive q 202 | mysg = SudoG(g, val) 203 | g.param = None 204 | self.sendq.append(mysg) 205 | g.park() 206 | 207 | if g.param is None: 208 | if not self.closed: 209 | raise ChannelError("chansend: spurious wakeup") 210 | 211 | def recv(self): 212 | sg = None 213 | g = proc.current() 214 | 215 | if self.size > 0: 216 | while len(self._buf) <= 0: 217 | mysg = SudoG(g, None) 218 | self.recvq.append(mysg) 219 | g.park() 220 | 221 | val = self._buf.popleft() 222 | 223 | # thread safe way to recv on a buffered channel 224 | try: 225 | sg = self.sendq.popleft() 226 | except IndexError: 227 | pass 228 | 229 | if sg is not None: 230 | # yes someone is sending, unblock it and return the result 231 | gp = sg.g 232 | gp.ready() 233 | 234 | if sg.elem is not None: 235 | self._buf.append(sg.elem) 236 | 237 | Context.instance().schedule() 238 | 239 | if isinstance(val, bomb): 240 | val.raise_() 241 | 242 | return val 243 | 244 | # sync recv 245 | try: 246 | sg = self.sendq.popleft() 247 | except IndexError: 248 | pass 249 | 250 | if sg is not None: 251 | gp = sg.g 252 | gp.param = sg 253 | gp.ready() 254 | 255 | if isinstance(sg.elem, bomb): 256 | sg.elem.raise_() 257 | 258 | return sg.elem 259 | 260 | # noone is sending, we have to wait. 
Append the current process to 261 | # receiveq, remove us from the run queue and switch 262 | mysg = SudoG(g, None) 263 | g.param = None 264 | self.recvq.append(mysg) 265 | g.park() 266 | 267 | if g.param is None: 268 | if not self.closed: 269 | raise ChannelError("chanrecv: spurious wakeup") 270 | return 271 | 272 | # we are back in the process, return the current value 273 | if isinstance(g.param.elem, bomb): 274 | g.param.elem.raise_() 275 | 276 | return g.param.elem 277 | 278 | def send_exception(self, exp_type, msg): 279 | self.send(bomb(exp_type, exp_type(msg))) 280 | 281 | def if_recv(self): 282 | return scase.recv(self) 283 | 284 | def if_send(self, elem): 285 | return scase.send(self, elem) 286 | 287 | 288 | def select(*cases): 289 | """ A select function lets a goroutine wait on multiple 290 | communication operations. 291 | 292 | A select blocks until one of its cases can run, then it 293 | executes that case. It chooses one at random if multiple are ready""" 294 | 295 | # reorder cases 296 | 297 | 298 | c_ordered = [(i, cas) for i, cas in enumerate(cases)] 299 | random.shuffle(c_ordered) 300 | cases = [cas for _, cas in c_ordered] 301 | 302 | while True: 303 | # pass 1 - look for something already waiting 304 | for cas in cases: 305 | if cas.op == 0: 306 | # RECV 307 | if cas.ch.size > 0 and len(cas.ch._buf) > 0: 308 | # buffered channel 309 | cas.value = cas.ch._buf.popleft() 310 | 311 | # dequeue from the sendq 312 | sg = None 313 | try: 314 | sg = cas.ch.sendq.popleft() 315 | except IndexError: 316 | pass 317 | 318 | if sg is not None: 319 | gp = sg.g 320 | gp.ready() 321 | 322 | # return the case 323 | return cas 324 | else: 325 | # 326 | sg = None 327 | try: 328 | sg = cas.ch.sendq.popleft() 329 | except IndexError: 330 | pass 331 | 332 | if sg is not None: 333 | gp = sg.g 334 | gp.param = sg 335 | gp.ready() 336 | cas.elem = sg.elem 337 | return cas 338 | 339 | if cas.ch.closed: 340 | return 341 | 342 | elif cas.op == 1: 343 | if cas.ch.closed: 344 | return 345 | 346 | # SEND 347 | if cas.ch.size > 0 and len(cas.ch._buf) < cas.ch.size: 348 | # buffered channnel, we can fill the buffer 349 | cas.ch._buf.append(cas.elem) 350 | 351 | # eventually trigger a receiver 352 | sg = None 353 | try: 354 | sg = cas.ch.recvq.popleft() 355 | except IndexError: 356 | pass 357 | 358 | if sg is not None: 359 | gp = sg.g 360 | gp.ready() 361 | 362 | # return 363 | return cas 364 | else: 365 | sg = None 366 | try: 367 | sg = cas.ch.recvq.popleft() 368 | except IndexError: 369 | pass 370 | 371 | if sg is not None: 372 | gp = sg.g 373 | sg.elem = cas.elem 374 | gp.param = sg 375 | gp.ready() 376 | return cas 377 | else: 378 | # default case 379 | return cas 380 | 381 | # pass 2 - enqueue on all channels 382 | g = proc.current() 383 | g.param = None 384 | g.sleeping = True 385 | for cas in cases: 386 | sg = SudoG(g, cas.elem) 387 | cas.sg = sg 388 | if cas.op == 0: 389 | cas.ch.recvq.append(sg) 390 | else: 391 | cas.ch.sendq.append(sg) 392 | 393 | # sleep until a communication happen 394 | g.park() 395 | 396 | sg = g.param 397 | 398 | # pass 3 - dequeue from unsucessful channels 399 | # to not iddle in them 400 | selected = None 401 | for cas in cases: 402 | if cas.sg != sg: 403 | try: 404 | if cas.op == 0: 405 | cas.ch.recvq.remove(cas.sg) 406 | else: 407 | cas.ch.sendq.remove(cas.sg) 408 | except ValueError: 409 | pass 410 | else: 411 | selected = cas 412 | 413 | if sg is None: 414 | continue 415 | 416 | if selected.ch.size > 0: 417 | raise RuntimeError("select shouldn't happen") 418 | 419 | 
if selected.op == 0: 420 | selected.value = sg.elem 421 | 422 | return selected 423 | 424 | def makechan(size=None, label=None): 425 | return Channel(size=size, label=label) 426 | -------------------------------------------------------------------------------- /offset/core/context.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | from collections import deque 6 | from concurrent import futures 7 | import sys 8 | import threading 9 | 10 | try: 11 | import thread # py2 12 | except ImportError: 13 | import _thread as thread # py3 14 | 15 | from .exc import KernelError 16 | from . import proc 17 | from .util import getmaxthreads 18 | 19 | 20 | # increase the recursion limit 21 | sys.setrecursionlimit(1000000) 22 | 23 | 24 | class Context(object): 25 | 26 | _instance_lock = threading.Lock() 27 | 28 | def __init__(self): 29 | self.runq = deque() 30 | self.running = deque() 31 | self.sleeping = {} 32 | self.lock = threading.Lock() 33 | self._thread_ident = None 34 | self._run_calls = [] 35 | 36 | # initialize the thread executor pool used for background processing 37 | # like syscall 38 | self.maxthreads = getmaxthreads() 39 | self.tpool = futures.ThreadPoolExecutor(self.maxthreads) 40 | 41 | @staticmethod 42 | def instance(): 43 | """Returns a global `Context` instance. 44 | """ 45 | if not hasattr(Context, "_instance"): 46 | with Context._instance_lock: 47 | if not hasattr(Context, "_instance"): 48 | # New instance after double check 49 | Context._instance = Context() 50 | return Context._instance 51 | 52 | def newproc(self, func, *args, **kwargs): 53 | # wrap the function so we know when it ends 54 | # create the coroutine 55 | g = proc.Proc(self, func, args, kwargs) 56 | # add the coroutine at the end of the runq 57 | self.runq.append(g) 58 | # register the goroutine 59 | self.running.append(g) 60 | # return the coroutine 61 | return g 62 | 63 | def removeg(self, g=None): 64 | # get the current proc 65 | g = g or proc.current() 66 | # remove it from the run queue 67 | try: 68 | self.runq.remove(g) 69 | except ValueError: 70 | pass 71 | 72 | # unregister the goroutine 73 | try: 74 | self.running.remove(g) 75 | except ValueError: 76 | pass 77 | 78 | def park(self, g=None): 79 | g = g or proc.current() 80 | g.sleeping = True 81 | try: 82 | self.runq.remove(g) 83 | except ValueError: 84 | pass 85 | self.schedule() 86 | 87 | def ready(self, g): 88 | if not g.sleeping: 89 | raise KernelError("bad goroutine status") 90 | 91 | g.sleeping = False 92 | self.runq.append(g) 93 | 94 | def schedule(self): 95 | gcurrent = proc.current() 96 | 97 | while True: 98 | gnext = None 99 | if len(self.runq): 100 | if self.runq[0] == gcurrent: 101 | self.runq.rotate(-1) 102 | gnext = self.runq[0] 103 | elif len(self.sleeping) > 0: 104 | self.wait_syscalls(0.05) 105 | continue 106 | elif self._run_calls: 107 | gnext = self._run_calls.pop(0) 108 | 109 | if not gnext: 110 | return 111 | 112 | # switch 113 | self._last_task = gnext 114 | if gnext != gcurrent: 115 | gnext.switch() 116 | 117 | if gcurrent == self._last_task: 118 | return 119 | 120 | def run(self): 121 | # append the run to the run calls 122 | self._run_calls.append(proc.current()) 123 | # set current thread 124 | self._thread_ident = thread.get_ident() 125 | # start scheduling 126 | self.schedule() 127 | 128 | def stop(self): 129 | # kill all running goroutines 130 | while True: 131 | try: 132 | p = 
self.running.popleft() 133 | except IndexError: 134 | break 135 | 136 | p.terminate() 137 | 138 | # stop the pool 139 | self.tpool.shutdown(wait=False) 140 | 141 | def wait_syscalls(self, timeout): 142 | with self.lock: 143 | fs = [f for f in self.sleeping] 144 | 145 | futures.wait(fs, timeout, return_when=futures.FIRST_COMPLETED) 146 | 147 | def enter_syscall(self, fn, *args, **kwargs): 148 | # get current coroutine 149 | gt = proc.current() 150 | gt.sleeping = True 151 | 152 | # init the futures 153 | f = self.tpool.submit(fn, *args, **kwargs) 154 | 155 | # add the goroutine to sleeping functions 156 | with self.lock: 157 | self.sleeping[f] = gt 158 | 159 | f.add_done_callback(self.exit_syscall) 160 | 161 | # schedule, switch to another coroutine 162 | self.park() 163 | 164 | if f.exception() is not None: 165 | raise f.exception() 166 | return f.result() 167 | 168 | def exit_syscall(self, f): 169 | # get the goroutine associated to this syscall 170 | with self.lock: 171 | g = self.sleeping.pop(f) 172 | 173 | # we exited 174 | if f.cancelled(): 175 | return 176 | 177 | if not g.is_alive(): 178 | return 179 | 180 | g.sleeping = False 181 | 182 | # put the goroutine back at the top of the running queue 183 | self.runq.appendleft(g) 184 | 185 | 186 | def park(): 187 | g = proc.current() 188 | g.park() 189 | 190 | def ready(g): 191 | g.ready(g) 192 | 193 | def enter_syscall(fn, *args, **kwargs): 194 | ctx = Context.instance() 195 | return ctx.enter_syscall(fn, *args, **kwargs) 196 | -------------------------------------------------------------------------------- /offset/core/exc.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | 6 | class PanicError(Exception): 7 | """ panic error raised """ 8 | 9 | class ChannelError(Exception): 10 | """ excption raised on channel error """ 11 | 12 | class KernelError(Exception): 13 | """ unexpected error in the kernel """ 14 | -------------------------------------------------------------------------------- /offset/core/kernel.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 
4 | 5 | from concurrent import futures 6 | from collections import deque 7 | import signal 8 | import sys 9 | import time 10 | 11 | from .context import Context 12 | from .sigqueue import SigQueue 13 | 14 | # increase the recursion limit 15 | sys.setrecursionlimit(1000000) 16 | 17 | class Kernel(object): 18 | 19 | def __init__(self): 20 | 21 | # we have for now only one context 22 | self.ctx = Context.instance() 23 | 24 | # init signals 25 | self.init_signals() 26 | 27 | 28 | # init signal global queue used to handle all signals from the 29 | # app 30 | self.sig_queue = SigQueue(self) 31 | 32 | def init_signals(self): 33 | signal.signal(signal.SIGQUIT, self.handle_quit) 34 | signal.signal(signal.SIGTERM, self.handle_quit) 35 | signal.signal(signal.SIGINT, self.handle_quit) 36 | 37 | def handle_quit(self, *args): 38 | self.ctx.stop() 39 | 40 | def run(self): 41 | self.ctx.run() 42 | 43 | def signal_enable(self, sig): 44 | self.sig_queue.signal_enable(sig) 45 | 46 | def signal_disable(self, sig): 47 | self.sig_queue.signal_disable(sig) 48 | 49 | def signal_recv(self, s): 50 | self.sig_queue.signal_recv(s) 51 | 52 | def callback(): 53 | while True: 54 | if s.value != 0: 55 | return s.value 56 | time.sleep(0.05) 57 | 58 | return self.ctx.enter_syscall(callback) 59 | 60 | 61 | kernel = Kernel() 62 | run = kernel.run 63 | 64 | 65 | signal_enable = kernel.signal_enable 66 | signal_disable = kernel.signal_disable 67 | signal_recv = kernel.signal_recv 68 | -------------------------------------------------------------------------------- /offset/core/proc.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | import threading 6 | import time 7 | 8 | try: 9 | import fibers 10 | except ImportError: 11 | raise RuntimeError("Platform not supported") 12 | 13 | _tls = threading.local() 14 | 15 | 16 | class ProcExit(Exception): 17 | """ exception raised when the proc is asked to exit """ 18 | 19 | def current(): 20 | try: 21 | return _tls.current_proc 22 | except AttributeError: 23 | _create_main_proc() 24 | return _tls.current_proc 25 | 26 | 27 | class Proc(object): 28 | 29 | def __init__(self, m, func, args, kwargs): 30 | 31 | def _run(): 32 | _tls.current_proc = self 33 | self._is_started = 1 34 | try: 35 | return func(*args, **kwargs) 36 | except ProcExit: 37 | pass 38 | finally: 39 | m.removeg() 40 | 41 | self.m = m 42 | self.fiber = fibers.Fiber(_run) 43 | self.waiting = False 44 | self.sleeping = False 45 | self.param = None 46 | self._is_started = 0 47 | 48 | def switch(self): 49 | curr = current() 50 | try: 51 | self.fiber.switch() 52 | finally: 53 | _tls.current_proc = curr 54 | 55 | def throw(self, *args): 56 | curr = current() 57 | try: 58 | self.fiber.throw(*args) 59 | finally: 60 | _tls.current_proc = curr 61 | 62 | def park(self): 63 | self.m.park(self) 64 | 65 | def ready(self): 66 | self.m.ready(self) 67 | 68 | def is_alive(self): 69 | return self._is_started < 0 or self.fiber.is_alive() 70 | 71 | def terminate(self): 72 | self.throw(ProcExit, ProcExit("exit")) 73 | time.sleep(0.1) 74 | 75 | def __eq__(self, other): 76 | return self.fiber == other.fiber 77 | 78 | 79 | def _create_main_proc(): 80 | main_proc = Proc.__new__(Proc) 81 | main_proc.fiber = fibers.current() 82 | main_proc._is_started = True 83 | main_proc.sleeping = True 84 | 85 | _tls.main_proc = main_proc 86 | _tls.current_proc = main_proc 87 | 
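The core files above (context.py, kernel.py, proc.py) implement the scheduler: each goroutine is a fiber-backed Proc that the Context parks and re-queues. Below is a minimal sketch of how blocking work can be pushed onto the Context thread pool through the ``syscall`` decorator defined in offset/core/__init__.py; the ``slow_io`` function is illustrative only and not part of the repository::

    import time

    from offset import go, maintask, run
    from offset.core import syscall

    @syscall
    def slow_io():
        # runs on the Context thread pool; the calling goroutine is parked
        # until the future completes (see Context.enter_syscall).
        # stdlib time.sleep blocks a pool thread, not the scheduler.
        time.sleep(0.1)
        return "io done"

    @maintask
    def main():
        go(print, "other goroutines keep running while slow_io() waits")
        print(slow_io())

    run()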
-------------------------------------------------------------------------------- /offset/core/sigqueue.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | 6 | from collections import deque 7 | import copy 8 | import signal 9 | import threading 10 | import weakref 11 | 12 | 13 | NUMSIG=65 14 | 15 | class SigQueue(object): 16 | 17 | def __init__(self, kernel): 18 | self.kernel = kernel 19 | self.queue = deque() 20 | self.receivers = [] 21 | self.lock = threading.Lock() 22 | 23 | self.sigtable = {} 24 | for i in range(NUMSIG): 25 | self.sigtable[i] = 0 26 | 27 | def signal_enable(self, sig): 28 | with self.lock: 29 | if not self.sigtable[sig]: 30 | signal.signal(sig, self.signal_handler) 31 | 32 | self.sigtable[sig] += 1 33 | 34 | 35 | def signal_disable(self, sig): 36 | with self.lock: 37 | if self.sigtable[sig] == 0: 38 | return 39 | 40 | self.sigtable[sig] -= 1 41 | 42 | if self.sigtable[sig] == 0: 43 | signal.signal(sig, signal.SIG_DFL) 44 | 45 | def signal_recv(self, s): 46 | with self.lock: 47 | self.receivers.append(s) 48 | 49 | def signal_handler(self, sig, frame): 50 | with self.lock: 51 | receivers = copy.copy(self.receivers) 52 | self.receivers = [] 53 | 54 | for recv in receivers: 55 | recv.value = sig 56 | -------------------------------------------------------------------------------- /offset/core/timer.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | import heapq 6 | import operator 7 | import threading 8 | 9 | from ..util import six 10 | 11 | from .context import Context, park, enter_syscall 12 | from . 
import proc 13 | from .util import nanotime, nanosleep 14 | 15 | 16 | def _ready(now, t, g): 17 | g.ready() 18 | 19 | 20 | def sleep(d): 21 | g = proc.current() 22 | g.sleeping = True 23 | t = Timer(_ready, interval=d, args=(g,)) 24 | t.start() 25 | g.park() 26 | 27 | 28 | class Timer(object): 29 | 30 | def __init__(self, callback, interval=None, period=None, args=None, 31 | kwargs=None): 32 | if not six.callable(callback): 33 | raise ValueError("callback must be a callable") 34 | 35 | self.callback = callback 36 | self.interval = interval 37 | self.period = period 38 | self.args = args or [] 39 | self.kwargs = kwargs or {} 40 | self.when = 0 41 | self.active = False 42 | 43 | def start(self): 44 | global timers 45 | self.active = True 46 | if not self.when: 47 | self.when = nanotime() + self.interval 48 | add_timer(self) 49 | 50 | def stop(self): 51 | remove_timer(self) 52 | self.active = False 53 | 54 | def __lt__(self, other): 55 | return self.when < other.when 56 | 57 | __cmp__ = __lt__ 58 | 59 | 60 | class Timers(object): 61 | 62 | __slots__ = ['__dict__', '_lock', 'sleeping'] 63 | 64 | __shared_state__ = dict( 65 | _timers = {}, 66 | _heap = [], 67 | _timerproc = None 68 | ) 69 | 70 | def __init__(self): 71 | self.__dict__ = self.__shared_state__ 72 | self._lock = threading.RLock() 73 | self.sleeping = False 74 | self.rescheduling = False 75 | 76 | def add(self, t): 77 | with self._lock: 78 | self._add_timer(t) 79 | 80 | if self.sleeping: 81 | self.sleeping = False 82 | self._timerproc.ready() 83 | 84 | if self._timerproc is None or not self._timerproc.is_alive: 85 | self._timerproc = Context.instance().newproc(self.timerproc) 86 | 87 | def _add_timer(self, t): 88 | if not t.interval: 89 | return 90 | heapq.heappush(self._heap, t) 91 | 92 | def remove(self, t): 93 | with self._lock: 94 | try: 95 | del self._heap[operator.indexOf(self._heap, t)] 96 | except (KeyError, IndexError): 97 | pass 98 | 99 | def timerproc(self): 100 | while True: 101 | self._lock.acquire() 102 | now = nanotime() 103 | 104 | while True: 105 | if not len(self._heap): 106 | delta = -1 107 | break 108 | 109 | t = heapq.heappop(self._heap) 110 | delta = t.when - now 111 | if delta > 0: 112 | heapq.heappush(self._heap, t) 113 | break 114 | else: 115 | # repeat ? reinsert the timer 116 | if t.period is not None and t.period > 0: 117 | np = t.period 118 | t.when += np * (1 - delta/np) 119 | heapq.heappush(self._heap, t) 120 | 121 | # run 122 | self._lock.release() 123 | t.callback(now, t, *t.args, **t.kwargs) 124 | self._lock.acquire() 125 | 126 | if delta < 0: 127 | self.sleeping = True 128 | self._lock.release() 129 | park() 130 | else: 131 | self._lock.release() 132 | # one time is pending sleep until 133 | enter_syscall(nanosleep, delta) 134 | 135 | 136 | timers = Timers() 137 | add_timer = timers.add 138 | remove_timer = timers.remove 139 | -------------------------------------------------------------------------------- /offset/core/util.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 
4 | 5 | import multiprocessing 6 | import os 7 | import time 8 | 9 | 10 | def getmaxthreads(): 11 | if 'OFFSET_MAX_THREADS' in os.environ: 12 | return int(os.environ['OFFSET_MAX_THREADS']) 13 | 14 | n = 0 15 | try: 16 | n = multiprocessing.cpu_count() 17 | except NotImplementedError: 18 | pass 19 | 20 | # use a minimum of 2 threads 21 | return max(n, 2) 22 | 23 | def nanotime(s=None): 24 | """ convert seconds to nanoseconds. If s is None, current time is 25 | returned """ 26 | if s is not None: 27 | return s * 1000000000 28 | return time.time() * 1000000000 29 | 30 | def from_nanotime(n): 31 | """ convert from nanotime to seconds """ 32 | return n / 1.0e9 33 | 34 | 35 | # TODO: implement this function with libc nanosleep function when 36 | # available. 37 | def nanosleep(n): 38 | time.sleep(from_nanotime(n)) 39 | -------------------------------------------------------------------------------- /offset/net/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | -------------------------------------------------------------------------------- /offset/net/dial.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | 6 | -------------------------------------------------------------------------------- /offset/net/exc.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | 6 | class Timeout(Exception): 7 | """ error raised when a timeout happen """ 8 | 9 | class FdClosing(Exception): 10 | """ Error raised while trying to achieve an FD closing """ 11 | -------------------------------------------------------------------------------- /offset/net/fd.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | 6 | _os = __import__('os') 7 | 8 | import errno 9 | 10 | from .. import os 11 | from .. 
import syscall 12 | from ..syscall import socket 13 | from ..sync import Mutex 14 | from ..time import sleep 15 | 16 | from .fd_pollserver import PollDesc 17 | from .exc import FdClosing 18 | 19 | 20 | class NetFd(object): 21 | 22 | def __init__(self, sock, familly, sotype, net): 23 | self.sysfd = sock.fileno() 24 | self.familly = familly 25 | self.sotype = sotype 26 | self.net = net 27 | 28 | # socket object 29 | self.sock = sock 30 | #_os.close(fd) 31 | 32 | self.pd = PollDesc(self) 33 | 34 | self.closing = False 35 | self.isConnected = False 36 | self.rio = Mutex() 37 | self.wio = Mutex() 38 | self.sysmu = Mutex() 39 | self.sysref = 0 40 | self.addr = None 41 | self.sysfile = None 42 | 43 | def name(self): 44 | return "%s: %s -> %s" % (self.net, self.addr[0], self.addr[1]) 45 | 46 | def setaddr(self, addr): 47 | self.addr = addr 48 | 49 | def connect(self, address): 50 | with self.wio: 51 | self.pd.prepare_write() 52 | while True: 53 | try: 54 | self.sock.connect(address) 55 | except socket.error as e: 56 | if e.args[0] == errno.EISCONN: 57 | break 58 | if e.args[0] not in (errno.EINPROGRESS, errno.EALREADY, 59 | errno.EINTR,): 60 | raise 61 | 62 | self.pd.wait_write() 63 | continue 64 | 65 | break 66 | 67 | self.isConnected = True 68 | 69 | def incref(self, closing=False): 70 | with self.sysmu: 71 | if self.closing: 72 | raise FdClosing() 73 | 74 | self.sysref += 1 75 | if closing: 76 | self.closing = True 77 | 78 | def decref(self): 79 | with self.sysmu: 80 | self.sysref -= 1 81 | if self.closing and self.sysref == 0: 82 | self.pd.close() 83 | 84 | # close the socket 85 | self.sock.close() 86 | self.sysfd = -1 87 | 88 | def close(self): 89 | self.pd.lock() 90 | try: 91 | self.incref(True) 92 | self.pd.evict() 93 | finally: 94 | self.pd.unlock() 95 | 96 | self.decref() 97 | 98 | def shutdown(self, how): 99 | self.incref() 100 | 101 | try: 102 | self.sock.shutdown(how) 103 | finally: 104 | self.decref() 105 | 106 | def close_read(self): 107 | self.shutdown(socket.SHUT_RD) 108 | 109 | def close_write(self): 110 | self.shutdown(socket.SHUT_WR) 111 | 112 | def read(self, n): 113 | with self.rio: 114 | self.incref() 115 | try: 116 | self.pd.prepare_read() 117 | while True: 118 | try: 119 | return self.sock.recv(n) 120 | except socket.error as e: 121 | if e.args[0] == errno.EAGAIN: 122 | self.pd.wait_read() 123 | continue 124 | else: 125 | raise 126 | finally: 127 | self.decref() 128 | 129 | def readfrom(self, n, *flags): 130 | with self.rio: 131 | self.incref() 132 | try: 133 | self.pd.prepare_read() 134 | while True: 135 | try: 136 | return self.sock.recvfrom(n, **flags) 137 | except socket.error as e: 138 | if e.args[0] == errno.EAGAIN: 139 | self.pd.wait_read() 140 | continue 141 | else: 142 | raise 143 | finally: 144 | self.decref() 145 | 146 | 147 | if hasattr(socket, 'recvmsg'): 148 | def readmsg(self, p, oob): 149 | with self.rio: 150 | self.incref() 151 | try: 152 | self.pd.prepare_read() 153 | while True: 154 | try: 155 | return self.sock.recvmsg(p, oob, 0) 156 | except socket.error as e: 157 | if e.args[0] == errno.EAGAIN: 158 | self.pd.wait_read() 159 | continue 160 | else: 161 | raise 162 | finally: 163 | self.decref() 164 | 165 | 166 | def write(self, data): 167 | with self.wio: 168 | self.incref() 169 | try: 170 | self.pd.prepare_write() 171 | while True: 172 | try: 173 | return self.sock.send(data) 174 | except socket.error as e: 175 | if e.args[0] == errno.EAGAIN: 176 | self.pd.wait_write() 177 | continue 178 | else: 179 | raise 180 | finally: 181 | self.decref() 182 | 183 | 
def writeto(self, data, addr): 184 | with self.wio: 185 | self.incref() 186 | try: 187 | self.pd.prepare_write() 188 | while True: 189 | try: 190 | return self.sock.sendto(data, addr) 191 | except socket.error as e: 192 | if e.args[0] == errno.EAGAIN: 193 | self.pd.wait_write() 194 | continue 195 | else: 196 | raise 197 | finally: 198 | self.decref() 199 | 200 | if hasattr(socket, 'sendmsg'): 201 | def writemsg(self, p, oob, addr): 202 | with self.wio: 203 | self.incref() 204 | try: 205 | self.pd.prepare_write() 206 | while True: 207 | try: 208 | return self.sock.sendmsg(p, oob, 0, addr) 209 | except socket.error as e: 210 | if e.args[0] == errno.EAGAIN: 211 | self.pd.wait_write() 212 | continue 213 | else: 214 | raise 215 | finally: 216 | self.decref() 217 | 218 | 219 | def accept(self): 220 | with self.rio: 221 | self.incref() 222 | try: 223 | self.pd.prepare_read() 224 | while True: 225 | try: 226 | fd, addr = accept(self.sock) 227 | except socket.error as e: 228 | if e.args[0] == errno.EAGAIN: 229 | self.pd.wait_read() 230 | continue 231 | elif e.args[0] == errno.ECONNABORTED: 232 | continue 233 | else: 234 | raise 235 | 236 | break 237 | 238 | cls = self.__class__ 239 | obj = cls(fd, self.familly, self.sotype, 240 | self.net) 241 | obj.setaddr(addr) 242 | return obj 243 | finally: 244 | self.decref() 245 | 246 | def dup(self): 247 | syscall.ForkLock.rlock() 248 | try: 249 | fd = _os.dup(self.sock.fileno()) 250 | syscall.closeonexec(fd) 251 | 252 | finally: 253 | syscall.ForkLock.runlock() 254 | 255 | syscall.setnonblock(fd) 256 | return os.File(fd, self.name()) 257 | 258 | 259 | def accept(sock): 260 | conn, addr = sock.accept() 261 | syscall.ForkLock.rlock() 262 | try: 263 | syscall.closeonexec(conn.fileno()) 264 | 265 | finally: 266 | syscall.ForkLock.runlock() 267 | 268 | conn.setblocking(0) 269 | return conn, addr 270 | -------------------------------------------------------------------------------- /offset/net/fd_bsd.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | import errno 6 | import sys 7 | 8 | from .util import fd_ 9 | from .. 
import syscall 10 | from ..syscall import select 11 | 12 | if not hasattr(select, "kqueue"): 13 | raise RuntimeError('kqueue is not supported') 14 | 15 | 16 | class Pollster(object): 17 | 18 | def __init__(self): 19 | self.kq = select.kqueue() 20 | syscall.closeonexec(self.kq.fileno()) 21 | self.events = [] 22 | 23 | def addfd(self, fd, mode, repeat=True): 24 | if mode == 'r': 25 | kmode = select.KQ_FILTER_READ 26 | else: 27 | kmode = select.KQ_FILTER_WRITE 28 | 29 | flags = select.KQ_EV_ADD 30 | 31 | if sys.platform.startswith("darwin"): 32 | flags |= select.KQ_EV_ENABLE 33 | 34 | if not repeat: 35 | flags |= select.KQ_EV_ONESHOT 36 | 37 | ev = select.kevent(fd_(fd), kmode, flags) 38 | self.kq.control([ev], 0) 39 | 40 | def delfd(self, fd, mode): 41 | if mode == 'r': 42 | kmode = select.KQ_FILTER_READ 43 | else: 44 | kmode = select.KQ_FILTER_WRITE 45 | 46 | ev = select.kevent(fd_(fd), select.KQ_FILTER_READ, 47 | select.KQ_EV_DELETE) 48 | self.kq.control([ev], 0) 49 | 50 | def waitfd(self, pollserver, nsec=0): 51 | while len(self.events) == 0: 52 | pollserver.unlock() 53 | try: 54 | events = self.kq.control(None, 0, nsec) 55 | except select.error as e: 56 | if e.args[0] == errno.EINTR: 57 | continue 58 | raise 59 | finally: 60 | pollserver.lock() 61 | 62 | self.events.extend(events) 63 | 64 | ev = self.events.pop(0) 65 | if ev.filter == select.KQ_FILTER_READ: 66 | mode = 'r' 67 | else: 68 | mode = 'w' 69 | 70 | return (fd_(ev.ident), mode) 71 | 72 | def close(self): 73 | self.kq.close() 74 | -------------------------------------------------------------------------------- /offset/net/fd_epoll.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | import errno 6 | 7 | from .util import fd_ 8 | from .. 
import syscall 9 | from ..syscall import select 10 | 11 | 12 | if not hasattr(select, "epoll"): 13 | raise RuntimeError("epoll is not supported") 14 | 15 | 16 | class Pollster(object): 17 | 18 | def __init__(self): 19 | self.poll = select.epoll() 20 | syscall.closeonexec(self.poll.fileno()) 21 | self.fds = {} 22 | self.events = [] 23 | 24 | def addfd(self, fd, mode, repeat=True): 25 | if mode == 'r': 26 | mode = (select.EPOLLIN, repeat) 27 | else: 28 | mode = (select.EPOLLOUT, repeat) 29 | 30 | if fd in self.fds: 31 | modes = self.fds[fd] 32 | if mode in self.fds[fd]: 33 | # already registered for this mode 34 | return 35 | modes.append(mode) 36 | addfd_ = self.poll.modify 37 | else: 38 | modes = [mode] 39 | addfd_ = self.poll.register 40 | 41 | # append the new mode to fds 42 | self.fds[fd] = modes 43 | 44 | mask = 0 45 | for mode, r in modes: 46 | mask |= mode 47 | 48 | if not repeat: 49 | mask |= select.EPOLLONESHOT 50 | 51 | addfd_(fd, mask) 52 | 53 | def delfd(self, fd, mode): 54 | if mode == 'r': 55 | mode = select.POLLIN | select.POLLPRI 56 | else: 57 | mode = select.POLLOUT 58 | 59 | if fd not in self.fds: 60 | return 61 | 62 | modes = [] 63 | for m, r in self.fds[fd]: 64 | if mode != m: 65 | modes.append((m, r)) 66 | 67 | if not modes: 68 | # del the fd from the poll 69 | self.poll.unregister(fd) 70 | del self.fds[fd] 71 | else: 72 | # modify the fd in the poll 73 | self.fds[fd] = modes 74 | m, r = modes[0] 75 | mask = m[0] 76 | if r: 77 | mask |= select.EPOLLONESHOT 78 | 79 | self.poll.modify(fd, mask) 80 | 81 | def waitfd(self, pollserver, nsec=0): 82 | # wait for the events 83 | while len(self.events) == 0: 84 | pollserver.unlock() 85 | try: 86 | events = self.poll.poll(nsec) 87 | except select.error as e: 88 | if e.args[0] == errno.EINTR: 89 | continue 90 | raise 91 | finally: 92 | pollserver.lock() 93 | 94 | self.events.extend(events) 95 | 96 | (fd, ev) = self.events.pop(0) 97 | fd = fd_(fd) 98 | 99 | if ev == select.EPOLLIN: 100 | mode = 'r' 101 | else: 102 | mode = 'w' 103 | 104 | # eventually remove the mode from the list if repeat was set to 105 | # False and modify the poll if needed. 106 | modes = [] 107 | for m, r in self.fds[fd]: 108 | if not r: 109 | continue 110 | modes.append(m, r) 111 | 112 | if modes != self.fds[fd]: 113 | self.fds[fd] = mode 114 | 115 | mask = 0 116 | for m, r in modes: 117 | mask |= m 118 | 119 | self.poll.modify(fd, mask) 120 | 121 | return (fd_(fd), mode) 122 | 123 | def close(self): 124 | self.poll.close() 125 | -------------------------------------------------------------------------------- /offset/net/fd_poll.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | from .fd_poll_base import PollerBase 6 | from ..syscall import select 7 | 8 | if hasattr(select, "devpoll"): 9 | # solaris 10 | 11 | class Pollster(PollerBase): 12 | POLL_IMPL = select.devpoll 13 | 14 | elif hasattr(select, "poll"): 15 | # other posix system supporting poll 16 | class Pollster(PollerBase): 17 | POLL_IMPL = select.poll 18 | else: 19 | raise RuntimeError("poll is not supported") 20 | -------------------------------------------------------------------------------- /offset/net/fd_poll_base.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 
4 | 5 | import errno 6 | 7 | from .util import fd_ 8 | from .. import syscall 9 | from ..syscall import select 10 | 11 | class PollerBase(object): 12 | 13 | POLL_IMPL = None 14 | 15 | def __init__(self): 16 | self.poll = self.POLL_IMPL() 17 | self.fds = {} 18 | self.events = [] 19 | 20 | def addfd(self, fd, mode, repeat=True): 21 | fd = fd_(fd) 22 | if mode == 'r': 23 | mode = (select.POLLIN, repeat) 24 | else: 25 | mode = (select.POLLOUT, repeat) 26 | 27 | if fd in self.fds: 28 | modes = self.fds[fd] 29 | if mode in modes: 30 | # already registered for this mode 31 | return 32 | modes.append(mode) 33 | addfd_ = self.poll.modify 34 | else: 35 | modes = [mode] 36 | addfd_ = self.poll.register 37 | 38 | # append the new mode to fds 39 | self.fds[fd] = modes 40 | 41 | mask = 0 42 | for mode, r in modes: 43 | mask |= mode 44 | 45 | addfd_(fd, mask) 46 | 47 | def delfd(self, fd, mode): 48 | fd = fd_(fd) 49 | 50 | if mode == 'r': 51 | mode = select.POLLIN | select.POLLPRI 52 | else: 53 | mode = select.POLLOUT 54 | 55 | if fd not in self.fds: 56 | return 57 | 58 | modes = [] 59 | for m, r in self.fds[fd]: 60 | if mode != m: 61 | modes.append((m, r)) 62 | 63 | if not modes: 64 | # del the fd from the poll 65 | self.poll.unregister(fd) 66 | del self.fds[fd] 67 | else: 68 | # modify the fd in the poll 69 | self.fds[fd] = modes 70 | m, r = modes[0] 71 | mask = m[0] 72 | self.poll.modify(fd, mask) 73 | 74 | def waitfd(self, pollserver, nsec=0): 75 | # wait for the events 76 | while len(self.events) == 0: 77 | pollserver.unlock() 78 | try: 79 | events = self.poll.poll(nsec) 80 | except select.error as e: 81 | if e.args[0] == errno.EINTR: 82 | continue 83 | raise 84 | finally: 85 | pollserver.lock() 86 | 87 | self.events.extend(events) 88 | 89 | (fd, ev) = self.events.pop(0) 90 | fd = fd_(fd) 91 | 92 | if fd not in self.fds: 93 | return None, None 94 | 95 | 96 | if ev == select.POLLIN or ev == select.POLLPRI: 97 | mode = 'r' 98 | else: 99 | mode = 'w' 100 | 101 | # eventually remove the mode from the list if repeat was set to 102 | # False and modify the poll if needed. 103 | modes = [] 104 | for m, r in self.fds[fd]: 105 | if not r: 106 | continue 107 | modes.append(m, r) 108 | 109 | if not modes: 110 | self.poll.unregister(fd) 111 | else: 112 | mask = 0 113 | if modes != self.fds[fd]: 114 | mask |= m 115 | self.poll.modify(fd, mask) 116 | 117 | return (fd_(fd), mode) 118 | 119 | def close(self): 120 | for fd in self.fds: 121 | self.poll.unregister(fd) 122 | 123 | self.fds = [] 124 | self.poll = None 125 | -------------------------------------------------------------------------------- /offset/net/fd_pollserver.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | import errno 6 | 7 | from .. 
import os 8 | from ..core import go, makechan 9 | from ..core.util import getmaxthreads 10 | from ..syscall import select 11 | from ..syscall import fexec 12 | from ..sync import Mutex, Once 13 | from ..time import nano 14 | 15 | from .exc import Timeout 16 | from .util import Deadline 17 | 18 | 19 | if hasattr(select, "kqueue"): 20 | from .fd_bsd import Pollster 21 | elif hasattr(select, "epoll"): 22 | from .fd_epoll import Pollster 23 | elif hasattr(select, "poll") or hasattr(select, "devpoll"): 24 | from .fd_poll import Pollster 25 | else: 26 | from .fd_select import Pollster 27 | 28 | 29 | class PollServer(object): 30 | 31 | def __init__(self): 32 | self.m = Mutex() 33 | 34 | self.poll = Pollster() 35 | 36 | self.pr, self.pw = os.pipe() 37 | fexec.setnonblock(self.pr) 38 | fexec.setnonblock(self.pw) 39 | self.poll.addfd(self.pr, 'r') 40 | 41 | self.pending = {} 42 | self.deadline = 0 43 | 44 | go(self.run) 45 | 46 | def lock(self): 47 | self.m.lock() 48 | 49 | def unlock(self): 50 | self.m.unlock() 51 | 52 | def addfd(self, pd, mode): 53 | self.lock() 54 | if pd.sysfd < 0 or pd.closing: 55 | self.unlock() 56 | raise ValueError("fd closing") 57 | 58 | key = pd.sysfd << 1 59 | t = 0 60 | if mode == 'r': 61 | pd.ncr += 1 62 | t = pd.rdeadline.value 63 | else: 64 | pd.ncw += 1 65 | key += 1 66 | t = pd.wdeadline.value 67 | 68 | self.pending[key] = pd 69 | do_wakeup = False 70 | if t > 0 and (self.deadline == 0 or self.deadline < t): 71 | self.deadline = t 72 | do_wakeup = True 73 | 74 | self.poll.addfd(pd.sysfd, mode, False) 75 | self.unlock() 76 | 77 | if do_wakeup: 78 | self.wakeup() 79 | 80 | def evict(self, pd): 81 | pd.closing = True 82 | 83 | try: 84 | if self.pending[pd.sysfd << 1] == pd: 85 | self.wakefd(pd, 'r') 86 | self.poll.delfd(pd.sysfd) 87 | del self.pending[pd.sysfd << 1] 88 | except KeyError: 89 | pass 90 | 91 | try: 92 | if self.pending[pd.sysfd << 1 | 1]: 93 | self.wakefd(pd, 'w') 94 | self.poll.delfd(pd.sysfd, 'w') 95 | del self.pending[pd.sysfd << 1 | 1] 96 | except KeyError: 97 | pass 98 | 99 | def wakeup(self): 100 | self.pw.write(b'.') 101 | 102 | try: 103 | os.write(self.pw, b'.') 104 | except IOError as e: 105 | if e.errno not in [errno.EAGAIN, errno.EINTR]: 106 | raise 107 | 108 | def lookupfd(self, fd, mode): 109 | key = fd << 1 110 | if mode == 'w': 111 | key += 1 112 | 113 | try: 114 | netfd = self.pending.pop(key) 115 | except KeyError: 116 | return None 117 | 118 | return netfd 119 | 120 | def wakefd(self, pd, mode): 121 | if mode == 'r': 122 | while pd.ncr > 0: 123 | pd.ncr -= 1 124 | pd.cr.send(True) 125 | else: 126 | while pd.ncw > 0: 127 | pd.ncw -= 1 128 | pd.cw.send(True) 129 | 130 | def check_deadline(self): 131 | now = nano() 132 | 133 | next_deadline = 0 134 | pending = self.pending.copy() 135 | for key, pd in pending.items(): 136 | if key & 1 == 0: 137 | mode = 'r' 138 | else: 139 | mode = 'w' 140 | 141 | if mode == 'r': 142 | t = pd.rdeadline.value() 143 | else: 144 | t = pd.wdeadline.value() 145 | 146 | if t > 0: 147 | if t <= now: 148 | del self.pending[key] 149 | self.poll.delfd(pd.sysfd, mode) 150 | self.wakefd(pd, mode) 151 | elif next_deadline == 0 or t < next_deadline: 152 | next_deadline = t 153 | 154 | self.deadline = next_deadline 155 | 156 | def run(self): 157 | self.lock() 158 | try: 159 | while True: 160 | timeout = 0.1 161 | if self.deadline > 0: 162 | timeout = self.deadline - nano() 163 | if timeout <= 0: 164 | self.check_deadline() 165 | continue 166 | 167 | fd, mode = self.poll.waitfd(self, timeout) 168 | if fd < 0: 169 | 
self.check_deadline() 170 | continue 171 | 172 | if fd == self.pr.fileno(): 173 | os.read(self.pr, 1) 174 | self.check_deadline() 175 | 176 | else: 177 | pd = self.lookupfd(fd, mode) 178 | if not pd: 179 | continue 180 | self.wakefd(pd, mode) 181 | finally: 182 | self.unlock() 183 | 184 | 185 | pollservers = {} 186 | startserveronce = Once() 187 | 188 | @startserveronce.do 189 | def startservers(): 190 | global pollservers 191 | 192 | for i in range(getmaxthreads()): 193 | pollservers[i] = PollServer() 194 | 195 | 196 | class PollDesc(object): 197 | 198 | def __init__(self, fd): 199 | 200 | # init pollservers 201 | startservers() 202 | 203 | polln = len(pollservers) 204 | k = fd.sysfd % polln 205 | self.sysfd = fd.sysfd 206 | self.pollserver = pollservers[k] 207 | 208 | self.cr = makechan(1) 209 | self.cw = makechan(1) 210 | self.ncr = 0 211 | self.ncw = 0 212 | self.rdeadline = Deadline() 213 | self.wdeadline = Deadline() 214 | 215 | def close(self): 216 | pass 217 | 218 | def lock(self): 219 | self.pollserver.lock() 220 | 221 | def unlock(self): 222 | self.pollserver.unlock() 223 | 224 | def wakeup(self): 225 | self.pollserver.wakeup() 226 | 227 | def prepare_read(self): 228 | if self.rdeadline.expired(): 229 | raise Timeout 230 | 231 | def prepare_write(self): 232 | if self.wdeadline.expired(): 233 | raise Timeout 234 | 235 | def wait_read(self): 236 | self.pollserver.addfd(self, 'r') 237 | return self.cr.recv() 238 | 239 | def wait_write(self): 240 | self.pollserver.addfd(self, 'w') 241 | return self.cw.recv() 242 | 243 | def evict(self): 244 | return self.pollserver.evict(self) 245 | -------------------------------------------------------------------------------- /offset/net/fd_select.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 
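# --- Illustrative sketch, not part of the original sources -----------------
# How a reader is expected to drive the PollDesc defined in fd_pollserver.py
# above: try the non-blocking call, and on EAGAIN park on the PollDesc until
# the PollServer signals readiness.  The `netfd` argument is hypothetical; it
# only assumes an object exposing a raw socket as `sock` and a PollDesc as
# `pd` (the real wiring lives in offset/net/fd.py, which is not shown here).

def _example_read_with_polldesc(netfd, nbytes):
    import errno

    netfd.pd.prepare_read()        # raises Timeout if the read deadline expired
    while True:
        try:
            return netfd.sock.recv(nbytes)
        except IOError as e:
            if e.args[0] not in (errno.EAGAIN, errno.EWOULDBLOCK):
                raise
        netfd.pd.wait_read()       # blocks on pd.cr until the PollServer wakes us
# ---------------------------------------------------------------------------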
4 | 5 | import errno 6 | 7 | from .util import fd_ 8 | from ..syscall import select 9 | 10 | 11 | class Pollster(object): 12 | 13 | def __init__(self): 14 | self.read_fds = {} 15 | self.write_fds = {} 16 | self.events = [] 17 | 18 | def addfd(self, fd, mode, repeat=True): 19 | fd = fd_(fd) 20 | 21 | if mode == 'r': 22 | self.read_fds[fd] = repeat 23 | else: 24 | self.write_fds[fd] = repeat 25 | 26 | def delfd(self, fd, mode): 27 | if mode == 'r' and fd in self.read_fds: 28 | del self.read_fds[fd] 29 | elif fd in self.write_fds: 30 | del self.write_fds[fd] 31 | 32 | def waitfd(self, pollserver, nsec): 33 | read_fds = [fd for fd in self.read_fds] 34 | write_fds = [fd for fd in self.write_fds] 35 | 36 | while len(self.events) == 0: 37 | pollserver.unlock() 38 | try: 39 | r, w, e = select.select(read_fds, write_fds, [], nsec) 40 | except select.error as e: 41 | if e.args[0] == errno.EINTR: 42 | continue 43 | raise 44 | finally: 45 | pollserver.lock() 46 | 47 | events = [] 48 | for fd in r: 49 | if fd in self.read_fds: 50 | if self.read_fds[fd] == False: 51 | del self.read_fds[fd] 52 | events.append((fd, 'r')) 53 | 54 | for fd in w: 55 | if fd in self.write_fds: 56 | if self.write_fds[fd] == False: 57 | del self.write_fds[fd] 58 | events.append((fd, 'w')) 59 | 60 | self.events.extend(events) 61 | 62 | return self.events.pop(0) 63 | 64 | def close(self): 65 | self.read_fds = {} 66 | self.write_fds = {} 67 | -------------------------------------------------------------------------------- /offset/net/sock.py: -------------------------------------------------------------------------------- 1 | 2 | import sys 3 | 4 | from ..syscall import socket 5 | 6 | try: 7 | from ..syscall.sysctl import sysctlbyname 8 | from ctypes import c_int 9 | except ImportError: 10 | sysctlbyname = None 11 | 12 | from .fd import NetFd 13 | from .
import util 14 | 15 | def maxListenerBacklog(): 16 | if sys.platform.startswith('linux'): 17 | try: 18 | f = open("/proc/sys/net/core/somaxconn") 19 | except OSError: 20 | return socket.SOMAXCONN 21 | 22 | try: 23 | n = int(f.read().split('\n')[0]) 24 | except ValueError: 25 | return socket.SOMAXCONN 26 | 27 | if n > 1<<16-1: 28 | n = 1<<16 - 1 29 | 30 | return n 31 | elif sysctlbyname is not None: 32 | n = 0 33 | if (sys.platform.startswith('darwin') or 34 | sys.platform.startswith('freebsd')): 35 | n = sysctlbyname('kern.ipc.somaxconn', c_int) 36 | elif sys.platform.startswith('openbsd'): 37 | n = sysctlbyname('kern.somaxconn', c_int) 38 | 39 | if n == 0: 40 | return socket.SOMAXCONN 41 | 42 | if n > 1<<16-1: 43 | n = 1<<16-1 44 | 45 | return n 46 | else: 47 | return socket.SOMAXCONN 48 | 49 | # return a bounded socket 50 | def bind_socket(net, addr): 51 | if net == "tcp" or net == "udp": 52 | if util.is_ipv6(addr[0]): 53 | family = socket.AF_INET6 54 | else: 55 | family = socket.AF_INET 56 | else: 57 | # net == "unix" 58 | family = socket.AF_UNIX 59 | 60 | if net == "udp": 61 | sotype = socket.socket.SOCK_DGRAM 62 | else: 63 | # net == "unix" or net == "tcp" 64 | sotype = socket.SOCK_STREAM 65 | 66 | # bind and listen the socket 67 | sock = socket.socket(family, sotype) 68 | sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 69 | sock.bind(addr) 70 | sock.listen(maxListenerBacklog()) 71 | 72 | # return the NetFd instance 73 | netfd = NetFd(sock, family, sotype, net) 74 | netfd.setaddr(sock.getsockname()) 75 | return netfd 76 | -------------------------------------------------------------------------------- /offset/net/util.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | from ..sync import Mutex 6 | from ..sync.atomic import AtomicLong 7 | from ..syscall import socket 8 | from ..time import nano 9 | 10 | def fd_(fd): 11 | if hasattr(fd, "fileno"): 12 | return int(fd.fileno()) 13 | return fd 14 | 15 | 16 | class Deadline(object): 17 | 18 | def __init__(self): 19 | self.m = Mutex() 20 | self.val = 0 21 | 22 | def expired(self): 23 | t = self.value() 24 | return t > 0 and nano() >= t 25 | 26 | def value(self): 27 | with self.m: 28 | v = self.val 29 | 30 | return v 31 | 32 | def set(self, v): 33 | with self.m: 34 | self.val = v 35 | 36 | def settime(self, t=None): 37 | self.set(t or nano()) 38 | 39 | 40 | def is_ipv6(addr): 41 | try: 42 | socket.inet_pton(socket.AF_INET6, addr) 43 | except socket.error: # not a valid address 44 | return False 45 | except ValueError: # ipv6 not supported on this platform 46 | return False 47 | return True 48 | -------------------------------------------------------------------------------- /offset/os/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. 
See the NOTICE for more information.# 4 | 5 | import sys 6 | 7 | __all__ = [] 8 | 9 | from .file import File, pipe 10 | 11 | os_mod = sys.modules[__name__] 12 | 13 | 14 | _signal = __import__('signal') 15 | 16 | for name in dir(_signal): 17 | if name[:3] == "SIG" and name[3] != "_": 18 | setattr(os_mod, name, getattr(_signal, name)) 19 | __all__.append(name) 20 | 21 | del _signal 22 | 23 | -------------------------------------------------------------------------------- /offset/os/file.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information.# 4 | 5 | from .. import syscall 6 | from ..syscall import os 7 | 8 | 9 | class File(object): 10 | 11 | def __init__(self, fd, name): 12 | self.fd = fd 13 | self.name = name 14 | 15 | def close(self): 16 | syscall.close(self.fd) 17 | self.fd = -1 18 | 19 | def read(self): 20 | return syscall.read(self.fd) 21 | 22 | 23 | def pipe(): 24 | syscall.ForkLock.rlock() 25 | p = os.pipe() 26 | syscall.closeonexec(p[0]) 27 | syscall.closeonexec(p[1]) 28 | syscall.ForkLock.runlock() 29 | return p 30 | -------------------------------------------------------------------------------- /offset/os/signal.py: -------------------------------------------------------------------------------- 1 | 2 | import weakref 3 | 4 | from ..core import default, go, gosched, select 5 | from ..core.kernel import signal_enable, signal_disable 6 | from ..core.sigqueue import NUMSIG 7 | from ..sync import Mutex 8 | from ..sync.atomic import AtomicLong 9 | from ..syscall import signal 10 | 11 | class Handler(object): 12 | 13 | def __init__(self): 14 | self.mask = set() 15 | 16 | def set(self, sig): 17 | self.mask.add(sig) 18 | 19 | def want(self, sig): 20 | return sig in self.mask 21 | 22 | 23 | class Handlers(object): 24 | 25 | def __init__(self): 26 | self.m = Mutex() 27 | self.handlers = {} 28 | self.ref = {} 29 | 30 | # init signals 31 | for i in range(NUMSIG): 32 | self.ref[i] = 0 33 | 34 | self.signal_recv = AtomicLong(0) 35 | go(self.loop) 36 | 37 | 38 | def notify(self, c, *sigs): 39 | with self.m: 40 | if c not in self.handlers: 41 | h = Handler() 42 | else: 43 | h = self.handlers[c] 44 | 45 | for sig in sigs: 46 | h.set(sig) 47 | if not self.ref[sig]: 48 | signal_enable(sig) 49 | 50 | self.ref[sig] += 1 51 | self.handlers[c] = h 52 | 53 | 54 | def stop(self, c): 55 | with self.m: 56 | if c not in self.handlers: 57 | return 58 | 59 | h = self.handlers.pop(c) 60 | for sig in h.mask: 61 | self.ref[sig] -= 1 62 | if self.ref[sig] == 0: 63 | signal_disable(sig) 64 | 65 | 66 | def loop(self): 67 | while True: 68 | self.process(signal(self.signal_recv)) 69 | 70 | def process(self, sig): 71 | with self.m: 72 | for c, h in self.handlers.items(): 73 | if h.want(sig): 74 | ret = select(c.if_send(sig)) 75 | if ret: 76 | continue 77 | 78 | self.signal_recv.value = 0 79 | 80 | _handlers = Handlers() 81 | notify = _handlers.notify 82 | stop = _handlers.stop 83 | -------------------------------------------------------------------------------- /offset/sync/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 
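# --- Illustrative sketch, not part of the original sources -----------------
# Subscribing a channel to OS signals through the Handlers registry defined in
# offset/os/signal.py above, in the spirit of Go's signal.Notify.  Assumes the
# `makechan` helper from offset.core and the SIG* constants re-exported by
# offset.os, both visible earlier in this dump; the function itself is only an
# example and is not used anywhere in the package.

def _example_wait_for_signal():
    from offset.core import makechan
    from offset.os import SIGINT, SIGTERM
    from offset.os import signal as os_signal

    c = makechan(1)
    os_signal.notify(c, SIGINT, SIGTERM)   # register the channel for both signals
    sig = c.recv()                         # park the goroutine until one arrives
    os_signal.stop(c)                      # unsubscribe the channel again
    return sig
# ---------------------------------------------------------------------------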
4 | 5 | from .sema import Semaphore 6 | from .mutex import Mutex 7 | from .once import Once 8 | from .cond import Cond 9 | from .rwmutex import RWMutex 10 | from .waitgroup import WaitGroup 11 | -------------------------------------------------------------------------------- /offset/sync/atomic.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | # copyright (c) 2013 David Reid under the MIT License. 5 | 6 | from cffi import FFI 7 | 8 | from functools import total_ordering 9 | 10 | ffi = FFI() 11 | 12 | ffi.cdef(""" 13 | long long_add_and_fetch(long *, long); 14 | long long_sub_and_fetch(long *, long); 15 | long long_bool_compare_and_swap(long *, long, long); 16 | """) 17 | 18 | lib = ffi.verify(""" 19 | long long_add_and_fetch(long *v, long l) { 20 | return __sync_add_and_fetch(v, l); 21 | }; 22 | 23 | long long_sub_and_fetch(long *v, long l) { 24 | return __sync_sub_and_fetch(v, l); 25 | }; 26 | 27 | long long_bool_compare_and_swap(long *v, long o, long n) { 28 | return __sync_bool_compare_and_swap(v, o, n); 29 | }; 30 | """) 31 | 32 | @total_ordering 33 | class AtomicLong(object): 34 | def __init__(self, initial_value): 35 | self._storage = ffi.new('long *', initial_value) 36 | 37 | def __repr__(self): 38 | return '<{0} at 0x{1:x}: {2!r}>'.format( 39 | self.__class__.__name__, id(self), self.value) 40 | 41 | @property 42 | def value(self): 43 | return self._storage[0] 44 | 45 | @value.setter 46 | def value(self, new): 47 | lib.long_bool_compare_and_swap(self._storage, self.value, new) 48 | 49 | def add(self, delta): 50 | """ atomically adds delta and returns the new value """ 51 | if delta >= 0: 52 | lib.long_add_and_fetch(self._storage, delta) 53 | else: 54 | lib.long_sub_and_fetch(self._storage, abs(delta)) 55 | 56 | return self._storage[0] 57 | 58 | 59 | def __iadd__(self, inc): 60 | lib.long_add_and_fetch(self._storage, inc) 61 | return self 62 | 63 | def __isub__(self, dec): 64 | lib.long_sub_and_fetch(self._storage, dec) 65 | return self 66 | 67 | def __eq__(self, other): 68 | if isinstance(other, AtomicLong): 69 | return self.value == other.value 70 | else: 71 | return self.value == other 72 | 73 | def __ne__(self, other): 74 | return not (self == other) 75 | 76 | def __lt__(self, other): 77 | if isinstance(other, AtomicLong): 78 | return self.value < other.value 79 | else: 80 | return self.value < other 81 | -------------------------------------------------------------------------------- /offset/sync/cond.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | from .mutex import Mutex 4 | from .sema import Semaphore 5 | 6 | class Cond(object): 7 | """ Cond implements a condition variable, a rendezvous point for coroutines 8 | waiting for or announcing the occurrence of an event. 9 | 10 | Each Cond has an associated Locker L (often a Mutex or RWMutex), which 11 | must be held when changing the condition and when calling the ``wait`` method. 12 | """ 13 | 14 | 15 | def __init__(self, l): 16 | self.l = l 17 | self.m = Mutex() 18 | 19 | # We must be careful to make sure that when ``signal`` 20 | # releases a semaphore, the corresponding acquire is 21 | # executed by a coroutine that was already waiting at 22 | # the time of the call to ``signal``, not one that arrived later. 23 | # To ensure this, we segment waiting coroutines into 24 | # generations punctuated by calls to ``signal``. 
Each call to 25 | # ``signal`` begins another generation if there are no coroutines 26 | # left in older generations for it to wake. Because of this 27 | # optimization (only begin another generation if there 28 | # are no older coroutines left), we only need to keep track 29 | # of the two most recent generations, which we call old 30 | # and new. 31 | 32 | self.old_waiters = 0 # number of waiters in old generation... 33 | self.old_sema = Semaphore() # ... waiting on this semaphore 34 | 35 | self.new_waiters = 0 # number of waiters in new generation... 36 | self.new_sema = Semaphore() # ... waiting on this semaphore 37 | 38 | def wait(self): 39 | """``wait`` atomically unlocks cond.l and suspends execution of the calling 40 | coroutine. After later resuming execution, ``wait`` locks cond.l before 41 | returning. Unlike in other systems, ``wait`` cannot return unless awoken by 42 | Broadcast or ``signal``. 43 | 44 | Because cond.l is not locked when ``wait`` first resumes, the caller typically 45 | cannot assume that the condition is true when ``wait`` returns. Instead, 46 | the caller should ``wait`` in a loop:: 47 | 48 | with m: 49 | while True: 50 | if not condition(): 51 | cond.wait() 52 | 53 | # ... handle the condition 54 | 55 | """ 56 | 57 | self.m.lock() 58 | 59 | if self.new_sema is None: 60 | self.new_sema = Semaphore() 61 | 62 | self.new_waiters += 1 63 | self.m.unlock() 64 | self.l.unlock() 65 | self.new_sema.acquire() 66 | self.l.lock() 67 | 68 | def signal(self): 69 | """ ``signal`` wakes one coroutine waiting on cond, if there is any. 70 | 71 | It is allowed but not required for the caller to hold cond.l 72 | during the call. 73 | """ 74 | self.m.lock() 75 | 76 | if self.old_waiters == 0 and self.new_waiters > 0: 77 | self.old_waiters = self.new_waiters 78 | self.old_sema = self.new_sema 79 | self.new_waiters = 0 80 | self.new_sema = None 81 | 82 | if self.old_waiters > 0: 83 | self.old_waiters -= 1 84 | self.old_sema.release() 85 | 86 | self.m.unlock() 87 | 88 | def broadcast(self): 89 | """ Broadcast wakes all coroutines waiting on cond. 90 | 91 | It is allowed but not required for the caller to hold cond.l 92 | during the call. 93 | """ 94 | self.m.lock() 95 | 96 | if self.old_waiters > 0: 97 | for i in range(self.new_waiters): 98 | self.new_sema.release() 99 | self.new_waiters = 0 100 | self.new_sema = None 101 | 102 | self.m.unlock() 103 | -------------------------------------------------------------------------------- /offset/sync/mutex.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | from .atomic import ffi, lib 6 | from .sema import Semaphore 7 | 8 | MUTEX_LOCKED = 1 9 | MUTEX_WOKEN = 2 10 | MUTEX_WAITER_SHIFT = 2 11 | 12 | class Locker(object): 13 | 14 | def lock(self): 15 | raise NotImplementedError 16 | 17 | def unlock(self): 18 | raise NotImplementedError 19 | 20 | 21 | class Mutex(Locker): 22 | """ A Mutex is a mutual exclusion lock. 
""" 23 | 24 | def __init__(self): 25 | self.state = ffi.new('long *', 0) 26 | self.sema = Semaphore(0) 27 | 28 | 29 | def lock(self): 30 | """ locks the coroutine """ 31 | 32 | if lib.long_bool_compare_and_swap(self.state, 0, MUTEX_LOCKED): 33 | return 34 | 35 | awoke = False 36 | while True: 37 | old = self.state[0] 38 | new = old | MUTEX_LOCKED 39 | 40 | if old & MUTEX_LOCKED: 41 | new = old + 1<> MUTEX_WAITER_SHIFT == 0 65 | or old & (MUTEX_LOCKED | MUTEX_WOKEN) != 0): 66 | return 67 | 68 | new = (old - 1 << MUTEX_WAITER_SHIFT) | MUTEX_WOKEN 69 | if lib.long_bool_compare_and_swap(self.state, old, new): 70 | self.sema.release() 71 | return 72 | old = self.state[0] 73 | 74 | def __exit__(self, t, v, tb): 75 | self.unlock() 76 | -------------------------------------------------------------------------------- /offset/sync/once.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | import functools 6 | 7 | from .atomic import AtomicLong 8 | from .mutex import Mutex 9 | 10 | class Once(object): 11 | """ Once is an object that will perform exactly one action. """ 12 | 13 | def __init__(self): 14 | self.m = Mutex() 15 | self.done = AtomicLong(0) 16 | 17 | def do(self, func): 18 | """ Do calls the function f if and only if the method is being called for the 19 | 20 | ex:: 21 | 22 | once = Once 23 | 24 | @once.do 25 | def f(): 26 | return 27 | 28 | # or 29 | once.do(f)() 30 | 31 | if once.do(f) is called multiple times, only the first call will invoke 32 | f. 33 | """ 34 | 35 | @functools.wraps(func) 36 | def _wrapper(*args, **kwargs): 37 | if self.done == 1: 38 | return 39 | 40 | with self.m: 41 | if self.done == 0: 42 | func(*args, **kwargs) 43 | self.done.value = 1 44 | 45 | return _wrapper 46 | -------------------------------------------------------------------------------- /offset/sync/rwmutex.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | from .atomic import AtomicLong 6 | from .mutex import Locker, Mutex 7 | from .sema import Semaphore 8 | 9 | RWMUTEX_MAX_READERS = 1 << 30 10 | 11 | class RWMutex(object): 12 | """ An RWMutex is a reader/writer mutual exclusion lock. 13 | 14 | The lock can be held by an arbitrary number of readers of a single writer 15 | """ 16 | 17 | def __init__(self): 18 | self.w = Mutex() # held if there are pending writers 19 | self.writer_sem = Semaphore() # semaphore to wait for completing readers 20 | self.reader_sem = Semaphore() #semaphore to wait for complering writers 21 | self.reader_count = AtomicLong(0) # number of pending readers 22 | self.reader_wait = AtomicLong(0) # number of departing readers 23 | 24 | def rlock(self): 25 | """ lock reading 26 | 27 | """ 28 | if self.reader_count.add(1) < 0: 29 | # a writer is pending, wait for it 30 | self.reader_sem.acquire() 31 | 32 | def runlock(self): 33 | """ unlock reading 34 | 35 | it does not affect other simultaneous readers. 36 | """ 37 | if self.reader_count.add(-1) < 0: 38 | # a writer is pending 39 | if self.reader_wait.add(-1) == 0: 40 | # the last reader unblock the writer 41 | self.writer_sem.release() 42 | 43 | def lock(self): 44 | """ lock for writing 45 | 46 | If the lock is already locked for reading or writing, it blocks until 47 | the lock is available. 
To ensure that the lock eventually becomes 48 | available, a blocked lock call excludes new readers from acquiring. 49 | """ 50 | self.w.lock() 51 | 52 | r = self.reader_count.add(-RWMUTEX_MAX_READERS) + RWMUTEX_MAX_READERS 53 | if r != 0 and self.reader_wait.add(r) != 0: 54 | self.writer_sem.acquire() 55 | 56 | def unlock(self): 57 | """ unlock writing 58 | 59 | As with Mutexes, a locked RWMutex is not associated with a particular 60 | coroutine. One coroutine may rLock (lock) an RWMutex and then arrange 61 | for another goroutine to rUnlock (unlock) it. 62 | """ 63 | r = self.reader_count.add(RWMUTEX_MAX_READERS) 64 | for i in range(r): 65 | self.reader_sem.release() 66 | 67 | self.w.unlock() 68 | 69 | def RLocker(self): 70 | return RLocker(self) 71 | 72 | class RLocker(Locker): 73 | """ RLocker returns a Locker instance that implements the lock and unnlock 74 | methods of RWMutex. """ 75 | 76 | def __init__(self, rw): 77 | self.rw = rw 78 | 79 | def lock(self): 80 | return self.rw.lock() 81 | 82 | __enter__ = lock 83 | 84 | def unlock(self): 85 | return self.rw.unlock() 86 | 87 | def __exit__(self, t, v, tb): 88 | self.unlock() 89 | 90 | -------------------------------------------------------------------------------- /offset/sync/sema.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | from collections import deque 6 | 7 | from .atomic import AtomicLong 8 | from ..core.context import park 9 | from ..core import proc 10 | 11 | 12 | class Semaphore(object): 13 | """ Semaphore implementation exposed to offset 14 | 15 | Intended use is provide a sleep and wakeup primitive that can be used in the 16 | contended case of other synchronization primitives. 17 | 18 | Thus it targets the same goal as Linux's futex, but it has much simpler 19 | semantics. 20 | 21 | That is, don't think of these as semaphores. Think of them as a way to 22 | implement sleep and wakeup such that every sleep is paired with a single 23 | wakeup, even if, due to races, the wakeup happens before the sleep. 24 | 25 | See Mullender and Cox, ``Semaphores in Plan 9,'' 26 | http://swtch.com/semaphore.pdf 27 | 28 | Comment and code based on the Go code: 29 | http://golang.org/src/pkg/runtime/sema.goc 30 | """ 31 | 32 | def __init__(self, value=0): 33 | self.sema = AtomicLong(value) 34 | self.nwait = AtomicLong(1) 35 | self.waiters = deque() 36 | 37 | def can_acquire(self): 38 | if self.sema > 0: 39 | self.sema -= 1 40 | return True 41 | return False 42 | 43 | def acquire(self): 44 | if self.can_acquire(): 45 | return 46 | 47 | t0 = 0 48 | releasetime = 0 49 | 50 | while True: 51 | self.nwait += 1 52 | self.waiters.append(proc.current()) 53 | 54 | if self.can_acquire(): 55 | self.nwait -= 1 56 | self.waiters.remove(proc.current()) 57 | return 58 | 59 | park() 60 | 61 | __enter__ = acquire 62 | 63 | def release(self): 64 | self.sema += 1 65 | 66 | if self.nwait == 0: 67 | return 68 | 69 | try: 70 | waiter = self.waiters.pop() 71 | except IndexError: 72 | return 73 | 74 | self.nwait -= 1 75 | waiter.ready() 76 | 77 | def __exit__(self, t, v, tb): 78 | return self.release() 79 | -------------------------------------------------------------------------------- /offset/sync/waitgroup.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 
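# --- Illustrative sketch, not part of the original sources -----------------
# Sharing a value between goroutines with the RWMutex defined in rwmutex.py
# above: a writer takes the exclusive lock, a reader takes the shared one.
# `go` and `makechan` come from offset.core as used elsewhere in this package;
# the helper below is purely an example.

def _example_rwmutex_counter():
    from offset.core import go, makechan
    from offset.sync import RWMutex

    rw = RWMutex()
    box = {"value": 0}
    done = makechan(2)

    def writer():
        rw.lock()            # exclusive: blocks until all readers are done
        box["value"] += 1
        rw.unlock()
        done.send(True)

    def reader():
        rw.rlock()           # shared: does not exclude other readers
        value = box["value"]
        rw.runlock()
        done.send(value)

    go(writer)
    go(reader)
    done.recv()
    return done.recv()
# ---------------------------------------------------------------------------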
4 | 5 | from ..core import PanicError 6 | 7 | from .atomic import AtomicLong 8 | from .mutex import Mutex 9 | from .sema import Semaphore 10 | 11 | 12 | class WaitGroup(object): 13 | """ A WaitGroup waits for a collection of goroutines to finish. 14 | The main goroutine calls ``add`` to set the number of goroutines to wait for. 15 | Then each of the goroutines runs and calls Done when finished. At the same 16 | time, ``wait`` can be used to block until all goroutines have finished. 17 | """ 18 | 19 | def __init__(self): 20 | self.m = Mutex() 21 | self.counter = AtomicLong(0) 22 | self.waiters = AtomicLong(0) 23 | self.sema = Semaphore() 24 | 25 | def add(self, delta): 26 | """ Add adds delta, which may be negative, to the WaitGroup counter. If 27 | the counter becomes zero, all goroutines blocked on Wait are released. 28 | If the counter goes negative, raise an error. 29 | 30 | Note that calls with positive delta must happen before the call to 31 | ``wait``, or else ``wait`` may wait for too small a group. Typically 32 | this means the calls to add should execute before the statement creating 33 | the goroutine or other event to be waited for. See the WaitGroup example. 34 | """ 35 | v = self.counter.add(delta) 36 | if v < 0: 37 | raise PanicError("sync: negative waitgroup counter") 38 | 39 | if v > 0 or self.waiters == 0: 40 | return 41 | 42 | with self.m: 43 | for i in range(self.waiters.value): 44 | self.sema.release() 45 | self.waiters = 0 46 | self.sema = None 47 | 48 | def done(self): 49 | """ decrement the WaitGroup counter """ 50 | self.add(-1) 51 | 52 | def wait(self): 53 | """ blocks until the WaitGroup counter is zero. """ 54 | if self.counter == 0: 55 | return 56 | 57 | self.m.lock() 58 | self.waiters += 1 59 | if self.counter == 0: 60 | self.waiters -= 1 61 | self.m.unlock() 62 | return 63 | 64 | if self.sema is None: 65 | self.sema = Semaphore() 66 | 67 | self.m.unlock() 68 | self.sema.acquire() 69 | -------------------------------------------------------------------------------- /offset/syscall/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | 6 | import sys 7 | 8 | from .fexec import ForkLock, closeonexec, setnonblock 9 | from . import proxy 10 | from ..core.kernel import signal_recv 11 | 12 | # patch the os module 13 | os = proxy.OsProxy() 14 | sys.modules['offset.syscall.os'] = os 15 | 16 | # patch the select module 17 | select = proxy.SelectProxy() 18 | sys.modules['offset.syscall.select'] = select 19 | 20 | # patch the socket module 21 | socket = proxy.SocketProxy() 22 | sys.modules['offset.syscall.socket'] = socket 23 | 24 | def signal(s): 25 | return signal_recv(s) 26 | -------------------------------------------------------------------------------- /offset/syscall/_socketio.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 
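# --- Illustrative sketch, not part of the original sources -----------------
# The add/done/wait pattern described by the WaitGroup docstring in
# offset/sync/waitgroup.py above.  `go` comes from offset.core as used
# elsewhere in this dump; the worker function is purely an example.

def _example_waitgroup():
    from offset.core import go
    from offset.sync import WaitGroup

    wg = WaitGroup()
    results = []

    def worker():
        results.append(1)    # stand-in for real work
        wg.done()            # decrement the counter when finished

    for _ in range(3):
        wg.add(1)            # register the goroutine before spawning it
        go(worker)

    wg.wait()                # block until every worker has called done()
    return results
# ---------------------------------------------------------------------------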
4 | 5 | """ 6 | socketio taken from the python3 stdlib 7 | """ 8 | 9 | import io 10 | import sys 11 | from errno import EINTR, EAGAIN, EWOULDBLOCK 12 | 13 | _socket = __import__('socket') 14 | _blocking_errnos = EAGAIN, EWOULDBLOCK 15 | 16 | 17 | # python2.6 fixes 18 | 19 | def _recv_into_sock_py26(sock, buf): 20 | data = sock.recv(len(buf)) 21 | l = len(data) 22 | buf[:l] = data 23 | return l 24 | 25 | 26 | if sys.version_info < (2, 7, 0, 'final'): 27 | _recv_into_sock = _recv_into_sock_py26 28 | else: 29 | _recv_into_sock = lambda sock, buf: sock.recv_into(buf) 30 | 31 | 32 | class SocketIO(io.RawIOBase): 33 | 34 | """Raw I/O implementation for stream sockets. 35 | 36 | This class supports the makefile() method on sockets. It provides 37 | the raw I/O interface on top of a socket object. 38 | """ 39 | 40 | # One might wonder why not let FileIO do the job instead. There are two 41 | # main reasons why FileIO is not adapted: 42 | # - it wouldn't work under Windows (where you can't used read() and 43 | # write() on a socket handle) 44 | # - it wouldn't work with socket timeouts (FileIO would ignore the 45 | # timeout and consider the socket non-blocking) 46 | 47 | # XXX More docs 48 | 49 | def __init__(self, sock, mode): 50 | if mode not in ("r", "w", "rw", "rb", "wb", "rwb"): 51 | raise ValueError("invalid mode: %r" % mode) 52 | io.RawIOBase.__init__(self) 53 | self._sock = sock 54 | if "b" not in mode: 55 | mode += "b" 56 | self._mode = mode 57 | self._reading = "r" in mode 58 | self._writing = "w" in mode 59 | self._timeout_occurred = False 60 | 61 | def readinto(self, b): 62 | """Read up to len(b) bytes into the writable buffer *b* and return 63 | the number of bytes read. If the socket is non-blocking and no bytes 64 | are available, None is returned. 65 | 66 | If *b* is non-empty, a 0 return value indicates that the connection 67 | was shutdown at the other end. 68 | """ 69 | self._checkClosed() 70 | self._checkReadable() 71 | if self._timeout_occurred: 72 | raise IOError("cannot read from timed out object") 73 | while True: 74 | try: 75 | return _recv_into_sock(self._sock, b) 76 | except _socket.timeout: 77 | self._timeout_occurred = True 78 | raise 79 | except _socket.error as e: 80 | n = e.args[0] 81 | if n == EINTR: 82 | continue 83 | if n in _blocking_errnos: 84 | return None 85 | raise 86 | 87 | def write(self, b): 88 | """Write the given bytes or bytearray object *b* to the socket 89 | and return the number of bytes written. This can be less than 90 | len(b) if not all data could be written. If the socket is 91 | non-blocking and no bytes could be written None is returned. 92 | """ 93 | self._checkClosed() 94 | self._checkWritable() 95 | try: 96 | return self._sock.send(b) 97 | except _socket.error as e: 98 | # XXX what about EINTR? 99 | if e.args[0] in _blocking_errnos: 100 | return None 101 | raise 102 | 103 | def readable(self): 104 | """True if the SocketIO is open for reading. 105 | """ 106 | return self._reading and not self.closed 107 | 108 | def writable(self): 109 | """True if the SocketIO is open for writing. 110 | """ 111 | return self._writing and not self.closed 112 | 113 | def fileno(self): 114 | """Return the file descriptor of the underlying socket. 
115 | """ 116 | self._checkClosed() 117 | return self._sock.fileno() 118 | 119 | @property 120 | def name(self): 121 | if not self.closed: 122 | return self.fileno() 123 | else: 124 | return -1 125 | 126 | @property 127 | def mode(self): 128 | return self._mode 129 | 130 | def close(self): 131 | """Close the SocketIO object. This doesn't close the underlying 132 | socket, except if all references to it have disappeared. 133 | """ 134 | if self.closed: 135 | return 136 | io.RawIOBase.close(self) 137 | self._sock._decref_socketios() 138 | self._sock = None 139 | 140 | def _checkClosed(self, msg=None): 141 | """Internal: raise an ValueError if file is closed 142 | """ 143 | if self.closed: 144 | raise ValueError("I/O operation on closed file." 145 | if msg is None else msg) 146 | -------------------------------------------------------------------------------- /offset/syscall/fexec.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | import fcntl 6 | import os 7 | 8 | from ..sync import RWMutex 9 | 10 | ForkLock = RWMutex() 11 | 12 | def closeonexec(fd): 13 | flags = fcntl.fcntl(fd, fcntl.F_GETFD) 14 | flags |= fcntl.FD_CLOEXEC 15 | fcntl.fcntl(fd, fcntl.F_SETFD, flags) 16 | 17 | 18 | def setnonblock(fd, nonblocking=True): 19 | flags = fcntl.fcntl(fd, fcntl.F_GETFL) 20 | if nonblocking: 21 | flags |= os.O_NONBLOCK 22 | else: 23 | flags &= ~os.O_NONBLOCK 24 | fcntl.fcntl(fd, fcntl.F_SETFL, flags) 25 | -------------------------------------------------------------------------------- /offset/syscall/proxy.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | 6 | __os_mod__ = __import__("os") 7 | __select_mod__ = __import__("select") 8 | __socket_mod__ = __import__("socket") 9 | _socket = __import__("socket") 10 | 11 | import io 12 | import wrapt 13 | from ..core import syscall, enter_syscall 14 | 15 | __all__ = ['OsProxy', 'SelectProxy'] 16 | 17 | 18 | # proxy the OS module 19 | 20 | class OsProxy(wrapt.ObjectProxy): 21 | """ proxy the os module """ 22 | 23 | _OS_SYSCALLS = ("chown", "fchown", "close", "dup", "dup2", "read", 24 | "pread","write", "pwrite", "sendfile", "readv", "writev", "stat", 25 | "lstat", "truncate", "sync", "lseek", "open", "posix_fallocate", 26 | "posix_fadvise", "chmod", "chflags", ) 27 | 28 | def __init__(self): 29 | super(OsProxy, self).__init__(__os_mod__) 30 | 31 | def __getattr__(self, name): 32 | # wrap syscalls 33 | if name in self._OS_SYSCALLS: 34 | return syscall(getattr(self.__wrapped__, name)) 35 | return getattr(self.__wrapped__, name) 36 | 37 | 38 | if hasattr(_socket, "SocketIO"): 39 | SocketIO = _socket.SocketIO 40 | else: 41 | from _socketio import SocketIO 42 | 43 | class socket(object): 44 | """A subclass of _socket.socket wrapping the makefile() method and 45 | patching blocking calls. 
""" 46 | 47 | __slots__ = ('_io_refs', '_sock', '_closed', ) 48 | 49 | _BL_SYSCALLS = ('accept', 'getpeername', 'getsockname', 50 | 'getsockopt', 'ioctl', 'recv', 'recvfrom', 'recvmsg', 51 | 'recvmsg_into', 'recvfrom_into', 'recv_into', 'send', 52 | 'sendall', 'sendto', 'sendmsg', ) 53 | 54 | def __init__(self, family=_socket.AF_INET, type=_socket.SOCK_STREAM, 55 | proto=0, fileno=None): 56 | 57 | if fileno is not None: 58 | if hasattr(_socket.socket, 'detach'): 59 | self._sock = _socket.socket(family, type, proto, fileno) 60 | else: 61 | self._sock = _socket.fromfd(fileno, family, type, proto) 62 | else: 63 | self._sock = _socket.socket(family, type, proto) 64 | 65 | self._io_refs = 0 66 | self._closed = False 67 | 68 | def __enter__(self): 69 | return self 70 | 71 | def __exit__(self, *args): 72 | if not self._closed: 73 | self.close() 74 | 75 | def __getattr__(self, name): 76 | # wrap syscalls 77 | if name in self._BL_SYSCALLS: 78 | return syscall(getattr(self._sock, name)) 79 | 80 | return getattr(self._sock, name) 81 | 82 | 83 | def makefile(self, mode="r", buffering=None, encoding=None, 84 | errors=None, newline=None): 85 | """makefile(...) -> an I/O stream connected to the socket 86 | 87 | The arguments are as for io.open() after the filename, 88 | except the only mode characters supported are 'r', 'w' and 'b'. 89 | The semantics are similar too. (XXX refactor to share code?) 90 | """ 91 | for c in mode: 92 | if c not in {"r", "w", "b"}: 93 | raise ValueError("invalid mode %r (only r, w, b allowed)") 94 | writing = "w" in mode 95 | reading = "r" in mode or not writing 96 | assert reading or writing 97 | binary = "b" in mode 98 | rawmode = "" 99 | if reading: 100 | rawmode += "r" 101 | if writing: 102 | rawmode += "w" 103 | raw = SocketIO(self, rawmode) 104 | self._io_refs += 1 105 | if buffering is None: 106 | buffering = -1 107 | if buffering < 0: 108 | buffering = io.DEFAULT_BUFFER_SIZE 109 | if buffering == 0: 110 | if not binary: 111 | raise ValueError("unbuffered streams must be binary") 112 | return raw 113 | if reading and writing: 114 | buffer = io.BufferedRWPair(raw, raw, buffering) 115 | elif reading: 116 | buffer = io.BufferedReader(raw, buffering) 117 | else: 118 | assert writing 119 | buffer = io.BufferedWriter(raw, buffering) 120 | if binary: 121 | return buffer 122 | text = io.TextIOWrapper(buffer, encoding, errors, newline) 123 | text.mode = mode 124 | return text 125 | 126 | def _decref_socketios(self): 127 | if self._io_refs > 0: 128 | self._io_refs -= 1 129 | if self._closed: 130 | self._sock.close() 131 | 132 | def close(self): 133 | self._closed = True 134 | if self._io_refs <= 0: 135 | """ 136 | # socket shutdown 137 | try: 138 | self._sock.shutdown(_socket.SHUT_RDWR) 139 | except: 140 | pass 141 | """ 142 | 143 | self._sock.close() 144 | 145 | def detach(self): 146 | self._closed = True 147 | if hasattr(self._sock, 'detach'): 148 | return self._sock.detach() 149 | 150 | new_fd = os.dup(self._sock.fileno()) 151 | self._sock.close() 152 | 153 | # python 2.7 has no detach method, fake it 154 | return new_fd 155 | 156 | 157 | class SocketProxy(wrapt.ObjectProxy): 158 | 159 | def __init__(self): 160 | super(SocketProxy, self).__init__(__socket_mod__) 161 | 162 | def socket(self, *args, **kwargs): 163 | return socket(*args, **kwargs) 164 | 165 | 166 | def fromfd(self, fd, family, type, proto=0): 167 | return socket(family, type, fileno=fd) 168 | 169 | if hasattr(socket, "share"): 170 | def fromshare(self, info): 171 | return socket(0, 0, 0, info) 172 | 173 | if 
hasattr(_socket, "socketpair"): 174 | def socketpair(self, family=None, type=__socket_mod__.SOCK_STREAM, 175 | proto=0): 176 | 177 | if family is None: 178 | try: 179 | family = self.__wrapped__.AF_UNIX 180 | except NameError: 181 | family = self.__wrapped__.AF_INET 182 | a, b = self.__wrapped__.socketpair(family, type, proto) 183 | 184 | if hasattr(a, 'detach'): 185 | a = socket(family, type, proto, a.detach()) 186 | b = socket(family, type, proto, b.detach()) 187 | else: 188 | a = socket(family, type, proto, a.fileno()) 189 | b = socket(family, type, proto, b.fileno()) 190 | 191 | return a, b 192 | 193 | 194 | # proxy the socket proxy 195 | 196 | 197 | class _Poll(object): 198 | 199 | def register(self, *args): 200 | return self.p.register(*args) 201 | 202 | def modify(self, *args): 203 | return self.p.modify(*args) 204 | 205 | def unregister(self, *args): 206 | return self.p.unregister(*args) 207 | 208 | def poll(self, *args, **kwargs): 209 | return enter_syscall(self.p.poll, *args) 210 | 211 | 212 | if hasattr(__select_mod__, "devpoll"): 213 | 214 | class devpoll(_Poll): 215 | 216 | def __init__(self): 217 | self.p = __select_mod__.devpoll() 218 | 219 | if hasattr(__select_mod__, "epoll"): 220 | 221 | class epoll(_Poll): 222 | 223 | def __init__(self): 224 | self.p = __select_mod__.epoll() 225 | 226 | def close(self): 227 | return self.p.close() 228 | 229 | def fileno(self): 230 | return self.p.fileno() 231 | 232 | def fromfd(self, fd): 233 | return self.p.fromfd(fd) 234 | 235 | if hasattr(__select_mod__, "poll"): 236 | 237 | class poll(_Poll): 238 | 239 | def __init__(self): 240 | self.p = __select_mod__.poll() 241 | 242 | if hasattr(__select_mod__, "kqueue"): 243 | 244 | class kqueue(object): 245 | 246 | def __init__(self): 247 | self.kq = __select_mod__.kqueue() 248 | 249 | def fileno(self): 250 | return self.kq.fileno() 251 | 252 | def fromfd(self, fd): 253 | return self.kq.fromfd(fd) 254 | 255 | def close(self): 256 | return self.kq.close() 257 | 258 | def control(self, *args, **kwargs): 259 | return enter_syscall(self.kq.control, *args, **kwargs) 260 | 261 | 262 | 263 | class SelectProxy(wrapt.ObjectProxy): 264 | 265 | def __init__(self): 266 | super(SelectProxy, self).__init__(__select_mod__) 267 | 268 | if hasattr(__select_mod__, "devpoll"): 269 | def devpoll(self): 270 | return devpoll() 271 | 272 | if hasattr(__select_mod__, "epoll"): 273 | def epoll(self): 274 | return epoll() 275 | 276 | if hasattr(__select_mod__, "poll"): 277 | def poll(self): 278 | return poll() 279 | 280 | if hasattr(__select_mod__, "kqueue"): 281 | def kqueue(self): 282 | return kqueue() 283 | 284 | def select(self, *args, **kwargs): 285 | return enter_syscall(self.__wrapped__.select, *args, **kwargs) 286 | -------------------------------------------------------------------------------- /offset/syscall/sysctl.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from ctypes import * 3 | from ctypes.util import find_library 4 | 5 | libc = cdll.LoadLibrary(find_library("c")) 6 | 7 | def sysctl(mib_t, c_type=None): 8 | mib = (c_int * len(mib_t))() 9 | for i, v in enumerate(mib_t): 10 | mib[i] = c_int(v) 11 | if c_type == None: 12 | size = c_size_t(0) 13 | libc.sysctl(mib, len(mib), None, byref(sz), None, 0) 14 | buf = create_string_buffer(size.value) 15 | else: 16 | buf = c_type() 17 | size = c_size_t(sizeof(buf)) 18 | size = libc.sysctl(mib, len(mib), byref(buf), byref(size), None, 0) 19 | if st != 0: 20 | raise OSError('sysctl() returned with error %d' % 
st) 21 | try: 22 | return buf.value 23 | except AttributeError: 24 | return buf 25 | 26 | def sysctlbyname(name, c_type=None): 27 | if c_type == None: 28 | size = c_size_t(0) 29 | libc.sysctlbyname(name, None, byref(sz), None, 0) 30 | buf = create_string_buffer(size.value) 31 | else: 32 | buf = c_type() 33 | size = c_size_t(sizeof(buf)) 34 | st = libc.sysctlbyname(name, byref(buf), byref(size), None, 0) 35 | if st != 0: 36 | raise OSError('sysctlbyname() returned with error %d' % st) 37 | try: 38 | return buf.value 39 | except AttributeError: 40 | return buf 41 | -------------------------------------------------------------------------------- /offset/time.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | from .core.util import nanotime, from_nanotime 6 | from .core import timer 7 | from .core.chan import makechan, select 8 | 9 | 10 | NANOSECOND = 1 11 | MICROSECOND = 1000 * NANOSECOND 12 | MILLISECOND = 1000 * MICROSECOND 13 | SECOND = 1000 * MILLISECOND 14 | MINUTE = 60 * SECOND 15 | HOUR = 60 * MINUTE 16 | 17 | nano = nanotime 18 | sleep = timer.sleep 19 | 20 | def _sendtime(now, t, c): 21 | select(c.if_send(from_nanotime(now))) 22 | 23 | class Timer(object): 24 | """ The Timer instance represents a single event. 25 | When the timer expires, the current time will be sent on c """ 26 | 27 | def __init__(self, interval): 28 | self.c = makechan(1) 29 | self.t = timer.Timer(_sendtime, interval, args=(self.c,)) 30 | self.t.start() 31 | 32 | def reset(self, interval): 33 | """ reset the timer interval """ 34 | w = nanotime() + interval 35 | self.t.stop() 36 | self.t.when = w 37 | self.t.start() 38 | 39 | def stop(self): 40 | self.t.stop() 41 | self.c.close() 42 | 43 | 44 | def After(interval): 45 | """ After waits for the duration to elapse and then sends the current time 46 | on the returned channel. 47 | It is equivalent to Timer(interval).c 48 | """ 49 | 50 | return Timer(interval).c 51 | 52 | def AfterFunc(interval, func, args=None, kwargs=None): 53 | """ AfterFunc waits for the duration to elapse and then calls f in its own 54 | goroutine. It returns a Timer that can be used to cancel the call using its 55 | Stop method. """ 56 | 57 | t = timer.Timer(func, interval, args=args, kwargs=kwargs) 58 | t.start() 59 | return t 60 | 61 | 62 | class Ticker(object): 63 | """ returns a new Ticker containing a channel that will send the 64 | time with a period specified by the duration argument. 65 | 66 | It adjusts the intervals or drops ticks to make up for slow receivers. 67 | The duration d must be greater than zero. 68 | """ 69 | 70 | def __init__(self, interval): 71 | if interval < 0: 72 | raise ValueError("non-positive interval") 73 | 74 | self.c = makechan(1) 75 | 76 | # set the runtime timer 77 | self.t = timer.Timer(_sendtime, interval, interval, args=(self.c,)) 78 | self.t.start() 79 | 80 | def stop(self): 81 | self.c.close() 82 | self.t.stop() 83 | 84 | 85 | def Tick(interval): 86 | """ Tick is a convenience wrapper for Ticker providing access 87 | to the ticking channel. 
Useful for clients that no need to shutdown 88 | the ticker """ 89 | 90 | if interval <= 0: 91 | return 92 | 93 | return Ticker(interval).c 94 | -------------------------------------------------------------------------------- /offset/util/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/benoitc/offset/b8561635a4cb44a9f47d086163f4d0b58bb8fd74/offset/util/__init__.py -------------------------------------------------------------------------------- /offset/util/six.py: -------------------------------------------------------------------------------- 1 | """Utilities for writing code that runs on Python 2 and 3""" 2 | 3 | # Copyright (c) 2010-2013 Benjamin Peterson 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to deal 7 | # in the Software without restriction, including without limitation the rights 8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | # copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in all 13 | # copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | # SOFTWARE. 22 | 23 | import operator 24 | import sys 25 | import types 26 | 27 | __author__ = "Benjamin Peterson " 28 | __version__ = "1.4.1" 29 | 30 | 31 | # Useful for very coarse version differentiation. 32 | PY2 = sys.version_info[0] == 2 33 | PY3 = sys.version_info[0] == 3 34 | 35 | if PY3: 36 | string_types = str, 37 | integer_types = int, 38 | class_types = type, 39 | text_type = str 40 | binary_type = bytes 41 | 42 | MAXSIZE = sys.maxsize 43 | else: 44 | string_types = basestring, 45 | integer_types = (int, long) 46 | class_types = (type, types.ClassType) 47 | text_type = unicode 48 | binary_type = str 49 | 50 | if sys.platform.startswith("java"): 51 | # Jython always uses 32 bits. 52 | MAXSIZE = int((1 << 31) - 1) 53 | else: 54 | # It's possible to have sizeof(long) != sizeof(Py_ssize_t). 55 | class X(object): 56 | def __len__(self): 57 | return 1 << 31 58 | try: 59 | len(X()) 60 | except OverflowError: 61 | # 32-bit 62 | MAXSIZE = int((1 << 31) - 1) 63 | else: 64 | # 64-bit 65 | MAXSIZE = int((1 << 63) - 1) 66 | del X 67 | 68 | 69 | def _add_doc(func, doc): 70 | """Add documentation to a function.""" 71 | func.__doc__ = doc 72 | 73 | 74 | def _import_module(name): 75 | """Import module, returning the module after the last dot.""" 76 | __import__(name) 77 | return sys.modules[name] 78 | 79 | 80 | class _LazyDescr(object): 81 | 82 | def __init__(self, name): 83 | self.name = name 84 | 85 | def __get__(self, obj, tp): 86 | result = self._resolve() 87 | setattr(obj, self.name, result) 88 | # This is a bit ugly, but it avoids running this again. 
89 | delattr(tp, self.name) 90 | return result 91 | 92 | 93 | class MovedModule(_LazyDescr): 94 | 95 | def __init__(self, name, old, new=None): 96 | super(MovedModule, self).__init__(name) 97 | if PY3: 98 | if new is None: 99 | new = name 100 | self.mod = new 101 | else: 102 | self.mod = old 103 | 104 | def _resolve(self): 105 | return _import_module(self.mod) 106 | 107 | 108 | class MovedAttribute(_LazyDescr): 109 | 110 | def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): 111 | super(MovedAttribute, self).__init__(name) 112 | if PY3: 113 | if new_mod is None: 114 | new_mod = name 115 | self.mod = new_mod 116 | if new_attr is None: 117 | if old_attr is None: 118 | new_attr = name 119 | else: 120 | new_attr = old_attr 121 | self.attr = new_attr 122 | else: 123 | self.mod = old_mod 124 | if old_attr is None: 125 | old_attr = name 126 | self.attr = old_attr 127 | 128 | def _resolve(self): 129 | module = _import_module(self.mod) 130 | return getattr(module, self.attr) 131 | 132 | 133 | 134 | class _MovedItems(types.ModuleType): 135 | """Lazy loading of moved objects""" 136 | 137 | 138 | _moved_attributes = [ 139 | MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), 140 | MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), 141 | MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), 142 | MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), 143 | MovedAttribute("map", "itertools", "builtins", "imap", "map"), 144 | MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), 145 | MovedAttribute("reload_module", "__builtin__", "imp", "reload"), 146 | MovedAttribute("reduce", "__builtin__", "functools"), 147 | MovedAttribute("StringIO", "StringIO", "io"), 148 | MovedAttribute("UserString", "UserString", "collections"), 149 | MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), 150 | MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), 151 | MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), 152 | 153 | MovedModule("builtins", "__builtin__"), 154 | MovedModule("configparser", "ConfigParser"), 155 | MovedModule("copyreg", "copy_reg"), 156 | MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), 157 | MovedModule("http_cookies", "Cookie", "http.cookies"), 158 | MovedModule("html_entities", "htmlentitydefs", "html.entities"), 159 | MovedModule("html_parser", "HTMLParser", "html.parser"), 160 | MovedModule("http_client", "httplib", "http.client"), 161 | MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), 162 | MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), 163 | MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), 164 | MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), 165 | MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), 166 | MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), 167 | MovedModule("cPickle", "cPickle", "pickle"), 168 | MovedModule("queue", "Queue"), 169 | MovedModule("reprlib", "repr"), 170 | MovedModule("socketserver", "SocketServer"), 171 | MovedModule("tkinter", "Tkinter"), 172 | MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), 173 | MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), 174 | MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), 175 | MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), 176 | 
MovedModule("tkinter_tix", "Tix", "tkinter.tix"), 177 | MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), 178 | MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), 179 | MovedModule("tkinter_colorchooser", "tkColorChooser", 180 | "tkinter.colorchooser"), 181 | MovedModule("tkinter_commondialog", "tkCommonDialog", 182 | "tkinter.commondialog"), 183 | MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), 184 | MovedModule("tkinter_font", "tkFont", "tkinter.font"), 185 | MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), 186 | MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", 187 | "tkinter.simpledialog"), 188 | MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), 189 | MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), 190 | MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), 191 | MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), 192 | MovedModule("winreg", "_winreg"), 193 | ] 194 | for attr in _moved_attributes: 195 | setattr(_MovedItems, attr.name, attr) 196 | del attr 197 | 198 | moves = sys.modules[__name__ + ".moves"] = _MovedItems(__name__ + ".moves") 199 | 200 | 201 | 202 | class Module_six_moves_urllib_parse(types.ModuleType): 203 | """Lazy loading of moved objects in six.moves.urllib_parse""" 204 | 205 | 206 | _urllib_parse_moved_attributes = [ 207 | MovedAttribute("ParseResult", "urlparse", "urllib.parse"), 208 | MovedAttribute("parse_qs", "urlparse", "urllib.parse"), 209 | MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), 210 | MovedAttribute("urldefrag", "urlparse", "urllib.parse"), 211 | MovedAttribute("urljoin", "urlparse", "urllib.parse"), 212 | MovedAttribute("urlparse", "urlparse", "urllib.parse"), 213 | MovedAttribute("urlsplit", "urlparse", "urllib.parse"), 214 | MovedAttribute("urlunparse", "urlparse", "urllib.parse"), 215 | MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), 216 | MovedAttribute("quote", "urllib", "urllib.parse"), 217 | MovedAttribute("quote_plus", "urllib", "urllib.parse"), 218 | MovedAttribute("unquote", "urllib", "urllib.parse"), 219 | MovedAttribute("unquote_plus", "urllib", "urllib.parse"), 220 | MovedAttribute("urlencode", "urllib", "urllib.parse"), 221 | ] 222 | for attr in _urllib_parse_moved_attributes: 223 | setattr(Module_six_moves_urllib_parse, attr.name, attr) 224 | del attr 225 | 226 | sys.modules[__name__ + ".moves.urllib_parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse") 227 | sys.modules[__name__ + ".moves.urllib.parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib.parse") 228 | 229 | 230 | class Module_six_moves_urllib_error(types.ModuleType): 231 | """Lazy loading of moved objects in six.moves.urllib_error""" 232 | 233 | 234 | _urllib_error_moved_attributes = [ 235 | MovedAttribute("URLError", "urllib2", "urllib.error"), 236 | MovedAttribute("HTTPError", "urllib2", "urllib.error"), 237 | MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), 238 | ] 239 | for attr in _urllib_error_moved_attributes: 240 | setattr(Module_six_moves_urllib_error, attr.name, attr) 241 | del attr 242 | 243 | sys.modules[__name__ + ".moves.urllib_error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib_error") 244 | sys.modules[__name__ + ".moves.urllib.error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib.error") 245 | 246 | 247 | class Module_six_moves_urllib_request(types.ModuleType): 248 | """Lazy loading 
of moved objects in six.moves.urllib_request""" 249 | 250 | 251 | _urllib_request_moved_attributes = [ 252 | MovedAttribute("urlopen", "urllib2", "urllib.request"), 253 | MovedAttribute("install_opener", "urllib2", "urllib.request"), 254 | MovedAttribute("build_opener", "urllib2", "urllib.request"), 255 | MovedAttribute("pathname2url", "urllib", "urllib.request"), 256 | MovedAttribute("url2pathname", "urllib", "urllib.request"), 257 | MovedAttribute("getproxies", "urllib", "urllib.request"), 258 | MovedAttribute("Request", "urllib2", "urllib.request"), 259 | MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), 260 | MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), 261 | MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), 262 | MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), 263 | MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), 264 | MovedAttribute("BaseHandler", "urllib2", "urllib.request"), 265 | MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), 266 | MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), 267 | MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), 268 | MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), 269 | MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), 270 | MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), 271 | MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), 272 | MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), 273 | MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), 274 | MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), 275 | MovedAttribute("FileHandler", "urllib2", "urllib.request"), 276 | MovedAttribute("FTPHandler", "urllib2", "urllib.request"), 277 | MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), 278 | MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), 279 | MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), 280 | MovedAttribute("urlretrieve", "urllib", "urllib.request"), 281 | MovedAttribute("urlcleanup", "urllib", "urllib.request"), 282 | MovedAttribute("URLopener", "urllib", "urllib.request"), 283 | MovedAttribute("FancyURLopener", "urllib", "urllib.request"), 284 | ] 285 | for attr in _urllib_request_moved_attributes: 286 | setattr(Module_six_moves_urllib_request, attr.name, attr) 287 | del attr 288 | 289 | sys.modules[__name__ + ".moves.urllib_request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib_request") 290 | sys.modules[__name__ + ".moves.urllib.request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib.request") 291 | 292 | 293 | class Module_six_moves_urllib_response(types.ModuleType): 294 | """Lazy loading of moved objects in six.moves.urllib_response""" 295 | 296 | 297 | _urllib_response_moved_attributes = [ 298 | MovedAttribute("addbase", "urllib", "urllib.response"), 299 | MovedAttribute("addclosehook", "urllib", "urllib.response"), 300 | MovedAttribute("addinfo", "urllib", "urllib.response"), 301 | MovedAttribute("addinfourl", "urllib", "urllib.response"), 302 | ] 303 | for attr in _urllib_response_moved_attributes: 304 | setattr(Module_six_moves_urllib_response, attr.name, attr) 305 | del attr 306 | 307 | sys.modules[__name__ + ".moves.urllib_response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib_response") 308 | sys.modules[__name__ + ".moves.urllib.response"] = 
Module_six_moves_urllib_response(__name__ + ".moves.urllib.response") 309 | 310 | 311 | class Module_six_moves_urllib_robotparser(types.ModuleType): 312 | """Lazy loading of moved objects in six.moves.urllib_robotparser""" 313 | 314 | 315 | _urllib_robotparser_moved_attributes = [ 316 | MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), 317 | ] 318 | for attr in _urllib_robotparser_moved_attributes: 319 | setattr(Module_six_moves_urllib_robotparser, attr.name, attr) 320 | del attr 321 | 322 | sys.modules[__name__ + ".moves.urllib_robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib_robotparser") 323 | sys.modules[__name__ + ".moves.urllib.robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser") 324 | 325 | 326 | class Module_six_moves_urllib(types.ModuleType): 327 | """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" 328 | parse = sys.modules[__name__ + ".moves.urllib_parse"] 329 | error = sys.modules[__name__ + ".moves.urllib_error"] 330 | request = sys.modules[__name__ + ".moves.urllib_request"] 331 | response = sys.modules[__name__ + ".moves.urllib_response"] 332 | robotparser = sys.modules[__name__ + ".moves.urllib_robotparser"] 333 | 334 | 335 | sys.modules[__name__ + ".moves.urllib"] = Module_six_moves_urllib(__name__ + ".moves.urllib") 336 | 337 | 338 | def add_move(move): 339 | """Add an item to six.moves.""" 340 | setattr(_MovedItems, move.name, move) 341 | 342 | 343 | def remove_move(name): 344 | """Remove item from six.moves.""" 345 | try: 346 | delattr(_MovedItems, name) 347 | except AttributeError: 348 | try: 349 | del moves.__dict__[name] 350 | except KeyError: 351 | raise AttributeError("no such move, %r" % (name,)) 352 | 353 | 354 | if PY3: 355 | _meth_func = "__func__" 356 | _meth_self = "__self__" 357 | 358 | _func_closure = "__closure__" 359 | _func_code = "__code__" 360 | _func_defaults = "__defaults__" 361 | _func_globals = "__globals__" 362 | 363 | _iterkeys = "keys" 364 | _itervalues = "values" 365 | _iteritems = "items" 366 | _iterlists = "lists" 367 | else: 368 | _meth_func = "im_func" 369 | _meth_self = "im_self" 370 | 371 | _func_closure = "func_closure" 372 | _func_code = "func_code" 373 | _func_defaults = "func_defaults" 374 | _func_globals = "func_globals" 375 | 376 | _iterkeys = "iterkeys" 377 | _itervalues = "itervalues" 378 | _iteritems = "iteritems" 379 | _iterlists = "iterlists" 380 | 381 | 382 | try: 383 | advance_iterator = next 384 | except NameError: 385 | def advance_iterator(it): 386 | return it.next() 387 | next = advance_iterator 388 | 389 | 390 | try: 391 | callable = callable 392 | except NameError: 393 | def callable(obj): 394 | return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) 395 | 396 | 397 | if PY3: 398 | def get_unbound_function(unbound): 399 | return unbound 400 | 401 | create_bound_method = types.MethodType 402 | 403 | Iterator = object 404 | else: 405 | def get_unbound_function(unbound): 406 | return unbound.im_func 407 | 408 | def create_bound_method(func, obj): 409 | return types.MethodType(func, obj, obj.__class__) 410 | 411 | class Iterator(object): 412 | 413 | def next(self): 414 | return type(self).__next__(self) 415 | 416 | callable = callable 417 | _add_doc(get_unbound_function, 418 | """Get the function out of a possibly unbound function""") 419 | 420 | 421 | get_method_function = operator.attrgetter(_meth_func) 422 | get_method_self = operator.attrgetter(_meth_self) 423 | get_function_closure = 
operator.attrgetter(_func_closure) 424 | get_function_code = operator.attrgetter(_func_code) 425 | get_function_defaults = operator.attrgetter(_func_defaults) 426 | get_function_globals = operator.attrgetter(_func_globals) 427 | 428 | 429 | def iterkeys(d, **kw): 430 | """Return an iterator over the keys of a dictionary.""" 431 | return iter(getattr(d, _iterkeys)(**kw)) 432 | 433 | def itervalues(d, **kw): 434 | """Return an iterator over the values of a dictionary.""" 435 | return iter(getattr(d, _itervalues)(**kw)) 436 | 437 | def iteritems(d, **kw): 438 | """Return an iterator over the (key, value) pairs of a dictionary.""" 439 | return iter(getattr(d, _iteritems)(**kw)) 440 | 441 | def iterlists(d, **kw): 442 | """Return an iterator over the (key, [values]) pairs of a dictionary.""" 443 | return iter(getattr(d, _iterlists)(**kw)) 444 | 445 | 446 | if PY3: 447 | def b(s): 448 | return s.encode("latin-1") 449 | def u(s): 450 | return s 451 | unichr = chr 452 | if sys.version_info[1] <= 1: 453 | def int2byte(i): 454 | return bytes((i,)) 455 | else: 456 | # This is about 2x faster than the implementation above on 3.2+ 457 | int2byte = operator.methodcaller("to_bytes", 1, "big") 458 | byte2int = operator.itemgetter(0) 459 | indexbytes = operator.getitem 460 | iterbytes = iter 461 | import io 462 | StringIO = io.StringIO 463 | BytesIO = io.BytesIO 464 | else: 465 | def b(s): 466 | return s 467 | def u(s): 468 | return unicode(s, "unicode_escape") 469 | unichr = unichr 470 | int2byte = chr 471 | def byte2int(bs): 472 | return ord(bs[0]) 473 | def indexbytes(buf, i): 474 | return ord(buf[i]) 475 | def iterbytes(buf): 476 | return (ord(byte) for byte in buf) 477 | import StringIO 478 | StringIO = BytesIO = StringIO.StringIO 479 | _add_doc(b, """Byte literal""") 480 | _add_doc(u, """Text literal""") 481 | 482 | 483 | if PY3: 484 | import builtins 485 | exec_ = getattr(builtins, "exec") 486 | 487 | 488 | def reraise(tp, value, tb=None): 489 | if value.__traceback__ is not tb: 490 | raise value.with_traceback(tb) 491 | raise value 492 | 493 | 494 | print_ = getattr(builtins, "print") 495 | del builtins 496 | 497 | else: 498 | def exec_(_code_, _globs_=None, _locs_=None): 499 | """Execute code in a namespace.""" 500 | if _globs_ is None: 501 | frame = sys._getframe(1) 502 | _globs_ = frame.f_globals 503 | if _locs_ is None: 504 | _locs_ = frame.f_locals 505 | del frame 506 | elif _locs_ is None: 507 | _locs_ = _globs_ 508 | exec("""exec _code_ in _globs_, _locs_""") 509 | 510 | 511 | exec_("""def reraise(tp, value, tb=None): 512 | raise tp, value, tb 513 | """) 514 | 515 | 516 | def print_(*args, **kwargs): 517 | """The new-style print function.""" 518 | fp = kwargs.pop("file", sys.stdout) 519 | if fp is None: 520 | return 521 | def write(data): 522 | if not isinstance(data, basestring): 523 | data = str(data) 524 | fp.write(data) 525 | want_unicode = False 526 | sep = kwargs.pop("sep", None) 527 | if sep is not None: 528 | if isinstance(sep, unicode): 529 | want_unicode = True 530 | elif not isinstance(sep, str): 531 | raise TypeError("sep must be None or a string") 532 | end = kwargs.pop("end", None) 533 | if end is not None: 534 | if isinstance(end, unicode): 535 | want_unicode = True 536 | elif not isinstance(end, str): 537 | raise TypeError("end must be None or a string") 538 | if kwargs: 539 | raise TypeError("invalid keyword arguments to print()") 540 | if not want_unicode: 541 | for arg in args: 542 | if isinstance(arg, unicode): 543 | want_unicode = True 544 | break 545 | if want_unicode: 
546 | newline = unicode("\n") 547 | space = unicode(" ") 548 | else: 549 | newline = "\n" 550 | space = " " 551 | if sep is None: 552 | sep = space 553 | if end is None: 554 | end = newline 555 | for i, arg in enumerate(args): 556 | if i: 557 | write(sep) 558 | write(arg) 559 | write(end) 560 | 561 | _add_doc(reraise, """Reraise an exception.""") 562 | 563 | 564 | def with_metaclass(meta, *bases): 565 | """Create a base class with a metaclass.""" 566 | return meta("NewBase", bases, {}) 567 | 568 | def add_metaclass(metaclass): 569 | """Class decorator for creating a class with a metaclass.""" 570 | def wrapper(cls): 571 | orig_vars = cls.__dict__.copy() 572 | orig_vars.pop('__dict__', None) 573 | orig_vars.pop('__weakref__', None) 574 | for slots_var in orig_vars.get('__slots__', ()): 575 | orig_vars.pop(slots_var) 576 | return metaclass(cls.__name__, cls.__bases__, orig_vars) 577 | return wrapper 578 | -------------------------------------------------------------------------------- /offset/version.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | version_info = (0, 1, 0) 6 | __version__ = ".".join([str(v) for v in version_info]) 7 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | fibers 2 | cffi 3 | wrapt 4 | -------------------------------------------------------------------------------- /requirements_dev.txt: -------------------------------------------------------------------------------- 1 | fibers 2 | cffi 3 | wrapt 4 | pytest 5 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | from setuptools import setup, find_packages, Extension 5 | from setuptools.command.test import test as TestCommand 6 | 7 | class PyTest(TestCommand): 8 | def finalize_options(self): 9 | TestCommand.finalize_options(self) 10 | self.test_args = [] 11 | self.test_suite = True 12 | 13 | def run_tests(self): 14 | import pytest 15 | errno = pytest.main(self.test_args) 16 | sys.exit(errno) 17 | 18 | 19 | is_pypy = '__pypy__' in sys.builtin_module_names 20 | py_version = sys.version_info[:2] 21 | 22 | if py_version < (2, 7): 23 | raise RuntimeError('On Python 2, offset requires Python 2.7 or better') 24 | 25 | 26 | 27 | REQUIREMENTS = ["cffi", "wrapt", "fibers"] 28 | 29 | if py_version == (2, 7): 30 | REQUIREMENTS.append('futures') 31 | 32 | import imp 33 | 34 | def load_module(name, path): 35 | f, pathname, description = imp.find_module(name, [path]) 36 | return imp.load_module(name, f, pathname, description) 37 | 38 | 39 | try: 40 | atomic = load_module('atomic', './offset/sync') 41 | except ImportError: 42 | EXT_MODULES=[] 43 | else: 44 | EXT_MODULES=[atomic.ffi.verifier.get_extension()] 45 | 46 | CLASSIFIERS = [ 47 | 'Development Status :: 4 - Beta', 48 | 'Environment :: Web Environment', 49 | 'Intended Audience :: Developers', 50 | 'License :: OSI Approved :: MIT License', 51 | 'Operating System :: OS Independent', 52 | 'Programming Language :: Python', 53 | 'Programming Language :: Python :: 2', 54 | 'Programming Language :: Python :: 2.7', 55 | 'Programming Language :: Python :: 3', 56 | 'Programming Language :: Python :: 3.0', 57 | 'Programming Language :: Python :: 3.1', 
58 | 'Programming Language :: Python :: 3.2', 59 | 'Programming Language :: Python :: 3.3', 60 | 'Topic :: Software Development :: Libraries'] 61 | 62 | 63 | # read long description 64 | with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f: 65 | long_description = f.read() 66 | 67 | DATA_FILES = [ 68 | ('offset', ["LICENSE", "MANIFEST.in", "NOTICE", "README.rst", 69 | "THANKS"]) 70 | ] 71 | 72 | VERSION = load_module('version', './offset').__version__ 73 | 74 | setup(name='offset', 75 | version=VERSION, 76 | description = 'collection of modules to build distributed and reliable concurrent systems', 77 | long_description = long_description, 78 | classifiers = CLASSIFIERS, 79 | license = 'BSD', 80 | url = 'http://github.com/benoitc/offset', 81 | author = 'Benoit Chesneau', 82 | author_email = 'benoitc@e-engura.org', 83 | packages=find_packages(), 84 | install_requires = REQUIREMENTS, 85 | setup_requires=REQUIREMENTS, 86 | tests_require=['pytest'], 87 | ext_modules=EXT_MODULES, 88 | data_files = DATA_FILES, 89 | cmdclass={"test": PyTest}) 90 | -------------------------------------------------------------------------------- /tests/test_atomic.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | # copyright (c) 2013 David Reid under the MIT License. 5 | 6 | 7 | from offset.sync.atomic import AtomicLong, ffi, lib 8 | 9 | def test_long_add_and_fetch(): 10 | l = ffi.new('long *', 0) 11 | assert lib.long_add_and_fetch(l, 1) == 1 12 | assert lib.long_add_and_fetch(l, 10) == 11 13 | 14 | def test_long_sub_and_fetch(): 15 | l = ffi.new('long *', 0) 16 | assert lib.long_sub_and_fetch(l, 1) == -1 17 | assert lib.long_sub_and_fetch(l, 10) == -11 18 | 19 | def test_long_bool_compare_and_swap(): 20 | l = ffi.new('long *', 0) 21 | assert lib.long_bool_compare_and_swap(l, 0, 10) == True 22 | assert lib.long_bool_compare_and_swap(l, 1, 20) == False 23 | 24 | def test_atomiclong_repr(): 25 | l = AtomicLong(123456789) 26 | assert '' in repr(l) 28 | 29 | def test_atomiclong_value(): 30 | l = AtomicLong(0) 31 | assert l.value == 0 32 | l.value = 10 33 | assert l.value == 10 34 | 35 | def test_atomiclong_iadd(): 36 | l = AtomicLong(0) 37 | l += 10 38 | assert l.value == 10 39 | 40 | def test_atomiclong_isub(): 41 | l = AtomicLong(0) 42 | l -= 10 43 | assert l.value == -10 44 | 45 | def test_atomiclong_eq(): 46 | l1 = AtomicLong(0) 47 | l2 = AtomicLong(1) 48 | l3 = AtomicLong(0) 49 | assert l1 == 0 50 | assert l1 != 1 51 | assert not (l2 == 0) 52 | assert not (l2 != 1) 53 | assert l1 == l3 54 | assert not (l1 != l3) 55 | assert l1 != l2 56 | assert not (l1 == l2) 57 | 58 | def test_atomiclong_ordering(): 59 | l1 = AtomicLong(0) 60 | l2 = AtomicLong(1) 61 | l3 = AtomicLong(0) 62 | 63 | assert l1 < l2 64 | assert l1 <= l2 65 | assert l1 <= l3 66 | assert l2 > l1 67 | assert l2 >= l3 68 | assert l2 >= l2 69 | 70 | assert l1 < 1 71 | assert l1 <= 0 72 | assert l1 <= 1 73 | assert l1 > -1 74 | assert l1 >= -1 75 | assert l1 >= 0 76 | -------------------------------------------------------------------------------- /tests/test_channel.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 
4 | 5 | from __future__ import absolute_import 6 | 7 | import time 8 | from py.test import skip 9 | 10 | 11 | from offset import makechan, go, gosched, run, maintask, select 12 | from offset.core.chan import bomb 13 | 14 | SHOW_STRANGE = False 15 | 16 | from offset.util import six 17 | 18 | def dprint(txt): 19 | if SHOW_STRANGE: 20 | print(txt) 21 | 22 | class Test_Channel: 23 | 24 | def test_simple_channel(self): 25 | output = [] 26 | def print_(*args): 27 | output.append(args) 28 | 29 | def sending(channel): 30 | print_("sending") 31 | channel.send("foo") 32 | 33 | def receiving(channel): 34 | print_("receiving") 35 | print_(channel.recv()) 36 | 37 | @maintask 38 | def main(): 39 | ch = makechan() 40 | go(sending, ch) 41 | go(receiving, ch) 42 | 43 | run() 44 | 45 | assert output == [('sending',), ('receiving',), ('foo',)] 46 | 47 | 48 | def test_send_counter(self): 49 | import random 50 | 51 | numbers = list(range(20)) 52 | random.shuffle(numbers) 53 | 54 | def counter(n, ch): 55 | ch.send(n) 56 | 57 | rlist = [] 58 | 59 | 60 | @maintask 61 | def main(): 62 | ch = makechan() 63 | for each in numbers: 64 | go(counter, each, ch) 65 | for each in numbers: 66 | rlist.append(ch.recv()) 67 | 68 | run() 69 | 70 | rlist.sort() 71 | numbers.sort() 72 | assert rlist == numbers 73 | 74 | def test_recv_counter(self): 75 | import random 76 | 77 | numbers = list(range(20)) 78 | random.shuffle(numbers) 79 | 80 | rlist = [] 81 | def counter(n, ch): 82 | ch.recv() 83 | rlist.append(n) 84 | 85 | @maintask 86 | def main(): 87 | ch = makechan() 88 | 89 | for each in numbers: 90 | go(counter, each, ch) 91 | 92 | for each in numbers: 93 | ch.send(None) 94 | run() 95 | 96 | numbers.sort() 97 | rlist.sort() 98 | assert rlist == numbers 99 | 100 | def test_bomb(self): 101 | try: 102 | 1/0 103 | except: 104 | import sys 105 | b = bomb(*sys.exc_info()) 106 | assert b.type is ZeroDivisionError 107 | if six.PY3: 108 | assert (str(b.value).startswith('division by zero') or 109 | str(b.value).startswith('int division')) 110 | else: 111 | assert str(b.value).startswith('integer division') 112 | assert b.traceback is not None 113 | 114 | def test_send_exception(self): 115 | def exp_sender(chan): 116 | chan.send_exception(Exception, 'test') 117 | 118 | def exp_recv(chan): 119 | try: 120 | val = chan.recv() 121 | except Exception as exp: 122 | assert exp.__class__ is Exception 123 | assert str(exp) == 'test' 124 | 125 | @maintask 126 | def main(): 127 | chan = makechan() 128 | go(exp_recv, chan) 129 | go(exp_sender, chan) 130 | run() 131 | 132 | 133 | def test_simple_pipe(self): 134 | def pipe(X_in, X_out): 135 | foo = X_in.recv() 136 | X_out.send(foo) 137 | 138 | @maintask 139 | def main(): 140 | X, Y = makechan(), makechan() 141 | go(pipe, X, Y) 142 | 143 | X.send(42) 144 | assert Y.recv() == 42 145 | run() 146 | 147 | 148 | def test_nested_pipe(self): 149 | dprint('tnp ==== 1') 150 | def pipe(X, Y): 151 | dprint('tnp_P ==== 1') 152 | foo = X.recv() 153 | dprint('tnp_P ==== 2') 154 | Y.send(foo) 155 | dprint('tnp_P ==== 3') 156 | 157 | def nest(X, Y): 158 | X2, Y2 = makechan(), makechan() 159 | go(pipe, X2, Y2) 160 | dprint('tnp_N ==== 1') 161 | X_Val = X.recv() 162 | dprint('tnp_N ==== 2') 163 | X2.send(X_Val) 164 | dprint('tnp_N ==== 3') 165 | Y2_Val = Y2.recv() 166 | dprint('tnp_N ==== 4') 167 | Y.send(Y2_Val) 168 | dprint('tnp_N ==== 5') 169 | 170 | 171 | @maintask 172 | def main(): 173 | X, Y = makechan(), makechan() 174 | go(nest, X, Y) 175 | X.send(13) 176 | dprint('tnp ==== 2') 177 | res = Y.recv() 178 | 
dprint('tnp ==== 3') 179 | assert res == 13 180 | if SHOW_STRANGE: 181 | raise Exception('force prints') 182 | 183 | run() 184 | 185 | def test_wait_two(self): 186 | """ 187 | A tasklets/channels adaptation of the test_wait_two from the 188 | logic object space 189 | """ 190 | def sleep(X, Y): 191 | dprint('twt_S ==== 1') 192 | value = X.recv() 193 | dprint('twt_S ==== 2') 194 | Y.send((X, value)) 195 | dprint('twt_S ==== 3') 196 | 197 | def wait_two(X, Y, Ret_chan): 198 | Barrier = makechan() 199 | go(sleep, X, Barrier) 200 | go(sleep, Y, Barrier) 201 | dprint('twt_W ==== 1') 202 | ret = Barrier.recv() 203 | dprint('twt_W ==== 2') 204 | if ret[0] == X: 205 | Ret_chan.send((1, ret[1])) 206 | else: 207 | Ret_chan.send((2, ret[1])) 208 | dprint('twt_W ==== 3') 209 | 210 | @maintask 211 | def main(): 212 | X, Y = makechan(), makechan() 213 | Ret_chan = makechan() 214 | 215 | go(wait_two, X, Y, Ret_chan) 216 | 217 | dprint('twt ==== 1') 218 | Y.send(42) 219 | 220 | dprint('twt ==== 2') 221 | X.send(42) 222 | dprint('twt ==== 3') 223 | value = Ret_chan.recv() 224 | dprint('twt ==== 4') 225 | assert value == (2, 42) 226 | 227 | run() 228 | 229 | 230 | def test_async_channel(self): 231 | 232 | @maintask 233 | def main(): 234 | c = makechan(100) 235 | 236 | unblocked_sent = 0 237 | for i in range(100): 238 | c.send(True) 239 | unblocked_sent += 1 240 | 241 | assert unblocked_sent == 100 242 | 243 | unblocked_recv = [] 244 | for i in range(100): 245 | unblocked_recv.append(c.recv()) 246 | 247 | assert len(unblocked_recv) == 100 248 | 249 | run() 250 | 251 | def test_async_with_blocking_channel(self): 252 | 253 | def sender(c): 254 | unblocked_sent = 0 255 | for i in range(10): 256 | c.send(True) 257 | unblocked_sent += 1 258 | 259 | assert unblocked_sent == 10 260 | 261 | c.send(True) 262 | 263 | @maintask 264 | def main(): 265 | c = makechan(10) 266 | 267 | go(sender, c) 268 | unblocked_recv = [] 269 | for i in range(11): 270 | unblocked_recv.append(c.recv()) 271 | 272 | 273 | assert len(unblocked_recv) == 11 274 | 275 | 276 | run() 277 | 278 | def test_multiple_sender(self): 279 | rlist = [] 280 | sent = [] 281 | 282 | def f(c): 283 | c.send("ok") 284 | 285 | def f1(c): 286 | c.send("eof") 287 | 288 | def f2(c): 289 | while True: 290 | data = c.recv() 291 | sent.append(data) 292 | if data == "eof": 293 | return 294 | rlist.append(data) 295 | 296 | @maintask 297 | def main(): 298 | c = makechan() 299 | go(f, c) 300 | go(f1, c) 301 | go(f2, c) 302 | 303 | run() 304 | 305 | assert rlist == ['ok'] 306 | assert len(sent) == 2 307 | assert "eof" in sent 308 | 309 | 310 | def test_select_simple(): 311 | rlist = [] 312 | def fibonacci(c, quit): 313 | x, y = 0, 1 314 | while True: 315 | ret = select(c.if_send(x), quit.if_recv()) 316 | if ret == c.if_send(x): 317 | x, y = y, x+y 318 | elif ret == quit.if_recv(): 319 | return 320 | 321 | @maintask 322 | def main(): 323 | c = makechan() 324 | quit = makechan() 325 | def f(): 326 | for i in range(10): 327 | rlist.append(c.recv()) 328 | print(rlist) 329 | quit.send(0) 330 | 331 | go(f) 332 | fibonacci(c, quit) 333 | 334 | run() 335 | 336 | assert rlist == [0, 1, 1, 2, 3, 5, 8, 13, 21, 34] 337 | 338 | def test_select_buffer(): 339 | rlist = [] 340 | def test(c, quit): 341 | x = 0 342 | while True: 343 | ret = select(c.if_send(x), quit.if_recv()) 344 | if ret == c.if_send(x): 345 | x = x + 1 346 | elif ret == quit.if_recv(): 347 | return 348 | 349 | @maintask 350 | def main(): 351 | c = makechan(5, label="c") 352 | quit = makechan(label="quit") 353 | def f(): 
354 | for i in range(5): 355 | v = c.recv() 356 | rlist.append(v) 357 | quit.send(0) 358 | 359 | go(f) 360 | test(c, quit) 361 | run() 362 | 363 | assert rlist == [0, 1, 2, 3, 4] 364 | 365 | def test_select_buffer2(): 366 | rlist = [] 367 | 368 | def test(c): 369 | while True: 370 | ret = select(c.if_recv()) 371 | if ret == c.if_recv(): 372 | 373 | if ret.value == "QUIT": 374 | break 375 | rlist.append(ret.value) 376 | 377 | @maintask 378 | def main(): 379 | c = makechan(5, label="c") 380 | go(test, c) 381 | 382 | for i in range(5): 383 | c.send(i) 384 | 385 | c.send("QUIT") 386 | 387 | run() 388 | assert rlist == [0, 1, 2, 3, 4] 389 | -------------------------------------------------------------------------------- /tests/test_core_timer.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | import time 6 | 7 | from offset import run, go, maintask 8 | from offset.core.context import park 9 | from offset.core import proc 10 | from offset.core.util import nanotime 11 | from offset.core.timer import Timer, sleep 12 | from offset.time import SECOND 13 | 14 | DELTA0 = 0.06 * SECOND 15 | DELTA = 0.06 * SECOND 16 | 17 | 18 | def _wait(): 19 | time.sleep(0.01) 20 | 21 | 22 | def test_simple_timer(): 23 | 24 | def _func(now, t, rlist, g): 25 | rlist.append(now) 26 | g.ready() 27 | 28 | @maintask 29 | def main(): 30 | rlist = [] 31 | period = 0.1 * SECOND 32 | t = Timer(_func, period, args=(rlist, proc.current())) 33 | now = nanotime() 34 | t.start() 35 | park() 36 | delay = rlist[0] 37 | 38 | assert (now + period - DELTA0) <= delay <= (now + period + DELTA), delay 39 | 40 | run() 41 | 42 | 43 | def test_multiple_timer(): 44 | r1 = [] 45 | def f1(now, t, g): 46 | r1.append(now) 47 | g.ready() 48 | 49 | r2 = [] 50 | def f2(now, t): 51 | r2.append(now) 52 | 53 | @maintask 54 | def main(): 55 | T1 = 0.4 * SECOND 56 | T2 = 0.1 * SECOND 57 | t1 = Timer(f1, T1, args=(proc.current(),)) 58 | t2 = Timer(f2, T2) 59 | 60 | now = nanotime() 61 | t1.start() 62 | t2.start() 63 | 64 | park() 65 | 66 | assert r1[0] > r2[0] 67 | 68 | assert (now + T1 - DELTA0) <= r1[0] <= (now + T1 + DELTA), r1[0] 69 | assert (now + T2 - DELTA0) <= r2[0] <= (now + T2 + DELTA), r2[0] 70 | 71 | run() 72 | 73 | 74 | def test_repeat(): 75 | r = [] 76 | def f(now, t, g): 77 | if len(r) == 3: 78 | t.stop() 79 | g.ready() 80 | else: 81 | r.append(now) 82 | 83 | 84 | @maintask 85 | def main(): 86 | t = Timer(f, 0.01 * SECOND, 0.01 * SECOND, args=(proc.current(),)) 87 | t.start() 88 | park() 89 | 90 | assert len(r) == 3 91 | assert r[2] > r[1] 92 | assert r[1] > r[0] 93 | 94 | run() 95 | 96 | 97 | def test_sleep(): 98 | @maintask 99 | def main(): 100 | PERIOD = 0.1 * SECOND 101 | start = nanotime() 102 | sleep(PERIOD) 103 | diff = nanotime() - start 104 | assert PERIOD - DELTA0 <= diff <= PERIOD + DELTA 105 | 106 | run() 107 | 108 | 109 | def test_multiple_sleep(): 110 | T1 = 0.4 * SECOND 111 | T2 = 0.1 * SECOND 112 | 113 | r1 = [] 114 | def f1(): 115 | sleep(T1) 116 | r1.append(nanotime()) 117 | 118 | r2 = [] 119 | def f2(): 120 | sleep(T2) 121 | r2.append(nanotime()) 122 | 123 | go(f1) 124 | go(f2) 125 | now = nanotime() 126 | run() 127 | assert r1[0] > r2[0] 128 | assert (now + T1 - DELTA0) <= r1[0] <= (now + T1 + DELTA), r1[0] 129 | assert (now + T2 - DELTA0) <= r2[0] <= (now + T2 + DELTA), r2[0] 130 | -------------------------------------------------------------------------------- 
/tests/test_kernel.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 4 | 5 | from __future__ import absolute_import 6 | 7 | import time 8 | from py.test import skip 9 | 10 | from offset import makechan, go, gosched, run, maintask 11 | 12 | SHOW_STRANGE = False 13 | 14 | 15 | def dprint(txt): 16 | if SHOW_STRANGE: 17 | print(txt) 18 | 19 | def test_simple(): 20 | rlist = [] 21 | 22 | def f(): 23 | rlist.append('f') 24 | 25 | def g(): 26 | rlist.append('g') 27 | gosched() 28 | 29 | @maintask 30 | def main(): 31 | rlist.append('m') 32 | cg = go(g) 33 | cf = go(f) 34 | gosched() 35 | rlist.append('m') 36 | 37 | run() 38 | 39 | assert rlist == 'm g f m'.split() 40 | 41 | def test_run(): 42 | output = [] 43 | def print_(*args): 44 | output.append(args) 45 | 46 | def f(i): 47 | print_(i) 48 | 49 | go(f, 1) 50 | go(f, 2) 51 | run() 52 | 53 | assert output == [(1,), (2,)] 54 | 55 | 56 | def test_run_class(): 57 | output = [] 58 | def print_(*args): 59 | output.append(args) 60 | 61 | class Test(object): 62 | 63 | def __call__(self, i): 64 | print_(i) 65 | 66 | t = Test() 67 | 68 | go(t, 1) 69 | go(t, 2) 70 | run() 71 | 72 | assert output == [(1,), (2,)] 73 | 74 | 75 | # tests inspired from simple core.com examples 76 | 77 | def test_construction(): 78 | output = [] 79 | def print_(*args): 80 | output.append(args) 81 | 82 | def aCallable(value): 83 | print_("aCallable:", value) 84 | 85 | go(aCallable, 'Inline using setup') 86 | 87 | run() 88 | assert output == [("aCallable:", 'Inline using setup')] 89 | 90 | 91 | del output[:] 92 | go(aCallable, 'Inline using ()') 93 | 94 | run() 95 | assert output == [("aCallable:", 'Inline using ()')] 96 | 97 | def test_run(): 98 | output = [] 99 | def print_(*args): 100 | output.append(args) 101 | 102 | def f(i): 103 | print_(i) 104 | 105 | @maintask 106 | def main(): 107 | go(f, 1) 108 | go(f, 2) 109 | 110 | run() 111 | 112 | assert output == [(1,), (2,)] 113 | 114 | def test_schedule(): 115 | output = [] 116 | def print_(*args): 117 | output.append(args) 118 | 119 | def f(i): 120 | print_(i) 121 | 122 | go(f, 1) 123 | go(f, 2) 124 | gosched() 125 | 126 | assert output == [(1,), (2,)] 127 | 128 | 129 | def test_cooperative(): 130 | output = [] 131 | def print_(*args): 132 | output.append(args) 133 | 134 | def Loop(i): 135 | for x in range(3): 136 | gosched() 137 | print_("schedule", i) 138 | 139 | @maintask 140 | def main(): 141 | go(Loop, 1) 142 | go(Loop, 2) 143 | run() 144 | 145 | assert output == [('schedule', 1), ('schedule', 2), 146 | ('schedule', 1), ('schedule', 2), 147 | ('schedule', 1), ('schedule', 2),] 148 | -------------------------------------------------------------------------------- /tests/test_sync.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 - 2 | # 3 | # This file is part of offset. See the NOTICE for more information. 
4 | 5 | from offset import go, run, maintask, makechan, select, default, PanicError 6 | 7 | from pytest import raises 8 | 9 | from offset.sync.atomic import AtomicLong 10 | from offset.sync.cond import Cond 11 | from offset.sync.mutex import Mutex 12 | from offset.sync.once import Once 13 | from offset.sync.rwmutex import RWMutex 14 | from offset.sync.waitgroup import WaitGroup 15 | 16 | 17 | def test_Mutex(): 18 | 19 | def hammer_mutex(m, loops, cdone): 20 | for i in range(loops): 21 | m.lock() 22 | m.unlock() 23 | 24 | cdone.send(True) 25 | 26 | @maintask 27 | def main(): 28 | m = Mutex() 29 | c = makechan() 30 | for i in range(10): 31 | go(hammer_mutex, m, 1000, c) 32 | 33 | for i in range(10): 34 | c.recv() 35 | 36 | run() 37 | 38 | def test_Mutex(): 39 | 40 | def hammer_mutex(m, loops, cdone): 41 | for i in range(loops): 42 | m.lock() 43 | m.unlock() 44 | 45 | cdone.send(True) 46 | 47 | @maintask 48 | def main(): 49 | m = Mutex() 50 | c = makechan() 51 | for i in range(10): 52 | go(hammer_mutex, m, 1000, c) 53 | 54 | for i in range(10): 55 | c.recv() 56 | 57 | run() 58 | 59 | def test_Once(): 60 | 61 | def f(o): 62 | o += 1 63 | 64 | def test(once, o, c): 65 | once.do(f)(o) 66 | assert o == 1 67 | c.send(True) 68 | 69 | @maintask 70 | def main(): 71 | c = makechan() 72 | once = Once() 73 | o = AtomicLong(0) 74 | for i in range(10): 75 | go(test, once, o, c) 76 | 77 | for i in range(10): 78 | c.recv() 79 | 80 | assert o == 1 81 | 82 | run() 83 | 84 | def test_RWMutex_concurrent_readers(): 85 | 86 | def reader(m, clocked, cunlock, cdone): 87 | m.rlock() 88 | clocked.send(True) 89 | cunlock.recv() 90 | m.runlock() 91 | cdone.send(True) 92 | 93 | def test_readers(num): 94 | m = RWMutex() 95 | clocked = makechan() 96 | cunlock = makechan() 97 | cdone = makechan() 98 | 99 | for i in range(num): 100 | go(reader, m, clocked, cunlock, cdone) 101 | 102 | for i in range(num): 103 | clocked.recv() 104 | 105 | for i in range(num): 106 | cunlock.send(True) 107 | 108 | for i in range(num): 109 | cdone.recv() 110 | 111 | @maintask 112 | def main(): 113 | test_readers(1) 114 | test_readers(3) 115 | test_readers(4) 116 | 117 | run() 118 | 119 | def test_RWMutex(): 120 | 121 | activity = AtomicLong(0) 122 | 123 | def reader(rwm, num_iterations, activity, cdone): 124 | print("reader") 125 | for i in range(num_iterations): 126 | rwm.rlock() 127 | n = activity.add(1) 128 | assert n >= 1 and n < 10000, "rlock %d" % n 129 | 130 | for i in range(100): 131 | continue 132 | 133 | activity.add(-1) 134 | rwm.runlock() 135 | cdone.send(True) 136 | 137 | def writer(rwm, num_iterations, activity, cdone): 138 | for i in range(num_iterations): 139 | rwm.lock() 140 | n = activity.add(10000) 141 | assert n == 10000, "wlock %d" % n 142 | for i in range(100): 143 | continue 144 | activity.add(-10000) 145 | rwm.unlock() 146 | cdone.send(True) 147 | 148 | def hammer_rwmutex(num_readers, num_iterations): 149 | activity = AtomicLong(0) 150 | rwm = RWMutex() 151 | cdone = makechan() 152 | 153 | go(writer, rwm, num_iterations, activity, cdone) 154 | 155 | for i in range(int(num_readers / 2)): 156 | go(reader, rwm, num_iterations, activity, cdone) 157 | 158 | go(writer, rwm, num_iterations, activity, cdone) 159 | 160 | for i in range(num_readers): 161 | go(reader, rwm, num_iterations, activity, cdone) 162 | 163 | for i in range(2 + num_readers): 164 | cdone.recv() 165 | 166 | @maintask 167 | def main(): 168 | n = 1000 169 | hammer_rwmutex(1, n) 170 | hammer_rwmutex(3, n) 171 | hammer_rwmutex(10, n) 172 | 173 | run() 174 | 175 | 
def test_RLocker(): 176 | wl = RWMutex() 177 | rl = wl.RLocker() 178 | wlocked = makechan(1) 179 | rlocked = makechan(1) 180 | 181 | n = 10 182 | 183 | def test(): 184 | for i in range(n): 185 | rl.lock() 186 | rl.lock() 187 | rlocked.send(True) 188 | wl.lock() 189 | wlocked.send(True) 190 | 191 | @maintask 192 | def main(): 193 | go(test) 194 | for i in range(n): 195 | rlocked.recv() 196 | rl.unlock() 197 | ret = select(wlocked.if_recv(), default) 198 | assert ret != wlocked.if_recv(), "RLocker didn't read-lock it" 199 | rl.unlock() 200 | wlocked.recv() 201 | ret = select(rlocked.if_recv(), default) 202 | assert ret != rlocked.if_recv(), "RLocker didn't respect the write lock" 203 | wl.unlock() 204 | 205 | run() 206 | 207 | def test_Cond_signal(): 208 | 209 | def test(m, c, running, awake): 210 | with m: 211 | running.send(True) 212 | c.wait() 213 | awake.send(True) 214 | 215 | 216 | @maintask 217 | def main(): 218 | m = Mutex() 219 | c = Cond(m) 220 | n = 2 221 | running = makechan(n) 222 | awake = makechan(n) 223 | 224 | for i in range(n): 225 | go(test, m, c, running, awake) 226 | 227 | for i in range(n): 228 | running.recv() 229 | 230 | while n > 0: 231 | ret = select(awake.if_recv(), default) 232 | assert ret != awake.if_recv(), "coroutine not asleep" 233 | 234 | m.lock() 235 | c.signal() 236 | awake.recv() 237 | ret = select(awake.if_recv(), default) 238 | assert ret != awake.if_recv(), "too many coroutines awakes" 239 | n -= 1 240 | c.signal() 241 | 242 | run() 243 | 244 | def test_Cond_signal_generation(): 245 | 246 | def test(i, m, c, running, awake): 247 | m.lock() 248 | running.send(True) 249 | c.wait() 250 | awake.send(i) 251 | m.unlock() 252 | 253 | @maintask 254 | def main(): 255 | m = Mutex() 256 | c = Cond(m) 257 | n = 100 258 | running = makechan(n) 259 | awake = makechan(n) 260 | 261 | for i in range(n): 262 | go(test, i, m, c, running, awake) 263 | 264 | if i > 0: 265 | a = awake.recv() 266 | assert a == (i - 1), "wrong coroutine woke up: want %d, got %d" % (i-1, a) 267 | 268 | running.recv() 269 | with m: 270 | c.signal() 271 | 272 | run() 273 | 274 | def test_Cond_broadcast(): 275 | m = Mutex() 276 | c = Cond(m) 277 | n = 200 278 | running = makechan(n) 279 | awake = makechan(n) 280 | exit = False 281 | 282 | def test(i): 283 | m.lock() 284 | while not exit: 285 | running.send(i) 286 | c.wait() 287 | awake.send(i) 288 | m.unlock() 289 | 290 | @maintask 291 | def main(): 292 | for i in range(n): 293 | go(test, i) 294 | 295 | for i in range(n): 296 | for i in range(n): 297 | running.recv() 298 | if i == n -1: 299 | m.lock() 300 | exit = True 301 | m.unlock() 302 | 303 | ret = select(awake.if_recv(), default) 304 | assert ret != awake.if_recv(), "coroutine not asleep" 305 | 306 | m.lock() 307 | c.broadcast() 308 | m.unlock() 309 | 310 | seen = {} 311 | for i in range(n): 312 | g = awake.recv() 313 | assert g not in seen, "coroutine woke up twice" 314 | seen[g] = True 315 | 316 | ret = select(running.if_recv(), default) 317 | assert ret != running.if_recv(), "coroutine did not exist" 318 | c.broadcast() 319 | 320 | run() 321 | 322 | def test_WaitGroup(): 323 | 324 | def test_waitgroup(wg1, wg2): 325 | n = 16 326 | wg1.add(n) 327 | wg2.add(n) 328 | exited = makechan(n) 329 | 330 | def f(i): 331 | wg1.done() 332 | wg2.wait() 333 | exited.send(True) 334 | 335 | for i in range(n): 336 | go(f, i) 337 | 338 | wg1.wait() 339 | 340 | for i in range(n): 341 | ret = select(exited.if_recv(), default) 342 | assert ret != exited.if_recv(), "WaitGroup released group too soon" 343 | 
wg2.done() 344 | 345 | for i in range(16): 346 | exited.recv() 347 | 348 | @maintask 349 | def main(): 350 | wg1 = WaitGroup() 351 | wg2 = WaitGroup() 352 | for i in range(8): 353 | test_waitgroup(wg1, wg2) 354 | 355 | run() 356 | 357 | def test_WaitGroup_raises(): 358 | 359 | @maintask 360 | def main(): 361 | wg = WaitGroup() 362 | with raises(PanicError): 363 | wg.add(1) 364 | wg.done() 365 | wg.done() 366 | run() 367 | -------------------------------------------------------------------------------- /tests/test_time.py: -------------------------------------------------------------------------------- 1 | 2 | from offset import go, run, maintask, makechan 3 | from offset.time import (SECOND, sleep, Ticker, Tick, nanotime, Timer, After, 4 | AfterFunc) 5 | 6 | DELTA0 = 0.06 * SECOND 7 | DELTA = 0.06 * SECOND 8 | 9 | 10 | def test_sleep(): 11 | @maintask 12 | def main(): 13 | PERIOD = 0.1 * SECOND 14 | 15 | start = nanotime() 16 | sleep(PERIOD) 17 | diff = nanotime() - start 18 | assert PERIOD - DELTA0 <= diff <= PERIOD + DELTA 19 | 20 | run() 21 | 22 | def test_Ticker(): 23 | rlist = [] 24 | 25 | @maintask 26 | def main(): 27 | ticker = Ticker(0.1 * SECOND) 28 | for i in range(3): 29 | rlist.append(ticker.c.recv()) 30 | 31 | ticker.stop() 32 | 33 | run() 34 | 35 | assert len(rlist) == 3 36 | 37 | def test_Tick(): 38 | rlist = [] 39 | 40 | @maintask 41 | def main(): 42 | ticker_chan = Tick(0.1 * SECOND) 43 | for i in range(3): 44 | rlist.append(ticker_chan.recv()) 45 | 46 | run() 47 | 48 | assert len(rlist) == 3 49 | 50 | 51 | def test_Timer(): 52 | rlist = [] 53 | 54 | @maintask 55 | def main(): 56 | PERIOD = 0.1 * SECOND 57 | now = nanotime() 58 | t = Timer(PERIOD) 59 | rlist.append(t.c.recv()) 60 | 61 | diff = nanotime() - rlist[0] 62 | assert PERIOD - DELTA0 <= diff <= PERIOD + DELTA 63 | 64 | run() 65 | 66 | def test_Timer_reset(): 67 | rlist = [] 68 | 69 | @maintask 70 | def main(): 71 | PERIOD = 10 * SECOND 72 | t = Timer(PERIOD) 73 | now = nanotime() 74 | t.reset(0.1 * SECOND) 75 | 76 | rlist.append(t.c.recv()) 77 | 78 | diff = nanotime() - rlist[0] 79 | assert PERIOD - DELTA0 <= diff <= PERIOD + DELTA 80 | 81 | run() 82 | 83 | 84 | def test_After(): 85 | rlist = [] 86 | 87 | @maintask 88 | def main(): 89 | PERIOD = 0.1 * SECOND 90 | now = nanotime() 91 | c = After(PERIOD) 92 | rlist.append(c.recv()) 93 | 94 | diff = nanotime() - rlist[0] 95 | assert PERIOD - DELTA0 <= diff <= PERIOD + DELTA 96 | 97 | run() 98 | 99 | def test_AfterFunc(): 100 | rlist = [] 101 | 102 | @maintask 103 | def main(): 104 | i = 10 105 | c = makechan() 106 | 107 | def f(): 108 | i -= 1 109 | if i >= 0: 110 | AfterFunc(0, f) 111 | sleep(1 * SECOND) 112 | else: 113 | c.send(True) 114 | 115 | AfterFunc(0, f) 116 | c.recv() 117 | 118 | assert i == 0 119 | 120 | run() 121 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | # Tox (http://tox.testrun.org/) is a tool for running tests 2 | # in multiple virtualenvs. This configuration file will run the 3 | # test suite on all supported python versions. To use it, "pip install tox" 4 | # and then run "tox" from this directory. 5 | [tox] 6 | envlist = py27,pypy,py32,py33 7 | 8 | [testenv] 9 | deps = 10 | pytest-cov 11 | pretend 12 | commands = py.test --cov=offset/ --cov=tests/ 13 | --------------------------------------------------------------------------------
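As the comment block at the top of tox.ini notes, the intended workflow is to install tox and run it from the project root. A minimal sketch, assuming tox and at least one interpreter from the envlist are installed locally (the environment name below is only an example):

    pip install tox
    tox -e py33    # any of py27, pypy, py32, py33 from the envlist works the same way

tox's -e flag limits the run to a single environment; each environment installs the [testenv] deps (pytest-cov, pretend) into its own virtualenv and then runs the declared command, py.test --cov=offset/ --cov=tests/.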