├── .gitignore ├── CHANGES ├── CONTRIBUTING.md ├── LICENSE ├── MANIFEST.in ├── README.rst ├── curio ├── __init__.py ├── __main__.py ├── channel.py ├── debug.py ├── errors.py ├── file.py ├── io.py ├── kernel.py ├── meta.py ├── monitor.py ├── network.py ├── queue.py ├── sched.py ├── socket.py ├── ssl.py ├── sync.py ├── task.py ├── thread.py ├── time.py ├── timequeue.py ├── traps.py └── workers.py ├── docs ├── Makefile ├── _static │ ├── curiolayer.png │ ├── guiserv.png │ └── layers.png ├── conf.py ├── customization.py ├── howto.rst ├── index.rst ├── make.bat ├── reference.rst └── tutorial.rst ├── examples ├── bench │ ├── asyncecho.py │ ├── asyncproto.py │ ├── asyncsslecho.py │ ├── asyncstream.py │ ├── client.c │ ├── client.py │ ├── curioecho.py │ ├── curiosslecho.py │ ├── curiosslstream.py │ ├── curiostream.py │ ├── echoclient.py │ ├── gevecho.py │ ├── gevsslecho.py │ ├── nodeecho.js │ ├── process_perf.py │ ├── ssl_test.crt │ ├── ssl_test_rsa │ ├── sslclient.py │ ├── subproc_perf.py │ ├── thread_perf.py │ ├── threadecho.py │ ├── threadsslecho.py │ ├── torecho.py │ ├── trioecho.py │ ├── twistecho.py │ ├── twistsslecho.py │ └── uvclient.py ├── boundsema.py ├── chat.py ├── curio_subprocess.py ├── curio_zmq.py ├── dualserv.py ├── echoserv.py ├── echoserv2.py ├── echoserv3.py ├── fibserve.py ├── guiserv.py ├── guiserv2.py ├── happy.py ├── pinger.py ├── prodcons.py ├── promise.py ├── pytest_plugin.py ├── signal_handling.py ├── ssl_conn.py ├── ssl_echo.py ├── ssl_echo_client.py ├── ssl_http.py ├── ssl_test.crt ├── ssl_test_readme.txt ├── ssl_test_rsa ├── udp_echo.py ├── udp_echo_client.py ├── unix_echo.py ├── unix_echo_client.py ├── ws_server.py ├── zmq_puller.py ├── zmq_pusher.py ├── zmq_rpcclient.py └── zmq_rpcserv.py ├── setup.cfg ├── setup.py └── tests ├── child.py ├── conftest.py ├── ichild.py ├── test_activation.py ├── test_asyncgen.py ├── test_channel.py ├── test_file.py ├── test_io.py ├── test_kernel.py ├── test_meta.py ├── test_network.py ├── test_queue.py ├── test_socket.py ├── test_sync.py ├── test_task.py ├── test_thread.py ├── test_timequeue.py ├── test_workers.py └── testdata.txt /.gitignore: -------------------------------------------------------------------------------- 1 | docs/_build 2 | __pycache__/ 3 | .vscode 4 | venv* 5 | *.egg-info 6 | 7 | benchmarks/curio/ 8 | benchmarks/env/ 9 | benchmarks/results/ 10 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | Contributing to Curio 2 | ===================== 3 | 4 | Although Curio is made available as open-source software, it is not 5 | developed as a community project. Thus, pull requests are not 6 | accepted except by invitation. However, if you have found a bug, a 7 | typo, or have an idea for improvement, please submit an issue instead. 8 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Curio 2 | 3 | Copyright (C) 2015-2020 4 | David Beazley (Dabeaz LLC, https://www.dabeaz.com) 5 | All rights reserved. 6 | 7 | Redistribution and use in source and binary forms, with or without 8 | modification, are permitted provided that the following conditions are 9 | met: 10 | 11 | * Redistributions of source code must retain the above copyright notice, 12 | this list of conditions and the following disclaimer. 
13 | * Redistributions in binary form must reproduce the above copyright notice, 14 | this list of conditions and the following disclaimer in the documentation 15 | and/or other materials provided with the distribution. 16 | * Neither the name of the David Beazley or Dabeaz LLC may be used to 17 | endorse or promote products derived from this software without 18 | specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include examples * 2 | recursive-include docs * 3 | recursive-include tests * 4 | recursive-exclude __pycache__ *.pyc *.pyo 5 | include README.rst 6 | include LICENSE 7 | include CHANGES 8 | include CONTRIBUTING.md 9 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Curio 2 | ===== 3 | 4 | Curio is a coroutine-based library for concurrent Python systems 5 | programming using async/await. It provides standard programming 6 | abstractions such as tasks, sockets, files, locks, and queues as 7 | well as some advanced features such as support for structured 8 | concurrency. It works on Unix and Windows and has zero dependencies. 9 | You'll find it to be familiar, small, fast, and fun. 10 | 11 | Important Notice: October 25, 2022 12 | ---------------------------------- 13 | The Curio project is no longer making package releases. I'm more than 14 | happy to accept bug reports and may continue to work on it from time 15 | to time as the mood strikes. If you want the absolute latest version, you 16 | should vendor the source code from here. Curio has no dependencies 17 | other than the Python standard library. --Dave 18 | 19 | Curio is Different 20 | ------------------ 21 | One of the most important ideas from software architecture is the 22 | "separation of concerns." This can take many forms such as utilizing 23 | abstraction layers, object oriented programming, aspects, higher-order 24 | functions, and so forth. However, another effective form of it exists 25 | in the idea of separating execution environments. For example, "user 26 | mode" versus "kernel mode" in operating systems. This is the 27 | underlying idea in Curio, but applied to "asynchronous" versus 28 | "synchronous" execution. 29 | 30 | A fundamental problem with asynchronous code is that it involves a 31 | completely different evaluation model that doesn't compose well with 32 | ordinary applications or with other approaches to concurrency such as 33 | thread programing. 
Although the addition of "async/await" to Python 34 | helps clarify such code, "async" libraries still tend to be a confused 35 | mess of functionality that mix asynchronous and synchronous 36 | functionality together in the same environment--often bolting it all 37 | together with an assortment of hacks that try to sort out all of 38 | associated API confusion. 39 | 40 | Curio strictly separates asynchronous code from synchronous code. 41 | Specifically, *all* functionality related to the asynchronous 42 | environment utilizes "async/await" features and syntax--without 43 | exception. Moreover, interactions between async and sync code is 44 | carefully managed through a small set of simple mechanisms such as 45 | events and queues. As a result, Curio is small, fast, and 46 | significantly easier to reason about. 47 | 48 | A Simple Example 49 | ----------------- 50 | 51 | Here is a concurrent TCP echo server directly implemented using sockets: 52 | 53 | .. code:: python 54 | 55 | # echoserv.py 56 | 57 | from curio import run, spawn 58 | from curio.socket import * 59 | 60 | async def echo_server(address): 61 | sock = socket(AF_INET, SOCK_STREAM) 62 | sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) 63 | sock.bind(address) 64 | sock.listen(5) 65 | print('Server listening at', address) 66 | async with sock: 67 | while True: 68 | client, addr = await sock.accept() 69 | await spawn(echo_client, client, addr, daemon=True) 70 | 71 | async def echo_client(client, addr): 72 | print('Connection from', addr) 73 | async with client: 74 | while True: 75 | data = await client.recv(100000) 76 | if not data: 77 | break 78 | await client.sendall(data) 79 | print('Connection closed') 80 | 81 | if __name__ == '__main__': 82 | run(echo_server, ('',25000)) 83 | 84 | If you've done network programming with threads, it looks almost 85 | identical. Moreover, it can handle thousands of clients even though no 86 | threads are being used inside. 87 | 88 | Core Features 89 | ------------- 90 | 91 | Curio supports standard synchronization primitives (events, locks, 92 | recursive locks, semaphores, and condition variables), queues, 93 | subprocesses, as well as running tasks in threads and processes. The 94 | task model fully supports cancellation, task groups, timeouts, 95 | monitoring, and other features critical to writing reliable code. 96 | 97 | Read the `official documentation `_ for 98 | more in-depth coverage. The `tutorial 99 | `_ is a good 100 | starting point. The `howto 101 | `_ describes how to 102 | carry out common programming tasks. 103 | 104 | Talks Related to Curio 105 | ---------------------- 106 | 107 | Concepts related to Curio's design and general issues related to async 108 | programming have been described by Curio's creator, `David Beazley `_, in 109 | various conference talks and tutorials: 110 | 111 | * `Build Your Own Async `_, Workshop talk by David Beazley at PyCon India, 2019. 112 | 113 | * `The Other Async (Threads + Asyncio = Love) `_, Keynote talk by David Beazley at PyGotham, 2017. 114 | 115 | * `Fear and Awaiting in Async `_, Keynote talk by David Beazley at PyOhio 2016. 116 | 117 | * `Topics of Interest (Async) `_, Keynote talk by David Beazley at Python Brasil 2015. 118 | 119 | * `Python Concurrency from the Ground Up (LIVE) `_, talk by David Beazley at PyCon 2015. 
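Tasks, Timeouts, and Cancellation
---------------------------------

To make the task-model features listed under "Core Features" concrete, here is a
minimal sketch combining task groups, timeouts, and cancellation. It is not taken
from the Curio documentation; the ``countdown`` and ``friend`` coroutines are
invented purely for illustration:

.. code:: python

    from curio import run, sleep, TaskGroup, timeout_after, TaskTimeout

    async def countdown(n):
        while n > 0:
            print('T-minus', n)
            await sleep(1)
            n -= 1

    async def friend(name):
        print('Hi, my name is', name)
        await sleep(1000)          # Busy with something else...
        print('Goodbye', name)

    async def main():
        try:
            async with timeout_after(5):
                async with TaskGroup() as g:
                    await g.spawn(countdown, 3)
                    await g.spawn(friend, 'Huey')
                    # Leaving this block normally waits for both tasks
        except TaskTimeout:
            # The expired timeout cancelled whatever was still running in the group
            print('Time expired -- remaining tasks were cancelled')

    if __name__ == '__main__':
        run(main)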
120 | 121 | Questions and Answers 122 | --------------------- 123 | 124 | **Q: What is the point of the Curio project?** 125 | 126 | A: Curio is async programming, reimagined as something smaller, faster, and easier 127 | to reason about. It is meant to be both educational and practical. 128 | 129 | **Q: Is Curio implemented using asyncio?** 130 | 131 | A: No. Curio is a standalone library directly created from low-level I/O primitives. 132 | 133 | **Q: Is Curio meant to be a clone of asyncio?** 134 | 135 | A: No. Although Curio provides a significant amount of overlapping 136 | functionality, the API is different. Compatibility with other 137 | libraries is not a goal. 138 | 139 | **Q: Is Curio meant to be compatible with other async libraries?** 140 | 141 | A: No. Curio is a stand-alone project that emphasizes a certain 142 | software architecture based on separation of environments. Other 143 | libraries have largely ignored this concept, preferring to simply 144 | provide variations on the existing approach found in asyncio. 145 | 146 | **Q: Can Curio interoperate with other event loops?** 147 | 148 | A: It depends on what you mean by the word "interoperate." Curio's 149 | preferred mechanism of communication with the external world is a 150 | queue. It is possible to communicate between Curio, threads, and 151 | other event loops using queues. 152 | 153 | **Q: How fast is Curio?** 154 | 155 | A: Curio's primary goal is to be an async library that is minimal and 156 | understandable. Performance is not the primary concern. That said, in 157 | rough benchmarking of a simple echo server, Curio is more than twice 158 | as fast as comparable code using coroutines in ``asyncio`` or 159 | ``trio``. This was last measured on OS-X using Python 3.9. Keep in 160 | mind there is a lot more to overall application performance than the 161 | performance of a simple echo server, so your mileage might 162 | vary. However, as a runtime environment, Curio doesn't introduce a lot of 163 | extra overhead. See the ``examples/bench`` directory for various 164 | testing programs. 165 | 166 | **Q: What is the future of Curio?** 167 | 168 | A: Curio should be viewed as a library of basic programming 169 | primitives. At this time, it is considered to be 170 | feature-complete--meaning that it is not expected to sprout many new 171 | capabilities. It may be updated from time to time to fix bugs or 172 | support new versions of Python. 173 | 174 | **Q: Can I contribute?** 175 | 176 | A: Curio is not a community-based project seeking developers 177 | or maintainers. However, having it work reliably is important. If you've 178 | found a bug or have an idea for making it better, please 179 | file an `issue `_. 180 | 181 | Contributors 182 | ------------ 183 | 184 | The following people contributed ideas to early stages of the Curio project: 185 | Brett Cannon, Nathaniel Smith, Alexander Zhukov, Laura Dickinson, and Sandeep Gupta. 186 | 187 | Who 188 | --- 189 | Curio is the creation of David Beazley (@dabeaz) who is also 190 | responsible for its maintenance. http://www.dabeaz.com 191 | 192 | P.S. 193 | ---- 194 | If you want to learn more about concurrent programming in general, you should 195 | come take a `course `_! 196 | 197 | .. |--| unicode:: U+2013 .. en dash 198 | .. |---| unicode:: U+2014 ..
em dash, trimming surrounding whitespace 199 | :trim: 200 | 201 | 202 | 203 | -------------------------------------------------------------------------------- /curio/__init__.py: -------------------------------------------------------------------------------- 1 | # curio/__init__.py 2 | 3 | __version__ = '1.6' 4 | 5 | from .errors import * 6 | from .queue import * 7 | from .task import * 8 | from .time import * 9 | from .kernel import * 10 | from .sync import * 11 | from .workers import * 12 | from .network import * 13 | from .file import * 14 | from .channel import * 15 | from .thread import * 16 | 17 | __all__ = [*errors.__all__, 18 | *queue.__all__, 19 | *task.__all__, 20 | *time.__all__, 21 | *kernel.__all__, 22 | *sync.__all__, 23 | *workers.__all__, 24 | *network.__all__, 25 | *file.__all__, 26 | *channel.__all__, 27 | *thread.__all__, 28 | ] 29 | -------------------------------------------------------------------------------- /curio/__main__.py: -------------------------------------------------------------------------------- 1 | import ast 2 | import curio 3 | import curio.monitor 4 | import code 5 | import inspect 6 | import sys 7 | import types 8 | import warnings 9 | import threading 10 | import signal 11 | import os 12 | 13 | assert (sys.version_info.major >= 3 and sys.version_info.minor >= 8), "console requires Python 3.8+" 14 | 15 | class CurioIOInteractiveConsole(code.InteractiveConsole): 16 | 17 | def __init__(self, locals): 18 | super().__init__(locals) 19 | self.compile.compiler.flags |= ast.PyCF_ALLOW_TOP_LEVEL_AWAIT 20 | self.requests = curio.UniversalQueue() 21 | self.response = curio.UniversalQueue() 22 | 23 | def runcode(self, code): 24 | # This coroutine is handed from the thread running the REPL to the 25 | # task runner in the main thread. 26 | async def run_it(): 27 | func = types.FunctionType(code, self.locals) 28 | try: 29 | # We restore the default REPL signal handler for running normal code 30 | hand = signal.signal(signal.SIGINT, signal.default_int_handler) 31 | try: 32 | coro = func() 33 | finally: 34 | signal.signal(signal.SIGINT, hand) 35 | except BaseException as ex: 36 | await self.response.put((None, ex)) 37 | return 38 | if not inspect.iscoroutine(coro): 39 | await self.response.put((coro, None)) 40 | return 41 | 42 | # For a coroutine... We're going to try and do some magic to intercept 43 | # Control-C in an Event/Task. 44 | async def watch_ctrl_c(evt, repl_task): 45 | await evt.wait() 46 | await repl_task.cancel() 47 | evt = curio.UniversalEvent() 48 | try: 49 | hand = signal.signal(signal.SIGINT, lambda signo, frame: evt.set()) 50 | repl_task = await curio.spawn(coro) 51 | watch_task = await curio.spawn(watch_ctrl_c, evt, repl_task) 52 | try: 53 | result = await repl_task.join() 54 | response = (result, None) 55 | except SystemExit: 56 | raise 57 | except BaseException as e: 58 | await repl_task.wait() 59 | response = (None, e.__cause__) 60 | await watch_task.cancel() 61 | finally: 62 | signal.signal(signal.SIGINT, hand) 63 | await self.response.put(response) 64 | 65 | self.requests.put(run_it()) 66 | # Get the result here... 
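# The REPL itself runs in a daemon thread while the Curio kernel runs
# runmain() in the main thread. run_it() was just placed on the requests
# UniversalQueue; the synchronous get() below blocks this REPL thread until
# runmain() has awaited the coroutine and put a (result, exception) pair on
# the response queue.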
67 | result, exc = self.response.get() 68 | if exc is not None: 69 | try: 70 | raise exc 71 | except BaseException: 72 | self.showtraceback() 73 | else: 74 | return result 75 | 76 | # Task that runs in the main thread, executing input fed to it from above 77 | async def runmain(self): 78 | try: 79 | hand = signal.signal(signal.SIGINT, signal.SIG_IGN) 80 | while True: 81 | coro = await self.requests.get() 82 | if coro is None: 83 | break 84 | await coro 85 | finally: 86 | signal.signal(signal.SIGINT, hand) 87 | 88 | def run_repl(console): 89 | try: 90 | banner = ( 91 | f'curio REPL {sys.version} on {sys.platform}\n' 92 | f'Use "await" directly instead of "curio.run()".\n' 93 | f'Type "help", "copyright", "credits" or "license" ' 94 | f'for more information.\n' 95 | f'{getattr(sys, "ps1", ">>> ")}import curio' 96 | ) 97 | console.interact( 98 | banner=banner, 99 | exitmsg='exiting curio REPL...') 100 | finally: 101 | warnings.filterwarnings( 102 | 'ignore', 103 | message=r'^coroutine .* was never awaited$', 104 | category=RuntimeWarning) 105 | console.requests.put(None) 106 | 107 | if __name__ == '__main__': 108 | repl_locals = { 'curio': curio, 109 | 'ps': curio.monitor.ps, 110 | 'where': curio.monitor.where, 111 | } 112 | for key in {'__name__', '__package__', 113 | '__loader__', '__spec__', 114 | '__builtins__', '__file__'}: 115 | repl_locals[key] = locals()[key] 116 | 117 | console = CurioIOInteractiveConsole(repl_locals) 118 | threading.Thread(target=run_repl, args=[console], daemon=True).start() 119 | curio.run(console.runmain) 120 | -------------------------------------------------------------------------------- /curio/debug.py: -------------------------------------------------------------------------------- 1 | # curio/debug.py 2 | # 3 | # Task debugging tools 4 | 5 | __all__ = [ 'longblock', 'schedtrace', 'traptrace', 'logcrash' ] 6 | 7 | import time 8 | import logging 9 | log = logging.getLogger(__name__) 10 | 11 | # -- Curio 12 | 13 | from .kernel import Activation 14 | from .errors import TaskCancelled 15 | 16 | class DebugBase(Activation): 17 | def __init__(self, *, level=logging.INFO, log=log, filter=None, **kwargs): 18 | self.level = level 19 | self.filter = filter 20 | self.log = log 21 | 22 | def check_filter(self, task): 23 | if self.filter and task.name not in self.filter: 24 | return False 25 | return True 26 | 27 | class longblock(DebugBase): 28 | ''' 29 | Report warnings for tasks that block the event loop for a long duration. 
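A usage sketch (assuming the kernel's debug option): pass an instance or the
class to the kernel, e.g. run(main, debug=longblock(max_time=0.1)), to log
any task that holds the event loop for more than 0.1 seconds.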
30 | ''' 31 | def __init__(self, *, max_time=0.05, level=logging.WARNING, **kwargs): 32 | super().__init__(level=level, **kwargs) 33 | self.max_time = max_time 34 | 35 | def running(self, task): 36 | if self.check_filter(task): 37 | self.start = time.monotonic() 38 | 39 | def suspended(self, task, trap): 40 | if self.check_filter(task): 41 | duration = time.monotonic() - self.start 42 | if duration > self.max_time: 43 | self.log.log(self.level, '%r ran for %s seconds', task, duration) 44 | 45 | class logcrash(DebugBase): 46 | ''' 47 | Report tasks that crash with an uncaught exception 48 | ''' 49 | def __init__(self, level=logging.ERROR, **kwargs): 50 | super().__init__(level=level, **kwargs) 51 | 52 | def suspended(self, task, trap): 53 | if task.terminated and self.check_filter(task): 54 | if task.exception and not isinstance(task.exception, (StopIteration, TaskCancelled, KeyboardInterrupt, SystemExit)): 55 | self.log.log(self.level, '%r crashed', task, exc_info=task.exception) 56 | 57 | class schedtrace(DebugBase): 58 | ''' 59 | Report when tasks run 60 | ''' 61 | def __init__(self, **kwargs): 62 | super().__init__(**kwargs) 63 | 64 | def created(self, task): 65 | if self.check_filter(task): 66 | self.log.log(self.level, 'CREATE:%f:%r', time.time(), task) 67 | 68 | def running(self, task): 69 | if self.check_filter(task): 70 | self.log.log(self.level, 'RUN:%f:%r', time.time(), task) 71 | 72 | def suspended(self, task, trap): 73 | if self.check_filter(task): 74 | self.log.log(self.level, 'SUSPEND:%f:%r', time.time(), task) 75 | 76 | def terminated(self, task): 77 | if self.check_filter(task): 78 | self.log.log(self.level, 'TERMINATED:%f:%r', time.time(), task) 79 | 80 | class traptrace(schedtrace): 81 | ''' 82 | Report traps executed 83 | ''' 84 | def suspended(self, task, trap): 85 | if self.check_filter(task): 86 | if trap: 87 | self.log.log(self.level, 'TRAP:%r', trap) 88 | super().suspended(task, trap) 89 | 90 | def _create_debuggers(debug): 91 | ''' 92 | Create debugger objects. Called by the kernel to instantiate the objects. 93 | ''' 94 | if debug is True: 95 | # Set a default set of debuggers 96 | debug = [ schedtrace ] 97 | 98 | elif not isinstance(debug, (list, tuple)): 99 | debug = [ debug ] 100 | 101 | # Create instances 102 | debug = [ (d() if (isinstance(d, type) and issubclass(d, DebugBase)) else d) 103 | for d in debug ] 104 | return debug 105 | 106 | 107 | 108 | 109 | 110 | 111 | -------------------------------------------------------------------------------- /curio/errors.py: -------------------------------------------------------------------------------- 1 | # curio/errors.py 2 | # 3 | # Curio specific exceptions 4 | 5 | __all__ = [ 6 | 'CurioError', 'CancelledError', 'TaskTimeout', 'TaskError', 7 | 'SyncIOError', 'ResourceBusy', 8 | 'ReadResourceBusy', 'WriteResourceBusy', 9 | 'TimeoutCancellationError', 'UncaughtTimeoutError', 10 | 'TaskCancelled', 'AsyncOnlyError', 11 | ] 12 | 13 | 14 | class CurioError(Exception): 15 | ''' 16 | Base class for all non-cancellation Curio-related exceptions 17 | ''' 18 | 19 | 20 | class CancelledError(BaseException): 21 | ''' 22 | Base class for all task-cancellation related exceptions 23 | ''' 24 | 25 | 26 | class TaskCancelled(CancelledError): 27 | ''' 28 | Exception raised as a result of a task being directly cancelled. 29 | ''' 30 | 31 | 32 | class TimeoutCancellationError(CancelledError): 33 | ''' 34 | Exception raised if task is being cancelled due to a timeout, but 35 | it's not the inner-most timeout in effect. 
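For example, if an outer timeout_after() expires while execution is blocked
inside a nested timeout_after() block, the inner block receives
TimeoutCancellationError; only the outermost block whose own deadline
expired sees TaskTimeout.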
36 | ''' 37 | 38 | 39 | class TaskTimeout(CancelledError): 40 | ''' 41 | Exception raised if task is cancelled due to timeout. 42 | ''' 43 | 44 | 45 | class UncaughtTimeoutError(CurioError): 46 | ''' 47 | Raised if a TaskTimeout exception escapes a timeout handling 48 | block and is unexpectedly caught by an outer timeout handler. 49 | ''' 50 | 51 | 52 | class TaskError(CurioError): 53 | ''' 54 | Raised if a task launched via spawn() or similar function 55 | terminated due to an exception. This is a chained exception. 56 | The __cause__ attribute contains the actual exception that 57 | occurred in the task. 58 | ''' 59 | 60 | 61 | class SyncIOError(CurioError): 62 | ''' 63 | Raised if a task attempts to perform a synchronous I/O operation 64 | on an object that only supports asynchronous I/O. 65 | ''' 66 | 67 | 68 | class AsyncOnlyError(CurioError): 69 | ''' 70 | Raised by the AWAIT() function if its applied to code not 71 | properly running in an async-thread. 72 | ''' 73 | 74 | 75 | class ResourceBusy(CurioError): 76 | ''' 77 | Raised by I/O related functions if an operation is requested, 78 | but the resource is already busy performing the same operation 79 | on behalf of another task. 80 | ''' 81 | 82 | 83 | class ReadResourceBusy(ResourceBusy): 84 | pass 85 | 86 | 87 | class WriteResourceBusy(ResourceBusy): 88 | pass 89 | 90 | 91 | -------------------------------------------------------------------------------- /curio/file.py: -------------------------------------------------------------------------------- 1 | # curio/file.py 2 | # 3 | # Let's talk about files for a moment. Suppose you're in a coroutine 4 | # and you start using things like the built-in open() function: 5 | # 6 | # async def coro(): 7 | # f = open(somefile, 'r') 8 | # data = f.read() 9 | # ... 10 | # 11 | # Yes, it will "work", but who knows what's actually going to happen 12 | # on that open() call and associated read(). If it's on disk, the 13 | # whole program might lock up for a few milliseconds (aka. "an 14 | # eternity") doing a disk seek. While that happens, your whole 15 | # coroutine based server is going to grind to a screeching halt. This 16 | # is bad--especially if a lot of coroutines start doing it all at 17 | # once. 18 | # 19 | # Knowing how to handle this is a tricky question. Traditional files 20 | # don't really support "async" in the usual way a socket might. You 21 | # might be able to do something sneaky with asynchronous POSIX APIs 22 | # (i.e., aio_* functions) or maybe thread pools. However, one thing 23 | # is for certain--if files are going to be handled in a sane way, they're 24 | # going to have an async interface. 25 | # 26 | # This file does just that by providing an async-compatible aopen() 27 | # call. You use it the same way you use open() and a normal file: 28 | # 29 | # async def coro(): 30 | # async with aopen(somefile, 'r') as f: 31 | # data = await f.read() 32 | # ... 33 | # 34 | # If you want to use iteration, make sure you use the asynchronous version: 35 | # 36 | # async def coro(): 37 | # async with aopen(somefile, 'r') as f: 38 | # async for line in f: 39 | # ... 40 | # 41 | 42 | __all__ = ['aopen', 'anext'] 43 | 44 | # -- Standard library 45 | 46 | from contextlib import contextmanager 47 | from functools import partial 48 | 49 | # -- Curio 50 | 51 | from .workers import run_in_thread 52 | from .errors import SyncIOError, CancelledError 53 | from . import thread 54 | 55 | class AsyncFile(object): 56 | ''' 57 | An async wrapper around a standard file object. 
Uses threads to 58 | execute various I/O operations in a way that avoids blocking 59 | the Curio kernel loop. 60 | ''' 61 | 62 | def __init__(self, fileobj, open_args=None, open_kwargs=None): 63 | self._fileobj = fileobj 64 | self._open_args = open_args 65 | self._open_kwargs = open_kwargs 66 | 67 | def __repr__(self): 68 | return 'AsyncFile(%r)' % self._fileobj 69 | 70 | @contextmanager 71 | def blocking(self): 72 | ''' 73 | Expose the underlying file in blocking mode for use with synchronous code. 74 | ''' 75 | yield self._file 76 | 77 | @property 78 | def _file(self): 79 | if self._fileobj is None: 80 | raise RuntimeError('Must use an async file as an async-context-manager.') 81 | return self._fileobj 82 | 83 | async def read(self, *args, **kwargs): 84 | return await run_in_thread(partial(self._file.read, *args, **kwargs)) 85 | 86 | async def read1(self, *args, **kwargs): 87 | return await run_in_thread(partial(self._file.read1, *args, **kwargs)) 88 | 89 | async def readinto(self, *args, **kwargs): 90 | return await run_in_thread(partial(self._file.readinto, *args, **kwargs)) 91 | 92 | async def readinto1(self, *args, **kwargs): 93 | return await run_in_thread(partial(self._file.readinto1, *args, **kwargs)) 94 | 95 | async def readline(self, *args, **kwargs): 96 | return await run_in_thread(partial(self._file.readline, *args, **kwargs)) 97 | 98 | async def readlines(self, *args, **kwargs): 99 | return await run_in_thread(partial(self._file.readlines, *args, **kwargs)) 100 | 101 | async def write(self, *args, **kwargs): 102 | return await run_in_thread(partial(self._file.write, *args, **kwargs)) 103 | 104 | async def writelines(self, *args, **kwargs): 105 | return await run_in_thread(partial(self._file.writelines, *args, **kwargs)) 106 | 107 | async def flush(self): 108 | return await run_in_thread(self._file.flush) 109 | 110 | async def close(self): 111 | return await run_in_thread(self._file.close) 112 | 113 | async def seek(self, *args, **kwargs): 114 | return await run_in_thread(partial(self._file.seek, *args, **kwargs)) 115 | 116 | async def tell(self, *args, **kwargs): 117 | return await run_in_thread(partial(self._file.tell, *args, **kwargs)) 118 | 119 | async def truncate(self, *args, **kwargs): 120 | return await run_in_thread(partial(self._file.truncate, *args, **kwargs)) 121 | 122 | def __iter__(self): 123 | raise SyncIOError('Use asynchronous iteration') 124 | 125 | def __next__(self): 126 | raise SyncIOError('Use asynchronous iteration') 127 | 128 | def __enter__(self): 129 | return thread.AWAIT(self.__aenter__()) 130 | 131 | def __exit__(self, *args): 132 | return thread.AWAIT(self.__aexit__(*args)) 133 | 134 | def __aiter__(self): 135 | return self 136 | 137 | async def __aenter__(self): 138 | if self._fileobj is None: 139 | self._fileobj = await run_in_thread(partial(open, *self._open_args, **self._open_kwargs)) 140 | return self 141 | 142 | async def __aexit__(self, *args): 143 | await self.close() 144 | 145 | async def __anext__(self): 146 | data = await run_in_thread(next, self._file, None) 147 | if data is None: 148 | raise StopAsyncIteration 149 | return data 150 | 151 | def __getattr__(self, name): 152 | return getattr(self._file, name) 153 | 154 | # Compatibility with io.FileStream 155 | async def readall(self): 156 | chunks = [] 157 | maxread = 65536 158 | sep = '' if hasattr(self._file, 'encoding') else b'' 159 | while True: 160 | try: 161 | chunk = await self.read(maxread) 162 | except CancelledError as e: 163 | e.bytes_read = sep.join(chunks) 164 | raise 165 | 
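# Note: on cancellation, the partial data collected so far is attached to the
# exception as e.bytes_read (see above). On the success path below, the read
# size doubles whenever a full chunk comes back, so large files require fewer
# hand-offs to the backing thread.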
if not chunk: 166 | return sep.join(chunks) 167 | chunks.append(chunk) 168 | if len(chunk) == maxread: 169 | maxread *= 2 170 | 171 | def aopen(*args, **kwargs): 172 | ''' 173 | Async version of the builtin open() function that returns an async-compatible 174 | file object. Takes the same arguments. Returns a wrapped file in which 175 | blocking I/O operations must be awaited. 176 | ''' 177 | return AsyncFile(None, args, kwargs) 178 | 179 | async def anext(f, sentinel=object): 180 | ''' 181 | Async version of the builtin next() function that advances an async iterator. 182 | Sometimes used to skip a single line in files. 183 | ''' 184 | try: 185 | return await f.__anext__() 186 | except StopAsyncIteration: 187 | if sentinel is not object: 188 | return sentinel 189 | else: 190 | raise 191 | -------------------------------------------------------------------------------- /curio/meta.py: -------------------------------------------------------------------------------- 1 | # curio/meta.py 2 | # ___ 3 | # \./ DANGER: This module implements some experimental 4 | # .--.O.--. metaprogramming techniques involving async/await. 5 | # \/ \/ If you use it, you might die. No seriously. 6 | # 7 | 8 | __all__ = [ 9 | 'iscoroutinefunction', 'finalize', 'awaitable', 'asyncioable', 10 | 'curio_running', 'instantiate_coroutine', 'from_coroutine', 11 | ] 12 | 13 | # -- Standard Library 14 | 15 | from sys import _getframe 16 | import sys 17 | import inspect 18 | from functools import wraps, partial 19 | import dis 20 | import asyncio 21 | import threading 22 | from contextlib import contextmanager 23 | import collections.abc 24 | 25 | # -- Curio 26 | 27 | from .errors import SyncIOError 28 | 29 | 30 | _locals = threading.local() 31 | 32 | # Context manager that is used when the kernel is executing. 33 | 34 | @contextmanager 35 | def running(kernel): 36 | if getattr(_locals, 'running', False): 37 | raise RuntimeError('Only one Curio kernel per thread is allowed') 38 | _locals.running = True 39 | _locals.kernel = kernel 40 | try: 41 | with asyncgen_manager(): 42 | yield 43 | finally: 44 | _locals.running = False 45 | _locals.kernel = None 46 | 47 | def curio_running(): 48 | ''' 49 | Return a flag that indicates whether or not Curio is running in the current thread. 50 | ''' 51 | return getattr(_locals, 'running', False) 52 | 53 | _CO_NESTED = inspect.CO_NESTED 54 | _CO_FROM_COROUTINE = inspect.CO_COROUTINE | inspect.CO_ITERABLE_COROUTINE | inspect.CO_ASYNC_GENERATOR 55 | _isasyncgenfunction = inspect.isasyncgenfunction 56 | 57 | def from_coroutine(level=2, _cache={}): 58 | f_code = _getframe(level).f_code 59 | if f_code in _cache: 60 | return _cache[f_code] 61 | if f_code.co_flags & _CO_FROM_COROUTINE: 62 | _cache[f_code] = True 63 | return True 64 | else: 65 | # Comment: It's possible that we could end up here if one calls a function 66 | # from the context of a list comprehension or a generator expression. For 67 | # example: 68 | # 69 | # async def coro(): 70 | # ... 71 | # a = [ func() for x in s ] 72 | # ... 73 | # 74 | # Where func() is some function that we've wrapped with one of the decorators 75 | # below. 
If so, the code object is nested and has a name such as or 76 | if (f_code.co_flags & _CO_NESTED and f_code.co_name[0] == '<'): 77 | return from_coroutine(level + 2) 78 | else: 79 | _cache[f_code] = False 80 | return False 81 | 82 | def iscoroutinefunction(func): 83 | ''' 84 | Modified test for a coroutine function with awareness of functools.partial 85 | ''' 86 | if isinstance(func, partial): 87 | return iscoroutinefunction(func.func) 88 | if hasattr(func, '__func__'): 89 | return iscoroutinefunction(func.__func__) 90 | return inspect.iscoroutinefunction(func) or hasattr(func, '_awaitable') or _isasyncgenfunction(func) 91 | 92 | def instantiate_coroutine(corofunc, *args, **kwargs): 93 | ''' 94 | Try to instantiate a coroutine. If corofunc is already a coroutine, 95 | we're done. If it's a coroutine function, we call it inside an 96 | async context with the given arguments to create a coroutine. If 97 | it's not a coroutine, we call corofunc(*args, **kwargs) and hope 98 | for the best. 99 | ''' 100 | if isinstance(corofunc, collections.abc.Coroutine) or inspect.isgenerator(corofunc): 101 | assert not args and not kwargs, "arguments can't be passed to an already instantiated coroutine" 102 | return corofunc 103 | 104 | if not iscoroutinefunction(corofunc) and not getattr(corofunc, '_async_thread', False): 105 | coro = corofunc(*args, **kwargs) 106 | if not isinstance(coro, collections.abc.Coroutine): 107 | raise TypeError(f'Could not create coroutine from {corofunc}') 108 | return coro 109 | 110 | async def context(): 111 | return corofunc(*args, **kwargs) 112 | 113 | try: 114 | context().send(None) 115 | except StopIteration as e: 116 | return e.value 117 | 118 | def awaitable(syncfunc): 119 | ''' 120 | Decorator that allows an asynchronous function to be paired with a 121 | synchronous function in a single function call. The selection of 122 | which function executes depends on the calling context. For example: 123 | 124 | def spam(sock, maxbytes): (A) 125 | return sock.recv(maxbytes) 126 | 127 | @awaitable(spam) (B) 128 | async def spam(sock, maxbytes): 129 | return await sock.recv(maxbytes) 130 | 131 | In later code, you could use the spam() function in either a synchronous 132 | or asynchronous context. For example: 133 | 134 | def foo(): 135 | ... 136 | r = spam(s, 1024) # Calls synchronous function (A) above 137 | ... 138 | 139 | async def bar(): 140 | ... 141 | r = await spam(s, 1024) # Calls async function (B) above 142 | ... 143 | 144 | ''' 145 | def decorate(asyncfunc): 146 | if inspect.signature(syncfunc) != inspect.signature(asyncfunc): 147 | raise TypeError(f'{syncfunc.__name__} and async {asyncfunc.__name__} have different signatures') 148 | 149 | @wraps(asyncfunc) 150 | def wrapper(*args, **kwargs): 151 | if from_coroutine(): 152 | return asyncfunc(*args, **kwargs) 153 | else: 154 | return syncfunc(*args, **kwargs) 155 | wrapper._syncfunc = syncfunc 156 | wrapper._asyncfunc = asyncfunc 157 | wrapper._awaitable = True 158 | wrapper.__doc__ = syncfunc.__doc__ or asyncfunc.__doc__ 159 | return wrapper 160 | return decorate 161 | 162 | def asyncioable(awaitablefunc): 163 | ''' 164 | Decorator that additionally allows an asyncio compatible call to 165 | be attached to an already awaitable function. 
For example: 166 | 167 | def spam(): 168 | print('Synchronous spam') 169 | 170 | @awaitable(spam) 171 | def spam(): 172 | print('Async spam (Curio)') 173 | 174 | @asynioable(spam) 175 | def spam(): 176 | print('Async spam (asyncio)') 177 | 178 | This only works if Curio/Asyncio are running in different threads. 179 | Main use is in the implementation of UniversalQueue. 180 | ''' 181 | def decorate(asyncfunc): 182 | @wraps(asyncfunc) 183 | def wrapper(*args, **kwargs): 184 | if from_coroutine(): 185 | # Check if we're Curio or not 186 | if curio_running(): 187 | return awaitablefunc._asyncfunc(*args, **kwargs) 188 | else: 189 | return asyncfunc(*args, **kwargs) 190 | else: 191 | return awaitablefunc._syncfunc(*args, **kwargs) 192 | wrapper._awaitable = True 193 | return wrapper 194 | return decorate 195 | 196 | class finalize(object): 197 | ''' 198 | Context manager that safely finalizes an asynchronous generator. 199 | This might be needed if an asynchronous generator uses async functions 200 | in try-finally and other constructs. 201 | ''' 202 | def __init__(self, aobj): 203 | self.aobj = aobj 204 | 205 | async def __aenter__(self): 206 | return self.aobj 207 | 208 | async def __aexit__(self, ty, val, tb): 209 | if hasattr(self.aobj, 'aclose'): 210 | await self.aobj.aclose() 211 | 212 | 213 | # This context manager is used to manage the execution of async generators 214 | # in Python 3.6. In certain circumstances, they can't be used safely 215 | # unless finalized properly. This context manager installs some a hook 216 | # for detecting lack of finalization. 217 | 218 | @contextmanager 219 | def asyncgen_manager(): 220 | if hasattr(sys, 'get_asyncgen_hooks'): 221 | old_asyncgen_hooks = sys.get_asyncgen_hooks() 222 | def _fini_async_gen(agen): 223 | if agen.ag_frame is not None: 224 | raise RuntimeError("Async generator with async finalization must be wrapped by\n" 225 | "async with curio.meta.finalize(agen) as agen:\n" 226 | " async for n in agen:\n" 227 | " ...\n" 228 | "See PEP 533 for further discussion.") 229 | 230 | sys.set_asyncgen_hooks(None, _fini_async_gen) 231 | try: 232 | yield 233 | finally: 234 | if hasattr(sys, 'get_asyncgen_hooks'): 235 | sys.set_asyncgen_hooks(*old_asyncgen_hooks) 236 | 237 | -------------------------------------------------------------------------------- /curio/network.py: -------------------------------------------------------------------------------- 1 | # curio/network.py 2 | # 3 | # Some high-level functions useful for writing network code. These are loosely 4 | # based on their similar counterparts in the asyncio library. Some of the 5 | # fiddly low-level bits are borrowed. 6 | 7 | __all__ = [ 'open_connection', 'tcp_server', 'tcp_server_socket', 8 | 'open_unix_connection', 'unix_server', 'unix_server_socket' ] 9 | 10 | # -- Standard library 11 | 12 | import logging 13 | log = logging.getLogger(__name__) 14 | 15 | # -- Curio 16 | 17 | from . import socket 18 | from . import ssl as curiossl 19 | from .task import TaskGroup 20 | from .io import Socket 21 | 22 | 23 | async def _wrap_ssl_client(sock, ssl, server_hostname, alpn_protocols): 24 | # Applies SSL to a client connection. Returns an SSL socket. 
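# The ssl argument is either True (a default context is created; if no
# server_hostname is given, hostname checking and certificate verification
# are relaxed) or an already-configured context. A CurioSSLContext accepts
# and returns a Curio socket directly; any other context (e.g. the stdlib
# ssl.SSLContext) operates on the raw socket, so its result is rewrapped in a
# Curio Socket before the async handshake is performed.
#
# Typical call from user code (an illustrative sketch, not from this file):
#
#     sock = await open_connection('example.com', 443, ssl=True,
#                                  server_hostname='example.com')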
25 | if ssl: 26 | if isinstance(ssl, bool): 27 | sslcontext = curiossl.create_default_context() 28 | if not server_hostname: 29 | sslcontext._context.check_hostname = False 30 | sslcontext._context.verify_mode = curiossl.CERT_NONE 31 | 32 | if alpn_protocols: 33 | sslcontext.set_alpn_protocols(alpn_protocols) 34 | else: 35 | # Assume that ssl is an already created context 36 | sslcontext = ssl 37 | 38 | if server_hostname: 39 | extra_args = {'server_hostname': server_hostname} 40 | else: 41 | extra_args = {} 42 | 43 | # if the context is Curio's own, it expects a Curio socket and 44 | # returns one. If context is from an external source, including 45 | # the stdlib's ssl.SSLContext, it expects a non-Curio socket and 46 | # returns a non-Curio socket, which then needs wrapping in a Curio 47 | # socket. 48 | # 49 | # Perhaps the CurioSSLContext is no longer needed. In which case, 50 | # this code can be simplified to just the else case below. 51 | # 52 | if isinstance(sslcontext, curiossl.CurioSSLContext): 53 | sock = await sslcontext.wrap_socket(sock, do_handshake_on_connect=False, **extra_args) 54 | else: 55 | # do_handshake_on_connect should not be specified for 56 | # non-blocking sockets 57 | extra_args['do_handshake_on_connect'] = sock._socket.gettimeout() != 0.0 58 | sock = Socket(sslcontext.wrap_socket(sock._socket, **extra_args)) 59 | await sock.do_handshake() 60 | return sock 61 | 62 | async def open_connection(host, port, *, ssl=None, source_addr=None, server_hostname=None, 63 | alpn_protocols=None): 64 | ''' 65 | Create a TCP connection to a given Internet host and port with optional SSL applied to it. 66 | ''' 67 | if server_hostname and not ssl: 68 | raise ValueError('server_hostname is only applicable with SSL') 69 | 70 | sock = await socket.create_connection((host, port), source_address=source_addr) 71 | 72 | try: 73 | # Apply SSL wrapping to the connection, if applicable 74 | if ssl: 75 | sock = await _wrap_ssl_client(sock, ssl, server_hostname, alpn_protocols) 76 | 77 | return sock 78 | except Exception: 79 | sock._socket.close() 80 | raise 81 | 82 | async def open_unix_connection(path, *, ssl=None, server_hostname=None, 83 | alpn_protocols=None): 84 | if server_hostname and not ssl: 85 | raise ValueError('server_hostname is only applicable with SSL') 86 | 87 | sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) 88 | try: 89 | await sock.connect(path) 90 | 91 | # Apply SSL wrapping to connection, if applicable 92 | if ssl: 93 | sock = await _wrap_ssl_client(sock, ssl, server_hostname, alpn_protocols) 94 | 95 | return sock 96 | except Exception: 97 | sock._socket.close() 98 | raise 99 | 100 | async def run_server(sock, client_connected_task, ssl=None): 101 | if ssl and not hasattr(ssl, 'wrap_socket'): 102 | raise ValueError('ssl argument must have a wrap_socket method') 103 | 104 | async def run_client(client, addr): 105 | async with client: 106 | await client_connected_task(client, addr) 107 | 108 | async def run_server(sock, group): 109 | while True: 110 | client, addr = await sock.accept() 111 | if ssl: 112 | if isinstance(ssl, curiossl.CurioSSLContext): 113 | client = await ssl.wrap_socket(client, server_side=True, do_handshake_on_connect=False) 114 | else: 115 | client = ssl.wrap_socket(client, server_side=True, do_handshake_on_connect=False) 116 | if not isinstance(client, Socket): 117 | client = Socket(client) 118 | await group.spawn(run_client, client, addr) 119 | del client 120 | 121 | async with sock: 122 | async with TaskGroup() as tg: 123 | await 
tg.spawn(run_server, sock, tg) 124 | # Reap all of the children tasks as they complete 125 | async for task in tg: 126 | task.joined = True 127 | del task 128 | 129 | def tcp_server_socket(host, port, family=socket.AF_INET, backlog=100, 130 | reuse_address=True, reuse_port=False): 131 | 132 | sock = socket.socket(family, socket.SOCK_STREAM) 133 | try: 134 | if reuse_address: 135 | sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True) 136 | 137 | if reuse_port: 138 | try: 139 | sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, True) 140 | except (AttributeError, OSError) as e: 141 | log.warning('reuse_port=True option failed', exc_info=True) 142 | 143 | sock.bind((host, port)) 144 | sock.listen(backlog) 145 | except Exception: 146 | sock._socket.close() 147 | raise 148 | 149 | return sock 150 | 151 | async def tcp_server(host, port, client_connected_task, *, 152 | family=socket.AF_INET, backlog=100, ssl=None, 153 | reuse_address=True, reuse_port=False): 154 | 155 | sock = tcp_server_socket(host, port, family, backlog, reuse_address, reuse_port) 156 | await run_server(sock, client_connected_task, ssl) 157 | 158 | def unix_server_socket(path, backlog=100): 159 | sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) 160 | try: 161 | sock.bind(path) 162 | sock.listen(backlog) 163 | except Exception: 164 | sock._socket.close() 165 | raise 166 | return sock 167 | 168 | async def unix_server(path, client_connected_task, *, backlog=100, ssl=None): 169 | sock = unix_server_socket(path, backlog) 170 | await run_server(sock, client_connected_task, ssl) 171 | -------------------------------------------------------------------------------- /curio/sched.py: -------------------------------------------------------------------------------- 1 | # curio/sched.py 2 | # 3 | # Task-scheduling primitives. These are used to implement low-level 4 | # scheduling operations needed by higher-level abstractions such 5 | # as Events, Locks, Semaphores, and Queues. 6 | 7 | __all__ = [ 'SchedFIFO', 'SchedBarrier' ] 8 | 9 | # -- Standard Library 10 | 11 | from abc import ABC, abstractmethod 12 | from collections import deque 13 | 14 | # -- Curio 15 | 16 | from .traps import _scheduler_wait, _scheduler_wake 17 | 18 | 19 | class SchedBase(ABC): 20 | 21 | def __repr__(self): 22 | return f'{type(self).__name__}<{len(self)} tasks waiting>' 23 | 24 | @abstractmethod 25 | def __len__(self): 26 | pass 27 | 28 | @abstractmethod 29 | def _kernel_suspend(self, task): 30 | ''' 31 | Suspends a task. This method *must* return a zero-argument 32 | callable that removes the just added task from the scheduler 33 | on cancellation. Called by the kernel. 34 | ''' 35 | pass 36 | 37 | @abstractmethod 38 | def _kernel_wake(self, ntasks=1): 39 | ''' 40 | Wake one or more tasks. Returns a list of the awakened tasks. 41 | Called by the kernel. 42 | ''' 43 | pass 44 | 45 | async def suspend(self, reason='SUSPEND'): 46 | ''' 47 | Suspend the calling task. reason is a string containing 48 | descriptive text to indicate why (used to set the task state). 49 | ''' 50 | await _scheduler_wait(self, reason) 51 | 52 | async def wake(self, n=1): 53 | ''' 54 | Wake one or more suspended tasks. 55 | ''' 56 | await _scheduler_wake(self, n) 57 | 58 | 59 | class SchedFIFO(SchedBase): 60 | ''' 61 | A scheduling FIFO. Tasks sleep and awake in the order of arrival. 62 | The wake method only awakens a single task. Commonly used to 63 | implement locks and queues. 
64 | ''' 65 | def __init__(self): 66 | self._queue = deque() 67 | self._actual_len = 0 68 | 69 | def __len__(self): 70 | return self._actual_len 71 | 72 | def _kernel_suspend(self, task): 73 | # The task is placed inside a 1-item list. If cancelled, the 74 | # task is replaced by None, but the list remains on the queue 75 | # until later pop operations discard it 76 | item = [task] 77 | self._queue.append(item) 78 | self._actual_len += 1 79 | 80 | def remove(): 81 | item[0] = None 82 | self._actual_len -= 1 83 | return remove 84 | 85 | def _kernel_wake(self, ntasks=1): 86 | tasks = [] 87 | while ntasks > 0: 88 | task, = self._queue.popleft() 89 | if task: 90 | tasks.append(task) 91 | ntasks -= 1 92 | self._actual_len -= len(tasks) 93 | return tasks 94 | 95 | class SchedBarrier(SchedBase): 96 | ''' 97 | A scheduling barrier. Sleeping tasks are collected into a set. 98 | Waking makes all of the blocked tasks reawaken at the same time. 99 | Commonly used to implement Event and join(). 100 | ''' 101 | def __init__(self): 102 | self._tasks = set() 103 | 104 | def __len__(self): 105 | return len(self._tasks) 106 | 107 | def _kernel_suspend(self, task): 108 | self._tasks.add(task) 109 | return lambda: self._tasks.remove(task) 110 | 111 | def _kernel_wake(self, ntasks=1): 112 | if ntasks == len(self._tasks): 113 | result = list(self._tasks) 114 | self._tasks.clear() 115 | else: 116 | result = [self._tasks.pop() for _ in range(ntasks)] 117 | return result 118 | 119 | async def wake(self, n=None): 120 | ''' 121 | Wake all or a specified number of tasks. 122 | ''' 123 | n = len(self._tasks) if n is None else n 124 | await _scheduler_wake(self, n) 125 | 126 | -------------------------------------------------------------------------------- /curio/socket.py: -------------------------------------------------------------------------------- 1 | # curio/socket.py 2 | # 3 | # Standin for the standard socket library. The entire contents of stdlib socket are 4 | # made available here. However, the socket class is replaced by an async compatible version. 5 | # Certain blocking operations are also replaced by versions safe to use in async. 6 | # 7 | 8 | import socket as _socket 9 | 10 | __all__ = _socket.__all__ 11 | 12 | from socket import * 13 | from functools import wraps, partial 14 | 15 | from . import workers 16 | from . 
import io 17 | 18 | 19 | @wraps(_socket.socket) 20 | def socket(*args, **kwargs): 21 | return io.Socket(_socket.socket(*args, **kwargs)) 22 | 23 | 24 | @wraps(_socket.socketpair) 25 | def socketpair(*args, **kwargs): 26 | s1, s2 = _socket.socketpair(*args, **kwargs) 27 | return io.Socket(s1), io.Socket(s2) 28 | 29 | 30 | @wraps(_socket.fromfd) 31 | def fromfd(*args, **kwargs): 32 | return io.Socket(_socket.fromfd(*args, **kwargs)) 33 | 34 | # Replacements for blocking functions related to domain names and DNS 35 | 36 | #@wraps(_socket.create_connection) 37 | #async def create_connection(*args, **kwargs): 38 | # sock = await workers.run_in_thread(partial(_socket.create_connection, *args, **kwargs)) 39 | # return io.Socket(sock) 40 | 41 | async def create_connection(address, timeout=None, source_address=None): 42 | ''' 43 | Pure async implementation of the socket.create_connection function in standard library 44 | ''' 45 | host, port = address 46 | err = None 47 | for res in await getaddrinfo(host, port, 0, SOCK_STREAM): 48 | af, socktype, proto, canonname, sa = res 49 | sock = None 50 | try: 51 | sock = socket(af, socktype, proto) 52 | if source_address: 53 | sock.bind(source_address) 54 | await sock.connect(sa) 55 | # Break explicitly a reference cycle 56 | err = None 57 | return sock 58 | 59 | except error as _: 60 | err = _ 61 | if sock is not None: 62 | await sock.close() 63 | 64 | if err is not None: 65 | raise err 66 | else: 67 | raise OSError("getaddrinfo returns an empty list") 68 | 69 | @wraps(_socket.getaddrinfo) 70 | async def getaddrinfo(*args, **kwargs): 71 | return await workers.run_in_thread(partial(_socket.getaddrinfo, *args, **kwargs)) 72 | 73 | 74 | @wraps(_socket.getfqdn) 75 | async def getfqdn(*args, **kwargs): 76 | return await workers.run_in_thread(partial(_socket.getfqdn, *args, **kwargs)) 77 | 78 | 79 | @wraps(_socket.gethostbyname) 80 | async def gethostbyname(*args, **kwargs): 81 | return await workers.run_in_thread(partial(_socket.gethostbyname, *args, **kwargs)) 82 | 83 | 84 | @wraps(_socket.gethostbyname_ex) 85 | async def gethostbyname_ex(*args, **kwargs): 86 | return await workers.run_in_thread(partial(_socket.gethostbyname_ex, *args, **kwargs)) 87 | 88 | 89 | @wraps(_socket.gethostname) 90 | async def gethostname(*args, **kwargs): 91 | return await workers.run_in_thread(partial(_socket.gethostname, *args, **kwargs)) 92 | 93 | 94 | @wraps(_socket.gethostbyaddr) 95 | async def gethostbyaddr(*args, **kwargs): 96 | return await workers.run_in_thread(partial(_socket.gethostbyaddr, *args, **kwargs)) 97 | 98 | 99 | @wraps(_socket.getnameinfo) 100 | async def getnameinfo(*args, **kwargs): 101 | return await workers.run_in_thread(partial(_socket.getnameinfo, *args, **kwargs)) 102 | -------------------------------------------------------------------------------- /curio/ssl.py: -------------------------------------------------------------------------------- 1 | # curio/ssl.py 2 | # 3 | # Wrapper around built-in SSL module 4 | 5 | __all__ = [] 6 | 7 | # -- Standard Library 8 | 9 | from functools import wraps, partial 10 | 11 | try: 12 | import ssl as _ssl 13 | from ssl import * 14 | except ImportError: 15 | _ssl = None 16 | 17 | # We need these exceptions defined, even if ssl is not available. 
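# (They are placeholders: without the ssl module they can never actually be
# raised, but code that imports and catches SSLWantReadError/SSLWantWriteError
# around non-blocking SSL operations keeps working.)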
18 | class SSLWantReadError(Exception): 19 | pass 20 | 21 | class SSLWantWriteError(Exception): 22 | pass 23 | 24 | # -- Curio 25 | 26 | from .workers import run_in_thread 27 | from .io import Socket 28 | 29 | if _ssl: 30 | @wraps(_ssl.SSLContext.wrap_socket) 31 | async def wrap_socket(sock, *args, do_handshake_on_connect=True, **kwargs): 32 | if isinstance(sock, Socket): 33 | sock = sock._socket 34 | 35 | ssl_sock = _ssl.SSLContext.wrap_socket(sock, *args, do_handshake_on_connect=False, **kwargs) 36 | cssl_sock = Socket(ssl_sock) 37 | cssl_sock.do_handshake_on_connect = do_handshake_on_connect 38 | if do_handshake_on_connect and ssl_sock._connected: 39 | await cssl_sock.do_handshake() 40 | return cssl_sock 41 | 42 | @wraps(_ssl.get_server_certificate) 43 | async def get_server_certificate(*args, **kwargs): 44 | return await run_in_thread(partial(_ssl.get_server_certificate, *args, **kwargs)) 45 | 46 | # Small wrapper class to make sure the wrap_socket() method returns the right type 47 | class CurioSSLContext(object): 48 | 49 | def __init__(self, context): 50 | self._context = context 51 | 52 | def __getattr__(self, name): 53 | return getattr(self._context, name) 54 | 55 | async def wrap_socket(self, sock, *args, do_handshake_on_connect=True, **kwargs): 56 | sock = self._context.wrap_socket( 57 | sock._socket, *args, do_handshake_on_connect=False, **kwargs) 58 | csock = Socket(sock) 59 | csock.do_handshake_on_connect = do_handshake_on_connect 60 | if do_handshake_on_connect and sock._connected: 61 | await csock.do_handshake() 62 | return csock 63 | 64 | def __setattr__(self, name, value): 65 | if name == '_context': 66 | super().__setattr__(name, value) 67 | else: 68 | setattr(self._context, name, value) 69 | 70 | # Name alias 71 | def SSLContext(protocol): 72 | return CurioSSLContext(_ssl.SSLContext(protocol)) 73 | 74 | @wraps(_ssl.create_default_context) 75 | def create_default_context(*args, **kwargs): 76 | context = _ssl.create_default_context(*args, **kwargs) 77 | return CurioSSLContext(context) 78 | -------------------------------------------------------------------------------- /curio/thread.py: -------------------------------------------------------------------------------- 1 | # curio/thread.py 2 | # 3 | # Support for threads implemented on top of the Curio kernel. 4 | # 5 | # Theory of operation: 6 | # -------------------- 7 | # Curio has the ability to safely wait for Futures as defined 8 | # in the concurrent.futures module. A notable feature of coroutines 9 | # is that when called, their evaluation is delayed--instead you get 10 | # a "coroutine" object that must be executed by a kernel or event loop. 11 | # 12 | # A so-called "async thread" uses both of these features together to 13 | # set up an execution pathway for allowing threads to execute 14 | # coroutines. For each thread (a real thread--created by the 15 | # threading module), a backing coroutine is created in Curio. This 16 | # backing coroutine runs on top of the Curio kernel and constantly 17 | # monitors a Future for an incoming request. This request is expected 18 | # to contain an unevaluated coroutine. The unevaluated coroutine is 19 | # evaluated on behalf of the thread by the backing coroutine. Any 20 | # result is the communicated back to the thread which is waiting 21 | # for it on an Event. 22 | # 23 | # The mechanism for making a request within a thread is the AWAIT 24 | # function. 
Specifically, a call like this: 25 | # 26 | # result = AWAIT(coro, *args, **kwargs) 27 | # 28 | # Makes the thread's backing coroutine execute the following: 29 | # 30 | # result = await coro(*args, **kwargs) 31 | # 32 | # From the standpoint of the thread, it appears to be executing a 33 | # normal synchronous call. 34 | # 35 | # Here is a picture diagram of the parts 36 | # 37 | # ________ ___________ _________ 38 | # | | await | | Future | | 39 | # | Curio |<-------| backing |<-------| Thread | 40 | # | Kernel |------->| coroutine |------->| | 41 | # |________| result |___________| Event |_________| 42 | # 43 | 44 | __all__ = [ 'AWAIT', 'spawn_thread' ] 45 | 46 | # -- Standard Library 47 | 48 | import threading 49 | from concurrent.futures import Future 50 | from functools import wraps 51 | from inspect import iscoroutine, isgenerator 52 | from contextlib import contextmanager 53 | import logging 54 | 55 | log = logging.getLogger(__name__) 56 | 57 | # -- Curio 58 | 59 | from . import sync 60 | from . import queue 61 | from .task import spawn, disable_cancellation, check_cancellation, set_cancellation 62 | from .traps import _future_wait 63 | from . import errors 64 | from . import meta 65 | 66 | _locals = threading.local() 67 | 68 | class AsyncThread(object): 69 | 70 | def __init__(self, target=None, args=(), kwargs={}, daemon=False): 71 | self.target = target 72 | self.args = args 73 | self.kwargs = kwargs 74 | self.daemon = daemon 75 | 76 | # The following attributes are provided to make a thread mimic a Task 77 | self.terminated = False 78 | self.cancelled = False 79 | self.taskgroup = None 80 | self.joined = False 81 | 82 | # This future is used by a thread to make a request to Curio 83 | self._request = Future() 84 | 85 | # This event is used to communicate completion of the request 86 | self._done_evt = threading.Event() 87 | 88 | # Event used to signal thread termination 89 | self._terminate_evt = sync.UniversalEvent() 90 | 91 | # Information about the coroutine being executed by the thread 92 | self._coro = None 93 | self._coro_result = None 94 | self._coro_exc = None 95 | 96 | # Final values produced by the thread before termination 97 | self._final_value = None 98 | self._final_exc = None 99 | 100 | # A reference to the associated thread (from threading module) 101 | self._thread = None 102 | 103 | # A reference to the associated backing task 104 | self._task = None 105 | 106 | async def _coro_runner(self): 107 | while True: 108 | # Wait for a hand-off 109 | await disable_cancellation(_future_wait(self._request)) 110 | self._coro = self._request.result() 111 | self._request = Future() 112 | 113 | # If no coroutine, we're shutting down 114 | if not self._coro: 115 | break 116 | 117 | # Run the the coroutine 118 | try: 119 | self._coro_result = await self._coro 120 | self._coro_exc = None 121 | except BaseException as e: 122 | self._coro_result = None 123 | self._coro_exc = e 124 | 125 | # Hand it back to the thread 126 | self._coro = None 127 | self._done_evt.set() 128 | 129 | if self.taskgroup: 130 | await self.taskgroup._task_done(self) 131 | self.joined = True 132 | await self._terminate_evt.set() 133 | 134 | def _func_runner(self): 135 | _locals.thread = self 136 | try: 137 | self._final_result = self.target(*self.args, **self.kwargs) 138 | self._final_exc = None 139 | except BaseException as e: 140 | self._final_result = None 141 | self._final_exc = e 142 | if not isinstance(e, errors.CancelledError): 143 | log.warning("Unexpected exception in cancelled async thread", 
exc_info=True) 144 | 145 | finally: 146 | self._request.set_result(None) 147 | 148 | async def start(self): 149 | if self.target is None: 150 | raise RuntimeError("Async thread must be given a target") 151 | 152 | # Launch the backing coroutine 153 | self._task = await spawn(self._coro_runner, daemon=True) 154 | 155 | # Launch the thread itself 156 | self._thread = threading.Thread(target=self._func_runner) 157 | self._thread.start() 158 | 159 | def AWAIT(self, coro): 160 | self._request.set_result(coro) 161 | self._done_evt.wait() 162 | self._done_evt.clear() 163 | 164 | if self._coro_exc: 165 | raise self._coro_exc 166 | else: 167 | return self._coro_result 168 | 169 | async def join(self): 170 | await self.wait() 171 | self.joined = True 172 | if self.taskgroup: 173 | self.taskgroup._task_discard(self) 174 | 175 | if self._final_exc: 176 | raise errors.TaskError() from self._final_exc 177 | else: 178 | return self._final_result 179 | 180 | async def wait(self): 181 | await self._terminate_evt.wait() 182 | self.terminated = True 183 | 184 | @property 185 | def result(self): 186 | if not self._terminate_evt.is_set(): 187 | raise RuntimeError('Thread not terminated') 188 | if self._final_exc: 189 | raise self._final_exc 190 | else: 191 | return self._final_result 192 | 193 | @property 194 | def exception(self): 195 | if not self._terminate_evt.is_set(): 196 | raise RuntimeError('Thread not terminated') 197 | return self._final_exc 198 | 199 | async def cancel(self, *, exc=errors.TaskCancelled, blocking=True): 200 | self.cancelled = True 201 | await self._task.cancel(exc=exc, blocking=blocking) 202 | if blocking: 203 | await self.wait() 204 | 205 | @property 206 | def id(self): 207 | return self._task.id 208 | 209 | @property 210 | def state(self): 211 | return self._task.state 212 | 213 | def AWAIT(coro, *args, **kwargs): 214 | ''' 215 | Await for a coroutine in an asynchronous thread. If coro is 216 | not a proper coroutine, this function acts a no-op, returning coro. 217 | ''' 218 | # If the coro is a callable and it's identifiable as a coroutine function, 219 | # wrap it inside a coroutine and pass that. 220 | if callable(coro): 221 | if meta.iscoroutinefunction(coro) and hasattr(_locals, 'thread'): 222 | async def _coro(coro): 223 | return await coro(*args, **kwargs) 224 | coro = _coro(coro) 225 | else: 226 | coro = coro(*args, **kwargs) 227 | 228 | if iscoroutine(coro) or isgenerator(coro): 229 | if hasattr(_locals, 'thread'): 230 | return _locals.thread.AWAIT(coro) 231 | else: 232 | # Thought: Do we try to promote the calling thread into an 233 | # "async" thread automatically? Would require a running 234 | # kernel. Would require a task dedicated to spawning the 235 | # coro runner. Would require shutdown. Maybe a context 236 | # manager? 237 | raise errors.AsyncOnlyError('Must be used as async') 238 | else: 239 | return coro 240 | 241 | def spawn_thread(func, *args, daemon=False): 242 | ''' 243 | Launch an async thread. This mimicks the way a task is normally spawned. For 244 | example: 245 | 246 | t = await spawn_thread(func, arg1, arg2) 247 | ... 248 | await t.join() 249 | ''' 250 | if iscoroutine(func) or meta.iscoroutinefunction(func): 251 | raise TypeError("spawn_thread() can't be used on coroutines") 252 | 253 | async def runner(args, daemon): 254 | t = AsyncThread(func, args=args, daemon=daemon) 255 | await t.start() 256 | return t 257 | 258 | return runner(args, daemon) 259 | 260 | def is_async_thread(): 261 | ''' 262 | Returns True if current thread is an async thread. 
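    A rough usage sketch (the curio and time imports are assumed, not part
    of this module) for code that may run either inside or outside an
    async thread:

        def pause(seconds):
            if is_async_thread():
                AWAIT(curio.sleep(seconds))
            else:
                time.sleep(seconds)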
263 | ''' 264 | return hasattr(_locals, 'thread') 265 | -------------------------------------------------------------------------------- /curio/time.py: -------------------------------------------------------------------------------- 1 | # curio/time.py 2 | # 3 | # Functionality related to time handling including timeouts and sleeping 4 | 5 | __all__ = [ 6 | 'clock', 'sleep', 'timeout_after', 'ignore_after', 7 | ] 8 | 9 | # -- Standard library 10 | 11 | import logging 12 | log = logging.getLogger(__name__) 13 | 14 | # --- Curio 15 | 16 | from .task import current_task 17 | from .traps import * 18 | from .errors import * 19 | from . import meta 20 | 21 | async def clock(): 22 | ''' 23 | Immediately return the current value of the kernel clock. There 24 | are no side-effects such as task preemption or cancellation. 25 | ''' 26 | return await _clock() 27 | 28 | async def sleep(seconds): 29 | ''' 30 | Sleep for a specified number of seconds. Sleeping for 0 seconds 31 | makes a task immediately switch to the next ready task (if any). 32 | Returns the value of the kernel clock when awakened. 33 | ''' 34 | return await _sleep(seconds) 35 | 36 | class _TimeoutAfter(object): 37 | ''' 38 | Helper class used by timeout_after() and ignore_after() functions 39 | when used as a context manager. For example: 40 | 41 | async with timeout_after(delay): 42 | statements 43 | ... 44 | ''' 45 | 46 | def __init__(self, clock, ignore=False, timeout_result=None): 47 | self._clock = clock 48 | self._ignore = ignore 49 | self._timeout_result = timeout_result 50 | self.expired = False 51 | self.result = True 52 | 53 | async def __aenter__(self): 54 | task = await current_task() 55 | # Clock adjusted to absolute time 56 | if self._clock is not None: 57 | self._clock += await _clock() 58 | self._deadlines = task._deadlines 59 | self._deadlines.append(self._clock) 60 | self._prior = await _set_timeout(self._clock) 61 | return self 62 | 63 | async def __aexit__(self, ty, val, tb): 64 | current_clock = await _unset_timeout(self._prior) 65 | 66 | # Discussion. If a timeout has occurred, it will either 67 | # present itself here as a TaskTimeout or TimeoutCancellationError 68 | # exception. The value of this exception is set to the current 69 | # kernel clock which can be compared against our own deadline. 70 | # What happens next is driven by these rules: 71 | # 72 | # 1. If we are the outer-most context where the timeout 73 | # period has expired, then a TaskTimeout is raised. 74 | # 75 | # 2. If the deadline has expired for at least one outer 76 | # context, (but not us), a TimeoutCancellationError is 77 | # raised. This means that time has expired elsewhere. 78 | # We're being cancelled because of that, but the reason 79 | # for the cancellation wasn't due to a timeout on our 80 | # part. 81 | # 82 | # 3. If the timeout period has not expired on ANY remaining 83 | # timeout context, it means that a timeout has escaped 84 | # some inner timeout context where it should have been 85 | # caught. This is an operational error. We raise 86 | # UncaughtTimeoutError. 87 | 88 | try: 89 | if ty in (TaskTimeout, TimeoutCancellationError): 90 | timeout_clock = val.args[0] 91 | # Find the outer most deadline that has expired 92 | for n, deadline in enumerate(self._deadlines): 93 | if deadline <= timeout_clock: 94 | break 95 | else: 96 | # No remaining context has expired. 
An operational error 97 | raise UncaughtTimeoutError('Uncaught timeout received') 98 | 99 | if n < len(self._deadlines) - 1: 100 | if ty is TaskTimeout: 101 | raise TimeoutCancellationError(val.args[0]).with_traceback(tb) from None 102 | else: 103 | return False 104 | else: 105 | # The timeout is us. Make sure it's a TaskTimeout (unless ignored) 106 | self.result = self._timeout_result 107 | self.expired = True 108 | if self._ignore: 109 | return True 110 | else: 111 | if ty is TimeoutCancellationError: 112 | raise TaskTimeout(val.args[0]).with_traceback(tb) from None 113 | else: 114 | return False 115 | elif ty is None: 116 | if current_clock > self._deadlines[-1]: 117 | # Further discussion. In the presence of threads and blocking 118 | # operations, it's possible that a timeout has expired, but 119 | # there was simply no opportunity to catch it because there was 120 | # no suspension point. 121 | badness = current_clock - self._deadlines[-1] 122 | log.warning('%r. Operation completed successfully, ' 123 | 'but it took longer than an enclosing timeout. Badness delta=%r.', 124 | await current_task(), badness) 125 | 126 | finally: 127 | self._deadlines.pop() 128 | 129 | def __enter__(self): 130 | return thread.AWAIT(self.__aenter__()) 131 | 132 | def __exit__(self, *args): 133 | return thread.AWAIT(self.__aexit__(*args)) 134 | 135 | async def _timeout_after_func(clock, coro, args, 136 | ignore=False, timeout_result=None): 137 | coro = meta.instantiate_coroutine(coro, *args) 138 | async with _TimeoutAfter(clock, ignore=ignore, timeout_result=timeout_result): 139 | return await coro 140 | 141 | def timeout_after(seconds, coro=None, *args): 142 | ''' 143 | Raise a TaskTimeout exception in the calling task after seconds 144 | have elapsed. This function may be used in two ways. You can 145 | apply it to the execution of a single coroutine: 146 | 147 | await timeout_after(seconds, coro(args)) 148 | 149 | or you can use it as an asynchronous context manager to apply 150 | a timeout to a block of statements: 151 | 152 | async with timeout_after(seconds): 153 | await coro1(args) 154 | await coro2(args) 155 | ... 156 | ''' 157 | if coro is None: 158 | return _TimeoutAfter(seconds) 159 | else: 160 | return _timeout_after_func(seconds, coro, args) 161 | 162 | def ignore_after(seconds, coro=None, *args, timeout_result=None): 163 | ''' 164 | Stop the enclosed task or block of code after seconds have 165 | elapsed. No exception is raised when time expires. Instead, None 166 | is returned. This is often more convenient that catching an 167 | exception. You can apply the function to a single coroutine: 168 | 169 | if await ignore_after(5, coro(args)) is None: 170 | # A timeout occurred 171 | ... 172 | 173 | Alternatively, you can use this function as an async context 174 | manager on a block of statements like this: 175 | 176 | async with ignore_after(5) as r: 177 | await coro1(args) 178 | await coro2(args) 179 | ... 180 | if r.result is None: 181 | # A timeout occurred 182 | 183 | When used as a context manager, the return manager object has 184 | a result attribute that will be set to None if the time 185 | period expires (or True otherwise). 186 | 187 | You can change the return result to a different value using 188 | the timeout_result keyword argument. 189 | ''' 190 | if coro is None: 191 | return _TimeoutAfter(seconds, ignore=True, timeout_result=timeout_result) 192 | else: 193 | return _timeout_after_func(seconds, coro, args, ignore=True, timeout_result=timeout_result) 194 | 195 | from . 
import thread 196 | -------------------------------------------------------------------------------- /curio/timequeue.py: -------------------------------------------------------------------------------- 1 | # timequeue.py 2 | # 3 | # A Discussion About Time. 4 | # 5 | # Internally, Curio must manage time for two different reasons, 6 | # sleeping and for timeouts. Aside from toy examples, most real-world 7 | # code isn't going to sit around making a lot of sleep() calls. 8 | # Instead the more common use is timeouts. Timeouts are kind of 9 | # interesting though--when a timeout is set, there is typically an 10 | # expectation that it will probably NOT occur. The expiration of a 11 | # timeout is an exceptional event. Most of the time, a timeout will be 12 | # cancelled before it is allowed to expire. 13 | # 14 | # This presents an interesting implementation challenge for managing 15 | # time. It is most common to see time managed by sorting the 16 | # expiration times in some way. For example, placing them in a sorted 17 | # list, or ordering them on a heap in a priority queue. Although 18 | # operations on these kinds of data structures can be managed in O(log N) 19 | # steps, they might not be necessary at all if you make some slightly 20 | # different assumptions about time management. 21 | # 22 | # The queue implementation here is based on the idea that expiration 23 | # times in the distant future don't need to be precisely sorted. 24 | # Instead, you can merely drop expiration times in a dict with the 25 | # hope that they'll be cancelled later. Manipulating a dict in this 26 | # case is O(1)--meaning that is extremely cheap to setup and teardown 27 | # a timeout that never occurs. For timeouts in the near future, they 28 | # can still be sorted using a priority queue in the usual way. 29 | 30 | import heapq 31 | 32 | class TimeQueue: 33 | cutoff = 1.0 # Threshhold for near/far events (seconds) 34 | def __init__(self): 35 | self.near = [ ] 36 | self.far = { } 37 | self.near_deadline = 0 38 | self.far_min_deadline = float('inf') 39 | 40 | def _far_to_near(self): 41 | ''' 42 | Move items from the far queue to the near queue (if any). 43 | ''' 44 | removed = [] 45 | min_deadline = float('inf') 46 | for item, expires in self.far.items(): 47 | if expires < self.near_deadline: 48 | self.push(item, expires) 49 | removed.append(item) 50 | elif expires < min_deadline: 51 | min_deadline = expires 52 | for item in removed: 53 | del self.far[item] 54 | self.far_min_deadline = min_deadline 55 | 56 | def next_deadline(self, current_clock): 57 | ''' 58 | Returns the number of seconds to delay until the next deadline 59 | expires. current_clock is the current value of the clock. 60 | Returns None if there are no pending deadlines. 61 | ''' 62 | self.near_deadline = current_clock + self.cutoff 63 | if self.near_deadline > self.far_min_deadline: 64 | self._far_to_near() 65 | 66 | if self.near: 67 | delta = self.near[0][0] - current_clock 68 | return delta if delta > 0 else 0 69 | 70 | # There are no near deadlines. Use the closest far deadline 71 | if self.far: 72 | delta = self.far_min_deadline - current_clock 73 | return delta if delta > 0 else 0 74 | 75 | # There are no sleeping tasks of any kind. 76 | return None 77 | 78 | def push(self, item, expires): 79 | ''' 80 | Push a new item onto the time queue. 
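        Items that come due within the current near-deadline window go onto
        the near heap; anything further out is parked in the far dict and is
        only moved onto the heap if it later comes due.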
81 | ''' 82 | # If the expiration time is closer than the current near deadline, 83 | # it gets pushed onto a heap in order to preserve order 84 | if expires <= self.near_deadline: 85 | heapq.heappush(self.near, (expires, item)) 86 | else: 87 | # Otherwise the item gets put into a dict for far-in-future handling 88 | if item not in self.far or self.far[item] > expires: 89 | self.far[item] = expires 90 | if expires < self.far_min_deadline: 91 | self.far_min_deadline = expires 92 | 93 | def expired(self, deadline): 94 | ''' 95 | An iterator that returns all items that have expired up to a given deadline 96 | ''' 97 | near = self.near 98 | if deadline >= self.far_min_deadline: 99 | self.near_deadline = deadline + self.cutoff 100 | self._far_to_near() 101 | 102 | while near and near[0][0] <= deadline: 103 | yield heapq.heappop(near) 104 | 105 | def cancel(self, item, expires): 106 | ''' 107 | Cancel a time event. The combination of (item, expires) should 108 | match a prior push() operation (but if not, it's ignored). 109 | ''' 110 | self.far.pop(item, None) 111 | -------------------------------------------------------------------------------- /curio/traps.py: -------------------------------------------------------------------------------- 1 | # traps.py 2 | # 3 | # Curio programs execute under the supervision of a 4 | # kernel. Communication with the kernel takes place via a "trap" 5 | # involving the yield statement. Traps represent internel kernel 6 | # procedures. Direct use of the functions defined here is allowed 7 | # when making new kinds of Curio primitives, but if you're trying to 8 | # solve a higher level problem, there is probably a higher-level 9 | # interface that is easier to use (e.g., Socket, File, Queue, etc.). 10 | # ---------------------------------------------------------------------- 11 | 12 | __all__ = [ 13 | '_read_wait', '_write_wait', '_future_wait', '_sleep', '_spawn', 14 | '_cancel_task', '_scheduler_wait', '_scheduler_wake', 15 | '_get_kernel', '_get_current', '_set_timeout', '_unset_timeout', 16 | '_clock', '_io_waiting', '_io_release', 17 | ] 18 | 19 | # -- Standard library 20 | 21 | from types import coroutine 22 | from selectors import EVENT_READ, EVENT_WRITE 23 | 24 | # -- Curio 25 | 26 | from . import errors 27 | 28 | # This is the only entry point to the Curio kernel and the 29 | # only place where the @types.coroutine decorator is used. 30 | @coroutine 31 | def _kernel_trap(*request): 32 | result = yield request 33 | if isinstance(result, BaseException): 34 | raise result 35 | else: 36 | return result 37 | 38 | # Higher-level trap functions that make use of async/await 39 | async def _read_wait(fileobj): 40 | ''' 41 | Wait until reading can be performed. If another task is waiting 42 | on the same file, a ResourceBusy exception is raised. 43 | ''' 44 | return await _kernel_trap('trap_io', fileobj, EVENT_READ, 'READ_WAIT') 45 | 46 | async def _write_wait(fileobj): 47 | ''' 48 | Wait until writing can be performed. If another task is waiting 49 | to write on the same file, a ResourceBusy exception is raised. 50 | ''' 51 | return await _kernel_trap('trap_io', fileobj, EVENT_WRITE, 'WRITE_WAIT') 52 | 53 | async def _io_release(fileobj): 54 | ''' 55 | Release kernel resources associated with a file 56 | ''' 57 | return await _kernel_trap('trap_io_release', fileobj) 58 | 59 | async def _io_waiting(fileobj): 60 | ''' 61 | Return a tuple (rtask, wtask) of tasks currently blocked waiting 62 | for I/O on fileobj. 
63 | ''' 64 | return await _kernel_trap('trap_io_waiting', fileobj) 65 | 66 | async def _future_wait(future, event=None): 67 | ''' 68 | Wait for the result of a Future to be ready. 69 | ''' 70 | return await _kernel_trap('trap_future_wait', future, event) 71 | 72 | async def _sleep(clock): 73 | ''' 74 | Sleep until the monotonic clock reaches the specified clock value. 75 | If clock is 0, forces the current task to yield to the next task (if any). 76 | ''' 77 | return await _kernel_trap('trap_sleep', clock) 78 | 79 | async def _spawn(coro): 80 | ''' 81 | Create a new task. Returns the resulting Task object. 82 | ''' 83 | return await _kernel_trap('trap_spawn', coro) 84 | 85 | async def _cancel_task(task, exc=errors.TaskCancelled, val=None): 86 | ''' 87 | Cancel a task. Causes a CancelledError exception to raise in the task. 88 | Set the exc and val arguments to change the exception. 89 | ''' 90 | return await _kernel_trap('trap_cancel_task', task, exc, val) 91 | 92 | async def _scheduler_wait(sched, state): 93 | ''' 94 | Put the task to sleep on a scheduler primitive. 95 | ''' 96 | return await _kernel_trap('trap_sched_wait', sched, state) 97 | 98 | async def _scheduler_wake(sched, n=1): 99 | ''' 100 | Reschedule one or more tasks waiting on a scheduler primitive. 101 | ''' 102 | return await _kernel_trap('trap_sched_wake', sched, n) 103 | 104 | async def _get_kernel(): 105 | ''' 106 | Get the kernel executing the task. 107 | ''' 108 | return await _kernel_trap('trap_get_kernel') 109 | 110 | async def _get_current(): 111 | ''' 112 | Get the currently executing task 113 | ''' 114 | return await _kernel_trap('trap_get_current') 115 | 116 | async def _set_timeout(clock): 117 | ''' 118 | Set a timeout for the current task that occurs at the specified clock value. 119 | Setting a clock of None clears any previous timeout. 120 | ''' 121 | return await _kernel_trap('trap_set_timeout', clock) 122 | 123 | async def _unset_timeout(previous): 124 | ''' 125 | Restore the previous timeout for the current task. 126 | ''' 127 | return await _kernel_trap('trap_unset_timeout', previous) 128 | 129 | async def _clock(): 130 | ''' 131 | Return the value of the kernel clock 132 | ''' 133 | return await _kernel_trap('trap_clock') 134 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " applehelp to make an Apple Help Book" 34 | @echo " devhelp to make HTML files and a Devhelp project" 35 | @echo " epub to make an epub" 36 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 37 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 38 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 39 | @echo " text to make text files" 40 | @echo " man to make manual pages" 41 | @echo " texinfo to make Texinfo files" 42 | @echo " info to make Texinfo files and run them through makeinfo" 43 | @echo " gettext to make PO message catalogs" 44 | @echo " changes to make an overview of all changed/added/deprecated items" 45 | @echo " xml to make Docutils-native XML files" 46 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 47 | @echo " linkcheck to check all external links for integrity" 48 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 49 | @echo " coverage to run coverage check of the documentation (if enabled)" 50 | 51 | clean: 52 | rm -rf $(BUILDDIR)/* 53 | 54 | html: 55 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 56 | @echo 57 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 58 | 59 | dirhtml: 60 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 61 | @echo 62 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 63 | 64 | singlehtml: 65 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 66 | @echo 67 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 68 | 69 | pickle: 70 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 71 | @echo 72 | @echo "Build finished; now you can process the pickle files." 73 | 74 | json: 75 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 76 | @echo 77 | @echo "Build finished; now you can process the JSON files." 78 | 79 | htmlhelp: 80 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 81 | @echo 82 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 83 | ".hhp project file in $(BUILDDIR)/htmlhelp." 84 | 85 | qthelp: 86 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 87 | @echo 88 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 89 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 90 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/curio.qhcp" 91 | @echo "To view the help file:" 92 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/curio.qhc" 93 | 94 | applehelp: 95 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 96 | @echo 97 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 98 | @echo "N.B. You won't be able to view it unless you put it in" \ 99 | "~/Library/Documentation/Help or install it in your application" \ 100 | "bundle." 
101 | 102 | devhelp: 103 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 104 | @echo 105 | @echo "Build finished." 106 | @echo "To view the help file:" 107 | @echo "# mkdir -p $$HOME/.local/share/devhelp/curio" 108 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/curio" 109 | @echo "# devhelp" 110 | 111 | epub: 112 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 113 | @echo 114 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 115 | 116 | latex: 117 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 118 | @echo 119 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 120 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 121 | "(use \`make latexpdf' here to do that automatically)." 122 | 123 | latexpdf: 124 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 125 | @echo "Running LaTeX files through pdflatex..." 126 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 127 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 128 | 129 | latexpdfja: 130 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 131 | @echo "Running LaTeX files through platex and dvipdfmx..." 132 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 133 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 134 | 135 | text: 136 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 137 | @echo 138 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 139 | 140 | man: 141 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 142 | @echo 143 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 144 | 145 | texinfo: 146 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 147 | @echo 148 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 149 | @echo "Run \`make' in that directory to run these through makeinfo" \ 150 | "(use \`make info' here to do that automatically)." 151 | 152 | info: 153 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 154 | @echo "Running Texinfo files through makeinfo..." 155 | make -C $(BUILDDIR)/texinfo info 156 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 157 | 158 | gettext: 159 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 160 | @echo 161 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 162 | 163 | changes: 164 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 165 | @echo 166 | @echo "The overview file is in $(BUILDDIR)/changes." 167 | 168 | linkcheck: 169 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 170 | @echo 171 | @echo "Link check complete; look for any errors in the above output " \ 172 | "or in $(BUILDDIR)/linkcheck/output.txt." 173 | 174 | doctest: 175 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 176 | @echo "Testing of doctests in the sources finished, look at the " \ 177 | "results in $(BUILDDIR)/doctest/output.txt." 178 | 179 | coverage: 180 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 181 | @echo "Testing of coverage in the sources finished, look at the " \ 182 | "results in $(BUILDDIR)/coverage/python.txt." 183 | 184 | xml: 185 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 186 | @echo 187 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 188 | 189 | pseudoxml: 190 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 191 | @echo 192 | @echo "Build finished. 
The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 193 | -------------------------------------------------------------------------------- /docs/_static/curiolayer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dabeaz/curio/148454621f9bd8dd843f591e87715415431f6979/docs/_static/curiolayer.png -------------------------------------------------------------------------------- /docs/_static/guiserv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dabeaz/curio/148454621f9bd8dd843f591e87715415431f6979/docs/_static/guiserv.png -------------------------------------------------------------------------------- /docs/_static/layers.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dabeaz/curio/148454621f9bd8dd843f591e87715415431f6979/docs/_static/layers.png -------------------------------------------------------------------------------- /docs/customization.py: -------------------------------------------------------------------------------- 1 | # Adapted from https://hg.python.org/cpython/file/default/Doc/tools/extensions/pyspecific.py . 2 | 3 | from sphinx import addnodes 4 | from sphinx.domains.python import PyModulelevel, PyClassmember 5 | 6 | 7 | class PyCoroutineMixin(object): 8 | 9 | def handle_signature(self, sig, signode): 10 | ret = super(PyCoroutineMixin, self).handle_signature(sig, signode) 11 | signode.insert(0, addnodes.desc_annotation('await ', 'await ')) 12 | return ret 13 | 14 | 15 | class PyAsyncFunction(PyCoroutineMixin, PyModulelevel): 16 | 17 | def run(self): 18 | self.name = 'py:function' 19 | return PyModulelevel.run(self) 20 | 21 | 22 | class PyAsyncMethod(PyCoroutineMixin, PyClassmember): 23 | 24 | def run(self): 25 | self.name = 'py:method' 26 | return PyClassmember.run(self) 27 | 28 | 29 | def setup(app): 30 | app.add_directive_to_domain('py', 'asyncfunction', PyAsyncFunction) 31 | app.add_directive_to_domain('py', 'asyncmethod', PyAsyncMethod) 32 | return {'version': '1.0', 'parallel_read_safe': True} 33 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. curio documentation master file, created by 2 | sphinx-quickstart on Thu Oct 22 09:54:26 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Curio 7 | ===== 8 | 9 | Curio is a coroutine-based library for concurrent Python systems 10 | programming. It provides standard programming abstractions such as as 11 | tasks, sockets, files, locks, and queues. You'll find it to be 12 | familiar, small, fast, and fun. 13 | 14 | Curio is the work of David Beazley (https://www.dabeaz.com), who has 15 | been teaching and talking about concurrency related topics for more 16 | than 20 years, both as a university professor and as an independent 17 | researcher. 18 | 19 | Requirements 20 | ------------ 21 | 22 | Curio requires Python 3.7 or newer. It has no third-party 23 | dependencies and works on both POSIX and Windows. 24 | 25 | Documentation 26 | ------------- 27 | 28 | .. 
toctree:: 29 | :maxdepth: 2 30 | 31 | tutorial 32 | howto 33 | reference 34 | 35 | Curio University 36 | ---------------- 37 | 38 | Curio is based on ideas resulting from more than 12 years of 39 | exploration into various facets of Python's concurrency and coroutine 40 | model. Dave has given numerous talks/tutorials on this topic at PyCon 41 | and elsewhere. Here is a detailed list of presentations to help you 42 | understand how Curio works and some of the system thinking that has gone 43 | into it. All of these talks are more general than Curio--you'll learn 44 | a lot about Python concurrency in general. 45 | 46 | * `Build Your Own Async `_ 47 | Workshop talk at PyCon India, 2019. 48 | This workshop talks about the fundamentals of building a simple 49 | async concurrency library from scratch using both callbacks and 50 | coroutines. 51 | 52 | * `Die Threads `_ 53 | Keynote talk at EuroPython, 2018. 54 | Asynchronous programming is most commonly described as an alternative to 55 | thread programming. But what if you reinvented thread programming run on top 56 | of asynchronous programming? This talk explores this concept. It 57 | might be the most "experimental" talk related to Curio. 58 | 59 | * `The Other Async (Threads + Asyncio = Love) `_ 60 | Keynote talk at PyGotham, 2017. 61 | This talk steps through the thinking and design of building a so-called 62 | "Universal Queue" that works with both async programs and threads 63 | using a common programming interface. 64 | 65 | * `Fear and Awaiting in Async `_ 66 | Keynote talk at PyOhio 2016. 67 | A no-holds-barred tour through the possibilities that await programmers 68 | who embrace the new async/await syntax in Python. Covers the basics of 69 | coroutines, async iteration, async context managers, and a lot of advanced 70 | metaprogramming including decorators, descriptors, and metaclasses. 71 | Also discusses the importance of API design in async programming. 72 | 73 | * `Topics of Interest (Async) `_ 74 | Keynote talk at Python Brasil 2015. 75 | Perhaps the first "Curio" talk. A small concurrency library similar 76 | to Curio is live-coded and discussed along with other topics 77 | related to async. 78 | 79 | * `Python Concurrency from the Ground Up (LIVE) `_ 80 | Conference talk at PyCon 2015. This live-coded talk 81 | discusses threads, generators, coroutines, the Global 82 | Interpreter Lock (GIL), and more. 83 | 84 | * `Understanding the Python GIL `_ 85 | Conference talk from PyCon 2010. Understand the inner workings of the infamous 86 | Global Interpreter Lock and how it impacts thread performance. See also 87 | this related `talk `_ from the RuPy 2011 conference. 88 | 89 | * `A Curious Course on Coroutines and Concurrency `_ [`Materials `_] 90 | Tutorial at PyCon 2009. Coroutines were first introduced in Python 2.5. 91 | This tutorial explores the foundations of using coroutines for various 92 | problems in data processing and concurrency. This tutorial gives 93 | much of the background that led to the current incarnation of Python 94 | coroutines. 95 | 96 | * `An Introduction to Python Concurrency `_ 97 | Tutorial at USENIX Technical Conference, 2009. A comprehensive overview of concurrency 98 | programming in Python. Includes threads, processes, and event-driven I/O. A good overview 99 | of basic programming concepts. 
100 | 101 | 102 | 103 | 104 | 105 | 106 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=_build 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . 10 | set I18NSPHINXOPTS=%SPHINXOPTS% . 11 | if NOT "%PAPER%" == "" ( 12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% 14 | ) 15 | 16 | if "%1" == "" goto help 17 | 18 | if "%1" == "help" ( 19 | :help 20 | echo.Please use `make ^` where ^ is one of 21 | echo. html to make standalone HTML files 22 | echo. dirhtml to make HTML files named index.html in directories 23 | echo. singlehtml to make a single large HTML file 24 | echo. pickle to make pickle files 25 | echo. json to make JSON files 26 | echo. htmlhelp to make HTML files and a HTML help project 27 | echo. qthelp to make HTML files and a qthelp project 28 | echo. devhelp to make HTML files and a Devhelp project 29 | echo. epub to make an epub 30 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 31 | echo. text to make text files 32 | echo. man to make manual pages 33 | echo. texinfo to make Texinfo files 34 | echo. gettext to make PO message catalogs 35 | echo. changes to make an overview over all changed/added/deprecated items 36 | echo. xml to make Docutils-native XML files 37 | echo. pseudoxml to make pseudoxml-XML files for display purposes 38 | echo. linkcheck to check all external links for integrity 39 | echo. doctest to run all doctests embedded in the documentation if enabled 40 | echo. coverage to run coverage check of the documentation if enabled 41 | goto end 42 | ) 43 | 44 | if "%1" == "clean" ( 45 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 46 | del /q /s %BUILDDIR%\* 47 | goto end 48 | ) 49 | 50 | 51 | REM Check if sphinx-build is available and fallback to Python version if any 52 | %SPHINXBUILD% 2> nul 53 | if errorlevel 9009 goto sphinx_python 54 | goto sphinx_ok 55 | 56 | :sphinx_python 57 | 58 | set SPHINXBUILD=python -m sphinx.__init__ 59 | %SPHINXBUILD% 2> nul 60 | if errorlevel 9009 ( 61 | echo. 62 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 63 | echo.installed, then set the SPHINXBUILD environment variable to point 64 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 65 | echo.may add the Sphinx directory to PATH. 66 | echo. 67 | echo.If you don't have Sphinx installed, grab it from 68 | echo.http://sphinx-doc.org/ 69 | exit /b 1 70 | ) 71 | 72 | :sphinx_ok 73 | 74 | 75 | if "%1" == "html" ( 76 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 77 | if errorlevel 1 exit /b 1 78 | echo. 79 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 80 | goto end 81 | ) 82 | 83 | if "%1" == "dirhtml" ( 84 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 85 | if errorlevel 1 exit /b 1 86 | echo. 87 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 88 | goto end 89 | ) 90 | 91 | if "%1" == "singlehtml" ( 92 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 93 | if errorlevel 1 exit /b 1 94 | echo. 95 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 
96 | goto end 97 | ) 98 | 99 | if "%1" == "pickle" ( 100 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 101 | if errorlevel 1 exit /b 1 102 | echo. 103 | echo.Build finished; now you can process the pickle files. 104 | goto end 105 | ) 106 | 107 | if "%1" == "json" ( 108 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 109 | if errorlevel 1 exit /b 1 110 | echo. 111 | echo.Build finished; now you can process the JSON files. 112 | goto end 113 | ) 114 | 115 | if "%1" == "htmlhelp" ( 116 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 117 | if errorlevel 1 exit /b 1 118 | echo. 119 | echo.Build finished; now you can run HTML Help Workshop with the ^ 120 | .hhp project file in %BUILDDIR%/htmlhelp. 121 | goto end 122 | ) 123 | 124 | if "%1" == "qthelp" ( 125 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 126 | if errorlevel 1 exit /b 1 127 | echo. 128 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 129 | .qhcp project file in %BUILDDIR%/qthelp, like this: 130 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\curio.qhcp 131 | echo.To view the help file: 132 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\curio.ghc 133 | goto end 134 | ) 135 | 136 | if "%1" == "devhelp" ( 137 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 138 | if errorlevel 1 exit /b 1 139 | echo. 140 | echo.Build finished. 141 | goto end 142 | ) 143 | 144 | if "%1" == "epub" ( 145 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 146 | if errorlevel 1 exit /b 1 147 | echo. 148 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 149 | goto end 150 | ) 151 | 152 | if "%1" == "latex" ( 153 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 154 | if errorlevel 1 exit /b 1 155 | echo. 156 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 157 | goto end 158 | ) 159 | 160 | if "%1" == "latexpdf" ( 161 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 162 | cd %BUILDDIR%/latex 163 | make all-pdf 164 | cd %~dp0 165 | echo. 166 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 167 | goto end 168 | ) 169 | 170 | if "%1" == "latexpdfja" ( 171 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 172 | cd %BUILDDIR%/latex 173 | make all-pdf-ja 174 | cd %~dp0 175 | echo. 176 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 177 | goto end 178 | ) 179 | 180 | if "%1" == "text" ( 181 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 182 | if errorlevel 1 exit /b 1 183 | echo. 184 | echo.Build finished. The text files are in %BUILDDIR%/text. 185 | goto end 186 | ) 187 | 188 | if "%1" == "man" ( 189 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 190 | if errorlevel 1 exit /b 1 191 | echo. 192 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 193 | goto end 194 | ) 195 | 196 | if "%1" == "texinfo" ( 197 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo 198 | if errorlevel 1 exit /b 1 199 | echo. 200 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. 201 | goto end 202 | ) 203 | 204 | if "%1" == "gettext" ( 205 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale 206 | if errorlevel 1 exit /b 1 207 | echo. 208 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 209 | goto end 210 | ) 211 | 212 | if "%1" == "changes" ( 213 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 214 | if errorlevel 1 exit /b 1 215 | echo. 216 | echo.The overview file is in %BUILDDIR%/changes. 
217 | goto end 218 | ) 219 | 220 | if "%1" == "linkcheck" ( 221 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 222 | if errorlevel 1 exit /b 1 223 | echo. 224 | echo.Link check complete; look for any errors in the above output ^ 225 | or in %BUILDDIR%/linkcheck/output.txt. 226 | goto end 227 | ) 228 | 229 | if "%1" == "doctest" ( 230 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 231 | if errorlevel 1 exit /b 1 232 | echo. 233 | echo.Testing of doctests in the sources finished, look at the ^ 234 | results in %BUILDDIR%/doctest/output.txt. 235 | goto end 236 | ) 237 | 238 | if "%1" == "coverage" ( 239 | %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage 240 | if errorlevel 1 exit /b 1 241 | echo. 242 | echo.Testing of coverage in the sources finished, look at the ^ 243 | results in %BUILDDIR%/coverage/python.txt. 244 | goto end 245 | ) 246 | 247 | if "%1" == "xml" ( 248 | %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml 249 | if errorlevel 1 exit /b 1 250 | echo. 251 | echo.Build finished. The XML files are in %BUILDDIR%/xml. 252 | goto end 253 | ) 254 | 255 | if "%1" == "pseudoxml" ( 256 | %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml 257 | if errorlevel 1 exit /b 1 258 | echo. 259 | echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. 260 | goto end 261 | ) 262 | 263 | :end 264 | -------------------------------------------------------------------------------- /examples/bench/asyncecho.py: -------------------------------------------------------------------------------- 1 | # Example: A simple echo server written directly with asyncio 2 | 3 | import asyncio 4 | from socket import * 5 | 6 | 7 | async def echo_server(loop, address): 8 | sock = socket(AF_INET, SOCK_STREAM) 9 | sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) 10 | sock.bind(address) 11 | sock.listen(5) 12 | sock.setblocking(False) 13 | print('Server listening at', address) 14 | with sock: 15 | while True: 16 | client, addr = await loop.sock_accept(sock) 17 | print('Connection from', addr) 18 | loop.create_task(echo_client(loop, client)) 19 | 20 | 21 | async def echo_client(loop, client): 22 | client.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1) 23 | with client: 24 | while True: 25 | data = await loop.sock_recv(client, 10000) 26 | if not data: 27 | break 28 | await loop.sock_sendall(client, data) 29 | print('Connection closed') 30 | 31 | 32 | if __name__ == '__main__': 33 | loop = asyncio.new_event_loop() 34 | asyncio.set_event_loop(loop) 35 | loop.set_debug(False) 36 | loop.create_task(echo_server(loop, ('', 25000))) 37 | loop.run_forever() 38 | -------------------------------------------------------------------------------- /examples/bench/asyncproto.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from socket import * 3 | 4 | 5 | class EchoProtocol(asyncio.Protocol): 6 | 7 | def connection_made(self, transport): 8 | self.transport = transport 9 | sock = transport.get_extra_info('socket') 10 | try: 11 | sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1) 12 | except (OSError, NameError): 13 | pass 14 | 15 | def connection_lost(self, exc): 16 | self.transport = None 17 | 18 | def data_received(self, data): 19 | self.transport.write(data) 20 | 21 | 22 | if __name__ == '__main__': 23 | loop = asyncio.get_event_loop() 24 | coro = loop.create_server(EchoProtocol, '', 25000) 25 | srv = loop.run_until_complete(coro) 26 | loop.run_forever() 27 | 
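# Rough benchmarking sketch (assumes the client script in this directory):
# start this server, then in a separate terminal run, for example:
#
#     python client.py 100
#
# client.py connects to localhost:25000, echoes 100-byte messages, and
# prints the observed requests/sec once per second.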
-------------------------------------------------------------------------------- /examples/bench/asyncsslecho.py: -------------------------------------------------------------------------------- 1 | # Example: A simple echo server written using asyncio streams 2 | 3 | import asyncio 4 | import ssl 5 | 6 | 7 | KEYFILE = "ssl_test_rsa" # Private key 8 | CERTFILE = "ssl_test.crt" # Certificate (self-signed) 9 | 10 | 11 | async def echo_client(reader, writer): 12 | addr = writer.get_extra_info('peername') 13 | print('Connection from', addr) 14 | while True: 15 | data = await reader.read(100000) 16 | if not data: 17 | break 18 | writer.write(data) 19 | await writer.drain() 20 | print('Connection closed') 21 | 22 | 23 | if __name__ == '__main__': 24 | context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) 25 | context.load_cert_chain(certfile=CERTFILE, keyfile=KEYFILE) 26 | 27 | # import uvloop 28 | # asyncio.set_event_loop(uvloop.new_event_loop()) 29 | 30 | loop = asyncio.get_event_loop() 31 | coro = asyncio.start_server( 32 | echo_client, '127.0.0.1', 25000, loop=loop, ssl=context) 33 | loop.run_until_complete(coro) 34 | loop.run_forever() 35 | -------------------------------------------------------------------------------- /examples/bench/asyncstream.py: -------------------------------------------------------------------------------- 1 | # Example: A simple echo server written using asyncio streams 2 | 3 | import asyncio 4 | 5 | async def echo_client(reader, writer): 6 | addr = writer.get_extra_info('peername') 7 | print('Connection from', addr) 8 | while True: 9 | data = await reader.read(100000) 10 | if not data: 11 | break 12 | writer.write(data) 13 | await writer.drain() 14 | print('Connection closed') 15 | 16 | 17 | if __name__ == '__main__': 18 | loop = asyncio.get_event_loop() 19 | coro = asyncio.start_server(echo_client, '127.0.0.1', 25000, loop=loop) 20 | loop.run_until_complete(coro) 21 | loop.run_forever() 22 | -------------------------------------------------------------------------------- /examples/bench/client.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | int main() { 10 | int sockfd; 11 | struct sockaddr_in servaddr; 12 | char outgoing[100]; 13 | char incoming[100]; 14 | int n; 15 | 16 | sockfd = socket(AF_INET, SOCK_STREAM, 0); 17 | bzero(&servaddr, sizeof(servaddr)); 18 | bzero(incoming, 100); 19 | servaddr.sin_family = AF_INET; 20 | servaddr.sin_port = htons(25000); 21 | inet_pton(AF_INET, "127.0.0.1", &servaddr.sin_addr); 22 | connect(sockfd, &servaddr, sizeof(servaddr)); 23 | 24 | strcpy(outgoing, "test"); 25 | for (n = 0; n < 1000000; n++) { 26 | send(sockfd, outgoing, 1, 0); 27 | recv(sockfd, incoming, 1, 0); 28 | if ((n % 10000) == 0) { 29 | printf("%d\n", n); 30 | } 31 | } 32 | close(sockfd); 33 | } 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /examples/bench/client.py: -------------------------------------------------------------------------------- 1 | # client.py 2 | # 3 | # Common client. 
Measure the response rate of the echo server 4 | 5 | from socket import * 6 | import time 7 | from threading import Thread 8 | import atexit 9 | import sys 10 | 11 | if len(sys.argv) > 1: 12 | MSGSIZE = int(sys.argv[1]) 13 | else: 14 | MSGSIZE = 1 15 | 16 | msg = b'x' * MSGSIZE 17 | 18 | sock = socket(AF_INET, SOCK_STREAM) 19 | sock.connect(('localhost', 25000)) 20 | sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1) 21 | 22 | N = 0 23 | results = [] 24 | 25 | 26 | def monitor(): 27 | global N 28 | while True: 29 | time.sleep(1) 30 | print(N, 'requests/sec') 31 | results.append(N) 32 | N = 0 33 | 34 | 35 | Thread(target=monitor, daemon=True).start() 36 | 37 | 38 | def print_average(): 39 | import statistics 40 | print('Average', statistics.mean(results), 'requests/sec') 41 | 42 | 43 | atexit.register(print_average) 44 | 45 | while True: 46 | sock.sendall(msg) 47 | nrecv = 0 48 | while nrecv < MSGSIZE: 49 | resp = sock.recv(MSGSIZE) 50 | if not resp: 51 | raise SystemExit() 52 | nrecv += len(resp) 53 | N += 1 54 | -------------------------------------------------------------------------------- /examples/bench/curioecho.py: -------------------------------------------------------------------------------- 1 | # A simple echo server 2 | 3 | from curio import run, tcp_server 4 | from curio.socket import IPPROTO_TCP, TCP_NODELAY 5 | 6 | 7 | async def echo_handler(client, addr): 8 | print('Connection from', addr) 9 | client.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1) 10 | while True: 11 | data = await client.recv(1000000) 12 | if not data: 13 | break 14 | await client.sendall(data) 15 | print('Connection closed') 16 | 17 | 18 | if __name__ == '__main__': 19 | run(tcp_server, '', 25000, echo_handler) 20 | -------------------------------------------------------------------------------- /examples/bench/curiosslecho.py: -------------------------------------------------------------------------------- 1 | # curiosslecho.py 2 | # 3 | # Use sslclient.py to test 4 | 5 | import curio 6 | from curio import ssl 7 | from curio import network 8 | from socket import * 9 | 10 | 11 | KEYFILE = "ssl_test_rsa" # Private key 12 | CERTFILE = "ssl_test.crt" # Certificate (self-signed) 13 | 14 | 15 | async def handle(client, addr): 16 | print('Connection from', addr) 17 | client.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1) 18 | async with client: 19 | while True: 20 | data = await client.recv(100000) 21 | if not data: 22 | break 23 | await client.sendall(data) 24 | print('Connection closed') 25 | 26 | 27 | if __name__ == '__main__': 28 | ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) 29 | ssl_context.load_cert_chain(certfile=CERTFILE, keyfile=KEYFILE) 30 | curio.run(network.tcp_server, '', 25000, handle, ssl=ssl_context) 31 | -------------------------------------------------------------------------------- /examples/bench/curiosslstream.py: -------------------------------------------------------------------------------- 1 | from curio import run, tcp_server 2 | from curio import ssl 3 | from socket import * 4 | 5 | 6 | KEYFILE = "ssl_test_rsa" # Private key 7 | CERTFILE = "ssl_test.crt" # Certificate (self-signed) 8 | 9 | 10 | async def echo_handler(client, addr): 11 | print('Connection from', addr) 12 | try: 13 | client.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1) 14 | except (OSError, NameError): 15 | pass 16 | s = client.as_stream() 17 | while True: 18 | data = await s.read(102400) 19 | if not data: 20 | break 21 | await s.write(data) 22 | await s.close() 23 | print('Connection closed') 24 | 25 | 26 | if __name__ == 
'__main__': 27 | ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) 28 | ssl_context.load_cert_chain(certfile=CERTFILE, keyfile=KEYFILE) 29 | run(tcp_server, '', 25000, echo_handler, ssl=ssl_context) 30 | -------------------------------------------------------------------------------- /examples/bench/curiostream.py: -------------------------------------------------------------------------------- 1 | from curio import run, spawn, tcp_server 2 | from socket import * 3 | 4 | 5 | async def echo_handler(client, addr): 6 | print('Connection from', addr) 7 | try: 8 | client.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1) 9 | except (OSError, NameError): 10 | pass 11 | s = client.as_stream() 12 | while True: 13 | data = await s.read(102400) 14 | if not data: 15 | break 16 | await s.write(data) 17 | await s.close() 18 | print('Connection closed') 19 | 20 | 21 | if __name__ == '__main__': 22 | run(tcp_server, '', 25000, echo_handler) 23 | -------------------------------------------------------------------------------- /examples/bench/echoclient.py: -------------------------------------------------------------------------------- 1 | # client.py 2 | # 3 | # Common client. Measure the response rate of the echo server 4 | # Sends a large number of requests. Measures how long it takes. 5 | 6 | from socket import * 7 | import time 8 | import sys 9 | 10 | if len(sys.argv) > 1: 11 | MSGSIZE = int(sys.argv[1]) 12 | else: 13 | MSGSIZE = 1000 14 | 15 | msg = b'x' * MSGSIZE 16 | 17 | 18 | def run_test(n): 19 | sock = socket(AF_INET, SOCK_STREAM) 20 | sock.connect(('localhost', 25000)) 21 | while n > 0: 22 | sock.sendall(msg) 23 | nrecv = 0 24 | while nrecv < MSGSIZE: 25 | resp = sock.recv(MSGSIZE) 26 | if not resp: 27 | raise SystemExit() 28 | nrecv += len(resp) 29 | n -= 1 30 | 31 | 32 | NMESSAGES = 1000000 33 | print('Sending', NMESSAGES, 'messages') 34 | start = time.time() 35 | run_test(NMESSAGES) 36 | end = time.time() 37 | duration = end - start 38 | print(NMESSAGES, 'in', duration) 39 | print(NMESSAGES / duration, 'requests/sec') 40 | -------------------------------------------------------------------------------- /examples/bench/gevecho.py: -------------------------------------------------------------------------------- 1 | from gevent.server import StreamServer 2 | 3 | # this handler will be run for each incoming connection in a dedicated greenlet 4 | 5 | 6 | def echo(socket, address): 7 | print('New connection from %s:%s' % address) 8 | while True: 9 | data = socket.recv(100000) 10 | if not data: 11 | break 12 | socket.sendall(data) 13 | socket.close() 14 | 15 | 16 | if __name__ == '__main__': 17 | server = StreamServer(('0.0.0.0', 25000), echo) 18 | server.serve_forever() 19 | -------------------------------------------------------------------------------- /examples/bench/gevsslecho.py: -------------------------------------------------------------------------------- 1 | from gevent.server import StreamServer 2 | from socket import IPPROTO_TCP, TCP_NODELAY 3 | 4 | # this handler will be run for each incoming connection in a dedicated greenlet 5 | 6 | 7 | def echo(socket, address): 8 | print('New connection from %s:%s' % address) 9 | socket.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1) 10 | while True: 11 | data = socket.recv(102400) 12 | if not data: 13 | break 14 | socket.sendall(data) 15 | socket.close() 16 | 17 | 18 | if __name__ == '__main__': 19 | KEYFILE = "ssl_test_rsa" 20 | CERTFILE = "ssl_test.crt" 21 | server = StreamServer(('0.0.0.0', 25000), echo, 22 | keyfile=KEYFILE, certfile=CERTFILE) 
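    # With keyfile/certfile supplied, StreamServer wraps each accepted
    # connection in SSL before handing it to echo() (gevent behavior,
    # not shown in this file).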
23 | server.serve_forever() 24 | -------------------------------------------------------------------------------- /examples/bench/nodeecho.js: -------------------------------------------------------------------------------- 1 | // Taken from http://learn.bevry.me/node/server/ 2 | // Simple install node.js, 3 | // run with (e.g.): 4 | // $ /usr/local/bin/node nodeecho.js 5 | 6 | var net = require('net'); 7 | net.createServer(function(socket){ 8 | socket.on('data', function(data){ 9 | socket.write(data) 10 | }); 11 | }).listen(25000); 12 | -------------------------------------------------------------------------------- /examples/bench/process_perf.py: -------------------------------------------------------------------------------- 1 | # process.py 2 | # 3 | # Performance test of submitting work to a process pool. 4 | 5 | import time 6 | import curio 7 | from multiprocessing import Pool 8 | from concurrent.futures import ProcessPoolExecutor 9 | import asyncio 10 | 11 | COUNT = 10000 12 | 13 | 14 | def fib(n): 15 | if n <= 2: 16 | return 1 17 | else: 18 | return fib(n - 1) + fib(n - 2) 19 | 20 | 21 | def curio_test(x): 22 | async def main(): 23 | for n in range(COUNT): 24 | await curio.run_in_process(fib, x) 25 | 26 | start = time.time() 27 | curio.run(main()) 28 | end = time.time() 29 | print('Curio:', end - start) 30 | 31 | 32 | def mp_test(x): 33 | pool = Pool() 34 | 35 | def main(): 36 | for n in range(COUNT): 37 | pool.apply(fib, (x,)) 38 | start = time.time() 39 | main() 40 | end = time.time() 41 | print('multiprocessing:', end - start) 42 | 43 | 44 | def future_test(x): 45 | pool = ProcessPoolExecutor() 46 | 47 | def main(): 48 | for n in range(COUNT): 49 | f = pool.submit(fib, x) 50 | f.result() 51 | start = time.time() 52 | main() 53 | end = time.time() 54 | print('concurrent.futures:', end - start) 55 | 56 | 57 | def asyncio_test(x): 58 | pool = ProcessPoolExecutor() 59 | 60 | async def main(loop): 61 | for n in range(COUNT): 62 | await loop.run_in_executor(pool, fib, x) 63 | 64 | loop = asyncio.get_event_loop() 65 | start = time.time() 66 | loop.run_until_complete(asyncio.ensure_future(main(loop))) 67 | end = time.time() 68 | print('asyncio:', end - start) 69 | 70 | 71 | def uvloop_test(x): 72 | try: 73 | import uvloop 74 | except ImportError: 75 | return 76 | 77 | pool = ProcessPoolExecutor() 78 | 79 | async def main(loop): 80 | for n in range(COUNT): 81 | await loop.run_in_executor(pool, fib, x) 82 | 83 | loop = uvloop.new_event_loop() 84 | asyncio.set_event_loop(loop) 85 | start = time.time() 86 | loop.run_until_complete(asyncio.ensure_future(main(loop))) 87 | end = time.time() 88 | print('uvloop:', end - start) 89 | 90 | 91 | if __name__ == '__main__': 92 | import sys 93 | if len(sys.argv) != 2: 94 | raise SystemExit('Usage: %s n' % sys.argv[0]) 95 | x = int(sys.argv[1]) 96 | asyncio_test(x) 97 | uvloop_test(x) 98 | future_test(x) 99 | mp_test(x) 100 | curio_test(x) 101 | -------------------------------------------------------------------------------- /examples/bench/ssl_test.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICljCCAf8CAQMwDQYJKoZIhvcNAQEEBQAwRzELMAkGA1UEBhMCVVMxETAPBgNV 3 | BAgTCElsbGlub2lzMRAwDgYDVQQHEwdDaGljYWdvMRMwEQYDVQQKEwpEYWJlYXog 4 | TExDMB4XDTE1MTAyMzIwNDkzNloXDTE2MTAyMjIwNDkzNlowXjELMAkGA1UEBhMC 5 | VVMxCzAJBgNVBAgTAklMMRAwDgYDVQQHEwdDaGljYWdvMSEwHwYDVQQKExhJbnRl 6 | cm5ldCBXaWRnaXRzIFB0eSBMdGQxDTALBgNVBAMTBEFDTUUwggEgMA0GCSqGSIb3 7 | 
DQEBAQUAA4IBDQAwggEIAoIBAQDSD+oyba20UfGGswSsKGo+se399tdpi0xnL5Au 8 | or3wAbzsXoOc4md7MDMoAcsbWOZGTmB46OI8yyK6u51hBgiNL2Cb9D1pOvCwGaiH 9 | u/aO5TB1hXrLeWMdGslbmfMfey2JElPNZIe0W6aiDSknVPs3GAiYd/ddDlhAzaUy 10 | AIvxlVKDjwq4A0n+CrSxm1nFW9vRuxgo6Hx5NMqOsIfyjuMl41p0B7ZcN7gsCNvB 11 | ykGlWs/7do8+90FUk4NKQt5BNn5I9qAhKwG9UX8WJswH9L25naDCzblVZ0sgPuUo 12 | fPl1NUPhm5/ShPwvUWVspxLae1ikvGAwabTU4btwudzX4bYfAgEjMA0GCSqGSIb3 13 | DQEBBAUAA4GBAFvnUXLV1HCSW9KY593PcfZv2Z5v01gVelKoY0CtZ6uzBA/bPQn+ 14 | +HkyqFCamy03vZ1jBKiMImm5cgOijvycqMvQ8qLnJ2K+E4O/YaY5EOe/LzJj5YkF 15 | t3TYWnSi9nEllysAUsFhh/ESVPt9HHuh3ljkg8ZGLvjTcTsgNUdI/cXI 16 | -----END CERTIFICATE----- 17 | -------------------------------------------------------------------------------- /examples/bench/ssl_test_rsa: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEogIBAAKCAQEA0g/qMm2ttFHxhrMErChqPrHt/fbXaYtMZy+QLqK98AG87F6D 3 | nOJnezAzKAHLG1jmRk5geOjiPMsiurudYQYIjS9gm/Q9aTrwsBmoh7v2juUwdYV6 4 | y3ljHRrJW5nzH3stiRJTzWSHtFumog0pJ1T7NxgImHf3XQ5YQM2lMgCL8ZVSg48K 5 | uANJ/gq0sZtZxVvb0bsYKOh8eTTKjrCH8o7jJeNadAe2XDe4LAjbwcpBpVrP+3aP 6 | PvdBVJODSkLeQTZ+SPagISsBvVF/FibMB/S9uZ2gws25VWdLID7lKHz5dTVD4Zuf 7 | 0oT8L1FlbKcS2ntYpLxgMGm01OG7cLnc1+G2HwIBIwKCAQAkArpvC3zdFVyav5pm 8 | p9ey+e5Iyzre5K4DD3fNevv9QiBjCONcuRkNzb+gdVYEsCd5xEsNacBiMXOq+5dS 9 | dhAYNAHu2WmdAsos0TLVcK3snFFzO3Qi4Zv2XF0IY4jDkXWFNleCS79+ARVAWgcO 10 | DpF3KLEEMdKiPPkvrjmTixCtEfkZPs+6GQW7hDCAUJC24XmTNu53mgP/RVpmtZec 11 | HWefhDCqpCnwRFUDklgzMBJTf1UomA4v7Jev0QXjIj1qs0I+6UB+5mxnFNUA796o 12 | CD0cnrmcWa1XTpfouNwCPI00cQIULgfhWf8swnLo63E3l9rPx5hHhVhzIq1tQaFE 13 | XyTLAoGBAPCRj4KjsEgWt9fBlnMp+76CWYje5vTPN2i+S9HeNEuPvXUoSjE3u9kR 14 | xfw4XkE0vh/avyWjNWdQ6NYlFt3bwgMIZWE2oVWQ6ZcG0dYm0QFY2n5M9YcnqlOJ 15 | jYniUJnyJyPD2gadwLCWsNZLbj7JI+nkrC1QK2ABDSi9WHuuWltPAoGBAN+JZncB 16 | a7XMJRdY2Zpwki10oSA6/xsDk30j7BcFrxBzosloVTsq7USFbTJe05WJiz9elcGE 17 | Mw/XZ2Ai5ETMXs4m60aw9DPTGQ2J3bhiSQKPJT0Qd78Fx9bJJa05IH0meulStRXn 18 | jKW+MkYn22pNDEeuro/ptHuJikm2DTRwBwQxAoGAFJ7DKHRuMhCTakPLES97+mLx 19 | u0ZOT395xyZBA1wwXj+FRI5swmPc5rhhbWPq0mObRI8XssTYsRWQTN6bj1v6r82F 20 | CFUjxYFzG5LeyTaG8XylA4LweUyK8TetC9GR4U9FLvOHt2ybfNm3YtND9sDIkGQO 21 | wg4vmoO/TKKD7VgWX5kCgYEA0sNgnBdITFLD4tAdoD5AroPoYDegEifxdf1MUDiP 22 | HiPioKQzGoePQJsPL336sZBQFy1LXq/YX2Sx7O2yp0RZY0lEO1Zil0Ngw58+w8pi 23 | GFsUe2dMVQVzRtrpAmkQAPhlQmPskP7jsjb8M4SqTkilLaSzNztvp62RA6umDN6n 24 | h5sCgYEAx5gFJK5Kk2HP5Npz9h6zEPhEtrOxl+fm/0du0qyZ/vs0AHzDla77z9e1 25 | BFky5r6EQH+pTi4PeQWal/bffVSDNBBb8ilBeFIKVxNaMiCnlQvIbmWJMy3Q/vZS 26 | vXN6S4PRwhJmUUPe7yD+nXCN0wdsms9kgewID/Czx5O3pOogWz0= 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /examples/bench/sslclient.py: -------------------------------------------------------------------------------- 1 | # Copied with no modifications from the uvloop project 2 | # https://github.com/MagicStack/uvloop 3 | 4 | from concurrent.futures import ProcessPoolExecutor 5 | 6 | import argparse 7 | from socket import * 8 | import ssl 9 | 10 | import time 11 | 12 | 13 | if __name__ == '__main__': 14 | parser = argparse.ArgumentParser() 15 | parser.add_argument('--msize', default=1000, type=int, 16 | help='message size in bytes') 17 | parser.add_argument('--num', default=200000, type=int, 18 | help='number of messages') 19 | parser.add_argument('--times', default=1, type=int, 20 | help='number of times to run the test') 21 | parser.add_argument('--workers', default=3, type=int, 22 | help='number of workers') 23 | 
parser.add_argument('--addr', default='127.0.0.1:25000', type=str, 24 | help='number of workers') 25 | args = parser.parse_args() 26 | 27 | unix = False 28 | if args.addr.startswith('file:'): 29 | unix = True 30 | addr = args.addr[5:] 31 | else: 32 | addr = args.addr.split(':') 33 | addr[1] = int(addr[1]) 34 | addr = tuple(addr) 35 | print('will connect to: {}'.format(addr)) 36 | 37 | MSGSIZE = args.msize 38 | 39 | msg = b'x' * MSGSIZE 40 | 41 | def run_test(n): 42 | print('Sending', NMESSAGES, 'messages') 43 | if unix: 44 | sock = socket(AF_UNIX, SOCK_STREAM) 45 | else: 46 | sock = socket(AF_INET, SOCK_STREAM) 47 | 48 | sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1) 49 | sock.connect(addr) 50 | ssl_context = ssl.create_default_context() 51 | ssl_context.check_hostname = False 52 | ssl_context.verify_mode = ssl.CERT_NONE 53 | sock = ssl_context.wrap_socket(sock) 54 | sock.do_handshake() 55 | while n > 0: 56 | sock.sendall(msg) 57 | nrecv = 0 58 | while nrecv < MSGSIZE: 59 | resp = sock.recv(MSGSIZE) 60 | if not resp: 61 | raise SystemExit() 62 | nrecv += len(resp) 63 | n -= 1 64 | 65 | TIMES = args.times 66 | N = args.workers 67 | NMESSAGES = args.num 68 | start = time.time() 69 | for _ in range(TIMES): 70 | with ProcessPoolExecutor(max_workers=N) as e: 71 | for _ in range(N): 72 | e.submit(run_test, NMESSAGES) 73 | end = time.time() 74 | duration = end - start 75 | print(NMESSAGES * N * TIMES, 'in', duration) 76 | print(NMESSAGES * N * TIMES / duration, 'requests/sec') 77 | -------------------------------------------------------------------------------- /examples/bench/subproc_perf.py: -------------------------------------------------------------------------------- 1 | # subproc_perf.py 2 | 3 | from curio import * 4 | from curio.subprocess import check_output 5 | import time 6 | import subprocess 7 | import asyncio 8 | 9 | COUNT = 1000 10 | 11 | input = (b'aaa ' * 10 + b'\n') * 10000 12 | cmd = ['cat'] 13 | 14 | 15 | async def main(n): 16 | for x in range(n): 17 | out = await check_output(cmd, input=input) 18 | assert out == input 19 | 20 | 21 | def curio_test(n): 22 | start = time.time() 23 | run(main(n)) 24 | end = time.time() 25 | print('curio:', end - start) 26 | 27 | 28 | def subprocess_test(n): 29 | start = time.time() 30 | for x in range(n): 31 | out = subprocess.check_output(cmd, input=input) 32 | assert out == input 33 | end = time.time() 34 | print('subprocess:', end - start) 35 | 36 | 37 | def asyncio_test(n): 38 | async def main(n): 39 | for x in range(n): 40 | proc = await asyncio.create_subprocess_exec( 41 | *cmd, 42 | stdin=asyncio.subprocess.PIPE, 43 | stdout=asyncio.subprocess.PIPE) 44 | stdout, stderr = await proc.communicate(input=input) 45 | await proc.wait() 46 | assert stdout == input 47 | 48 | loop = asyncio.get_event_loop() 49 | start = time.time() 50 | loop.run_until_complete(asyncio.ensure_future(main(n))) 51 | end = time.time() 52 | print('asyncio:', end - start) 53 | 54 | 55 | def uvloop_test(n): 56 | try: 57 | import uvloop 58 | except ImportError: 59 | return 60 | 61 | async def main(n): 62 | for x in range(n): 63 | proc = await asyncio.create_subprocess_exec( 64 | *cmd, 65 | stdin=asyncio.subprocess.PIPE, 66 | stdout=asyncio.subprocess.PIPE) 67 | stdout, stderr = await proc.communicate(input=input) 68 | await proc.wait() 69 | assert stdout == input 70 | 71 | loop = uvloop.new_event_loop() 72 | asyncio.set_event_loop(loop) 73 | start = time.time() 74 | loop.run_until_complete(asyncio.ensure_future(main(n))) 75 | end = time.time() 76 | print('uvloop:', end - 
start) 77 | 78 | 79 | if __name__ == '__main__': 80 | curio_test(COUNT) 81 | subprocess_test(COUNT) 82 | asyncio_test(COUNT) 83 | uvloop_test(COUNT) 84 | -------------------------------------------------------------------------------- /examples/bench/thread_perf.py: -------------------------------------------------------------------------------- 1 | # thread.py 2 | # 3 | # Performance test of submitting work to a thread pool 4 | 5 | import time 6 | import curio 7 | from concurrent.futures import ThreadPoolExecutor 8 | import asyncio 9 | 10 | COUNT = 25000 11 | 12 | 13 | def curio_test(): 14 | async def main(): 15 | for n in range(COUNT): 16 | await curio.run_in_thread(time.sleep, 0) 17 | 18 | start = time.time() 19 | curio.run(main()) 20 | end = time.time() 21 | print('Curio:', end - start) 22 | 23 | 24 | def future_test(): 25 | pool = ThreadPoolExecutor() 26 | 27 | def main(): 28 | for n in range(COUNT): 29 | f = pool.submit(time.sleep, 0) 30 | f.result() 31 | start = time.time() 32 | main() 33 | end = time.time() 34 | print('concurrent.futures:', end - start) 35 | 36 | 37 | def asyncio_test(): 38 | pool = ThreadPoolExecutor() 39 | 40 | async def main(loop): 41 | for n in range(COUNT): 42 | await loop.run_in_executor(pool, time.sleep, 0) 43 | 44 | loop = asyncio.get_event_loop() 45 | start = time.time() 46 | loop.run_until_complete(asyncio.ensure_future(main(loop))) 47 | end = time.time() 48 | print('asyncio:', end - start) 49 | 50 | 51 | def uvloop_test(): 52 | try: 53 | import uvloop 54 | except ImportError: 55 | return 56 | 57 | pool = ThreadPoolExecutor() 58 | 59 | async def main(loop): 60 | for n in range(COUNT): 61 | await loop.run_in_executor(pool, time.sleep, 0) 62 | 63 | loop = uvloop.new_event_loop() 64 | asyncio.set_event_loop(loop) 65 | start = time.time() 66 | loop.run_until_complete(asyncio.ensure_future(main(loop))) 67 | end = time.time() 68 | print('uvloop:', end - start) 69 | 70 | 71 | if __name__ == '__main__': 72 | asyncio_test() 73 | uvloop_test() 74 | future_test() 75 | curio_test() 76 | -------------------------------------------------------------------------------- /examples/bench/threadecho.py: -------------------------------------------------------------------------------- 1 | # A simple echo server with threads 2 | 3 | from socket import * 4 | from threading import Thread 5 | 6 | 7 | def echo_server(addr): 8 | sock = socket(AF_INET, SOCK_STREAM) 9 | sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) 10 | sock.bind(addr) 11 | sock.listen(5) 12 | while True: 13 | client, addr = sock.accept() 14 | Thread(target=echo_handler, args=(client, addr), daemon=True).start() 15 | 16 | 17 | def echo_handler(client, addr): 18 | print('Connection from', addr) 19 | client.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1) 20 | with client: 21 | while True: 22 | data = client.recv(100000) 23 | if not data: 24 | break 25 | client.sendall(data) 26 | print('Connection closed') 27 | 28 | 29 | if __name__ == '__main__': 30 | echo_server(('', 25000)) 31 | -------------------------------------------------------------------------------- /examples/bench/threadsslecho.py: -------------------------------------------------------------------------------- 1 | # A simple echo server with threads 2 | 3 | from socket import * 4 | from threading import Thread 5 | import ssl 6 | 7 | KEYFILE = "ssl_test_rsa" # Private key 8 | CERTFILE = "ssl_test.crt" # Certificate (self-signed) 9 | 10 | 11 | def echo_server(addr): 12 | sock = socket(AF_INET, SOCK_STREAM) 13 | sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) 14 | 
sock.bind(addr) 15 | sock.listen(5) 16 | while True: 17 | client, addr = sock.accept() 18 | Thread(target=echo_handler, args=(client, addr), daemon=True).start() 19 | 20 | 21 | def echo_handler(client, addr): 22 | print('Connection from', addr) 23 | client.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1) 24 | ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) 25 | ssl_context.load_cert_chain(certfile=CERTFILE, keyfile=KEYFILE) 26 | client = ssl_context.wrap_socket(client, server_side=True) 27 | with client: 28 | while True: 29 | data = client.recv(100000) 30 | if not data: 31 | break 32 | client.sendall(data) 33 | print('Connection closed') 34 | 35 | 36 | if __name__ == '__main__': 37 | echo_server(('', 25000)) 38 | -------------------------------------------------------------------------------- /examples/bench/torecho.py: -------------------------------------------------------------------------------- 1 | 2 | from tornado.ioloop import IOLoop 3 | from tornado.tcpserver import TCPServer 4 | 5 | 6 | class EchoServer(TCPServer): 7 | 8 | def handle_stream(self, stream, address): 9 | self._stream = stream 10 | self._stream.read_until_close(None, self.handle_read) 11 | 12 | def handle_read(self, data): 13 | self._stream.write(data) 14 | 15 | 16 | if __name__ == '__main__': 17 | server = EchoServer() 18 | server.bind(25000) 19 | server.start(1) 20 | IOLoop.instance().start() 21 | IOLoop.instance().close() 22 | -------------------------------------------------------------------------------- /examples/bench/trioecho.py: -------------------------------------------------------------------------------- 1 | # echo-server-low-level.py 2 | 3 | import trio 4 | from socket import * 5 | 6 | # Port is arbitrary, but: 7 | # - must be in between 1024 and 65535 8 | # - can't be in use by some other program on your computer 9 | # - must match what we set in our echo client 10 | PORT = 25000 11 | # How much memory to spend (at most) on each call to recv. Pretty arbitrary, 12 | # but shouldn't be too big or too small. 13 | BUFSIZE = 1000000 14 | 15 | async def sendall(sock, data): 16 | while data: 17 | nsent = await sock.send(data) 18 | data = data[nsent:] 19 | 20 | async def echo_server(server_sock, ident): 21 | with server_sock: 22 | print("echo_server {}: started".format(ident)) 23 | try: 24 | while True: 25 | data = await server_sock.recv(BUFSIZE) 26 | #print("echo_server {}: received data {!r}".format(ident, data)) 27 | if not data: 28 | print("echo_server {}: connection closed".format(ident)) 29 | return 30 | #print("echo_server {}: sending data {!r}".format(ident, data)) 31 | #print(dir(server_sock)) 32 | #await server_sock.sendall(data) 33 | await sendall(server_sock, data) 34 | except Exception as exc: 35 | # Unhandled exceptions will propagate into our parent and take 36 | # down the whole program. If the exception is KeyboardInterrupt, 37 | # that's what we want, but otherwise maybe not... 
38 | print("echo_server {}: crashed: {!r}".format(ident, exc)) 39 | 40 | async def echo_listener(nursery): 41 | with trio.socket.socket() as listen_sock: 42 | # Notify the operating system that we want to receive connection 43 | # attempts at this address: 44 | listen_sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, True) 45 | await listen_sock.bind(("127.0.0.1", PORT)) 46 | listen_sock.listen() 47 | print("echo_listener: listening on 127.0.0.1:{}".format(PORT)) 48 | 49 | ident = 0 50 | while True: 51 | server_sock, _ = await listen_sock.accept() 52 | print("echo_listener: got new connection, spawning echo_server") 53 | ident += 1 54 | nursery.start_soon(echo_server, server_sock, ident) 55 | 56 | async def parent(): 57 | async with trio.open_nursery() as nursery: 58 | print("parent: spawning echo_listener") 59 | nursery.start_soon(echo_listener, nursery) 60 | 61 | trio.run(parent) 62 | -------------------------------------------------------------------------------- /examples/bench/twistecho.py: -------------------------------------------------------------------------------- 1 | 2 | # Copyright (c) Twisted Matrix Laboratories. 3 | # See LICENSE for details. 4 | 5 | 6 | from twisted.internet import reactor, protocol 7 | 8 | 9 | class Echo(protocol.Protocol): 10 | """This is just about the simplest possible protocol""" 11 | 12 | def dataReceived(self, data): 13 | "As soon as any data is received, write it back." 14 | self.transport.write(data) 15 | 16 | 17 | def main(): 18 | """This runs the protocol on port 25000""" 19 | factory = protocol.ServerFactory() 20 | factory.protocol = Echo 21 | reactor.listenTCP(25000, factory) 22 | reactor.run() 23 | 24 | 25 | # this only runs if the module was *not* imported 26 | if __name__ == '__main__': 27 | main() 28 | -------------------------------------------------------------------------------- /examples/bench/twistsslecho.py: -------------------------------------------------------------------------------- 1 | 2 | # Copyright (c) Twisted Matrix Laboratories. 3 | # See LICENSE for details. 4 | 5 | 6 | from twisted.internet import reactor, protocol, ssl 7 | 8 | 9 | class Echo(protocol.Protocol): 10 | """This is just about the simplest possible protocol""" 11 | 12 | def connectionMade(self): 13 | self.transport.setTcpNoDelay(True) 14 | 15 | def dataReceived(self, data): 16 | "As soon as any data is received, write it back." 
17 | self.transport.write(data) 18 | 19 | 20 | KEYFILE = "ssl_test_rsa" # Private key 21 | CERTFILE = "ssl_test.crt" # Certificate (self-signed) 22 | 23 | 24 | def main(): 25 | """This runs the protocol on port 25000""" 26 | factory = protocol.Factory() 27 | factory.protocol = Echo 28 | reactor.listenSSL(25000, factory, 29 | ssl.DefaultOpenSSLContextFactory(KEYFILE, CERTFILE)) 30 | reactor.run() 31 | 32 | 33 | # this only runs if the module was *not* imported 34 | if __name__ == '__main__': 35 | main() 36 | -------------------------------------------------------------------------------- /examples/bench/uvclient.py: -------------------------------------------------------------------------------- 1 | # Copied with no modifications from the uvloop project 2 | # https://github.com/MagicStack/uvloop 3 | 4 | from concurrent.futures import ProcessPoolExecutor 5 | 6 | import argparse 7 | from socket import * 8 | import time 9 | 10 | 11 | if __name__ == '__main__': 12 | parser = argparse.ArgumentParser() 13 | parser.add_argument('--msize', default=1000, type=int, 14 | help='message size in bytes') 15 | parser.add_argument('--num', default=200000, type=int, 16 | help='number of messages') 17 | parser.add_argument('--times', default=1, type=int, 18 | help='number of times to run the test') 19 | parser.add_argument('--workers', default=3, type=int, 20 | help='number of workers') 21 | parser.add_argument('--addr', default='127.0.0.1:25000', type=str, 22 | help='number of workers') 23 | args = parser.parse_args() 24 | 25 | unix = False 26 | if args.addr.startswith('file:'): 27 | unix = True 28 | addr = args.addr[5:] 29 | else: 30 | addr = args.addr.split(':') 31 | addr[1] = int(addr[1]) 32 | addr = tuple(addr) 33 | print('will connect to: {}'.format(addr)) 34 | 35 | MSGSIZE = args.msize 36 | 37 | msg = b'x' * MSGSIZE 38 | 39 | def run_test(n): 40 | print('Sending', NMESSAGES, 'messages') 41 | if unix: 42 | sock = socket(AF_UNIX, SOCK_STREAM) 43 | else: 44 | sock = socket(AF_INET, SOCK_STREAM) 45 | sock.connect(addr) 46 | while n > 0: 47 | sock.sendall(msg) 48 | nrecv = 0 49 | while nrecv < MSGSIZE: 50 | resp = sock.recv(MSGSIZE) 51 | if not resp: 52 | raise SystemExit() 53 | nrecv += len(resp) 54 | n -= 1 55 | 56 | TIMES = args.times 57 | N = args.workers 58 | NMESSAGES = args.num 59 | start = time.time() 60 | for _ in range(TIMES): 61 | with ProcessPoolExecutor(max_workers=N) as e: 62 | for _ in range(N): 63 | e.submit(run_test, NMESSAGES) 64 | end = time.time() 65 | duration = end - start 66 | print(NMESSAGES * N * TIMES, 'in', duration) 67 | print(NMESSAGES * N * TIMES / duration, 'requests/sec') 68 | -------------------------------------------------------------------------------- /examples/boundsema.py: -------------------------------------------------------------------------------- 1 | # boundsema.py 2 | # 3 | # Curio can often be extended to implement more specialized forms 4 | # of basic concurrency primitives. For example, here is how 5 | # you could implement a bound semaphore. 
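# A rough usage sketch for the BoundedSemaphore defined below (illustrative
# only; it would need to run inside a coroutine under curio.run()):
#
#     sema = BoundedSemaphore(2)
#     async with sema:           # acquire on entry, release on exit
#         ...
#     await sema.release()       # releasing past the bound raises ValueError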
6 | 7 | from curio import Semaphore 8 | 9 | class BoundedSemaphore(Semaphore): 10 | 11 | def __init__(self, value=1): 12 | self._bound = value 13 | super().__init__(value) 14 | 15 | @property 16 | def bound(self): 17 | return self._bound 18 | 19 | async def release(self): 20 | if self._value >= self._bound: 21 | raise ValueError('BoundedSemaphore released too many times') 22 | await super().release() 23 | 24 | -------------------------------------------------------------------------------- /examples/chat.py: -------------------------------------------------------------------------------- 1 | from curio import run, spawn, TaskGroup, Queue, tcp_server 2 | 3 | import logging 4 | log = logging.getLogger(__name__) 5 | 6 | messages = Queue() 7 | subscribers = set() 8 | 9 | async def dispatcher(): 10 | while True: 11 | msg = await messages.get() 12 | for q in subscribers: 13 | await q.put(msg) 14 | 15 | async def publish(msg, local): 16 | log.info('%r published %r', local['address'], msg) 17 | await messages.put(msg) 18 | 19 | async def outgoing(client_stream): 20 | queue = Queue() 21 | try: 22 | subscribers.add(queue) 23 | while True: 24 | name, msg = await queue.get() 25 | await client_stream.write(name + b':' + msg) 26 | finally: 27 | subscribers.discard(queue) 28 | 29 | async def incoming(client_stream, name, local): 30 | async for line in client_stream: 31 | await publish((name, line), local) 32 | 33 | async def chat_handler(client, addr): 34 | log.info('Connection from %r', addr) 35 | local = { 'address': addr } 36 | async with client: 37 | client_stream = client.as_stream() 38 | await client_stream.write(b'Your name: ') 39 | name = (await client_stream.readline()).strip() 40 | await publish((name, b'joined\n'), local) 41 | 42 | async with TaskGroup(wait=any) as workers: 43 | await workers.spawn(outgoing, client_stream) 44 | await workers.spawn(incoming, client_stream, name, local) 45 | 46 | await publish((name, b'has gone away\n'), local) 47 | 48 | log.info('%r connection closed', addr) 49 | 50 | async def chat_server(host, port): 51 | async with TaskGroup() as g: 52 | await g.spawn(dispatcher) 53 | await g.spawn(tcp_server, host, port, chat_handler) 54 | 55 | if __name__ == '__main__': 56 | logging.basicConfig(level=logging.INFO) 57 | run(chat_server('', 25000)) 58 | -------------------------------------------------------------------------------- /examples/curio_subprocess.py: -------------------------------------------------------------------------------- 1 | # curio_subprocess.py 2 | # 3 | # A curio-compatible standin for the subprocess module. Provides 4 | # asynchronous compatible versions of Popen(), check_output(), 5 | # and run() functions. 6 | 7 | __all__ = ['run', 'Popen', 'CompletedProcess', 'CalledProcessError', 8 | 'SubprocessError', 'check_output', 'PIPE', 'STDOUT', 'DEVNULL'] 9 | 10 | # -- Standard Library 11 | 12 | import subprocess 13 | import os 14 | import sys 15 | 16 | from subprocess import ( 17 | CompletedProcess, 18 | SubprocessError, 19 | CalledProcessError, 20 | PIPE, 21 | STDOUT, 22 | DEVNULL, 23 | ) 24 | 25 | # -- Curio 26 | 27 | from curio.task import spawn 28 | from curio.time import sleep 29 | from curio.errors import CancelledError 30 | from curio.io import FileStream 31 | from curio import thread 32 | from curio.workers import run_in_thread 33 | 34 | if sys.platform.startswith('win'): 35 | from curio.file import AsyncFile as FileStream 36 | 37 | class Popen(object): 38 | ''' 39 | Curio wrapper around the Popen class from the subprocess module. 
All of the 40 | methods from subprocess.Popen should be available, but the associated file 41 | objects for stdin, stdout, stderr have been replaced by async versions. 42 | Certain blocking operations (e.g., wait() and communicate()) have been 43 | replaced by async compatible implementations. Explicit timeouts 44 | are not available. Use the timeout_after() function for timeouts. 45 | ''' 46 | 47 | def __init__(self, args, **kwargs): 48 | if 'universal_newlines' in kwargs: 49 | raise RuntimeError('universal_newlines argument not supported') 50 | 51 | # If stdin has been given and it's set to a curio FileStream object, 52 | # then we need to flip it to blocking. 53 | if 'stdin' in kwargs: 54 | stdin = kwargs['stdin'] 55 | if isinstance(stdin, FileStream): 56 | # At hell's heart I stab thy coroutine attempting to read from a stream 57 | # that's been used as a pipe input to a subprocess. Must set back to 58 | # blocking or all hell breaks loose in the child. 59 | if hasattr(os, 'set_blocking'): 60 | os.set_blocking(stdin.fileno(), True) 61 | 62 | self._popen = subprocess.Popen(args, **kwargs) 63 | 64 | if self._popen.stdin: 65 | self.stdin = FileStream(self._popen.stdin) 66 | if self._popen.stdout: 67 | self.stdout = FileStream(self._popen.stdout) 68 | if self._popen.stderr: 69 | self.stderr = FileStream(self._popen.stderr) 70 | 71 | def __getattr__(self, name): 72 | return getattr(self._popen, name) 73 | 74 | async def wait(self): 75 | retcode = self._popen.poll() 76 | if retcode is None: 77 | retcode = await run_in_thread(self._popen.wait) 78 | return retcode 79 | 80 | async def communicate(self, input=b''): 81 | ''' 82 | Communicates with a subprocess. The input argument gives data to 83 | feed to the subprocess stdin stream. Returns a tuple (stdout, stderr) 84 | corresponding to the process output. If cancelled, the resulting 85 | cancellation exception has stdout and stderr 86 | attributes attached containing the bytes read so far.
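        A rough usage sketch (the 'cat' command is only an illustration; any
        subprocess works, and this must run inside a coroutine):

            p = Popen(['cat'], stdin=PIPE, stdout=PIPE)
            stdout, stderr = await p.communicate(b'hello')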
87 | ''' 88 | stdout_task = await spawn(self.stdout.readall) if self.stdout else None 89 | stderr_task = await spawn(self.stderr.readall) if self.stderr else None 90 | try: 91 | if input: 92 | await self.stdin.write(input) 93 | await self.stdin.close() 94 | 95 | stdout = await stdout_task.join() if stdout_task else b'' 96 | stderr = await stderr_task.join() if stderr_task else b'' 97 | return (stdout, stderr) 98 | except CancelledError as err: 99 | if stdout_task: 100 | await stdout_task.cancel() 101 | err.stdout = stdout_task.exception.bytes_read 102 | else: 103 | err.stdout = b'' 104 | 105 | if stderr_task: 106 | await stderr_task.cancel() 107 | err.stderr = stderr_task.exception.bytes_read 108 | else: 109 | err.stderr = b'' 110 | raise 111 | 112 | async def __aenter__(self): 113 | return self 114 | 115 | async def __aexit__(self, *args): 116 | if self.stdout: 117 | await self.stdout.close() 118 | 119 | if self.stderr: 120 | await self.stderr.close() 121 | 122 | if self.stdin: 123 | await self.stdin.close() 124 | 125 | # Wait for the process to terminate 126 | await self.wait() 127 | 128 | def __enter__(self): 129 | return thread.AWAIT(self.__aenter__()) 130 | 131 | def __exit__(self, *args): 132 | return thread.AWAIT(self.__aexit__(*args)) 133 | 134 | 135 | async def run(args, *, stdin=None, input=None, stdout=None, stderr=None, shell=False, check=False): 136 | ''' 137 | Curio-compatible version of subprocess.run() 138 | ''' 139 | if input: 140 | stdin = subprocess.PIPE 141 | else: 142 | stdin = None 143 | 144 | async with Popen(args, stdin=stdin, stdout=stdout, stderr=stderr, shell=shell) as process: 145 | try: 146 | stdout, stderr = await process.communicate(input) 147 | except CancelledError as err: 148 | process.kill() 149 | stdout, stderr = await process.communicate() 150 | # Append the remaining stdout, stderr to the exception 151 | err.stdout += stdout 152 | err.stderr += stderr 153 | raise err 154 | except: 155 | process.kill() 156 | raise 157 | 158 | retcode = process.poll() 159 | if check and retcode: 160 | raise CalledProcessError(retcode, process.args, 161 | output=stdout, stderr=stderr) 162 | return CompletedProcess(process.args, retcode, stdout, stderr) 163 | 164 | 165 | async def check_output(args, *, stdin=None, stderr=None, shell=False, input=None): 166 | ''' 167 | Curio compatible version of subprocess.check_output() 168 | ''' 169 | out = await run(args, stdout=PIPE, stdin=stdin, stderr=stderr, shell=shell, 170 | check=True, input=input) 171 | return out.stdout 172 | -------------------------------------------------------------------------------- /examples/curio_zmq.py: -------------------------------------------------------------------------------- 1 | # curio_zmq.py 2 | # 3 | # Curio support for ZeroMQ. Requires pyzmq. The following test programs 4 | # can be used to try it out: 5 | # 6 | # zmq_puller.py/zmq_pusher.py - Push/pull socket example 7 | # zmq_rpclient.py/zmq_rpcserv.py - RPC Server 8 | # 9 | ''' 10 | ZeroMQ wrapper module 11 | --------------------- 12 | 13 | The curio_zmq module provides an async wrapper around the third party 14 | pyzmq library for communicating via ZeroMQ. You use it in the same way except 15 | that certain operations are replaced by async functions. 16 | 17 | Context(*args, **kwargs) 18 | 19 | An asynchronous subclass of zmq.Context. It has the same arguments 20 | and methods as the synchronous class. Create ZeroMQ sockets using the 21 | socket() method of this class. 
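    A minimal sketch of creating such a socket (PUSH is just one possible
    socket type; fuller examples appear below):

        ctx = Context()
        sock = ctx.socket(PUSH)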
22 | 23 | Sockets created by the curio_zmq.Context() class have the following 24 | methods replaced by asynchronous versions: 25 | 26 | Socket.send(data, flags=0, copy=True, track=False) 27 | Socket.recv(flags=0, copy=True, track=False) 28 | Socket.send_multipart(msg_parts, flags=0, copy=True, track=False) 29 | Socket.recv_multipart(flags=0, copy=True, track=False) 30 | Socket.send_pyobj(obj, flags=0, protocol=pickle.DEFAULT_PROTOCOL) 31 | Socket.recv_pyobj(flags=0) 32 | Socket.send_json(obj, flags=0, **kwargs) 33 | Socket.recv_json(flags, **kwargs) 34 | Socket.send_string(u, flags=0, copy=True, encoding='utf-8') 35 | Socket.recv_string(flags=0, encoding='utf-8') 36 | 37 | Here is an example of task that uses a ZMQ PUSH socket:: 38 | 39 | import curio_zmq as zmq 40 | 41 | async def pusher(address): 42 | ctx = zmq.Context() 43 | sock = ctx.socket(zmq.PUSH) 44 | sock.bind(address) 45 | for n in range(100): 46 | await sock.send(b'Message %d' % n) 47 | await sock.send(b'exit') 48 | 49 | if __name__ == '__main__': 50 | zmq.run(pusher('tcp://*:9000')) 51 | 52 | Here is an example of a Curio task that receives messages:: 53 | 54 | import curio_zmq as zmq 55 | 56 | async def puller(address): 57 | ctx = zmq.Context() 58 | sock = ctx.socket(zmq.PULL) 59 | sock.connect(address) 60 | while True: 61 | msg = await sock.recv() 62 | if msg == b'exit': 63 | break 64 | print('Got:', msg) 65 | 66 | if __name__ == '__main__': 67 | zmq.run(puller('tcp://localhost:9000')) 68 | ''' 69 | 70 | import pickle 71 | from zmq.utils import jsonapi 72 | import zmq 73 | 74 | from curio.kernel import run # for import compatibility 75 | from curio.traps import _read_wait, _write_wait 76 | 77 | # Pull all ZMQ constants and exceptions into our namespace 78 | globals().update((key, val) for key, val in vars(zmq).items() 79 | if key.isupper() or 80 | (isinstance(val, type) and issubclass(val, zmq.ZMQBaseError))) 81 | 82 | class CurioZMQSocket(zmq.Socket): 83 | 84 | async def send(self, data, flags=0, copy=True, track=False): 85 | while True: 86 | try: 87 | return super().send(data, flags | zmq.NOBLOCK, copy, track) 88 | except zmq.Again: 89 | await _write_wait(self) 90 | 91 | async def recv(self, flags=0, copy=True, track=False): 92 | while True: 93 | try: 94 | return super().recv(flags | zmq.NOBLOCK, copy, track) 95 | except zmq.Again: 96 | await _read_wait(self) 97 | 98 | async def send_multipart(self, msg_parts, flags=0, copy=True, track=False): 99 | for msg in msg_parts[:-1]: 100 | await self.send(msg, zmq.SNDMORE | flags, copy=copy, track=track) 101 | return await self.send(msg_parts[-1], flags, copy=copy, track=track) 102 | 103 | async def recv_multipart(self, flags=0, copy=True, track=False): 104 | parts = [ await self.recv(flags, copy=copy, track=track) ] 105 | while self.getsockopt(zmq.RCVMORE): 106 | parts.append(await self.recv(flags, copy=copy, track=track)) 107 | return parts 108 | 109 | async def send_pyobj(self, obj, flags=0, protocol=pickle.DEFAULT_PROTOCOL): 110 | return await self.send(pickle.dumps(obj, protocol), flags) 111 | 112 | async def recv_pyobj(self, flags=0): 113 | return pickle.loads(await self.recv(flags)) 114 | 115 | async def send_json(self, obj, flags=0, **kwargs): 116 | return await self.send(jsonapi.dumps(obj, **kwargs), flags) 117 | 118 | async def recv_json(self, flags, **kwargs): 119 | return jsonapi.loads(await self.recv(flags), **kwargs) 120 | 121 | async def send_string(self, u, flags=0, copy=True, encoding='utf-8'): 122 | return await self.send(u.encode(encoding), flags=flags, 
copy=copy) 123 | 124 | async def recv_string(self, flags=0, encoding='utf-8'): 125 | return (await self.recv(flags=flags)).decode(encoding) 126 | 127 | class Context(zmq.Context): 128 | _socket_class = CurioZMQSocket 129 | -------------------------------------------------------------------------------- /examples/dualserv.py: -------------------------------------------------------------------------------- 1 | # dualserve.py 2 | # 3 | # An example of a server implementation that works both in Curio and 4 | # as a normal threaded application 5 | 6 | import threading 7 | from curio import run, spawn_thread, AWAIT 8 | 9 | # This is a normal synchronous function. It is used in both synchronous 10 | # and asynchronous code. 11 | 12 | def echo_handler(client, addr): 13 | print('Connection from', addr) 14 | with client: 15 | while True: 16 | data = AWAIT(client.recv, 100000) 17 | if not data: 18 | break 19 | AWAIT(client.sendall, b'Got:' + data) 20 | print('Connection closed') 21 | 22 | # A Traditional threaded server 23 | def threaded_echo_server(addr): 24 | import socket 25 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 26 | sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True) 27 | sock.bind(addr) 28 | sock.listen(5) 29 | print('Threaded server running on:', addr) 30 | while True: 31 | client, addr = sock.accept() 32 | threading.Thread(target=echo_handler, args=(client, addr), daemon=True).start() 33 | 34 | # An async server 35 | async def async_echo_server(addr): 36 | import curio.socket as socket 37 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 38 | sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True) 39 | sock.bind(addr) 40 | sock.listen(5) 41 | print('Async server running on:', addr) 42 | while True: 43 | client, addr = await sock.accept() 44 | await spawn_thread(echo_handler, client, addr, daemon=True) 45 | 46 | if __name__ == '__main__': 47 | threading.Thread(target=threaded_echo_server, args=(('',25000),)).start() 48 | run(async_echo_server, ('',26000)) 49 | 50 | 51 | 52 | -------------------------------------------------------------------------------- /examples/echoserv.py: -------------------------------------------------------------------------------- 1 | # Example: A simple echo server written directly with sockets 2 | 3 | from curio import run, spawn 4 | from curio.socket import * 5 | 6 | 7 | async def echo_server(address): 8 | sock = socket(AF_INET, SOCK_STREAM) 9 | sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) 10 | sock.bind(address) 11 | sock.listen(5) 12 | print('Server listening at', address) 13 | async with sock: 14 | while True: 15 | client, addr = await sock.accept() 16 | print('Connection from', addr) 17 | await spawn(echo_client, client) 18 | 19 | 20 | async def echo_client(client): 21 | async with client: 22 | while True: 23 | data = await client.recv(10000) 24 | if not data: 25 | break 26 | await client.sendall(data) 27 | print('Connection closed') 28 | 29 | 30 | if __name__ == '__main__': 31 | run(echo_server, ('', 25000)) 32 | -------------------------------------------------------------------------------- /examples/echoserv2.py: -------------------------------------------------------------------------------- 1 | # echoserv.py 2 | # 3 | # Echo server using the run_server() function 4 | 5 | from curio import run, tcp_server 6 | 7 | 8 | async def echo_client(client, addr): 9 | print('Connection from', addr) 10 | while True: 11 | data = await client.recv(1000) 12 | if not data: 13 | break 14 | await client.sendall(data) 15 | 
print('Connection closed') 16 | 17 | 18 | if __name__ == '__main__': 19 | run(tcp_server, '', 25000, echo_client) 20 | -------------------------------------------------------------------------------- /examples/echoserv3.py: -------------------------------------------------------------------------------- 1 | # echoserv.py 2 | # 3 | # Echo server using streams 4 | 5 | from curio import run, tcp_server 6 | 7 | async def echo_client(client, addr): 8 | print('Connection from', addr) 9 | s = client.as_stream() 10 | async for line in s: 11 | await s.write(line) 12 | print('Connection closed') 13 | await s.close() 14 | 15 | 16 | if __name__ == '__main__': 17 | run(tcp_server, '', 25000, echo_client) 18 | -------------------------------------------------------------------------------- /examples/fibserve.py: -------------------------------------------------------------------------------- 1 | # An example of a server involving a CPU-intensive task. We'll farm the 2 | # CPU-intensive work out to a separate process. 3 | 4 | from curio import run, run_in_process, tcp_server 5 | 6 | def fib(n): 7 | if n <= 2: 8 | return 1 9 | else: 10 | return fib(n - 1) + fib(n - 2) 11 | 12 | 13 | async def fib_handler(client, addr): 14 | print('Connection from', addr) 15 | s = client.as_stream() 16 | async for line in s: 17 | try: 18 | n = int(line) 19 | result = await run_in_process(fib, n) 20 | resp = str(result) + '\n' 21 | await s.write(resp.encode('ascii')) 22 | except ValueError: 23 | await s.write(b'Bad input\n') 24 | print('Connection closed') 25 | await client.close() 26 | 27 | 28 | if __name__ == '__main__': 29 | run(tcp_server, '', 25000, fib_handler) 30 | -------------------------------------------------------------------------------- /examples/guiserv.py: -------------------------------------------------------------------------------- 1 | # guiserv.py 2 | # 3 | # An example of connecting Curio and the Tkinter event loop 4 | import tkinter as tk 5 | from curio import * 6 | 7 | class EchoApp(object): 8 | def __init__(self): 9 | # Pending coroutines 10 | self.pending = [] 11 | 12 | # Main Tk window 13 | self.root = tk.Tk() 14 | 15 | # Number of clients connected label 16 | self.clients_label = tk.Label(text='') 17 | self.clients_label.pack() 18 | self.nclients = 0 19 | self.incr_clients(0) 20 | self.client_tasks = set() 21 | 22 | # Number of bytes received label 23 | self.bytes_received = 0 24 | self.bytes_label = tk.Label(text='') 25 | self.bytes_label.pack() 26 | self.update_bytes() 27 | 28 | # Disconnect all button 29 | self.disconnect_button = tk.Button(text='Disconnect all', 30 | command=lambda: self.pending.append(self.disconnect_all())) 31 | self.disconnect_button.pack() 32 | 33 | def incr_clients(self, delta=1): 34 | self.nclients += delta 35 | self.clients_label.configure(text='Number Clients %d' % self.nclients) 36 | 37 | def update_bytes(self): 38 | self.bytes_label.configure(text='Bytes received %d' % self.bytes_received) 39 | self.root.after(1000, self.update_bytes) 40 | 41 | async def echo_client(self, sock, address): 42 | self.incr_clients(1) 43 | self.client_tasks.add(await current_task()) 44 | try: 45 | async with sock: 46 | while True: 47 | data = await sock.recv(100000) 48 | if not data: 49 | break 50 | self.bytes_received += len(data) 51 | await sock.sendall(data) 52 | finally: 53 | self.incr_clients(-1) 54 | self.client_tasks.remove(await current_task()) 55 | 56 | async def disconnect_all(self): 57 | for task in list(self.client_tasks): 58 | await task.cancel() 59 | 60 | async def 
main(self): 61 | serv = await spawn(tcp_server, '', 25000, self.echo_client) 62 | while True: 63 | self.root.update() 64 | for coro in self.pending: 65 | await coro 66 | self.pending = [] 67 | await sleep(0.05) 68 | 69 | if __name__ == '__main__': 70 | app = EchoApp() 71 | run(app.main) 72 | 73 | -------------------------------------------------------------------------------- /examples/guiserv2.py: -------------------------------------------------------------------------------- 1 | # guiserv2.py 2 | # 3 | # Another example of integrating Curio with the Tkinter 4 | # event loop using UniversalQueue and threads. 5 | 6 | import tkinter as tk 7 | import threading 8 | from curio import * 9 | 10 | class EchoApp(object): 11 | def __init__(self): 12 | self.gui_ops = UniversalQueue(withfd=True) 13 | self.coro_ops = UniversalQueue() 14 | 15 | # Main Tk window 16 | self.root = tk.Tk() 17 | 18 | # Number of clients connected label 19 | self.clients_label = tk.Label(text='') 20 | self.clients_label.pack() 21 | self.nclients = 0 22 | self.incr_clients(0) 23 | self.client_tasks = set() 24 | 25 | # Number of bytes received label 26 | self.bytes_received = 0 27 | self.bytes_label = tk.Label(text='') 28 | self.bytes_label.pack() 29 | self.update_bytes() 30 | 31 | # Disconnect all button 32 | self.disconnect_button = tk.Button(text='Disconnect all', 33 | command=lambda: self.coro_ops.put(self.disconnect_all())) 34 | self.disconnect_button.pack() 35 | 36 | # Set up event handler for queued GUI updates 37 | self.root.createfilehandler(self.gui_ops, tk.READABLE, self.process_gui_ops) 38 | 39 | def incr_clients(self, delta=1): 40 | self.nclients += delta 41 | self.clients_label.configure(text='Number Clients %d' % self.nclients) 42 | 43 | def update_bytes(self): 44 | self.bytes_label.configure(text='Bytes received %d' % self.bytes_received) 45 | self.root.after(1000, self.update_bytes) 46 | 47 | def process_gui_ops(self, file, mask): 48 | while not self.gui_ops.empty(): 49 | func, args = self.gui_ops.get() 50 | func(*args) 51 | 52 | async def echo_client(self, sock, address): 53 | await self.gui_ops.put((self.incr_clients, (1,))) 54 | self.client_tasks.add(await current_task()) 55 | try: 56 | async with sock: 57 | while True: 58 | data = await sock.recv(100000) 59 | if not data: 60 | break 61 | self.bytes_received += len(data) 62 | await sock.sendall(data) 63 | finally: 64 | self.client_tasks.remove(await current_task()) 65 | await self.gui_ops.put((self.incr_clients, (-1,))) 66 | 67 | async def disconnect_all(self): 68 | for task in list(self.client_tasks): 69 | await task.cancel() 70 | 71 | async def main(self): 72 | serv = await spawn(tcp_server, '', 25000, self.echo_client) 73 | while True: 74 | coro = await self.coro_ops.get() 75 | await coro 76 | 77 | def run_forever(self): 78 | threading.Thread(target=run, args=(self.main,)).start() 79 | self.root.mainloop() 80 | 81 | if __name__ == '__main__': 82 | app = EchoApp() 83 | app.run_forever() 84 | 85 | 86 | -------------------------------------------------------------------------------- /examples/happy.py: -------------------------------------------------------------------------------- 1 | # happy.py 2 | # An implementation of RFC 6555 (Happy Eyeballs). 
3 | # See: https://tools.ietf.org/html/rfc6555 4 | 5 | from curio import socket, TaskGroup, ignore_after, run 6 | import itertools 7 | 8 | async def open_tcp_stream(hostname, port, delay=0.3): 9 | # Get all of the possible targets for a given host/port 10 | targets = await socket.getaddrinfo(hostname, port, type=socket.SOCK_STREAM) 11 | if not targets: 12 | raise OSError(f'nothing known about {hostname}:{port}') 13 | 14 | # Cluster the targets into unique address families (e.g., AF_INET, AF_INET6, etc.) 15 | # and make sure the first entries are from a different family. 16 | families = [ list(g) for _, g in itertools.groupby(targets, key=lambda t: t[0]) ] 17 | targets = [ fam.pop(0) for fam in families ] 18 | targets.extend(itertools.chain(*families)) 19 | 20 | # List of accumulated errors to report in case of total failure 21 | errors = [] 22 | 23 | # Task group to manage a collection concurrent tasks. 24 | # Cancels all remaining once an interesting result is returned. 25 | async with TaskGroup(wait=object) as group: 26 | 27 | # Attempt to make a connection request 28 | async def try_connect(sockargs, addr, errors): 29 | sock = socket.socket(*sockargs) 30 | try: 31 | await sock.connect(addr) 32 | return sock 33 | except Exception as e: 34 | await sock.close() 35 | errors.append(e) 36 | 37 | # Walk the list of targets and try connections with a staggered delay 38 | for *sockargs, _, addr in targets: 39 | await group.spawn(try_connect, sockargs, addr, errors) 40 | async with ignore_after(delay): 41 | task = await group.next_done() 42 | if not task.exception: 43 | group.completed = task 44 | break 45 | 46 | if group.completed: 47 | return group.completed.result 48 | else: 49 | raise OSError(errors) 50 | 51 | 52 | async def main(): 53 | result = await open_tcp_stream('www.python.org', 80) 54 | print(result) 55 | 56 | if __name__ == '__main__': 57 | run(main) 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | -------------------------------------------------------------------------------- /examples/pinger.py: -------------------------------------------------------------------------------- 1 | # Example of launching a subprocess and reading streaming output 2 | 3 | from curio import subprocess 4 | import curio 5 | 6 | 7 | async def main(): 8 | p = subprocess.Popen(['ping', 'www.python.org'], stdout=subprocess.PIPE) 9 | async for line in p.stdout: 10 | print('Got:', line.decode('ascii'), end='') 11 | 12 | 13 | if __name__ == '__main__': 14 | curio.run(main) 15 | -------------------------------------------------------------------------------- /examples/prodcons.py: -------------------------------------------------------------------------------- 1 | # prodcons.py 2 | # 3 | # Example of a producer/consumer setup with queues 4 | 5 | import curio 6 | 7 | 8 | async def producer(queue): 9 | for n in range(10): 10 | await queue.put(n) 11 | await queue.join() 12 | print('Producer done') 13 | 14 | 15 | async def consumer(queue): 16 | while True: 17 | item = await queue.get() 18 | print('Consumer got', item) 19 | await queue.task_done() 20 | 21 | 22 | async def main(): 23 | q = curio.Queue() 24 | prod_task = await curio.spawn(producer, q) 25 | cons_task = await curio.spawn(consumer, q) 26 | await prod_task.join() 27 | await cons_task.cancel() 28 | 29 | 30 | if __name__ == '__main__': 31 | curio.run(main) 32 | -------------------------------------------------------------------------------- /examples/promise.py: -------------------------------------------------------------------------------- 1 | # Example of 
implementing a Promise class using 2 | # built-in synchronization primitives. 3 | 4 | from curio import Event 5 | 6 | class Promise: 7 | def __init__(self): 8 | self._event = Event() 9 | self._data = None 10 | self._exception = None 11 | 12 | def __repr__(self): 13 | res = super().__repr__() 14 | if self.is_set(): 15 | extra = repr(self._exception) if self._exception else repr(self._data) 16 | else: 17 | extra = 'unset' 18 | return f'<{res[1:-1]} [{extra}]>' 19 | 20 | def is_set(self): 21 | '''Return `True` if the promise is set''' 22 | return self._event.is_set() 23 | 24 | def clear(self): 25 | '''Clear the promise''' 26 | self._data = None 27 | self._exception = None 28 | self._event.clear() 29 | 30 | async def set(self, data): 31 | '''Set the promise. Wake all waiting tasks (if any).''' 32 | self._data = data 33 | await self._event.set() 34 | 35 | async def get(self): 36 | '''Wait for the promise to be set, and return the data. 37 | 38 | If an exception was set, it will be raised.''' 39 | await self._event.wait() 40 | 41 | if self._exception is not None: 42 | raise self._exception 43 | 44 | return self._data 45 | 46 | async def __aenter__(self): 47 | return self 48 | 49 | async def __aexit__(self, exc_type, exc, tb): 50 | if exc_type is not None: 51 | self._exception = exc 52 | await self._event.set() 53 | 54 | return True 55 | 56 | async def consumer(promise): 57 | return await promise.get() 58 | 59 | def test_promise(kernel): 60 | async def producer(promise): 61 | await promise.set(42) 62 | 63 | async def main(): 64 | promise = Promise() 65 | assert not promise.is_set() 66 | 67 | producer_task = await curio.spawn(producer(promise)) 68 | 69 | assert 42 == await consumer(promise) 70 | assert promise.is_set() 71 | await producer_task.join() 72 | 73 | kernel.run(main()) 74 | 75 | def test_promise_exception(kernel): 76 | async def exception_producer(promise): 77 | async with promise: 78 | raise RuntimeError() 79 | 80 | async def main(): 81 | promise = Promise() 82 | producer_task = await curio.spawn(exception_producer(promise)) 83 | 84 | with pytest.raises(RuntimeError): 85 | await consumer(promise) 86 | 87 | await producer_task.join() 88 | 89 | kernel.run(main()) 90 | 91 | def test_promise_no_exception(kernel): 92 | async def main(): 93 | promise = Promise() 94 | async with promise: 95 | pass 96 | assert not promise.is_set() 97 | 98 | kernel.run(main()) 99 | 100 | def test_promise_internals(kernel): 101 | async def main(): 102 | promise = Promise() 103 | assert not promise.is_set() 104 | assert repr(promise).endswith('[unset]>') 105 | 106 | await promise.set(42) 107 | 108 | assert promise.is_set() 109 | assert promise._data == 42 110 | assert promise._exception is None 111 | assert repr(promise).endswith('[42]>') 112 | 113 | promise.clear() 114 | 115 | assert not promise.is_set() 116 | assert promise._data is None 117 | assert promise._exception is None 118 | assert repr(promise).endswith('[unset]>') 119 | 120 | async with promise: 121 | raise RuntimeError() 122 | 123 | assert promise.is_set() 124 | assert promise._data is None 125 | assert isinstance(promise._exception, RuntimeError) 126 | assert repr(promise).endswith('[RuntimeError()]>') 127 | 128 | promise.clear() 129 | 130 | assert not promise.is_set() 131 | assert promise._data is None 132 | assert promise._exception is None 133 | assert repr(promise).endswith('[unset]>') 134 | 135 | kernel.run(main()) 136 | -------------------------------------------------------------------------------- /examples/pytest_plugin.py: 
-------------------------------------------------------------------------------- 1 | # python3.7 2 | 3 | """Plugin module for pytest. 4 | 5 | This enables easier unit tests for applications that use both Curio and Pytest. If you have Curio 6 | installed, you have the plugin and can write unit tests per the example below. 7 | 8 | Provides a fixture named `kernel`, and a marker (pytest.mark.curio) that will run a bare coroutine 9 | in a new Kernel instance. 10 | 11 | Example: 12 | 13 | from curio import sleep 14 | import pytest 15 | 16 | # Use marker 17 | 18 | @pytest.mark.curio 19 | async def test_coro(): 20 | await sleep(1) 21 | 22 | 23 | # Use kernel fixture 24 | 25 | def test_app(kernel): 26 | 27 | async def my_aapp(): 28 | await sleep(1) 29 | 30 | kernel.run(my_aapp) 31 | """ 32 | 33 | import inspect 34 | import functools 35 | 36 | import pytest 37 | 38 | from curio import Kernel 39 | from curio import meta 40 | from curio import monitor 41 | from curio.debug import longblock, logcrash 42 | 43 | 44 | def _is_coroutine(obj): 45 | """Check to see if an object is really a coroutine.""" 46 | return meta.iscoroutinefunction(obj) or inspect.isgeneratorfunction(obj) 47 | 48 | 49 | def pytest_configure(config): 50 | """Inject documentation.""" 51 | config.addinivalue_line("markers", 52 | "curio: " 53 | "mark the test as a coroutine, it will be run using a Curio kernel.") 54 | 55 | 56 | @pytest.mark.tryfirst 57 | def pytest_pycollect_makeitem(collector, name, obj): 58 | """A pytest hook to collect coroutines in a test module.""" 59 | if collector.funcnamefilter(name) and _is_coroutine(obj): 60 | item = pytest.Function.from_parent(collector, name=name) 61 | if 'curio' in item.keywords: 62 | return list(collector._genfunctions(name, obj)) 63 | 64 | 65 | @pytest.hookimpl(tryfirst=True, hookwrapper=True) 66 | def pytest_pyfunc_call(pyfuncitem): 67 | """Run curio marked test functions in a Curio kernel instead of a normal function call. 68 | """ 69 | if pyfuncitem.get_closest_marker('curio'): 70 | pyfuncitem.obj = wrap_in_sync(pyfuncitem.obj) 71 | yield 72 | 73 | 74 | def wrap_in_sync(func): 75 | """Return a sync wrapper around an async function executing it in a Kernel.""" 76 | @functools.wraps(func) 77 | def inner(**kwargs): 78 | coro = func(**kwargs) 79 | Kernel().run(coro, shutdown=True) 80 | return inner 81 | 82 | 83 | # Fixture for explicitly running in Kernel instance. 84 | @pytest.fixture(scope='session') 85 | def kernel(request): 86 | """Provide a Curio Kernel object for running co-routines.""" 87 | k = Kernel(debug=[longblock, logcrash]) 88 | m = monitor.Monitor(k) 89 | request.addfinalizer(lambda: k.run(shutdown=True)) 90 | request.addfinalizer(m.close) 91 | return k 92 | -------------------------------------------------------------------------------- /examples/signal_handling.py: -------------------------------------------------------------------------------- 1 | # signal_handling.py 2 | # 3 | # This example illustrates how you might handle a Unix signal from Curio. 4 | # The basic idea is that you need to install a standard signal handler 5 | # using Python's built-in signal module. That handler then communicates 6 | # with a Curio task using a UniversalEvent (or a UniversalQueue). 7 | 8 | import signal 9 | import os 10 | import curio 11 | 12 | signal_evt = curio.UniversalEvent() 13 | 14 | # Ordinary Python signal handler.
15 | def sig_handler(signo, frame): 16 | signal_evt.set() 17 | 18 | # A Curio task waiting for an event 19 | async def coro(): 20 | print("Waiting....") 21 | await signal_evt.wait() 22 | print("Got a signal!") 23 | 24 | # Set up and execution 25 | def main(): 26 | signal.signal(signal.SIGHUP, sig_handler) 27 | print("Send me a SIGHUP", os.getpid()) 28 | curio.run(coro) 29 | 30 | if __name__ == '__main__': 31 | main() 32 | 33 | -------------------------------------------------------------------------------- /examples/ssl_conn.py: -------------------------------------------------------------------------------- 1 | # Example of making an SSL connection and downloading data 2 | 3 | import curio 4 | 5 | async def main(): 6 | sock = await curio.open_connection( 7 | 'www.python.org', 8 | 443, 9 | ssl=True, 10 | server_hostname='www.python.org' 11 | ) 12 | async with sock: 13 | await sock.sendall(b'GET / HTTP/1.0\r\nHost: www.python.org\r\n\r\n') 14 | chunks = [] 15 | while True: 16 | chunk = await sock.recv(10000) 17 | if not chunk: 18 | break 19 | chunks.append(chunk) 20 | 21 | response = b''.join(chunks) 22 | print(response.decode('latin-1')) 23 | 24 | 25 | if __name__ == '__main__': 26 | curio.run(main) 27 | -------------------------------------------------------------------------------- /examples/ssl_echo.py: -------------------------------------------------------------------------------- 1 | # ssl_echo 2 | # 3 | # An example of a simple SSL echo server. Use ssl_echo_client.py to test. 4 | 5 | import os 6 | import curio 7 | from curio import ssl 8 | from curio import network 9 | 10 | KEYFILE = 'ssl_test_rsa' # Private key 11 | # Certificate (self-signed) 12 | CERTFILE = 'ssl_test.crt' 13 | 14 | async def handle(client, addr): 15 | print('Connection from', addr) 16 | async with client: 17 | while True: 18 | data = await client.recv(1000) 19 | if not data: 20 | break 21 | await client.send(data) 22 | print('Connection closed') 23 | 24 | 25 | if __name__ == '__main__': 26 | ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) 27 | ssl_context.load_cert_chain(certfile=CERTFILE, keyfile=KEYFILE) 28 | curio.run(network.tcp_server('', 10000, handle, ssl=ssl_context)) 29 | -------------------------------------------------------------------------------- /examples/ssl_echo_client.py: -------------------------------------------------------------------------------- 1 | # ssl_echo 2 | # 3 | # An example of a simple SSL echo client. Use ssl_echo.py for the server. 4 | 5 | import curio 6 | from curio import ssl 7 | from curio import network 8 | 9 | 10 | async def main(host, port): 11 | ssl_context = ssl.create_default_context() 12 | ssl_context.check_hostname = False 13 | ssl_context.verify_mode = ssl.CERT_NONE 14 | sock = await network.open_connection( 15 | host, port, ssl=True, server_hostname=None) 16 | for i in range(1000): 17 | msg = ('Message %d' % i).encode('ascii') 18 | print(msg) 19 | await sock.sendall(msg) 20 | resp = await sock.recv(1000) 21 | assert msg == resp 22 | await sock.close() 23 | 24 | 25 | if __name__ == '__main__': 26 | curio.run(main, 'localhost', 10000) 27 | -------------------------------------------------------------------------------- /examples/ssl_http.py: -------------------------------------------------------------------------------- 1 | # ssl_http 2 | # 3 | # An example of a simple SSL server. 
To test, connect via browser 4 | 5 | import os 6 | import curio 7 | from curio import ssl 8 | import time 9 | 10 | 11 | KEYFILE = "ssl_test_rsa" # Private key 12 | # Certificate (self-signed) 13 | CERTFILE = "ssl_test.crt" 14 | 15 | 16 | async def handler(client, addr): 17 | s = client.as_stream() 18 | async for line in s: 19 | line = line.strip() 20 | if not line: 21 | break 22 | print(line) 23 | 24 | await s.write( 25 | b'''HTTP/1.0 200 OK\r 26 | Content-type: text/plain\r 27 | \r 28 | If you're seeing this, it probably worked. Yay! 29 | ''') 30 | await s.write(time.asctime().encode('ascii')) 31 | await client.close() 32 | 33 | 34 | if __name__ == '__main__': 35 | ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) 36 | ssl_context.load_cert_chain(certfile=CERTFILE, keyfile=KEYFILE) 37 | print('Connect to https://localhost:10000 to see if it works') 38 | curio.run(curio.tcp_server('', 10000, handler, ssl=ssl_context)) 39 | -------------------------------------------------------------------------------- /examples/ssl_test.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFlzCCA3+gAwIBAgIJAIjl8bnEmgY/MA0GCSqGSIb3DQEBCwUAMGIxCzAJBgNV 3 | BAYTAlVTMREwDwYDVQQIDAhJbGxpbm9pczEQMA4GA1UEBwwHQ2hpY2FnbzETMBEG 4 | A1UECgwKRGFiZWF6IExMQzEZMBcGA1UECwwQU3BlY2lhbCBQcm9qZWN0czAeFw0x 5 | ODAzMDExMTMyMjVaFw0yODAyMjcxMTMyMjVaMGIxCzAJBgNVBAYTAlVTMREwDwYD 6 | VQQIDAhJbGxpbm9pczEQMA4GA1UEBwwHQ2hpY2FnbzETMBEGA1UECgwKRGFiZWF6 7 | IExMQzEZMBcGA1UECwwQU3BlY2lhbCBQcm9qZWN0czCCAiIwDQYJKoZIhvcNAQEB 8 | BQADggIPADCCAgoCggIBALXHnzJH/KiadfKJhEAHaKFV4WrIuTkNIs0gVWtxDGie 9 | ZUlEQui/jHHsZJJqSXiHJZ7NpdsEd3D9lUaPPwQVnhBHrD7ZiFy5twCCSU0Jrx+u 10 | htc279wGsIudVAmwrDr67QttfMxcm6cuvOg2OOrPu8IhGdUJGV9bN+Qzd9nrAYFa 11 | xdr4Ge3fjAENHulw7UNxk8RAXFyMLGrA6EhRTR7wnVWfeTZ3vlTh8CAr08aT0CJL 12 | BDPmcit1zdCGzRL1KjGpHujN7+/eKFlhEFp7/CKpJcpylMWbeiYFIEdi8Y7DrOwl 13 | YVUqlWp/DYUMPuoKQkcrKUv1pYgbEFnoO6H4GJDiUFpb00RMcPkCtBFSo+dGmOJG 14 | 3jwVed/qQscDnm15CQSO2G3DM+sZSopwRO+uBHYV2FnnyeO/+yPN9nQqW7dDVVVv 15 | 1KqAIN6WtxEzpmxxK4O9bHtC+nt2ZjcSKYxN1FRIu12mb/XZL7ix2lIcpdAJBhL2 16 | iqHoQB8GqeB7Siux+uo/QLaJZmGlBGNKE3//UiymRTww8qOIKrf4xlei1z+QZbYQ 17 | e2fHs/S+1APuC7fWRLHS65DcHIbZcp9L6Y4ke93kbE+PfPnNdz9Cc8RYNMIPF8vd 18 | 3fAfIsxkdRcinlfXcrNFKjNyGD3sdlZLDTgI7sOvBEYgRJ6Zxh+amfvpLb7Jfa6l 19 | AgMBAAGjUDBOMB0GA1UdDgQWBBRmah48WiP6yGaU3vkoyL+enMKbeTAfBgNVHSME 20 | GDAWgBRmah48WiP6yGaU3vkoyL+enMKbeTAMBgNVHRMEBTADAQH/MA0GCSqGSIb3 21 | DQEBCwUAA4ICAQA3lbN9j5WnqJECnd6Ri1RvhTbDwuine+8KgB7n1M4bLGbsIpIX 22 | +9IDofkR7rgaXZi10XJhaT2vB20SfaYSFR9vPm85PTQzpyhvt8GBv7Si7xuqw9cE 23 | b5CUyV7mhy2cFbV5SFY6bcbnAdGLseODw8PYN7qxQQY4D8SRzMceXe90GsGxoenF 24 | ixzQBDoX85cH0Oose1rDqgjKGASPrDi1xygt0z51KWuCMsJmoEyjhftPJ78Jq/eK 25 | MRNC106Zjcyzp4qbDv+5dSrUDXyBHJaxaGet486Y/qHF/1ztqGumh5i2pTTzpmBB 26 | 5uxwMRbLPW+jIR9Q78GdcE/u8qQL1RSBvH/fSCBzA+9/pSX4zufi1QJtswXFpFit 27 | xEndfK0MnKePBOCuNIRKtBxkxPR43KJ/U9Whm1kYlQQYJOdh1s+WiR8XRTOhQDhu 28 | SSvl3DMybbhPKAu0RohpPKoCnJjbHEroTjMTUqF1Cbw7dkW4TZ8BF9EqsB5cja+j 29 | DlKs0r2xpFOzjidu/206XbKKfnIe4WwwNEy7PL5aJ6nhzNhyDnUMC49xw11hiYlk 30 | /TUtR87ZEbN8pcyHRTDtbRIKGh2O8UOaWL+oEwL5zrw+kt7OQiElr+WqmLVegbc3 31 | TxPRVQa3RZToHv9qMo6CG9ACI8QSr6najKTfYZpoER5LRj5dLh2h3dHRQw== 32 | -----END CERTIFICATE----- 33 | -------------------------------------------------------------------------------- /examples/ssl_test_readme.txt: -------------------------------------------------------------------------------- 1 | The following command is used to create the private key and 
certificate. 2 | 3 | openssl req -x509 -newkey rsa:4096 -keyout ssl_test_rsa -out ssl_test.crt -days 3650 -nodes 4 | 5 | -------------------------------------------------------------------------------- /examples/ssl_test_rsa: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQC1x58yR/yomnXy 3 | iYRAB2ihVeFqyLk5DSLNIFVrcQxonmVJRELov4xx7GSSakl4hyWezaXbBHdw/ZVG 4 | jz8EFZ4QR6w+2YhcubcAgklNCa8frobXNu/cBrCLnVQJsKw6+u0LbXzMXJunLrzo 5 | Njjqz7vCIRnVCRlfWzfkM3fZ6wGBWsXa+Bnt34wBDR7pcO1DcZPEQFxcjCxqwOhI 6 | UU0e8J1Vn3k2d75U4fAgK9PGk9AiSwQz5nIrdc3Qhs0S9SoxqR7oze/v3ihZYRBa 7 | e/wiqSXKcpTFm3omBSBHYvGOw6zsJWFVKpVqfw2FDD7qCkJHKylL9aWIGxBZ6Duh 8 | +BiQ4lBaW9NETHD5ArQRUqPnRpjiRt48FXnf6kLHA55teQkEjthtwzPrGUqKcETv 9 | rgR2FdhZ58njv/sjzfZ0Klu3Q1VVb9SqgCDelrcRM6ZscSuDvWx7Qvp7dmY3EimM 10 | TdRUSLtdpm/12S+4sdpSHKXQCQYS9oqh6EAfBqnge0orsfrqP0C2iWZhpQRjShN/ 11 | /1IspkU8MPKjiCq3+MZXotc/kGW2EHtnx7P0vtQD7gu31kSx0uuQ3ByG2XKfS+mO 12 | JHvd5GxPj3z5zXc/QnPEWDTCDxfL3d3wHyLMZHUXIp5X13KzRSozchg97HZWSw04 13 | CO7DrwRGIESemcYfmpn76S2+yX2upQIDAQABAoICAQCJnIZC3UZDKTNzGXG5uyIV 14 | SNtuKGg37V7UJM8lmB5JsOeCcJ+qbR/YOpnkUXwvNzheoNYXGKmHKyFvmg+devP+ 15 | 5RNbPPjDIYCNbRQqstMVS90eXaN1UMcj2kV9VHb962m6Bfe9Cbp39KTjsfCiqgI8 16 | syxIHyxLCNYl9mSb3xFzqJEx9DJE5r+pKBKnrGqhbQDtR7/j++XAJytxW8LjUkn5 17 | QNHVTw8SWTycTjFzGgEthNa4p5L0najiXLowyNJZZ+tTWFt0nhyzlQx5Q3UblCtc 18 | uCMx+UKeN7E4+Yleh3mIB8BIZgVY6rgEacHTEVMdZ2pK67qVb8+QEIPcWOHYBMvI 19 | ZOoOxrAjR7mS/UFokBlpbFSMoDSCiP18ybmsjoiv3sFQz5MKQocyCPQb2JlDzr4j 20 | RhpH5dXorYyRu/36L9UjTmQM9EndVB7ls3zLjEP2ABUOreMxWQwz7TJrb3/X8Dxm 21 | DmH0f+fTwuEaEIUeWv9utSbpG4zWsECZlnmdAoDWSSZTN164cpgECbae1YosXg+6 22 | kaKQ30SOv+yY7ZjEG2MNJr1l1xFuB01ey6HjOOvDyx3yHLp8aL4QjgMWvrutYq7d 23 | Jrn/V6bEoR+Uzlp0b68ytGq9SoEn8QkZOWn9yGjA31te1iVpoW+DT828iyBd5Zfg 24 | 5iLka8MeY/+2OtztNDJVIQKCAQEA4LKJ/nR6AIGnGs8joAw+XMFKWBWrJemBAopz 25 | xP/l/X7bNVqxSKY+5Ksc4J7IvbXyn+9DG/O/hGLMJXUAZiJIZIFx6FmIXQu3mTqq 26 | 0h9t0I8DUVPwACyoZtzr/E9zj2XWLDMKyPdFnW/PmF5USzd/xuImfrYottbF9QQa 27 | j0xEX+HbgKpuzfgRvHynHDfGmp9Q8PlBjuS6xoPCMe4dFDndP9aryuZrO0J3/7nl 28 | gatdQ1nManMh8L+BCQvcJFJcSTMk2K+Nr2bHvj2oZ3vahCHn9d8IxZHjCYWjFmCo 29 | b6QCvUj+x8vY0FLoKMJkLerzUAUD5zdYvEbLSAeQwS1cwmxyeQKCAQEAzxp/A1p5 30 | Z9U+TtJdpsbLld8/ROapOBNgydFf57NpJwNPlXXQuYsl10P1PHGRlXHu+GdRgI4o 31 | BvyLy+qWqB4R5vS8VFrMq/X2sLwvIV0tiCCZvehJTMMLQkmiqAN+ywfsF52y2Lfx 32 | aChK6kj8TPnFMswe1I0Xz0YKxALlw/40d62tSversCJ3iBh+NJ6p7a2/nETukUHU 33 | fgQ6Bs73DEMAgEzowLLS29JyowxOw2tDjJKks6TWVWQuENECHrJLrY95C0Zm4cUW 34 | y0HpCF/EguMYg4eJA75g7OuNEiSPd/SJSKISqJgngZZSm/VOEcHUG2nUWiteMjSO 35 | UdQROdQJvt8yjQKCAQEAp3mLTuPj1yLLuTiwtebKSHos/lBQOza6WHl3ZlrRQkrd 36 | +ft57cszT16WbHkM6BGIIFrF6zyW/4Zf4H8/Z+CfkoHEs8if0bIdCxFmJq0UPYGK 37 | mIB2frrKXbjGD03stPHTWyhEHpvnADxJP6j3LQTVsRf4VzNQT07vRvrjDPFseQav 38 | g0wEwHHoTlOefnK4DaE3RBSUH3wuFi44fNycQeoLFTzc2KM7Q3T8Wddlz2s5XSy+ 39 | Jo1fu8AQ3fw//baBYHHFQdg/Q3/Y2tu7/D2dFiTrbh/putBMp1k6wEAU6vkUrZ6N 40 | f79Y/5T7wv14i5z18yt5yElBUzt04l/5LXKxE5yfCQKCAQAWW5LopsTCe9fdJIKt 41 | tXpW9KHc61XLUvQ49Vx4I/svFgFqbiyZSzdLlO+NlekUjBczQ9wdl7skffENk4au 42 | IYjnOPZ32NGNouD91WL1JBz9PB+8y63WFpxJjK99mdr/ShyntAORt9Hc37SutDBg 43 | kvpB8J1Hbu/qFXKmFHX/gZribKJvnGsxeZcT3ykIvzQzD3XHBf4HOmcZL5WgFInB 44 | Dh6ouyTJYMS4rHBpv7NlD1hcfZxANIRa9ZSvhhDHhZH766gymmgS6Xb7fGMO31/U 45 | jXWhBWm6OLw393nkQoQmfASyDaUcBEkHiVXjtki2TAl86CDflxz1KI1Qsicl6gkf 46 | TPMlAoIBAQDP6XAZ6+tPT93nD4uwCdob+M2fRyVe6+s2TpPkLew7v654RLZV7G0b 47 | i95XCUPgsaIcdeH6zpAmZDJ8KSdLD4f88o0WoEW9PryHOGCEZQcBvtflVnH4pwx5 48 
| qMlDcjTCHkxkp5wCHwYuIhZbxxnU5DhfnNhat45PciLD+fyYtA7lbIysQqdWvffh 49 | kGJAhPc2MZNPqdPDrmAxbcDgw5mpJ7718pdfOZUEwIdbgl6oGZRijFn1i6NTdNPG 50 | BYSaJ+C60Zpad1E12BNC75cwuYVaR7MQo3JgHdHHhlFMZFLm/B9l7xDAL5oaf6ER 51 | A4GuYjfLPDE8jq2djRX6fnMekYAk1snk 52 | -----END PRIVATE KEY----- 53 | -------------------------------------------------------------------------------- /examples/udp_echo.py: -------------------------------------------------------------------------------- 1 | # udp_echo 2 | # 3 | # An example of a simple UDP echo server. 4 | 5 | import curio 6 | from curio import socket 7 | 8 | 9 | async def main(addr): 10 | sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 11 | sock.bind(addr) 12 | while True: 13 | data, addr = await sock.recvfrom(10000) 14 | print('Received from', addr, data) 15 | await sock.sendto(data, addr) 16 | 17 | 18 | if __name__ == '__main__': 19 | curio.run(main, ('', 26000)) 20 | -------------------------------------------------------------------------------- /examples/udp_echo_client.py: -------------------------------------------------------------------------------- 1 | # udp_echo 2 | # 3 | # An example of a UDP echo client 4 | 5 | import curio 6 | from curio import socket 7 | 8 | 9 | async def main(addr): 10 | sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 11 | await sock.connect(addr) 12 | for i in range(1000): 13 | msg = ('Message %d' % i).encode('ascii') 14 | print(msg) 15 | await sock.send(msg) 16 | resp = await sock.recv(1000) 17 | assert msg == resp 18 | await sock.close() 19 | 20 | 21 | if __name__ == '__main__': 22 | curio.run(main, ('localhost', 26000)) 23 | -------------------------------------------------------------------------------- /examples/unix_echo.py: -------------------------------------------------------------------------------- 1 | # Example: A simple Unix echo server 2 | 3 | from curio import run, unix_server 4 | 5 | 6 | async def echo_handler(client, address): 7 | print('Connection from', address) 8 | while True: 9 | data = await client.recv(10000) 10 | if not data: 11 | break 12 | await client.sendall(data) 13 | print('Connection closed') 14 | 15 | 16 | if __name__ == '__main__': 17 | import os 18 | try: 19 | os.remove('/tmp/curiounixecho') 20 | except: 21 | pass 22 | try: 23 | run(unix_server, '/tmp/curiounixecho', echo_handler) 24 | except KeyboardInterrupt: 25 | pass 26 | -------------------------------------------------------------------------------- /examples/unix_echo_client.py: -------------------------------------------------------------------------------- 1 | # unix_echo 2 | # 3 | # An example of a Unix domain socket echo client 4 | 5 | import curio 6 | 7 | 8 | async def main(addr): 9 | sock = await curio.open_unix_connection(addr) 10 | for i in range(1000): 11 | msg = ('Message %d' % i).encode('ascii') 12 | print(msg) 13 | await sock.send(msg) 14 | resp = await sock.recv(1000) 15 | assert msg == resp 16 | await sock.close() 17 | 18 | 19 | if __name__ == '__main__': 20 | try: 21 | curio.run(main, '/tmp/curiounixecho') 22 | except KeyboardInterrupt: 23 | pass 24 | -------------------------------------------------------------------------------- /examples/ws_server.py: -------------------------------------------------------------------------------- 1 | """A Curio websocket server. 2 | 3 | pip install wsproto before running this. 
4 | 5 | """ 6 | from curio import Queue, run, spawn, TaskGroup 7 | from curio.socket import IPPROTO_TCP, TCP_NODELAY 8 | from wsproto.connection import WSConnection, SERVER 9 | from wsproto.events import (ConnectionClosed, ConnectionRequested, TextReceived, 10 | BytesReceived) 11 | 12 | DATA_TYPES = (TextReceived, BytesReceived) 13 | 14 | 15 | async def ws_adapter(in_q, out_q, client, _): 16 | """A simple, queue-based Curio-Sans-IO websocket bridge.""" 17 | client.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1) 18 | wsconn = WSConnection(SERVER) 19 | closed = False 20 | 21 | while not closed: 22 | wstask = await spawn(client.recv, 65535) 23 | outqtask = await spawn(out_q.get) 24 | 25 | async with TaskGroup([wstask, outqtask]) as g: 26 | task = await g.next_done() 27 | result = await task.join() 28 | await g.cancel_remaining() 29 | 30 | if task is wstask: 31 | wsconn.receive_bytes(result) 32 | 33 | for event in wsconn.events(): 34 | cl = event.__class__ 35 | if cl in DATA_TYPES: 36 | await in_q.put(event.data) 37 | elif cl is ConnectionRequested: 38 | # Auto accept. Maybe consult the handler? 39 | wsconn.accept(event) 40 | elif cl is ConnectionClosed: 41 | # The client has closed the connection. 42 | await in_q.put(None) 43 | closed = True 44 | else: 45 | print(event) 46 | await client.sendall(wsconn.bytes_to_send()) 47 | else: 48 | # We got something from the out queue. 49 | if result is None: 50 | # Terminate the connection. 51 | print("Closing the connection.") 52 | wsconn.close() 53 | closed = True 54 | else: 55 | wsconn.send_data(result) 56 | payload = wsconn.bytes_to_send() 57 | await client.sendall(payload) 58 | print("Bridge done.") 59 | 60 | 61 | async def ws_echo_server(in_queue, out_queue): 62 | """Just echo websocket messages, reversed. Echo 3 times, then close.""" 63 | for _ in range(3): 64 | msg = await in_queue.get() 65 | if msg is None: 66 | # The ws connection was closed. 67 | break 68 | await out_queue.put(msg[::-1]) 69 | print("Handler done.") 70 | 71 | 72 | def serve_ws(handler): 73 | """Start processing web socket messages using the given handler.""" 74 | async def run_ws(client, addr): 75 | in_q, out_q = Queue(), Queue() 76 | ws_task = await spawn(ws_adapter, in_q, out_q, client, addr) 77 | await handler(in_q, out_q) 78 | await out_q.put(None) 79 | await ws_task.join() # Wait until it's done. 80 | # Curio will close the socket for us after we drop off here. 81 | print("Master task done.") 82 | 83 | return run_ws 84 | 85 | 86 | if __name__ == '__main__': 87 | from curio import tcp_server 88 | port = 5000 89 | print(f'Listening on port {port}.') 90 | run(tcp_server, '', port, serve_ws(ws_echo_server)) 91 | -------------------------------------------------------------------------------- /examples/zmq_puller.py: -------------------------------------------------------------------------------- 1 | # zmq pull client example. Requires zmq_pusher.py to be running 2 | 3 | import curio_zmq as zmq 4 | 5 | async def puller(address): 6 | ctx = zmq.Context() 7 | sock = ctx.socket(zmq.PULL) 8 | sock.connect(address) 9 | while True: 10 | msg = await sock.recv() 11 | if msg == b'exit': 12 | break 13 | print('Got:', msg) 14 | 15 | if __name__ == '__main__': 16 | zmq.run(puller, 'tcp://localhost:9000') 17 | -------------------------------------------------------------------------------- /examples/zmq_pusher.py: -------------------------------------------------------------------------------- 1 | # zmq push example. 
Run the zmq_puller.py program for the client 2 | 3 | import curio_zmq as zmq 4 | 5 | async def pusher(address): 6 | ctx = zmq.Context() 7 | sock = ctx.socket(zmq.PUSH) 8 | sock.bind(address) 9 | for n in range(100): 10 | await sock.send(b'Message %d' % n) 11 | await sock.send(b'exit') 12 | 13 | if __name__ == '__main__': 14 | zmq.run(pusher, 'tcp://*:9000') 15 | -------------------------------------------------------------------------------- /examples/zmq_rpcclient.py: -------------------------------------------------------------------------------- 1 | # zmq RPC client example. Requires zmq_rpcserv.py to be running 2 | 3 | import curio_zmq as zmq 4 | from curio import sleep, spawn 5 | 6 | from fibserve import fib 7 | 8 | async def ticker(): 9 | n = 0 10 | while True: 11 | await sleep(1) 12 | print('Tick:', n) 13 | n += 1 14 | 15 | async def client(address): 16 | # Run a background task to make sure the message passing operations don't block 17 | await spawn(ticker, daemon=True) 18 | 19 | # Compute the first 40 Fibonacci numbers 20 | ctx = zmq.Context() 21 | sock = ctx.socket(zmq.REQ) 22 | sock.connect(address) 23 | for n in range(1, 40): 24 | await sock.send_pyobj((fib, (n,), {})) 25 | result = await sock.recv_pyobj() 26 | print(n, result) 27 | 28 | if __name__ == '__main__': 29 | zmq.run(client, 'tcp://localhost:9000') 30 | -------------------------------------------------------------------------------- /examples/zmq_rpcserv.py: -------------------------------------------------------------------------------- 1 | # zmq RPC server example. 2 | 3 | import curio_zmq as zmq 4 | 5 | async def rpc_server(address): 6 | ctx = zmq.Context() 7 | sock = ctx.socket(zmq.REP) 8 | sock.bind(address) 9 | while True: 10 | func, args, kwargs = await sock.recv_pyobj() 11 | try: 12 | result = func(*args, **kwargs) 13 | await sock.send_pyobj(result) 14 | except Exception as e: 15 | await sock.send_pyobj(e) 16 | 17 | if __name__ == '__main__': 18 | zmq.run(rpc_server, 'tcp://*:9000') 19 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | ignore = E302,E402,F403,E265,E201,E124,E202,E123,E731 3 | max-line-length = 120 4 | exclude = tests/* 5 | max-complexity = 15 6 | 7 | [tool:pytest] 8 | testpaths = tests 9 | addopts = --verbose 10 | --ignore=setup.py --ignore=docs/conf.py 11 | markers = 12 | internet: mark tests as requiring internet connectivity (deselect with '-m "not internet"') 13 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | try: 2 | from setuptools import setup 3 | except ImportError: 4 | from distutils.core import setup 5 | 6 | tests_require = ['pytest', 'Sphinx'] 7 | 8 | long_description = """ 9 | Curio is a coroutine-based library for concurrent systems programming. No longer 10 | maintained as a PyPI project. The latest version is available on GitHub. 
11 | """ 12 | 13 | 14 | setup(name="curio", 15 | description="Curio", 16 | long_description=long_description, 17 | license="BSD", 18 | version="1.6", 19 | author="David Beazley", 20 | author_email="dave@dabeaz.com", 21 | maintainer="David Beazley", 22 | maintainer_email="dave@dabeaz.com", 23 | url="https://github.com/dabeaz/curio", 24 | packages=['curio'], 25 | tests_require=tests_require, 26 | extras_require={ 27 | 'test': tests_require, 28 | }, 29 | python_requires='>= 3.7', 30 | # This is disabled because it often causes interference with other testing 31 | # plugins people have written. Curio doesn't use it for its own testing. 32 | # entry_points={"pytest11": ["curio = curio.pytest_plugin"]}, 33 | classifiers=[ 34 | 'Programming Language :: Python :: 3', 35 | "Framework :: Pytest", 36 | ]) 37 | -------------------------------------------------------------------------------- /tests/child.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | for i in range(4, 0, -1): 4 | print('t-minus', i, flush=True) 5 | time.sleep(1) 6 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import socket 2 | import sys 3 | import pytest 4 | from curio import Kernel 5 | from curio.monitor import Monitor 6 | from curio.debug import * 7 | 8 | @pytest.fixture(scope='session') 9 | def kernel(request): 10 | k = Kernel(debug=[longblock, logcrash]) 11 | m = Monitor(k) 12 | request.addfinalizer(lambda: k.run(shutdown=True)) 13 | request.addfinalizer(m.close) 14 | return k 15 | 16 | 17 | # This is based on https://unix.stackexchange.com/a/132524 18 | @pytest.fixture(scope='function') 19 | def portno(): 20 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 21 | s.bind(('', 0)) 22 | _, port = s.getsockname() 23 | s.close() 24 | return port 25 | 26 | collect_ignore = [] 27 | if sys.version_info < (3,6): 28 | collect_ignore.append("test_asyncgen.py") 29 | 30 | -------------------------------------------------------------------------------- /tests/ichild.py: -------------------------------------------------------------------------------- 1 | # ichild.py 2 | 3 | import sys 4 | for lineno, line in enumerate(sys.stdin, start=1): 5 | pass 6 | 7 | print(lineno) 8 | 9 | 10 | -------------------------------------------------------------------------------- /tests/test_activation.py: -------------------------------------------------------------------------------- 1 | # test_activation.py 2 | 3 | from curio import Kernel, sleep, run 4 | from curio.kernel import Activation 5 | 6 | class _TestActivation(Activation): 7 | def __init__(self): 8 | self.events = [] 9 | 10 | def activate(self, kernel): 11 | self.events.append('activate') 12 | 13 | def created(self, task): 14 | if task.name.endswith('main'): 15 | self.events.append('created') 16 | 17 | def running(self, task): 18 | if task.name.endswith('main'): 19 | self.events.append('running') 20 | 21 | def suspended(self, task, trap): 22 | if task.name.endswith('main'): 23 | self.events.append('suspended') 24 | 25 | def terminated(self, task): 26 | if task.name.endswith('main'): 27 | self.events.append('terminated') 28 | 29 | def test_activation_base(): 30 | async def main(): 31 | await sleep(0.01) 32 | await sleep(0.01) 33 | await sleep(0.01) 34 | 35 | a = _TestActivation() 36 | run(main, activations=[a]) 37 | assert a.events == ['activate', 'created', 'running', 'suspended', 'running', 
'suspended', 38 | 'running', 'suspended', 'running', 'suspended', 'terminated'] 39 | 40 | def test_activation_crash(): 41 | async def main(): 42 | await sleep(0.01) 43 | raise ValueError("Dead") 44 | 45 | a = _TestActivation() 46 | kern = Kernel(activations=[a]) 47 | try: 48 | kern.run(main) 49 | assert False 50 | except ValueError as e: 51 | assert a.events == ['activate', 'created', 'running', 'suspended', 'running', 'suspended', 'terminated'] 52 | 53 | kern.run(shutdown=True) 54 | 55 | class _TestActivationCreate(Activation): 56 | def __init__(self): 57 | self.events = set() 58 | 59 | def created(self, task): 60 | self.events.add(task.name.split('.')[-1]) 61 | 62 | def test_activation_count(): 63 | async def main(): 64 | await sleep(0.001) 65 | 66 | a = _TestActivationCreate() 67 | run(main, activations=[a]) 68 | # There should be three tasks. main(), an in-kernel task, and a shutdown task 69 | assert a.events == { 'main', '_kernel_task', '_shutdown_tasks' } 70 | 71 | -------------------------------------------------------------------------------- /tests/test_asyncgen.py: -------------------------------------------------------------------------------- 1 | # test_asyncgen.py 2 | 3 | import pytest 4 | from curio import * 5 | from curio.meta import finalize, awaitable 6 | 7 | 8 | # Test to make sure a simple async generator runs 9 | def test_simple_agen(kernel): 10 | async def countdown(n): 11 | while n > 0: 12 | yield n 13 | n -= 1 14 | 15 | async def main(): 16 | nums = [ n async for n in countdown(5) ] 17 | assert nums == [5, 4, 3, 2, 1] 18 | 19 | kernel.run(main()) 20 | 21 | # Test to make sure a simple finally clause executes 22 | def test_simple_agen_final(kernel): 23 | results = [] 24 | async def countdown(n): 25 | try: 26 | while n > 0: 27 | yield n 28 | n -= 1 29 | finally: 30 | results.append('done') 31 | 32 | async def main(): 33 | nums = [ n async for n in countdown(5) ] 34 | assert nums == [5, 4, 3, 2, 1] 35 | 36 | kernel.run(main()) 37 | assert results == ['done'] 38 | 39 | # Make sure application of finalize() works 40 | def test_agen_final_finalize(kernel): 41 | async def countdown(n): 42 | try: 43 | while n > 0: 44 | yield n 45 | n -= 1 46 | finally: 47 | await sleep(0.0) 48 | 49 | async def main(): 50 | async with finalize(countdown(5)) as c: 51 | nums = [n async for n in c] 52 | assert nums == [5, 4, 3, 2, 1] 53 | 54 | kernel.run(main()) 55 | 56 | # Make sure a try-except without asyncs works 57 | def test_agen_except_ok(kernel): 58 | async def countdown(n): 59 | while n > 0: 60 | try: 61 | yield n 62 | except Exception: 63 | pass 64 | n -= 1 65 | 66 | async def main(): 67 | nums = [n async for n in countdown(5) ] 68 | assert nums == [5, 4, 3, 2, 1] 69 | 70 | kernel.run(main()) 71 | 72 | # Test to make sure a simple async generator runs 73 | def test_awaitable_agen(kernel): 74 | async def countdown(n): 75 | while n > 0: 76 | try: 77 | yield n 78 | except Exception: 79 | pass 80 | n -= 1 81 | 82 | def add(x, y): 83 | return x + y 84 | 85 | @awaitable(add) 86 | async def add(x, y): 87 | return x + y 88 | 89 | async def main(): 90 | nums = [ await add(n,n) async for n in countdown(5) ] 91 | assert nums == [10, 8, 6, 4, 2] 92 | 93 | kernel.run(main()) 94 | 95 | nums = [ add(n,n) for n in range(5,0,-1) ] 96 | assert nums == [10, 8, 6, 4, 2] 97 | -------------------------------------------------------------------------------- /tests/test_file.py: -------------------------------------------------------------------------------- 1 | # test_file.py 2 | 3 | import os.path 4 | from 
curio import * 5 | import pytest 6 | 7 | dirname = os.path.dirname(__file__) 8 | testinput = os.path.join(dirname, 'testdata.txt') 9 | 10 | 11 | def test_read(kernel): 12 | async def main(): 13 | async with aopen(testinput, 'r') as f: 14 | data = await f.read() 15 | assert f.closed == False 16 | 17 | assert data == 'line 1\nline 2\nline 3\n' 18 | 19 | kernel.run(main()) 20 | 21 | 22 | def test_readall(kernel): 23 | async def main(): 24 | async with aopen(testinput, 'r') as f: 25 | data = await f.readall() 26 | assert f.closed == False 27 | assert data == 'line 1\nline 2\nline 3\n' 28 | 29 | kernel.run(main()) 30 | 31 | 32 | def test_read1(kernel): 33 | async def main(): 34 | async with aopen(testinput, 'rb') as f: 35 | data = await f.read1(1000) 36 | with open(testinput, 'rb') as f: 37 | data2 = f.read1(1000) 38 | assert data == data2 39 | 40 | kernel.run(main()) 41 | 42 | 43 | def test_readinto(kernel): 44 | async def main(): 45 | async with aopen(testinput, 'rb') as f: 46 | buf = bytearray(1000) 47 | n = await f.readinto(buf) 48 | 49 | with open(testinput, 'rb') as f: 50 | buf2 = bytearray(1000) 51 | n2 = f.readinto(buf2) 52 | 53 | assert buf[:n] == buf2[:n2] 54 | 55 | kernel.run(main()) 56 | 57 | 58 | def test_readinto1(kernel): 59 | async def main(): 60 | async with aopen(testinput, 'rb') as f: 61 | buf = bytearray(1000) 62 | n = await f.readinto1(buf) 63 | with open(testinput, 'rb') as f: 64 | buf2 = bytearray(1000) 65 | n2 = f.readinto1(buf2) 66 | assert buf[:n] == buf2[:n] 67 | 68 | kernel.run(main()) 69 | 70 | 71 | def test_readline(kernel): 72 | async def main(): 73 | async with aopen(testinput, 'r') as f: 74 | lines = [] 75 | while True: 76 | line = await f.readline() 77 | if not line: 78 | break 79 | lines.append(line) 80 | 81 | assert lines == ['line 1\n', 'line 2\n', 'line 3\n'] 82 | 83 | kernel.run(main()) 84 | 85 | 86 | def test_readlines(kernel): 87 | async def main(): 88 | async with aopen(testinput, 'r') as f: 89 | lines = await f.readlines() 90 | 91 | assert lines == ['line 1\n', 'line 2\n', 'line 3\n'] 92 | 93 | kernel.run(main()) 94 | 95 | 96 | def test_readiter(kernel): 97 | async def main(): 98 | async with aopen(testinput, 'r') as f: 99 | lines = [] 100 | async for line in f: 101 | lines.append(line) 102 | 103 | assert lines == ['line 1\n', 'line 2\n', 'line 3\n'] 104 | 105 | kernel.run(main()) 106 | 107 | def test_read_anext(kernel): 108 | async def main(): 109 | async with aopen(testinput, 'r') as f: 110 | lines = [] 111 | while True: 112 | line = await anext(f, '') 113 | if not line: 114 | break 115 | lines.append(line) 116 | 117 | assert lines == ['line 1\n', 'line 2\n', 'line 3\n'] 118 | 119 | kernel.run(main()) 120 | 121 | def test_read_anext2(kernel): 122 | async def main(): 123 | async with aopen(testinput, 'r') as f: 124 | lines = [] 125 | with pytest.raises(StopAsyncIteration): 126 | while True: 127 | line = await anext(f) 128 | lines.append(line) 129 | 130 | assert lines == ['line 1\n', 'line 2\n', 'line 3\n'] 131 | 132 | kernel.run(main()) 133 | 134 | def test_bad_usage(kernel): 135 | async def main(): 136 | f = aopen(testinput, 'r') 137 | with pytest.raises(RuntimeError): 138 | await f.read() 139 | 140 | wlines = ['line1\n', 'line2\n', 'line3\n'] 141 | 142 | 143 | def test_write(kernel, tmpdir): 144 | async def main(): 145 | outname = tmpdir.join('tmp.txt') 146 | async with aopen(outname, 'w') as f: 147 | outdata = ''.join(wlines) 148 | await f.write(outdata) 149 | await f.flush() 150 | 151 | assert open(outname).read() == outdata 152 | 153 | 
kernel.run(main()) 154 | 155 | 156 | def test_writelines(kernel, tmpdir): 157 | async def main(): 158 | outname = tmpdir.join('tmp.txt') 159 | async with aopen(outname, 'w') as f: 160 | await f.writelines(wlines) 161 | 162 | assert open(outname).readlines() == wlines 163 | 164 | kernel.run(main()) 165 | 166 | 167 | def test_seek_tell(kernel): 168 | async def main(): 169 | async with aopen(testinput, 'rb') as f: 170 | await f.seek(10) 171 | n = await f.tell() 172 | assert n == 10 173 | data = await f.read() 174 | 175 | with open(testinput, 'rb') as f: 176 | f.seek(10) 177 | n2 = f.tell() 178 | assert n2 == 10 179 | data2 = f.read() 180 | 181 | assert data == data2 182 | 183 | kernel.run(main()) 184 | 185 | def test_truncate(kernel, tmpdir): 186 | async def main(): 187 | outname = tmpdir.join('tmp.txt') 188 | async with aopen(outname, 'wb') as f: 189 | await f.write(b'12345') 190 | await f.flush() 191 | assert await f.tell() == 5 192 | await f.truncate(2) 193 | await f.seek(2) 194 | await f.write(b'6789') 195 | await f.close() 196 | async with aopen(outname, 'rb') as f: 197 | data = await f.read() 198 | assert data == b'126789' 199 | 200 | kernel.run(main()) 201 | 202 | def test_sync_iter(kernel): 203 | async def main(): 204 | async with aopen(testinput, 'r') as f: 205 | try: 206 | for line in f: 207 | pass 208 | 209 | assert False, 'sync-iteration should have failed' 210 | except SyncIOError: 211 | assert True 212 | 213 | kernel.run(main()) 214 | 215 | 216 | def test_sync_with(kernel): 217 | async def main(): 218 | f = aopen(testinput, 'r') 219 | try: 220 | with f: 221 | pass 222 | assert False, 'sync-with should have failed' 223 | except AsyncOnlyError: 224 | assert True 225 | 226 | kernel.run(main()) 227 | 228 | def test_must_be_asynccontext(kernel): 229 | async def main(): 230 | f = aopen(testinput, 'r') 231 | with pytest.raises(RuntimeError): 232 | data = await f.read() 233 | 234 | kernel.run(main()) 235 | 236 | def test_blocking(kernel): 237 | async def main(): 238 | async with aopen(testinput, 'r') as f: 239 | with f.blocking() as sync_f: 240 | data = sync_f.read() 241 | 242 | assert data == 'line 1\nline 2\nline 3\n' 243 | 244 | kernel.run(main()) 245 | 246 | def test_file_misc(kernel): 247 | async def main(): 248 | f = aopen(testinput, 'r') 249 | repr(f) 250 | with pytest.raises(SyncIOError): 251 | next(f) 252 | 253 | kernel.run(main()) 254 | -------------------------------------------------------------------------------- /tests/test_meta.py: -------------------------------------------------------------------------------- 1 | from curio import meta 2 | from curio import * 3 | from functools import partial 4 | import pytest 5 | import sys 6 | import inspect 7 | 8 | def test_iscoroutinefunc(): 9 | async def spam(x, y): 10 | pass 11 | 12 | assert meta.iscoroutinefunction(partial(spam, 1)) 13 | 14 | def test_instantiate_coroutine(): 15 | async def coro(x, y): 16 | pass 17 | 18 | def func(x, y): 19 | pass 20 | 21 | c = meta.instantiate_coroutine(coro(2,3)) 22 | assert inspect.iscoroutine(c) 23 | 24 | d = meta.instantiate_coroutine(coro, 2, 3) 25 | assert inspect.iscoroutine(d) 26 | 27 | with pytest.raises(TypeError): 28 | meta.instantiate_coroutine(func(2,3)) 29 | 30 | with pytest.raises(TypeError): 31 | meta.instantiate_coroutine(func, 2, 3) 32 | 33 | 34 | def test_bad_awaitable(): 35 | def spam(x, y): 36 | pass 37 | 38 | with pytest.raises(TypeError): 39 | @meta.awaitable(spam) 40 | def spam(x, y, z): 41 | pass 42 | 43 | 44 | def test_awaitable_partial(kernel): 45 | def func(x, y, z): 
46 | assert False 47 | 48 | @meta.awaitable(func) 49 | async def func(x, y, z): 50 | assert x == 1 51 | assert y == 2 52 | assert z == 3 53 | return True 54 | 55 | async def main(): 56 | assert await func(1, 2, 3) 57 | assert await ignore_after(1, func(1,2,3)) 58 | assert await ignore_after(1, func, 1, 2, 3) 59 | assert await ignore_after(1, partial(func, 1, 2), 3) 60 | assert await ignore_after(1, partial(func, z=3), 1, 2) 61 | assert await ignore_after(1, partial(partial(func, 1), 2), 3) 62 | 63 | # Try spawns 64 | t = await spawn(func(1,2,3)) 65 | assert await t.join() 66 | 67 | t = await spawn(func, 1, 2, 3) 68 | assert await t.join() 69 | 70 | t = await spawn(partial(func, 1, 2), 3) 71 | assert await t.join() 72 | 73 | t = await spawn(partial(func, z=3), 1, 2) 74 | assert await t.join() 75 | 76 | t = await spawn(partial(partial(func, 1), 2), 3) 77 | assert await t.join() 78 | 79 | 80 | kernel.run(main) 81 | kernel.run(func, 1, 2, 3) 82 | kernel.run(partial(func, 1, 2), 3) 83 | kernel.run(partial(func, z=3), 1, 2) 84 | 85 | if sys.version_info >= (3,7): 86 | import contextlib 87 | def test_asynccontextmanager(kernel): 88 | results = [] 89 | @contextlib.asynccontextmanager 90 | async def manager(): 91 | try: 92 | yield (await coro()) 93 | finally: 94 | await cleanup() 95 | 96 | async def coro(): 97 | results.append('coro') 98 | return 'result' 99 | 100 | async def cleanup(): 101 | results.append('cleanup') 102 | 103 | async def main(): 104 | async with manager() as r: 105 | results.append(r) 106 | 107 | kernel.run(main) 108 | assert results == ['coro', 'result', 'cleanup'] 109 | 110 | 111 | def test_missing_asynccontextmanager(kernel): 112 | results = [] 113 | async def manager(): 114 | try: 115 | yield (await coro()) 116 | finally: 117 | await cleanup() 118 | 119 | async def coro(): 120 | results.append('coro') 121 | return 'result' 122 | 123 | async def cleanup(): 124 | results.append('cleanup') 125 | 126 | async def main(): 127 | async for x in manager(): 128 | break 129 | 130 | kernel.run(main) 131 | assert results == ['coro'] 132 | -------------------------------------------------------------------------------- /tests/test_thread.py: -------------------------------------------------------------------------------- 1 | # test_thread.py 2 | 3 | import pytest 4 | from curio import * 5 | from curio.thread import AWAIT, spawn_thread, is_async_thread 6 | from curio.file import aopen 7 | import time 8 | import pytest 9 | 10 | 11 | def simple_func(x, y): 12 | assert is_async_thread() 13 | AWAIT(sleep(0.25)) # Execute a blocking operation 14 | AWAIT(sleep, 0.25) # Alternative 15 | return x + y 16 | 17 | async def simple_coro(x, y): 18 | await sleep(0.5) 19 | return x + y 20 | 21 | def test_good_result(kernel): 22 | async def main(): 23 | t = await spawn_thread(simple_func, 2, 3) 24 | result = await t.join() 25 | assert result == 5 26 | assert t.result == 5 27 | assert t.exception is None 28 | 29 | kernel.run(main) 30 | 31 | def test_bad_result(kernel): 32 | async def main(): 33 | t = await spawn_thread(simple_func, 2, '3') 34 | try: 35 | result = await t.join() 36 | assert False 37 | except TaskError as e: 38 | assert isinstance(e.__cause__, TypeError) 39 | assert True 40 | else: 41 | assert False 42 | 43 | try: 44 | result = await t.result 45 | assert False 46 | except TypeError as e: 47 | assert True 48 | else: 49 | assert False 50 | 51 | kernel.run(main) 52 | 53 | def test_cancel_result(kernel): 54 | async def main(): 55 | t = await spawn_thread(simple_func, 2, 3) 56 | await sleep(0.25) 
57 | await t.cancel() 58 | try: 59 | result = await t.join() 60 | assert False 61 | except TaskError as e: 62 | assert isinstance(e.__cause__, TaskCancelled) 63 | assert True 64 | kernel.run(main) 65 | 66 | def test_thread_good_result(kernel): 67 | def coro(): 68 | result = AWAIT(simple_coro(2, 3)) 69 | return result 70 | 71 | async def main(): 72 | t = await spawn_thread(coro) 73 | result = await t.join() 74 | assert result == 5 75 | 76 | kernel.run(main) 77 | 78 | def test_thread_bad_result(kernel): 79 | def coro(): 80 | with pytest.raises(TypeError): 81 | result = AWAIT(simple_coro(2, '3')) 82 | 83 | async def main(): 84 | t = await spawn_thread(coro) 85 | await t.join() 86 | 87 | kernel.run(main) 88 | 89 | def test_thread_cancel_result(kernel): 90 | def func(): 91 | with pytest.raises(TaskCancelled): 92 | result = AWAIT(simple_coro(2, 3)) 93 | 94 | async def main(): 95 | t = await spawn_thread(func) 96 | await sleep(0.25) 97 | await t.cancel() 98 | 99 | kernel.run(main) 100 | 101 | def test_thread_sync(kernel): 102 | results = [] 103 | def func(lock): 104 | with lock: 105 | results.append('func') 106 | 107 | async def main(): 108 | lock = Lock() 109 | async with lock: 110 | results.append('main') 111 | t = await spawn_thread(func, lock) 112 | await sleep(0.5) 113 | results.append('main done') 114 | await t.join() 115 | 116 | kernel.run(main()) 117 | assert results == [ 'main', 'main done', 'func' ] 118 | 119 | 120 | def test_thread_timeout(kernel): 121 | 122 | def func(): 123 | with pytest.raises(TaskTimeout): 124 | with timeout_after(1): 125 | AWAIT(sleep(2)) 126 | 127 | async def main(): 128 | t = await spawn_thread(func) 129 | await t.join() 130 | 131 | kernel.run(main) 132 | 133 | 134 | def test_thread_disable_cancellation(kernel): 135 | def func(): 136 | with disable_cancellation(): 137 | AWAIT(sleep(1)) 138 | assert True 139 | 140 | with enable_cancellation(): 141 | AWAIT(sleep(2)) 142 | 143 | assert isinstance(AWAIT(check_cancellation()), TaskTimeout) 144 | 145 | with pytest.raises(TaskTimeout): 146 | AWAIT(sleep(2)) 147 | 148 | async def main(): 149 | t = await spawn_thread(func) 150 | await sleep(0.5) 151 | await t.cancel() 152 | 153 | kernel.run(main) 154 | 155 | import os 156 | dirname = os.path.dirname(__file__) 157 | testinput = os.path.join(dirname, 'testdata.txt') 158 | 159 | def test_thread_read(kernel): 160 | def func(): 161 | with aopen(testinput, 'r') as f: 162 | data = AWAIT(f.read()) 163 | assert f.closed == False 164 | 165 | assert data == 'line 1\nline 2\nline 3\n' 166 | 167 | async def main(): 168 | t = await spawn_thread(func) 169 | await t.join() 170 | 171 | kernel.run(main) 172 | 173 | def test_task_group_thread(kernel): 174 | results = [] 175 | async def add(x, y): 176 | return x + y 177 | 178 | def task(): 179 | task1 = AWAIT(spawn(add, 1, 1)) 180 | task2 = AWAIT(spawn(add, 2, 2)) 181 | task3 = AWAIT(spawn(add, 3, 3)) 182 | w = TaskGroup([task1, task2, task3]) 183 | with w: 184 | for task in w: 185 | result = AWAIT(task.join()) 186 | results.append(result) 187 | 188 | async def main(): 189 | t = await spawn_thread(task) 190 | await t.join() 191 | 192 | kernel.run(main) 193 | assert results == [2, 4, 6] 194 | 195 | def test_task_group_spawn_thread(kernel): 196 | def add(x, y): 197 | return x + y 198 | 199 | async def task(): 200 | async with TaskGroup(wait=all) as w: 201 | await w.spawn_thread(add, 1, 1) 202 | await w.spawn_thread(add, 2, 2) 203 | t3 = await w.spawn_thread(add, 3, 3) 204 | r3 = await t3.join() 205 | assert r3 == 6 206 | 207 | assert 
w.results == [2, 4] 208 | 209 | kernel.run(task) 210 | 211 | def test_await_passthrough(kernel): 212 | import time 213 | def add(x, y): 214 | AWAIT(time.sleep(0.1)) 215 | AWAIT(time.sleep, 0.1) 216 | return x + y 217 | async def main(): 218 | t = await spawn_thread(add, 2, 3) 219 | await t.wait() 220 | assert t.result == 5 221 | kernel.run(main) 222 | 223 | def test_errors(kernel): 224 | # spawn_thread used on a coroutine 225 | async def main(): 226 | with pytest.raises(TypeError): 227 | t = await spawn_thread(simple_coro, 2, 3) 228 | 229 | kernel.run(main) 230 | 231 | # AWAIT used on coroutine outside of async-thread 232 | with pytest.raises(AsyncOnlyError): 233 | AWAIT(simple_coro(2,3)) 234 | 235 | # Premature result 236 | async def f(): 237 | t = await spawn_thread(simple_func, 2, 3) 238 | assert t.state != 'TERMINATED' 239 | with pytest.raises(RuntimeError): 240 | r = t.result 241 | with pytest.raises(RuntimeError): 242 | e = t.exception 243 | 244 | kernel.run(f) 245 | 246 | # Launching a thread with no target 247 | async def g(): 248 | from curio.thread import AsyncThread 249 | t = AsyncThread() 250 | with pytest.raises(RuntimeError): 251 | await t.start() 252 | 253 | kernel.run(g) 254 | 255 | 256 | -------------------------------------------------------------------------------- /tests/test_timequeue.py: -------------------------------------------------------------------------------- 1 | 2 | from curio.timequeue import TimeQueue 3 | 4 | def test_timequeue_expired(): 5 | q = TimeQueue() 6 | 7 | delta = q.next_deadline(10) 8 | assert delta == None 9 | 10 | q.push('a', 50) 11 | q.push('b', 25) 12 | q.push('c', 100) 13 | # this should return the number of seconds to the next deadline 14 | delta = q.next_deadline(5) 15 | assert delta == 20 16 | items = list(q.expired(25)) 17 | assert items == [(25, 'b')] 18 | 19 | items = list(q.expired(101)) 20 | assert items == [(50, 'a'), (100, 'c')] 21 | 22 | -------------------------------------------------------------------------------- /tests/test_workers.py: -------------------------------------------------------------------------------- 1 | # test_workers.py 2 | 3 | import pytest 4 | 5 | import time 6 | from curio import * 7 | import pytest 8 | import sys 9 | 10 | def fib(n): 11 | if n <= 2: 12 | return 1 13 | else: 14 | return fib(n - 1) + fib(n - 2) 15 | 16 | #@pytest.mark.skipif(sys.platform.startswith("win"), 17 | # reason='broken on Windows') 18 | def test_cpu(kernel): 19 | results = [] 20 | 21 | async def spin(n): 22 | while n > 0: 23 | results.append(n) 24 | await sleep(0.1) 25 | n -= 1 26 | 27 | async def cpu_bound(n): 28 | r = await run_in_process(fib, n) 29 | results.append(('fib', r)) 30 | 31 | async def main(): 32 | async with TaskGroup() as g: 33 | await g.spawn(spin, 10) 34 | await g.spawn(cpu_bound, 36) 35 | 36 | kernel.run(main()) 37 | 38 | assert results == [ 39 | 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 40 | ('fib', 14930352) 41 | ] 42 | 43 | #@pytest.mark.skipif(sys.platform.startswith("win"), 44 | # reason='broken on Windows') 45 | def test_bad_cpu(kernel): 46 | async def main(): 47 | with pytest.raises(TypeError): 48 | r = await run_in_process(fib, '1') 49 | 50 | kernel.run(main()) 51 | 52 | 53 | def test_blocking(kernel): 54 | results = [] 55 | 56 | async def spin(n): 57 | while n > 0: 58 | results.append(n) 59 | await sleep(0.1) 60 | n -= 1 61 | 62 | async def blocking(n): 63 | await run_in_thread(time.sleep, n) 64 | results.append('sleep done') 65 | 66 | async def main(): 67 | async with TaskGroup() as g: 68 | await g.spawn(spin, 10) 
69 | await g.spawn(blocking, 2) 70 | 71 | kernel.run(main()) 72 | 73 | assert results == [ 74 | 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 75 | 'sleep done', 76 | ] 77 | 78 | @pytest.mark.parametrize('runner', [run_in_thread, run_in_process ]) 79 | def test_worker_cancel(kernel, runner): 80 | results = [] 81 | 82 | async def spin(n): 83 | while n > 0: 84 | results.append(n) 85 | await sleep(0.1) 86 | n -= 1 87 | 88 | async def blocking(n): 89 | task = await spawn(runner, time.sleep, n) 90 | await sleep(0.55) 91 | await task.cancel() 92 | try: 93 | await task.join() 94 | except TaskError as e: 95 | if isinstance(e.__cause__, CancelledError): 96 | results.append('cancel') 97 | else: 98 | results.append(repr(e.__cause__)) 99 | 100 | async def main(): 101 | async with TaskGroup() as g: 102 | await g.spawn(spin, 10) 103 | await g.spawn(blocking, 5) 104 | 105 | kernel.run(main()) 106 | 107 | assert results == [ 108 | 10, 9, 8, 7, 6, 5, 'cancel', 4, 3, 2, 1 109 | ] 110 | 111 | 112 | @pytest.mark.parametrize('runner', [run_in_thread, run_in_process]) 113 | def test_worker_timeout(kernel, runner): 114 | results = [] 115 | 116 | async def spin(n): 117 | while n > 0: 118 | results.append(n) 119 | await sleep(0.1) 120 | n -= 1 121 | 122 | async def blocking(n): 123 | try: 124 | result = await timeout_after(0.55, runner(time.sleep, n)) 125 | except TaskTimeout: 126 | results.append('cancel') 127 | 128 | async def main(): 129 | async with TaskGroup() as g: 130 | await g.spawn(spin, 10) 131 | await g.spawn(blocking, 5) 132 | 133 | kernel.run(main()) 134 | 135 | assert results == [ 136 | 10, 9, 8, 7, 6, 5, 'cancel', 4, 3, 2, 1 137 | ] 138 | 139 | 140 | def test_exception(kernel): 141 | results = [] 142 | 143 | async def error(): 144 | try: 145 | result = await run_in_thread(fib, '10') 146 | results.append('fail') 147 | except Exception as e: 148 | results.append(type(e)) 149 | results.append(e.__cause__) 150 | 151 | async def main(): 152 | await error() 153 | 154 | kernel.run(main()) 155 | 156 | assert results == [ 157 | TypeError, 158 | None 159 | ] 160 | -------------------------------------------------------------------------------- /tests/testdata.txt: -------------------------------------------------------------------------------- 1 | line 1 2 | line 2 3 | line 3 4 | --------------------------------------------------------------------------------