├── .github
│   ├── FUNDING.yml
│   └── workflows
│       ├── release.yml
│       └── run_tests.yml
├── .gitignore
├── tinyio
│   ├── __init__.py
│   ├── _time.py
│   ├── _sync.py
│   ├── _background.py
│   ├── _thread.py
│   └── _core.py
├── .pre-commit-config.yaml
├── tests
│   ├── test_examples.py
│   ├── test_background.py
│   ├── test_time.py
│   ├── test_thread.py
│   ├── test_sync.py
│   ├── test_core.py
│   └── test_errors.py
├── CONTRIBUTING.md
├── devdocs
│   ├── exception_groups.md
│   └── event_clear_and_timeout.md
├── pyproject.toml
├── README.md
└── LICENSE
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | github: [patrick-kidger]
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | **/__pycache__
2 | **/.ipynb_checkpoints
3 | *.egg-info/
4 | build/
5 | dist/
6 | site/
7 | examples/data
8 | examples/CIFAR
9 | .all_objects.cache
10 | .pymon
11 | .idea
12 | examples/MNIST
13 | examples/multipart_serialised.eqx
14 | .python-version
15 | .DS_Store
16 | .venv
17 | uv.lock
18 |
--------------------------------------------------------------------------------
/tinyio/__init__.py:
--------------------------------------------------------------------------------
1 | from ._background import as_completed as as_completed
2 | from ._core import (
3 | CancelledError as CancelledError,
4 | Coro as Coro,
5 | Event as Event,
6 | Loop as Loop,
7 | )
8 | from ._sync import Barrier as Barrier, Lock as Lock, Semaphore as Semaphore
9 | from ._thread import ThreadPool as ThreadPool, run_in_thread as run_in_thread
10 | from ._time import TimeoutError as TimeoutError, sleep as sleep, timeout as timeout
11 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Release
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 |
8 | jobs:
9 | build:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - name: Release
13 | uses: patrick-kidger/action_update_python_project@v8
14 | with:
15 | python-version: "3.11"
16 | test-script: |
17 | cp -r ${{ github.workspace }}/tests ./tests
18 | cp ${{ github.workspace }}/pyproject.toml ./pyproject.toml
19 | uv sync --extra tests --no-install-project --inexact
20 | uv run --no-sync pytest
21 | pypi-token: ${{ secrets.pypi_token }}
22 | github-user: patrick-kidger
23 | github-token: ${{ github.token }}
24 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: local
3 | hooks:
4 | - id: sort_pyproject
5 | name: sort_pyproject
6 | entry: toml-sort -i --sort-table-keys --sort-inline-tables
7 | language: python
8 | files: ^pyproject\.toml$
9 | additional_dependencies: ["toml-sort==0.23.1"]
10 | - repo: https://github.com/astral-sh/ruff-pre-commit
11 | rev: v0.13.0
12 | hooks:
13 | - id: ruff-format # formatter
14 | types_or: [python, pyi, jupyter, toml]
15 | - id: ruff # linter
16 | types_or: [python, pyi, jupyter, toml]
17 | args: [--fix]
18 | - repo: https://github.com/RobertCraigie/pyright-python
19 | rev: v1.1.405
20 | hooks:
21 | - id: pyright
22 | additional_dependencies:
23 | [pytest]
24 |
--------------------------------------------------------------------------------
/tests/test_examples.py:
--------------------------------------------------------------------------------
1 | import random
2 | import time
3 |
4 | import tinyio
5 |
6 |
7 | def test_dataloading():
8 | outs = []
9 |
10 | def slow_transform(x):
11 | time.sleep(random.uniform(0.01, 0.02)) # slow I/O bound work
12 | return x * x
13 |
14 | def main():
15 | iterator = range(100)
16 | pool = tinyio.ThreadPool(16)
17 | async_iterator = yield tinyio.as_completed({pool.run_in_thread(slow_transform, item) for item in iterator})
18 | while not async_iterator.done():
19 | out = yield async_iterator.get()
20 | outs.append(out)
21 | return outs
22 |
23 | loop = tinyio.Loop()
24 | out = loop.run(main())
25 | assert set(out) == {x**2 for x in range(100)}
26 | assert out != [x**2 for x in range(100)] # test not in order. Very low chance of failing, should be fine!
27 |
--------------------------------------------------------------------------------
/.github/workflows/run_tests.yml:
--------------------------------------------------------------------------------
1 | name: Run tests
2 |
3 | on:
4 | pull_request:
5 |
6 | jobs:
7 | run-test:
8 | strategy:
9 | matrix:
10 | python-version: [ "3.11", "3.13" ]
11 | os: [ ubuntu-latest ]
12 | fail-fast: false
13 | runs-on: ${{ matrix.os }}
14 | steps:
15 | - name: Checkout code
16 | uses: actions/checkout@v2
17 |
18 | - name: Set up Python ${{ matrix.python-version }}
19 | uses: actions/setup-python@v2
20 | with:
21 | python-version: ${{ matrix.python-version }}
22 |
23 | - name: Install dependencies
24 | run: |
25 | python -m pip install --upgrade pip
26 | python -m pip install '.[dev,tests]'
27 |
28 | - name: Checks with pre-commit
29 | run: |
30 | pre-commit run --all-files
31 |
32 | - name: Test with pytest
33 | run: |
34 | pytest
35 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | Contributions (pull requests) are very welcome! Here's how to get started.
4 |
5 | ---
6 |
7 | **Getting started**
8 |
9 | First fork the library on GitHub.
10 |
11 | Then clone and install the library:
12 |
13 | ```bash
14 | git clone https://github.com/your-username-here/tinyio.git
15 | cd tinyio
16 | pip install -e '.[dev]'
17 | pre-commit install # `pre-commit` is installed by `pip` on the previous line
18 | ```
19 |
20 | ---
21 |
22 | **If you're making changes to the code:**
23 |
24 | Now make your changes. Make sure to include additional tests if necessary.
25 |
26 | Next verify the tests all pass:
27 |
28 | ```bash
29 | pip install -e '.[tests]'
30 | pytest # `pytest` is installed by `pip` on the previous line.
31 | ```
32 |
33 | Then push your changes back to your fork of the repository:
34 |
35 | ```bash
36 | git push
37 | ```
38 |
39 | Finally, open a pull request on GitHub!
40 |
41 |
--------------------------------------------------------------------------------
/tests/test_background.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import tinyio
3 |
4 |
5 | def _sleep(x):
6 | yield tinyio.sleep(x)
7 | return x
8 |
9 |
10 | def test_as_completed():
11 | def _run():
12 | iterator = yield tinyio.as_completed({_sleep(0.3), _sleep(0.1), _sleep(0.2)})
13 | outs = []
14 | while not iterator.done():
15 | x = yield iterator.get()
16 | outs.append(x)
17 | return outs
18 |
19 | loop = tinyio.Loop()
20 | assert loop.run(_run()) == [0.1, 0.2, 0.3]
21 |
22 |
23 | def test_as_completed_out_of_order():
24 | def _run():
25 | iterator = yield tinyio.as_completed({_sleep(0.3), _sleep(0.1), _sleep(0.2)})
26 | get1 = iterator.get()
27 | get2 = iterator.get()
28 | get3 = iterator.get()
29 | with pytest.raises(RuntimeError, match="which is greater than the number of coroutines"):
30 | iterator.get()
31 | assert iterator.done()
32 | out3 = yield get3
33 | out2 = yield get2
34 | out1 = yield get1
35 | return [out1, out2, out3]
36 |
37 | loop = tinyio.Loop()
38 | assert loop.run(_run()) == [0.1, 0.2, 0.3]
39 |
--------------------------------------------------------------------------------
/tests/test_time.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import tinyio
4 |
5 |
6 | def test_sleep():
7 | outs = []
8 |
9 | def f():
10 | start = time.monotonic()
11 | yield [tinyio.sleep(0.05), tinyio.sleep(0.1)]
12 | actual_duration = time.monotonic() - start
13 | # Note that these are pretty inaccurate tolerances! This is about what we get with `asyncio` too.
14 | # The reason for this seems to be the accuracy in the `threading.Event.wait()` that we bottom out in. If we need
15 | # greater resolution than this then we could do that by using a busy-loop for the last 1e-2 seconds.
16 | success = 0.09 < actual_duration < 0.11
17 | outs.append(success)
18 |
19 | loop = tinyio.Loop()
20 | for _ in range(5):
21 | loop.run(f())
22 | assert sum(outs) >= 4 # We allow one failure, to decrease flakiness.
23 |
24 |
25 | def _sleep(x):
26 | yield tinyio.sleep(x)
27 | return 3
28 |
29 |
30 | def _test_timeout():
31 | out1, success1 = yield tinyio.timeout(_sleep(0.2), 0.3)
32 | out2, success2 = yield tinyio.timeout(_sleep(0.2), 0.1)
33 | assert out1 == 3
34 | assert out2 is None
35 | assert success1 is True
36 | assert success2 is False
37 |
38 |
39 | def test_timeout():
40 | loop = tinyio.Loop()
41 | loop.run(_test_timeout())
42 |
--------------------------------------------------------------------------------
/devdocs/exception_groups.md:
--------------------------------------------------------------------------------
1 | If a coroutine crashes, and all other coroutines cancel gracefully, then by default we raise the original exception directly - without it being wrapped in an exception group of one element.
2 |
3 | This is in contrast to e.g. trio's nurseries, which encourage setting a flag to raise a `{Base}ExceptionGroup`, even if it only contains one element. Presumably this is because it ensures a consistent 'type signature' for the function, in the sense that the raised error type is stable. (When there are multiple exceptions, you always get a `{Base}ExceptionGroup`.)
4 |
5 | The reason we do not do this by default is because of our particularly simple error semantics: if one coroutine crashes then we cancel all other coroutines. Thus, the 99% use-case is that there really is only one error, and not a collection of multiple potentially-unrelated errors that we want to raise all together.
6 |
7 | Correspondingly, we can get multiple errors in two cases only:
8 | - multiple `.run_in_thread`s raise independent errors at the same time.
9 | - when cancelling, a coroutine responds improperly to having a `tinyio.CancelledError` raised within it, and raises some other error.
10 |
11 | Both of these are fairly unusual. As such, raising the original error seems generally more useful for most users. It's arguably a little less robust programmatically, but it gives a much more pleasant experience when debugging stack traces in CI!
12 |
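For illustration, a minimal sketch of both behaviours. The coroutine names are made up; the `exception_group` argument is the one documented in the README, and we assume `exception_group=True` opts in to always receiving a group, even for a single error:

```python
import tinyio

def crashes():
    yield
    raise ValueError("boom")

def waits():
    yield tinyio.sleep(10)  # cancelled gracefully once `crashes()` raises

def main():
    yield [crashes(), waits()]

loop = tinyio.Loop()

try:
    loop.run(main())  # default: the single original error is raised directly
except ValueError:
    pass

try:
    loop.run(main(), exception_group=True)  # always wrap, even for one error
except BaseExceptionGroup as eg:
    [e] = eg.exceptions
    assert type(e) is ValueError
```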
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | build-backend = "hatchling.build"
3 | requires = ["hatchling"]
4 |
5 | [project]
6 | authors = [
7 | {email = "contact@kidger.site", name = "Patrick Kidger"}
8 | ]
9 | classifiers = [
10 | "Development Status :: 3 - Alpha",
11 | "License :: OSI Approved :: Apache Software License",
12 | "Natural Language :: English",
13 | "Programming Language :: Python :: 3"
14 | ]
15 | dependencies = []
16 | description = "A tiny event loop for Python."
17 | keywords = ["asyncio", "tinyio", "async", "await"]
18 | license = {file = "LICENSE"}
19 | name = "tinyio"
20 | readme = "README.md"
21 | requires-python = ">=3.11"
22 | urls = {repository = "https://github.com/patrick-kidger/tinyio"}
23 | version = "0.2.0"
24 |
25 | [project.optional-dependencies]
26 | dev = ["pre-commit"]
27 | tests = ["pytest"]
28 |
29 | [tool.hatch.build]
30 | include = ["tinyio/*"]
31 |
32 | [tool.pyright]
33 | include = ["tinyio", "tests"]
34 | reportFunctionMemberAccess = false
35 | reportIncompatibleMethodOverride = true
36 | reportIncompatibleVariableOverride = false
37 |
38 | [tool.ruff]
39 | extend-include = ["*.ipynb"]
40 | line-length = 120
41 | src = []
42 |
43 | [tool.ruff.lint]
44 | fixable = ["I001", "F401", "UP"]
45 | ignore = ["E402", "E721", "E731", "E741", "F722", "UP038"]
46 | select = ["E", "F", "I001", "UP"]
47 |
48 | [tool.ruff.lint.flake8-import-conventions.extend-aliases]
49 | "collections" = "co"
50 | "functools" = "ft"
51 | "itertools" = "it"
52 |
53 | [tool.ruff.lint.isort]
54 | combine-as-imports = true
55 | extra-standard-library = ["typing_extensions"]
56 | lines-after-imports = 2
57 | order-by-type = true
58 |
--------------------------------------------------------------------------------
/tinyio/_time.py:
--------------------------------------------------------------------------------
1 | import contextlib
2 | from typing import TypeVar
3 |
4 | from ._core import Coro, Event
5 |
6 |
7 | _T = TypeVar("_T")
8 |
9 |
10 | def sleep(delay_in_seconds: int | float) -> Coro[None]:
11 | """`tinyio` coroutine for sleeping without blocking the event loop.
12 |
13 | **Arguments:**
14 |
15 | - `delay_in_seconds`: the number of seconds to sleep for.
16 |
17 | **Returns:**
18 |
19 | A coroutine that just sleeps.
20 | """
21 | yield from Event().wait(delay_in_seconds)
22 |
23 |
24 | class TimeoutError(BaseException):
25 | pass
26 |
27 |
28 | TimeoutError.__module__ = "tinyio"
29 |
30 |
31 | def timeout(coro: Coro[_T], timeout_in_seconds: int | float) -> Coro[tuple[None | _T, bool]]:
32 | """`tinyio` coroutine for running a coroutine for at most `timeout_in_seconds`.
33 |
34 | **Arguments:**
35 |
36 | - `coro`: another coroutine.
37 | - `timeout_in_seconds`: the maximum number of seconds to allow `coro` to run for.
38 |
39 | **Returns:**
40 |
41 |     A coroutine that can be `yield`ed on. This will return a pair of either `(output, True)` or `(None, False)`,
42 | corresponding to whether `coro` completed within the timeout or not.
43 | """
44 | done = Event()
45 | outs = []
46 |
47 | def wrapper():
48 | out = yield coro
49 | outs.append(out)
50 | done.set()
51 |
52 | yield {wrapper()}
53 | yield from done.wait(timeout_in_seconds)
54 | if len(outs) == 0:
55 | with contextlib.suppress(TimeoutError):
56 | coro.throw(TimeoutError)
57 | return None, False
58 | else:
59 | [out] = outs
60 | return out, True
61 |
--------------------------------------------------------------------------------
/devdocs/event_clear_and_timeout.md:
--------------------------------------------------------------------------------
1 | We support `tinyio.Event.clear` (which trio does not have), and support timeouts in `tinyio.Event.wait` (which neither asyncio nor trio have). Why is this?
2 |
3 | My original position was in line with https://github.com/python-trio/trio/issues/637, in which it is argued that `Event.clear` is a misfeature that can be troublesome to reason about. Specifically, code immediately after `yield event.wait()` doesn't actually have any guarantee about the value of the event: the flag might have been toggled back-and-forth, which would trigger the `yield` (i.e. schedule the coroutine), but with the flag set back to false by the time that the coroutine actually wakes up.
4 |
5 | However, I knew that I wanted timeout support on `Event.wait`.
6 | - This makes it possible to implement `tinyio.sleep` without requiring a thread (to set the value of the event). Instead, we can just `yield Event().wait(timeout=delay)`.
7 | - This also fits elegantly with our model of using a single `threading.Event.wait` to determine when to wake up our event loop and schedule work - we can implement sleeping by making it the timeout on that `threading.Event.wait` call.
8 |
9 | This means we already do not have the guarantee that the event's flag has been set after the `yield`, as we may have timed out and moved on instead.
10 |
11 | In light of that, I think it makes sense to fill out our API and support `Event.clear`. For the record: if you do need to ensure that the flag is set after `Event.wait`, even in the presence of other coroutines calling `.clear`, then you can use the following pattern:
12 | ```python
13 | while True:
14 | yield event.wait()
15 | if event.is_set():
16 | break
17 | ```
18 | It's not a strong feeling though, as the potential user-footgun-ness still remains.
19 |
--------------------------------------------------------------------------------
/tests/test_thread.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import time
3 | import warnings
4 |
5 | import pytest
6 | import tinyio
7 |
8 |
9 | def test_in_thread():
10 | def _blocking_slow_add_one(x: int) -> int:
11 | time.sleep(0.1)
12 | return x + 1
13 |
14 | def _big_gather(x: int):
15 | out = yield [tinyio.run_in_thread(_blocking_slow_add_one, x) for _ in range(100)]
16 | return out
17 |
18 | loop = tinyio.Loop()
19 | start = time.time()
20 | out = loop.run(_big_gather(1))
21 | end = time.time()
22 | assert out == [2 for _ in range(100)]
23 | assert end - start < 0.5
24 |
25 |
26 | @pytest.mark.parametrize("with_map", (False, True))
27 | def test_thread_pool(with_map: bool):
28 | counter = 0
29 | invalid_counter = False
30 | lock = threading.Lock()
31 |
32 | def _count(x, y):
33 | nonlocal counter, invalid_counter
34 | with lock:
35 | counter += 1
36 | time.sleep(0.01)
37 | invalid_counter = invalid_counter | (counter > 2)
38 | with lock:
39 | counter -= 1
40 | return x, y
41 |
42 | def _run(max_threads):
43 | pool = tinyio.ThreadPool(max_threads)
44 | if with_map:
45 | out = yield pool.map(lambda i: _count(i, y=i), range(50))
46 | else:
47 | out = yield [pool.run_in_thread(_count, i, y=i) for i in range(50)]
48 | return out
49 |
50 | loop = tinyio.Loop()
51 | assert loop.run(_run(2)) == [(i, i) for i in range(50)]
52 | assert not invalid_counter
53 | loop.run(_run(3))
54 | assert invalid_counter
55 |
56 |
57 | def test_simultaneous_errors():
58 | def _raises():
59 | raise RuntimeError
60 |
61 | def _run():
62 | out = yield [tinyio.run_in_thread(_raises) for _ in range(10)]
63 | return out
64 |
65 | loop = tinyio.Loop()
66 | with warnings.catch_warnings():
67 | warnings.simplefilter("error")
68 | with pytest.raises(BaseExceptionGroup) as catcher:
69 | loop.run(_run())
70 | assert len(catcher.value.exceptions) > 1
71 | for e in catcher.value.exceptions:
72 | assert type(e) is RuntimeError
73 |
--------------------------------------------------------------------------------
/tinyio/_sync.py:
--------------------------------------------------------------------------------
1 | import collections as co
2 | import contextlib
3 |
4 | from ._core import Coro, Event
5 |
6 |
7 | class Semaphore:
8 | """Limits coroutines so that at most `value` of them can access a resource concurrently.
9 |
10 | Usage:
11 | ```python
12 | semaphore = tinyio.Semaphore(value=...)
13 |
14 | with (yield semaphore()):
15 | ...
16 | ```
17 | """
18 |
19 | def __init__(self, value: int):
20 | """**Arguments:**
21 |
22 | - `value`: the maximum number of concurrent accesses.
23 | """
24 | if value <= 0:
25 | raise ValueError("`tinyio.Semaphore(value=...)` must be positive.")
26 | self._value = value
27 | self._events = co.deque[Event]()
28 |
29 | def __call__(self) -> Coro[contextlib.AbstractContextManager[None]]:
30 | if self._value == 0:
31 | event = Event()
32 | self._events.append(event)
33 | yield from event.wait()
34 | assert self._value > 0
35 | self._value -= 1
36 | return _CloseSemaphore(self, [False])
37 |
38 |
39 | class _CloseSemaphore:
40 | def __init__(self, semaphore: Semaphore, cell: list[bool]):
41 | self._semaphore = semaphore
42 | self._cell = cell
43 |
44 | def __enter__(self):
45 | if self._cell[0]:
46 | raise RuntimeError("Use a new `semaphore()` call in each `with (yield semaphore())`, do not re-use it.")
47 | self._cell[0] = True
48 |
49 | def __exit__(self, exc_type, exc_value, exc_tb):
50 | del exc_type, exc_value, exc_tb
51 | self._semaphore._value += 1
52 | if len(self._semaphore._events) > 0:
53 | event = self._semaphore._events.popleft()
54 | event.set()
55 |
56 |
57 | class Lock:
58 | """Prevents multiple coroutines from accessing a single resource."""
59 |
60 | def __init__(self):
61 | self._semaphore = Semaphore(value=1)
62 |
63 | def __call__(self) -> Coro[contextlib.AbstractContextManager[None]]:
64 | return self._semaphore()
65 |
66 |
67 | class Barrier:
68 | """Prevents coroutines from progressing until at least `value` of them have called `yield barrier.wait()`."""
69 |
70 | def __init__(self, value: int):
71 | self._count = 0
72 | self._value = value
73 | self._event = Event()
74 |
75 | def wait(self):
76 | count = self._count
77 | self._count += 1
78 | if self._count == self._value:
79 | self._event.set()
80 | yield from self._event.wait()
81 | return count
82 |
--------------------------------------------------------------------------------
/tinyio/_background.py:
--------------------------------------------------------------------------------
1 | from collections.abc import Generator
2 | from typing import Generic, TypeVar
3 |
4 | from ._core import Coro, Event
5 |
6 |
7 | _T = TypeVar("_T")
8 |
9 |
10 | def as_completed(coros: set[Coro[_T]]) -> Coro["AsCompleted"]:
11 | """Schedules multiple coroutines, iterating through their outputs in the order that they complete.
12 |
13 | Usage is via `.done()` and `.get()` as follows:
14 | ```python
15 | import tinyio
16 |
17 | def sleep(x):
18 | yield tinyio.sleep(x)
19 | return x
20 |
21 | def as_completed_demo():
22 | iterator = yield tinyio.as_completed({sleep(7), sleep(2), sleep(4)})
23 | while not iterator.done():
24 | out = yield iterator.get()
25 | print(f"As completed demo: {out}")
26 |
27 | loop = tinyio.Loop()
28 | loop.run(as_completed_demo())
29 | # As completed demo: 2
30 | # As completed demo: 4
31 | # As completed demo: 7
32 | ```
33 | """
34 | if not isinstance(coros, set) or any(not isinstance(coro, Generator) for coro in coros):
35 | raise ValueError("`AsCompleted(coros=...)` must be a set of coroutines.")
36 |
37 | outs = {}
38 | put_count = 0
39 | events = [Event() for _ in coros]
40 |
41 | def wrapper(coro):
42 | nonlocal put_count
43 | out = yield coro
44 | outs[put_count] = out
45 | events[put_count].set()
46 | put_count += 1
47 |
48 | yield {wrapper(coro) for coro in coros}
49 | return AsCompleted(outs, events)
50 |
51 |
52 | class AsCompleted(Generic[_T]):
53 | def __init__(self, outs: dict, events: list[Event]):
54 | self._get_count = 0
55 | self._outs = outs
56 | self._events = events
57 |
58 | def done(self) -> bool:
59 | """Whether all coroutines are being waited on. This does not imply that all coroutines have necessarily
60 | finished executing; it just implies that you should not call `.get()` any more times.
61 | """
62 | return self._get_count == len(self._events)
63 |
64 | def get(self) -> Coro[_T]:
65 | """Yields the output of the next coroutine to complete."""
66 | get_count = self._get_count
67 | if self._get_count >= len(self._events):
68 | raise RuntimeError(
69 | f"Called `AsCompleted.get` {self._get_count + 1} times, which is greater than the number of coroutines "
70 | f"which are being waited on ({len(self._events)})."
71 | )
72 | self._get_count += 1
73 | return self._get(get_count)
74 |
75 | def _get(self, get_count: int):
76 | yield from self._events[get_count].wait()
77 | return self._outs.pop(get_count)
78 |
--------------------------------------------------------------------------------
/tests/test_sync.py:
--------------------------------------------------------------------------------
1 | import contextlib
2 | import threading
3 | import time
4 |
5 | import pytest
6 | import tinyio
7 |
8 |
9 | def test_semaphore():
10 | counter = 0
11 |
12 | def _count(semaphore, i):
13 | nonlocal counter
14 | with (yield semaphore()):
15 | counter += 1
16 | if counter > 2:
17 | raise RuntimeError
18 | yield
19 | counter -= 1
20 | return i
21 |
22 | def _run(value):
23 | semaphore = tinyio.Semaphore(value)
24 | out = yield [_count(semaphore, i) for i in range(50)]
25 | return out
26 |
27 | loop = tinyio.Loop()
28 | assert loop.run(_run(2)) == list(range(50))
29 | with pytest.raises(RuntimeError):
30 | loop.run(_run(3))
31 |
32 |
33 | def test_semaphore_reuse():
34 | counter = 0
35 |
36 | def _foo(coro):
37 | nonlocal counter
38 | context = yield coro
39 | counter += 1
40 | with pytest.raises(RuntimeError, match="do not") if counter == 2 else contextlib.nullcontext():
41 | with context:
42 | yield
43 |
44 | def _bar():
45 | semaphore = tinyio.Semaphore(2)
46 | coro = semaphore()
47 | yield [_foo(coro), _foo(coro)]
48 |
49 | loop = tinyio.Loop()
50 | loop.run(_bar())
51 |
52 |
53 | def test_lock():
54 | counter = 0
55 |
56 | def _count(semaphore, i):
57 | nonlocal counter
58 | with (yield semaphore()):
59 | counter += 1
60 | if counter > 1:
61 | raise RuntimeError
62 | yield
63 | counter -= 1
64 | return i
65 |
66 | def _run():
67 | semaphore = tinyio.Lock()
68 | out = yield [_count(semaphore, i) for i in range(50)]
69 | return out
70 |
71 | loop = tinyio.Loop()
72 | assert loop.run(_run()) == list(range(50))
73 |
74 |
75 | def test_barrier():
76 | barrier = tinyio.Barrier(3)
77 | count = 0
78 |
79 | def _foo():
80 | nonlocal count
81 | count += 1
82 | i = yield barrier.wait()
83 | time.sleep(0.1)
84 | assert count == 3
85 | return i
86 |
87 | def _run():
88 | out = yield [_foo() for _ in range(3)]
89 | return out
90 |
91 | loop = tinyio.Loop()
92 | assert set(loop.run(_run())) == {0, 1, 2}
93 |
94 |
95 | def test_event():
96 | event = tinyio.Event()
97 | done = False
98 | done2 = False
99 |
100 | def _foo():
101 | nonlocal done
102 | assert event.is_set() is False
103 | yield event.wait()
104 | assert event.is_set() is True
105 | done = True
106 |
107 | def _bar():
108 | nonlocal done2
109 | yield {_foo()}
110 | for _ in range(10):
111 | yield
112 | assert not done
113 | assert event.is_set() is False
114 | event.set()
115 | assert event.is_set()
116 | done2 = True
117 |
118 | loop = tinyio.Loop()
119 | loop.run(_bar())
120 | assert done
121 | assert done2
122 | assert event.is_set()
123 |
124 |
125 | @pytest.mark.parametrize("is_set", (False, True))
126 | def test_event_only(is_set: bool):
127 | event = tinyio.Event()
128 | if is_set:
129 | event.set()
130 |
131 | def foo():
132 | if not is_set:
133 | t = threading.Timer(0.1, lambda: event.set())
134 | t.start()
135 | yield event.wait()
136 |
137 | loop = tinyio.Loop()
138 | loop.run(foo())
139 |
140 |
141 | @pytest.mark.parametrize("is_set", (False, True))
142 | def test_event_run(is_set: bool):
143 | event = tinyio.Event()
144 | if is_set:
145 | event.set()
146 | loop = tinyio.Loop()
147 | if not is_set:
148 | t = threading.Timer(0.1, lambda: event.set())
149 | t.start()
150 | loop.run(event.wait())
151 |
152 |
153 | @pytest.mark.parametrize("is_set", (False, True))
154 | def test_event_simultaneous_wait(is_set: bool):
155 | event = tinyio.Event()
156 | if is_set:
157 | event.set()
158 |
159 | def _foo():
160 | if not is_set:
161 | t = threading.Timer(0.1, lambda: event.set())
162 | t.start()
163 | yield [event.wait(), event.wait()]
164 |
165 | loop = tinyio.Loop()
166 | loop.run(_foo())
167 |
168 |
169 | def test_event_clear_not_strict():
170 | """Test that even though we `clear()` the event after setting it, that both `foo()` still unblock."""
171 | event = tinyio.Event()
172 | event.set()
173 | event.clear()
174 | assert not event.is_set()
175 |
176 | out = []
177 |
178 | def foo():
179 | yield event.wait()
180 | out.append(2)
181 |
182 | def bar():
183 | yield
184 | out.append(1)
185 | event.set()
186 | for _ in range(20):
187 | yield
188 | event.clear()
189 | out.append(3)
190 |
191 | def baz():
192 | yield [foo(), foo(), bar()]
193 |
194 | loop = tinyio.Loop()
195 | loop.run(baz())
196 | assert out == [1, 2, 2, 3]
197 |
198 |
199 | class _Semaphore:
200 | def __init__(self, value):
201 | self.value = value
202 | self.event = tinyio.Event()
203 | self.event.set()
204 |
205 | def __call__(self):
206 | while True:
207 | yield self.event.wait()
208 | if self.event.is_set():
209 | break
210 | assert self.value > 0
211 | self.value -= 1
212 | if self.value == 0:
213 | self.event.clear()
214 | return _closing(self)
215 |
216 |
217 | @contextlib.contextmanager
218 | def _closing(semaphore):
219 | try:
220 | yield
221 | finally:
222 | semaphore.value += 1
223 | semaphore.event.set()
224 |
225 |
226 | def test_alternate_semaphore():
227 | """This test is useful as it makes use of `Event.clear()`."""
228 |
229 | counter = 0
230 |
231 | def _count(semaphore, i):
232 | nonlocal counter
233 | with (yield semaphore()):
234 | counter += 1
235 | if counter > 2:
236 | raise RuntimeError
237 | yield
238 | counter -= 1
239 | return i
240 |
241 | def _run(value):
242 | semaphore = _Semaphore(value)
243 | out = yield [_count(semaphore, i) for i in range(50)]
244 | return out
245 |
246 | loop = tinyio.Loop()
247 | assert loop.run(_run(2)) == list(range(50))
248 | with pytest.raises(RuntimeError):
249 | loop.run(_run(3))
250 |
--------------------------------------------------------------------------------
/tinyio/_thread.py:
--------------------------------------------------------------------------------
1 | import contextlib
2 | import ctypes
3 | import threading
4 | from collections.abc import Callable, Iterable
5 | from typing import ParamSpec, TypeVar, cast
6 |
7 | from ._core import CancelledError, Coro, Event
8 | from ._sync import Semaphore
9 |
10 |
11 | _T = TypeVar("_T")
12 | _Params = ParamSpec("_Params")
13 | _Return = TypeVar("_Return")
14 |
15 |
16 | def run_in_thread(fn: Callable[_Params, _Return], /, *args: _Params.args, **kwargs: _Params.kwargs) -> Coro[_Return]:
17 | """A `tinyio` coroutine for running the blocking function `fn(*args, **kwargs)` in a thread.
18 |
19 | If this coroutine is cancelled then the cancellation will be raised in the thread as well; vice-versa if the
20 | function call raises an error then this will propagate to the coroutine.
21 |
22 | **Arguments:**
23 |
24 | - `fn`: the function to call.
25 | - `*args`: arguments to call `fn` with.
26 | - `**kwargs`: keyword arguments to call `fn` with.
27 |
28 | **Returns:**
29 |
30 | A coroutine that can be `yield`ed on, returning the output of `fn(*args, **kwargs)`.
31 | """
32 |
33 | is_exception = None
34 | result = None
35 | event = Event()
36 |
37 | def target():
38 | nonlocal result, is_exception
39 | try:
40 | result = fn(*args, **kwargs)
41 | is_exception = False
42 | except BaseException as e:
43 | try:
44 | result = e
45 | is_exception = True
46 | except BaseException:
47 | # We have an `except` here just in case we were already within the `except` block due to an error from
48 | # within the thread, whilst our `ctypes` error below triggers.
49 | result = e
50 | is_exception = True
51 | raise
52 | finally:
53 | event.set()
54 |
55 | t = threading.Thread(target=target)
56 |
57 | try:
58 | t.start()
59 | yield from event.wait()
60 | except BaseException as e:
61 |         # We can end up here if a `tinyio.CancelledError` arises out of the `yield`, or from an exogenous
62 | # `KeyboardInterrupt`.
63 |
64 | # First check whether we have a race condition: that our thread itself produced an error whilst we were being
65 | # cancelled due to another error. If this is the case then we suppress the warning in the core event loop about
66 | # resource cleanup.
67 | # This is best-effort as our thread may still crash with its own exception between now and the end of this
68 | # function.
69 | already_error = is_exception is True
70 |
71 | # Second, cancel the thread if necessary. This `event.is_set()` isn't load-bearing, it's just a code cleanliness
72 | # thing, as raising the `CancelledError` in the thread is a no-op if the thread has already terminated.
73 | # (And in principle the thread may terminate after we check the event but before we try raising the exception.)
74 | if not event.is_set():
75 | thread_id = t.ident
76 | assert thread_id is not None
77 | # Raise a `CancelledError` in the thread that is running the task. This allows the thread to do any cleanup.
78 | # This is not readily supported and needs to be done via ctypes, see: https://gist.github.com/liuw/2407154.
79 | ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread_id), ctypes.py_object(CancelledError))
80 | t.join()
81 |
82 | # Our thread above has now completed.
83 | # We can `is_exception is True` either because the thread already crashed, or from our `ctypes` crash above.
84 |         # We can have `is_exception is False` if the thread managed to finish successfully whilst we are in this
85 | # `except` block.
86 | # I don't think we can have `is_exception is None`.
87 | if is_exception is True and type(e) is CancelledError:
88 | # We were cancelled.
89 | #
90 | # Note that we raise this regardless of whether `result` is itself a `CancelledError`. It's probably the
91 | # error we triggered via `ctypes` above, but in principle the function may have caught that and done
92 | # something else.
93 | # Either way, we are but the humble purveyors of this message back to the event loop: raise it.
94 | result = cast(BaseException, result)
95 | context = result.__context__
96 | cause = result.__cause__
97 | try:
98 | raise result
99 | except BaseException as e:
100 | # try-and-immediately-except is a trick to remove the current frame from the traceback.
101 | # This smoothly links up the `run_in_thread` invocation with the frame in `target`, with no weird
102 | # `raise result` frame halfway through.
103 | # In addition we also need to preserve the `__context__` (and `__cause__`?) as the `raise` overwrites
104 | # this with the current context.
105 | # Curiously this doesn't seem to work if we unify this with the `else` branch of the `try/except/else`
106 | # below, despite ostensibly then being outside of the `except BaseException` context. Whatever, this
107 | # version works correctly!
108 | assert e.__traceback__ is not None
109 | e.__traceback__ = e.__traceback__.tb_next
110 | e.__context__ = context
111 | e.__cause__ = cause
112 | with contextlib.suppress(Exception):
113 | e.__tinyio_no_warn__ = already_error # pyright: ignore[reportAttributeAccessIssue]
114 | raise
115 | else:
116 | # Probably a `KeyboardInterrupt`, forward it on.
117 | raise
118 | else:
119 | assert is_exception is not None
120 | if is_exception:
121 | try:
122 | raise cast(BaseException, result)
123 | except BaseException as e:
124 | assert e.__traceback__ is not None
125 | e.__traceback__ = e.__traceback__.tb_next
126 | raise
127 | else:
128 | return cast(_Return, result)
129 |
130 |
131 | class ThreadPool:
132 |     """A wrapper around `tinyio.run_in_thread` to launch at most `max_threads` many threads at a time."""
133 |
134 | def __init__(self, max_threads: int):
135 | """**Arguments:**
136 |
137 |         - `max_threads`: the maximum number of threads to launch at a time.
138 | """
139 |
140 | self._semaphore = Semaphore(max_threads)
141 |
142 | def run_in_thread(
143 | self, fn: Callable[_Params, _Return], /, *args: _Params.args, **kwargs: _Params.kwargs
144 | ) -> Coro[_Return]:
145 | """Like `tinyio.run_in_thread(fn, *args, **kwargs)`.
146 |
147 | Usage is `output = yield pool.run_in_thread(...)`
148 | """
149 | with (yield self._semaphore()):
150 | out = yield run_in_thread(fn, *args, **kwargs)
151 | return out
152 |
153 | def map(self, fn: Callable[[_T], _Return], /, xs: Iterable[_T]) -> Coro[list[_Return]]:
154 | """Like `[tinyio.run_in_thread(fn, x) for x in xs]`.
155 |
156 | Usage is `output_list = pool.map(...)`
157 | """
158 | return (yield [self.run_in_thread(fn, x) for x in xs])
159 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # tinyio
2 | A tiny (~300 lines) event loop for Python
3 |
4 | _Ever used `asyncio` and wished you hadn't?_
5 |
6 | `tinyio` is a dead-simple event loop for Python, born out of my frustration with trying to get robust error handling with `asyncio`. (I'm not the only one running into its sharp corners: [link1](https://sailor.li/asyncio), [link2](https://lucumr.pocoo.org/2016/10/30/i-dont-understand-asyncio/).)
7 |
8 | This is an alternative for the simple use-cases, where you just need an event loop, and want to crash the whole thing if anything goes wrong. (Raising an exception in every coroutine so it can clean up its resources.)
9 |
10 | ```python
11 | import tinyio
12 |
13 | def slow_add_one(x: int):
14 | yield tinyio.sleep(1)
15 | return x + 1
16 |
17 | def foo():
18 | four, five = yield [slow_add_one(3), slow_add_one(4)]
19 | return four, five
20 |
21 | loop = tinyio.Loop()
22 | out = loop.run(foo())
23 | assert out == (4, 5)
24 | ```
25 |
26 | - Somewhat unusually, our syntax uses `yield` rather than `await`, but the behaviour is the same. Await another coroutine with `yield coro`. Await on multiple with `yield [coro1, coro2, ...]` (a 'gather' in asyncio terminology; a 'nursery' in trio terminology).
27 | - An error in one coroutine will cancel all coroutines across the entire event loop.
28 | - If the erroring coroutine is sequentially depended on by a chain of other coroutines, then we chain their tracebacks for easier debugging.
29 | - Errors propagate to and from synchronous operations run in threads.
30 | - Can nest tinyio loops inside each other, none of this one-per-thread business.
31 | - Ludicrously simple. No need for futures, tasks, etc. Here's the entirety of the day-to-day API:
32 | ```python
33 | tinyio.Loop
34 | tinyio.run_in_thread
35 | tinyio.sleep
36 | tinyio.CancelledError
37 | ```
38 |
39 | ## Installation
40 |
41 | ```
42 | pip install tinyio
43 | ```
44 |
45 | ## Documentation
46 |
47 | ### Loops
48 |
49 | Create a loop with `tinyio.Loop()`. It has a single method, `.run(coro)`, which consumes a coroutine, and which returns the output of that coroutine.
50 |
51 | Coroutines can `yield` four possible things:
52 |
53 | - `yield`: yield nothing, this just pauses and gives other coroutines a chance to run.
54 | - `yield coro`: wait on a single coroutine, in which case we'll resume with the output of that coroutine once it is available.
55 | - `yield [coro1, coro2, coro3]`: wait on multiple coroutines by putting them in a list, and resume with a list of outputs once all have completed. This is what asyncio calls a 'gather' or 'TaskGroup', and what trio calls a 'nursery'.
56 | - `yield {coro1, coro2, coro3}`: schedule one or more coroutines but do not wait on their result - they will run independently in the background.
57 |
58 | If you `yield` on the same coroutine multiple times (e.g. in a diamond dependency pattern) then the coroutine will be scheduled once, and on completion all dependees will receive its output. (You can even do this if the coroutine has already finished: `yield` on it to retrieve its output.)
59 |
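Putting these together, a minimal sketch of all four forms (the coroutine names are illustrative):

```python
import tinyio

def work(x):
    yield  # pause, then resume
    return x + 1

def main():
    yield                          # 1. pause; give other coroutines a chance to run
    y = yield work(1)              # 2. wait on a single coroutine
    ys = yield [work(2), work(3)]  # 3. wait on several at once (a 'gather')
    yield {work(4)}                # 4. schedule in the background; don't wait on it
    return y, ys

loop = tinyio.Loop()
assert loop.run(main()) == (2, [3, 4])
```
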
60 | ### Threading
61 |
62 | Blocking functions can be run in threads using `tinyio.run_in_thread(fn, *args, **kwargs)`, which gives a coroutine you can `yield` on. Example:
63 |
64 | ```python
65 | import time, tinyio
66 |
67 | def slow_blocking_add_one(x: int) -> int:
68 | time.sleep(1)
69 | return x + 1
70 |
71 | def foo(x: int):
72 | out = yield [tinyio.run_in_thread(slow_blocking_add_one, x) for _ in range(3)]
73 | return out
74 |
75 | loop = tinyio.Loop()
76 | out = loop.run(foo(x=1)) # runs in one second, not three
77 | assert out == [2, 2, 2]
78 | ```
79 |
80 | ### Sleeping
81 |
82 | This is `tinyio.sleep(delay_in_seconds)`, which is a coroutine you can `yield` on.
83 |
84 | ### Error propagation
85 |
86 | If any coroutine raises an error, then:
87 |
88 | 1. All coroutines across the entire loop will have `tinyio.CancelledError` raised in them (from whatever `yield` point they are currently waiting at).
89 | 2. Any functions run in threads via `tinyio.run_in_thread` will also have `tinyio.CancelledError` raised in the thread.
90 | 3. The original error is raised out of `loop.run(...)`. This behaviour can be configured (e.g. to collect errors into a `BaseExceptionGroup`) by setting `loop.run(..., exception_group=None/False/True)`.
91 |
92 | This gives every coroutine a chance to shut down gracefully. Debuggers like [`patdb`](https://github.com/patrick-kidger/patdb) offer the ability to navigate across exceptions in an exception group, allowing you to inspect the state of all coroutines that were related to the error.
93 |
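For instance, a minimal sketch of the cleanup pattern (the coroutine names here are made up):

```python
import tinyio

def crashes():
    yield
    raise ValueError("boom")

def holds_a_resource():
    try:
        while True:
            yield  # a `tinyio.CancelledError` is raised here once `crashes()` fails
    except tinyio.CancelledError:
        ...        # release resources here, then let the cancellation propagate
        raise

def main():
    yield [crashes(), holds_a_resource()]

loop = tinyio.Loop()
try:
    loop.run(main())
except ValueError:
    print("the original error is raised out of `loop.run`")
```
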
94 | ### Batteries-included
95 |
96 | We ship batteries-included with the usual collection of standard operations.
97 |
98 |
99 |
100 | ```python
101 | tinyio.as_completed tinyio.Semaphore
102 | tinyio.Barrier tinyio.ThreadPool
103 | tinyio.Event tinyio.timeout
104 | tinyio.Lock tinyio.TimeoutError
105 | ```
106 |
107 | ---
108 |
109 | - `tinyio.as_completed({coro1, coro2, ...})`
110 |
111 | This schedules multiple coroutines in the background (like `yield {coro1, coro2, ...}`), and then offers their results in the order they complete.
112 |
113 | This is iterated over in the following way, using its `.done()` and `.get()` methods:
114 | ```python
115 | def main():
116 | iterator = yield tinyio.as_completed({coro1, coro2, coro3})
117 | while not iterator.done():
118 | x = yield iterator.get()
119 | ```
120 |
121 | ---
122 |
123 | - `tinyio.Barrier(value)`
124 |
125 | This has a single method `barrier.wait()`, which is a coroutine you can `yield` on. Once `value` many coroutines have yielded on this method then it will unblock.
126 |
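A small sketch (the `worker` name is illustrative):

```python
import tinyio

barrier = tinyio.Barrier(3)

def worker(i):
    yield barrier.wait()  # blocks until all three workers have arrived
    return i

def main():
    return (yield [worker(0), worker(1), worker(2)])

loop = tinyio.Loop()
assert loop.run(main()) == [0, 1, 2]
```
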
127 | ---
128 |
129 | - `tinyio.Event()`
130 |
131 | This is a wrapper around a boolean flag, initialised with `False`.
132 | This has the following methods:
133 |
134 | - `.is_set()`: return the value of the flag.
135 | - `.set()`: set the flag to `True`.
136 | - `.clear()`: set the flag to `False`.
137 | - `.wait(timeout_in_seconds=None)`, which is a coroutine you can `yield` on. This will unblock if the internal flag is `True` or if `timeout_in_seconds` seconds pass. (Typically the former is accomplished by calling `.set()` from another coroutine or from a thread.)
138 |
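For example (`waiter` and `setter` are illustrative names):

```python
import tinyio

event = tinyio.Event()

def waiter():
    yield event.wait()  # unblocks once `event.set()` is called elsewhere
    return event.is_set()

def setter():
    yield tinyio.sleep(0.1)
    event.set()

def main():
    flag, _ = yield [waiter(), setter()]
    return flag

loop = tinyio.Loop()
assert loop.run(main()) is True
```
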
139 | ---
140 |
141 | - `tinyio.Lock()`
142 |
143 | This is just a convenience for `tinyio.Semaphore(value=1)`, see below.
144 |
145 | ---
146 |
147 | - `tinyio.Semaphore(value)`
148 |
149 | This manages an internal counter that is initialised at `value`, is decremented when entering a region, and incremented when exiting. This blocks if this counter is at zero. In this way, at most `value` coroutines may acquire the semaphore at a time.
150 |
151 | This is used as:
152 | ```python
153 | semaphore = Semaphore(value)
154 |
155 | ...
156 |
157 | with (yield semaphore()):
158 | ...
159 | ```
160 |
161 | ---
162 |
163 | - `tinyio.timeout(coro, timeout_in_seconds)`
164 |
165 | This is a coroutine you can `yield` on, used as `output, success = yield tinyio.timeout(coro, timeout_in_seconds)`.
166 |
167 | This runs `coro` for at most `timeout_in_seconds`. If it succeeds in that time then the pair `(output, True)` is returned. Otherwise this returns `(None, False)`, and `coro` will be halted by raising `tinyio.TimeoutError` inside it.
168 |
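For example, a small sketch mirroring the behaviour described above (`slow` is an illustrative name):

```python
import tinyio

def slow():
    yield tinyio.sleep(0.2)
    return "done"

def main():
    out1, ok1 = yield tinyio.timeout(slow(), 0.5)  # finishes in time
    out2, ok2 = yield tinyio.timeout(slow(), 0.1)  # halted by `tinyio.TimeoutError`
    return out1, ok1, out2, ok2

loop = tinyio.Loop()
assert loop.run(main()) == ("done", True, None, False)
```
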
169 | ---
170 |
171 | - `tinyio.ThreadPool(max_threads)`
172 |
173 | This is equivalent to making multiple `tinyio.run_in_thread` calls, but will limit the number of threads to at most `max_threads`. Additional work after that will block until a thread becomes available.
174 |
175 | This has two methods:
176 |
177 | - `.run_in_thread(fn, *args, **kwargs)`, which is a coroutine you can `yield` on. This is equivalent to `yield tinyio.run_in_thread(fn, *args, **kwargs)`.
178 | - `.map(fn, xs)`, which is a coroutine you can `yield` on. This is equivalent to `yield [tinyio.run_in_thread(fn, x) for x in xs]`.
179 |
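A small sketch using `.map` (the blocking function here is illustrative):

```python
import time
import tinyio

def slow_double(x: int) -> int:
    time.sleep(0.1)  # blocking work; runs in one of the pool's threads
    return 2 * x

def main():
    pool = tinyio.ThreadPool(4)  # at most four threads at a time
    return (yield pool.map(slow_double, range(10)))

loop = tinyio.Loop()
assert loop.run(main()) == [2 * x for x in range(10)]
```
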
180 | ---
181 |
182 |
183 |
184 | ## FAQ
185 |
186 |
187 | **Why `yield` - why not `await`, as is normally seen for coroutines?**
188 |
189 |
190 | The reason is that `await` does not offer a suspension point to an event loop (it just calls `__await__` and maybe *that* offers a suspension point), so if we wanted to use that syntax then we'd need to replace `yield coro` with something like `await tinyio.Task(coro)`. The traditional syntax is not worth the extra class.
191 |
192 |
193 |
194 | **I have a function I want to be a coroutine, but it has zero `yield` statements, so it is just a normal function?**
195 |
196 |
197 | You can distinguish it from a normal Python function by putting `if False: yield` somewhere inside its body. Another common trick is to put a `yield` statement after the final `return` statement. Bit ugly but oh well.
198 |
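For example, a minimal sketch:

```python
def fetch_config():  # illustrative name; no real suspension point needed
    if False:
        yield  # never executed, but makes this a generator, i.e. a `tinyio` coroutine
    return {"retries": 3}

# elsewhere: config = yield fetch_config()
```
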
199 |
200 |
201 | **vs `asyncio` or `trio`?**
202 |
203 |
204 | I wasted a *lot* of time trying to get correct error propagation with `asyncio`, trying to reason about whether my tasks would be cleaned up correctly or not (edge-triggered vs level-triggered, etc.). `trio` is excellent but still has a one-loop-per-thread rule, and doesn't propagate cancellations to/from threads. These points inspired me to try writing my own.
205 |
206 | `tinyio` has the following unique features, and as such may be the right choice if any of the following are must-haves for you:
207 |
208 | - the propagation of errors to/from threads;
209 | - no one-loop-per-thread rule;
210 | - simple+robust error semantics (crash the whole loop if anything goes wrong);
211 | - tiny, hackable, codebase.
212 |
213 | Conversely, `tinyio` does not offer the ability to schedule work on the event loop whilst cleaning up from errors.
214 |
215 | If none of the bullet points are must-haves for you, or if needing the event loop during cleanup is a dealbreaker, then `trio` or `asyncio` is likely to be a better choice. :)
216 |
217 |
218 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/tests/test_core.py:
--------------------------------------------------------------------------------
1 | import contextlib
2 | import gc
3 | import threading
4 | import time
5 | import weakref
6 | from typing import Any
7 |
8 | import pytest
9 | import tinyio
10 |
11 |
12 | def _add_one(x: int) -> tinyio.Coro[int]:
13 | yield
14 | return x + 1
15 |
16 |
17 | def _add_two(x: int) -> tinyio.Coro[int]:
18 | y = yield _add_one(x)
19 | z = yield _add_one(y)
20 | return z
21 |
22 |
23 | def test_basic():
24 | loop = tinyio.Loop()
25 | assert loop.run(_add_two(4)) == 6
26 |
27 |
28 | def test_gather():
29 | def _gather(x: int):
30 | return (yield [_add_one(x), _add_two(x)])
31 |
32 | loop = tinyio.Loop()
33 | assert loop.run(_gather(3)) == [4, 5]
34 |
35 |
36 | def test_empty_gather():
37 | def _gather():
38 | out = yield []
39 | return out
40 |
41 | loop = tinyio.Loop()
42 | assert loop.run(_gather()) == []
43 |
44 |
45 | def test_multi_yield():
46 | def _multi_yield():
47 | foo = _add_one(x=3)
48 | x = yield foo
49 | y = yield foo
50 | return x, y
51 |
52 | loop = tinyio.Loop()
53 | assert loop.run(_multi_yield()) == (4, 4)
54 |
55 |
56 | def test_simultaneous_yield():
57 | def _simultaneous_yield():
58 | foo = _add_one(x=3)
59 | x, y = yield [foo, foo]
60 | return x, y
61 |
62 | loop = tinyio.Loop()
63 | assert loop.run(_simultaneous_yield()) == (4, 4)
64 |
65 |
66 | def test_diamond():
67 | def _diamond1(x: int) -> tinyio.Coro[int]:
68 | y = _add_one(x)
69 | a, b = yield [_diamond2(y, 1), _diamond2(y, 2)]
70 | return a + b
71 |
72 | def _diamond2(y: tinyio.Coro[int], factor: int):
73 | z = yield y
74 | return z * factor
75 |
76 | loop = tinyio.Loop()
77 | assert loop.run(_diamond1(2)) == 9
78 |
79 |
80 | def test_sleep():
81 | def _slow_add_one(x: int):
82 | yield tinyio.sleep(0.1)
83 | return x + 1
84 |
85 | def _big_gather(x: int):
86 | out = yield [_slow_add_one(x) for _ in range(100)]
87 | return out
88 |
89 | loop = tinyio.Loop()
90 | start = time.time()
91 | out = loop.run(_big_gather(1))
92 | end = time.time()
93 | assert out == [2 for _ in range(100)]
94 | assert end - start < 0.5
95 |
96 |
97 | def test_multi_run():
98 | foo = _add_one(x=4)
99 |
100 | def _mul():
101 | out = yield foo
102 | return out * 5
103 |
104 | loop = tinyio.Loop()
105 | assert loop.run(_mul()) == 25
106 | assert loop.run(_mul()) == 25
107 | assert loop.run(_mul()) == 25
108 |
109 |
110 | def test_waiting_on_already_finished():
111 | def f():
112 | yield
113 | return 3
114 |
115 | def g(coro):
116 | yield h(coro)
117 | yield [f(), coro]
118 |
119 | def h(coro):
120 | yield coro
121 |
122 | foo = f()
123 | loop = tinyio.Loop()
124 | loop.run(g(foo))
125 |
126 |
127 | def test_cycle():
128 | def f():
129 | yield gg
130 |
131 | def g():
132 | yield ff
133 |
134 | ff = f()
135 | gg = g()
136 |
137 | def h():
138 | yield [ff, gg]
139 |
140 | loop = tinyio.Loop()
141 | with pytest.raises(RuntimeError, match="Cycle detected in `tinyio` loop"):
142 | loop.run(h(), exception_group=False)
143 |
144 |
145 | @pytest.mark.parametrize("wait_on_f", (False, True))
146 | def test_background(wait_on_f: bool):
147 | val = False
148 | done = False
149 |
150 | def f():
151 | while val is False:
152 | yield
153 | return 3
154 |
155 | def g():
156 | nonlocal val
157 | val = True
158 | yield
159 |
160 | def h():
161 | nonlocal done
162 | ff = f()
163 | out = yield {ff}
164 | assert out is None
165 | yield g()
166 | if wait_on_f:
167 | out = yield ff
168 | assert out == 3
169 | done = True
170 |
171 | loop = tinyio.Loop()
172 | loop.run(h())
173 | assert done
174 |
175 |
176 | @pytest.mark.parametrize("wait_on_f", (False, True))
177 | def test_background_already_waiting(wait_on_f: bool):
178 | val = False
179 | done = False
180 |
181 | def f():
182 | while val is False:
183 | yield
184 | return 3
185 |
186 | ff = f()
187 |
188 | def g():
189 | nonlocal val
190 | val = True
191 | yield
192 |
193 | def h():
194 | nonlocal done
195 | out = yield {ff}
196 | assert out is None
197 | yield g()
198 | if wait_on_f:
199 | out = yield ff
200 | assert out == 3
201 | done = True
202 |
203 | def i():
204 | yield [ff, h()]
205 |
206 | loop = tinyio.Loop()
207 | loop.run(i())
208 | assert done
209 |
210 |
211 | def test_empty_background():
212 | def _background():
213 | yield set()
214 | return 3
215 |
216 | loop = tinyio.Loop()
217 | assert loop.run(_background()) == 3
218 |
219 |
220 | def test_background_multiple_yields():
221 | done = False
222 |
223 | def f():
224 | yield
225 | return 3
226 |
227 | def g():
228 | nonlocal done
229 | ff = f()
230 | yield {ff}
231 | yield {ff}
232 | x = yield ff
233 | y = yield ff
234 | assert x == 3
235 | assert y == 3
236 | done = True
237 |
238 | loop = tinyio.Loop()
239 | loop.run(g())
240 | assert done
241 |
242 |
243 | def test_no_yield_direct():
244 | def f():
245 | return 3
246 | yield
247 |
248 | loop = tinyio.Loop()
249 | assert loop.run(f()) == 3
250 |
251 |
252 | def test_no_yield_indirect():
253 | def f():
254 | return 3
255 | yield
256 |
257 | def g():
258 | out = yield f()
259 | return out
260 |
261 | loop = tinyio.Loop()
262 | assert loop.run(g()) == 3
263 |
264 |
265 | def test_gc_simple():
266 | def _block_add_one(x):
267 | return x + 1
268 |
269 | def _foo(x):
270 | return (yield tinyio.run_in_thread(_block_add_one, x))
271 |
272 | def _gc(x: int) -> tinyio.Coro[tuple[int, int]]:
273 | iterator = yield tinyio.as_completed({_foo(x), _add_one(x)})
274 | y = yield iterator.get()
275 | z = yield iterator.get()
276 | return y, z
277 |
278 | loop = tinyio.Loop()
279 | coro = _gc(4)
280 | assert loop.run(coro) == (5, 5)
281 | gc.collect()
282 | assert set(loop._results.keys()) == {coro}
283 |
284 |
285 | @pytest.mark.parametrize("yield_from", (False, True))
286 | @pytest.mark.parametrize("timeout", (None, 10))
287 | def test_gc_after_event(yield_from, timeout):
288 | """The interesting case here is `yield_from=True`, `timeout=10`.
289 | (The others are just for completeness.)
290 |
291 | In this case we have that:
292 | - `f1` has `timeout=10` but triggers immediately.
293 | - `f2` has `timeout=2` but triggers at the end of our main coroutine.
294 | And so we have that `f2` is before `f1` in the internal heap of timeouts, but that `f1` will trigger first.
295 | Thus when `f1` triggers it will remain in that heap even after it has triggered (until `f2` has triggered as well
296 | and they can both be popped).
297 | In this scenario, we don't want the generator object to remain in memory just because it's still sitting in that
298 | heap!
299 | This test checks that the generator can be cleaned up even whilst we wait for the `_Wait` object to get collected
300 | later.
301 | """
302 |
303 | def wait(event, wait_time):
304 | if yield_from:
305 | yield from event.wait(wait_time)
306 | else:
307 | yield event.wait(wait_time)
308 |
309 | def set_event(event):
310 | for _ in range(20):
311 | yield
312 | event.set()
313 |
314 | def baz():
315 | event1 = tinyio.Event()
316 | event2 = tinyio.Event()
317 | f1 = wait(event1, timeout)
318 | f2 = wait(event2, 2)
319 | ref = weakref.ref(f1)
320 | yield {f2}
321 | yield [f1, set_event(event1)]
322 | del f1
323 | gc.collect()
324 | assert ref() is None
325 | event2.set()
326 | return 3
327 |
328 | loop = tinyio.Loop()
329 | assert loop.run(baz()) == 3
330 |
331 |
332 | def test_event_fairness():
333 | """This checks that once one event unblocks, that we don't just keep chasing all the stuff downstream of that event,
334 | i.e. that we do schedule work from any other event that has finished.
335 | """
336 | outs = []
337 |
338 | def f():
339 | yield tinyio.Event().wait(0)
340 | outs.append(1)
341 | for _ in range(20):
342 | yield
343 | outs.append(2)
344 |
345 | def g():
346 | yield [f(), f()]
347 |
348 | loop = tinyio.Loop()
349 | loop.run(g())
350 | assert outs == [1, 1, 2, 2]
351 |
352 |
353 | def test_event_fairness2():
354 | event1 = tinyio.Event()
355 | outs = []
356 |
357 | def f():
358 | yield event1.wait(0)
359 | outs.append(1)
360 |
361 | def g():
362 | yield {f()}
363 | for _ in range(20):
364 | yield
365 | outs.append(2)
366 |
367 | loop = tinyio.Loop()
368 | loop.run(g())
369 | assert outs == [1, 2]
370 |
371 |
372 | def test_simultaneous_set():
373 | event = tinyio.Event()
374 |
375 | def f():
376 | for _ in range(20):
377 | yield
378 | yield [tinyio.run_in_thread(event.set) for _ in range(100)]
379 |
380 | def g():
381 | yield event.wait()
382 |
383 | def h():
384 | yield [g(), f()]
385 |
386 | loop = tinyio.Loop()
387 | loop.run(h())
388 |
389 |
390 | def test_timeout_then_set():
391 | event1 = tinyio.Event()
392 | event2 = tinyio.Event()
393 |
394 | def f():
395 | yield [event1.wait(0), event2.wait()]
396 |
397 | def g():
398 | yield {f()}
399 | for _ in range(20):
400 | yield
401 | event1.set()
402 | for _ in range(20):
403 | yield
404 | event2.set()
405 | return 3
406 |
407 | loop = tinyio.Loop()
408 | assert loop.run(g()) == 3
409 |
410 |
411 | def test_set_then_timeout():
412 | event1 = tinyio.Event()
413 | event2 = tinyio.Event()
414 |
415 | def f():
416 | event1.set()
417 | yield [event1.wait(0), event2.wait()]
418 |
419 | def g():
420 | yield {f()}
421 | for _ in range(20):
422 | yield
423 | event2.set()
424 | return 3
425 |
426 | loop = tinyio.Loop()
427 | assert loop.run(g()) == 3
428 |
429 |
430 | def test_set_then_timeout_then_clear():
431 | event1 = tinyio.Event()
432 | event2 = tinyio.Event()
433 |
434 | def f():
435 | event1.set()
436 | yield [event1.wait(0), event2.wait()]
437 |
438 | def g():
439 | yield {f()}
440 | for _ in range(20):
441 | yield
442 | event1.clear()
443 | event2.set()
444 | return 3
445 |
446 | loop = tinyio.Loop()
447 | assert loop.run(g()) == 3
448 |
449 |
450 | def test_set_then_timeout_then_clear_then_set():
451 | event1 = tinyio.Event()
452 | event2 = tinyio.Event()
453 |
454 | def f():
455 | event1.set()
456 | yield [event1.wait(0), event2.wait()]
457 |
458 | def g():
459 | yield {f()}
460 | for _ in range(20):
461 | yield
462 | event1.clear()
463 | event2.set()
464 | event1.set()
465 | return 3
466 |
467 | loop = tinyio.Loop()
468 | assert loop.run(g()) == 3
469 |
470 |
471 | def test_timeout_as_part_of_group_and_only_coroutine():
472 | event1 = tinyio.Event()
473 | event2 = tinyio.Event()
474 | wait: Any = event1.wait(0)
475 | wait2 = event2.wait()
476 |
477 | def f():
478 | yield [wait, wait2]
479 | return 3
480 |
481 | def set2():
482 | time.sleep(0.1)
483 | event2.set()
484 |
485 | t = threading.Thread(target=set2)
486 | t.start()
487 | loop = tinyio.Loop()
488 | assert loop.run(f()) == 3
489 |
490 |
491 | def test_yield_finished_coroutine():
492 | def f():
493 | yield
494 |
495 | ff = f()
496 | next(ff)
497 | with contextlib.suppress(StopIteration):
498 | next(ff)
499 |
500 | def g():
501 | yield ff
502 |
503 | loop = tinyio.Loop()
504 | with pytest.raises(RuntimeError, match="has already finished"):
505 | loop.run(g())
506 |
--------------------------------------------------------------------------------
/tests/test_errors.py:
--------------------------------------------------------------------------------
1 | import pickle
2 | import time
3 |
4 | import pytest
5 | import tinyio
6 |
7 |
8 | def _flat_tb(e: BaseException) -> list[str]:
9 | tb = e.__traceback__
10 | out = []
11 | while tb is not None:
12 | out.append(tb.tb_frame.f_code.co_name)
13 | tb = tb.tb_next
14 | return out
15 |
16 |
17 | @pytest.mark.parametrize("exception_group", (None, False, True))
18 | def test_propagation(exception_group):
19 | def f():
20 | yield g()
21 |
22 | def g():
23 | yield from g2()
24 |
25 | def g2():
26 | yield h()
27 |
28 | def h():
29 | yield i()
30 |
31 | def i():
32 | raise RuntimeError("oh no")
33 | yield
34 |
35 | loop = tinyio.Loop()
36 | with pytest.raises(Exception) as catcher:
37 | loop.run(f(), exception_group)
38 | if exception_group is True:
39 | assert type(catcher.value) is ExceptionGroup
40 | [runtime] = catcher.value.exceptions # pyright: ignore[reportAttributeAccessIssue]
41 | assert type(runtime) is RuntimeError
42 | assert _flat_tb(runtime) == ["f", "g", "g2", "h", "i"]
43 | else:
44 | runtime = catcher.value
45 | assert type(runtime) is RuntimeError
46 | assert _flat_tb(runtime) == ["test_propagation", "f", "g", "g2", "h", "i"]
47 |
48 |
49 | @pytest.mark.parametrize("exception_group", (None, False, True))
50 | def test_cancelling_coroutines_not_affecting_current_error(exception_group):
51 | cancelled = False
52 |
53 | def f():
54 | yield [g(), h()]
55 |
56 | def g():
57 | yield i()
58 |
59 | def h():
60 | try:
61 | while True:
62 | yield tinyio.sleep(1)
63 | except BaseException as e:
64 | assert type(e) is tinyio.CancelledError
65 | nonlocal cancelled
66 | cancelled = True
67 | raise
68 |
69 | def i():
70 | raise RuntimeError("kapow")
71 | yield
72 |
73 | loop = tinyio.Loop()
74 | with pytest.raises(BaseException) as catcher:
75 | loop.run(f(), exception_group)
76 | assert cancelled
77 | if exception_group is True:
78 | assert type(catcher.value) is BaseExceptionGroup
79 | [a, b, c] = catcher.value.exceptions
80 | assert type(a) is RuntimeError
81 | assert str(a) == "kapow"
82 | assert _flat_tb(a) == ["f", "g", "i"]
83 | assert _flat_tb(b) == ["h"]
84 | assert _flat_tb(c) == ["sleep"]
85 | else:
86 | assert type(catcher.value) is RuntimeError
87 | assert str(catcher.value) == "kapow"
88 | assert _flat_tb(catcher.value) == ["test_cancelling_coroutines_not_affecting_current_error", "f", "g", "i"]
89 |
90 |
91 | @pytest.mark.parametrize("exception_group", (None, False, True))
92 | def test_invalid_yield(exception_group):
93 | def f():
94 | yield g()
95 |
96 | def g():
97 | yield h(), h()
98 |
99 | def h():
100 | yield
101 |
102 | loop = tinyio.Loop()
103 | with pytest.raises(Exception) as catcher:
104 | loop.run(f(), exception_group)
105 | if exception_group is True:
106 | assert type(catcher.value) is ExceptionGroup
107 | [runtime] = catcher.value.exceptions
108 | assert _flat_tb(runtime) == ["f", "g"]
109 | else:
110 | runtime = catcher.value
111 | assert _flat_tb(runtime) == ["test_invalid_yield", "f", "g"]
112 | assert type(runtime) is RuntimeError
113 | assert "Invalid yield" in str(runtime)
114 |
115 |
116 | def _foo():
117 | yield _bar()
118 |
119 |
120 | def _bar():
121 | yield _baz()
122 |
123 |
124 | def _baz():
125 | raise RuntimeError("Kaboom")
126 |
127 |
128 | @pytest.mark.parametrize("exception_group", (None, False, True))
129 | @pytest.mark.parametrize("with_notes", (False, True))
130 | def test_serialize(exception_group: bool, with_notes: bool):
131 | loop = tinyio.Loop()
132 | with pytest.raises(BaseException) as catcher:
133 | loop.run(_foo(), exception_group)
134 | if with_notes:
135 | catcher.value.add_note("hi")
136 | catcher.value.add_note("bye")
137 | data = pickle.dumps(catcher.value)
138 | out = pickle.loads(data)
139 | if with_notes:
140 | assert out.__notes__ == ["hi", "bye"]
141 | if exception_group is True:
142 | assert type(out) is ExceptionGroup
143 | [runtime] = out.exceptions
144 | else:
145 | runtime = out
146 | assert type(runtime) is RuntimeError
147 | assert str(runtime) == "Kaboom"
148 | # Pickle strips tracebacks
149 | assert _flat_tb(runtime) == []
150 |
151 |
152 | @pytest.mark.parametrize("exception_group", (None, False, True))
153 | def test_error_to_thread(exception_group):
154 | def blocking_operation():
155 | nonlocal marker
156 | assert marker is None
157 |
158 | # Do some blocking work.
159 | try:
160 | marker = False
161 | timeout = time.time() + 10
162 | while time.time() < timeout:
163 | time.sleep(0.1)
164 | except BaseException as e:
165 | assert type(e) is tinyio.CancelledError
166 | marker = True
167 | raise
168 |
169 | def cancel():
170 | yield from cancel2()
171 |
172 | def cancel2():
173 | while True:
174 | yield
175 | if marker is False:
176 | raise RuntimeError("Raising!")
177 |
178 | def one():
179 | yield [tinyio.run_in_thread(blocking_operation), cancel()]
180 |
181 | def two():
182 | yield [cancel(), tinyio.run_in_thread(blocking_operation)]
183 |
184 | loop = tinyio.Loop()
185 |
186 | for workflow in [one, two]:
187 | marker = None
188 | with pytest.raises(BaseException) as catcher:
189 | loop.run(workflow(), exception_group)
190 | assert marker is True
191 | if exception_group is True:
192 | assert type(catcher.value) is BaseExceptionGroup
193 | [raising, cancelled] = catcher.value.exceptions
194 | assert type(raising) is RuntimeError and str(raising) == "Raising!"
195 | assert _flat_tb(raising) == [workflow.__name__, "cancel", "cancel2"]
196 | assert type(cancelled) is tinyio.CancelledError
197 | assert _flat_tb(cancelled) == ["target", "blocking_operation"]
198 | else:
199 | raising = catcher.value
200 | assert type(raising) is RuntimeError and str(raising) == "Raising!"
201 | assert _flat_tb(raising) == ["test_error_to_thread", workflow.__name__, "cancel", "cancel2"]
202 |
203 |
204 | @pytest.mark.parametrize("exception_group", (None, False, True))
205 | def test_error_to_thread_with_context(exception_group):
206 | def blocking_operation():
207 | try:
208 | sub_blocking_operation()
209 | except tinyio.CancelledError as e:
210 | raise ValueError("Responding improperly to cancellation") from e
211 |
212 | def sub_blocking_operation():
213 | while True:
214 | time.sleep(0.1)
215 |
216 | def foo():
217 | yield bar()
218 |
219 | def bar():
220 | yield [baz(), tinyio.run_in_thread(blocking_operation)]
221 |
222 | def baz():
223 | yield
224 | raise RuntimeError("Kaboom")
225 |
226 | loop = tinyio.Loop()
227 | with (
228 | pytest.raises(BaseException) as catcher,
229 | pytest.warns(RuntimeWarning, match="did not respond properly to cancellation"),
230 | ):
231 | loop.run(foo(), exception_group)
232 | if exception_group is False:
233 | runtime = catcher.value
234 | assert type(runtime) is RuntimeError
235 | assert str(runtime) == "Kaboom"
236 | assert _flat_tb(runtime) == ["test_error_to_thread_with_context", "foo", "bar", "baz"]
237 | else:
238 | assert type(catcher.value) is ExceptionGroup
239 | runtime, value = catcher.value.exceptions
240 | assert type(runtime) is RuntimeError
241 | assert str(runtime) == "Kaboom"
242 | assert _flat_tb(runtime) == ["foo", "bar", "baz"]
243 | assert type(value) is ValueError
244 | assert str(value) == "Responding improperly to cancellation"
245 | assert _flat_tb(value) == ["target", "blocking_operation"]
246 | cancelled_context = value.__context__
247 | assert cancelled_context is value.__cause__
248 | assert type(cancelled_context) is tinyio.CancelledError
249 | assert _flat_tb(cancelled_context) == ["blocking_operation", "sub_blocking_operation"]
250 |
251 |
252 | @pytest.mark.parametrize("exception_group", (None, False, True))
253 | def test_error_from_thread(exception_group):
254 | def blocking_operation():
255 | sub_blocking_operation()
256 |
257 | def sub_blocking_operation():
258 | raise RuntimeError("Kaboom")
259 |
260 | def foo():
261 | yield bar()
262 |
263 | def bar():
264 | yield [baz(), tinyio.run_in_thread(blocking_operation)]
265 |
266 | def baz():
267 | while True:
268 | yield
269 |
270 | loop = tinyio.Loop()
271 | with pytest.raises(BaseException) as catcher:
272 | loop.run(foo(), exception_group)
273 | if exception_group is True:
274 | assert type(catcher.value) is BaseExceptionGroup
275 | runtime, cancelled = catcher.value.exceptions
276 | assert type(runtime) is RuntimeError
277 | assert str(runtime) == "Kaboom"
278 | assert _flat_tb(runtime) == ["foo", "bar", "target", "blocking_operation", "sub_blocking_operation"]
279 | assert type(cancelled) is tinyio.CancelledError
280 | assert _flat_tb(cancelled) == ["baz"]
281 | else:
282 | runtime = catcher.value
283 | assert type(runtime) is RuntimeError
284 | assert str(runtime) == "Kaboom"
285 | assert _flat_tb(runtime) == [
286 | "test_error_from_thread",
287 | "foo",
288 | "bar",
289 | "target",
290 | "blocking_operation",
291 | "sub_blocking_operation",
292 | ]
293 |
294 |
295 | @pytest.mark.parametrize("exception_group", (None, False, True))
296 | def test_error_from_thread_with_context(exception_group):
297 | def blocking_operation():
298 | try:
299 | sub_blocking_operation()
300 | except RuntimeError as e:
301 | raise ValueError("oh no") from e
302 |
303 | def sub_blocking_operation():
304 | raise RuntimeError("Kaboom")
305 |
306 | def foo():
307 | yield bar()
308 |
309 | def bar():
310 | yield [baz(), tinyio.run_in_thread(blocking_operation)]
311 |
312 | def baz():
313 | while True:
314 | yield
315 |
316 | loop = tinyio.Loop()
317 | with pytest.raises(BaseException) as catcher:
318 | loop.run(foo(), exception_group)
319 | if exception_group is True:
320 | assert type(catcher.value) is BaseExceptionGroup
321 | value, cancelled = catcher.value.exceptions
322 | assert type(value) is ValueError
323 | assert str(value) == "oh no"
324 | assert _flat_tb(value) == ["foo", "bar", "target", "blocking_operation"]
325 | assert type(cancelled) is tinyio.CancelledError
326 | assert _flat_tb(cancelled) == ["baz"]
327 | else:
328 | value = catcher.value
329 | assert type(value) is ValueError
330 | assert str(value) == "oh no"
331 | assert _flat_tb(value) == ["test_error_from_thread_with_context", "foo", "bar", "target", "blocking_operation"]
332 |
333 | runtime = value.__context__
334 | assert type(runtime) is RuntimeError
335 | assert str(runtime) == "Kaboom"
336 | assert _flat_tb(runtime) == ["blocking_operation", "sub_blocking_operation"]
337 |
338 |
339 | @pytest.mark.parametrize("exception_group", (None, False, True))
340 | def test_keyboard_interrupt_within_loop(exception_group, monkeypatch):
341 | """Tests an error occurring from within the loop itself, not within one of the coroutines."""
342 |
343 | def _invalid(out):
344 | del out
345 | raise KeyboardInterrupt
346 |
347 | monkeypatch.setattr(tinyio._core, "_invalid", _invalid)
348 |
349 | def _f():
350 | yield [_g(), _h()]
351 |
352 | def _g():
353 | yield 5
354 |
355 | def _h():
356 | yield
357 |
358 | loop = tinyio.Loop()
359 | with pytest.raises(BaseException) as catcher:
360 | loop.run(_f(), exception_group)
361 | if exception_group is True:
362 | assert type(catcher.value) is BaseExceptionGroup
363 | [keyboard, h_error] = catcher.value.exceptions
364 | assert type(keyboard) is KeyboardInterrupt
365 | assert _flat_tb(keyboard) == ["_f", "_g", "_invalid"]
366 | assert _flat_tb(h_error) == ["_h"]
367 | else:
368 | keyboard = catcher.value
369 | assert type(keyboard) is KeyboardInterrupt
370 | assert _flat_tb(keyboard) == ["test_keyboard_interrupt_within_loop", "_f", "_g", "_invalid"]
371 |
--------------------------------------------------------------------------------
/tinyio/_core.py:
--------------------------------------------------------------------------------
1 | import collections as co
2 | import dataclasses
3 | import enum
4 | import graphlib
5 | import heapq
6 | import threading
7 | import time
8 | import traceback
9 | import types
10 | import warnings
11 | import weakref
12 | from collections.abc import Generator
13 | from typing import Any, TypeAlias, TypeVar
14 |
15 |
16 | #
17 | # Public API: loop implementation
18 | #
19 | # The main logic is that each time a coroutine yields, we create a `_WaitingFor` object which holds a counter for how many
20 | # things it is waiting on before it can wake up. Once this counter hits zero, the `_WaitingFor` object schedules the
21 | # coroutine back on the loop.
22 | # Counters can be decremented in three ways: another coroutine finishes, an `Event.set()` is triggered, or a timeout in
23 | # `Event.wait(timeout_in_seconds=...)` is triggered.
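   | # As an illustrative walk-through (derived from `_step` and `_WaitingFor.decrement` below, not new behaviour): a
   | # coroutine that does `yield [a, b]` gets a `_WaitingFor` with `counter=2`; when `a` finishes the counter drops to 1,
   | # when `b` finishes it drops to 0, and the coroutine is scheduled back on the queue with `[result_of_a, result_of_b]`.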
24 | #
25 |
26 |
27 | _Return = TypeVar("_Return")
28 | Coro: TypeAlias = Generator[Any, Any, _Return]
29 |
30 |
31 | class Loop:
32 | """Event loop for running `tinyio`-style coroutines."""
33 |
34 | def __init__(self):
35 | # Keep around the results with weakrefs.
36 | # This makes it possible to perform multiple `.run`s, with coroutines that may internally await on the same
37 | # coroutines as each other.
38 |         # It's a weakref because, if no-one else has access to them, then they cannot appear in our event loop, so
39 |         # we don't need to keep their results around for the above use-case.
40 | self._results = weakref.WeakKeyDictionary()
41 |
42 | def run(self, coro: Coro[_Return], exception_group: None | bool = None) -> _Return:
43 | """Run the specified coroutine in the event loop.
44 |
45 | **Arguments:**
46 |
47 |         - `coro`: a Python coroutine to run; it may yield `None`, other coroutines, or lists/sets of coroutines.
48 | - `exception_group`: in the event of an error in one of the coroutines (which will cancel all other coroutines
49 | and shut down the loop), then this determines the kind of exception raised out of the loop:
50 | - if `False` then raise just that error, silently ignoring any errors that occur when cancelling the other
51 | coroutines.
52 | - if `True` then always raise a `{Base}ExceptionGroup`, whose first sub-exception will be the original
53 | error, and whose later sub-exceptions will be any errors that occur whilst cancelling the other
54 | coroutines. (Including all the `tinyio.CancelledError`s that indicate successful cancellation.)
55 | - if `None` (the default) then raise just the original error if all other coroutines shut down successfully,
56 | and raise a `{Base}ExceptionGroup` if any other coroutine raises an exception during shutdown.
57 | (Excluding all the `tinyio.CancelledError`s that indicate successful cancellation.)
58 |
59 | **Returns:**
60 |
61 | The final `return` from `coro`.
62 | """
63 | if not isinstance(coro, Generator):
64 | raise ValueError("Invalid input `coro`, which is not a coroutine (a function using `yield` statements).")
65 | queue: co.deque[_Todo] = co.deque()
66 | queue.appendleft(_Todo(coro, None))
67 | waiting_on = dict[Coro, list[_WaitingFor]]()
68 | waiting_on[coro] = []
69 | # Loop invariant: `{x.coro for x in queue}.issubset(set(waiting_on.keys()))`
70 | wait_heap: list[_Wait] = []
71 | wake_loop = threading.Event()
72 | wake_loop.set()
73 | current_coro_ref = [coro]
74 | # Loop invariant: `len(current_coro_ref) == 1`. It's not really load-bearing, it's just used for making a nice
75 | # traceback when we get an error.
76 | try:
77 | while True:
78 | if len(queue) == 0:
79 | if len(waiting_on) == 0:
80 | # We're done.
81 | break
82 | else:
83 | # We might have a cycle bug...
84 | self._check_cycle(waiting_on, coro)
85 |                         # ...but hopefully we're just waiting on a thread or exogenous event to unblock one of our
86 | # coroutines.
87 | while len(queue) == 0:
88 | self._wait(wait_heap, wake_loop)
89 | self._clear(wait_heap, wake_loop)
90 |                         # These lines need to be wrapped in a `len(queue)` check, as just because we've unblocked
91 | # doesn't necessarily mean that we're ready to schedule a coroutine: we could have something
92 | # like `yield [event1.wait(...), event2.wait(...)]`, and only one of the two has unblocked.
93 | else:
94 | self._clear(wait_heap, wake_loop)
95 | todo = queue.pop()
96 | current_coro_ref[0] = todo.coro
97 | self._step(todo, queue, waiting_on, wait_heap, wake_loop)
98 | current_coro_ref[0] = coro
99 | except BaseException as e:
100 | _cleanup(e, waiting_on, current_coro_ref, exception_group)
101 | raise # if not raising an `exception_group`
102 | return self._results[coro]
103 |
104 | @staticmethod
105 | def _check_cycle(waiting_on, coro):
106 | sorter = graphlib.TopologicalSorter()
107 | for k, v in waiting_on.items():
108 | for vi in v:
109 | sorter.add(k, vi.coro)
110 | try:
111 | sorter.prepare()
112 | except graphlib.CycleError:
113 | coro.throw(RuntimeError("Cycle detected in `tinyio` loop. Cancelling all coroutines."))
114 |
115 | @staticmethod
116 | def _wait(wait_heap: list["_Wait"], wake_loop: threading.Event):
117 | timeout = None
118 | while len(wait_heap) > 0:
119 | soonest = wait_heap[0]
120 | assert soonest.timeout_in_seconds is not None
121 | if soonest.state == _WaitState.DONE:
122 | heapq.heappop(wait_heap)
123 | else:
124 | timeout = soonest.timeout_in_seconds - time.monotonic()
125 | break
126 | wake_loop.wait(timeout=timeout)
127 |
128 | @staticmethod
129 | def _clear(wait_heap: list["_Wait"], wake_loop: threading.Event):
130 | wake_loop.clear()
131 | while len(wait_heap) > 0:
132 | soonest = wait_heap[0]
133 | assert soonest.timeout_in_seconds is not None
134 | if soonest.state == _WaitState.DONE:
135 | heapq.heappop(wait_heap)
136 | elif soonest.timeout_in_seconds <= time.monotonic():
137 | heapq.heappop(wait_heap)
138 | soonest.notify_from_timeout()
139 | else:
140 | break
141 |
142 | def _step(
143 | self,
144 | todo: "_Todo",
145 | queue: co.deque["_Todo"],
146 | waiting_on: dict[Coro, list["_WaitingFor"]],
147 | wait_heap: list["_Wait"],
148 | wake_loop: threading.Event,
149 | ) -> None:
150 | try:
151 | out = todo.coro.send(todo.value)
152 | except StopIteration as e:
153 | self._results[todo.coro] = e.value
154 | for waiting_for in waiting_on.pop(todo.coro):
155 | waiting_for.decrement()
156 | else:
157 | original_out = out
158 | if type(out) is list and len(out) == 0:
159 | out = None
160 | if isinstance(out, (_Wait, Generator)):
161 | out = [out]
162 | match out:
163 | case None:
164 | # original_out will either be `None` or `[]`.
165 | queue.appendleft(_Todo(todo.coro, original_out))
166 | case set():
167 | for out_i in out:
168 | if isinstance(out_i, Generator):
169 | if out_i not in self._results.keys() and out_i not in waiting_on.keys():
170 | if out_i.gi_frame is None: # pyright: ignore[reportAttributeAccessIssue]
171 | todo.coro.throw(_already_finished(out_i))
172 | queue.appendleft(_Todo(out_i, None))
173 | waiting_on[out_i] = []
174 | else:
175 | assert not isinstance(out_i, _Wait)
176 | todo.coro.throw(_invalid(original_out))
177 | queue.appendleft(_Todo(todo.coro, None))
178 | case list():
179 | waiting_for = _WaitingFor(len(out), todo.coro, original_out, wake_loop, self._results, queue)
180 | for out_i in out:
181 | if isinstance(out_i, Generator):
182 | if out_i in self._results.keys():
183 | waiting_for.decrement()
184 | elif out_i in waiting_on.keys():
185 | waiting_on[out_i].append(waiting_for)
186 | else:
187 | if out_i.gi_frame is None: # pyright: ignore[reportAttributeAccessIssue]
188 | todo.coro.throw(_already_finished(out_i))
189 | queue.appendleft(_Todo(out_i, None))
190 | waiting_on[out_i] = [waiting_for]
191 | elif isinstance(out_i, _Wait):
192 | out_i.register(waiting_for)
193 | if out_i.timeout_in_seconds is not None:
194 | heapq.heappush(wait_heap, out_i)
195 | else:
196 | todo.coro.throw(_invalid(original_out))
197 | case _:
198 | todo.coro.throw(_invalid(original_out))
199 |
200 |
201 | class CancelledError(BaseException):
202 |     """Raised when a `tinyio` coroutine is cancelled due to an error in another coroutine."""
203 |
204 |
205 | CancelledError.__module__ = "tinyio"
206 |
207 |
208 | #
209 | # Loop internals, in particular events and waiting
210 | #
211 |
212 |
213 | @dataclasses.dataclass(frozen=True)
214 | class _Todo:
215 | coro: Coro
216 | value: Any
217 |
218 |
219 | # We need at least some use of locks, as `Event`s are public objects that may interact with user threads. If the
220 | # internals of our event/wait/waitingfor mechanisms are modified concurrently then it would be very easy for things to
221 | # go wrong.
222 | # In particular note that our event loop is one actor that is making modifications, in addition to user threads.
223 | # For this reason it doesn't suffice to just have a lock around `Event.{set, clear}`.
224 | # For simplicity, we simply guard all entries into the event/wait/waitingfor mechanism with a single lock. We could try
225 | # to use some other locking strategy but that seems error-prone.
226 | _global_event_lock = threading.RLock()
227 |
228 |
229 | @dataclasses.dataclass(frozen=False)
230 | class _WaitingFor:
231 | counter: int
232 | coro: Coro
233 | out: "_Wait | Coro | list[_Wait | Coro]"
234 | wake_loop: threading.Event
235 | results: weakref.WeakKeyDictionary[Coro, Any]
236 | queue: co.deque[_Todo]
237 |
238 | def __post_init__(self):
239 | assert self.counter > 0
240 |
241 | def increment(self):
242 | with _global_event_lock:
243 | # This assert is valid as our only caller is `_Wait.unnotify_from_event`, which will only have a reference
244 | # to us if we haven't completed yet -- otherwise we'd have already called its `_Wait.cleanup` method.
245 | assert self.counter != 0
246 | self.counter += 1
247 |
248 | def decrement(self):
249 |         # We need a lock here as this may be called simultaneously from our event loop and from `Event.set`.
250 |         # (Though `Event.set` has its own internal lock, that doesn't cover the event loop as well.)
251 | with _global_event_lock:
252 | assert self.counter > 0
253 | self.counter -= 1
254 | if self.counter == 0:
255 | match self.out:
256 | case None:
257 | result = None
258 | waits = []
259 | case _Wait():
260 | result = None
261 | waits = [self.out]
262 | case Generator():
263 | result = self.results[self.out]
264 | waits = []
265 | case list():
266 | result = [None if isinstance(out_i, _Wait) else self.results[out_i] for out_i in self.out]
267 | waits = [out_i for out_i in self.out if isinstance(out_i, _Wait)]
268 | case _:
269 | assert False
270 | for wait in waits:
271 | wait.cleanup()
272 | self.queue.appendleft(_Todo(self.coro, result))
273 |                 # If we're calling this function from a thread, and the main event loop is blocked, then use this to
274 | # notify the main event loop that it can wake up.
275 | self.wake_loop.set()
276 |
277 |
278 | class _WaitState(enum.Enum):
279 | INITIALISED = "initialised"
280 | REGISTERED = "registered"
281 | NOTIFIED_EVENT = "notified_event"
282 | NOTIFIED_TIMEOUT = "notified_timeout"
283 | DONE = "done"
284 |
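   | # For reference, the `_WaitState` transitions driven by the `_Wait` methods below are (a summary read off the
   | # asserts in `_Wait`, not additional behaviour):
   | #     INITIALISED --register--> REGISTERED
   | #     REGISTERED --notify_from_event--> NOTIFIED_EVENT --unnotify_from_event--> REGISTERED
   | #     REGISTERED or NOTIFIED_EVENT --notify_from_timeout--> NOTIFIED_TIMEOUT
   | #     NOTIFIED_EVENT or NOTIFIED_TIMEOUT --cleanup--> DONE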
285 |
286 | class _Wait:
287 | def __init__(self, event: "Event", timeout_in_seconds: None | int | float):
288 | self._event = event
289 | self._timeout_in_seconds = timeout_in_seconds
290 | self._waiting_for = None
291 | self.state = _WaitState.INITIALISED
292 |
293 | # This is basically just a second `__init__` method. We're not really initialised until this has been called
294 | # precisely once as well. The reason we have two is that an end-user creates us during `Event.wait()`, and then we
295 | # need to register on the event loop.
296 | def register(self, waiting_for: "_WaitingFor") -> None:
297 | with _global_event_lock:
298 | assert self.state is _WaitState.INITIALISED
299 | assert self._waiting_for is None
300 | assert self._event is not None
301 | self.state = _WaitState.REGISTERED
302 | if self._timeout_in_seconds is None:
303 | self.timeout_in_seconds = None
304 | else:
305 | self.timeout_in_seconds = time.monotonic() + self._timeout_in_seconds
306 | self._waiting_for = waiting_for
307 | self._event._waits[self] = None
308 | if self._event.is_set():
309 | self.notify_from_event()
310 |
311 | def notify_from_event(self):
312 | with _global_event_lock:
313 | # We cannot have `NOTIFIED_EVENT` as our event will have toggled its internal state to `True` as part of
314 | # calling us, and so future `Event.set()` calls will not call `.notify_from_event`.
315 | # We cannot have `DONE` as this is only set during `.cleanup()`, and at that point we deregister from
316 | # `self._event._waits`.
317 | assert self.state in {_WaitState.REGISTERED, _WaitState.NOTIFIED_TIMEOUT}
318 | assert self._waiting_for is not None
319 | if self.state == _WaitState.REGISTERED:
320 | self.state = _WaitState.NOTIFIED_EVENT
321 | self._waiting_for.decrement()
322 |
323 | def notify_from_timeout(self):
324 | with _global_event_lock:
325 | assert self.state in {_WaitState.REGISTERED, _WaitState.NOTIFIED_EVENT}
326 | assert self._waiting_for is not None
327 | is_registered = self.state == _WaitState.REGISTERED
328 | self.state = _WaitState.NOTIFIED_TIMEOUT # Override `NOTIFIED_EVENT` in case we `unnotify_from_event` later
329 | if is_registered:
330 | self._waiting_for.decrement()
331 |
332 | def unnotify_from_event(self):
333 | with _global_event_lock:
334 | assert self.state in {_WaitState.NOTIFIED_EVENT, _WaitState.NOTIFIED_TIMEOUT}
335 | assert self._waiting_for is not None
336 | # But ignore un-notifies if we've already triggered our timeout.
337 | if self.state is _WaitState.NOTIFIED_EVENT:
338 | self.state = _WaitState.REGISTERED
339 | self._waiting_for.increment()
340 |
341 | def cleanup(self):
342 | with _global_event_lock:
343 | assert self.state in {_WaitState.NOTIFIED_EVENT, _WaitState.NOTIFIED_TIMEOUT}
344 | assert self._waiting_for is not None
345 | assert self._event is not None
346 | self.state = _WaitState.DONE
347 | self._waiting_for = None # For GC purposes.
348 | del self._event._waits[self]
349 | self._event = None # For GC purposes.
350 |
351 | # For `heapq` to work.
352 | def __lt__(self, other):
353 | return self.timeout_in_seconds < other.timeout_in_seconds
354 |
355 |
356 | class Event:
357 | """A marker that something has happened."""
358 |
359 | def __init__(self):
360 | self._value = False
361 | self._waits = dict[_Wait, None]()
362 |
363 | def is_set(self):
364 | return self._value
365 |
366 | def set(self):
367 | with _global_event_lock:
368 | if not self._value:
369 | for wait in self._waits.copy().keys():
370 | wait.notify_from_event()
371 | self._value = True
372 |
373 | def clear(self):
374 | with _global_event_lock:
375 | if self._value:
376 | for wait in self._waits.keys():
377 | wait.unnotify_from_event()
378 | self._value = False
379 |
380 | def wait(self, timeout_in_seconds: None | int | float = None) -> Coro[None]:
381 | yield _Wait(self, timeout_in_seconds)
382 |
383 | def __bool__(self):
384 | raise TypeError("Cannot convert `tinyio.Event` to boolean. Did you mean `event.is_set()`?")
385 |
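   | # A minimal usage sketch for `Event` (illustrative only; `waiter`, `setter` and `main` are hypothetical names,
   | # following the same patterns as `tests/test_core.py`):
   | #
   | #     event = tinyio.Event()
   | #
   | #     def waiter():
   | #         yield event.wait()    # suspends until `event.set()` is called
   | #                               # (`event.wait(timeout_in_seconds=...)` also resumes once the timeout expires)
   | #
   | #     def setter():
   | #         yield
   | #         event.set()
   | #
   | #     def main():
   | #         yield [waiter(), setter()]
   | #
   | #     tinyio.Loop().run(main())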
386 |
387 | #
388 | # Error handling
389 | #
390 |
391 |
392 | def _strip_frames(e: BaseException, n: int):
393 | tb = e.__traceback__
394 | for _ in range(n):
395 | if tb is not None:
396 | tb = tb.tb_next
397 | return e.with_traceback(tb)
398 |
399 |
400 | def _cleanup(
401 | base_e: BaseException,
402 | waiting_on: dict[Coro, list[_WaitingFor]],
403 | current_coro_ref: list[Coro],
404 | exception_group: None | bool,
405 | ):
406 | # Oh no! Time to shut everything down. We can get here in two different ways:
407 | # - One of our coroutines raised an error internally (including being interrupted with a `KeyboardInterrupt`).
408 | # - An exogenous `KeyboardInterrupt` occurred whilst we were within the loop itself.
409 | [current_coro] = current_coro_ref
410 | # First, stop all the coroutines.
411 | cancellation_errors: dict[Coro, BaseException] = {}
412 | other_errors: dict[Coro, BaseException] = {}
413 | for coro in waiting_on.keys():
414 | # We do not have an `if coro is current_coro: continue` clause here. It may indeed be the case that
415 |     # `current_coro` was the origin of the current error (or the one which we called `.throw` on in a
416 | # few cases), so it has already been shut down. However it may also be the case that there was an exogenous
417 | # `KeyboardInterrupt` whilst within the tinyio loop itself, in which case we do need to shut this one down
418 | # as well.
419 | try:
420 | out = coro.throw(CancelledError)
421 | except CancelledError as e:
422 | # Skipped frame is the `coro.throw` above.
423 | cancellation_errors[coro] = _strip_frames(e, 1)
424 | continue
425 | except StopIteration as e:
426 | what_did = f"returned `{e.value}`."
427 | except BaseException as e:
428 | # Skipped frame is the `coro.throw` above.
429 | other_errors[coro] = _strip_frames(e, 1)
430 | if getattr(e, "__tinyio_no_warn__", False):
431 | continue
432 | details = "".join(traceback.format_exception_only(e)).strip()
433 | what_did = f"raised the exception `{details}`."
434 | else:
435 | what_did = f"yielded `{out}`."
436 | warnings.warn(
437 | f"Coroutine `{coro}` did not respond properly to cancellation on receiving a "
438 | "`tinyio.CancelledError`, and so a resource leak may have occurred. The coroutine is expected to "
439 | "propagate the `tinyio.CancelledError` to indicate success in cleaning up resources. Instead, the "
440 | f"coroutine {what_did}\n",
441 | category=RuntimeWarning,
442 | stacklevel=3,
443 | )
444 | # 2 skipped frames:
445 | # `self._step`
446 | # either `coro.throw(...)` or `todo.coro.send(todo.value)`
447 | _strip_frames(base_e, 2) # pyright: ignore[reportPossiblyUnboundVariable]
448 | # Next: bit of a heuristic, but it is pretty common to only have one thing waiting on you, so stitch together
449 | # their tracebacks as far as we can. Thinking about specifically `current_coro`:
450 | #
451 | # - If `current_coro` was the source of the error then our `coro.throw(CancelledError)` above will return an
452 | # exception with zero frames in its traceback (well it starts with a single frame for
453 | # `coro.throw(CancelledError)`, but this immediately gets stripped above). So we begin by appending nothing here,
454 | # which is what we want.
455 | # - If this was an exogenous `KeyboardInterrupt` whilst we were within the loop itself, then we'll append the
456 | # stack from cancelling `current_coro`, which again is what we want.
457 | #
458 | # And then after that we just keep working our way up appending the cancellation tracebacks for each coroutine in
459 | # turn.
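   |     # (As a concrete illustration of the result: if `a` waits on `b`, `b` waits on `c`, and `c` raises, the stitched
   |     # traceback reads `a` -> `b` -> `c`, which is what the `_flat_tb` assertions in `tests/test_errors.py` check.)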
460 | coro = current_coro
461 | tb = base_e.__traceback__ # pyright: ignore[reportPossiblyUnboundVariable]
462 | while True:
463 | next_e = cancellation_errors.pop(coro, None)
464 | if next_e is None:
465 | break # This coroutine responded improperly; don't try to go any further.
466 | else:
467 | flat_tb = []
468 | tb_ = next_e.__traceback__
469 | while tb_ is not None:
470 | flat_tb.append(tb_)
471 | tb_ = tb_.tb_next
472 | for tb_ in reversed(flat_tb):
473 | tb = types.TracebackType(tb, tb_.tb_frame, tb_.tb_lasti, tb_.tb_lineno)
474 | if len(waiting_on[coro]) != 1:
475 | # Either no-one is waiting on us and we're at the root, or multiple are waiting and we can't uniquely append
476 | # tracebacks any more.
477 | break
478 | [waiting_for] = waiting_on[coro]
479 | coro = waiting_for.coro
480 | base_e.with_traceback(tb) # pyright: ignore[reportPossiblyUnboundVariable]
481 | if exception_group is None:
482 | exception_group = len(other_errors) > 0
483 | cancellation_errors.clear()
484 | if exception_group:
485 | # Most cancellation errors are single frame tracebacks corresponding to the underlying generator.
486 | # A handful of them may be more interesting than this, e.g. if there is a `yield from` or if it's
487 | # `run_in_thread` which begins with the traceback from within the thread.
488 | # Bump these more-interesting ones to the top.
489 | interesting_cancellation_errors = []
490 | other_cancellation_errors = []
491 | for e in cancellation_errors.values():
492 | more_than_one_frame = e.__traceback__ is not None and e.__traceback__.tb_next is not None
493 | has_context = e.__context__ is not None
494 | if more_than_one_frame or has_context:
495 | interesting_cancellation_errors.append(e)
496 | else:
497 | other_cancellation_errors.append(e)
498 | raise BaseExceptionGroup(
499 | "An error occured running a `tinyio` loop.\nThe first exception below is the original error. Since it is "
500 | "common for each coroutine to only have one other coroutine waiting on it, then we have stitched together "
501 | "their tracebacks for as long as that is possible.\n"
502 | "The other exceptions are all exceptions that occurred whilst stopping the other coroutines.\n"
503 | "(For a debugger that allows for navigating within exception groups, try "
504 | "`https://github.com/patrick-kidger/patdb`.)\n",
505 | [base_e, *other_errors.values(), *interesting_cancellation_errors, *other_cancellation_errors], # pyright: ignore[reportPossiblyUnboundVariable]
506 | )
507 | # else let the parent `raise` the original error.
508 |
509 |
510 | def _invalid(out):
511 | msg = f"Invalid yield {out}. Must be either `None`, a coroutine, or a list/set of coroutines."
512 | if type(out) is tuple:
513 | # We could support this but I find the `[]` visually distinctive.
514 | msg += (
515 | " In particular to wait on multiple coroutines (a 'gather'), then the syntax is `yield [foo, bar]`, "
516 | "not `yield foo, bar`."
517 | )
518 | return RuntimeError(msg)
519 |
520 |
521 | def _already_finished(out):
522 | return RuntimeError(
523 | f"The coroutine `{out}` has already finished. However it has not been seen by the `tinyio` loop before and as "
524 | "such does not have any result associated with it."
525 | )
526 |
--------------------------------------------------------------------------------