├── requirements.txt ├── .gitignore ├── .vscode ├── settings.json └── launch.json ├── .github └── workflows │ ├── release.yml │ └── python-tests.yml ├── asynccp ├── __init__.py ├── time.py ├── managed_spi.py ├── test │ ├── test_managed_resource.py │ ├── managed_spi_test.py │ └── test_loop.py ├── managed_resource.py └── loop.py └── README.md /requirements.txt: -------------------------------------------------------------------------------- 1 | 2 | pytest 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | .venv/ 3 | **__pycache__ 4 | 5 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "pythonTestExplorer.testFramework": "pytest" 3 | 4 | } -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | workflow_dispatch: 7 | 8 | jobs: 9 | release: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Set release 13 | id: semrel 14 | uses: go-semantic-release/action@v1 15 | with: 16 | github-token: ${{ secrets.GITHUB_TOKEN }} 17 | allow-initial-development-versions: true 18 | force-bump-patch-version: true 19 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "name": "Python: Current File", 9 | "type": "python", 10 | "request": "launch", 11 | "program": "${file}", 12 | "console": "integratedTerminal", 13 | "cwd": "${workspaceFolder}", 14 | "env": { 15 | "PYTHONPATH": "${workspaceFolder}" 16 | } 17 | } 18 | ] 19 | } -------------------------------------------------------------------------------- /asynccp/__init__.py: -------------------------------------------------------------------------------- 1 | from .loop import Loop 2 | 3 | # Enable logging by setting builtins.asynccp_logging = True before importing the first time. 4 | # 5 | # import builtins 6 | # builtins.asynccp_logging = True 7 | # import asynccp 8 | 9 | __global_event_loop = None 10 | 11 | try: 12 | global asynccp_logging 13 | if asynccp_logging: 14 | print('Enabling asynccp instrumentation') 15 | except NameError: 16 | # Set False by default to skip debug logging 17 | asynccp_logging = False 18 | 19 | 20 | def get_loop(debug=asynccp_logging): 21 | """Returns the singleton event loop""" 22 | global __global_event_loop 23 | if __global_event_loop is None: 24 | __global_event_loop = Loop(debug=debug) 25 | return __global_event_loop 26 | 27 | 28 | add_task = get_loop().add_task 29 | run_later = get_loop().run_later 30 | schedule = get_loop().schedule 31 | schedule_later = get_loop().schedule_later 32 | delay = get_loop().delay 33 | suspend = get_loop().suspend 34 | 35 | run = get_loop().run 36 | -------------------------------------------------------------------------------- /asynccp/time.py: -------------------------------------------------------------------------------- 1 | 2 | class Duration: 3 | @staticmethod 4 | def of_hours(count): 5 | return Duration(count * 60 * 60 * 1000000000) 6 | 7 | @staticmethod 8 | def of_minutes(count): 9 | return Duration(count * 60 * 1000000000) 10 | 11 | @staticmethod 12 | def 
of_seconds(count): 13 | return Duration(count * 1000000000) 14 | 15 | @staticmethod 16 | def of_milliseconds(count): 17 | return Duration(count * 1000000) 18 | 19 | @staticmethod 20 | def of_microseconds(count): 21 | return Duration(count) 22 | 23 | def __init__(self, nanoseconds): 24 | self._nanoseconds = int(nanoseconds) 25 | 26 | def __add__(self, other): 27 | return Duration(self._nanoseconds + other._nanoseconds) 28 | 29 | def __sub__(self, other): 30 | return Duration(self._nanoseconds - other._nanoseconds) 31 | 32 | def __neg__(self): 33 | return Duration(-1 * self._nanoseconds) 34 | 35 | def as_frequency(self): 36 | """returns this duration as a frequency interval in HZ""" 37 | return 1000000000.0 / self._nanoseconds 38 | 39 | def as_seconds(self): 40 | return self._nanoseconds / 1000000000.0 41 | 42 | def as_milliseconds(self): 43 | return self._nanoseconds / 1000000.0 44 | 45 | def as_microseconds(self): 46 | return self._nanoseconds / 1000.0 47 | 48 | def as_nanoseconds(self): 49 | return self._nanoseconds 50 | -------------------------------------------------------------------------------- /.github/workflows/python-tests.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: CPython Tests 5 | 6 | on: 7 | push: 8 | branches: [ main ] 9 | pull_request: 10 | branches: [ main ] 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | strategy: 17 | fail-fast: false 18 | matrix: 19 | python-version: [ 3.9 ] 20 | 21 | steps: 22 | - uses: actions/checkout@v2 23 | - name: Set up Python ${{ matrix.python-version }} 24 | uses: actions/setup-python@v2 25 | with: 26 | python-version: ${{ matrix.python-version }} 27 | - name: Install dependencies 28 | run: | 29 | python -m pip 
install --upgrade pip 30 | python -m pip install flake8 pytest 31 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 32 | - name: Lint with flake8 33 | run: | 34 | # stop the build if there are Python syntax errors or undefined names 35 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics 36 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide 37 | flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics 38 | - name: Test with pytest 39 | run: | 40 | python -m pytest 41 | -------------------------------------------------------------------------------- /asynccp/managed_spi.py: -------------------------------------------------------------------------------- 1 | from .managed_resource import ManagedResource 2 | import asynccp 3 | 4 | 5 | class ManagedSpi: 6 | def __init__(self, spi_bus, loop=asynccp.get_loop()): 7 | """ 8 | Vends access to an SPI bus via chip select leases. 9 | """ 10 | self._resource = ManagedResource(spi_bus, on_acquire=self._acquire_spi, on_release=self._release_spi, loop=loop) 11 | self._handles = {} 12 | 13 | @staticmethod 14 | def _acquire_spi(chip_select): 15 | chip_select.value = False 16 | 17 | @staticmethod 18 | def _release_spi(chip_select): 19 | chip_select.value = True 20 | 21 | def cs_handle(self, chip_select): 22 | """ 23 | pass in a digitalio.DigitalInOut chip select. 24 | This will be pulled low when a SpiHandle acquires the bus. 25 | 26 | Store 1 handle for each chip select you want to manage with a shared SPI. 27 | 28 | You can read or write batches of data from an sd card while sending updates to a display 29 | between read batches, while reading sensor data from 3 different sensors on a timer on a single 30 | spi bus - without coordinating between them. Only await the hande for each task's turn with the bus. 
31 | 32 | You need to: 33 | * configure the bus to work with your devices 34 | * configure the chip select pins as outputs 35 | 36 | You get: 37 | * non-blocking, awaitable access to an SPI 38 | """ 39 | chip_select.value = True 40 | spi_handle = self._resource.handle(chip_select=chip_select) 41 | return spi_handle 42 | -------------------------------------------------------------------------------- /asynccp/test/test_managed_resource.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | 3 | from asynccp.managed_resource import ManagedResource 4 | from asynccp import Loop 5 | 6 | 7 | class Resource: 8 | def __init__(self): 9 | self.active_cs = None 10 | 11 | def acquire(self, chip_select): 12 | assert self.active_cs is None, 'cannot acquire owned resource. Owned cs: ' + self.active_cs + ', requested cs: ' + chip_select 13 | self.active_cs = chip_select 14 | return self 15 | 16 | def release(self, chip_select): 17 | assert self.active_cs is not None, 'cannot release unowned resource. 
Owned cs: ' + self.active_cs + ', requested cs: ' + chip_select 18 | self.active_cs = None 19 | 20 | 21 | class YieldOne: 22 | def __await__(self): 23 | yield 24 | 25 | 26 | class TestManagedResource(TestCase): 27 | def test_acquire(self): 28 | loop = Loop() 29 | spi = Resource() 30 | managed_spi = ManagedResource(spi, spi.acquire, spi.release, loop=loop) 31 | handle_cs1 = managed_spi.handle(chip_select=1) 32 | handle_cs2 = managed_spi.handle(chip_select=2) 33 | self.assertIsNone(spi.active_cs) 34 | 35 | async def test_fn(managed_resource): # Stop points 36 | await YieldOne() # Enter fn 37 | async with managed_resource as spi: # acquire/suspend 38 | await YieldOne() # work 39 | self.assertTrue(spi.active_cs is not None) 40 | await YieldOne() # after context 41 | 42 | loop.add_task(test_fn(handle_cs1)) 43 | loop.add_task(test_fn(handle_cs2)) 44 | 45 | loop._step() # 1 Enter fn 2 Enter fn 46 | loop._step() # 1 acquire-work 2 suspend 47 | # 1 is working with spi on cs1, 2 is suspended waiting. 
48 | self.assertIs(spi.active_cs, 1) 49 | self.assertEqual(len(loop._tasks), 1) # 2 is suspended, not eligible to be run next step 50 | 51 | loop._step() # 1 after context 52 | self.assertIsNone(spi.active_cs) 53 | self.assertEqual(len(loop._tasks), 2) # 1 is unfinished, 2 is unsuspended by the ManagedResource 54 | 55 | loop._step() # 1 end 2 work 56 | self.assertIs(spi.active_cs, 2) 57 | self.assertEqual(len(loop._tasks), 1) # 1 is finished, 2 is working with spi on cs2 58 | 59 | loop._step() # 2 after context 60 | self.assertIsNone(spi.active_cs) 61 | self.assertEqual(len(loop._tasks), 1) # 2 is unfinished 62 | 63 | loop._step() # 2 end 64 | self.assertEqual(loop._tasks, []) # 2 is finished 65 | -------------------------------------------------------------------------------- /asynccp/managed_resource.py: -------------------------------------------------------------------------------- 1 | import asynccp 2 | 3 | 4 | class ManagedResource: 5 | """ 6 | Manages a singleton resource with your functions that initialize a resource and clean it up between uses. 7 | 8 | This class vends access to `resource` via a fair queue. Intended use is with something like a busio.SPI 9 | with on_acquire setting a chip select pin and on_release resetting that pin. 10 | 11 | A ManagedResource instance should be shared among all users of `resource`. 
12 | """ 13 | def __init__(self, resource, on_acquire=lambda *args, **kwargs: None, on_release=lambda *args, **kwargs: None, loop=asynccp.get_loop()): 14 | """ 15 | :param resource: The resource you want to manage access to (e.g., a busio.SPI) 16 | :param on_acquire: function(*args, **kwargs) => void acquires your singleton resource (CS pin low or something) 17 | :param on_release: function(*args, **kwargs) => void releases your singleton resource (CS pin high or something) 18 | """ 19 | self._resource = resource 20 | self._on_acquire = on_acquire 21 | self._on_release = on_release 22 | self._loop = loop 23 | self._ownership_queue = [] 24 | self._owned = False 25 | 26 | def handle(self, *args, **kwargs): 27 | """ 28 | returns a reusable, reentrant handle to the managed resource. 29 | args and kwargs are passed to on_acquire and on_release functions you provided with the resource. 30 | """ 31 | return Handle(self, args, kwargs) 32 | 33 | async def _aenter(self, args, kwargs): 34 | if self._owned: 35 | # queue up for access to the resource later 36 | await_handle, resume_fn = self._loop.suspend() 37 | self._ownership_queue.append(resume_fn) 38 | # This leverages the suspend() feature in asynccp; this current coroutine is not considered again until 39 | # the owning job is complete and __aexit__s below. This keeps waiting handles as cheap as possible. 40 | await await_handle 41 | self._owned = True 42 | self._on_acquire(*args, **kwargs) 43 | return self._resource 44 | 45 | async def _aexit(self, args, kwargs): 46 | assert self._owned, 'Exited from a context where a managed resource was not owned' 47 | self._on_release(*args, **kwargs) 48 | if len(self._ownership_queue) > 0: 49 | resume_fn = self._ownership_queue.pop(0) 50 | # Note that the awaiter has already passed the ownership check. 51 | # By not resetting to unowned here we avoid unfair resource starvation in certain code constructs. 
52 | resume_fn() 53 | else: 54 | self._owned = False 55 | 56 | 57 | class Handle: 58 | """ 59 | For binding resource initialization/teardown args to a resource. 60 | """ 61 | def __init__(self, managed_resource, args, kwargs): 62 | self._managed_resource = managed_resource 63 | self._args = args 64 | self._kwargs = kwargs 65 | self.active = False 66 | 67 | async def __aenter__(self): 68 | resource = await self._managed_resource._aenter(self._args, self._kwargs) 69 | self.active = True 70 | return resource 71 | 72 | async def __aexit__(self, exc_type, exc_val, exc_tb): 73 | resource = await self._managed_resource._aexit(self._args, self._kwargs) 74 | self.active = False 75 | return resource 76 | -------------------------------------------------------------------------------- /asynccp/test/managed_spi_test.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | 3 | from asynccp.managed_spi import ManagedSpi 4 | from asynccp import Loop 5 | 6 | 7 | # This is a terrible pattern, used only for tests 8 | class YieldOne: 9 | def __await__(self): 10 | yield 11 | 12 | 13 | class FakeDigitalIO: 14 | def __init__(self, id): 15 | self.id = id 16 | self.value = False 17 | 18 | 19 | class TestManagedSpi(TestCase): 20 | def test_acquire(self): 21 | loop = Loop() 22 | 23 | # Synchronize access to an SPI allowing other tasks to work while waiting 24 | spi_bus = 'board.SPI' 25 | managed_spi = ManagedSpi(spi_bus, loop=loop) 26 | 27 | # Configure 3 pins for selecting different chip selects on the shared SPI bus 28 | sdcard_spi = managed_spi.cs_handle(FakeDigitalIO('D1')) 29 | screen_spi = managed_spi.cs_handle(FakeDigitalIO('D2')) 30 | sensor_spi = managed_spi.cs_handle(FakeDigitalIO('D3')) 31 | 32 | did_read = did_screen = did_sensor = False 33 | 34 | # Define 3 full while True app loops dependent on 1 shared SPI 35 | 36 | async def read_sdcard(): 37 | nonlocal did_read 38 | while True: 39 | async with sdcard_spi as 
spi: 40 | # do_something_with(spi) 41 | for _ in range(2): 42 | self.assertTrue(sdcard_spi.active) 43 | self.assertFalse(screen_spi.active) 44 | self.assertFalse(sensor_spi.active) 45 | await YieldOne() 46 | did_read = True 47 | await YieldOne() 48 | 49 | async def update_screen(): 50 | nonlocal did_screen 51 | while True: 52 | # await do_other_work 53 | async with screen_spi as spi: 54 | for _ in range(2): 55 | self.assertFalse(sdcard_spi.active) 56 | self.assertTrue(screen_spi.active) 57 | self.assertFalse(sensor_spi.active) 58 | await YieldOne() 59 | did_screen = True 60 | await YieldOne() 61 | 62 | async def read_sensor(): 63 | nonlocal did_sensor 64 | while True: 65 | async with sensor_spi as spi: 66 | for _ in range(2): 67 | self.assertFalse(sdcard_spi.active) 68 | self.assertFalse(screen_spi.active) 69 | self.assertTrue(sensor_spi.active) 70 | await YieldOne() 71 | did_sensor = True 72 | await YieldOne() 73 | 74 | # Add the top level application coroutines 75 | loop.add_task(read_sdcard()) 76 | loop.add_task(read_sensor()) 77 | loop.add_task(update_screen()) 78 | 79 | # would just use asynccp.add_task() and asynccp.run() but for test let's manually step it through 80 | # loop.run() 81 | 82 | # They didn't run yet 83 | self.assertFalse(sdcard_spi.active) 84 | self.assertFalse(screen_spi.active) 85 | self.assertFalse(sensor_spi.active) 86 | 87 | loop._step() 88 | self.assertTrue(sdcard_spi.active) # sdcard was the first to queue up 89 | loop._step() 90 | loop._step() 91 | 92 | loop._step() 93 | self.assertTrue(sensor_spi.active) # sensor was the second to queue up 94 | loop._step() 95 | loop._step() 96 | 97 | loop._step() 98 | self.assertTrue(screen_spi.active) # screen was the last to queue up 99 | loop._step() 100 | loop._step() 101 | 102 | self.assertTrue(did_sensor) 103 | self.assertTrue(did_screen) 104 | self.assertTrue(did_read) 105 | -------------------------------------------------------------------------------- /asynccp/test/test_loop.py: 
-------------------------------------------------------------------------------- 1 | from asynccp.loop import _yield_once, set_time_provider 2 | import time 3 | from unittest import TestCase 4 | 5 | from asynccp import Loop 6 | from asynccp.time import Duration 7 | 8 | 9 | class TestLoop(TestCase): 10 | def test_add_task(self): 11 | loop = Loop() 12 | ran = False 13 | 14 | async def foo(): 15 | nonlocal ran 16 | ran = True 17 | loop.add_task(foo()) 18 | loop._step() 19 | self.assertTrue(ran) 20 | 21 | def test_delay(self): 22 | loop = Loop() 23 | complete = False 24 | 25 | async def foo(): 26 | nonlocal complete 27 | await loop.delay(0.1) 28 | complete = True 29 | loop.add_task(foo()) 30 | start = time.monotonic() 31 | while not complete and time.monotonic() - start < 1: 32 | loop._step() 33 | self.assertTrue(complete) 34 | 35 | def test_reschedule(self): 36 | now = 0 37 | def nanos(): 38 | nonlocal now 39 | return now 40 | 41 | set_time_provider(nanos) 42 | try: 43 | loop = Loop() 44 | run_count = 0 45 | 46 | async def foo(): 47 | nonlocal run_count 48 | run_count += 1 49 | scheduled_task = loop.schedule(1000000000, foo) 50 | 51 | now = 2 52 | self.assertEqual(0, run_count, 'did not run before step') 53 | loop._step() 54 | self.assertEqual(1, run_count, 'ran only once during step') 55 | 56 | now = 4 57 | scheduled_task.stop() 58 | loop._step() 59 | self.assertEqual(1, run_count, 'does not run again while stopped') 60 | 61 | now = 6 62 | scheduled_task.start() 63 | loop._step() 64 | self.assertEqual(2, run_count, 'runs again after restarting') 65 | 66 | now = 7 67 | scheduled_task.change_rate(1000000000 / 10) 68 | loop._step() 69 | self.assertEqual(3, run_count, 'this run was already scheduled. 
the next one will be at 10-step') 70 | 71 | now = 16 72 | loop._step() 73 | self.assertEqual(3, run_count, 'expect to run after 10 has passed') 74 | 75 | now = 17 76 | loop._step() 77 | self.assertEqual(4, run_count, 'new schedule rate ran') 78 | finally: 79 | set_time_provider(time.monotonic_ns) 80 | 81 | def test_schedule_rate(self): 82 | # Checks a bunch of scheduled tasks to make sure they hit their target fixed rate schedule. 83 | # Pathological scheduling sees these tasks barge in front of others all the time. Many run 84 | # at a fairly high frequency. 85 | 86 | loop = Loop(debug=False) 87 | duration = 1 # Seconds to run the scheduler. (try with 10s if you suspect scheduler drift) 88 | tasks = 110 # How many tasks (higher indexes count faster. 110th index goes 100hz) 89 | 90 | def timer(index): 91 | return Duration.of_milliseconds(120 - index) 92 | 93 | counters = [] 94 | for i in range(tasks): 95 | async def f(_i): 96 | counters[_i] += 1 97 | 98 | counters.append(0) 99 | 100 | loop.schedule(timer(i), f, i) 101 | 102 | start = time.perf_counter() 103 | end = start 104 | while end - start < duration: 105 | loop._step() 106 | end = time.perf_counter() 107 | print() 108 | 109 | expected_tps = 0 110 | actual_tps = 0 111 | # Assert that all the tasks hit their scheduled count, at least within +-5 iterations. 
112 | for i in range(len(counters)): 113 | self.assertAlmostEqual(duration * timer(i).as_frequency(), counters[i], delta=2) 114 | expected_tps += timer(i).as_frequency() 115 | actual_tps += counters[i] 116 | actual_tps /= duration 117 | print('expected tps:', expected_tps, 'actual:', actual_tps) 118 | 119 | def test_schedule_later(self): 120 | control_ticks = 0 121 | deferred_ticks = 0 122 | deferred_ticked = False 123 | loop = Loop(debug=False) 124 | 125 | async def deferred_task(): 126 | nonlocal deferred_ticked, deferred_ticks 127 | deferred_ticks = deferred_ticks + 1 128 | deferred_ticked = True 129 | 130 | async def control_ticker(): 131 | nonlocal control_ticks 132 | control_ticks = control_ticks + 1 133 | 134 | loop.schedule(100, control_ticker) 135 | loop.schedule_later(10, deferred_task) 136 | 137 | while True: 138 | loop._step() 139 | if deferred_ticked: 140 | break 141 | 142 | self.assertEqual(deferred_ticks, 1) 143 | self.assertAlmostEqual(control_ticks, 10, delta=2) 144 | 145 | def test_run_later(self): 146 | loop = Loop() 147 | count = 0 148 | 149 | async def run_later(): 150 | nonlocal count 151 | while True: 152 | count = count + 1 153 | await _yield_once() # For testing 154 | 155 | loop.run_later(seconds_to_delay=0.1, awaitable_task=run_later()) 156 | 157 | self.assertEqual(0, count, 'count should not increment upon coroutine instantiation') 158 | loop._step() 159 | self.assertEqual(0, count, 'count should not increment before waiting long enough') 160 | 161 | time.sleep(0.1) # Make sure enough time has passed for step to pick up the task 162 | loop._step() 163 | self.assertEqual(1, count, 'count should increment once per step') 164 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # CircuitPython_async 2 | ![Tests](https://github.com/WarriorOfWire/CircuitPython_async/actions/workflows/python-tests.yml/badge.svg) 3 | 4 | 
## About 5 | Pure Python cooperative multitasking implementation for the async/await language syntax. 6 | 7 | Loosely modeled after CPython's standard `asyncio`; focused on CircuitPython. 8 | 9 | Typically, when you need to wait around for something you have to choose between just doing time.sleep() and 10 | having a hitch in your app OR manually interleaving tasks and tracking their state & timers. 11 | 12 | `asynccp` interleaves your tasks at `await` points in the same general way as `asyncio` does on regular python. 13 | Instead of blocking with `time.sleep()` you'll `await asynccp.delay()` to let the microcontroller work on other 14 | things. 15 | 16 | The `async` and `await` keywords are supported in Circuitpython 6.0. They may be unavailable on your m0 17 | microcontroller because of flash space. 18 | 19 | ## Examples 20 | ### Plain synchronous loop task with async support 21 | ```python 22 | import asynccp 23 | 24 | 25 | async def read_some_sensor(): 26 | pass 27 | 28 | 29 | async def check_button(): 30 | pass 31 | 32 | 33 | async def update_display(): 34 | pass 35 | 36 | 37 | async def loop(): 38 | await read_some_sensor() 39 | await check_button() 40 | await update_display() 41 | 42 | 43 | def run(): 44 | asynccp.add_task(loop()) 45 | asynccp.run() 46 | 47 | 48 | if __name__ == '__main__': 49 | run() 50 | ``` 51 | 52 | ### Scheduled app instead of loop 53 | ```python 54 | import asynccp 55 | from asynccp.time import Duration 56 | 57 | class App: 58 | def __init__(self): 59 | self.button_state = 0 60 | self.sensor_state = 0 61 | 62 | async def read_some_sensor(self): 63 | pass 64 | 65 | async def check_button(self): 66 | pass 67 | 68 | async def update_display(self): 69 | pass 70 | 71 | 72 | def run(): 73 | app = App() 74 | 75 | asynccp.schedule(frequency=Duration.of_seconds(5), coroutine_function=app.read_some_sensor) 76 | asynccp.schedule(frequency=80, coroutine_function=app.check_button) 77 | asynccp.schedule(frequency=15, 
coroutine_function=app.update_display) 78 | asynccp.run() 79 | 80 | 81 | if __name__ == '__main__': 82 | run() 83 | ``` 84 | 85 | ### Multiplex SPI bus without manual coordination 86 | Using `asynccp.managed_resource.ManagedResource` you can share an SPI bus between concurrent tasks without explicit 87 | coordination. 88 | 89 | ```python 90 | def setup_spi(): 91 | from asynccp.managed_resource import ManagedResource 92 | import digitalio 93 | import board 94 | # Configure the hardware 95 | spi = board.SPI() 96 | 97 | sensor_cs = digitalio.DigitalInOut(board.D4) 98 | sensor_cs.direction = digitalio.Direction.OUTPUT 99 | 100 | sdcard_cs = digitalio.DigitalInOut(board.D5) 101 | sdcard_cs.direction = digitalio.Direction.OUTPUT 102 | 103 | # Set up acquire/release workflow for the SPI bus 104 | def set_active(pin): 105 | pin.value = True 106 | 107 | def set_inactive(pin): 108 | pin.value = False 109 | 110 | # Configure the physical spi as a managed resource with callbacks that manage the CS pin 111 | managed_spi = ManagedResource(spi, on_acquire=set_active, on_release=set_inactive) 112 | 113 | # Get awaitable handles for each CS using this SPI bus 114 | sensor_handle = managed_spi.handle(pin=sensor_cs) 115 | sdcard_handle = managed_spi.handle(pin=sdcard_cs) 116 | 117 | return sensor_handle, sdcard_handle 118 | ``` 119 | 120 | And with these configured resource handles you can use them without checking whether anything is busy. Things will 121 | efficiently wait when they have to, and charge right on through when there's nothing using the bus currently. 122 | ```python 123 | async def read_sensor(sensor_handle): 124 | async with sensor_handle as bus: 125 | await send_read_request_to_sensor(bus) 126 | # Consider a BME680 which needs a delay before reading the requested result. 
127 | # Let's let something else use the bus while it's waiting 128 | await asynccp.delay(seconds=0.1) 129 | async with sensor_handle as bus: 130 | return await read_result_from_sensor(bus) 131 | 132 | async def log_to_sdcard(sdcard_handle): 133 | async with sdcard_handle as bus: 134 | bytes_written = await write_to_sdcard(bus) 135 | 136 | sensor_handle, sdcard_handle = setup_spi() 137 | asynccp.schedule(Duration.of_milliseconds(123), read_sensor, sensor_handle) 138 | sd_log_scheduled_task = asynccp.schedule(Duration.of_seconds(1.5), log_to_sdcard, sdcard_handle) 139 | asynccp.run() 140 | ``` 141 | 142 | ## Some toy example code 143 | Uses [this library](https://github.com/WarriorOfWire/circuitpython-utilities/blob/master/cpy_rotary/README.md) for the rotary button 144 | 145 | ```python 146 | import asynccp 147 | from cpy_rotary import RotaryButton 148 | 149 | 150 | # Some state. Global state is not super cool but whatevs 151 | reading_sensor = False 152 | 153 | 154 | # Define the top-level workflows (you would have to write this stuff no matter what) 155 | async def read_sensor(): 156 | global reading_sensor 157 | reading_sensor = True 158 | try: 159 | i2c.send(payload) 160 | await asynccp.delay(1) # Don't block your loading beach ball while the sensor is sensing. 
161 | i2c.read(payload) # if you have some buffered i2c thing 162 | finally: 163 | reading_sensor = False 164 | 165 | 166 | async def animate_beach_ball(): 167 | global reading_sensor 168 | if reading_sensor: 169 | set_animation_state() # hopefully this is quick - if not, maybe there's something inside to `await` 170 | 171 | 172 | async def read_from_3d_printer(): 173 | pass 174 | 175 | 176 | rotary = RotaryButton() 177 | 178 | 179 | # ---------- asynccp wiring begins here ---------- # 180 | # Schedule the workflows at whatever frequency makes sense 181 | asynccp.schedule(Duration.of_milliseconds(100), coroutine_function=read_sensor) 182 | asynccp.schedule(Duration.of_milliseconds(100), coroutine_function=animate_beach_ball) 183 | asynccp.schedule(Duration.of_milliseconds(200), coroutine_function=read_from_3d_printer) 184 | asynccp.schedule(Duration.of_milliseconds(10), coroutine_function=rotary.loop) 185 | 186 | # And let asynccp do while True 187 | asynccp.run() 188 | # ---------- asynccp wiring ends here ---------- # 189 | ``` 190 | -------------------------------------------------------------------------------- /asynccp/loop.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import asynccp.time 4 | 5 | _monotonic_ns = time.monotonic_ns 6 | 7 | 8 | def set_time_provider(monotonic_ns): 9 | global _monotonic_ns 10 | _monotonic_ns = monotonic_ns 11 | 12 | 13 | def _yield_once(): 14 | """await the return value of this function to yield the processor""" 15 | class _CallMeNextTime: 16 | def __await__(self): 17 | # This is inside the scheduler where we know generator yield is the 18 | # implementation of task switching in CircuitPython. This throws 19 | # control back out through user code and up to the scheduler's 20 | # __iter__ stack which will see that we've suspended _current. 21 | # Don't yield in async methods; only await unless you're making a library. 
22 | yield 23 | 24 | return _CallMeNextTime() 25 | 26 | 27 | def _get_future_nanos(seconds_in_future): 28 | return _monotonic_ns() + int(seconds_in_future * 1000000000) 29 | 30 | 31 | class Delayer: 32 | def __init__(self, resume_nanos, task): 33 | self.task = task 34 | self._resume_nanos = resume_nanos 35 | 36 | def resume_nanos(self): 37 | return self._resume_nanos 38 | 39 | def __repr__(self): 40 | return '{{Delayer remaining: {:.2f}, task: {} }}'.format( 41 | (self.resume_nanos() - _monotonic_ns()) / 1000000000.0, 42 | self.task 43 | ) 44 | 45 | __str__ = __repr__ 46 | 47 | 48 | class Task: 49 | def __init__(self, coroutine): 50 | self.coroutine = coroutine 51 | 52 | def __repr__(self): 53 | return '{{Task {}}}'.format(self.coroutine) 54 | 55 | __str__ = __repr__ 56 | 57 | 58 | class ScheduledTask: 59 | def change_rate(self, frequency): 60 | """ Update the task rate to a new frequency. Float hz or asynccp.time.Duration interval """ 61 | if isinstance(frequency, asynccp.time.Duration): 62 | self._nanoseconds_per_invocation = frequency.as_nanoseconds() 63 | else: 64 | hz = frequency 65 | self._nanoseconds_per_invocation = 1000000000 / hz 66 | 67 | def stop(self): 68 | """ Stop the task (does not interrupt a currently running task) """ 69 | self._stop = True 70 | 71 | def start(self): 72 | """ Schedule the task (if it's not already scheduled) """ 73 | self._stop = False 74 | if not self._scheduled_to_run: 75 | # Don't double-up the task if it's still in the run list! 
76 | self._loop.add_task(self._run_at_fixed_rate()) 77 | 78 | def __init__(self, loop, nanoseconds_per_invocation, forward_async_fn, forward_args, forward_kwargs): 79 | self._loop = loop 80 | self._forward_async_fn = forward_async_fn 81 | self._forward_args = forward_args 82 | self._forward_kwargs = forward_kwargs 83 | self._nanoseconds_per_invocation = nanoseconds_per_invocation 84 | self._stop = False 85 | self._running = False 86 | self._scheduled_to_run = False 87 | 88 | async def _run_at_fixed_rate(self): 89 | self._scheduled_to_run = True 90 | try: 91 | target_run_nanos = _monotonic_ns() 92 | while True: 93 | if self._stop: 94 | return # Check before running 95 | 96 | iteration = self._forward_async_fn(*self._forward_args, **self._forward_kwargs) 97 | self._loop._debug('iteration ', iteration) 98 | 99 | self._running = True 100 | try: 101 | await iteration 102 | finally: 103 | self._running = False 104 | 105 | if self._stop: 106 | return # Check before waiting 107 | 108 | # Try to reschedule for the next window without skew. If we're falling behind, 109 | # just go as fast as possible & schedule to run "now." If we catch back up again 110 | # we'll return to seconds_per_invocation without doing a bunch of catchup runs. 111 | target_run_nanos = target_run_nanos + self._nanoseconds_per_invocation 112 | now_nanos = _monotonic_ns() 113 | if now_nanos <= target_run_nanos: 114 | await self._loop._delay_until_nanos(target_run_nanos) 115 | else: 116 | if now_nanos - target_run_nanos > 2 * self._nanoseconds_per_invocation: 117 | # Only break the schedule if we fall behind by too much. 118 | target_run_nanos = now_nanos 119 | # Allow other tasks a chance to run if this task is too slow. 
120 | await _yield_once() 121 | finally: 122 | self._scheduled_to_run = False 123 | 124 | def __repr__(self): 125 | hz = 1 / (self._nanoseconds_per_invocation / 1000000000) 126 | state = 'running' if self._running else 'waiting' 127 | return '{{ScheduledTask {} rate: {}hz, fn: {}}}'.format(state, hz, self._forward_async_fn) 128 | 129 | __str__ = __repr__ 130 | 131 | 132 | class TaskCanceledException(Exception): 133 | pass 134 | 135 | 136 | class Loop: 137 | """ 138 | It's your task host. You run() it and it manages your main application loop. 139 | """ 140 | 141 | def __init__(self, debug=False): 142 | self._tasks = [] 143 | self._delaying = [] 144 | self._current = None 145 | if debug: 146 | self._debug = print 147 | else: 148 | self._debug = lambda *arg, **kwargs: None 149 | 150 | def add_task(self, awaitable_task): 151 | """ 152 | Add a concurrent task (known as a coroutine, implemented as a generator in CircuitPython) 153 | Use: 154 | scheduler.add_task( my_async_method() ) 155 | :param awaitable_task: The coroutine to be concurrently driven to completion. 156 | """ 157 | self._debug('adding task ', awaitable_task) 158 | self._tasks.append(Task(awaitable_task)) 159 | 160 | async def delay(self, seconds): 161 | """ 162 | From within a coroutine, this suspends your call stack for some amount of time. 163 | 164 | NOTE: Always `await` this! You will have a bad time if you do not. 165 | 166 | :param seconds: Floating point; will wait at least this long to call your task again. 167 | """ 168 | await self._delay_until_nanos(_get_future_nanos(seconds)) 169 | 170 | def run_later(self, seconds_to_delay, awaitable_task): 171 | """ 172 | Add a concurrent task, delayed by some seconds. 173 | Use: 174 | asynccp.run_later( seconds_to_delay=1.2, my_async_method() ) 175 | :param seconds_to_delay: How long until the task should be kicked off? 176 | :param awaitable_task: The coroutine to be concurrently driven to completion. 
177 | """ 178 | # Make sure we don't wait unnecessarily if there are lots of tasks to kick off 179 | start_nanos = _get_future_nanos(seconds_to_delay) 180 | async def _run_later(): 181 | await self._delay_until_nanos(start_nanos) 182 | await awaitable_task 183 | self.add_task(_run_later()) 184 | 185 | def suspend(self): 186 | """ 187 | For making library functions that suspend and then resume later on some condition 188 | E.g., a scope manager for SPI 189 | 190 | To use this you will stash the resumer somewhere to call from another coroutine, AND 191 | you will `await suspender` to pause this stack at the spot you choose. 192 | 193 | :returns (async_suspender, resumer) 194 | """ 195 | assert self._current is not None, 'You can only suspend the current task if you are running the event loop.' 196 | suspended = self._current 197 | 198 | def resume(): 199 | self._tasks.append(suspended) 200 | 201 | self._current = None 202 | return _yield_once(), resume 203 | 204 | def schedule(self, frequency, coroutine_function, *args, **kwargs): 205 | """ 206 | Describe how often a method should be called. 207 | 208 | Your event loop will call this coroutine on the hz schedule. 209 | Only up to 1 instance of your method will be alive at a time. 210 | 211 | This will use sleep() internally when there's nothing to do 212 | and scheduled, waiting functions consume no cpu so you should 213 | feel pretty good about using scheduled async functions. 214 | 215 | usage: 216 | async def main_loop: 217 | await your_code() 218 | scheduled_task = get_loop().schedule(frequency=100, coroutine_function=main_loop) 219 | get_loop().run() 220 | 221 | :param frequency: frequency-like - How many times per second should the function run? 
(float hz or asynccp.Duration) 222 | :param coroutine_function: the async def function you want invoked on your schedule 223 | """ 224 | assert coroutine_function is not None, 'coroutine function must not be none' 225 | if isinstance(frequency, asynccp.time.Duration): 226 | nanoseconds_per_invocation = frequency.as_nanoseconds() 227 | else: 228 | nanoseconds_per_invocation = (1 / frequency) * 1000000000 229 | task = ScheduledTask(self, nanoseconds_per_invocation, coroutine_function, args, kwargs) 230 | task.start() 231 | return task 232 | 233 | def schedule_later(self, hz: float, coroutine_function, *args, **kwargs): 234 | """ 235 | Like schedule, but invokes the coroutine_function after the first hz interval. 236 | 237 | See schedule api for parameters. 238 | """ 239 | ran_once = False 240 | async def call_later(): 241 | nonlocal ran_once 242 | if ran_once: 243 | await coroutine_function(*args, **kwargs) 244 | else: 245 | await _yield_once() 246 | ran_once = True 247 | 248 | return self.schedule(hz, call_later) 249 | 250 | def run(self): 251 | """ 252 | Use: 253 | async def application_loop(): 254 | pass 255 | 256 | def run(): 257 | main_loop = Loop() 258 | loop.schedule(100, application_loop) 259 | loop.run() 260 | 261 | if __name__ == '__main__': 262 | run() 263 | The crucial StopIteration exception signifies the end of a coroutine in CircuitPython. 264 | Other Exceptions that reach the runner break out, stopping your app and showing a stack trace. 265 | """ 266 | assert self._current is None, 'Loop can only be advanced by 1 stack frame at a time.' 
267 | while self._tasks or self._delaying: 268 | self._step() 269 | self._debug('Loop completed', self._tasks, self._delaying) 270 | 271 | def _step(self): 272 | self._debug('stepping over ', len(self._tasks), ' tasks') 273 | for _ in range(len(self._tasks)): 274 | task = self._tasks.pop(0) 275 | self._run_task(task) 276 | # Consider each delaying function at most once (avoids sleep(0) problems) 277 | for i in range(len(self._delaying)): 278 | delayed_task = self._delaying[0] 279 | now_nanos = _monotonic_ns() 280 | if now_nanos >= delayed_task.resume_nanos(): 281 | self._delaying.pop(0) 282 | self._run_task(delayed_task.task) 283 | else: 284 | # We didn't pop the task and it wasn't time to run it. Only later tasks past this one. 285 | break 286 | if len(self._tasks) == 0 and len(self._delaying) > 0: 287 | next_delayed_task = self._delaying[0] 288 | delay_nanos = next_delayed_task.resume_nanos() - _monotonic_ns() 289 | if delay_nanos > 0: 290 | # Give control to the system, there's nothing to be done right now, 291 | # and nothing else is scheduled to run for this long. 292 | # This is the real sleep. If/when interrupts are implemented this will likely need to change. 293 | sleep_seconds = delay_nanos / 1000000000.0 294 | self._debug('No active tasks. Sleeping for ', sleep_seconds, 's. \n', self._delaying) 295 | time.sleep(sleep_seconds) 296 | 297 | def _run_task(self, task: Task): 298 | """ 299 | Runs a task and re-queues for the next loop if it is both (1) not complete and (2) not delaying. 300 | """ 301 | self._current = task 302 | try: 303 | task.coroutine.send(None) 304 | self._debug('current', self._current) 305 | # delay gate here, in case the current task suspended. 306 | # If a delaying task re-suspends it will have already put itself in the delaying queue. 307 | if self._current is not None: 308 | self._tasks.append(task) 309 | except StopIteration: 310 | # This task is all done. 
311 | self._debug('task complete') 312 | pass 313 | finally: 314 | self._current = None 315 | 316 | async def _delay_until_nanos(self, target_run_nanos): 317 | """ 318 | From within a coroutine, delays until the target time.monotonic_ns 319 | Returns the thing to await 320 | """ 321 | assert self._current is not None, 'You can only delay from within a task' 322 | self._delaying.append(Delayer(target_run_nanos, self._current)) 323 | self._delaying.sort(key=Delayer.resume_nanos) # heap would be better but hey. 324 | self._debug('delaying ', self._current) 325 | self._current = None 326 | # Pretty subtle here. This yields once, then it continues next time the task scheduler executes it. 327 | # The async function is parked at this point. 328 | await _yield_once() 329 | 330 | --------------------------------------------------------------------------------