├── .coveragerc ├── .dockerignore ├── .flake8 ├── .github ├── FUNDING.yml ├── dependabot.yml └── workflows │ └── pythonapp.yml ├── .gitignore ├── CHANGES ├── CONTRIBUTORS ├── LICENSE ├── README.rst ├── TODO ├── aiorun ├── __init__.py └── py.typed ├── docker-compose.yml ├── examples ├── echo_client.py ├── echo_server.py ├── stop_unhandled.py ├── stop_unhandled_custom.py ├── stop_unhandled_illegal.py └── stop_unhandled_task.py ├── pyproject.toml ├── pytest.ini ├── requirements-test.txt ├── test.dockerfile ├── test38.dockerfile ├── tests ├── conftest.py ├── fake_main.py ├── test_posix.py ├── test_stop_on_errors.py └── test_win.py ├── testshield.py └── watchtest.sh /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | branch = True 3 | include = 4 | aiorun.py 5 | tests/* 6 | 7 | [report] 8 | exclude_lines = 9 | # Have to re-enable the standard pragma 10 | pragma: no cover 11 | if typing.TYPE_CHECKING: 12 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | **/__pycache__/ 2 | **/.mypy_cache 3 | .pytest_cache 4 | **/.pytest_cache 5 | **/.tox 6 | **/.vscode 7 | **/.idea 8 | **/.coverage 9 | **/.DS_Store 10 | **/.eggs 11 | **/.python-version 12 | **/*.egg-info 13 | **/*.bak 14 | **/*.db 15 | **/*.pyc 16 | **/*.sqllite 17 | **/*.swp 18 | **/venv* -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | exclude = 3 | examples 4 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [cjrh] 4 | 
-------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: pip 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | time: "19:00" 8 | open-pull-requests-limit: 10 9 | -------------------------------------------------------------------------------- /.github/workflows/pythonapp.yml: -------------------------------------------------------------------------------- 1 | name: Python application 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | branches: 9 | - master 10 | 11 | jobs: 12 | build: 13 | name: Test on Python ${{ matrix.python-version }} and ${{ matrix.os }} 14 | runs-on: ${{ matrix.os }} 15 | strategy: 16 | matrix: 17 | python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] 18 | os: [ubuntu-latest, windows-latest, macos-latest] 19 | fail-fast: false 20 | timeout-minutes: 15 21 | steps: 22 | - uses: actions/checkout@v3 23 | - name: Set up Python 24 | uses: actions/setup-python@v3 25 | with: 26 | python-version: ${{ matrix.python-version }} 27 | - name: Install dependencies 28 | run: | 29 | python -m pip install --upgrade pip 30 | pip install -r requirements-test.txt flit pygments 31 | - name: Lint with flake8 32 | run: | 33 | pip install flake8 34 | # stop the build if there are Python syntax errors or undefined names 35 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics 36 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide 37 | flake8 . 
--count --exit-zero --max-complexity=10 --max-line-length=127 --statistics 38 | - name: Test with pytest 39 | run: | 40 | pip install pytest 41 | flit install --pth-file 42 | pytest --cov aiorun tests/ 43 | 44 | - name: Extract branch name 45 | shell: bash 46 | run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})" 47 | id: extract_branch 48 | - name: Upload coverage 49 | env: 50 | GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" 51 | COVERALLS_REPO_TOKEN: ${{ secrets.COVERALLS_REPO_TOKEN }} 52 | COVERALLS_PARALLEL: "true" 53 | if: matrix.os == 'ubuntu-latest' 54 | run: | 55 | coveralls --service=github 56 | 57 | coveralls: 58 | name: Indicate completion to coveralls.io 59 | needs: build 60 | runs-on: ubuntu-latest 61 | steps: 62 | - name: Finished 63 | env: 64 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 65 | run: | 66 | pip3 install --upgrade coveralls 67 | coveralls --service=github --finish 68 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | 57 | # Flask stuff: 58 | instance/ 59 | .webassets-cache 60 | 61 | # Scrapy stuff: 62 | .scrapy 63 | 64 | # Sphinx documentation 65 | docs/_build/ 66 | 67 | # PyBuilder 68 | target/ 69 | 70 | # Jupyter Notebook 71 | .ipynb_checkpoints 72 | 73 | # pyenv 74 | .python-version 75 | 76 | # celery beat schedule file 77 | celerybeat-schedule 78 | 79 | # SageMath parsed files 80 | *.sage.py 81 | 82 | # dotenv 83 | .env 84 | 85 | # virtualenv 86 | .venv 87 | venv/ 88 | ENV/ 89 | 90 | # Spyder project settings 91 | .spyderproject 92 | .spyproject 93 | 94 | # Rope project settings 95 | .ropeproject 96 | 97 | # mkdocs documentation 98 | /site 99 | 100 | # mypy 101 | .mypy_cache/ 102 | -------------------------------------------------------------------------------- /CHANGES: -------------------------------------------------------------------------------- 1 | 2017.11.6 Due to https://bugs.python.org/issue23548, we kinda have to close 2 | the event loop, else we'll get exceptions during atexit. Therefore, 3 | reverting the change. Now, if caller supplies a loop, it is up 4 | to caller to close, but if no loop is supplied, we will close 5 | the default loop on exit. 6 | 7 | 2021.8.1 From feedback in https://github.com/cjrh/aiorun/issues/65, decided 8 | to change the max_workers parameter to None to allow the default 9 | in ThreadPoolExecutor to be used, rather than a constant of 10. 
10 | 11 | 2022.4.1 @Pirulax pushed for removing `loop` from the `gather` call based 12 | on the deprecation warning, so there's a small refactor in 13 | https://github.com/cjrh/aiorun/pull/69 14 | -------------------------------------------------------------------------------- /CONTRIBUTORS: -------------------------------------------------------------------------------- 1 | Cameron Parry (@wallies) 2 | Ashwini Balnaves (@ashwini-balnaves) 3 | Thomas Grainger (https://graingert.co.uk) 4 | Fedir Alifirenko (@FedirAlifirenko) 5 | @Pirulax 6 | Zach Broniszewski (@zbroniszewski) 7 | Arkadiy Illarionov (@qarkai) 8 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | .. image:: https://github.com/cjrh/aiorun/workflows/Python%20application/badge.svg 2 | :target: https://github.com/cjrh/aiorun/actions 3 | 4 | .. image:: https://coveralls.io/repos/github/cjrh/aiorun/badge.svg?branch=master 5 | :target: https://coveralls.io/github/cjrh/aiorun?branch=master 6 | 7 | .. image:: https://img.shields.io/pypi/pyversions/aiorun.svg 8 | :target: https://pypi.python.org/pypi/aiorun 9 | 10 | .. image:: https://img.shields.io/github/tag/cjrh/aiorun.svg 11 | :target: https://img.shields.io/github/tag/cjrh/aiorun.svg 12 | 13 | .. image:: https://img.shields.io/badge/install-pip%20install%20aiorun-ff69b4.svg 14 | :target: https://img.shields.io/badge/install-pip%20install%20aiorun-ff69b4.svg 15 | 16 | .. 
image:: https://img.shields.io/pypi/v/aiorun.svg 17 | :target: https://pypi.org/project/aiorun/ 18 | 19 | .. image:: https://img.shields.io/badge/calver-YYYY.MM.MINOR-22bfda.svg 20 | :alt: This project uses calendar-based versioning scheme 21 | :target: http://calver.org/ 22 | 23 | .. image:: https://pepy.tech/badge/aiorun 24 | :alt: Downloads 25 | :target: https://pepy.tech/project/aiorun 26 | 27 | .. image:: https://img.shields.io/badge/code%20style-black-000000.svg 28 | :alt: This project uses the "black" style formatter for Python code 29 | :target: https://github.com/python/black 30 | 31 | 32 | .. contents:: Table of Contents 33 | 34 | 🏃 aiorun 35 | ====================== 36 | 37 | Here's the big idea (how you use it): 38 | 39 | .. code-block:: python 40 | 41 | import asyncio 42 | from aiorun import run 43 | 44 | async def main(): 45 | # Put your application code here 46 | await asyncio.sleep(1.0) 47 | 48 | if __name__ == '__main__': 49 | run(main()) 50 | 51 | This package provides a ``run()`` function as the starting point 52 | of your ``asyncio``-based application. The ``run()`` function will 53 | run forever. If you want to shut down when ``main()`` completes, just 54 | call ``loop.stop()`` inside it: that will initiate shutdown. 55 | 56 | .. warning:: 57 | 58 | Note that `aiorun.run(coro)` will run **forever**, unlike the standard 59 | library's ``asyncio.run()`` helper. You can call `aiorun.run()` 60 | without a coroutine parameter, and it will still run forever. 61 | 62 | This is surprising to many people, because they sometimes expect that 63 | unhandled exceptions should abort the program, with an exception and 64 | a traceback. If you want this behaviour, please see the section on 65 | *error handling* further down. 66 | 67 | .. warning:: 68 | 69 | Note that `aiorun.run(coro)` will create a **new event loop instance** 70 | every time it is invoked (same as `asyncio.run`). 
This might cause 71 | confusing errors if your code interacts with the default event loop 72 | instance provided by the stdlib `asyncio` library. For such situations 73 | you can provide the actual loop you're using with 74 | `aiorun.run(coro, loop=loop)`. There is more info about this further down. 75 | 76 | However, generally speaking, configuring your own loop and providing 77 | it in this way is a code smell. You will find it much easier to 78 | reason about your code if you do all your task creation *inside* 79 | an async context, such as within an `async def` function, because then 80 | there will no ambiguity about which event loop is in play: it will 81 | always be the one returned by `asyncio.get_running_loop()`. 82 | 83 | 84 | 🤔 Why? 85 | ---------------- 86 | 87 | The ``run()`` function will handle **everything** that normally needs 88 | to be done during the shutdown sequence of the application. All you 89 | need to do is write your coroutines and run them. 90 | 91 | So what the heck does ``run()`` do exactly?? It does these standard, 92 | idiomatic actions for asyncio apps: 93 | 94 | - creates a ``Task`` for the given coroutine (schedules it on the 95 | event loop), 96 | - calls ``loop.run_forever()``, 97 | - adds default (and smart) signal handlers for both ``SIGINT`` 98 | and ``SIGTERM`` that will stop the loop; 99 | - and *when* the loop stops (either by signal or called directly), then it will... 100 | - ...gather all outstanding tasks, 101 | - cancel them using ``task.cancel()``, 102 | - resume running the loop until all those tasks are done, 103 | - wait for the *executor* to complete shutdown, and 104 | - finally close the loop. 105 | 106 | All of this stuff is boilerplate that you will never have to write 107 | again. 
So, if you use ``aiorun`` this is what **you** need to remember: 108 | 109 | - Spawn all your work from a single, starting coroutine 110 | - When a shutdown signal is received, **all** currently-pending tasks 111 | will have ``CancelledError`` raised internally. It's up to you whether 112 | you want to handle this inside each coroutine with 113 | a ``try/except`` or not. 114 | - If you want to protect coros from cancellation, see `shutdown_waits_for()` 115 | further down. 116 | - Try to have executor jobs be shortish, since the shutdown process will wait 117 | for them to finish. If you need a long-running thread or process tasks, use 118 | a dedicated thread/subprocess and set ``daemon=True`` instead. 119 | 120 | There's not much else to know for general use. `aiorun` has a few special 121 | tools that you might need in unusual circumstances. These are discussed 122 | next. 123 | 124 | 🖥️ What about TCP server startup? 125 | ----------------------------------- 126 | 127 | You will see in many examples online that for servers, startup happens in 128 | several ``run_until_complete()`` phases before the primary ``run_forever()`` 129 | which is the "main" running part of the program. How do we handle that with 130 | *aiorun*? 131 | 132 | Let's recreate the `echo client & server `_ 133 | examples from the Standard Library documentation: 134 | 135 | **Client:** 136 | 137 | .. code-block:: python 138 | 139 | # echo_client.py 140 | import asyncio 141 | from aiorun import run 142 | 143 | async def tcp_echo_client(message): 144 | # Same as original! 145 | reader, writer = await asyncio.open_connection('127.0.0.1', 8888) 146 | print('Send: %r' % message) 147 | writer.write(message.encode()) 148 | data = await reader.read(100) 149 | print('Received: %r' % data.decode()) 150 | print('Close the socket') 151 | writer.close() 152 | asyncio.get_event_loop().stop() # Exit after one msg like original 153 | 154 | message = 'Hello World!' 
155 | run(tcp_echo_client(message)) 156 | 157 | **Server:** 158 | 159 | .. code-block:: python 160 | 161 | import asyncio 162 | from aiorun import run 163 | 164 | async def handle_echo(reader, writer): 165 | # Same as original! 166 | data = await reader.read(100) 167 | message = data.decode() 168 | addr = writer.get_extra_info('peername') 169 | print("Received %r from %r" % (message, addr)) 170 | print("Send: %r" % message) 171 | writer.write(data) 172 | await writer.drain() 173 | print("Close the client socket") 174 | writer.close() 175 | 176 | async def main(): 177 | server = await asyncio.start_server(handle_echo, '127.0.0.1', 8888) 178 | print('Serving on {}'.format(server.sockets[0].getsockname())) 179 | async with server: 180 | await server.serve_forever() 181 | 182 | run(main()) 183 | 184 | It works the same as the original examples, except you see this 185 | when you hit ``CTRL-C`` on the server instance: 186 | 187 | .. code-block:: bash 188 | 189 | $ python echo_server.py 190 | Running forever. 191 | Serving on ('127.0.0.1', 8888) 192 | Received 'Hello World!' from ('127.0.0.1', 57198) 193 | Send: 'Hello World!' 194 | Close the client socket 195 | ^CStopping the loop 196 | Entering shutdown phase. 197 | Cancelling pending tasks. 198 | Cancelling task: 199 | Running pending tasks till complete 200 | Waiting for executor shutdown. 201 | Leaving. Bye! 202 | 203 | Task gathering, cancellation, and executor shutdown all happen 204 | automatically. 205 | 206 | 🐛 Error Handling 207 | ------------------ 208 | 209 | Unlike the standard library's ``asyncio.run()`` method, ``aiorun.run`` 210 | will run forever, and does not stop on unhandled exceptions. This is partly 211 | because we predate the standard library method, during the time in which 212 | ``run_forever()`` was actually the recommended API for servers, and partly 213 | because it can *make sense* for long-lived servers to be resilient to 214 | unhandled exceptions. 
For example, if 99% of your API works fine, but the 215 | one new endpoint you just added has a bug: do you really want that one new 216 | endpoint to crash-loop your deployed service? 217 | 218 | Nevertheless, not all usages of ``aiorun`` are long-lived servers, so some 219 | users would prefer that ``aiorun.run()`` crash on an unhandled exception, 220 | just like any normal Python program. For this, we have an extra parameter 221 | that enables it: 222 | 223 | .. code-block:: python 224 | 225 | # stop_demo.py 226 | from aiorun import run 227 | 228 | async def main(): 229 | raise Exception('ouch') 230 | 231 | if __name__ == '__main__': 232 | run(main(), stop_on_unhandled_errors=True) 233 | 234 | This produces the following output: 235 | 236 | .. code-block:: 237 | 238 | $ python stop_demo.py 239 | Unhandled exception; stopping loop. 240 | Traceback (most recent call last): 241 | File "/opt/project/examples/stop_unhandled.py", line 9, in 242 | run(main(), stop_on_unhandled_errors=True) 243 | File "/opt/project/aiorun.py", line 294, in run 244 | raise pending_exception_to_raise 245 | File "/opt/project/aiorun.py", line 206, in new_coro 246 | await coro 247 | File "/opt/project/examples/stop_unhandled.py", line 5, in main 248 | raise Exception("ouch") 249 | Exception: ouch 250 | 251 | Error handling scenarios can get very complex, and I suggest that you 252 | try to keep your error handling as simple as possible. Nevertheless, sometimes 253 | people have special needs that require some complexity, so let's look at a 254 | few scenarios where error-handling considerations can be more challenging. 255 | 256 | ``aiorun.run()`` can also be started without an initial coroutine, in which 257 | case any other created tasks still run as normal; in this case exceptions 258 | still abort the program if the parameter is supplied: 259 | 260 | .. 
code-block:: python 261 | 262 | import asyncio 263 | from aiorun import run 264 | 265 | 266 | async def job(): 267 | raise Exception("ouch") 268 | 269 | 270 | if __name__ == "__main__": 271 | loop = asyncio.new_event_loop() 272 | asyncio.set_event_loop(loop) 273 | loop.create_task(job()) 274 | 275 | run(loop=loop, stop_on_unhandled_errors=True) 276 | 277 | The output is the same as the previous program. In this second example, 278 | we made our own loop instance and passed that to ``run()``. It is also possible 279 | to configure your exception handler on the loop, but if you do this the 280 | ``stop_on_unhandled_errors`` parameter is no longer allowed: 281 | 282 | .. code-block:: python 283 | 284 | import asyncio 285 | from aiorun import run 286 | 287 | 288 | async def job(): 289 | raise Exception("ouch") 290 | 291 | 292 | if __name__ == "__main__": 293 | loop = asyncio.new_event_loop() 294 | asyncio.set_event_loop(loop) 295 | loop.create_task(job()) 296 | loop.set_exception_handler(lambda loop, context: "Error") 297 | 298 | run(loop=loop, stop_on_unhandled_errors=True) 299 | 300 | But this is not allowed: 301 | 302 | .. code-block:: 303 | 304 | Traceback (most recent call last): 305 | File "/opt/project/examples/stop_unhandled_illegal.py", line 15, in <module> 306 | run(loop=loop, stop_on_unhandled_errors=True) 307 | File "/opt/project/aiorun.py", line 171, in run 308 | raise Exception( 309 | Exception: If you provide a loop instance, and you've configured a 310 | custom exception handler on it, then the 'stop_on_unhandled_errors' 311 | parameter is unavailable (all exceptions will be handled). 312 | /usr/local/lib/python3.8/asyncio/base_events.py:633: 313 | RuntimeWarning: coroutine 'job' was never awaited 314 | 315 | Remember that the parameter ``stop_on_unhandled_errors`` is just a convenience. 
If you're 316 | going to go to the trouble of making your own loop instance anyway, you can 317 | stop the loop yourself inside your own exception handler just fine, and 318 | then you no longer need to set ``stop_on_unhandled_errors``: 319 | 320 | .. code-block:: python 321 | 322 | # custom_stop.py 323 | import asyncio 324 | from aiorun import run 325 | 326 | 327 | async def job(): 328 | raise Exception("ouch") 329 | 330 | 331 | async def other_job(): 332 | try: 333 | await asyncio.sleep(10) 334 | except asyncio.CancelledError: 335 | print("other_job was cancelled!") 336 | 337 | 338 | if __name__ == "__main__": 339 | loop = asyncio.new_event_loop() 340 | asyncio.set_event_loop(loop) 341 | loop.create_task(job()) 342 | loop.create_task(other_job()) 343 | 344 | def handler(loop, context): 345 | # https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.call_exception_handler 346 | print(f'Stopping loop due to error: {context["exception"]} ') 347 | loop.stop() 348 | 349 | loop.set_exception_handler(handler=handler) 350 | 351 | run(loop=loop) 352 | 353 | In this example, we schedule two jobs on the loop. One of them raises an 354 | exception, and you can see in the output that the other job was still 355 | cancelled during shutdown as expected (which is what you expect ``aiorun`` 356 | to do!): 357 | 358 | .. code-block:: 359 | 360 | $ python custom_stop.py 361 | Stopping loop due to error: ouch 362 | other_job was cancelled! 363 | 364 | Note however that in this situation the exception is being *handled* by 365 | your custom exception handler, and does not bubble up out of the ``run()`` 366 | like you saw in earlier examples. If you want to do something with that 367 | exception, like reraise it or something, you need to capture it inside your 368 | custom exception handler and then do something with it, like add it to a list 369 | that you check after ``run()`` completes, and then reraise there or something 370 | similar. 
371 | 
372 | 💨 Do you like `uvloop <https://github.com/MagicStack/uvloop>`_?
373 | ------------------------------------------------------------------
374 | 
375 | .. code-block:: python
376 | 
377 |     import asyncio
378 |     from aiorun import run
379 | 
380 |     async def main():
381 |         ...  # Your application code goes here.
382 | 
383 |     if __name__ == '__main__':
384 |         run(main(), use_uvloop=True)
385 | 
386 | Note that you have to ``pip install uvloop`` yourself.
387 | 
388 | 🛡️ Smart shield for shutdown
389 | --------------------------------- 
390 | 
391 | It's unusual, but sometimes you're going to want a coroutine to not get
392 | interrupted by cancellation *during the shutdown sequence*. You'll look in
393 | the official docs and find ``asyncio.shield()``.
394 | 
395 | Unfortunately, ``shield()`` doesn't work in shutdown scenarios because
396 | the protection offered by ``shield()`` only applies if the specific coroutine
397 | *inside which* the ``shield()`` is used, gets cancelled directly.
398 | 
399 | Let me explain: if you do a conventional shutdown sequence (like ``aiorun``
400 | is doing internally), this is the sequence of steps:
401 | 
402 | - ``tasks = all_tasks()``, followed by
403 | - ``[t.cancel() for t in tasks]``, and then
404 | - ``run_until_complete(gather(*tasks))``
405 | 
406 | The way ``shield()`` works internally is it creates a *secret, inner*
407 | task—which also gets included in the ``all_tasks()`` call above! Thus
408 | it also receives a cancellation exception just like everything else.
409 | 
410 | Therefore, we have an alternative version of ``shield()`` that works better for
411 | us: ``shutdown_waits_for()``. If you've got a coroutine that must **not** be
412 | cancelled during the shutdown sequence, just wrap it in
413 | ``shutdown_waits_for()``!
414 | 
415 | Here's an example:
416 | 
417 | ..
code-block:: python 418 | 419 | import asyncio 420 | from aiorun import run, shutdown_waits_for 421 | 422 | async def corofn(): 423 | for i in range(10): 424 | print(i) 425 | await asyncio.sleep(1) 426 | print('done!') 427 | 428 | async def main(): 429 | try: 430 | await shutdown_waits_for(corofn()) 431 | except asyncio.CancelledError: 432 | print('oh noes!') 433 | 434 | run(main()) 435 | 436 | If you hit ``CTRL-C`` *before* 10 seconds has passed, you will see 437 | ``oh noes!`` printed immediately, and then after 10 seconds (since start), 438 | ``done!`` is printed, and thereafter the program exits. 439 | 440 | Output: 441 | 442 | .. code-block:: shell 443 | 444 | $ python testshield.py 445 | 0 446 | 1 447 | 2 448 | 3 449 | 4 450 | ^CStopping the loop 451 | oh noes! 452 | 5 453 | 6 454 | 7 455 | 8 456 | 9 457 | done! 458 | 459 | Behind the scenes, ``all_tasks()`` would have been cancelled by ``CTRL-C``, 460 | *except* ones wrapped in ``shutdown_waits_for()`` calls. In this respect, it 461 | is loosely similar to ``asyncio.shield()``, but with special applicability 462 | to our shutdown scenario in ``aiorun()``. 463 | 464 | Be careful with this: the coroutine should still finish up at some point. 465 | The main use case for this is short-lived tasks that you don't want to 466 | write explicit cancellation handling. 467 | 468 | Oh, and you can use ``shutdown_waits_for()`` as if it were ``asyncio.shield()`` 469 | too. For that use-case it works the same. If you're using ``aiorun``, there 470 | is no reason to use ``shield()``. 471 | 472 | 🙏 Windows Support 473 | ------------------------- 474 | 475 | ``aiorun`` also supports Windows! Kinda. Sorta. The root problem with Windows, 476 | for a thing like ``aiorun`` is that Windows doesn't support *signal handling* 477 | the way Linux or Mac OS X does. Like, at all. 
478 | 
479 | For Linux, ``aiorun`` does "the right thing" out of the box for the
480 | ``SIGINT`` and ``SIGTERM`` signals; i.e., it will catch them and initiate
481 | a safe shutdown process as described earlier. However, on *Windows*, these
482 | signals don't work.
483 | 
484 | There are two signals that work on Windows: the ``CTRL-C`` signal (happens
485 | when you press, unsurprisingly, ``CTRL-C``), and the ``CTRL-BREAK`` signal
486 | which happens when you...well, you get the picture.
487 | 
488 | The good news is that, for ``aiorun``, both of these will work. Yay! The bad
489 | news is that for them to work, you have to run your code in a Console
490 | window. Boo!
491 | 
492 | Fortunately, it turns out that you can run an asyncio-based process *not*
493 | attached to a Console window, e.g. as a service or a subprocess, *and* have
494 | it also receive a signal to safely shut down in a controlled way. It turns
495 | out that it is possible to send a ``CTRL-BREAK`` signal to another process,
496 | with no console window involved, but only as long as that process was created
497 | in a particular way and---here is the catch---this targeted process is a
498 | child process of the one sending the signal. Yeah, I know, it's a downer.
499 | 
500 | There is an example of how to do this in the tests:
501 | 
502 | .. code-block:: python3
503 | 
504 |     import subprocess as sp
505 | 
506 |     proc = sp.Popen(
507 |         ['python', 'app.py'],
508 |         stdout=sp.PIPE,
509 |         stderr=sp.STDOUT,
510 |         creationflags=sp.CREATE_NEW_PROCESS_GROUP
511 |     )
512 |     print(proc.pid)
513 | 
514 | Notice how we print out the process id (``pid``). Then you can send that
515 | process the signal from a completely different process, once you know
516 | the ``pid``: 
517 | 
518 | ..
code-block:: python3 519 | 520 | import os, signal 521 | 522 | os.kill(pid, signal.CTRL_BREAK_EVENT) 523 | 524 | (Remember, ``os.kill()`` doesn't actually kill, it only sends a signal) 525 | 526 | ``aiorun`` supports this use-case above, although I'll be pretty surprised 527 | if anyone actually uses it to manage microservices (does anyone do this?) 528 | 529 | So to summarize: ``aiorun`` will do a controlled shutdown if either 530 | ``CTRL-C`` or ``CTRL-BREAK`` is entered via keyboard in a Console window 531 | with a running instance, or if the ``CTRL-BREAK`` signal is sent to 532 | a *subprocess* that was created with the ``CREATE_NEW_PROCESS_GROUP`` 533 | flag set. `Here `_ is a much more 534 | detailed explanation of these issues. 535 | 536 | Finally, ``uvloop`` is not yet supported on Windows so that won't work 537 | either. 538 | 539 | At the very least, ``aiorun`` will, well, *run* on Windows ¯\\_(ツ)_/¯ 540 | -------------------------------------------------------------------------------- /TODO: -------------------------------------------------------------------------------- 1 | - Optional entry point for "pre-run_forever" tasks, like TCP server start. 2 | - Might also need a hook for server shutdown, aka "wait_closed"? Hopefully 3 | this can just be done in application code. 
4 | -------------------------------------------------------------------------------- /aiorun/__init__.py: -------------------------------------------------------------------------------- 1 | """Boilerplate for asyncio applications""" 2 | import asyncio 3 | import contextlib 4 | import inspect 5 | import logging 6 | import signal 7 | import sys 8 | from asyncio import AbstractEventLoop, CancelledError, all_tasks, get_event_loop 9 | from concurrent.futures import Executor, ThreadPoolExecutor 10 | from functools import partial 11 | from typing import Awaitable, Callable, Coroutine, Optional, Union, AbstractSet 12 | from weakref import WeakSet 13 | 14 | ShutdownCallback = Optional[ 15 | Union[ 16 | Awaitable, 17 | Callable[[AbstractEventLoop], Awaitable], 18 | Callable[[AbstractEventLoop], None], 19 | ] 20 | ] 21 | 22 | 23 | __all__ = ["run", "shutdown_waits_for"] 24 | __version__ = "2025.1.1" 25 | logger = logging.getLogger("aiorun") 26 | WINDOWS = sys.platform == "win32" 27 | 28 | 29 | # TODO: when 3.8 is dropped, replace `AbstractSet` with `WeakSet` 30 | _DO_NOT_CANCEL_COROS: AbstractSet[Coroutine] = WeakSet() 31 | 32 | 33 | def shutdown_waits_for(coro, loop=None): 34 | """Prevent coro from being cancelled during the shutdown sequence. 35 | 36 | The trick here is that we add this coro to the global 37 | "DO_NOT_CANCEL" collection, and then later during the shutdown 38 | sequence we make sure that the task that wraps this coro will NOT 39 | be cancelled. 40 | 41 | To make this work, we have to create a super-secret task, below, that 42 | communicates with the caller (which "awaits" us) via a Future. Using 43 | a Future in this way allows us to avoid awaiting the Task, which 44 | decouples the Task from the normal exception propagation which would 45 | normally happen when the outer Task gets cancelled. We get the 46 | result of coro back to the caller via Future.set_result. 
47 | 48 | NOTE that during the shutdown sequence, the caller WILL NOT be able 49 | to receive a result, since the caller will likely have been 50 | cancelled. So you should probably not rely on capturing results 51 | via this function. 52 | """ 53 | loop = loop or get_event_loop() 54 | fut = loop.create_future() # This future will connect coro and the caller. 55 | 56 | async def coro_proxy(): 57 | """This function will await coro, but it will also send the result 58 | over to the future. Remember: the outside caller (of 59 | shutdown_waits_for) will be awaiting fut, NOT coro(), due to 60 | the decoupling. However, when coro completes, we need to send its 61 | result over to the fut to make it look *as if* it was just coro 62 | running the whole time. This whole thing is a teeny magic trick. 63 | """ 64 | try: 65 | result = await coro 66 | except (CancelledError, Exception) as e: 67 | set_fut_done = partial(fut.set_exception, e) 68 | else: 69 | set_fut_done = partial(fut.set_result, result) 70 | 71 | if not fut.cancelled(): 72 | set_fut_done() 73 | 74 | new_coro = coro_proxy() # We'll taskify this one instead of coro. 75 | _DO_NOT_CANCEL_COROS.add(new_coro) # The new task must not be cancelled. 76 | _background_task = loop.create_task(new_coro) # Make the task 77 | 78 | # Ok, so we *could* simply return fut. Callers can await it as normal, 79 | # e.g. 80 | # 81 | # async def blah(): 82 | # x = await shutdown_waits_for(bleh()) 83 | # 84 | # That will work fine. However, callers may *also* want to detach the 85 | # call from the current execution context, e.g. 86 | # 87 | # async def blah(): 88 | # loop.create_task(shutdown_waits_for(bleh())) 89 | # 90 | # This will only work if shutdown_waits_for() returns a coroutine. 91 | # Therefore, we just make a new coroutine to wrap the `await fut` and 92 | # return that. Then both things will work. 
93 | # 94 | # (Side note: instead of callers using create_tasks, it would also work 95 | # if they used `asyncio.ensure_future()` instead, since that can work 96 | # with futures. But I don't like ensure_future.) 97 | # 98 | # (Another side note: You don't even need `create_task()` or 99 | # `ensure_future()`...If you don't want a result, you can just call 100 | # `shutdown_waits_for()` as a flat function call, no await or anything, 101 | # and it should still work; unfortunately it causes a RuntimeWarning to 102 | # tell you that ``inner()`` was never awaited :/ 103 | 104 | async def inner(): 105 | return await fut 106 | 107 | return inner() 108 | 109 | 110 | def run( 111 | coro: "Optional[Coroutine]" = None, 112 | *, 113 | loop: Optional[AbstractEventLoop] = None, 114 | shutdown_handler: Optional[Callable[[AbstractEventLoop], None]] = None, 115 | shutdown_callback: "ShutdownCallback" = None, 116 | executor_workers: Optional[int] = None, 117 | executor: Optional[Executor] = None, 118 | use_uvloop: bool = False, 119 | stop_on_unhandled_errors: bool = False, 120 | timeout_task_shutdown: float = 60 121 | ) -> None: 122 | """ 123 | Start up the event loop, and wait for a signal to shut down. 124 | 125 | :param coro: Optionally supply a coroutine. The loop will still 126 | run if missing. The loop will continue to run after the supplied 127 | coroutine finishes. The supplied coroutine is typically 128 | a "main" coroutine from which all other work is spawned. 129 | :param loop: Optionally supply your own loop. If missing, a new 130 | event loop instance will be created. 131 | :param shutdown_handler: By default, SIGINT and SIGTERM will be 132 | handled and will stop the loop, thereby invoking the shutdown 133 | sequence. Alternatively you can supply your own shutdown 134 | handler function. It should conform to the type spec as shown 135 | in the function signature. 
136 | :param shutdown_callback: Callable, executed after loop is stopped, before 137 | cancelling any tasks. 138 | Useful for graceful shutdown. 139 | :param executor_workers: The number of workers in the executor. 140 | NOTE: ``run()`` creates a new executor instance internally, 141 | regardless of whether you supply your own loop. Note that this 142 | parameter will be ignored if you provide an executor parameter. 143 | :param executor: You can decide to use your own executor instance 144 | if you like. If you provide an executor instance, the 145 | executor_workers parameter will be ignored. 146 | :param use_uvloop: The loop policy will be set to use uvloop. It 147 | is your responsibility to install uvloop. If missing, an 148 | ``ImportError`` will be raised. 149 | :param stop_on_unhandled_errors: By default, the event loop will 150 | handle any exceptions that get raised and are not handled. This 151 | means that the event loop will continue running regardless of errors, 152 | and the only way to stop it is to call `loop.stop()`. However, if 153 | this flag is set, any unhandled exceptions will stop the loop, and 154 | be re-raised after the normal shutdown sequence is completed. 155 | :param timeout_task_shutdown: When shutdown is initiated, for example 156 | by a signal like SIGTERM, or even by an unhandled exception if 157 | ``stop_on_unhandled_errors`` is True, then the first action taken 158 | during shutdown is to cancel all currently pending or running tasks 159 | and then wait for them all to complete. This timeout sets an upper 160 | limit on how long to wait. 161 | """ 162 | _clear_signal_handlers() 163 | logger.debug("Entering run()") 164 | # Disable default signal handling ASAP 165 | 166 | if loop and use_uvloop: 167 | raise Exception( 168 | "'loop' and 'use_uvloop' parameters are mutually " 169 | "exclusive. (Just make your own uvloop and pass it in)." 
170 | ) 171 | 172 | loop_was_supplied = bool(loop) 173 | 174 | # If we check `loop_was_supplied` here, mypy will forever complain 175 | # that `loop` might be None. So we have to check loop directly here 176 | # to silence these incorrect mypy complaints. Yay for typing. 177 | if not loop: 178 | if use_uvloop: 179 | import uvloop 180 | 181 | asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) 182 | else: 183 | asyncio.set_event_loop_policy(asyncio.DefaultEventLoopPolicy()) 184 | 185 | loop = asyncio.new_event_loop() 186 | asyncio.set_event_loop(loop) 187 | 188 | if loop and loop.get_exception_handler() and stop_on_unhandled_errors: 189 | raise Exception( 190 | "If you provide a loop instance, and you've configured a custom " 191 | "exception handler on it, then the 'stop_on_unhandled_errors' " 192 | "parameter is unavailable (all exceptions will be handled)." 193 | ) 194 | 195 | pending_exception_to_raise = None 196 | 197 | def custom_exception_handler(loop, context: dict): 198 | """See: https://docs.python.org/3/library/asyncio-eventloop.html#error-handling-api""" 199 | nonlocal pending_exception_to_raise 200 | pending_exception_to_raise = context.get("exception") 201 | logger.error( 202 | "Unhandled exception; stopping loop: %r", 203 | context.get("message"), 204 | exc_info=pending_exception_to_raise 205 | ) 206 | loop.stop() 207 | 208 | if stop_on_unhandled_errors: 209 | loop.set_exception_handler(custom_exception_handler) 210 | 211 | if coro: 212 | 213 | async def new_coro(): 214 | """During shutdown, run_until_complete() will exit 215 | if a CancelledError bubbles up from anything in the 216 | group. To counteract that, we'll try to handle 217 | any CancelledErrors that bubble up from the given 218 | coro. 
This isn't fool-proof: if the user doesn't 219 | provide a coro, and instead creates their own with 220 | loop.create_task, that task might bubble 221 | a CancelledError into the run_until_complete().""" 222 | with contextlib.suppress(asyncio.CancelledError): 223 | await coro 224 | 225 | loop.create_task(new_coro()) 226 | 227 | shutdown_handler = shutdown_handler or _shutdown_handler 228 | # Setting up signal handlers. The callback configured by the 229 | # underlying system (non-asyncio) API ``signal.signal`` is 230 | # pre-emptive, which means you can't safely do loop manipulation 231 | # with it; yet, aiorun provides an API that allows you to specify 232 | # a ``shutdown_handler`` that takes a loop parameter. This will be 233 | # used to manipulate the loop. How to bridge these two worlds? 234 | # Here we use a private, internal wrapper function that can be 235 | # called from ``signal.signal`` (i.e. pre-emptive interruption) 236 | # but which will call our actual, non-pre-emptive shutdown handler 237 | # in a safe way. 238 | # 239 | # This is supposed to be what loop.add_signal_handler does, but I 240 | # cannot seem get it to work robustly. 241 | sighandler = partial(_signal_wrapper, loop=loop, actual_handler=shutdown_handler) 242 | _set_signal_handlers(sighandler) 243 | 244 | if WINDOWS: # pragma: no cover 245 | # This is to allow CTRL-C to be detected in a timely fashion, 246 | # see: https://bugs.python.org/issue23057#msg246316 247 | loop.create_task(windows_support_wakeup()) 248 | 249 | # TODO: We probably don't want to create a different executor if the 250 | # TODO: loop was supplied. (User might have put stuff on that loop's 251 | # TODO: executor). 
252 | if not executor: 253 | logger.debug("Creating default executor") 254 | executor = ThreadPoolExecutor(max_workers=executor_workers) 255 | loop.set_default_executor(executor) 256 | try: 257 | loop.run_forever() 258 | except KeyboardInterrupt: # pragma: no cover 259 | logger.info("Got KeyboardInterrupt") 260 | if WINDOWS: 261 | # Windows doesn't do any POSIX signal handling, and no 262 | # abstraction layer for signals is currently implemented in 263 | # asyncio. So we fall back to KeyboardInterrupt (triggered 264 | # by the user/environment sending CTRL-C, or signal.CTRL_C_EVENT 265 | shutdown_handler(loop) 266 | 267 | logger.info("Entering shutdown phase.") 268 | if shutdown_callback is not None: 269 | logger.info("Executing provided shutdown_callback.") 270 | try: 271 | if inspect.iscoroutine(shutdown_callback): 272 | loop.run_until_complete(shutdown_callback) 273 | elif inspect.iscoroutinefunction(shutdown_callback): 274 | loop.run_until_complete(shutdown_callback(loop)) 275 | elif callable(shutdown_callback): 276 | shutdown_callback(loop) 277 | else: # pragma: no cover 278 | raise TypeError( 279 | "The provided shutdown_callback must be either a function," 280 | "an awaitable, or a coroutine object, but it was " 281 | + str(type(shutdown_callback)) 282 | ) 283 | except BaseException as exc: 284 | if pending_exception_to_raise: 285 | logger.exception( 286 | "The shutdown_callback() raised an error, but there is " 287 | "already a different exception raised from the loop, so " 288 | "this log message is all you're going to see about it." 289 | ) 290 | else: 291 | pending_exception_to_raise = exc 292 | 293 | tasks = all_tasks(loop=loop) 294 | 295 | if tasks: 296 | logger.info("Cancelling pending tasks.") 297 | for t in tasks: 298 | # TODO: we don't need access to the coro. We could simply 299 | # TODO: store the task itself in the weakset. 
300 | if t._coro not in _DO_NOT_CANCEL_COROS: # type: ignore 301 | t.cancel() 302 | 303 | async def wait_for_cancelled_tasks(timeout): 304 | """ Wait for the cancelled tasks to finish up. They have received 305 | CancelledError and must exit. However, it is possible that some 306 | badly-behaved tasks catch CancelledError (or BaseException) and 307 | then do not exit as they're supposed to. Thus, we only wait for 308 | ``timeout`` and then return anyway. 309 | 310 | To make it a bit easier to figure out when this is happening and 311 | why, there is is a log message (WARNING level) that will show 312 | the stack frames of all tasks that are still alive at the timeout. 313 | This can be used to troubleshoot why those tasks did not exit. 314 | 315 | Here is a sample of what the logs might look like (taken from one 316 | of the tests.) 317 | 318 | .. code-block:: 319 | 320 | 321 | INFO:aiorun:Entering shutdown phase. 322 | INFO:aiorun:Cancelling pending tasks. 323 | DEBUG:aiorun:Cancelling task: \ 324 | .naughty_task() \ 325 | running at /home/caleb/Documents/repos/aiorun/tests/test_stop_on_errors.py:75> \ 326 | wait_for=> 327 | INFO:aiorun:Running pending tasks till complete 328 | WARNING:aiorun:During shutdown, the following tasks were cancelled but refused to \ 329 | exit after 2.0 seconds: [] 332 | INFO:aiorun:Waiting for executor shutdown. 333 | INFO:aiorun:Shutting down async generators 334 | INFO:aiorun:Closing the loop. 335 | INFO:aiorun:Leaving. Bye! 
336 | INFO:aiorun:Reraising unhandled exception 337 | 338 | 339 | """ 340 | _, pending = await asyncio.wait([*tasks], timeout=timeout) 341 | if pending: 342 | tasks_info = '\n\n'.join(str(t.get_stack()) for t in pending) 343 | msg = ( 344 | "During shutdown, the following tasks refused " 345 | "to exit after {timeout} seconds: {tasks_info}".format( 346 | timeout=timeout, 347 | tasks_info=tasks_info 348 | ) 349 | ) 350 | logger.warning(msg) 351 | 352 | if tasks: 353 | logger.info("Running pending tasks till complete") 354 | # TODO: obtain all the results, and log any results that are exceptions 355 | # other than CancelledError. Will be useful for troubleshooting. 356 | loop.run_until_complete( 357 | wait_for_cancelled_tasks( 358 | timeout=timeout_task_shutdown, 359 | ), 360 | ) 361 | 362 | logger.info("Waiting for executor shutdown.") 363 | executor.shutdown(wait=True) 364 | # If loop was supplied, it's up to the caller to close! 365 | if not loop_was_supplied: 366 | logger.info("Shutting down async generators") 367 | loop.run_until_complete(loop.shutdown_asyncgens()) 368 | logger.info("Closing the loop.") 369 | loop.close() 370 | logger.info("Leaving. Bye!") 371 | 372 | if pending_exception_to_raise: 373 | logger.info("Reraising unhandled exception") 374 | raise pending_exception_to_raise 375 | 376 | 377 | async def windows_support_wakeup(): # pragma: no cover 378 | """See https://stackoverflow.com/a/36925722 """ 379 | while True: 380 | await asyncio.sleep(0.1) 381 | 382 | 383 | def _signal_wrapper(sig, frame, loop: asyncio.AbstractEventLoop, actual_handler): 384 | """This private function does nothing other than call the actual signal 385 | handler function in a way that is safe for asyncio. This function is 386 | called as the raw signal handler which means it is called pre-emptively, 387 | that's why we used ``call_soon_threadsafe`` below. 
The actual signal 388 | handler can interact with the loop in a safe way.""" 389 | # Disable the handlers so they won't be called again. 390 | _clear_signal_handlers() 391 | loop.call_soon_threadsafe(actual_handler, loop) 392 | 393 | 394 | def _shutdown_handler(loop): 395 | logger.debug("Entering shutdown handler") 396 | loop = loop or get_event_loop() 397 | 398 | logger.warning("Stopping the loop") 399 | loop.stop() 400 | 401 | 402 | def _set_signal_handlers(threadsafe_func): 403 | if WINDOWS: # pragma: no cover 404 | signal.signal(signal.SIGBREAK, threadsafe_func) 405 | signal.signal(signal.SIGINT, threadsafe_func) 406 | else: 407 | signal.signal(signal.SIGTERM, threadsafe_func) 408 | signal.signal(signal.SIGINT, threadsafe_func) 409 | 410 | 411 | def _clear_signal_handlers(): 412 | if WINDOWS: # pragma: no cover 413 | # These calls to signal.signal can only be called from the main 414 | # thread. 415 | signal.signal(signal.SIGBREAK, signal.SIG_IGN) 416 | signal.signal(signal.SIGINT, signal.SIG_IGN) 417 | else: 418 | signal.signal(signal.SIGINT, signal.SIG_IGN) 419 | signal.signal(signal.SIGTERM, signal.SIG_IGN) 420 | -------------------------------------------------------------------------------- /aiorun/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cjrh/aiorun/fd1702346bc2881c6895a0d2dfc3a37660b09421/aiorun/py.typed -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.7" 2 | services: 3 | test37: 4 | build: 5 | context: . 6 | dockerfile: test.dockerfile 7 | test38: 8 | build: 9 | context: . 
10 | dockerfile: test38.dockerfile 11 | -------------------------------------------------------------------------------- /examples/echo_client.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from aiorun import run 3 | 4 | 5 | async def tcp_echo_client(message): 6 | reader, writer = await asyncio.open_connection( 7 | '127.0.0.1', 8888) 8 | 9 | print('Send: %r' % message) 10 | writer.write(message.encode()) 11 | 12 | data = await reader.read(100) 13 | print('Received: %r' % data.decode()) 14 | 15 | print('Close the socket') 16 | writer.close() 17 | asyncio.get_event_loop().stop() 18 | 19 | message = 'Hello World!' 20 | run(tcp_echo_client(message)) 21 | -------------------------------------------------------------------------------- /examples/echo_server.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from aiorun import run 3 | 4 | 5 | async def handle_echo(reader, writer): 6 | data = await reader.read(100) 7 | message = data.decode() 8 | addr = writer.get_extra_info('peername') 9 | print("Received %r from %r" % (message, addr)) 10 | 11 | print("Send: %r" % message) 12 | writer.write(data) 13 | await writer.drain() 14 | 15 | print("Close the client socket") 16 | writer.close() 17 | 18 | 19 | async def main(): 20 | server = await asyncio.start_server(handle_echo, '127.0.0.1', 8888) 21 | print('Serving on {}'.format(server.sockets[0].getsockname())) 22 | try: 23 | while True: 24 | await asyncio.sleep(10) 25 | except asyncio.CancelledError: 26 | server.close() 27 | await server.wait_closed() 28 | 29 | run(main()) 30 | -------------------------------------------------------------------------------- /examples/stop_unhandled.py: -------------------------------------------------------------------------------- 1 | from aiorun import run 2 | 3 | 4 | async def main(): 5 | raise Exception("ouch") 6 | 7 | 8 | if __name__ == "__main__": 9 | run(main(), 
stop_on_unhandled_errors=True) 10 | -------------------------------------------------------------------------------- /examples/stop_unhandled_custom.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from aiorun import run 3 | 4 | 5 | async def job(): 6 | raise Exception("ouch") 7 | 8 | 9 | async def other_job(): 10 | try: 11 | await asyncio.sleep(10) 12 | except asyncio.CancelledError: 13 | print("other_job was cancelled!") 14 | 15 | 16 | if __name__ == "__main__": 17 | loop = asyncio.new_event_loop() 18 | asyncio.set_event_loop(loop) 19 | loop.create_task(job()) 20 | loop.create_task(other_job()) 21 | 22 | def handler(loop, context): 23 | # https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.call_exception_handler 24 | print(f'Stopping loop due to error: {context["exception"]} ') 25 | loop.stop() 26 | 27 | loop.set_exception_handler(handler=handler) 28 | 29 | run(loop=loop) 30 | -------------------------------------------------------------------------------- /examples/stop_unhandled_illegal.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from aiorun import run 3 | 4 | 5 | async def job(): 6 | raise Exception("ouch") 7 | 8 | 9 | if __name__ == "__main__": 10 | loop = asyncio.new_event_loop() 11 | asyncio.set_event_loop(loop) 12 | loop.create_task(job()) 13 | loop.set_exception_handler(lambda loop, context: "Error") 14 | 15 | run(loop=loop, stop_on_unhandled_errors=True) 16 | -------------------------------------------------------------------------------- /examples/stop_unhandled_task.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from aiorun import run 3 | 4 | 5 | async def job(): 6 | raise Exception("ouch") 7 | 8 | 9 | if __name__ == "__main__": 10 | loop = asyncio.new_event_loop() 11 | asyncio.set_event_loop(loop) 12 | loop.create_task(job()) 13 | 14 | run(loop=loop, 
stop_on_unhandled_errors=True) 15 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["flit_core >=2,<4"] 3 | build-backend = "flit_core.buildapi" 4 | 5 | [tool.flit.metadata] 6 | module = "aiorun" 7 | author = "Caleb Hattingh" 8 | author-email = "caleb.hattingh@gmail.com" 9 | home-page = "https://github.com/cjrh/aiorun" 10 | classifiers = [ 11 | "License :: OSI Approved :: Apache Software License", 12 | "Natural Language :: English", 13 | "Programming Language :: Python", 14 | "Programming Language :: Python :: 3.8", 15 | "Programming Language :: Python :: 3.9", 16 | "Programming Language :: Python :: 3.10", 17 | "Programming Language :: Python :: 3.11", 18 | "Programming Language :: Python :: 3.12", 19 | "Operating System :: OS Independent" 20 | ] 21 | description-file = "README.rst" 22 | requires-python = ">=3.7" 23 | 24 | [tool.flit.metadata.requires-extra] 25 | dev = [ 26 | "pytest", 27 | "pytest-cov" 28 | ] 29 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | # Added to let vim ALE figure out that this is the project root :( 2 | [pytest] 3 | testpaths = aiorun.py tests 4 | norecursedirs = 5 | .git 6 | htmlcov 7 | contribs 8 | contrib 9 | *.egg-info 10 | .cache 11 | *_cache 12 | settings 13 | venv 14 | xfail_strict = true 15 | addopts = 16 | --continue-on-collection-errors 17 | --durations=10 18 | -------------------------------------------------------------------------------- /requirements-test.txt: -------------------------------------------------------------------------------- 1 | typing; python_version < '3.5' 2 | pytest 3 | pytest-cov 4 | coveralls 5 | uvloop; sys_platform != 'win32' 6 | flit 7 | 
-------------------------------------------------------------------------------- /test.dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.7-stretch 2 | 3 | RUN apt-get update && apt-get install -y \ 4 | vim \ 5 | less 6 | 7 | COPY ./requirements-test.txt /requirements-test.txt 8 | RUN python -m pip install \ 9 | -r requirements-test.txt \ 10 | flit \ 11 | pygments 12 | COPY . /opt/project 13 | WORKDIR /opt/project 14 | RUN FLIT_ROOT_INSTALL=1 flit install --pth-file 15 | 16 | CMD ["/bin/bash"] 17 | -------------------------------------------------------------------------------- /test38.dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.8.3-slim 2 | 3 | RUN apt-get update && apt-get install -y \ 4 | vim \ 5 | less 6 | 7 | COPY ./requirements-test.txt /requirements-test.txt 8 | RUN python -m pip install \ 9 | -r requirements-test.txt \ 10 | flit \ 11 | pygments 12 | COPY . /opt/project 13 | WORKDIR /opt/project 14 | RUN FLIT_ROOT_INSTALL=1 flit install --pth-file 15 | 16 | CMD ["/bin/bash"] 17 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | import sys 4 | import logging 5 | import multiprocessing 6 | 7 | multiprocessing.set_start_method("spawn") 8 | 9 | 10 | # The windows tests have to use subprocesses, and it makes it easier 11 | # if all tests are forced to have the package root as the CWD. 
"""Subprocess target for the Windows signal tests: parks in run() until signalled."""
import logging
import asyncio
from aiorun import run


logging.basicConfig(level="DEBUG")


async def main():
    # Announce entry, then park.  The finally block fires when aiorun
    # cancels this coroutine during shutdown, so the parent test can
    # observe a clean exit path in the captured log output.
    logging.info("Sleeping in main")
    try:
        await asyncio.sleep(50)
    finally:
        logging.info("Leaving main")


run(main())
logging.critical("Leaving fake main")
def main(q: mp.Queue, **kwargs):
    """Subprocess entry point for the basic SIGTERM/SIGINT tests.

    Reports readiness on *q*, then sleeps inside ``run()`` until a signal
    arrives.  Recognised keys are popped from ``kwargs``; everything left
    over is forwarded to ``run()`` unchanged.
    """

    async def main():
        # Tell the parent process we are up, then wait to be signalled.
        q.put("ok")
        await asyncio.sleep(5.0)

    exe = ThreadPoolExecutor() if kwargs.pop("use_exe", None) else None

    loop = None
    # A user-supplied loop is mutually exclusive with use_uvloop, so only
    # build one when uvloop was not requested.
    if kwargs.pop("user_supplied_loop", None) and not kwargs.get("use_uvloop"):
        loop = newloop()

    try:
        run(main(), executor=exe, loop=loop, **kwargs)
    finally:
        # The None sentinel tells drain_queue() in the parent we're done.
        q.put(None)
        q.join()
def drain_queue(q: mp.JoinableQueue) -> List:
    """Collect items from *q* until the ``None`` sentinel arrives.

    Every received item — including the sentinel itself — is acknowledged
    with ``task_done()`` so that a producer blocked in ``q.join()`` can
    return.  The sentinel is not included in the returned list.
    """
    collected = []
    while (item := q.get()) is not None:
        q.task_done()
        collected.append(item)
    q.task_done()  # acknowledge the sentinel as well
    return collected
@pytest.mark.parametrize(
    "spawn_method", ["create_task", "ensure_future", "await", "bare"]
)
@pytest.mark.parametrize("signal", [SIGTERM, SIGINT])
def test_sigterm_enduring_create_task(mpproc, signal, spawn_method):
    """A coroutine wrapped in ``shutdown_waits_for()`` must run to
    completion during shutdown, regardless of how it was spawned."""
    ctx = mpproc(
        target=main_sigterm_enduring_create_task, spawn_method=spawn_method
    )
    with ctx as (proc, queue):
        # Wait for the child's async main() to report readiness.
        assert queue.get() == "ok"
        queue.task_done()

        # Deliver the signal, then gather everything up to the sentinel.
        # The [True] payload proves the shielded coroutine finished.
        time.sleep(0.01)
        os.kill(proc.pid, signal)
        assert drain_queue(queue) == [True]
class CallbackType(Enum):
    # The three shapes accepted by run(shutdown_callback=...).
    FUNCTION = 1
    ASYNC_DEF_FUNCTION = 2
    COROUTINE_OBJECT = 3


def make_shutdown_callback(
    cbtype: CallbackType, fut: asyncio.Future
) -> "ShutdownCallback":
    """Return a shutdown callback of the requested flavour.

    Whatever the flavour, invoking the callback resolves *fut*, which
    lets the caller detect that aiorun actually ran it during shutdown.

    Raises:
        TypeError: if *cbtype* is not a recognised ``CallbackType``.
    """
    if cbtype is CallbackType.FUNCTION:
        def shutdown_callback(loop):
            print('shutdown callback called!')
            fut.set_result(None)

        return shutdown_callback

    if cbtype is CallbackType.ASYNC_DEF_FUNCTION:
        async def shutdown_callback(loop):
            fut.set_result(None)

        return shutdown_callback

    if cbtype is CallbackType.COROUTINE_OBJECT:
        async def shutdown_callback(loop):
            fut.set_result(None)

        return shutdown_callback(None)

    raise TypeError('Unexpected callback type.')
def main_shutdown_callback_error(q: mp.Queue):
    """Subprocess entry: run() with a shutdown callback that raises.

    Reports back on *q*: the exception that escaped ``run()`` (or a
    marker string if none was raised), followed by the ``None`` sentinel.
    """

    async def main():
        # Nothing to do — exit almost immediately so shutdown begins.
        await asyncio.sleep(1e-3)

    def shutdown_callback(loop):
        raise Exception("blah")

    try:
        run(main(), shutdown_callback=shutdown_callback)
    except Exception as exc:
        q.put_nowait(exc)
    else:
        q.put_nowait("exception was not raised")
    finally:
        q.put_nowait(None)
        q.join()
def main_shutdown_callback_error_and_main_error(q: mp.Queue):
    """Subprocess entry: both main() and the shutdown callback raise.

    Sends back on *q*, in order: the exception that escaped ``run()``
    (expected to be the one from main()), the list of log messages aiorun
    emitted during the run, and finally the ``None`` sentinel.
    """
    import logging

    captured = []

    def capture(record):
        # A logging filter sees every record on the "aiorun" logger;
        # returning the record keeps it flowing while we snoop.
        captured.append(record.getMessage())
        return record

    logging.getLogger("aiorun").addFilter(capture)

    async def main():
        await asyncio.sleep(1e-3)
        raise Exception("main")

    def shutdown_callback(loop):
        raise Exception("blah")

    try:
        run(main(), stop_on_unhandled_errors=True, shutdown_callback=shutdown_callback)
    except Exception as exc:
        q.put_nowait(exc)
    else:
        q.put_nowait("exception was not raised")
    finally:
        q.put_nowait(captured)
        q.put_nowait(None)
        q.join()
def test_exc_stop():
    """An unhandled exception in main() must stop the loop and cancel all
    outstanding tasks when ``stop_on_unhandled_errors=True``."""
    created_tasks = []

    async def background_task():
        await asyncio.sleep(10)

    async def main():
        loop = asyncio.get_event_loop()
        created_tasks.extend(loop.create_task(background_task()) for i in range(10))
        await asyncio.sleep(0.01)
        raise Exception("Stops the loop")

    with pytest.raises(Exception) as excinfo:
        run(main(), stop_on_unhandled_errors=True)

    print(excinfo.traceback)
    assert "Stops the loop" in str(excinfo.value)
    # Bug fix: ``t.cancelled`` (no parens) yields bound methods, which are
    # always truthy, so the old assertion was vacuous.  The tasks here are
    # plain sleeps, so they cancel cleanly and cancelled() is True.
    assert all(t.cancelled() for t in created_tasks)
error")}, 41 | _TestException, 42 | ), 43 | ], 44 | ) 45 | def test_call_exception_handler(context, raised_exc): 46 | """Test when loop's exception handler was called with custom context""" 47 | created_tasks = [] 48 | 49 | async def background_task(): 50 | await asyncio.sleep(2) 51 | 52 | async def main(): 53 | loop = asyncio.get_event_loop() 54 | created_tasks.extend(loop.create_task(background_task()) for _ in range(5)) 55 | await asyncio.sleep(0.1) 56 | loop.call_exception_handler(context=context) 57 | 58 | if raised_exc is not None: 59 | with pytest.raises(raised_exc) as excinfo: 60 | run(main(), stop_on_unhandled_errors=True) 61 | assert "test error" in str(excinfo.value) 62 | else: 63 | run(main(), stop_on_unhandled_errors=True) 64 | 65 | assert all(t.cancelled for t in created_tasks) 66 | 67 | 68 | def test_stop_must_be_obeyed(caplog): 69 | """Basic SIGTERM""" 70 | 71 | created_tasks = [] 72 | 73 | async def naughty_task(): 74 | try: 75 | await asyncio.sleep(10) 76 | except asyncio.CancelledError: 77 | await asyncio.sleep(10) 78 | 79 | async def main(): 80 | # loop = asyncio.get_running_loop() 81 | loop = asyncio.get_event_loop() 82 | created_tasks.append( 83 | loop.create_task( 84 | naughty_task(), 85 | ) 86 | ) 87 | await asyncio.sleep(0.01) 88 | raise Exception("Stops the loop") 89 | 90 | with pytest.raises(Exception) as excinfo: 91 | run(main(), stop_on_unhandled_errors=True, timeout_task_shutdown=2.0) 92 | 93 | print(excinfo.value) 94 | print(excinfo.traceback) 95 | assert "tasks refused to exit after 2.0 seconds" in caplog.text 96 | assert "Stops the loop" in str(excinfo.value) 97 | assert all(t.cancelled for t in created_tasks) 98 | 99 | 100 | -------------------------------------------------------------------------------- /tests/test_win.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | import time 4 | import signal 5 | import subprocess as sp 6 | import pytest 7 | 8 | 9 | if 
def test_sig():
    """CTRL_BREAK_EVENT must trigger aiorun's full shutdown sequence."""
    child = sp.Popen(
        [sys.executable, "tests/fake_main.py"],
        stdout=sp.PIPE,
        stderr=sp.STDOUT,
        # A fresh process group so the break event reaches only the child.
        creationflags=sp.CREATE_NEW_PROCESS_GROUP,
    )
    # Give the child time to enter run() before signalling it.
    time.sleep(0.5)
    # proc.send_signal(signal.CTRL_BREAK_EVENT)
    # os.kill(proc.pid, signal.CTRL_C_EVENT)
    os.kill(child.pid, signal.CTRL_BREAK_EVENT)
    print("Send signal")
    child.wait(timeout=5)
    output = child.stdout.read().decode()
    print(output)
    assert child.returncode == 0

    # Every phase of the shutdown sequence must appear in the child's log.
    for phrase in (
        "Entering run()",
        "Entering shutdown phase",
        "Cancelling pending tasks",
        "Closing the loop",
        "Bye!",
    ):
        assert phrase in output