├── .editorconfig ├── .github ├── ISSUE_TEMPLATE.md └── workflows │ ├── publish.yml │ └── tox.yml ├── .gitignore ├── COPYING ├── MANIFEST.in ├── Makefile ├── README.md ├── benchmark ├── benchmark_read_common.py ├── benchmark_read_linux_aio.py ├── benchmark_read_python_aio.py ├── benchmark_read_thread_aio.py ├── benchmark_write_common.py ├── benchmark_write_linux_aio.py ├── benchmark_write_python_aio.py ├── benchmark_write_thread_aio.py └── gen_data.py ├── caio ├── __init__.py ├── abstract.py ├── asyncio_base.py ├── linux_aio.c ├── linux_aio.pyi ├── linux_aio_asyncio.py ├── py.typed ├── python_aio.py ├── python_aio_asyncio.py ├── src │ └── threadpool │ │ ├── LICENSE │ │ ├── LINK │ │ ├── README.md │ │ ├── threadpool.c │ │ └── threadpool.h ├── thread_aio.c ├── thread_aio.pyi ├── thread_aio_asyncio.py └── version.py ├── example.py ├── pyproject.toml ├── scripts └── make-wheels.sh ├── setup.py └── tests ├── conftest.py ├── test_aio_context.py ├── test_asyncio_adapter.py └── test_impl_selector.py /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | end_of_line = lf 5 | insert_final_newline = true 6 | charset = utf-8 7 | trim_trailing_whitespace = true 8 | 9 | [*.{py,yml}] 10 | indent_style = space 11 | 12 | [*.py] 13 | indent_size = 4 14 | 15 | [docs/**.py] 16 | max_line_length = 80 17 | 18 | [*.rst] 19 | indent_size = 3 20 | 21 | [Makefile] 22 | indent_style = tab 23 | 24 | [*.yml] 25 | indent_size = 2 26 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## Long story short 2 | 3 | 4 | 5 | ## Expected behavior 6 | 7 | 8 | 9 | ## Actual behavior 10 | 11 | 12 | 13 | ## Steps to reproduce 14 | 15 | 18 | 19 | ## Environment info 20 | 21 | Kernel version: `replace here to "uname -a" output` 22 | File system: `your file system` 23 | 24 | I have been produced this problem with implementations: 25 | 26 | * [] `export CAIO_IMPL=linux` - Native linux implementation 27 | * [] `export CAIO_IMPL=thread` - Thread implementation 28 | * [] `export CAIO_IMPL=python` - Pure Python implementation 29 | 30 | ## Additional info 31 | 32 | 35 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a single version of Python 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: publish 5 | 6 | on: 7 | release: 8 | types: 9 | - created 10 | 11 | jobs: 12 | sdist: 13 | 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | 18 | - uses: actions/checkout@v2 19 | with: 20 | submodules: recursive 21 | 22 | - name: Setup python3.9 23 | uses: actions/setup-python@v2 24 | with: 25 | python-version: "3.9" 26 | 27 | - name: Install requires 28 | run: python -m pip install twine build 29 | 30 | - name: Build source package 31 | run: python -m build --sdist 32 | 33 | - name: Publishing to pypi 34 | run: twine upload --skip-existing --disable-progress-bar dist/*.tar.gz 35 | env: 36 | TWINE_USERNAME: __token__ 37 | TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }} 38 | 39 | wheel: 40 | 41 | runs-on: ${{ matrix.os }} 42 | 43 | strategy: 44 | fail-fast: false 45 | 46 | matrix: 47 | include: 48 | # MacOS 49 | - python: '3.9' 50 | 
os: macos-latest 51 | - python: '3.10' 52 | os: macos-latest 53 | - python: '3.11' 54 | os: macos-latest 55 | - python: '3.12' 56 | os: macos-latest 57 | - python: '3.13' 58 | os: macos-latest 59 | # Windows 60 | - python: '3.9' 61 | os: windows-latest 62 | - python: '3.10' 63 | os: windows-latest 64 | - python: '3.11' 65 | os: windows-latest 66 | - python: '3.12' 67 | os: windows-latest 68 | - python: '3.13' 69 | os: windows-latest 70 | 71 | steps: 72 | - uses: actions/checkout@v2 73 | with: 74 | submodules: recursive 75 | 76 | - name: Setup python${{ matrix.python }} 77 | uses: actions/setup-python@v2 78 | with: 79 | python-version: "${{ matrix.python }}" 80 | 81 | - name: Install requires 82 | run: python -m pip install twine build 83 | 84 | - name: Build wheel for python "${{ matrix.python }}" 85 | run: python -m build --wheel 86 | 87 | - name: Publishing to pypi 88 | run: twine upload --skip-existing --disable-progress-bar dist/*.whl 89 | env: 90 | TWINE_USERNAME: __token__ 91 | TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }} 92 | 93 | 94 | linux-wheels: 95 | 96 | runs-on: ubuntu-latest 97 | 98 | steps: 99 | - uses: actions/checkout@v2 100 | with: 101 | submodules: recursive 102 | 103 | - name: Building manylinux2014 wheels 104 | uses: docker://quay.io/pypa/manylinux2014_x86_64 105 | with: 106 | args: /bin/bash scripts/make-wheels.sh 107 | 108 | - name: Setup python${{ matrix.python }} 109 | uses: actions/setup-python@v2 110 | with: 111 | python-version: "3.10" 112 | 113 | - name: Install requires 114 | run: python -m pip install twine 115 | 116 | - name: Publishing to pypi 117 | run: twine upload --skip-existing --disable-progress-bar dist/*.whl 118 | env: 119 | TWINE_USERNAME: __token__ 120 | TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }} 121 | -------------------------------------------------------------------------------- /.github/workflows/tox.yml: -------------------------------------------------------------------------------- 1 | name: tox 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | jobs: 10 | lint: 11 | 12 | runs-on: ubuntu-latest 13 | 14 | strategy: 15 | matrix: 16 | toxenv: 17 | - lint 18 | - mypy 19 | 20 | steps: 21 | - uses: actions/checkout@v2 22 | 23 | - name: Setup python${{ matrix.python }} 24 | uses: actions/setup-python@v2 25 | with: 26 | python-version: "3.10" 27 | 28 | - name: Install tox 29 | run: python -m pip install tox 30 | 31 | - name: tox ${{ matrix.toxenv }} 32 | run: tox 33 | env: 34 | TOXENV: ${{ matrix.toxenv }} 35 | FORCE_COLOR: 1 36 | COVERALLS_REPO_TOKEN: ${{ secrets.COVERALLS_REPO_TOKEN }} 37 | 38 | 39 | tests: 40 | needs: lint 41 | runs-on: ${{ matrix.os }} 42 | 43 | strategy: 44 | fail-fast: false 45 | 46 | matrix: 47 | include: 48 | - toxenv: py39 49 | python: "3.9" 50 | os: ubuntu-latest 51 | - toxenv: py310 52 | python: "3.10" 53 | os: ubuntu-latest 54 | - toxenv: py311 55 | python: "3.11" 56 | os: ubuntu-latest 57 | - toxenv: py312 58 | python: "3.12" 59 | os: ubuntu-latest 60 | - toxenv: py313 61 | python: "3.13" 62 | os: ubuntu-latest 63 | - toxenv: py39 64 | python: "3.9" 65 | os: windows-latest 66 | - toxenv: py310 67 | python: "3.10" 68 | os: windows-latest 69 | - toxenv: py311 70 | python: "3.11" 71 | os: windows-latest 72 | - toxenv: py313 73 | python: "3.13" 74 | os: windows-latest 75 | - toxenv: py39 76 | python: "3.9" 77 | os: macos-latest 78 | - toxenv: py310 79 | python: "3.10" 80 | os: macos-latest 81 | - toxenv: py311 82 | python: "3.11" 83 | os: macos-latest 84 | - toxenv: py312 85 | 
python: "3.12" 86 | os: macos-latest 87 | - toxenv: py313 88 | python: "3.13" 89 | os: macos-latest 90 | 91 | steps: 92 | - uses: actions/checkout@v2 93 | 94 | - name: Setup python${{ matrix.python }} 95 | uses: actions/setup-python@v2 96 | with: 97 | python-version: "${{ matrix.python }}" 98 | 99 | - name: Install tox 100 | run: python -m pip install tox 101 | 102 | - name: tox ${{ matrix.toxenv }} 103 | run: tox 104 | env: 105 | TOXENV: ${{ matrix.toxenv }} 106 | FORCE_COLOR: 1 107 | COVERALLS_PARALLEL: 'true' 108 | COVERALLS_SERVICE_NAME: github 109 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 110 | 111 | finish: 112 | needs: 113 | - tests 114 | runs-on: ubuntu-latest 115 | steps: 116 | - name: Coveralls Finished 117 | uses: coverallsapp/github-action@master 118 | with: 119 | github-token: ${{ secrets.github_token }} 120 | parallel-finished: true 121 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | .venv* 113 | 114 | # Spyder project settings 115 | .spyderproject 116 | .spyproject 117 | 118 | # Rope project settings 119 | .ropeproject 120 | 121 | # mkdocs documentation 122 | /site 123 | 124 | # mypy 125 | .mypy_cache/ 126 | .dmypy.json 127 | dmypy.json 128 | 129 | # Pyre type checker 130 | .pyre/ 131 | *.bin 132 | -------------------------------------------------------------------------------- /COPYING: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2025 Dmitry Orlov 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | 
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | recursive-include caio/src/threadpool *.*
2 | 
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | build: sdist mac_wheel linux_wheel
2 | 
3 | .PHONY: sdist mac_wheel linux_wheel
4 | 
5 | sdist:
6 | 	python3 setup.py sdist
7 | 
8 | .venvs:
9 | 	mkdir -p $@
10 | 
11 | .venvs/3.9: .venvs
12 | 	python3.9 -m venv $@
13 | 	$@/bin/python -m pip install -U pip setuptools build wheel
14 | 
15 | .venvs/3.10: .venvs
16 | 	python3.10 -m venv $@
17 | 	$@/bin/python -m pip install -U pip setuptools build wheel
18 | 
19 | .venvs/3.11: .venvs
20 | 	python3.11 -m venv $@
21 | 	$@/bin/python -m pip install -U pip setuptools build wheel
22 | 
23 | .venvs/3.12: .venvs
24 | 	python3.12 -m venv $@
25 | 	$@/bin/python -m pip install -U pip setuptools build wheel
26 | 
27 | .venvs/3.13: .venvs
28 | 	python3.13 -m venv $@
29 | 	$@/bin/python -m pip install -U pip setuptools build wheel
30 | 
31 | 
32 | mac_wheel: .venvs/3.9 .venvs/3.10 .venvs/3.11 .venvs/3.12 .venvs/3.13
33 | 	.venvs/3.9/bin/python -m build
34 | 	.venvs/3.10/bin/python -m build
35 | 	.venvs/3.11/bin/python -m build
36 | 	.venvs/3.12/bin/python -m build
37 | 	.venvs/3.13/bin/python -m build
38 | 
39 | linux_wheel:
40 | 	docker run -it --rm \
41 | 		-v `pwd`:/mnt \
42 | 		--entrypoint /bin/bash \
43 | 		--workdir /mnt \
44 | 		--platform linux/amd64 \
45 | 		quay.io/pypa/manylinux_2_34_x86_64 \
46 | 		scripts/make-wheels.sh
47 | 
48 | 	docker run -it --rm \
49 | 		-v `pwd`:/mnt \
50 | 		--entrypoint /bin/bash \
51 | 		--platform linux/arm64 \
52 | 		--workdir /mnt \
53 | 		quay.io/pypa/manylinux_2_34_aarch64 \
54 | 		scripts/make-wheels.sh
55 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Python wrapper for AIO
2 | ======================
3 | 
4 | > **NOTE:** The native Linux AIO implementation is supported since kernel version 4.18.
5 | 
6 | Python bindings for the Linux AIO API and a simple asyncio wrapper.
7 | 
8 | Example
9 | -------
10 | 
11 | ```python
12 | 
13 | import asyncio
14 | from caio import AsyncioContext
15 | 
16 | loop = asyncio.get_event_loop()
17 | 
18 | async def main():
19 |     # max_requests is 512 by default; set it explicitly here
20 |     ctx = AsyncioContext(max_requests=128)
21 | 
22 |     with open("test.file", "wb+") as fp:
23 |         fd = fp.fileno()
24 | 
25 |         # Execute one write operation
26 |         await ctx.write(b"Hello world", fd, offset=0)
27 | 
28 |         # Execute one read operation
29 |         print(await ctx.read(32, fd, offset=0))
30 | 
31 |         # Execute one fdsync operation
32 |         await ctx.fdsync(fd)
33 | 
34 |         op1 = ctx.write(b"Hello from ", fd, offset=0)
35 |         op2 = ctx.write(b"async world", fd, offset=11)
36 | 
37 |         await asyncio.gather(op1, op2)
38 | 
39 |         print(await ctx.read(32, fd, offset=0))
40 |         # Hello from async world
41 | 
42 | 
43 | loop.run_until_complete(main())
44 | ```
45 | 
46 | Troubleshooting
47 | ---------------
48 | 
49 | The `linux` implementation works well with modern Linux kernel versions
50 | and file systems, so any problems you hit are most likely specific to your environment.
51 | This is not a bug, and it can usually be resolved in one of the following ways:
52 | 
53 | 1. Upgrade the kernel
54 | 2. Use a compatible file system
55 | 3. Use the thread-based or pure Python implementation (see the sketch below).
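For example, a quick way to try the thread implementation is to set the environment variable before the process starts (`export CAIO_IMPL=thread`), or, as a minimal sketch, from Python before `caio` is first imported. The printed module path is an assumption based on how `caio/__init__.py` in this repository selects the preferred implementation:

```python
# Sketch: force the thread-based backend and check which one was selected.
# CAIO_IMPL must be set before the first `import caio`, because the
# implementation is chosen at import time.
import os

os.environ["CAIO_IMPL"] = "thread"

import caio  # noqa: E402  (imported after setting the environment variable)

# Prints e.g. "caio.thread_aio_asyncio" when the thread backend is active
print(caio.AsyncioContext.__module__)
```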
56 | 
57 | Since version 0.7.0, caio provides the following ways to do this:
58 | 
59 | 1. At runtime, set the environment variable `CAIO_IMPL` to one of the following values:
60 |    * `linux` - use the native Linux kernel AIO mechanism
61 |    * `thread` - use the thread-based implementation written in C
62 |    * `python` - use the pure Python implementation
63 | 2. Create a file named ``default_implementation`` next to ``__init__.py`` in the
64 |    caio installation path. This is useful for distribution package maintainers.
65 |    The file may contain comments (lines starting with the ``#`` symbol), and the
66 |    first non-comment line should be one of ``linux``, ``thread`` or ``python``.
67 | 
68 | Previous versions allowed direct import of the target implementation.
69 | 
--------------------------------------------------------------------------------
/benchmark/benchmark_read_common.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import os
3 | import time
4 | from functools import lru_cache
5 | 
6 | from caio.asyncio_base import AsyncioContextBase
7 | 
8 | 
9 | chunk_size = 16 * 1024  # 1024
10 | context_max_requests = 512
11 | 
12 | 
13 | @lru_cache(1024)
14 | def open_file_by_id(file_id):
15 |     fname = f"data/{file_id}.bin"
16 |     return open(fname, "rb"), os.stat(fname).st_size
17 | 
18 | 
19 | async def read_file(ctx: AsyncioContextBase, file_id):
20 |     offset = 0
21 | 
22 |     fp, file_size = open_file_by_id(file_id)
23 |     fd = fp.fileno()
24 | 
25 |     c = 0
26 |     futures = []
27 |     while offset < file_size:
28 |         futures.append(ctx.read(chunk_size, fd, offset))
29 |         offset += chunk_size
30 |         c += 1
31 | 
32 |     await asyncio.gather(*futures)
33 |     return c
34 | 
35 | 
36 | async def timer(future):
37 |     await asyncio.sleep(0)
38 |     delta = time.monotonic()
39 |     return await future, time.monotonic() - delta
40 | 
41 | 
42 | async def main(context_maker):
43 |     print("files nr min median max op/s total #ops chunk")
44 | 
45 |     for generation in range(1, 129):
46 |         context = context_maker(context_max_requests)
47 | 
48 |         futures = []
49 | 
50 |         for file_id in range(generation):
51 |             futures.append(read_file(context, file_id))
52 | 
53 |         stat = []
54 |         total = -time.monotonic()
55 |         nops = 0
56 | 
57 |         for ops, delta in await asyncio.gather(*map(timer, futures)):
58 |             stat.append(delta)
59 |             nops += ops
60 | 
61 |         total += time.monotonic()
62 | 
63 |         stat = sorted(stat)
64 | 
65 |         ops_sec = nops / total
66 | 
67 |         dmin = stat[0]
68 |         dmedian = stat[int(len(stat) / 2)]
69 |         dmax = stat[-1]
70 | 
71 |         print(
72 |             "%5d %4d %2.6f %2.6f %2.6f %6d %-3.6f %5d %d"
73 |             % (
74 |                 generation,
75 |                 context_max_requests,
76 |                 dmin,
77 |                 dmedian,
78 |                 dmax,
79 |                 ops_sec,
80 |                 total,
81 |                 nops,
82 |                 chunk_size,
83 |             ),
84 |         )
85 | 
86 |         context.close()
87 | 
--------------------------------------------------------------------------------
/benchmark/benchmark_read_linux_aio.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | 
3 | from benchmark_read_common import main
4 | from caio.linux_aio_asyncio import AsyncioContext
5 | 
6 | 
7 | if __name__ == "__main__":
8 |     asyncio.run(main(AsyncioContext))
9 | 
--------------------------------------------------------------------------------
/benchmark/benchmark_read_python_aio.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | 
3 | from benchmark_read_common import main
4 | from caio.python_aio_asyncio import AsyncioContext
5 | 
6 | 
7 | if __name__ == "__main__":
8 |     asyncio.run(main(AsyncioContext))
9 | 
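The read-benchmark entry points above, together with `benchmark_read_thread_aio.py` below, differ only in which `AsyncioContext` they import. A small sketch (not part of the repository) that drives the same `main()` coroutine against every implementation available on the current machine via `caio.variants_asyncio` from `caio/__init__.py`; it assumes the `data/` directory has already been generated with `gen_data.py` and that it is run from the `benchmark/` directory:

```python
# Sketch: run benchmark_read_common.main against every available backend.
import asyncio

from benchmark_read_common import main
from caio import variants_asyncio

if __name__ == "__main__":
    for module in variants_asyncio:
        print("==> running with", module.__name__)
        asyncio.run(main(module.AsyncioContext))
```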
-------------------------------------------------------------------------------- /benchmark/benchmark_read_thread_aio.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from benchmark_read_common import main 4 | from caio.thread_aio_asyncio import AsyncioContext 5 | 6 | 7 | if __name__ == "__main__": 8 | asyncio.run(main(AsyncioContext)) 9 | -------------------------------------------------------------------------------- /benchmark/benchmark_write_common.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import os 3 | import time 4 | import typing 5 | from tempfile import NamedTemporaryFile 6 | 7 | from caio.asyncio_base import AsyncioContextBase 8 | 9 | 10 | data = os.urandom(1024) 11 | 12 | 13 | async def main(context_maker: typing.Type[AsyncioContextBase]): 14 | async with context_maker() as context: 15 | with NamedTemporaryFile(mode="wb+") as fp: 16 | 17 | async def writer(offset=0): 18 | timer = - time.monotonic() 19 | fileno = fp.file.fileno() 20 | 21 | futures = [] 22 | for i in range(1, 2 ** 15): 23 | futures.append( 24 | context.write(data, fileno, offset * i * len(data)), 25 | ) 26 | 27 | await asyncio.gather(*futures) 28 | timer += time.monotonic() 29 | print("Done", timer) 30 | 31 | return timer 32 | 33 | timers = [] 34 | for i in range(10): 35 | timers.append(await writer(i)) 36 | 37 | print(sum(timers) / len(timers)) 38 | -------------------------------------------------------------------------------- /benchmark/benchmark_write_linux_aio.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from benchmark_write_common import main 4 | from caio.linux_aio_asyncio import AsyncioContext 5 | 6 | 7 | if __name__ == "__main__": 8 | asyncio.run(main(AsyncioContext)) 9 | -------------------------------------------------------------------------------- /benchmark/benchmark_write_python_aio.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from benchmark_write_common import main 4 | from caio.python_aio_asyncio import AsyncioContext 5 | 6 | 7 | if __name__ == "__main__": 8 | asyncio.run(main(AsyncioContext)) 9 | -------------------------------------------------------------------------------- /benchmark/benchmark_write_thread_aio.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from benchmark_write_common import main 4 | from caio.thread_aio_asyncio import AsyncioContext 5 | 6 | 7 | if __name__ == "__main__": 8 | asyncio.run(main(AsyncioContext)) 9 | -------------------------------------------------------------------------------- /benchmark/gen_data.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | import os 3 | from multiprocessing.pool import ThreadPool 4 | 5 | import tqdm 6 | 7 | 8 | POOL = ThreadPool(32) 9 | 10 | 11 | def gen_data(file_id): 12 | seed = os.urandom(64) 13 | hasher = hashlib.sha512() 14 | 15 | with open(f"data/{file_id}.bin", "wb+") as fp: 16 | for _ in range(100000): 17 | hasher.update(seed) 18 | seed = hasher.digest() 19 | fp.write(seed) 20 | 21 | 22 | def main(): 23 | files = 128 24 | 25 | iterator = tqdm.tqdm( 26 | POOL.imap_unordered(gen_data, range(files)), total=files, 27 | ) 28 | 29 | for _ in iterator: 30 | pass 31 | 32 | 33 | if __name__ == "__main__": 34 | main() 35 | 
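A note on disk usage for the benchmarks: `gen_data.py` above writes 100000 SHA-512 digests of 64 bytes per file, for 128 files, so the size of the generated `data/` directory follows directly from the script (a back-of-the-envelope sketch, not measured output):

```python
# Expected size of the data set produced by gen_data.py:
# 100000 iterations x 64-byte SHA-512 digest per file, 128 files in total.
per_file = 100_000 * 64      # 6,400,000 bytes, about 6.1 MiB per file
total = per_file * 128       # 819,200,000 bytes, about 781 MiB for data/
print(f"{per_file / 2**20:.1f} MiB per file, {total / 2**20:.0f} MiB total")
```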
-------------------------------------------------------------------------------- /caio/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import warnings 3 | 4 | from . import python_aio, python_aio_asyncio 5 | from .abstract import AbstractContext, AbstractOperation 6 | from .version import __author__, __version__ 7 | 8 | 9 | try: 10 | from . import linux_aio, linux_aio_asyncio 11 | except ImportError: 12 | linux_aio = None # type: ignore 13 | linux_aio_asyncio = None # type: ignore 14 | 15 | try: 16 | from . import thread_aio, thread_aio_asyncio 17 | except ImportError: 18 | thread_aio = None # type: ignore 19 | thread_aio_asyncio = None # type: ignore 20 | 21 | 22 | variants = tuple(filter(None, [linux_aio, thread_aio, python_aio])) 23 | variants_asyncio = tuple( 24 | filter( 25 | None, [ 26 | linux_aio_asyncio, 27 | thread_aio_asyncio, 28 | python_aio_asyncio, 29 | ], 30 | ), 31 | ) 32 | 33 | preferred = variants[0] 34 | preferred_asyncio = variants_asyncio[0] 35 | 36 | 37 | def __select_implementation(): 38 | global preferred 39 | global preferred_asyncio 40 | 41 | implementations = { 42 | "linux": (linux_aio, linux_aio_asyncio), 43 | "thread": (thread_aio, thread_aio_asyncio), 44 | "python": (python_aio, python_aio_asyncio), 45 | } 46 | 47 | implementations = {k: v for k, v in implementations.items() if all(v)} 48 | 49 | default_implementation = os.path.join( 50 | os.path.dirname(os.path.abspath(__file__)), "default_implementation", 51 | ) 52 | 53 | requested = os.getenv("CAIO_IMPL") 54 | 55 | if not requested and os.path.isfile(default_implementation): 56 | with open(default_implementation, "r") as fp: 57 | for line in fp: 58 | line = line.strip() 59 | if line.startswith("#") or not line: 60 | continue 61 | if line in implementations: 62 | requested = line 63 | break 64 | 65 | elif requested and requested not in implementations: 66 | warnings.warn( 67 | "CAIO_IMPL contains unsupported value %r. 
Use one of %r" % ( 68 | requested, tuple(implementations), 69 | ), 70 | RuntimeWarning, 71 | ) 72 | return 73 | 74 | preferred, preferred_asyncio = implementations.get( 75 | requested, 76 | (preferred, preferred_asyncio), 77 | ) 78 | 79 | 80 | __select_implementation() 81 | 82 | 83 | Context = preferred.Context # type: ignore 84 | Operation = preferred.Operation # type: ignore 85 | AsyncioContext = preferred_asyncio.AsyncioContext # type: ignore 86 | 87 | 88 | __all__ = ( 89 | "Context", 90 | "Operation", 91 | "AsyncioContext", 92 | "AbstractContext", 93 | "AbstractOperation", 94 | "python_aio", 95 | "python_aio_asyncio", 96 | "linux_aio", 97 | "linux_aio_asyncio", 98 | "thread_aio", 99 | "thread_aio_asyncio", 100 | "__version__", 101 | "__author__", 102 | "variants", 103 | "variants_asyncio", 104 | ) 105 | -------------------------------------------------------------------------------- /caio/abstract.py: -------------------------------------------------------------------------------- 1 | import abc 2 | from typing import Any, Callable, Optional, Union 3 | 4 | 5 | class AbstractContext(abc.ABC): 6 | @property 7 | def max_requests(self) -> int: 8 | raise NotImplementedError 9 | 10 | def submit(self, *aio_operations) -> int: 11 | raise NotImplementedError(aio_operations) 12 | 13 | def cancel(self, *aio_operations) -> int: 14 | raise NotImplementedError(aio_operations) 15 | 16 | 17 | class AbstractOperation(abc.ABC): 18 | @classmethod 19 | @abc.abstractmethod 20 | def read( 21 | cls, nbytes: int, fd: int, 22 | offset: int, priority=0, 23 | ) -> "AbstractOperation": 24 | """ 25 | Creates a new instance of AIOOperation on read mode. 26 | """ 27 | raise NotImplementedError 28 | 29 | @classmethod 30 | @abc.abstractmethod 31 | def write( 32 | cls, payload_bytes: bytes, 33 | fd: int, offset: int, priority=0, 34 | ) -> "AbstractOperation": 35 | """ 36 | Creates a new instance of AIOOperation on write mode. 37 | """ 38 | raise NotImplementedError 39 | 40 | @classmethod 41 | @abc.abstractmethod 42 | def fsync(cls, fd: int, priority=0) -> "AbstractOperation": 43 | """ 44 | Creates a new instance of AIOOperation on fsync mode. 45 | """ 46 | raise NotImplementedError 47 | 48 | @classmethod 49 | @abc.abstractmethod 50 | def fdsync(cls, fd: int, priority=0) -> "AbstractOperation": 51 | 52 | """ 53 | Creates a new instance of AIOOperation on fdsync mode. 54 | """ 55 | raise NotImplementedError 56 | 57 | @abc.abstractmethod 58 | def get_value(self) -> Union[bytes, int]: 59 | """ 60 | Method returns a bytes value of AIOOperation's result or None. 61 | """ 62 | raise NotImplementedError 63 | 64 | @property 65 | @abc.abstractmethod 66 | def fileno(self) -> int: 67 | raise NotImplementedError 68 | 69 | @property 70 | @abc.abstractmethod 71 | def offset(self) -> int: 72 | raise NotImplementedError 73 | 74 | @property 75 | @abc.abstractmethod 76 | def payload(self) -> Optional[Union[bytes, memoryview]]: 77 | raise NotImplementedError 78 | 79 | @property 80 | @abc.abstractmethod 81 | def nbytes(self) -> int: 82 | raise NotImplementedError 83 | 84 | @abc.abstractmethod 85 | def set_callback(self, callback: Callable[[int], Any]) -> bool: 86 | raise NotImplementedError 87 | -------------------------------------------------------------------------------- /caio/asyncio_base.py: -------------------------------------------------------------------------------- 1 | import abc 2 | import asyncio 3 | import typing 4 | from functools import partial 5 | 6 | from . 
import abstract 7 | 8 | 9 | ContextType = typing.Type[abstract.AbstractContext] 10 | OperationType = typing.Type[abstract.AbstractOperation] 11 | 12 | 13 | class AsyncioContextBase(abc.ABC): 14 | MAX_REQUESTS_DEFAULT = 512 15 | CONTEXT_CLASS = None # type: ContextType 16 | OPERATION_CLASS = None # type: OperationType 17 | 18 | def __init__(self, max_requests=None, loop=None, **kwargs): 19 | max_requests = max_requests or self.MAX_REQUESTS_DEFAULT 20 | self.loop = loop or asyncio.get_event_loop() 21 | self.semaphore = asyncio.BoundedSemaphore(max_requests) 22 | self.context = self._create_context(max_requests, **kwargs) 23 | 24 | def _create_context(self, max_requests, **kwargs): 25 | return self.CONTEXT_CLASS(max_requests=max_requests, **kwargs) 26 | 27 | def _destroy_context(self): 28 | return 29 | 30 | async def __aenter__(self): 31 | return self 32 | 33 | async def __aexit__(self, exc_type, exc_val, exc_tb): 34 | self.close() 35 | 36 | def close(self): 37 | self._destroy_context() 38 | 39 | async def submit(self, op): 40 | if not isinstance(op, self.OPERATION_CLASS): 41 | raise ValueError("Operation object expected") 42 | 43 | future = self.loop.create_future() 44 | op.set_callback(partial(self._on_done, future)) 45 | 46 | async with self.semaphore: 47 | if self.context.submit(op) != 1: 48 | raise IOError("Operation was not submitted") 49 | 50 | try: 51 | await future 52 | except asyncio.CancelledError: 53 | try: 54 | self.context.cancel(op) 55 | except ValueError: 56 | pass 57 | raise 58 | return op.get_value() 59 | 60 | def _on_done(self, future, result): 61 | """ 62 | In general case impossible predict current thread and the thread 63 | of event loop. So have to use `call_soon_threadsave` the result setter. 64 | """ 65 | 66 | if future.done(): 67 | return 68 | 69 | self.loop.call_soon_threadsafe( 70 | lambda: future.done() or future.set_result(True), 71 | ) 72 | 73 | def read( 74 | self, nbytes: int, fd: int, 75 | offset: int, priority: int = 0, 76 | ) -> typing.Awaitable[bytes]: 77 | return self.submit( 78 | self.OPERATION_CLASS.read(nbytes, fd, offset, priority), 79 | ) 80 | 81 | def write( 82 | self, payload: bytes, fd: int, 83 | offset: int, priority: int = 0, 84 | ) -> typing.Awaitable[int]: 85 | return self.submit( 86 | self.OPERATION_CLASS.write(payload, fd, offset, priority), 87 | ) 88 | 89 | def fsync(self, fd: int) -> typing.Awaitable: 90 | return self.submit(self.OPERATION_CLASS.fsync(fd)) 91 | 92 | def fdsync(self, fd: int) -> typing.Awaitable: 93 | return self.submit(self.OPERATION_CLASS.fdsync(fd)) 94 | -------------------------------------------------------------------------------- /caio/linux_aio.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #define PY_SSIZE_T_CLEAN 12 | #include 13 | #include 14 | #include 15 | 16 | 17 | static const unsigned CTX_MAX_REQUESTS_DEFAULT = 32; 18 | static const unsigned EV_MAX_REQUESTS_DEFAULT = 512; 19 | static int kernel_support = -1; 20 | 21 | inline static int io_setup(unsigned nr, aio_context_t *ctxp) { 22 | return syscall(__NR_io_setup, nr, ctxp); 23 | } 24 | 25 | 26 | inline static int io_destroy(aio_context_t ctx) { 27 | return syscall(__NR_io_destroy, ctx); 28 | } 29 | 30 | 31 | inline static int io_getevents( 32 | aio_context_t ctx, long min_nr, long max_nr, 33 | struct io_event *events, struct timespec *timeout 34 | ) { 35 | return syscall(__NR_io_getevents, ctx, 
min_nr, max_nr, events, timeout); 36 | } 37 | 38 | 39 | inline static int io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp) { 40 | return syscall(__NR_io_submit, ctx, nr, iocbpp); 41 | } 42 | 43 | inline static long io_cancel(aio_context_t ctx, struct iocb *aiocb, struct io_event *res) { 44 | return syscall(__NR_io_cancel, ctx, aiocb, res); 45 | } 46 | 47 | 48 | inline int io_cancel_error(int result) { 49 | if (result == 0) return result; 50 | 51 | switch (errno) { 52 | case EAGAIN: 53 | PyErr_SetString( 54 | PyExc_SystemError, 55 | "Specified operation was not canceled [EAGAIN]" 56 | ); 57 | break; 58 | case EFAULT: 59 | PyErr_SetString( 60 | PyExc_RuntimeError, 61 | "One of the data structures points to invalid data [EFAULT]" 62 | ); 63 | break; 64 | case EINVAL: 65 | PyErr_SetString( 66 | PyExc_ValueError, 67 | "The AIO context specified by ctx_id is invalid [EINVAL]" 68 | ); 69 | break; 70 | case ENOSYS: 71 | PyErr_SetString( 72 | PyExc_NotImplementedError, 73 | "io_cancel() is not implemented on this architecture [ENOSYS]" 74 | ); 75 | break; 76 | default: 77 | PyErr_SetFromErrno(PyExc_SystemError); 78 | break; 79 | } 80 | 81 | return result; 82 | } 83 | 84 | 85 | inline int io_submit_error(int result) { 86 | if (result >= 0) return result; 87 | 88 | switch (errno) { 89 | case EAGAIN: 90 | PyErr_SetString( 91 | PyExc_OverflowError, 92 | "Insufficient resources are available to queue any iocbs [EAGAIN]" 93 | ); 94 | break; 95 | case EBADF: 96 | PyErr_SetString( 97 | PyExc_ValueError, 98 | "The file descriptor specified in the first iocb is invalid [EBADF]" 99 | ); 100 | break; 101 | case EFAULT: 102 | PyErr_SetString( 103 | PyExc_ValueError, 104 | "One of the data structures points to invalid data [EFAULT]" 105 | ); 106 | break; 107 | case EINVAL: 108 | PyErr_SetString( 109 | PyExc_ValueError, 110 | "The AIO context specified by ctx_id is invalid. nr is less " 111 | "than 0. The iocb at *iocbpp[0] is not properly initialized, " 112 | "the operation specified is invalid for the file descriptor in " 113 | "the iocb, or the value in the aio_reqprio field is invalid. 
" 114 | "[EINVAL]" 115 | ); 116 | break; 117 | default: 118 | PyErr_SetFromErrno(PyExc_SystemError); 119 | break; 120 | } 121 | 122 | return result; 123 | } 124 | 125 | 126 | typedef struct { 127 | PyObject_HEAD 128 | aio_context_t ctx; 129 | int32_t fileno; 130 | uint32_t max_requests; 131 | } AIOContext; 132 | 133 | 134 | typedef struct { 135 | PyObject_HEAD 136 | AIOContext* context; 137 | PyObject* py_buffer; 138 | PyObject* callback; 139 | char* buffer; 140 | int error; 141 | struct iocb iocb; 142 | } AIOOperation; 143 | 144 | 145 | static PyTypeObject* AIOOperationTypeP = NULL; 146 | static PyTypeObject* AIOContextTypeP = NULL; 147 | 148 | 149 | static void 150 | AIOContext_dealloc(AIOContext *self) { 151 | if (self->ctx != 0) { 152 | aio_context_t ctx = self->ctx; 153 | self->ctx = 0; 154 | 155 | io_destroy(ctx); 156 | } 157 | 158 | if (self->fileno >= 0) { 159 | close(self->fileno); 160 | self->fileno = -1; 161 | } 162 | 163 | Py_TYPE(self)->tp_free((PyObject *) self); 164 | } 165 | 166 | /* 167 | AIOContext.__new__ classmethod definition 168 | */ 169 | static PyObject * 170 | AIOContext_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { 171 | AIOContext *self; 172 | 173 | self = (AIOContext *) type->tp_alloc(type, 0); 174 | return (PyObject *) self; 175 | } 176 | 177 | static int 178 | AIOContext_init(AIOContext *self, PyObject *args, PyObject *kwds) 179 | { 180 | static char *kwlist[] = {"max_requests", NULL}; 181 | 182 | self->ctx = 0; 183 | self->max_requests = 0; 184 | self->fileno = eventfd(0, 0); 185 | 186 | if (self->fileno < 0) { 187 | PyErr_SetFromErrno(PyExc_SystemError); 188 | return -1; 189 | } 190 | 191 | if (!PyArg_ParseTupleAndKeywords(args, kwds, "|I", kwlist, &self->max_requests)) { 192 | return -1; 193 | } 194 | 195 | if (self->max_requests <= 0) { 196 | self->max_requests = CTX_MAX_REQUESTS_DEFAULT; 197 | } 198 | 199 | if (io_setup(self->max_requests, &self->ctx) < 0) { 200 | PyErr_SetFromErrno(PyExc_SystemError); 201 | return -1; 202 | } 203 | 204 | return 0; 205 | } 206 | 207 | static PyObject* AIOContext_repr(AIOContext *self) { 208 | if (self->ctx == 0) { 209 | PyErr_SetNone(PyExc_RuntimeError); 210 | return NULL; 211 | } 212 | return PyUnicode_FromFormat( 213 | "<%s as %p: max_requests=%i, ctx=%lli>", 214 | Py_TYPE(self)->tp_name, self, self->max_requests, self->ctx 215 | ); 216 | } 217 | 218 | 219 | 220 | PyDoc_STRVAR(AIOContext_submit_docstring, 221 | "Accepts multiple Operations. Returns \n\n" 222 | " Operation.submit(aio_op1, aio_op2, aio_opN, ...) 
-> int" 223 | ); 224 | static PyObject* AIOContext_submit(AIOContext *self, PyObject *args) { 225 | if (self == 0) { 226 | PyErr_SetString(PyExc_RuntimeError, "self is NULL"); 227 | return NULL; 228 | } 229 | 230 | if (self->ctx == 0) { 231 | PyErr_SetString(PyExc_RuntimeError, "self->ctx is NULL"); 232 | return NULL; 233 | } 234 | 235 | if (!PyTuple_Check(args)) { 236 | PyErr_SetNone(PyExc_ValueError); 237 | return NULL; 238 | } 239 | 240 | int result = 0; 241 | 242 | uint32_t nr = PyTuple_Size(args); 243 | 244 | PyObject* obj; 245 | AIOOperation* op; 246 | 247 | struct iocb** iocbpp = PyMem_Calloc(nr, sizeof(struct iocb*)); 248 | uint32_t i; 249 | 250 | for (i=0; i < nr; i++) { 251 | obj = PyTuple_GetItem(args, i); 252 | if (PyObject_TypeCheck(obj, AIOOperationTypeP) == 0) { 253 | PyErr_Format( 254 | PyExc_TypeError, 255 | "Wrong type for argument %d -> %r", i, obj 256 | ); 257 | PyMem_Free(iocbpp); 258 | return NULL; 259 | } 260 | 261 | op = (AIOOperation*) obj; 262 | 263 | op->context = self; 264 | Py_INCREF(self); 265 | 266 | Py_INCREF(op); 267 | 268 | op->iocb.aio_flags |= IOCB_FLAG_RESFD; 269 | op->iocb.aio_resfd = self->fileno; 270 | 271 | iocbpp[i] = &op->iocb; 272 | } 273 | 274 | result = io_submit(self->ctx, nr, iocbpp); 275 | 276 | if (io_submit_error(result) < 0) { 277 | PyMem_Free(iocbpp); 278 | return NULL; 279 | } 280 | 281 | PyMem_Free(iocbpp); 282 | 283 | return (PyObject*) PyLong_FromSsize_t(result); 284 | } 285 | 286 | 287 | PyDoc_STRVAR(AIOContext_cancel_docstring, 288 | "Cancels multiple Operations. Returns \n\n" 289 | " Operation.cancel(aio_op1, aio_op2, aio_opN, ...) -> int" 290 | ); 291 | static PyObject* AIOContext_cancel(AIOContext *self, PyObject *args, PyObject *kwds) { 292 | if (self == 0) { 293 | PyErr_SetString(PyExc_RuntimeError, "self is NULL"); 294 | return NULL; 295 | } 296 | 297 | if (self->ctx == 0) { 298 | PyErr_SetString(PyExc_RuntimeError, "self->ctx is NULL"); 299 | return NULL; 300 | } 301 | 302 | static char *kwlist[] = {"operation", NULL}; 303 | 304 | AIOOperation* op = NULL; 305 | 306 | if (!PyArg_ParseTupleAndKeywords(args, kwds, "O", kwlist, &op)) return NULL; 307 | if (PyObject_TypeCheck(op, AIOOperationTypeP) == 0) { 308 | PyErr_Format(PyExc_TypeError, "Operation required not %r", op); 309 | return NULL; 310 | } 311 | 312 | struct io_event ev; 313 | 314 | if (io_cancel_error(io_cancel(self->ctx, &op->iocb, &ev))) { 315 | return NULL; 316 | } 317 | 318 | if (ev.res >= 0) { 319 | op->iocb.aio_nbytes = ev.res; 320 | } else { 321 | op->error = -ev.res; 322 | } 323 | 324 | if (op->callback != NULL) { 325 | if (PyObject_CallFunction(op->callback, "K", ev.res) == NULL) { 326 | return NULL; 327 | } 328 | } 329 | 330 | return (PyObject*) PyLong_FromSsize_t(ev.res); 331 | } 332 | 333 | PyDoc_STRVAR(AIOContext_process_events_docstring, 334 | "Gather events for Context. 
\n\n" 335 | " Operation.process_events(max_events, min_events) -> Tuple[Tuple[]]" 336 | ); 337 | static PyObject* AIOContext_process_events( 338 | AIOContext *self, PyObject *args, PyObject *kwds 339 | ) { 340 | if (self->ctx == 0) { 341 | PyErr_SetNone(PyExc_RuntimeError); 342 | return NULL; 343 | } 344 | 345 | uint32_t min_requests = 0; 346 | uint32_t max_requests = 0; 347 | int32_t tv_sec = 0; 348 | struct timespec timeout = {0, 0}; 349 | 350 | static char *kwlist[] = {"max_requests", "min_requests", "timeout", NULL}; 351 | 352 | if (!PyArg_ParseTupleAndKeywords( 353 | args, kwds, "|IIi", kwlist, 354 | &max_requests, &min_requests, &tv_sec 355 | )) { return NULL; } 356 | 357 | timeout.tv_sec = tv_sec; 358 | 359 | if (max_requests == 0) { 360 | max_requests = EV_MAX_REQUESTS_DEFAULT; 361 | } 362 | 363 | if (min_requests > max_requests) { 364 | PyErr_Format( 365 | PyExc_ValueError, 366 | "min_requests \"%d\" must be lower then max_requests \"%d\"", 367 | min_requests, max_requests 368 | ); 369 | return NULL; 370 | } 371 | 372 | struct io_event events[max_requests]; 373 | 374 | int result = io_getevents( 375 | self->ctx, 376 | min_requests, 377 | max_requests, 378 | events, 379 | &timeout 380 | ); 381 | 382 | if (result < 0) { 383 | PyErr_SetFromErrno(PyExc_SystemError); 384 | return NULL; 385 | } 386 | 387 | AIOOperation* op; 388 | struct io_event* ev; 389 | 390 | int32_t i; 391 | for (i = 0; i < result; i++) { 392 | ev = &events[i]; 393 | 394 | op = (AIOOperation*)(uintptr_t) ev->data; 395 | if (ev->res >= 0) { 396 | op->iocb.aio_nbytes = ev->res; 397 | } else { 398 | op->error = -ev->res; 399 | } 400 | 401 | if (op->callback == NULL) { 402 | continue; 403 | } 404 | 405 | if (PyObject_CallFunction(op->callback, "K", ev->res) == NULL) { 406 | return NULL; 407 | } 408 | 409 | Py_XDECREF(op); 410 | } 411 | 412 | return (PyObject*) PyLong_FromSsize_t(i); 413 | } 414 | 415 | 416 | PyDoc_STRVAR(AIOContext_poll_docstring, 417 | "Read value from context file descriptor.\n\n" 418 | " Context().poll() -> int" 419 | ); 420 | static PyObject* AIOContext_poll( 421 | AIOContext *self, PyObject *args 422 | ) { 423 | if (self->ctx == 0) { 424 | PyErr_SetNone(PyExc_RuntimeError); 425 | return NULL; 426 | } 427 | 428 | if (self->fileno < 0) { 429 | PyErr_SetNone(PyExc_RuntimeError); 430 | return NULL; 431 | } 432 | 433 | uint64_t result = 0; 434 | int size = read(self->fileno, &result, sizeof(uint64_t)); 435 | 436 | if (size != sizeof(uint64_t)) { 437 | PyErr_SetNone(PyExc_BlockingIOError); 438 | return NULL; 439 | } 440 | 441 | return (PyObject*) PyLong_FromUnsignedLongLong(result); 442 | } 443 | 444 | 445 | /* 446 | AIOContext properties 447 | */ 448 | static PyMemberDef AIOContext_members[] = { 449 | { 450 | "fileno", 451 | T_INT, 452 | offsetof(AIOContext, fileno), 453 | READONLY, 454 | "fileno" 455 | }, 456 | { 457 | "max_requests", 458 | T_USHORT, 459 | offsetof(AIOContext, max_requests), 460 | READONLY, 461 | "max requests" 462 | }, 463 | {NULL} /* Sentinel */ 464 | }; 465 | 466 | static PyMethodDef AIOContext_methods[] = { 467 | { 468 | "submit", 469 | (PyCFunction) AIOContext_submit, METH_VARARGS, 470 | AIOContext_submit_docstring 471 | }, 472 | { 473 | "cancel", 474 | (PyCFunction) AIOContext_cancel, METH_VARARGS | METH_KEYWORDS, 475 | AIOContext_cancel_docstring 476 | }, 477 | { 478 | "process_events", 479 | (PyCFunction) AIOContext_process_events, METH_VARARGS | METH_KEYWORDS, 480 | AIOContext_process_events_docstring 481 | }, 482 | { 483 | "poll", 484 | (PyCFunction) AIOContext_poll, 
METH_NOARGS, 485 | AIOContext_poll_docstring 486 | }, 487 | {NULL} /* Sentinel */ 488 | }; 489 | 490 | static PyTypeObject 491 | AIOContextType = { 492 | PyVarObject_HEAD_INIT(NULL, 0) 493 | .tp_name = "Context", 494 | .tp_doc = "linux aio context representation", 495 | .tp_basicsize = sizeof(AIOContext), 496 | .tp_itemsize = 0, 497 | .tp_flags = Py_TPFLAGS_DEFAULT, 498 | .tp_new = AIOContext_new, 499 | .tp_init = (initproc) AIOContext_init, 500 | .tp_dealloc = (destructor) AIOContext_dealloc, 501 | .tp_members = AIOContext_members, 502 | .tp_methods = AIOContext_methods, 503 | .tp_repr = (reprfunc) AIOContext_repr 504 | }; 505 | 506 | 507 | static void 508 | AIOOperation_dealloc(AIOOperation *self) { 509 | Py_CLEAR(self->context); 510 | Py_CLEAR(self->callback); 511 | 512 | if (self->iocb.aio_lio_opcode == IOCB_CMD_PREAD && self->buffer != NULL) { 513 | PyMem_Free(self->buffer); 514 | self->buffer = NULL; 515 | } 516 | 517 | Py_CLEAR(self->py_buffer); 518 | Py_TYPE(self)->tp_free((PyObject *) self); 519 | } 520 | 521 | 522 | static PyObject* AIOOperation_repr(AIOOperation *self) { 523 | char* mode; 524 | 525 | switch (self->iocb.aio_lio_opcode) { 526 | case IOCB_CMD_PREAD: 527 | mode = "read"; 528 | break; 529 | 530 | case IOCB_CMD_PWRITE: 531 | mode = "write"; 532 | break; 533 | 534 | case IOCB_CMD_FSYNC: 535 | mode = "fsync"; 536 | break; 537 | 538 | case IOCB_CMD_FDSYNC: 539 | mode = "fdsync"; 540 | break; 541 | default: 542 | mode = "noop"; 543 | break; 544 | } 545 | 546 | return PyUnicode_FromFormat( 547 | "<%s at %p: mode=\"%s\", fd=%i, offset=%i, buffer=%p>", 548 | Py_TYPE(self)->tp_name, self, mode, 549 | self->iocb.aio_fildes, self->iocb.aio_offset, self->iocb.aio_buf 550 | ); 551 | } 552 | 553 | 554 | /* 555 | AIOOperation.read classmethod definition 556 | */ 557 | PyDoc_STRVAR(AIOOperation_read_docstring, 558 | "Creates a new instance of Operation on read mode.\n\n" 559 | " Operation.read(\n" 560 | " nbytes: int,\n" 561 | " aio_context: Context,\n" 562 | " fd: int, \n" 563 | " offset: int,\n" 564 | " priority=0\n" 565 | " )" 566 | ); 567 | static PyObject* AIOOperation_read( 568 | PyTypeObject *type, PyObject *args, PyObject *kwds 569 | ) { 570 | AIOOperation *self = (AIOOperation *) type->tp_alloc(type, 0); 571 | 572 | static char *kwlist[] = {"nbytes", "fd", "offset", "priority", NULL}; 573 | 574 | if (self == NULL) { 575 | PyErr_SetString(PyExc_MemoryError, "can not allocate memory"); 576 | return NULL; 577 | } 578 | 579 | memset(&self->iocb, 0, sizeof(struct iocb)); 580 | 581 | self->iocb.aio_data = (uint64_t)(uintptr_t) self; 582 | self->context = NULL; 583 | self->buffer = NULL; 584 | self->py_buffer = NULL; 585 | 586 | uint64_t nbytes = 0; 587 | 588 | int argIsOk = PyArg_ParseTupleAndKeywords( 589 | args, kwds, "KI|Lh", kwlist, 590 | &nbytes, 591 | &(self->iocb.aio_fildes), 592 | &(self->iocb.aio_offset), 593 | &(self->iocb.aio_reqprio) 594 | ); 595 | 596 | if (!argIsOk) return NULL; 597 | 598 | self->buffer = PyMem_Calloc(nbytes, sizeof(char)); 599 | self->iocb.aio_buf = (uint64_t)(uintptr_t) self->buffer; 600 | self->iocb.aio_nbytes = nbytes; 601 | self->py_buffer = PyMemoryView_FromMemory(self->buffer, nbytes, PyBUF_READ); 602 | self->iocb.aio_lio_opcode = IOCB_CMD_PREAD; 603 | 604 | return (PyObject*) self; 605 | } 606 | 607 | /* 608 | AIOOperation.write classmethod definition 609 | */ 610 | PyDoc_STRVAR(AIOOperation_write_docstring, 611 | "Creates a new instance of Operation on write mode.\n\n" 612 | " Operation.write(\n" 613 | " payload_bytes: bytes,\n" 614 | " fd: 
int, \n" 615 | " offset: int,\n" 616 | " priority=0\n" 617 | " )" 618 | ); 619 | 620 | static PyObject* AIOOperation_write( 621 | PyTypeObject *type, PyObject *args, PyObject *kwds 622 | ) { 623 | AIOOperation *self = (AIOOperation *) type->tp_alloc(type, 0); 624 | 625 | static char *kwlist[] = {"payload_bytes", "fd", "offset", "priority", NULL}; 626 | 627 | if (self == NULL) { 628 | PyErr_SetString(PyExc_MemoryError, "can not allocate memory"); 629 | return NULL; 630 | } 631 | 632 | memset(&self->iocb, 0, sizeof(struct iocb)); 633 | 634 | self->iocb.aio_data = (uint64_t)(uintptr_t) self; 635 | 636 | self->context = NULL; 637 | self->buffer = NULL; 638 | self->py_buffer = NULL; 639 | 640 | Py_ssize_t nbytes = 0; 641 | 642 | int argIsOk = PyArg_ParseTupleAndKeywords( 643 | args, kwds, "OI|Lh", kwlist, 644 | &(self->py_buffer), 645 | &(self->iocb.aio_fildes), 646 | &(self->iocb.aio_offset), 647 | &(self->iocb.aio_reqprio) 648 | ); 649 | 650 | if (!argIsOk) return NULL; 651 | 652 | if (!PyBytes_Check(self->py_buffer)) { 653 | Py_XDECREF(self); 654 | PyErr_SetString( 655 | PyExc_ValueError, 656 | "payload_bytes argument must be bytes" 657 | ); 658 | return NULL; 659 | } 660 | 661 | self->iocb.aio_lio_opcode = IOCB_CMD_PWRITE; 662 | 663 | if (PyBytes_AsStringAndSize( 664 | self->py_buffer, 665 | &self->buffer, 666 | &nbytes 667 | )) { 668 | Py_XDECREF(self); 669 | PyErr_SetString( 670 | PyExc_RuntimeError, 671 | "Can not convert bytes to c string" 672 | ); 673 | return NULL; 674 | } 675 | 676 | Py_INCREF(self->py_buffer); 677 | 678 | self->iocb.aio_nbytes = nbytes; 679 | self->iocb.aio_buf = (uint64_t)(uintptr_t) self->buffer; 680 | 681 | return (PyObject*) self; 682 | } 683 | 684 | 685 | /* 686 | AIOOperation.fsync classmethod definition 687 | */ 688 | PyDoc_STRVAR(AIOOperation_fsync_docstring, 689 | "Creates a new instance of Operation on fsync mode.\n\n" 690 | " Operation.fsync(\n" 691 | " aio_context: AIOContext,\n" 692 | " fd: int, \n" 693 | " priority=0\n" 694 | " )" 695 | ); 696 | static PyObject* AIOOperation_fsync( 697 | PyTypeObject *type, PyObject *args, PyObject *kwds 698 | ) { 699 | AIOOperation *self = (AIOOperation *) type->tp_alloc(type, 0); 700 | 701 | static char *kwlist[] = {"fd", "priority", NULL}; 702 | 703 | if (self == NULL) { 704 | PyErr_SetString(PyExc_MemoryError, "can not allocate memory"); 705 | return NULL; 706 | } 707 | 708 | memset(&self->iocb, 0, sizeof(struct iocb)); 709 | 710 | self->iocb.aio_data = (uint64_t)(uintptr_t) self; 711 | self->context = NULL; 712 | self->buffer = NULL; 713 | self->py_buffer = NULL; 714 | 715 | int argIsOk = PyArg_ParseTupleAndKeywords( 716 | args, kwds, "I|h", kwlist, 717 | &(self->iocb.aio_fildes), 718 | &(self->iocb.aio_reqprio) 719 | ); 720 | 721 | if (!argIsOk) return NULL; 722 | 723 | self->iocb.aio_lio_opcode = IOCB_CMD_FSYNC; 724 | 725 | return (PyObject*) self; 726 | } 727 | 728 | 729 | /* 730 | AIOOperation.fdsync classmethod definition 731 | */ 732 | PyDoc_STRVAR(AIOOperation_fdsync_docstring, 733 | "Creates a new instance of Operation on fdsync mode.\n\n" 734 | " Operation.fdsync(\n" 735 | " aio_context: AIOContext,\n" 736 | " fd: int, \n" 737 | " priority=0\n" 738 | " )" 739 | ); 740 | 741 | static PyObject* AIOOperation_fdsync( 742 | PyTypeObject *type, PyObject *args, PyObject *kwds 743 | ) { 744 | AIOOperation *self = (AIOOperation *) type->tp_alloc(type, 0); 745 | 746 | static char *kwlist[] = {"fd", "priority", NULL}; 747 | 748 | if (self == NULL) { 749 | PyErr_SetString(PyExc_MemoryError, "can not allocate 
memory"); 750 | return NULL; 751 | } 752 | 753 | memset(&self->iocb, 0, sizeof(struct iocb)); 754 | 755 | self->iocb.aio_data = (uint64_t)(uintptr_t) self; 756 | self->buffer = NULL; 757 | self->py_buffer = NULL; 758 | 759 | int argIsOk = PyArg_ParseTupleAndKeywords( 760 | args, kwds, "I|h", kwlist, 761 | &(self->iocb.aio_fildes), 762 | &(self->iocb.aio_reqprio) 763 | ); 764 | 765 | if (!argIsOk) return NULL; 766 | 767 | self->iocb.aio_lio_opcode = IOCB_CMD_FDSYNC; 768 | 769 | return (PyObject*) self; 770 | } 771 | 772 | /* 773 | AIOOperation.get_value method definition 774 | */ 775 | PyDoc_STRVAR(AIOOperation_get_value_docstring, 776 | "Method returns a bytes value of Operation's result or None.\n\n" 777 | " Operation.get_value() -> Optional[bytes]" 778 | ); 779 | 780 | static PyObject* AIOOperation_get_value( 781 | AIOOperation *self, PyObject *args, PyObject *kwds 782 | ) { 783 | 784 | if (self->error != 0) { 785 | PyErr_SetString( 786 | PyExc_SystemError, 787 | strerror(self->error) 788 | ); 789 | 790 | return NULL; 791 | } 792 | 793 | switch (self->iocb.aio_lio_opcode) { 794 | case IOCB_CMD_PREAD: 795 | return PyBytes_FromStringAndSize( 796 | self->buffer, self->iocb.aio_nbytes 797 | ); 798 | 799 | case IOCB_CMD_PWRITE: 800 | return PyLong_FromSsize_t(self->iocb.aio_nbytes); 801 | } 802 | 803 | return Py_None; 804 | } 805 | 806 | 807 | /* 808 | AIOOperation.set_callback method definition 809 | */ 810 | PyDoc_STRVAR(AIOOperation_set_callback_docstring, 811 | "Set callback which will be called after Operation will be finished.\n\n" 812 | " Operation.get_value() -> Optional[bytes]" 813 | ); 814 | 815 | static PyObject* AIOOperation_set_callback( 816 | AIOOperation *self, PyObject *args, PyObject *kwds 817 | ) { 818 | static char *kwlist[] = {"callback", NULL}; 819 | 820 | PyObject* callback; 821 | 822 | int argIsOk = PyArg_ParseTupleAndKeywords( 823 | args, kwds, "O", kwlist, 824 | &callback 825 | ); 826 | 827 | if (!argIsOk) return NULL; 828 | 829 | if (!PyCallable_Check(callback)) { 830 | PyErr_Format( 831 | PyExc_ValueError, 832 | "object %r is not callable", 833 | callback 834 | ); 835 | return NULL; 836 | } 837 | 838 | Py_INCREF(callback); 839 | self->callback = callback; 840 | 841 | Py_RETURN_TRUE; 842 | } 843 | 844 | /* 845 | AIOOperation properties 846 | */ 847 | static PyMemberDef AIOOperation_members[] = { 848 | { 849 | "context", T_OBJECT, 850 | offsetof(AIOOperation, context), 851 | READONLY, "context object" 852 | }, 853 | { 854 | "fileno", T_UINT, 855 | offsetof(AIOOperation, iocb.aio_fildes), 856 | READONLY, "file descriptor" 857 | }, 858 | { 859 | "priority", T_USHORT, 860 | offsetof(AIOOperation, iocb.aio_reqprio), 861 | READONLY, "request priority" 862 | }, 863 | { 864 | "offset", T_ULONGLONG, 865 | offsetof(AIOOperation, iocb.aio_offset), 866 | READONLY, "offset" 867 | }, 868 | { 869 | "payload", T_OBJECT, 870 | offsetof(AIOOperation, py_buffer), 871 | READONLY, "payload" 872 | }, 873 | { 874 | "nbytes", T_ULONGLONG, 875 | offsetof(AIOOperation, iocb.aio_nbytes), 876 | READONLY, "nbytes" 877 | }, 878 | {NULL} /* Sentinel */ 879 | }; 880 | 881 | /* 882 | AIOOperation methods 883 | */ 884 | static PyMethodDef AIOOperation_methods[] = { 885 | { 886 | "read", 887 | (PyCFunction) AIOOperation_read, 888 | METH_CLASS | METH_VARARGS | METH_KEYWORDS, 889 | AIOOperation_read_docstring 890 | }, 891 | { 892 | "write", 893 | (PyCFunction) AIOOperation_write, 894 | METH_CLASS | METH_VARARGS | METH_KEYWORDS, 895 | AIOOperation_write_docstring 896 | }, 897 | { 898 | "fsync", 899 | 
(PyCFunction) AIOOperation_fsync, 900 | METH_CLASS | METH_VARARGS | METH_KEYWORDS, 901 | AIOOperation_fsync_docstring 902 | }, 903 | { 904 | "fdsync", 905 | (PyCFunction) AIOOperation_fdsync, 906 | METH_CLASS | METH_VARARGS | METH_KEYWORDS, 907 | AIOOperation_fdsync_docstring 908 | }, 909 | { 910 | "get_value", 911 | (PyCFunction) AIOOperation_get_value, METH_NOARGS, 912 | AIOOperation_get_value_docstring 913 | }, 914 | { 915 | "set_callback", 916 | (PyCFunction) AIOOperation_set_callback, METH_VARARGS | METH_KEYWORDS, 917 | AIOOperation_set_callback_docstring 918 | }, 919 | {NULL} /* Sentinel */ 920 | }; 921 | 922 | /* 923 | AIOOperation class 924 | */ 925 | static PyTypeObject 926 | AIOOperationType = { 927 | PyVarObject_HEAD_INIT(NULL, 0) 928 | .tp_name = "aio.AIOOperation", 929 | .tp_doc = "linux aio operation representation", 930 | .tp_basicsize = sizeof(AIOOperation), 931 | .tp_itemsize = 0, 932 | .tp_flags = Py_TPFLAGS_DEFAULT, 933 | .tp_dealloc = (destructor) AIOOperation_dealloc, 934 | .tp_members = AIOOperation_members, 935 | .tp_methods = AIOOperation_methods, 936 | .tp_repr = (reprfunc) AIOOperation_repr 937 | }; 938 | 939 | 940 | static PyModuleDef linux_aio_module = { 941 | PyModuleDef_HEAD_INIT, 942 | .m_name = "linux_aio", 943 | .m_doc = "Linux AIO c API bindings.", 944 | .m_size = -1, 945 | }; 946 | 947 | 948 | PyMODINIT_FUNC PyInit_linux_aio(void) { 949 | Py_Initialize(); 950 | 951 | struct utsname uname_data; 952 | 953 | if (uname(&uname_data)) { 954 | PyErr_SetString(PyExc_ImportError, "Can not detect linux kernel version"); 955 | return NULL; 956 | } 957 | 958 | int release[2] = {0}; 959 | sscanf(uname_data.release, "%d.%d", &release[0], &release[1]); 960 | 961 | kernel_support = (release[0] > 4) || (release[0] == 4 && release[1] >= 18); 962 | 963 | if (!kernel_support) { 964 | PyErr_Format( 965 | PyExc_ImportError, 966 | "Linux kernel supported since 4.18 but current kernel is %s.", 967 | uname_data.release 968 | ); 969 | 970 | return NULL; 971 | } 972 | 973 | aio_context_t temp_ctx = 0; 974 | if (io_setup(1, &temp_ctx) < 0) { 975 | PyErr_Format( 976 | PyExc_ImportError, 977 | "Error on io_setup with code %d", 978 | errno 979 | ); 980 | return NULL; 981 | } 982 | 983 | if (io_destroy(temp_ctx)) { 984 | PyErr_Format( 985 | PyExc_ImportError, 986 | "Error on io_destroy with code %d", 987 | errno 988 | ); 989 | return NULL; 990 | } 991 | 992 | AIOContextTypeP = &AIOContextType; 993 | AIOOperationTypeP = &AIOOperationType; 994 | 995 | PyObject *m; 996 | 997 | m = PyModule_Create(&linux_aio_module); 998 | 999 | if (m == NULL) return NULL; 1000 | 1001 | if (PyType_Ready(AIOContextTypeP) < 0) return NULL; 1002 | 1003 | Py_INCREF(AIOContextTypeP); 1004 | 1005 | if (PyModule_AddObject(m, "Context", (PyObject *) AIOContextTypeP) < 0) { 1006 | Py_XDECREF(AIOContextTypeP); 1007 | Py_XDECREF(m); 1008 | return NULL; 1009 | } 1010 | 1011 | if (PyType_Ready(AIOOperationTypeP) < 0) return NULL; 1012 | 1013 | Py_INCREF(AIOOperationTypeP); 1014 | 1015 | if (PyModule_AddObject(m, "Operation", (PyObject *) AIOOperationTypeP) < 0) { 1016 | Py_XDECREF(AIOOperationTypeP); 1017 | Py_XDECREF(m); 1018 | return NULL; 1019 | } 1020 | 1021 | return m; 1022 | } 1023 | 1024 | -------------------------------------------------------------------------------- /caio/linux_aio.pyi: -------------------------------------------------------------------------------- 1 | from typing import Union, Optional, Callable, Any 2 | 3 | from .abstract import AbstractContext, AbstractOperation 4 | 5 | 6 | # noinspection 
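The Context/Operation pair that PyInit_linux_aio registers above is normally driven through the asyncio adapter, but it can also be exercised directly. The sketch below is a hedged illustration only: the file name "demo.bin", the assumption that Context.submit() accepts operations the same way the thread backend later in this dump does, and the blocking behaviour of poll() (inferred from the add_reader/poll/process_events pattern in linux_aio_asyncio.py) are not guaranteed by this excerpt. Linux only, kernel 4.18 or newer per the import-time check.

import os
from caio.linux_aio import Context, Operation

ctx = Context(max_requests=32)
fd = os.open("demo.bin", os.O_RDWR | os.O_CREAT, 0o644)
results = []

try:
    write_op = Operation.write(b"hello", fd, 0)
    write_op.set_callback(results.append)   # callback receives the raw result as an int
    ctx.submit(write_op)                     # assumed: submit(*operations) -> int, as in the thread backend
    ctx.poll()                               # wait for a completion event (inferred from the asyncio adapter)
    ctx.process_events()                     # dispatch callbacks for completed operations

    read_op = Operation.read(5, fd, 0)
    read_op.set_callback(results.append)
    ctx.submit(read_op)
    ctx.poll()
    ctx.process_events()

    print(read_op.get_value())               # b"hello": bytes for reads, written byte count for writes
finally:
    os.close(fd)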
PyPropertyDefinition 7 | class Context(AbstractContext): 8 | def __init__(self, max_requests: int = 32): ... 9 | 10 | 11 | # noinspection PyPropertyDefinition 12 | class Operation(AbstractOperation): 13 | @classmethod 14 | def read( 15 | cls, nbytes: int, fd: int, offset: int, priority=0 16 | ) -> "AbstractOperation": ... 17 | 18 | @classmethod 19 | def write( 20 | cls, payload_bytes: bytes, 21 | fd: int, offset: int, priority=0, 22 | ) -> "AbstractOperation": ... 23 | 24 | @classmethod 25 | def fsync(cls, fd: int, priority=0) -> "AbstractOperation": ... 26 | 27 | @classmethod 28 | def fdsync(cls, fd: int, priority=0) -> "AbstractOperation": ... 29 | 30 | def get_value(self) -> Union[bytes, int]: ... 31 | 32 | @property 33 | def fileno(self) -> int: ... 34 | 35 | @property 36 | def offset(self) -> int: ... 37 | 38 | @property 39 | def payload(self) -> Optional[Union[bytes, memoryview]]: ... 40 | 41 | @property 42 | def nbytes(self) -> int: ... 43 | 44 | def set_callback(self, callback: Callable[[int], Any]) -> bool: ... 45 | 46 | -------------------------------------------------------------------------------- /caio/linux_aio_asyncio.py: -------------------------------------------------------------------------------- 1 | from .asyncio_base import AsyncioContextBase 2 | from .linux_aio import Context, Operation 3 | 4 | 5 | class AsyncioContext(AsyncioContextBase): 6 | OPERATION_CLASS = Operation 7 | CONTEXT_CLASS = Context 8 | 9 | def _create_context(self, max_requests): 10 | context = super()._create_context(max_requests) 11 | self.loop.add_reader(context.fileno, self._on_read_event) 12 | return context 13 | 14 | def _on_done(self, future, result): 15 | """ 16 | Allow to set result directly. 17 | Cause process_events running in the same thread 18 | """ 19 | if future.done(): 20 | return 21 | future.set_result(True) 22 | 23 | def _destroy_context(self): 24 | self.loop.remove_reader(self.context.fileno) 25 | 26 | def _on_read_event(self): 27 | self.context.poll() 28 | self.context.process_events() 29 | -------------------------------------------------------------------------------- /caio/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mosquito/caio/580aa5c7c9ec40a052b55a427b06dcd911026c70/caio/py.typed -------------------------------------------------------------------------------- /caio/python_aio.py: -------------------------------------------------------------------------------- 1 | import os 2 | from collections import defaultdict 3 | from enum import IntEnum, unique 4 | from io import BytesIO 5 | from multiprocessing.pool import ThreadPool 6 | from threading import Lock, RLock 7 | from types import MappingProxyType 8 | from typing import Any, Callable, Optional, Union 9 | 10 | from .abstract import AbstractContext, AbstractOperation 11 | 12 | 13 | fdsync = getattr(os, "fdatasync", os.fsync) 14 | NATIVE_PREAD_PWRITE = hasattr(os, "pread") and hasattr(os, "pwrite") 15 | 16 | 17 | @unique 18 | class OpCode(IntEnum): 19 | READ = 0 20 | WRITE = 1 21 | FSYNC = 2 22 | FDSYNC = 3 23 | NOOP = -1 24 | 25 | 26 | class Context(AbstractContext): 27 | """ 28 | python aio context implementation 29 | """ 30 | 31 | MAX_POOL_SIZE = 128 32 | 33 | def __init__(self, max_requests: int = 32, pool_size: int = 8): 34 | assert pool_size < self.MAX_POOL_SIZE 35 | 36 | self.__max_requests = max_requests 37 | self.pool = ThreadPool(pool_size) 38 | self._in_progress = 0 39 | self._closed = False 40 | self._closed_lock = Lock() 41 | 42 | if 
not NATIVE_PREAD_PWRITE: 43 | self._locks_cleaner = RLock() # type: ignore 44 | self._locks = defaultdict(RLock) # type: ignore 45 | 46 | @property 47 | def max_requests(self) -> int: 48 | return self.__max_requests 49 | 50 | def _execute(self, operation: "Operation"): 51 | handler = self._OP_MAP[operation.opcode] 52 | 53 | def on_error(exc): 54 | self._in_progress -= 1 55 | operation.exception = exc 56 | operation.written = 0 57 | operation.callback(None) 58 | 59 | def on_success(result): 60 | self._in_progress -= 1 61 | operation.written = result 62 | operation.callback(result) 63 | 64 | if self._in_progress > self.__max_requests: 65 | raise RuntimeError( 66 | "Maximum simultaneous requests have been reached", 67 | ) 68 | 69 | self._in_progress += 1 70 | 71 | self.pool.apply_async( 72 | handler, args=(self, operation), 73 | callback=on_success, 74 | error_callback=on_error, 75 | ) 76 | 77 | if NATIVE_PREAD_PWRITE: 78 | def __pread(self, fd, size, offset): 79 | return os.pread(fd, size, offset) 80 | 81 | def __pwrite(self, fd, bytes, offset): 82 | return os.pwrite(fd, bytes, offset) 83 | else: 84 | def __pread(self, fd, size, offset): 85 | with self._locks[fd]: 86 | os.lseek(fd, 0, os.SEEK_SET) 87 | os.lseek(fd, offset, os.SEEK_SET) 88 | return os.read(fd, size) 89 | 90 | def __pwrite(self, fd, bytes, offset): 91 | with self._locks[fd]: 92 | os.lseek(fd, 0, os.SEEK_SET) 93 | os.lseek(fd, offset, os.SEEK_SET) 94 | return os.write(fd, bytes) 95 | 96 | def _handle_read(self, operation: "Operation"): 97 | return operation.buffer.write( 98 | self.__pread( 99 | operation.fileno, 100 | operation.nbytes, 101 | operation.offset, 102 | ), 103 | ) 104 | 105 | def _handle_write(self, operation: "Operation"): 106 | return self.__pwrite( 107 | operation.fileno, operation.buffer.getvalue(), operation.offset, 108 | ) 109 | 110 | def _handle_fsync(self, operation: "Operation"): 111 | return os.fsync(operation.fileno) 112 | 113 | def _handle_fdsync(self, operation: "Operation"): 114 | return fdsync(operation.fileno) 115 | 116 | def _handle_noop(self, operation: "Operation"): 117 | return 118 | 119 | def submit(self, *aio_operations) -> int: 120 | operations = [] 121 | 122 | for operation in aio_operations: 123 | if not isinstance(operation, Operation): 124 | raise ValueError("Invalid Operation %r", operation) 125 | 126 | operations.append(operation) 127 | 128 | count = 0 129 | for operation in operations: 130 | self._execute(operation) 131 | count += 1 132 | 133 | return count 134 | 135 | def cancel(self, *aio_operations) -> int: 136 | """ 137 | Cancels multiple Operations. Returns 138 | 139 | Operation.cancel(aio_op1, aio_op2, aio_opN, ...) 
-> int 140 | 141 | (Always returns zero, this method exists for compatibility reasons) 142 | """ 143 | return 0 144 | 145 | def close(self): 146 | if self._closed: 147 | return 148 | 149 | with self._closed_lock: 150 | self.pool.close() 151 | self._closed = True 152 | 153 | def __del__(self): 154 | if self.pool.close(): 155 | self.close() 156 | 157 | _OP_MAP = MappingProxyType({ 158 | OpCode.READ: _handle_read, 159 | OpCode.WRITE: _handle_write, 160 | OpCode.FSYNC: _handle_fsync, 161 | OpCode.FDSYNC: _handle_fdsync, 162 | OpCode.NOOP: _handle_noop, 163 | }) 164 | 165 | 166 | # noinspection PyPropertyDefinition 167 | class Operation(AbstractOperation): 168 | """ 169 | python aio operation implementation 170 | """ 171 | def __init__( 172 | self, 173 | fd: int, 174 | nbytes: Optional[int], 175 | offset: Optional[int], 176 | opcode: OpCode, 177 | payload: Optional[bytes] = None, 178 | priority: Optional[int] = None, 179 | ): 180 | self.callback = None # type: Optional[Callable[[int], Any]] 181 | self.buffer = BytesIO() 182 | 183 | if opcode == OpCode.WRITE and payload: 184 | self.buffer = BytesIO(payload) 185 | 186 | self.opcode = opcode 187 | self.__fileno = fd 188 | self.__offset = offset or 0 189 | self.__opcode = opcode 190 | self.__nbytes = nbytes or 0 191 | self.__priority = priority or 0 192 | self.exception = None 193 | self.written = 0 194 | 195 | @classmethod 196 | def read( 197 | cls, nbytes: int, fd: int, offset: int, priority=0, 198 | ) -> "Operation": 199 | """ 200 | Creates a new instance of Operation on read mode. 201 | """ 202 | return cls(fd, nbytes, offset, opcode=OpCode.READ, priority=priority) 203 | 204 | @classmethod 205 | def write( 206 | cls, payload_bytes: bytes, fd: int, offset: int, priority=0, 207 | ) -> "Operation": 208 | """ 209 | Creates a new instance of AIOOperation on write mode. 210 | """ 211 | return cls( 212 | fd, 213 | len(payload_bytes), 214 | offset, 215 | payload=payload_bytes, 216 | opcode=OpCode.WRITE, 217 | priority=priority, 218 | ) 219 | 220 | @classmethod 221 | def fsync(cls, fd: int, priority=0) -> "Operation": 222 | 223 | """ 224 | Creates a new instance of AIOOperation on fsync mode. 225 | """ 226 | return cls(fd, None, None, opcode=OpCode.FSYNC, priority=priority) 227 | 228 | @classmethod 229 | def fdsync(cls, fd: int, priority=0) -> "Operation": 230 | 231 | """ 232 | Creates a new instance of AIOOperation on fdsync mode. 233 | """ 234 | return cls(fd, None, None, opcode=OpCode.FDSYNC, priority=priority) 235 | 236 | def get_value(self) -> Union[bytes, int]: 237 | """ 238 | Method returns a bytes value of AIOOperation's result or None. 
239 | """ 240 | if self.exception: 241 | raise self.exception 242 | 243 | if self.opcode == OpCode.WRITE: 244 | return self.written 245 | 246 | if self.buffer is None: 247 | return 248 | 249 | return self.buffer.getvalue() 250 | 251 | @property 252 | def fileno(self) -> int: 253 | return self.__fileno 254 | 255 | @property 256 | def offset(self) -> int: 257 | return self.__offset 258 | 259 | @property 260 | def payload(self) -> Optional[memoryview]: 261 | return self.buffer.getbuffer() 262 | 263 | @property 264 | def nbytes(self) -> int: 265 | return self.__nbytes 266 | 267 | def set_callback(self, callback: Callable[[int], Any]) -> bool: 268 | self.callback = callback 269 | return True 270 | -------------------------------------------------------------------------------- /caio/python_aio_asyncio.py: -------------------------------------------------------------------------------- 1 | from .asyncio_base import AsyncioContextBase 2 | from .python_aio import Context, Operation 3 | 4 | 5 | class AsyncioContext(AsyncioContextBase): 6 | OPERATION_CLASS = Operation 7 | CONTEXT_CLASS = Context 8 | 9 | def _destroy_context(self): 10 | self.context.close() 11 | -------------------------------------------------------------------------------- /caio/src/threadpool/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2016, Mathias Brossard. All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without 4 | modification, are permitted provided that the following conditions are 5 | met: 6 | 7 | 1. Redistributions of source code must retain the above copyright 8 | notice, this list of conditions and the following disclaimer. 9 | 10 | 2. Redistributions in binary form must reproduce the above copyright 11 | notice, this list of conditions and the following disclaimer in the 12 | documentation and/or other materials provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 15 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 16 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 17 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 18 | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 19 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 20 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 24 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
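Returning to the pure-Python backend in caio/python_aio.py above: completion is signalled only through the callable passed to set_callback(), which the thread pool invokes with the operation's result (see on_success and on_error in Context._execute). A minimal synchronous sketch follows; the file name "demo.bin" and the Event-based wait are illustrative, not part of the documented API.

import os
import threading
from caio.python_aio import Context, Operation

ctx = Context(max_requests=32, pool_size=4)
fd = os.open("demo.bin", os.O_RDWR | os.O_CREAT, 0o644)
done = threading.Event()

op = Operation.write(b"hello world", fd, 0)
op.set_callback(lambda result: done.set())   # invoked from a pool thread with the pwrite() result

ctx.submit(op)
done.wait()
print(op.get_value())                        # 11: write operations return the written byte count

ctx.close()
os.close(fd)

On platforms without os.pread/os.pwrite the same calls go through the per-descriptor RLock fallback shown above, so concurrent operations on a single descriptor are serialized.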
25 | -------------------------------------------------------------------------------- /caio/src/threadpool/LINK: -------------------------------------------------------------------------------- 1 | https://github.com/mbrossard/threadpool 2 | -------------------------------------------------------------------------------- /caio/src/threadpool/README.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://travis-ci.org/mbrossard/threadpool.svg?branch=master)](https://travis-ci.org/mbrossard/threadpool) 2 | 3 | A simple C thread pool implementation 4 | ===================================== 5 | 6 | Currently, the implementation: 7 | * Works with pthreads only, but API is intentionally opaque to allow 8 | other implementations (Windows for instance). 9 | * Starts all threads on creation of the thread pool. 10 | * Reserves one task for signaling the queue is full. 11 | * Stops and joins all worker threads on destroy. 12 | 13 | Possible enhancements 14 | ===================== 15 | 16 | The API contains addtional unused 'flags' parameters that would allow 17 | some additional options: 18 | 19 | * Lazy creation of threads (easy) 20 | * Reduce number of threads automatically (hard) 21 | * Unlimited queue size (medium) 22 | * Kill worker threads on destroy (hard, dangerous) 23 | * Support Windows API (medium) 24 | * Reduce locking contention (medium/hard) 25 | -------------------------------------------------------------------------------- /caio/src/threadpool/threadpool.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2016, Mathias Brossard . 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * 1. Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * 12 | * 2. Redistributions in binary form must reproduce the above copyright 13 | * notice, this list of conditions and the following disclaimer in the 14 | * documentation and/or other materials provided with the distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | */ 28 | 29 | /** 30 | * @file threadpool.c 31 | * @brief Threadpool implementation file 32 | */ 33 | 34 | #include 35 | #include 36 | #include 37 | 38 | #include "threadpool.h" 39 | 40 | typedef enum { 41 | immediate_shutdown = 1, 42 | graceful_shutdown = 2 43 | } threadpool_shutdown_t; 44 | 45 | /** 46 | * @struct threadpool_task 47 | * @brief the work struct 48 | * 49 | * @var function Pointer to the function that will perform the task. 
50 | * @var argument Argument to be passed to the function. 51 | */ 52 | 53 | typedef struct { 54 | void (*function)(void *); 55 | void *argument; 56 | } threadpool_task_t; 57 | 58 | /** 59 | * @struct threadpool 60 | * @brief The threadpool struct 61 | * 62 | * @var notify Condition variable to notify worker threads. 63 | * @var threads Array containing worker threads ID. 64 | * @var thread_count Number of threads 65 | * @var queue Array containing the task queue. 66 | * @var queue_size Size of the task queue. 67 | * @var head Index of the first element. 68 | * @var tail Index of the next element. 69 | * @var count Number of pending tasks 70 | * @var shutdown Flag indicating if the pool is shutting down 71 | * @var started Number of started threads 72 | */ 73 | struct threadpool_t { 74 | pthread_mutex_t lock; 75 | pthread_cond_t notify; 76 | pthread_t *threads; 77 | threadpool_task_t *queue; 78 | int thread_count; 79 | int queue_size; 80 | int head; 81 | int tail; 82 | int count; 83 | int shutdown; 84 | int started; 85 | }; 86 | 87 | /** 88 | * @function void *threadpool_thread(void *threadpool) 89 | * @brief the worker thread 90 | * @param threadpool the pool which own the thread 91 | */ 92 | static void *threadpool_thread(void *threadpool); 93 | 94 | int threadpool_free(threadpool_t *pool); 95 | 96 | threadpool_t *threadpool_create(int thread_count, int queue_size, int flags) 97 | { 98 | threadpool_t *pool; 99 | int i; 100 | (void) flags; 101 | 102 | if(thread_count <= 0 || thread_count > MAX_THREADS || queue_size <= 0 || queue_size > MAX_QUEUE) { 103 | return NULL; 104 | } 105 | 106 | if((pool = (threadpool_t *)malloc(sizeof(threadpool_t))) == NULL) { 107 | goto err; 108 | } 109 | 110 | /* Initialize */ 111 | pool->thread_count = 0; 112 | pool->queue_size = queue_size; 113 | pool->head = pool->tail = pool->count = 0; 114 | pool->shutdown = pool->started = 0; 115 | 116 | /* Allocate thread and task queue */ 117 | pool->threads = (pthread_t *)malloc(sizeof(pthread_t) * thread_count); 118 | pool->queue = (threadpool_task_t *)malloc 119 | (sizeof(threadpool_task_t) * queue_size); 120 | 121 | /* Initialize mutex and conditional variable first */ 122 | if((pthread_mutex_init(&(pool->lock), NULL) != 0) || 123 | (pthread_cond_init(&(pool->notify), NULL) != 0) || 124 | (pool->threads == NULL) || 125 | (pool->queue == NULL)) { 126 | goto err; 127 | } 128 | 129 | /* Start worker threads */ 130 | for(i = 0; i < thread_count; i++) { 131 | if(pthread_create(&(pool->threads[i]), NULL, 132 | threadpool_thread, (void*)pool) != 0) { 133 | threadpool_destroy(pool, 0); 134 | return NULL; 135 | } 136 | pool->thread_count++; 137 | pool->started++; 138 | } 139 | 140 | return pool; 141 | 142 | err: 143 | if(pool) { 144 | threadpool_free(pool); 145 | } 146 | return NULL; 147 | } 148 | 149 | int threadpool_add(threadpool_t *pool, void (*function)(void *), 150 | void *argument, int flags) 151 | { 152 | int err = 0; 153 | int next; 154 | (void) flags; 155 | 156 | if(pool == NULL || function == NULL) { 157 | return threadpool_invalid; 158 | } 159 | 160 | if(pthread_mutex_lock(&(pool->lock)) != 0) { 161 | return threadpool_lock_failure; 162 | } 163 | 164 | next = (pool->tail + 1) % pool->queue_size; 165 | 166 | do { 167 | /* Are we full ? */ 168 | if(pool->count == pool->queue_size) { 169 | err = threadpool_queue_full; 170 | break; 171 | } 172 | 173 | /* Are we shutting down ? 
*/ 174 | if(pool->shutdown) { 175 | err = threadpool_shutdown; 176 | break; 177 | } 178 | 179 | /* Add task to queue */ 180 | pool->queue[pool->tail].function = function; 181 | pool->queue[pool->tail].argument = argument; 182 | pool->tail = next; 183 | pool->count += 1; 184 | 185 | /* pthread_cond_broadcast */ 186 | if(pthread_cond_signal(&(pool->notify)) != 0) { 187 | err = threadpool_lock_failure; 188 | break; 189 | } 190 | } while(0); 191 | 192 | if(pthread_mutex_unlock(&pool->lock) != 0) { 193 | err = threadpool_lock_failure; 194 | } 195 | 196 | return err; 197 | } 198 | 199 | int threadpool_destroy(threadpool_t *pool, int flags) 200 | { 201 | int i, err = 0; 202 | 203 | if(pool == NULL) { 204 | return threadpool_invalid; 205 | } 206 | 207 | if(pthread_mutex_lock(&(pool->lock)) != 0) { 208 | return threadpool_lock_failure; 209 | } 210 | 211 | do { 212 | /* Already shutting down */ 213 | if(pool->shutdown) { 214 | err = threadpool_shutdown; 215 | break; 216 | } 217 | 218 | pool->shutdown = (flags & threadpool_graceful) ? 219 | graceful_shutdown : immediate_shutdown; 220 | 221 | /* Wake up all worker threads */ 222 | if((pthread_cond_broadcast(&(pool->notify)) != 0) || 223 | (pthread_mutex_unlock(&(pool->lock)) != 0)) { 224 | err = threadpool_lock_failure; 225 | break; 226 | } 227 | 228 | /* Join all worker thread */ 229 | for(i = 0; i < pool->thread_count; i++) { 230 | if(pthread_join(pool->threads[i], NULL) != 0) { 231 | err = threadpool_thread_failure; 232 | } 233 | } 234 | } while(0); 235 | 236 | /* Only if everything went well do we deallocate the pool */ 237 | if(!err) { 238 | threadpool_free(pool); 239 | } 240 | return err; 241 | } 242 | 243 | int threadpool_free(threadpool_t *pool) 244 | { 245 | if(pool == NULL || pool->started > 0) { 246 | return -1; 247 | } 248 | 249 | /* Did we manage to allocate ? */ 250 | if(pool->threads) { 251 | free(pool->threads); 252 | free(pool->queue); 253 | 254 | /* Because we allocate pool->threads after initializing the 255 | mutex and condition variable, we're sure they're 256 | initialized. Let's lock the mutex just in case. */ 257 | pthread_mutex_lock(&(pool->lock)); 258 | pthread_mutex_destroy(&(pool->lock)); 259 | pthread_cond_destroy(&(pool->notify)); 260 | } 261 | free(pool); 262 | return 0; 263 | } 264 | 265 | 266 | static void *threadpool_thread(void *threadpool) 267 | { 268 | threadpool_t *pool = (threadpool_t *)threadpool; 269 | threadpool_task_t task; 270 | 271 | for(;;) { 272 | /* Lock must be taken to wait on conditional variable */ 273 | pthread_mutex_lock(&(pool->lock)); 274 | 275 | /* Wait on condition variable, check for spurious wakeups. 276 | When returning from pthread_cond_wait(), we own the lock. 
*/ 277 | while((pool->count == 0) && (!pool->shutdown)) { 278 | pthread_cond_wait(&(pool->notify), &(pool->lock)); 279 | } 280 | 281 | if((pool->shutdown == immediate_shutdown) || 282 | ((pool->shutdown == graceful_shutdown) && 283 | (pool->count == 0))) { 284 | break; 285 | } 286 | 287 | /* Grab our task */ 288 | task.function = pool->queue[pool->head].function; 289 | task.argument = pool->queue[pool->head].argument; 290 | pool->head = (pool->head + 1) % pool->queue_size; 291 | pool->count -= 1; 292 | 293 | /* Unlock */ 294 | pthread_mutex_unlock(&(pool->lock)); 295 | 296 | /* Get to work */ 297 | (*(task.function))(task.argument); 298 | } 299 | 300 | pool->started--; 301 | 302 | pthread_mutex_unlock(&(pool->lock)); 303 | pthread_exit(NULL); 304 | return(NULL); 305 | } 306 | -------------------------------------------------------------------------------- /caio/src/threadpool/threadpool.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2016, Mathias Brossard . 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are 7 | * met: 8 | * 9 | * 1. Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * 12 | * 2. Redistributions in binary form must reproduce the above copyright 13 | * notice, this list of conditions and the following disclaimer in the 14 | * documentation and/or other materials provided with the distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 | * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | */ 28 | 29 | #ifndef _THREADPOOL_H_ 30 | #define _THREADPOOL_H_ 31 | 32 | #ifdef __cplusplus 33 | extern "C" { 34 | #endif 35 | 36 | /** 37 | * @file threadpool.h 38 | * @brief Threadpool Header File 39 | */ 40 | 41 | /** 42 | * Increase this constants at your own risk 43 | * Large values might slow down your system 44 | */ 45 | #define MAX_THREADS 128 46 | #define MAX_QUEUE 65536 47 | 48 | typedef struct threadpool_t threadpool_t; 49 | 50 | typedef enum { 51 | threadpool_invalid = -1, 52 | threadpool_lock_failure = -2, 53 | threadpool_queue_full = -3, 54 | threadpool_shutdown = -4, 55 | threadpool_thread_failure = -5 56 | } threadpool_error_t; 57 | 58 | typedef enum { 59 | threadpool_graceful = 1 60 | } threadpool_destroy_flags_t; 61 | 62 | /** 63 | * @function threadpool_create 64 | * @brief Creates a threadpool_t object. 65 | * @param thread_count Number of worker threads. 66 | * @param queue_size Size of the queue. 67 | * @param flags Unused parameter. 
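The worker loop above (one mutex, one condition variable, a guard loop against spurious wakeups, and a graceful shutdown that drains the queue) maps directly onto Python's threading primitives. The class below is a simplified, hypothetical analogue for illustration only; it is not the pool caio builds on and it models just the graceful-shutdown path.

import threading
from collections import deque

class MiniPool:
    """Toy analogue of threadpool.c: fixed workers, one lock, one condition variable."""

    def __init__(self, threads=4):
        self._lock = threading.Lock()
        self._notify = threading.Condition(self._lock)
        self._queue = deque()
        self._shutdown = False
        self._workers = [threading.Thread(target=self._run) for _ in range(threads)]
        for worker in self._workers:
            worker.start()

    def add(self, function, argument):
        with self._lock:
            if self._shutdown:
                raise RuntimeError("pool is shutting down")   # threadpool_shutdown
            self._queue.append((function, argument))
            self._notify.notify()                             # pthread_cond_signal

    def destroy(self):
        with self._lock:
            self._shutdown = True
            self._notify.notify_all()                         # pthread_cond_broadcast
        for worker in self._workers:
            worker.join()                                     # pthread_join

    def _run(self):
        while True:
            with self._lock:
                # Guard against spurious wakeups, as threadpool_thread() does.
                while not self._queue and not self._shutdown:
                    self._notify.wait()
                if self._shutdown and not self._queue:        # graceful drain, then exit
                    return
                function, argument = self._queue.popleft()
            function(argument)                                # run the task outside the lock

pool = MiniPool()
pool.add(print, "task executed")
pool.destroy()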
68 | * @return a newly created thread pool or NULL 69 | */ 70 | threadpool_t *threadpool_create(int thread_count, int queue_size, int flags); 71 | 72 | /** 73 | * @function threadpool_add 74 | * @brief add a new task in the queue of a thread pool 75 | * @param pool Thread pool to which add the task. 76 | * @param function Pointer to the function that will perform the task. 77 | * @param argument Argument to be passed to the function. 78 | * @param flags Unused parameter. 79 | * @return 0 if all goes well, negative values in case of error (@see 80 | * threadpool_error_t for codes). 81 | */ 82 | int threadpool_add(threadpool_t *pool, void (*routine)(void *), 83 | void *arg, int flags); 84 | 85 | /** 86 | * @function threadpool_destroy 87 | * @brief Stops and destroys a thread pool. 88 | * @param pool Thread pool to destroy. 89 | * @param flags Flags for shutdown 90 | * 91 | * Known values for flags are 0 (default) and threadpool_graceful in 92 | * which case the thread pool doesn't accept any new tasks but 93 | * processes all pending tasks before shutdown. 94 | */ 95 | int threadpool_destroy(threadpool_t *pool, int flags); 96 | 97 | #ifdef __cplusplus 98 | } 99 | #endif 100 | 101 | #endif /* _THREADPOOL_H_ */ 102 | -------------------------------------------------------------------------------- /caio/thread_aio.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #define PY_SSIZE_T_CLEAN 6 | #include 7 | #include 8 | 9 | #include "src/threadpool/threadpool.h" 10 | 11 | 12 | static const unsigned CTX_POOL_SIZE_DEFAULT = 8; 13 | static const unsigned CTX_MAX_REQUESTS_DEFAULT = 512; 14 | 15 | 16 | static PyTypeObject AIOOperationType; 17 | static PyTypeObject AIOContextType; 18 | 19 | typedef struct { 20 | PyObject_HEAD 21 | threadpool_t* pool; 22 | uint16_t max_requests; 23 | uint8_t pool_size; 24 | } AIOContext; 25 | 26 | 27 | typedef struct { 28 | PyObject_HEAD 29 | PyObject* py_buffer; 30 | PyObject* callback; 31 | int opcode; 32 | unsigned int fileno; 33 | off_t offset; 34 | int result; 35 | uint8_t error; 36 | uint8_t in_progress; 37 | Py_ssize_t buf_size; 38 | char* buf; 39 | PyObject* ctx; 40 | } AIOOperation; 41 | 42 | 43 | enum THAIO_OP_CODE { 44 | THAIO_READ, 45 | THAIO_WRITE, 46 | THAIO_FSYNC, 47 | THAIO_FDSYNC, 48 | THAIO_NOOP, 49 | }; 50 | 51 | 52 | static void 53 | AIOContext_dealloc(AIOContext *self) { 54 | if (self->pool != 0) { 55 | threadpool_t* pool = self->pool; 56 | self->pool = 0; 57 | 58 | threadpool_destroy(pool, 0); 59 | } 60 | 61 | Py_TYPE(self)->tp_free((PyObject *) self); 62 | } 63 | 64 | /* 65 | AIOContext.__new__ classmethod definition 66 | */ 67 | static PyObject * 68 | AIOContext_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { 69 | AIOContext *self; 70 | 71 | self = (AIOContext *) type->tp_alloc(type, 0); 72 | return (PyObject *) self; 73 | } 74 | 75 | static int 76 | AIOContext_init(AIOContext *self, PyObject *args, PyObject *kwds) 77 | { 78 | static char *kwlist[] = {"max_requests", "pool_size", NULL}; 79 | 80 | self->pool = NULL; 81 | self->max_requests = 0; 82 | 83 | if (!PyArg_ParseTupleAndKeywords( 84 | args, kwds, "|HH", kwlist, 85 | &self->max_requests, &self->pool_size 86 | )) return -1; 87 | 88 | if (self->max_requests <= 0) { 89 | self->max_requests = CTX_MAX_REQUESTS_DEFAULT; 90 | } 91 | 92 | if (self->pool_size <= 0) { 93 | self->pool_size = CTX_POOL_SIZE_DEFAULT; 94 | } 95 | 96 | if (self->pool_size > MAX_THREADS) { 97 | PyErr_Format( 98 | PyExc_ValueError, 99 
| "pool_size too large. Allowed lower then %d", 100 | MAX_THREADS 101 | ); 102 | return -1; 103 | } 104 | 105 | if (self->max_requests >= (MAX_QUEUE - 1)) { 106 | PyErr_Format( 107 | PyExc_ValueError, 108 | "max_requests too large. Allowed lower then %d", 109 | MAX_QUEUE - 1 110 | ); 111 | return -1; 112 | } 113 | 114 | self->pool = threadpool_create(self->pool_size, self->max_requests, 0); 115 | 116 | if (self->pool == NULL) { 117 | PyErr_Format( 118 | PyExc_RuntimeError, 119 | "Pool initialization failed size=%d max_requests=%d", 120 | self->pool_size, self->max_requests 121 | ); 122 | return -1; 123 | } 124 | 125 | return 0; 126 | } 127 | 128 | static PyObject* AIOContext_repr(AIOContext *self) { 129 | if (self->pool == NULL) { 130 | PyErr_SetString(PyExc_RuntimeError, "Pool not initialized"); 131 | return NULL; 132 | } 133 | return PyUnicode_FromFormat( 134 | "<%s as %p: max_requests=%i, pool_size=%i, ctx=%lli>", 135 | Py_TYPE(self)->tp_name, self, self->max_requests, 136 | self->pool_size, self->pool 137 | ); 138 | } 139 | 140 | 141 | void worker(void *arg) { 142 | PyGILState_STATE state; 143 | 144 | AIOOperation* op = arg; 145 | PyObject* ctx = op->ctx; 146 | op->ctx = NULL; 147 | op->error = 0; 148 | 149 | if (op->opcode == THAIO_NOOP) { 150 | state = PyGILState_Ensure(); 151 | op->ctx = NULL; 152 | Py_DECREF(ctx); 153 | Py_DECREF(op); 154 | PyGILState_Release(state); 155 | return; 156 | } 157 | 158 | int fileno = op->fileno; 159 | off_t offset = op->offset; 160 | int buf_size = op->buf_size; 161 | char* buf = op->buf; 162 | 163 | int result; 164 | 165 | switch (op->opcode) { 166 | case THAIO_WRITE: 167 | result = pwrite(fileno, (const char*) buf, buf_size, offset); 168 | break; 169 | case THAIO_FSYNC: 170 | result = fsync(fileno); 171 | break; 172 | case THAIO_FDSYNC: 173 | #ifdef HAVE_FDATASYNC 174 | result = fdatasync(fileno); 175 | #else 176 | result = fsync(fileno); 177 | #endif 178 | break; 179 | 180 | case THAIO_READ: 181 | result = pread(fileno, buf, buf_size, offset); 182 | break; 183 | } 184 | 185 | op->ctx = NULL; 186 | op->result = result; 187 | 188 | if (result < 0) op->error = errno; 189 | 190 | if (op->opcode == THAIO_READ) { 191 | op->buf_size = result; 192 | } 193 | state = PyGILState_Ensure(); 194 | if (op->callback != NULL) { 195 | PyObject_CallFunction(op->callback, "i", result); 196 | } 197 | 198 | if (op->opcode == THAIO_WRITE) { 199 | Py_DECREF(op->py_buffer); 200 | op->py_buffer = NULL; 201 | } 202 | 203 | Py_DECREF(ctx); 204 | Py_DECREF(op); 205 | 206 | PyGILState_Release(state); 207 | } 208 | 209 | 210 | inline int process_pool_error(int code) { 211 | switch (code) { 212 | case threadpool_invalid: 213 | PyErr_SetString( 214 | PyExc_RuntimeError, 215 | "Thread pool pointer is invalid" 216 | ); 217 | return code; 218 | case threadpool_lock_failure: 219 | PyErr_SetString( 220 | PyExc_RuntimeError, 221 | "Failed to lock thread pool" 222 | ); 223 | return code; 224 | case threadpool_queue_full: 225 | PyErr_Format( 226 | PyExc_RuntimeError, 227 | "Thread pool queue full" 228 | ); 229 | return code; 230 | case threadpool_shutdown: 231 | PyErr_SetString( 232 | PyExc_RuntimeError, 233 | "Thread pool is shutdown" 234 | ); 235 | return code; 236 | case threadpool_thread_failure: 237 | PyErr_SetString( 238 | PyExc_RuntimeError, 239 | "Thread failure" 240 | ); 241 | return code; 242 | } 243 | 244 | if (code < 0) PyErr_SetString(PyExc_RuntimeError, "Unknown error"); 245 | return code; 246 | } 247 | 248 | 249 | 250 | PyDoc_STRVAR(AIOContext_submit_docstring, 251 | 
"Accepts multiple Operations. Returns \n\n" 252 | " Operation.submit(aio_op1, aio_op2, aio_opN, ...) -> int" 253 | ); 254 | static PyObject* AIOContext_submit( 255 | AIOContext *self, PyObject *args 256 | ) { 257 | if (self == NULL) { 258 | PyErr_SetString(PyExc_RuntimeError, "self is NULL"); 259 | return NULL; 260 | } 261 | 262 | if (self->pool == NULL) { 263 | PyErr_SetString(PyExc_RuntimeError, "self->pool is NULL"); 264 | return NULL; 265 | } 266 | 267 | if (!PyTuple_Check(args)) { 268 | PyErr_SetNone(PyExc_ValueError); 269 | return NULL; 270 | } 271 | 272 | unsigned int nr = PyTuple_Size(args); 273 | 274 | PyObject* obj; 275 | AIOOperation* ops[nr]; 276 | unsigned int i; 277 | 278 | for (i=0; i < nr; i++) { 279 | obj = PyTuple_GetItem(args, i); 280 | if (PyObject_TypeCheck(obj, &AIOOperationType) == 0) { 281 | PyErr_Format( 282 | PyExc_TypeError, 283 | "Wrong type for argument %d", i 284 | ); 285 | 286 | return NULL; 287 | } 288 | 289 | ops[i] = (AIOOperation*) obj; 290 | ops[i]->ctx = (void*) self; 291 | } 292 | 293 | unsigned int j=0; 294 | int result = 0; 295 | 296 | for (i=0; i < nr; i++) { 297 | if (ops[i]->in_progress) continue; 298 | ops[i]->in_progress = 1; 299 | Py_INCREF(ops[i]); 300 | Py_INCREF(ops[i]->ctx); 301 | result = threadpool_add(self->pool, worker, (void*) ops[i], 0); 302 | if (process_pool_error(result) < 0) return NULL; 303 | j++; 304 | } 305 | 306 | return (PyObject*) PyLong_FromSsize_t(j); 307 | } 308 | 309 | 310 | PyDoc_STRVAR(AIOContext_cancel_docstring, 311 | "Cancels multiple Operations. Returns \n\n" 312 | " Operation.cancel(aio_op1, aio_op2, aio_opN, ...) -> int\n\n" 313 | "(Always returns zero, this method exists for compatibility reasons)" 314 | ); 315 | static PyObject* AIOContext_cancel( 316 | AIOContext *self, PyObject *args 317 | ) { 318 | return (PyObject*) PyLong_FromSsize_t(0); 319 | } 320 | 321 | 322 | /* 323 | AIOContext properties 324 | */ 325 | static PyMemberDef AIOContext_members[] = { 326 | { 327 | "pool_size", 328 | T_INT, 329 | offsetof(AIOContext, pool_size), 330 | READONLY, 331 | "pool_size" 332 | }, 333 | { 334 | "max_requests", 335 | T_USHORT, 336 | offsetof(AIOContext, max_requests), 337 | READONLY, 338 | "max requests" 339 | }, 340 | {NULL} /* Sentinel */ 341 | }; 342 | 343 | static PyMethodDef AIOContext_methods[] = { 344 | { 345 | "submit", 346 | (PyCFunction) AIOContext_submit, METH_VARARGS, 347 | AIOContext_submit_docstring 348 | }, 349 | { 350 | "cancel", 351 | (PyCFunction) AIOContext_cancel, METH_VARARGS, 352 | AIOContext_cancel_docstring 353 | }, 354 | {NULL} /* Sentinel */ 355 | }; 356 | 357 | static PyTypeObject 358 | AIOContextType = { 359 | PyVarObject_HEAD_INIT(NULL, 0) 360 | .tp_name = "Context", 361 | .tp_doc = "thread aio context", 362 | .tp_basicsize = sizeof(AIOContext), 363 | .tp_itemsize = 0, 364 | .tp_flags = Py_TPFLAGS_DEFAULT, 365 | .tp_new = AIOContext_new, 366 | .tp_init = (initproc) AIOContext_init, 367 | .tp_dealloc = (destructor) AIOContext_dealloc, 368 | .tp_members = AIOContext_members, 369 | .tp_methods = AIOContext_methods, 370 | .tp_repr = (reprfunc) AIOContext_repr 371 | }; 372 | 373 | 374 | static void 375 | AIOOperation_dealloc(AIOOperation *self) { 376 | Py_CLEAR(self->callback); 377 | 378 | if ((self->opcode == THAIO_READ) && self->buf != NULL) { 379 | PyMem_Free(self->buf); 380 | self->buf = NULL; 381 | } 382 | 383 | Py_CLEAR(self->py_buffer); 384 | Py_TYPE(self)->tp_free((PyObject *) self); 385 | } 386 | 387 | 388 | static PyObject* AIOOperation_repr(AIOOperation *self) { 389 | char* mode; 
390 | 391 | switch (self->opcode) { 392 | case THAIO_READ: 393 | mode = "read"; 394 | break; 395 | 396 | case THAIO_WRITE: 397 | mode = "write"; 398 | break; 399 | 400 | case THAIO_FSYNC: 401 | mode = "fsync"; 402 | break; 403 | 404 | case THAIO_FDSYNC: 405 | mode = "fdsync"; 406 | break; 407 | default: 408 | mode = "noop"; 409 | break; 410 | } 411 | 412 | return PyUnicode_FromFormat( 413 | "<%s at %p: mode=\"%s\", fd=%i, offset=%i, result=%i, buffer=%p>", 414 | Py_TYPE(self)->tp_name, self, mode, 415 | self->fileno, self->offset, self->result, self->buf 416 | ); 417 | } 418 | 419 | 420 | /* 421 | AIOOperation.read classmethod definition 422 | */ 423 | PyDoc_STRVAR(AIOOperation_read_docstring, 424 | "Creates a new instance of Operation on read mode.\n\n" 425 | " Operation.read(\n" 426 | " nbytes: int,\n" 427 | " aio_context: Context,\n" 428 | " fd: int, \n" 429 | " offset: int,\n" 430 | " priority=0\n" 431 | " )" 432 | ); 433 | 434 | static PyObject* AIOOperation_read( 435 | PyTypeObject *type, PyObject *args, PyObject *kwds 436 | ) { 437 | AIOOperation *self = (AIOOperation *) type->tp_alloc(type, 0); 438 | 439 | static char *kwlist[] = {"nbytes", "fd", "offset", "priority", NULL}; 440 | 441 | if (self == NULL) { 442 | PyErr_SetString(PyExc_MemoryError, "can not allocate memory"); 443 | return NULL; 444 | } 445 | 446 | self->buf = NULL; 447 | self->py_buffer = NULL; 448 | self->in_progress = 0; 449 | 450 | uint64_t nbytes = 0; 451 | uint16_t priority; 452 | 453 | int argIsOk = PyArg_ParseTupleAndKeywords( 454 | args, kwds, "KI|LH", kwlist, 455 | &nbytes, 456 | &(self->fileno), 457 | &(self->offset), 458 | &priority 459 | ); 460 | 461 | if (!argIsOk) return NULL; 462 | 463 | self->buf = PyMem_Calloc(nbytes, sizeof(char)); 464 | self->buf_size = nbytes; 465 | 466 | self->py_buffer = PyMemoryView_FromMemory( 467 | self->buf, 468 | self->buf_size, 469 | PyBUF_READ 470 | ); 471 | 472 | self->opcode = THAIO_READ; 473 | 474 | return (PyObject*) self; 475 | } 476 | 477 | /* 478 | AIOOperation.write classmethod definition 479 | */ 480 | PyDoc_STRVAR(AIOOperation_write_docstring, 481 | "Creates a new instance of Operation on write mode.\n\n" 482 | " Operation.write(\n" 483 | " payload_bytes: bytes,\n" 484 | " fd: int, \n" 485 | " offset: int,\n" 486 | " priority=0\n" 487 | " )" 488 | ); 489 | 490 | static PyObject* AIOOperation_write( 491 | PyTypeObject *type, PyObject *args, PyObject *kwds 492 | ) { 493 | AIOOperation *self = (AIOOperation *) type->tp_alloc(type, 0); 494 | 495 | static char *kwlist[] = {"payload_bytes", "fd", "offset", "priority", NULL}; 496 | 497 | if (self == NULL) { 498 | PyErr_SetString(PyExc_MemoryError, "can not allocate memory"); 499 | return NULL; 500 | } 501 | 502 | // unused 503 | uint16_t priority; 504 | 505 | self->buf = NULL; 506 | self->py_buffer = NULL; 507 | self->in_progress = 0; 508 | 509 | int argIsOk = PyArg_ParseTupleAndKeywords( 510 | args, kwds, "OI|LH", kwlist, 511 | &(self->py_buffer), 512 | &(self->fileno), 513 | &(self->offset), 514 | &priority 515 | ); 516 | 517 | if (!argIsOk) return NULL; 518 | 519 | if (!PyBytes_Check(self->py_buffer)) { 520 | Py_XDECREF(self); 521 | PyErr_SetString( 522 | PyExc_ValueError, 523 | "payload_bytes argument must be bytes" 524 | ); 525 | return NULL; 526 | } 527 | 528 | self->opcode = THAIO_WRITE; 529 | 530 | if (PyBytes_AsStringAndSize( 531 | self->py_buffer, 532 | &self->buf, 533 | &self->buf_size 534 | )) { 535 | Py_XDECREF(self); 536 | PyErr_SetString( 537 | PyExc_RuntimeError, 538 | "Can not convert bytes to c 
string" 539 | ); 540 | return NULL; 541 | } 542 | 543 | Py_INCREF(self->py_buffer); 544 | 545 | return (PyObject*) self; 546 | } 547 | 548 | 549 | /* 550 | AIOOperation.fsync classmethod definition 551 | */ 552 | PyDoc_STRVAR(AIOOperation_fsync_docstring, 553 | "Creates a new instance of Operation on fsync mode.\n\n" 554 | " Operation.fsync(\n" 555 | " aio_context: AIOContext,\n" 556 | " fd: int, \n" 557 | " priority=0\n" 558 | " )" 559 | ); 560 | 561 | static PyObject* AIOOperation_fsync( 562 | PyTypeObject *type, PyObject *args, PyObject *kwds 563 | ) { 564 | AIOOperation *self = (AIOOperation *) type->tp_alloc(type, 0); 565 | 566 | static char *kwlist[] = {"fd", "priority", NULL}; 567 | 568 | if (self == NULL) { 569 | PyErr_SetString(PyExc_MemoryError, "can not allocate memory"); 570 | return NULL; 571 | } 572 | 573 | uint16_t priority; 574 | 575 | self->buf = NULL; 576 | self->py_buffer = NULL; 577 | self->in_progress = 0; 578 | 579 | int argIsOk = PyArg_ParseTupleAndKeywords( 580 | args, kwds, "I|H", kwlist, 581 | &(self->fileno), 582 | &priority 583 | ); 584 | 585 | if (!argIsOk) return NULL; 586 | 587 | self->opcode = THAIO_FSYNC; 588 | 589 | return (PyObject*) self; 590 | } 591 | 592 | 593 | /* 594 | AIOOperation.fdsync classmethod definition 595 | */ 596 | PyDoc_STRVAR(AIOOperation_fdsync_docstring, 597 | "Creates a new instance of Operation on fdsync mode.\n\n" 598 | " Operation.fdsync(\n" 599 | " aio_context: AIOContext,\n" 600 | " fd: int, \n" 601 | " priority=0\n" 602 | " )" 603 | ); 604 | 605 | static PyObject* AIOOperation_fdsync( 606 | PyTypeObject *type, PyObject *args, PyObject *kwds 607 | ) { 608 | AIOOperation *self = (AIOOperation *) type->tp_alloc(type, 0); 609 | 610 | static char *kwlist[] = {"fd", "priority", NULL}; 611 | 612 | if (self == NULL) { 613 | PyErr_SetString(PyExc_MemoryError, "can not allocate memory"); 614 | return NULL; 615 | } 616 | 617 | self->buf = NULL; 618 | self->py_buffer = NULL; 619 | self->in_progress = 0; 620 | uint16_t priority; 621 | 622 | int argIsOk = PyArg_ParseTupleAndKeywords( 623 | args, kwds, "I|H", kwlist, 624 | &(self->fileno), 625 | &priority 626 | ); 627 | 628 | if (!argIsOk) return NULL; 629 | 630 | self->opcode = THAIO_FDSYNC; 631 | 632 | return (PyObject*) self; 633 | } 634 | 635 | /* 636 | AIOOperation.get_value method definition 637 | */ 638 | PyDoc_STRVAR(AIOOperation_get_value_docstring, 639 | "Method returns a bytes value of Operation's result or None.\n\n" 640 | " Operation.get_value() -> Optional[bytes]" 641 | ); 642 | 643 | static PyObject* AIOOperation_get_value( 644 | AIOOperation *self, PyObject *args, PyObject *kwds 645 | ) { 646 | if (self->error != 0) { 647 | PyErr_SetString( 648 | PyExc_SystemError, 649 | strerror(self->error) 650 | ); 651 | 652 | return NULL; 653 | } 654 | 655 | switch (self->opcode) { 656 | case THAIO_READ: 657 | return PyBytes_FromStringAndSize( 658 | self->buf, self->buf_size 659 | ); 660 | 661 | case THAIO_WRITE: 662 | return PyLong_FromSsize_t(self->result); 663 | } 664 | 665 | return Py_None; 666 | } 667 | 668 | 669 | /* 670 | AIOOperation.get_value method definition 671 | */ 672 | PyDoc_STRVAR(AIOOperation_set_callback_docstring, 673 | "Set callback which will be called after Operation will be finished.\n\n" 674 | " Operation.get_value() -> Optional[bytes]" 675 | ); 676 | 677 | static PyObject* AIOOperation_set_callback( 678 | AIOOperation *self, PyObject *args, PyObject *kwds 679 | ) { 680 | static char *kwlist[] = {"callback", NULL}; 681 | 682 | PyObject* callback; 683 | 684 | int 
argIsOk = PyArg_ParseTupleAndKeywords( 685 | args, kwds, "O", kwlist, 686 | &callback 687 | ); 688 | 689 | if (!argIsOk) return NULL; 690 | 691 | if (!PyCallable_Check(callback)) { 692 | PyErr_Format( 693 | PyExc_ValueError, 694 | "object %r is not callable", 695 | callback 696 | ); 697 | return NULL; 698 | } 699 | 700 | Py_INCREF(callback); 701 | self->callback = callback; 702 | 703 | Py_RETURN_TRUE; 704 | } 705 | 706 | /* 707 | AIOOperation properties 708 | */ 709 | static PyMemberDef AIOOperation_members[] = { 710 | { 711 | "fileno", T_UINT, 712 | offsetof(AIOOperation, fileno), 713 | READONLY, "file descriptor" 714 | }, 715 | { 716 | "offset", T_ULONGLONG, 717 | offsetof(AIOOperation, offset), 718 | READONLY, "offset" 719 | }, 720 | { 721 | "payload", T_OBJECT, 722 | offsetof(AIOOperation, py_buffer), 723 | READONLY, "payload" 724 | }, 725 | { 726 | "nbytes", T_ULONGLONG, 727 | offsetof(AIOOperation, buf_size), 728 | READONLY, "nbytes" 729 | }, 730 | { 731 | "result", T_INT, 732 | offsetof(AIOOperation, result), 733 | READONLY, "result" 734 | }, 735 | { 736 | "error", T_INT, 737 | offsetof(AIOOperation, error), 738 | READONLY, "error" 739 | }, 740 | {NULL} /* Sentinel */ 741 | }; 742 | 743 | /* 744 | AIOOperation methods 745 | */ 746 | static PyMethodDef AIOOperation_methods[] = { 747 | { 748 | "read", 749 | (PyCFunction) AIOOperation_read, 750 | METH_CLASS | METH_VARARGS | METH_KEYWORDS, 751 | AIOOperation_read_docstring 752 | }, 753 | { 754 | "write", 755 | (PyCFunction) AIOOperation_write, 756 | METH_CLASS | METH_VARARGS | METH_KEYWORDS, 757 | AIOOperation_write_docstring 758 | }, 759 | { 760 | "fsync", 761 | (PyCFunction) AIOOperation_fsync, 762 | METH_CLASS | METH_VARARGS | METH_KEYWORDS, 763 | AIOOperation_fsync_docstring 764 | }, 765 | { 766 | "fdsync", 767 | (PyCFunction) AIOOperation_fdsync, 768 | METH_CLASS | METH_VARARGS | METH_KEYWORDS, 769 | AIOOperation_fdsync_docstring 770 | }, 771 | { 772 | "get_value", 773 | (PyCFunction) AIOOperation_get_value, METH_NOARGS, 774 | AIOOperation_get_value_docstring 775 | }, 776 | { 777 | "set_callback", 778 | (PyCFunction) AIOOperation_set_callback, METH_VARARGS | METH_KEYWORDS, 779 | AIOOperation_set_callback_docstring 780 | }, 781 | {NULL} /* Sentinel */ 782 | }; 783 | 784 | /* 785 | AIOOperation class 786 | */ 787 | static PyTypeObject 788 | AIOOperationType = { 789 | PyVarObject_HEAD_INIT(NULL, 0) 790 | .tp_name = "aio.AIOOperation", 791 | .tp_doc = "thread aio operation representation", 792 | .tp_basicsize = sizeof(AIOOperation), 793 | .tp_itemsize = 0, 794 | .tp_flags = Py_TPFLAGS_DEFAULT, 795 | .tp_dealloc = (destructor) AIOOperation_dealloc, 796 | .tp_members = AIOOperation_members, 797 | .tp_methods = AIOOperation_methods, 798 | .tp_repr = (reprfunc) AIOOperation_repr 799 | }; 800 | 801 | 802 | static PyModuleDef thread_aio_module = { 803 | PyModuleDef_HEAD_INIT, 804 | .m_name = "thread_aio", 805 | .m_doc = "Thread based AIO.", 806 | .m_size = -1, 807 | }; 808 | 809 | 810 | PyMODINIT_FUNC PyInit_thread_aio(void) { 811 | Py_Initialize(); 812 | 813 | PyObject *m; 814 | 815 | m = PyModule_Create(&thread_aio_module); 816 | 817 | if (m == NULL) return NULL; 818 | 819 | if (PyType_Ready(&AIOContextType) < 0) return NULL; 820 | 821 | Py_INCREF(&AIOContextType); 822 | 823 | if (PyModule_AddObject(m, "Context", (PyObject *) &AIOContextType) < 0) { 824 | Py_XDECREF(&AIOContextType); 825 | Py_XDECREF(m); 826 | return NULL; 827 | } 828 | 829 | if (PyType_Ready(&AIOOperationType) < 0) return NULL; 830 | 831 | Py_INCREF(&AIOOperationType); 
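AIOContext_submit above enqueues every operation that is not already in progress and returns how many it actually queued; failures surface later, when AIOOperation_get_value raises SystemError built from the errno stored by the worker thread. A hedged sketch combining both behaviours against the thread backend: the file name, the semaphore-based wait and the deliberately read-only descriptor are illustrative only.

import os
import threading
from caio.thread_aio import Context, Operation

ctx = Context(max_requests=512, pool_size=8)      # the defaults applied by AIOContext_init
good_fd = os.open("demo.bin", os.O_RDWR | os.O_CREAT, 0o644)
bad_fd = os.open("demo.bin", os.O_RDONLY)         # pwrite() through it fails with EBADF

pending = threading.Semaphore(0)
ops = [
    Operation.write(b"x" * 4096, good_fd, 0),
    Operation.write(b"x" * 4096, good_fd, 4096),
    Operation.write(b"broken", bad_fd, 0),
]
for op in ops:
    op.set_callback(lambda result: pending.release())   # worker() calls this with the raw result

submitted = ctx.submit(*ops)        # number of operations actually queued
for _ in range(submitted):
    pending.acquire()               # one release per completion callback

for op in ops:
    try:
        print("written:", op.get_value())
    except SystemError as exc:      # strerror(errno) captured by the worker thread
        print("failed:", exc)

os.close(good_fd)
os.close(bad_fd)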
832 | 833 | if (PyModule_AddObject(m, "Operation", (PyObject *) &AIOOperationType) < 0) { 834 | Py_XDECREF(&AIOOperationType); 835 | Py_XDECREF(m); 836 | return NULL; 837 | } 838 | 839 | return m; 840 | } 841 | -------------------------------------------------------------------------------- /caio/thread_aio.pyi: -------------------------------------------------------------------------------- 1 | from typing import Callable, Any, Union, Optional 2 | 3 | from .abstract import AbstractContext, AbstractOperation 4 | 5 | 6 | # noinspection PyPropertyDefinition 7 | class Context(AbstractContext): 8 | def __init__(self, max_requests: int = 32, pool_size=8): ... 9 | 10 | 11 | # noinspection PyPropertyDefinition 12 | class Operation(AbstractOperation): 13 | @classmethod 14 | def read( 15 | cls, nbytes: int, fd: int, offset: int, priority=0 16 | ) -> "AbstractOperation": ... 17 | 18 | @classmethod 19 | def write( 20 | cls, payload_bytes: bytes, 21 | fd: int, offset: int, priority=0, 22 | ) -> "AbstractOperation": ... 23 | 24 | @classmethod 25 | def fsync(cls, fd: int, priority=0) -> "AbstractOperation": ... 26 | 27 | @classmethod 28 | def fdsync(cls, fd: int, priority=0) -> "AbstractOperation": ... 29 | 30 | def get_value(self) -> Union[bytes, int]: ... 31 | 32 | @property 33 | def fileno(self) -> int: ... 34 | 35 | @property 36 | def offset(self) -> int: ... 37 | 38 | @property 39 | def payload(self) -> Optional[Union[bytes, memoryview]]: ... 40 | 41 | @property 42 | def nbytes(self) -> int: ... 43 | 44 | def set_callback(self, callback: Callable[[int], Any]) -> bool: ... 45 | -------------------------------------------------------------------------------- /caio/thread_aio_asyncio.py: -------------------------------------------------------------------------------- 1 | from .asyncio_base import AsyncioContextBase 2 | from .thread_aio import Context, Operation 3 | 4 | 5 | class AsyncioContext(AsyncioContextBase): 6 | MAX_REQUESTS_DEFAULT = 512 7 | OPERATION_CLASS = Operation 8 | CONTEXT_CLASS = Context 9 | -------------------------------------------------------------------------------- /caio/version.py: -------------------------------------------------------------------------------- 1 | author_info = (("Dmitry Orlov", "me@mosquito.su"),) 2 | 3 | package_info = "Asynchronous file IO for Linux MacOS or Windows." 
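Each backend ships its own asyncio adapter (linux_aio_asyncio, thread_aio_asyncio and python_aio_asyncio all export an AsyncioContext), so a specific implementation can be selected by importing it directly instead of relying on the package default. A short sketch assuming only the adapter API used elsewhere in this repository; the file name "data.bin" is illustrative.

import asyncio
from caio.thread_aio_asyncio import AsyncioContext   # or linux_aio_asyncio / python_aio_asyncio

async def main():
    # MAX_REQUESTS_DEFAULT for this adapter is 512 (see thread_aio_asyncio.py above)
    async with AsyncioContext(max_requests=512) as ctx:
        with open("data.bin", "wb+") as fp:
            fd = fp.fileno()
            await ctx.write(b"payload", fd, offset=0)
            await ctx.fdsync(fd)
            print(await ctx.read(7, fd, offset=0))    # b"payload"

asyncio.run(main())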
4 | package_license = "Apache Software License" 5 | 6 | team_email = author_info[0][1] 7 | 8 | version_info = (0, 9, 24) 9 | 10 | __author__ = ", ".join("{} <{}>".format(*info) for info in author_info) 11 | __version__ = ".".join(map(str, version_info)) 12 | 13 | 14 | __all__ = ( 15 | "author_info", 16 | "package_info", 17 | "package_license", 18 | "team_email", 19 | "version_info", 20 | "__author__", 21 | "__version__", 22 | ) 23 | -------------------------------------------------------------------------------- /example.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from caio import AsyncioContext 3 | 4 | 5 | loop = asyncio.get_event_loop() 6 | 7 | 8 | async def main(): 9 | # max_requests=128 by default 10 | ctx = AsyncioContext(max_requests=128) 11 | 12 | with open("test.file", "wb+") as fp: 13 | fd = fp.fileno() 14 | 15 | # Execute one write operation 16 | await ctx.write(b"Hello world", fd, offset=0) 17 | 18 | # Execute one read operation 19 | print(await ctx.read(32, fd, offset=0)) 20 | 21 | # Execute one fdsync operation 22 | await ctx.fdsync(fd) 23 | 24 | op1 = ctx.write(b"Hello from ", fd, offset=0) 25 | op2 = ctx.write(b"async world", fd, offset=11) 26 | 27 | await asyncio.gather(op1, op2) 28 | 29 | print(await ctx.read(32, fd, offset=0)) 30 | # Hello from async world 31 | 32 | 33 | loop.run_until_complete(main()) 34 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=77"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "caio" 7 | license = "Apache-2.0" 8 | license-files = ["COPYING"] 9 | description = "Asynchronous file IO for Linux MacOS or Windows." 
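example.py above builds a loop with asyncio.get_event_loop() and drives it via run_until_complete(); newer Python releases steer away from creating an implicit event loop that way, and asyncio.run() is the recommended entry point. A trimmed variant of the same program, changing only how the coroutine is launched and keeping the caio calls exactly as shown above:

import asyncio
from caio import AsyncioContext

async def main():
    ctx = AsyncioContext(max_requests=128)
    with open("test.file", "wb+") as fp:
        fd = fp.fileno()
        await ctx.write(b"Hello world", fd, offset=0)
        print(await ctx.read(32, fd, offset=0))
        await ctx.fdsync(fd)

if __name__ == "__main__":
    asyncio.run(main())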
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools>=77"]
3 | build-backend = "setuptools.build_meta"
4 | 
5 | [project]
6 | name = "caio"
7 | license = "Apache-2.0"
8 | license-files = ["COPYING"]
9 | description = "Asynchronous file IO for Linux, MacOS or Windows."
10 | readme = "README.md"
11 | authors = [{ name = "Dmitry Orlov", email = "me@mosquito.su"}]
12 | requires-python = ">=3.9"
13 | classifiers = [
14 |     "Topic :: Software Development",
15 |     "Topic :: Software Development :: Libraries",
16 |     "Intended Audience :: Developers",
17 |     "Natural Language :: English",
18 |     "Operating System :: MacOS",
19 |     "Operating System :: POSIX",
20 |     "Operating System :: Microsoft",
21 |     "Programming Language :: Python",
22 |     "Programming Language :: Python :: 3",
23 |     "Programming Language :: Python :: 3.9",
24 |     "Programming Language :: Python :: 3.10",
25 |     "Programming Language :: Python :: 3.11",
26 |     "Programming Language :: Python :: 3.12",
27 |     "Programming Language :: Python :: 3.13",
28 |     "Programming Language :: Python :: Implementation :: CPython",
29 | ]
30 | dynamic = ["version"]
31 | 
32 | [project.urls]
33 | "Source Code" = "https://github.com/mosquito/caio/"
34 | 
35 | [project.optional-dependencies]
36 | develop = [
37 |     "aiomisc-pytest",
38 |     "coveralls",
39 |     "pylama[toml]",
40 |     "pytest",
41 |     "pytest-cov",
42 |     "setuptools",
43 | ]
44 | 
45 | [tool.setuptools.packages.find]
46 | include = ["caio*"]
47 | 
48 | [tool.setuptools.dynamic]
49 | version = { attr = "caio.version.__version__" }
50 | 
51 | [tool.pylama]
52 | ignore = "C901,E252"
53 | 
54 | [tool.pylama.linter.pycodestyle]
55 | max_line_length = 80
56 | 
57 | [tool.tox]
58 | requires = ["tox>=4.19"]
59 | env_list = ["lint", "mypy", "3.9", "3.10", "3.11", "3.12", "3.13"]
60 | 
61 | [tool.tox.env_run_base]
62 | pass_env = ["COVERALLS_*", "FORCE_COLOR", "GITHUB_*"]
63 | extras = ["develop"]
64 | usedevelop = true
65 | commands = [
66 |     ["pytest", "--cov=caio", "--cov-report=term-missing", "-sv", "tests"],
67 |     ["-", "coveralls"]
68 | ]
69 | 
70 | [tool.tox.env.lint]
71 | description = "Run linting"
72 | deps = ["pyflakes~=2.4.0", "pylava"]
73 | commands = [["pylama", "caio", "tests"]]
74 | 
75 | [tool.tox.env.mypy]
76 | description = "Run type checking"
77 | deps = ["mypy"]
78 | commands = [
79 |     ["mypy", "--allow-untyped-calls", "--allow-untyped-defs", "--allow-untyped-decorators", "caio"]
80 | ]
81 | 
--------------------------------------------------------------------------------
/scripts/make-wheels.sh:
--------------------------------------------------------------------------------
1 | set -ex
2 | 
3 | mkdir -p dist
4 | 
5 | MACHINE=$(/opt/python/cp311-cp311/bin/python3 -c 'import platform; print(platform.machine())')
6 | 
7 | function build_wheel() {
8 |     /opt/python/$1/bin/pip install build
9 |     /opt/python/$1/bin/python -m build --wheel
10 | }
11 | 
12 | build_wheel cp39-cp39
13 | build_wheel cp310-cp310
14 | build_wheel cp311-cp311
15 | build_wheel cp312-cp312
16 | build_wheel cp313-cp313
17 | 
18 | cd dist
19 | 
20 | for f in ./*-linux*_${MACHINE}*;
21 |   do if [ -f $f ]; then auditwheel repair $f -w .; rm $f; fi;
22 | done
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import platform
2 | 
3 | from setuptools import Extension, setup
4 | 
5 | 
6 | module_name = "caio"
7 | 
8 | 
9 | OS_NAME = platform.system().lower()
10 | extensions = []
11 | 
12 | 
13 | if "linux" in OS_NAME:
14 |     extensions.append(
15 |         Extension(
16 |             "{}.thread_aio".format(module_name),
17 |             [
18 |                 "{}/thread_aio.c".format(module_name),
19 |                 "{}/src/threadpool/threadpool.c".format(module_name),
20 |             ],
21 |             extra_compile_args=["-g", "-DHAVE_FDATASYNC"],
22 |         ),
23 |     )
24 | elif "darwin" in OS_NAME:
25 |     extensions.append(
26 |         Extension(
27 |             "{}.thread_aio".format(module_name),
28 |             [
29 |                 "{}/thread_aio.c".format(module_name),
30 |                 "{}/src/threadpool/threadpool.c".format(module_name),
31 |             ],
32 |             extra_compile_args=["-g"],
33 |         ),
34 |     )
35 | if "linux" in OS_NAME:
36 |     extensions.append(
37 |         Extension(
38 |             "{}.linux_aio".format(module_name),
39 |             ["{}/linux_aio.c".format(module_name)],
40 |             extra_compile_args=["-g"],
41 |         ),
42 |     )
43 | 
44 | 
45 | setup(
46 |     ext_modules=extensions,
47 | )
48 | 
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | from caio import variants, variants_asyncio
4 | 
5 | 
6 | @pytest.fixture(params=variants)
7 | def context_maker(request):
8 |     return request.param.Context
9 | 
10 | 
11 | @pytest.fixture(params=variants_asyncio)
12 | def async_context_maker(request):
13 |     return request.param.AsyncioContext
14 | 
--------------------------------------------------------------------------------
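
conftest.py parametrizes both fixtures over caio.variants and caio.variants_asyncio, so each test below runs once per implementation that could be imported on the current platform. Which backends that is locally can be checked by listing the exposed module objects (a short sketch that relies only on the names imported above and matched in test_impl_selector.py):

    import caio

    # e.g. ['caio.linux_aio', 'caio.thread_aio', 'caio.python_aio'] on Linux
    print([variant.__name__ for variant in caio.variants])
    print([variant.__name__ for variant in caio.variants_asyncio])
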
/tests/test_aio_context.py:
--------------------------------------------------------------------------------
1 | 
2 | def test_aio_context(context_maker):
3 |     ctx = context_maker()
4 |     assert ctx is not None
5 | 
6 |     ctx = context_maker(1)
7 |     assert ctx is not None
8 | 
9 |     ctx = context_maker(32218)
10 |     assert ctx is not None
11 | 
--------------------------------------------------------------------------------
/tests/test_asyncio_adapter.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import hashlib
3 | import os
4 | from unittest.mock import Mock
5 | 
6 | import aiomisc
7 | import pytest
8 | 
9 | 
10 | @aiomisc.timeout(5)
11 | async def test_adapter(tmp_path, async_context_maker):
12 |     async with async_context_maker() as context:
13 |         with open(str(tmp_path / "temp.bin"), "wb+") as fp:
14 |             fd = fp.fileno()
15 | 
16 |             assert await context.read(32, fd, 0) == b""
17 |             s = b"Hello world"
18 |             assert await context.write(s, fd, 0) == len(s)
19 |             assert await context.read(32, fd, 0) == s
20 | 
21 |             s = b"Hello real world"
22 |             assert await context.write(s, fd, 0) == len(s)
23 |             assert await context.read(32, fd, 0) == s
24 | 
25 |             part = b"\x00\x01\x02\x03"
26 |             limit = 32
27 |             expected_hash = hashlib.md5(part * limit).hexdigest()
28 | 
29 |             await asyncio.gather(
30 |                 *[context.write(part, fd, len(part) * i) for i in range(limit)]
31 |             )
32 | 
33 |             await context.fdsync(fd)
34 | 
35 |             data = await context.read(limit * len(part), fd, 0)
36 |             assert data == part * limit
37 | 
38 |             assert hashlib.md5(bytes(data)).hexdigest() == expected_hash
39 | 
40 | 
41 | @aiomisc.timeout(3)
42 | async def test_bad_file_descriptor(tmp_path, async_context_maker):
43 |     async with async_context_maker() as context:
44 |         with open(str(tmp_path / "temp.bin"), "wb+") as fp:
45 |             fd = fp.fileno()
46 | 
47 |         with pytest.raises((SystemError, OSError, AssertionError, ValueError)):
48 |             assert await context.read(1, fd, 0) == b""
49 | 
50 |         with pytest.raises((SystemError, OSError, AssertionError, ValueError)):
51 |             assert await context.write(b"hello", fd, 0)
52 | 
53 | 
54 | @pytest.fixture
55 | async def asyncio_exception_handler(event_loop):
56 |     handler = Mock(
57 |         side_effect=lambda _loop, ctx: _loop.default_exception_handler(ctx)
58 |     )
59 |     current_handler = event_loop.get_exception_handler()
60 |     event_loop.set_exception_handler(handler=handler)
61 |     yield handler
62 |     event_loop.set_exception_handler(current_handler)
63 | 
64 | 
65 | @aiomisc.timeout(3)
66 | async def test_operations_cancel_cleanly(
67 |     tmp_path, async_context_maker, asyncio_exception_handler
68 | ):
69 |     async with async_context_maker() as context:
70 |         with open(str(tmp_path / "temp.bin"), "wb+") as fp:
71 |             fd = fp.fileno()
72 | 
73 |             await context.write(b"\x00", fd, 1024**2 - 1)
74 |             assert os.stat(fd).st_size == 1024**2
75 | 
76 |             for _ in range(50):
77 |                 reads = [
78 |                     asyncio.create_task(context.read(2**16, fd, 2**16 * i))
79 |                     for i in range(16)
80 |                 ]
81 |                 _, pending = await asyncio.wait(
82 |                     reads, return_when=asyncio.FIRST_COMPLETED
83 |                 )
84 |                 for read in pending:
85 |                     read.cancel()
86 |                 if pending:
87 |                     await asyncio.wait(pending)
88 |     asyncio_exception_handler.assert_not_called()
89 | 
--------------------------------------------------------------------------------
/tests/test_impl_selector.py:
--------------------------------------------------------------------------------
1 | import os
2 | import platform
3 | import sys
4 | from subprocess import check_output
5 | 
6 | import caio
7 | import pytest
8 | 
9 | 
10 | @pytest.fixture(params=caio.variants)
11 | def implementation(request):
12 |     if request.param is caio.linux_aio:
13 |         return "linux"
14 |     if request.param is caio.thread_aio:
15 |         return "thread"
16 |     if request.param is caio.python_aio:
17 |         return "python"
18 | 
19 |     raise RuntimeError("Unknown variant %r" % (request.param,))
20 | 
21 | 
22 | @pytest.mark.skipif(platform.system() == 'Windows', reason="Windows skip")
23 | def test_env_selector(implementation):
24 |     output = check_output(
25 |         [
26 |             sys.executable,
27 |             "-c",
28 |             "import caio, inspect; print(caio.Context.__doc__)"
29 |         ],
30 |         env={"CAIO_IMPL": implementation}
31 |     ).decode()
32 | 
33 |     assert implementation in output, output
34 | 
35 | 
36 | @pytest.fixture()
37 | def implementation_file(implementation):
38 |     path = os.path.dirname(caio.__file__)
39 |     fname = os.path.join(path, "default_implementation")
40 | 
41 |     try:
42 |         with open(fname, "w") as fp:
43 |             fp.write("# NEVER COMMIT THIS FILE")
44 |             fp.write("\nwrong string\n")
45 |             fp.write(implementation)
46 |             fp.write("\n\n")
47 |         yield implementation
48 |     finally:
49 |         os.remove(fname)
50 | 
51 | 
52 | def test_file_selector(implementation_file):
53 |     output = check_output(
54 |         [
55 |             sys.executable,
56 |             "-c",
57 |             "import caio, inspect; print(caio.Context.__doc__)"
58 |         ],
59 |     ).decode()
60 | 
61 |     assert implementation_file in output, output
62 | 
--------------------------------------------------------------------------------
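
test_impl_selector.py exercises the two selection mechanisms: the CAIO_IMPL environment variable and a default_implementation file inside the installed caio package. Because the tests inspect caio.Context.__doc__ in a subprocess, the variable is evidently read when caio is first imported; a minimal sketch of forcing the threaded backend in one's own program (assuming caio has not been imported earlier in the process):

    import os

    # Must be set before the first "import caio".
    os.environ["CAIO_IMPL"] = "thread"  # other values used by the tests: "linux", "python"

    import caio

    print(caio.Context.__doc__)  # the docstring names the selected implementation
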