├── .flake8
├── .git_archival.txt
├── .gitattributes
├── .github
│   ├── dependabot.yml
│   └── workflows
│       ├── ci.yml
│       └── periodic.yml
├── .gitignore
├── .readthedocs.yml
├── CHEATSHEET.rst
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── LICENSE.APACHE2
├── LICENSE.MIT
├── README.rst
├── _trio_parallel_workers
│   ├── __init__.py
│   └── _funcs.py
├── docs
│   ├── Makefile
│   ├── make.bat
│   └── source
│       ├── _static
│       │   ├── .gitkeep
│       │   └── custom.css
│       ├── conf.py
│       ├── examples.rst
│       ├── examples
│       │   ├── async_parallel_pipeline.py
│       │   ├── cache_warmup.py
│       │   ├── cancellation.py
│       │   ├── checkpointing.py
│       │   ├── minimal.py
│       │   ├── parallel_loops.py
│       │   ├── parallel_map.py
│       │   └── single_use_workers.py
│       ├── history.rst
│       ├── index.rst
│       ├── reference.rst
│       └── test_examples.py
├── newsfragments
│   ├── .gitkeep
│   └── README.rst
├── pyproject.toml
├── requirements
│   ├── coverage.in
│   ├── coverage.txt
│   ├── dev.in
│   ├── dev.txt
│   ├── docs.in
│   ├── docs.txt
│   ├── install.in
│   ├── install.txt
│   ├── lint.in
│   ├── lint.txt
│   ├── test.in
│   └── test.txt
└── trio_parallel
    ├── __init__.py
    ├── _abc.py
    ├── _impl.py
    ├── _posix_pipes.py
    ├── _proc.py
    ├── _tests
    │   ├── __init__.py
    │   ├── conftest.py
    │   ├── test_cache.py
    │   ├── test_defaults.py
    │   ├── test_impl.py
    │   ├── test_proc.py
    │   └── test_worker.py
    ├── _windows_cffi.py
    └── _windows_pipes.py
/.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | no-checkpoint-warning-decorators = pytest.fixture, asynccontextmanager 3 | -------------------------------------------------------------------------------- /.git_archival.txt: -------------------------------------------------------------------------------- 1 | node: 58aed5f51b04331c8fcc987f76fad33f82fcea8a 2 | node-date: 2025-05-01T23:17:54Z 3 | describe-name: 1.3.0-24-g58aed5f5 4 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | .git_archival.txt export-subst -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # Specify location of manifest files for each package manager 2 | 3 | version: 2 4 | updates: 5 | - package-ecosystem: "github-actions" 6 | # Workflow files stored in the 7 | # default location of `.github/workflows` 8 | directory: "/" 9 | schedule: 10 | interval: "monthly" -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | release: 9 | types: [ published ] 10 | 11 | jobs: 12 | Merge-Dependabot: 13 | if: github.event_name == 'pull_request' 14 | timeout-minutes: 1 15 | runs-on: ubuntu-latest 16 | permissions: 17 | pull-requests: write 18 | contents: write 19 | steps: 20 | - uses: fastify/github-action-merge-dependabot@v3.11.1 21 | with: 22 | merge-method: merge 23 | use-github-auto-merge: true 24 | 25 | Verify: 26 | name: 'Verify requirement pinning' 27 | timeout-minutes: 2 28 | runs-on: 'ubuntu-latest' 29 | steps: 30 | - name: Checkout 31 | uses: actions/checkout@v4.2.2 32 | - name: Setup python 33 | uses: actions/setup-python@v5.6.0 34 | with: 35 | python-version: "3.13" 36 | - name: Install requirements 37 | run: | 38 | python -m pip install -U pip 39 | # Optimistically assume SHA checks won't change much 40 | python -m pip
install pip-compile-multi 41 | - name: Verify requirements 42 | run: pip-compile-multi verify 43 | 44 | Blacken: 45 | name: 'Formatting and linting' 46 | timeout-minutes: 2 47 | runs-on: 'ubuntu-latest' 48 | steps: 49 | - name: Checkout 50 | uses: actions/checkout@v4.2.2 51 | - name: Setup python 52 | uses: actions/setup-python@v5.6.0 53 | with: 54 | cache: pip 55 | cache-dependency-path: requirements/lint.txt 56 | python-version: "3.13" 57 | - name: Install requirements 58 | run: | 59 | python -m pip install -U pip 60 | python -m pip install -r requirements/lint.txt 61 | - name: Run Black 62 | id: black 63 | run: black --check --diff trio_parallel _trio_parallel_workers 64 | continue-on-error: true 65 | - name: Run flake8 66 | id: flake8 67 | run: | 68 | flake8 --extend-ignore=D,E,W,F401,F403,F405,F821,F822,ASYNC114 \ 69 | --extend-select=ASYNC900,ASYNC910,ASYNC911 70 | continue-on-error: true 71 | - name: Fail on error 72 | if: steps.black.outcome != 'success' || steps.flake8.outcome != 'success' 73 | run: exit 1 74 | 75 | Build: 76 | name: Build 77 | timeout-minutes: 5 78 | runs-on: ubuntu-latest 79 | outputs: 80 | python-versions: ${{ steps.baipp.outputs.supported_python_classifiers_json_array }} 81 | settings: ${{ steps.outputs.outputs.settings }} 82 | steps: 83 | - name: Checkout 84 | uses: actions/checkout@v4.2.2 85 | with: 86 | fetch-depth: 0 87 | - name: Build inspect and upload 88 | id: baipp 89 | uses: hynek/build-and-inspect-python-package@v2.12.0 90 | - name: Set outputs 91 | id: outputs 92 | run: | 93 | echo "settings=$( 94 | tar cz \ 95 | pyproject.toml \ 96 | requirements/install.txt \ 97 | requirements/test.txt \ 98 | | base64 -w 0 99 | )" >> $GITHUB_OUTPUT 100 | cd ${{ steps.baipp.outputs.dist }} && sha256sum *.* >> $GITHUB_STEP_SUMMARY 101 | 102 | Test: 103 | name: 'Test ${{ matrix.os }} (${{ matrix.python }})' 104 | needs: Build 105 | timeout-minutes: 10 106 | runs-on: ${{ matrix.os }} 107 | continue-on-error: ${{ matrix.experimental }} 108 | strategy: 109 | fail-fast: false 110 | matrix: 111 | experimental: [ false ] 112 | test-timeout: [ 10 ] 113 | os: 114 | - windows-latest 115 | - ubuntu-latest 116 | - macos-latest 117 | python: ${{ fromJson(needs.Build.outputs.python-versions) }} 118 | include: 119 | # pypy/linux is comparable to cpy/win 120 | - os: ubuntu-latest 121 | python: 'pypy-3.10' 122 | experimental: false 123 | test-timeout: 10 124 | # - os: ubuntu-latest 125 | # python: '3.13-dev' 126 | # experimental: true 127 | # test-timeout: 3 128 | steps: 129 | - name: Input settings 130 | run: echo "${{ needs.Build.outputs.settings }}" | base64 -d | tar xz 131 | shell: bash 132 | - name: Setup python 133 | uses: actions/setup-python@v5.6.0 134 | with: 135 | python-version: '${{ matrix.python }}' 136 | - name: Setup and Cache UV 137 | uses: hynek/setup-cached-uv@v2.3.0 138 | with: 139 | cache-suffix: '-${{ matrix.python }}' 140 | cache-dependency-path: '**/requirements/test.txt' 141 | - name: Download build artifact 142 | uses: actions/download-artifact@v4.3.0 143 | with: 144 | name: Packages 145 | path: dist 146 | - name: Install requirements 147 | run: | 148 | uv pip install --system -r requirements/test.txt 149 | - name: Install wheel 150 | shell: bash 151 | run: | 152 | uv pip install --system `echo ./dist/*.whl` --no-deps 153 | - name: Run tests 154 | timeout-minutes: ${{ matrix.test-timeout }} 155 | run: | 156 | pytest trio_parallel 157 | coverage report 158 | mv .coverage .coverage.${{ matrix.os }}-${{ matrix.python }} 159 | shell: bash 160 | - name: Upload 
coverage 161 | if: always() 162 | uses: actions/upload-artifact@v4.6.2 163 | with: 164 | name: Coverage-${{ matrix.os }}-${{ matrix.python }} 165 | path: "*coverage*${{ matrix.os }}-${{ matrix.python }}*" 166 | if-no-files-found: error 167 | include-hidden-files: true 168 | 169 | Examples: 170 | name: 'Test examples ${{ matrix.os }} (${{ matrix.python }})' 171 | timeout-minutes: 10 172 | runs-on: ${{ matrix.os }} 173 | strategy: 174 | fail-fast: false 175 | matrix: 176 | os: 177 | - windows-latest 178 | - ubuntu-latest 179 | - macos-latest 180 | python: 181 | - '3.13' 182 | - 'pypy-3.10' 183 | steps: 184 | - name: Checkout 185 | uses: actions/checkout@v4.2.2 186 | - name: Setup python 187 | uses: actions/setup-python@v5.6.0 188 | with: 189 | cache: pip 190 | cache-dependency-path: requirements/test.txt 191 | python-version: '${{ matrix.python }}' 192 | - name: Install requirements 193 | run: | 194 | python -m pip install -U pip 195 | python -m pip install -r requirements/test.txt 196 | - name: Install editable source 197 | run: | 198 | python -m pip install -e . --no-deps 199 | - name: Run tests 200 | run: | 201 | python -m pytest --no-cov docs/source 202 | 203 | Extras: 204 | name: 'Test [test] extra' 205 | timeout-minutes: 5 206 | runs-on: ubuntu-latest 207 | needs: Build 208 | steps: 209 | - name: Setup python 210 | uses: actions/setup-python@v5.6.0 211 | with: 212 | python-version: '3.13' 213 | - name: Download build artifact 214 | uses: actions/download-artifact@v4.3.0 215 | with: 216 | name: Packages 217 | path: dist 218 | - name: Run tests 219 | shell: bash 220 | run: | 221 | wheel=`echo ./dist/*.whl` 222 | python -m pip install $wheel[test] 223 | pytest --pyargs trio_parallel 224 | 225 | Coverage: 226 | name: Coverage 227 | if: always() 228 | needs: Test 229 | timeout-minutes: 1 230 | runs-on: ubuntu-latest 231 | steps: 232 | - name: Checkout 233 | uses: actions/checkout@v4.2.2 234 | - name: Download coverage artifact 235 | uses: actions/download-artifact@v4.3.0 236 | with: 237 | pattern: Coverage-* 238 | merge-multiple: true 239 | - name: Setup python 240 | uses: actions/setup-python@v5.6.0 241 | with: 242 | cache: pip 243 | cache-dependency-path: requirements/coverage.txt 244 | python-version: "3.13" 245 | - name: Install coverage 246 | run: | 247 | pip install -U pip 248 | pip install -r requirements/coverage.txt 249 | - name: Run coverage 250 | run: | 251 | coverage combine 252 | coverage html 253 | coverage report --fail-under=100 --show-missing --format markdown \ 254 | >> $GITHUB_STEP_SUMMARY 255 | - name: Upload report 256 | if: always() 257 | uses: actions/upload-artifact@v4.6.2 258 | with: 259 | name: Report 260 | path: htmlcov/ 261 | if-no-files-found: error 262 | 263 | All: 264 | name: All checks and tests 265 | if: always() 266 | timeout-minutes: 1 267 | needs: [ Verify, Blacken, Build, Examples, Test, Extras, Coverage ] 268 | runs-on: ubuntu-latest 269 | steps: 270 | - name: Check all needs have passed 271 | uses: re-actors/alls-green@v1.2.2 272 | with: 273 | jobs: ${{ toJSON(needs) }} 274 | 275 | Release: 276 | name: Release on PyPI, Attest, and Update GitHub 277 | if: github.event.action == 'published' 278 | needs: All 279 | timeout-minutes: 2 280 | runs-on: ubuntu-latest 281 | environment: 282 | name: release 283 | url: https://pypi.org/p/trio-parallel 284 | permissions: 285 | id-token: write 286 | contents: write 287 | steps: 288 | - name: Download build artifact 289 | uses: actions/download-artifact@v4.3.0 290 | with: 291 | name: Packages 292 | path: dist 293 | - 
name: Publish package distributions to PyPI 294 | uses: pypa/gh-action-pypi-publish@release/v1 295 | with: 296 | attestations: true 297 | - name: Upload to GitHub 298 | env: 299 | GITHUB_TOKEN: ${{ github.token }} 300 | run: | 301 | gh release upload ${{ github.ref_name }} dist/* --repo ${{ github.repository }} 302 | -------------------------------------------------------------------------------- /.github/workflows/periodic.yml: -------------------------------------------------------------------------------- 1 | # Smoke test to see if unpinned installations are working 2 | name: Periodic test 3 | 4 | on: 5 | schedule: 6 | - cron: "15 14 16 * *" # monthly but random-ish time 7 | workflow_dispatch: 8 | 9 | 10 | jobs: 11 | Test: 12 | name: Test ${{ matrix.os }} (${{ matrix.python }} ${{ matrix.source }}) 13 | timeout-minutes: 10 14 | runs-on: ${{ matrix.os }} 15 | strategy: 16 | fail-fast: false 17 | matrix: 18 | os: 19 | - windows-latest 20 | - ubuntu-latest 21 | - macos-latest 22 | python: 23 | - '3.12' 24 | - 'pypy-3.10' 25 | source: 26 | - 'trio-parallel' 27 | - 'git+https://github.com/python-trio/trio.git trio-parallel' 28 | - '-e .' 29 | steps: 30 | - name: Checkout 31 | if: matrix.source == '-e .' 32 | uses: actions/checkout@v4.2.2 33 | - name: Setup python 34 | uses: actions/setup-python@v5.6.0 35 | with: 36 | python-version: ${{ matrix.python }} 37 | - name: Run tests 38 | run: | 39 | python -m pip install ${{ matrix.source }}[test] 40 | cd .. # Disguise normal pytest config file 41 | python -m pytest --pyargs trio_parallel 42 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Add any project-specific files here: 2 | .idea/ 3 | 4 | # Sphinx docs 5 | docs/build/ 6 | 7 | # Byte-compiled / optimized / DLL files 8 | __pycache__/ 9 | *.py[cod] 10 | *~ 11 | \#* 12 | .#* 13 | 14 | # C extensions 15 | *.so 16 | 17 | # Distribution / packaging 18 | .Python 19 | /build/ 20 | /develop-eggs/ 21 | /dist/ 22 | /eggs/ 23 | /lib/ 24 | /lib64/ 25 | /parts/ 26 | /sdist/ 27 | /var/ 28 | *.egg-info/ 29 | .installed.cfg 30 | *.egg 31 | 32 | # Installer logs 33 | pip-log.txt 34 | 35 | # Unit test / coverage reports 36 | htmlcov/ 37 | .tox/ 38 | .coverage 39 | .coverage.* 40 | .cache 41 | .pytest_cache 42 | nosetests.xml 43 | coverage.xml 44 | 45 | # Translations 46 | *.mo 47 | 48 | # Mr Developer 49 | .mr.developer.cfg 50 | .project 51 | .pydevproject 52 | 53 | # Rope 54 | .ropeproject 55 | 56 | # Django stuff: 57 | *.log 58 | *.pot 59 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | # https://docs.readthedocs.io/en/latest/config-file/index.html 2 | version: 2 3 | 4 | build: 5 | os: ubuntu-22.04 6 | tools: 7 | python: "3.12" 8 | jobs: 9 | post_checkout: 10 | - git fetch --unshallow 11 | 12 | formats: 13 | - htmlzip 14 | - epub 15 | 16 | python: 17 | install: 18 | - method: pip 19 | path: . 
20 | - requirements: requirements/docs.txt 21 | 22 | sphinx: 23 | fail_on_warning: true 24 | configuration: docs/source/conf.py 25 | -------------------------------------------------------------------------------- /CHEATSHEET.rst: -------------------------------------------------------------------------------- 1 | Tips 2 | ==== 3 | 4 | If you want to use static typing (mypy) in your project 5 | ------------------------------------------------------- 6 | 7 | * Update ``install_requires`` in ``setup.py`` to include ``"trio-typing"`` 8 | (assuming you use it). 9 | 10 | * Uncomment the dependency on ``mypy`` in ``test-requirements.txt``. 11 | 12 | * Uncomment the mypy invocation in ``check.sh``. 13 | 14 | * Create an empty ``trio_parallel/py.typed`` file, 15 | and add ``"include trio_parallel/py.typed"`` to 16 | ``MANIFEST.in``. 17 | 18 | To run tests 19 | ------------ 20 | 21 | * Install test extras: ``pip install -e .[test]`` 22 | (possibly in a virtualenv) 23 | 24 | * Actually run the tests: ``pytest --pyargs trio_parallel`` 25 | 26 | 27 | To run black 28 | ------------ 29 | 30 | * Show what changes black wants to make: ``black --diff trio_parallel`` 31 | 32 | * Apply all changes directly to the source tree: ``black trio_parallel`` 33 | 34 | 35 | To update pinned requirements 36 | ----------------------------- 37 | 38 | * Run ``pip install pip-compile-multi`` if necessary. 39 | 40 | * Run ``pip-compile-multi --allow-unsafe``. 41 | 42 | * Note that manually changing dependencies will fail a CI check. 43 | 44 | 45 | To make a release 46 | ----------------- 47 | 48 | * Run ``towncrier build --version {version}`` to collect your release notes. 49 | 50 | * Review your release notes. 51 | 52 | * Double-check it all works, docs build, etc. 53 | 54 | * Check everything in. 55 | 56 | * Make a release PR on GitHub. Checks must pass. 57 | 58 | * Use GitHub release mechanism to tag the release PR merge commit: 59 | ``gh release create {version}`` 60 | 61 | * This triggers an action to release on PyPI and GitHub as well. 62 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | We follow the Trio code of conduct, see: 2 | https://trio.readthedocs.io/en/latest/code-of-conduct.html 3 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | If you notice any bugs, need any help, or want to contribute any code, 2 | GitHub issues and pull requests are very welcome! All pull requests require 3 | ``black`` formatting, clean ``flake8``, and 100% 4 | branch ``coverage``, with any ``pragma`` fully explained by comments. Contributions 5 | are to be made under the terms of the LICENSE. -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | This software is made available under the terms of *either* of the 2 | licenses found in LICENSE.APACHE2 or LICENSE.MIT. The license applies 3 | to all files and data associated with this software unless specifically noted 4 | otherwise. Contributions are to be made under the terms of *both* these licenses. 
5 | -------------------------------------------------------------------------------- /LICENSE.APACHE2: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 
62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 
180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright (C) Richard J. Sheridan 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /LICENSE.MIT: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (C) Richard J. Sheridan 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining 6 | a copy of this software and associated documentation files (the 7 | "Software"), to deal in the Software without restriction, including 8 | without limitation the rights to use, copy, modify, merge, publish, 9 | distribute, sublicense, and/or sell copies of the Software, and to 10 | permit persons to whom the Software is furnished to do so, subject to 11 | the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be 14 | included in all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 19 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 20 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 22 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ======================================= 2 | trio-parallel: CPU parallelism for Trio 3 | ======================================= 4 | 5 | Do you have CPU-bound work that just keeps slowing down your Trio_ event loop no 6 | matter what you try? Do you need to get all those cores humming at once? This is the 7 | library for you! 8 | 9 | The aim of trio-parallel is to use the lightest-weight, lowest-overhead, lowest-latency 10 | method to achieve CPU parallelism of arbitrary Python code with a dead-simple API. 
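In practice that dead-simple API boils down to a single awaitable call,
``trio_parallel.run_sync``. Here is a minimal sketch, assuming only what the
Example section below demonstrates (a top-level, synchronous, picklable
function dispatched to a worker subprocess):

.. code-block:: python

    import trio
    import trio_parallel


    def crunch(n):
        # Any synchronous, CPU-bound, picklable function will do
        total = 0
        for i in range(n):
            total += i
        return total


    async def amain():
        # Runs in a worker subprocess; the Trio event loop stays responsive
        return await trio_parallel.run_sync(crunch, 10_000_000)


    if __name__ == "__main__":
        print(trio.run(amain))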
11 | 12 | Resources 13 | --------- 14 | 15 | ============= ============================= 16 | 17 | License |license badge| 18 | Documentation |documentation badge| 19 | Chat |chat badge| 20 | Forum |forum badge| 21 | Issues |issues badge| 22 | Repository |repository badge| 23 | Tests |tests badge| 24 | Coverage |coverage badge| 25 | Style |style badge| 26 | Distribution | |version badge| 27 | | |python versions badge| 28 | | |python interpreters badge| 29 | | |downloads badge| 30 | 31 | ============= ============================= 32 | 33 | Example 34 | ------- 35 | 36 | .. code-block:: python 37 | 38 | import functools 39 | import multiprocessing 40 | import trio 41 | import trio_parallel 42 | 43 | 44 | def loop(n): 45 | # Arbitrary CPU-bound work 46 | for _ in range(n): 47 | pass 48 | print("Loops completed:", n) 49 | 50 | 51 | async def amain(): 52 | t0 = trio.current_time() 53 | async with trio.open_nursery() as nursery: 54 | # Do CPU-bound work in parallel 55 | for i in [6, 7, 8] * 4: 56 | nursery.start_soon(trio_parallel.run_sync, loop, 10 ** i) 57 | # Event loop remains responsive 58 | t1 = trio.current_time() 59 | await trio.sleep(0) 60 | print("Scheduling latency:", trio.current_time() - t1) 61 | # This job could take far too long, make it cancellable! 62 | nursery.start_soon( 63 | functools.partial( 64 | trio_parallel.run_sync, loop, 10 ** 20, kill_on_cancel=True 65 | ) 66 | ) 67 | await trio.sleep(2) 68 | # Only explicit kill_on_cancel jobs are terminated 69 | nursery.cancel_scope.cancel() 70 | print("Total runtime:", trio.current_time() - t0) 71 | 72 | 73 | if __name__ == "__main__": 74 | multiprocessing.freeze_support() 75 | trio.run(amain) 76 | 77 | 78 | Additional examples and the full API are available in the documentation_. 79 | 80 | Features 81 | -------- 82 | 83 | - Bypasses the GIL for CPU-bound work 84 | - Minimal API complexity 85 | 86 | - looks and feels like Trio threads_ 87 | 88 | - Minimal internal complexity 89 | 90 | - No reliance on ``multiprocessing.Pool``, ``ProcessPoolExecutor``, or any background threads 91 | 92 | - Cross-platform 93 | - ``print`` just works 94 | - Seamless interoperation with 95 | 96 | - coverage.py_ 97 | - viztracer_ 98 | - cloudpickle_ 99 | 100 | - Automatic LIFO caching of subprocesses 101 | - Cancel seriously misbehaving code via SIGKILL/TerminateProcess 102 | 103 | - Convert segfaults and other scary things to catchable errors 104 | 105 | FAQ 106 | --- 107 | 108 | How does trio-parallel run Python code in parallel? 109 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 110 | 111 | Currently, this project is based on ``multiprocessing`` subprocesses and 112 | has all the usual multiprocessing caveats_ (``freeze_support``, pickleable objects 113 | only, executing the ``__main__`` module). 114 | The case for basing these workers on multiprocessing is that it keeps a lot of 115 | complexity outside of the project while offering a set of quirks that users are 116 | likely already familiar with. 117 | 118 | The pickling limitations can be partially alleviated by installing cloudpickle_. 119 | 120 | Can I have my workers talk to each other? 121 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 122 | 123 | This is currently possible through the use of ``multiprocessing.Manager``, 124 | but we don't and will not officially support it. 125 | 126 | This package focuses on providing 127 | a flat hierarchy of worker subprocesses to run synchronous, CPU-bound functions. 
128 | If you are looking to create a nested hierarchy of processes communicating 129 | asynchronously with each other, while preserving the power, safety, and convenience of 130 | structured concurrency, look into `tractor <https://github.com/goodboy/tractor>`_. 131 | Or, if you are looking for a more customized solution, try using ``trio.run_process`` 132 | to spawn additional Trio runs and have them talk to each other over sockets. 133 | 134 | Can I let my workers outlive the main Trio process? 135 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 136 | 137 | No. Trio's structured concurrency strictly bounds job runs to within a given 138 | ``trio.run`` call, while cached idle workers are shut down and killed if necessary 139 | by our ``atexit`` handler, so this use case is not supported. 140 | 141 | How should I map a function over a collection of arguments? 142 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 143 | 144 | This is entirely possible, but we leave the implementation up to you. Think 145 | of us as a `loky <https://github.com/joblib/loky>`_ for your 146 | `joblib <https://github.com/joblib/joblib>`_, but natively async and Trionic. 147 | We take care of the worker handling so that you can focus on the best concurrency 148 | for your application. That said, some example parallelism patterns can be found in 149 | the documentation_. 150 | 151 | Also, consider `aiometer <https://github.com/florimondmanca/aiometer>`_. 152 | 153 | Contributing 154 | ------------ 155 | If you notice any bugs, need any help, or want to contribute any code, GitHub issues_ 156 | and pull requests are very welcome! Please read the `code of conduct`_. 157 | 158 | .. _chat: https://gitter.im/python-trio/general 159 | .. |chat badge| image:: https://img.shields.io/badge/chat-join%20now-blue.svg?color=royalblue&logo=Gitter 160 | :target: `chat`_ 161 | :alt: Chatroom 162 | 163 | .. _forum: https://trio.discourse.group 164 | .. |forum badge| image:: https://img.shields.io/badge/forum-join%20now-blue.svg?color=royalblue&logo=Discourse 165 | :target: `forum`_ 166 | :alt: Forum 167 | 168 | .. _documentation: https://trio-parallel.readthedocs.io/ 169 | .. |documentation badge| image:: https://img.shields.io/readthedocs/trio-parallel?logo=readthedocs&logoColor=whitesmoke 170 | :target: `documentation`_ 171 | :alt: Documentation 172 | 173 | .. _distribution: https://pypi.org/project/trio-parallel/ 174 | .. |version badge| image:: https://img.shields.io/pypi/v/trio-parallel?logo=PyPI&logoColor=whitesmoke 175 | :target: `distribution`_ 176 | :alt: Latest Pypi version 177 | 178 | .. _pypistats: https://pypistats.org/packages/trio-parallel 179 | .. |pypistats badge| image:: https://img.shields.io/pypi/dm/trio-parallel?logo=pypi&logoColor=whitesmoke 180 | :target: `pypistats`_ 181 | :alt: Pypi monthly downloads 182 | 183 | .. _pepy: https://pepy.tech/project/trio-parallel 184 | .. |pepy badge| image:: https://pepy.tech/badge/trio-parallel/month 185 | :target: `pepy`_ 186 | :alt: Pypi monthly downloads 187 | 188 | .. |python versions badge| image:: https://img.shields.io/pypi/pyversions/trio-parallel.svg?logo=PyPI&logoColor=whitesmoke 189 | :alt: Supported Python versions 190 | :target: `distribution`_ 191 | 192 | .. |python interpreters badge| image:: https://img.shields.io/pypi/implementation/trio-parallel.svg?logo=PyPI&logoColor=whitesmoke 193 | :alt: Supported Python interpreters 194 | :target: `distribution`_ 195 | 196 | .. _issues: https://github.com/richardsheridan/trio-parallel/issues 197 | ..
|issues badge| image:: https://img.shields.io/github/issues-raw/richardsheridan/trio-parallel?logo=github 198 | :target: `issues`_ 199 | :alt: Issues 200 | 201 | .. _repository: https://github.com/richardsheridan/trio-parallel 202 | .. |repository badge| image:: https://img.shields.io/github/last-commit/richardsheridan/trio-parallel?logo=github 203 | :target: `repository`_ 204 | :alt: Repository 205 | 206 | .. _tests: https://github.com/richardsheridan/trio-parallel/actions?query=branch%3Amain 207 | .. |tests badge| image:: https://img.shields.io/github/actions/workflow/status/richardsheridan/trio-parallel/ci.yml?branch=main&logo=Github-Actions&logoColor=whitesmoke 208 | :target: `tests`_ 209 | :alt: Tests 210 | 211 | .. _coverage: https://github.com/richardsheridan/trio-parallel/actions?query=branch%3Amain 212 | .. |coverage badge| image:: https://img.shields.io/github/actions/workflow/status/richardsheridan/trio-parallel/ci.yml?branch=main&label=100%25%20coverage 213 | :target: `coverage`_ 214 | :alt: Test coverage 215 | 216 | .. _style: https://github.com/psf/black 217 | .. |style badge| image:: https://img.shields.io/badge/code%20style-Black-black 218 | :target: `style`_ 219 | :alt: Code style 220 | 221 | .. _license: https://github.com/richardsheridan/trio-parallel/blob/main/LICENSE 222 | .. |license badge| image:: https://img.shields.io/pypi/l/trio-parallel?color=informational 223 | :target: `license`_ 224 | :alt: MIT -or- Apache License 2.0 225 | 226 | .. _downloads: https://pepy.tech/project/trio-parallel 227 | .. |downloads badge| image:: https://static.pepy.tech/badge/trio-parallel/month 228 | :target: `downloads`_ 229 | :alt: Monthly Downloads 230 | 231 | .. _coverage.py: https://coverage.readthedocs.io/ 232 | .. _viztracer: https://viztracer.readthedocs.io/ 233 | .. _cloudpickle: https://github.com/cloudpipe/cloudpickle 234 | .. _threads: https://trio.readthedocs.io/en/stable/reference-core.html#trio.to_thread.run_sync 235 | .. _caveats: https://docs.python.org/3/library/multiprocessing.html#programming-guidelines 236 | .. _Trio: https://github.com/python-trio/trio 237 | .. _code of conduct: https://trio.readthedocs.io/en/stable/code-of-conduct.html 238 | -------------------------------------------------------------------------------- /_trio_parallel_workers/__init__.py: -------------------------------------------------------------------------------- 1 | """Isolated package for trio-parallel's worker behavior 2 | 3 | This allows workers to start up without "heavy" dependencies like trio and attrs. 
4 | Users still need to make sure their CPU-bound functions also do not pull in such 5 | packages, but at least we are doing our part.""" 6 | 7 | import signal 8 | import sys 9 | from inspect import iscoroutine 10 | from pickle import HIGHEST_PROTOCOL 11 | from time import perf_counter 12 | 13 | try: 14 | from cloudpickle import dumps, loads 15 | except ImportError: 16 | from pickle import dumps, loads 17 | 18 | from outcome import capture, Error 19 | from tblib.pickling_support import install as install_pickling_support 20 | 21 | MAX_TIMEOUT = 24.0 * 60.0 * 60.0 22 | ACK = b"\x06" 23 | 24 | EAGER_CLEANUP = True # test_retire hook 25 | 26 | 27 | def handle_job(job): 28 | try: 29 | fn, args = loads(job) 30 | ret = fn(*args) 31 | if iscoroutine(ret): 32 | # Manually close coroutine to avoid RuntimeWarnings 33 | ret.close() 34 | raise TypeError( 35 | "trio-parallel worker expected a sync function, but {!r} appears " 36 | "to be asynchronous".format(getattr(fn, "__qualname__", fn)) 37 | ) 38 | return ret 39 | except BaseException as e: 40 | install_pickling_support(e) 41 | raise e 42 | 43 | 44 | def safe_dumps(result): 45 | try: 46 | return dumps(result, protocol=HIGHEST_PROTOCOL) 47 | except BaseException as exc: # noqa: ASYNC103 48 | return dumps(Error(exc), protocol=HIGHEST_PROTOCOL) # noqa: ASYNC104 49 | 50 | 51 | def safe_poll(recv_pipe, timeout): 52 | deadline = perf_counter() + timeout 53 | while timeout > MAX_TIMEOUT: 54 | if recv_pipe.poll(MAX_TIMEOUT): 55 | return True 56 | timeout = deadline - perf_counter() 57 | else: 58 | return recv_pipe.poll(timeout) 59 | 60 | 61 | def worker_behavior(recv_pipe, send_pipe, idle_timeout, init, retire): 62 | # Intercept keyboard interrupts to avoid passing KeyboardInterrupt 63 | # between processes. (Trio will take charge via cancellation.) 64 | signal.signal(signal.SIGINT, signal.SIG_IGN) 65 | try: 66 | if sys.platform == "win32": 67 | # Signal successful startup. 68 | send_pipe.send_bytes(ACK) 69 | if isinstance(init, bytes): # true except on "fork" 70 | init = loads(init) 71 | retire = loads(retire) 72 | init() 73 | while safe_poll(recv_pipe, idle_timeout): 74 | send_pipe.send_bytes( 75 | safe_dumps(capture(handle_job, recv_pipe.recv_bytes())) 76 | ) 77 | if retire(): 78 | break 79 | except (BrokenPipeError, EOFError): 80 | # Graceful shutdown: If the main process closes the pipes, we will 81 | # observe one of these exceptions and can simply exit quietly. 82 | # Closing pipes manually fixed some __del__ flakiness in CI 83 | send_pipe.close() 84 | recv_pipe.close() 85 | return 86 | except BaseException: 87 | # Ensure BrokenWorkerError raised in the main proc. 88 | send_pipe.close() 89 | # recv_pipe must remain open and clear until the main proc closes it. 90 | try: 91 | while True: 92 | recv_pipe.recv_bytes() 93 | except EOFError: 94 | pass 95 | raise 96 | else: 97 | # Clean idle shutdown or retirement: close recv_pipe first to minimize 98 | # subsequent race. 99 | if EAGER_CLEANUP: # Set False to maximize race for test_retire 100 | recv_pipe.close() 101 | # Race condition: it is possible to sneak a write through in the main process 102 | # between the while loop predicate and recv_pipe.close(). Naively, this would 103 | # make a clean shutdown look like a broken worker. By sending a sentinel 104 | # value, we can indicate to a waiting main process that we have hit this 105 | # race condition and need a restart. However, the send MUST be non-blocking 106 | # to free this process's resources in a timely manner. 
Therefore, this message 107 | # can be any size on Windows but must be less than 512 bytes by POSIX.1-2001. 108 | send_pipe.send_bytes(dumps(None, protocol=HIGHEST_PROTOCOL)) 109 | send_pipe.close() 110 | -------------------------------------------------------------------------------- /_trio_parallel_workers/_funcs.py: -------------------------------------------------------------------------------- 1 | """All functions run in subprocesses should be in here so they don't need to import 2 | trio, which adds a long time to every subprocess startup.""" 3 | 4 | import os 5 | import sys 6 | 7 | 8 | def _bad_retire_fn(): 9 | assert False 10 | 11 | 12 | _NUM_RUNS_LEFT = 0 13 | 14 | 15 | def _init_run_twice(): 16 | global _NUM_RUNS_LEFT 17 | _NUM_RUNS_LEFT = 2 18 | # increase coverage in worker_behavior cleanup and SpawnProcWorker.run_sync 19 | import _trio_parallel_workers 20 | 21 | _trio_parallel_workers.EAGER_CLEANUP = False 22 | 23 | 24 | def _retire_run_twice(): 25 | global _NUM_RUNS_LEFT 26 | _NUM_RUNS_LEFT -= 1 27 | return _NUM_RUNS_LEFT <= 0 28 | 29 | 30 | def _delayed_bad_retire_fn(): 31 | if _retire_run_twice(): 32 | _bad_retire_fn() 33 | 34 | 35 | def _loopy_retire_fn(): # pragma: no cover, will be killed 36 | if _retire_run_twice(): 37 | import time 38 | 39 | while True: 40 | time.sleep(1) 41 | 42 | 43 | def _raise_pid(): 44 | raise ValueError(os.getpid()) 45 | 46 | 47 | def _block_worker(block, start, done): 48 | # Make the worker block for a controlled amount of time 49 | start.set() 50 | block.wait() 51 | done.set() 52 | 53 | 54 | def _never_halts(ev): # pragma: no cover, worker will be killed 55 | # important difference from blocking call is cpu usage 56 | ev.set() 57 | while True: 58 | pass 59 | 60 | 61 | _lambda = lambda: None # pragma: no cover, never run 62 | 63 | 64 | def _return_lambda(): 65 | return _lambda 66 | 67 | 68 | async def _null_async_fn(): # pragma: no cover, coroutine called but not run 69 | pass 70 | 71 | 72 | def _monkeypatch_max_timeout(): 73 | import _trio_parallel_workers 74 | 75 | _trio_parallel_workers.MAX_TIMEOUT = 0.1 76 | return True 77 | 78 | 79 | def _no_trio(): 80 | return "trio" not in sys.modules 81 | 82 | 83 | class SpecialError(Exception): 84 | pass 85 | 86 | 87 | def _chained_exc(): 88 | try: 89 | raise ValueError("test1") 90 | except ValueError as e: 91 | raise SpecialError("test2") from e 92 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = trio-parallel 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
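# For example (a sketch of make-mode usage; any target Sphinx's make mode
# supports works here), "make html" expands under these settings to:
#   sphinx-build -M html "source" "build"
# and extra options pass through $(O), e.g. "make html O=-W".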
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | set SPHINXPROJ=trio-parallel 13 | 14 | if "%1" == "" goto help 15 | 16 | %SPHINXBUILD% >NUL 2>NUL 17 | if errorlevel 9009 ( 18 | echo. 19 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 20 | echo.installed, then set the SPHINXBUILD environment variable to point 21 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 22 | echo.may add the Sphinx directory to PATH. 23 | echo. 24 | echo.If you don't have Sphinx installed, grab it from 25 | echo.http://sphinx-doc.org/ 26 | exit /b 1 27 | ) 28 | 29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 30 | goto end 31 | 32 | :help 33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 34 | 35 | :end 36 | popd 37 | -------------------------------------------------------------------------------- /docs/source/_static/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/richardsheridan/trio-parallel/58aed5f51b04331c8fcc987f76fad33f82fcea8a/docs/source/_static/.gitkeep -------------------------------------------------------------------------------- /docs/source/_static/custom.css: -------------------------------------------------------------------------------- 1 | /* Get rid of the horrible red for literal content */ 2 | .rst-content tt.literal, .rst-content tt.literal, .rst-content code.literal { 3 | color: #222 !important; 4 | } -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # 4 | # Documentation build configuration file, created by 5 | # sphinx-quickstart on Sat Jan 21 19:11:14 2017. 6 | # 7 | # This file is execfile()d with the current directory set to its 8 | # containing dir. 9 | # 10 | # Note that not all possible configuration values are present in this 11 | # autogenerated file. 12 | # 13 | # All configuration values have a default; values that are commented out 14 | # serve to show the default. 15 | 16 | # If extensions (or modules to document with autodoc) are in another directory, 17 | # add these directories to sys.path here. If the directory is relative to the 18 | # documentation root, use os.path.abspath to make it absolute, like shown here. 
19 | # 20 | import os 21 | import sys 22 | 23 | # So autodoc can import our package 24 | sys.path.insert(0, os.path.abspath("../..")) 25 | 26 | 27 | # Warn about all references to unknown targets 28 | nitpicky = True 29 | # Except for these ones, which we expect to point to unknown targets: 30 | nitpick_ignore = [ 31 | # Format is ("sphinx reference type", "string"), e.g.: 32 | ("py:class", "CapacityLimiter-like object"), 33 | ("py:class", "bytes-like"), 34 | ("py:class", "None"), 35 | # Was removed but still shows up in changelog 36 | ("py:class", "trio.lowlevel.RunLocal"), 37 | # trio.abc is documented at random places scattered throughout the docs 38 | ("py:mod", "trio.abc"), 39 | ("py:class", "math.inf"), 40 | ("py:exc", "Anything else"), 41 | ("py:class", "async function"), 42 | ("py:class", "sync function"), 43 | # https://github.com/sphinx-doc/sphinx/issues/7722 44 | ("py:class", "SendType"), 45 | ("py:class", "ReceiveType"), 46 | ("py:class", "trio_parallel._impl.T"), 47 | ] 48 | autodoc_inherit_docstrings = False 49 | default_role = "obj" 50 | 51 | # -- General configuration ------------------------------------------------ 52 | 53 | # If your documentation needs a minimal Sphinx version, state it here. 54 | # 55 | # needs_sphinx = '1.0' 56 | 57 | # Add any Sphinx extension module names here, as strings. They can be 58 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 59 | # ones. 60 | extensions = [ 61 | "sphinx.ext.autodoc", 62 | "sphinx.ext.intersphinx", 63 | "sphinx.ext.coverage", 64 | "sphinx.ext.napoleon", 65 | "sphinxcontrib_trio", 66 | "sphinxcontrib.jquery", 67 | ] 68 | 69 | intersphinx_mapping = { 70 | "python": ("https://docs.python.org/3", None), 71 | "trio": ("https://trio.readthedocs.io/en/stable", None), 72 | } 73 | 74 | autodoc_member_order = "bysource" 75 | 76 | # Add any paths that contain templates here, relative to this directory. 77 | templates_path = [] 78 | 79 | # The suffix(es) of source filenames. 80 | # You can specify multiple suffix as a list of string: 81 | # 82 | # source_suffix = ['.rst', '.md'] 83 | source_suffix = ".rst" 84 | 85 | # The master toctree document. 86 | master_doc = "index" 87 | 88 | # General information about the project. 89 | project = "trio-parallel" 90 | copyright = "Richard J. Sheridan" 91 | author = "Richard J. Sheridan" 92 | 93 | # The version info for the project you're documenting, acts as replacement for 94 | # |version| and |release|, also used in various other places throughout the 95 | # built documents. 96 | # 97 | # The short X.Y version. 98 | from importlib.metadata import version 99 | 100 | version = version("trio-parallel") 101 | # The full version, including alpha/beta/rc tags. 102 | release = version 103 | 104 | # https://docs.readthedocs.io/en/stable/builds.html#build-environment 105 | if "READTHEDOCS" in os.environ: 106 | import glob 107 | 108 | if glob.glob("../../newsfragments/*.*.rst"): 109 | print("-- Found newsfragments; running towncrier --", flush=True) 110 | import subprocess 111 | 112 | subprocess.run( 113 | ["towncrier", "build", "--yes", "--version", version], 114 | cwd="../..", 115 | check=True, 116 | ) 117 | 118 | # html_favicon = "_static/favicon-32.png" 119 | # html_logo = "../../logo/wordmark-transparent.svg" 120 | # & down below in html_theme_options we set logo_only=True 121 | 122 | # The language for content autogenerated by Sphinx. Refer to documentation 123 | # for a list of supported languages. 
124 | # 125 | # This is also used if you do content translation via gettext catalogs. 126 | # Usually you set "language" from the command line for these cases. 127 | language = "en" 128 | 129 | # List of patterns, relative to source directory, that match files and 130 | # directories to ignore when looking for source files. 131 | # These patterns also affect html_static_path and html_extra_path 132 | exclude_patterns = [] 133 | 134 | # The name of the Pygments (syntax highlighting) style to use. 135 | pygments_style = "sphinx" 136 | 137 | # The default language for :: blocks 138 | highlight_language = "python3" 139 | 140 | # If true, `todo` and `todoList` produce output, else they produce nothing. 141 | todo_include_todos = False 142 | 143 | # Fold return type into the "Returns:" section, rather than making 144 | # a separate "Return type:" section 145 | napoleon_use_rtype = False 146 | 147 | # This avoids a warning by the epub builder that it can't figure out 148 | # the MIME type for the CI badge. 149 | suppress_warnings = ["epub.unknown_project_files"] 150 | 151 | 152 | # -- Options for HTML output ---------------------------------------------- 153 | 154 | # The theme to use for HTML and HTML Help pages. See the documentation for 155 | # a list of builtin themes. 156 | # 157 | # html_theme = 'alabaster' 158 | 159 | # We have to set this ourselves, not only because it's useful for local 160 | # testing, but also because if we don't then RTD will throw away our 161 | # html_theme_options. 162 | html_theme = "sphinx_rtd_theme" 163 | 164 | # Theme options are theme-specific and customize the look and feel of a theme 165 | # further. For a list of options available for each theme, see the 166 | # documentation. 167 | # 168 | html_theme_options = { 169 | # default is 2 170 | # show deeper nesting in the RTD theme's sidebar TOC 171 | # https://stackoverflow.com/questions/27669376/ 172 | # I'm not 100% sure this actually does anything with our current 173 | # versions/settings... 174 | "navigation_depth": 4, 175 | "logo_only": True, 176 | "prev_next_buttons_location": "both", 177 | } 178 | 179 | # Add any paths that contain custom static files (such as style sheets) here, 180 | # relative to this directory. They are copied after the builtin static files, 181 | # so a file named "default.css" will overwrite the builtin "default.css". 182 | html_static_path = ["_static"] 183 | 184 | html_css_files = [ 185 | "custom.css", 186 | ] 187 | 188 | # -- Options for HTMLHelp output ------------------------------------------ 189 | 190 | # Output file base name for HTML help builder. 191 | htmlhelp_basename = "trio-paralleldoc" 192 | 193 | 194 | # -- Options for LaTeX output --------------------------------------------- 195 | 196 | latex_elements = { 197 | # The paper size ('letterpaper' or 'a4paper'). 198 | # 199 | # 'papersize': 'letterpaper', 200 | # The font size ('10pt', '11pt' or '12pt'). 201 | # 202 | # 'pointsize': '10pt', 203 | # Additional stuff for the LaTeX preamble. 204 | # 205 | # 'preamble': '', 206 | # Latex figure (float) alignment 207 | # 208 | # 'figure_align': 'htbp', 209 | } 210 | 211 | # Grouping the document tree into LaTeX files. List of tuples 212 | # (source start file, target name, title, 213 | # author, documentclass [howto, manual, or own class]).
214 | latex_documents = [ 215 | (master_doc, "trio-parallel.tex", "trio-parallel Documentation", author, "manual"), 216 | ] 217 | 218 | 219 | # -- Options for manual page output --------------------------------------- 220 | 221 | # One entry per manual page. List of tuples 222 | # (source start file, name, description, authors, manual section). 223 | man_pages = [(master_doc, "trio-parallel", "trio-parallel Documentation", [author], 1)] 224 | 225 | 226 | # -- Options for Texinfo output ------------------------------------------- 227 | 228 | # Grouping the document tree into Texinfo files. List of tuples 229 | # (source start file, target name, title, author, 230 | # dir menu entry, description, category) 231 | texinfo_documents = [ 232 | ( 233 | master_doc, 234 | "trio-parallel", 235 | "trio-parallel Documentation", 236 | author, 237 | "trio-parallel", 238 | "CPU parallelism for Trio", 239 | "Miscellaneous", 240 | ), 241 | ] 242 | -------------------------------------------------------------------------------- /docs/source/examples.rst: -------------------------------------------------------------------------------- 1 | Example concurrency patterns 2 | ============================ 3 | 4 | Parallel, ordered map and gather 5 | -------------------------------- 6 | 7 | .. literalinclude:: examples/parallel_map.py 8 | 9 | 10 | Async parallel processing pipeline 11 | ---------------------------------- 12 | 13 | .. literalinclude:: examples/async_parallel_pipeline.py 14 | -------------------------------------------------------------------------------- /docs/source/examples/async_parallel_pipeline.py: -------------------------------------------------------------------------------- 1 | import binascii 2 | import multiprocessing 3 | import time 4 | import secrets 5 | 6 | import trio 7 | import trio_parallel 8 | 9 | 10 | async def to_process_map_as_completed( 11 | sync_fn, 12 | job_aiter, 13 | kill_on_cancel=False, 14 | limiter=None, 15 | *, 16 | task_status, 17 | ): 18 | if limiter is None: 19 | limiter = trio_parallel.current_default_worker_limiter() 20 | send_chan, recv_chan = trio.open_memory_channel(0) 21 | task_status.started(recv_chan) 22 | 23 | async def worker(job_item, task_status): 24 | # Backpressure: hold limiter for entire task to avoid 25 | # spawning too many workers 26 | async with limiter: 27 | task_status.started() 28 | result = await trio_parallel.run_sync( 29 | sync_fn, 30 | *job_item, 31 | kill_on_cancel=kill_on_cancel, 32 | limiter=trio.CapacityLimiter(1), 33 | ) 34 | await send_chan.send(result) 35 | 36 | async with send_chan, trio.open_nursery() as nursery: 37 | async for job_item in job_aiter: 38 | await nursery.start(worker, job_item) 39 | 40 | 41 | async def data_generator(*, task_status, limiter=None): 42 | send_chan, recv_chan = trio.open_memory_channel(0) 43 | task_status.started(recv_chan) 44 | if limiter is None: 45 | limiter = trio_parallel.current_default_worker_limiter() 46 | async with send_chan: 47 | for j in range(100): 48 | # Just pretend this is coming from disk or network 49 | data = secrets.token_hex() 50 | # Inputs MUST be throttled with the SAME limiter as 51 | # the rest of the steps of the pipeline 52 | async with limiter: 53 | await send_chan.send((j, data)) 54 | 55 | 56 | def clean_data(j, data): 57 | time.sleep(secrets.randbelow(2) / 20) 58 | return j, data.replace("deadbeef", "f00dbeef") 59 | 60 | 61 | def load_data(j, data): 62 | time.sleep(secrets.randbelow(2) / 20) 63 | return j, binascii.unhexlify(data) 64 | 65 | 66 | def compute(j, data): 67 |
time.sleep(secrets.randbelow(2) / 20) 68 | n = 0 69 | for value in data: 70 | if value % 2: 71 | n += 1 72 | return j, n 73 | 74 | 75 | async def amain(): 76 | i = 1 77 | t0 = trio.current_time() 78 | async with trio.open_nursery() as nursery: 79 | data_aiter = await nursery.start(data_generator) 80 | clean_data_aiter = await nursery.start( 81 | to_process_map_as_completed, 82 | clean_data, 83 | data_aiter, 84 | ) 85 | loaded_data_aiter = await nursery.start( 86 | to_process_map_as_completed, 87 | load_data, 88 | clean_data_aiter, 89 | ) 90 | computational_result_aiter = await nursery.start( 91 | to_process_map_as_completed, 92 | compute, 93 | loaded_data_aiter, 94 | ) 95 | async for result in computational_result_aiter: 96 | print(i, (trio.current_time() - t0) / i, *result) 97 | if result[1] <= 9: 98 | print("Winner! after ", trio.current_time() - t0, "seconds") 99 | nursery.cancel_scope.cancel() 100 | i += 1 101 | print("No extra-even bytestrings after ", trio.current_time() - t0, "seconds") 102 | 103 | 104 | if __name__ == "__main__": 105 | multiprocessing.freeze_support() 106 | trio.run(amain) 107 | -------------------------------------------------------------------------------- /docs/source/examples/cache_warmup.py: -------------------------------------------------------------------------------- 1 | import trio, trio_parallel 2 | 3 | async def amain(): 4 | t0 = trio.current_time() 5 | await trio_parallel.run_sync(bool) 6 | t1 = trio.current_time() 7 | await trio_parallel.run_sync(bool) 8 | t2 = trio.current_time() 9 | await trio_parallel.run_sync(bytearray, 10**8) 10 | t3 = trio.current_time() 11 | print("Cold cache latency:", t1-t0) 12 | print("Warm cache latency:", t2-t1) 13 | print("IPC latency/MB:", (t3-t2)/10**2) 14 | 15 | if __name__ == '__main__': 16 | trio.run(amain) 17 | -------------------------------------------------------------------------------- /docs/source/examples/cancellation.py: -------------------------------------------------------------------------------- 1 | import trio, trio_parallel, time 2 | 3 | 4 | def hello_delayed_world(): 5 | print("Hello") 6 | time.sleep(1.0) 7 | print("world!") 8 | 9 | 10 | async def amain(): 11 | # warm up thread/process caches 12 | await trio_parallel.run_sync(bool) 13 | await trio.to_thread.run_sync(bool) 14 | 15 | with trio.move_on_after(0.5): 16 | await trio_parallel.run_sync(hello_delayed_world, kill_on_cancel=True) 17 | 18 | with trio.move_on_after(0.5): 19 | await trio.to_thread.run_sync(hello_delayed_world, abandon_on_cancel=True) 20 | 21 | # grace period for abandoned thread 22 | await trio.sleep(0.6) 23 | 24 | 25 | if __name__ == "__main__": 26 | trio.run(amain) 27 | -------------------------------------------------------------------------------- /docs/source/examples/checkpointing.py: -------------------------------------------------------------------------------- 1 | import trio, trio_parallel, time 2 | 3 | async def check_scheduling_latency(): 4 | for _ in range(10): 5 | t0 = trio.current_time() 6 | await trio.lowlevel.checkpoint() 7 | print(trio.current_time() - t0) 8 | 9 | async def amain(): 10 | async with trio.open_nursery() as nursery: 11 | nursery.start_soon(check_scheduling_latency) 12 | await trio_parallel.run_sync(time.sleep, 1) 13 | 14 | if __name__ == "__main__": 15 | trio.run(amain) 16 | -------------------------------------------------------------------------------- /docs/source/examples/minimal.py: -------------------------------------------------------------------------------- 1 | import trio, trio_parallel 
2 | from operator import add 3 | 4 | async def parallel_add(): 5 | return await trio_parallel.run_sync(add, 1, 2) 6 | 7 | # Guard against our workers trying to recursively start workers on startup 8 | if __name__ == '__main__': 9 | assert add(1, 2) == trio.run(parallel_add) == 3 -------------------------------------------------------------------------------- /docs/source/examples/parallel_loops.py: -------------------------------------------------------------------------------- 1 | import trio, trio_parallel, time 2 | 3 | def loop(i=0): 4 | deadline = time.perf_counter() + 1 5 | # Arbitrary CPU-bound work 6 | while time.perf_counter() < deadline: 7 | i += 1 8 | print("Loops completed:", i) 9 | 10 | async def amain(): 11 | async with trio.open_nursery() as nursery: 12 | for i in range(4): 13 | nursery.start_soon(trio_parallel.run_sync, loop) 14 | 15 | if __name__ == "__main__": 16 | trio.run(amain) 17 | -------------------------------------------------------------------------------- /docs/source/examples/parallel_map.py: -------------------------------------------------------------------------------- 1 | import multiprocessing 2 | import random 3 | 4 | import trio 5 | import trio_parallel 6 | 7 | 8 | def twiddle(i): 9 | for j in range(50000): 10 | i *= random.choice((-1, 1)) 11 | return i 12 | 13 | 14 | async def parallel_map(fn, inputs, *args): 15 | results = [None] * len(inputs) 16 | 17 | async def worker(j, inp): 18 | results[j] = await trio_parallel.run_sync(fn, inp, *args) 19 | print(j, "done") 20 | 21 | async with trio.open_nursery() as nursery: 22 | for i, inp in enumerate(inputs): 23 | nursery.start_soon(worker, i, inp) 24 | 25 | return results 26 | 27 | 28 | if __name__ == "__main__": 29 | multiprocessing.freeze_support() 30 | print(trio.run(parallel_map, twiddle, range(100))) 31 | -------------------------------------------------------------------------------- /docs/source/examples/single_use_workers.py: -------------------------------------------------------------------------------- 1 | import trio, trio_parallel, os 2 | 3 | 4 | def worker(i): 5 | print(i, "hello from", os.getpid()) 6 | 7 | 8 | def after_single_use(): 9 | return True 10 | 11 | 12 | WORKER_HAS_BEEN_USED = False 13 | 14 | 15 | def after_dual_use(): 16 | global WORKER_HAS_BEEN_USED 17 | if WORKER_HAS_BEEN_USED: 18 | return True # retire 19 | else: 20 | WORKER_HAS_BEEN_USED = True 21 | return False # don't retire... YET 22 | 23 | 24 | async def amain(): 25 | trio_parallel.current_default_worker_limiter().total_tokens = 4 26 | 27 | print("single use worker behavior:") 28 | async with trio_parallel.open_worker_context(retire=after_single_use) as ctx: 29 | async with trio.open_nursery() as nursery: 30 | for i in range(10): 31 | nursery.start_soon(ctx.run_sync, worker, i) 32 | 33 | print("dual use worker behavior:") 34 | async with trio_parallel.cache_scope(retire=after_dual_use): 35 | async with trio.open_nursery() as nursery: 36 | for i in range(10): 37 | nursery.start_soon(trio_parallel.run_sync, worker, i) 38 | 39 | print("default behavior:") 40 | async with trio.open_nursery() as nursery: 41 | for i in range(10): 42 | nursery.start_soon(trio_parallel.run_sync, worker, i) 43 | 44 | 45 | if __name__ == "__main__": 46 | trio.run(amain) 47 | -------------------------------------------------------------------------------- /docs/source/history.rst: -------------------------------------------------------------------------------- 1 | Release history 2 | =============== 3 | 4 | .. currentmodule:: trio_parallel 5 | 6 | .. 
towncrier release notes start 7 | 8 | trio-parallel 1.3.0 (2024-12-25) 9 | -------------------------------- 10 | 11 | Features 12 | ~~~~~~~~ 13 | 14 | - Advertise support for Python-3.13, although no code changes were made to support it. (`#434 `__) 15 | - Add ``kill_on_cancel`` kwarg to :func:`run_sync`. The alias ``cancellable`` will remain indefinitely. (`#437 `__) 16 | - Add `cache_scope()`, an async context manager that can override the behavior of 17 | `trio_parallel.run_sync()` in a subtree of your Trio tasks with an implicit 18 | `WorkerContext`. (`#455 `__) 19 | 20 | 21 | Deprecations and Removals 22 | ~~~~~~~~~~~~~~~~~~~~~~~~~ 23 | 24 | - Stop advertising support for Python-3.8, although no code changes were made to break it. (`#434 `__) 25 | - Removed deprecated ``atexit_shutdown_grace_period``. Use `configure_default_context` to configure the default context shutdown grace period. (`#435 `__) 26 | 27 | 28 | trio-parallel 1.2.4 (2024-12-21) 29 | -------------------------------- 30 | 31 | Bugfixes 32 | ~~~~~~~~ 33 | 34 | - Ensure worker processes are eagerly reaped after a rare race condition edge case. (`#436 `__) 35 | - Fix a usage of a removed internal trio function in the test suite. (`#444 `__) 36 | 37 | 38 | trio-parallel 1.2.3 (2024-10-19) 39 | -------------------------------- 40 | 41 | Bugfixes 42 | ~~~~~~~~ 43 | 44 | - Fix a regression induced by trio-0.27.0 that causes worker contexts to crash on exit if they happen to wait for jobs to finish. (`#432 `__) 45 | 46 | 47 | trio-parallel 1.2.2 (2024-04-24) 48 | -------------------------------- 49 | 50 | Bugfixes 51 | ~~~~~~~~ 52 | 53 | - Fixed a rare race condition during cleanup that could trigger unraisable error tracebacks. (`#398 `__) 54 | - Made several internal changes that may make compatibility with future Trio versions more stable (`#412 `__) 55 | 56 | 57 | trio-parallel 1.2.1 (2023-11-04) 58 | -------------------------------- 59 | 60 | Bugfixes 61 | ~~~~~~~~ 62 | 63 | - Resolved a deprecation warning on python 3.12. (`#380 `__) 64 | 65 | 66 | Deprecations and Removals 67 | ~~~~~~~~~~~~~~~~~~~~~~~~~ 68 | 69 | - Although python 3.7 has not been specifically broken, it is no longer tested in CI. (`#389 `__) 70 | 71 | 72 | trio-parallel 1.2.0 (2022-10-29) 73 | -------------------------------- 74 | 75 | Features 76 | ~~~~~~~~ 77 | 78 | - The behavior of the default context is now fully configurable, superseding ``atexit_shutdown_grace_period`` (`#328 `__) 79 | 80 | 81 | Bugfixes 82 | ~~~~~~~~ 83 | 84 | - Use tblib lazily to pass tracebacks on user exceptions. Previously, tracebacks would only be passed on the built-in python exceptions. (`#332 `__) 85 | 86 | 87 | trio-parallel 1.1.0 (2022-09-18) 88 | -------------------------------- 89 | 90 | Features 91 | ~~~~~~~~ 92 | 93 | - Add type hints for `run_sync` (`#322 `__) 94 | - Use ``tblib`` to enable pickling of tracebacks between processes. Mainly, this 95 | preserves context of exceptions including chained exceptions. (`#323 `__) 96 | 97 | 98 | Bugfixes 99 | ~~~~~~~~ 100 | 101 | - Prevent Ctrl+C from inducing various leaks and inconsistent states. (`#239 `__) 102 | - Cleaned up names/qualnames of objects in the trio_parallel namespace. 
(`#291 `__) 103 | 104 | 105 | Deprecations and Removals 106 | ~~~~~~~~~~~~~~~~~~~~~~~~~ 107 | 108 | - Removed python 3.6 support (`#236 `__) 109 | 110 | 111 | trio-parallel 1.0.0 (2021-12-04) 112 | -------------------------------- 113 | 114 | Bugfixes 115 | ~~~~~~~~ 116 | 117 | - Fixed a hang on failed worker subprocess spawns that mostly occurred upon 118 | accidental multiprocessing recursive spawn. (`#167 `__) 119 | - Fixed a hang on Windows when trying to use :meth:`WorkerContext.run_sync` in sequential 120 | and concurrent Trio runs. (`#171 `__) 121 | 122 | 123 | Improved Documentation 124 | ~~~~~~~~~~~~~~~~~~~~~~ 125 | 126 | - Revamped documentation with tested examples. (`#168 `__) 127 | 128 | 129 | trio-parallel 1.0.0b0 (2021-11-12) 130 | ---------------------------------- 131 | 132 | With this release I consider the project "feature complete". 133 | 134 | Features 135 | ~~~~~~~~ 136 | 137 | - Added an API to view statistics about a `WorkerContext`, specifically counting 138 | ``idle_workers`` and ``running_workers``. (`#155 `__) 139 | 140 | 141 | trio-parallel 1.0.0a2 (2021-10-08) 142 | ---------------------------------- 143 | 144 | Features 145 | ~~~~~~~~ 146 | 147 | - Opportunistically use ``cloudpickle`` to serialize jobs and results. (`#115 `__) 148 | - Timeout arguments of :func:`open_worker_context`, ``idle_timeout`` and ``grace_period``, 149 | now work like trio timeouts, accepting any non-negative `~float` value. (`#116 `__) 150 | - Worker process startup is now faster, by importing trio lazily (`#117 `__) 151 | - :func:`open_worker_context` now returns a context object that can be used to run 152 | functions explicitly in a certain context (:meth:`WorkerContext.run_sync`) rather 153 | than implicitly altering the behavior of :func:`trio_parallel.run_sync`. (`#127 `__) 154 | 155 | 156 | trio-parallel 1.0.0a1 (2021-09-05) 157 | ---------------------------------- 158 | 159 | Features 160 | ~~~~~~~~ 161 | 162 | - Added configuration options for the grace periods permitted to worker caches upon 163 | shutdown. This includes a new keyword argument for :func:`open_worker_context` and 164 | a new top level function ``atexit_shutdown_grace_period``. (`#108 `__) 165 | - :func:`open_worker_context` gained a new argument, ``init``, and ``retire`` is no longer 166 | called before the first job in the worker. (`#110 `__) 167 | 168 | 169 | trio-parallel 1.0.0a0 (2021-07-22) 170 | ---------------------------------- 171 | 172 | Features 173 | ~~~~~~~~ 174 | 175 | - The behavior and lifetime of worker processes can now be customized with the :func:`open_worker_context` context manager. (`#19 `__) 176 | 177 | 178 | trio-parallel 0.5.1 (2021-05-05) 179 | -------------------------------- 180 | 181 | Bugfixes 182 | ~~~~~~~~ 183 | 184 | - Remove ``__version__`` attribute to avoid crash on import when metadata is not available (`#55 `__) 185 | 186 | 187 | trio-parallel 0.5.0 (2021-05-02) 188 | --------------------------------------------------------- 189 | 190 | Features 191 | ~~~~~~~~ 192 | 193 | - :exc:`trio_parallel.BrokenWorkerError` now contains a reference to the underlying worker process which can be inspected e.g. to handle specific exit codes. (`#48 `__) 194 | 195 | 196 | Bugfixes 197 | ~~~~~~~~ 198 | 199 | - Workers are now fully synchronized with only pipe/channel-like objects, making it impossible to leak semaphores. (`#33 `__) 200 | - Fix a regression of a rare race condition where idle workers shut down cleanly but appear broken. 
(`#43 `__) 201 | - Ensure a clean worker shutdown if IPC pipes are closed (`#51 `__) 202 | 203 | 204 | Misc 205 | ~~~~ 206 | 207 | - `#40 `__, `#42 `__, `#44 `__ 208 | 209 | 210 | trio-parallel 0.4.0 (2021-03-25) 211 | -------------------------------- 212 | 213 | Bugfixes 214 | ~~~~~~~~ 215 | 216 | - Correctly handle the case where `os.cpu_count` returns `None`. (`#32 `__) 217 | - Ignore keyboard interrupt (SIGINT) in workers to ensure correct cancellation semantics and clean shutdown on CTRL+C. (`#35 `__) 218 | 219 | 220 | Misc 221 | ~~~~ 222 | 223 | - `#27 `__ 224 | 225 | 226 | trio-parallel 0.3.0 (2021-02-21) 227 | -------------------------------- 228 | 229 | Bugfixes 230 | ~~~~~~~~ 231 | 232 | - Fixed an underlying race condition in IPC. Not a critical bugfix, as it should not be triggered in practice. (`#15 `__) 233 | - Reduce the production of zombie children on Unix systems (`#20 `__) 234 | - Close internal race condition when waiting for subprocess exit codes on macOS. (`#23 `__) 235 | - Avoid a race condition leading to deadlocks when a worker process is killed right after receiving work. (`#25 `__) 236 | 237 | 238 | Improved Documentation 239 | ~~~~~~~~~~~~~~~~~~~~~~ 240 | 241 | - Reorganized documentation for less redundancy and more clarity (`#16 `__) 242 | 243 | 244 | trio-parallel 0.2.0 (2021-02-02) 245 | -------------------------------- 246 | 247 | Bugfixes 248 | ~~~~~~~~ 249 | 250 | - Changed subprocess context to explicitly always spawn new processes (`#5 `__) 251 | - Changed synchronization scheme to achieve full passing tests on 252 | 253 | - Windows, Linux, MacOS 254 | - CPython 3.6, 3.7, 3.8, 3.9 255 | - Pypy 3.6, 3.7, 3.7-nightly 256 | 257 | Note Pypy on Windows is not supported here or by Trio (`#10 `__) 258 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. documentation master file, created by 2 | sphinx-quickstart on Sat Jan 21 19:11:14 2017. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | 7 | .. include:: ../../README.rst 8 | 9 | ============ 10 | Navigation 11 | ============ 12 | 13 | .. toctree:: 14 | :maxdepth: 2 15 | 16 | reference.rst 17 | examples.rst 18 | history.rst 19 | 20 | ==================== 21 | Indices and tables 22 | ==================== 23 | 24 | * :ref:`genindex` 25 | * :ref:`modindex` 26 | * :ref:`search` 27 | * :ref:`glossary` 28 | -------------------------------------------------------------------------------- /docs/source/reference.rst: -------------------------------------------------------------------------------- 1 | Reference 2 | ========= 3 | 4 | .. currentmodule:: trio_parallel 5 | 6 | This project's aim is to use the lightest-weight, lowest-overhead, lowest latency 7 | method to achieve parallelism of arbitrary Python code, and make it natively async for Trio. 8 | Given that Python (and CPython in particular) has ongoing difficulties parallelizing 9 | CPU-bound work in threads, this package dispatches synchronous function execution to 10 | *subprocesses*. However, this project is not fundamentally constrained by that, 11 | and will be considering subinterpreters, or any other avenue as they become available. 12 | 13 | Running CPU-bound functions in parallel 14 | --------------------------------------- 15 | 16 | The main interface for ``trio-parallel`` is :func:`run_sync`: 17 | 18 | .. 
autofunction:: run_sync 19 | 20 | .. note:: 21 | 22 | :func:`trio_parallel.run_sync` does not work with functions defined at the REPL 23 | or in a Jupyter notebook cell due to the use of the `multiprocessing` ``spawn`` 24 | context... *unless* cloudpickle_ is also installed! 25 | 26 | A minimal program that dispatches work with :func:`run_sync` looks like this: 27 | 28 | .. literalinclude:: examples/minimal.py 29 | 30 | Just like that, you've dispatched a CPU-bound synchronous function to a worker 31 | subprocess and returned the result! However, only doing this much is a bit pointless; 32 | we are just expending the startup time of a whole python process to achieve the same 33 | result that we could have gotten synchronously. To take advantage, some other task 34 | needs to be able to run concurrently: 35 | 36 | .. literalinclude:: examples/checkpointing.py 37 | 38 | The output of this script indicates that the Trio event loop is running smoothly. 39 | Still, this doesn't demonstrate much advantage over :func:`trio.to_thread.run_sync`. 40 | You can see for yourself by substituting the function calls, since the call 41 | signatures are intentionally identical. 42 | 43 | No, ``trio-parallel`` really shines when your function has significant CPU-intensive 44 | work that regularly involves the python interpreter: 45 | 46 | .. literalinclude:: examples/parallel_loops.py 47 | 48 | This script should output a roughly equal number of loops completed for each process, 49 | as opposed to the lower and unbalanced number you might observe using threads. 50 | 51 | As with Trio threads, these processes are cached to minimize latency and resource 52 | usage. Despite this, executing a function in a process can take orders of magnitude 53 | longer than in a thread when dealing with large arguments or a cold cache. 54 | 55 | .. literalinclude:: examples/cache_warmup.py 56 | 57 | Therefore, we recommend avoiding worker process dispatch 58 | for synchronous functions with an expected duration of less than about 1 ms. 59 | 60 | Controlling Concurrency 61 | ----------------------- 62 | 63 | By default, ``trio-parallel`` will cache as many workers as the system has CPUs 64 | (as reported by :func:`os.cpu_count`), allowing fair, maximal, truly-parallel 65 | dispatch of CPU-bound work in the vast majority of cases. There are two ways to modify 66 | this behavior. The first is the ``limiter`` argument of :func:`run_sync`, which 67 | permits you to limit the concurrency of a specific function dispatch. In some cases, 68 | it may be useful to modify the default limiter, which will affect all :func:`run_sync` 69 | calls. 70 | 71 | .. autofunction:: current_default_worker_limiter 72 | 73 | Cancellation and Exceptions 74 | --------------------------- 75 | 76 | Unlike threads, subprocesses are strongly isolated from the parent process, which 77 | allows two important features that cannot be portably implemented in threads: 78 | 79 | - Forceful cancellation: a deadlocked call or infinite loop can be cancelled 80 | by completely terminating the process. 81 | - Protection from errors: if a call segfaults or an extension module has an 82 | unrecoverable error, the worker may die but the main process will raise 83 | a normal Python exception. 84 | 85 | Cancellation 86 | ~~~~~~~~~~~~ 87 | 88 | Cancellation of :func:`trio_parallel.run_sync` is modeled after 89 | :func:`trio.to_thread.run_sync`, with a ``kill_on_cancel`` keyword argument that 90 | defaults to ``False``. Entry is an unconditional checkpoint, i.e. 
regardless of
91 | the value of ``kill_on_cancel``. The key difference in behavior comes upon cancellation
92 | when ``kill_on_cancel=True``. A Trio thread will be abandoned to run in the background
93 | while this package will kill the worker with ``SIGKILL``/``TerminateProcess``:
94 | 
95 | .. literalinclude:: examples/cancellation.py
96 | 
97 | We recommend avoiding the ``kill_on_cancel`` feature
98 | if loss of intermediate results, writes to the filesystem, or shared memory writes
99 | may leave the larger system in an incoherent state.
100 | 
101 | Exceptions
102 | ~~~~~~~~~~
103 | 
104 | .. autoexception:: BrokenWorkerError
105 | 
106 | Signal Handling
107 | ~~~~~~~~~~~~~~~
108 | 
109 | This library configures worker processes to ignore ``SIGINT`` to have correct semantics
110 | when you hit ``CTRL+C``, but all other signal handlers are left in python's default
111 | state. This can have surprising consequences if you handle signals in the main
112 | process, as the workers are in the same process group but do not share the same
113 | signal handlers. For example, if you handle ``SIGTERM`` in the main process to
114 | achieve a graceful shutdown of a service_, a spurious :class:`BrokenWorkerError` will
115 | be raised by any running calls to :func:`run_sync`. You will either
116 | need to handle the exceptions, change the method you use to send signals, or configure
117 | the workers to handle signals at initialization using the tools in the next section.
118 | 
119 | Configuring workers
120 | -------------------
121 | 
122 | By default, :func:`trio_parallel.run_sync` draws workers from a global cache
123 | that is shared across sequential and between concurrent :func:`trio.run()`
124 | calls, with workers' lifetimes limited to the life of the main process. This
125 | can be configured with `configure_default_context()`:
126 | 
127 | .. autofunction:: configure_default_context
128 | 
129 | This covers most use cases, but for the many edge cases, `open_worker_context()`
130 | yields a `WorkerContext` object on which `WorkerContext.run_sync()` pulls workers
131 | from an isolated cache with behavior specified by the class arguments. It is only
132 | advisable to use this if specific control over worker type, state, or
133 | lifetime is required in a subset of your application.
134 | 
135 | .. autofunction:: open_worker_context
136 |    :async-with: ctx
137 | 
138 | .. autoclass:: WorkerContext()
139 |    :members:
140 | 
141 | Alternatively, you can implicitly override the default context of :func:`run_sync`
142 | in any subset of the task tree using `cache_scope()`. This async context manager
143 | sets an internal TreeVar_ so that the current task and all nested subtasks operate
144 | using an internal, isolated `WorkerContext`, without having to manually pass a
145 | context object around.
146 | 
147 | .. autofunction:: cache_scope
148 |    :async-with:
149 | 
150 | One typical use case for configuring workers is to set a policy for taking a worker
151 | out of service. For this, use the ``retire`` argument. This example shows how to
152 | build (trivial) stateless and stateful worker retirement policies.
153 | 
154 | .. literalinclude:: examples/single_use_workers.py
155 | 
156 | A more realistic use-case might examine the worker process's memory usage (e.g. with
157 | psutil_) and retire if usage is too high, as sketched below.
158 | 
159 | If you are retiring workers frequently, like in the single-use case, a large amount
160 | of process startup overhead will be incurred with the default "spawn" worker type.
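Returning to the memory-based retirement policy mentioned above, a minimal sketch
might look like this (assuming psutil_ is installed; the ~1 GiB threshold is an
arbitrary stand-in):

.. code-block:: python

    import psutil
    import trio_parallel


    def retire_if_bloated():
        # ``retire`` runs inside the worker after each job, so
        # psutil.Process() here is the worker process, not the parent.
        return psutil.Process().memory_info().rss > 2**30  # ~1 GiB


    async def amain():
        async with trio_parallel.open_worker_context(retire=retire_if_bloated) as ctx:
            ...  # dispatch jobs with ctx.run_sync as usual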
161 | If your platform supports it, an alternate `WorkerType` might cut that overhead down.
162 | 
163 | .. autoclass:: WorkerType()
164 | 
165 | Internal Esoterica
166 | ------------------
167 | 
168 | You probably won't use these... but create an issue if you do and need help!
169 | 
170 | .. autofunction:: default_context_statistics
171 | 
172 | .. _cloudpickle: https://github.com/cloudpipe/cloudpickle
173 | .. _psutil: https://psutil.readthedocs.io/en/latest/
174 | .. _service: https://github.com/richardsheridan/trio-parallel/issues/348
175 | .. _TreeVar: https://tricycle.readthedocs.io/en/latest/reference.html#tricycle.TreeVar
176 | 
-------------------------------------------------------------------------------- /docs/source/test_examples.py: --------------------------------------------------------------------------------
1 | # https://stackoverflow.com/a/56813896 CC BY-SA 4.0
2 | # https://stackoverflow.com/a/36295481 CC BY-SA 4.0
3 | 
4 | import pathlib
5 | import subprocess
6 | import sys
7 | 
8 | import pytest
9 | 
10 | scripts = pathlib.Path(__file__).with_name("examples").resolve().glob("*.py")
11 | 
12 | 
13 | @pytest.mark.parametrize("script", scripts, ids=lambda x: x.name)
14 | def test_all(script, capfd):
15 |     subprocess.run([sys.executable, str(script)], check=True, timeout=60)
16 |     # TODO: elegantly assert something about stdout
17 | 
-------------------------------------------------------------------------------- /newsfragments/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/richardsheridan/trio-parallel/58aed5f51b04331c8fcc987f76fad33f82fcea8a/newsfragments/.gitkeep -------------------------------------------------------------------------------- /newsfragments/README.rst: --------------------------------------------------------------------------------
1 | Adding newsfragments
2 | ====================
3 | 
4 | This directory collects "newsfragments": short files that each contain
5 | a snippet of ReST-formatted text that will be added to the next
6 | release notes. This should be a description of aspects of the change
7 | (if any) that are relevant to users. (This contrasts with your commit
8 | message and PR description, which are a description of the change as
9 | relevant to people working on the code itself.)
10 | 
11 | Each file should be named like ``<ISSUE>.<TYPE>.rst``, where
12 | ``<ISSUE>`` is an issue number, and ``<TYPE>`` is one of:
13 | 
14 | * ``feature``
15 | * ``bugfix``
16 | * ``doc``
17 | * ``removal``
18 | * ``misc``
19 | 
20 | So for example: ``123.feature.rst``, ``456.bugfix.rst``
21 | 
22 | If your PR fixes an issue, use that number here. If there is no issue,
23 | then after you submit the PR and get the PR number you can add a
24 | newsfragment using that instead.
25 | 
26 | Note that the ``towncrier`` tool will automatically
27 | reflow your text, so don't try to do any fancy formatting. You can
28 | install ``towncrier`` and then run ``towncrier build --draft --version {version}``
29 | if you want to get a preview of how your change will look in the final release
30 | notes.
31 | 
32 | 
33 | Making releases
34 | ===============
35 | 
36 | ``pip install towncrier``, then run ``towncrier build --version {version}``.
37 | (You can use ``towncrier build --draft --version {version}`` to get a preview
38 | of what this will do.)
39 | 
40 | You can configure ``towncrier`` (for example: customizing the
41 | different types of changes) by modifying ``pyproject.toml``.
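For example, a sketch of the whole flow for a hypothetical change fixing
issue ``123`` (the version number here is illustrative)::

    pip install towncrier
    echo "Fixed the frobnicator." > newsfragments/123.bugfix.rst
    towncrier build --draft --version 1.3.1  # preview the rendered notes
    towncrier build --version 1.3.1  # prepend them to docs/source/history.rst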
42 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | # keep me in sync with build.in! 3 | requires = ["setuptools >= 45", "wheel", "setuptools_scm[toml] >= 7.0.5"] 4 | build-backend = "setuptools.build_meta" 5 | 6 | [project] 7 | name = "trio-parallel" 8 | description = "CPU parallelism for Trio" 9 | readme = { file = "README.rst", content-type = "text/x-rst" } 10 | authors = [{ name = "Richard Sheridan", email = "richard.sheridan@gmail.com" }] 11 | license = { text = "MIT OR Apache-2.0" } 12 | keywords = ["parallel", "trio", "async", "dispatch", "multiprocessing"] 13 | classifiers = [ 14 | "License :: OSI Approved :: MIT License", 15 | "License :: OSI Approved :: Apache Software License", 16 | "Framework :: Trio", 17 | "Operating System :: POSIX :: Linux", 18 | "Operating System :: MacOS :: MacOS X", 19 | "Operating System :: Microsoft :: Windows", 20 | "Programming Language :: Python :: 3 :: Only", 21 | "Programming Language :: Python :: 3.9", 22 | "Programming Language :: Python :: 3.10", 23 | "Programming Language :: Python :: 3.11", 24 | "Programming Language :: Python :: 3.12", 25 | "Programming Language :: Python :: 3.13", 26 | "Programming Language :: Python :: Implementation :: CPython", 27 | "Programming Language :: Python :: Implementation :: PyPy", 28 | "Development Status :: 5 - Production/Stable", 29 | "Intended Audience :: Developers", 30 | "Intended Audience :: Financial and Insurance Industry", 31 | "Intended Audience :: Science/Research", 32 | "Topic :: Software Development :: Libraries", 33 | ] 34 | dependencies = [ 35 | "trio >= 0.18.0", 36 | "outcome", 37 | "attrs >= 17.3.0", 38 | "cffi; os_name == 'nt' and implementation_name != 'pypy'", 39 | "tblib", 40 | "tricycle >= 0.3.0" 41 | ] 42 | requires-python = ">=3.7" 43 | dynamic = ["version"] 44 | 45 | [project.urls] 46 | Homepage = "https://github.com/richardsheridan/trio-parallel" 47 | Documentation = "https://trio-parallel.readthedocs.io/" 48 | Changelog = "https://trio-parallel.readthedocs.io/en/latest/history.html" 49 | 50 | [project.optional-dependencies] 51 | test = [ 52 | "pytest", 53 | "pytest-trio", 54 | "trio >= 0.23.0", 55 | ] 56 | 57 | [tool.setuptools] 58 | include-package-data = true 59 | packages = ["trio_parallel", "_trio_parallel_workers"] 60 | 61 | [tool.setuptools_scm] 62 | 63 | [tool.towncrier] 64 | package = "trio_parallel" 65 | filename = "docs/source/history.rst" 66 | directory = "newsfragments" 67 | name = "trio-parallel" 68 | underlines = ["-", "~", "^"] 69 | issue_format = "`#{issue} `__" 70 | 71 | [tool.pytest.ini_options] 72 | addopts = "--pyargs -r a -n auto --verbose --cov --cov-config=pyproject.toml --cov-context=test" 73 | filterwarnings = ["error"] 74 | xfail_strict = true 75 | faulthandler_timeout = 60 76 | 77 | [tool.coverage.run] 78 | branch = true 79 | concurrency = ["multiprocessing", "thread"] 80 | parallel = true 81 | source_pkgs = ["trio_parallel", "_trio_parallel_workers"] 82 | disable_warnings = [ 83 | "module-not-imported", # for test_clean_exit_on_shutdown 84 | "module-not-measured", # for fork shutdown tests 85 | "no-data-collected", # for fork shutdown tests 86 | ] 87 | 88 | [tool.coverage.report] 89 | precision = 1 90 | exclude_lines = [ 91 | "pragma: no cover", 92 | "abc.abstractmethod", 93 | "abstractmethod", 94 | ] 95 | 96 | [tool.coverage.html] 97 | show_contexts = true 98 | 99 | [tool.coverage.paths] 100 | source = 
["trio_parallel/", "*/trio_parallel"] 101 | workers = ["_trio_parallel_workers/", "*/_trio_parallel_workers"] 102 | -------------------------------------------------------------------------------- /requirements/coverage.in: -------------------------------------------------------------------------------- 1 | coverage[toml] -------------------------------------------------------------------------------- /requirements/coverage.txt: -------------------------------------------------------------------------------- 1 | # SHA1:6b3dcbe6a73ddc9b2689276326531186b8b52cf0 2 | # 3 | # This file is autogenerated by pip-compile-multi 4 | # To update, run: 5 | # 6 | # pip-compile-multi 7 | # 8 | coverage[toml]==7.6.9 9 | # via -r requirements\coverage.in 10 | -------------------------------------------------------------------------------- /requirements/dev.in: -------------------------------------------------------------------------------- 1 | -r test.in 2 | -r docs.in 3 | -r lint.in 4 | -r coverage.in 5 | -------------------------------------------------------------------------------- /requirements/dev.txt: -------------------------------------------------------------------------------- 1 | # SHA1:64f3330cb21a24de944be0c0611a2d1920d63b44 2 | # 3 | # This file is autogenerated by pip-compile-multi 4 | # To update, run: 5 | # 6 | # pip-compile-multi 7 | # 8 | -r coverage.txt 9 | -r docs.txt 10 | -r lint.txt 11 | -r test.txt 12 | -------------------------------------------------------------------------------- /requirements/docs.in: -------------------------------------------------------------------------------- 1 | -r install.in 2 | sphinx > 3.5.0 3 | sphinx_rtd_theme 4 | sphinxcontrib-trio 5 | towncrier -------------------------------------------------------------------------------- /requirements/docs.txt: -------------------------------------------------------------------------------- 1 | # SHA1:9e4adcae1f2b907f280c376935bee1cfd1604d87 2 | # 3 | # This file is autogenerated by pip-compile-multi 4 | # To update, run: 5 | # 6 | # pip-compile-multi 7 | # 8 | -r install.txt 9 | alabaster==1.0.0 10 | # via sphinx 11 | babel==2.16.0 12 | # via sphinx 13 | certifi==2024.12.14 14 | # via requests 15 | charset-normalizer==3.4.1 16 | # via requests 17 | click==8.1.8 18 | # via towncrier 19 | colorama==0.4.6 20 | # via 21 | # click 22 | # sphinx 23 | docutils==0.21.2 24 | # via 25 | # sphinx 26 | # sphinx-rtd-theme 27 | imagesize==1.4.1 28 | # via sphinx 29 | jinja2==3.1.5 30 | # via 31 | # sphinx 32 | # towncrier 33 | markupsafe==3.0.2 34 | # via jinja2 35 | packaging==24.2 36 | # via sphinx 37 | pygments==2.18.0 38 | # via sphinx 39 | requests==2.32.3 40 | # via sphinx 41 | snowballstemmer==2.2.0 42 | # via sphinx 43 | sphinx==8.1.3 44 | # via 45 | # -r requirements\docs.in 46 | # sphinx-rtd-theme 47 | # sphinxcontrib-jquery 48 | # sphinxcontrib-trio 49 | sphinx-rtd-theme==3.0.2 50 | # via -r requirements\docs.in 51 | sphinxcontrib-applehelp==2.0.0 52 | # via sphinx 53 | sphinxcontrib-devhelp==2.0.0 54 | # via sphinx 55 | sphinxcontrib-htmlhelp==2.1.0 56 | # via sphinx 57 | sphinxcontrib-jquery==4.1 58 | # via sphinx-rtd-theme 59 | sphinxcontrib-jsmath==1.0.1 60 | # via sphinx 61 | sphinxcontrib-qthelp==2.0.0 62 | # via sphinx 63 | sphinxcontrib-serializinghtml==2.0.0 64 | # via sphinx 65 | sphinxcontrib-trio==1.1.2 66 | # via -r requirements\docs.in 67 | towncrier==24.8.0 68 | # via -r requirements\docs.in 69 | urllib3==2.3.0 70 | # via requests 71 | 
-------------------------------------------------------------------------------- /requirements/install.in: -------------------------------------------------------------------------------- 1 | # keep me in sync with pyproject.toml! 2 | trio >= 0.18.0 3 | outcome 4 | attrs >= 17.3.0 5 | cffi; os_name == 'nt' and implementation_name != 'pypy' 6 | tblib 7 | tricycle >= 0.3.0 8 | -------------------------------------------------------------------------------- /requirements/install.txt: -------------------------------------------------------------------------------- 1 | # SHA1:ab818e2cd7a4dab60404b5f2cc3e40669cdb1c52 2 | # 3 | # This file is autogenerated by pip-compile-multi 4 | # To update, run: 5 | # 6 | # pip-compile-multi 7 | # 8 | attrs==24.3.0 9 | # via 10 | # -r requirements\install.in 11 | # outcome 12 | # trio 13 | cffi==1.17.1 ; os_name == "nt" and implementation_name != "pypy" 14 | # via 15 | # -r requirements\install.in 16 | # trio 17 | idna==3.10 18 | # via trio 19 | outcome==1.3.0.post0 20 | # via 21 | # -r requirements\install.in 22 | # trio 23 | pycparser==2.22 24 | # via cffi 25 | sniffio==1.3.1 26 | # via trio 27 | sortedcontainers==2.4.0 28 | # via trio 29 | tblib==3.0.0 30 | # via -r requirements\install.in 31 | tricycle==0.4.1 32 | # via -r requirements\install.in 33 | trio==0.27.0 34 | # via 35 | # -r requirements\install.in 36 | # tricycle 37 | -------------------------------------------------------------------------------- /requirements/lint.in: -------------------------------------------------------------------------------- 1 | black 2 | flake8 3 | flake8-async 4 | -------------------------------------------------------------------------------- /requirements/lint.txt: -------------------------------------------------------------------------------- 1 | # SHA1:6c87e66b8ae90e26b9c63847f5558cd02603fc9e 2 | # 3 | # This file is autogenerated by pip-compile-multi 4 | # To update, run: 5 | # 6 | # pip-compile-multi 7 | # 8 | black==24.10.0 9 | # via -r requirements\lint.in 10 | click==8.1.8 11 | # via black 12 | colorama==0.4.6 13 | # via click 14 | flake8==7.1.1 15 | # via 16 | # -r requirements\lint.in 17 | # flake8-async 18 | flake8-async==24.11.4 19 | # via -r requirements\lint.in 20 | libcst==1.5.1 21 | # via flake8-async 22 | mccabe==0.7.0 23 | # via flake8 24 | mypy-extensions==1.0.0 25 | # via black 26 | packaging==24.2 27 | # via black 28 | pathspec==0.12.1 29 | # via black 30 | platformdirs==4.3.6 31 | # via black 32 | pycodestyle==2.12.1 33 | # via flake8 34 | pyflakes==3.2.0 35 | # via flake8 36 | pyyaml==6.0.2 37 | # via libcst 38 | -------------------------------------------------------------------------------- /requirements/test.in: -------------------------------------------------------------------------------- 1 | -r install.in 2 | pytest 3 | pytest-trio 4 | pytest-cov 5 | pytest-xdist 6 | coverage[toml] 7 | -------------------------------------------------------------------------------- /requirements/test.txt: -------------------------------------------------------------------------------- 1 | # SHA1:8b82c6850c274e5160fedb09dd88c2c4b26e14dd 2 | # 3 | # This file is autogenerated by pip-compile-multi 4 | # To update, run: 5 | # 6 | # pip-compile-multi 7 | # 8 | -r install.txt 9 | colorama==0.4.6 10 | # via pytest 11 | coverage[toml]==7.6.9 12 | # via 13 | # -r requirements\test.in 14 | # pytest-cov 15 | execnet==2.1.1 16 | # via pytest-xdist 17 | iniconfig==2.0.0 18 | # via pytest 19 | packaging==24.2 20 | # via pytest 21 | pluggy==1.5.0 22 | # via pytest 23 | 
pytest==8.3.4 24 | # via 25 | # -r requirements\test.in 26 | # pytest-cov 27 | # pytest-trio 28 | # pytest-xdist 29 | pytest-cov==6.0.0 30 | # via -r requirements\test.in 31 | pytest-trio==0.8.0 32 | # via -r requirements\test.in 33 | pytest-xdist==3.6.1 34 | # via -r requirements\test.in 35 | -------------------------------------------------------------------------------- /trio_parallel/__init__.py: -------------------------------------------------------------------------------- 1 | """trio-parallel: CPU parallelism for Trio""" 2 | 3 | from ._impl import ( 4 | run_sync, 5 | open_worker_context, 6 | cache_scope, 7 | WorkerContext, 8 | WorkerType, 9 | current_default_worker_limiter, 10 | configure_default_context, 11 | default_context_statistics, 12 | ) 13 | from ._abc import BrokenWorkerError 14 | 15 | 16 | # Vendored from trio._util in v0.20.0 under identical MIT/Apache2 license. 17 | # Copyright Contributors to the Trio project. 18 | def fixup_module_metadata(module_name, namespace): 19 | seen_ids = set() 20 | 21 | def fix_one(qualname, name, obj): 22 | # avoid infinite recursion (relevant when using 23 | # typing.Generic, for example) 24 | if id(obj) in seen_ids: 25 | return 26 | seen_ids.add(id(obj)) 27 | 28 | mod = getattr(obj, "__module__", None) 29 | if mod is not None and mod.startswith("trio_parallel."): 30 | obj.__module__ = module_name 31 | # Modules, unlike everything else in Python, put fully-qualified 32 | # names into their __name__ attribute. Trio checks for "." to avoid 33 | # rewriting these, but we don't have any, so it's always true. 34 | nodot = hasattr(obj, "__name__") and "." not in obj.__name__ 35 | if nodot: # pragma: no branch 36 | obj.__name__ = name 37 | obj.__qualname__ = qualname 38 | if isinstance(obj, type): 39 | for attr_name, attr_value in obj.__dict__.items(): 40 | fix_one(objname + "." + attr_name, attr_name, attr_value) 41 | 42 | for objname, obj in namespace.items(): 43 | if not objname.startswith("_"): # ignore private attributes 44 | fix_one(objname, objname, obj) 45 | 46 | 47 | fixup_module_metadata(__name__, globals()) 48 | del fixup_module_metadata 49 | -------------------------------------------------------------------------------- /trio_parallel/_abc.py: -------------------------------------------------------------------------------- 1 | """Abstract base classes for internal use when implementing future workers 2 | 3 | The idea is that if we keep the interface between the implementation of the 4 | trio-parallel API minimal, we can put in new workers and options without needing 5 | frontend rewrites.""" 6 | 7 | from abc import ABC, abstractmethod, ABCMeta 8 | from typing import Optional, Callable, TypeVar, Type, Any, Deque, Generic 9 | 10 | from outcome import Outcome 11 | 12 | T = TypeVar("T") 13 | 14 | 15 | class BrokenWorkerError(RuntimeError): 16 | """Raised when a worker fails or dies unexpectedly. 17 | 18 | This error is not typically encountered in normal use, and indicates a severe 19 | failure of either ``trio-parallel`` or the code that was executing in the worker. 20 | Some example failures may include segfaults, being killed by an external signal, 21 | or failing to cleanly shut down within a specified ``grace_period``. (See 22 | :func:`configure_default_context` and :func:`open_worker_context`.) 
23 | """ 24 | 25 | 26 | class AbstractWorker(ABC): 27 | @abstractmethod 28 | def __init__( 29 | self, 30 | idle_timeout: float, 31 | init: Optional[Callable[[], bool]], 32 | retire: Optional[Callable[[], bool]], 33 | ): 34 | pass 35 | 36 | @abstractmethod 37 | async def start(self): 38 | """Perform async startup tasks that really should be in init.""" 39 | 40 | @abstractmethod 41 | async def run_sync(self, sync_fn: Callable, *args) -> Optional[Outcome]: 42 | """Run the sync_fn in a worker. 43 | 44 | Args: 45 | sync_fn: A synchronous callable. 46 | *args: Positional arguments to pass to sync_fn. If you need keyword 47 | arguments, use :func:`functools.partial`. 48 | 49 | Returns: 50 | Optional[Outcome]: The outcome of the CPU bound job performed in the 51 | worker, or ``None``, indicating the work should be submitted again, 52 | but to a different worker, because this worker should be discarded. 53 | 54 | Raises: 55 | BrokenWorkerError: Indicates the worker died unexpectedly. Not encountered 56 | in normal use.""" 57 | 58 | @abstractmethod 59 | def shutdown(self): 60 | """Trigger a graceful shutdown of the worker. 61 | 62 | :meth:`run_sync` will return None in response to any future job submissions. 63 | Jobs in progress will complete as normal.""" 64 | 65 | @abstractmethod 66 | async def wait(self): 67 | """Wait for the worker to terminate.""" 68 | 69 | 70 | class WorkerCache(Deque[T], ABC, Generic[T]): 71 | @abstractmethod 72 | def prune(self): 73 | """Clean up any resources associated with workers that have timed out 74 | while idle in the cache.""" 75 | 76 | @abstractmethod 77 | def shutdown(self, timeout): 78 | """Stop and clean up any resources associated with all cached workers. 79 | 80 | Args: 81 | timeout: Time in seconds to wait for graceful shutdown before 82 | raising. 83 | 84 | Raises: 85 | BrokenWorkerError: Raised if any workers fail to respond to a graceful 86 | shutdown signal within ``grace_period``.""" 87 | 88 | 89 | # Vendored from trio._util in v0.19.0 under identical MIT/Apache2 license. 90 | # Copyright Contributors to the Trio project. 91 | # Modified so it's not Final so that we can create a test fake subclass. 92 | 93 | 94 | class NoPublicConstructor(ABCMeta): 95 | """Metaclass that ensures a private constructor. 96 | 97 | If a class uses this metaclass like this:: 98 | 99 | class SomeClass(metaclass=NoPublicConstructor): 100 | pass 101 | 102 | The metaclass will ensure that no sub class can be created, and that no instance 103 | can be initialized. 104 | 105 | If you try to instantiate your class (SomeClass()), a TypeError will be thrown. 106 | 107 | Raises 108 | ------ 109 | - TypeError if a sub class or an instance is created. 
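    Internal code can still create instances through the private ``_create``
    classmethod defined below, for example (a sketch; ``arg1`` and ``arg2``
    stand in for whatever the real initializer accepts)::

        instance = SomeClass._create(arg1, arg2)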
110 | """ 111 | 112 | def __call__(cls, *args, **kwargs): 113 | raise TypeError( 114 | f"{cls.__module__}.{cls.__qualname__} has no public constructor" 115 | ) 116 | 117 | def _create(cls: Type[T], *args: Any, **kwargs: Any) -> T: 118 | return super().__call__(*args, **kwargs) # type: ignore 119 | -------------------------------------------------------------------------------- /trio_parallel/_impl.py: -------------------------------------------------------------------------------- 1 | import atexit 2 | import os 3 | import sys 4 | import threading 5 | import warnings 6 | from contextlib import asynccontextmanager 7 | from enum import Enum 8 | from itertools import count 9 | from typing import Type, Callable, Any, TypeVar 10 | 11 | import attr 12 | import tricycle 13 | import trio 14 | 15 | from ._proc import WORKER_PROC_MAP 16 | from ._abc import WorkerCache, AbstractWorker, NoPublicConstructor 17 | 18 | T = TypeVar("T") 19 | 20 | # Sane default might be to expect cpu-bound work 21 | DEFAULT_LIMIT = os.cpu_count() or 1 22 | limiter_runvar = trio.lowlevel.RunVar("trio_parallel") 23 | 24 | 25 | def current_default_worker_limiter(): 26 | """Get the default `~trio.CapacityLimiter` used by 27 | :func:`trio_parallel.run_sync`. 28 | 29 | The most common reason to call this would be if you want to modify its 30 | :attr:`~trio.CapacityLimiter.total_tokens` attribute. This attribute 31 | is initialized to the number of CPUs reported by :func:`os.cpu_count`. 32 | 33 | """ 34 | try: 35 | return limiter_runvar.get() 36 | except LookupError: 37 | limiter = trio.CapacityLimiter(DEFAULT_LIMIT) 38 | limiter_runvar.set(limiter) 39 | return limiter 40 | 41 | 42 | WORKER_MAP = {**WORKER_PROC_MAP} 43 | 44 | WorkerType = Enum( 45 | "WorkerType", ((x.upper(), x) for x in WORKER_MAP), type=str, module=__name__ 46 | ) 47 | WorkerType.__doc__ = """An Enum of available kinds of workers. 48 | 49 | Instances of this Enum can be passed to :func:`open_worker_context` or 50 | :func:`configure_default_context` to customize worker startup behavior. 51 | 52 | Currently, these correspond to the values of 53 | :func:`multiprocessing.get_all_start_methods`, which vary by platform. 54 | ``WorkerType.SPAWN`` is the default and is supported on all platforms. 55 | ``WorkerType.FORKSERVER`` is available on POSIX platforms and could be an 56 | optimization if workers need to be killed/restarted often. 
57 | ``WorkerType.FORK`` is available on POSIX for experimentation, but not 58 | recommended.""" 59 | 60 | 61 | @attr.s(slots=True, eq=False) 62 | class ContextLifetimeManager: 63 | waiting_task = attr.ib(None) 64 | entrances = attr.ib(0) 65 | exits = attr.ib(0) 66 | # Counters are used for thread safety of the default cache 67 | enter_counter = attr.ib(factory=lambda: count(1)) 68 | exit_counter = attr.ib(factory=lambda: count(1)) 69 | 70 | async def __aenter__(self): # noqa: ASYNC910 71 | # only async to save indentation 72 | if self.waiting_task: 73 | raise trio.ClosedResourceError 74 | self.entrances = next(self.enter_counter) 75 | 76 | async def __aexit__(self, exc_type, exc_val, exc_tb): # noqa: ASYNC910 77 | # only async to save indentation 78 | self.exits = next(self.exit_counter) 79 | if self.waiting_task: 80 | if self.calc_running() == 0: 81 | trio.lowlevel.reschedule(self.waiting_task) 82 | 83 | def calc_running(self): 84 | return self.entrances - self.exits 85 | 86 | 87 | @attr.s(auto_attribs=True, slots=True, frozen=True) 88 | class WorkerContextStatistics: 89 | idle_workers: int 90 | running_workers: int 91 | 92 | 93 | def check_non_negative(instance, attribute, value): 94 | if value < 0.0: 95 | raise ValueError(f"{attribute} must be non-negative, was {value}") 96 | 97 | 98 | @attr.s(frozen=True, eq=False) 99 | class WorkerContext(metaclass=NoPublicConstructor): 100 | """A reification of a context where workers have a custom configuration. 101 | 102 | Instances of this class are to be created using :func:`open_worker_context`, 103 | and cannot be directly instantiated. The arguments to :func:`open_worker_context` 104 | that created an instance are available for inspection as read-only attributes. 105 | 106 | This class provides a ``statistics()`` method, which returns an object with the 107 | following fields: 108 | 109 | * ``idle_workers``: The number of live workers currently stored in the context's 110 | cache. 111 | * ``running_workers``: The number of workers currently executing jobs. 112 | """ 113 | 114 | idle_timeout: float = attr.ib( 115 | default=600.0, 116 | validator=check_non_negative, 117 | ) 118 | init: Callable[[], Any] = attr.ib( 119 | default=bool, 120 | validator=attr.validators.is_callable(), 121 | ) 122 | retire: Callable[[], Any] = attr.ib( 123 | default=bool, 124 | validator=attr.validators.is_callable(), 125 | ) 126 | grace_period: float = attr.ib( 127 | default=30.0, 128 | validator=check_non_negative, 129 | ) 130 | worker_type: WorkerType = attr.ib( 131 | default=WorkerType.SPAWN, 132 | validator=attr.validators.in_(set(WorkerType)), 133 | ) 134 | _worker_class: Type[AbstractWorker] = attr.ib(repr=False, init=False) 135 | _worker_cache: WorkerCache = attr.ib(repr=False, init=False) 136 | _lifetime: ContextLifetimeManager = attr.ib( 137 | factory=ContextLifetimeManager, repr=False, init=False 138 | ) 139 | 140 | def __attrs_post_init__(self): 141 | worker_class, cache_class = WORKER_MAP[self.worker_type] 142 | self.__dict__["_worker_class"] = worker_class 143 | self.__dict__["_worker_cache"] = cache_class() 144 | 145 | @trio.lowlevel.enable_ki_protection 146 | async def run_sync( 147 | self, 148 | sync_fn: Callable[..., T], 149 | *args, 150 | kill_on_cancel: bool = False, 151 | cancellable: bool = False, 152 | limiter: trio.CapacityLimiter = None, 153 | ) -> T: 154 | """Run ``sync_fn(*args)`` in a separate process and return/raise its outcome. 155 | 156 | Behaves according to the customized attributes of the context. 
See
157 |         :func:`trio_parallel.run_sync()` for details.
158 | 
159 |         Raises:
160 |             trio.ClosedResourceError: if this method is run on a closed context"""
161 |         if limiter is None:
162 |             limiter = current_default_worker_limiter()
163 | 
164 |         async with limiter, self._lifetime:
165 |             self._worker_cache.prune()
166 |             while True:
167 |                 with trio.CancelScope(shield=not (cancellable or kill_on_cancel)):
168 |                     try:
169 |                         worker = self._worker_cache.pop()
170 |                     except IndexError:
171 |                         worker = self._worker_class(
172 |                             self.idle_timeout, self.init, self.retire
173 |                         )
174 |                         await worker.start()
175 |                     result = await worker.run_sync(sync_fn, *args)
176 | 
177 |                 if result is None:
178 |                     # Prevent uninterruptible loop
179 |                     # when KI-protected & cancellable=False
180 |                     await trio.lowlevel.checkpoint_if_cancelled()
181 |                 else:
182 |                     self._worker_cache.append(worker)
183 |                     return result.unwrap()
184 | 
185 |     async def _aclose(self):
186 |         assert not self._lifetime.waiting_task
187 |         self._lifetime.waiting_task = trio.lowlevel.current_task()
188 |         with trio.CancelScope(shield=True):
189 |             if self._lifetime.calc_running() != 0:
190 |                 assert self is not get_default_context()
191 | 
192 |                 def abort_func(raise_cancel):  # pragma: no cover
193 |                     return trio.lowlevel.Abort.FAILED  # never cancelled anyway
194 | 
195 |                 await trio.lowlevel.wait_task_rescheduled(abort_func)
196 |             await trio.to_thread.run_sync(
197 |                 self._worker_cache.shutdown, self.grace_period
198 |             )
199 | 
200 |     @trio.lowlevel.enable_ki_protection
201 |     def statistics(self):
202 |         self._worker_cache.prune()
203 |         return WorkerContextStatistics(
204 |             idle_workers=len(self._worker_cache),
205 |             running_workers=self._lifetime.calc_running(),
206 |         )
207 | 
208 | 
209 | # Exists on all platforms as single source of truth for kwarg defaults
210 | DEFAULT_CONTEXT = WorkerContext._create()
211 | 
212 | 
213 | def configure_default_context(
214 |     idle_timeout=DEFAULT_CONTEXT.idle_timeout,
215 |     init=DEFAULT_CONTEXT.init,
216 |     retire=DEFAULT_CONTEXT.retire,
217 |     grace_period=DEFAULT_CONTEXT.grace_period,
218 |     worker_type=WorkerType.SPAWN,
219 | ):
220 |     """Configure the default `WorkerContext` parameters associated with `run_sync`.
221 | 
222 |     Args:
223 |         idle_timeout (float): The time in seconds an idle worker will
224 |             wait for a CPU-bound job before shutting down and releasing its own
225 |             resources. Pass `math.inf` to wait forever. MUST be non-negative.
226 |         init (Callable[[], bool]):
227 |             An object to call within the worker before waiting for jobs.
228 |             This is suitable for initializing worker state so that such stateful logic
229 |             does not need to be included in functions passed to
230 |             :func:`trio_parallel.run_sync`. MUST be callable without arguments.
231 |         retire (Callable[[], bool]):
232 |             An object to call within the worker after executing a CPU-bound job.
233 |             The return value indicates whether the worker should be retired (shut down).
234 |             By default, workers are never retired.
235 |             The process-global environment is stable between calls. Among other things,
236 |             that means that storing state in global variables works.
237 |             MUST be callable without arguments.
238 |         grace_period (float): The time in seconds to wait in the atexit handler for
239 |             workers to exit before issuing SIGKILL/TerminateProcess and raising
240 |             `BrokenWorkerError`. Pass `math.inf` to wait forever. MUST be non-negative.
241 |         worker_type (WorkerType): The kind of worker to create, see :class:`WorkerType`.
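    For example, a minimal sketch that relaxes the idle timeout of the default
    workers before any use of `run_sync` (``math.inf`` is explicitly supported)::

        import math, trio_parallel

        trio_parallel.configure_default_context(idle_timeout=math.inf)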
242 | 243 | Raises: 244 | RuntimeError: if this function is called outside the main thread. 245 | 246 | .. warning:: 247 | 248 | This function is meant to be used once before any usage of `run_sync`. 249 | Doing otherwise may (on POSIX) result in workers being leaked until 250 | the main process ends, or (on Win32) having no effect until the next `trio.run`! 251 | """ 252 | new_parm_dict = locals().copy() 253 | if threading.current_thread() is not threading.main_thread(): 254 | raise RuntimeError("Only configure default context from the main thread") 255 | if sys.platform == "win32": 256 | try: 257 | DEFAULT_CONTEXT_RUNVAR.get() 258 | except (LookupError, RuntimeError): 259 | pass 260 | else: 261 | warnings.warn("Previous default context active until next `trio.run`") 262 | DEFAULT_CONTEXT_PARAMS.update(**new_parm_dict) 263 | else: 264 | stats = default_context_statistics() 265 | if stats.idle_workers or stats.running_workers: 266 | warnings.warn("Previous default context leaving zombie workers behind") 267 | # assign to a local for KI protection 268 | ctx = WorkerContext._create(**new_parm_dict) 269 | atexit.register(graceful_default_shutdown, ctx) 270 | global DEFAULT_CONTEXT 271 | DEFAULT_CONTEXT = ctx 272 | 273 | 274 | CACHE_SCOPE_TREEVAR = tricycle.TreeVar("tp_cache_scope") 275 | 276 | if sys.platform == "win32": 277 | DEFAULT_CONTEXT_RUNVAR = trio.lowlevel.RunVar("tp_win32_ctx") 278 | DEFAULT_CONTEXT_PARAMS = {} 279 | 280 | def get_default_context(): 281 | try: 282 | return CACHE_SCOPE_TREEVAR.get() 283 | except LookupError: 284 | pass 285 | try: 286 | ctx = DEFAULT_CONTEXT_RUNVAR.get() 287 | except LookupError: 288 | ctx = WorkerContext._create(**DEFAULT_CONTEXT_PARAMS) 289 | trio.lowlevel.spawn_system_task(close_at_run_end, ctx) 290 | # set ctx last so as not to leak on KeyboardInterrupt 291 | DEFAULT_CONTEXT_RUNVAR.set(ctx) 292 | return ctx 293 | 294 | async def close_at_run_end(ctx): 295 | try: 296 | await trio.sleep_forever() 297 | finally: 298 | await ctx._aclose() # noqa: ASYNC102 299 | 300 | else: 301 | 302 | def get_default_context(): 303 | try: 304 | return CACHE_SCOPE_TREEVAR.get() 305 | except (LookupError, RuntimeError): 306 | return DEFAULT_CONTEXT 307 | 308 | @atexit.register 309 | def graceful_default_shutdown(ctx=DEFAULT_CONTEXT): 310 | ctx._worker_cache.shutdown(ctx.grace_period) 311 | 312 | 313 | def default_context_statistics(): 314 | """Return the statistics corresponding to the default context. 315 | 316 | Because the default context used by `trio_parallel.run_sync` is a private 317 | implementation detail, this function serves to provide public access to the default 318 | context statistics object. 319 | 320 | .. note:: 321 | 322 | The statistics are only eventually consistent in the case of multiple trio 323 | threads concurrently using `trio_parallel.run_sync`.""" 324 | return get_default_context().statistics() 325 | 326 | 327 | @asynccontextmanager 328 | @trio.lowlevel.enable_ki_protection 329 | async def open_worker_context( 330 | idle_timeout=DEFAULT_CONTEXT.idle_timeout, 331 | init=DEFAULT_CONTEXT.init, 332 | retire=DEFAULT_CONTEXT.retire, 333 | grace_period=DEFAULT_CONTEXT.grace_period, 334 | worker_type=WorkerType.SPAWN, 335 | ): 336 | """Create a new, customized worker context with isolated workers. 337 | 338 | The context will automatically wait for any running workers to become idle when 339 | exiting the scope. 
Since this wait cannot be cancelled, it is more convenient to 340 | only pass the context object to tasks that cannot outlive the scope, for example, 341 | by using a :class:`~trio.Nursery`. 342 | 343 | Args: 344 | idle_timeout (float): The time in seconds an idle worker will 345 | wait for a CPU-bound job before shutting down and releasing its own 346 | resources. Pass `math.inf` to wait forever. MUST be non-negative. 347 | init (Callable[[], bool]): 348 | An object to call within the worker before waiting for jobs. 349 | This is suitable for initializing worker state so that such stateful logic 350 | does not need to be included in functions passed to 351 | :func:`WorkerContext.run_sync`. MUST be callable without arguments. 352 | retire (Callable[[], bool]): 353 | An object to call within the worker after executing a CPU-bound job. 354 | The return value indicates whether the worker should be retired (shut down). 355 | By default, workers are never retired. 356 | The process-global environment is stable between calls. Among other things, 357 | that means that storing state in global variables works. 358 | MUST be callable without arguments. 359 | grace_period (float): The time in seconds to wait in ``__aexit__`` for workers to 360 | exit before issuing SIGKILL/TerminateProcess and raising `BrokenWorkerError`. 361 | Pass `math.inf` to wait forever. MUST be non-negative. 362 | worker_type (WorkerType): The kind of worker to create, see :class:`WorkerType`. 363 | 364 | Raises: 365 | ValueError | TypeError: if an invalid value is passed for an argument, such as a 366 | negative timeout. 367 | BrokenWorkerError: if a worker does not shut down cleanly when exiting the scope. 368 | 369 | .. warning:: 370 | 371 | The callables passed to retire MUST not raise! Doing so will result in a 372 | :class:`BrokenWorkerError` at an indeterminate future 373 | :func:`WorkerContext.run_sync` call. 374 | 375 | """ 376 | ctx = WorkerContext._create(idle_timeout, init, retire, grace_period, worker_type) 377 | try: 378 | yield ctx 379 | finally: 380 | await ctx._aclose() # noqa: ASYNC102 381 | 382 | 383 | @asynccontextmanager 384 | @trio.lowlevel.enable_ki_protection 385 | async def cache_scope( 386 | idle_timeout=DEFAULT_CONTEXT.idle_timeout, 387 | init=DEFAULT_CONTEXT.init, 388 | retire=DEFAULT_CONTEXT.retire, 389 | grace_period=DEFAULT_CONTEXT.grace_period, 390 | worker_type=WorkerType.SPAWN, 391 | ): 392 | """ 393 | Override the configuration of `trio_parallel.run_sync()` in this task and all 394 | subtasks. 395 | 396 | The internal `WorkerContext` is passed implicitly down the task tree and can 397 | be overridden by nested scopes. Explicit `WorkerContext` objects from 398 | `open_worker_context` will not be overridden. 399 | 400 | Args: 401 | idle_timeout (float): The time in seconds an idle worker will 402 | wait for a CPU-bound job before shutting down and releasing its own 403 | resources. Pass `math.inf` to wait forever. MUST be non-negative. 404 | init (Callable[[], bool]): 405 | An object to call within the worker before waiting for jobs. 406 | This is suitable for initializing worker state so that such stateful logic 407 | does not need to be included in functions passed to 408 | :func:`WorkerContext.run_sync`. MUST be callable without arguments. 409 | retire (Callable[[], bool]): 410 | An object to call within the worker after executing a CPU-bound job. 411 | The return value indicates whether the worker should be retired (shut down). 412 | By default, workers are never retired.
413 | The process-global environment is stable between calls. Among other things, 414 | that means that storing state in global variables works. 415 | MUST be callable without arguments. 416 | grace_period (float): The time in seconds to wait in ``__aexit__`` for workers to 417 | exit before issuing SIGKILL/TerminateProcess and raising `BrokenWorkerError`. 418 | Pass `math.inf` to wait forever. MUST be non-negative. 419 | worker_type (WorkerType): The kind of worker to create, see :class:`WorkerType`. 420 | 421 | Raises: 422 | ValueError | TypeError: if an invalid value is passed for an argument, such as a 423 | negative timeout. 424 | BrokenWorkerError: if a worker does not shut down cleanly when exiting the scope. 425 | 426 | .. warning:: 427 | 428 | The callables passed to retire MUST not raise! Doing so will result in a 429 | :class:`BrokenWorkerError` at an indeterminate future 430 | :func:`WorkerContext.run_sync` call. 431 | """ 432 | 433 | ctx = WorkerContext._create(idle_timeout, init, retire, grace_period, worker_type) 434 | 435 | try: 436 | token = CACHE_SCOPE_TREEVAR.set(ctx) 437 | yield 438 | finally: 439 | CACHE_SCOPE_TREEVAR.reset(token) 440 | await ctx._aclose() # noqa: ASYNC102 441 | 442 | 443 | async def run_sync( 444 | sync_fn: Callable[..., T], 445 | *args, 446 | kill_on_cancel: bool = False, 447 | cancellable: bool = False, 448 | limiter: trio.CapacityLimiter = None, 449 | ) -> T: 450 | """Run ``sync_fn(*args)`` in a separate process and return/raise its outcome. 451 | 452 | This function is intended to enable the following: 453 | 454 | - Circumventing the GIL to run CPU-bound functions in parallel 455 | - Making blocking APIs or infinite loops truly cancellable through 456 | SIGKILL/TerminateProcess without leaking resources 457 | - Protecting the main process from unstable/crashy code 458 | 459 | Currently, this is a wrapping of :class:`multiprocessing.Process` that 460 | follows the API of :func:`trio.to_thread.run_sync`. 461 | Other :mod:`multiprocessing` features may work but are not officially 462 | supported, and all the normal :mod:`multiprocessing` caveats apply. 463 | To customize worker behavior, use :func:`open_worker_context`. 464 | 465 | The underlying workers are cached LIFO and reused to minimize latency. 466 | Global state of the workers is not stable between and across calls. 467 | 468 | Args: 469 | sync_fn: An importable or pickleable synchronous callable. See the 470 | :mod:`multiprocessing` documentation for detailed explanation of 471 | limitations. 472 | *args: Positional arguments to pass to sync_fn. If you need keyword 473 | arguments, use :func:`functools.partial`. 474 | kill_on_cancel (bool): Whether to allow cancellation of this operation. 475 | Cancellation always involves abrupt termination of the worker process 476 | with SIGKILL/TerminateProcess. To obtain correct semantics with CTRL+C, 477 | SIGINT is ignored when raised in workers. 478 | cancellable (bool): Alias for ``kill_on_cancel``. If both aliases are passed, 479 | Python's ``or`` operator combines them. 480 | limiter (None, or trio.CapacityLimiter): 481 | An object used to limit the number of simultaneous processes. Most 482 | commonly this will be a `~trio.CapacityLimiter`, but any async 483 | context manager will succeed. 484 | 485 | Returns: 486 | Any: Whatever ``sync_fn(*args)`` returns. 487 | 488 | Raises: 489 | BaseException: Whatever ``sync_fn(*args)`` raises. 490 | BrokenWorkerError: Indicates the worker died unexpectedly. Not encountered 491 | in normal use. 
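    For example, a minimal illustrative call (``sum`` is just a stand-in for any
    pickleable CPU-bound callable, not anything special to this API)::

        result = await trio_parallel.run_sync(sum, [1, 2, 3], kill_on_cancel=True)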
492 | 493 | """ 494 | return await get_default_context().run_sync( 495 | sync_fn, 496 | *args, 497 | kill_on_cancel=kill_on_cancel, 498 | cancellable=cancellable, 499 | limiter=limiter, 500 | ) 501 | 502 | 503 | if sys.platform == "win32": 504 | del DEFAULT_CONTEXT 505 | -------------------------------------------------------------------------------- /trio_parallel/_posix_pipes.py: -------------------------------------------------------------------------------- 1 | import errno 2 | import os 3 | import sys 4 | import struct 5 | from typing import TYPE_CHECKING 6 | 7 | import trio 8 | 9 | assert not sys.platform == "win32" or not TYPE_CHECKING 10 | 11 | DEFAULT_RECEIVE_SIZE = 65536 12 | 13 | # Vendored from trio in v0.25.0 under identical MIT/Apache2 license. 14 | # Copyright Contributors to the Trio project. 15 | # It's modified so it doesn't need internals or abcs with methods we don't use. 16 | 17 | 18 | class FdStream: 19 | """ 20 | Represents a stream given the file descriptor to a pipe, TTY, etc. 21 | """ 22 | 23 | def __init__(self, fd: int) -> None: 24 | self._fd = fd 25 | os.set_blocking(fd, False) 26 | 27 | async def send_all(self, data: bytes) -> None: 28 | await trio.lowlevel.checkpoint() 29 | length = len(data) 30 | # adapted from the SocketStream code 31 | with memoryview(data) as view: 32 | sent = 0 33 | while sent < length: 34 | with view[sent:] as remaining: 35 | try: 36 | sent += os.write(self._fd, remaining) 37 | except BlockingIOError: 38 | await trio.lowlevel.wait_writable(self._fd) 39 | except OSError as e: 40 | if e.errno == errno.EBADF: # pragma: no cover, never closed 41 | raise trio.ClosedResourceError( 42 | "file was already closed" 43 | ) from None 44 | else: 45 | raise trio.BrokenResourceError from e 46 | 47 | async def receive_some(self, max_bytes: int) -> bytes: 48 | await trio.lowlevel.checkpoint() 49 | while True: 50 | try: 51 | data = os.read(self._fd, max_bytes) 52 | except BlockingIOError: 53 | await trio.lowlevel.wait_readable(self._fd) 54 | except OSError as e: # pragma: no cover, never closed, impossible error 55 | if e.errno == errno.EBADF: 56 | raise trio.ClosedResourceError("file was already closed") from None 57 | else: 58 | raise trio.BrokenResourceError from e 59 | else: 60 | break 61 | 62 | return data 63 | 64 | 65 | # We copy the wire protocol code from multiprocessing.connection.Connection 66 | # but asyncify it with FdStream, so as a derivative work this notice is required: 67 | # Copyright © Python Software Foundation; All Rights Reserved 68 | class FdChannel: 69 | """Represents a message stream over a pipe object.""" 70 | 71 | def __init__(self, send_fd): 72 | self._stream = FdStream(send_fd) 73 | 74 | async def send(self, buf: bytes) -> None: 75 | n = len(buf) 76 | if n > 0x7FFFFFFF: # pragma: no cover, can't go this big on CI 77 | pre_header = struct.pack("!i", -1) 78 | header = struct.pack("!Q", n) 79 | await self._stream.send_all(pre_header) 80 | await self._stream.send_all(header) 81 | await self._stream.send_all(buf) 82 | else: 83 | # For wire compatibility with multiprocessing Connection 3.7 and lower 84 | header = struct.pack("!i", n) 85 | if n > 16384: 86 | # The payload is large so Nagle's algorithm won't be triggered 87 | # and we'd better avoid the cost of concatenation. 88 | await self._stream.send_all(header) 89 | await self._stream.send_all(buf) 90 | else: 91 | # Issue #20540: concatenate before sending, to avoid delays due 92 | # to Nagle's algorithm on a TCP socket.
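# (Framing recap, per the branches above: every message carries a 4-byte
# signed big-endian length prefix; payloads over 0x7FFFFFFF are escaped as
# -1 followed by an 8-byte unsigned length, mirroring multiprocessing.Connection.)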
93 | # Also note we want to avoid sending a 0-length buffer separately, 94 | # to avoid "broken pipe" errors if the other end closed the pipe. 95 | await self._stream.send_all(header + buf) 96 | 97 | async def receive(self) -> bytes: 98 | buf = await self._recv_exactly(4) 99 | (size,) = struct.unpack("!i", buf) 100 | if size == -1: # pragma: no cover, can't go this big on CI 101 | buf = await self._recv_exactly(8) 102 | (size,) = struct.unpack("!Q", buf) 103 | return await self._recv_exactly(size) 104 | 105 | async def _recv_exactly(self, size): 106 | await trio.lowlevel.checkpoint_if_cancelled() 107 | result_bytes = bytearray() 108 | while size: 109 | partial_result = await self._stream.receive_some(size) 110 | num_recvd = len(partial_result) 111 | if not num_recvd: 112 | if not result_bytes: 113 | raise trio.EndOfChannel 114 | else: # pragma: no cover, edge case from mp.Pipe 115 | raise OSError("got end of file during message") 116 | result_bytes.extend(partial_result) 117 | if num_recvd > size: # pragma: no cover, edge case from mp.Pipe 118 | raise RuntimeError("Oversized response") 119 | else: 120 | size -= num_recvd 121 | await trio.lowlevel.cancel_shielded_checkpoint() 122 | return result_bytes 123 | -------------------------------------------------------------------------------- /trio_parallel/_proc.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import multiprocessing 3 | import time 4 | 5 | from itertools import count 6 | from pickle import HIGHEST_PROTOCOL 7 | from typing import Optional, Callable 8 | 9 | import trio 10 | 11 | try: 12 | from cloudpickle import dumps, loads 13 | except ImportError: 14 | from pickle import dumps, loads 15 | 16 | from outcome import Outcome, Error 17 | from . import _abc 18 | import _trio_parallel_workers as tp_workers 19 | 20 | multiprocessing.get_logger() # to register multiprocessing atexit handler 21 | 22 | if sys.platform == "win32": 23 | from trio.lowlevel import WaitForSingleObject as lowlevel_wait 24 | from ._windows_pipes import ( 25 | PipeReceiveChannel as RecvChan, 26 | PipeSendChannel as SendChan, 27 | ) 28 | 29 | 30 | else: 31 | from trio.lowlevel import wait_readable as lowlevel_wait 32 | from ._posix_pipes import FdChannel as RecvChan 33 | 34 | SendChan = RecvChan 35 | 36 | 37 | class BrokenWorkerProcessError(_abc.BrokenWorkerError): 38 | __doc__ = f"""{_abc.BrokenWorkerError.__doc__} 39 | The last argument of the exception is the underlying 40 | :class:`multiprocessing.Process` which may be inspected for e.g. exit codes. 
41 | """ 42 | 43 | 44 | class SpawnProcWorker(_abc.AbstractWorker): 45 | _proc_counter = count() 46 | mp_context = multiprocessing.get_context("spawn") 47 | 48 | def __init__(self, idle_timeout, init, retire): 49 | self._child_recv_pipe, self._send_pipe = self.mp_context.Pipe(duplex=False) 50 | self._recv_pipe, self._child_send_pipe = self.mp_context.Pipe(duplex=False) 51 | self._receive_chan = RecvChan(self._recv_pipe.fileno()) 52 | self._send_chan = SendChan(self._send_pipe.fileno()) 53 | self.proc = self.mp_context.Process( 54 | target=tp_workers.worker_behavior, 55 | args=( 56 | self._child_recv_pipe, 57 | self._child_send_pipe, 58 | idle_timeout, 59 | dumps(init, protocol=HIGHEST_PROTOCOL), 60 | dumps(retire, protocol=HIGHEST_PROTOCOL), 61 | ), 62 | name=f"trio-parallel worker process {next(self._proc_counter)}", 63 | daemon=True, 64 | ) 65 | 66 | async def start(self): 67 | await trio.to_thread.run_sync(self.proc.start) 68 | # XXX: We must explicitly close these after start to see child closures 69 | self._child_send_pipe.close() 70 | self._child_recv_pipe.close() 71 | 72 | if sys.platform != "win32": 73 | return 74 | 75 | # Give a nice error on accidental recursive spawn instead of hanging 76 | async def wait_for_ack(): 77 | try: 78 | code = await self._receive_chan.receive() 79 | assert code == tp_workers.ACK 80 | except BaseException: 81 | self.kill() 82 | with trio.CancelScope(shield=True): 83 | await self.wait() # noqa: ASYNC102 84 | raise 85 | nursery.cancel_scope.cancel() 86 | 87 | exitcode = None 88 | async with trio.open_nursery() as nursery: 89 | nursery.start_soon(wait_for_ack) 90 | exitcode = await self.wait() 91 | nursery.cancel_scope.cancel() 92 | if exitcode is not None: 93 | raise BrokenWorkerProcessError("Worker failed to start", self.proc) 94 | 95 | async def run_sync(self, sync_fn: Callable, *args) -> Optional[Outcome]: 96 | try: 97 | job = dumps((sync_fn, args), protocol=HIGHEST_PROTOCOL) 98 | except BaseException as exc: # noqa: ASYNC103 99 | return Error(exc) # noqa: ASYNC104, ASYNC910 100 | 101 | try: 102 | try: 103 | await self._send_chan.send(job) 104 | except trio.BrokenResourceError: 105 | with trio.CancelScope(shield=True): 106 | await self.wait() 107 | return None 108 | 109 | try: 110 | result = loads(await self._receive_chan.receive()) 111 | except trio.EndOfChannel: 112 | self._send_pipe.close() # edge case: free proc spinning on recv_bytes 113 | with trio.CancelScope(shield=True): 114 | await self.wait() # noqa: ASYNC120 115 | raise BrokenWorkerProcessError( 116 | "Worker died unexpectedly:", self.proc 117 | ) from None 118 | except BaseException: 119 | # cancellations require kill by contract 120 | # other exceptions will almost certainly leave us in an 121 | # unrecoverable state requiring kill as well 122 | self.kill() 123 | with trio.CancelScope(shield=True): 124 | await self.wait() # noqa: ASYNC102 125 | raise 126 | 127 | if result is None: 128 | # race in worker_behavior cleanup was triggered 129 | with trio.CancelScope(shield=True): 130 | await self.wait() # noqa: ASYNC102 131 | 132 | return result 133 | 134 | def is_alive(self): 135 | # if the proc is alive, there is a race condition where it could be 136 | # dying. This call reaps zombie children on Unix. 
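# WorkerProcCache.prune below leans on this liveness check to evict
# workers that have already exited after their idle_timeout.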
137 | return self.proc.is_alive() 138 | 139 | def shutdown(self): 140 | self._send_pipe.close() 141 | 142 | def kill(self): 143 | self.proc.kill() 144 | 145 | async def wait(self): 146 | if self.proc.exitcode is not None: 147 | await trio.lowlevel.cancel_shielded_checkpoint() 148 | return self.proc.exitcode 149 | if self.proc.pid is None: 150 | await trio.lowlevel.cancel_shielded_checkpoint() 151 | return None # waiting before started 152 | await lowlevel_wait(self.proc.sentinel) 153 | # fix a macos race: Trio GH#1296 154 | self.proc.join() 155 | # unfortunately join does not return exitcode 156 | return self.proc.exitcode 157 | 158 | 159 | class WorkerProcCache(_abc.WorkerCache[SpawnProcWorker]): 160 | def prune(self): 161 | # remove procs that have died from the idle timeout 162 | while True: 163 | try: 164 | worker = self.popleft() 165 | except IndexError: 166 | return 167 | if worker.is_alive(): 168 | self.appendleft(worker) 169 | return 170 | 171 | def shutdown(self, timeout): 172 | unclean = [] 173 | killed = [] 174 | for worker in self: 175 | worker.shutdown() 176 | deadline = time.perf_counter() + timeout 177 | for worker in self: 178 | timeout = deadline - time.perf_counter() 179 | while timeout > tp_workers.MAX_TIMEOUT: 180 | worker.proc.join(tp_workers.MAX_TIMEOUT) 181 | if worker.proc.exitcode is not None: 182 | break 183 | timeout = deadline - time.perf_counter() 184 | else: 185 | # guard rare race on macos if exactly == 0.0 186 | worker.proc.join(timeout or -0.1) 187 | if worker.proc.exitcode is None: 188 | worker.kill() 189 | killed.append(worker.proc) 190 | elif worker.proc.exitcode: 191 | unclean.append(worker.proc) 192 | if unclean or killed: 193 | for proc in killed: 194 | proc.join() 195 | raise BrokenWorkerProcessError( 196 | f"Graceful shutdown failed: {len(unclean)} nonzero exit codes " 197 | f"and {len(killed)} forceful terminations.", 198 | *unclean, 199 | *killed, 200 | ) 201 | 202 | 203 | WORKER_PROC_MAP = {"spawn": (SpawnProcWorker, WorkerProcCache)} 204 | 205 | _all_start_methods = set(multiprocessing.get_all_start_methods()) 206 | 207 | if "forkserver" in _all_start_methods: # pragma: no branch 208 | 209 | class ForkserverProcWorker(SpawnProcWorker): 210 | mp_context = multiprocessing.get_context("forkserver") 211 | 212 | WORKER_PROC_MAP["forkserver"] = ForkserverProcWorker, WorkerProcCache 213 | 214 | if "fork" in _all_start_methods: # pragma: no branch 215 | 216 | class ForkProcWorker(SpawnProcWorker): 217 | mp_context = multiprocessing.get_context("fork") 218 | 219 | def __init__(self, idle_timeout, init, retire): 220 | super().__init__(idle_timeout, None, None) 221 | self._idle_timeout = idle_timeout 222 | self._init = init 223 | self._retire = retire 224 | self.proc.run = self._run 225 | 226 | def _run(self): 227 | self._send_pipe.close() 228 | self._recv_pipe.close() 229 | tp_workers.worker_behavior( 230 | self._child_recv_pipe, 231 | self._child_send_pipe, 232 | self._idle_timeout, 233 | self._init, 234 | self._retire, 235 | ) 236 | 237 | async def start(self): 238 | await trio.lowlevel.checkpoint_if_cancelled() 239 | # on fork, doing start() in a thread is racy, and should be 240 | # fast enough to be considered non-blocking anyway 241 | self.proc.start() 242 | # XXX: We must explicitly close these after start to see child closures 243 | self._child_send_pipe.close() 244 | self._child_recv_pipe.close() 245 | # These are possibly large and deallocation would be desirable 246 | del self._init 247 | del self._retire 248 | # Breaks a reference cycle 249 | 
del self.proc.run 250 | 251 | WORKER_PROC_MAP["fork"] = ForkProcWorker, WorkerProcCache 252 | 253 | del _all_start_methods 254 | -------------------------------------------------------------------------------- /trio_parallel/_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/richardsheridan/trio-parallel/58aed5f51b04331c8fcc987f76fad33f82fcea8a/trio_parallel/_tests/__init__.py -------------------------------------------------------------------------------- /trio_parallel/_tests/conftest.py: -------------------------------------------------------------------------------- 1 | import multiprocessing 2 | import pytest 3 | from pytest_trio.enable_trio_mode import * 4 | 5 | 6 | @pytest.fixture(scope="package") 7 | def manager(): 8 | with multiprocessing.get_context("spawn").Manager() as mgr: 9 | yield mgr 10 | -------------------------------------------------------------------------------- /trio_parallel/_tests/test_cache.py: -------------------------------------------------------------------------------- 1 | """ Tests of internal cache API ("contract" tests)""" 2 | 3 | import math 4 | 5 | import pytest 6 | import trio 7 | 8 | from _trio_parallel_workers._funcs import ( 9 | _init_run_twice, 10 | _retire_run_twice, 11 | _bad_retire_fn, 12 | _delayed_bad_retire_fn, 13 | _loopy_retire_fn, 14 | _monkeypatch_max_timeout, 15 | ) 16 | from .._impl import WORKER_MAP 17 | from .._abc import BrokenWorkerError 18 | 19 | 20 | @pytest.fixture(params=list(WORKER_MAP.values()), ids=list(WORKER_MAP.keys())) 21 | def cache_and_workertype(request): 22 | worker_type, cache_type = request.param 23 | cache = cache_type() 24 | try: 25 | yield cache, worker_type 26 | finally: 27 | cache.shutdown(10) # internal assertion of clean shutdown 28 | 29 | 30 | async def test_prune_cache(cache_and_workertype): 31 | # setup phase 32 | cache, worker_type = cache_and_workertype 33 | dead_worker = worker_type(0.3, bool, bool) 34 | await dead_worker.start() 35 | assert (await dead_worker.run_sync(_monkeypatch_max_timeout)).unwrap() is True 36 | with trio.fail_after(4): 37 | assert await dead_worker.wait() is not None 38 | live_worker = worker_type(math.inf, bool, bool) 39 | await live_worker.start() 40 | assert (await live_worker.run_sync(bool)).unwrap() is False 41 | # put dead worker into the cache on the left 42 | cache.extend(iter([dead_worker, live_worker])) 43 | cache.prune() 44 | assert live_worker in cache 45 | assert dead_worker not in cache 46 | 47 | 48 | async def test_retire(cache_and_workertype): 49 | cache, worker_type = cache_and_workertype 50 | worker = worker_type(math.inf, _init_run_twice, _retire_run_twice) 51 | await worker.start() 52 | with trio.fail_after(4): 53 | assert await worker.run_sync(bool) is not None 54 | assert await worker.run_sync(bool) is not None 55 | assert await worker.run_sync(bool) is None 56 | assert await worker.wait() == 0 57 | 58 | 59 | async def test_bad_retire_fn(cache_and_workertype, capfd): 60 | cache, worker_type = cache_and_workertype 61 | if worker_type.mp_context._name == "forkserver": 62 | pytest.skip("capfd doesn't work on ForkserverProcWorker") 63 | worker = worker_type(math.inf, bool, _bad_retire_fn) 64 | await worker.start() 65 | await worker.run_sync(bool) 66 | with pytest.raises(BrokenWorkerError): 67 | await worker.run_sync(bool) 68 | with trio.fail_after(1): 69 | assert await worker.wait() == 1 70 | out, err = capfd.readouterr() 71 | assert "trio-parallel worker process" in err 72 | 
assert "AssertionError" in err 73 | 74 | 75 | async def test_delayed_bad_retire_fn(cache_and_workertype, capfd): 76 | cache, worker_type = cache_and_workertype 77 | if worker_type.mp_context._name == "forkserver": 78 | pytest.skip("capfd doesn't work on ForkserverProcWorker") 79 | worker = worker_type(math.inf, _init_run_twice, _delayed_bad_retire_fn) 80 | await worker.start() 81 | await worker.run_sync(bool) 82 | await worker.run_sync(bool) 83 | with pytest.raises(BrokenWorkerError): 84 | await worker.run_sync(bool) 85 | with trio.fail_after(1): 86 | assert await worker.wait() == 1 87 | 88 | cache.append(worker) 89 | with pytest.raises(BrokenWorkerError): 90 | cache.shutdown(0.5) 91 | cache.clear() 92 | out, err = capfd.readouterr() 93 | assert "trio-parallel worker process" in err 94 | assert "AssertionError" in err 95 | 96 | 97 | async def test_loopy_retire_fn(cache_and_workertype, monkeypatch): 98 | cache, worker_type = cache_and_workertype 99 | worker = worker_type(math.inf, _init_run_twice, _loopy_retire_fn) 100 | await worker.start() 101 | await worker.run_sync(bool) 102 | await worker.run_sync(bool) 103 | 104 | # increase coverage in cache.shutdown 105 | import _trio_parallel_workers 106 | 107 | monkeypatch.setattr(_trio_parallel_workers, "MAX_TIMEOUT", 0.1) 108 | cache.append(worker) 109 | with pytest.raises(BrokenWorkerError): 110 | cache.shutdown(0.5) 111 | cache.clear() 112 | 113 | 114 | async def test_shutdown(cache_and_workertype): 115 | cache, worker_type = cache_and_workertype 116 | # test that shutdown actually works 117 | worker = worker_type(math.inf, bool, bool) 118 | await worker.start() 119 | await worker.run_sync(bool) 120 | cache.append(worker) 121 | cache.shutdown(1) 122 | worker = cache.pop() 123 | with trio.fail_after(1): 124 | assert await worker.wait() is not None 125 | # test that math.inf is a valid input 126 | # contained in same test with above because we want to first 127 | # assert that shutdown works at all! 128 | worker = worker_type(math.inf, bool, bool) 129 | await worker.start() 130 | await worker.run_sync(bool) 131 | cache.append(worker) 132 | cache.shutdown(math.inf) 133 | cache.clear() 134 | 135 | 136 | async def test_shutdown_immediately(cache_and_workertype): 137 | cache, worker_type = cache_and_workertype 138 | worker = worker_type(math.inf, bool, bool) 139 | await worker.start() 140 | await worker.run_sync(bool) 141 | cache.append(worker) 142 | with pytest.raises(BrokenWorkerError): 143 | cache.shutdown(0) 144 | cache.clear() 145 | -------------------------------------------------------------------------------- /trio_parallel/_tests/test_defaults.py: -------------------------------------------------------------------------------- 1 | """End-to-end integrated tests of default cache""" 2 | 3 | import inspect 4 | import os 5 | import subprocess 6 | import sys 7 | import warnings 8 | 9 | import pytest 10 | import trio 11 | 12 | from .. 
import _impl 13 | from _trio_parallel_workers._funcs import _block_worker, _raise_pid 14 | from .._impl import ( 15 | get_default_context, 16 | run_sync, 17 | open_worker_context, 18 | default_context_statistics, 19 | configure_default_context, 20 | ) 21 | 22 | if sys.platform == "win32": 23 | 24 | @pytest.fixture 25 | def shutdown_cache(): 26 | configure_default_context() 27 | 28 | else: 29 | 30 | @pytest.fixture 31 | def shutdown_cache(): 32 | yield 33 | _impl.DEFAULT_CONTEXT._worker_cache.shutdown(50) 34 | configure_default_context() 35 | 36 | 37 | async def test_run_sync(shutdown_cache): 38 | trio_pid = os.getpid() 39 | limiter = trio.CapacityLimiter(1) 40 | 41 | child_pid = await run_sync(os.getpid, limiter=limiter) 42 | assert child_pid != trio_pid 43 | 44 | with pytest.raises(ValueError) as excinfo: 45 | await run_sync(_raise_pid, limiter=limiter) 46 | 47 | assert excinfo.value.args[0] != trio_pid 48 | 49 | 50 | async def test_entry_cancellation(manager, shutdown_cache): 51 | async def child(kill_on_cancel): 52 | nonlocal child_start, child_done 53 | child_start = True 54 | try: 55 | return await run_sync( 56 | _block_worker, 57 | block, 58 | worker_start, 59 | worker_done, 60 | kill_on_cancel=kill_on_cancel, 61 | ) 62 | finally: 63 | child_done = True 64 | 65 | block, worker_start, worker_done = manager.Event(), manager.Event(), manager.Event() 66 | child_start = False 67 | child_done = False 68 | 69 | # If we cancel *before* it enters, the entry is itself a cancellation 70 | # point 71 | with trio.CancelScope() as scope: 72 | scope.cancel() 73 | await child(False) 74 | assert scope.cancelled_caught 75 | assert child_start 76 | assert child_done 77 | assert not worker_start.is_set() 78 | assert not worker_done.is_set() 79 | 80 | 81 | async def test_kill_cancellation(manager, shutdown_cache): 82 | async def child(kill_on_cancel): 83 | nonlocal child_start, child_done 84 | child_start = True 85 | try: 86 | return await run_sync( 87 | _block_worker, 88 | block, 89 | worker_start, 90 | worker_done, 91 | kill_on_cancel=kill_on_cancel, 92 | ) 93 | finally: 94 | child_done = True 95 | 96 | block, worker_start, worker_done = manager.Event(), manager.Event(), manager.Event() 97 | child_start = False 98 | child_done = False 99 | # prime worker cache so fail timeout doesn't have to be so long 100 | await run_sync(bool) 101 | # This is truly cancellable by killing the worker 102 | async with trio.open_nursery() as nursery: 103 | nursery.start_soon(child, True) 104 | # Give it a chance to get started. (This is important because 105 | # to_thread_run_sync does a checkpoint_if_cancelled before 106 | # blocking on the thread, and we don't want to trigger this.) 107 | await trio.testing.wait_all_tasks_blocked(0.01) 108 | assert child_start 109 | with trio.fail_after(1): 110 | await trio.to_thread.run_sync(worker_start.wait, abandon_on_cancel=True) 111 | # Then cancel it. 
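# (kill_on_cancel=True means the worker is SIGKILLed/terminated mid-job,
# so worker_done below can never be set.)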
112 | nursery.cancel_scope.cancel() 113 | # The task exited, but the worker died 114 | assert not block.is_set() 115 | assert not worker_done.is_set() 116 | assert child_done 117 | 118 | 119 | async def test_uncancellable_cancellation(manager, shutdown_cache): 120 | async def child(kill_on_cancel): 121 | nonlocal child_start, child_done 122 | child_start = True 123 | await run_sync( 124 | _block_worker, 125 | block, 126 | worker_start, 127 | worker_done, 128 | kill_on_cancel=kill_on_cancel, 129 | ) 130 | child_done = True 131 | 132 | block, worker_start, worker_done = manager.Event(), manager.Event(), manager.Event() 133 | child_start = False 134 | child_done = False 135 | # This one can't be cancelled 136 | async with trio.open_nursery() as nursery: 137 | nursery.start_soon(child, False) 138 | await trio.to_thread.run_sync(worker_start.wait, abandon_on_cancel=True) 139 | assert child_start 140 | nursery.cancel_scope.cancel() 141 | with trio.CancelScope(shield=True): 142 | await trio.testing.wait_all_tasks_blocked(0.01) 143 | # It's still running 144 | assert not worker_done.is_set() 145 | block.set() 146 | # Now it exits 147 | assert child_done 148 | assert worker_done.is_set() 149 | 150 | 151 | async def test_aclose(): 152 | async with open_worker_context() as ctx: 153 | await ctx.run_sync(bool) 154 | with pytest.raises(trio.ClosedResourceError): 155 | await ctx.run_sync(bool) 156 | 157 | 158 | async def test_context_waits(manager): 159 | # TODO: convert this to a collaboration test 160 | finished = False 161 | block = manager.Event() 162 | start = manager.Event() 163 | done = manager.Event() 164 | 165 | async def child(): 166 | nonlocal finished 167 | try: 168 | await ctx.run_sync(_block_worker, block, start, done) 169 | finally: 170 | finished = True 171 | 172 | async with trio.open_nursery() as nursery: 173 | async with open_worker_context() as ctx: 174 | nursery.start_soon(child) 175 | nursery.start_soon(child) 176 | await trio.to_thread.run_sync(start.wait, abandon_on_cancel=True) 177 | block.set() 178 | assert finished 179 | 180 | 181 | def _atexit_shutdown(): # pragma: no cover, source code extracted 182 | # run in a subprocess, no access to globals 183 | import trio 184 | 185 | # note the order here: if trio_parallel is imported after multiprocessing, 186 | # specifically after invoking the logger, a more naive installation of the atexit 187 | # handler could be done and still pass the test 188 | import trio_parallel 189 | import multiprocessing # noqa: F811 190 | 191 | # we inspect the logger output in stderr to validate the test 192 | multiprocessing.log_to_stderr(10) 193 | trio.run(trio_parallel.run_sync, bool) 194 | 195 | 196 | def test_we_control_atexit_shutdowns(): 197 | # multiprocessing will either terminate our workers or lock up during its atexit. 198 | # Our graceful shutdown code allows atexit handlers *in the workers* to run as 199 | # well as avoiding being joined by the multiprocessing code. We test the latter.
200 | test_code = f"""{inspect.getsource(_atexit_shutdown)}\nif __name__ == '__main__': 201 | {_atexit_shutdown.__name__}()""" 202 | result = subprocess.run( 203 | [sys.executable, "-c", test_code], 204 | stderr=subprocess.PIPE, 205 | check=True, 206 | timeout=20, 207 | ) 208 | assert b"[INFO/MainProcess] process shutting down" in result.stderr 209 | assert b"calling join() for" not in result.stderr 210 | 211 | 212 | def test_startup_failure_doesnt_hang(tmp_path): 213 | # Failing to guard startup against worker spawn recursion is the only failure 214 | # case of startup that I have run into. 215 | script_path = tmp_path / "script.py" 216 | with script_path.open("w") as f: 217 | f.write( 218 | "import trio,trio_parallel; trio.run(trio_parallel.run_sync, int)\n", 219 | ) 220 | result = subprocess.run( 221 | [sys.executable, script_path], 222 | stdout=subprocess.PIPE, 223 | stderr=subprocess.PIPE, 224 | check=False, # we expect a failure 225 | timeout=20, 226 | ) 227 | assert not result.stdout 228 | assert b"An attempt has been made to start a new process" in result.stderr 229 | assert b"ExceptionGroup" not in result.stderr 230 | assert b"MultiError" not in result.stderr 231 | assert result.returncode 232 | 233 | 234 | async def _compare_pids(): 235 | first = await run_sync(os.getpid) 236 | second = await run_sync(os.getpid) 237 | return first == second 238 | 239 | 240 | async def test_configure_default_context(shutdown_cache): 241 | configure_default_context(retire=object) 242 | assert not await _compare_pids() 243 | 244 | 245 | async def test_configure_default_context_warns(shutdown_cache): 246 | try: 247 | configure_default_context(idle_timeout=float("inf")) 248 | assert await _compare_pids() 249 | finally: 250 | with pytest.warns(UserWarning): 251 | warnings.simplefilter("always") 252 | configure_default_context() 253 | 254 | 255 | async def test_configure_default_context_thread(shutdown_cache): 256 | with pytest.raises(RuntimeError, match="thread"): 257 | await trio.to_thread.run_sync(configure_default_context) 258 | 259 | 260 | async def test_get_default_context_stats(): # noqa: ASYNC910 261 | s = default_context_statistics() 262 | assert hasattr(s, "idle_workers") 263 | assert hasattr(s, "running_workers") 264 | assert s == get_default_context().statistics() 265 | 266 | 267 | @pytest.mark.xfail( 268 | sys.platform == "win32", 269 | reason="Default cache is not global on Windows", 270 | raises=AssertionError, 271 | ) 272 | def test_sequential_runs(shutdown_cache): 273 | async def run_with_timeout(): 274 | with trio.fail_after(20): 275 | return await run_sync(os.getpid, kill_on_cancel=True) 276 | 277 | same_pid = trio.run(run_with_timeout) == trio.run(run_with_timeout) 278 | assert same_pid 279 | 280 | 281 | async def test_concurrent_runs(shutdown_cache): 282 | async def worker(i): 283 | with trio.fail_after(20): 284 | assert await run_sync(int, i, kill_on_cancel=True) == i 285 | for _ in range(30): 286 | assert await run_sync(int, i, kill_on_cancel=True) == i 287 | with trio.move_on_after(0.5): 288 | while True: 289 | assert await run_sync(int, i, kill_on_cancel=True) == i 290 | 291 | async with trio.open_nursery() as n: 292 | for i in range(2): 293 | n.start_soon(trio.to_thread.run_sync, trio.run, worker, i) 294 | 295 | 296 | def test_nice_names(): 297 | import trio_parallel 298 | 299 | for objname, obj in trio_parallel.__dict__.items(): 300 | if not objname.startswith("_"): # ignore private attributes 301 | assert "._" not in obj.__module__ 302 | 
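The default-context tests above exercise the public API end to end. For orientation, here is a minimal, self-contained usage sketch of that API; the job function, timeout value, and printed statistics are illustrative assumptions, not values taken from the test suite:

import trio
import trio_parallel


def _cpu_bound(n):  # hypothetical CPU-bound job; must be pickleable
    return sum(i * i for i in range(n))


async def main():
    # Optionally tune the default context once, before any run_sync call.
    trio_parallel.configure_default_context(idle_timeout=30.0)
    # kill_on_cancel=True lets cancellation SIGKILL/TerminateProcess the worker.
    result = await trio_parallel.run_sync(_cpu_bound, 1_000_000, kill_on_cancel=True)
    print(result, trio_parallel.default_context_statistics())


if __name__ == "__main__":  # guard required because spawned workers re-import __main__
    trio.run(main)

Note the ``__main__`` guard: the startup-failure test above shows exactly what goes wrong without it.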
-------------------------------------------------------------------------------- /trio_parallel/_tests/test_impl.py: -------------------------------------------------------------------------------- 1 | """ Tests of public API with mocked-out workers ("collaboration" tests)""" 2 | 3 | import os 4 | import sys 5 | from typing import Callable, Optional 6 | 7 | import pytest 8 | import trio 9 | from outcome import Outcome, capture 10 | 11 | from .. import _impl 12 | from .._abc import AbstractWorker, WorkerCache 13 | from .._impl import ( 14 | run_sync, 15 | ) 16 | 17 | 18 | def _special_none_making_retire(): # pragma: no cover, never called 19 | pass 20 | 21 | 22 | class MockWorker(AbstractWorker): 23 | def __init__(self, idle_timeout, init, retire): 24 | self.idle_timeout = idle_timeout 25 | self.init = init 26 | self.retire = retire 27 | 28 | async def start(self): 29 | await trio.lowlevel.checkpoint() 30 | 31 | async def run_sync(self, sync_fn: Callable, *args) -> Optional[Outcome]: 32 | await trio.lowlevel.checkpoint() 33 | if self.retire is not _special_none_making_retire: 34 | return capture( 35 | lambda *a: (sync_fn, args, trio.current_effective_deadline()) 36 | ) 37 | 38 | def shutdown(self): 39 | self.retire = _special_none_making_retire 40 | 41 | async def wait(self): # pragma: no cover, only here to satisfy ABC 42 | pass 43 | 44 | 45 | class MockCache(WorkerCache): 46 | pruned_count = 0 47 | shutdown_count = 0 48 | 49 | def prune(self): 50 | assert trio.lowlevel.currently_ki_protected() 51 | self.pruned_count += 1 52 | while self: 53 | worker = self.popleft() 54 | if worker.retire is not _special_none_making_retire: 55 | self.appendleft(worker) 56 | return 57 | 58 | def shutdown(self, grace_period): 59 | for worker in self: 60 | worker.shutdown() 61 | self.shutdown_count += 1 62 | 63 | 64 | class MockContext(_impl.WorkerContext): 65 | def __attrs_post_init__(self): 66 | super().__attrs_post_init__() 67 | self.__dict__["_worker_class"] = MockWorker 68 | self.__dict__["_worker_cache"] = MockCache() 69 | 70 | async def _aclose(self): 71 | assert trio.lowlevel.currently_ki_protected() 72 | await super()._aclose() 73 | 74 | 75 | @pytest.fixture 76 | async def mock_context(monkeypatch): 77 | monkeypatch.setattr(_impl, "WorkerContext", MockContext) 78 | ctx = MockContext._create() 79 | if sys.platform == "win32": 80 | token = _impl.DEFAULT_CONTEXT_RUNVAR.set(ctx) 81 | yield ctx 82 | _impl.DEFAULT_CONTEXT_RUNVAR.reset(token) 83 | else: 84 | monkeypatch.setattr(_impl, "DEFAULT_CONTEXT", ctx) 85 | yield ctx 86 | 87 | 88 | async def test_context_methods(mock_context): 89 | await run_sync(bool) 90 | await run_sync(bool) 91 | assert mock_context._worker_cache.pruned_count == 2 92 | assert mock_context._worker_cache.shutdown_count == 0 93 | await run_sync(bool) 94 | with trio.CancelScope() as cs: 95 | cs.cancel() 96 | await run_sync(bool) 97 | assert cs.cancelled_caught 98 | assert mock_context._worker_cache.pruned_count == 3 99 | assert mock_context._worker_cache.shutdown_count == 0 100 | 101 | 102 | async def test_context_methods2(mock_context): 103 | async with _impl.open_worker_context() as ctx: 104 | s = ctx.statistics() 105 | assert s.idle_workers == 0 106 | assert s.running_workers == 0 107 | await ctx.run_sync(bool) 108 | s = ctx.statistics() 109 | assert s.idle_workers == 1 110 | assert s.running_workers == 0 111 | assert ctx._worker_cache.pruned_count == 3 112 | assert ctx._worker_cache.shutdown_count == 1 113 | s = ctx.statistics() 114 | assert s.idle_workers == 0 115 | assert 
s.running_workers == 0 116 | assert ctx._worker_cache.pruned_count == 4 117 | 118 | 119 | async def test_cancellable(mock_context): 120 | deadline = trio.current_time() + 3 121 | with trio.CancelScope(deadline=deadline): 122 | _, _, obsvd_deadline = await run_sync(bool) 123 | assert obsvd_deadline == float("inf") 124 | _, _, obsvd_deadline = await run_sync(bool, cancellable=True) 125 | assert obsvd_deadline == deadline 126 | _, _, obsvd_deadline = await run_sync(bool, kill_on_cancel=True) 127 | assert obsvd_deadline == deadline 128 | 129 | 130 | async def test_cache_scope_args(mock_context): 131 | async with _impl.open_worker_context( 132 | init=float, retire=int, idle_timeout=33 133 | ) as ctx: 134 | await ctx.run_sync(bool) 135 | worker = ctx._worker_cache.pop() 136 | assert not ctx._worker_cache 137 | assert worker.init is float 138 | assert worker.retire is int 139 | assert worker.idle_timeout == 33 140 | 141 | 142 | def _idfn(val): 143 | k = next(iter(val)) 144 | v = val[k] 145 | return f"{k}-{v}" 146 | 147 | 148 | @pytest.mark.parametrize( 149 | "kwargs", 150 | [ 151 | dict(idle_timeout=[-1]), 152 | dict(init=0), 153 | dict(retire=None), 154 | dict(grace_period=None), 155 | ], 156 | ids=_idfn, 157 | ) 158 | async def test_erroneous_scope_types(kwargs): 159 | with pytest.raises(TypeError): 160 | async with _impl.open_worker_context(**kwargs): 161 | pytest.fail("should be unreachable") 162 | 163 | 164 | @pytest.mark.parametrize( 165 | "kwargs", 166 | [ 167 | dict(worker_type="wrong"), 168 | dict(grace_period=-1), 169 | dict(idle_timeout=-1), 170 | ], 171 | ids=_idfn, 172 | ) 173 | async def test_erroneous_scope_values(kwargs): 174 | with pytest.raises(ValueError): 175 | async with _impl.open_worker_context(**kwargs): 176 | pytest.fail("should be unreachable") 177 | 178 | 179 | async def test_worker_returning_none_can_be_cancelled(): 180 | with trio.move_on_after(0.1) as cs: 181 | ctx = MockContext._create(retire=_special_none_making_retire) 182 | assert await ctx.run_sync(int) 183 | assert cs.cancelled_caught 184 | 185 | 186 | def test_cannot_instantiate_WorkerContext(): 187 | with pytest.raises(TypeError): 188 | _impl.WorkerContext() 189 | 190 | 191 | async def _assert_worker_pid(pid, matches): 192 | comparison = pid == await run_sync(os.getpid) 193 | assert comparison == matches 194 | 195 | 196 | async def test_cache_scope_overrides_run_sync(): 197 | pid = await run_sync(os.getpid) 198 | 199 | async with _impl.cache_scope(): 200 | await _assert_worker_pid(pid, False) 201 | 202 | 203 | async def test_cache_scope_overrides_nursery_task(): 204 | pid = await run_sync(os.getpid) 205 | 206 | async def check_both_sides_of_task_status_started(pid, task_status): 207 | await _assert_worker_pid(pid, True) 208 | task_status.started() 209 | await _assert_worker_pid(pid, True) 210 | 211 | async with trio.open_nursery() as nursery: 212 | async with _impl.cache_scope(): 213 | nursery.start_soon(_assert_worker_pid, pid, True) 214 | 215 | async with trio.open_nursery() as nursery: 216 | async with _impl.cache_scope(): 217 | await nursery.start(check_both_sides_of_task_status_started, pid) 218 | 219 | 220 | async def test_cache_scope_follows_task_tree_discipline(): 221 | shared_nursery: Optional[trio.Nursery] = None 222 | 223 | async def make_a_cache_scope_around_nursery(task_status): 224 | nonlocal shared_nursery 225 | async with _impl.cache_scope(), trio.open_nursery() as shared_nursery: 226 | await _assert_worker_pid(pid, False) 227 | task_status.started() 228 | await e.wait() 229 | await 
_assert_worker_pid(pid, True) 230 | 231 | async def assert_elsewhere_in_task_tree(): 232 | await _assert_worker_pid(pid, False) 233 | e.set() 234 | 235 | pid = await run_sync(os.getpid) 236 | e = trio.Event() 237 | async with trio.open_nursery() as nursery: 238 | await nursery.start(make_a_cache_scope_around_nursery) 239 | # this line tests the main difference from contextvars vs treevars 240 | shared_nursery.start_soon(assert_elsewhere_in_task_tree) 241 | 242 | 243 | async def test_cache_scope_overrides_nested(): 244 | pid1 = await run_sync(os.getpid) 245 | async with _impl.cache_scope(): 246 | pid2 = await run_sync(os.getpid) 247 | async with _impl.cache_scope(): 248 | await _assert_worker_pid(pid1, False) 249 | await _assert_worker_pid(pid2, False) 250 | 251 | 252 | async def test_cache_scope_doesnt_override_explicit_context(): 253 | async with _impl.open_worker_context() as ctx: 254 | pid = await ctx.run_sync(os.getpid) 255 | async with _impl.cache_scope(): 256 | assert pid == await ctx.run_sync(os.getpid) 257 | -------------------------------------------------------------------------------- /trio_parallel/_tests/test_proc.py: -------------------------------------------------------------------------------- 1 | """ Tests of internal worker process API ("contract" tests) 2 | 3 | These are specific to subprocesses and you wouldn't expect these to pass 4 | with thread or subinterpreter workers. 5 | """ 6 | 7 | import math 8 | import os 9 | import signal 10 | 11 | import trio 12 | import pytest 13 | 14 | from _trio_parallel_workers._funcs import ( 15 | _lambda, 16 | _return_lambda, 17 | _never_halts, 18 | _no_trio, 19 | ) 20 | from .._proc import WORKER_PROC_MAP 21 | from .._abc import BrokenWorkerError 22 | 23 | 24 | @pytest.fixture(params=list(WORKER_PROC_MAP.values()), ids=list(WORKER_PROC_MAP.keys())) 25 | async def worker(request): 26 | worker = request.param[0](math.inf, bool, bool) 27 | await worker.start() 28 | yield worker 29 | with trio.move_on_after(10) as cs: 30 | worker.shutdown() 31 | await worker.wait() 32 | if cs.cancelled_caught: 33 | with trio.fail_after(1) as cs: # pragma: no cover, leads to failure case 34 | worker.kill() 35 | await worker.wait() 36 | pytest.fail( 37 | "tests should be responsible for killing and waiting if " 38 | "they do not lead to a graceful shutdown state" 39 | ) 40 | 41 | 42 | async def test_run_sync_cancel_infinite_loop(worker, manager): 43 | ev = manager.Event() 44 | 45 | async with trio.open_nursery() as nursery: 46 | nursery.start_soon(worker.run_sync, _never_halts, ev) 47 | await trio.to_thread.run_sync(ev.wait, abandon_on_cancel=True) 48 | nursery.cancel_scope.cancel() 49 | with trio.fail_after(1): 50 | assert await worker.wait() in (-15, -9) 51 | 52 | 53 | async def test_run_sync_raises_on_kill(worker, manager): 54 | ev = manager.Event() 55 | 56 | async def killer(): 57 | try: 58 | await trio.to_thread.run_sync(ev.wait, abandon_on_cancel=True) 59 | finally: 60 | worker.kill() # also tests multiple calls to worker.kill 61 | 62 | await worker.run_sync(int) # running start so actual test is less racy 63 | with trio.fail_after(5): 64 | async with trio.open_nursery() as nursery: 65 | nursery.start_soon(killer) 66 | with pytest.raises(BrokenWorkerError) as exc_info: 67 | await worker.run_sync(_never_halts, ev) 68 | exitcode = await worker.wait() 69 | assert exitcode in (-15, -9) 70 | assert exc_info.value.args[-1].exitcode == exitcode 71 | 72 | 73 | async def test_run_sync_raises_on_sudden_death(worker, capfd): 74 | expected_code = 42 75 | with 
pytest.raises(BrokenWorkerError) as excinfo: 76 | with trio.fail_after(20): 77 | assert (await worker.run_sync(os._exit, expected_code)).unwrap() 78 | exitcode = await worker.wait() 79 | assert exitcode == expected_code 80 | assert excinfo.value.args[-1].exitcode == expected_code 81 | 82 | 83 | # to test that cancellation does not ever leave a living process behind 84 | # currently requires manually targeting all but last checkpoints 85 | 86 | 87 | async def test_exhaustively_cancel_run_sync(worker, manager): 88 | # cancel at job send if we reuse the worker 89 | ev = manager.Event() 90 | await worker.run_sync(int) 91 | with trio.fail_after(1): 92 | with trio.move_on_after(0): 93 | await worker.run_sync(_never_halts, ev) 94 | assert await worker.wait() in (-15, -9) 95 | 96 | # cancel at result recv is tested elsewhere 97 | 98 | 99 | async def test_ki_does_not_propagate(worker): 100 | (await worker.run_sync(signal.raise_signal, signal.SIGINT)).unwrap() 101 | 102 | 103 | @pytest.mark.parametrize("job", [_lambda, _return_lambda]) 104 | async def test_unpickleable(job, worker): 105 | from pickle import PicklingError 106 | 107 | with pytest.raises(PicklingError): 108 | (await worker.run_sync(job)).unwrap() 109 | 110 | 111 | async def test_no_trio_in_subproc(worker): 112 | if worker.mp_context._name == "fork": 113 | pytest.skip("Doesn't matter on ForkProcWorker") 114 | assert (await worker.run_sync(_no_trio)).unwrap() 115 | -------------------------------------------------------------------------------- /trio_parallel/_tests/test_worker.py: -------------------------------------------------------------------------------- 1 | """ Tests of internal worker API ("contract" tests) 2 | 3 | All workers should pass these tests, regardless of implementation 4 | """ 5 | 6 | import math 7 | 8 | import pytest 9 | import trio 10 | 11 | from _trio_parallel_workers._funcs import _null_async_fn, _chained_exc, SpecialError 12 | from .._impl import WORKER_MAP 13 | 14 | 15 | @pytest.fixture(params=list(WORKER_MAP.values()), ids=list(WORKER_MAP.keys())) 16 | async def worker(request): 17 | worker = request.param[0](math.inf, bool, bool) 18 | yield worker 19 | with trio.move_on_after(5) as cs: 20 | worker.shutdown() 21 | await worker.wait() 22 | if cs.cancelled_caught: # pragma: no cover, leads to failure case 23 | pytest.fail( 24 | "tests should be responsible for killing and waiting if they do not " 25 | "lead to a graceful shutdown state" 26 | ) 27 | 28 | 29 | async def test_cancel_start(worker): 30 | # cancel at startup 31 | with trio.fail_after(1): 32 | with trio.move_on_after(0) as cs: 33 | await worker.start() 34 | assert cs.cancelled_caught 35 | assert await worker.wait() is None 36 | 37 | 38 | async def test_run_sync(worker): 39 | await worker.start() 40 | assert (await worker.run_sync(bool)).unwrap() is False 41 | 42 | 43 | async def test_run_sync_large_job(worker): 44 | await worker.start() 45 | n = 2**20 46 | x = (await worker.run_sync(bytes, bytearray(n))).unwrap() 47 | assert len(x) == n 48 | 49 | 50 | async def test_run_sync_coroutine_error(worker): 51 | await worker.start() 52 | with pytest.raises(TypeError, match="expected a sync function"): 53 | (await worker.run_sync(_null_async_fn)).unwrap() 54 | 55 | 56 | async def test_clean_exit_on_shutdown(worker, capfd): 57 | if worker.mp_context._name == "forkserver": 58 | pytest.skip("capfd doesn't work on ForkserverProcWorker") 59 | await worker.start() 60 | # This could happen on weird __del__/weakref/atexit situations. 
61 | # It was not visible on normal, clean exits because multiprocessing 62 | # would call terminate before pipes were GC'd. 63 | assert (await worker.run_sync(bool)).unwrap() is False 64 | worker.shutdown() 65 | with trio.fail_after(2): 66 | exitcode = await worker.wait() 67 | out, err = capfd.readouterr() 68 | assert not err 69 | assert exitcode == 0 70 | assert not out 71 | 72 | 73 | async def test_tracebacks(worker): 74 | await worker.start() 75 | with pytest.raises(SpecialError, match="test2") as excinfo: 76 | (await worker.run_sync(_chained_exc)).unwrap() 77 | c = excinfo.getrepr().chain 78 | assert c 79 | assert "test1" in str(c) 80 | -------------------------------------------------------------------------------- /trio_parallel/_windows_cffi.py: -------------------------------------------------------------------------------- 1 | import enum 2 | 3 | import cffi 4 | 5 | ################################################################ 6 | # Functions and types 7 | ################################################################ 8 | 9 | LIB = """ 10 | BOOL PeekNamedPipe( 11 | HANDLE hNamedPipe, 12 | LPVOID lpBuffer, 13 | DWORD nBufferSize, 14 | LPDWORD lpBytesRead, 15 | LPDWORD lpTotalBytesAvail, 16 | LPDWORD lpBytesLeftThisMessage 17 | ); 18 | """ 19 | 20 | ffi = cffi.FFI() 21 | ffi.cdef(LIB) 22 | 23 | kernel32 = ffi.dlopen("kernel32.dll") 24 | 25 | ################################################################ 26 | # Magic numbers 27 | ################################################################ 28 | 29 | # Here's a great resource for looking these up: 30 | # https://www.magnumdb.com 31 | # (Tip: check the box to see "Hex value") 32 | 33 | INVALID_HANDLE_VALUE = ffi.cast("HANDLE", -1) 34 | 35 | 36 | class ErrorCodes(enum.IntEnum): 37 | STATUS_TIMEOUT = 0x102 38 | WAIT_TIMEOUT = 0x102 39 | WAIT_ABANDONED = 0x80 40 | WAIT_OBJECT_0 = 0x00 # object is signaled 41 | WAIT_FAILED = 0xFFFFFFFF 42 | ERROR_IO_PENDING = 997 43 | ERROR_OPERATION_ABORTED = 995 44 | ERROR_ABANDONED_WAIT_0 = 735 45 | ERROR_INVALID_HANDLE = 6 46 | ERROR_INVALID_PARMETER = 87 47 | ERROR_NOT_FOUND = 1168 48 | ERROR_NOT_SOCKET = 10038 49 | ERROR_MORE_DATA = 234 50 | 51 | 52 | ################################################################ 53 | # Generic helpers 54 | ################################################################ 55 | 56 | 57 | # vendored from trio, so no coverage 58 | def raise_winerror(winerror=None, *, filename=None, filename2=None): # pragma: no cover 59 | if winerror is None: 60 | winerror, msg = ffi.getwinerror() 61 | else: 62 | _, msg = ffi.getwinerror(winerror) 63 | # https://docs.python.org/3/library/exceptions.html#OSError 64 | raise OSError(0, msg, filename, winerror, filename2) 65 | 66 | 67 | def peek_pipe_message_left(handle: int): 68 | # If you try to pass in a file descriptor instead, it's not going to work out. 
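# (On Windows, multiprocessing's Connection.fileno() returns the raw HANDLE
# as an int, which is what callers are expected to pass in here.)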
69 | assert type(handle) is int 70 | handle = ffi.cast("HANDLE", handle) 71 | left = ffi.new("LPDWORD") 72 | if not kernel32.PeekNamedPipe(handle, ffi.NULL, 0, ffi.NULL, ffi.NULL, left): 73 | raise_winerror() # pragma: no cover 74 | return left[0] 75 | -------------------------------------------------------------------------------- /trio_parallel/_windows_pipes.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from typing import TYPE_CHECKING 3 | 4 | import trio 5 | from ._windows_cffi import ErrorCodes, peek_pipe_message_left 6 | 7 | assert sys.platform == "win32" or not TYPE_CHECKING 8 | 9 | DEFAULT_RECEIVE_SIZE = 65536 10 | 11 | # Vendored from trio in v0.25.0 under identical MIT/Apache2 license. 12 | # Copyright Contributors to the Trio project. 13 | # It's trio._util.PipeSendStream but modified, so it doesn't need 14 | # internals or abcs with methods we don't use. 15 | 16 | 17 | class PipeSendChannel: 18 | """Represents a message stream over a pipe object.""" 19 | 20 | def __init__(self, handle: int) -> None: 21 | # handles are "owned" by multiprocessing.connection.Pipe 22 | self._handle = handle 23 | trio.lowlevel.register_with_iocp(handle) 24 | 25 | async def send(self, value: bytes) -> None: 26 | # we never send empty bytes 27 | # if not value: 28 | # await trio.lowlevel.checkpoint() 29 | # return 30 | 31 | try: 32 | written = await trio.lowlevel.write_overlapped(self._handle, value) 33 | except BrokenPipeError as ex: 34 | raise trio.BrokenResourceError from ex 35 | # By my reading of MSDN, this assert is guaranteed to pass so long 36 | # as the pipe isn't in nonblocking mode, but... let's just 37 | # double-check. 38 | assert written == len(value) 39 | 40 | 41 | class PipeReceiveChannel: 42 | """Represents a message stream over a pipe object.""" 43 | 44 | def __init__(self, handle: int) -> None: 45 | # handles are "owned" by multiprocessing.connection.Pipe 46 | self._handle = handle 47 | trio.lowlevel.register_with_iocp(handle) 48 | 49 | async def receive(self) -> bytes: 50 | buffer = bytearray(DEFAULT_RECEIVE_SIZE) 51 | try: 52 | received = await self._receive_some_into(buffer) 53 | except OSError as e: 54 | if e.winerror != ErrorCodes.ERROR_MORE_DATA: 55 | raise # pragma: no cover, real OSError we can't generate 56 | left = peek_pipe_message_left(self._handle) 57 | # preallocate memory to avoid an extra copy of very large messages 58 | newbuffer = bytearray(DEFAULT_RECEIVE_SIZE + left) 59 | with memoryview(newbuffer) as view: 60 | view[:DEFAULT_RECEIVE_SIZE] = buffer 61 | with trio.CancelScope(shield=True): 62 | await self._receive_some_into(view[DEFAULT_RECEIVE_SIZE:]) 63 | return newbuffer 64 | else: 65 | del buffer[received:] 66 | return buffer 67 | 68 | async def _receive_some_into(self, buffer): 69 | try: 70 | return await trio.lowlevel.readinto_overlapped(self._handle, buffer) 71 | except BrokenPipeError: 72 | # Windows raises BrokenPipeError on one end of a pipe 73 | # whenever the other end closes, regardless of direction. 74 | # Convert this to EndOfChannel. 75 | # 76 | # We are raising an exception, so we don't need to checkpoint, 77 | # in contrast to PipeReceiveStream. 78 | raise trio.EndOfChannel 79 | --------------------------------------------------------------------------------
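As a closing reference, here is a minimal sketch of the customized-context API from trio_parallel/_impl.py above; the init and retire callables are illustrative assumptions that merely demonstrate the documented contract (argument-less, with retire's return value deciding worker shutdown):

import os

import trio
import trio_parallel


def _init():  # runs once in each worker before it accepts jobs
    os.environ["WORKER_ROLE"] = "example"  # hypothetical per-worker setup


def _retire():  # runs in the worker after each completed job
    return False  # a truthy return would retire (shut down) this worker


async def main():
    async with trio_parallel.open_worker_context(init=_init, retire=_retire) as ctx:
        worker_pid = await ctx.run_sync(os.getpid)
        print("ran in worker:", worker_pid, ctx.statistics())
    # Exiting the scope waits for running workers and shuts the cache down.


if __name__ == "__main__":
    trio.run(main)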