├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yaml
│   │   ├── config.yml
│   │   └── features_request.yaml
│   ├── pull_request_template.md
│   └── workflows
│       ├── publish.yml
│       ├── test-downstream.yml
│       └── test.yml
├── .gitignore
├── .pre-commit-config.yaml
├── .readthedocs.yml
├── LICENSE
├── README.rst
├── docs
│   ├── api.rst
│   ├── basics.rst
│   ├── cancellation.rst
│   ├── conf.py
│   ├── contextmanagers.rst
│   ├── contributing.rst
│   ├── faq.rst
│   ├── fileio.rst
│   ├── index.rst
│   ├── migration.rst
│   ├── networking.rst
│   ├── signals.rst
│   ├── streams.rst
│   ├── subinterpreters.rst
│   ├── subprocesses.rst
│   ├── support.rst
│   ├── synchronization.rst
│   ├── tasks.rst
│   ├── tempfile.rst
│   ├── testing.rst
│   ├── threads.rst
│   ├── typedattrs.rst
│   └── versionhistory.rst
├── pyproject.toml
├── src
│   └── anyio
│       ├── __init__.py
│       ├── _backends
│       │   ├── __init__.py
│       │   ├── _asyncio.py
│       │   └── _trio.py
│       ├── _core
│       │   ├── __init__.py
│       │   ├── _asyncio_selector_thread.py
│       │   ├── _contextmanagers.py
│       │   ├── _eventloop.py
│       │   ├── _exceptions.py
│       │   ├── _fileio.py
│       │   ├── _resources.py
│       │   ├── _signals.py
│       │   ├── _sockets.py
│       │   ├── _streams.py
│       │   ├── _subprocesses.py
│       │   ├── _synchronization.py
│       │   ├── _tasks.py
│       │   ├── _tempfile.py
│       │   ├── _testing.py
│       │   └── _typedattr.py
│       ├── abc
│       │   ├── __init__.py
│       │   ├── _eventloop.py
│       │   ├── _resources.py
│       │   ├── _sockets.py
│       │   ├── _streams.py
│       │   ├── _subprocesses.py
│       │   ├── _tasks.py
│       │   └── _testing.py
│       ├── from_thread.py
│       ├── lowlevel.py
│       ├── py.typed
│       ├── pytest_plugin.py
│       ├── streams
│       │   ├── __init__.py
│       │   ├── buffered.py
│       │   ├── file.py
│       │   ├── memory.py
│       │   ├── stapled.py
│       │   ├── text.py
│       │   └── tls.py
│       ├── to_interpreter.py
│       ├── to_process.py
│       └── to_thread.py
└── tests
    ├── __init__.py
    ├── conftest.py
    ├── streams
    │   ├── __init__.py
    │   ├── test_buffered.py
    │   ├── test_file.py
    │   ├── test_memory.py
    │   ├── test_stapled.py
    │   ├── test_text.py
    │   └── test_tls.py
    ├── test_contextmanagers.py
    ├── test_debugging.py
    ├── test_eventloop.py
    ├── test_fileio.py
    ├── test_from_thread.py
    ├── test_lowlevel.py
    ├── test_pytest_plugin.py
    ├── test_signals.py
    ├── test_sockets.py
    ├── test_subprocesses.py
    ├── test_synchronization.py
    ├── test_taskgroups.py
    ├── test_tempfile.py
    ├── test_to_interpreter.py
    ├── test_to_process.py
    ├── test_to_thread.py
    └── test_typedattr.py
/.github/ISSUE_TEMPLATE/bug_report.yaml:
--------------------------------------------------------------------------------
1 | name: Bug Report
2 | description: File a bug report
3 | labels: ["bug"]
4 | body:
5 |   - type: markdown
6 |     attributes:
7 |       value: >
8 |         If you observed a crash in the library, or saw unexpected behavior in it, report
9 |         your findings here.
10 |   - type: checkboxes
11 |     attributes:
12 |       label: Things to check first
13 |       options:
14 |         - label: >
15 |             I have searched the existing issues and didn't find my bug already reported
16 |             there
17 |           required: true
18 |         - label: >
19 |             I have checked that my bug is still present in the latest release
20 |           required: true
21 |   - type: input
22 |     id: anyio-version
23 |     attributes:
24 |       label: AnyIO version
25 |       description: What version of AnyIO were you running?
26 |     validations:
27 |       required: true
28 |   - type: input
29 |     id: python-version
30 |     attributes:
31 |       label: Python version
32 |       description: What version of Python were you running?
33 |     validations:
34 |       required: true
35 |   - type: textarea
36 |     id: what-happened
37 |     attributes:
38 |       label: What happened?
39 |       description: >
40 |         Unless you are reporting a crash, tell us what you expected to happen instead.
41 |     validations:
42 |       required: true
43 |   - type: textarea
44 |     id: mwe
45 |     attributes:
46 |       label: How can we reproduce the bug?
47 |       description: >
48 |         In order to investigate the bug, we need to be able to reproduce it on our own.
49 |         Please create a
50 |         [minimum workable example](https://stackoverflow.com/help/minimal-reproducible-example)
51 |         that demonstrates the problem. List any third party libraries required for this,
52 |         but avoid using them unless absolutely necessary.
53 |     validations:
54 |       required: true
55 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: false
2 | contact_links:
3 |   - name: GitHub Discussions
4 |     url: https://github.com/agronholm/anyio/discussions/categories/q-a
5 |     about: Technical support forum
6 |   - name: Support chat on Gitter
7 |     url: https://gitter.im/python-trio/AnyIO
8 |     about: Technical support chat
9 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/features_request.yaml:
--------------------------------------------------------------------------------
1 | name: Feature request
2 | description: Suggest a new feature
3 | labels: ["enhancement"]
4 | body:
5 |   - type: markdown
6 |     attributes:
7 |       value: >
8 |         If you have thought of a new feature that would increase the usefulness of this
9 |         project, please use this form to send us your idea.
10 |   - type: checkboxes
11 |     attributes:
12 |       label: Things to check first
13 |       options:
14 |         - label: >
15 |             I have searched the existing issues and didn't find my feature already
16 |             requested there
17 |           required: true
18 |   - type: textarea
19 |     id: feature
20 |     attributes:
21 |       label: Feature description
22 |       description: >
23 |         Describe the feature in detail. The more specific the description you can give,
24 |         the easier it should be to implement this feature.
25 |     validations:
26 |       required: true
27 |   - type: textarea
28 |     id: usecase
29 |     attributes:
30 |       label: Use case
31 |       description: >
32 |         Explain why you need this feature, and why you think it would be useful to
33 |         others too.
34 |     validations:
35 |       required: true
36 |
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 |
2 | ## Changes
3 |
4 | Fixes #.
5 |
6 |
7 |
8 | ## Checklist
9 |
10 | If this is a user-facing code change, like a bugfix or a new feature, please ensure that
11 | you've fulfilled the following conditions (where applicable):
12 |
13 | - [ ] You've added tests (in `tests/`) which would fail without your patch
14 | - [ ] You've updated the documentation (in `docs/`, in case of behavior changes or new
15 |   features)
16 | - [ ] You've added a new changelog entry (in `docs/versionhistory.rst`).
17 |
18 | If this is a trivial change, like a typo fix or a code reformatting, then you can ignore
19 | these instructions.
20 |
21 | ### Updating the changelog
22 |
23 | If there are no entries after the last release, use `**UNRELEASED**` as the version.
24 | If, say, your patch fixes issue #123, the entry should look like this:
25 |
26 | ```
27 | - Fix big bad boo-boo in task groups
28 |   (`#123 <https://github.com/agronholm/anyio/issues/123>`_; PR by @yourgithubaccount)
29 | ```
30 |
31 | If there's no issue linked, just link to your pull request instead by updating the
32 | changelog after you've created the PR.
33 |
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish packages to PyPI
2 |
3 | on:
4 |   push:
5 |     tags:
6 |       - "[0-9]+.[0-9]+.[0-9]+"
7 |       - "[0-9]+.[0-9]+.[0-9]+.post[0-9]+"
8 |       - "[0-9]+.[0-9]+.[0-9]+[a-b][0-9]+"
9 |       - "[0-9]+.[0-9]+.[0-9]+rc[0-9]+"
10 | 
11 | jobs:
12 |   build:
13 |     name: Build the source tarball and the wheel
14 |     runs-on: ubuntu-latest
15 |     environment: release
16 |     steps:
17 |       - uses: actions/checkout@v4
18 |       - name: Set up Python
19 |         uses: actions/setup-python@v5
20 |         with:
21 |           python-version: 3.x
22 |       - name: Install dependencies
23 |         run: pip install build
24 |       - name: Create packages
25 |         run: python -m build
26 |       - name: Archive packages
27 |         uses: actions/upload-artifact@v4
28 |         with:
29 |           name: dist
30 |           path: dist
31 | 
32 |   publish:
33 |     name: Publish build artifacts to the PyPI
34 |     needs: build
35 |     runs-on: ubuntu-latest
36 |     environment: release
37 |     permissions:
38 |       id-token: write
39 |     steps:
40 |       - name: Retrieve packages
41 |         uses: actions/download-artifact@v4
42 |       - name: Upload packages
43 |         uses: pypa/gh-action-pypi-publish@release/v1
44 | 
45 |   release:
46 |     name: Create a GitHub release
47 |     needs: build
48 |     runs-on: ubuntu-latest
49 |     permissions:
50 |       contents: write
51 |     steps:
52 |       - uses: actions/checkout@v4
53 |       - id: changelog
54 |         uses: agronholm/release-notes@v1
55 |         with:
56 |           path: docs/versionhistory.rst
57 |       - uses: ncipollo/release-action@v1
58 |         with:
59 |           body: ${{ steps.changelog.outputs.changelog }}
60 |
--------------------------------------------------------------------------------
/.github/workflows/test-downstream.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: Test against downstream projects
3 |
4 | on:
5 |   workflow_dispatch:
6 | 
7 | jobs:
8 |   starlette:
9 |     name: "Starlette on Python ${{ matrix.python-version }}"
10 |     runs-on: ubuntu-latest
11 |     strategy:
12 |       fail-fast: false
13 |       matrix:
14 |         python-version: ["3.9", "3.13"]
15 |     steps:
16 |       - uses: actions/checkout@v4
17 |         with:
18 |           repository: encode/starlette
19 |       - uses: actions/setup-python@v5
20 |         with:
21 |           python-version: "${{ matrix.python-version }}"
22 |           allow-prereleases: true
23 |           cache: pip
24 |           cache-dependency-path: requirements.txt
25 |       - name: Install dependencies
26 |         run: |
27 |           scripts/install
28 |           pip install anyio[trio]@git+https://github.com/agronholm/anyio.git@${{ github.ref_name }}
29 |       - name: Run tests
30 |         run: scripts/test
31 |       - name: Enforce coverage
32 |         run: scripts/coverage
33 | 
34 |   httpcore:
35 |     name: "Httpcore on Python ${{ matrix.python-version }}"
36 |     runs-on: ubuntu-latest
37 |     strategy:
38 |       fail-fast: false
39 |       matrix:
40 |         python-version: ["3.9", "3.13"]
41 |     steps:
42 |       - uses: actions/checkout@v4
43 |         with:
44 |           repository: encode/httpcore
45 |       - uses: actions/setup-python@v5
46 |         with:
47 |           python-version: "${{ matrix.python-version }}"
48 |           allow-prereleases: true
49 |           cache: pip
50 |           cache-dependency-path: requirements.txt
51 |       - name: Install dependencies
52 |         run: |
53 |           scripts/install
54 |           pip install anyio[trio]@git+https://github.com/agronholm/anyio.git@${{ github.ref_name }}
55 |       - name: Run tests
56 |         run: scripts/test
57 |       - name: Enforce coverage
58 |         run: scripts/coverage
59 | 
60 |   fastapi:
61 |     name: "FastAPI on Python ${{ matrix.python-version }}"
62 |     runs-on: ubuntu-latest
63 |     strategy:
64 |       fail-fast: false
65 |       matrix:
66 |         python-version: ["3.9", "3.13"]
67 |     env:
68 |       UV_SYSTEM_PYTHON: 1
69 |     steps:
70 |       - uses: actions/checkout@v4
71 |         with:
72 |           repository: tiangolo/fastapi
73 |       - uses: actions/setup-python@v5
74 |         with:
75 |           python-version: "${{ matrix.python-version }}"
76 |       - name: Setup uv
77 |         uses: astral-sh/setup-uv@v5
78 |         with:
79 |           version: "0.4.15"
80 |           enable-cache: true
81 |           cache-dependency-glob: |
82 |             requirements**.txt
83 |             pyproject.toml
84 |       - name: Install dependencies
85 |         run: |
86 |           uv pip install -r requirements-tests.txt
87 |           uv pip install anyio[trio]@git+https://github.com/agronholm/anyio.git@${{ github.ref_name }}
88 |       - name: Run tests
89 |         run: pytest -W ignore::ResourceWarning
90 |         env:
91 |           PYTHONPATH: ./docs_src
92 | 
93 |   litestar:
94 |     name: "Litestar on Python ${{ matrix.python-version }}"
95 |     runs-on: ubuntu-latest
96 |     strategy:
97 |       fail-fast: false
98 |       matrix:
99 |         python-version: ["3.9", "3.13"]
100 |     steps:
101 |       - uses: actions/checkout@v4
102 |         with:
103 |           repository: litestar-org/litestar
104 |       - name: Set up python ${{ matrix.python-version }}
105 |         uses: actions/setup-python@v5
106 |         with:
107 |           python-version: ${{ matrix.python-version }}
108 |       - name: Install uv
109 |         uses: astral-sh/setup-uv@v5
110 |         with:
111 |           version: "0.5.4"
112 |           enable-cache: true
113 |       - name: Install dependencies
114 |         run: |
115 |           uv sync
116 |           uv pip install anyio[trio]@git+https://github.com/agronholm/anyio.git@${{ github.ref_name }}
117 |       - name: Test
118 |         run: uv run pytest docs/examples tests -n auto
119 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: test suite
2 |
3 | on:
4 |   push:
5 |     branches: [master]
6 |   pull_request:
7 | 
8 | jobs:
9 |   changed-files:
10 |     runs-on: ubuntu-latest
11 |     outputs:
12 |       workflow-changed: ${{ steps.changed-files.outputs.workflow_any_changed }}
13 |       pyproject-changed: ${{ steps.changed-files.outputs.pyproject_any_changed }}
14 |       src-changed: ${{ steps.changed-files.outputs.src_any_changed }}
15 |       tests-changed: ${{ steps.changed-files.outputs.tests_any_changed }}
16 |       docs-changed: ${{ steps.changed-files.outputs.doc_any_changed }}
17 |     steps:
18 |       - uses: actions/checkout@v4
19 |       - name: Get changed files by category
20 |         id: changed-files
21 |         uses: tj-actions/changed-files@v46
22 |         with:
23 |           files_yaml: |
24 |             workflow:
25 |               - .github/workflows/test.yml
26 |             pyproject:
27 |               - pyproject.toml
28 |             src:
29 |               - src/**
30 |             tests:
31 |               - tests/**
32 |             doc:
33 |               - README.rst
34 |               - docs/**
35 | 
36 |   pyright:
37 |     runs-on: ubuntu-latest
38 |     needs: changed-files
39 |     if: |
40 |       ${{
41 |         (needs.changed-files.outputs.workflow-changed == 'true')
42 |         || (needs.changed-files.outputs.src-changed == 'true')
43 |       }}
44 |     steps:
45 |       - uses: actions/checkout@v4
46 |       - name: Set up Python
47 |         uses: actions/setup-python@v5
48 |         with:
49 |           python-version: 3.x
50 |       - uses: actions/cache@v4
51 |         with:
52 |           path: ~/.cache/pip
53 |           key: pip-pyright
54 |       - name: Install dependencies
55 |         run: pip install -e . pyright pytest
56 |       - name: Run pyright
57 |         run: pyright --verifytypes anyio
58 | 
59 |   test:
60 |     strategy:
61 |       fail-fast: false
62 |       matrix:
63 |         os: [ubuntu-latest]
64 |         python-version: ["3.9", "3.10", "3.11", "3.12", "3.13", "3.14", pypy-3.11]
65 |         include:
66 |           - os: macos-latest
67 |             python-version: "3.9"
68 |           - os: macos-latest
69 |             python-version: "3.13"
70 |           - os: windows-latest
71 |             python-version: "3.9"
72 |           - os: windows-latest
73 |             python-version: "3.13"
74 |     runs-on: ${{ matrix.os }}
75 |     needs: changed-files
76 |     if: |
77 |       ${{
78 |         (needs.changed-files.outputs.workflow-changed == 'true')
79 |         || (needs.changed-files.outputs.pyproject-changed == 'true')
80 |         || (needs.changed-files.outputs.src-changed == 'true')
81 |         || (needs.changed-files.outputs.tests-changed == 'true')
82 |       }}
83 |     steps:
84 |       - uses: actions/checkout@v4
85 |       - name: Set up Python ${{ matrix.python-version }}
86 |         uses: actions/setup-python@v5
87 |         with:
88 |           python-version: ${{ matrix.python-version }}
89 |           allow-prereleases: true
90 |           cache: pip
91 |           cache-dependency-path: pyproject.toml
92 |       - name: Ensure pip >= v25.1
93 |         run: python -m pip install "pip >= 25.1"
94 |       - name: Install the project and its dependencies
95 |         run: pip install --group test -e .
96 |       - name: Patch /etc/hosts
97 |         if: runner.os != 'Windows'
98 |         run: |
99 |           echo "1.2.3.4 xn--fa-hia.de" | sudo tee -a /etc/hosts
100 |           echo "5.6.7.8 fass.de" | sudo tee -a /etc/hosts
101 |       - name: Patch C:\Windows\System32\drivers\etc\hosts
102 |         if: runner.os == 'Windows'
103 |         run: |
104 |           Add-Content -Path C:\Windows\System32\drivers\etc\hosts -Value "1.2.3.4 xn--fa-hia.de"
105 |           Add-Content -Path C:\Windows\System32\drivers\etc\hosts -Value "5.6.7.8 fass.de"
106 |       - name: Test with pytest
107 |         run: coverage run -m pytest -v
108 |         timeout-minutes: 5
109 |         env:
110 |           PYTEST_DISABLE_PLUGIN_AUTOLOAD: 1
111 |       - name: Generate coverage report
112 |         run: coverage xml
113 |       - name: Upload Coverage
114 |         uses: coverallsapp/github-action@v2
115 |         with:
116 |           parallel: true
117 |           file: coverage.xml
118 | 
119 |   docs:
120 |     runs-on: ubuntu-latest
121 |     needs: changed-files
122 |     if: |
123 |       ${{
124 |         (needs.changed-files.outputs.workflow-changed == 'true')
125 |         || (needs.changed-files.outputs.pyproject-changed == 'true')
126 |         || (needs.changed-files.outputs.src-changed == 'true')
127 |         || (needs.changed-files.outputs.docs-changed == 'true')
128 |       }}
129 |     steps:
130 |       - uses: actions/checkout@v4
131 |       - name: Set up Python
132 |         uses: actions/setup-python@v5
133 |         with:
134 |           python-version: "3.11"
135 |           cache: pip
136 |           cache-dependency-path: pyproject.toml
137 |       - name: Ensure pip >= v25.1
138 |         run: python -m pip install "pip >= 25.1"
139 |       - name: Install the project and its dependencies
140 |         run: pip install --group doc -e .
141 |       - name: Build documentation
142 |         run: sphinx-build -W docs build/sphinx
143 | 
144 |   coveralls:
145 |     name: Finish Coveralls
146 |     needs: test
147 |     runs-on: ubuntu-latest
148 |     steps:
149 |       - name: Finished
150 |         uses: coverallsapp/github-action@v2
151 |         with:
152 |           parallel-finished: true
153 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.egg-info
2 | *.dist-info
3 | *.pyc
4 | build
5 | dist
6 | docs/_build
7 | venv*/
8 | __pycache__
9 | .coverage
10 | .pytest_cache/
11 | .mypy_cache/
12 | .ruff_cache/
13 | .hypothesis/
14 | .eggs/
15 | .tox
16 | .idea
17 | .cache
18 | .local
19 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | # This is the configuration file for pre-commit (https://pre-commit.com/).
2 | # To use:
3 | # * Install pre-commit (https://pre-commit.com/#installation)
4 | # * Copy this file as ".pre-commit-config.yaml"
5 | # * Run "pre-commit install".
6 | repos:
7 |   - repo: https://github.com/adrienverge/yamllint
8 |     rev: v1.37.1
9 |     hooks:
10 |       - id: yamllint
11 |         args: ['-d {extends: relaxed, rules: {line-length: disable}}', '-s']
12 | 
13 |   - repo: https://github.com/pre-commit/pre-commit-hooks
14 |     rev: v5.0.0
15 |     hooks:
16 |       - id: check-toml
17 |       - id: check-yaml
18 |       - id: debug-statements
19 |       - id: end-of-file-fixer
20 |       - id: mixed-line-ending
21 |         args: [ "--fix=lf" ]
22 |       - id: trailing-whitespace
23 | 
24 |   - repo: https://github.com/codespell-project/codespell
25 |     rev: v2.4.1
26 |     hooks:
27 |       - id: codespell
28 |         additional_dependencies:
29 |           - tomli
30 | 
31 |   - repo: https://github.com/astral-sh/ruff-pre-commit
32 |     rev: v0.11.12
33 |     hooks:
34 |       - id: ruff-check
35 |         args: [--fix, --show-fixes]
36 |       - id: ruff-format
37 | 
38 |   - repo: https://github.com/pre-commit/mirrors-mypy
39 |     rev: v1.16.0
40 |     hooks:
41 |       - id: mypy
42 |         additional_dependencies:
43 |           - pytest
44 |           - trio >= 0.26
45 |           - packaging
46 | 
47 |   - repo: https://github.com/pre-commit/pygrep-hooks
48 |     rev: v1.10.0
49 |     hooks:
50 |       - id: rst-backticks
51 |       - id: rst-directive-colons
52 |       - id: rst-inline-touching-normal
53 |
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | build:
4 |   os: ubuntu-22.04
5 |   tools:
6 |     python: "3.11"
7 |   jobs:
8 |     install:
9 |       - python -m pip install --no-cache-dir "pip >= 25.1"
10 |       - python -m pip install --upgrade --upgrade-strategy only-if-needed --no-cache-dir --group doc .
11 | 
12 | sphinx:
13 |   configuration: docs/conf.py
14 |   fail_on_warning: true
15 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2018 Alex Grönholm
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of
6 | this software and associated documentation files (the "Software"), to deal in
7 | the Software without restriction, including without limitation the rights to
8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
9 | the Software, and to permit persons to whom the Software is furnished to do so,
10 | subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | .. image:: https://github.com/agronholm/anyio/actions/workflows/test.yml/badge.svg
2 |    :target: https://github.com/agronholm/anyio/actions/workflows/test.yml
3 |    :alt: Build Status
4 | .. image:: https://coveralls.io/repos/github/agronholm/anyio/badge.svg?branch=master
5 |    :target: https://coveralls.io/github/agronholm/anyio?branch=master
6 |    :alt: Code Coverage
7 | .. image:: https://readthedocs.org/projects/anyio/badge/?version=latest
8 |    :target: https://anyio.readthedocs.io/en/latest/?badge=latest
9 |    :alt: Documentation
10 | .. image:: https://badges.gitter.im/gitterHQ/gitter.svg
11 |    :target: https://gitter.im/python-trio/AnyIO
12 |    :alt: Gitter chat
13 |
14 | AnyIO is an asynchronous networking and concurrency library that works on top of either asyncio_ or
15 | trio_. It implements trio-like `structured concurrency`_ (SC) on top of asyncio and works in harmony
16 | with the native SC of trio itself.
17 |
18 | Applications and libraries written against AnyIO's API will run unmodified on either asyncio_ or
19 | trio_. AnyIO can also be adopted into a library or application incrementally – bit by bit, no full
20 | refactoring necessary. It will blend in with the native libraries of your chosen backend.
21 |
22 | Documentation
23 | -------------
24 |
25 | View full documentation at: https://anyio.readthedocs.io/
26 |
27 | Features
28 | --------
29 |
30 | AnyIO offers the following functionality:
31 |
32 | * Task groups (nurseries_ in trio terminology)
33 | * High-level networking (TCP, UDP and UNIX sockets)
34 |
35 |   * `Happy eyeballs`_ algorithm for TCP connections (more robust than that of asyncio on Python
36 |     3.8)
37 |   * async/await style UDP sockets (unlike asyncio where you still have to use Transports and
38 |     Protocols)
39 | 
40 | * A versatile API for byte streams and object streams
41 | * Inter-task synchronization and communication (locks, conditions, events, semaphores, object
42 |   streams)
43 | * Worker threads
44 | * Subprocesses
45 | * Asynchronous file I/O (using worker threads)
46 | * Signal handling
47 |
48 | AnyIO also comes with its own pytest_ plugin, which supports asynchronous fixtures as
49 | well. It even works with the popular Hypothesis_ library.
50 |
51 | .. _asyncio: https://docs.python.org/3/library/asyncio.html
52 | .. _trio: https://github.com/python-trio/trio
53 | .. _structured concurrency: https://en.wikipedia.org/wiki/Structured_concurrency
54 | .. _nurseries: https://trio.readthedocs.io/en/stable/reference-core.html#nurseries-and-spawning
55 | .. _Happy eyeballs: https://en.wikipedia.org/wiki/Happy_Eyeballs
56 | .. _pytest: https://docs.pytest.org/en/latest/
57 | .. _Hypothesis: https://hypothesis.works/
58 |
--------------------------------------------------------------------------------
/docs/basics.rst:
--------------------------------------------------------------------------------
1 | The basics
2 | ==========
3 |
4 | .. py:currentmodule:: anyio
5 |
6 | AnyIO requires Python 3.9 or later to run. It is recommended that you set up a
7 | virtualenv_ when developing or playing around with AnyIO.
8 |
9 | Installation
10 | ------------
11 |
12 | To install AnyIO, run:
13 |
14 | .. code-block:: bash
15 |
16 | pip install anyio
17 |
18 | To install a supported version of Trio_, you can install it as an extra like this:
19 |
20 | .. code-block:: bash
21 |
22 | pip install anyio[trio]
23 |
24 | Running async programs
25 | ----------------------
26 |
27 | The simplest possible AnyIO program looks like this::
28 |
29 |     from anyio import run
30 | 
31 | 
32 |     async def main():
33 |         print('Hello, world!')
34 | 
35 |     run(main)
36 |
37 | This will run the program above on the default backend (asyncio). To run it on another
38 | supported backend, say Trio_, you can use the ``backend`` argument, like so::
39 |
40 |     run(main, backend='trio')
41 |
42 | But AnyIO code is not required to be run via :func:`run`. You can just as well use the
43 | native ``run()`` function of the backend library::
44 |
45 |     import sniffio
46 |     import trio
47 |     from anyio import sleep
48 | 
49 | 
50 |     async def main():
51 |         print('Hello')
52 |         await sleep(1)
53 |         print("I'm running on", sniffio.current_async_library())
54 | 
55 |     trio.run(main)
56 |
57 | .. versionchanged:: 4.0.0
58 |    On the ``asyncio`` backend, ``anyio.run()`` now uses a back-ported version of
59 |    :class:`asyncio.Runner` on Pythons older than 3.11.
60 |
61 | .. _backend options:
62 |
63 | Backend specific options
64 | ------------------------
65 |
66 | **Asyncio**:
67 |
68 | * options covered in the documentation of :class:`asyncio.Runner`
69 | * ``use_uvloop`` (``bool``, default=False): Use the faster uvloop_ event loop
70 |   implementation, if available (this is a shorthand for passing
71 |   ``loop_factory=uvloop.new_event_loop``, and is ignored if ``loop_factory`` is passed
72 |   a value other than ``None``)
73 |
74 | **Trio**: options covered in the
75 | `official documentation
76 | <https://trio.readthedocs.io/en/stable/reference-core.html#trio.run>`_
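   | 
   | For example, to run the program on asyncio with uvloop_ enabled, a minimal sketch
   | reusing ``main`` from above::
   | 
   |     run(main, backend='asyncio', backend_options={'use_uvloop': True})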
77 |
78 | .. versionchanged:: 3.2.0
79 |    The default value of ``use_uvloop`` was changed to ``False``.
80 | .. versionchanged:: 4.0.0
81 |    The ``policy`` option was replaced with ``loop_factory``.
82 |
83 | .. _uvloop: https://pypi.org/project/uvloop/
84 |
85 | Using native async libraries
86 | ----------------------------
87 |
88 | AnyIO lets you mix and match code written for AnyIO and code written for the
89 | asynchronous framework of your choice. There are a few rules to keep in mind however:
90 |
91 | * You can only use "native" libraries for the backend you're running, so you cannot, for
92 |   example, use a library written for Trio_ together with a library written for asyncio.
93 | * Tasks spawned by these "native" libraries on backends other than Trio_ are not subject
94 |   to the cancellation rules enforced by AnyIO
95 | * Threads spawned outside of AnyIO cannot use :func:`.from_thread.run` to call
96 |   asynchronous code
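   | 
   | For example, when running on the asyncio backend (the default), native asyncio APIs
   | can be awaited directly alongside AnyIO ones; a minimal sketch::
   | 
   |     import asyncio
   | 
   |     from anyio import run
   | 
   | 
   |     async def main():
   |         # Safe here because the asyncio backend is in use
   |         await asyncio.sleep(1)
   | 
   |     run(main)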
97 |
98 | .. _virtualenv: https://docs.python-guide.org/dev/virtualenvs/
99 | .. _Trio: https://github.com/python-trio/trio
100 |
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from __future__ import annotations
3 |
4 | from importlib.metadata import version as get_version
5 |
6 | from packaging.version import parse
7 |
8 | extensions = [
9 | "sphinx.ext.autodoc",
10 | "sphinx.ext.intersphinx",
11 | "sphinx_tabs.tabs",
12 | "sphinx_autodoc_typehints",
13 | "sphinx_rtd_theme",
14 | ]
15 |
16 | templates_path = ["_templates"]
17 | source_suffix = ".rst"
18 | master_doc = "index"
19 | project = "AnyIO"
20 | author = "Alex Grönholm"
21 | copyright = "2018, " + author
22 |
23 | v = parse(get_version("anyio"))
24 | version = v.base_version
25 | release = v.public
26 |
27 | language = "en"
28 |
29 | exclude_patterns = ["_build"]
30 | pygments_style = "sphinx"
31 | autodoc_default_options = {"members": True, "show-inheritance": True}
32 | autodoc_mock_imports = ["_typeshed", "pytest", "_pytest"]
33 | todo_include_todos = False
34 |
35 | html_theme = "sphinx_rtd_theme"
36 | htmlhelp_basename = "anyiodoc"
37 |
38 | intersphinx_mapping = {"python": ("https://docs.python.org/3/", None)}
39 |
--------------------------------------------------------------------------------
/docs/contextmanagers.rst:
--------------------------------------------------------------------------------
1 | Context manager mix-in classes
2 | ==============================
3 |
4 | .. py:currentmodule:: anyio
5 |
6 | Python classes that want to offer context management functionality normally implement
7 | ``__enter__()`` and ``__exit__()`` (for synchronous context managers) or
8 | ``__aenter__()`` and ``__aexit__()`` (for asynchronous context managers). While this
9 | offers precise control and re-entrancy support, embedding *other* context managers in
10 | this logic can be very error prone. To make this easier, AnyIO provides two context
11 | manager mix-in classes, :class:`ContextManagerMixin` and
12 | :class:`AsyncContextManagerMixin`. These classes provide implementations of
13 | ``__enter__()`` and ``__exit__()``, or ``__aenter__()`` and ``__aexit__()``, that offer
14 | another way to implement context managers, similar to
15 | :func:`@contextmanager <contextlib.contextmanager>` and
16 | :func:`@asynccontextmanager <contextlib.asynccontextmanager>` - a generator-based
17 | approach where the ``yield`` statement signals that the context has been entered.
18 |
19 | Here's a trivial example of how to use the mix-in classes:
20 |
21 | .. tabs::
22 | 
23 |    .. code-tab:: python Synchronous
24 | 
25 |       from collections.abc import Generator
26 |       from contextlib import contextmanager
27 |       from typing import Self
28 | 
29 |       from anyio import ContextManagerMixin
30 | 
31 |       class MyContextManager(ContextManagerMixin):
32 |           @contextmanager
33 |           def __contextmanager__(self) -> Generator[Self]:
34 |               print("entering context")
35 |               yield self
36 |               print("exiting context")
37 | 
38 |    .. code-tab:: python Asynchronous
39 | 
40 |       from collections.abc import AsyncGenerator
41 |       from contextlib import asynccontextmanager
42 |       from typing import Self
43 | 
44 |       from anyio import AsyncContextManagerMixin
45 | 
46 |       class MyAsyncContextManager(AsyncContextManagerMixin):
47 |           @asynccontextmanager
48 |           async def __asynccontextmanager__(self) -> AsyncGenerator[Self]:
49 |               print("entering context")
50 |               yield self
51 |               print("exiting context")
52 |
53 | When should I use the contextmanager mix-in classes?
54 | ----------------------------------------------------
55 |
56 | When embedding other context managers, a common mistake is forgetting about error
57 | handling when entering the context. Consider this example::
58 |
59 |     from typing import Self
60 | 
61 |     from anyio import create_task_group
62 | 
63 |     class MyBrokenContextManager:
64 |         async def __aenter__(self) -> Self:
65 |             self._task_group = await create_task_group().__aenter__()
66 |             # BOOM: missing the "arg" argument here to my_background_func!
67 |             self._task_group.start_soon(self.my_background_func)
68 |             return self
69 | 
70 |         async def __aexit__(self, exc_type, exc_value, traceback) -> bool | None:
71 |             return await self._task_group.__aexit__(exc_type, exc_value, traceback)
72 | 
73 |         async def my_background_func(self, arg: int) -> None:
74 |             ...
75 |
76 | It's easy to think that you have everything covered with ``__aexit__()`` here, but what
77 | if something goes wrong in ``__aenter__()``? The ``__aexit__()`` method will never be
78 | called.
79 |
80 | The mix-in classes solve this problem by providing a robust implementation of
81 | ``__enter__()``/``__exit__()`` or ``__aenter__()``/``__aexit__()`` that handles errors
82 | correctly. Thus, the above code should be written as::
83 |
84 |     from collections.abc import AsyncGenerator
85 |     from contextlib import asynccontextmanager
86 |     from typing import Self
87 | 
88 |     from anyio import AsyncContextManagerMixin, create_task_group
89 | 
90 |     class MyBetterContextManager(AsyncContextManagerMixin):
91 |         @asynccontextmanager
92 |         async def __asynccontextmanager__(self) -> AsyncGenerator[Self]:
93 |             async with create_task_group() as task_group:
94 |                 # Still crashes, but at least now the task group is exited
95 |                 task_group.start_soon(self.my_background_func)
96 |                 yield self
97 | 
98 |         async def my_background_func(self, arg: int) -> None:
99 |             ...
100 |
101 | .. seealso:: :ref:`cancel_scope_stack_corruption`
102 |
103 | Inheriting context manager classes
104 | ----------------------------------
105 |
106 | Here's how you would call the superclass implementation from a subclass:
107 |
108 | .. tabs::
109 | 
110 |    .. code-tab:: python Synchronous
111 | 
112 |       from collections.abc import Generator
113 |       from contextlib import contextmanager
114 |       from typing import Self
115 | 
116 |       from anyio import ContextManagerMixin
117 | 
118 |       class SuperclassContextManager(ContextManagerMixin):
119 |           @contextmanager
120 |           def __contextmanager__(self) -> Generator[Self]:
121 |               print("superclass entered")
122 |               try:
123 |                   yield self
124 |               finally:
125 |                   print("superclass exited")
126 | 
127 | 
128 |       class SubclassContextManager(SuperclassContextManager):
129 |           @contextmanager
130 |           def __contextmanager__(self) -> Generator[Self]:
131 |               print("subclass entered")
132 |               try:
133 |                   with super().__contextmanager__():
134 |                       yield self
135 |               finally:
136 |                   print("subclass exited")
137 | 
138 |    .. code-tab:: python Asynchronous
139 | 
140 |       from collections.abc import AsyncGenerator
141 |       from contextlib import asynccontextmanager
142 |       from typing import Self
143 | 
144 |       from anyio import AsyncContextManagerMixin
145 | 
146 |       class SuperclassContextManager(AsyncContextManagerMixin):
147 |           @asynccontextmanager
148 |           async def __asynccontextmanager__(self) -> AsyncGenerator[Self]:
149 |               print("superclass entered")
150 |               try:
151 |                   yield self
152 |               finally:
153 |                   print("superclass exited")
154 | 
155 | 
156 |       class SubclassContextManager(SuperclassContextManager):
157 |           @asynccontextmanager
158 |           async def __asynccontextmanager__(self) -> AsyncGenerator[Self]:
159 |               print("subclass entered")
160 |               try:
161 |                   async with super().__asynccontextmanager__():
162 |                       yield self
163 |               finally:
164 |                   print("subclass exited")
165 |
--------------------------------------------------------------------------------
/docs/contributing.rst:
--------------------------------------------------------------------------------
1 | Contributing to AnyIO
2 | =====================
3 |
4 | If you wish to contribute a fix or feature to AnyIO, please follow the following
5 | guidelines.
6 |
7 | When you make a pull request against the main AnyIO codebase, Github runs the AnyIO test
8 | suite against your modified code. Before making a pull request, you should ensure that
9 | the modified code passes tests locally. To that end, the use of tox_ is recommended. The
10 | default tox run first runs ``pre-commit`` and then the actual test suite. To run the
11 | checks on all environments in parallel, invoke tox with ``tox -p``.
12 |
13 | To build the documentation, run ``tox -e docs`` which will generate a directory named
14 | ``build`` in which you may view the formatted HTML documentation.
15 |
16 | AnyIO uses pre-commit_ to perform several code style/quality checks. It is recommended
17 | to activate pre-commit_ on your local clone of the repository (using
18 | ``pre-commit install``) to ensure that your changes will pass the same checks on GitHub.
19 |
20 | .. _tox: https://tox.readthedocs.io/en/latest/install.html
21 | .. _pre-commit: https://pre-commit.com/#installation
22 |
23 | Making a pull request on Github
24 | -------------------------------
25 |
26 | To get your changes merged to the main codebase, you need a Github account.
27 |
28 | #. Fork the repository (if you don't have your own fork of it yet) by navigating to the
29 |    `main AnyIO repository`_ and clicking on "Fork" near the top right corner.
30 | #. Clone the forked repository to your local machine with
31 |    ``git clone git@github.com:yourusername/anyio``.
32 | #. Create a branch for your pull request, like ``git checkout -b myfixname``
33 | #. Make the desired changes to the code base.
34 | #. Commit your changes locally. If your changes close an existing issue, add the text
35 |    ``Fixes XXX.`` or ``Closes XXX.`` to the commit message (where XXX is the issue
36 |    number).
37 | #. Push the changeset(s) to your forked repository (``git push``)
38 | #. Navigate to Pull requests page on the original repository (not your fork) and click
39 |    "New pull request"
40 | #. Click on the text "compare across forks".
41 | #. Select your own fork as the head repository and then select the correct branch name.
42 | #. Click on "Create pull request".
43 |
44 | If you have trouble, consult the `pull request making guide`_ on opensource.com.
45 |
46 | .. _main AnyIO repository: https://github.com/agronholm/anyio
47 | .. _pull request making guide:
48 |    https://opensource.com/article/19/7/create-pull-request-github
49 |
--------------------------------------------------------------------------------
/docs/faq.rst:
--------------------------------------------------------------------------------
1 | Frequently Asked Questions
2 | ==========================
3 |
4 | Why is Curio not supported as a backend?
5 | ----------------------------------------
6 |
7 | Curio_ was supported in AnyIO before v3.0. Support for it was dropped for two reasons:
8 |
9 | #. Its interface allowed only coroutine functions to access the Curio_ kernel. This
10 |    forced AnyIO to follow suit in its own API design, making it difficult to adapt
11 |    existing applications that relied on synchronous callbacks to use AnyIO. It also
12 |    interfered with the goal of matching Trio's API in functions with the same purpose
13 |    (e.g. ``Event.set()``).
14 | #. The maintainer specifically requested Curio_ support to be removed from AnyIO
15 |    (`issue 185 <https://github.com/agronholm/anyio/issues/185>`_).
16 |
17 | .. _Curio: https://github.com/dabeaz/curio
18 |
19 | Why is Twisted not supported as a backend?
20 | ------------------------------------------
21 |
22 | The minimum requirement to support Twisted_ would be for sniffio_ to be able to detect a
23 | running Twisted event loop (and be able to tell when Twisted_ is being run on top of its
24 | asyncio reactor). This is not currently supported in sniffio_, so AnyIO cannot support
25 | Twisted either.
26 |
27 | There is a Twisted `issue `_ that you can
28 | follow if you're interested in Twisted support in AnyIO.
29 |
30 | .. _Twisted: https://twistedmatrix.com/trac/
31 | .. _sniffio: https://github.com/python-trio/sniffio
32 |
--------------------------------------------------------------------------------
/docs/fileio.rst:
--------------------------------------------------------------------------------
1 | Asynchronous file I/O support
2 | =============================
3 |
4 | .. py:currentmodule:: anyio
5 |
6 | AnyIO provides asynchronous wrappers for blocking file operations. These wrappers run
7 | blocking operations in worker threads.
8 |
9 | Example::
10 |
11 |     from anyio import open_file, run
12 | 
13 | 
14 |     async def main():
15 |         async with await open_file('/some/path/somewhere') as f:
16 |             contents = await f.read()
17 |             print(contents)
18 | 
19 |     run(main)
20 |
21 | The wrappers also support asynchronous iteration of the file line by line, just as the
22 | standard file objects support synchronous iteration::
23 |
24 |     from anyio import open_file, run
25 | 
26 | 
27 |     async def main():
28 |         async with await open_file('/some/path/somewhere') as f:
29 |             async for line in f:
30 |                 print(line, end='')
31 | 
32 |     run(main)
33 |
34 | To wrap an existing open file object as an asynchronous file, you can use
35 | :func:`.wrap_file`::
36 |
37 |     from anyio import wrap_file, run
38 | 
39 | 
40 |     async def main():
41 |         with open('/some/path/somewhere') as f:
42 |             async for line in wrap_file(f):
43 |                 print(line, end='')
44 | 
45 |     run(main)
46 |
47 | .. note:: Closing the wrapper also closes the underlying synchronous file object.
48 |
49 | .. seealso:: :ref:`FileStreams`
50 |
51 | Asynchronous path operations
52 | ----------------------------
53 |
54 | AnyIO provides an asynchronous version of the :class:`pathlib.Path` class. It differs
55 | from the original in a number of ways:
56 |
57 | * Operations that perform disk I/O (like :meth:`~pathlib.Path.read_bytes`) are run in a
58 |   worker thread and thus require an ``await``
59 | * Methods like :meth:`~pathlib.Path.glob` return an asynchronous iterator that yields
60 |   asynchronous :class:`~.Path` objects
61 | * Properties and methods that normally return :class:`pathlib.Path` objects return
62 |   :class:`~.Path` objects instead
63 | * Methods and properties from the Python 3.10 API are available on all versions
64 | * Use as a context manager is not supported, as it is deprecated in pathlib
65 |
66 | For example, to create a file with binary content::
67 |
68 |     from anyio import Path, run
69 | 
70 | 
71 |     async def main():
72 |         path = Path('/foo/bar')
73 |         await path.write_bytes(b'hello, world')
74 | 
75 |     run(main)
76 |
77 | Asynchronously iterating over the contents of a directory can be done as follows::
78 |
79 |     from anyio import Path, run
80 | 
81 | 
82 |     async def main():
83 |         # Print the contents of every file (assumed to be text) in the directory /foo/bar
84 |         dir_path = Path('/foo/bar')
85 |         async for path in dir_path.iterdir():
86 |             if await path.is_file():
87 |                 print(await path.read_text())
88 |                 print('---------------------')
89 | 
90 |     run(main)
91 |
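   | Since :meth:`~pathlib.Path.glob` returns an asynchronous iterator, matching files can
   | be listed in a similar fashion; a minimal sketch::
   | 
   |     from anyio import Path, run
   | 
   | 
   |     async def main():
   |         # Print the path of every reStructuredText file under docs/
   |         async for path in Path('docs').glob('*.rst'):
   |             print(path)
   | 
   |     run(main)
   | 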
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | AnyIO
2 | =====
3 |
4 | .. include:: ../README.rst
5 |
6 | The manual
7 | ----------
8 |
9 | .. toctree::
10 |    :maxdepth: 2
11 | 
12 |    basics
13 |    tasks
14 |    cancellation
15 |    synchronization
16 |    streams
17 |    typedattrs
18 |    networking
19 |    threads
20 |    subprocesses
21 |    subinterpreters
22 |    fileio
23 |    tempfile
24 |    signals
25 |    contextmanagers
26 |    testing
27 |    api
28 |    migration
29 |    faq
30 |    support
31 |    contributing
32 |    versionhistory
33 |
--------------------------------------------------------------------------------
/docs/signals.rst:
--------------------------------------------------------------------------------
1 | Receiving operating system signals
2 | ==================================
3 |
4 | .. py:currentmodule:: anyio
5 |
6 | You may occasionally find it useful to receive signals sent to your application in a
7 | meaningful way. For example, when you receive a ``signal.SIGTERM`` signal, your
8 | application is expected to shut down gracefully. Likewise, ``SIGHUP`` is often used as a
9 | means to ask the application to reload its configuration.
10 |
11 | AnyIO provides a simple mechanism for you to receive the signals you're interested in::
12 |
13 |     import signal
14 | 
15 |     from anyio import open_signal_receiver, run
16 | 
17 | 
18 |     async def main():
19 |         with open_signal_receiver(signal.SIGTERM, signal.SIGHUP) as signals:
20 |             async for signum in signals:
21 |                 if signum == signal.SIGTERM:
22 |                     return
23 |                 elif signum == signal.SIGHUP:
24 |                     print('Reloading configuration')
25 | 
26 |     run(main)
27 |
28 | .. note:: Signal handlers can only be installed in the main thread, so they will not
29 |    work when the event loop is being run through :class:`~.from_thread.BlockingPortal`,
30 |    for instance.
31 | 
32 | .. note:: Windows does not natively support signals, so do not rely on this in a
33 |    cross-platform application.
34 |
35 | Handling KeyboardInterrupt and SystemExit
36 | -----------------------------------------
37 |
38 | By default, different backends handle the Ctrl+C (or Ctrl+Break on Windows) key
39 | combination and external termination (:exc:`KeyboardInterrupt` and :exc:`SystemExit`,
40 | respectively) differently: Trio raises the relevant exception inside the application
41 | while asyncio shuts down all the tasks and exits. If you need to do your own cleanup in
42 | these situations, you will need to install a signal handler::
43 |
44 |     import signal
45 | 
46 |     from anyio import open_signal_receiver, create_task_group, run
47 |     from anyio.abc import CancelScope
48 | 
49 | 
50 |     async def signal_handler(scope: CancelScope):
51 |         with open_signal_receiver(signal.SIGINT, signal.SIGTERM) as signals:
52 |             async for signum in signals:
53 |                 if signum == signal.SIGINT:
54 |                     print('Ctrl+C pressed!')
55 |                 else:
56 |                     print('Terminated!')
57 | 
58 |                 scope.cancel()
59 |                 return
60 | 
61 | 
62 |     async def main():
63 |         async with create_task_group() as tg:
64 |             tg.start_soon(signal_handler, tg.cancel_scope)
65 |             ...  # proceed with starting the actual application logic
66 | 
67 |     run(main)
68 |
69 | .. note:: Windows does not support the :data:`~signal.SIGTERM` signal, so if you need a
70 |    mechanism for graceful shutdown on Windows, you will have to find another way.
71 |
--------------------------------------------------------------------------------
/docs/subinterpreters.rst:
--------------------------------------------------------------------------------
1 | Working with subinterpreters
2 | ============================
3 |
4 | .. py:currentmodule:: anyio
5 |
6 | Subinterpreters offer a middle ground between worker threads and worker processes. They
7 | allow you to utilize multiple CPU cores to run Python code while avoiding the overhead
8 | and complexities of spawning subprocesses.
9 |
10 | .. warning:: Subinterpreter support is considered **experimental**. The underlying
11 |    Python API for managing subinterpreters has not been finalized yet, and has had
12 |    little real-world testing. As such, it is not recommended to use this feature for
13 |    anything important yet.
14 |
15 | Running a function in a worker interpreter
16 | ------------------------------------------
17 |
18 | Running functions in a worker interpreter makes sense when:
19 |
20 | * The code you want to run in parallel is CPU intensive
21 | * The code is either pure Python code, or extension code that does not release the
22 |   Global Interpreter Lock (GIL)
23 |
24 | If the code you're trying to run only does blocking network I/O, or file I/O, then
25 | you're better off using :doc:`worker threads <threads>` instead.
26 | 
27 | This is done by using :func:`.to_interpreter.run_sync`::
28 |
29 |     from anyio import run, to_interpreter
30 | 
31 |     from yourothermodule import cpu_intensive_function
32 | 
33 | 
34 |     async def main():
35 |         result = await to_interpreter.run_sync(
36 |             cpu_intensive_function, 'Hello, ', 'world!'
37 |         )
38 |         print(result)
39 | 
40 |     run(main)
41 | 
42 |
43 | Limitations
44 | -----------
45 |
46 | * Subinterpreters are only supported on Python 3.13 or later
47 | * Code in the ``__main__`` module cannot be run with this (as a consequence, this
48 |   applies to any functions defined in the REPL)
49 | * The target functions cannot react to cancellation
50 | * Unlike with threads, the code running in the subinterpreter cannot share mutable data
51 |   with other interpreters/threads (however, sharing *immutable* data is fine)
52 |
--------------------------------------------------------------------------------
/docs/subprocesses.rst:
--------------------------------------------------------------------------------
1 | Using subprocesses
2 | ==================
3 |
4 | .. py:currentmodule:: anyio
5 |
6 | AnyIO allows you to run arbitrary executables in subprocesses, either as a one-shot call
7 | or by opening a process handle for you that gives you more control over the subprocess.
8 |
9 | You can either give the command as a string, in which case it is passed to your default
10 | shell (equivalent to ``shell=True`` in :func:`subprocess.run`), or as a sequence of
11 | strings (``shell=False``) in which case the executable is the first item in the sequence
12 | and the rest are arguments passed to it.
13 |
14 | Running one-shot commands
15 | -------------------------
16 |
17 | To run an external command with one call, use :func:`~run_process`::
18 |
19 |     from anyio import run_process, run
20 | 
21 | 
22 |     async def main():
23 |         result = await run_process('ps')
24 |         print(result.stdout.decode())
25 | 
26 |     run(main)
27 |
28 | The snippet above runs the ``ps`` command within a shell. To run it directly::
29 |
30 |     from anyio import run_process, run
31 | 
32 | 
33 |     async def main():
34 |         result = await run_process(['ps'])
35 |         print(result.stdout.decode())
36 | 
37 |     run(main)
38 |
39 | Working with processes
40 | ----------------------
41 |
42 | When you have more complex requirements for your interaction with subprocesses, you can
43 | launch one with :func:`~open_process`::
44 |
45 |     from anyio import open_process, run
46 |     from anyio.streams.text import TextReceiveStream
47 | 
48 | 
49 |     async def main():
50 |         async with await open_process(['ps']) as process:
51 |             async for text in TextReceiveStream(process.stdout):
52 |                 print(text)
53 | 
54 |     run(main)
55 |
56 | See the API documentation of :class:`~.abc.Process` for more information.
57 |
58 | .. _RunInProcess:
59 |
60 | Running functions in worker processes
61 | -------------------------------------
62 |
63 | When you need to run CPU intensive code, worker processes are better than threads
64 | because, with the exception of the experimental free-threaded builds of Python 3.13 and
65 | later, current implementations of Python cannot run Python code in multiple threads at
66 | once.
67 |
68 | Exceptions to this rule are:
69 |
70 | #. Blocking I/O operations
71 | #. C extension code that explicitly releases the Global Interpreter Lock
72 | #. :doc:`Subinterpreter workers <subinterpreters>`
73 |    (experimental; available on Python 3.13 and later)
74 |
75 | If the code you wish to run does not fall into any of these categories, it's best to
76 | use worker processes instead in order to take advantage of multiple CPU cores.
77 | This is done by using :func:`.to_process.run_sync`::
78 |
79 |     import time
80 | 
81 |     from anyio import run, to_process
82 | 
83 | 
84 |     def cpu_intensive_function(arg1, arg2):
85 |         time.sleep(1)
86 |         return arg1 + arg2
87 | 
88 |     async def main():
89 |         result = await to_process.run_sync(cpu_intensive_function, 'Hello, ', 'world!')
90 |         print(result)
91 | 
92 |     # This check is important when the application uses to_process.run_sync()
93 |     if __name__ == '__main__':
94 |         run(main)
95 |
96 | Technical details
97 | *****************
98 |
99 | There are some limitations regarding the arguments and return values passed:
100 |
101 | * the arguments must be pickleable (using the highest available protocol)
102 | * the return value must be pickleable (using the highest available protocol)
103 | * the target callable must be importable (lambdas and inner functions won't work)
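   | 
   | For instance, a module-level function works, while a lambda does not; a minimal
   | sketch (``double`` is a hypothetical example function)::
   | 
   |     from anyio import run, to_process
   | 
   | 
   |     def double(x):  # module-level, and thus importable: OK
   |         return 2 * x
   | 
   |     async def main():
   |         print(await to_process.run_sync(double, 21))  # prints 42
   |         # to_process.run_sync(lambda x: 2 * x, 21) would fail: lambdas can't be pickled
   | 
   |     if __name__ == '__main__':
   |         run(main)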
104 |
105 | Other considerations:
106 |
107 | * Even ``cancellable=False`` runs can be cancelled before the request has been sent to
108 |   the worker process
109 | * If a cancellable call is cancelled during execution on the worker process, the worker
110 |   process will be killed
111 | * The worker process imports the parent's ``__main__`` module, so guarding for any
112 |   import time side effects using ``if __name__ == '__main__':`` is required to avoid
113 |   infinite recursion
114 | * ``sys.stdin``, ``sys.stdout`` and ``sys.stderr`` are redirected to ``/dev/null`` so
115 |   :func:`print` and :func:`input` won't work
116 | * Worker processes terminate after 5 minutes of inactivity, or when the event loop is
117 |   finished
118 | 
119 |   * On asyncio, either :func:`asyncio.run` or :func:`anyio.run` must be used for proper
120 |     cleanup to happen
121 | 
122 | * Multiprocessing-style synchronization primitives are currently not available
122 |
--------------------------------------------------------------------------------
/docs/support.rst:
--------------------------------------------------------------------------------
1 | Getting help
2 | ============
3 |
4 | If you are having trouble with AnyIO, make sure you've first checked the
5 | :doc:`FAQ <faq>` to see if your question is answered there. If not, you have a couple
6 | of ways to get support:
7 |
8 | * Post a question on `Stack Overflow`_ and use the ``anyio`` tag
9 | * Join the `python-trio/AnyIO`_ room on Gitter
10 |
11 | .. _Stack Overflow: https://stackoverflow.com/
12 | .. _python-trio/AnyIO: https://gitter.im/python-trio/AnyIO
13 |
14 | Reporting bugs
15 | ==============
16 |
17 | If you're fairly certain that you have discovered a bug, you can `file an issue`_ on
18 | Github. If you feel unsure, come talk to us first! The issue tracker is **not** the
19 | proper venue for asking support questions.
20 |
21 | .. _file an issue: https://github.com/agronholm/anyio/issues
22 |
--------------------------------------------------------------------------------
/docs/synchronization.rst:
--------------------------------------------------------------------------------
1 | Using synchronization primitives
2 | ================================
3 |
4 | .. py:currentmodule:: anyio
5 |
6 | Synchronization primitives are objects that are used by tasks to communicate and
7 | coordinate with each other. They are useful for things like distributing workload,
8 | notifying other tasks and guarding access to shared resources.
9 |
10 | .. note:: AnyIO primitives are not thread-safe, therefore they should not be used
11 |    directly from worker threads. Use :func:`~from_thread.run_sync` for that.
12 |
13 | Events
14 | ------
15 |
16 | Events are used to notify tasks that something they've been waiting to happen has
17 | happened. An event object can have multiple listeners and they are all notified when the
18 | event is triggered.
19 |
20 | Example::
21 |
22 |     from anyio import Event, create_task_group, run
23 | 
24 | 
25 |     async def notify(event):
26 |         event.set()
27 | 
28 | 
29 |     async def main():
30 |         event = Event()
31 |         async with create_task_group() as tg:
32 |             tg.start_soon(notify, event)
33 |             await event.wait()
34 |             print('Received notification!')
35 | 
36 |     run(main)
37 |
38 | .. note:: Unlike standard library Events, AnyIO events cannot be reused, and must be
39 |    replaced instead. This practice prevents a class of race conditions, and matches the
40 |    semantics of the Trio library.
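   | 
   | A minimal sketch of the replacement pattern (``Notifier`` is a hypothetical example
   | class)::
   | 
   |     from anyio import Event
   | 
   | 
   |     class Notifier:
   |         def __init__(self):
   |             self.event = Event()
   | 
   |         def notify(self):
   |             # Replace the event before setting it, so tasks that start waiting
   |             # after this call wait on a fresh event
   |             event, self.event = self.event, Event()
   |             event.set()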
41 |
42 | Semaphores
43 | ----------
44 |
45 | Semaphores are used for limiting access to a shared resource. A semaphore starts with a
46 | maximum value, which is decremented each time the semaphore is acquired by a task and
47 | incremented when it is released. If the value drops to zero, any attempt to acquire the
48 | semaphore will block until another task frees it.
49 |
50 | Example::
51 |
52 |     from anyio import Semaphore, create_task_group, sleep, run
53 | 
54 | 
55 |     async def use_resource(tasknum, semaphore):
56 |         async with semaphore:
57 |             print('Task number', tasknum, 'is now working with the shared resource')
58 |             await sleep(1)
59 | 
60 | 
61 |     async def main():
62 |         semaphore = Semaphore(2)
63 |         async with create_task_group() as tg:
64 |             for num in range(10):
65 |                 tg.start_soon(use_resource, num, semaphore)
66 | 
67 |     run(main)
68 |
69 | .. tip:: If the performance of semaphores is critical for you, you could pass
70 |    ``fast_acquire=True`` to :class:`Semaphore`. This has the effect of skipping the
71 |    :func:`~.lowlevel.cancel_shielded_checkpoint` call in :meth:`Semaphore.acquire` if
72 |    there is no contention (acquisition succeeds immediately). This could, in some
73 |    cases, lead to the task never yielding control back to the event loop if you use
74 |    the semaphore in a loop that does not have other yield points.
75 |
76 | Locks
77 | -----
78 |
79 | Locks are used to guard shared resources, ensuring that only a single task can access
80 | them at any given time. They function much like semaphores with a maximum value of 1,
81 | except that only the task that acquired the lock is allowed to release it.
82 |
83 | Example::
84 |
85 |     from anyio import Lock, create_task_group, sleep, run
86 | 
87 | 
88 |     async def use_resource(tasknum, lock):
89 |         async with lock:
90 |             print('Task number', tasknum, 'is now working with the shared resource')
91 |             await sleep(1)
92 | 
93 | 
94 |     async def main():
95 |         lock = Lock()
96 |         async with create_task_group() as tg:
97 |             for num in range(4):
98 |                 tg.start_soon(use_resource, num, lock)
99 | 
100 |     run(main)
101 |
102 | .. tip:: If the performance of locks is critical for you, you could pass
103 |    ``fast_acquire=True`` to :class:`Lock`. This has the effect of skipping the
104 |    :func:`~.lowlevel.cancel_shielded_checkpoint` call in :meth:`Lock.acquire` if there
105 |    is no contention (acquisition succeeds immediately). This could, in some cases, lead
106 |    to the task never yielding control back to the event loop if you use the lock in a
107 |    loop that does not have other yield points.
108 |
109 | Conditions
110 | ----------
111 |
112 | A condition is basically a combination of an event and a lock. A task first acquires
113 | the lock and then waits on the condition; waiting releases the lock, which is then
114 | reacquired when the task is woken up by a notification. The notifying task can choose
115 | to wake up more than one waiting task at once, or even all of them.
116 | 
117 | Like :class:`Lock`, :class:`Condition` requires that the task which locked it also be
118 | the one to release it.
119 |
120 | Example::
121 |
122 | from anyio import Condition, create_task_group, sleep, run
123 |
124 |
125 | async def listen(tasknum, condition):
126 | async with condition:
127 | await condition.wait()
128 | print('Woke up task number', tasknum)
129 |
130 |
131 | async def main():
132 | condition = Condition()
133 | async with create_task_group() as tg:
134 | for tasknum in range(6):
135 | tg.start_soon(listen, tasknum, condition)
136 |
137 | await sleep(1)
138 | async with condition:
139 | condition.notify(1)
140 |
141 | await sleep(1)
142 | async with condition:
143 | condition.notify(2)
144 |
145 | await sleep(1)
146 | async with condition:
147 | condition.notify_all()
148 |
149 | run(main)
150 |
151 | Capacity limiters
152 | -----------------
153 |
154 | Capacity limiters are like semaphores, except that a single borrower (the current
155 | task by default) can only hold a single token at a time. It is also possible to
156 | borrow a token on behalf of an arbitrary object, as long as that object is hashable
157 | (a sketch of this follows the example below).
157 |
158 | Example::
159 |
160 | from anyio import CapacityLimiter, create_task_group, sleep, run
161 |
162 |
163 | async def use_resource(tasknum, limiter):
164 | async with limiter:
165 | print('Task number', tasknum, 'is now working with the shared resource')
166 | await sleep(1)
167 |
168 |
169 | async def main():
170 | limiter = CapacityLimiter(2)
171 | async with create_task_group() as tg:
172 | for num in range(10):
173 | tg.start_soon(use_resource, num, limiter)
174 |
175 | run(main)
176 |
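177 | A minimal sketch of borrowing on behalf of an arbitrary hashable object, using the
178 | ``acquire_on_behalf_of()`` and ``release_on_behalf_of()`` methods of
179 | :class:`CapacityLimiter`::
180 | 
181 |     async def use_resource_for(key, limiter):
182 |         await limiter.acquire_on_behalf_of(key)
183 |         try:
184 |             ...  # work with the shared resource on behalf of ``key``
185 |         finally:
186 |             limiter.release_on_behalf_of(key)
187 | 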
177 | You can adjust the total number of tokens by setting a different value on the limiter's
178 | ``total_tokens`` property.
179 |
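180 | For instance::
181 | 
182 |     limiter = CapacityLimiter(2)
183 |     limiter.total_tokens = 10  # up to 10 tokens may now be borrowed at once
184 | 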
180 | Resource guards
181 | ---------------
182 |
183 | Some resources, such as sockets, are very sensitive about concurrent use and should
184 | not even allow attempts to use them concurrently. For such cases,
185 | :class:`ResourceGuard` is the appropriate solution::
186 |
187 |     from anyio import ResourceGuard
188 | 
189 | 
190 |     class Resource:
191 |         def __init__(self) -> None:
192 |             self._guard = ResourceGuard()
193 | 
194 |         async def do_something(self) -> None:
195 |             with self._guard:
196 |                 ...
194 |
195 | Now, if another task tries calling the ``do_something()`` method on the same
196 | ``Resource`` instance before the first call has finished, that will raise a
197 | :exc:`BusyResourceError`.
198 |
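199 | A minimal, self-contained sketch of that failure mode::
200 | 
201 |     from anyio import ResourceGuard, create_task_group, sleep, run
202 | 
203 | 
204 |     class Resource:
205 |         def __init__(self) -> None:
206 |             self._guard = ResourceGuard()
207 | 
208 |         async def do_something(self) -> None:
209 |             with self._guard:
210 |                 await sleep(1)
211 | 
212 | 
213 |     async def main():
214 |         resource = Resource()
215 |         async with create_task_group() as tg:
216 |             tg.start_soon(resource.do_something)
217 |             tg.start_soon(resource.do_something)  # raises BusyResourceError
218 | 
219 |     run(main)
220 | 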
199 | Queues
200 | ------
201 |
202 | In place of queues, AnyIO offers a more powerful construct:
203 | :ref:`memory object streams <memory object streams>`.
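204 | 
205 | Memory object streams cover the same ground as queues. A minimal sketch of using a
206 | stream pair in place of a queue::
207 | 
208 |     from anyio import create_memory_object_stream, create_task_group, run
209 | 
210 | 
211 |     async def consumer(receive_stream):
212 |         async with receive_stream:
213 |             async for item in receive_stream:
214 |                 print('Received', item)
215 | 
216 | 
217 |     async def main():
218 |         send_stream, receive_stream = create_memory_object_stream[int](10)
219 |         async with create_task_group() as tg:
220 |             tg.start_soon(consumer, receive_stream)
221 |             async with send_stream:
222 |                 for num in range(3):
223 |                     await send_stream.send(num)
224 | 
225 |     run(main)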
204 |
--------------------------------------------------------------------------------
/docs/tempfile.rst:
--------------------------------------------------------------------------------
1 | Asynchronous Temporary File and Directory
2 | =========================================
3 |
4 | .. py:currentmodule:: anyio
5 |
6 | This module provides asynchronous wrappers for handling temporary files and
7 | directories using the :mod:`tempfile` module. The asynchronous methods execute
8 | blocking operations in worker threads.
8 |
9 | Temporary File
10 | --------------
11 |
12 | :class:`TemporaryFile` creates a temporary file that is automatically deleted upon closure.
13 |
14 | **Example:**
15 |
16 | .. code-block:: python
17 |
18 | from anyio import TemporaryFile, run
19 |
20 | async def main():
21 | async with TemporaryFile(mode="w+") as f:
22 | await f.write("Temporary file content")
23 | await f.seek(0)
24 | print(await f.read()) # Output: Temporary file content
25 |
26 | run(main)
27 |
28 | Named Temporary File
29 | --------------------
30 |
31 | :class:`NamedTemporaryFile` works similarly to :class:`TemporaryFile`, but the file
32 | has a visible name in the filesystem.
32 |
33 | **Example:**
34 |
35 | .. code-block:: python
36 |
37 | from anyio import NamedTemporaryFile, run
38 |
39 | async def main():
40 | async with NamedTemporaryFile(mode="w+", delete=True) as f:
41 | print(f"Temporary file name: {f.name}")
42 | await f.write("Named temp file content")
43 | await f.seek(0)
44 | print(await f.read())
45 |
46 | run(main)
47 |
48 | Spooled Temporary File
49 | ----------------------
50 |
51 | :class:`SpooledTemporaryFile` keeps its contents in memory until they exceed
52 | ``max_size``, at which point the data is rolled over to disk. This makes it useful
53 | for temporary data that is usually small enough to stay in memory.
52 |
53 | **Example:**
54 |
55 | .. code-block:: python
56 |
57 | from anyio import SpooledTemporaryFile, run
58 |
59 | async def main():
60 | async with SpooledTemporaryFile(max_size=1024, mode="w+") as f:
61 | await f.write("Spooled temp file content")
62 | await f.seek(0)
63 | print(await f.read())
64 |
65 | run(main)
66 |
67 | Temporary Directory
68 | -------------------
69 |
70 | :class:`TemporaryDirectory` provides an asynchronous way to create a temporary
71 | directory that is cleaned up when the context manager exits.
71 |
72 | **Example:**
73 |
74 | .. code-block:: python
75 |
76 | from anyio import TemporaryDirectory, run
77 |
78 | async def main():
79 | async with TemporaryDirectory() as temp_dir:
80 | print(f"Temporary directory path: {temp_dir}")
81 |
82 | run(main)
83 |
84 | Low-Level Temporary File and Directory Creation
85 | -----------------------------------------------
86 |
87 | For more control, the module provides lower-level functions:
88 |
89 | - :func:`mkstemp` - Creates a temporary file and returns a tuple of file descriptor and path.
90 | - :func:`mkdtemp` - Creates a temporary directory and returns the directory path.
91 | - :func:`gettempdir` - Returns the path of the default temporary directory.
92 | - :func:`gettempdirb` - Returns the path of the default temporary directory in bytes.
93 |
94 | **Example:**
95 |
96 | .. code-block:: python
97 |
98 | from anyio import mkstemp, mkdtemp, gettempdir, run
99 | import os
100 |
101 | async def main():
102 | fd, path = await mkstemp(suffix=".txt", prefix="mkstemp_", text=True)
103 | print(f"Created temp file: {path}")
104 |
105 | temp_dir = await mkdtemp(prefix="mkdtemp_")
106 | print(f"Created temp dir: {temp_dir}")
107 |
108 | print(f"Default temp dir: {await gettempdir()}")
109 |
110 |         os.close(fd)
111 |         os.remove(path)
112 |         os.rmdir(temp_dir)
111 |
112 | run(main)
113 |
114 | .. note::
115 | Using these functions requires manual cleanup of the created files and directories.
116 |
117 | .. seealso::
118 |
119 | - Python Standard Library: :mod:`tempfile`
120 |   (`official documentation <https://docs.python.org/3/library/tempfile.html>`_)
120 |
--------------------------------------------------------------------------------
/docs/typedattrs.rst:
--------------------------------------------------------------------------------
1 | Using typed attributes
2 | ======================
3 |
4 | .. py:currentmodule:: anyio
5 |
6 | On AnyIO, streams and listeners can be layered on top of each other to provide extra
7 | functionality. But when you want to look up information from one of the layers down
8 | below, you might have to traverse the entire chain to find what you're looking for,
9 | which is highly inconvenient. To address this, AnyIO has a system of *typed attributes*
10 | where you can look for a specific attribute by its unique key. If a stream or listener
11 | wrapper does not have the attribute you're looking for, it will look it up in the
12 | wrapped instance, and that wrapper can look in its wrapped instance and so on, until the
13 | attribute is either found or the end of the chain is reached. This also lets wrappers
14 | override attributes from the wrapped objects when necessary.
15 |
16 | A common use case is finding the IP address of the remote side of a TCP connection when
17 | the stream may be either :class:`~.abc.SocketStream` or
18 | :class:`~.streams.tls.TLSStream`::
19 |
20 | from anyio import connect_tcp
21 | from anyio.abc import SocketAttribute
22 |
23 |
24 | async def connect(host, port, tls: bool):
25 | stream = await connect_tcp(host, port, tls=tls)
26 | print('Connected to', stream.extra(SocketAttribute.remote_address))
27 |
28 | Each typed attribute provider class should document the set of attributes it provides on
29 | its own.
30 |
31 | Defining your own typed attributes
32 | ----------------------------------
33 |
34 | By convention, typed attributes are stored together in a container class with other
35 | attributes of the same category::
36 |
37 | from anyio import TypedAttributeSet, typed_attribute
38 |
39 |
40 | class MyTypedAttribute(TypedAttributeSet):
41 | string_valued_attribute: str = typed_attribute()
42 | some_float_attribute: float = typed_attribute()
43 |
44 | To provide values for these attributes, implement the
45 | :meth:`~.TypedAttributeProvider.extra_attributes` property in your class::
46 |
47 |     from collections.abc import Callable, Mapping
48 |     from typing import Any
49 | 
50 |     from anyio import TypedAttributeProvider
51 | 
52 | 
53 |     class MyAttributeProvider(TypedAttributeProvider):
54 |         @property
55 |         def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
56 |             return {
57 |                 MyTypedAttribute.string_valued_attribute: lambda: 'my attribute value',
58 |                 MyTypedAttribute.some_float_attribute: lambda: 6.492,
59 |             }
59 |
60 | If your class inherits from another typed attribute provider, make sure you include its
61 | attributes in the return value::
62 |
63 |     class AnotherAttributeProvider(MyAttributeProvider):
64 |         @property
65 |         def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
66 |             return {
67 |                 **super().extra_attributes,
68 |                 MyTypedAttribute.string_valued_attribute: lambda: 'overridden attribute value',
69 |             }
70 |
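71 | A consumer can then retrieve these values with
72 | :meth:`~.TypedAttributeProvider.extra` (a minimal sketch using the classes defined
73 | above)::
74 | 
75 |     provider = AnotherAttributeProvider()
76 |     print(provider.extra(MyTypedAttribute.string_valued_attribute))
77 |     # prints: overridden attribute value
78 |     print(provider.extra(MyTypedAttribute.some_float_attribute))
79 |     # prints: 6.492 (inherited from MyAttributeProvider)
80 | 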
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = [
3 | "setuptools >= 77",
4 | "setuptools_scm >= 6.4"
5 | ]
6 | build-backend = "setuptools.build_meta"
7 |
8 | [project]
9 | name = "anyio"
10 | description = "High level compatibility layer for multiple asynchronous event loop implementations"
11 | readme = "README.rst"
12 | authors = [{name = "Alex Grönholm", email = "alex.gronholm@nextday.fi"}]
13 | license = "MIT"
14 | classifiers = [
15 | "Development Status :: 5 - Production/Stable",
16 | "Intended Audience :: Developers",
17 | "Framework :: AnyIO",
18 | "Typing :: Typed",
19 | "Programming Language :: Python",
20 | "Programming Language :: Python :: 3",
21 | "Programming Language :: Python :: 3.9",
22 | "Programming Language :: Python :: 3.10",
23 | "Programming Language :: Python :: 3.11",
24 | "Programming Language :: Python :: 3.12",
25 | "Programming Language :: Python :: 3.13",
26 | ]
27 | requires-python = ">= 3.9"
28 | dependencies = [
29 | "exceptiongroup >= 1.0.2; python_version < '3.11'",
30 | "idna >= 2.8",
31 | "sniffio >= 1.1",
32 | "typing_extensions >= 4.5; python_version < '3.13'",
33 | ]
34 | dynamic = ["version"]
35 |
36 | [project.urls]
37 | Documentation = "https://anyio.readthedocs.io/en/latest/"
38 | Changelog = "https://anyio.readthedocs.io/en/stable/versionhistory.html"
39 | "Source code" = "https://github.com/agronholm/anyio"
40 | "Issue tracker" = "https://github.com/agronholm/anyio/issues"
41 |
42 | [project.optional-dependencies]
43 | trio = ["trio >= 0.26.1"]
44 |
45 | [project.entry-points]
46 | pytest11 = {anyio = "anyio.pytest_plugin"}
47 |
48 | [dependency-groups]
49 | test = [
50 | "anyio[trio]",
51 | "blockbuster >= 1.5.23",
52 | "coverage[toml] >= 7",
53 | "exceptiongroup >= 1.2.0",
54 | "hypothesis >= 4.0",
55 | "psutil >= 5.9",
56 | "pytest >= 7.0",
57 | "pytest-mock >= 3.14",
58 | "trustme",
59 | "truststore >= 0.9.1; python_version >= '3.10'",
60 | """\
61 | uvloop >= 0.21; platform_python_implementation == 'CPython' \
62 | and platform_system != 'Windows' \
63 | and python_version < '3.14'\
64 | """
65 | ]
66 | doc = [
67 | "packaging",
68 | "Sphinx ~= 8.2",
69 | "sphinx_rtd_theme",
70 | "sphinx-autodoc-typehints >= 1.2.0",
71 | "sphinx-tabs >= 3.3.1",
72 | ]
73 |
74 | [tool.setuptools_scm]
75 | version_scheme = "post-release"
76 | local_scheme = "dirty-tag"
77 |
78 | [tool.ruff]
79 | src = ["src"]
80 |
81 | [tool.ruff.lint]
82 | extend-select = [
83 | "ASYNC", # flake8-async
84 | "B", # flake8-bugbear
85 | "C4", # flake8-comprehensions
86 | "G", # flake8-logging-format
87 | "I", # isort
88 | "ISC", # flake8-implicit-str-concat
89 | "PERF", # flake8-performance
90 | "PGH", # pygrep-hooks
91 | "RUF100", # unused noqa (yesqa)
92 | "T201", # print
93 | "UP", # pyupgrade
94 | "W", # pycodestyle warnings
95 | ]
96 | ignore = ["B009", "PERF203"]
97 |
98 | [tool.ruff.lint.isort]
99 | "required-imports" = ["from __future__ import annotations"]
100 |
101 | [tool.ruff.lint.per-file-ignores]
102 | "tests/test_tempfile.py" = ["ASYNC230"]
103 |
104 | [tool.mypy]
105 | python_version = "3.13"
106 | strict = true
107 | disallow_any_generics = false
108 | warn_return_any = false
109 | disallow_untyped_decorators = false
110 |
111 | [tool.pytest.ini_options]
112 | addopts = "-rsfE --tb=short --strict-config --strict-markers -p anyio -p pytest_mock -p no:asyncio -p no:trio"
113 | testpaths = ["tests"]
114 | xfail_strict = true
115 | filterwarnings = [
116 | "error",
117 | # Ignore resource warnings due to a CPython/Windows bug (https://bugs.python.org/issue44428)
118 | "ignore:unclosed transport <_ProactorSocketTransport.*:ResourceWarning",
119 | # Workaround for Python 3.9.7 (see https://bugs.python.org/issue45097)
120 | "ignore:The loop argument is deprecated since Python 3\\.8, and scheduled for removal in Python 3\\.10\\.:DeprecationWarning:asyncio",
121 | ]
122 | markers = [
123 | "network: marks tests as requiring Internet access",
124 | ]
125 |
126 | [tool.codespell]
127 | ignore-words-list = "asend,daa,hel"
128 |
129 | [tool.coverage.run]
130 | source = ["anyio"]
131 | relative_files = true
132 |
133 | [tool.coverage.report]
134 | show_missing = true
135 | exclude_also = [
136 | "if TYPE_CHECKING:",
137 | "@(abc\\.)?abstractmethod",
138 | ]
139 |
140 | [tool.tox]
141 | env_list = ["pre-commit", "py39", "py310", "py311", "py312", "py313", "py314", "pypy3"]
142 | skip_missing_interpreters = true
143 | requires = ["tox >= 4.22"]
144 |
145 | [tool.tox.env_run_base]
146 | depends = ["pre-commit"]
147 | package = "editable"
148 | commands = [["coverage", "run", "-m", "pytest", { replace = "posargs", extend = true }]]
149 | dependency_groups = ["test"]
150 |
151 | [tool.tox.env.pypy3]
152 | commands = [["pytest", { replace = "posargs", extend = true }]]
153 |
154 | [tool.tox.env.pre-commit]
155 | commands = [["pre-commit", "run", "--all-files"]]
156 | depends = []
157 | allowlist_externals = ["pre-commit"]
158 | package = "skip"
159 |
160 | [tool.tox.env.pyright]
161 | deps = ["pyright"]
162 | commands = [["pyright", "--verifytypes", "anyio"]]
163 |
164 | [tool.tox.env.docs]
165 | depends = []
166 | dependency_groups = ["doc"]
167 | commands = [["sphinx-build", "-W", "docs", "build/sphinx"]]
168 |
--------------------------------------------------------------------------------
/src/anyio/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from ._core._contextmanagers import AsyncContextManagerMixin as AsyncContextManagerMixin
4 | from ._core._contextmanagers import ContextManagerMixin as ContextManagerMixin
5 | from ._core._eventloop import current_time as current_time
6 | from ._core._eventloop import get_all_backends as get_all_backends
7 | from ._core._eventloop import get_cancelled_exc_class as get_cancelled_exc_class
8 | from ._core._eventloop import run as run
9 | from ._core._eventloop import sleep as sleep
10 | from ._core._eventloop import sleep_forever as sleep_forever
11 | from ._core._eventloop import sleep_until as sleep_until
12 | from ._core._exceptions import BrokenResourceError as BrokenResourceError
13 | from ._core._exceptions import BrokenWorkerIntepreter as BrokenWorkerIntepreter
14 | from ._core._exceptions import BrokenWorkerProcess as BrokenWorkerProcess
15 | from ._core._exceptions import BusyResourceError as BusyResourceError
16 | from ._core._exceptions import ClosedResourceError as ClosedResourceError
17 | from ._core._exceptions import ConnectionFailed as ConnectionFailed
18 | from ._core._exceptions import DelimiterNotFound as DelimiterNotFound
19 | from ._core._exceptions import EndOfStream as EndOfStream
20 | from ._core._exceptions import IncompleteRead as IncompleteRead
21 | from ._core._exceptions import TypedAttributeLookupError as TypedAttributeLookupError
22 | from ._core._exceptions import WouldBlock as WouldBlock
23 | from ._core._fileio import AsyncFile as AsyncFile
24 | from ._core._fileio import Path as Path
25 | from ._core._fileio import open_file as open_file
26 | from ._core._fileio import wrap_file as wrap_file
27 | from ._core._resources import aclose_forcefully as aclose_forcefully
28 | from ._core._signals import open_signal_receiver as open_signal_receiver
29 | from ._core._sockets import TCPConnectable as TCPConnectable
30 | from ._core._sockets import UNIXConnectable as UNIXConnectable
31 | from ._core._sockets import as_connectable as as_connectable
32 | from ._core._sockets import connect_tcp as connect_tcp
33 | from ._core._sockets import connect_unix as connect_unix
34 | from ._core._sockets import create_connected_udp_socket as create_connected_udp_socket
35 | from ._core._sockets import (
36 | create_connected_unix_datagram_socket as create_connected_unix_datagram_socket,
37 | )
38 | from ._core._sockets import create_tcp_listener as create_tcp_listener
39 | from ._core._sockets import create_udp_socket as create_udp_socket
40 | from ._core._sockets import create_unix_datagram_socket as create_unix_datagram_socket
41 | from ._core._sockets import create_unix_listener as create_unix_listener
42 | from ._core._sockets import getaddrinfo as getaddrinfo
43 | from ._core._sockets import getnameinfo as getnameinfo
44 | from ._core._sockets import notify_closing as notify_closing
45 | from ._core._sockets import wait_readable as wait_readable
46 | from ._core._sockets import wait_socket_readable as wait_socket_readable
47 | from ._core._sockets import wait_socket_writable as wait_socket_writable
48 | from ._core._sockets import wait_writable as wait_writable
49 | from ._core._streams import create_memory_object_stream as create_memory_object_stream
50 | from ._core._subprocesses import open_process as open_process
51 | from ._core._subprocesses import run_process as run_process
52 | from ._core._synchronization import CapacityLimiter as CapacityLimiter
53 | from ._core._synchronization import (
54 | CapacityLimiterStatistics as CapacityLimiterStatistics,
55 | )
56 | from ._core._synchronization import Condition as Condition
57 | from ._core._synchronization import ConditionStatistics as ConditionStatistics
58 | from ._core._synchronization import Event as Event
59 | from ._core._synchronization import EventStatistics as EventStatistics
60 | from ._core._synchronization import Lock as Lock
61 | from ._core._synchronization import LockStatistics as LockStatistics
62 | from ._core._synchronization import ResourceGuard as ResourceGuard
63 | from ._core._synchronization import Semaphore as Semaphore
64 | from ._core._synchronization import SemaphoreStatistics as SemaphoreStatistics
65 | from ._core._tasks import TASK_STATUS_IGNORED as TASK_STATUS_IGNORED
66 | from ._core._tasks import CancelScope as CancelScope
67 | from ._core._tasks import create_task_group as create_task_group
68 | from ._core._tasks import current_effective_deadline as current_effective_deadline
69 | from ._core._tasks import fail_after as fail_after
70 | from ._core._tasks import move_on_after as move_on_after
71 | from ._core._tempfile import NamedTemporaryFile as NamedTemporaryFile
72 | from ._core._tempfile import SpooledTemporaryFile as SpooledTemporaryFile
73 | from ._core._tempfile import TemporaryDirectory as TemporaryDirectory
74 | from ._core._tempfile import TemporaryFile as TemporaryFile
75 | from ._core._tempfile import gettempdir as gettempdir
76 | from ._core._tempfile import gettempdirb as gettempdirb
77 | from ._core._tempfile import mkdtemp as mkdtemp
78 | from ._core._tempfile import mkstemp as mkstemp
79 | from ._core._testing import TaskInfo as TaskInfo
80 | from ._core._testing import get_current_task as get_current_task
81 | from ._core._testing import get_running_tasks as get_running_tasks
82 | from ._core._testing import wait_all_tasks_blocked as wait_all_tasks_blocked
83 | from ._core._typedattr import TypedAttributeProvider as TypedAttributeProvider
84 | from ._core._typedattr import TypedAttributeSet as TypedAttributeSet
85 | from ._core._typedattr import typed_attribute as typed_attribute
86 |
87 | # Re-export imports so they look like they live directly in this package
88 | for __value in list(locals().values()):
89 | if getattr(__value, "__module__", "").startswith("anyio."):
90 | __value.__module__ = __name__
91 |
92 | del __value
93 |
--------------------------------------------------------------------------------
/src/anyio/_backends/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/agronholm/anyio/561d81270a12f7c6bbafb5bc5fad99a2a13f96be/src/anyio/_backends/__init__.py
--------------------------------------------------------------------------------
/src/anyio/_core/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/agronholm/anyio/561d81270a12f7c6bbafb5bc5fad99a2a13f96be/src/anyio/_core/__init__.py
--------------------------------------------------------------------------------
/src/anyio/_core/_asyncio_selector_thread.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import asyncio
4 | import socket
5 | import threading
6 | from collections.abc import Callable
7 | from selectors import EVENT_READ, EVENT_WRITE, DefaultSelector
8 | from typing import TYPE_CHECKING, Any
9 |
10 | if TYPE_CHECKING:
11 | from _typeshed import FileDescriptorLike
12 |
13 | _selector_lock = threading.Lock()
14 | _selector: Selector | None = None
15 |
16 |
17 | class Selector:
18 | def __init__(self) -> None:
19 | self._thread = threading.Thread(target=self.run, name="AnyIO socket selector")
20 | self._selector = DefaultSelector()
21 | self._send, self._receive = socket.socketpair()
22 | self._send.setblocking(False)
23 | self._receive.setblocking(False)
24 | # This somewhat reduces the amount of memory wasted queueing up data
25 | # for wakeups. With these settings, maximum number of 1-byte sends
26 | # before getting BlockingIOError:
27 | # Linux 4.8: 6
28 | # macOS (darwin 15.5): 1
29 | # Windows 10: 525347
30 | # Windows you're weird. (And on Windows setting SNDBUF to 0 makes send
31 | # blocking, even on non-blocking sockets, so don't do that.)
32 | self._receive.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1)
33 | self._send.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1)
34 | # On Windows this is a TCP socket so this might matter. On other
35 | # platforms this fails b/c AF_UNIX sockets aren't actually TCP.
36 | try:
37 | self._send.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
38 | except OSError:
39 | pass
40 |
41 | self._selector.register(self._receive, EVENT_READ)
42 | self._closed = False
43 |
44 | def start(self) -> None:
45 | self._thread.start()
46 | threading._register_atexit(self._stop) # type: ignore[attr-defined]
47 |
48 | def _stop(self) -> None:
49 | global _selector
50 | self._closed = True
51 | self._notify_self()
52 | self._send.close()
53 | self._thread.join()
54 | self._selector.unregister(self._receive)
55 | self._receive.close()
56 | self._selector.close()
57 | _selector = None
58 | assert not self._selector.get_map(), (
59 | "selector still has registered file descriptors after shutdown"
60 | )
61 |
62 | def _notify_self(self) -> None:
63 | try:
64 | self._send.send(b"\x00")
65 | except BlockingIOError:
66 | pass
67 |
68 | def add_reader(self, fd: FileDescriptorLike, callback: Callable[[], Any]) -> None:
69 | loop = asyncio.get_running_loop()
70 | try:
71 | key = self._selector.get_key(fd)
72 | except KeyError:
73 | self._selector.register(fd, EVENT_READ, {EVENT_READ: (loop, callback)})
74 | else:
75 | if EVENT_READ in key.data:
76 | raise ValueError(
77 | "this file descriptor is already registered for reading"
78 | )
79 |
80 | key.data[EVENT_READ] = loop, callback
81 | self._selector.modify(fd, key.events | EVENT_READ, key.data)
82 |
83 | self._notify_self()
84 |
85 | def add_writer(self, fd: FileDescriptorLike, callback: Callable[[], Any]) -> None:
86 | loop = asyncio.get_running_loop()
87 | try:
88 | key = self._selector.get_key(fd)
89 | except KeyError:
90 | self._selector.register(fd, EVENT_WRITE, {EVENT_WRITE: (loop, callback)})
91 | else:
92 | if EVENT_WRITE in key.data:
93 | raise ValueError(
94 | "this file descriptor is already registered for writing"
95 | )
96 |
97 | key.data[EVENT_WRITE] = loop, callback
98 | self._selector.modify(fd, key.events | EVENT_WRITE, key.data)
99 |
100 | self._notify_self()
101 |
102 | def remove_reader(self, fd: FileDescriptorLike) -> bool:
103 | try:
104 | key = self._selector.get_key(fd)
105 | except KeyError:
106 | return False
107 |
108 | if new_events := key.events ^ EVENT_READ:
109 | del key.data[EVENT_READ]
110 | self._selector.modify(fd, new_events, key.data)
111 | else:
112 | self._selector.unregister(fd)
113 |
114 | return True
115 |
116 | def remove_writer(self, fd: FileDescriptorLike) -> bool:
117 | try:
118 | key = self._selector.get_key(fd)
119 | except KeyError:
120 | return False
121 |
122 | if new_events := key.events ^ EVENT_WRITE:
123 | del key.data[EVENT_WRITE]
124 | self._selector.modify(fd, new_events, key.data)
125 | else:
126 | self._selector.unregister(fd)
127 |
128 | return True
129 |
130 | def run(self) -> None:
131 | while not self._closed:
132 | for key, events in self._selector.select():
133 | if key.fileobj is self._receive:
134 | try:
135 | while self._receive.recv(4096):
136 | pass
137 | except BlockingIOError:
138 | pass
139 |
140 | continue
141 |
142 | if events & EVENT_READ:
143 | loop, callback = key.data[EVENT_READ]
144 | self.remove_reader(key.fd)
145 | try:
146 | loop.call_soon_threadsafe(callback)
147 | except RuntimeError:
148 | pass # the loop was already closed
149 |
150 | if events & EVENT_WRITE:
151 | loop, callback = key.data[EVENT_WRITE]
152 | self.remove_writer(key.fd)
153 | try:
154 | loop.call_soon_threadsafe(callback)
155 | except RuntimeError:
156 | pass # the loop was already closed
157 |
158 |
159 | def get_selector() -> Selector:
160 | global _selector
161 |
162 | with _selector_lock:
163 | if _selector is None:
164 | _selector = Selector()
165 | _selector.start()
166 |
167 | return _selector
168 |
--------------------------------------------------------------------------------
/src/anyio/_core/_contextmanagers.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from abc import abstractmethod
4 | from contextlib import AbstractAsyncContextManager, AbstractContextManager
5 | from inspect import isasyncgen, iscoroutine, isgenerator
6 | from types import TracebackType
7 | from typing import Protocol, TypeVar, cast, final
8 |
9 | _T_co = TypeVar("_T_co", covariant=True)
10 | _ExitT_co = TypeVar("_ExitT_co", covariant=True, bound="bool | None")
11 |
12 |
13 | class _SupportsCtxMgr(Protocol[_T_co, _ExitT_co]):
14 | def __contextmanager__(self) -> AbstractContextManager[_T_co, _ExitT_co]: ...
15 |
16 |
17 | class _SupportsAsyncCtxMgr(Protocol[_T_co, _ExitT_co]):
18 | def __asynccontextmanager__(
19 | self,
20 | ) -> AbstractAsyncContextManager[_T_co, _ExitT_co]: ...
21 |
22 |
23 | class ContextManagerMixin:
24 | """
25 | Mixin class providing context manager functionality via a generator-based
26 | implementation.
27 |
28 | This class allows you to implement a context manager via :meth:`__contextmanager__`
29 | which should return a generator. The mechanics are meant to mirror those of
30 |     :func:`@contextmanager <contextlib.contextmanager>`.
31 |
32 | .. note:: Classes using this mix-in are not reentrant as context managers, meaning
33 | that once you enter it, you can't re-enter before first exiting it.
34 |
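35 |     Example (a minimal sketch; ``Resource`` is a hypothetical class)::
36 | 
37 |         from contextlib import contextmanager
38 | 
39 |         class Resource(ContextManagerMixin):  # hypothetical class using the mixin
40 |             @contextmanager
41 |             def __contextmanager__(self):
42 |                 print('setting up')
43 |                 try:
44 |                     yield self
45 |                 finally:
46 |                     print('tearing down')
47 | 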
35 | .. seealso:: :doc:`contextmanagers`
36 | """
37 |
38 | __cm: AbstractContextManager[object, bool | None] | None = None
39 |
40 | @final
41 | def __enter__(self: _SupportsCtxMgr[_T_co, bool | None]) -> _T_co:
42 | # Needed for mypy to assume self still has the __cm member
43 | assert isinstance(self, ContextManagerMixin)
44 | if self.__cm is not None:
45 | raise RuntimeError(
46 | f"this {self.__class__.__qualname__} has already been entered"
47 | )
48 |
49 | cm = self.__contextmanager__()
50 | if not isinstance(cm, AbstractContextManager):
51 | if isgenerator(cm):
52 | raise TypeError(
53 | "__contextmanager__() returned a generator object instead of "
54 | "a context manager. Did you forget to add the @contextmanager "
55 | "decorator?"
56 | )
57 |
58 | raise TypeError(
59 | f"__contextmanager__() did not return a context manager object, "
60 | f"but {cm.__class__!r}"
61 | )
62 |
63 | if cm is self:
64 | raise TypeError(
65 | f"{self.__class__.__qualname__}.__contextmanager__() returned "
66 | f"self. Did you forget to add the @contextmanager decorator and a "
67 | f"'yield' statement?"
68 | )
69 |
70 | value = cm.__enter__()
71 | self.__cm = cm
72 | return value
73 |
74 | @final
75 | def __exit__(
76 | self: _SupportsCtxMgr[object, _ExitT_co],
77 | exc_type: type[BaseException] | None,
78 | exc_val: BaseException | None,
79 | exc_tb: TracebackType | None,
80 | ) -> _ExitT_co:
81 | # Needed for mypy to assume self still has the __cm member
82 | assert isinstance(self, ContextManagerMixin)
83 | if self.__cm is None:
84 | raise RuntimeError(
85 | f"this {self.__class__.__qualname__} has not been entered yet"
86 | )
87 |
88 | # Prevent circular references
89 | cm = self.__cm
90 | del self.__cm
91 |
92 | return cast(_ExitT_co, cm.__exit__(exc_type, exc_val, exc_tb))
93 |
94 | @abstractmethod
95 | def __contextmanager__(self) -> AbstractContextManager[object, bool | None]:
96 | """
97 | Implement your context manager logic here.
98 |
99 | This method **must** be decorated with
100 |         :func:`@contextmanager <contextlib.contextmanager>`.
101 |
102 | .. note:: Remember that the ``yield`` will raise any exception raised in the
103 | enclosed context block, so use a ``finally:`` block to clean up resources!
104 |
105 | :return: a context manager object
106 | """
107 |
108 |
109 | class AsyncContextManagerMixin:
110 | """
111 | Mixin class providing async context manager functionality via a generator-based
112 | implementation.
113 |
114 | This class allows you to implement a context manager via
115 | :meth:`__asynccontextmanager__`. The mechanics are meant to mirror those of
116 |     :func:`@asynccontextmanager <contextlib.asynccontextmanager>`.
117 |
118 | .. note:: Classes using this mix-in are not reentrant as context managers, meaning
119 | that once you enter it, you can't re-enter before first exiting it.
120 |
121 | .. seealso:: :doc:`contextmanagers`
122 | """
123 |
124 | __cm: AbstractAsyncContextManager[object, bool | None] | None = None
125 |
126 | @final
127 | async def __aenter__(self: _SupportsAsyncCtxMgr[_T_co, bool | None]) -> _T_co:
128 | # Needed for mypy to assume self still has the __cm member
129 | assert isinstance(self, AsyncContextManagerMixin)
130 | if self.__cm is not None:
131 | raise RuntimeError(
132 | f"this {self.__class__.__qualname__} has already been entered"
133 | )
134 |
135 | cm = self.__asynccontextmanager__()
136 | if not isinstance(cm, AbstractAsyncContextManager):
137 | if isasyncgen(cm):
138 | raise TypeError(
139 | "__asynccontextmanager__() returned an async generator instead of "
140 | "an async context manager. Did you forget to add the "
141 | "@asynccontextmanager decorator?"
142 | )
143 | elif iscoroutine(cm):
144 | cm.close()
145 | raise TypeError(
146 | "__asynccontextmanager__() returned a coroutine object instead of "
147 | "an async context manager. Did you forget to add the "
148 | "@asynccontextmanager decorator and a 'yield' statement?"
149 | )
150 |
151 | raise TypeError(
152 | f"__asynccontextmanager__() did not return an async context manager, "
153 | f"but {cm.__class__!r}"
154 | )
155 |
156 | if cm is self:
157 | raise TypeError(
158 | f"{self.__class__.__qualname__}.__asynccontextmanager__() returned "
159 | f"self. Did you forget to add the @asynccontextmanager decorator and a "
160 | f"'yield' statement?"
161 | )
162 |
163 | value = await cm.__aenter__()
164 | self.__cm = cm
165 | return value
166 |
167 | @final
168 | async def __aexit__(
169 | self: _SupportsAsyncCtxMgr[object, _ExitT_co],
170 | exc_type: type[BaseException] | None,
171 | exc_val: BaseException | None,
172 | exc_tb: TracebackType | None,
173 | ) -> _ExitT_co:
174 | assert isinstance(self, AsyncContextManagerMixin)
175 | if self.__cm is None:
176 | raise RuntimeError(
177 | f"this {self.__class__.__qualname__} has not been entered yet"
178 | )
179 |
180 | # Prevent circular references
181 | cm = self.__cm
182 | del self.__cm
183 |
184 | return cast(_ExitT_co, await cm.__aexit__(exc_type, exc_val, exc_tb))
185 |
186 | @abstractmethod
187 | def __asynccontextmanager__(
188 | self,
189 | ) -> AbstractAsyncContextManager[object, bool | None]:
190 | """
191 | Implement your async context manager logic here.
192 |
193 | This method **must** be decorated with
194 |         :func:`@asynccontextmanager <contextlib.asynccontextmanager>`.
195 |
196 | .. note:: Remember that the ``yield`` will raise any exception raised in the
197 | enclosed context block, so use a ``finally:`` block to clean up resources!
198 |
199 | :return: an async context manager object
200 | """
201 |
--------------------------------------------------------------------------------
/src/anyio/_core/_eventloop.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import math
4 | import sys
5 | import threading
6 | from collections.abc import Awaitable, Callable, Generator
7 | from contextlib import contextmanager
8 | from importlib import import_module
9 | from typing import TYPE_CHECKING, Any, TypeVar
10 |
11 | import sniffio
12 |
13 | if sys.version_info >= (3, 11):
14 | from typing import TypeVarTuple, Unpack
15 | else:
16 | from typing_extensions import TypeVarTuple, Unpack
17 |
18 | if TYPE_CHECKING:
19 | from ..abc import AsyncBackend
20 |
21 | # This must be updated when new backends are introduced
22 | BACKENDS = "asyncio", "trio"
23 |
24 | T_Retval = TypeVar("T_Retval")
25 | PosArgsT = TypeVarTuple("PosArgsT")
26 |
27 | threadlocals = threading.local()
28 | loaded_backends: dict[str, type[AsyncBackend]] = {}
29 |
30 |
31 | def run(
32 | func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
33 | *args: Unpack[PosArgsT],
34 | backend: str = "asyncio",
35 | backend_options: dict[str, Any] | None = None,
36 | ) -> T_Retval:
37 | """
38 | Run the given coroutine function in an asynchronous event loop.
39 |
40 | The current thread must not be already running an event loop.
41 |
42 | :param func: a coroutine function
43 | :param args: positional arguments to ``func``
44 | :param backend: name of the asynchronous event loop implementation – currently
45 | either ``asyncio`` or ``trio``
46 | :param backend_options: keyword arguments to call the backend ``run()``
47 |         implementation with (documented :ref:`here <backend options>`)
48 | :return: the return value of the coroutine function
49 | :raises RuntimeError: if an asynchronous event loop is already running in this
50 | thread
51 | :raises LookupError: if the named backend is not found
52 |
53 | """
54 | try:
55 | asynclib_name = sniffio.current_async_library()
56 | except sniffio.AsyncLibraryNotFoundError:
57 | pass
58 | else:
59 | raise RuntimeError(f"Already running {asynclib_name} in this thread")
60 |
61 | try:
62 | async_backend = get_async_backend(backend)
63 | except ImportError as exc:
64 | raise LookupError(f"No such backend: {backend}") from exc
65 |
66 | token = None
67 | if sniffio.current_async_library_cvar.get(None) is None:
68 | # Since we're in control of the event loop, we can cache the name of the async
69 | # library
70 | token = sniffio.current_async_library_cvar.set(backend)
71 |
72 | try:
73 | backend_options = backend_options or {}
74 | return async_backend.run(func, args, {}, backend_options)
75 | finally:
76 | if token:
77 | sniffio.current_async_library_cvar.reset(token)
78 |
79 |
80 | async def sleep(delay: float) -> None:
81 | """
82 | Pause the current task for the specified duration.
83 |
84 | :param delay: the duration, in seconds
85 |
86 | """
87 | return await get_async_backend().sleep(delay)
88 |
89 |
90 | async def sleep_forever() -> None:
91 | """
92 | Pause the current task until it's cancelled.
93 |
94 | This is a shortcut for ``sleep(math.inf)``.
95 |
96 | .. versionadded:: 3.1
97 |
98 | """
99 | await sleep(math.inf)
100 |
101 |
102 | async def sleep_until(deadline: float) -> None:
103 | """
104 | Pause the current task until the given time.
105 |
106 | :param deadline: the absolute time to wake up at (according to the internal
107 | monotonic clock of the event loop)
108 |
109 | .. versionadded:: 3.1
110 |
111 | """
112 | now = current_time()
113 | await sleep(max(deadline - now, 0))
114 |
115 |
116 | def current_time() -> float:
117 | """
118 | Return the current value of the event loop's internal clock.
119 |
120 | :return: the clock value (seconds)
121 |
122 | """
123 | return get_async_backend().current_time()
124 |
125 |
126 | def get_all_backends() -> tuple[str, ...]:
127 | """Return a tuple of the names of all built-in backends."""
128 | return BACKENDS
129 |
130 |
131 | def get_cancelled_exc_class() -> type[BaseException]:
132 | """Return the current async library's cancellation exception class."""
133 | return get_async_backend().cancelled_exception_class()
134 |
135 |
136 | #
137 | # Private API
138 | #
139 |
140 |
141 | @contextmanager
142 | def claim_worker_thread(
143 | backend_class: type[AsyncBackend], token: object
144 | ) -> Generator[Any, None, None]:
145 | threadlocals.current_async_backend = backend_class
146 | threadlocals.current_token = token
147 | try:
148 | yield
149 | finally:
150 | del threadlocals.current_async_backend
151 | del threadlocals.current_token
152 |
153 |
154 | def get_async_backend(asynclib_name: str | None = None) -> type[AsyncBackend]:
155 | if asynclib_name is None:
156 | asynclib_name = sniffio.current_async_library()
157 |
158 | # We use our own dict instead of sys.modules to get the already imported back-end
159 | # class because the appropriate modules in sys.modules could potentially be only
160 | # partially initialized
161 | try:
162 | return loaded_backends[asynclib_name]
163 | except KeyError:
164 | module = import_module(f"anyio._backends._{asynclib_name}")
165 | loaded_backends[asynclib_name] = module.backend_class
166 | return module.backend_class
167 |
--------------------------------------------------------------------------------
/src/anyio/_core/_exceptions.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import sys
4 | from collections.abc import Generator
5 | from textwrap import dedent
6 | from typing import Any
7 |
8 | if sys.version_info < (3, 11):
9 | from exceptiongroup import BaseExceptionGroup
10 |
11 |
12 | class BrokenResourceError(Exception):
13 | """
14 | Raised when trying to use a resource that has been rendered unusable due to external
15 | causes (e.g. a send stream whose peer has disconnected).
16 | """
17 |
18 |
19 | class BrokenWorkerProcess(Exception):
20 | """
21 |     Raised by :func:`~anyio.to_process.run_sync` if the worker process terminates
22 |     abruptly or otherwise misbehaves.
23 | """
24 |
25 |
26 | class BrokenWorkerIntepreter(Exception):
27 | """
28 |     Raised by :func:`~anyio.to_interpreter.run_sync` if an unexpected exception is
29 | raised in the subinterpreter.
30 | """
31 |
32 | def __init__(self, excinfo: Any):
33 | # This was adapted from concurrent.futures.interpreter.ExecutionFailed
34 | msg = excinfo.formatted
35 | if not msg:
36 | if excinfo.type and excinfo.msg:
37 | msg = f"{excinfo.type.__name__}: {excinfo.msg}"
38 | else:
39 | msg = excinfo.type.__name__ or excinfo.msg
40 |
41 | super().__init__(msg)
42 | self.excinfo = excinfo
43 |
44 | def __str__(self) -> str:
45 | try:
46 | formatted = self.excinfo.errdisplay
47 | except Exception:
48 | return super().__str__()
49 | else:
50 | return dedent(
51 | f"""
52 | {super().__str__()}
53 |
54 | Uncaught in the interpreter:
55 |
56 | {formatted}
57 | """.strip()
58 | )
59 |
60 |
61 | class BusyResourceError(Exception):
62 | """
63 | Raised when two tasks are trying to read from or write to the same resource
64 | concurrently.
65 | """
66 |
67 | def __init__(self, action: str):
68 | super().__init__(f"Another task is already {action} this resource")
69 |
70 |
71 | class ClosedResourceError(Exception):
72 | """Raised when trying to use a resource that has been closed."""
73 |
74 |
75 | class ConnectionFailed(OSError):
76 | """
77 | Raised when a connection attempt fails.
78 |
79 | .. note:: This class inherits from :exc:`OSError` for backwards compatibility.
80 | """
81 |
82 |
83 | def iterate_exceptions(
84 | exception: BaseException,
85 | ) -> Generator[BaseException, None, None]:
86 | if isinstance(exception, BaseExceptionGroup):
87 | for exc in exception.exceptions:
88 | yield from iterate_exceptions(exc)
89 | else:
90 | yield exception
91 |
92 |
93 | class DelimiterNotFound(Exception):
94 | """
95 | Raised during
96 | :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the
97 | maximum number of bytes has been read without the delimiter being found.
98 | """
99 |
100 | def __init__(self, max_bytes: int) -> None:
101 | super().__init__(
102 | f"The delimiter was not found among the first {max_bytes} bytes"
103 | )
104 |
105 |
106 | class EndOfStream(Exception):
107 | """
108 | Raised when trying to read from a stream that has been closed from the other end.
109 | """
110 |
111 |
112 | class IncompleteRead(Exception):
113 | """
114 | Raised during
115 | :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_exactly` or
116 | :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the
117 | connection is closed before the requested amount of bytes has been read.
118 | """
119 |
120 | def __init__(self) -> None:
121 | super().__init__(
122 | "The stream was closed before the read operation could be completed"
123 | )
124 |
125 |
126 | class TypedAttributeLookupError(LookupError):
127 | """
128 | Raised by :meth:`~anyio.TypedAttributeProvider.extra` when the given typed attribute
129 | is not found and no default value has been given.
130 | """
131 |
132 |
133 | class WouldBlock(Exception):
134 | """Raised by ``X_nowait`` functions if ``X()`` would block."""
135 |
--------------------------------------------------------------------------------
/src/anyio/_core/_resources.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from ..abc import AsyncResource
4 | from ._tasks import CancelScope
5 |
6 |
7 | async def aclose_forcefully(resource: AsyncResource) -> None:
8 | """
9 | Close an asynchronous resource in a cancelled scope.
10 |
11 | Doing this closes the resource without waiting on anything.
12 |
13 | :param resource: the resource to close
14 |
15 | """
16 | with CancelScope() as scope:
17 | scope.cancel()
18 | await resource.aclose()
19 |
--------------------------------------------------------------------------------
/src/anyio/_core/_signals.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from collections.abc import AsyncIterator
4 | from contextlib import AbstractContextManager
5 | from signal import Signals
6 |
7 | from ._eventloop import get_async_backend
8 |
9 |
10 | def open_signal_receiver(
11 | *signals: Signals,
12 | ) -> AbstractContextManager[AsyncIterator[Signals]]:
13 | """
14 | Start receiving operating system signals.
15 |
16 | :param signals: signals to receive (e.g. ``signal.SIGINT``)
17 | :return: an asynchronous context manager for an asynchronous iterator which yields
18 | signal numbers
19 |
20 | .. warning:: Windows does not support signals natively so it is best to avoid
21 | relying on this in cross-platform applications.
22 |
23 | .. warning:: On asyncio, this permanently replaces any previous signal handler for
24 | the given signals, as set via :meth:`~asyncio.loop.add_signal_handler`.
25 |
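26 |     Example (a minimal sketch)::
27 | 
28 |         import signal
29 | 
30 |         async def handle_signals() -> None:
31 |             with open_signal_receiver(signal.SIGTERM, signal.SIGINT) as signals:
32 |                 async for signum in signals:
33 |                     print('Received signal', signum)
34 | 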
26 | """
27 | return get_async_backend().open_signal_receiver(*signals)
28 |
--------------------------------------------------------------------------------
/src/anyio/_core/_streams.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import math
4 | from typing import TypeVar
5 | from warnings import warn
6 |
7 | from ..streams.memory import (
8 | MemoryObjectReceiveStream,
9 | MemoryObjectSendStream,
10 | MemoryObjectStreamState,
11 | )
12 |
13 | T_Item = TypeVar("T_Item")
14 |
15 |
16 | class create_memory_object_stream(
17 | tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]],
18 | ):
19 | """
20 | Create a memory object stream.
21 |
22 | The stream's item type can be annotated like
23 | :func:`create_memory_object_stream[T_Item]`.
24 |
25 | :param max_buffer_size: number of items held in the buffer until ``send()`` starts
26 | blocking
27 | :param item_type: old way of marking the streams with the right generic type for
28 | static typing (does nothing on AnyIO 4)
29 |
30 | .. deprecated:: 4.0
31 | Use ``create_memory_object_stream[YourItemType](...)`` instead.
32 | :return: a tuple of (send stream, receive stream)
33 |
34 | """
35 |
36 | def __new__( # type: ignore[misc]
37 | cls, max_buffer_size: float = 0, item_type: object = None
38 | ) -> tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]]:
39 | if max_buffer_size != math.inf and not isinstance(max_buffer_size, int):
40 | raise ValueError("max_buffer_size must be either an integer or math.inf")
41 | if max_buffer_size < 0:
42 | raise ValueError("max_buffer_size cannot be negative")
43 | if item_type is not None:
44 | warn(
45 | "The item_type argument has been deprecated in AnyIO 4.0. "
46 | "Use create_memory_object_stream[YourItemType](...) instead.",
47 | DeprecationWarning,
48 | stacklevel=2,
49 | )
50 |
51 | state = MemoryObjectStreamState[T_Item](max_buffer_size)
52 | return (MemoryObjectSendStream(state), MemoryObjectReceiveStream(state))
53 |
--------------------------------------------------------------------------------
/src/anyio/_core/_tasks.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import math
4 | from collections.abc import Generator
5 | from contextlib import contextmanager
6 | from types import TracebackType
7 |
8 | from ..abc._tasks import TaskGroup, TaskStatus
9 | from ._eventloop import get_async_backend
10 |
11 |
12 | class _IgnoredTaskStatus(TaskStatus[object]):
13 | def started(self, value: object = None) -> None:
14 | pass
15 |
16 |
17 | TASK_STATUS_IGNORED = _IgnoredTaskStatus()
18 |
19 |
20 | class CancelScope:
21 | """
22 | Wraps a unit of work that can be made separately cancellable.
23 |
24 | :param deadline: The time (clock value) when this scope is cancelled automatically
25 | :param shield: ``True`` to shield the cancel scope from external cancellation
26 | """
27 |
28 | def __new__(
29 | cls, *, deadline: float = math.inf, shield: bool = False
30 | ) -> CancelScope:
31 | return get_async_backend().create_cancel_scope(shield=shield, deadline=deadline)
32 |
33 | def cancel(self) -> None:
34 | """Cancel this scope immediately."""
35 | raise NotImplementedError
36 |
37 | @property
38 | def deadline(self) -> float:
39 | """
40 | The time (clock value) when this scope is cancelled automatically.
41 |
42 | Will be ``float('inf')`` if no timeout has been set.
43 |
44 | """
45 | raise NotImplementedError
46 |
47 | @deadline.setter
48 | def deadline(self, value: float) -> None:
49 | raise NotImplementedError
50 |
51 | @property
52 | def cancel_called(self) -> bool:
53 | """``True`` if :meth:`cancel` has been called."""
54 | raise NotImplementedError
55 |
56 | @property
57 | def cancelled_caught(self) -> bool:
58 | """
59 | ``True`` if this scope suppressed a cancellation exception it itself raised.
60 |
61 | This is typically used to check if any work was interrupted, or to see if the
62 | scope was cancelled due to its deadline being reached. The value will, however,
63 | only be ``True`` if the cancellation was triggered by the scope itself (and not
64 | an outer scope).
65 |
66 | """
67 | raise NotImplementedError
68 |
69 | @property
70 | def shield(self) -> bool:
71 | """
72 | ``True`` if this scope is shielded from external cancellation.
73 |
74 | While a scope is shielded, it will not receive cancellations from outside.
75 |
76 | """
77 | raise NotImplementedError
78 |
79 | @shield.setter
80 | def shield(self, value: bool) -> None:
81 | raise NotImplementedError
82 |
83 | def __enter__(self) -> CancelScope:
84 | raise NotImplementedError
85 |
86 | def __exit__(
87 | self,
88 | exc_type: type[BaseException] | None,
89 | exc_val: BaseException | None,
90 | exc_tb: TracebackType | None,
91 | ) -> bool:
92 | raise NotImplementedError
93 |
94 |
95 | @contextmanager
96 | def fail_after(
97 | delay: float | None, shield: bool = False
98 | ) -> Generator[CancelScope, None, None]:
99 | """
100 |     Create a context manager which raises a :class:`TimeoutError` if the enclosed
101 |     block does not finish in time.
102 |
103 | :param delay: maximum allowed time (in seconds) before raising the exception, or
104 | ``None`` to disable the timeout
105 | :param shield: ``True`` to shield the cancel scope from external cancellation
106 | :return: a context manager that yields a cancel scope
107 | :rtype: :class:`~typing.ContextManager`\\[:class:`~anyio.CancelScope`\\]
108 |
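109 |     Example (a sketch; ``do_work()`` is a placeholder for any async operation)::
110 | 
111 |         with fail_after(5):
112 |             await do_work()  # hypothetical async operation
113 | 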
109 | """
110 | current_time = get_async_backend().current_time
111 | deadline = (current_time() + delay) if delay is not None else math.inf
112 | with get_async_backend().create_cancel_scope(
113 | deadline=deadline, shield=shield
114 | ) as cancel_scope:
115 | yield cancel_scope
116 |
117 | if cancel_scope.cancelled_caught and current_time() >= cancel_scope.deadline:
118 | raise TimeoutError
119 |
120 |
121 | def move_on_after(delay: float | None, shield: bool = False) -> CancelScope:
122 | """
123 | Create a cancel scope with a deadline that expires after the given delay.
124 |
125 | :param delay: maximum allowed time (in seconds) before exiting the context block, or
126 | ``None`` to disable the timeout
127 | :param shield: ``True`` to shield the cancel scope from external cancellation
128 | :return: a cancel scope
129 |
130 | """
131 | deadline = (
132 | (get_async_backend().current_time() + delay) if delay is not None else math.inf
133 | )
134 | return get_async_backend().create_cancel_scope(deadline=deadline, shield=shield)
135 |
136 |
137 | def current_effective_deadline() -> float:
138 | """
139 | Return the nearest deadline among all the cancel scopes effective for the current
140 | task.
141 |
142 | :return: a clock value from the event loop's internal clock (or ``float('inf')`` if
143 | there is no deadline in effect, or ``float('-inf')`` if the current scope has
144 | been cancelled)
145 | :rtype: float
146 |
147 | """
148 | return get_async_backend().current_effective_deadline()
149 |
150 |
151 | def create_task_group() -> TaskGroup:
152 | """
153 | Create a task group.
154 |
155 | :return: a task group
156 |
157 | """
158 | return get_async_backend().create_task_group()
159 |
--------------------------------------------------------------------------------
/src/anyio/_core/_testing.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from collections.abc import Awaitable, Generator
4 | from typing import Any, cast
5 |
6 | from ._eventloop import get_async_backend
7 |
8 |
9 | class TaskInfo:
10 | """
11 | Represents an asynchronous task.
12 |
13 | :ivar int id: the unique identifier of the task
14 | :ivar parent_id: the identifier of the parent task, if any
15 | :vartype parent_id: Optional[int]
16 | :ivar str name: the description of the task (if any)
17 | :ivar ~collections.abc.Coroutine coro: the coroutine object of the task
18 | """
19 |
20 | __slots__ = "_name", "id", "parent_id", "name", "coro"
21 |
22 | def __init__(
23 | self,
24 | id: int,
25 | parent_id: int | None,
26 | name: str | None,
27 | coro: Generator[Any, Any, Any] | Awaitable[Any],
28 | ):
29 | func = get_current_task
30 | self._name = f"{func.__module__}.{func.__qualname__}"
31 | self.id: int = id
32 | self.parent_id: int | None = parent_id
33 | self.name: str | None = name
34 | self.coro: Generator[Any, Any, Any] | Awaitable[Any] = coro
35 |
36 | def __eq__(self, other: object) -> bool:
37 | if isinstance(other, TaskInfo):
38 | return self.id == other.id
39 |
40 | return NotImplemented
41 |
42 | def __hash__(self) -> int:
43 | return hash(self.id)
44 |
45 | def __repr__(self) -> str:
46 | return f"{self.__class__.__name__}(id={self.id!r}, name={self.name!r})"
47 |
48 | def has_pending_cancellation(self) -> bool:
49 | """
50 | Return ``True`` if the task has a cancellation pending, ``False`` otherwise.
51 |
52 | """
53 | return False
54 |
55 |
56 | def get_current_task() -> TaskInfo:
57 | """
58 | Return the current task.
59 |
60 | :return: a representation of the current task
61 |
62 | """
63 | return get_async_backend().get_current_task()
64 |
65 |
66 | def get_running_tasks() -> list[TaskInfo]:
67 | """
68 | Return a list of running tasks in the current event loop.
69 |
70 | :return: a list of task info objects
71 |
72 | """
73 | return cast("list[TaskInfo]", get_async_backend().get_running_tasks())
74 |
75 |
76 | async def wait_all_tasks_blocked() -> None:
77 | """Wait until all other tasks are waiting for something."""
78 | await get_async_backend().wait_all_tasks_blocked()
79 |
--------------------------------------------------------------------------------
/src/anyio/_core/_typedattr.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from collections.abc import Callable, Mapping
4 | from typing import Any, TypeVar, final, overload
5 |
6 | from ._exceptions import TypedAttributeLookupError
7 |
8 | T_Attr = TypeVar("T_Attr")
9 | T_Default = TypeVar("T_Default")
10 | undefined = object()
11 |
12 |
13 | def typed_attribute() -> Any:
14 | """Return a unique object, used to mark typed attributes."""
15 | return object()
16 |
17 |
18 | class TypedAttributeSet:
19 | """
20 | Superclass for typed attribute collections.
21 |
22 | Checks that every public attribute of every subclass has a type annotation.
23 | """
24 |
25 | def __init_subclass__(cls) -> None:
26 | annotations: dict[str, Any] = getattr(cls, "__annotations__", {})
27 | for attrname in dir(cls):
28 | if not attrname.startswith("_") and attrname not in annotations:
29 | raise TypeError(
30 | f"Attribute {attrname!r} is missing its type annotation"
31 | )
32 |
33 | super().__init_subclass__()
34 |
35 |
36 | class TypedAttributeProvider:
37 | """Base class for classes that wish to provide typed extra attributes."""
38 |
39 | @property
40 | def extra_attributes(self) -> Mapping[T_Attr, Callable[[], T_Attr]]:
41 | """
42 | A mapping of the extra attributes to callables that return the corresponding
43 | values.
44 |
45 | If the provider wraps another provider, the attributes from that wrapper should
46 | also be included in the returned mapping (but the wrapper may override the
47 | callables from the wrapped instance).
48 |
49 | """
50 | return {}
51 |
52 | @overload
53 | def extra(self, attribute: T_Attr) -> T_Attr: ...
54 |
55 | @overload
56 | def extra(self, attribute: T_Attr, default: T_Default) -> T_Attr | T_Default: ...
57 |
58 | @final
59 | def extra(self, attribute: Any, default: object = undefined) -> object:
60 | """
61 | extra(attribute, default=undefined)
62 |
63 | Return the value of the given typed extra attribute.
64 |
65 | :param attribute: the attribute (member of a :class:`~TypedAttributeSet`) to
66 | look for
67 | :param default: the value that should be returned if no value is found for the
68 | attribute
69 | :raises ~anyio.TypedAttributeLookupError: if the search failed and no default
70 | value was given
71 |
72 | """
73 | try:
74 | getter = self.extra_attributes[attribute]
75 | except KeyError:
76 | if default is undefined:
77 | raise TypedAttributeLookupError("Attribute not found") from None
78 | else:
79 | return default
80 |
81 | return getter()
82 |
--------------------------------------------------------------------------------
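A minimal sketch of how the pieces above fit together; DemoAttributes and DemoProvider are hypothetical classes, not part of AnyIO:

    from collections.abc import Callable, Mapping
    from typing import Any

    from anyio import TypedAttributeProvider, TypedAttributeSet, typed_attribute

    class DemoAttributes(TypedAttributeSet):
        #: a hypothetical string-valued attribute (the annotation is mandatory)
        label: str = typed_attribute()

    class DemoProvider(TypedAttributeProvider):
        @property
        def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
            return {DemoAttributes.label: lambda: "hello"}

    provider = DemoProvider()
    print(provider.extra(DemoAttributes.label))          # 'hello'
    print(provider.extra(typed_attribute(), default=1))  # 1 (unknown attribute)
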
/src/anyio/abc/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from ._eventloop import AsyncBackend as AsyncBackend
4 | from ._resources import AsyncResource as AsyncResource
5 | from ._sockets import ConnectedUDPSocket as ConnectedUDPSocket
6 | from ._sockets import ConnectedUNIXDatagramSocket as ConnectedUNIXDatagramSocket
7 | from ._sockets import IPAddressType as IPAddressType
8 | from ._sockets import IPSockAddrType as IPSockAddrType
9 | from ._sockets import SocketAttribute as SocketAttribute
10 | from ._sockets import SocketListener as SocketListener
11 | from ._sockets import SocketStream as SocketStream
12 | from ._sockets import UDPPacketType as UDPPacketType
13 | from ._sockets import UDPSocket as UDPSocket
14 | from ._sockets import UNIXDatagramPacketType as UNIXDatagramPacketType
15 | from ._sockets import UNIXDatagramSocket as UNIXDatagramSocket
16 | from ._sockets import UNIXSocketStream as UNIXSocketStream
17 | from ._streams import AnyByteReceiveStream as AnyByteReceiveStream
18 | from ._streams import AnyByteSendStream as AnyByteSendStream
19 | from ._streams import AnyByteStream as AnyByteStream
20 | from ._streams import AnyByteStreamConnectable as AnyByteStreamConnectable
21 | from ._streams import AnyUnreliableByteReceiveStream as AnyUnreliableByteReceiveStream
22 | from ._streams import AnyUnreliableByteSendStream as AnyUnreliableByteSendStream
23 | from ._streams import AnyUnreliableByteStream as AnyUnreliableByteStream
24 | from ._streams import ByteReceiveStream as ByteReceiveStream
25 | from ._streams import ByteSendStream as ByteSendStream
26 | from ._streams import ByteStream as ByteStream
27 | from ._streams import ByteStreamConnectable as ByteStreamConnectable
28 | from ._streams import Listener as Listener
29 | from ._streams import ObjectReceiveStream as ObjectReceiveStream
30 | from ._streams import ObjectSendStream as ObjectSendStream
31 | from ._streams import ObjectStream as ObjectStream
32 | from ._streams import ObjectStreamConnectable as ObjectStreamConnectable
33 | from ._streams import UnreliableObjectReceiveStream as UnreliableObjectReceiveStream
34 | from ._streams import UnreliableObjectSendStream as UnreliableObjectSendStream
35 | from ._streams import UnreliableObjectStream as UnreliableObjectStream
36 | from ._subprocesses import Process as Process
37 | from ._tasks import TaskGroup as TaskGroup
38 | from ._tasks import TaskStatus as TaskStatus
39 | from ._testing import TestRunner as TestRunner
40 |
41 | # Re-exported here, for backwards compatibility
42 | # isort: off
43 | from .._core._synchronization import (
44 | CapacityLimiter as CapacityLimiter,
45 | Condition as Condition,
46 | Event as Event,
47 | Lock as Lock,
48 | Semaphore as Semaphore,
49 | )
50 | from .._core._tasks import CancelScope as CancelScope
51 | from ..from_thread import BlockingPortal as BlockingPortal
52 |
53 | # Re-export imports so they look like they live directly in this package
54 | for __value in list(locals().values()):
55 | if getattr(__value, "__module__", "").startswith("anyio.abc."):
56 | __value.__module__ = __name__
57 |
58 | del __value
59 |
--------------------------------------------------------------------------------
/src/anyio/abc/_resources.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from abc import ABCMeta, abstractmethod
4 | from types import TracebackType
5 | from typing import TypeVar
6 |
7 | T = TypeVar("T")
8 |
9 |
10 | class AsyncResource(metaclass=ABCMeta):
11 | """
12 | Abstract base class for all closeable asynchronous resources.
13 |
14 | Works as an asynchronous context manager which returns the instance itself on enter,
15 | and calls :meth:`aclose` on exit.
16 | """
17 |
18 | __slots__ = ()
19 |
20 | async def __aenter__(self: T) -> T:
21 | return self
22 |
23 | async def __aexit__(
24 | self,
25 | exc_type: type[BaseException] | None,
26 | exc_val: BaseException | None,
27 | exc_tb: TracebackType | None,
28 | ) -> None:
29 | await self.aclose()
30 |
31 | @abstractmethod
32 | async def aclose(self) -> None:
33 | """Close the resource."""
34 |
--------------------------------------------------------------------------------
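A minimal sketch of subclassing AsyncResource; ClosableThing is a hypothetical class. Implementing aclose() is all that is needed to get the async context manager behavior:

    import anyio
    from anyio.abc import AsyncResource

    class ClosableThing(AsyncResource):
        async def aclose(self) -> None:
            print("closed")

    async def main() -> None:
        async with ClosableThing() as thing:  # __aenter__ returns the instance
            print(type(thing).__name__)
        # aclose() has been awaited at this point

    anyio.run(main)
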
/src/anyio/abc/_sockets.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import socket
4 | import sys
5 | from abc import abstractmethod
6 | from collections.abc import Callable, Collection, Mapping
7 | from contextlib import AsyncExitStack
8 | from io import IOBase
9 | from ipaddress import IPv4Address, IPv6Address
10 | from socket import AddressFamily
11 | from typing import Any, TypeVar, Union
12 |
13 | from .._core._typedattr import (
14 | TypedAttributeProvider,
15 | TypedAttributeSet,
16 | typed_attribute,
17 | )
18 | from ._streams import ByteStream, Listener, UnreliableObjectStream
19 | from ._tasks import TaskGroup
20 |
21 | if sys.version_info >= (3, 10):
22 | from typing import TypeAlias
23 | else:
24 | from typing_extensions import TypeAlias
25 |
26 | IPAddressType: TypeAlias = Union[str, IPv4Address, IPv6Address]
27 | IPSockAddrType: TypeAlias = tuple[str, int]
28 | SockAddrType: TypeAlias = Union[IPSockAddrType, str]
29 | UDPPacketType: TypeAlias = tuple[bytes, IPSockAddrType]
30 | UNIXDatagramPacketType: TypeAlias = tuple[bytes, str]
31 | T_Retval = TypeVar("T_Retval")
32 |
33 |
34 | class SocketAttribute(TypedAttributeSet):
35 | #: the address family of the underlying socket
36 | family: AddressFamily = typed_attribute()
37 | #: the local socket address of the underlying socket
38 | local_address: SockAddrType = typed_attribute()
39 | #: for IP addresses, the local port the underlying socket is bound to
40 | local_port: int = typed_attribute()
41 | #: the underlying stdlib socket object
42 | raw_socket: socket.socket = typed_attribute()
43 | #: the remote address the underlying socket is connected to
44 | remote_address: SockAddrType = typed_attribute()
45 | #: for IP addresses, the remote port the underlying socket is connected to
46 | remote_port: int = typed_attribute()
47 |
48 |
49 | class _SocketProvider(TypedAttributeProvider):
50 | @property
51 | def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
52 | from .._core._sockets import convert_ipv6_sockaddr as convert
53 |
54 | attributes: dict[Any, Callable[[], Any]] = {
55 | SocketAttribute.family: lambda: self._raw_socket.family,
56 | SocketAttribute.local_address: lambda: convert(
57 | self._raw_socket.getsockname()
58 | ),
59 | SocketAttribute.raw_socket: lambda: self._raw_socket,
60 | }
61 | try:
62 | peername: tuple[str, int] | None = convert(self._raw_socket.getpeername())
63 | except OSError:
64 | peername = None
65 |
66 | # Provide the remote address for connected sockets
67 | if peername is not None:
68 | attributes[SocketAttribute.remote_address] = lambda: peername
69 |
70 | # Provide local and remote ports for IP based sockets
71 | if self._raw_socket.family in (AddressFamily.AF_INET, AddressFamily.AF_INET6):
72 | attributes[SocketAttribute.local_port] = (
73 | lambda: self._raw_socket.getsockname()[1]
74 | )
75 | if peername is not None:
76 | remote_port = peername[1]
77 | attributes[SocketAttribute.remote_port] = lambda: remote_port
78 |
79 | return attributes
80 |
81 | @property
82 | @abstractmethod
83 | def _raw_socket(self) -> socket.socket:
84 | pass
85 |
86 |
87 | class SocketStream(ByteStream, _SocketProvider):
88 | """
89 | Transports bytes over a socket.
90 |
91 | Supports all relevant extra attributes from :class:`~SocketAttribute`.
92 | """
93 |
94 |
95 | class UNIXSocketStream(SocketStream):
96 | @abstractmethod
97 | async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
98 | """
99 | Send file descriptors along with a message to the peer.
100 |
101 | :param message: a non-empty bytestring
102 | :param fds: a collection of files (either numeric file descriptors or open file
103 | or socket objects)
104 | """
105 |
106 | @abstractmethod
107 | async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
108 | """
109 | Receive file descriptors along with a message from the peer.
110 |
111 | :param msglen: length of the message to expect from the peer
112 | :param maxfds: maximum number of file descriptors to expect from the peer
113 | :return: a tuple of (message, file descriptors)
114 | """
115 |
116 |
117 | class SocketListener(Listener[SocketStream], _SocketProvider):
118 | """
119 |     Listens for incoming socket connections.
120 |
121 | Supports all relevant extra attributes from :class:`~SocketAttribute`.
122 | """
123 |
124 | @abstractmethod
125 | async def accept(self) -> SocketStream:
126 | """Accept an incoming connection."""
127 |
128 | async def serve(
129 | self,
130 | handler: Callable[[SocketStream], Any],
131 | task_group: TaskGroup | None = None,
132 | ) -> None:
133 | from .. import create_task_group
134 |
135 | async with AsyncExitStack() as stack:
136 | if task_group is None:
137 | task_group = await stack.enter_async_context(create_task_group())
138 |
139 | while True:
140 | stream = await self.accept()
141 | task_group.start_soon(handler, stream)
142 |
143 |
144 | class UDPSocket(UnreliableObjectStream[UDPPacketType], _SocketProvider):
145 | """
146 | Represents an unconnected UDP socket.
147 |
148 | Supports all relevant extra attributes from :class:`~SocketAttribute`.
149 | """
150 |
151 | async def sendto(self, data: bytes, host: str, port: int) -> None:
152 | """
153 |         Alias for :meth:`~.UnreliableObjectSendStream.send` with ``(data, (host, port))``.
154 |
155 | """
156 | return await self.send((data, (host, port)))
157 |
158 |
159 | class ConnectedUDPSocket(UnreliableObjectStream[bytes], _SocketProvider):
160 | """
161 |     Represents a connected UDP socket.
162 |
163 | Supports all relevant extra attributes from :class:`~SocketAttribute`.
164 | """
165 |
166 |
167 | class UNIXDatagramSocket(
168 | UnreliableObjectStream[UNIXDatagramPacketType], _SocketProvider
169 | ):
170 | """
171 | Represents an unconnected Unix datagram socket.
172 |
173 | Supports all relevant extra attributes from :class:`~SocketAttribute`.
174 | """
175 |
176 | async def sendto(self, data: bytes, path: str) -> None:
177 | """Alias for :meth:`~.UnreliableObjectSendStream.send` ((data, path))."""
178 | return await self.send((data, path))
179 |
180 |
181 | class ConnectedUNIXDatagramSocket(UnreliableObjectStream[bytes], _SocketProvider):
182 | """
183 | Represents a connected Unix datagram socket.
184 |
185 | Supports all relevant extra attributes from :class:`~SocketAttribute`.
186 | """
187 |
--------------------------------------------------------------------------------
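A hedged sketch of reading the SocketAttribute extras above from a connected stream; the host and port are placeholders and the lookup falls back to a default where the attribute may be absent:

    import anyio
    from anyio.abc import SocketAttribute

    async def main() -> None:
        async with await anyio.connect_tcp("example.org", 80) as stream:
            # (host, port) tuple of the peer, provided for connected sockets
            print(stream.extra(SocketAttribute.remote_address))
            print(stream.extra(SocketAttribute.remote_port, None))

    anyio.run(main)
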
/src/anyio/abc/_subprocesses.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from abc import abstractmethod
4 | from signal import Signals
5 |
6 | from ._resources import AsyncResource
7 | from ._streams import ByteReceiveStream, ByteSendStream
8 |
9 |
10 | class Process(AsyncResource):
11 | """An asynchronous version of :class:`subprocess.Popen`."""
12 |
13 | @abstractmethod
14 | async def wait(self) -> int:
15 | """
16 | Wait until the process exits.
17 |
18 | :return: the exit code of the process
19 | """
20 |
21 | @abstractmethod
22 | def terminate(self) -> None:
23 | """
24 | Terminates the process, gracefully if possible.
25 |
26 | On Windows, this calls ``TerminateProcess()``.
27 | On POSIX systems, this sends ``SIGTERM`` to the process.
28 |
29 | .. seealso:: :meth:`subprocess.Popen.terminate`
30 | """
31 |
32 | @abstractmethod
33 | def kill(self) -> None:
34 | """
35 | Kills the process.
36 |
37 | On Windows, this calls ``TerminateProcess()``.
38 | On POSIX systems, this sends ``SIGKILL`` to the process.
39 |
40 | .. seealso:: :meth:`subprocess.Popen.kill`
41 | """
42 |
43 | @abstractmethod
44 | def send_signal(self, signal: Signals) -> None:
45 | """
46 | Send a signal to the subprocess.
47 |
48 | .. seealso:: :meth:`subprocess.Popen.send_signal`
49 |
50 | :param signal: the signal number (e.g. :data:`signal.SIGHUP`)
51 | """
52 |
53 | @property
54 | @abstractmethod
55 | def pid(self) -> int:
56 | """The process ID of the process."""
57 |
58 | @property
59 | @abstractmethod
60 | def returncode(self) -> int | None:
61 | """
62 | The return code of the process. If the process has not yet terminated, this will
63 | be ``None``.
64 | """
65 |
66 | @property
67 | @abstractmethod
68 | def stdin(self) -> ByteSendStream | None:
69 | """The stream for the standard input of the process."""
70 |
71 | @property
72 | @abstractmethod
73 | def stdout(self) -> ByteReceiveStream | None:
74 | """The stream for the standard output of the process."""
75 |
76 | @property
77 | @abstractmethod
78 | def stderr(self) -> ByteReceiveStream | None:
79 | """The stream for the standard error output of the process."""
80 |
--------------------------------------------------------------------------------
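A short sketch of the Process API above via anyio.open_process (which pipes stdin/stdout/stderr by default); assumes a POSIX ``echo`` binary on PATH:

    import anyio

    async def main() -> None:
        async with await anyio.open_process(["echo", "hello"]) as process:
            assert process.stdout is not None  # piped by default
            print(await process.stdout.receive())  # b'hello\n'
            print(await process.wait())            # 0

    anyio.run(main)
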
/src/anyio/abc/_tasks.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import sys
4 | from abc import ABCMeta, abstractmethod
5 | from collections.abc import Awaitable, Callable
6 | from types import TracebackType
7 | from typing import TYPE_CHECKING, Any, Protocol, TypeVar, overload
8 |
9 | if sys.version_info >= (3, 11):
10 | from typing import TypeVarTuple, Unpack
11 | else:
12 | from typing_extensions import TypeVarTuple, Unpack
13 |
14 | if TYPE_CHECKING:
15 | from .._core._tasks import CancelScope
16 |
17 | T_Retval = TypeVar("T_Retval")
18 | T_contra = TypeVar("T_contra", contravariant=True)
19 | PosArgsT = TypeVarTuple("PosArgsT")
20 |
21 |
22 | class TaskStatus(Protocol[T_contra]):
23 | @overload
24 | def started(self: TaskStatus[None]) -> None: ...
25 |
26 | @overload
27 | def started(self, value: T_contra) -> None: ...
28 |
29 | def started(self, value: T_contra | None = None) -> None:
30 | """
31 | Signal that the task has started.
32 |
33 | :param value: object passed back to the starter of the task
34 | """
35 |
36 |
37 | class TaskGroup(metaclass=ABCMeta):
38 | """
39 | Groups several asynchronous tasks together.
40 |
41 | :ivar cancel_scope: the cancel scope inherited by all child tasks
42 | :vartype cancel_scope: CancelScope
43 |
44 | .. note:: On asyncio, support for eager task factories is considered to be
45 | **experimental**. In particular, they don't follow the usual semantics of new
46 | tasks being scheduled on the next iteration of the event loop, and may thus
47 | cause unexpected behavior in code that wasn't written with such semantics in
48 | mind.
49 | """
50 |
51 | cancel_scope: CancelScope
52 |
53 | @abstractmethod
54 | def start_soon(
55 | self,
56 | func: Callable[[Unpack[PosArgsT]], Awaitable[Any]],
57 | *args: Unpack[PosArgsT],
58 | name: object = None,
59 | ) -> None:
60 | """
61 | Start a new task in this task group.
62 |
63 | :param func: a coroutine function
64 | :param args: positional arguments to call the function with
65 | :param name: name of the task, for the purposes of introspection and debugging
66 |
67 | .. versionadded:: 3.0
68 | """
69 |
70 | @abstractmethod
71 | async def start(
72 | self,
73 | func: Callable[..., Awaitable[Any]],
74 | *args: object,
75 | name: object = None,
76 | ) -> Any:
77 | """
78 | Start a new task and wait until it signals for readiness.
79 |
80 | :param func: a coroutine function
81 | :param args: positional arguments to call the function with
82 | :param name: name of the task, for the purposes of introspection and debugging
83 | :return: the value passed to ``task_status.started()``
84 | :raises RuntimeError: if the task finishes without calling
85 | ``task_status.started()``
86 |
87 | .. versionadded:: 3.0
88 | """
89 |
90 | @abstractmethod
91 | async def __aenter__(self) -> TaskGroup:
92 | """Enter the task group context and allow starting new tasks."""
93 |
94 | @abstractmethod
95 | async def __aexit__(
96 | self,
97 | exc_type: type[BaseException] | None,
98 | exc_val: BaseException | None,
99 | exc_tb: TracebackType | None,
100 | ) -> bool:
101 | """Exit the task group context waiting for all tasks to finish."""
102 |
--------------------------------------------------------------------------------
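A minimal sketch of TaskGroup.start() together with TaskStatus.started(); the service function and the port value are illustrative:

    import anyio
    from anyio.abc import TaskStatus

    async def service(
        *, task_status: TaskStatus[int] = anyio.TASK_STATUS_IGNORED
    ) -> None:
        port = 8000  # pretend a listener was bound here
        task_status.started(port)  # unblocks the tg.start() call below
        await anyio.sleep_forever()

    async def main() -> None:
        async with anyio.create_task_group() as tg:
            port = await tg.start(service)
            print(f"service ready on port {port}")
            tg.cancel_scope.cancel()

    anyio.run(main)
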
/src/anyio/abc/_testing.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import types
4 | from abc import ABCMeta, abstractmethod
5 | from collections.abc import AsyncGenerator, Callable, Coroutine, Iterable
6 | from typing import Any, TypeVar
7 |
8 | _T = TypeVar("_T")
9 |
10 |
11 | class TestRunner(metaclass=ABCMeta):
12 | """
13 | Encapsulates a running event loop. Every call made through this object will use the
14 | same event loop.
15 | """
16 |
17 | def __enter__(self) -> TestRunner:
18 | return self
19 |
20 | @abstractmethod
21 | def __exit__(
22 | self,
23 | exc_type: type[BaseException] | None,
24 | exc_val: BaseException | None,
25 | exc_tb: types.TracebackType | None,
26 | ) -> bool | None: ...
27 |
28 | @abstractmethod
29 | def run_asyncgen_fixture(
30 | self,
31 | fixture_func: Callable[..., AsyncGenerator[_T, Any]],
32 | kwargs: dict[str, Any],
33 | ) -> Iterable[_T]:
34 | """
35 | Run an async generator fixture.
36 |
37 | :param fixture_func: the fixture function
38 | :param kwargs: keyword arguments to call the fixture function with
39 | :return: an iterator yielding the value yielded from the async generator
40 | """
41 |
42 | @abstractmethod
43 | def run_fixture(
44 | self,
45 | fixture_func: Callable[..., Coroutine[Any, Any, _T]],
46 | kwargs: dict[str, Any],
47 | ) -> _T:
48 | """
49 | Run an async fixture.
50 |
51 | :param fixture_func: the fixture function
52 | :param kwargs: keyword arguments to call the fixture function with
53 | :return: the return value of the fixture function
54 | """
55 |
56 | @abstractmethod
57 | def run_test(
58 | self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]
59 | ) -> None:
60 | """
61 | Run an async test function.
62 |
63 | :param test_func: the test function
64 | :param kwargs: keyword arguments to call the test function with
65 | """
66 |
--------------------------------------------------------------------------------
/src/anyio/lowlevel.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import enum
4 | from dataclasses import dataclass
5 | from typing import Any, Generic, Literal, TypeVar, overload
6 | from weakref import WeakKeyDictionary
7 |
8 | from ._core._eventloop import get_async_backend
9 |
10 | T = TypeVar("T")
11 | D = TypeVar("D")
12 |
13 |
14 | async def checkpoint() -> None:
15 | """
16 | Check for cancellation and allow the scheduler to switch to another task.
17 |
18 | Equivalent to (but more efficient than)::
19 |
20 | await checkpoint_if_cancelled()
21 | await cancel_shielded_checkpoint()
22 |
23 |
24 | .. versionadded:: 3.0
25 |
26 | """
27 | await get_async_backend().checkpoint()
28 |
29 |
30 | async def checkpoint_if_cancelled() -> None:
31 | """
32 | Enter a checkpoint if the enclosing cancel scope has been cancelled.
33 |
34 | This does not allow the scheduler to switch to a different task.
35 |
36 | .. versionadded:: 3.0
37 |
38 | """
39 | await get_async_backend().checkpoint_if_cancelled()
40 |
41 |
42 | async def cancel_shielded_checkpoint() -> None:
43 | """
44 | Allow the scheduler to switch to another task but without checking for cancellation.
45 |
46 | Equivalent to (but potentially more efficient than)::
47 |
48 | with CancelScope(shield=True):
49 | await checkpoint()
50 |
51 |
52 | .. versionadded:: 3.0
53 |
54 | """
55 | await get_async_backend().cancel_shielded_checkpoint()
56 |
57 |
58 | def current_token() -> object:
59 | """
60 |     Return a backend-specific token object that can be used to get back to the event
61 | loop.
62 |
63 | """
64 | return get_async_backend().current_token()
65 |
66 |
67 | _run_vars: WeakKeyDictionary[Any, dict[str, Any]] = WeakKeyDictionary()
68 | _token_wrappers: dict[Any, _TokenWrapper] = {}
69 |
70 |
71 | @dataclass(frozen=True)
72 | class _TokenWrapper:
73 | __slots__ = "_token", "__weakref__"
74 | _token: object
75 |
76 |
77 | class _NoValueSet(enum.Enum):
78 | NO_VALUE_SET = enum.auto()
79 |
80 |
81 | class RunvarToken(Generic[T]):
82 | __slots__ = "_var", "_value", "_redeemed"
83 |
84 | def __init__(self, var: RunVar[T], value: T | Literal[_NoValueSet.NO_VALUE_SET]):
85 | self._var = var
86 | self._value: T | Literal[_NoValueSet.NO_VALUE_SET] = value
87 | self._redeemed = False
88 |
89 |
90 | class RunVar(Generic[T]):
91 | """
92 | Like a :class:`~contextvars.ContextVar`, except scoped to the running event loop.
93 | """
94 |
95 | __slots__ = "_name", "_default"
96 |
97 | NO_VALUE_SET: Literal[_NoValueSet.NO_VALUE_SET] = _NoValueSet.NO_VALUE_SET
98 |
99 | _token_wrappers: set[_TokenWrapper] = set()
100 |
101 | def __init__(
102 | self, name: str, default: T | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET
103 | ):
104 | self._name = name
105 | self._default = default
106 |
107 | @property
108 | def _current_vars(self) -> dict[str, T]:
109 | token = current_token()
110 | try:
111 | return _run_vars[token]
112 | except KeyError:
113 | run_vars = _run_vars[token] = {}
114 | return run_vars
115 |
116 | @overload
117 | def get(self, default: D) -> T | D: ...
118 |
119 | @overload
120 | def get(self) -> T: ...
121 |
122 | def get(
123 | self, default: D | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET
124 | ) -> T | D:
125 | try:
126 | return self._current_vars[self._name]
127 | except KeyError:
128 | if default is not RunVar.NO_VALUE_SET:
129 | return default
130 | elif self._default is not RunVar.NO_VALUE_SET:
131 | return self._default
132 |
133 | raise LookupError(
134 | f'Run variable "{self._name}" has no value and no default set'
135 | )
136 |
137 | def set(self, value: T) -> RunvarToken[T]:
138 | current_vars = self._current_vars
139 | token = RunvarToken(self, current_vars.get(self._name, RunVar.NO_VALUE_SET))
140 | current_vars[self._name] = value
141 | return token
142 |
143 | def reset(self, token: RunvarToken[T]) -> None:
144 | if token._var is not self:
145 | raise ValueError("This token does not belong to this RunVar")
146 |
147 | if token._redeemed:
148 | raise ValueError("This token has already been used")
149 |
150 | if token._value is _NoValueSet.NO_VALUE_SET:
151 | try:
152 | del self._current_vars[self._name]
153 | except KeyError:
154 | pass
155 | else:
156 | self._current_vars[self._name] = token._value
157 |
158 | token._redeemed = True
159 |
160 | def __repr__(self) -> str:
161 | return f""
162 |
--------------------------------------------------------------------------------
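A short sketch of RunVar from the module above; the variable name "request_count" is illustrative. Unlike a ContextVar, the value is shared by all tasks on the same event loop:

    import anyio
    from anyio.lowlevel import RunVar

    request_count = RunVar[int]("request_count", default=0)

    async def main() -> None:
        token = request_count.set(request_count.get() + 1)
        print(request_count.get())  # 1, visible anywhere on this event loop
        request_count.reset(token)
        print(request_count.get())  # back to the default, 0

    anyio.run(main)
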
/src/anyio/py.typed:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/agronholm/anyio/561d81270a12f7c6bbafb5bc5fad99a2a13f96be/src/anyio/py.typed
--------------------------------------------------------------------------------
/src/anyio/streams/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/agronholm/anyio/561d81270a12f7c6bbafb5bc5fad99a2a13f96be/src/anyio/streams/__init__.py
--------------------------------------------------------------------------------
/src/anyio/streams/buffered.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import sys
4 | from collections.abc import Callable, Mapping
5 | from dataclasses import dataclass, field
6 | from typing import Any
7 |
8 | from .. import ClosedResourceError, DelimiterNotFound, EndOfStream, IncompleteRead
9 | from ..abc import (
10 | AnyByteReceiveStream,
11 | AnyByteStream,
12 | AnyByteStreamConnectable,
13 | ByteReceiveStream,
14 | ByteStream,
15 | ByteStreamConnectable,
16 | )
17 |
18 | if sys.version_info >= (3, 12):
19 | from typing import override
20 | else:
21 | from typing_extensions import override
22 |
23 |
24 | @dataclass(eq=False)
25 | class BufferedByteReceiveStream(ByteReceiveStream):
26 | """
27 |     Wraps any bytes-based receive stream and buffers incoming data, adding the
28 |     ability to receive an exact number of bytes or to read up to a delimiter.
29 | """
30 |
31 | receive_stream: AnyByteReceiveStream
32 | _buffer: bytearray = field(init=False, default_factory=bytearray)
33 | _closed: bool = field(init=False, default=False)
34 |
35 | async def aclose(self) -> None:
36 | await self.receive_stream.aclose()
37 | self._closed = True
38 |
39 | @property
40 | def buffer(self) -> bytes:
41 | """The bytes currently in the buffer."""
42 | return bytes(self._buffer)
43 |
44 | @property
45 | def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
46 | return self.receive_stream.extra_attributes
47 |
48 | async def receive(self, max_bytes: int = 65536) -> bytes:
49 | if self._closed:
50 | raise ClosedResourceError
51 |
52 | if self._buffer:
53 | chunk = bytes(self._buffer[:max_bytes])
54 | del self._buffer[:max_bytes]
55 | return chunk
56 | elif isinstance(self.receive_stream, ByteReceiveStream):
57 | return await self.receive_stream.receive(max_bytes)
58 | else:
59 | # With a bytes-oriented object stream, we need to handle any surplus bytes
60 | # we get from the receive() call
61 | chunk = await self.receive_stream.receive()
62 | if len(chunk) > max_bytes:
63 | # Save the surplus bytes in the buffer
64 | self._buffer.extend(chunk[max_bytes:])
65 | return chunk[:max_bytes]
66 | else:
67 | return chunk
68 |
69 | async def receive_exactly(self, nbytes: int) -> bytes:
70 | """
71 | Read exactly the given amount of bytes from the stream.
72 |
73 | :param nbytes: the number of bytes to read
74 | :return: the bytes read
75 | :raises ~anyio.IncompleteRead: if the stream was closed before the requested
76 | amount of bytes could be read from the stream
77 |
78 | """
79 | while True:
80 | remaining = nbytes - len(self._buffer)
81 | if remaining <= 0:
82 | retval = self._buffer[:nbytes]
83 | del self._buffer[:nbytes]
84 | return bytes(retval)
85 |
86 | try:
87 | if isinstance(self.receive_stream, ByteReceiveStream):
88 | chunk = await self.receive_stream.receive(remaining)
89 | else:
90 | chunk = await self.receive_stream.receive()
91 | except EndOfStream as exc:
92 | raise IncompleteRead from exc
93 |
94 | self._buffer.extend(chunk)
95 |
96 | async def receive_until(self, delimiter: bytes, max_bytes: int) -> bytes:
97 | """
98 | Read from the stream until the delimiter is found or max_bytes have been read.
99 |
100 | :param delimiter: the marker to look for in the stream
101 | :param max_bytes: maximum number of bytes that will be read before raising
102 | :exc:`~anyio.DelimiterNotFound`
103 | :return: the bytes read (not including the delimiter)
104 | :raises ~anyio.IncompleteRead: if the stream was closed before the delimiter
105 | was found
106 | :raises ~anyio.DelimiterNotFound: if the delimiter is not found within the
107 | bytes read up to the maximum allowed
108 |
109 | """
110 | delimiter_size = len(delimiter)
111 | offset = 0
112 | while True:
113 | # Check if the delimiter can be found in the current buffer
114 | index = self._buffer.find(delimiter, offset)
115 | if index >= 0:
116 | found = self._buffer[:index]
117 |                 del self._buffer[: index + len(delimiter)]
118 | return bytes(found)
119 |
120 | # Check if the buffer is already at or over the limit
121 | if len(self._buffer) >= max_bytes:
122 | raise DelimiterNotFound(max_bytes)
123 |
124 | # Read more data into the buffer from the socket
125 | try:
126 | data = await self.receive_stream.receive()
127 | except EndOfStream as exc:
128 | raise IncompleteRead from exc
129 |
130 | # Move the offset forward and add the new data to the buffer
131 | offset = max(len(self._buffer) - delimiter_size + 1, 0)
132 | self._buffer.extend(data)
133 |
134 |
135 | class BufferedByteStream(BufferedByteReceiveStream, ByteStream):
136 | """
137 | A full-duplex variant of :class:`BufferedByteReceiveStream`. All writes are passed
138 | through to the wrapped stream as-is.
139 | """
140 |
141 | def __init__(self, stream: AnyByteStream):
142 | """
143 | :param stream: the stream to be wrapped
144 |
145 | """
146 | super().__init__(stream)
147 | self._stream = stream
148 |
149 | @override
150 | async def send_eof(self) -> None:
151 | await self._stream.send_eof()
152 |
153 | @override
154 | async def send(self, item: bytes) -> None:
155 | await self._stream.send(item)
156 |
157 |
158 | class BufferedConnectable(ByteStreamConnectable):
159 | def __init__(self, connectable: AnyByteStreamConnectable):
160 | """
161 | :param connectable: the connectable to wrap
162 |
163 | """
164 | self.connectable = connectable
165 |
166 | @override
167 | async def connect(self) -> BufferedByteStream:
168 | stream = await self.connectable.connect()
169 | return BufferedByteStream(stream)
170 |
--------------------------------------------------------------------------------
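A usage sketch of the buffered helpers above, fed from a memory object stream; the payload is arbitrary:

    import anyio
    from anyio import create_memory_object_stream
    from anyio.streams.buffered import BufferedByteReceiveStream

    async def main() -> None:
        send, receive = create_memory_object_stream[bytes](10)
        buffered = BufferedByteReceiveStream(receive)
        await send.send(b"GET / HTTP/1.1\r\nrest")
        # reads until the delimiter; surplus bytes stay in the buffer
        print(await buffered.receive_until(b"\r\n", max_bytes=1024))  # b'GET / HTTP/1.1'
        print(await buffered.receive_exactly(4))                      # b'rest'

    anyio.run(main)
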
/src/anyio/streams/file.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from collections.abc import Callable, Mapping
4 | from io import SEEK_SET, UnsupportedOperation
5 | from os import PathLike
6 | from pathlib import Path
7 | from typing import Any, BinaryIO, cast
8 |
9 | from .. import (
10 | BrokenResourceError,
11 | ClosedResourceError,
12 | EndOfStream,
13 | TypedAttributeSet,
14 | to_thread,
15 | typed_attribute,
16 | )
17 | from ..abc import ByteReceiveStream, ByteSendStream
18 |
19 |
20 | class FileStreamAttribute(TypedAttributeSet):
21 | #: the open file descriptor
22 | file: BinaryIO = typed_attribute()
23 | #: the path of the file on the file system, if available (file must be a real file)
24 | path: Path = typed_attribute()
25 | #: the file number, if available (file must be a real file or a TTY)
26 | fileno: int = typed_attribute()
27 |
28 |
29 | class _BaseFileStream:
30 | def __init__(self, file: BinaryIO):
31 | self._file = file
32 |
33 | async def aclose(self) -> None:
34 | await to_thread.run_sync(self._file.close)
35 |
36 | @property
37 | def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
38 | attributes: dict[Any, Callable[[], Any]] = {
39 | FileStreamAttribute.file: lambda: self._file,
40 | }
41 |
42 | if hasattr(self._file, "name"):
43 | attributes[FileStreamAttribute.path] = lambda: Path(self._file.name)
44 |
45 | try:
46 | self._file.fileno()
47 | except UnsupportedOperation:
48 | pass
49 | else:
50 | attributes[FileStreamAttribute.fileno] = lambda: self._file.fileno()
51 |
52 | return attributes
53 |
54 |
55 | class FileReadStream(_BaseFileStream, ByteReceiveStream):
56 | """
57 | A byte stream that reads from a file in the file system.
58 |
59 | :param file: a file that has been opened for reading in binary mode
60 |
61 | .. versionadded:: 3.0
62 | """
63 |
64 | @classmethod
65 | async def from_path(cls, path: str | PathLike[str]) -> FileReadStream:
66 | """
67 | Create a file read stream by opening the given file.
68 |
69 | :param path: path of the file to read from
70 |
71 | """
72 | file = await to_thread.run_sync(Path(path).open, "rb")
73 | return cls(cast(BinaryIO, file))
74 |
75 | async def receive(self, max_bytes: int = 65536) -> bytes:
76 | try:
77 | data = await to_thread.run_sync(self._file.read, max_bytes)
78 | except ValueError:
79 | raise ClosedResourceError from None
80 | except OSError as exc:
81 | raise BrokenResourceError from exc
82 |
83 | if data:
84 | return data
85 | else:
86 | raise EndOfStream
87 |
88 | async def seek(self, position: int, whence: int = SEEK_SET) -> int:
89 | """
90 | Seek the file to the given position.
91 |
92 | .. seealso:: :meth:`io.IOBase.seek`
93 |
94 | .. note:: Not all file descriptors are seekable.
95 |
96 | :param position: position to seek the file to
97 | :param whence: controls how ``position`` is interpreted
98 | :return: the new absolute position
99 | :raises OSError: if the file is not seekable
100 |
101 | """
102 | return await to_thread.run_sync(self._file.seek, position, whence)
103 |
104 | async def tell(self) -> int:
105 | """
106 | Return the current stream position.
107 |
108 | .. note:: Not all file descriptors are seekable.
109 |
110 | :return: the current absolute position
111 | :raises OSError: if the file is not seekable
112 |
113 | """
114 | return await to_thread.run_sync(self._file.tell)
115 |
116 |
117 | class FileWriteStream(_BaseFileStream, ByteSendStream):
118 | """
119 | A byte stream that writes to a file in the file system.
120 |
121 | :param file: a file that has been opened for writing in binary mode
122 |
123 | .. versionadded:: 3.0
124 | """
125 |
126 | @classmethod
127 | async def from_path(
128 | cls, path: str | PathLike[str], append: bool = False
129 | ) -> FileWriteStream:
130 | """
131 | Create a file write stream by opening the given file for writing.
132 |
133 | :param path: path of the file to write to
134 | :param append: if ``True``, open the file for appending; if ``False``, any
135 | existing file at the given path will be truncated
136 |
137 | """
138 | mode = "ab" if append else "wb"
139 | file = await to_thread.run_sync(Path(path).open, mode)
140 | return cls(cast(BinaryIO, file))
141 |
142 | async def send(self, item: bytes) -> None:
143 | try:
144 | await to_thread.run_sync(self._file.write, item)
145 | except ValueError:
146 | raise ClosedResourceError from None
147 | except OSError as exc:
148 | raise BrokenResourceError from exc
149 |
--------------------------------------------------------------------------------
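A sketch of the file stream classes above, writing and then reading back a temporary file:

    import tempfile

    import anyio
    from anyio.streams.file import FileReadStream, FileWriteStream

    async def main() -> None:
        with tempfile.NamedTemporaryFile(delete=False) as tmp:
            path = tmp.name

        async with await FileWriteStream.from_path(path) as send:
            await send.send(b"payload")

        async with await FileReadStream.from_path(path) as receive:
            print(await receive.receive())  # b'payload'

    anyio.run(main)
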
/src/anyio/streams/stapled.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from collections.abc import Callable, Mapping, Sequence
4 | from dataclasses import dataclass
5 | from typing import Any, Generic, TypeVar
6 |
7 | from ..abc import (
8 | ByteReceiveStream,
9 | ByteSendStream,
10 | ByteStream,
11 | Listener,
12 | ObjectReceiveStream,
13 | ObjectSendStream,
14 | ObjectStream,
15 | TaskGroup,
16 | )
17 |
18 | T_Item = TypeVar("T_Item")
19 | T_Stream = TypeVar("T_Stream")
20 |
21 |
22 | @dataclass(eq=False)
23 | class StapledByteStream(ByteStream):
24 | """
25 | Combines two byte streams into a single, bidirectional byte stream.
26 |
27 | Extra attributes will be provided from both streams, with the receive stream
28 | providing the values in case of a conflict.
29 |
30 | :param ByteSendStream send_stream: the sending byte stream
31 | :param ByteReceiveStream receive_stream: the receiving byte stream
32 | """
33 |
34 | send_stream: ByteSendStream
35 | receive_stream: ByteReceiveStream
36 |
37 | async def receive(self, max_bytes: int = 65536) -> bytes:
38 | return await self.receive_stream.receive(max_bytes)
39 |
40 | async def send(self, item: bytes) -> None:
41 | await self.send_stream.send(item)
42 |
43 | async def send_eof(self) -> None:
44 | await self.send_stream.aclose()
45 |
46 | async def aclose(self) -> None:
47 | await self.send_stream.aclose()
48 | await self.receive_stream.aclose()
49 |
50 | @property
51 | def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
52 | return {
53 | **self.send_stream.extra_attributes,
54 | **self.receive_stream.extra_attributes,
55 | }
56 |
57 |
58 | @dataclass(eq=False)
59 | class StapledObjectStream(Generic[T_Item], ObjectStream[T_Item]):
60 | """
61 | Combines two object streams into a single, bidirectional object stream.
62 |
63 | Extra attributes will be provided from both streams, with the receive stream
64 | providing the values in case of a conflict.
65 |
66 | :param ObjectSendStream send_stream: the sending object stream
67 | :param ObjectReceiveStream receive_stream: the receiving object stream
68 | """
69 |
70 | send_stream: ObjectSendStream[T_Item]
71 | receive_stream: ObjectReceiveStream[T_Item]
72 |
73 | async def receive(self) -> T_Item:
74 | return await self.receive_stream.receive()
75 |
76 | async def send(self, item: T_Item) -> None:
77 | await self.send_stream.send(item)
78 |
79 | async def send_eof(self) -> None:
80 | await self.send_stream.aclose()
81 |
82 | async def aclose(self) -> None:
83 | await self.send_stream.aclose()
84 | await self.receive_stream.aclose()
85 |
86 | @property
87 | def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
88 | return {
89 | **self.send_stream.extra_attributes,
90 | **self.receive_stream.extra_attributes,
91 | }
92 |
93 |
94 | @dataclass(eq=False)
95 | class MultiListener(Generic[T_Stream], Listener[T_Stream]):
96 | """
97 | Combines multiple listeners into one, serving connections from all of them at once.
98 |
99 | Any MultiListeners in the given collection of listeners will have their listeners
100 | moved into this one.
101 |
102 | Extra attributes are provided from each listener, with each successive listener
103 | overriding any conflicting attributes from the previous one.
104 |
105 | :param listeners: listeners to serve
106 | :type listeners: Sequence[Listener[T_Stream]]
107 | """
108 |
109 | listeners: Sequence[Listener[T_Stream]]
110 |
111 | def __post_init__(self) -> None:
112 | listeners: list[Listener[T_Stream]] = []
113 | for listener in self.listeners:
114 | if isinstance(listener, MultiListener):
115 | listeners.extend(listener.listeners)
116 | del listener.listeners[:] # type: ignore[attr-defined]
117 | else:
118 | listeners.append(listener)
119 |
120 | self.listeners = listeners
121 |
122 | async def serve(
123 | self, handler: Callable[[T_Stream], Any], task_group: TaskGroup | None = None
124 | ) -> None:
125 | from .. import create_task_group
126 |
127 | async with create_task_group() as tg:
128 | for listener in self.listeners:
129 | tg.start_soon(listener.serve, handler, task_group)
130 |
131 | async def aclose(self) -> None:
132 | for listener in self.listeners:
133 | await listener.aclose()
134 |
135 | @property
136 | def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
137 | attributes: dict = {}
138 | for listener in self.listeners:
139 | attributes.update(listener.extra_attributes)
140 |
141 | return attributes
142 |
--------------------------------------------------------------------------------
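A sketch of stapling the two halves of a memory object stream into a single bidirectional stream, per the classes above; the result simply echoes back whatever is sent:

    import anyio
    from anyio import create_memory_object_stream
    from anyio.streams.stapled import StapledObjectStream

    async def main() -> None:
        send, receive = create_memory_object_stream[str](1)
        echo = StapledObjectStream(send, receive)  # sends loop back to receives
        await echo.send("ping")
        print(await echo.receive())  # 'ping'

    anyio.run(main)
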
/src/anyio/streams/text.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import codecs
4 | import sys
5 | from collections.abc import Callable, Mapping
6 | from dataclasses import InitVar, dataclass, field
7 | from typing import Any
8 |
9 | from ..abc import (
10 | AnyByteReceiveStream,
11 | AnyByteSendStream,
12 | AnyByteStream,
13 | AnyByteStreamConnectable,
14 | ObjectReceiveStream,
15 | ObjectSendStream,
16 | ObjectStream,
17 | ObjectStreamConnectable,
18 | )
19 |
20 | if sys.version_info >= (3, 12):
21 | from typing import override
22 | else:
23 | from typing_extensions import override
24 |
25 |
26 | @dataclass(eq=False)
27 | class TextReceiveStream(ObjectReceiveStream[str]):
28 | """
29 | Stream wrapper that decodes bytes to strings using the given encoding.
30 |
31 | Decoding is done using :class:`~codecs.IncrementalDecoder` which returns any
32 | completely received unicode characters as soon as they come in.
33 |
34 | :param transport_stream: any bytes-based receive stream
35 | :param encoding: character encoding to use for decoding bytes to strings (defaults
36 | to ``utf-8``)
37 | :param errors: handling scheme for decoding errors (defaults to ``strict``; see the
38 | `codecs module documentation`_ for a comprehensive list of options)
39 |
40 | .. _codecs module documentation:
41 | https://docs.python.org/3/library/codecs.html#codec-objects
42 | """
43 |
44 | transport_stream: AnyByteReceiveStream
45 | encoding: InitVar[str] = "utf-8"
46 | errors: InitVar[str] = "strict"
47 | _decoder: codecs.IncrementalDecoder = field(init=False)
48 |
49 | def __post_init__(self, encoding: str, errors: str) -> None:
50 | decoder_class = codecs.getincrementaldecoder(encoding)
51 | self._decoder = decoder_class(errors=errors)
52 |
53 | async def receive(self) -> str:
54 | while True:
55 | chunk = await self.transport_stream.receive()
56 | decoded = self._decoder.decode(chunk)
57 | if decoded:
58 | return decoded
59 |
60 | async def aclose(self) -> None:
61 | await self.transport_stream.aclose()
62 | self._decoder.reset()
63 |
64 | @property
65 | def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
66 | return self.transport_stream.extra_attributes
67 |
68 |
69 | @dataclass(eq=False)
70 | class TextSendStream(ObjectSendStream[str]):
71 | """
72 | Sends strings to the wrapped stream as bytes using the given encoding.
73 |
74 | :param AnyByteSendStream transport_stream: any bytes-based send stream
75 | :param str encoding: character encoding to use for encoding strings to bytes
76 | (defaults to ``utf-8``)
77 | :param str errors: handling scheme for encoding errors (defaults to ``strict``; see
78 | the `codecs module documentation`_ for a comprehensive list of options)
79 |
80 | .. _codecs module documentation:
81 | https://docs.python.org/3/library/codecs.html#codec-objects
82 | """
83 |
84 | transport_stream: AnyByteSendStream
85 | encoding: InitVar[str] = "utf-8"
86 | errors: str = "strict"
87 | _encoder: Callable[..., tuple[bytes, int]] = field(init=False)
88 |
89 | def __post_init__(self, encoding: str) -> None:
90 | self._encoder = codecs.getencoder(encoding)
91 |
92 | async def send(self, item: str) -> None:
93 | encoded = self._encoder(item, self.errors)[0]
94 | await self.transport_stream.send(encoded)
95 |
96 | async def aclose(self) -> None:
97 | await self.transport_stream.aclose()
98 |
99 | @property
100 | def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
101 | return self.transport_stream.extra_attributes
102 |
103 |
104 | @dataclass(eq=False)
105 | class TextStream(ObjectStream[str]):
106 | """
107 | A bidirectional stream that decodes bytes to strings on receive and encodes strings
108 | to bytes on send.
109 |
110 | Extra attributes will be provided from both streams, with the receive stream
111 | providing the values in case of a conflict.
112 |
113 | :param AnyByteStream transport_stream: any bytes-based stream
114 | :param str encoding: character encoding to use for encoding/decoding strings to/from
115 | bytes (defaults to ``utf-8``)
116 | :param str errors: handling scheme for encoding errors (defaults to ``strict``; see
117 | the `codecs module documentation`_ for a comprehensive list of options)
118 |
119 | .. _codecs module documentation:
120 | https://docs.python.org/3/library/codecs.html#codec-objects
121 | """
122 |
123 | transport_stream: AnyByteStream
124 | encoding: InitVar[str] = "utf-8"
125 | errors: InitVar[str] = "strict"
126 | _receive_stream: TextReceiveStream = field(init=False)
127 | _send_stream: TextSendStream = field(init=False)
128 |
129 | def __post_init__(self, encoding: str, errors: str) -> None:
130 | self._receive_stream = TextReceiveStream(
131 | self.transport_stream, encoding=encoding, errors=errors
132 | )
133 | self._send_stream = TextSendStream(
134 | self.transport_stream, encoding=encoding, errors=errors
135 | )
136 |
137 | async def receive(self) -> str:
138 | return await self._receive_stream.receive()
139 |
140 | async def send(self, item: str) -> None:
141 | await self._send_stream.send(item)
142 |
143 | async def send_eof(self) -> None:
144 | await self.transport_stream.send_eof()
145 |
146 | async def aclose(self) -> None:
147 | await self._send_stream.aclose()
148 | await self._receive_stream.aclose()
149 |
150 | @property
151 | def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
152 | return {
153 | **self._send_stream.extra_attributes,
154 | **self._receive_stream.extra_attributes,
155 | }
156 |
157 |
158 | class TextConnectable(ObjectStreamConnectable[str]):
159 | def __init__(self, connectable: AnyByteStreamConnectable):
160 | """
161 | :param connectable: the bytestream endpoint to wrap
162 |
163 | """
164 | self.connectable = connectable
165 |
166 | @override
167 | async def connect(self) -> TextStream:
168 | stream = await self.connectable.connect()
169 | return TextStream(stream)
170 |
--------------------------------------------------------------------------------
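A sketch of TextReceiveStream's incremental decoding, fed from a memory object stream; note how a UTF-8 character split across chunks is held back until it is complete:

    import anyio
    from anyio import create_memory_object_stream
    from anyio.streams.text import TextReceiveStream

    async def main() -> None:
        send, receive = create_memory_object_stream[bytes](2)
        text_stream = TextReceiveStream(receive)
        await send.send(b"h\xc3")    # ends midway through a two-byte character
        await send.send(b"\xa4llo")
        print(await text_stream.receive())  # 'h' (the split character is held back)
        print(await text_stream.receive())  # 'ällo'

    anyio.run(main)
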
/src/anyio/to_interpreter.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import atexit
4 | import os
5 | import pickle
6 | import sys
7 | from collections import deque
8 | from collections.abc import Callable
9 | from textwrap import dedent
10 | from typing import Any, Final, TypeVar
11 |
12 | from . import current_time, to_thread
13 | from ._core._exceptions import BrokenWorkerIntepreter
14 | from ._core._synchronization import CapacityLimiter
15 | from .lowlevel import RunVar
16 |
17 | if sys.version_info >= (3, 11):
18 | from typing import TypeVarTuple, Unpack
19 | else:
20 | from typing_extensions import TypeVarTuple, Unpack
21 |
22 | UNBOUND: Final = 2 # I have no clue how this works, but it was used in the stdlib
23 | FMT_UNPICKLED: Final = 0
24 | FMT_PICKLED: Final = 1
25 | DEFAULT_CPU_COUNT: Final = 8 # this is just an arbitrarily selected value
26 | MAX_WORKER_IDLE_TIME = (
27 | 30 # seconds a subinterpreter can be idle before becoming eligible for pruning
28 | )
29 | QUEUE_PICKLE_ARGS: Final = (
30 | (UNBOUND,) if sys.version_info >= (3, 14, 0, "beta", 2) else (FMT_PICKLED, UNBOUND)
31 | )
32 | QUEUE_UNPICKLE_ARGS: Final = (
33 | (UNBOUND,)
34 | if sys.version_info >= (3, 14, 0, "beta", 2)
35 | else (FMT_UNPICKLED, UNBOUND)
36 | )
37 |
38 | T_Retval = TypeVar("T_Retval")
39 | PosArgsT = TypeVarTuple("PosArgsT")
40 |
41 | _idle_workers = RunVar[deque["Worker"]]("_available_workers")
42 | _default_interpreter_limiter = RunVar[CapacityLimiter]("_default_interpreter_limiter")
43 |
44 |
45 | class Worker:
46 | _run_func = compile(
47 | dedent("""
48 | import _interpqueues as queues
49 | import _interpreters as interpreters
50 | from pickle import loads, dumps, HIGHEST_PROTOCOL
51 |
52 | item = queues.get(queue_id)[0]
53 | try:
54 | func, args = loads(item)
55 | retval = func(*args)
56 | except BaseException as exc:
57 | is_exception = True
58 | retval = exc
59 | else:
60 | is_exception = False
61 |
62 | try:
63 | queues.put(queue_id, (retval, is_exception), *QUEUE_UNPICKLE_ARGS)
64 | except interpreters.NotShareableError:
65 | retval = dumps(retval, HIGHEST_PROTOCOL)
66 | queues.put(queue_id, (retval, is_exception), *QUEUE_PICKLE_ARGS)
67 | """),
68 | "",
69 | "exec",
70 | )
71 |
72 | last_used: float = 0
73 |
74 | _initialized: bool = False
75 | _interpreter_id: int
76 | _queue_id: int
77 |
78 | def initialize(self) -> None:
79 | import _interpqueues as queues
80 | import _interpreters as interpreters
81 |
82 | self._interpreter_id = interpreters.create()
83 | self._queue_id = queues.create(2, *QUEUE_UNPICKLE_ARGS)
84 | self._initialized = True
85 | interpreters.set___main___attrs(
86 | self._interpreter_id,
87 | {
88 | "queue_id": self._queue_id,
89 | "QUEUE_PICKLE_ARGS": QUEUE_PICKLE_ARGS,
90 | "QUEUE_UNPICKLE_ARGS": QUEUE_UNPICKLE_ARGS,
91 | },
92 | )
93 |
94 | def destroy(self) -> None:
95 | import _interpqueues as queues
96 | import _interpreters as interpreters
97 |
98 | if self._initialized:
99 | interpreters.destroy(self._interpreter_id)
100 | queues.destroy(self._queue_id)
101 |
102 | def _call(
103 | self,
104 | func: Callable[..., T_Retval],
105 | args: tuple[Any],
106 | ) -> tuple[Any, bool]:
107 | import _interpqueues as queues
108 | import _interpreters as interpreters
109 |
110 | if not self._initialized:
111 | self.initialize()
112 |
113 | payload = pickle.dumps((func, args), pickle.HIGHEST_PROTOCOL)
114 | queues.put(self._queue_id, payload, *QUEUE_PICKLE_ARGS)
115 |
116 | res: Any
117 | is_exception: bool
118 | if exc_info := interpreters.exec(self._interpreter_id, self._run_func):
119 | raise BrokenWorkerIntepreter(exc_info)
120 |
121 | (res, is_exception), fmt = queues.get(self._queue_id)[:2]
122 | if fmt == FMT_PICKLED:
123 | res = pickle.loads(res)
124 |
125 | return res, is_exception
126 |
127 | async def call(
128 | self,
129 | func: Callable[..., T_Retval],
130 | args: tuple[Any],
131 | limiter: CapacityLimiter,
132 | ) -> T_Retval:
133 | result, is_exception = await to_thread.run_sync(
134 | self._call,
135 | func,
136 | args,
137 | limiter=limiter,
138 | )
139 | if is_exception:
140 | raise result
141 |
142 | return result
143 |
144 |
145 | def _stop_workers(workers: deque[Worker]) -> None:
146 | for worker in workers:
147 | worker.destroy()
148 |
149 | workers.clear()
150 |
151 |
152 | async def run_sync(
153 | func: Callable[[Unpack[PosArgsT]], T_Retval],
154 | *args: Unpack[PosArgsT],
155 | limiter: CapacityLimiter | None = None,
156 | ) -> T_Retval:
157 | """
158 | Call the given function with the given arguments in a subinterpreter.
159 |
160 |     If the task waiting for its completion is cancelled, the call will still run
161 |     its course, but its return value (or any raised exception) will be ignored
162 |     (unlike :func:`anyio.to_thread.run_sync`, there is no ``cancellable`` option here).
163 |
164 | .. warning:: This feature is **experimental**. The upstream interpreter API has not
165 | yet been finalized or thoroughly tested, so don't rely on this for anything
166 | mission critical.
167 |
168 | :param func: a callable
169 | :param args: positional arguments for the callable
170 | :param limiter: capacity limiter to use to limit the total amount of subinterpreters
171 | running (if omitted, the default limiter is used)
172 | :return: the result of the call
173 | :raises BrokenWorkerIntepreter: if there's an internal error in a subinterpreter
174 |
175 | """
176 | if sys.version_info <= (3, 13):
177 | raise RuntimeError("subinterpreters require at least Python 3.13")
178 |
179 | if limiter is None:
180 | limiter = current_default_interpreter_limiter()
181 |
182 | try:
183 | idle_workers = _idle_workers.get()
184 | except LookupError:
185 | idle_workers = deque()
186 | _idle_workers.set(idle_workers)
187 | atexit.register(_stop_workers, idle_workers)
188 |
189 | async with limiter:
190 | try:
191 | worker = idle_workers.pop()
192 | except IndexError:
193 | worker = Worker()
194 |
195 | try:
196 | return await worker.call(func, args, limiter)
197 | finally:
198 | # Prune workers that have been idle for too long
199 | now = current_time()
200 | while idle_workers:
201 | if now - idle_workers[0].last_used <= MAX_WORKER_IDLE_TIME:
202 | break
203 |
204 | await to_thread.run_sync(idle_workers.popleft().destroy, limiter=limiter)
205 |
206 | worker.last_used = current_time()
207 | idle_workers.append(worker)
208 |
209 |
210 | def current_default_interpreter_limiter() -> CapacityLimiter:
211 | """
212 | Return the capacity limiter that is used by default to limit the number of
213 | concurrently running subinterpreters.
214 |
215 | Defaults to the number of CPU cores.
216 |
217 | :return: a capacity limiter object
218 |
219 | """
220 | try:
221 | return _default_interpreter_limiter.get()
222 | except LookupError:
223 | limiter = CapacityLimiter(os.cpu_count() or DEFAULT_CPU_COUNT)
224 | _default_interpreter_limiter.set(limiter)
225 | return limiter
226 |
--------------------------------------------------------------------------------
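A hedged sketch of to_interpreter.run_sync above (requires Python 3.13+ and is experimental, per the warning in the docstring); math.factorial stands in for any CPU-bound callable that can be pickled by reference:

    from math import factorial

    import anyio
    from anyio import to_interpreter

    async def main() -> None:
        # runs in a pooled subinterpreter via a worker thread
        result = await to_interpreter.run_sync(factorial, 20)
        print(result)  # 2432902008176640000

    anyio.run(main)
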
/src/anyio/to_thread.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import sys
4 | from collections.abc import Callable
5 | from typing import TypeVar
6 | from warnings import warn
7 |
8 | from ._core._eventloop import get_async_backend
9 | from .abc import CapacityLimiter
10 |
11 | if sys.version_info >= (3, 11):
12 | from typing import TypeVarTuple, Unpack
13 | else:
14 | from typing_extensions import TypeVarTuple, Unpack
15 |
16 | T_Retval = TypeVar("T_Retval")
17 | PosArgsT = TypeVarTuple("PosArgsT")
18 |
19 |
20 | async def run_sync(
21 | func: Callable[[Unpack[PosArgsT]], T_Retval],
22 | *args: Unpack[PosArgsT],
23 | abandon_on_cancel: bool = False,
24 | cancellable: bool | None = None,
25 | limiter: CapacityLimiter | None = None,
26 | ) -> T_Retval:
27 | """
28 | Call the given function with the given arguments in a worker thread.
29 |
30 | If the ``cancellable`` option is enabled and the task waiting for its completion is
31 | cancelled, the thread will still run its course but its return value (or any raised
32 | exception) will be ignored.
33 |
34 | :param func: a callable
35 | :param args: positional arguments for the callable
36 | :param abandon_on_cancel: ``True`` to abandon the thread (leaving it to run
37 |         unchecked on its own) if the host task is cancelled, ``False`` to ignore
38 | cancellations in the host task until the operation has completed in the worker
39 | thread
40 | :param cancellable: deprecated alias of ``abandon_on_cancel``; will override
41 | ``abandon_on_cancel`` if both parameters are passed
42 | :param limiter: capacity limiter to use to limit the total amount of threads running
43 | (if omitted, the default limiter is used)
44 |     :return: the return value of the function.
45 |
46 | """
47 | if cancellable is not None:
48 | abandon_on_cancel = cancellable
49 | warn(
50 | "The `cancellable=` keyword argument to `anyio.to_thread.run_sync` is "
51 | "deprecated since AnyIO 4.1.0; use `abandon_on_cancel=` instead",
52 | DeprecationWarning,
53 | stacklevel=2,
54 | )
55 |
56 | return await get_async_backend().run_sync_in_worker_thread(
57 | func, args, abandon_on_cancel=abandon_on_cancel, limiter=limiter
58 | )
59 |
60 |
61 | def current_default_thread_limiter() -> CapacityLimiter:
62 | """
63 | Return the capacity limiter that is used by default to limit the number of
64 | concurrent threads.
65 |
66 | :return: a capacity limiter object
67 |
68 | """
69 | return get_async_backend().current_default_thread_limiter()
70 |
--------------------------------------------------------------------------------
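A minimal sketch of to_thread.run_sync wrapping a blocking call, per the docstring above:

    import time

    import anyio
    from anyio import to_thread

    async def main() -> None:
        # time.sleep() would block the event loop; hand it to a worker thread
        await to_thread.run_sync(time.sleep, 0.1)
        limiter = to_thread.current_default_thread_limiter()
        print(limiter.total_tokens)  # 40 in the default configuration

    anyio.run(main)
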
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/agronholm/anyio/561d81270a12f7c6bbafb5bc5fad99a2a13f96be/tests/__init__.py
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import asyncio
4 | import ssl
5 | import sys
6 | from collections.abc import Generator, Iterator
7 | from ssl import SSLContext
8 | from typing import TYPE_CHECKING, Any
9 | from unittest.mock import Mock
10 |
11 | import pytest
12 | import trustme
13 | from _pytest.fixtures import SubRequest
14 | from trustme import CA
15 |
16 | if TYPE_CHECKING:
17 | from blockbuster import BlockBuster
18 |
19 | uvloop_marks = []
20 | try:
21 | import uvloop
22 | except ImportError:
23 | uvloop_marks.append(pytest.mark.skip(reason="uvloop not available"))
24 | uvloop = Mock()
25 | else:
26 | if hasattr(asyncio.AbstractEventLoop, "shutdown_default_executor") and not hasattr(
27 | uvloop.loop.Loop, "shutdown_default_executor"
28 | ):
29 | uvloop_marks.append(
30 | pytest.mark.skip(reason="uvloop is missing shutdown_default_executor()")
31 | )
32 |
33 | pytest_plugins = ["pytester"]
34 |
35 | asyncio_params = [
36 | pytest.param(("asyncio", {"debug": True}), id="asyncio"),
37 | pytest.param(
38 | ("asyncio", {"debug": True, "loop_factory": uvloop.new_event_loop}),
39 | marks=uvloop_marks,
40 | id="asyncio+uvloop",
41 | ),
42 | ]
43 | if sys.version_info >= (3, 12):
44 |
45 | def eager_task_loop_factory() -> asyncio.AbstractEventLoop:
46 | loop = asyncio.new_event_loop()
47 | loop.set_task_factory(asyncio.eager_task_factory)
48 | return loop
49 |
50 | asyncio_params.append(
51 | pytest.param(
52 | ("asyncio", {"debug": True, "loop_factory": eager_task_loop_factory}),
53 | id="asyncio+eager",
54 | ),
55 | )
56 |
57 |
58 | @pytest.fixture(autouse=True)
59 | def blockbuster() -> Iterator[BlockBuster | None]:
60 | try:
61 | from blockbuster import blockbuster_ctx
62 | except ImportError:
63 | yield None
64 | return
65 |
66 | with blockbuster_ctx(
67 | "anyio", excluded_modules=["anyio.pytest_plugin", "anyio._backends._asyncio"]
68 | ) as bb:
69 | bb.functions["socket.socket.accept"].can_block_in(
70 | "anyio/_core/_asyncio_selector_thread.py", {"get_selector"}
71 | )
72 | for func in ["os.stat", "os.unlink"]:
73 | bb.functions[func].can_block_in(
74 | "anyio/_core/_sockets.py", "setup_unix_local_socket"
75 | )
76 |
77 | yield bb
78 |
79 |
80 | @pytest.fixture
81 | def deactivate_blockbuster(blockbuster: BlockBuster | None) -> None:
82 | if blockbuster is not None:
83 | blockbuster.deactivate()
84 |
85 |
86 | @pytest.fixture(params=[*asyncio_params, pytest.param("trio")])
87 | def anyio_backend(request: SubRequest) -> tuple[str, dict[str, Any]]:
88 | return request.param
89 |
90 |
91 | @pytest.fixture(scope="session")
92 | def ca() -> CA:
93 | return trustme.CA()
94 |
95 |
96 | @pytest.fixture(scope="session")
97 | def server_context(ca: CA) -> SSLContext:
98 | server_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
99 | if hasattr(ssl, "OP_IGNORE_UNEXPECTED_EOF"):
100 | server_context.options &= ~ssl.OP_IGNORE_UNEXPECTED_EOF
101 |
102 | ca.issue_cert("localhost").configure_cert(server_context)
103 | return server_context
104 |
105 |
106 | @pytest.fixture(scope="session")
107 | def client_context(ca: CA) -> SSLContext:
108 | client_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
109 | if hasattr(ssl, "OP_IGNORE_UNEXPECTED_EOF"):
110 | client_context.options &= ~ssl.OP_IGNORE_UNEXPECTED_EOF
111 |
112 | ca.configure_trust(client_context)
113 | return client_context
114 |
115 |
116 | @pytest.fixture
117 | def asyncio_event_loop() -> Generator[asyncio.AbstractEventLoop, None, None]:
118 | if sys.version_info >= (3, 13):
119 | loop = asyncio.EventLoop()
120 | else:
121 | loop = asyncio.new_event_loop()
122 |
123 | if sys.version_info < (3, 10):
124 | asyncio.set_event_loop(loop)
125 |
126 | yield loop
127 |
128 | if sys.version_info < (3, 10):
129 | asyncio.set_event_loop(None)
130 |
131 | loop.close()
132 |
133 |
134 | if sys.version_info >= (3, 14):
135 |
136 | def no_other_refs() -> list[object]:
137 | return [sys._getframe(1).f_generator]
138 |
139 | elif sys.version_info >= (3, 11):
140 |
141 | def no_other_refs() -> list[object]:
142 | return []
143 | else:
144 |
145 | def no_other_refs() -> list[object]:
146 | return [sys._getframe(1)]
147 |
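For context, a hypothetical test module consuming this conftest, mirroring the pattern the test modules below use: the parametrized ``anyio_backend`` fixture makes every ``pytest.mark.anyio`` test run once per backend (asyncio, asyncio+uvloop, asyncio+eager on 3.12+, and trio):

    import pytest

    import anyio

    pytestmark = pytest.mark.anyio  # every async test here uses the anyio plugin


    async def test_runs_on_every_parametrized_backend() -> None:
        await anyio.sleep(0)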
--------------------------------------------------------------------------------
/tests/streams/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/agronholm/anyio/561d81270a12f7c6bbafb5bc5fad99a2a13f96be/tests/streams/__init__.py
--------------------------------------------------------------------------------
/tests/streams/test_buffered.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import pytest
4 |
5 | from anyio import (
6 | ClosedResourceError,
7 | EndOfStream,
8 | IncompleteRead,
9 | create_memory_object_stream,
10 | )
11 | from anyio.abc import ObjectStream, ObjectStreamConnectable
12 | from anyio.streams.buffered import (
13 | BufferedByteReceiveStream,
14 | BufferedByteStream,
15 | BufferedConnectable,
16 | )
17 | from anyio.streams.stapled import StapledObjectStream
18 |
19 | pytestmark = pytest.mark.anyio
20 |
21 |
22 | async def test_receive_exactly() -> None:
23 | send_stream, receive_stream = create_memory_object_stream[bytes](2)
24 | buffered_stream = BufferedByteReceiveStream(receive_stream)
25 | await send_stream.send(b"abcd")
26 | await send_stream.send(b"efgh")
27 | result = await buffered_stream.receive_exactly(8)
28 | assert result == b"abcdefgh"
29 | assert isinstance(result, bytes)
30 |
31 | send_stream.close()
32 | receive_stream.close()
33 |
34 |
35 | async def test_receive_exactly_incomplete() -> None:
36 | send_stream, receive_stream = create_memory_object_stream[bytes](1)
37 | buffered_stream = BufferedByteReceiveStream(receive_stream)
38 | await send_stream.send(b"abcd")
39 | await send_stream.aclose()
40 | with pytest.raises(IncompleteRead):
41 | await buffered_stream.receive_exactly(8)
42 |
43 | send_stream.close()
44 | receive_stream.close()
45 |
46 |
47 | async def test_receive_until() -> None:
48 | send_stream, receive_stream = create_memory_object_stream[bytes](2)
49 | buffered_stream = BufferedByteReceiveStream(receive_stream)
50 | await send_stream.send(b"abcd")
51 | await send_stream.send(b"efgh")
52 |
53 | result = await buffered_stream.receive_until(b"de", 10)
54 | assert result == b"abc"
55 | assert isinstance(result, bytes)
56 |
57 | result = await buffered_stream.receive_until(b"h", 10)
58 | assert result == b"fg"
59 | assert isinstance(result, bytes)
60 |
61 | send_stream.close()
62 | receive_stream.close()
63 |
64 |
65 | async def test_receive_until_incomplete() -> None:
66 | send_stream, receive_stream = create_memory_object_stream[bytes](1)
67 | buffered_stream = BufferedByteReceiveStream(receive_stream)
68 | await send_stream.send(b"abcd")
69 | await send_stream.aclose()
70 | with pytest.raises(IncompleteRead):
71 | assert await buffered_stream.receive_until(b"de", 10)
72 |
73 | assert buffered_stream.buffer == b"abcd"
74 |
75 | send_stream.close()
76 | receive_stream.close()
77 |
78 |
79 | async def test_buffered_stream() -> None:
80 | send_stream, receive_stream = create_memory_object_stream[bytes](1)
81 | buffered_stream = BufferedByteStream(
82 | StapledObjectStream(send_stream, receive_stream)
83 | )
84 | await send_stream.send(b"abcd")
85 | assert await buffered_stream.receive_exactly(2) == b"ab"
86 | assert await buffered_stream.receive_exactly(2) == b"cd"
87 |
88 | # send_eof() should close only the sending end
89 | await buffered_stream.send_eof()
90 | pytest.raises(ClosedResourceError, send_stream.send_nowait, b"abc")
91 | pytest.raises(EndOfStream, receive_stream.receive_nowait)
92 |
93 | # aclose() closes the receive stream too
94 | await buffered_stream.aclose()
95 | pytest.raises(ClosedResourceError, receive_stream.receive_nowait)
96 |
97 |
98 | async def test_buffered_connectable() -> None:
99 | send_stream, receive_stream = create_memory_object_stream[bytes](1)
100 | memory_stream = StapledObjectStream(send_stream, receive_stream)
101 |
102 | class MemoryObjectConnectable(ObjectStreamConnectable[bytes]):
103 | async def connect(self) -> ObjectStream[bytes]:
104 | return memory_stream
105 |
106 | connectable = BufferedConnectable(MemoryObjectConnectable())
107 | async with await connectable.connect() as stream:
108 | assert isinstance(stream, BufferedByteStream)
109 | await stream.send(b"abcd")
110 | assert await stream.receive_exactly(2) == b"ab"
111 | assert await stream.receive_exactly(2) == b"cd"
112 |
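Beyond the memory-stream tests above, a typical use of BufferedByteReceiveStream is a delimiter-based protocol over a socket; a sketch, with the endpoint and request as placeholders:

    import anyio
    from anyio.streams.buffered import BufferedByteReceiveStream


    async def read_status_line() -> bytes:
        async with await anyio.connect_tcp("example.com", 80) as stream:
            await stream.send(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
            buffered = BufferedByteReceiveStream(stream)
            # bytes read past the delimiter stay buffered for later calls
            return await buffered.receive_until(b"\r\n", max_bytes=1024)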
--------------------------------------------------------------------------------
/tests/streams/test_file.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from pathlib import Path
4 |
5 | import pytest
6 | from _pytest.fixtures import SubRequest
7 | from _pytest.tmpdir import TempPathFactory
8 |
9 | from anyio import ClosedResourceError, EndOfStream
10 | from anyio.abc import ByteReceiveStream
11 | from anyio.streams.file import FileReadStream, FileStreamAttribute, FileWriteStream
12 |
13 | pytestmark = pytest.mark.anyio
14 |
15 |
16 | class TestFileReadStream:
17 | @pytest.fixture(scope="class")
18 | def file_path(self, tmp_path_factory: TempPathFactory) -> Path:
19 | path = tmp_path_factory.mktemp("filestream") / "data.txt"
20 | path.write_text("Hello")
21 | return path
22 |
23 | @pytest.fixture(params=[False, True], ids=["str", "path"])
24 | def file_path_or_str(self, request: SubRequest, file_path: Path) -> Path | str:
25 | return file_path if request.param else str(file_path)
26 |
27 | async def _run_filestream_test(self, stream: ByteReceiveStream) -> None:
28 | assert await stream.receive(3) == b"Hel"
29 | assert await stream.receive(3) == b"lo"
30 | with pytest.raises(EndOfStream):
31 | await stream.receive(1)
32 |
33 | async def test_read_file_as_path(self, file_path_or_str: Path | str) -> None:
34 | async with await FileReadStream.from_path(file_path_or_str) as stream:
35 | await self._run_filestream_test(stream)
36 |
37 | async def test_read_file(self, file_path: Path) -> None:
38 | with file_path.open("rb") as file:
39 | async with FileReadStream(file) as stream:
40 | await self._run_filestream_test(stream)
41 |
42 | async def test_read_after_close(self, file_path: Path) -> None:
43 | async with await FileReadStream.from_path(file_path) as stream:
44 | pass
45 |
46 | with pytest.raises(ClosedResourceError):
47 | await stream.receive()
48 |
49 | async def test_seek(self, file_path: Path) -> None:
50 | with file_path.open("rb") as file:
51 | async with FileReadStream(file) as stream:
52 | await stream.seek(2)
53 | assert await stream.tell() == 2
54 | data = await stream.receive()
55 | assert data == b"llo"
56 | assert await stream.tell() == 5
57 |
58 | async def test_extra_attributes(self, file_path: Path) -> None:
59 | async with await FileReadStream.from_path(file_path) as stream:
60 | path = stream.extra(FileStreamAttribute.path)
61 | assert path == file_path
62 |
63 | fileno = stream.extra(FileStreamAttribute.fileno)
64 | assert fileno > 2
65 |
66 | file = stream.extra(FileStreamAttribute.file)
67 | assert file.fileno() == fileno
68 |
69 |
70 | class TestFileWriteStream:
71 | @pytest.fixture
72 | def file_path(self, tmp_path: Path) -> Path:
73 | return tmp_path / "written_data.txt"
74 |
75 | async def test_write_file(self, file_path: Path) -> None:
76 | async with await FileWriteStream.from_path(file_path) as stream:
77 | await stream.send(b"Hel")
78 | await stream.send(b"lo")
79 |
80 | assert file_path.read_text() == "Hello"
81 |
82 | async def test_append_file(self, file_path: Path) -> None:
83 | file_path.write_text("Hello")
84 | async with await FileWriteStream.from_path(file_path, True) as stream:
85 | await stream.send(b", World!")
86 |
87 | assert file_path.read_text() == "Hello, World!"
88 |
89 | async def test_write_after_close(self, file_path: Path) -> None:
90 | async with await FileWriteStream.from_path(file_path, True) as stream:
91 | pass
92 |
93 | with pytest.raises(ClosedResourceError):
94 | await stream.send(b"foo")
95 |
96 | async def test_extra_attributes(self, file_path: Path) -> None:
97 | async with await FileWriteStream.from_path(file_path) as stream:
98 | path = stream.extra(FileStreamAttribute.path)
99 | assert path == file_path
100 |
101 | fileno = stream.extra(FileStreamAttribute.fileno)
102 | assert fileno > 2
103 |
104 | file = stream.extra(FileStreamAttribute.file)
105 | assert file.fileno() == fileno
106 |
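A sketch of the copy pattern this stream pair enables (paths are placeholders); receive() signals end of file by raising EndOfStream, as exercised in _run_filestream_test() above:

    from anyio import EndOfStream
    from anyio.streams.file import FileReadStream, FileWriteStream


    async def copy_file(src: str, dst: str) -> None:
        async with await FileReadStream.from_path(src) as source:
            async with await FileWriteStream.from_path(dst) as dest:
                while True:
                    try:
                        chunk = await source.receive(65536)
                    except EndOfStream:
                        break  # end of the source file

                    await dest.send(chunk)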
--------------------------------------------------------------------------------
/tests/streams/test_stapled.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from collections import deque
4 | from collections.abc import Iterable
5 | from dataclasses import InitVar, dataclass, field
6 | from typing import TypeVar
7 |
8 | import pytest
9 |
10 | from anyio import ClosedResourceError, EndOfStream
11 | from anyio.abc import (
12 | ByteReceiveStream,
13 | ByteSendStream,
14 | ObjectReceiveStream,
15 | ObjectSendStream,
16 | )
17 | from anyio.streams.stapled import StapledByteStream, StapledObjectStream
18 |
19 | pytestmark = pytest.mark.anyio
20 |
21 |
22 | @dataclass
23 | class DummyByteReceiveStream(ByteReceiveStream):
24 | data: InitVar[bytes]
25 | buffer: bytearray = field(init=False)
26 | _closed: bool = field(init=False, default=False)
27 |
28 | def __post_init__(self, data: bytes) -> None:
29 | self.buffer = bytearray(data)
30 |
31 | async def receive(self, max_bytes: int = 65536) -> bytes:
32 | if self._closed:
33 | raise ClosedResourceError
34 |
35 | data = bytes(self.buffer[:max_bytes])
36 | del self.buffer[:max_bytes]
37 | return data
38 |
39 | async def aclose(self) -> None:
40 | self._closed = True
41 |
42 |
43 | @dataclass
44 | class DummyByteSendStream(ByteSendStream):
45 | buffer: bytearray = field(init=False, default_factory=bytearray)
46 | _closed: bool = field(init=False, default=False)
47 |
48 | async def send(self, item: bytes) -> None:
49 | if self._closed:
50 | raise ClosedResourceError
51 |
52 | self.buffer.extend(item)
53 |
54 | async def aclose(self) -> None:
55 | self._closed = True
56 |
57 |
58 | class TestStapledByteStream:
59 | @pytest.fixture
60 | def send_stream(self) -> DummyByteSendStream:
61 | return DummyByteSendStream()
62 |
63 | @pytest.fixture
64 | def receive_stream(self) -> DummyByteReceiveStream:
65 | return DummyByteReceiveStream(b"hello, world")
66 |
67 | @pytest.fixture
68 | def stapled(
69 | self, send_stream: DummyByteSendStream, receive_stream: DummyByteReceiveStream
70 | ) -> StapledByteStream:
71 | return StapledByteStream(send_stream, receive_stream)
72 |
73 | async def test_receive_send(
74 | self, stapled: StapledByteStream, send_stream: DummyByteSendStream
75 | ) -> None:
76 | assert await stapled.receive(3) == b"hel"
77 | assert await stapled.receive() == b"lo, world"
78 | assert await stapled.receive() == b""
79 |
80 | await stapled.send(b"how are you ")
81 | await stapled.send(b"today?")
82 | assert stapled.send_stream is send_stream
83 | assert bytes(send_stream.buffer) == b"how are you today?"
84 |
85 | async def test_send_eof(self, stapled: StapledByteStream) -> None:
86 | await stapled.send_eof()
87 | await stapled.send_eof()
88 | with pytest.raises(ClosedResourceError):
89 | await stapled.send(b"world")
90 |
91 | assert await stapled.receive() == b"hello, world"
92 |
93 | async def test_aclose(self, stapled: StapledByteStream) -> None:
94 | await stapled.aclose()
95 | with pytest.raises(ClosedResourceError):
96 | await stapled.receive()
97 | with pytest.raises(ClosedResourceError):
98 | await stapled.send(b"")
99 |
100 |
101 | T_Item = TypeVar("T_Item")
102 |
103 |
104 | @dataclass
105 | class DummyObjectReceiveStream(ObjectReceiveStream[T_Item]):
106 | data: InitVar[Iterable[T_Item]]
107 | buffer: deque[T_Item] = field(init=False)
108 | _closed: bool = field(init=False, default=False)
109 |
110 | def __post_init__(self, data: Iterable[T_Item]) -> None:
111 | self.buffer = deque(data)
112 |
113 | async def receive(self) -> T_Item:
114 | if self._closed:
115 | raise ClosedResourceError
116 | if not self.buffer:
117 | raise EndOfStream
118 |
119 | return self.buffer.popleft()
120 |
121 | async def aclose(self) -> None:
122 | self._closed = True
123 |
124 |
125 | @dataclass
126 | class DummyObjectSendStream(ObjectSendStream[T_Item]):
127 | buffer: list[T_Item] = field(init=False, default_factory=list)
128 | _closed: bool = field(init=False, default=False)
129 |
130 | async def send(self, item: T_Item) -> None:
131 | if self._closed:
132 | raise ClosedResourceError
133 |
134 | self.buffer.append(item)
135 |
136 | async def aclose(self) -> None:
137 | self._closed = True
138 |
139 |
140 | class TestStapledObjectStream:
141 | @pytest.fixture
142 | def receive_stream(self) -> DummyObjectReceiveStream[str]:
143 | return DummyObjectReceiveStream(["hello", "world"])
144 |
145 | @pytest.fixture
146 | def send_stream(self) -> DummyObjectSendStream[str]:
147 | return DummyObjectSendStream[str]()
148 |
149 | @pytest.fixture
150 | def stapled(
151 | self,
152 | receive_stream: DummyObjectReceiveStream[str],
153 | send_stream: DummyObjectSendStream[str],
154 | ) -> StapledObjectStream[str]:
155 | return StapledObjectStream(send_stream, receive_stream)
156 |
157 | async def test_receive_send(
158 | self, stapled: StapledObjectStream[str], send_stream: DummyObjectSendStream[str]
159 | ) -> None:
160 | assert await stapled.receive() == "hello"
161 | assert await stapled.receive() == "world"
162 | with pytest.raises(EndOfStream):
163 | await stapled.receive()
164 |
165 | await stapled.send("how are you ")
166 | await stapled.send("today?")
167 | assert stapled.send_stream is send_stream
168 | assert send_stream.buffer == ["how are you ", "today?"]
169 |
170 | async def test_send_eof(self, stapled: StapledObjectStream[str]) -> None:
171 | await stapled.send_eof()
172 | await stapled.send_eof()
173 | with pytest.raises(ClosedResourceError):
174 | await stapled.send("world")
175 |
176 | assert await stapled.receive() == "hello"
177 | assert await stapled.receive() == "world"
178 |
179 | async def test_aclose(self, stapled: StapledObjectStream[str]) -> None:
180 | await stapled.aclose()
181 | with pytest.raises(ClosedResourceError):
182 | await stapled.receive()
183 | with pytest.raises(ClosedResourceError):
184 | await stapled.send(b"") # type: ignore[arg-type]
185 |
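A loopback sketch of what stapling provides: combining the two halves of a memory object stream yields a single bidirectional ObjectStream, the same construction the buffered and text stream tests rely on:

    from anyio import create_memory_object_stream
    from anyio.streams.stapled import StapledObjectStream


    async def demo() -> None:
        send, receive = create_memory_object_stream[str](1)
        loopback = StapledObjectStream(send, receive)
        await loopback.send("ping")  # goes out through the send half ...
        assert await loopback.receive() == "ping"  # ... and back via the receive half
        await loopback.aclose()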
--------------------------------------------------------------------------------
/tests/streams/test_text.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import platform
4 | import sys
5 |
6 | import pytest
7 |
8 | from anyio import create_memory_object_stream
9 | from anyio.abc import ObjectStream, ObjectStreamConnectable
10 | from anyio.streams.stapled import StapledObjectStream
11 | from anyio.streams.text import (
12 | TextConnectable,
13 | TextReceiveStream,
14 | TextSendStream,
15 | TextStream,
16 | )
17 |
18 | pytestmark = pytest.mark.anyio
19 |
20 |
21 | async def test_receive() -> None:
22 | send_stream, receive_stream = create_memory_object_stream[bytes](1)
23 | text_stream = TextReceiveStream(receive_stream)
24 | await send_stream.send(b"\xc3\xa5\xc3\xa4\xc3") # ends with half of the "ö" letter
25 | assert await text_stream.receive() == "åä"
26 |
27 | # Send the missing byte for "ö"
28 | await send_stream.send(b"\xb6")
29 | assert await text_stream.receive() == "ö"
30 |
31 | send_stream.close()
32 | receive_stream.close()
33 |
34 |
35 | async def test_send() -> None:
36 | send_stream, receive_stream = create_memory_object_stream[bytes](1)
37 | text_stream = TextSendStream(send_stream)
38 | await text_stream.send("åäö")
39 | assert await receive_stream.receive() == b"\xc3\xa5\xc3\xa4\xc3\xb6"
40 |
41 | send_stream.close()
42 | receive_stream.close()
43 |
44 |
45 | @pytest.mark.xfail(
46 | platform.python_implementation() == "PyPy" and sys.pypy_version_info < (7, 3, 2), # type: ignore[attr-defined]
47 | reason="PyPy has a bug in its incremental UTF-8 decoder (#3274)",
48 | )
49 | async def test_receive_encoding_error() -> None:
50 | send_stream, receive_stream = create_memory_object_stream[bytes](1)
51 | text_stream = TextReceiveStream(receive_stream, errors="replace")
52 | await send_stream.send(b"\xe5\xe4\xf6") # "åäö" in latin-1
53 | assert await text_stream.receive() == "���"
54 |
55 | send_stream.close()
56 | receive_stream.close()
57 |
58 |
59 | async def test_send_encoding_error() -> None:
60 | send_stream, receive_stream = create_memory_object_stream[bytes](1)
61 | text_stream = TextSendStream(send_stream, encoding="iso-8859-1", errors="replace")
62 | await text_stream.send("€")
63 | assert await receive_stream.receive() == b"?"
64 |
65 | send_stream.close()
66 | receive_stream.close()
67 |
68 |
69 | async def test_bidirectional_stream() -> None:
70 | send_stream, receive_stream = create_memory_object_stream[bytes](1)
71 | stapled_stream = StapledObjectStream(send_stream, receive_stream)
72 | text_stream = TextStream(stapled_stream)
73 |
74 | await text_stream.send("åäö")
75 | assert await receive_stream.receive() == b"\xc3\xa5\xc3\xa4\xc3\xb6"
76 |
77 | await send_stream.send(b"\xc3\xa6\xc3\xb8")
78 | assert await text_stream.receive() == "æø"
79 | assert text_stream.extra_attributes == {}
80 |
81 | send_stream.close()
82 | receive_stream.close()
83 |
84 |
85 | async def test_text_connectable() -> None:
86 | send_stream, receive_stream = create_memory_object_stream[bytes](1)
87 | memory_stream = StapledObjectStream(send_stream, receive_stream)
88 |
89 | class MemoryConnectable(ObjectStreamConnectable[bytes]):
90 | async def connect(self) -> ObjectStream[bytes]:
91 | return memory_stream
92 |
93 | connectable = TextConnectable(MemoryConnectable())
94 | async with await connectable.connect() as stream:
95 | assert isinstance(stream, TextStream)
96 | await stream.send("hello")
97 | assert await stream.receive() == "hello"
98 |
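In practice TextStream usually wraps a transport rather than a memory stream; a sketch with a placeholder endpoint, assuming the peer echoes back what it receives:

    import anyio
    from anyio.streams.text import TextStream


    async def greet() -> str:
        async with await anyio.connect_tcp("localhost", 7777) as raw:
            text = TextStream(raw, encoding="utf-8")  # str in, str out
            await text.send("hello\n")
            return await text.receive()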
--------------------------------------------------------------------------------
/tests/test_debugging.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import asyncio
4 | import sys
5 | from collections.abc import AsyncGenerator, Generator
6 | from types import CoroutineType, GeneratorType
7 | from typing import Any, cast
8 |
9 | import pytest
10 |
11 | import anyio
12 | from anyio import (
13 | Event,
14 | TaskInfo,
15 | create_task_group,
16 | get_current_task,
17 | get_running_tasks,
18 | move_on_after,
19 | wait_all_tasks_blocked,
20 | )
21 | from anyio.abc import TaskStatus
22 |
23 | from .conftest import asyncio_params
24 |
25 | pytestmark = pytest.mark.anyio
26 |
27 |
28 | get_coro = asyncio.Task.get_coro
29 |
30 |
31 | def test_main_task_name(
32 | anyio_backend_name: str, anyio_backend_options: dict[str, Any]
33 | ) -> None:
34 | task_name = None
35 |
36 | async def main() -> None:
37 | nonlocal task_name
38 | task_name = get_current_task().name
39 |
40 | anyio.run(main, backend=anyio_backend_name, backend_options=anyio_backend_options)
41 | assert task_name == "tests.test_debugging.test_main_task_name.<locals>.main"
42 |
43 | # Work around sniffio/asyncio bug that leaves behind an unclosed event loop
44 | if anyio_backend_name == "asyncio":
45 | import asyncio
46 | import gc
47 |
48 | for loop in [
49 | obj
50 | for obj in gc.get_objects()
51 | if isinstance(obj, asyncio.AbstractEventLoop)
52 | ]:
53 | loop.close()
54 |
55 |
56 | @pytest.mark.parametrize(
57 | "name_input,expected",
58 | [
59 | (None, "tests.test_debugging.test_non_main_task_name..non_main"),
60 | (b"name", "b'name'"),
61 | ("name", "name"),
62 | ("", ""),
63 | ],
64 | )
65 | async def test_non_main_task_name(
66 | name_input: bytes | str | None, expected: str
67 | ) -> None:
68 | async def non_main(*, task_status: TaskStatus) -> None:
69 | task_status.started(anyio.get_current_task().name)
70 |
71 | async with anyio.create_task_group() as tg:
72 | name = await tg.start(non_main, name=name_input)
73 |
74 | assert name == expected
75 |
76 |
77 | async def test_get_running_tasks() -> None:
78 | async def inspect() -> None:
79 | await wait_all_tasks_blocked()
80 | new_tasks = set(get_running_tasks()) - existing_tasks
81 | task_infos[:] = sorted(new_tasks, key=lambda info: info.name or "")
82 | event.set()
83 |
84 | event = Event()
85 | task_infos: list[TaskInfo] = []
86 | host_task = get_current_task()
87 | async with create_task_group() as tg:
88 | existing_tasks = set(get_running_tasks())
89 | tg.start_soon(event.wait, name="task1")
90 | tg.start_soon(event.wait, name="task2")
91 | tg.start_soon(inspect)
92 |
93 | assert len(task_infos) == 3
94 | expected_names = [
95 | "task1",
96 | "task2",
97 | "tests.test_debugging.test_get_running_tasks..inspect",
98 | ]
99 | for task, expected_name in zip(task_infos, expected_names):
100 | assert task.parent_id == host_task.id
101 | assert task.name == expected_name
102 | assert repr(task).endswith(f"TaskInfo(id={task.id}, name={expected_name!r})")
103 |
104 |
105 | @pytest.mark.skipif(
106 | sys.version_info >= (3, 11),
107 | reason="Generator based coroutines have been removed in Python 3.11",
108 | )
109 | @pytest.mark.filterwarnings(
110 | 'ignore:"@coroutine" decorator is deprecated:DeprecationWarning'
111 | )
112 | def test_wait_generator_based_task_blocked(
113 | asyncio_event_loop: asyncio.AbstractEventLoop,
114 | ) -> None:
115 | async def native_coro_part() -> None:
116 | await wait_all_tasks_blocked()
117 | gen = cast(GeneratorType, get_coro(gen_task))
118 | assert not gen.gi_running
119 | coro = cast(CoroutineType, gen.gi_yieldfrom)
120 | assert coro.cr_code.co_name == "wait"
121 |
122 | event.set()
123 |
124 | @asyncio.coroutine # type: ignore[attr-defined]
125 | def generator_part() -> Generator[object, BaseException, None]:
126 | yield from event.wait() # type: ignore[misc]
127 |
128 | event = asyncio.Event()
129 | gen_task: asyncio.Task[None] = asyncio_event_loop.create_task(generator_part())
130 | asyncio_event_loop.run_until_complete(native_coro_part())
131 |
132 |
133 | @pytest.mark.parametrize("anyio_backend", asyncio_params)
134 | async def test_wait_all_tasks_blocked_asend(anyio_backend: str) -> None:
135 | """Test that wait_all_tasks_blocked() does not crash on an `asend()` object."""
136 |
137 | async def agen_func() -> AsyncGenerator[None, None]:
138 | yield
139 |
140 | agen = agen_func()
141 | coro = agen.asend(None)
142 | loop = asyncio.get_running_loop()
143 | task = loop.create_task(cast("CoroutineType[Any, Any, Any]", coro))
144 | await wait_all_tasks_blocked()
145 | await task
146 | await agen.aclose()
147 |
148 |
149 | async def test_wait_all_tasks_blocked_cancelled_task() -> None:
150 | done = False
151 |
152 | async def self_cancel(*, task_status: TaskStatus) -> None:
153 | nonlocal done
154 | task_status.started()
155 | with move_on_after(-1):
156 | await Event().wait()
157 |
158 | done = True
159 |
160 | async with create_task_group() as tg:
161 | await tg.start(self_cancel)
162 | await wait_all_tasks_blocked()
163 | assert done
164 |
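A standalone sketch of the introspection API these tests exercise; the task name is an illustrative choice:

    import anyio
    from anyio import create_task_group, get_current_task, get_running_tasks
    from anyio import wait_all_tasks_blocked


    async def main() -> None:
        async with create_task_group() as tg:
            tg.start_soon(anyio.sleep_forever, name="sleeper")
            await wait_all_tasks_blocked()
            host = get_current_task()
            for info in get_running_tasks():
                # each TaskInfo carries an id, a name and its parent task's id
                print(info.id, info.name, info.parent_id == host.id)

            tg.cancel_scope.cancel()


    anyio.run(main)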
--------------------------------------------------------------------------------
/tests/test_eventloop.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import asyncio
4 | import math
5 | from asyncio import get_running_loop
6 | from collections.abc import Generator
7 | from unittest import mock
8 | from unittest.mock import AsyncMock
9 |
10 | import pytest
11 | from pytest import MonkeyPatch
12 |
13 | from anyio import run, sleep_forever, sleep_until
14 |
15 | pytestmark = pytest.mark.anyio
16 | fake_current_time = 1620581544.0
17 |
18 |
19 | @pytest.fixture
20 | def fake_sleep() -> Generator[AsyncMock, None, None]:
21 | with mock.patch(
22 | "anyio._core._eventloop.current_time", return_value=fake_current_time
23 | ):
24 | with mock.patch("anyio._core._eventloop.sleep", AsyncMock()) as v:
25 | yield v
26 |
27 |
28 | async def test_sleep_until(fake_sleep: AsyncMock) -> None:
29 | deadline = fake_current_time + 500.102352
30 | await sleep_until(deadline)
31 | fake_sleep.assert_called_once_with(deadline - fake_current_time)
32 |
33 |
34 | async def test_sleep_until_in_past(fake_sleep: AsyncMock) -> None:
35 | deadline = fake_current_time - 500.102352
36 | await sleep_until(deadline)
37 | fake_sleep.assert_called_once_with(0)
38 |
39 |
40 | async def test_sleep_forever(fake_sleep: AsyncMock) -> None:
41 | await sleep_forever()
42 | fake_sleep.assert_called_once_with(math.inf)
43 |
44 |
45 | def test_run_task() -> None:
46 | """Test that anyio.run() on asyncio will work with a callable returning a Future."""
47 |
48 | async def async_add(x: int, y: int) -> int:
49 | return x + y
50 |
51 | result = run(asyncio.create_task, async_add(1, 2), backend="asyncio")
52 | assert result == 3
53 |
54 |
55 | class TestAsyncioOptions:
56 | def test_debug(self) -> None:
57 | async def main() -> bool:
58 | return get_running_loop().get_debug()
59 |
60 | debug = run(main, backend="asyncio", backend_options={"debug": True})
61 | assert debug is True
62 |
63 | def test_debug_via_env(self, monkeypatch: MonkeyPatch) -> None:
64 | async def main() -> bool:
65 | return get_running_loop().get_debug()
66 |
67 | monkeypatch.setenv("PYTHONASYNCIODEBUG", "1")
68 | debug = run(main, backend="asyncio")
69 | assert debug is True
70 |
71 | def test_loop_factory(self) -> None:
72 | async def main() -> type:
73 | return type(get_running_loop())
74 |
75 | uvloop = pytest.importorskip("uvloop", reason="uvloop not installed")
76 | loop_class = run(
77 | main,
78 | backend="asyncio",
79 | backend_options={"loop_factory": uvloop.new_event_loop},
80 | )
81 | assert issubclass(loop_class, uvloop.Loop)
82 |
83 | def test_use_uvloop(self) -> None:
84 | async def main() -> type:
85 | return type(get_running_loop())
86 |
87 | uvloop = pytest.importorskip("uvloop", reason="uvloop not installed")
88 | loop_class = run(main, backend="asyncio", backend_options={"use_uvloop": True})
89 | assert issubclass(loop_class, uvloop.Loop)
90 |
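The relationship the mocked fixtures verify, as a plain sketch (the 5-second offset is illustrative):

    import anyio


    async def main() -> None:
        deadline = anyio.current_time() + 5
        # sleep_until(deadline) sleeps for max(deadline - current_time(), 0)
        await anyio.sleep_until(deadline)


    anyio.run(main)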
--------------------------------------------------------------------------------
/tests/test_lowlevel.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from typing import Any
4 |
5 | import pytest
6 |
7 | from anyio import create_task_group, run
8 | from anyio.lowlevel import (
9 | RunVar,
10 | cancel_shielded_checkpoint,
11 | checkpoint,
12 | checkpoint_if_cancelled,
13 | )
14 |
15 | pytestmark = pytest.mark.anyio
16 |
17 |
18 | @pytest.mark.parametrize("cancel", [False, True])
19 | async def test_checkpoint_if_cancelled(cancel: bool) -> None:
20 | finished = second_finished = False
21 |
22 | async def func() -> None:
23 | nonlocal finished
24 | tg.start_soon(second_func)
25 | if cancel:
26 | tg.cancel_scope.cancel()
27 |
28 | await checkpoint_if_cancelled()
29 | finished = True
30 |
31 | async def second_func() -> None:
32 | nonlocal second_finished
33 | assert finished != cancel
34 | second_finished = True
35 |
36 | async with create_task_group() as tg:
37 | tg.start_soon(func)
38 |
39 | assert finished != cancel
40 | assert second_finished
41 |
42 |
43 | @pytest.mark.parametrize("cancel", [False, True])
44 | async def test_cancel_shielded_checkpoint(cancel: bool) -> None:
45 | finished = second_finished = False
46 |
47 | async def func() -> None:
48 | nonlocal finished
49 | await cancel_shielded_checkpoint()
50 | finished = True
51 |
52 | async def second_func() -> None:
53 | nonlocal second_finished
54 | assert not finished
55 | second_finished = True
56 |
57 | async with create_task_group() as tg:
58 | tg.start_soon(func)
59 | tg.start_soon(second_func)
60 | if cancel:
61 | tg.cancel_scope.cancel()
62 |
63 | assert finished
64 | assert second_finished
65 |
66 |
67 | @pytest.mark.parametrize("cancel", [False, True])
68 | async def test_checkpoint(cancel: bool) -> None:
69 | finished = second_finished = False
70 |
71 | async def func() -> None:
72 | nonlocal finished
73 | await checkpoint()
74 | finished = True
75 |
76 | async def second_func() -> None:
77 | nonlocal second_finished
78 | assert not finished
79 | second_finished = True
80 |
81 | async with create_task_group() as tg:
82 | tg.start_soon(func)
83 | tg.start_soon(second_func)
84 | if cancel:
85 | tg.cancel_scope.cancel()
86 |
87 | assert finished != cancel
88 | assert second_finished
89 |
90 |
91 | class TestRunVar:
92 | def test_get_set(
93 | self,
94 | anyio_backend_name: str,
95 | anyio_backend_options: dict[str, Any],
96 | ) -> None:
97 | async def taskfunc(index: int) -> None:
98 | assert var.get() == index
99 | var.set(index + 1)
100 |
101 | async def main() -> None:
102 | pytest.raises(LookupError, var.get)
103 | for i in range(2):
104 | var.set(i)
105 | async with create_task_group() as tg:
106 | tg.start_soon(taskfunc, i)
107 |
108 | assert var.get() == i + 1
109 |
110 | var = RunVar[int]("var")
111 | for _ in range(2):
112 | run(main, backend=anyio_backend_name, backend_options=anyio_backend_options)
113 |
114 | async def test_reset_token_used_on_wrong_runvar(self) -> None:
115 | var1 = RunVar[str]("var1")
116 | var2 = RunVar[str]("var2")
117 | token = var1.set("blah")
118 | with pytest.raises(
119 | ValueError, match="This token does not belong to this RunVar"
120 | ):
121 | var2.reset(token)
122 |
123 | async def test_reset_token_used_twice(self) -> None:
124 | var = RunVar[str]("var")
125 | token = var.set("blah")
126 | var.reset(token)
127 | with pytest.raises(ValueError, match="This token has already been used"):
128 | var.reset(token)
129 |
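What TestRunVar.test_get_set demonstrates, distilled: a RunVar is scoped to a single event loop run, so each fresh anyio.run() starts with the variable unset:

    import anyio
    from anyio.lowlevel import RunVar

    counter = RunVar[int]("counter")


    async def main() -> int:
        try:
            value = counter.get()
        except LookupError:  # unset at the start of every run
            value = 0

        counter.set(value + 1)
        return counter.get()


    assert anyio.run(main) == 1
    assert anyio.run(main) == 1  # the second run never sees the first run's value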
--------------------------------------------------------------------------------
/tests/test_signals.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import os
4 | import signal
5 | import sys
6 | from collections.abc import AsyncIterable
7 |
8 | import pytest
9 |
10 | from anyio import create_task_group, fail_after, open_signal_receiver, to_thread
11 |
12 | pytestmark = [
13 | pytest.mark.anyio,
14 | pytest.mark.skipif(
15 | sys.platform == "win32",
16 | reason="Signal delivery cannot be tested on Windows",
17 | ),
18 | ]
19 |
20 |
21 | async def test_receive_signals() -> None:
22 | with open_signal_receiver(signal.SIGUSR1, signal.SIGUSR2) as sigiter:
23 | await to_thread.run_sync(os.kill, os.getpid(), signal.SIGUSR1)
24 | await to_thread.run_sync(os.kill, os.getpid(), signal.SIGUSR2)
25 | with fail_after(1):
26 | sigusr1 = await sigiter.__anext__()
27 | assert isinstance(sigusr1, signal.Signals)
28 | assert sigusr1 == signal.Signals.SIGUSR1
29 |
30 | sigusr2 = await sigiter.__anext__()
31 | assert isinstance(sigusr2, signal.Signals)
32 | assert sigusr2 == signal.Signals.SIGUSR2
33 |
34 |
35 | async def test_task_group_cancellation_open() -> None:
36 | async def signal_handler() -> None:
37 | with open_signal_receiver(signal.SIGUSR1) as sigiter:
38 | async for _ in sigiter:
39 | pytest.fail("SIGUSR1 should not be sent")
40 |
41 | pytest.fail("signal_handler should have been cancelled")
42 |
43 | pytest.fail("open_signal_receiver should not suppress cancellation")
44 |
45 | async with create_task_group() as tg:
46 | tg.start_soon(signal_handler)
47 | tg.cancel_scope.cancel()
48 |
49 |
50 | async def test_task_group_cancellation_consume() -> None:
51 | async def consume(sigiter: AsyncIterable[int]) -> None:
52 | async for _ in sigiter:
53 | pytest.fail("SIGUSR1 should not be sent")
54 |
55 | pytest.fail("consume should have been cancelled")
56 |
57 | with open_signal_receiver(signal.SIGUSR1) as sigiter:
58 | async with create_task_group() as tg:
59 | tg.start_soon(consume, sigiter)
60 | tg.cancel_scope.cancel()
61 |
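The intended production shape of this API is a graceful-shutdown loop; a sketch (POSIX only, and the handler body is illustrative):

    import signal

    import anyio
    from anyio import open_signal_receiver


    async def main() -> None:
        with open_signal_receiver(signal.SIGINT, signal.SIGTERM) as signals:
            async for signum in signals:
                print(f"received {signum!r}, shutting down")
                break


    anyio.run(main)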
--------------------------------------------------------------------------------
/tests/test_tempfile.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import os
4 | import pathlib
5 | import shutil
6 | import tempfile
7 | from typing import AnyStr
8 | from unittest.mock import patch
9 |
10 | import pytest
11 |
12 | from anyio import (
13 | NamedTemporaryFile,
14 | SpooledTemporaryFile,
15 | TemporaryDirectory,
16 | TemporaryFile,
17 | gettempdir,
18 | gettempdirb,
19 | mkdtemp,
20 | mkstemp,
21 | )
22 |
23 | pytestmark = pytest.mark.anyio
24 |
25 |
26 | class TestTemporaryFile:
27 | async def test_temporary_file(self) -> None:
28 | data = b"temporary file data"
29 | async with TemporaryFile[bytes]() as af:
30 | await af.write(data)
31 | await af.seek(0)
32 | result = await af.read()
33 |
34 | assert result == data
35 | assert af.closed
36 |
37 |
38 | class TestNamedTemporaryFile:
39 | async def test_named_temporary_file(self) -> None:
40 | data = b"named temporary file data"
41 | async with NamedTemporaryFile[bytes]() as af:
42 | filename = af.name
43 | assert os.path.exists(filename) # type: ignore[arg-type]
44 |
45 | await af.write(data)
46 | await af.seek(0)
47 | assert await af.read() == data
48 |
49 | assert not os.path.exists(filename) # type: ignore[arg-type]
50 |
51 | async def test_exception_handling(self) -> None:
52 | async with NamedTemporaryFile[bytes]() as af:
53 | filename = af.name
54 | assert os.path.exists(filename) # type: ignore[arg-type]
55 |
56 | assert not os.path.exists(filename) # type: ignore[arg-type]
57 |
58 | with pytest.raises(ValueError):
59 | await af.write(b"should fail")
60 |
61 |
62 | class TestSpooledTemporaryFile:
63 | async def test_write_without_rolled(self) -> None:
64 | rollover_called = False
65 |
66 | async def fake_rollover() -> None:
67 | nonlocal rollover_called
68 | rollover_called = True
69 | await original_rollover()
70 |
71 | async with SpooledTemporaryFile(max_size=10) as stf:
72 | original_rollover = stf.rollover
73 | with patch.object(stf, "rollover", fake_rollover):
74 | assert await stf.write(b"12345") == 5
75 | assert not rollover_called
76 |
77 | await stf.write(b"67890X")
78 | assert rollover_called
79 |
80 | async def test_writelines(self) -> None:
81 | rollover_called = False
82 |
83 | async def fake_rollover() -> None:
84 | nonlocal rollover_called
85 | rollover_called = True
86 | await original_rollover()
87 |
88 | async with SpooledTemporaryFile(max_size=20) as stf:
89 | original_rollover = stf.rollover
90 | with patch.object(stf, "rollover", fake_rollover):
91 | await stf.writelines([b"hello", b"world"])
92 | assert not rollover_called
93 | await stf.seek(0)
94 |
95 | assert await stf.read() == b"helloworld"
96 | await stf.writelines([b"1234567890123456"])
97 | assert rollover_called
98 |
99 | async def test_closed_state(self) -> None:
100 | async with SpooledTemporaryFile(max_size=10) as stf:
101 | assert not stf.closed
102 |
103 | assert stf.closed
104 |
105 | async def test_exact_boundary_no_rollover(self) -> None:
106 | async with SpooledTemporaryFile(max_size=10) as stf:
107 | await stf.write(b"0123456789")
108 | assert not stf._rolled
109 |
110 | await stf.write(b"x")
111 | assert stf._rolled
112 |
113 |
114 | class TestTemporaryDirectory:
115 | async def test_context_manager(self) -> None:
116 | async with TemporaryDirectory() as td:
117 | td_path = pathlib.Path(td)
118 | assert td_path.exists() and td_path.is_dir()
119 |
120 | file_path = td_path / "test.txt"
121 | file_path.write_text("temp dir test", encoding="utf-8")
122 | assert file_path.exists()
123 |
124 | assert not td_path.exists()
125 |
126 | async def test_cleanup_method(self) -> None:
127 | td = TemporaryDirectory()
128 | td_str = await td.__aenter__()
129 | td_path = pathlib.Path(td_str)
130 |
131 | file_path = td_path / "file.txt"
132 | file_path.write_text("cleanup test", encoding="utf-8")
133 |
134 | await td.cleanup()
135 | assert not td_path.exists()
136 |
137 | async def test_exception_handling(self) -> None:
138 | async with TemporaryDirectory() as td:
139 | td_path = pathlib.Path(td)
140 | assert td_path.exists() and td_path.is_dir()
141 |
142 | assert not td_path.exists()
143 |
144 | with pytest.raises(FileNotFoundError):
145 | (td_path / "nonexistent.txt").write_text("should fail", encoding="utf-8")
146 |
147 |
148 | @pytest.mark.parametrize(
149 | "suffix, prefix, text, content",
150 | [
151 | (".txt", "mkstemp_", True, "mkstemp"),
152 | (b".txt", b"mkstemp_", False, b"mkstemp"),
153 | ],
154 | )
155 | async def test_mkstemp(
156 | suffix: AnyStr,
157 | prefix: AnyStr,
158 | text: bool,
159 | content: AnyStr,
160 | ) -> None:
161 | fd, path = await mkstemp(suffix=suffix, prefix=prefix, text=text)
162 |
163 | assert isinstance(fd, int)
164 | if text:
165 | assert isinstance(path, str)
166 | else:
167 | assert isinstance(path, bytes)
168 |
169 | if text:
170 | with os.fdopen(fd, "w", encoding="utf-8") as f:
171 | f.write(content)
172 | with open(path, encoding="utf-8") as f:
173 | read_content = f.read()
174 | else:
175 | with os.fdopen(fd, "wb") as f:
176 | f.write(content)
177 | with open(os.fsdecode(path), "rb") as f:
178 | read_content = f.read()
179 |
180 | assert read_content == content
181 |
182 | os.remove(path)
183 |
184 |
185 | @pytest.mark.parametrize("prefix", [b"mkdtemp_", "mkdtemp_"])
186 | async def test_mkdtemp(prefix: AnyStr) -> None:
187 | d = await mkdtemp(prefix=prefix)
188 |
189 | if isinstance(d, bytes):
190 | dp = pathlib.Path(os.fsdecode(d))
191 | else:
192 | dp = pathlib.Path(d)
193 |
194 | assert dp.is_dir()
195 |
196 | shutil.rmtree(dp)
197 |
198 |
199 | async def test_gettemp_functions() -> None:
200 | tdir = await gettempdir()
201 | tdirb = await gettempdirb()
202 |
203 | assert tdir == tempfile.gettempdir()
204 | assert tdirb == tempfile.gettempdirb()
205 |
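A sketch combining the wrappers tested above; they mirror the stdlib tempfile call signatures (the dir/suffix keyword arguments are assumed to pass through as in the stdlib) while running the blocking I/O in worker threads:

    from anyio import NamedTemporaryFile, TemporaryDirectory


    async def scratch() -> None:
        async with TemporaryDirectory() as tmpdir:
            async with NamedTemporaryFile[bytes](dir=tmpdir, suffix=".bin") as af:
                await af.write(b"scratch data")
                await af.seek(0)
                print(await af.read(), "stored at", af.name)
        # both the file and the directory are gone at this point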
--------------------------------------------------------------------------------
/tests/test_to_interpreter.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import sys
4 | from collections.abc import AsyncGenerator
5 | from functools import partial
6 |
7 | import pytest
8 | from pytest import fixture
9 |
10 | from anyio import to_interpreter
11 |
12 | pytestmark = [
13 | pytest.mark.anyio,
14 | pytest.mark.skipif(sys.version_info < (3, 13), reason="requires Python 3.13+"),
15 | ]
16 |
17 |
18 | @fixture(autouse=True)
19 | async def destroy_workers() -> AsyncGenerator[None]:
20 | yield
21 | idle_workers = to_interpreter._idle_workers.get()
22 | for worker in idle_workers:
23 | worker.destroy()
24 |
25 | idle_workers.clear()
26 |
27 |
28 | async def test_run_sync() -> None:
29 | """
30 | Test that the function runs in a different interpreter, and the same interpreter in
31 | both calls.
32 |
33 | """
34 | import _interpreters
35 |
36 | main_interpreter_id, _ = _interpreters.get_current()
37 | interpreter_id, _ = await to_interpreter.run_sync(_interpreters.get_current)
38 | interpreter_id_2, _ = await to_interpreter.run_sync(_interpreters.get_current)
39 | assert interpreter_id == interpreter_id_2
40 | assert interpreter_id != main_interpreter_id
41 |
42 |
43 | async def test_args_kwargs() -> None:
44 | """Test that partial() can be used to pass keyword arguments."""
45 | result = await to_interpreter.run_sync(partial(sorted, reverse=True), ["a", "b"])
46 | assert result == ["b", "a"]
47 |
48 |
49 | async def test_exception() -> None:
50 | """Test that exceptions are delivered properly."""
51 | with pytest.raises(ValueError, match="invalid literal for int"):
52 | assert await to_interpreter.run_sync(int, "a")
53 |
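A sketch of the intended use on Python 3.13+: pushing a CPU-bound call into a subinterpreter so it does not block the event loop; math.factorial is an arbitrary picklable workload:

    import math

    import anyio
    from anyio import to_interpreter


    async def main() -> None:
        # the callable and its arguments must be picklable
        result = await to_interpreter.run_sync(math.factorial, 1000)
        print(result.bit_length())


    anyio.run(main)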
--------------------------------------------------------------------------------
/tests/test_to_process.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import os
4 | import sys
5 | import time
6 | from functools import partial
7 | from pathlib import Path
8 | from unittest.mock import Mock
9 |
10 | import pytest
11 | from pytest import MonkeyPatch
12 |
13 | from anyio import (
14 | CancelScope,
15 | create_task_group,
16 | fail_after,
17 | to_process,
18 | wait_all_tasks_blocked,
19 | )
20 | from anyio.abc import Process
21 |
22 | pytestmark = pytest.mark.anyio
23 |
24 |
25 | async def test_run_sync_in_process_pool() -> None:
26 | """
27 | Test that the function runs in a different process, and the same process in both
28 | calls.
29 |
30 | """
31 | worker_pid = await to_process.run_sync(os.getpid)
32 | assert worker_pid != os.getpid()
33 | assert await to_process.run_sync(os.getpid) == worker_pid
34 |
35 |
36 | async def test_identical_sys_path() -> None:
37 | """Test that partial() can be used to pass keyword arguments."""
38 | assert await to_process.run_sync(eval, "sys.path") == sys.path
39 |
40 |
41 | async def test_partial() -> None:
42 | """Test that partial() can be used to pass keyword arguments."""
43 | assert await to_process.run_sync(partial(sorted, reverse=True), ["a", "b"]) == [
44 | "b",
45 | "a",
46 | ]
47 |
48 |
49 | async def test_exception() -> None:
50 | """Test that exceptions are delivered properly."""
51 | with pytest.raises(ValueError, match="invalid literal for int"):
52 | assert await to_process.run_sync(int, "a")
53 |
54 |
55 | async def test_print() -> None:
56 | """Test that print() won't interfere with parent-worker communication."""
57 | worker_pid = await to_process.run_sync(os.getpid)
58 | await to_process.run_sync(print, "hello")
59 | await to_process.run_sync(print, "world")
60 | assert await to_process.run_sync(os.getpid) == worker_pid
61 |
62 |
63 | async def test_cancel_before() -> None:
64 | """
65 | Test that starting to_process.run_sync() in a cancelled scope does not cause a
66 | worker process to be reserved.
67 |
68 | """
69 | with CancelScope() as scope:
70 | scope.cancel()
71 | await to_process.run_sync(os.getpid)
72 |
73 | pytest.raises(LookupError, to_process._process_pool_workers.get)
74 |
75 |
76 | @pytest.mark.usefixtures("deactivate_blockbuster")
77 | async def test_cancel_during() -> None:
78 | """
79 | Test that cancelling an operation on the worker process causes the process to be
80 | killed.
81 |
82 | """
83 | worker_pid = await to_process.run_sync(os.getpid)
84 | with fail_after(4):
85 | async with create_task_group() as tg:
86 | tg.start_soon(partial(to_process.run_sync, cancellable=True), time.sleep, 5)
87 | await wait_all_tasks_blocked()
88 | tg.cancel_scope.cancel()
89 |
90 | # The previous worker was killed so we should get a new one now
91 | assert await to_process.run_sync(os.getpid) != worker_pid
92 |
93 |
94 | async def test_exec_while_pruning() -> None:
95 | """
96 | Test that in the case when one or more idle workers are pruned, the originally
97 | selected idle worker is re-added to the queue of idle workers.
98 | """
99 |
100 | worker_pid1 = await to_process.run_sync(os.getpid)
101 | workers = to_process._process_pool_workers.get()
102 | idle_workers = to_process._process_pool_idle_workers.get()
103 | real_worker = next(iter(workers))
104 |
105 | fake_idle_process = Mock(Process)
106 | workers.add(fake_idle_process)
107 | try:
108 | # Add a mock worker process that's guaranteed to be eligible for pruning
109 | idle_workers.appendleft(
110 | (fake_idle_process, -to_process.WORKER_MAX_IDLE_TIME - 1)
111 | )
112 |
113 | worker_pid2 = await to_process.run_sync(os.getpid)
114 | assert worker_pid1 == worker_pid2
115 | fake_idle_process.kill.assert_called_once_with()
116 | assert idle_workers[0][0] is real_worker
117 | finally:
118 | workers.discard(fake_idle_process)
119 |
120 |
121 | async def test_nonexistent_main_module(
122 | monkeypatch: MonkeyPatch, tmp_path: Path
123 | ) -> None:
124 | """
125 | Test that worker process creation won't fail if the detected path to the `__main__`
126 | module doesn't exist. Regression test for #696.
127 | """
128 |
129 | script_path = tmp_path / "badscript"
130 | script_path.touch()
131 | monkeypatch.setattr("__main__.__file__", str(script_path / "__main__.py"))
132 | await to_process.run_sync(os.getpid)
133 |
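The equivalent sketch for worker processes; as with multiprocessing, the entry point should be guarded, since worker processes re-import the main module (an assumption mirroring the stdlib's requirement):

    import math

    import anyio
    from anyio import to_process


    async def main() -> None:
        # dispatched to a reusable worker process; arguments and the return
        # value travel via pickle
        print(await to_process.run_sync(math.factorial, 1000))


    if __name__ == "__main__":
        anyio.run(main)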
--------------------------------------------------------------------------------
/tests/test_typedattr.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from collections.abc import Mapping
4 | from typing import Any, Callable
5 |
6 | import pytest
7 |
8 | from anyio import TypedAttributeProvider
9 |
10 |
11 | class DummyAttributeProvider(TypedAttributeProvider):
12 | def get_dummyattr(self) -> str:
13 | raise KeyError("foo")
14 |
15 | @property
16 | def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
17 | return {str: self.get_dummyattr}
18 |
19 |
20 | def test_typedattr_keyerror() -> None:
21 | """
22 | Test that if the extra attribute getter raises KeyError, it won't be confused for a
23 | missing attribute.
24 |
25 | """
26 | with pytest.raises(KeyError, match="^'foo'$"):
27 | DummyAttributeProvider().extra(str)
28 |
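The lookup contract this test pins down, sketched with a hypothetical provider: extra() consults the extra_attributes mapping, a supplied default suppresses the error, and a genuinely missing attribute raises TypedAttributeLookupError:

    from collections.abc import Mapping
    from typing import Any, Callable

    from anyio import TypedAttributeLookupError, TypedAttributeProvider


    class PortProvider(TypedAttributeProvider):
        @property
        def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
            return {int: lambda: 8080}


    provider = PortProvider()
    assert provider.extra(int) == 8080
    assert provider.extra(str, "fallback") == "fallback"
    try:
        provider.extra(str)  # no getter registered for str
    except TypedAttributeLookupError:
        pass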
--------------------------------------------------------------------------------