├── docs ├── .gitignore ├── requirements.txt ├── changelog.rst ├── crash.rst ├── subprocess.rst ├── index.rst ├── conf.py ├── known-limitations.rst ├── remote.rst ├── how-it-works.rst ├── distribution.rst └── how-to.rst ├── changelog ├── 1263.trivial.rst ├── 1261.trivial.rst ├── 1252.feature.rst └── _template.rst ├── example ├── loadscope │ ├── requirements.txt │ ├── tox.ini │ ├── epsilon │ │ └── __init__.py │ └── test │ │ ├── test_beta.py │ │ ├── test_alpha.py │ │ ├── test_gamma.py │ │ └── test_delta.py └── boxed.txt ├── testing ├── util.py ├── conftest.py ├── test_newhooks.py ├── test_plugin.py ├── test_looponfail.py └── test_remote.py ├── .readthedocs.yaml ├── src └── xdist │ ├── __init__.py │ ├── scheduler │ ├── __init__.py │ ├── protocol.py │ ├── loadgroup.py │ ├── loadfile.py │ ├── each.py │ ├── worksteal.py │ ├── load.py │ └── loadscope.py │ ├── _path.py │ ├── report.py │ ├── newhooks.py │ ├── looponfail.py │ ├── plugin.py │ └── remote.py ├── .gitignore ├── .github ├── dependabot.yml ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── deploy.yml │ └── test.yml ├── .pre-commit-config.yaml ├── LICENSE ├── tox.ini ├── README.rst ├── RELEASING.rst └── pyproject.toml /docs/.gitignore: -------------------------------------------------------------------------------- 1 | _build/ 2 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx 2 | sphinx-rtd-theme 3 | -------------------------------------------------------------------------------- /changelog/1263.trivial.rst: -------------------------------------------------------------------------------- 1 | Remove unused `py` imports 2 | -------------------------------------------------------------------------------- /changelog/1261.trivial.rst: -------------------------------------------------------------------------------- 1 | Update a few links in code comments. 2 | -------------------------------------------------------------------------------- /example/loadscope/requirements.txt: -------------------------------------------------------------------------------- 1 | ipdb 2 | pytest 3 | ../../ 4 | -------------------------------------------------------------------------------- /changelog/1252.feature.rst: -------------------------------------------------------------------------------- 1 | Python 3.14 is now tested and supported. 2 | -------------------------------------------------------------------------------- /docs/changelog.rst: -------------------------------------------------------------------------------- 1 | ========= 2 | Changelog 3 | ========= 4 | 5 | .. include:: ../CHANGELOG.rst 6 | -------------------------------------------------------------------------------- /testing/util.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | 3 | 4 | class MyWarning2(UserWarning): 5 | pass 6 | 7 | 8 | def generate_warning() -> None: 9 | warnings.warn(MyWarning2("hello")) 10 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | build: 4 | os: ubuntu-22.04 5 | tools: 6 | python: "3.11" 7 | 8 | sphinx: 9 | configuration: docs/conf.py 10 | 11 | python: 12 | install: 13 | - path: . 
14 | - requirements: docs/requirements.txt 15 | -------------------------------------------------------------------------------- /docs/crash.rst: -------------------------------------------------------------------------------- 1 | When tests crash 2 | ================ 3 | 4 | If a test crashes a worker, pytest-xdist will automatically restart that worker 5 | and report the test’s failure. You can use the ``--max-worker-restart`` option 6 | to limit the number of worker restarts that are allowed, or disable restarting 7 | altogether using ``--max-worker-restart 0``. 8 | -------------------------------------------------------------------------------- /src/xdist/__init__.py: -------------------------------------------------------------------------------- 1 | from xdist._version import version as __version__ 2 | from xdist.plugin import get_xdist_worker_id 3 | from xdist.plugin import is_xdist_controller 4 | from xdist.plugin import is_xdist_master 5 | from xdist.plugin import is_xdist_worker 6 | 7 | 8 | __all__ = [ 9 | "__version__", 10 | "get_xdist_worker_id", 11 | "is_xdist_controller", 12 | "is_xdist_master", 13 | "is_xdist_worker", 14 | ] 15 | -------------------------------------------------------------------------------- /example/loadscope/tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = test 3 | setupdir = {toxinidir}/../../ 4 | 5 | [testenv:test] 6 | basepython = python3 7 | passenv = http_proxy https_proxy 8 | deps = -rrequirements.txt 9 | changedir = {envtmpdir} 10 | commands = 11 | pytest -s -v \ 12 | --doctest-modules \ 13 | --junitxml=tests.xml \ 14 | --dist=loadscope \ 15 | --tx=8*popen \ 16 | {toxinidir}/test \ 17 | {toxinidir}/epsilon 18 | -------------------------------------------------------------------------------- /src/xdist/scheduler/__init__.py: -------------------------------------------------------------------------------- 1 | from xdist.scheduler.each import EachScheduling as EachScheduling 2 | from xdist.scheduler.load import LoadScheduling as LoadScheduling 3 | from xdist.scheduler.loadfile import LoadFileScheduling as LoadFileScheduling 4 | from xdist.scheduler.loadgroup import LoadGroupScheduling as LoadGroupScheduling 5 | from xdist.scheduler.loadscope import LoadScopeScheduling as LoadScopeScheduling 6 | from xdist.scheduler.protocol import Scheduling as Scheduling 7 | from xdist.scheduler.worksteal import WorkStealingScheduling as WorkStealingScheduling 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Automatically generated by `hgimportsvn` 3 | syntax:glob 4 | .svn 5 | .hgsvn 6 | 7 | # These lines are suggested according to the svn:ignore property 8 | # Feel free to enable them by uncommenting them 9 | syntax:glob 10 | *.pyc 11 | *.pyo 12 | *.swp 13 | *.html 14 | *.class 15 | *.orig 16 | 17 | *.sublime-* 18 | .Python 19 | 20 | build/ 21 | dist/ 22 | include/ 23 | lib/ 24 | bin/ 25 | env/ 26 | src/xdist/_version.py* 27 | pytest_xdist.egg-info 28 | issue/ 29 | 3rdparty/ 30 | pytestdebug.log 31 | .tox/ 32 | .cache/ 33 | .pytest_cache/ 34 | .eggs/ 35 | .idea/ 36 | -------------------------------------------------------------------------------- /docs/subprocess.rst: -------------------------------------------------------------------------------- 1 | Running tests in a Python subprocess 2 | ==================================== 3 | 4 | To instantiate a 
``python3.9`` subprocess and send tests to it, you may type:: 5 | 6 | pytest -d --tx popen//python=python3.9 7 | 8 | This will start a subprocess which is run with the ``python3.9`` 9 | Python interpreter, found in your system binary lookup path. 10 | 11 | If you prefix the --tx option value like this:: 12 | 13 | --tx 3*popen//python=python3.9 14 | 15 | then three subprocesses would be created and tests 16 | will be load-balanced across these three processes. 17 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # Keep GitHub Actions up to date with GitHub's Dependabot... 2 | # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot 3 | # https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem 4 | version: 2 5 | updates: 6 | - package-ecosystem: github-actions 7 | directory: / 8 | groups: 9 | github-actions: 10 | patterns: 11 | - "*" # Group all Actions updates into a single larger pull request 12 | schedule: 13 | interval: weekly 14 | -------------------------------------------------------------------------------- /example/loadscope/epsilon/__init__.py: -------------------------------------------------------------------------------- 1 | def epsilon1(arg1, arg2=1000): 2 | """Do epsilon1 3 | 4 | Usage: 5 | 6 | >>> epsilon1(10, 20) 7 | 40 8 | >>> epsilon1(30) 9 | 1040 10 | """ 11 | return arg1 + arg2 + 10 12 | 13 | 14 | def epsilon2(arg1, arg2=1000): 15 | """Do epsilon2 16 | 17 | Usage: 18 | 19 | >>> epsilon2(10, 20) 20 | -20 21 | >>> epsilon2(30) 22 | -980 23 | """ 24 | return arg1 - arg2 - 10 25 | 26 | 27 | def epsilon3(arg1, arg2=1000): 28 | """Do epsilon3 29 | 30 | Usage: 31 | 32 | >>> epsilon3(10, 20) 33 | 200 34 | >>> epsilon3(30) 35 | 30000 36 | """ 37 | return arg1 * arg2 38 | -------------------------------------------------------------------------------- /src/xdist/_path.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Iterator 2 | from itertools import chain 3 | import os 4 | from pathlib import Path 5 | from typing import Callable 6 | 7 | 8 | def visit_path( 9 | path: Path, *, filter: Callable[[Path], bool], recurse: Callable[[Path], bool] 10 | ) -> Iterator[Path]: 11 | """ 12 | Implements the interface of ``py.path.local.visit()`` for Path objects, 13 | to simplify porting the code over from ``py.path.local``. 
14 | """ 15 | for dirpath, dirnames, filenames in os.walk(path): 16 | dirnames[:] = [x for x in dirnames if recurse(Path(dirpath, x))] 17 | for name in chain(dirnames, filenames): 18 | p = Path(dirpath, name) 19 | if filter(p): 20 | yield p 21 | -------------------------------------------------------------------------------- /example/loadscope/test/test_beta.py: -------------------------------------------------------------------------------- 1 | from time import sleep 2 | 3 | 4 | def test_beta0(): 5 | sleep(5) 6 | assert True 7 | 8 | 9 | def test_beta1(): 10 | sleep(5) 11 | assert True 12 | 13 | 14 | def test_beta2(): 15 | sleep(5) 16 | assert True 17 | 18 | 19 | def test_beta3(): 20 | sleep(5) 21 | assert True 22 | 23 | 24 | def test_beta4(): 25 | sleep(5) 26 | assert True 27 | 28 | 29 | def test_beta5(): 30 | sleep(5) 31 | assert True 32 | 33 | 34 | def test_beta6(): 35 | sleep(5) 36 | assert True 37 | 38 | 39 | def test_beta7(): 40 | sleep(5) 41 | assert True 42 | 43 | 44 | def test_beta8(): 45 | sleep(5) 46 | assert True 47 | 48 | 49 | def test_beta9(): 50 | sleep(5) 51 | assert True 52 | -------------------------------------------------------------------------------- /example/loadscope/test/test_alpha.py: -------------------------------------------------------------------------------- 1 | from time import sleep 2 | 3 | 4 | def test_alpha0(): 5 | sleep(5) 6 | assert True 7 | 8 | 9 | def test_alpha1(): 10 | sleep(5) 11 | assert True 12 | 13 | 14 | def test_alpha2(): 15 | sleep(5) 16 | assert True 17 | 18 | 19 | def test_alpha3(): 20 | sleep(5) 21 | assert True 22 | 23 | 24 | def test_alpha4(): 25 | sleep(5) 26 | assert True 27 | 28 | 29 | def test_alpha5(): 30 | sleep(5) 31 | assert True 32 | 33 | 34 | def test_alpha6(): 35 | sleep(5) 36 | assert True 37 | 38 | 39 | def test_alpha7(): 40 | sleep(5) 41 | assert True 42 | 43 | 44 | def test_alpha8(): 45 | sleep(5) 46 | assert True 47 | 48 | 49 | def test_alpha9(): 50 | sleep(5) 51 | assert True 52 | -------------------------------------------------------------------------------- /example/loadscope/test/test_gamma.py: -------------------------------------------------------------------------------- 1 | from time import sleep 2 | 3 | 4 | def test_gamma0(): 5 | sleep(5) 6 | assert True 7 | 8 | 9 | def test_gamma1(): 10 | sleep(5) 11 | assert True 12 | 13 | 14 | def test_gamma2(): 15 | sleep(5) 16 | assert True 17 | 18 | 19 | def test_gamma3(): 20 | sleep(5) 21 | assert True 22 | 23 | 24 | def test_gamma4(): 25 | sleep(5) 26 | assert True 27 | 28 | 29 | def test_gamma5(): 30 | sleep(5) 31 | assert True 32 | 33 | 34 | def test_gamma6(): 35 | sleep(5) 36 | assert True 37 | 38 | 39 | def test_gamma7(): 40 | sleep(5) 41 | assert True 42 | 43 | 44 | def test_gamma8(): 45 | sleep(5) 46 | assert True 47 | 48 | 49 | def test_gamma9(): 50 | sleep(5) 51 | assert True 52 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | Thanks for submitting a PR, your contribution is really appreciated! 
2 | 3 | Here's a quick checklist that should be present in PRs: 4 | 5 | - [ ] Make sure to include reasonable tests for your change if necessary 6 | 7 | - [ ] We use [towncrier](https://pypi.python.org/pypi/towncrier) for changelog management, so please add a *news* file into the `changelog` folder following these guidelines: 8 | * Name it `$issue_id.$type` for example `588.bugfix`; 9 | * If you don't have an issue_id change it to the PR id after creating it 10 | * Ensure type is one of `removal`, `feature`, `bugfix`, `vendor`, `doc` or `trivial` 11 | * Make sure to use full sentences with correct case and punctuation, for example: 12 | 13 | ``` 14 | Fix issue with non-ascii contents in doctest text files. 15 | ``` 16 | -------------------------------------------------------------------------------- /changelog/_template.rst: -------------------------------------------------------------------------------- 1 | {% for section in sections %} 2 | {% set underline = "-" %} 3 | {% if section %} 4 | {{section}} 5 | {{ underline * section|length }}{% set underline = "~" %} 6 | 7 | {% endif %} 8 | {% if sections[section] %} 9 | {% for category, val in definitions.items() if category in sections[section] %} 10 | 11 | {{ definitions[category]['name'] }} 12 | {{ underline * definitions[category]['name']|length }} 13 | 14 | {% if definitions[category]['showcontent'] %} 15 | {% for text, values in sections[section][category]|dictsort(by='value') %} 16 | - `{{ values[0] }} `_: {{ text }} 17 | 18 | {% endfor %} 19 | {% else %} 20 | - {{ sections[section][category]['']|sort|join(', ') }} 21 | 22 | 23 | {% endif %} 24 | {% if sections[section][category]|length == 0 %} 25 | 26 | No significant changes. 27 | 28 | 29 | {% else %} 30 | {% endif %} 31 | {% endfor %} 32 | {% else %} 33 | 34 | No significant changes. 35 | 36 | 37 | {% endif %} 38 | {% endfor %} 39 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/astral-sh/ruff-pre-commit 3 | rev: "v0.14.9" 4 | hooks: 5 | - id: ruff 6 | args: ["--fix"] 7 | - id: ruff-format 8 | - repo: https://github.com/asottile/blacken-docs 9 | rev: 1.20.0 10 | hooks: 11 | - id: blacken-docs 12 | additional_dependencies: [black==23.1.0] 13 | - repo: https://github.com/pre-commit/pre-commit-hooks 14 | rev: v6.0.0 15 | hooks: 16 | - id: check-yaml 17 | - repo: local 18 | hooks: 19 | - id: rst 20 | name: rst 21 | entry: rst-lint 22 | files: ^(CHANGELOG.rst|HOWTORELEASE.rst|README.rst|changelog/.*)$ 23 | language: python 24 | additional_dependencies: [pygments, restructuredtext_lint] 25 | - repo: https://github.com/pre-commit/mirrors-mypy 26 | rev: v1.19.1 27 | hooks: 28 | - id: mypy 29 | files: ^(src/|testing/) 30 | args: [] 31 | additional_dependencies: 32 | - pytest>=7.0.0 33 | - execnet>=2.1.0 34 | - types-psutil 35 | - setproctitle 36 | -------------------------------------------------------------------------------- /src/xdist/report.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from collections.abc import Sequence 4 | from difflib import unified_diff 5 | 6 | 7 | def report_collection_diff( 8 | from_collection: Sequence[str], 9 | to_collection: Sequence[str], 10 | from_id: str, 11 | to_id: str, 12 | ) -> str | None: 13 | """Report the collected test difference between two nodes. 
14 | 15 | :returns: detailed message describing the difference between the given 16 | collections, or None if they are equal. 17 | """ 18 | if from_collection == to_collection: 19 | return None 20 | 21 | diff = unified_diff(from_collection, to_collection, fromfile=from_id, tofile=to_id) 22 | error_message = ( 23 | "Different tests were collected between {from_id} and {to_id}. " 24 | "The difference is:\n" 25 | "{diff}\n" 26 | "To see why this happens see 'Known limitations' in documentation " 27 | "for pytest-xdist" 28 | ).format(from_id=from_id, to_id=to_id, diff="\n".join(diff)) 29 | msg = "\n".join(x.rstrip() for x in error_message.split("\n")) 30 | return msg 31 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2010 Holger Krekel and contributors. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = 3 | linting 4 | py{39,310,311,312,313,314}-pytestlatest 5 | py310-pytestmain 6 | py310-psutil 7 | py310-setproctitle 8 | isolated_build = true 9 | 10 | [testenv] 11 | extras = 12 | testing 13 | psutil: psutil 14 | setproctitle: setproctitle 15 | deps = 16 | pytestmin: pytest==7.0.0 17 | pytestlatest: pytest 18 | pytestmain: git+https://github.com/pytest-dev/pytest.git 19 | commands = 20 | pytest {posargs:{env:_XDIST_TOX_DEFAULT_POSARGS:}} 21 | setenv = 22 | _XDIST_TOX_DEFAULT_POSARGS={env:_XDIST_TOX_POSARGS_PSUTIL:} 23 | psutil: _XDIST_TOX_POSARGS_PSUTIL=-k psutil 24 | 25 | [testenv:linting] 26 | skip_install = True 27 | usedevelop = True 28 | passenv = PRE_COMMIT_HOME 29 | deps = 30 | pre-commit 31 | commands = pre-commit run --all-files --show-diff-on-failure 32 | 33 | [testenv:release] 34 | changedir = 35 | description = do a release, required posarg of the version number 36 | skipsdist = True 37 | usedevelop = True 38 | passenv = * 39 | deps = 40 | towncrier 41 | commands = 42 | towncrier build --version {posargs} --yes 43 | 44 | [testenv:docs] 45 | usedevelop = True 46 | deps = 47 | sphinx 48 | sphinx_rtd_theme 49 | commands = 50 | sphinx-build -W --keep-going -b html docs docs/_build/html {posargs:} 51 | -------------------------------------------------------------------------------- /src/xdist/scheduler/protocol.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from collections.abc import Sequence 4 | from typing import Protocol 5 | 6 | from xdist.workermanage import WorkerController 7 | 8 | 9 | class Scheduling(Protocol): 10 | @property 11 | def nodes(self) -> list[WorkerController]: ... 12 | 13 | @property 14 | def collection_is_completed(self) -> bool: ... 15 | 16 | @property 17 | def tests_finished(self) -> bool: ... 18 | 19 | @property 20 | def has_pending(self) -> bool: ... 21 | 22 | def add_node(self, node: WorkerController) -> None: ... 23 | 24 | def add_node_collection( 25 | self, 26 | node: WorkerController, 27 | collection: Sequence[str], 28 | ) -> None: ... 29 | 30 | def mark_test_complete( 31 | self, 32 | node: WorkerController, 33 | item_index: int, 34 | duration: float = 0, 35 | ) -> None: ... 36 | 37 | def mark_test_pending(self, item: str) -> None: ... 38 | 39 | def remove_pending_tests_from_node( 40 | self, 41 | node: WorkerController, 42 | indices: Sequence[int], 43 | ) -> None: ... 44 | 45 | def remove_node(self, node: WorkerController) -> str | None: ... 46 | 47 | def schedule(self) -> None: ... 48 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | pytest-xdist 3 | ============ 4 | 5 | .. image:: http://img.shields.io/pypi/v/pytest-xdist.svg 6 | :alt: PyPI version 7 | :target: https://pypi.python.org/pypi/pytest-xdist 8 | 9 | .. image:: https://img.shields.io/conda/vn/conda-forge/pytest-xdist.svg 10 | :target: https://anaconda.org/conda-forge/pytest-xdist 11 | 12 | .. image:: https://img.shields.io/pypi/pyversions/pytest-xdist.svg 13 | :alt: Python versions 14 | :target: https://pypi.python.org/pypi/pytest-xdist 15 | 16 | .. 
image:: https://github.com/pytest-dev/pytest-xdist/workflows/test/badge.svg 17 | :target: https://github.com/pytest-dev/pytest-xdist/actions 18 | 19 | .. image:: https://img.shields.io/badge/code%20style-black-000000.svg 20 | :target: https://github.com/ambv/black 21 | 22 | The `pytest-xdist`_ plugin extends pytest with new test execution modes, the most used being distributing 23 | tests across multiple CPUs to speed up test execution:: 24 | 25 | pytest -n auto 26 | 27 | With this call, pytest will spawn a number of workers processes equal to the number of available CPUs, and distribute 28 | the tests randomly across them. 29 | 30 | Documentation 31 | ============= 32 | 33 | Documentation is available at `Read The Docs `__. 34 | -------------------------------------------------------------------------------- /RELEASING.rst: -------------------------------------------------------------------------------- 1 | ====================== 2 | Releasing pytest-xdist 3 | ====================== 4 | 5 | This document describes the steps to make a new ``pytest-xdist`` release. 6 | 7 | Version 8 | ------- 9 | 10 | ``master`` should always be green and a potential release candidate. ``pytest-xdist`` follows 11 | semantic versioning, so given that the current version is ``X.Y.Z``, to find the next version number 12 | one needs to look at the ``changelog`` folder: 13 | 14 | - If there is any file named ``*.feature``, then we must make a new **minor** release: next 15 | release will be ``X.Y+1.0``. 16 | 17 | - Otherwise it is just a **bug fix** release: ``X.Y.Z+1``. 18 | 19 | 20 | Steps 21 | ----- 22 | 23 | To publish a new release ``X.Y.Z``, the steps are as follows: 24 | 25 | #. Create a new branch named ``release-X.Y.Z`` from the latest ``master``. 26 | 27 | #. Install ``tox`` in a virtualenv:: 28 | 29 | $ pip install tox 30 | 31 | #. Update the necessary files with:: 32 | 33 | $ tox -e release -- X.Y.Z 34 | 35 | #. Commit and push the branch to ``upstream`` and open a PR. 36 | 37 | #. Once the PR is **green** and **approved**, start the ``deploy`` workflow manually from the branch ``release-VERSION``, passing ``VERSION`` as parameter, or execute:: 38 | 39 | gh workflow run deploy.yml -R pytest-dev/pytest-xdist --ref release-X.Y.Z --field version=X.Y.Z 40 | 41 | #. Merge the release PR to ``master``. 42 | -------------------------------------------------------------------------------- /.github/workflows/deploy.yml: -------------------------------------------------------------------------------- 1 | name: deploy 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | version: 7 | description: 'Release version' 8 | required: true 9 | default: '1.2.3' 10 | 11 | jobs: 12 | 13 | package: 14 | runs-on: ubuntu-latest 15 | env: 16 | SETUPTOOLS_SCM_PRETEND_VERSION: ${{ github.event.inputs.version }} 17 | 18 | steps: 19 | - uses: actions/checkout@v6 20 | 21 | - name: Build and Check Package 22 | uses: hynek/build-and-inspect-python-package@v2.14 23 | 24 | deploy: 25 | needs: package 26 | runs-on: ubuntu-latest 27 | environment: deploy 28 | permissions: 29 | id-token: write # For PyPI trusted publishers. 30 | contents: write # For tag. 
31 | 32 | steps: 33 | - uses: actions/checkout@v6 34 | 35 | - name: Download Package 36 | uses: actions/download-artifact@v7 37 | with: 38 | name: Packages 39 | path: dist 40 | 41 | - name: Publish package to PyPI 42 | uses: pypa/gh-action-pypi-publish@v1.13.0 43 | with: 44 | attestations: true 45 | 46 | - name: Push tag 47 | run: | 48 | git config user.name "pytest bot" 49 | git config user.email "pytestbot@gmail.com" 50 | git tag --annotate --message=v${{ github.event.inputs.version }} v${{ github.event.inputs.version }} ${{ github.sha }} 51 | git push origin v${{ github.event.inputs.version }} 52 | 53 | - name: GitHub Release 54 | uses: softprops/action-gh-release@v2 55 | with: 56 | files: dist/* 57 | tag_name: v${{ github.event.inputs.version }} 58 | -------------------------------------------------------------------------------- /example/loadscope/test/test_delta.py: -------------------------------------------------------------------------------- 1 | from time import sleep 2 | from unittest import TestCase 3 | 4 | 5 | class Delta1(TestCase): 6 | def test_delta0(self): 7 | sleep(5) 8 | assert True 9 | 10 | def test_delta1(self): 11 | sleep(5) 12 | assert True 13 | 14 | def test_delta2(self): 15 | sleep(5) 16 | assert True 17 | 18 | def test_delta3(self): 19 | sleep(5) 20 | assert True 21 | 22 | def test_delta4(self): 23 | sleep(5) 24 | assert True 25 | 26 | def test_delta5(self): 27 | sleep(5) 28 | assert True 29 | 30 | def test_delta6(self): 31 | sleep(5) 32 | assert True 33 | 34 | def test_delta7(self): 35 | sleep(5) 36 | assert True 37 | 38 | def test_delta8(self): 39 | sleep(5) 40 | assert True 41 | 42 | def test_delta9(self): 43 | sleep(5) 44 | assert True 45 | 46 | 47 | class Delta2(TestCase): 48 | def test_delta0(self): 49 | sleep(5) 50 | assert True 51 | 52 | def test_delta1(self): 53 | sleep(5) 54 | assert True 55 | 56 | def test_delta2(self): 57 | sleep(5) 58 | assert True 59 | 60 | def test_delta3(self): 61 | sleep(5) 62 | assert True 63 | 64 | def test_delta4(self): 65 | sleep(5) 66 | assert True 67 | 68 | def test_delta5(self): 69 | sleep(5) 70 | assert True 71 | 72 | def test_delta6(self): 73 | sleep(5) 74 | assert True 75 | 76 | def test_delta7(self): 77 | sleep(5) 78 | assert True 79 | 80 | def test_delta8(self): 81 | sleep(5) 82 | assert True 83 | 84 | def test_delta9(self): 85 | sleep(5) 86 | assert True 87 | -------------------------------------------------------------------------------- /testing/conftest.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from collections.abc import Generator 4 | import shutil 5 | from typing import Callable 6 | 7 | import execnet 8 | import pytest 9 | 10 | 11 | pytest_plugins = "pytester" 12 | 13 | 14 | @pytest.fixture(autouse=True) 15 | def _divert_atexit(monkeypatch: pytest.MonkeyPatch) -> Generator[None]: 16 | import atexit 17 | 18 | finalizers = [] 19 | 20 | def fake_register( 21 | func: Callable[..., object], *args: object, **kwargs: object 22 | ) -> None: 23 | finalizers.append((func, args, kwargs)) 24 | 25 | monkeypatch.setattr(atexit, "register", fake_register) 26 | 27 | yield 28 | 29 | while finalizers: 30 | func, args, kwargs = finalizers.pop() 31 | func(*args, **kwargs) 32 | 33 | 34 | def pytest_addoption(parser: pytest.Parser) -> None: 35 | parser.addoption( 36 | "--gx", 37 | action="append", 38 | dest="gspecs", 39 | help="add a global test environment, XSpec-syntax. 
", 40 | ) 41 | 42 | 43 | @pytest.fixture 44 | def specssh(request: pytest.FixtureRequest) -> str: 45 | return getspecssh(request.config) 46 | 47 | 48 | # configuration information for tests 49 | def getgspecs(config: pytest.Config) -> list[execnet.XSpec]: 50 | return [execnet.XSpec(spec) for spec in config.getvalueorskip("gspecs")] 51 | 52 | 53 | def getspecssh(config: pytest.Config) -> str: 54 | xspecs = getgspecs(config) 55 | for spec in xspecs: 56 | if spec.ssh: 57 | if not shutil.which("ssh"): 58 | pytest.skip("command not found: ssh") 59 | return str(spec) 60 | pytest.skip("need '--gx ssh=...'") 61 | 62 | 63 | def getsocketspec(config: pytest.Config) -> execnet.XSpec: 64 | xspecs = getgspecs(config) 65 | for spec in xspecs: 66 | if spec.socket: 67 | return spec 68 | pytest.skip("need '--gx socket=...'") 69 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | pytest-xdist 2 | ============ 3 | 4 | The `pytest-xdist`_ plugin extends pytest with new test execution modes, the most used being distributing 5 | tests across multiple CPUs to speed up test execution:: 6 | 7 | pytest -n auto 8 | 9 | With this call, pytest will spawn a number of workers processes equal to the number of available CPUs, and distribute 10 | the tests randomly across them. 11 | 12 | .. note:: 13 | Due to how pytest-xdist is implemented, the ``-s/--capture=no`` option does not work. 14 | 15 | 16 | Installation 17 | ------------ 18 | 19 | Install the plugin with:: 20 | 21 | pip install pytest-xdist 22 | 23 | 24 | To use ``psutil`` for detection of the number of CPUs available, install the ``psutil`` extra:: 25 | 26 | pip install pytest-xdist[psutil] 27 | 28 | Features 29 | -------- 30 | 31 | * Test run :ref:`parallelization`: tests can be executed across multiple CPUs or hosts. 32 | This allows to speed up development or to use special resources of :ref:`remote machines`. 33 | 34 | * ``--looponfail``: run your tests repeatedly in a subprocess. After each run 35 | pytest waits until a file in your project changes and then re-runs 36 | the previously failing tests. This is repeated until all tests pass 37 | after which again a full run is performed (DEPRECATED). 38 | 39 | * :ref:`Multi-Platform` coverage: you can specify different Python interpreters 40 | or different platforms and run tests in parallel on all of them. 41 | 42 | Before running tests remotely, ``pytest`` efficiently "rsyncs" your 43 | program source code to the remote place. 44 | You may specify different Python versions and interpreters. It does not 45 | installs/synchronize dependencies however. 46 | 47 | **Note**: this mode exists mostly for backward compatibility, as modern development 48 | relies on continuous integration for multi-platform testing. 49 | 50 | 51 | 52 | .. toctree:: 53 | :maxdepth: 2 54 | :caption: Contents: 55 | 56 | distribution 57 | subprocess 58 | remote 59 | crash 60 | how-to 61 | how-it-works 62 | known-limitations 63 | changelog 64 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. 
For a full 4 | # list see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here. 12 | # 13 | # import os 14 | # import sys 15 | # sys.path.insert(0, os.path.abspath('.')) 16 | 17 | 18 | # -- Project information ----------------------------------------------------- 19 | 20 | project = "pytest-xdist" 21 | copyright = "2010, holger krekel and contributors" 22 | author = "holger krekel and contributors" 23 | 24 | master_doc = "index" 25 | 26 | # -- General configuration --------------------------------------------------- 27 | 28 | # Add any Sphinx extension module names here, as strings. They can be 29 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 30 | # ones. 31 | extensions = [ 32 | "sphinx_rtd_theme", 33 | "sphinx.ext.autodoc", 34 | ] 35 | 36 | # Add any paths that contain templates here, relative to this directory. 37 | templates_path = ["_templates"] 38 | 39 | # List of patterns, relative to source directory, that match files and 40 | # directories to ignore when looking for source files. 41 | # This pattern also affects html_static_path and html_extra_path. 42 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] 43 | 44 | 45 | # -- Options for HTML output ------------------------------------------------- 46 | 47 | # The theme to use for HTML and HTML Help pages. See the documentation for 48 | # a list of builtin themes. 49 | # 50 | html_theme = "sphinx_rtd_theme" 51 | 52 | # Add any paths that contain custom static files (such as style sheets) here, 53 | # relative to this directory. They are copied after the builtin static files, 54 | # so a file named "default.css" will overwrite the builtin "default.css". 55 | # html_static_path = ['_static'] 56 | -------------------------------------------------------------------------------- /src/xdist/scheduler/loadgroup.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import pytest 4 | 5 | from xdist.remote import Producer 6 | 7 | from .loadscope import LoadScopeScheduling 8 | 9 | 10 | class LoadGroupScheduling(LoadScopeScheduling): 11 | """Implement load scheduling across nodes, but grouping test by xdist_group mark. 12 | 13 | This class behaves very much like LoadScopeScheduling, but it groups tests by xdist_group mark 14 | instead of the module or class to which they belong to. 15 | """ 16 | 17 | def __init__(self, config: pytest.Config, log: Producer | None = None) -> None: 18 | super().__init__(config, log) 19 | if log is None: 20 | self.log = Producer("loadgroupsched") 21 | else: 22 | self.log = log.loadgroupsched 23 | 24 | def _split_scope(self, nodeid: str) -> str: 25 | """Determine the scope (grouping) of a nodeid. 26 | 27 | There are usually 3 cases for a nodeid:: 28 | 29 | example/loadsuite/test/test_beta.py::test_beta0 30 | example/loadsuite/test/test_delta.py::Delta1::test_delta0 31 | example/loadsuite/epsilon/__init__.py::epsilon.epsilon 32 | 33 | #. Function in a test module. 34 | #. Method of a class in a test module. 35 | #. Doctest in a function in a package. 
36 | 37 | With loadgroup, two cases are added:: 38 | 39 | example/loadsuite/test/test_beta.py::test_beta0 40 | example/loadsuite/test/test_delta.py::Delta1::test_delta0 41 | example/loadsuite/epsilon/__init__.py::epsilon.epsilon 42 | example/loadsuite/test/test_gamma.py::test_beta0@gname 43 | example/loadsuite/test/test_delta.py::Gamma1::test_gamma0@gname 44 | 45 | This function will group tests with the scope determined by splitting the first ``@`` 46 | from the right. That is, tests will be grouped in a single work unit when they have 47 | the same group name. In the above example, scopes will be:: 48 | 49 | example/loadsuite/test/test_beta.py::test_beta0 50 | example/loadsuite/test/test_delta.py::Delta1::test_delta0 51 | example/loadsuite/epsilon/__init__.py::epsilon.epsilon 52 | gname 53 | gname 54 | """ 55 | if nodeid.rfind("@") > nodeid.rfind("]"): 56 | # check the index of ']' to avoid the case: parametrize mark value has '@' 57 | return nodeid.split("@")[-1] 58 | else: 59 | return nodeid 60 | -------------------------------------------------------------------------------- /src/xdist/scheduler/loadfile.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import pytest 4 | 5 | from xdist.remote import Producer 6 | 7 | from .loadscope import LoadScopeScheduling 8 | 9 | 10 | class LoadFileScheduling(LoadScopeScheduling): 11 | """Implement load scheduling across nodes, but grouping tests by test file. 12 | 13 | This distributes the tests collected across all nodes so each test is run 14 | just once. All nodes collect and submit the list of tests and when all 15 | collections are received it is verified they are identical collections. 16 | Then the collection gets divided up in work units, grouped by test file, 17 | and those work units get submitted to nodes. Whenever a node finishes an 18 | item, it calls ``.mark_test_complete()`` which will trigger the scheduler 19 | to assign more work units if the number of pending tests for the node falls 20 | below a low-watermark. 21 | 22 | When created, ``numnodes`` defines how many nodes are expected to submit a 23 | collection. This is used to know when all nodes have finished collection. 24 | 25 | This class behaves very much like LoadScopeScheduling, but with a file-level scope. 26 | """ 27 | 28 | def __init__(self, config: pytest.Config, log: Producer | None = None) -> None: 29 | super().__init__(config, log) 30 | if log is None: 31 | self.log = Producer("loadfilesched") 32 | else: 33 | self.log = log.loadfilesched 34 | 35 | def _split_scope(self, nodeid: str) -> str: 36 | """Determine the scope (grouping) of a nodeid. 37 | 38 | There are usually 3 cases for a nodeid:: 39 | 40 | example/loadsuite/test/test_beta.py::test_beta0 41 | example/loadsuite/test/test_delta.py::Delta1::test_delta0 42 | example/loadsuite/epsilon/__init__.py::epsilon.epsilon 43 | 44 | #. Function in a test module. 45 | #. Method of a class in a test module. 46 | #. Doctest in a function in a package. 47 | 48 | This function will group tests with the scope determined by splitting 49 | the first ``::`` from the left. That is, tests will be grouped in a 50 | single work unit when they reside in the same file. 51 | 52 | In the above example, scopes will be:: 53 | 54 | .. 
code-block:: text 55 | 56 | example/loadsuite/test/test_beta.py 57 | example/loadsuite/test/test_delta.py 58 | example/loadsuite/epsilon/__init__.py 59 | """ 60 | return nodeid.split("::", 1)[0] 61 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: test 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | - "test-me-*" 8 | 9 | pull_request: 10 | branches: 11 | - "*" 12 | 13 | # Cancel running jobs for the same workflow and branch. 14 | concurrency: 15 | group: ${{ github.workflow }}-${{ github.ref }} 16 | cancel-in-progress: true 17 | 18 | jobs: 19 | 20 | package: 21 | runs-on: ubuntu-latest 22 | steps: 23 | - uses: actions/checkout@v6 24 | - name: Build and Check Package 25 | uses: hynek/build-and-inspect-python-package@v2.14 26 | 27 | test: 28 | 29 | needs: [package] 30 | 31 | runs-on: ${{ matrix.os }} 32 | 33 | strategy: 34 | fail-fast: false 35 | matrix: 36 | tox_env: 37 | - "py39-pytestmin" 38 | - "py39-pytestlatest" 39 | - "py310-pytestlatest" 40 | - "py311-pytestlatest" 41 | - "py311-pytestmain" 42 | - "py312-pytestlatest" 43 | - "py313-pytestlatest" 44 | - "py314-pytestlatest" 45 | - "py310-psutil" 46 | - "py310-setproctitle" 47 | 48 | os: [ubuntu-latest, windows-latest] 49 | include: 50 | - tox_env: "py39-pytestmin" 51 | python: "3.9" 52 | - tox_env: "py39-pytestlatest" 53 | python: "3.9" 54 | - tox_env: "py310-pytestlatest" 55 | python: "3.10" 56 | - tox_env: "py311-pytestlatest" 57 | python: "3.11" 58 | - tox_env: "py311-pytestmain" 59 | python: "3.11" 60 | - tox_env: "py312-pytestlatest" 61 | python: "3.12" 62 | - tox_env: "py313-pytestlatest" 63 | python: "3.13" 64 | - tox_env: "py314-pytestlatest" 65 | python: "~3.14.0-0" 66 | - tox_env: "py310-psutil" 67 | python: "3.10" 68 | - tox_env: "py310-setproctitle" 69 | python: "3.10" 70 | 71 | steps: 72 | - uses: actions/checkout@v6 73 | with: 74 | # Needed to fetch tags, which are required by setuptools-scm. 75 | fetch-depth: 0 76 | 77 | - name: Download Package 78 | uses: actions/download-artifact@v7 79 | with: 80 | name: Packages 81 | path: dist 82 | 83 | - name: Set up Python 84 | uses: actions/setup-python@v6 85 | with: 86 | python-version: ${{ matrix.python }} 87 | 88 | - name: Install tox 89 | run: | 90 | python -m pip install --upgrade pip 91 | pip install tox 92 | 93 | - name: Test 94 | shell: bash 95 | run: | 96 | tox run -e ${{ matrix.tox_env }} --installpkg "$(find dist/*.whl)" 97 | -------------------------------------------------------------------------------- /docs/known-limitations.rst: -------------------------------------------------------------------------------- 1 | Known limitations 2 | ================= 3 | 4 | pytest-xdist has some limitations that may be supported in pytest but can't be supported in pytest-xdist. 5 | 6 | Order and amount of test must be consistent 7 | ------------------------------------------- 8 | 9 | It is not possible to have tests that differ in order or their amount across workers. 10 | 11 | This is especially true with ``pytest.mark.parametrize``, when values are produced with sets or other unordered iterables/generators. 12 | 13 | 14 | Example: 15 | 16 | .. 
code-block:: python 17 | 18 | import pytest 19 | 20 | 21 | @pytest.mark.parametrize("param", {"a", "b"}) 22 | def test_pytest_parametrize_unordered(param): 23 | pass 24 | 25 | In the example above, the fact that ``set`` are not necessarily ordered can cause different workers 26 | to collect tests in different order, which will throw an error. 27 | 28 | Workarounds 29 | ~~~~~~~~~~~ 30 | 31 | A solution to this is to guarantee that the parametrized values have the same order. 32 | 33 | Some solutions: 34 | 35 | * Convert your sequence to a ``list``. 36 | 37 | .. code-block:: python 38 | 39 | import pytest 40 | 41 | 42 | @pytest.mark.parametrize("param", ["a", "b"]) 43 | def test_pytest_parametrize_unordered(param): 44 | pass 45 | 46 | * Sort your sequence, guaranteeing order. 47 | 48 | .. code-block:: python 49 | 50 | import pytest 51 | 52 | 53 | @pytest.mark.parametrize("param", sorted({"a", "b"})) 54 | def test_pytest_parametrize_unordered(param): 55 | pass 56 | 57 | Output (stdout and stderr) from workers 58 | --------------------------------------- 59 | 60 | The ``-s``/``--capture=no`` option is meant to disable pytest capture, so users can then see stdout and stderr output in the terminal from tests and application code in real time. 61 | 62 | However, this option does not work with ``pytest-xdist`` because `execnet `__ the underlying library used for communication between master and workers, does not support transferring stdout/stderr from workers. 63 | 64 | Currently, there are no plans to support this in ``pytest-xdist``. 65 | 66 | Debugging 67 | ~~~~~~~~~ 68 | 69 | This also means that debugging using PDB (or any other debugger that wants to use standard I/O) will not work. The ``--pdb`` option is disabled when distributing tests with ``pytest-xdist`` for this reason. 70 | 71 | It is generally likely best to use ``pytest-xdist`` to find failing tests and then debug them without distribution; however, if you need to debug from within a worker process (for example, to address failures that only happen when running tests concurrently), remote debuggers (for example, `python-remote-pdb `__ or `python-web-pdb `__) have been reported to work for this purpose. 72 | -------------------------------------------------------------------------------- /example/boxed.txt: -------------------------------------------------------------------------------- 1 | .. warning:: 2 | 3 | Since 1.19.0, the actual implementation of the ``--boxed`` option has been moved to a 4 | separate plugin, `pytest-forked `_ 5 | which can be installed independently. The ``--boxed`` command-line option is deprecated 6 | and will be removed in pytest-xdist 3.0.0; use ``--forked`` from pytest-forked instead. 7 | 8 | 9 | If your testing involves C or C++ libraries you might have to deal 10 | with crashing processes. The xdist-plugin provides the ``--boxed`` option 11 | to run each test in a controlled subprocess. 
Here is a basic example:: 12 | 13 | # content of test_module.py 14 | 15 | import pytest 16 | import os 17 | import time 18 | 19 | # run test function 50 times with different argument 20 | @pytest.mark.parametrize("arg", range(50)) 21 | def test_func(arg): 22 | time.sleep(0.05) # each tests takes a while 23 | if arg % 19 == 0: 24 | os.kill(os.getpid(), 15) 25 | 26 | If you run this with:: 27 | 28 | $ pytest -n1 29 | =========================== test session starts ============================ 30 | platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev8 31 | plugins: xdist, bugzilla, cache, oejskit, cli, pep8, cov 32 | collecting ... collected 50 items 33 | 34 | test_module.py f..................f..................f........... 35 | 36 | ================================= FAILURES ================================= 37 | _______________________________ test_func[0] _______________________________ 38 | /home/hpk/tmp/doc-exec-420/test_module.py:6: running the test CRASHED with signal 15 39 | ______________________________ test_func[19] _______________________________ 40 | /home/hpk/tmp/doc-exec-420/test_module.py:6: running the test CRASHED with signal 15 41 | ______________________________ test_func[38] _______________________________ 42 | /home/hpk/tmp/doc-exec-420/test_module.py:6: running the test CRASHED with signal 15 43 | =================== 3 failed, 47 passed in 3.41 seconds ==================== 44 | 45 | You'll see that a couple of tests are reported as crashing, indicated 46 | by lower-case ``f`` and the respective failure summary. You can also use 47 | the xdist-provided parallelization feature to speed up your testing:: 48 | 49 | $ pytest -n3 50 | =========================== test session starts ============================ 51 | platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev8 52 | plugins: xdist, bugzilla, cache, oejskit, cli, pep8, cov 53 | gw0 I / gw1 I / gw2 I 54 | gw0 [50] / gw1 [50] / gw2 [50] 55 | 56 | scheduling tests via LoadScheduling 57 | ..f...............f..................f............ 58 | ================================= FAILURES ================================= 59 | _______________________________ test_func[0] _______________________________ 60 | [gw0] linux2 -- Python 2.7.3 /home/hpk/venv/1/bin/python 61 | /home/hpk/tmp/doc-exec-420/test_module.py:6: running the test CRASHED with signal 15 62 | ______________________________ test_func[19] _______________________________ 63 | [gw2] linux2 -- Python 2.7.3 /home/hpk/venv/1/bin/python 64 | /home/hpk/tmp/doc-exec-420/test_module.py:6: running the test CRASHED with signal 15 65 | ______________________________ test_func[38] _______________________________ 66 | [gw2] linux2 -- Python 2.7.3 /home/hpk/venv/1/bin/python 67 | /home/hpk/tmp/doc-exec-420/test_module.py:6: running the test CRASHED with signal 15 68 | =================== 3 failed, 47 passed in 2.03 seconds ==================== 69 | -------------------------------------------------------------------------------- /docs/remote.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _`Multi-Platform`: 3 | .. _`remote machines`: 4 | 5 | Sending tests to remote SSH accounts 6 | ==================================== 7 | 8 | .. deprecated:: 3.0 9 | 10 | .. warning:: 11 | 12 | The ``rsync`` feature is deprecated because its implementation is faulty 13 | in terms of reproducing the development environment in the remote 14 | worker, and there is no clear solution moving forward. 
15 | 16 | For that reason, ``rsync`` is scheduled to be removed in release 4.0, to let the team 17 | focus on a smaller set of features. 18 | 19 | Note that SSH and socket server are not planned for removal, as they are part 20 | of the ``execnet`` feature set. 21 | 22 | Suppose you have a package ``mypkg`` which contains some 23 | tests that you can successfully run locally. And you 24 | have a ssh-reachable machine ``myhost``. Then 25 | you can ad-hoc distribute your tests by typing:: 26 | 27 | pytest -d --rsyncdir mypkg --tx ssh=myhostpopen mypkg/tests/unit/test_something.py 28 | 29 | This will synchronize your :code:`mypkg` package directory 30 | to a remote ssh account and then locally collect tests 31 | and send them to remote places for execution. 32 | 33 | You can specify multiple :code:`--rsyncdir` directories 34 | to be sent to the remote side. 35 | 36 | .. note:: 37 | 38 | For pytest to collect and send tests correctly 39 | you not only need to make sure all code and tests 40 | directories are rsynced, but that any test (sub) directory 41 | also has an :code:`__init__.py` file because internally 42 | pytest references tests as a fully qualified python 43 | module path. **You will otherwise get strange errors** 44 | during setup of the remote side. 45 | 46 | 47 | You can specify multiple :code:`--rsyncignore` glob patterns 48 | to be ignored when file are sent to the remote side. 49 | There are also internal ignores: :code:`.*, *.pyc, *.pyo, *~` 50 | Those you cannot override using rsyncignore command-line or 51 | ini-file option(s). 52 | 53 | 54 | Sending tests to remote Socket Servers 55 | -------------------------------------- 56 | 57 | Download the single-module `socketserver.py`_ Python program 58 | and run it like this:: 59 | 60 | python socketserver.py 61 | 62 | It will tell you that it starts listening on the default 63 | port. You can now on your home machine specify this 64 | new socket host with something like this:: 65 | 66 | pytest -d --tx socket=192.168.1.102:8888 --rsyncdir mypkg 67 | 68 | 69 | Using proxies to run multiple workers on remote machines 70 | --------------------------------------- 71 | 72 | In case you want to run multiple workers on a remote machine, 73 | you can create a proxy gateway for the machine, and run multiple 74 | workers using the `via` attribute.:: 75 | 76 | pytest -d --px id=my_proxy//socket=192.168.1.102:8888 --tx 5*popen//via=my_proxy 77 | 78 | Here we declare a proxy gateway using the `--px` arg, and 79 | create 5 workers that run on the remote server using the proxy. 80 | Note that the proxy gateway does not run a worker, thus only 5 81 | workers are created. 82 | 83 | 84 | Running tests on many platforms at once 85 | --------------------------------------- 86 | 87 | The basic command to run tests on multiple platforms is:: 88 | 89 | pytest --dist=each --tx=spec1 --tx=spec2 90 | 91 | If you specify a windows host, an OSX host and a Linux 92 | environment this command will send each tests to all 93 | platforms - and report back failures from all platforms 94 | at once. The specifications strings use the `xspec syntax`_. 95 | 96 | .. _`xspec syntax`: https://codespeak.net/execnet/basics.html#xspec 97 | 98 | .. _`execnet`: https://codespeak.net/execnet 99 | 100 | .. 
_`socketserver.py`: https://raw.githubusercontent.com/pytest-dev/execnet/master/src/execnet/script/socketserver.py 101 | -------------------------------------------------------------------------------- /src/xdist/newhooks.py: -------------------------------------------------------------------------------- 1 | """ 2 | xdist hooks. 3 | 4 | Additionally, pytest-xdist will also decorate a few other hooks 5 | with the worker instance that executed the hook originally: 6 | 7 | ``pytest_runtest_logreport``: ``rep`` parameter has a ``node`` attribute. 8 | 9 | You can use this hooks just as you would use normal pytest hooks, but some care 10 | must be taken in plugins in case ``xdist`` is not installed. Please see: 11 | 12 | https://pytest.org/en/latest/how-to/writing_hook_functions.html#optionally-using-hooks-from-3rd-party-plugins 13 | """ 14 | 15 | from __future__ import annotations 16 | 17 | from collections.abc import Sequence 18 | import os 19 | from typing import Any 20 | from typing import TYPE_CHECKING 21 | 22 | import execnet 23 | import pytest 24 | 25 | 26 | if TYPE_CHECKING: 27 | from xdist.remote import Producer 28 | from xdist.scheduler.protocol import Scheduling 29 | from xdist.workermanage import WorkerController 30 | 31 | 32 | @pytest.hookspec() 33 | def pytest_xdist_setupnodes( 34 | config: pytest.Config, specs: Sequence[execnet.XSpec] 35 | ) -> None: 36 | """Called before any remote node is set up.""" 37 | 38 | 39 | @pytest.hookspec() 40 | def pytest_xdist_newgateway(gateway: execnet.Gateway) -> None: 41 | """Called on new raw gateway creation.""" 42 | 43 | 44 | @pytest.hookspec( 45 | warn_on_impl=DeprecationWarning( 46 | "rsync feature is deprecated and will be removed in pytest-xdist 4.0" 47 | ) 48 | ) 49 | def pytest_xdist_rsyncstart( 50 | source: str | os.PathLike[str], 51 | gateways: Sequence[execnet.Gateway], 52 | ) -> None: 53 | """Called before rsyncing a directory to remote gateways takes place.""" 54 | 55 | 56 | @pytest.hookspec( 57 | warn_on_impl=DeprecationWarning( 58 | "rsync feature is deprecated and will be removed in pytest-xdist 4.0" 59 | ) 60 | ) 61 | def pytest_xdist_rsyncfinish( 62 | source: str | os.PathLike[str], 63 | gateways: Sequence[execnet.Gateway], 64 | ) -> None: 65 | """Called after rsyncing a directory to remote gateways takes place.""" 66 | 67 | 68 | @pytest.hookspec(firstresult=True) 69 | def pytest_xdist_getremotemodule() -> Any: 70 | """Called when creating remote node.""" 71 | 72 | 73 | @pytest.hookspec() 74 | def pytest_configure_node(node: WorkerController) -> None: 75 | """Configure node information before it gets instantiated.""" 76 | 77 | 78 | @pytest.hookspec() 79 | def pytest_testnodeready(node: WorkerController) -> None: 80 | """Test Node is ready to operate.""" 81 | 82 | 83 | @pytest.hookspec() 84 | def pytest_testnodedown(node: WorkerController, error: object | None) -> None: 85 | """Test Node is down.""" 86 | 87 | 88 | @pytest.hookspec() 89 | def pytest_xdist_node_collection_finished( 90 | node: WorkerController, ids: Sequence[str] 91 | ) -> None: 92 | """Called by the controller node when a worker node finishes collecting.""" 93 | 94 | 95 | @pytest.hookspec(firstresult=True) 96 | def pytest_xdist_make_scheduler( 97 | config: pytest.Config, log: Producer 98 | ) -> Scheduling | None: 99 | """Return a node scheduler implementation.""" 100 | 101 | 102 | @pytest.hookspec(firstresult=True) 103 | def pytest_xdist_auto_num_workers(config: pytest.Config) -> int: 104 | """ 105 | Return the number of workers to spawn when 
``--numprocesses=auto`` is given in the 106 | command-line. 107 | 108 | .. versionadded:: 2.1 109 | """ 110 | raise NotImplementedError() 111 | 112 | 113 | @pytest.hookspec(firstresult=True) 114 | def pytest_handlecrashitem( 115 | crashitem: str, report: pytest.TestReport, sched: Scheduling 116 | ) -> None: 117 | """ 118 | Handle a crashitem, modifying the report if necessary. 119 | 120 | The scheduler is provided as a parameter to reschedule the test if desired with 121 | `sched.mark_test_pending`. 122 | 123 | def pytest_handlecrashitem(crashitem, report, sched): 124 | if should_rerun(crashitem): 125 | sched.mark_test_pending(crashitem) 126 | report.outcome = "rerun" 127 | 128 | .. versionadded:: 2.2.1 129 | """ 130 | -------------------------------------------------------------------------------- /docs/how-it-works.rst: -------------------------------------------------------------------------------- 1 | How it works? 2 | ============= 3 | 4 | ``xdist`` works by spawning one or more **workers**, which are 5 | controlled by the **controller**. Each **worker** is responsible for 6 | performing a full test collection and afterwards running tests as 7 | dictated by the **controller**. 8 | 9 | The execution flow is: 10 | 11 | 1. **controller** spawns one or more **workers** at the beginning of the 12 | test session. The communication between **controller** and **worker** 13 | nodes makes use of `execnet `__ and 14 | its 15 | `gateways `__. 16 | The actual interpreters executing the code for the **workers** might 17 | be remote or local. 18 | 19 | 2. Each **worker** itself is a mini pytest runner. **workers** at this 20 | point perform a full test collection, sending back the collected 21 | test-ids back to the **controller** which does not perform any 22 | collection itself. 23 | 24 | 3. The **controller** receives the result of the collection from all 25 | nodes. At this point the **controller** performs some sanity check to 26 | ensure that all **workers** collected the same tests (including 27 | order), bailing out otherwise. If all is well, it converts the list 28 | of test-ids into a list of simple indexes, where each index 29 | corresponds to the position of that test in the original collection 30 | list. This works because all nodes have the same collection list, and 31 | saves bandwidth because the **controller** can now tell one of the 32 | workers to just *execute test index 3* instead of passing the full 33 | test id. 34 | 35 | 4. If **dist-mode** is **each**: the **controller** just sends the full 36 | list of test indexes to each node at this moment. 37 | 38 | 5. If **dist-mode** is **load**: the **controller** takes around 25% of 39 | the tests and sends them one by one to each **worker** in a round 40 | robin fashion. The rest of the tests will be distributed later as 41 | **workers** finish tests (see below). 42 | 43 | 6. Note that ``pytest_xdist_make_scheduler`` hook can be used to 44 | implement custom tests distribution logic. 45 | 46 | 7. **workers** re-implement ``pytest_runtestloop``: pytest’s default 47 | implementation basically loops over all collected items in the 48 | ``session`` object and executes the ``pytest_runtest_protocol`` for 49 | each test item, but in xdist **workers** sit idly waiting for 50 | **controller** to send tests for execution. As tests are received by 51 | **workers**, ``pytest_runtest_protocol`` is executed for each test. 
52 | Here it is worth noting an implementation detail: **workers** always 53 | must keep at least one test item on their queue due to how the 54 | ``pytest_runtest_protocol(item, nextitem)`` hook is defined: in order 55 | to pass the ``nextitem`` to the hook, the worker must wait for more 56 | instructions from the controller before executing that remaining test. If 57 | it receives more tests, then it can safely call 58 | ``pytest_runtest_protocol`` because it knows what the ``nextitem`` 59 | parameter will be. If it receives a “shutdown” signal, then it can 60 | execute the hook passing ``nextitem`` as ``None``. 61 | 62 | 8. As tests are started and completed at the **workers**, the results 63 | are sent back to the **controller**, which then just forwards the 64 | results to the appropriate pytest hooks: ``pytest_runtest_logstart`` 65 | and ``pytest_runtest_logreport``. This way other plugins (for example 66 | ``junitxml``) can work normally. The **controller** (when in 67 | dist-mode **load**) decides to send more tests to a node when a test 68 | completes, using heuristics such as test durations and how many 69 | tests each **worker** still has to run. 70 | 71 | 9. When the **controller** has no more pending tests it will send a 72 | “shutdown” signal to all **workers**, which will then run their 73 | remaining tests to completion and shut down. At this point the 74 | **controller** will sit waiting for **workers** to shut down, still 75 | processing events such as ``pytest_runtest_logreport``. 76 | 77 | FAQ 78 | --- 79 | 80 | **Question**: Why does each worker do its own collection, as opposed to having the 81 | controller collect once and distribute from that collection to the 82 | workers? 83 | 84 | If collection were performed by the controller, it would have to 85 | serialize the collected items to send them over the wire, because workers 86 | live in separate processes. The problem is that test items are difficult 87 | (perhaps impossible) to serialize, as they contain references to the test 88 | functions, fixture managers, config objects, etc. Even if one managed to 89 | serialize them, it would be very hard to get right and easy to 90 | break with any small change in pytest. 
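Step 6 above notes that the ``pytest_xdist_make_scheduler`` hook can supply custom distribution logic, but no example is given. Below is a minimal sketch only, assuming you are content to reuse one of the schedulers shipped with ``xdist``; the decision to switch plain ``--dist load`` runs over to work stealing is an arbitrary illustration, not a recommendation:

.. code-block:: python

    # content of conftest.py (illustrative sketch)
    from xdist.scheduler.worksteal import WorkStealingScheduling


    def pytest_xdist_make_scheduler(config, log):
        # Hypothetical policy: use work stealing whenever plain "load"
        # distribution was requested.
        if config.getoption("dist") == "load":
            return WorkStealingScheduling(config, log)
        # Returning None lets other implementations (or the built-in
        # default) provide the scheduler, since the hook is firstresult.
        return None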
91 | -------------------------------------------------------------------------------- /testing/test_newhooks.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | class TestHooks: 5 | @pytest.fixture(autouse=True) 6 | def create_test_file(self, pytester: pytest.Pytester) -> None: 7 | pytester.makepyfile( 8 | """ 9 | import os 10 | def test_a(): pass 11 | def test_b(): pass 12 | def test_c(): pass 13 | """ 14 | ) 15 | 16 | def test_runtest_logreport(self, pytester: pytest.Pytester) -> None: 17 | """Test that log reports from pytest_runtest_logreport when running with 18 | xdist contain "node", "nodeid", "worker_id", and "testrun_uid" 19 | attributes (#8).""" 20 | pytester.makeconftest( 21 | """ 22 | def pytest_runtest_logreport(report): 23 | if hasattr(report, 'node'): 24 | if report.when == "call": 25 | workerid = report.node.workerinput['workerid'] 26 | testrunuid = report.node.workerinput['testrunuid'] 27 | if workerid != report.worker_id: 28 | print("HOOK: Worker id mismatch: %s %s" 29 | % (workerid, report.worker_id)) 30 | elif testrunuid != report.testrun_uid: 31 | print("HOOK: Testrun uid mismatch: %s %s" 32 | % (testrunuid, report.testrun_uid)) 33 | else: 34 | print("HOOK: %s %s %s" 35 | % (report.nodeid, report.worker_id, report.testrun_uid)) 36 | """ 37 | ) 38 | res = pytester.runpytest("-n1", "-s") 39 | res.stdout.fnmatch_lines( 40 | [ 41 | "*HOOK: test_runtest_logreport.py::test_a gw0 *", 42 | "*HOOK: test_runtest_logreport.py::test_b gw0 *", 43 | "*HOOK: test_runtest_logreport.py::test_c gw0 *", 44 | "*3 passed*", 45 | ] 46 | ) 47 | 48 | def test_node_collection_finished(self, pytester: pytest.Pytester) -> None: 49 | """Test pytest_xdist_node_collection_finished hook (#8).""" 50 | pytester.makeconftest( 51 | """ 52 | def pytest_xdist_node_collection_finished(node, ids): 53 | workerid = node.workerinput['workerid'] 54 | stripped_ids = [x.split('::')[1] for x in ids] 55 | print("HOOK: %s %s" % (workerid, ', '.join(stripped_ids))) 56 | """ 57 | ) 58 | res = pytester.runpytest("-n2", "-s") 59 | res.stdout.fnmatch_lines_random( 60 | ["*HOOK: gw0 test_a, test_b, test_c", "*HOOK: gw1 test_a, test_b, test_c"] 61 | ) 62 | res.stdout.fnmatch_lines(["*3 passed*"]) 63 | 64 | 65 | class TestCrashItem: 66 | @pytest.fixture(autouse=True) 67 | def create_test_file(self, pytester: pytest.Pytester) -> None: 68 | pytester.makepyfile( 69 | """ 70 | import os 71 | def test_a(): pass 72 | def test_b(): os._exit(1) 73 | def test_c(): pass 74 | def test_d(): pass 75 | """ 76 | ) 77 | 78 | def test_handlecrashitem(self, pytester: pytest.Pytester) -> None: 79 | """Test pytest_handlecrashitem hook.""" 80 | pytester.makeconftest( 81 | """ 82 | test_runs = 0 83 | 84 | def pytest_handlecrashitem(crashitem, report, sched): 85 | global test_runs 86 | 87 | if test_runs == 0: 88 | sched.mark_test_pending(crashitem) 89 | test_runs = 1 90 | else: 91 | print("HOOK: pytest_handlecrashitem") 92 | """ 93 | ) 94 | res = pytester.runpytest("-n2", "-s") 95 | res.stdout.fnmatch_lines_random(["*HOOK: pytest_handlecrashitem"]) 96 | res.stdout.fnmatch_lines(["*3 passed*"]) 97 | 98 | def test_handlecrashitem_one(self, pytester: pytest.Pytester) -> None: 99 | """Test pytest_handlecrashitem hook with just one test.""" 100 | pytester.makeconftest( 101 | """ 102 | test_runs = 0 103 | 104 | def pytest_handlecrashitem(crashitem, report, sched): 105 | global test_runs 106 | 107 | if test_runs == 0: 108 | sched.mark_test_pending(crashitem) 109 | test_runs = 1 110 | else: 
111 | print("HOOK: pytest_handlecrashitem") 112 | """ 113 | ) 114 | res = pytester.runpytest("-n1", "-s", "-k", "test_b") 115 | res.stdout.fnmatch_lines_random(["*HOOK: pytest_handlecrashitem"]) 116 | res.stdout.fnmatch_lines( 117 | [ 118 | "FAILED test_handlecrashitem_one.py::test_b*", 119 | "FAILED test_handlecrashitem_one.py::test_b*", 120 | ] 121 | ) 122 | -------------------------------------------------------------------------------- /docs/distribution.rst: -------------------------------------------------------------------------------- 1 | .. _parallelization: 2 | 3 | Running tests across multiple CPUs 4 | ================================== 5 | 6 | To send tests to multiple CPUs, use the ``-n`` (or ``--numprocesses``) option:: 7 | 8 | pytest -n auto 9 | 10 | This can lead to considerable speed ups, especially if your test suite takes a 11 | noticeable amount of time. 12 | 13 | With ``-n auto``, pytest-xdist will use as many processes as your computer 14 | has physical CPU cores. 15 | 16 | Use ``-n logical`` to use the number of *logical* CPU cores rather than 17 | physical ones. This currently requires the `psutil `__ package to be installed; 18 | if it is not or if it fails to determine the number of logical CPUs, fall back to ``-n auto`` behavior. 19 | 20 | Pass a number, e.g. ``-n 8``, to specify the number of processes explicitly. 21 | 22 | Use ``-n 0`` to disable xdist and run all tests in the main process. 23 | 24 | To specify a different meaning for ``-n auto`` and ``-n logical`` for your 25 | tests, you can: 26 | 27 | * Set the environment variable ``PYTEST_XDIST_AUTO_NUM_WORKERS`` to the 28 | desired number of processes. 29 | 30 | * Implement the ``pytest_xdist_auto_num_workers`` 31 | `pytest hook `__ 32 | (a ``pytest_xdist_auto_num_workers(config)`` function in e.g. ``conftest.py``) 33 | that returns the number of processes to use. 34 | The hook can use ``config.option.numprocesses`` to determine if the user 35 | asked for ``"auto"`` or ``"logical"``, and it can return ``None`` to fall 36 | back to the default. 37 | 38 | If both the hook and environment variable are specified, the hook takes 39 | priority. 40 | 41 | 42 | Parallelization can be configured further with these options: 43 | 44 | * ``--maxprocesses=maxprocesses``: limit the maximum number of workers to 45 | process the tests. 46 | 47 | * ``--max-worker-restart``: maximum number of workers that can be restarted 48 | when crashed (set to zero to disable this feature). 49 | 50 | The test distribution algorithm is configured with the ``--dist`` command-line option: 51 | 52 | .. _distribution modes: 53 | 54 | * ``--dist load`` **(default)**: Sends pending tests to any worker that is 55 | available, without any guaranteed order. Scheduling can be fine-tuned with 56 | the `--maxschedchunk` option, see output of `pytest --help`. 57 | 58 | * ``--dist loadscope``: Tests are grouped by **module** for *test functions* 59 | and by **class** for *test methods*. Groups are distributed to available 60 | workers as whole units. This guarantees that all tests in a group run in the 61 | same process. This can be useful if you have expensive module-level or 62 | class-level fixtures. Grouping by class takes priority over grouping by 63 | module. 64 | 65 | * ``--dist loadfile``: Tests are grouped by their containing file. Groups are 66 | distributed to available workers as whole units. This guarantees that all 67 | tests in a file run in the same worker. 
68 | 69 | * ``--dist loadgroup``: Tests are grouped by the ``xdist_group`` mark. Groups are 70 | distributed to available workers as whole units. This guarantees that all 71 | tests with the same ``xdist_group`` name run in the same worker. If a test has 72 | multiple groups, they will be joined together into a new group; 73 | the order of the marks doesn't matter. This works along with marks from fixtures 74 | and from the pytestmark global variable. 75 | 76 | .. code-block:: python 77 | 78 | @pytest.mark.xdist_group(name="group1") 79 | def test1(): 80 | pass 81 | 82 | 83 | class TestA: 84 | @pytest.mark.xdist_group("group1") 85 | def test2(self): 86 | pass 87 | 88 | This will make sure ``test1`` and ``TestA::test2`` run in the same worker. 89 | 90 | .. code-block:: python 91 | 92 | @pytest.fixture( 93 | scope="session", 94 | params=[ 95 | pytest.param( 96 | "chrome", 97 | marks=pytest.mark.xdist_group("chrome"), 98 | ), 99 | pytest.param( 100 | "firefox", 101 | marks=pytest.mark.xdist_group("firefox"), 102 | ), 103 | pytest.param( 104 | "edge", 105 | marks=pytest.mark.xdist_group("edge"), 106 | ), 107 | ], 108 | ) 109 | def setup_container(): 110 | pass 111 | 112 | 113 | @pytest.mark.xdist_group(name="data-store") 114 | def test_data_store(setup_container): 115 | ... 116 | 117 | This will generate 3 new groups: ``chrome_data-store``, ``data-store_firefox`` and ``data-store_edge`` (the markers are lexically sorted before being merged together). 118 | 119 | Tests without the ``xdist_group`` mark are distributed normally as in the ``--dist=load`` mode. 120 | 121 | * ``--dist worksteal``: Initially, tests are distributed evenly among all 122 | available workers. When a worker completes most of its assigned tests and 123 | doesn't have enough tests to continue (currently, every worker needs at least 124 | two tests in its queue), an attempt is made to reassign ("steal") a portion 125 | of tests from some other worker's queue. The results should be similar to 126 | the ``load`` method, but ``worksteal`` should handle tests with significantly 127 | differing duration better, and, at the same time, it should provide similar 128 | or better reuse of fixtures. 129 | 130 | * ``--dist no``: The normal pytest execution mode, runs one test at a time (no distribution at all). 
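The ``pytest_xdist_auto_num_workers`` hook described earlier on this page is easiest to grasp with a small example. The sketch below is illustrative only: the ``CI`` environment variable and the cap of four workers are assumptions, not part of pytest-xdist.

.. code-block:: python

    # content of conftest.py (illustrative sketch)
    import os


    def pytest_xdist_auto_num_workers(config):
        # Cap parallelism on an assumed CI machine, but only when the user
        # asked for automatic detection rather than an explicit number.
        if config.option.numprocesses == "auto" and os.environ.get("CI"):
            return 4
        # None falls back to the default detection (or to the
        # PYTEST_XDIST_AUTO_NUM_WORKERS environment variable, if set).
        return None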
131 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | "setuptools>=77.0", 4 | "setuptools-scm[toml]>=6.2.3", 5 | ] 6 | build-backend = "setuptools.build_meta" 7 | 8 | [project] 9 | name = "pytest-xdist" 10 | description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs" 11 | readme = "README.rst" 12 | license = "MIT" 13 | authors = [{name = "holger krekel and contributors", email = "pytest-dev@python.org"}, {email = "holger@merlinux.eu"}] 14 | classifiers = [ 15 | "Development Status :: 5 - Production/Stable", 16 | "Framework :: Pytest", 17 | "Intended Audience :: Developers", 18 | "Operating System :: POSIX", 19 | "Operating System :: Microsoft :: Windows", 20 | "Operating System :: MacOS :: MacOS X", 21 | "Topic :: Software Development :: Testing", 22 | "Topic :: Software Development :: Quality Assurance", 23 | "Topic :: Utilities", 24 | "Programming Language :: Python", 25 | "Programming Language :: Python :: 3", 26 | "Programming Language :: Python :: 3 :: Only", 27 | "Programming Language :: Python :: 3.9", 28 | "Programming Language :: Python :: 3.10", 29 | "Programming Language :: Python :: 3.11", 30 | "Programming Language :: Python :: 3.12", 31 | "Programming Language :: Python :: 3.13", 32 | "Programming Language :: Python :: 3.14", 33 | ] 34 | requires-python = ">=3.9" 35 | dependencies = [ 36 | "execnet>=2.1", 37 | "pytest>=7.0.0", 38 | ] 39 | dynamic = ["version"] 40 | 41 | [project.urls] 42 | Homepage = "https://github.com/pytest-dev/pytest-xdist" 43 | Documentation = "https://pytest-xdist.readthedocs.io/en/latest" 44 | Changelog = "https://pytest-xdist.readthedocs.io/en/latest/changelog.html" 45 | Source = "https://github.com/pytest-dev/pytest-xdist" 46 | Tracker = "https://github.com/pytest-dev/pytest-xdist/issues" 47 | 48 | [project.entry-points.pytest11] 49 | xdist = "xdist.plugin" 50 | "xdist.looponfail" = "xdist.looponfail" 51 | 52 | [project.optional-dependencies] 53 | testing = ["filelock"] 54 | psutil = ["psutil>=3.0"] 55 | setproctitle = ["setproctitle"] 56 | 57 | [tool.setuptools_scm] 58 | write_to = "src/xdist/_version.py" 59 | 60 | [tool.pytest.ini_options] 61 | # pytest-services also defines a worker_id fixture, disable 62 | # it so they don't conflict with each other (#611). 
63 | addopts = "-ra -p no:pytest-services" 64 | testpaths = ["testing"] 65 | 66 | [tool.ruff] 67 | src = ["src"] 68 | 69 | [tool.ruff.format] 70 | docstring-code-format = true 71 | 72 | [tool.ruff.lint] 73 | select = [ 74 | "B", # bugbear 75 | "D", # pydocstyle 76 | "E", # pycodestyle 77 | "F", # pyflakes 78 | "I", # isort 79 | "PYI", # flake8-pyi 80 | "UP", # pyupgrade 81 | "RUF", # ruff 82 | "W", # pycodestyle 83 | "T10", # flake8-debugger 84 | "PIE", # flake8-pie 85 | "FA", # flake8-future-annotations 86 | "PGH", # pygrep-hooks 87 | "PLE", # pylint error 88 | "PLW", # pylint warning 89 | "PLR1714", # Consider merging multiple comparisons 90 | ] 91 | ignore = [ 92 | # bugbear ignore 93 | "B011", # Do not `assert False` (`python -O` removes these calls) 94 | "B028", # No explicit `stacklevel` keyword argument found 95 | # pydocstyle ignore 96 | "D100", # Missing docstring in public module 97 | "D101", # Missing docstring in public class 98 | "D102", # Missing docstring in public method 99 | "D103", # Missing docstring in public function 100 | "D104", # Missing docstring in public package 101 | "D105", # Missing docstring in magic method 102 | "D106", # Missing docstring in public nested class 103 | "D107", # Missing docstring in `__init__` 104 | "D209", # Multi-line docstring closing quotes should be on a separate line 105 | "D205", # 1 blank line required between summary line and description 106 | "D400", # First line should end with a period 107 | "D401", # First line of docstring should be in imperative mood 108 | # ruff ignore 109 | "RUF012", # Mutable class attributes should be annotated with `typing.ClassVar` 110 | # pylint ignore 111 | "PLW0603", # Using the global statement 112 | "PLW0120", # remove the else and dedent its contents 113 | "PLW2901", # for loop variable overwritten by assignment target 114 | "PLR5501", # Use `elif` instead of `else` then `if` 115 | "UP031", # Use format specifiers instead of percent format 116 | ] 117 | 118 | [tool.ruff.lint.pycodestyle] 119 | # In order to be able to format for 88 char in ruff format 120 | max-line-length = 120 121 | 122 | [tool.ruff.lint.pydocstyle] 123 | convention = "pep257" 124 | 125 | [tool.ruff.lint.isort] 126 | force-single-line = true 127 | combine-as-imports = true 128 | force-sort-within-sections = true 129 | order-by-type = false 130 | lines-after-imports = 2 131 | 132 | [tool.ruff.lint.per-file-ignores] 133 | "src/xdist/_version.py" = ["I001"] 134 | 135 | [tool.mypy] 136 | mypy_path = ["src"] 137 | files = ["src", "testing"] 138 | strict = true 139 | warn_unreachable = true 140 | [[tool.mypy.overrides]] 141 | module = ["xdist._version"] 142 | ignore_missing_imports = true 143 | 144 | 145 | [tool.towncrier] 146 | package = "xdist" 147 | filename = "CHANGELOG.rst" 148 | directory = "changelog/" 149 | title_format = "pytest-xdist {version} ({project_date})" 150 | template = "changelog/_template.rst" 151 | 152 | [tool.towncrier.fragment.removal] 153 | name = "Removals" 154 | 155 | [tool.towncrier.fragment.deprecation] 156 | name = "Deprecations" 157 | 158 | [tool.towncrier.fragment.feature] 159 | name = "Features" 160 | 161 | [tool.towncrier.fragment.bugfix] 162 | name = "Bug Fixes" 163 | 164 | [tool.towncrier.fragment.vendor] 165 | name = "Vendored Libraries" 166 | 167 | [tool.towncrier.fragment.doc] 168 | name = "Improved Documentation" 169 | 170 | [tool.towncrier.fragment.trivial] 171 | name = "Trivial Changes" 172 | -------------------------------------------------------------------------------- 
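Note that the ``[project.optional-dependencies]`` table above defines the ``testing``, ``psutil`` and ``setproctitle`` extras; with a pip-based install they can be pulled in alongside the plugin, e.g. ``pip install "pytest-xdist[psutil]"``, which is what makes ``-n logical`` usable.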
/src/xdist/scheduler/each.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from collections.abc import Sequence 4 | 5 | import pytest 6 | 7 | from xdist.remote import Producer 8 | from xdist.report import report_collection_diff 9 | from xdist.workermanage import parse_tx_spec_config 10 | from xdist.workermanage import WorkerController 11 | 12 | 13 | class EachScheduling: 14 | """Implement scheduling of test items on all nodes. 15 | 16 | If a node gets added after the test run is started then it is 17 | assumed to replace a node which got removed before it finished 18 | its collection. In this case it will only be used if a node 19 | with the same spec got removed earlier. 20 | 21 | Any nodes added after the run is started will only get items 22 | assigned if a node with a matching spec was removed before it 23 | finished all its pending items. The new node will then be 24 | assigned the remaining items from the removed node. 25 | """ 26 | 27 | def __init__(self, config: pytest.Config, log: Producer | None = None) -> None: 28 | self.config = config 29 | self.numnodes = len(parse_tx_spec_config(config)) 30 | self.node2collection: dict[WorkerController, list[str]] = {} 31 | self.node2pending: dict[WorkerController, list[int]] = {} 32 | self._started: list[WorkerController] = [] 33 | self._removed2pending: dict[WorkerController, list[int]] = {} 34 | if log is None: 35 | self.log = Producer("eachsched") 36 | else: 37 | self.log = log.eachsched 38 | self.collection_is_completed = False 39 | 40 | @property 41 | def nodes(self) -> list[WorkerController]: 42 | """A list of all nodes in the scheduler.""" 43 | return list(self.node2pending.keys()) 44 | 45 | @property 46 | def tests_finished(self) -> bool: 47 | if not self.collection_is_completed: 48 | return False 49 | if self._removed2pending: 50 | return False 51 | for pending in self.node2pending.values(): 52 | if len(pending) >= 2: 53 | return False 54 | return True 55 | 56 | @property 57 | def has_pending(self) -> bool: 58 | """Return True if there are pending test items. 59 | 60 | This indicates that collection has finished and nodes are 61 | still processing test items, so this can be thought of as 62 | "the scheduler is active". 63 | """ 64 | for pending in self.node2pending.values(): 65 | if pending: 66 | return True 67 | return False 68 | 69 | def add_node(self, node: WorkerController) -> None: 70 | assert node not in self.node2pending 71 | self.node2pending[node] = [] 72 | 73 | def add_node_collection( 74 | self, node: WorkerController, collection: Sequence[str] 75 | ) -> None: 76 | """Add the collected test items from a node. 77 | 78 | Collection is complete once all nodes have submitted their 79 | collection. In this case its pending list is set to an empty 80 | list. When the collection is already completed this 81 | submission is from a node which was restarted to replace a 82 | dead node. In this case we already assign the pending items 83 | here. In either case ``.schedule()`` will instruct the 84 | node to start running the required tests. 
85 | """ 86 | assert node in self.node2pending 87 | if not self.collection_is_completed: 88 | self.node2collection[node] = list(collection) 89 | self.node2pending[node] = [] 90 | if len(self.node2collection) >= self.numnodes: 91 | self.collection_is_completed = True 92 | elif self._removed2pending: 93 | for deadnode in self._removed2pending: 94 | if deadnode.gateway.spec == node.gateway.spec: 95 | dead_collection = self.node2collection[deadnode] 96 | if collection != dead_collection: 97 | msg = report_collection_diff( 98 | dead_collection, 99 | collection, 100 | deadnode.gateway.id, 101 | node.gateway.id, 102 | ) 103 | self.log(msg) 104 | return 105 | pending = self._removed2pending.pop(deadnode) 106 | self.node2pending[node] = pending 107 | break 108 | 109 | def mark_test_complete( 110 | self, node: WorkerController, item_index: int, duration: float = 0 111 | ) -> None: 112 | self.node2pending[node].remove(item_index) 113 | 114 | def mark_test_pending(self, item: str) -> None: 115 | raise NotImplementedError() 116 | 117 | def remove_pending_tests_from_node( 118 | self, 119 | node: WorkerController, 120 | indices: Sequence[int], 121 | ) -> None: 122 | raise NotImplementedError() 123 | 124 | def remove_node(self, node: WorkerController) -> str | None: 125 | # KeyError if we didn't get an add_node() yet 126 | pending = self.node2pending.pop(node) 127 | if not pending: 128 | return None 129 | crashitem = self.node2collection[node][pending.pop(0)] 130 | if pending: 131 | self._removed2pending[node] = pending 132 | return crashitem 133 | 134 | def schedule(self) -> None: 135 | """Schedule the test items on the nodes. 136 | 137 | If the node's pending list is empty it is a new node which 138 | needs to run all the tests. If the pending list is already 139 | populated (by ``.add_node_collection()``) then it replaces a 140 | dead node and we only need to run those tests. 141 | """ 142 | assert self.collection_is_completed 143 | for node, pending in self.node2pending.items(): 144 | if node in self._started: 145 | continue 146 | if not pending: 147 | pending[:] = range(len(self.node2collection[node])) 148 | node.send_runtest_all() 149 | node.shutdown() 150 | else: 151 | node.send_runtest_some(pending) 152 | self._started.append(node) 153 | -------------------------------------------------------------------------------- /docs/how-to.rst: -------------------------------------------------------------------------------- 1 | How-tos 2 | ------- 3 | 4 | This section show cases how to accomplish some specialized tasks with ``pytest-xdist``. 5 | 6 | Identifying the worker process during a test 7 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 8 | 9 | *New in version 1.15.* 10 | 11 | If you need to determine the identity of a worker process in 12 | a test or fixture, you may use the ``worker_id`` fixture to do so: 13 | 14 | .. code-block:: python 15 | 16 | @pytest.fixture() 17 | def user_account(worker_id): 18 | """use a different account in each xdist worker""" 19 | return "account_%s" % worker_id 20 | 21 | When ``xdist`` is disabled (running with ``-n0`` for example), then 22 | ``worker_id`` will return ``"master"``. 23 | 24 | Worker processes also have the following environment variables 25 | defined: 26 | 27 | .. envvar:: PYTEST_XDIST_WORKER 28 | 29 | The name of the worker, e.g., ``"gw2"``. 30 | 31 | .. envvar:: PYTEST_XDIST_WORKER_COUNT 32 | 33 | The total number of workers in this session, e.g., ``"4"`` when ``-n 4`` is given in the command-line. 
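These environment variables can also be used outside of fixtures, for example to carve out a distinct resource per worker. The helper below is only a sketch under assumed conditions: the base port of 8000 and the one-service-per-worker idea are illustrative, not something pytest-xdist provides.

.. code-block:: python

    import os


    def worker_service_port(base_port: int = 8000) -> int:
        # PYTEST_XDIST_WORKER looks like "gw2"; it is unset when xdist is
        # not active (or when running in the controller process).
        worker = os.environ.get("PYTEST_XDIST_WORKER")
        if worker is None:
            return base_port
        return base_port + int(worker[2:])  # "gw2" -> 8002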
34 | 35 | The information about the worker_id in a test is stored in the ``TestReport`` as 36 | well, under the ``worker_id`` attribute. 37 | 38 | Since version 2.0, the following functions are also available in the ``xdist`` module: 39 | 40 | 41 | .. autofunction:: xdist.is_xdist_worker 42 | .. autofunction:: xdist.is_xdist_controller 43 | .. autofunction:: xdist.is_xdist_master 44 | .. autofunction:: xdist.get_xdist_worker_id 45 | 46 | Identifying workers from the system environment 47 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 48 | 49 | *New in version 2.4* 50 | 51 | If the `setproctitle`_ package is installed, ``pytest-xdist`` will use it to 52 | update the process title (command line) on its workers to show their current 53 | state. The titles used are ``[pytest-xdist running] file.py/node::id`` and 54 | ``[pytest-xdist idle]``, visible in standard tools like ``ps`` and ``top`` on 55 | Linux, Mac OS X and BSD systems. For Windows, please follow `setproctitle`_'s 56 | pointer regarding the Process Explorer tool. 57 | 58 | This is intended purely as an UX enhancement, e.g. to track down issues with 59 | long-running or CPU intensive tests. Errors in changing the title are ignored 60 | silently. Please try not to rely on the title format or title changes in 61 | external scripts. 62 | 63 | .. _`setproctitle`: https://pypi.org/project/setproctitle/ 64 | 65 | 66 | Uniquely identifying the current test run 67 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 68 | 69 | *New in version 1.32.* 70 | 71 | If you need to globally distinguish one test run from others in your 72 | workers, you can use the ``testrun_uid`` fixture. For instance, let's say you 73 | wanted to create a separate database for each test run: 74 | 75 | .. code-block:: python 76 | 77 | import pytest 78 | from posix_ipc import Semaphore, O_CREAT 79 | 80 | 81 | @pytest.fixture(scope="session", autouse=True) 82 | def create_unique_database(testrun_uid): 83 | """create a unique database for this particular test run""" 84 | database_url = f"psql://myapp-{testrun_uid}" 85 | 86 | with Semaphore(f"/{testrun_uid}-lock", flags=O_CREAT, initial_value=1): 87 | if not database_exists(database_url): 88 | create_database(database_url) 89 | 90 | 91 | @pytest.fixture() 92 | def db(testrun_uid): 93 | """retrieve unique database""" 94 | database_url = f"psql://myapp-{testrun_uid}" 95 | return database_get_instance(database_url) 96 | 97 | 98 | Additionally, during a test run, the following environment variable is defined: 99 | 100 | .. envvar:: PYTEST_XDIST_TESTRUNUID 101 | 102 | The unique id of the test run. 103 | 104 | Accessing ``sys.argv`` from the controller node in workers 105 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 106 | 107 | To access the ``sys.argv`` passed to the command-line of the controller node, use 108 | ``request.config.workerinput["mainargv"]``. 109 | 110 | 111 | Specifying test exec environments in an ini file 112 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 113 | 114 | You can use pytest's ini file configuration to avoid typing common options. 115 | You can for example make running with three subprocesses your default like this: 116 | 117 | .. code-block:: ini 118 | 119 | [pytest] 120 | addopts = -n3 121 | 122 | You can also add default environments like this: 123 | 124 | .. 
code-block:: ini 125 | 126 | [pytest] 127 | addopts = --tx ssh=myhost//python=python3.9 --tx ssh=myhost//python=python3.6 128 | 129 | and then just type:: 130 | 131 | pytest --dist=each 132 | 133 | to run tests in each of the environments. 134 | 135 | 136 | Specifying "rsync" dirs in an ini-file 137 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 138 | 139 | In a ``tox.ini`` or ``setup.cfg`` file in your root project directory 140 | you may specify directories to include or to exclude in synchronisation: 141 | 142 | .. code-block:: ini 143 | 144 | [pytest] 145 | rsyncdirs = . mypkg helperpkg 146 | rsyncignore = .hg 147 | 148 | These directory specifications are relative to the directory 149 | where the configuration file was found. 150 | 151 | .. _`pytest-xdist`: http://pypi.python.org/pypi/pytest-xdist 152 | .. _`pytest-xdist repository`: https://github.com/pytest-dev/pytest-xdist 153 | .. _`pytest`: http://pytest.org 154 | 155 | 156 | Making session-scoped fixtures execute only once 157 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 158 | 159 | ``pytest-xdist`` is designed so that each worker process will perform its own collection and execute 160 | a subset of all tests. This means that tests in different processes requesting a high-level 161 | scoped fixture (for example ``session``) will execute the fixture code more than once, which 162 | breaks expectations and might be undesired in certain situations. 163 | 164 | While ``pytest-xdist`` does not have a builtin support for ensuring a session-scoped fixture is 165 | executed exactly once, this can be achieved by using a lock file for inter-process communication. 166 | 167 | The example below needs to execute the fixture ``session_data`` only once (because it is 168 | resource intensive, or needs to execute only once to define configuration options, etc), so it makes 169 | use of a `FileLock `_ to produce the fixture data only once 170 | when the first process requests the fixture, while the other processes will then read 171 | the data from a file. 172 | 173 | Here is the code: 174 | 175 | .. code-block:: python 176 | 177 | import json 178 | 179 | import pytest 180 | from filelock import FileLock 181 | 182 | 183 | @pytest.fixture(scope="session") 184 | def session_data(tmp_path_factory, worker_id): 185 | if worker_id == "master": 186 | # not executing in with multiple workers, just produce the data and let 187 | # pytest's fixture caching do its job 188 | return produce_expensive_data() 189 | 190 | # get the temp directory shared by all workers 191 | root_tmp_dir = tmp_path_factory.getbasetemp().parent 192 | 193 | fn = root_tmp_dir / "data.json" 194 | with FileLock(str(fn) + ".lock"): 195 | if fn.is_file(): 196 | data = json.loads(fn.read_text()) 197 | else: 198 | data = produce_expensive_data() 199 | fn.write_text(json.dumps(data)) 200 | return data 201 | 202 | 203 | The example above can also be use in cases a fixture needs to execute exactly once per test session, like 204 | initializing a database service and populating initial tables. 205 | 206 | This technique might not work for every case, but should be a starting point for many situations 207 | where executing a high-scope fixture exactly once is important. 208 | 209 | 210 | Creating one log file for each worker 211 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 212 | 213 | To create one log file for each worker with ``pytest-xdist``, you can leverage :envvar:`PYTEST_XDIST_WORKER` 214 | to generate a unique filename for each worker. 215 | 216 | Example: 217 | 218 | .. 
code-block:: python 219 | 220 | # content of conftest.py 221 | def pytest_configure(config): 222 | worker_id = os.environ.get("PYTEST_XDIST_WORKER") 223 | if worker_id is not None: 224 | logging.basicConfig( 225 | format=config.getini("log_file_format"), 226 | filename=f"tests_{worker_id}.log", 227 | level=config.getini("log_file_level"), 228 | ) 229 | 230 | 231 | When running the tests with ``-n3``, for example, three files will be created in the current directory: 232 | ``tests_gw0.log``, ``tests_gw1.log`` and ``tests_gw2.log``. 233 | -------------------------------------------------------------------------------- /src/xdist/looponfail.py: -------------------------------------------------------------------------------- 1 | """ 2 | Implement -f aka looponfailing for pytest. 3 | 4 | NOTE that we try to avoid loading and depending on application modules 5 | within the controlling process (the one that starts repeatedly test 6 | processes) otherwise changes to source code can crash 7 | the controlling process which should best never happen. 8 | """ 9 | 10 | from __future__ import annotations 11 | 12 | from collections.abc import Sequence 13 | import os 14 | from pathlib import Path 15 | import sys 16 | import time 17 | from typing import Any 18 | 19 | from _pytest._io import TerminalWriter 20 | import execnet 21 | import pytest 22 | 23 | from xdist._path import visit_path 24 | 25 | 26 | @pytest.hookimpl 27 | def pytest_addoption(parser: pytest.Parser) -> None: 28 | group = parser.getgroup("xdist", "distributed and subprocess testing") 29 | group._addoption( 30 | "-f", 31 | "--looponfail", 32 | action="store_true", 33 | dest="looponfail", 34 | default=False, 35 | help="Run tests in subprocess: wait for files to be modified, then " 36 | "re-run failing test set until all pass.", 37 | ) 38 | 39 | 40 | @pytest.hookimpl 41 | def pytest_cmdline_main(config: pytest.Config) -> int | None: 42 | if config.getoption("looponfail"): 43 | usepdb = config.getoption("usepdb", False) # a core option 44 | if usepdb: 45 | raise pytest.UsageError("--pdb is incompatible with --looponfail.") 46 | looponfail_main(config) 47 | return 2 # looponfail only can get stop with ctrl-C anyway 48 | return None 49 | 50 | 51 | def looponfail_main(config: pytest.Config) -> None: 52 | remotecontrol = RemoteControl(config) 53 | config_roots = config.getini("looponfailroots") 54 | if not config_roots: 55 | config_roots = [Path.cwd()] 56 | rootdirs = [Path(root) for root in config_roots] 57 | statrecorder = StatRecorder(rootdirs) 58 | try: 59 | while 1: 60 | remotecontrol.loop_once() 61 | if not remotecontrol.failures and remotecontrol.wasfailing: 62 | # the last failures passed, let's immediately rerun all 63 | continue 64 | repr_pytest_looponfailinfo( 65 | failreports=remotecontrol.failures, rootdirs=rootdirs 66 | ) 67 | statrecorder.waitonchange(checkinterval=2.0) 68 | except KeyboardInterrupt: 69 | print() 70 | 71 | 72 | class RemoteControl: 73 | gateway: execnet.Gateway 74 | 75 | def __init__(self, config: pytest.Config) -> None: 76 | self.config = config 77 | self.failures: list[str] = [] 78 | 79 | def trace(self, *args: object) -> None: 80 | if self.config.option.debug: 81 | msg = " ".join(str(x) for x in args) 82 | print("RemoteControl:", msg) 83 | 84 | def initgateway(self) -> execnet.Gateway: 85 | return execnet.makegateway("execmodel=main_thread_only//popen") 86 | 87 | def setup(self) -> None: 88 | if hasattr(self, "gateway"): 89 | raise ValueError("already have gateway %r" % self.gateway) 90 | self.trace("setting up 
worker session") 91 | self.gateway = self.initgateway() 92 | self.channel = channel = self.gateway.remote_exec( 93 | init_worker_session, 94 | args=self.config.args, 95 | option_dict=vars(self.config.option), 96 | ) 97 | remote_outchannel: execnet.Channel = channel.receive() 98 | 99 | out = TerminalWriter() 100 | 101 | def write(s: str) -> None: 102 | out._file.write(s) 103 | out._file.flush() 104 | 105 | remote_outchannel.setcallback(write) 106 | 107 | def ensure_teardown(self) -> None: 108 | if hasattr(self, "channel"): 109 | if not self.channel.isclosed(): 110 | self.trace("closing", self.channel) 111 | self.channel.close() 112 | del self.channel 113 | if hasattr(self, "gateway"): 114 | self.trace("exiting", self.gateway) 115 | self.gateway.exit() 116 | del self.gateway 117 | 118 | def runsession(self) -> tuple[list[str], list[str], bool]: 119 | try: 120 | self.trace("sending", self.failures) 121 | self.channel.send(self.failures) 122 | try: 123 | return self.channel.receive() # type: ignore[no-any-return] 124 | except self.channel.RemoteError: 125 | e = sys.exc_info()[1] 126 | self.trace("ERROR", e) 127 | raise 128 | finally: 129 | self.ensure_teardown() 130 | 131 | def loop_once(self) -> None: 132 | self.setup() 133 | self.wasfailing = self.failures and len(self.failures) 134 | result = self.runsession() 135 | failures, _reports, collection_failed = result 136 | if collection_failed: 137 | pass # "Collection failed, keeping previous failure set" 138 | else: 139 | uniq_failures = [] 140 | for failure in failures: 141 | if failure not in uniq_failures: 142 | uniq_failures.append(failure) 143 | self.failures = uniq_failures 144 | 145 | 146 | def repr_pytest_looponfailinfo( 147 | failreports: Sequence[str], rootdirs: Sequence[Path] 148 | ) -> None: 149 | tr = TerminalWriter() 150 | if failreports: 151 | tr.sep("#", "LOOPONFAILING", bold=True) 152 | for report in failreports: 153 | if report: 154 | tr.line(report, red=True) 155 | tr.sep("#", "waiting for changes", bold=True) 156 | for rootdir in rootdirs: 157 | tr.line(f"### Watching: {rootdir}", bold=True) 158 | 159 | 160 | def init_worker_session( 161 | channel: "execnet.Channel", # noqa: UP037 162 | args: list[str], 163 | option_dict: dict[str, "Any"], # noqa: UP037 164 | ) -> None: 165 | import os 166 | import sys 167 | 168 | outchannel = channel.gateway.newchannel() 169 | sys.stdout = sys.stderr = outchannel.makefile("w") 170 | channel.send(outchannel) 171 | # prune sys.path to not contain relative paths 172 | newpaths = [] 173 | for p in sys.path: 174 | if p: 175 | # Ignore path placeholders created for editable installs 176 | if not os.path.isabs(p) and not p.endswith(".__path_hook__"): 177 | p = os.path.abspath(p) 178 | newpaths.append(p) 179 | sys.path[:] = newpaths 180 | 181 | # fullwidth, hasmarkup = channel.receive() 182 | from pytest import Config 183 | 184 | config = Config.fromdictargs(option_dict, list(args)) 185 | config.args = args 186 | from xdist.looponfail import WorkerFailSession 187 | 188 | WorkerFailSession(config, channel).main() 189 | 190 | 191 | class WorkerFailSession: 192 | def __init__(self, config: pytest.Config, channel: execnet.Channel) -> None: 193 | self.config = config 194 | self.channel = channel 195 | self.recorded_failures: list[pytest.CollectReport | pytest.TestReport] = [] 196 | self.collection_failed = False 197 | config.pluginmanager.register(self) 198 | config.option.looponfail = False 199 | config.option.usepdb = False 200 | 201 | def DEBUG(self, *args: object) -> None: 202 | if 
self.config.option.debug: 203 | print(" ".join(map(str, args))) 204 | 205 | @pytest.hookimpl 206 | def pytest_collection(self, session: pytest.Session) -> bool: 207 | self.session = session 208 | self.trails = self.current_command 209 | hook = self.session.ihook 210 | try: 211 | items = session.perform_collect(self.trails or None) 212 | except pytest.UsageError: 213 | items = session.perform_collect(None) 214 | hook.pytest_collection_modifyitems( 215 | session=session, config=session.config, items=items 216 | ) 217 | hook.pytest_collection_finish(session=session) 218 | return True 219 | 220 | @pytest.hookimpl 221 | def pytest_runtest_logreport(self, report: pytest.TestReport) -> None: 222 | if report.failed: 223 | self.recorded_failures.append(report) 224 | 225 | @pytest.hookimpl 226 | def pytest_collectreport(self, report: pytest.CollectReport) -> None: 227 | if report.failed: 228 | self.recorded_failures.append(report) 229 | self.collection_failed = True 230 | 231 | def main(self) -> None: 232 | self.DEBUG("WORKER: received configuration, waiting for command trails") 233 | try: 234 | command = self.channel.receive() 235 | except KeyboardInterrupt: 236 | return # in the worker we can't do much about this 237 | self.DEBUG("received", command) 238 | self.current_command = command 239 | self.config.hook.pytest_cmdline_main(config=self.config) 240 | trails, failreports = [], [] 241 | for rep in self.recorded_failures: 242 | trails.append(rep.nodeid) 243 | loc = rep.longrepr 244 | loc = str(getattr(loc, "reprcrash", loc)) 245 | failreports.append(loc) 246 | result = (trails, failreports, self.collection_failed) 247 | self.channel.send(result) 248 | 249 | 250 | class StatRecorder: 251 | def __init__(self, rootdirlist: Sequence[Path]) -> None: 252 | self.rootdirlist = rootdirlist 253 | self.statcache: dict[Path, os.stat_result] = {} 254 | self.check() # snapshot state 255 | 256 | def fil(self, p: Path) -> bool: 257 | return p.is_file() and not p.name.startswith(".") and p.suffix != ".pyc" 258 | 259 | def rec(self, p: Path) -> bool: 260 | return not p.name.startswith(".") and p.exists() 261 | 262 | def waitonchange(self, checkinterval: float = 1.0) -> None: 263 | while 1: 264 | changed = self.check() 265 | if changed: 266 | return 267 | time.sleep(checkinterval) 268 | 269 | def check(self, removepycfiles: bool = True) -> bool: 270 | changed = False 271 | newstat: dict[Path, os.stat_result] = {} 272 | for rootdir in self.rootdirlist: 273 | for path in visit_path(rootdir, filter=self.fil, recurse=self.rec): 274 | oldstat = self.statcache.pop(path, None) 275 | try: 276 | curstat = path.stat() 277 | except OSError: 278 | if oldstat: 279 | changed = True 280 | else: 281 | newstat[path] = curstat 282 | if oldstat is not None: 283 | if ( 284 | oldstat.st_mtime != curstat.st_mtime 285 | or oldstat.st_size != curstat.st_size 286 | ): 287 | changed = True 288 | print("# MODIFIED", path) 289 | if removepycfiles and path.suffix == ".py": 290 | pycfile = path.with_suffix(".pyc") 291 | if pycfile.is_file(): 292 | os.unlink(pycfile) 293 | 294 | else: 295 | changed = True 296 | if self.statcache: 297 | changed = True 298 | self.statcache = newstat 299 | return changed 300 | -------------------------------------------------------------------------------- /testing/test_plugin.py: -------------------------------------------------------------------------------- 1 | from contextlib import suppress 2 | import os 3 | from pathlib import Path 4 | import sys 5 | 6 | import execnet 7 | import pytest 8 | 9 | from 
xdist.workermanage import NodeManager 10 | 11 | 12 | @pytest.fixture 13 | def monkeypatch_3_cpus(monkeypatch: pytest.MonkeyPatch) -> None: 14 | """Make pytest-xdist believe the system has 3 CPUs.""" 15 | # block import 16 | monkeypatch.setitem(sys.modules, "psutil", None) 17 | monkeypatch.delattr(os, "sched_getaffinity", raising=False) 18 | monkeypatch.setattr(os, "cpu_count", lambda: 3) 19 | 20 | 21 | def test_dist_incompatibility_messages(pytester: pytest.Pytester) -> None: 22 | result = pytester.runpytest("--pdb", "--looponfail") 23 | assert result.ret != 0 24 | result = pytester.runpytest("--pdb", "-n", "3") 25 | assert result.ret != 0 26 | assert "incompatible" in result.stderr.str() 27 | result = pytester.runpytest("--pdb", "-d", "--tx", "popen") 28 | assert result.ret != 0 29 | assert "incompatible" in result.stderr.str() 30 | 31 | 32 | def test_dist_options(pytester: pytest.Pytester) -> None: 33 | from xdist.plugin import pytest_cmdline_main as check_options 34 | 35 | config = pytester.parseconfigure("-n 2") 36 | check_options(config) 37 | assert config.option.dist == "load" 38 | assert config.option.tx == ["popen"] * 2 39 | config = pytester.parseconfigure("--numprocesses", "2") 40 | check_options(config) 41 | assert config.option.dist == "load" 42 | assert config.option.tx == ["popen"] * 2 43 | config = pytester.parseconfigure("--numprocesses", "3", "--maxprocesses", "2") 44 | check_options(config) 45 | assert config.option.dist == "load" 46 | assert config.option.tx == ["popen"] * 2 47 | config = pytester.parseconfigure("-d") 48 | check_options(config) 49 | assert config.option.dist == "load" 50 | 51 | config = pytester.parseconfigure("--numprocesses", "0") 52 | check_options(config) 53 | assert config.option.dist == "no" 54 | assert config.option.tx == [] 55 | 56 | config = pytester.parseconfigure("--numprocesses", "0", "-d") 57 | check_options(config) 58 | assert config.option.dist == "no" 59 | assert config.option.tx == [] 60 | 61 | config = pytester.parseconfigure( 62 | "--numprocesses", "0", "--dist", "each", "--tx", "2*popen" 63 | ) 64 | check_options(config) 65 | assert config.option.dist == "no" 66 | assert config.option.tx == [] 67 | 68 | 69 | def test_auto_detect_cpus( 70 | pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch 71 | ) -> None: 72 | from xdist.plugin import pytest_cmdline_main as check_options 73 | 74 | monkeypatch.delenv("PYTEST_XDIST_AUTO_NUM_WORKERS", raising=False) 75 | 76 | with suppress(ImportError): 77 | import psutil 78 | 79 | monkeypatch.setattr(psutil, "cpu_count", lambda logical=True: None) 80 | 81 | if hasattr(os, "sched_getaffinity"): 82 | monkeypatch.setattr(os, "sched_getaffinity", lambda _pid: set(range(99))) 83 | elif hasattr(os, "cpu_count"): 84 | monkeypatch.setattr(os, "cpu_count", lambda: 99) 85 | else: 86 | import multiprocessing 87 | 88 | monkeypatch.setattr(multiprocessing, "cpu_count", lambda: 99) 89 | 90 | config = pytester.parseconfigure("-n2") 91 | assert config.getoption("numprocesses") == 2 92 | 93 | config = pytester.parseconfigure("-nauto") 94 | check_options(config) 95 | assert config.getoption("numprocesses") == 99 96 | 97 | for numprocesses in (0, "auto", "logical"): 98 | config = pytester.parseconfigure(f"-n{numprocesses}", "--pdb") 99 | check_options(config) 100 | assert config.getoption("usepdb") 101 | assert config.getoption("numprocesses") == 0 102 | assert config.getoption("dist") == "no" 103 | 104 | monkeypatch.delattr(os, "sched_getaffinity", raising=False) 105 | monkeypatch.setenv("TRAVIS", "true") 106 | 
config = pytester.parseconfigure("-nauto") 107 | check_options(config) 108 | assert config.getoption("numprocesses") == 2 109 | 110 | 111 | def test_auto_detect_cpus_psutil( 112 | pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch 113 | ) -> None: 114 | from xdist.plugin import pytest_cmdline_main as check_options 115 | 116 | psutil = pytest.importorskip("psutil") 117 | 118 | monkeypatch.delenv("PYTEST_XDIST_AUTO_NUM_WORKERS", raising=False) 119 | monkeypatch.setattr(psutil, "cpu_count", lambda logical=True: 84 if logical else 42) 120 | 121 | config = pytester.parseconfigure("-nauto") 122 | check_options(config) 123 | assert config.getoption("numprocesses") == 42 124 | 125 | config = pytester.parseconfigure("-nlogical") 126 | check_options(config) 127 | assert config.getoption("numprocesses") == 84 128 | 129 | 130 | def test_auto_detect_cpus_os( 131 | pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch, monkeypatch_3_cpus: None 132 | ) -> None: 133 | from xdist.plugin import pytest_cmdline_main as check_options 134 | 135 | monkeypatch.delenv("PYTEST_XDIST_AUTO_NUM_WORKERS", raising=False) 136 | 137 | config = pytester.parseconfigure("-nauto") 138 | check_options(config) 139 | assert config.getoption("numprocesses") == 3 140 | 141 | config = pytester.parseconfigure("-nlogical") 142 | check_options(config) 143 | assert config.getoption("numprocesses") == 3 144 | 145 | 146 | def test_hook_auto_num_workers( 147 | pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch 148 | ) -> None: 149 | from xdist.plugin import pytest_cmdline_main as check_options 150 | 151 | pytester.makeconftest( 152 | """ 153 | def pytest_xdist_auto_num_workers(): 154 | return 42 155 | """ 156 | ) 157 | config = pytester.parseconfigure("-nauto") 158 | check_options(config) 159 | assert config.getoption("numprocesses") == 42 160 | 161 | config = pytester.parseconfigure("-nlogical") 162 | check_options(config) 163 | assert config.getoption("numprocesses") == 42 164 | 165 | 166 | def test_hook_auto_num_workers_arg( 167 | pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch 168 | ) -> None: 169 | # config.option.numprocesses is a pytest feature, 170 | # but we document it so let's test it. 171 | from xdist.plugin import pytest_cmdline_main as check_options 172 | 173 | pytester.makeconftest( 174 | """ 175 | def pytest_xdist_auto_num_workers(config): 176 | if config.option.numprocesses == 'auto': 177 | return 42 178 | if config.option.numprocesses == 'logical': 179 | return 8 180 | """ 181 | ) 182 | config = pytester.parseconfigure("-nauto") 183 | check_options(config) 184 | assert config.getoption("numprocesses") == 42 185 | 186 | config = pytester.parseconfigure("-nlogical") 187 | check_options(config) 188 | assert config.getoption("numprocesses") == 8 189 | 190 | 191 | def test_hook_auto_num_workers_none( 192 | pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch, monkeypatch_3_cpus: None 193 | ) -> None: 194 | # Returning None from a hook to skip it is pytest behavior, 195 | # but we document it so let's test it. 
196 | from xdist.plugin import pytest_cmdline_main as check_options 197 | 198 | monkeypatch.delenv("PYTEST_XDIST_AUTO_NUM_WORKERS", raising=False) 199 | 200 | pytester.makeconftest( 201 | """ 202 | def pytest_xdist_auto_num_workers(): 203 | return None 204 | """ 205 | ) 206 | config = pytester.parseconfigure("-nauto") 207 | check_options(config) 208 | assert config.getoption("numprocesses") == 3 209 | 210 | monkeypatch.setenv("PYTEST_XDIST_AUTO_NUM_WORKERS", "5") 211 | 212 | config = pytester.parseconfigure("-nauto") 213 | check_options(config) 214 | assert config.getoption("numprocesses") == 5 215 | 216 | 217 | def test_envvar_auto_num_workers( 218 | pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch 219 | ) -> None: 220 | from xdist.plugin import pytest_cmdline_main as check_options 221 | 222 | monkeypatch.setenv("PYTEST_XDIST_AUTO_NUM_WORKERS", "7") 223 | 224 | config = pytester.parseconfigure("-nauto") 225 | check_options(config) 226 | assert config.getoption("numprocesses") == 7 227 | 228 | config = pytester.parseconfigure("-nlogical") 229 | check_options(config) 230 | assert config.getoption("numprocesses") == 7 231 | 232 | 233 | def test_envvar_auto_num_workers_warn( 234 | pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch, monkeypatch_3_cpus: None 235 | ) -> None: 236 | from xdist.plugin import pytest_cmdline_main as check_options 237 | 238 | monkeypatch.setenv("PYTEST_XDIST_AUTO_NUM_WORKERS", "fourscore") 239 | 240 | config = pytester.parseconfigure("-nauto") 241 | with pytest.warns(UserWarning): 242 | check_options(config) 243 | assert config.getoption("numprocesses") == 3 244 | 245 | 246 | def test_auto_num_workers_hook_overrides_envvar( 247 | pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch, monkeypatch_3_cpus: None 248 | ) -> None: 249 | from xdist.plugin import pytest_cmdline_main as check_options 250 | 251 | monkeypatch.setenv("PYTEST_XDIST_AUTO_NUM_WORKERS", "987") 252 | pytester.makeconftest( 253 | """ 254 | def pytest_xdist_auto_num_workers(): 255 | return 2 256 | """ 257 | ) 258 | config = pytester.parseconfigure("-nauto") 259 | check_options(config) 260 | assert config.getoption("numprocesses") == 2 261 | 262 | config = pytester.parseconfigure("-nauto") 263 | check_options(config) 264 | assert config.getoption("numprocesses") == 2 265 | 266 | 267 | def test_dsession_with_collect_only(pytester: pytest.Pytester) -> None: 268 | from xdist.plugin import pytest_cmdline_main as check_options 269 | from xdist.plugin import pytest_configure as configure 270 | 271 | config = pytester.parseconfigure("-n1") 272 | check_options(config) 273 | configure(config) 274 | assert config.pluginmanager.hasplugin("dsession") 275 | 276 | config = pytester.parseconfigure("-n1", "--collect-only") 277 | check_options(config) 278 | configure(config) 279 | assert not config.pluginmanager.hasplugin("dsession") 280 | 281 | 282 | def test_testrunuid_provided(pytester: pytest.Pytester) -> None: 283 | config = pytester.parseconfigure("--testrunuid", "test123", "--tx=popen") 284 | nm = NodeManager(config) 285 | assert nm.testrunuid == "test123" 286 | 287 | 288 | def test_testrunuid_generated(pytester: pytest.Pytester) -> None: 289 | config = pytester.parseconfigure("--tx=popen") 290 | nm = NodeManager(config) 291 | assert len(nm.testrunuid) == 32 292 | 293 | 294 | class TestDistOptions: 295 | def test_getxspecs(self, pytester: pytest.Pytester) -> None: 296 | config = pytester.parseconfigure("--tx=popen", "--tx", "ssh=xyz") 297 | nodemanager = NodeManager(config) 298 | xspecs = 
nodemanager._gettxspecs() 299 | assert len(xspecs) == 2 300 | print(xspecs) 301 | assert xspecs[0].popen 302 | assert xspecs[1].ssh == "xyz" 303 | 304 | def test_xspecs_multiplied(self, pytester: pytest.Pytester) -> None: 305 | config = pytester.parseconfigure("--tx=3*popen") 306 | xspecs = NodeManager(config)._gettxspecs() 307 | assert len(xspecs) == 3 308 | assert xspecs[1].popen 309 | 310 | def test_getrsyncdirs(self, pytester: pytest.Pytester) -> None: 311 | config = pytester.parseconfigure("--rsyncdir=" + str(pytester.path)) 312 | nm = NodeManager(config, specs=[execnet.XSpec("popen")]) 313 | assert not nm._getrsyncdirs() 314 | nm = NodeManager(config, specs=[execnet.XSpec("popen//chdir=qwe")]) 315 | assert nm.roots 316 | assert pytester.path in nm.roots 317 | 318 | def test_getrsyncignore(self, pytester: pytest.Pytester) -> None: 319 | config = pytester.parseconfigure("--rsyncignore=fo*") 320 | nm = NodeManager(config, specs=[execnet.XSpec("popen//chdir=qwe")]) 321 | assert "fo*" in nm.rsyncoptions["ignores"] 322 | 323 | def test_getrsyncdirs_with_conftest(self, pytester: pytest.Pytester) -> None: 324 | p = Path.cwd() 325 | for bn in ("x", "y", "z"): 326 | p.joinpath(bn).mkdir() 327 | pytester.makeini( 328 | """ 329 | [pytest] 330 | rsyncdirs= x 331 | """ 332 | ) 333 | config = pytester.parseconfigure(pytester.path, "--rsyncdir=y", "--rsyncdir=z") 334 | nm = NodeManager(config, specs=[execnet.XSpec("popen//chdir=xyz")]) 335 | roots = nm._getrsyncdirs() 336 | # assert len(roots) == 3 + 1 # pylib 337 | assert Path("y").resolve() in roots 338 | assert Path("z").resolve() in roots 339 | assert pytester.path.joinpath("x") in roots 340 | -------------------------------------------------------------------------------- /src/xdist/scheduler/worksteal.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from collections.abc import Sequence 4 | from typing import NamedTuple 5 | 6 | import pytest 7 | 8 | from xdist.remote import Producer 9 | from xdist.report import report_collection_diff 10 | from xdist.workermanage import parse_tx_spec_config 11 | from xdist.workermanage import WorkerController 12 | 13 | 14 | class NodePending(NamedTuple): 15 | node: WorkerController 16 | pending: list[int] 17 | 18 | 19 | # Every worker needs at least 2 tests in queue - the current and the next one. 20 | MIN_PENDING = 2 21 | 22 | 23 | class WorkStealingScheduling: 24 | """Implement work-stealing scheduling. 25 | 26 | Initially, tests are distributed evenly among all nodes. 27 | 28 | When some node completes most of its assigned tests (when only one pending 29 | test remains), an attempt is made to reassign ("steal") some tests from 30 | other nodes to this node. 31 | 32 | Attributes:: 33 | 34 | :numnodes: The expected number of nodes taking part. The actual 35 | number of nodes will vary during the scheduler's lifetime as 36 | nodes are added by the DSession as they are brought up and 37 | removed either because of a dead node or normal shutdown. This 38 | number is primarily used to know when the initial collection is 39 | completed. 40 | 41 | :node2collection: Map of nodes and their test collection. All 42 | collections should always be identical. 43 | 44 | :node2pending: Map of nodes and the indices of their pending 45 | tests. The indices are an index into ``.pending`` (which is 46 | identical to their own collection stored in 47 | ``.node2collection``). 
48 | 49 | :collection: The one collection once it is validated to be 50 | identical between all the nodes. It is initialised to None 51 | until ``.schedule()`` is called. 52 | 53 | :pending: List of indices of globally pending tests. These are 54 | tests which have not yet been allocated to a chunk for a node 55 | to process. 56 | 57 | :log: A py.log.Producer instance. 58 | 59 | :config: Config object, used for handling hooks. 60 | 61 | :steal_requested_from_node: The node to which the current "steal" request 62 | was sent. ``None`` if there is no request in progress. Only one request 63 | can be in progress at any time, the scheduler doesn't send multiple 64 | simultaneous requests. 65 | """ 66 | 67 | def __init__(self, config: pytest.Config, log: Producer | None = None) -> None: 68 | self.numnodes = len(parse_tx_spec_config(config)) 69 | self.node2collection: dict[WorkerController, list[str]] = {} 70 | self.node2pending: dict[WorkerController, list[int]] = {} 71 | self.pending: list[int] = [] 72 | self.collection: list[str] | None = None 73 | if log is None: 74 | self.log = Producer("workstealsched") 75 | else: 76 | self.log = log.workstealsched 77 | self.config = config 78 | self.steal_requested_from_node: WorkerController | None = None 79 | 80 | @property 81 | def nodes(self) -> list[WorkerController]: 82 | """A list of all nodes in the scheduler.""" 83 | return list(self.node2pending.keys()) 84 | 85 | @property 86 | def collection_is_completed(self) -> bool: 87 | """Boolean indication initial test collection is complete. 88 | 89 | This is a boolean indicating all initial participating nodes 90 | have finished collection. The required number of initial 91 | nodes is defined by ``.numnodes``. 92 | """ 93 | return len(self.node2collection) >= self.numnodes 94 | 95 | @property 96 | def tests_finished(self) -> bool: 97 | """Return True if all tests have been executed by the nodes.""" 98 | if not self.collection_is_completed: 99 | return False 100 | if self.pending: 101 | return False 102 | if self.steal_requested_from_node is not None: 103 | return False 104 | for pending in self.node2pending.values(): 105 | if len(pending) >= MIN_PENDING: 106 | return False 107 | return True 108 | 109 | @property 110 | def has_pending(self) -> bool: 111 | """Return True if there are pending test items. 112 | 113 | This indicates that collection has finished and nodes are 114 | still processing test items, so this can be thought of as 115 | "the scheduler is active". 116 | """ 117 | if self.pending: 118 | return True 119 | for pending in self.node2pending.values(): 120 | if pending: 121 | return True 122 | return False 123 | 124 | def add_node(self, node: WorkerController) -> None: 125 | """Add a new node to the scheduler. 126 | 127 | From now on the node will be allocated chunks of tests to 128 | execute. 129 | 130 | Called by the ``DSession.worker_workerready`` hook when it 131 | successfully bootstraps a new node. 132 | """ 133 | assert node not in self.node2pending 134 | self.node2pending[node] = [] 135 | 136 | def add_node_collection( 137 | self, node: WorkerController, collection: Sequence[str] 138 | ) -> None: 139 | """Add the collected test items from a node. 140 | 141 | The collection is stored in the ``.node2collection`` map. 142 | Called by the ``DSession.worker_collectionfinish`` hook. 143 | """ 144 | assert node in self.node2pending 145 | if self.collection_is_completed: 146 | # A new node has been added later, perhaps an original one died. 
147 | # .schedule() should have 148 | # been called by now 149 | assert self.collection 150 | if collection != self.collection: 151 | other_node = next(iter(self.node2collection.keys())) 152 | msg = report_collection_diff( 153 | self.collection, collection, other_node.gateway.id, node.gateway.id 154 | ) 155 | self.log(msg) 156 | return 157 | self.node2collection[node] = list(collection) 158 | 159 | def mark_test_complete( 160 | self, node: WorkerController, item_index: int, duration: float | None = None 161 | ) -> None: 162 | """Mark test item as completed by node. 163 | 164 | This is called by the ``DSession.worker_testreport`` hook. 165 | """ 166 | self.node2pending[node].remove(item_index) 167 | self.check_schedule() 168 | 169 | def mark_test_pending(self, item: str) -> None: 170 | assert self.collection is not None 171 | self.pending.insert( 172 | 0, 173 | self.collection.index(item), 174 | ) 175 | self.check_schedule() 176 | 177 | def remove_pending_tests_from_node( 178 | self, 179 | node: WorkerController, 180 | indices: Sequence[int], 181 | ) -> None: 182 | """Node returned some test indices back in response to 'steal' command. 183 | 184 | This is called by ``DSession.worker_unscheduled``. 185 | """ 186 | assert node is self.steal_requested_from_node 187 | self.steal_requested_from_node = None 188 | 189 | indices_set = set(indices) 190 | self.node2pending[node] = [ 191 | i for i in self.node2pending[node] if i not in indices_set 192 | ] 193 | self.pending.extend(indices) 194 | self.check_schedule() 195 | 196 | def check_schedule(self) -> None: 197 | """Reschedule tests/perform load balancing.""" 198 | nodes_up = [ 199 | NodePending(node, pending) 200 | for node, pending in self.node2pending.items() 201 | if not node.shutting_down 202 | ] 203 | 204 | def get_idle_nodes() -> list[WorkerController]: 205 | return [node for node, pending in nodes_up if len(pending) < MIN_PENDING] 206 | 207 | idle_nodes = get_idle_nodes() 208 | if not idle_nodes: 209 | return 210 | 211 | if self.pending: 212 | # Distribute pending tests evenly among idle nodes 213 | for i, node in enumerate(idle_nodes): 214 | nodes_remaining = len(idle_nodes) - i 215 | num_send = len(self.pending) // nodes_remaining 216 | self._send_tests(node, num_send) 217 | 218 | idle_nodes = get_idle_nodes() 219 | # No need to steal anything if all nodes have enough work to continue 220 | if not idle_nodes: 221 | return 222 | 223 | # Only one active stealing request is allowed 224 | if self.steal_requested_from_node is not None: 225 | return 226 | 227 | # Find the node that has the longest test queue 228 | steal_from = max( 229 | nodes_up, key=lambda node_pending: len(node_pending.pending), default=None 230 | ) 231 | 232 | if steal_from is None: 233 | num_steal = 0 234 | else: 235 | # Steal half of the test queue - but keep that node running too. 236 | # If the node has 2 or less tests queued, stealing will fail 237 | # anyway. 238 | max_steal = max(0, len(steal_from.pending) - MIN_PENDING) 239 | num_steal = min(len(steal_from.pending) // 2, max_steal) 240 | 241 | if num_steal == 0: 242 | # Can't get more work - shutdown idle nodes. This will force them 243 | # to run the last test now instead of waiting for more tests. 
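# (Worked example of the arithmetic above, only illustrative and assuming
# MIN_PENDING == 2 as defined at the top of this module: a busiest node with
# 9 pending items gives max_steal = 9 - 2 = 7 and num_steal = min(9 // 2, 7) = 4;
# a busiest node with only 2 pending items gives max_steal = 0, so num_steal == 0
# and we land in this branch with nothing left to redistribute.)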
244 | for node in idle_nodes: 245 | node.shutdown() 246 | return 247 | 248 | assert steal_from is not None 249 | steal_from.node.send_steal(steal_from.pending[-num_steal:]) 250 | self.steal_requested_from_node = steal_from.node 251 | 252 | def remove_node(self, node: WorkerController) -> str | None: 253 | """Remove a node from the scheduler. 254 | 255 | This should be called either when the node crashed or at 256 | shutdown time. In the former case any pending items assigned 257 | to the node will be re-scheduled. Called by the 258 | ``DSession.worker_workerfinished`` and 259 | ``DSession.worker_errordown`` hooks. 260 | 261 | Return the item which was being executing while the node 262 | crashed or None if the node has no more pending items. 263 | """ 264 | pending = self.node2pending.pop(node) 265 | 266 | # If node was removed without completing its assigned tests - it crashed 267 | if pending: 268 | assert self.collection is not None 269 | crashitem = self.collection[pending.pop(0)] 270 | else: 271 | crashitem = None 272 | 273 | self.pending.extend(pending) 274 | 275 | # Dead node won't respond to "steal" request 276 | if self.steal_requested_from_node is node: 277 | self.steal_requested_from_node = None 278 | 279 | self.check_schedule() 280 | return crashitem 281 | 282 | def schedule(self) -> None: 283 | """Initiate distribution of the test collection. 284 | 285 | Initiate scheduling of the items across the nodes. If this 286 | gets called again later it behaves the same as calling 287 | ``.check_schedule()`` on all nodes so that newly added nodes 288 | will start to be used. 289 | 290 | This is called by the ``DSession.worker_collectionfinish`` hook 291 | if ``.collection_is_completed`` is True. 292 | """ 293 | assert self.collection_is_completed 294 | 295 | # Initial distribution already happened, reschedule on all nodes 296 | if self.collection is not None: 297 | self.check_schedule() 298 | return 299 | 300 | if not self._check_nodes_have_same_collection(): 301 | self.log("**Different tests collected, aborting run**") 302 | return 303 | 304 | # Collections are identical, create the index of pending items. 305 | self.collection = next(iter(self.node2collection.values())) 306 | self.pending[:] = range(len(self.collection)) 307 | if not self.collection: 308 | return 309 | 310 | self.check_schedule() 311 | 312 | def _send_tests(self, node: WorkerController, num: int) -> None: 313 | tests_per_node = self.pending[:num] 314 | if tests_per_node: 315 | del self.pending[:num] 316 | self.node2pending[node].extend(tests_per_node) 317 | node.send_runtest_some(tests_per_node) 318 | 319 | def _check_nodes_have_same_collection(self) -> bool: 320 | """Return True if all nodes have collected the same items. 321 | 322 | If collections differ, this method returns False while logging 323 | the collection differences and posting collection errors to 324 | pytest_collectreport hook. 
325 | """ 326 | node_collection_items = list(self.node2collection.items()) 327 | first_node, col = node_collection_items[0] 328 | same_collection = True 329 | for node, collection in node_collection_items[1:]: 330 | msg = report_collection_diff( 331 | col, collection, first_node.gateway.id, node.gateway.id 332 | ) 333 | if msg: 334 | same_collection = False 335 | self.log(msg) 336 | if self.config is not None: 337 | rep = pytest.CollectReport( 338 | nodeid=node.gateway.id, 339 | outcome="failed", 340 | longrepr=msg, 341 | result=[], 342 | ) 343 | self.config.hook.pytest_collectreport(report=rep) 344 | 345 | return same_collection 346 | -------------------------------------------------------------------------------- /testing/test_looponfail.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import pathlib 4 | from pathlib import Path 5 | import shutil 6 | import tempfile 7 | import textwrap 8 | import unittest.mock 9 | 10 | import pytest 11 | 12 | from xdist.looponfail import RemoteControl 13 | from xdist.looponfail import StatRecorder 14 | 15 | 16 | class TestStatRecorder: 17 | def test_filechange(self, tmp_path: Path) -> None: 18 | tmp = tmp_path 19 | hello = tmp / "hello.py" 20 | hello.touch() 21 | sd = StatRecorder([tmp]) 22 | changed = sd.check() 23 | assert not changed 24 | 25 | hello.write_text("world") 26 | changed = sd.check() 27 | assert changed 28 | 29 | hello.with_suffix(".pyc").write_text("hello") 30 | changed = sd.check() 31 | assert not changed 32 | 33 | p = tmp / "new.py" 34 | p.touch() 35 | changed = sd.check() 36 | assert changed 37 | 38 | p.unlink() 39 | changed = sd.check() 40 | assert changed 41 | 42 | tmp.joinpath("a", "b").mkdir(parents=True) 43 | tmp.joinpath("a", "b", "c.py").touch() 44 | changed = sd.check() 45 | assert changed 46 | 47 | tmp.joinpath("a", "c.txt").touch() 48 | changed = sd.check() 49 | assert changed 50 | changed = sd.check() 51 | assert not changed 52 | 53 | shutil.rmtree(str(tmp.joinpath("a"))) 54 | changed = sd.check() 55 | assert changed 56 | 57 | def test_dirchange(self, tmp_path: Path) -> None: 58 | tmp = tmp_path 59 | tmp.joinpath("dir").mkdir() 60 | tmp.joinpath("dir", "hello.py").touch() 61 | sd = StatRecorder([tmp]) 62 | assert not sd.fil(tmp / "dir") 63 | 64 | def test_filechange_deletion_race(self, tmp_path: Path) -> None: 65 | tmp = tmp_path 66 | sd = StatRecorder([tmp]) 67 | changed = sd.check() 68 | assert not changed 69 | 70 | p = tmp.joinpath("new.py") 71 | p.touch() 72 | changed = sd.check() 73 | assert changed 74 | 75 | p.unlink() 76 | # make check()'s visit() call return our just removed 77 | # path as if we were in a race condition 78 | dirname = str(tmp) 79 | dirnames: list[str] = [] 80 | filenames = [str(p)] 81 | with unittest.mock.patch( 82 | "os.walk", return_value=[(dirname, dirnames, filenames)], autospec=True 83 | ): 84 | changed = sd.check() 85 | assert changed 86 | 87 | def test_pycremoval(self, tmp_path: Path) -> None: 88 | tmp = tmp_path 89 | hello = tmp / "hello.py" 90 | hello.touch() 91 | sd = StatRecorder([tmp]) 92 | changed = sd.check() 93 | assert not changed 94 | 95 | pycfile = hello.with_suffix(".pyc") 96 | pycfile.touch() 97 | hello.write_text("world") 98 | changed = sd.check() 99 | assert changed 100 | assert not pycfile.exists() 101 | 102 | def test_waitonchange( 103 | self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch 104 | ) -> None: 105 | tmp = tmp_path 106 | sd = StatRecorder([tmp]) 107 | 108 | ret_values = [True, False] 
109 | monkeypatch.setattr(StatRecorder, "check", lambda self: ret_values.pop()) 110 | sd.waitonchange(checkinterval=0.2) 111 | assert not ret_values 112 | 113 | 114 | class TestRemoteControl: 115 | def test_nofailures(self, pytester: pytest.Pytester) -> None: 116 | item = pytester.getitem("def test_func(): pass\n") 117 | control = RemoteControl(item.config) 118 | control.setup() 119 | _topdir, failures = control.runsession()[:2] 120 | assert not failures 121 | 122 | def test_failures_somewhere(self, pytester: pytest.Pytester) -> None: 123 | item = pytester.getitem("def test_func():\n assert 0\n") 124 | control = RemoteControl(item.config) 125 | control.setup() 126 | failures = control.runsession()[0] 127 | assert failures 128 | control.setup() 129 | item.path.write_text("def test_func():\n assert 1\n") 130 | removepyc(item.path) 131 | _topdir, failures = control.runsession()[:2] 132 | assert not failures 133 | 134 | def test_failure_change(self, pytester: pytest.Pytester) -> None: 135 | modcol = pytester.getitem( 136 | textwrap.dedent( 137 | """ 138 | def test_func(): 139 | assert 0 140 | """ 141 | ) 142 | ) 143 | control = RemoteControl(modcol.config) 144 | control.loop_once() 145 | assert control.failures 146 | modcol_path = modcol.path 147 | 148 | modcol_path.write_text( 149 | textwrap.dedent( 150 | """ 151 | def test_func(): 152 | assert 1 153 | def test_new(): 154 | assert 0 155 | """ 156 | ) 157 | ) 158 | removepyc(modcol_path) 159 | control.loop_once() 160 | assert not control.failures 161 | control.loop_once() 162 | assert control.failures 163 | assert str(control.failures).find("test_new") != -1 164 | 165 | def test_failure_subdir_no_init( 166 | self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch 167 | ) -> None: 168 | modcol = pytester.getitem( 169 | textwrap.dedent( 170 | """ 171 | def test_func(): 172 | assert 0 173 | """ 174 | ) 175 | ) 176 | parent = modcol.path.parent.parent 177 | monkeypatch.chdir(parent) 178 | modcol.config.args = [ 179 | str(Path(x).relative_to(parent)) for x in modcol.config.args 180 | ] 181 | control = RemoteControl(modcol.config) 182 | control.loop_once() 183 | assert control.failures 184 | control.loop_once() 185 | assert control.failures 186 | 187 | def test_ignore_sys_path_hook_entry( 188 | self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch 189 | ) -> None: 190 | # Modifying sys.path as seen by the worker process is a bit tricky, 191 | # because any changes made in the current process do not carry over. 192 | # However, we can leverage the `sitecustomize` behavior to run arbitrary 193 | # code when the subprocess interpreter is starting up. We just need to 194 | # install our module in the search path, which we can accomplish by 195 | # adding a temporary directory to PYTHONPATH. 
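# (This works because Python's `site` module automatically imports a module
# named `sitecustomize` at interpreter startup if one is importable, unless
# site processing is disabled with -S.)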
196 | tmpdir = tempfile.TemporaryDirectory() 197 | with open(pathlib.Path(tmpdir.name) / "sitecustomize.py", "w") as custom: 198 | print( 199 | textwrap.dedent( 200 | """ 201 | import sys 202 | sys.path.append('dummy.__path_hook__') 203 | """ 204 | ), 205 | file=custom, 206 | ) 207 | 208 | monkeypatch.setenv("PYTHONPATH", tmpdir.name, prepend=":") 209 | 210 | item = pytester.getitem( 211 | textwrap.dedent( 212 | """ 213 | def test_func(): 214 | import sys 215 | assert "dummy.__path_hook__" in sys.path 216 | """ 217 | ) 218 | ) 219 | control = RemoteControl(item.config) 220 | control.setup() 221 | _topdir, failures = control.runsession()[:2] 222 | assert not failures 223 | 224 | 225 | class TestLooponFailing: 226 | def test_looponfail_from_fail_to_ok(self, pytester: pytest.Pytester) -> None: 227 | modcol = pytester.getmodulecol( 228 | textwrap.dedent( 229 | """ 230 | def test_one(): 231 | x = 0 232 | assert x == 1 233 | def test_two(): 234 | assert 1 235 | """ 236 | ) 237 | ) 238 | remotecontrol = RemoteControl(modcol.config) 239 | remotecontrol.loop_once() 240 | assert len(remotecontrol.failures) == 1 241 | 242 | modcol.path.write_text( 243 | textwrap.dedent( 244 | """ 245 | def test_one(): 246 | assert 1 247 | def test_two(): 248 | assert 1 249 | """ 250 | ) 251 | ) 252 | removepyc(modcol.path) 253 | remotecontrol.loop_once() 254 | assert not remotecontrol.failures 255 | 256 | def test_looponfail_from_one_to_two_tests(self, pytester: pytest.Pytester) -> None: 257 | modcol = pytester.getmodulecol( 258 | textwrap.dedent( 259 | """ 260 | def test_one(): 261 | assert 0 262 | """ 263 | ) 264 | ) 265 | remotecontrol = RemoteControl(modcol.config) 266 | remotecontrol.loop_once() 267 | assert len(remotecontrol.failures) == 1 268 | assert "test_one" in remotecontrol.failures[0] 269 | 270 | modcol.path.write_text( 271 | textwrap.dedent( 272 | """ 273 | def test_one(): 274 | assert 1 # passes now 275 | def test_two(): 276 | assert 0 # new and fails 277 | """ 278 | ) 279 | ) 280 | removepyc(modcol.path) 281 | remotecontrol.loop_once() 282 | assert len(remotecontrol.failures) == 0 283 | remotecontrol.loop_once() 284 | assert len(remotecontrol.failures) == 1 285 | assert "test_one" not in remotecontrol.failures[0] 286 | assert "test_two" in remotecontrol.failures[0] 287 | 288 | @pytest.mark.xfail(reason="broken by pytest 3.1+", strict=True) 289 | def test_looponfail_removed_test(self, pytester: pytest.Pytester) -> None: 290 | modcol = pytester.getmodulecol( 291 | textwrap.dedent( 292 | """ 293 | def test_one(): 294 | assert 0 295 | def test_two(): 296 | assert 0 297 | """ 298 | ) 299 | ) 300 | remotecontrol = RemoteControl(modcol.config) 301 | remotecontrol.loop_once() 302 | assert len(remotecontrol.failures) == 2 303 | 304 | modcol.path.write_text( 305 | textwrap.dedent( 306 | """ 307 | def test_xxx(): # renamed test 308 | assert 0 309 | def test_two(): 310 | assert 1 # pass now 311 | """ 312 | ) 313 | ) 314 | removepyc(modcol.path) 315 | remotecontrol.loop_once() 316 | assert len(remotecontrol.failures) == 0 317 | 318 | remotecontrol.loop_once() 319 | assert len(remotecontrol.failures) == 1 320 | 321 | def test_looponfail_multiple_errors( 322 | self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch 323 | ) -> None: 324 | modcol = pytester.getmodulecol( 325 | textwrap.dedent( 326 | """ 327 | def test_one(): 328 | assert 0 329 | """ 330 | ) 331 | ) 332 | remotecontrol = RemoteControl(modcol.config) 333 | orig_runsession = remotecontrol.runsession 334 | 335 | def runsession_dups() -> 
tuple[list[str], list[str], bool]: 336 | # twisted.trial test cases may report multiple errors. 337 | failures, reports, collection_failed = orig_runsession() 338 | print(failures) 339 | return failures * 2, reports, collection_failed 340 | 341 | monkeypatch.setattr(remotecontrol, "runsession", runsession_dups) 342 | remotecontrol.loop_once() 343 | assert len(remotecontrol.failures) == 1 344 | 345 | 346 | class TestFunctional: 347 | def test_fail_to_ok(self, pytester: pytest.Pytester) -> None: 348 | p = pytester.makepyfile( 349 | textwrap.dedent( 350 | """ 351 | def test_one(): 352 | x = 0 353 | assert x == 1 354 | """ 355 | ) 356 | ) 357 | # p = pytester.mkdir("sub").join(p1.basename) 358 | # p1.move(p) 359 | child = pytester.spawn_pytest("-f %s --traceconfig" % p, expect_timeout=30.0) 360 | child.expect("def test_one") 361 | child.expect("x == 1") 362 | child.expect("1 failed") 363 | child.expect("### LOOPONFAILING ####") 364 | child.expect("waiting for changes") 365 | p.write_text( 366 | textwrap.dedent( 367 | """ 368 | def test_one(): 369 | x = 1 370 | assert x == 1 371 | """ 372 | ), 373 | ) 374 | child.expect(".*1 passed.*") 375 | child.kill(15) 376 | 377 | def test_xfail_passes(self, pytester: pytest.Pytester) -> None: 378 | p = pytester.makepyfile( 379 | textwrap.dedent( 380 | """ 381 | import pytest 382 | @pytest.mark.xfail 383 | def test_one(): 384 | pass 385 | """ 386 | ) 387 | ) 388 | child = pytester.spawn_pytest("-f %s" % p, expect_timeout=30.0) 389 | child.expect("1 xpass") 390 | # child.expect("### LOOPONFAILING ####") 391 | child.expect("waiting for changes") 392 | child.kill(15) 393 | 394 | 395 | def removepyc(path: Path) -> None: 396 | # XXX damn those pyc files 397 | pyc = path.with_suffix(".pyc") 398 | if pyc.exists(): 399 | pyc.unlink() 400 | c = path.parent / "__pycache__" 401 | if c.exists(): 402 | shutil.rmtree(c) 403 | -------------------------------------------------------------------------------- /src/xdist/scheduler/load.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from collections.abc import Sequence 4 | from itertools import cycle 5 | 6 | import pytest 7 | 8 | from xdist.remote import Producer 9 | from xdist.report import report_collection_diff 10 | from xdist.workermanage import parse_tx_spec_config 11 | from xdist.workermanage import WorkerController 12 | 13 | 14 | class LoadScheduling: 15 | """Implement load scheduling across nodes. 16 | 17 | This distributes the tests collected across all nodes so each test 18 | is run just once. All nodes collect and submit the test suite and 19 | when all collections are received it is verified they are 20 | identical collections. Then the collection gets divided up in 21 | chunks and chunks get submitted to nodes. Whenever a node finishes 22 | an item, it calls ``.mark_test_complete()`` which will trigger the 23 | scheduler to assign more tests if the number of pending tests for 24 | the node falls below a low-watermark. 25 | 26 | When created, ``numnodes`` defines how many nodes are expected to 27 | submit a collection. This is used to know when all nodes have 28 | finished collection or how large the chunks need to be created. 29 | 30 | Attributes:: 31 | 32 | :numnodes: The expected number of nodes taking part. The actual 33 | number of nodes will vary during the scheduler's lifetime as 34 | nodes are added by the DSession as they are brought up and 35 | removed either because of a dead node or normal shutdown. 
This 36 | number is primarily used to know when the initial collection is 37 | completed. 38 | 39 | :node2collection: Map of nodes and their test collection. All 40 | collections should always be identical. 41 | 42 | :node2pending: Map of nodes and the indices of their pending 43 | tests. The indices are an index into ``.pending`` (which is 44 | identical to their own collection stored in 45 | ``.node2collection``). 46 | 47 | :collection: The one collection once it is validated to be 48 | identical between all the nodes. It is initialised to None 49 | until ``.schedule()`` is called. 50 | 51 | :pending: List of indices of globally pending tests. These are 52 | tests which have not yet been allocated to a chunk for a node 53 | to process. 54 | 55 | :log: A py.log.Producer instance. 56 | 57 | :config: Config object, used for handling hooks. 58 | """ 59 | 60 | def __init__(self, config: pytest.Config, log: Producer | None = None) -> None: 61 | self.numnodes = len(parse_tx_spec_config(config)) 62 | self.node2collection: dict[WorkerController, list[str]] = {} 63 | self.node2pending: dict[WorkerController, list[int]] = {} 64 | self.pending: list[int] = [] 65 | self.collection: list[str] | None = None 66 | if log is None: 67 | self.log = Producer("loadsched") 68 | else: 69 | self.log = log.loadsched 70 | self.config = config 71 | self.maxschedchunk = self.config.getoption("maxschedchunk") 72 | 73 | @property 74 | def nodes(self) -> list[WorkerController]: 75 | """A list of all nodes in the scheduler.""" 76 | return list(self.node2pending.keys()) 77 | 78 | @property 79 | def collection_is_completed(self) -> bool: 80 | """Boolean indication initial test collection is complete. 81 | 82 | This is a boolean indicating all initial participating nodes 83 | have finished collection. The required number of initial 84 | nodes is defined by ``.numnodes``. 85 | """ 86 | return len(self.node2collection) >= self.numnodes 87 | 88 | @property 89 | def tests_finished(self) -> bool: 90 | """Return True if all tests have been executed by the nodes.""" 91 | if not self.collection_is_completed: 92 | return False 93 | if self.pending: 94 | return False 95 | for pending in self.node2pending.values(): 96 | if len(pending) >= 2: 97 | return False 98 | return True 99 | 100 | @property 101 | def has_pending(self) -> bool: 102 | """Return True if there are pending test items. 103 | 104 | This indicates that collection has finished and nodes are 105 | still processing test items, so this can be thought of as 106 | "the scheduler is active". 107 | """ 108 | if self.pending: 109 | return True 110 | for pending in self.node2pending.values(): 111 | if pending: 112 | return True 113 | return False 114 | 115 | def add_node(self, node: WorkerController) -> None: 116 | """Add a new node to the scheduler. 117 | 118 | From now on the node will be allocated chunks of tests to 119 | execute. 120 | 121 | Called by the ``DSession.worker_workerready`` hook when it 122 | successfully bootstraps a new node. 123 | """ 124 | assert node not in self.node2pending 125 | self.node2pending[node] = [] 126 | 127 | def add_node_collection( 128 | self, node: WorkerController, collection: Sequence[str] 129 | ) -> None: 130 | """Add the collected test items from a node. 131 | 132 | The collection is stored in the ``.node2collection`` map. 133 | Called by the ``DSession.worker_collectionfinish`` hook. 
134 | """ 135 | assert node in self.node2pending 136 | if self.collection_is_completed: 137 | # A new node has been added later, perhaps an original one died. 138 | # .schedule() should have 139 | # been called by now 140 | assert self.collection 141 | if collection != self.collection: 142 | other_node = next(iter(self.node2collection.keys())) 143 | msg = report_collection_diff( 144 | self.collection, collection, other_node.gateway.id, node.gateway.id 145 | ) 146 | self.log(msg) 147 | return 148 | self.node2collection[node] = list(collection) 149 | 150 | def mark_test_complete( 151 | self, node: WorkerController, item_index: int, duration: float = 0 152 | ) -> None: 153 | """Mark test item as completed by node. 154 | 155 | The duration it took to execute the item is used as a hint to 156 | the scheduler. 157 | 158 | This is called by the ``DSession.worker_testreport`` hook. 159 | """ 160 | self.node2pending[node].remove(item_index) 161 | self.check_schedule(node, duration=duration) 162 | 163 | def mark_test_pending(self, item: str) -> None: 164 | assert self.collection is not None 165 | self.pending.insert( 166 | 0, 167 | self.collection.index(item), 168 | ) 169 | for node in self.node2pending: 170 | self.check_schedule(node) 171 | 172 | def remove_pending_tests_from_node( 173 | self, 174 | node: WorkerController, 175 | indices: Sequence[int], 176 | ) -> None: 177 | raise NotImplementedError() 178 | 179 | def check_schedule(self, node: WorkerController, duration: float = 0) -> None: 180 | """Maybe schedule new items on the node. 181 | 182 | If there are any globally pending nodes left then this will 183 | check if the given node should be given any more tests. The 184 | ``duration`` of the last test is optionally used as a 185 | heuristic to influence how many tests the node is assigned. 186 | """ 187 | if node.shutting_down: 188 | return 189 | 190 | if self.pending: 191 | # how many nodes do we have? 192 | num_nodes = len(self.node2pending) 193 | # if our node goes below a heuristic minimum, fill it out to 194 | # heuristic maximum 195 | items_per_node_min = max(2, len(self.pending) // num_nodes // 4) 196 | items_per_node_max = max(2, len(self.pending) // num_nodes // 2) 197 | node_pending = self.node2pending[node] 198 | if len(node_pending) < items_per_node_min: 199 | if duration >= 0.1 and len(node_pending) >= 2: 200 | # seems the node is doing long-running tests 201 | # and has enough items to continue 202 | # so let's rather wait with sending new items 203 | return 204 | num_send = items_per_node_max - len(node_pending) 205 | # keep at least 2 tests pending even if --maxschedchunk=1 206 | maxschedchunk = max(2 - len(node_pending), self.maxschedchunk) 207 | self._send_tests(node, min(num_send, maxschedchunk)) 208 | else: 209 | node.shutdown() 210 | 211 | self.log("num items waiting for node:", len(self.pending)) 212 | 213 | def remove_node(self, node: WorkerController) -> str | None: 214 | """Remove a node from the scheduler. 215 | 216 | This should be called either when the node crashed or at 217 | shutdown time. In the former case any pending items assigned 218 | to the node will be re-scheduled. Called by the 219 | ``DSession.worker_workerfinished`` and 220 | ``DSession.worker_errordown`` hooks. 221 | 222 | Return the item which was being executing while the node 223 | crashed or None if the node has no more pending items. 
224 | 
225 | """
226 | pending = self.node2pending.pop(node)
227 | if not pending:
228 | return None
229 | 
230 | # The node crashed, reassign pending items
231 | assert self.collection is not None
232 | crashitem = self.collection[pending.pop(0)]
233 | self.pending.extend(pending)
234 | for node in self.node2pending:
235 | self.check_schedule(node)
236 | return crashitem
237 | 
238 | def schedule(self) -> None:
239 | """Initiate distribution of the test collection.
240 | 
241 | Initiate scheduling of the items across the nodes. If this
242 | gets called again later it behaves the same as calling
243 | ``.check_schedule()`` on all nodes so that newly added nodes
244 | will start to be used.
245 | 
246 | This is called by the ``DSession.worker_collectionfinish`` hook
247 | if ``.collection_is_completed`` is True.
248 | """
249 | assert self.collection_is_completed
250 | 
251 | # Initial distribution already happened, reschedule on all nodes
252 | if self.collection is not None:
253 | for node in self.nodes:
254 | self.check_schedule(node)
255 | return
256 | 
257 | # XXX allow nodes to have different collections
258 | if not self._check_nodes_have_same_collection():
259 | self.log("**Different tests collected, aborting run**")
260 | return
261 | 
262 | # Collections are identical, create the index of pending items.
263 | self.collection = next(iter(self.node2collection.values()))
264 | self.pending[:] = range(len(self.collection))
265 | if not self.collection:
266 | return
267 | 
268 | if self.maxschedchunk is None:
269 | self.maxschedchunk = len(self.collection)
270 | 
271 | # Send a batch of tests to run. If we don't have at least two
272 | # tests per node, we have to send them all so that we can send
273 | # shutdown signals and get all nodes working.
274 | if len(self.pending) < 2 * len(self.nodes):
275 | # Distribute tests round-robin. Try to load all nodes if there are
276 | # enough tests. The other branch tries to send at least 2 tests
277 | # to each node - which is suboptimal when you have fewer than
278 | # 2 * len(nodes) tests.
279 | nodes = cycle(self.nodes)
280 | for _ in range(len(self.pending)):
281 | self._send_tests(next(nodes), 1)
282 | else:
283 | # Send batches of consecutive tests. By default, pytest sorts tests
284 | # in order for optimal single-threaded execution, minimizing the
285 | # number of necessary fixture setup/teardown. Try to keep that
286 | # optimal order for every worker.
287 | 
288 | # roughly how many items per node do we have?
289 | items_per_node = len(self.collection) // len(self.node2pending)
290 | # take a fraction of tests for initial distribution
291 | node_chunksize = min(items_per_node // 4, self.maxschedchunk)
292 | node_chunksize = max(node_chunksize, 2)
293 | # and initialize each node with a chunk of tests
294 | for node in self.nodes:
295 | self._send_tests(node, node_chunksize)
296 | 
297 | if not self.pending:
298 | # initial distribution sent all tests, start node shutdown
299 | for node in self.nodes:
300 | node.shutdown()
301 | 
302 | def _send_tests(self, node: WorkerController, num: int) -> None:
303 | tests_per_node = self.pending[:num]
304 | if tests_per_node:
305 | del self.pending[:num]
306 | self.node2pending[node].extend(tests_per_node)
307 | node.send_runtest_some(tests_per_node)
308 | 
309 | def _check_nodes_have_same_collection(self) -> bool:
310 | """Return True if all nodes have collected the same items.
311 | 
312 | If collections differ, this method returns False while logging
313 | the collection differences and posting collection errors to
314 | pytest_collectreport hook.
315 | """
316 | node_collection_items = list(self.node2collection.items())
317 | first_node, col = node_collection_items[0]
318 | same_collection = True
319 | for node, collection in node_collection_items[1:]:
320 | msg = report_collection_diff(
321 | col, collection, first_node.gateway.id, node.gateway.id
322 | )
323 | if msg:
324 | same_collection = False
325 | self.log(msg)
326 | if self.config is not None:
327 | rep = pytest.CollectReport(
328 | nodeid=node.gateway.id,
329 | outcome="failed",
330 | longrepr=msg,
331 | result=[],
332 | )
333 | self.config.hook.pytest_collectreport(report=rep)
334 | 
335 | return same_collection
336 | 
--------------------------------------------------------------------------------
/testing/test_remote.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | 
3 | import marshal
4 | import pprint
5 | from queue import Queue
6 | import sys
7 | from typing import Any
8 | from typing import Callable
9 | from typing import cast
10 | from typing import Union
11 | import uuid
12 | 
13 | import execnet
14 | import pytest
15 | 
16 | from xdist.workermanage import NodeManager
17 | from xdist.workermanage import WorkerController
18 | 
19 | 
20 | WAIT_TIMEOUT = 10.0
21 | 
22 | 
23 | def check_marshallable(d: object) -> None:
24 | try:
25 | marshal.dumps(d) # type: ignore[arg-type]
26 | except ValueError as e:
27 | pprint.pprint(d)
28 | raise ValueError("not marshallable") from e
29 | 
30 | 
31 | class EventCall:
32 | def __init__(self, eventcall: tuple[str, dict[str, Any]]) -> None:
33 | self.name, self.kwargs = eventcall
34 | 
35 | def __str__(self) -> str:
36 | return f"<EventCall {self.name}(**{self.kwargs})>"
37 | 
38 | 
39 | class WorkerSetup:
40 | def __init__(
41 | self, request: pytest.FixtureRequest, pytester: pytest.Pytester
42 | ) -> None:
43 | self.request = request
44 | self.pytester = pytester
45 | self.use_callback = False
46 | self.events = Queue() # type: ignore[var-annotated]
47 | 
48 | def setup(self) -> None:
49 | self.pytester.chdir()
50 | # import os ; os.environ['EXECNET_DEBUG'] = "2"
51 | self.gateway = execnet.makegateway("execmodel=main_thread_only//popen")
52 | self.config = config = self.pytester.parseconfigure()
53 | putevent = self.events.put if self.use_callback else None
54 | 
55 | class DummyManager:
56 | testrunuid = uuid.uuid4().hex
57 | specs = [0, 1]
58 | 
59 | nodemanager = cast(NodeManager, DummyManager)
60 | 
61 | self.slp = WorkerController(
62 | nodemanager=nodemanager,
63 | gateway=self.gateway,
64 | config=config,
65 | putevent=putevent, # type: ignore[arg-type]
66 | )
67 | self.request.addfinalizer(self.slp.ensure_teardown)
68 | self.slp.setup()
69 | 
70 | def popevent(self, name: str | None = None) -> EventCall:
71 | while 1:
72 | if self.use_callback:
73 | data = self.events.get(timeout=WAIT_TIMEOUT)
74 | else:
75 | data = self.slp.channel.receive(timeout=WAIT_TIMEOUT)
76 | ev = EventCall(data)
77 | if name is None or ev.name == name:
78 | return ev
79 | print(f"skipping {ev}")
80 | 
81 | def sendcommand(self, name: str, **kwargs: Any) -> None:
82 | self.slp.sendcommand(name, **kwargs)
83 | 
84 | 
85 | @pytest.fixture
86 | def worker(request: pytest.FixtureRequest, pytester: pytest.Pytester) -> WorkerSetup:
87 | return WorkerSetup(request, pytester)
88 | 
89 | 
90 | class TestWorkerInteractor:
91 | UnserializerReport = Callable[
92 | 
[dict[str, Any]], Union[pytest.CollectReport, pytest.TestReport] 93 | ] 94 | 95 | @pytest.fixture 96 | def unserialize_report(self, pytestconfig: pytest.Config) -> UnserializerReport: 97 | def unserialize( 98 | data: dict[str, Any], 99 | ) -> pytest.CollectReport | pytest.TestReport: 100 | return pytestconfig.hook.pytest_report_from_serializable( # type: ignore[no-any-return] 101 | config=pytestconfig, data=data 102 | ) 103 | 104 | return unserialize 105 | 106 | def test_basic_collect_and_runtests( 107 | self, worker: WorkerSetup, unserialize_report: UnserializerReport 108 | ) -> None: 109 | worker.pytester.makepyfile( 110 | """ 111 | def test_func(): 112 | pass 113 | """ 114 | ) 115 | worker.setup() 116 | ev = worker.popevent() 117 | assert ev.name == "workerready" 118 | ev = worker.popevent() 119 | assert ev.name == "collectionstart" 120 | assert not ev.kwargs 121 | ev = worker.popevent("collectionfinish") 122 | assert ev.kwargs["topdir"] == str(worker.pytester.path) 123 | ids = ev.kwargs["ids"] 124 | assert len(ids) == 1 125 | worker.sendcommand("runtests", indices=list(range(len(ids)))) 126 | worker.sendcommand("shutdown") 127 | ev = worker.popevent("logstart") 128 | assert ev.kwargs["nodeid"].endswith("test_func") 129 | assert len(ev.kwargs["location"]) == 3 130 | ev = worker.popevent("testreport") # setup 131 | ev = worker.popevent("testreport") 132 | assert ev.name == "testreport" 133 | rep = unserialize_report(ev.kwargs["data"]) 134 | assert rep.nodeid.endswith("::test_func") 135 | assert rep.passed 136 | assert rep.when == "call" 137 | ev = worker.popevent("workerfinished") 138 | assert "workeroutput" in ev.kwargs 139 | 140 | def test_remote_collect_skip( 141 | self, worker: WorkerSetup, unserialize_report: UnserializerReport 142 | ) -> None: 143 | worker.pytester.makepyfile( 144 | """ 145 | import pytest 146 | pytest.skip("hello", allow_module_level=True) 147 | """ 148 | ) 149 | worker.setup() 150 | ev = worker.popevent("collectionstart") 151 | assert not ev.kwargs 152 | ev = worker.popevent() 153 | assert ev.name == "collectreport" 154 | rep = unserialize_report(ev.kwargs["data"]) 155 | assert rep.skipped 156 | assert isinstance(rep.longrepr, tuple) 157 | assert rep.longrepr[2] == "Skipped: hello" 158 | ev = worker.popevent("collectionfinish") 159 | assert not ev.kwargs["ids"] 160 | 161 | def test_remote_collect_fail( 162 | self, worker: WorkerSetup, unserialize_report: UnserializerReport 163 | ) -> None: 164 | worker.pytester.makepyfile("""aasd qwe""") 165 | worker.setup() 166 | ev = worker.popevent("collectionstart") 167 | assert not ev.kwargs 168 | ev = worker.popevent() 169 | assert ev.name == "collectreport" 170 | rep = unserialize_report(ev.kwargs["data"]) 171 | assert rep.failed 172 | ev = worker.popevent("collectionfinish") 173 | assert not ev.kwargs["ids"] 174 | 175 | def test_runtests_all( 176 | self, worker: WorkerSetup, unserialize_report: UnserializerReport 177 | ) -> None: 178 | worker.pytester.makepyfile( 179 | """ 180 | def test_func(): pass 181 | def test_func2(): pass 182 | """ 183 | ) 184 | worker.setup() 185 | ev = worker.popevent() 186 | assert ev.name == "workerready" 187 | ev = worker.popevent() 188 | assert ev.name == "collectionstart" 189 | assert not ev.kwargs 190 | ev = worker.popevent("collectionfinish") 191 | ids = ev.kwargs["ids"] 192 | assert len(ids) == 2 193 | worker.sendcommand("runtests_all") 194 | worker.sendcommand("shutdown") 195 | for func in "::test_func", "::test_func2": 196 | for _ in range(3): # setup/call/teardown 197 | ev = 
worker.popevent("testreport") 198 | assert ev.name == "testreport" 199 | rep = unserialize_report(ev.kwargs["data"]) 200 | assert rep.nodeid.endswith(func) 201 | ev = worker.popevent("workerfinished") 202 | assert "workeroutput" in ev.kwargs 203 | 204 | def test_happy_run_events_converted( 205 | self, pytester: pytest.Pytester, worker: WorkerSetup 206 | ) -> None: 207 | pytest.xfail("implement a simple test for event production") 208 | assert not worker.use_callback # type: ignore[unreachable] 209 | worker.pytester.makepyfile( 210 | """ 211 | def test_func(): 212 | pass 213 | """ 214 | ) 215 | worker.setup() 216 | hookrec = pytester.getreportrecorder(worker.config) 217 | for data in worker.slp.channel: 218 | worker.slp.process_from_remote(data) 219 | worker.slp.process_from_remote(worker.slp.ENDMARK) 220 | pprint.pprint(hookrec.hookrecorder.calls) 221 | hookrec.hookrecorder.contains( 222 | [ 223 | ("pytest_collectstart", "collector.fspath == aaa"), 224 | ("pytest_pycollect_makeitem", "name == 'test_func'"), 225 | ("pytest_collectreport", "report.collector.fspath == aaa"), 226 | ("pytest_collectstart", "collector.fspath == bbb"), 227 | ("pytest_pycollect_makeitem", "name == 'test_func'"), 228 | ("pytest_collectreport", "report.collector.fspath == bbb"), 229 | ] 230 | ) 231 | 232 | def test_process_from_remote_error_handling( 233 | self, worker: WorkerSetup, capsys: pytest.CaptureFixture[str] 234 | ) -> None: 235 | worker.use_callback = True 236 | worker.setup() 237 | worker.slp.process_from_remote(("", {})) 238 | out, _err = capsys.readouterr() 239 | assert "INTERNALERROR> ValueError: unknown event: " in out 240 | ev = worker.popevent() 241 | assert ev.name == "errordown" 242 | 243 | def test_steal_work( 244 | self, worker: WorkerSetup, unserialize_report: UnserializerReport 245 | ) -> None: 246 | worker.pytester.makepyfile( 247 | """ 248 | import time 249 | def test_func(): time.sleep(1) 250 | def test_func2(): pass 251 | def test_func3(): pass 252 | def test_func4(): pass 253 | """ 254 | ) 255 | worker.setup() 256 | ev = worker.popevent("collectionfinish") 257 | ids = ev.kwargs["ids"] 258 | assert len(ids) == 4 259 | worker.sendcommand("runtests_all") 260 | 261 | # wait for test_func setup 262 | ev = worker.popevent("testreport") 263 | rep = unserialize_report(ev.kwargs["data"]) 264 | assert rep.nodeid.endswith("::test_func") 265 | assert rep.when == "setup" 266 | 267 | worker.sendcommand("steal", indices=[1, 2]) 268 | ev = worker.popevent("unscheduled") 269 | # Cannot steal index 1 because it is completed already, so do not steal any. 270 | assert ev.kwargs["indices"] == [] 271 | 272 | # Index 2 can be stolen, as it is still pending. 
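# (Index 2 is test_func3 in the collected ids above; the worker is expected to
# answer this request with an "unscheduled" event listing exactly the indices
# it handed back, as asserted below.)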
273 | worker.sendcommand("steal", indices=[2]) 274 | ev = worker.popevent("unscheduled") 275 | assert ev.kwargs["indices"] == [2] 276 | 277 | reports = [ 278 | ("test_func", "call"), 279 | ("test_func", "teardown"), 280 | ("test_func2", "setup"), 281 | ("test_func2", "call"), 282 | ("test_func2", "teardown"), 283 | ] 284 | 285 | for func, when in reports: 286 | ev = worker.popevent("testreport") 287 | rep = unserialize_report(ev.kwargs["data"]) 288 | assert rep.nodeid.endswith(f"::{func}") 289 | assert rep.when == when 290 | 291 | worker.sendcommand("shutdown") 292 | 293 | for when in ["setup", "call", "teardown"]: 294 | ev = worker.popevent("testreport") 295 | rep = unserialize_report(ev.kwargs["data"]) 296 | assert rep.nodeid.endswith("::test_func4") 297 | assert rep.when == when 298 | 299 | ev = worker.popevent("workerfinished") 300 | assert "workeroutput" in ev.kwargs 301 | 302 | def test_steal_empty_queue( 303 | self, worker: WorkerSetup, unserialize_report: UnserializerReport 304 | ) -> None: 305 | worker.pytester.makepyfile( 306 | """ 307 | def test_func(): pass 308 | def test_func2(): pass 309 | """ 310 | ) 311 | worker.setup() 312 | ev = worker.popevent("collectionfinish") 313 | ids = ev.kwargs["ids"] 314 | assert len(ids) == 2 315 | worker.sendcommand("runtests_all") 316 | 317 | for when in ["setup", "call", "teardown"]: 318 | ev = worker.popevent("testreport") 319 | rep = unserialize_report(ev.kwargs["data"]) 320 | assert rep.nodeid.endswith("::test_func") 321 | assert rep.when == when 322 | 323 | worker.sendcommand("steal", indices=[0, 1]) 324 | ev = worker.popevent("unscheduled") 325 | assert ev.kwargs["indices"] == [] 326 | 327 | worker.sendcommand("shutdown") 328 | 329 | for when in ["setup", "call", "teardown"]: 330 | ev = worker.popevent("testreport") 331 | rep = unserialize_report(ev.kwargs["data"]) 332 | assert rep.nodeid.endswith("::test_func2") 333 | assert rep.when == when 334 | 335 | ev = worker.popevent("workerfinished") 336 | assert "workeroutput" in ev.kwargs 337 | 338 | 339 | def test_remote_env_vars(pytester: pytest.Pytester) -> None: 340 | pytester.makepyfile( 341 | """ 342 | import os 343 | def test(): 344 | assert len(os.environ['PYTEST_XDIST_TESTRUNUID']) == 32 345 | assert os.environ['PYTEST_XDIST_WORKER'] in ('gw0', 'gw1') 346 | assert os.environ['PYTEST_XDIST_WORKER_COUNT'] == '2' 347 | """ 348 | ) 349 | result = pytester.runpytest("-n2", "--max-worker-restart=0") 350 | assert result.ret == 0 351 | 352 | 353 | def test_remote_inner_argv(pytester: pytest.Pytester) -> None: 354 | """Test/document the behavior due to execnet using `python -c`.""" 355 | pytester.makepyfile( 356 | """ 357 | import sys 358 | 359 | def test_argv(): 360 | assert sys.argv == ["-c"] 361 | """ 362 | ) 363 | result = pytester.runpytest("-n1") 364 | assert result.ret == 0 365 | 366 | 367 | def test_remote_mainargv(pytester: pytest.Pytester) -> None: 368 | outer_argv = sys.argv 369 | 370 | pytester.makepyfile( 371 | f""" 372 | def test_mainargv(request): 373 | assert request.config.workerinput["mainargv"] == {outer_argv!r} 374 | """ 375 | ) 376 | result = pytester.runpytest("-n1") 377 | assert result.ret == 0 378 | 379 | 380 | def test_remote_usage_prog(pytester: pytest.Pytester) -> None: 381 | if pytest.version_tuple[:2] >= (9, 0): 382 | get_optparser_expr = "get_config_parser.optparser" 383 | else: 384 | get_optparser_expr = "get_config_parser._getparser()" 385 | 386 | pytester.makeconftest( 387 | """ 388 | import pytest 389 | 390 | config_parser = None 391 | 392 | @pytest.fixture 
393 | def get_config_parser():
394 | return config_parser
395 | 
396 | def pytest_configure(config):
397 | global config_parser
398 | config_parser = config._parser
399 | """
400 | )
401 | pytester.makepyfile(
402 | f"""
403 | import sys
404 | 
405 | def test(get_config_parser, request):
406 | {get_optparser_expr}.error("my_usage_error")
407 | """
408 | )
409 | 
410 | result = pytester.runpytest_subprocess("-n1")
411 | assert result.ret == 1
412 | result.stdout.fnmatch_lines(["*usage: *", "*error: my_usage_error"])
413 | 
414 | 
415 | def test_remote_sys_path(pytester: pytest.Pytester) -> None:
416 | """Work around sys.path differences due to execnet using `python -c`."""
417 | pytester.makepyfile(
418 | """
419 | import sys
420 | 
421 | def test_sys_path():
422 | assert "" not in sys.path
423 | """
424 | )
425 | result = pytester.runpytest("-n1")
426 | assert result.ret == 0
427 | 
--------------------------------------------------------------------------------
/src/xdist/plugin.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | 
3 | import os
4 | import sys
5 | from typing import Literal
6 | import uuid
7 | import warnings
8 | 
9 | import pytest
10 | 
11 | 
12 | _sys_path = list(sys.path) # freeze a copy of sys.path at interpreter startup
13 | 
14 | 
15 | @pytest.hookimpl
16 | def pytest_xdist_auto_num_workers(config: pytest.Config) -> int:
17 | env_var = os.environ.get("PYTEST_XDIST_AUTO_NUM_WORKERS")
18 | if env_var:
19 | try:
20 | return int(env_var)
21 | except ValueError:
22 | warnings.warn(
23 | f"PYTEST_XDIST_AUTO_NUM_WORKERS is not a number: {env_var!r}. Ignoring it."
24 | )
25 | 
26 | try:
27 | import psutil
28 | except ImportError:
29 | pass
30 | else:
31 | use_logical: bool = config.option.numprocesses == "logical"
32 | count = psutil.cpu_count(logical=use_logical) or psutil.cpu_count()
33 | if count:
34 | return count
35 | try:
36 | from os import sched_getaffinity
37 | 
38 | def cpu_count() -> int:
39 | return len(sched_getaffinity(0))
40 | 
41 | except ImportError:
42 | if os.environ.get("TRAVIS") == "true":
43 | # workaround https://github.com/pypy/pypy/issues/2375
44 | return 2
45 | try:
46 | from os import cpu_count # type: ignore[assignment]
47 | except ImportError:
48 | from multiprocessing import cpu_count
49 | try:
50 | n = cpu_count()
51 | except NotImplementedError:
52 | return 1
53 | return n if n else 1
54 | 
55 | 
56 | def parse_numprocesses(s: str) -> int | Literal["auto", "logical"]:
57 | if s in ("auto", "logical"):
58 | return s # type: ignore[return-value]
59 | elif s is not None:
60 | return int(s)
61 | 
62 | 
63 | @pytest.hookimpl
64 | def pytest_addoption(parser: pytest.Parser) -> None:
65 | # 'Help' formatting (same rules as pytest's):
66 | # Start with capitalized letters.
67 | # If a single phrase, do not end with period. If more than one phrase, all phrases end with periods.
68 | # Use \n to separate logical lines.
69 | group = parser.getgroup("xdist", "distributed and subprocess testing")
70 | group._addoption(
71 | "-n",
72 | "--numprocesses",
73 | dest="numprocesses",
74 | metavar="numprocesses",
75 | action="store",
76 | type=parse_numprocesses,
77 | help="Shortcut for '--dist=load --tx=NUM*popen'.\n"
78 | "With 'logical', attempt to detect logical CPU count (requires psutil, falls back to 'auto').\n"
79 | "With 'auto', attempt to detect physical CPU count.
If physical CPU count cannot be determined, " 80 | "falls back to 1.\n" 81 | "Forced to 0 (disabled) when used with --pdb.", 82 | ) 83 | group.addoption( 84 | "--maxprocesses", 85 | dest="maxprocesses", 86 | metavar="maxprocesses", 87 | action="store", 88 | type=int, 89 | help="Limit the maximum number of workers to process the tests when using --numprocesses " 90 | "with 'auto' or 'logical'", 91 | ) 92 | group.addoption( 93 | "--max-worker-restart", 94 | action="store", 95 | default=None, 96 | dest="maxworkerrestart", 97 | help="Maximum number of workers that can be restarted " 98 | "when crashed (set to zero to disable this feature)", 99 | ) 100 | group.addoption( 101 | "--dist", 102 | metavar="distmode", 103 | action="store", 104 | choices=[ 105 | "each", 106 | "load", 107 | "loadscope", 108 | "loadfile", 109 | "loadgroup", 110 | "worksteal", 111 | "no", 112 | ], 113 | dest="dist", 114 | default="no", 115 | help=( 116 | "Set mode for distributing tests to exec environments.\n\n" 117 | "each: Send each test to all available environments.\n\n" 118 | "load: Load balance by sending any pending test to any" 119 | " available environment.\n\n" 120 | "loadscope: Load balance by sending pending groups of tests in" 121 | " the same scope to any available environment.\n\n" 122 | "loadfile: Load balance by sending test grouped by file" 123 | " to any available environment.\n\n" 124 | "loadgroup: Like 'load', but sends tests marked with 'xdist_group' to the same worker.\n\n" 125 | "worksteal: Split the test suite between available environments," 126 | " then re-balance when any worker runs out of tests.\n\n" 127 | "(default) no: Run tests inprocess, don't distribute." 128 | ), 129 | ) 130 | group.addoption( 131 | "--loadscope-reorder", 132 | dest="loadscopereorder", 133 | action="store_true", 134 | default=True, 135 | help=( 136 | "Pytest-xdist will default reorder tests by number of tests per scope " 137 | "when used in conjunction with loadscope.\n" 138 | "This option will enable loadscope reorder which will improve the " 139 | "parallelism of the test suite.\n" 140 | "However, the partial order of tests might not be retained.\n" 141 | ), 142 | ) 143 | group.addoption( 144 | "--no-loadscope-reorder", 145 | dest="loadscopereorder", 146 | action="store_false", 147 | help=( 148 | "Pytest-xdist will default reorder tests by number of tests per scope " 149 | "when used in conjunction with loadscope.\n" 150 | "This option will disable loadscope reorder, " 151 | "and the partial order of tests can be retained.\n" 152 | "This is useful when pytest-xdist is used together with " 153 | "other plugins that specify tests in a specific order." 154 | ), 155 | ) 156 | group.addoption( 157 | "--tx", 158 | dest="tx", 159 | action="append", 160 | default=[], 161 | metavar="xspec", 162 | help=( 163 | "Add a test execution environment. Some examples:\n" 164 | "--tx popen//python=python2.5 --tx socket=192.168.1.102:8888\n" 165 | "--tx ssh=user@codespeak.net//chdir=testcache" 166 | ), 167 | ) 168 | group.addoption( 169 | "--px", 170 | dest="px", 171 | action="append", 172 | default=[], 173 | metavar="xspec", 174 | help=( 175 | "Add a proxy gateway to pass to test execution environments using `via`. Example:\n" 176 | "--px id=my_proxy//socket=192.168.1.102:8888 --tx 5*popen//via=my_proxy" 177 | ), 178 | ) 179 | group._addoption( 180 | "-d", 181 | action="store_true", 182 | dest="distload", 183 | default=False, 184 | help="Load-balance tests. 
Shortcut for '--dist=load'.", 185 | ) 186 | group.addoption( 187 | "--rsyncdir", 188 | action="append", 189 | default=[], 190 | metavar="DIR", 191 | help="Add directory for rsyncing to remote tx nodes", 192 | ) 193 | group.addoption( 194 | "--rsyncignore", 195 | action="append", 196 | default=[], 197 | metavar="GLOB", 198 | help="Add expression for ignores when rsyncing to remote tx nodes", 199 | ) 200 | group.addoption( 201 | "--testrunuid", 202 | action="store", 203 | help=( 204 | "Provide an identifier shared amongst all workers as the value of " 205 | "the 'testrun_uid' fixture.\n" 206 | "If not provided, 'testrun_uid' is filled with a new unique string " 207 | "on every test run." 208 | ), 209 | ) 210 | group.addoption( 211 | "--maxschedchunk", 212 | action="store", 213 | type=int, 214 | help=( 215 | "Maximum number of tests scheduled in one step for --dist=load.\n" 216 | "Setting it to 1 will force pytest to send tests to workers one by " 217 | "one - might be useful for a small number of slow tests.\n" 218 | "Larger numbers will allow the scheduler to submit consecutive " 219 | "chunks of tests to workers - allows reusing fixtures.\n" 220 | "Due to implementation reasons, at least 2 tests are scheduled per " 221 | "worker at the start. Only later tests can be scheduled one by one.\n" 222 | "Unlimited if not set." 223 | ), 224 | ) 225 | 226 | parser.addini( 227 | "rsyncdirs", 228 | "list of (relative) paths to be rsynced for remote distributed testing.", 229 | type="paths", 230 | ) 231 | parser.addini( 232 | "rsyncignore", 233 | "list of (relative) glob-style paths to be ignored for rsyncing.", 234 | type="paths", 235 | ) 236 | parser.addini( 237 | "looponfailroots", 238 | type="paths", 239 | help="directories to check for changes. Default: current directory.", 240 | ) 241 | 242 | 243 | # ------------------------------------------------------------------------- 244 | # distributed testing hooks 245 | # ------------------------------------------------------------------------- 246 | 247 | 248 | @pytest.hookimpl 249 | def pytest_addhooks(pluginmanager: pytest.PytestPluginManager) -> None: 250 | from xdist import newhooks 251 | 252 | pluginmanager.add_hookspecs(newhooks) 253 | 254 | 255 | # ------------------------------------------------------------------------- 256 | # distributed testing initialization 257 | # ------------------------------------------------------------------------- 258 | 259 | 260 | @pytest.hookimpl(trylast=True) 261 | def pytest_configure(config: pytest.Config) -> None: 262 | config_line = ( 263 | "xdist_group: specify group for tests should run in same session." 264 | "in relation to one another. Provided by pytest-xdist." 265 | ) 266 | config.addinivalue_line("markers", config_line) 267 | 268 | # Skip this plugin entirely when only doing collection. 269 | if config.getvalue("collectonly"): 270 | return 271 | 272 | # Create the distributed session in case we have a valid distribution 273 | # mode and test environments. 274 | if _is_distribution_mode(config): 275 | from xdist.dsession import DSession 276 | 277 | session = DSession(config) 278 | config.pluginmanager.register(session, "dsession") 279 | tr = config.pluginmanager.getplugin("terminalreporter") 280 | if tr: 281 | tr.showfspath = False 282 | 283 | # Deprecation warnings for deprecated command-line/configuration options. 
284 | if config.getoption("looponfail", None) or config.getini("looponfailroots"): 285 | warning = DeprecationWarning( 286 | "The --looponfail command line argument and looponfailroots config variable are deprecated.\n" 287 | "The loop-on-fail feature will be removed in pytest-xdist 4.0." 288 | ) 289 | config.issue_config_time_warning(warning, 2) 290 | 291 | if config.getoption("rsyncdir", None) or config.getini("rsyncdirs"): 292 | warning = DeprecationWarning( 293 | "The --rsyncdir command line argument and rsyncdirs config variable are deprecated.\n" 294 | "The rsync feature will be removed in pytest-xdist 4.0." 295 | ) 296 | config.issue_config_time_warning(warning, 2) 297 | 298 | 299 | def _is_distribution_mode(config: pytest.Config) -> bool: 300 | """Whether distribution mode is on.""" 301 | return config.getoption("dist") != "no" and bool(config.getoption("tx")) 302 | 303 | 304 | @pytest.hookimpl(tryfirst=True) 305 | def pytest_cmdline_main(config: pytest.Config) -> None: 306 | if config.option.distload: 307 | config.option.dist = "load" 308 | 309 | usepdb = config.getoption("usepdb", False) # a core option 310 | if config.option.numprocesses in ("auto", "logical"): 311 | if usepdb: 312 | config.option.numprocesses = 0 313 | config.option.dist = "no" 314 | else: 315 | auto_num_cpus = config.hook.pytest_xdist_auto_num_workers(config=config) 316 | config.option.numprocesses = auto_num_cpus 317 | 318 | if config.option.numprocesses: 319 | if config.option.dist == "no": 320 | config.option.dist = "load" 321 | numprocesses = config.option.numprocesses 322 | if config.option.maxprocesses: 323 | numprocesses = min(numprocesses, config.option.maxprocesses) 324 | config.option.tx = ["popen"] * numprocesses 325 | 326 | if config.option.numprocesses == 0: 327 | config.option.dist = "no" 328 | config.option.tx = [] 329 | 330 | val = config.getvalue 331 | if not val("collectonly") and _is_distribution_mode(config) and usepdb: 332 | raise pytest.UsageError( 333 | "--pdb is incompatible with distributing tests; try using -n0 or -nauto." 334 | ) 335 | 336 | 337 | # ------------------------------------------------------------------------- 338 | # fixtures and API to easily know the role of current node 339 | # ------------------------------------------------------------------------- 340 | 341 | 342 | def is_xdist_worker( 343 | request_or_session: pytest.FixtureRequest | pytest.Session, 344 | ) -> bool: 345 | """Return `True` if this is an xdist worker, `False` otherwise. 346 | 347 | :param request_or_session: the `pytest` `request` or `session` object 348 | """ 349 | return hasattr(request_or_session.config, "workerinput") 350 | 351 | 352 | def is_xdist_controller( 353 | request_or_session: pytest.FixtureRequest | pytest.Session, 354 | ) -> bool: 355 | """Return `True` if this is the xdist controller, `False` otherwise. 356 | 357 | Note: this method also returns `False` when distribution has not been 358 | activated at all. 359 | 360 | :param request_or_session: the `pytest` `request` or `session` object 361 | """ 362 | return ( 363 | not is_xdist_worker(request_or_session) 364 | and request_or_session.config.option.dist != "no" 365 | ) 366 | 367 | 368 | # ALIAS: TODO, deprecate (#592) 369 | is_xdist_master = is_xdist_controller 370 | 371 | 372 | def get_xdist_worker_id( 373 | request_or_session: pytest.FixtureRequest | pytest.Session, 374 | ) -> str: 375 | """Return the id of the current worker ('gw0', 'gw1', etc) or 'master' 376 | if running on the controller node. 
377 | 378 | If not distributing tests (for example passing `-n0` or not passing `-n` at all) 379 | also return 'master'. 380 | 381 | :param request_or_session: the `pytest` `request` or `session` object 382 | """ 383 | if hasattr(request_or_session.config, "workerinput"): 384 | workerid: str = request_or_session.config.workerinput["workerid"] 385 | return workerid 386 | else: 387 | # TODO: remove "master", ideally for a None 388 | return "master" 389 | 390 | 391 | @pytest.fixture(scope="session") 392 | def worker_id(request: pytest.FixtureRequest) -> str: 393 | """Return the id of the current worker ('gw0', 'gw1', etc) or 'master' 394 | if running on the master node. 395 | """ 396 | # TODO: remove "master", ideally for a None 397 | return get_xdist_worker_id(request) 398 | 399 | 400 | @pytest.fixture(scope="session") 401 | def testrun_uid(request: pytest.FixtureRequest) -> str: 402 | """Return the unique id of the current test.""" 403 | if hasattr(request.config, "workerinput"): 404 | testrunid: str = request.config.workerinput["testrunuid"] 405 | return testrunid 406 | else: 407 | return uuid.uuid4().hex 408 | -------------------------------------------------------------------------------- /src/xdist/remote.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module is executed in remote subprocesses and helps to 3 | control a remote testing session and relay back information. 4 | It assumes that 'py' is importable and does not have dependencies 5 | on the rest of the xdist code. This means that the xdist-plugin 6 | needs not to be installed in remote environments. 7 | """ 8 | 9 | from __future__ import annotations 10 | 11 | import collections 12 | from collections.abc import Generator 13 | from collections.abc import Iterable 14 | from collections.abc import Sequence 15 | import contextlib 16 | import enum 17 | import os 18 | import sys 19 | import time 20 | from typing import Any 21 | from typing import Literal 22 | from typing import TypedDict 23 | from typing import Union 24 | import warnings 25 | 26 | from _pytest.config import _prepareconfig 27 | import execnet 28 | import pytest 29 | 30 | 31 | try: 32 | from setproctitle import setproctitle 33 | except ImportError: 34 | 35 | def setproctitle(title: str) -> None: 36 | pass 37 | 38 | 39 | class Producer: 40 | """ 41 | Simplified implementation of the same interface as py.log, for backward compatibility 42 | since we dropped the dependency on pylib. 43 | Note: this is defined here because this module can't depend on xdist, so we need 44 | to have the other way around. 
45 | """ 46 | 47 | def __init__(self, name: str, *, enabled: bool = True) -> None: 48 | self.name = name 49 | self.enabled = enabled 50 | 51 | def __repr__(self) -> str: 52 | return f"{type(self).__name__}({self.name!r}, enabled={self.enabled})" 53 | 54 | def __call__(self, *a: Any, **k: Any) -> None: 55 | if self.enabled: 56 | print(f"[{self.name}]", *a, **k, file=sys.stderr) 57 | 58 | def __getattr__(self, name: str) -> Producer: 59 | return type(self)(name, enabled=self.enabled) 60 | 61 | 62 | def worker_title(title: str) -> None: 63 | try: 64 | setproctitle(title) 65 | except Exception: 66 | # changing the process name is very optional, no errors please 67 | pass 68 | 69 | 70 | class Marker(enum.Enum): 71 | SHUTDOWN = 0 72 | 73 | 74 | class TestQueue: 75 | """A simple queue that can be inspected and modified while the lock is held via the ``lock()`` method.""" 76 | 77 | Item = Union[int, Literal[Marker.SHUTDOWN]] 78 | 79 | def __init__(self, execmodel: execnet.gateway_base.ExecModel): 80 | self._items: collections.deque[TestQueue.Item] = collections.deque() 81 | self._lock = execmodel.RLock() # type: ignore[no-untyped-call] 82 | self._has_items_event = execmodel.Event() 83 | 84 | def get(self) -> Item: 85 | while True: 86 | with self.lock() as locked_items: 87 | if locked_items: 88 | return locked_items.popleft() 89 | 90 | self._has_items_event.wait() 91 | 92 | def put(self, item: Item) -> None: 93 | with self.lock() as locked_items: 94 | locked_items.append(item) 95 | 96 | def replace(self, iterable: Iterable[Item]) -> None: 97 | with self.lock(): 98 | self._items = collections.deque(iterable) 99 | 100 | @contextlib.contextmanager 101 | def lock(self) -> Generator[collections.deque[Item]]: 102 | with self._lock: 103 | try: 104 | yield self._items 105 | finally: 106 | if self._items: 107 | self._has_items_event.set() 108 | else: 109 | self._has_items_event.clear() 110 | 111 | 112 | class WorkerInteractor: 113 | def __init__(self, config: pytest.Config, channel: execnet.Channel) -> None: 114 | self.config = config 115 | workerinput: dict[str, Any] = config.workerinput # type: ignore[attr-defined] 116 | self.workerid = workerinput.get("workerid", "?") 117 | self.testrunuid = workerinput["testrunuid"] 118 | self.log = Producer(f"worker-{self.workerid}", enabled=config.option.debug) 119 | self.channel = channel 120 | self.torun = TestQueue(self.channel.gateway.execmodel) 121 | self.nextitem_index: int | None | Literal[Marker.SHUTDOWN] = None 122 | config.pluginmanager.register(self) 123 | 124 | def sendevent(self, name: str, **kwargs: object) -> None: 125 | self.log("sending", name, kwargs) 126 | self.channel.send((name, kwargs)) 127 | 128 | @pytest.hookimpl 129 | def pytest_internalerror(self, excrepr: object) -> None: 130 | formatted_error = str(excrepr) 131 | for line in formatted_error.split("\n"): 132 | self.log("IERROR>", line) 133 | interactor.sendevent("internal_error", formatted_error=formatted_error) 134 | 135 | @pytest.hookimpl 136 | def pytest_sessionstart(self, session: pytest.Session) -> None: 137 | self.session = session 138 | workerinfo = getinfodict() 139 | self.sendevent("workerready", workerinfo=workerinfo) 140 | 141 | @pytest.hookimpl(hookwrapper=True) 142 | def pytest_sessionfinish(self, exitstatus: int) -> Generator[None, object, None]: 143 | workeroutput: dict[str, Any] = self.config.workeroutput # type: ignore[attr-defined] 144 | # in pytest 5.0+, exitstatus is an IntEnum object 145 | workeroutput["exitstatus"] = int(exitstatus) 146 | workeroutput["shouldfail"] = 
self.session.shouldfail 147 | workeroutput["shouldstop"] = self.session.shouldstop 148 | yield 149 | self.sendevent("workerfinished", workeroutput=workeroutput) 150 | 151 | @pytest.hookimpl 152 | def pytest_collection(self) -> None: 153 | self.sendevent("collectionstart") 154 | 155 | def handle_command( 156 | self, command: tuple[str, dict[str, Any]] | Literal[Marker.SHUTDOWN] 157 | ) -> None: 158 | if command is Marker.SHUTDOWN: 159 | self.torun.put(Marker.SHUTDOWN) 160 | return 161 | 162 | name, kwargs = command 163 | 164 | self.log("received command", name, kwargs) 165 | if name == "runtests": 166 | for i in kwargs["indices"]: 167 | self.torun.put(i) 168 | elif name == "runtests_all": 169 | for i in range(len(self.session.items)): 170 | self.torun.put(i) 171 | elif name == "shutdown": 172 | self.torun.put(Marker.SHUTDOWN) 173 | elif name == "steal": 174 | self.steal(kwargs["indices"]) 175 | 176 | def steal(self, indices: Sequence[int]) -> None: 177 | """ 178 | Remove tests from the queue. 179 | 180 | Removes either all requested tests, or none, if some of these tests 181 | are not in the queue (for example, if they were processed already). 182 | 183 | :param indices: indices of the tests to remove. 184 | """ 185 | requested_set = set(indices) 186 | 187 | with self.torun.lock() as locked_queue: 188 | stolen = list(item for item in locked_queue if item in requested_set) 189 | 190 | # Stealing only if all requested tests are still pending 191 | if len(stolen) == len(requested_set): 192 | self.torun.replace( 193 | item for item in locked_queue if item not in requested_set 194 | ) 195 | else: 196 | stolen = [] 197 | 198 | self.sendevent("unscheduled", indices=stolen) 199 | 200 | @pytest.hookimpl 201 | def pytest_runtestloop(self, session: pytest.Session) -> bool: 202 | self.log("entering main loop") 203 | self.channel.setcallback(self.handle_command, endmarker=Marker.SHUTDOWN) 204 | self.nextitem_index = self.torun.get() 205 | while self.nextitem_index is not Marker.SHUTDOWN: 206 | self.run_one_test() 207 | if session.shouldfail or session.shouldstop: 208 | break 209 | return True 210 | 211 | def run_one_test(self) -> None: 212 | assert isinstance(self.nextitem_index, int) 213 | self.item_index = self.nextitem_index 214 | self.nextitem_index = self.torun.get() 215 | 216 | items = self.session.items 217 | item = items[self.item_index] 218 | if self.nextitem_index is Marker.SHUTDOWN: 219 | nextitem = None 220 | else: 221 | assert self.nextitem_index is not None 222 | nextitem = items[self.nextitem_index] 223 | 224 | worker_title("[pytest-xdist running] %s" % item.nodeid) 225 | 226 | start = time.perf_counter() 227 | self.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) 228 | duration = time.perf_counter() - start 229 | 230 | worker_title("[pytest-xdist idle]") 231 | 232 | self.sendevent( 233 | "runtest_protocol_complete", item_index=self.item_index, duration=duration 234 | ) 235 | 236 | def pytest_collection_modifyitems( 237 | self, 238 | config: pytest.Config, 239 | items: list[pytest.Item], 240 | ) -> None: 241 | # add the group name to nodeid as suffix if --dist=loadgroup 242 | if config.getvalue("loadgroup"): 243 | for item in items: 244 | gnames: set[str] = set() 245 | for mark in item.iter_markers("xdist_group"): 246 | name = ( 247 | mark.args[0] 248 | if len(mark.args) > 0 249 | else mark.kwargs.get("name", "default") 250 | ) 251 | gnames.add(str(name)) 252 | if not gnames: 253 | continue 254 | item._nodeid = f"{item.nodeid}@{'_'.join(sorted(gnames))}" 255 | 256 | 
@pytest.hookimpl 257 | def pytest_collection_finish(self, session: pytest.Session) -> None: 258 | self.sendevent( 259 | "collectionfinish", 260 | topdir=str(self.config.rootpath), 261 | ids=[item.nodeid for item in session.items], 262 | ) 263 | 264 | @pytest.hookimpl 265 | def pytest_runtest_logstart( 266 | self, 267 | nodeid: str, 268 | location: tuple[str, int | None, str], 269 | ) -> None: 270 | self.sendevent("logstart", nodeid=nodeid, location=location) 271 | 272 | @pytest.hookimpl 273 | def pytest_runtest_logfinish( 274 | self, 275 | nodeid: str, 276 | location: tuple[str, int | None, str], 277 | ) -> None: 278 | self.sendevent("logfinish", nodeid=nodeid, location=location) 279 | 280 | @pytest.hookimpl 281 | def pytest_runtest_logreport(self, report: pytest.TestReport) -> None: 282 | data = self.config.hook.pytest_report_to_serializable( 283 | config=self.config, report=report 284 | ) 285 | data["item_index"] = self.item_index 286 | data["worker_id"] = self.workerid 287 | data["testrun_uid"] = self.testrunuid 288 | assert self.session.items[self.item_index].nodeid == report.nodeid 289 | self.sendevent("testreport", data=data) 290 | 291 | @pytest.hookimpl 292 | def pytest_collectreport(self, report: pytest.CollectReport) -> None: 293 | # send only reports that have not passed to controller as optimization (#330) 294 | if not report.passed: 295 | data = self.config.hook.pytest_report_to_serializable( 296 | config=self.config, report=report 297 | ) 298 | self.sendevent("collectreport", data=data) 299 | 300 | @pytest.hookimpl 301 | def pytest_warning_recorded( 302 | self, 303 | warning_message: warnings.WarningMessage, 304 | when: str, 305 | nodeid: str, 306 | location: tuple[str, int, str] | None, 307 | ) -> None: 308 | self.sendevent( 309 | "warning_recorded", 310 | warning_message_data=serialize_warning_message(warning_message), 311 | when=when, 312 | nodeid=nodeid, 313 | location=location, 314 | ) 315 | 316 | 317 | def serialize_warning_message( 318 | warning_message: warnings.WarningMessage, 319 | ) -> dict[str, Any]: 320 | if isinstance(warning_message.message, Warning): 321 | message_module = type(warning_message.message).__module__ 322 | message_class_name = type(warning_message.message).__name__ 323 | message_str = str(warning_message.message) 324 | # check now if we can serialize the warning arguments (#349) 325 | # if not, we will just use the exception message on the controller node 326 | try: 327 | execnet.dumps(warning_message.message.args) 328 | except execnet.DumpError: 329 | message_args = None 330 | else: 331 | message_args = warning_message.message.args 332 | else: 333 | message_str = warning_message.message 334 | message_module = None 335 | message_class_name = None 336 | message_args = None 337 | if warning_message.category: 338 | category_module = warning_message.category.__module__ 339 | category_class_name = warning_message.category.__name__ 340 | else: 341 | category_module = None 342 | category_class_name = None 343 | 344 | result = { 345 | "message_str": message_str, 346 | "message_module": message_module, 347 | "message_class_name": message_class_name, 348 | "message_args": message_args, 349 | "category_module": category_module, 350 | "category_class_name": category_class_name, 351 | } 352 | # access private _WARNING_DETAILS because the attributes vary between Python versions 353 | for attr_name in warning_message._WARNING_DETAILS: # type: ignore[attr-defined] 354 | if attr_name in ("message", "category"): 355 | continue 356 | attr = getattr(warning_message, 
attr_name) 357 | # Check if we can serialize the warning detail, marking `None` otherwise 358 | # Note that we need to define the attr (even as `None`) to allow deserializing 359 | try: 360 | execnet.dumps(attr) 361 | except execnet.DumpError: 362 | result[attr_name] = repr(attr) 363 | else: 364 | result[attr_name] = attr 365 | return result 366 | 367 | 368 | class WorkerInfo(TypedDict): 369 | version: str 370 | version_info: tuple[int, int, int, str, int] 371 | sysplatform: str 372 | platform: str 373 | executable: str 374 | cwd: str 375 | id: str 376 | spec: execnet.XSpec 377 | 378 | 379 | def getinfodict() -> WorkerInfo: 380 | import platform 381 | 382 | return dict( 383 | version=sys.version, 384 | version_info=tuple(sys.version_info), # type: ignore[typeddict-item] 385 | sysplatform=sys.platform, 386 | platform=platform.platform(), 387 | executable=sys.executable, 388 | cwd=os.getcwd(), 389 | ) 390 | 391 | 392 | def setup_config(config: pytest.Config, basetemp: str | None) -> None: 393 | config.option.loadgroup = config.getvalue("dist") == "loadgroup" 394 | config.option.looponfail = False 395 | config.option.usepdb = False 396 | config.option.dist = "no" 397 | config.option.distload = False 398 | config.option.numprocesses = None 399 | config.option.maxprocesses = None 400 | config.option.basetemp = basetemp 401 | 402 | 403 | if __name__ == "__channelexec__": 404 | channel: execnet.Channel = channel # type: ignore[name-defined] # noqa: F821, PLW0127 405 | workerinput, args, option_dict, change_sys_path = channel.receive() # type: ignore[name-defined] 406 | 407 | if change_sys_path is None: 408 | importpath = os.getcwd() 409 | sys.path.insert(0, importpath) 410 | os.environ["PYTHONPATH"] = ( 411 | importpath + os.pathsep + os.environ.get("PYTHONPATH", "") 412 | ) 413 | else: 414 | sys.path = change_sys_path 415 | 416 | os.environ["PYTEST_XDIST_TESTRUNUID"] = workerinput["testrunuid"] 417 | os.environ["PYTEST_XDIST_WORKER"] = workerinput["workerid"] 418 | os.environ["PYTEST_XDIST_WORKER_COUNT"] = str(workerinput["workercount"]) 419 | 420 | config = _prepareconfig(args, None) 421 | 422 | setup_config(config, option_dict.get("basetemp")) 423 | config._parser.prog = os.path.basename(workerinput["mainargv"][0]) 424 | config.workerinput = workerinput # type: ignore[attr-defined] 425 | config.workeroutput = {} # type: ignore[attr-defined] 426 | interactor = WorkerInteractor(config, channel) # type: ignore[name-defined] 427 | config.hook.pytest_cmdline_main(config=config) 428 | -------------------------------------------------------------------------------- /src/xdist/scheduler/loadscope.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from collections import OrderedDict 4 | from collections.abc import Sequence 5 | from typing import NoReturn 6 | 7 | import pytest 8 | 9 | from xdist.remote import Producer 10 | from xdist.report import report_collection_diff 11 | from xdist.workermanage import parse_tx_spec_config 12 | from xdist.workermanage import WorkerController 13 | 14 | 15 | class LoadScopeScheduling: 16 | """Implement load scheduling across nodes, but grouping test by scope. 17 | 18 | This distributes the tests collected across all nodes so each test is run 19 | just once. All nodes collect and submit the list of tests and when all 20 | collections are received it is verified they are identical collections. 
21 |     Then the collection gets divided up in work units, grouped by test scope,
22 |     and those work units get submitted to nodes. Whenever a node finishes an
23 |     item, it calls ``.mark_test_complete()`` which will trigger the scheduler
24 |     to assign more work units if the number of pending tests for the node falls
25 |     below a low-watermark.
26 | 
27 |     When created, ``numnodes`` defines how many nodes are expected to submit a
28 |     collection. This is used to know when all nodes have finished collection.
29 | 
30 |     Attributes::
31 | 
32 |     :numnodes: The expected number of nodes taking part. The actual number of
33 |        nodes will vary during the scheduler's lifetime as nodes are added by
34 |        the DSession as they are brought up and removed either because of a dead
35 |        node or normal shutdown. This number is primarily used to know when the
36 |        initial collection is completed.
37 | 
38 |     :collection: The final list of tests collected by all nodes once it is
39 |        validated to be identical between all the nodes. It is initialised to
40 |        None until ``.schedule()`` is called.
41 | 
42 |     :workqueue: Ordered dictionary that maps all available scopes with their
43 |        associated tests (nodeid). Nodeids are in turn associated with their
44 |        completion status. One entry of the workqueue is called a work unit.
45 |        In turn, a collection of work unit is called a workload.
46 | 
47 |        ::
48 | 
49 |             workqueue = {
50 |                 '<full>/<path>/<to>/test_module.py': {
51 |                     '<full>/<path>/<to>/test_module.py::test_case1': False,
52 |                     '<full>/<path>/<to>/test_module.py::test_case2': False,
53 |                     (...)
54 |                 },
55 |                 (...)
56 |             }
57 | 
58 |     :assigned_work: Ordered dictionary that maps worker nodes with their
59 |        assigned work units.
60 | 
61 |        ::
62 | 
63 |             assigned_work = {
64 |                 '<worker node A>': {
65 |                     '<full>/<path>/<to>/test_module.py': {
66 |                         '<full>/<path>/<to>/test_module.py::test_case1': False,
67 |                         '<full>/<path>/<to>/test_module.py::test_case2': False,
68 |                         (...)
69 |                     },
70 |                     (...)
71 |                 },
72 |                 (...)
73 |             }
74 | 
75 |     :registered_collections: Ordered dictionary that maps worker nodes with
76 |        their collection of tests gathered during test discovery.
77 | 
78 |        ::
79 | 
80 |             registered_collections = {
81 |                 '<worker node A>': [
82 |                     '<full>/<path>/<to>/test_module.py::test_case1',
83 |                     '<full>/<path>/<to>/test_module.py::test_case2',
84 |                 ],
85 |                 (...)
86 |             }
87 | 
88 |     :log: A py.log.Producer instance.
89 | 
90 |     :config: Config object, used for handling hooks.
91 |     """
92 | 
93 |     def __init__(self, config: pytest.Config, log: Producer | None = None) -> None:
94 |         self.numnodes = len(parse_tx_spec_config(config))
95 |         self.collection: list[str] | None = None
96 | 
97 |         self.workqueue: OrderedDict[str, dict[str, bool]] = OrderedDict()
98 |         self.assigned_work: dict[WorkerController, dict[str, dict[str, bool]]] = {}
99 |         self.registered_collections: dict[WorkerController, list[str]] = {}
100 | 
101 |         if log is None:
102 |             self.log = Producer("loadscopesched")
103 |         else:
104 |             self.log = log.loadscopesched
105 | 
106 |         self.config = config
107 | 
108 |     @property
109 |     def nodes(self) -> list[WorkerController]:
110 |         """A list of all active nodes in the scheduler."""
111 |         return list(self.assigned_work.keys())
112 | 
113 |     @property
114 |     def collection_is_completed(self) -> bool:
115 |         """Boolean indication initial test collection is complete.
116 | 
117 |         This is a boolean indicating all initial participating nodes have
118 |         finished collection. The required number of initial nodes is defined
119 |         by ``.numnodes``.
120 | """ 121 | return len(self.registered_collections) >= self.numnodes 122 | 123 | @property 124 | def tests_finished(self) -> bool: 125 | """Return True if all tests have been executed by the nodes.""" 126 | if not self.collection_is_completed: 127 | return False 128 | 129 | if self.workqueue: 130 | return False 131 | 132 | for assigned_unit in self.assigned_work.values(): 133 | if self._pending_of(assigned_unit) >= 2: 134 | return False 135 | 136 | return True 137 | 138 | @property 139 | def has_pending(self) -> bool: 140 | """Return True if there are pending test items. 141 | 142 | This indicates that collection has finished and nodes are still 143 | processing test items, so this can be thought of as 144 | "the scheduler is active". 145 | """ 146 | if self.workqueue: 147 | return True 148 | 149 | for assigned_unit in self.assigned_work.values(): 150 | if self._pending_of(assigned_unit) > 0: 151 | return True 152 | 153 | return False 154 | 155 | def add_node(self, node: WorkerController) -> None: 156 | """Add a new node to the scheduler. 157 | 158 | From now on the node will be assigned work units to be executed. 159 | 160 | Called by the ``DSession.worker_workerready`` hook when it successfully 161 | bootstraps a new node. 162 | """ 163 | assert node not in self.assigned_work 164 | self.assigned_work[node] = {} 165 | 166 | def remove_node(self, node: WorkerController) -> str | None: 167 | """Remove a node from the scheduler. 168 | 169 | This should be called either when the node crashed or at shutdown time. 170 | In the former case any pending items assigned to the node will be 171 | re-scheduled. 172 | 173 | Called by the hooks: 174 | 175 | - ``DSession.worker_workerfinished``. 176 | - ``DSession.worker_errordown``. 177 | 178 | Return the item being executed while the node crashed or None if the 179 | node has no more pending items. 180 | """ 181 | workload = self.assigned_work.pop(node) 182 | if not self._pending_of(workload): 183 | return None 184 | 185 | # The node crashed, identify test that crashed 186 | for work_unit in workload.values(): 187 | for nodeid, completed in work_unit.items(): 188 | if not completed: 189 | crashitem = nodeid 190 | break 191 | else: 192 | continue 193 | break 194 | else: 195 | raise RuntimeError( 196 | "Unable to identify crashitem on a workload with pending items" 197 | ) 198 | 199 | # Made uncompleted work unit available again 200 | self.workqueue.update(workload) 201 | 202 | for node in self.assigned_work: 203 | self._reschedule(node) 204 | 205 | return crashitem 206 | 207 | def add_node_collection( 208 | self, node: WorkerController, collection: Sequence[str] 209 | ) -> None: 210 | """Add the collected test items from a node. 211 | 212 | The collection is stored in the ``.registered_collections`` dictionary. 213 | 214 | Called by the hook: 215 | 216 | - ``DSession.worker_collectionfinish``. 217 | """ 218 | # Check that add_node() was called on the node before 219 | assert node in self.assigned_work 220 | 221 | # A new node has been added later, perhaps an original one died. 
222 | if self.collection_is_completed: 223 | # Assert that .schedule() should have been called by now 224 | assert self.collection 225 | 226 | # Check that the new collection matches the official collection 227 | if collection != self.collection: 228 | other_node = next(iter(self.registered_collections.keys())) 229 | 230 | msg = report_collection_diff( 231 | self.collection, collection, other_node.gateway.id, node.gateway.id 232 | ) 233 | self.log(msg) 234 | return 235 | 236 | self.registered_collections[node] = list(collection) 237 | 238 | def mark_test_complete( 239 | self, node: WorkerController, item_index: int, duration: float = 0 240 | ) -> None: 241 | """Mark test item as completed by node. 242 | 243 | Called by the hook: 244 | 245 | - ``DSession.worker_testreport``. 246 | """ 247 | nodeid = self.registered_collections[node][item_index] 248 | scope = self._split_scope(nodeid) 249 | 250 | self.assigned_work[node][scope][nodeid] = True 251 | self._reschedule(node) 252 | 253 | def mark_test_pending(self, item: str) -> NoReturn: 254 | raise NotImplementedError() 255 | 256 | def remove_pending_tests_from_node( 257 | self, 258 | node: WorkerController, 259 | indices: Sequence[int], 260 | ) -> None: 261 | raise NotImplementedError() 262 | 263 | def _assign_work_unit(self, node: WorkerController) -> None: 264 | """Assign a work unit to a node.""" 265 | assert self.workqueue 266 | 267 | # Grab a unit of work 268 | scope, work_unit = self.workqueue.popitem(last=False) 269 | 270 | # Keep track of the assigned work 271 | assigned_to_node = self.assigned_work.setdefault(node, {}) 272 | assigned_to_node[scope] = work_unit 273 | 274 | # Ask the node to execute the workload 275 | worker_collection = self.registered_collections[node] 276 | nodeids_indexes = [ 277 | worker_collection.index(nodeid) 278 | for nodeid, completed in work_unit.items() 279 | if not completed 280 | ] 281 | 282 | node.send_runtest_some(nodeids_indexes) 283 | 284 | def _split_scope(self, nodeid: str) -> str: 285 | """Determine the scope (grouping) of a nodeid. 286 | 287 | There are usually 3 cases for a nodeid:: 288 | 289 | example/loadsuite/test/test_beta.py::test_beta0 290 | example/loadsuite/test/test_delta.py::Delta1::test_delta0 291 | example/loadsuite/epsilon/__init__.py::epsilon.epsilon 292 | 293 | #. Function in a test module. 294 | #. Method of a class in a test module. 295 | #. Doctest in a function in a package. 296 | 297 | This function will group tests with the scope determined by splitting 298 | the first ``::`` from the right. That is, classes will be grouped in a 299 | single work unit, and functions from a test module will be grouped by 300 | their module. In the above example, scopes will be:: 301 | 302 | example/loadsuite/test/test_beta.py 303 | example/loadsuite/test/test_delta.py::Delta1 304 | example/loadsuite/epsilon/__init__.py 305 | """ 306 | return nodeid.rsplit("::", 1)[0] 307 | 308 | def _pending_of(self, workload: dict[str, dict[str, bool]]) -> int: 309 | """Return the number of pending tests in a workload.""" 310 | pending = sum(list(scope.values()).count(False) for scope in workload.values()) 311 | return pending 312 | 313 | def _reschedule(self, node: WorkerController) -> None: 314 | """Maybe schedule new items on the node. 315 | 316 | If there are any globally pending work units left then this will check 317 | if the given node should be given any more tests. 
318 | """ 319 | # Do not add more work to a node shutting down 320 | if node.shutting_down: 321 | return 322 | 323 | # Check that more work is available 324 | if not self.workqueue: 325 | node.shutdown() 326 | return 327 | 328 | self.log("Number of units waiting for node:", len(self.workqueue)) 329 | 330 | # Check that the node is almost depleted of work 331 | # 2: Heuristic of minimum tests to enqueue more work 332 | if self._pending_of(self.assigned_work[node]) > 2: 333 | return 334 | 335 | # Pop one unit of work and assign it 336 | self._assign_work_unit(node) 337 | 338 | def schedule(self) -> None: 339 | """Initiate distribution of the test collection. 340 | 341 | Initiate scheduling of the items across the nodes. If this gets called 342 | again later it behaves the same as calling ``._reschedule()`` on all 343 | nodes so that newly added nodes will start to be used. 344 | 345 | If ``.collection_is_completed`` is True, this is called by the hook: 346 | 347 | - ``DSession.worker_collectionfinish``. 348 | """ 349 | assert self.collection_is_completed 350 | 351 | # Initial distribution already happened, reschedule on all nodes 352 | if self.collection is not None: 353 | for node in self.nodes: 354 | self._reschedule(node) 355 | return 356 | 357 | # Check that all nodes collected the same tests 358 | if not self._check_nodes_have_same_collection(): 359 | self.log("**Different tests collected, aborting run**") 360 | return 361 | 362 | # Collections are identical, create the final list of items 363 | self.collection = list(next(iter(self.registered_collections.values()))) 364 | if not self.collection: 365 | return 366 | 367 | # Determine chunks of work (scopes) 368 | unsorted_workqueue: dict[str, dict[str, bool]] = {} 369 | for nodeid in self.collection: 370 | scope = self._split_scope(nodeid) 371 | work_unit = unsorted_workqueue.setdefault(scope, {}) 372 | work_unit[nodeid] = False 373 | 374 | if self.config.option.loadscopereorder: 375 | # Insert tests scopes into work queue ordered by number of tests. 376 | for scope, nodeids in sorted( 377 | unsorted_workqueue.items(), key=lambda item: -len(item[1]) 378 | ): 379 | self.workqueue[scope] = nodeids 380 | else: 381 | for scope, nodeids in unsorted_workqueue.items(): 382 | self.workqueue[scope] = nodeids 383 | 384 | # Avoid having more workers than work 385 | extra_nodes = len(self.nodes) - len(self.workqueue) 386 | 387 | if extra_nodes > 0: 388 | self.log(f"Shutting down {extra_nodes} nodes") 389 | 390 | for _ in range(extra_nodes): 391 | unused_node, _assigned = self.assigned_work.popitem() 392 | 393 | self.log(f"Shutting down unused node {unused_node}") 394 | unused_node.shutdown() 395 | 396 | # Assign initial workload 397 | for node in self.nodes: 398 | self._assign_work_unit(node) 399 | 400 | # Ensure nodes start with at least two work units if possible (#277) 401 | for node in self.nodes: 402 | self._reschedule(node) 403 | 404 | # Initial distribution sent all tests, start node shutdown 405 | if not self.workqueue: 406 | for node in self.nodes: 407 | node.shutdown() 408 | 409 | def _check_nodes_have_same_collection(self) -> bool: 410 | """Return True if all nodes have collected the same items. 411 | 412 | If collections differ, this method returns False while logging 413 | the collection differences and posting collection errors to 414 | pytest_collectreport hook. 
415 | """ 416 | node_collection_items = list(self.registered_collections.items()) 417 | first_node, col = node_collection_items[0] 418 | same_collection = True 419 | 420 | for node, collection in node_collection_items[1:]: 421 | msg = report_collection_diff( 422 | col, collection, first_node.gateway.id, node.gateway.id 423 | ) 424 | if not msg: 425 | continue 426 | 427 | same_collection = False 428 | self.log(msg) 429 | 430 | rep = pytest.CollectReport( 431 | nodeid=node.gateway.id, 432 | outcome="failed", 433 | longrepr=msg, 434 | result=[], 435 | ) 436 | self.config.hook.pytest_collectreport(report=rep) 437 | 438 | return same_collection 439 | --------------------------------------------------------------------------------