├── .github
│   ├── dependabot.yml
│   └── workflows
│       ├── deploy.yml
│       └── test.yml
├── .gitignore
├── .pre-commit-config.yaml
├── CHANGELOG.rst
├── LICENSE
├── MANIFEST.in
├── README.rst
├── RELEASING.rst
├── pyproject.toml
├── src
│   └── pytest_replay
│       └── __init__.py
├── tests
│   ├── conftest.py
│   └── test_replay.py
└── tox.ini
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | # Keep GitHub Actions up to date with GitHub's Dependabot...
2 | # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot
3 | # https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem
4 | version: 2
5 | updates:
6 | - package-ecosystem: github-actions
7 | directory: /
8 | groups:
9 | github-actions:
10 | patterns:
11 | - "*" # Group all Actions updates into a single larger pull request
12 | schedule:
13 | interval: weekly
14 |
--------------------------------------------------------------------------------
/.github/workflows/deploy.yml:
--------------------------------------------------------------------------------
1 | name: deploy
2 |
3 | on:
4 | push:
5 | tags:
6 | - "[0-9]+.[0-9]+.[0-9]+"
7 |
8 | jobs:
9 |
10 | package:
11 | runs-on: ubuntu-latest
12 |
13 | steps:
14 | - uses: actions/checkout@v4
15 | - name: Build and Check Package
16 | uses: hynek/build-and-inspect-python-package@v2.12.0
17 |
18 | deploy:
19 | runs-on: ubuntu-latest
20 | permissions:
21 | id-token: write
22 | contents: write
23 |
24 | needs: package
25 |
26 | steps:
27 | - name: Download Package
28 | uses: actions/download-artifact@v4.3.0
29 | with:
30 | name: Packages
31 | path: dist
32 |
33 | - name: Publish package to PyPI
34 | uses: pypa/gh-action-pypi-publish@v1.12.4
35 |
36 | - name: Publish GitHub Release
37 | uses: softprops/action-gh-release@v2
38 | with:
39 | files: dist/*
40 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: test
2 |
3 | on:
4 | push:
5 | branches:
6 | - "master"
7 | - "test-me-*"
8 |
9 | pull_request:
10 |
11 |
12 | concurrency:
13 | group: ${{ github.workflow }}-${{ github.ref }}
14 | cancel-in-progress: true
15 |
16 |
17 | jobs:
18 |
19 | package:
20 | runs-on: ubuntu-latest
21 | steps:
22 | - uses: actions/checkout@v4
23 | - name: Build and Check Package
24 | uses: hynek/build-and-inspect-python-package@v2.12.0
25 |
26 | test:
27 |
28 | runs-on: ${{ matrix.os }}
29 |
30 | needs: package
31 |
32 | strategy:
33 | fail-fast: false
34 | matrix:
35 | python: ["3.9", "3.10", "3.11", "3.12", "3.13"]
36 | os: [ubuntu-latest, windows-latest]
37 |
38 | steps:
39 | - uses: actions/checkout@v4
40 |
41 | - name: Download Package
42 | uses: actions/download-artifact@v4.3.0
43 | with:
44 | name: Packages
45 | path: dist
46 |
47 | - name: Set up Python
48 | uses: actions/setup-python@v5
49 | with:
50 | python-version: ${{ matrix.python }}
51 |
52 | - name: Install tox
53 | run: |
54 | python -m pip install --upgrade pip
55 | pip install tox
56 |
57 | - name: Test
58 | shell: bash
59 | run: |
60 | tox run -e py --installpkg `find dist/*.tar.gz`
61 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 |
27 | # PyInstaller
28 | # Usually these files are written by a python script from a template
29 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
30 | *.manifest
31 | *.spec
32 |
33 | # Installer logs
34 | pip-log.txt
35 | pip-delete-this-directory.txt
36 |
37 | # Unit test / coverage reports
38 | htmlcov/
39 | .tox/
40 | .coverage
41 | .coverage.*
42 | .cache
43 | nosetests.xml
44 | coverage.xml
45 | *,cover
46 | .hypothesis/
47 |
48 | # Translations
49 | *.mo
50 | *.pot
51 |
52 | # Django stuff:
53 | *.log
54 | local_settings.py
55 |
56 | # Flask instance folder
57 | instance/
58 |
59 | # Sphinx documentation
60 | docs/_build/
61 |
62 | # MkDocs documentation
63 | /site/
64 |
65 | # PyBuilder
66 | target/
67 |
68 | # IPython Notebook
69 | .ipynb_checkpoints
70 |
71 | # pyenv
72 | .python-version
73 |
74 | # pycharm
75 | .idea
76 |
77 | # example virtual env used in "Contributing"
78 | .env
79 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/psf/black-pre-commit-mirror
3 | rev: 25.1.0
4 | hooks:
5 | - id: black
6 | args: [--safe, --quiet]
7 | language_version: python3
8 | - repo: https://github.com/pre-commit/pre-commit-hooks
9 | rev: v5.0.0
10 | hooks:
11 | - id: trailing-whitespace
12 | - id: end-of-file-fixer
13 | - id: debug-statements
14 | - repo: https://github.com/asottile/reorder-python-imports
15 | rev: v3.15.0
16 | hooks:
17 | - id: reorder-python-imports
18 | args: ['--application-directories=.:src', --py36-plus]
19 | - repo: local
20 | hooks:
21 | - id: rst
22 | name: rst
23 | entry: rst-lint --encoding utf-8
24 | files: ^(CHANGELOG.rst|HOWTORELEASE.rst|README.rst)$
25 | language: python
26 | additional_dependencies: [pygments, restructuredtext_lint]
27 |
--------------------------------------------------------------------------------
/CHANGELOG.rst:
--------------------------------------------------------------------------------
1 | 1.6.0
2 | =====
3 |
4 | *2025-02-05*
5 |
6 | * Add support for Python 3.13.
7 | * Dropped support for EOL Python 3.8.
8 | * Change build to use ``pyproject.toml``.
9 | * Allow customization of metadata in the replay file (`#78`_).
10 |
11 | .. _`#78`: https://github.com/ESSS/pytest-replay/issues/78
12 |
13 |
14 | 1.5.3
15 | =====
16 |
17 | *2024-11-29*
18 |
19 | * Ignore empty and blank lines in the replay file (`#70`_).
20 |
21 | .. _`#70`: https://github.com/ESSS/pytest-replay/issues/70
22 |
23 | 1.5.2
24 | =====
25 |
26 | *2024-09-03*
27 |
28 | * Fix test outcome reported in the replay file (`#64`_).
29 |
30 | .. _`#64`: https://github.com/ESSS/pytest-replay/issues/64
31 |
32 | 1.5.1
33 | =====
34 |
35 | *2024-01-11*
36 |
37 | * Dropped support for Python 3.6 and 3.7 (EOL).
38 | * Added official support for Python 3.10, 3.11 and 3.12.
39 | * Test execution order using ``--replay`` now follows the recorded order, not the collection order, as was always intended (`#52`_).
40 |
41 | .. _`#52`: https://github.com/ESSS/pytest-replay/pull/53
42 |
43 | 1.4.0
44 | =====
45 |
46 | *2021-06-09*
47 |
48 | * Introduce new ``--replay-skip-cleanup`` option that skips the cleanup before running the command. This allows keeping previously generated replay files when running new commands.
49 |
50 | 1.3.0
51 | =====
52 |
53 | *2020-12-09*
54 |
55 | * Replay files can now contain comments (``#`` or ``//``), to make it easy to comment out tests from them when trying to narrow the tests to find a culprit.
56 |
57 |
58 | 1.2.1
59 | =====
60 |
61 | *2020-08-24*
62 |
63 | * Add proper support when running with ``xdist`` in a frozen executable.
64 |
65 | 1.2.0
66 | =====
67 |
68 | *2019-11-14*
69 |
70 | * Change the format of the output to be able to add more information. The new output includes extra data such as
71 |   the start time, end time, outcome and the node identification; each line is written as a ``json``
72 |   object.
73 |
74 | 1.1.0
75 | =====
76 |
77 | *2019-11-11*
78 |
79 | * Introduce new ``--replay-base-name`` option that lets users configure a different name of the replay file. Defaults to ``.pytest-replay``.
80 |
81 | 1.0.0
82 | =====
83 |
84 | * Drop support for Python 2.
85 |
86 | 0.2.2
87 | =====
88 |
89 | * Normal runs and ``xdist`` runs no longer clean up each other's files.
90 |
91 | 0.2.1
92 | =====
93 |
94 | * Fix crash ``IOError`` when tests changed the current working directory in the middle
95 | of the testing session.
96 |
97 | 0.2.0
98 | =====
99 |
100 | * Replace the shell scripts with plain text files and add a new
101 |   ``--replay`` flag which accepts the generated files to run the tests.
102 |
103 | 0.1.1
104 | =====
105 |
106 | * Escape node ids in the generated shell scripts.
107 |
108 | 0.1.0
109 | =====
110 |
111 | * Initial release.
112 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | The MIT License (MIT)
3 |
4 | Copyright (c) 2018 Bruno Oliveira
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in
14 | all copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 | THE SOFTWARE.
23 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE
2 | include README.rst
3 |
4 | recursive-exclude * __pycache__
5 | recursive-exclude * *.py[co]
6 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | =============
2 | pytest-replay
3 | =============
4 |
5 |
6 | .. image:: http://img.shields.io/pypi/v/pytest-replay.svg
7 | :target: https://pypi.python.org/pypi/pytest-replay
8 |
9 | .. image:: https://anaconda.org/conda-forge/pytest-replay/badges/version.svg
10 | :target: https://anaconda.org/conda-forge/pytest-replay
11 |
12 | .. image:: https://github.com/ESSS/pytest-replay/workflows/test/badge.svg
13 | :target: https://github.com/ESSS/pytest-replay/actions?query=workflow%3Atest
14 |
15 | .. image:: https://img.shields.io/pypi/pyversions/pytest-replay.svg
16 | :target: https://pypi.python.org/pypi/pytest-replay
17 |
18 | .. image:: https://img.shields.io/badge/code%20style-black-000000.svg
19 | :target: https://github.com/psf/black
20 |
21 |
22 | Saves previous test runs and allows re-executing them to reproduce crashes or flaky tests
23 |
24 | ----
25 |
26 | This `pytest`_ plugin was generated with `Cookiecutter`_ along with `@hackebrot`_'s `Cookiecutter-pytest-plugin`_ template.
27 |
28 |
29 | Features
30 | --------
31 |
32 | This plugin helps to reproduce random or flaky behavior when running tests with xdist. ``pytest-xdist`` executes tests
33 | in a non-predictable order, making it hard to reproduce a behavior seen in CI locally because there's no convenient way
34 | to track which test executed in which worker.
35 |
36 | This plugin records the node ids executed by each worker in the directory given by the ``--replay-record-dir`` flag,
37 | and ``--replay`` can be used to re-run the tests from a previous run. For example::
38 |
39 | $ pytest -n auto --replay-record-dir=build/tests/replay
40 |
41 | This will generate files with one ``json`` object per line containing the
42 | node identification, start time, end time and outcome. Note that each node id
43 | usually appears twice: a line is written as soon as the test starts, so that if
44 | the test suddenly crashes there is still a record that it started. After the
45 | test finishes, ``pytest-replay`` adds another ``json`` line with the
46 | complete information.
47 | This is also useful for analyzing concurrent tests which might have some kind of
48 | race condition and interfere with each other.
49 |
50 | For example worker ``gw1`` will generate a file
51 | ``.pytest-replay-gw1.txt`` with contents like this::
52 |
53 | {"nodeid": "test_foo.py::test[1]", "start": 0.000}
54 | {"nodeid": "test_foo.py::test[1]", "start": 0.000, "finish": 1.5, "outcome": "passed"}
55 | {"nodeid": "test_foo.py::test[3]", "start": 1.5}
56 | {"nodeid": "test_foo.py::test[3]", "start": 1.5, "finish": 2.5, "outcome": "passed"}
57 | {"nodeid": "test_foo.py::test[5]", "start": 2.5}
58 | {"nodeid": "test_foo.py::test[5]", "start": 2.5, "finish": 3.5, "outcome": "passed"}
59 | {"nodeid": "test_foo.py::test[7]", "start": 3.5}
60 | {"nodeid": "test_foo.py::test[7]", "start": 3.5, "finish": 4.5, "outcome": "passed"}
61 | {"nodeid": "test_foo.py::test[8]", "start": 4.5}
62 | {"nodeid": "test_foo.py::test[8]", "start": 4.5, "finish": 5.5, "outcome": "passed"}
63 |
64 |
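Because every line is standalone ``json``, replay files are also easy to post-process. As a quick
illustration (not part of the plugin, and assuming the file name from the example above), the
snippet below computes per-test durations and lists tests that started but never finished, which
usually points at the test that crashed the worker:

.. code-block:: python

    import json

    durations = {}  # nodeid -> runtime in seconds
    unfinished = set()  # started but no "finish" record: a likely crash

    with open(".pytest-replay-gw1.txt", encoding="UTF-8") as f:
        for line in f:
            line = line.strip()
            # Blank lines and comments ("#" or "//") are allowed in replay files.
            if not line or line.startswith(("#", "//")):
                continue
            record = json.loads(line)
            if "finish" in record:
                durations[record["nodeid"]] = record["finish"] - record["start"]
                unfinished.discard(record["nodeid"])
            else:
                unfinished.add(record["nodeid"])

    print("slowest tests:", sorted(durations, key=durations.get, reverse=True)[:3])
    print("started but never finished:", sorted(unfinished))
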
65 | If there is a crash or a flaky failure in the tests of the worker ``gw1``, one can take that file from the CI server and
66 | execute the tests in the same order with::
67 |
68 | $ pytest --replay=.pytest-replay-gw1.txt
69 |
70 | Hopefully this will make it easier to reproduce the problem and fix it.
71 |
72 | Additional metadata
73 | -------------------
74 |
75 | *Version added: 1.6*
76 |
77 | In cases where it is necessary to add new metadata to the replay file to make the test reproducible, ``pytest-replay``
78 | provides a fixture called ``replay_metadata`` that allows new information to be added using the ``metadata``
79 | attribute.
80 |
81 | Example:
82 |
83 | .. code-block:: python
84 |
85 | import pytest
86 | import numpy as np
87 | import random
88 |
89 | @pytest.fixture
90 | def rng(replay_metadata):
91 | seed = replay_metadata.metadata.setdefault("seed", random.randint(0, 100))
92 | return np.random.default_rng(seed=seed)
93 |
94 | def test_random(rng):
95 | data = rng.standard_normal((100, 100))
96 | assert data.shape == (100, 100)
97 |
98 |
99 | When run with ``pytest-replay``, this generates a replay file similar to:
100 |
101 | .. code-block:: json
102 |
103 | {"nodeid": "test_bar.py::test_random", "start": 0.000}
104 | {"nodeid": "test_bar.py::test_random", "start": 0.000, "finish": 1.5, "outcome": "passed", "metadata": {"seed": 12}}
105 |
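When this file is later passed to ``--replay``, the recorded ``metadata`` is loaded back into
``replay_metadata.metadata`` before the test runs, so ``setdefault`` in the example above returns
the recorded seed and the original run can be reproduced.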
106 |
107 | FAQ
108 | ~~~
109 |
110 | 1. ``pytest`` has its own `cache <https://docs.pytest.org/en/stable/how-to/cache.html>`_, why use a different mechanism?
111 |
112 |    The internal cache saves its data using ``json``, which is not suitable in the event of a crash because the file
113 | will not be readable.
114 |
115 | 2. Shouldn't the ability of selecting tests from a file be part of the ``pytest`` core?
116 |
117 | Sure, but let's try to use this a bit as a separate plugin before proposing
118 | its inclusion into the core.
119 |
120 | Installation
121 | ------------
122 |
123 | You can install ``pytest-replay`` via `pip`_ from `PyPI`_::
124 |
125 | $ pip install pytest-replay
126 |
127 | Or with conda::
128 |
129 | $ conda install -c conda-forge pytest-replay
130 |
131 |
132 | Contributing
133 | ------------
134 |
135 | Contributions are very welcome.
136 |
137 | Tests can be run with `tox`_ if you are using a native Python installation.
138 |
139 | To run tests with `conda <https://conda.io>`_, first create a virtual environment and execute tests from there
140 | (conda with Python 3.5+ in the root environment)::
141 |
142 | $ python -m venv .env
143 | $ .env\scripts\activate
144 | $ pip install -e . pytest-xdist
145 | $ pytest tests
146 |
147 |
148 | Releases
149 | ~~~~~~~~
150 |
151 | Follow these steps to make a new release:
152 |
153 | 1. Create a new branch ``release-X.Y.Z`` from ``master``;
154 | 2. Update ``CHANGELOG.rst``;
155 | 3. Open a PR;
156 | 4. After it is **green** and **approved**, push a new tag in the format ``X.Y.Z``;
157 |
158 | GitHub Actions will deploy to PyPI automatically.
159 |
160 | Afterwards, update the recipe in `conda-forge/pytest-replay-feedstock `_.
161 |
162 |
163 | License
164 | -------
165 |
166 | Distributed under the terms of the `MIT`_ license.
167 |
168 |
169 | Issues
170 | ------
171 |
172 | If you encounter any problems, please `file an issue`_ along with a detailed description.
173 |
174 | .. _`Cookiecutter`: https://github.com/audreyr/cookiecutter
175 | .. _`@hackebrot`: https://github.com/hackebrot
176 | .. _`MIT`: http://opensource.org/licenses/MIT
177 | .. _`BSD-3`: http://opensource.org/licenses/BSD-3-Clause
178 | .. _`GNU GPL v3.0`: http://www.gnu.org/licenses/gpl-3.0.txt
179 | .. _`Apache Software License 2.0`: http://www.apache.org/licenses/LICENSE-2.0
180 | .. _`cookiecutter-pytest-plugin`: https://github.com/pytest-dev/cookiecutter-pytest-plugin
181 | .. _`file an issue`: https://github.com/ESSS/pytest-replay/issues
182 | .. _`pytest`: https://github.com/pytest-dev/pytest
183 | .. _`tox`: https://tox.readthedocs.io/en/latest/
184 | .. _`pip`: https://pypi.python.org/pypi/pip/
185 | .. _`PyPI`: https://pypi.python.org/pypi
186 |
--------------------------------------------------------------------------------
/RELEASING.rst:
--------------------------------------------------------------------------------
1 | Here are the steps on how to make a new release.
2 |
3 | 1. Create a ``release-VERSION`` branch from ``upstream/master``.
4 | 2. Update ``CHANGELOG.rst``.
5 | 3. Push a branch with the changes.
6 | 4. Once all builds pass, push a ``VERSION`` tag to ``upstream``.
7 | 5. Merge the PR.
8 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | build-backend = "setuptools.build_meta"
3 | requires = ["setuptools>=61", "setuptools_scm"]
4 |
5 | [tool.setuptools_scm]
6 |
7 | [project]
8 | name = "pytest-replay"
9 | authors = [
10 | { name = "ESSS", email = "foss@esss.co" },
11 | ]
12 | dynamic = ["version"]
13 | license = { text = "MIT" }
14 | urls = { Homepage = "https://github.com/ESSS/pytest-replay" }
15 | description = "Saves previous test runs and allows re-executing them to reproduce crashes or flaky tests"
16 | readme = "README.rst"
17 | requires-python = ">=3.9"
18 | dependencies = [
19 | "pytest",
20 | ]
21 | classifiers = [
22 | "Development Status :: 5 - Production/Stable",
23 | "Framework :: Pytest",
24 | "Intended Audience :: Developers",
25 | "Topic :: Software Development :: Testing",
26 | "Programming Language :: Python",
27 | "Programming Language :: Python :: 3",
28 | "Programming Language :: Python :: 3.9",
29 | "Programming Language :: Python :: 3.10",
30 | "Programming Language :: Python :: 3.11",
31 | "Programming Language :: Python :: 3.12",
32 | "Programming Language :: Python :: 3.13",
33 | "Programming Language :: Python :: Implementation :: CPython",
34 | "Operating System :: OS Independent",
35 | "License :: OSI Approved :: MIT License",
36 | ]
37 |
38 | [tool.setuptools]
39 | packages = { find = { where = ["src"] } }
40 | package-dir = { "" = "src" }
41 |
42 | [project.entry-points.pytest11]
43 | replay = "pytest_replay"
44 |
--------------------------------------------------------------------------------
/src/pytest_replay/__init__.py:
--------------------------------------------------------------------------------
1 | import collections
2 | import dataclasses
3 | import json
4 | import os
5 | import time
6 | from dataclasses import asdict
7 | from glob import glob
8 | from typing import Any
9 | from typing import Optional
10 |
11 | import pytest
12 |
13 |
14 | def pytest_addoption(parser):
15 | group = parser.getgroup("replay")
16 | group.addoption(
17 | "--replay-record-dir",
18 | action="store",
19 | dest="replay_record_dir",
20 | default=None,
21 | help="Directory to write record files to reproduce runs.",
22 | )
23 | group.addoption(
24 | "--replay",
25 | action="store",
26 | dest="replay_file",
27 | default=None,
28 | help="Use a replay file to run the tests from that file only",
29 | )
30 | group.addoption(
31 | "--replay-base-name",
32 | action="store",
33 | dest="base_name",
34 | default=".pytest-replay",
35 | help="Base name for the output file.",
36 | )
37 | group.addoption(
38 | "--replay-skip-cleanup",
39 | action="store_true",
40 | dest="skip_cleanup",
41 | default=False,
42 | help="Skips cleanup scripts before running (does not remove previously "
43 | "generated replay files).",
44 | )
45 |
46 |
47 | @dataclasses.dataclass
48 | class ReplayTestInfo:
49 | nodeid: str
50 | start: float = 0.0
51 | finish: Optional[float] = None
52 | outcome: Optional[str] = None
53 | metadata: dict[str, Any] = dataclasses.field(default_factory=dict)
54 |
55 | def to_clean_dict(self) -> dict[str, Any]:
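        # Drops any field that is unset or empty (None, 0.0, {}), keeping the
        # start-only record short; the complete record is written at teardown.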
56 | return {k: v for k, v in asdict(self).items() if v}
57 |
58 |
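# defaultdict's default_factory does not receive the missing key, so __missing__ is
# overridden here to create a ReplayTestInfo already bound to its nodeid.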
59 | class _ReplayTestInfoDefaultDict(collections.defaultdict):
60 | def __missing__(self, key):
61 | self[key] = ReplayTestInfo(nodeid=key)
62 | return self[key]
63 |
64 |
65 | class ReplayPlugin:
66 | def __init__(self, config):
67 | self.dir = config.getoption("replay_record_dir")
68 | self.base_script_name = config.getoption("base_name")
69 | if self.dir:
70 | self.dir = os.path.abspath(self.dir)
71 | nprocs = config.getoption("numprocesses", 0)
72 | self.running_xdist = nprocs is not None and nprocs > 1
73 | self.xdist_worker_name = os.environ.get("PYTEST_XDIST_WORKER", "")
74 | self.ext = ".txt"
75 | self.written_nodeids = set()
76 | skip_cleanup = config.getoption("skip_cleanup", False)
77 | if not skip_cleanup:
78 | self.cleanup_scripts()
79 | self.nodes = _ReplayTestInfoDefaultDict()
80 | self.session_start_time = config.replay_start_time
81 |
82 | @pytest.fixture(scope="function")
83 | def replay_metadata(self, request):
84 | return self.nodes[request.node.nodeid]
85 |
86 | def cleanup_scripts(self):
87 | if self.xdist_worker_name:
88 | # only cleanup scripts on the master node
89 | return
90 | if self.dir:
91 | if os.path.isdir(self.dir):
92 | if self.running_xdist:
93 | mask = os.path.join(
94 | self.dir, self.base_script_name + "-*" + self.ext
95 | )
96 | else:
97 | mask = os.path.join(self.dir, self.base_script_name + self.ext)
98 | for fn in glob(mask):
99 | os.remove(fn)
100 | else:
101 | os.makedirs(self.dir)
102 |
103 | def pytest_runtest_logstart(self, nodeid):
104 | if self.running_xdist and not self.xdist_worker_name:
105 | # only workers report running tests when running in xdist
106 | return
107 | if self.dir:
108 | self.nodes[nodeid].start = time.perf_counter() - self.session_start_time
109 | json_content = json.dumps(self.nodes[nodeid].to_clean_dict())
110 | self.append_test_to_script(nodeid, json_content)
111 |
112 | @pytest.hookimpl(hookwrapper=True)
113 | def pytest_runtest_makereport(self, item):
114 | report = yield
115 | result = report.get_result()
116 | if self.dir:
117 | self.nodes[item.nodeid].outcome = (
118 | self.nodes[item.nodeid].outcome or result.outcome
119 | )
120 | current = self.nodes[item.nodeid].outcome
121 | if not result.passed and current != "failed":
122 | # do not overwrite a failed outcome with a skipped one
123 | self.nodes[item.nodeid].outcome = result.outcome
124 |
125 | if result.when == "teardown":
126 | self.nodes[item.nodeid].finish = (
127 | time.perf_counter() - self.session_start_time
128 | )
129 | json_content = json.dumps(self.nodes[item.nodeid].to_clean_dict())
130 | self.append_test_to_script(item.nodeid, json_content)
131 |
132 | def pytest_collection_modifyitems(self, items, config):
133 | replay_file = config.getoption("replay_file")
134 | if not replay_file:
135 | return
136 |
137 | with open(replay_file, encoding="UTF-8") as f:
138 | all_lines = f.readlines()
139 | # Use a dict to deduplicate the node ids while keeping the order.
140 | nodeids = {}
141 | for line in all_lines:
142 | stripped = line.strip()
143 |                 # Ignore blank lines and comments. (#70)
144 | if stripped and not stripped.startswith(("#", "//")):
145 | node_info = json.loads(stripped)
146 | nodeid = node_info["nodeid"]
147 | if "finish" in node_info:
148 | self.nodes[nodeid] = ReplayTestInfo(**node_info)
149 | nodeids[nodeid] = None
150 |
151 | items_dict = {item.nodeid: item for item in items}
152 | remaining = []
153 | # Make sure to respect the order from the JSON file (#52).
154 | for nodeid in nodeids:
155 |             item = items_dict.pop(nodeid, None)
156 | if item:
157 | remaining.append(item)
158 | deselected = list(items_dict.values())
159 |
160 | if deselected:
161 | config.hook.pytest_deselected(items=deselected)
162 |
163 | items[:] = remaining
164 |
165 | def append_test_to_script(self, nodeid, line):
166 | suffix = "-" + self.xdist_worker_name if self.xdist_worker_name else ""
167 | fn = os.path.join(self.dir, self.base_script_name + suffix + self.ext)
168 | with open(fn, "a", encoding="UTF-8") as f:
169 | f.write(line + "\n")
170 | f.flush()
171 | self.written_nodeids.add(nodeid)
172 |
173 |
174 | class DeferPlugin:
175 | def pytest_configure_node(self, node):
176 | node.workerinput["replay_start_time"] = node.config.replay_start_time
177 |
178 |
179 | def pytest_configure(config):
180 | if config.getoption("replay_record_dir") or config.getoption("replay_file"):
181 | if hasattr(config, "workerinput"):
182 | config.replay_start_time = config.workerinput["replay_start_time"]
183 | else:
184 | config.replay_start_time = time.perf_counter()
185 | # check for xdist and xdist.plugin: the former is the name of the plugin in normal
186 | # circumstances, the latter happens when xdist is loaded explicitly using '-p' in
187 | # a frozen executable
188 | if config.pluginmanager.has_plugin("xdist") or config.pluginmanager.has_plugin(
189 | "xdist.plugin"
190 | ):
191 | config.pluginmanager.register(DeferPlugin())
192 | config.pluginmanager.register(ReplayPlugin(config), "replay-writer")
193 |
194 |
195 | def pytest_report_header(config):
196 | if config.getoption("replay_record_dir"):
197 | return "replay dir: {}".format(config.getoption("replay_record_dir"))
198 |
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | pytest_plugins = "pytester"
2 |
--------------------------------------------------------------------------------
/tests/test_replay.py:
--------------------------------------------------------------------------------
1 | import itertools as it
2 | import json
3 | import re
4 | from pathlib import Path
5 |
6 | import pytest
7 |
8 |
9 | @pytest.fixture
10 | def suite(testdir):
11 | testdir.makepyfile(
12 | test_1="""
13 | def test_foo():
14 | pass
15 | def test_bar():
16 | pass
17 | """,
18 | test_2="""
19 | def test_zz():
20 | pass
21 | """,
22 | test_3="""
23 | def test_foobar():
24 | pass
25 | """,
26 | )
27 |
28 |
29 | @pytest.mark.parametrize(
30 | "extra_option", [(None, ".pytest-replay"), ("--replay-base-name", "NEW-BASE-NAME")]
31 | )
32 | def test_normal_execution(suite, testdir, extra_option, monkeypatch):
33 | """Ensure scripts are created and the tests are executed when using --replay."""
34 |
35 | class MockTime:
36 | fake_time = 0.0
37 |
38 | @classmethod
39 | def perf_counter(cls):
40 | cls.fake_time += 1.0
41 | return cls.fake_time
42 |
43 | monkeypatch.setattr("pytest_replay.time", MockTime)
44 |
45 | extra_arg, base_name = extra_option
46 | dir = testdir.tmpdir / "replay"
47 | options = ["test_1.py", f"--replay-record-dir={dir}"]
48 |
49 | if extra_arg:
50 | options.append(f"{extra_arg}={base_name}")
51 |
52 | result = testdir.runpytest(*options)
53 |
54 | result.stdout.fnmatch_lines(f"*replay dir: {dir}")
55 |
56 | replay_file = dir / f"{base_name}.txt"
57 | contents = replay_file.readlines(True)
58 | contents = [json.loads(line.strip()) for line in contents]
59 | assert len(contents) == 4
60 | assert contents[0] == {"nodeid": "test_1.py::test_foo", "start": 1.0}
61 | assert contents[1] == {
62 | "nodeid": "test_1.py::test_foo",
63 | "start": 1.0,
64 | "finish": 2.0,
65 | "outcome": "passed",
66 | }
67 | assert contents[2] == {"nodeid": "test_1.py::test_bar", "start": 3.0}
68 | assert contents[3] == {
69 | "nodeid": "test_1.py::test_bar",
70 | "start": 3.0,
71 | "finish": 4.0,
72 | "outcome": "passed",
73 | }
74 | assert result.ret == 0
75 | result = testdir.runpytest(f"--replay={replay_file}")
76 | assert result.ret == 0
77 | result.stdout.fnmatch_lines(["test_1.py*100%*", "*= 2 passed, 2 deselected in *="])
78 |
79 |
80 | @pytest.mark.parametrize("comment_format", ["#", "//"])
81 | @pytest.mark.parametrize("name_to_comment, deselected", [("foo", 2), ("zz", 1)])
82 | def test_line_comments(suite, testdir, comment_format, name_to_comment, deselected):
83 | """Check line comments"""
84 |
85 | replay_dir = testdir.tmpdir / "replay"
86 | result = testdir.runpytest(f"--replay-record-dir={replay_dir}")
87 | replay_file = replay_dir / ".pytest-replay.txt"
88 |
89 | contents = replay_file.readlines(True)
90 | contents = [line.strip() for line in contents]
91 | contents = [
92 | (comment_format + line) if name_to_comment in line else line
93 | for line in contents
94 | ]
95 |     replay_file_commented = replay_dir / ".pytest-replay_commented.txt"
96 | replay_file_commented.write_text("\n".join(contents), encoding="utf-8")
97 |
98 | result = testdir.runpytest(f"--replay={replay_file_commented}")
99 | assert result.ret == 0
100 | passed = 4 - deselected
101 | result.stdout.fnmatch_lines([f"*= {passed} passed, {deselected} deselected in *="])
102 |
103 |
104 | @pytest.mark.parametrize("do_crash", [True, False])
105 | def test_crash(testdir, do_crash):
106 | testdir.makepyfile(
107 | test_crash="""
108 | import os
109 | def test_crash():
110 | if {do_crash}:
111 | os._exit(1)
112 | def test_normal():
113 | pass
114 | """.format(
115 | do_crash=do_crash
116 | )
117 | )
118 | dir = testdir.tmpdir / "replay"
119 | result = testdir.runpytest_subprocess(f"--replay-record-dir={dir}")
120 |
121 | contents = (dir / ".pytest-replay.txt").read()
122 | test_id = "test_crash.py::test_normal"
123 | if do_crash:
124 | assert test_id not in contents
125 | assert result.ret != 0
126 | else:
127 | assert test_id in contents
128 | assert result.ret == 0
129 |
130 |
131 | def test_xdist(testdir):
132 | testdir.makepyfile(
133 | """
134 | import pytest
135 | @pytest.mark.parametrize('i', range(10))
136 | def test(i):
137 | pass
138 | """
139 | )
140 | dir = testdir.tmpdir / "replay"
141 | procs = 2
142 | testdir.runpytest_subprocess("-n", str(procs), f"--replay-record-dir={dir}")
143 |
144 | files = dir.listdir()
145 | assert len(files) == procs
146 | test_ids = set()
147 | for f in files:
148 | test_ids.update({json.loads(x.strip())["nodeid"] for x in f.readlines()})
149 | expected_ids = {f"test_xdist.py::test[{x}]" for x in range(10)}
150 | assert test_ids == expected_ids
151 |
152 |
153 | @pytest.mark.parametrize("reverse", [True, False])
154 | def test_alternate_serial_parallel_does_not_erase_runs(suite, testdir, reverse):
155 | """xdist and normal runs should not erase each other's files."""
156 | command_lines = [
157 | ("-n", "2", "--replay-record-dir=replay"),
158 | ("--replay-record-dir=replay",),
159 | ]
160 | if reverse:
161 | command_lines.reverse()
162 | for command_line in command_lines:
163 | result = testdir.runpytest_subprocess(*command_line)
164 | assert result.ret == 0
165 | assert {x.basename for x in (testdir.tmpdir / "replay").listdir()} == {
166 | ".pytest-replay.txt",
167 | ".pytest-replay-gw0.txt",
168 | ".pytest-replay-gw1.txt",
169 | }
170 |
171 |
172 | def test_skip_cleanup_does_not_erase_replay_files(suite, testdir):
173 | """--replay-skip-cleanup will not erase replay files, appending data on next run."""
174 | command_lines = [
175 | ("-n", "2", "--replay-record-dir=replay"),
176 | ("-n", "2", "--replay-record-dir=replay", "--replay-skip-cleanup"),
177 | ]
178 |
179 | expected_node_ids = [
180 | "test_1.py::test_foo",
181 | "test_1.py::test_foo",
182 | "test_1.py::test_bar",
183 | "test_1.py::test_bar",
184 | ]
185 |
186 | dir = testdir.tmpdir / "replay"
187 | expected = expected_node_ids[:]
188 | for command_line in command_lines:
189 | result = testdir.runpytest_subprocess(*command_line)
190 | assert result.ret == 0
191 | assert {x.basename for x in dir.listdir()} == {
192 | ".pytest-replay-gw0.txt",
193 | ".pytest-replay-gw1.txt",
194 | }
195 |
196 | replay_file = dir / ".pytest-replay-gw0.txt"
197 | contents = [json.loads(line)["nodeid"] for line in replay_file.readlines()]
198 | assert contents == expected
199 | # Next run will expect same tests appended again.
200 | expected.extend(expected_node_ids)
201 |
202 |
203 | def test_cwd_changed(testdir):
204 | """Ensure that the plugin works even if some tests changes cwd."""
205 | testdir.tmpdir.join("subdir").ensure(dir=1)
206 | testdir.makepyfile(
207 | """
208 | import os
209 | def test_1():
210 | os.chdir('subdir')
211 | def test_2():
212 | pass
213 | """
214 | )
215 | dir = testdir.tmpdir / "replay"
216 | result = testdir.runpytest_subprocess("--replay-record-dir={}".format("replay"))
217 | replay_file = dir / ".pytest-replay.txt"
218 | contents = {json.loads(line)["nodeid"] for line in replay_file.readlines()}
219 | expected = {"test_cwd_changed.py::test_1", "test_cwd_changed.py::test_2"}
220 | assert contents == expected
221 | assert result.ret == 0
222 |
223 |
224 | @pytest.mark.usefixtures("suite")
225 | def test_execution_different_order(testdir):
226 | """Ensure tests execute in the order defined by the JSON file, not collection (#52)."""
227 | dir = testdir.tmpdir / "replay"
228 | options = [f"--replay-record-dir={dir}"]
229 | result = testdir.runpytest(*options)
230 |
231 | replay_file = dir / ".pytest-replay.txt"
232 |
233 | with replay_file.open("r+") as f:
234 | content = f.readlines()
235 |
236 | # pairwise shuffle of replay file
237 | pairs = [(content[i], content[i + 1]) for i in range(0, len(content), 2)]
238 | pairs = [pairs[2], pairs[0], pairs[3], pairs[1]]
239 | content = list(it.chain.from_iterable(pairs))
240 |
241 | f.seek(0)
242 | f.writelines(content)
243 |
244 | result = testdir.runpytest(f"--replay={replay_file}", "-v")
245 | assert result.ret == 0
246 | result.stdout.fnmatch_lines(
247 | [
248 | "test_2.py::test_zz*25%*",
249 | "test_1.py::test_foo*50%*",
250 | "test_3.py::test_foobar*75%*",
251 | "test_1.py::test_bar*100%*",
252 | ],
253 | consecutive=True,
254 | )
255 |
256 |
257 | @pytest.mark.usefixtures("suite")
258 | def test_filter_out_tests_not_in_file(testdir):
259 | """Tests not found in the JSON file should not run."""
260 | dir = testdir.tmpdir / "replay"
261 | options = [f"--replay-record-dir={dir}", "-k", "foo"]
262 | result = testdir.runpytest(*options)
263 |
264 | replay_file = dir / ".pytest-replay.txt"
265 |
266 | result = testdir.runpytest(f"--replay={replay_file}", "-v")
267 | assert result.ret == 0
268 | result.stdout.fnmatch_lines(
269 | [
270 | "test_1.py::test_foo*50%*",
271 | "test_3.py::test_foobar*100%*",
272 | ],
273 | consecutive=True,
274 | )
275 |
276 |
277 | def test_metadata(pytester, tmp_path):
278 | pytester.makepyfile(
279 | """
280 | import pytest
281 |
282 | @pytest.fixture
283 | def seed(replay_metadata):
284 | assert replay_metadata.metadata == {}
285 | replay_metadata.metadata["seed"] = seed = 1234
286 | return seed
287 |
288 | def test_foo(seed):
289 | assert seed == 1234
290 | """
291 | )
292 | dir = tmp_path / "replay"
293 | result = pytester.runpytest(f"--replay-record-dir={dir}")
294 | assert result.ret == 0
295 |
296 |     # Rewrite the fixture to always return the metadata recorded previously.
297 | pytester.makepyfile(
298 | """
299 | import pytest
300 |
301 | @pytest.fixture
302 | def seed(replay_metadata):
303 | return replay_metadata.metadata["seed"]
304 |
305 | def test_foo(seed):
306 | assert seed == 1234
307 | """
308 | )
309 | result = pytester.runpytest(f"--replay={dir / '.pytest-replay.txt'}")
310 | assert result.ret == 0
311 |
312 |
313 | def test_replay_file_outcome_is_correct(testdir):
314 | """Tests that the outcomes in the replay file are correct."""
315 | testdir.makepyfile(
316 | test_module="""
317 | import pytest
318 |
319 | def test_success():
320 | pass
321 |
322 | def test_failure():
323 | assert False
324 |
325 | @pytest.fixture
326 | def failing_teardown_fixture():
327 | yield
328 | assert False
329 |
330 | def test_failure_fixture_teardown(failing_teardown_fixture):
331 | assert True
332 |
333 | @pytest.fixture
334 | def failing_setup_fixture():
335 | assert False
336 |
337 | def test_failure_fixture_setup(failing_setup_fixture):
338 | assert True
339 | """
340 | )
341 | dir = testdir.tmpdir / "replay"
342 | result = testdir.runpytest_subprocess(f"--replay-record-dir={dir}")
343 | assert result.ret != 0
344 |
345 | contents = [json.loads(s) for s in (dir / ".pytest-replay.txt").read().splitlines()]
346 | outcomes = {r["nodeid"]: r["outcome"] for r in contents if "outcome" in r}
347 | assert outcomes == {
348 | "test_module.py::test_success": "passed",
349 | "test_module.py::test_failure": "failed",
350 | "test_module.py::test_failure_fixture_teardown": "failed",
351 | "test_module.py::test_failure_fixture_setup": "failed",
352 | }
353 |
354 |
355 | def test_replay_file_outcome_is_correct_xdist(testdir):
356 | """Tests that the outcomes in the replay file are correct when running in parallel."""
357 | testdir.makepyfile(
358 | test_module="""
359 | import pytest
360 |
361 | @pytest.mark.parametrize('i', range(10))
362 | def test_val(i):
363 | assert i < 5
364 | """
365 | )
366 | dir = testdir.tmpdir / "replay"
367 | procs = 2
368 | result = testdir.runpytest_subprocess(f"--replay-record-dir={dir}", f"-n {procs}")
369 | assert result.ret != 0
370 |
371 | contents = [
372 | s
373 | for n in range(procs)
374 | for s in (dir / f".pytest-replay-gw{n}.txt").read().splitlines()
375 | ]
376 | pattern = re.compile(r"test_val\[(\d+)\]")
377 | for content in contents:
378 | parsed = json.loads(content)
379 | if "outcome" not in parsed:
380 | continue
381 |
382 | i = int(pattern.search(parsed["nodeid"]).group(1))
383 | if i < 5:
384 | assert parsed["outcome"] == "passed", i
385 | else:
386 | assert parsed["outcome"] == "failed", i
387 |
388 |
389 | def test_outcomes_in_replay_file(testdir):
390 | """Tests that checks how the outcomes are handled in the report hook when the various
391 | phases yield failure or skipped."""
392 | testdir.makepyfile(
393 | test_module="""
394 | import pytest
395 |
396 | @pytest.fixture()
397 | def skip_setup():
398 | pytest.skip("skipping")
399 | yield
400 |
401 | @pytest.fixture()
402 | def skip_teardown():
403 | yield
404 | pytest.skip("skipping")
405 |
406 | @pytest.fixture()
407 | def fail_setup():
408 | assert False
409 |
410 | @pytest.fixture()
411 | def fail_teardown():
412 | yield
413 | assert False
414 |
415 | def test_skip_fail(skip_setup, fail_teardown):
416 | pass
417 |
418 | def test_fail_skip(fail_setup, skip_teardown):
419 | pass
420 |
421 | def test_skip_setup(skip_setup):
422 | pass
423 |
424 | def test_skip_teardown(skip_teardown):
425 | pass
426 |
427 | def test_test_fail_skip_teardown(skip_teardown):
428 | assert False
429 | """
430 | )
431 | dir = testdir.tmpdir / "replay"
432 | testdir.runpytest_subprocess(f"--replay-record-dir={dir}")
433 |
434 | contents = [json.loads(s) for s in (dir / ".pytest-replay.txt").read().splitlines()]
435 | outcomes = {r["nodeid"]: r["outcome"] for r in contents if "outcome" in r}
436 | assert outcomes == {
437 | "test_module.py::test_skip_fail": "skipped",
438 | "test_module.py::test_fail_skip": "failed",
439 | "test_module.py::test_skip_setup": "skipped",
440 | "test_module.py::test_skip_teardown": "skipped",
441 | "test_module.py::test_test_fail_skip_teardown": "failed",
442 | }
443 |
444 |
445 | @pytest.mark.usefixtures("suite")
446 | def test_empty_or_blank_lines(testdir):
447 | """Empty or blank line in replay files should be ignored."""
448 | dir = testdir.tmpdir / "replay"
449 | options = [f"--replay-record-dir={dir}"]
450 | result = testdir.runpytest(*options)
451 |
452 | replay_file: Path = dir / ".pytest-replay.txt"
453 |
454 | with replay_file.open("r+") as f:
455 | content = f.readlines()
456 |
457 | # Add empty line
458 | content.insert(1, "\n")
459 | # Add blank line
460 | content.insert(1, " \n")
461 | # Add empty line
462 | content.append("\n")
463 | # Add mixed blank line
464 | content.append("\t \n")
465 | f.seek(0)
466 | f.writelines(content)
467 |
468 | result = testdir.runpytest(f"--replay={replay_file}", "-v")
469 | assert result.ret == 0
470 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | envlist = py39,py310,py311,py312,py313
3 |
4 | [testenv]
5 | deps =
6 | pytest-xdist
7 | commands = pytest {posargs:tests}
8 |
9 | [pytest]
10 | addopts = -ra --color=yes
11 |
--------------------------------------------------------------------------------