├── MANIFEST.in
├── tox.ini
├── RELEASING.rst
├── tests
│   ├── conftest.py
│   ├── test_replay_xdist.py
│   └── test_replay.py
├── .github
│   ├── dependabot.yml
│   └── workflows
│       ├── deploy.yml
│       └── test.yml
├── .pre-commit-config.yaml
├── LICENSE
├── .gitignore
├── pyproject.toml
├── CHANGELOG.rst
├── README.rst
└── src
    └── pytest_replay
        └── __init__.py
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE
2 | include README.rst
3 |
4 | recursive-exclude * __pycache__
5 | recursive-exclude * *.py[co]
6 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | envlist = py39,py310,py311,py312,py313
3 |
4 | [testenv]
5 | deps =
6 | pytest-xdist
7 | commands = pytest {posargs:tests}
8 |
9 | [pytest]
10 | addopts = -ra --color=yes
11 |
--------------------------------------------------------------------------------
/RELEASING.rst:
--------------------------------------------------------------------------------
1 | Here are the steps to make a new release.
2 |
3 | 1. Create a ``release-VERSION`` branch from ``upstream/master``.
4 | 2. Update ``CHANGELOG.rst``.
5 | 3. Push a branch with the changes.
6 | 4. Once all builds pass, push a ``VERSION`` tag to ``upstream``.
7 | 5. Merge the PR.
8 |
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 |
4 | pytest_plugins = "pytester"
5 |
6 |
7 | @pytest.fixture
8 | def suite(testdir):
9 | testdir.makepyfile(
10 | test_1="""
11 | def test_foo():
12 | pass
13 | def test_bar():
14 | pass
15 | """,
16 | test_2="""
17 | def test_zz():
18 | pass
19 | """,
20 | test_3="""
21 | def test_foobar():
22 | pass
23 | """,
24 | )
25 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | # Keep GitHub Actions up to date with GitHub's Dependabot...
2 | # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot
3 | # https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem
4 | version: 2
5 | updates:
6 | - package-ecosystem: github-actions
7 | directory: /
8 | groups:
9 | github-actions:
10 | patterns:
11 | - "*" # Group all Actions updates into a single larger pull request
12 | schedule:
13 | interval: weekly
14 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/psf/black-pre-commit-mirror
3 | rev: 25.12.0
4 | hooks:
5 | - id: black
6 | args: [--safe, --quiet]
7 | language_version: python3
8 | - repo: https://github.com/pre-commit/pre-commit-hooks
9 | rev: v6.0.0
10 | hooks:
11 | - id: trailing-whitespace
12 | - id: end-of-file-fixer
13 | - id: debug-statements
14 | - repo: https://github.com/asottile/reorder-python-imports
15 | rev: v3.16.0
16 | hooks:
17 | - id: reorder-python-imports
18 | args: ['--application-directories=.:src', --py36-plus]
19 | - repo: local
20 | hooks:
21 | - id: rst
22 | name: rst
23 | entry: rst-lint
24 | files: ^(CHANGELOG.rst|HOWTORELEASE.rst|README.rst)$
25 | language: python
26 | additional_dependencies: [pygments, restructuredtext_lint]
27 |
--------------------------------------------------------------------------------
/.github/workflows/deploy.yml:
--------------------------------------------------------------------------------
1 | name: deploy
2 |
3 | on:
4 | push:
5 | tags:
6 | - "[0-9]+.[0-9]+.[0-9]+"
7 |
8 | jobs:
9 |
10 | package:
11 | runs-on: ubuntu-latest
12 |
13 | steps:
14 | - uses: actions/checkout@v6
15 | - name: Build and Check Package
16 | uses: hynek/build-and-inspect-python-package@v2.14.0
17 |
18 | deploy:
19 | runs-on: ubuntu-latest
20 | permissions:
21 | id-token: write
22 | contents: write
23 |
24 | needs: package
25 |
26 | steps:
27 | - name: Download Package
28 | uses: actions/download-artifact@v7.0.0
29 | with:
30 | name: Packages
31 | path: dist
32 |
33 | - name: Publish package to PyPI
34 | uses: pypa/gh-action-pypi-publish@v1.13.0
35 |
36 | - name: Publish GitHub Release
37 | uses: softprops/action-gh-release@v2
38 | with:
39 | files: dist/*
40 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | The MIT License (MIT)
3 |
4 | Copyright (c) 2018 Bruno Oliveira
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in
14 | all copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 | THE SOFTWARE.
23 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 |
27 | # PyInstaller
28 | # Usually these files are written by a python script from a template
29 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
30 | *.manifest
31 | *.spec
32 |
33 | # Installer logs
34 | pip-log.txt
35 | pip-delete-this-directory.txt
36 |
37 | # Unit test / coverage reports
38 | htmlcov/
39 | .tox/
40 | .coverage
41 | .coverage.*
42 | .cache
43 | nosetests.xml
44 | coverage.xml
45 | *.cover
46 | .hypothesis/
47 |
48 | # Translations
49 | *.mo
50 | *.pot
51 |
52 | # Django stuff:
53 | *.log
54 | local_settings.py
55 |
56 | # Flask instance folder
57 | instance/
58 |
59 | # Sphinx documentation
60 | docs/_build/
61 |
62 | # MkDocs documentation
63 | /site/
64 |
65 | # PyBuilder
66 | target/
67 |
68 | # IPython Notebook
69 | .ipynb_checkpoints
70 |
71 | # pyenv
72 | .python-version
73 |
74 | # pycharm
75 | .idea
76 |
77 | # example virtual env used in "Contributing"
78 | .env
79 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: test
2 |
3 | on:
4 | push:
5 | branches:
6 | - "master"
7 | - "test-me-*"
8 |
9 | pull_request:
10 |
11 |
12 | concurrency:
13 | group: ${{ github.workflow }}-${{ github.ref }}
14 | cancel-in-progress: true
15 |
16 |
17 | jobs:
18 |
19 | package:
20 | runs-on: ubuntu-latest
21 | steps:
22 | - uses: actions/checkout@v6
23 | - name: Build and Check Package
24 | uses: hynek/build-and-inspect-python-package@v2.14.0
25 |
26 | test:
27 |
28 | runs-on: ${{ matrix.os }}
29 |
30 | needs: package
31 |
32 | strategy:
33 | fail-fast: false
34 | matrix:
35 | python: ["3.9", "3.10", "3.11", "3.12", "3.13", "3.14"]
36 | os: [ubuntu-latest, windows-latest, macos-latest]
37 |
38 | steps:
39 | - uses: actions/checkout@v6
40 |
41 | - name: Download Package
42 | uses: actions/download-artifact@v7.0.0
43 | with:
44 | name: Packages
45 | path: dist
46 |
47 | - name: Set up Python
48 | uses: actions/setup-python@v6
49 | with:
50 | python-version: ${{ matrix.python }}
51 |
52 | - name: Install tox
53 | run: |
54 | python -m pip install --upgrade pip
55 | pip install tox
56 |
57 | - name: Test
58 | shell: bash
59 | run: |
60 | tox run -e py --installpkg `find dist/*.tar.gz`
61 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | build-backend = "setuptools.build_meta"
3 | requires = ["setuptools>=61", "setuptools_scm"]
4 |
5 | [tool.setuptools_scm]
6 |
7 | [project]
8 | name = "pytest-replay"
9 | authors = [
10 | { name = "ESSS", email = "foss@esss.co" },
11 | ]
12 | dynamic = ["version"]
13 | license = { text = "MIT" }
14 | urls = { Homepage = "https://github.com/ESSS/pytest-replay" }
15 | description = "Saves previous test runs and allows re-executing them to reproduce crashes or flaky tests"
16 | readme = "README.rst"
17 | requires-python = ">=3.9"
18 | dependencies = [
19 | "pytest",
20 | ]
21 | classifiers = [
22 | "Development Status :: 5 - Production/Stable",
23 | "Framework :: Pytest",
24 | "Intended Audience :: Developers",
25 | "Topic :: Software Development :: Testing",
26 | "Programming Language :: Python",
27 | "Programming Language :: Python :: 3",
28 | "Programming Language :: Python :: 3.9",
29 | "Programming Language :: Python :: 3.10",
30 | "Programming Language :: Python :: 3.11",
31 | "Programming Language :: Python :: 3.12",
32 | "Programming Language :: Python :: 3.13",
33 | "Programming Language :: Python :: 3.14",
34 | "Programming Language :: Python :: Implementation :: CPython",
35 | "Operating System :: OS Independent",
36 | "License :: OSI Approved :: MIT License",
37 | ]
38 |
39 | [tool.setuptools]
40 | packages = { find = { where = ["src"] } }
41 | package-dir = { "" = "src" }
42 |
43 | [project.entry-points.pytest11]
44 | replay = "pytest_replay"
45 |
--------------------------------------------------------------------------------
/tests/test_replay_xdist.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | import pytest
4 |
5 |
6 | @pytest.fixture
7 | def suite_replay_xdist(suite, testdir):
8 | out_dir = testdir.tmpdir / "replay_xdist"
9 | out_dir.mkdir()
10 | file_gw0 = out_dir / "replay_gw0.txt"
11 | file_gw1 = out_dir / "replay_gw1.txt"
12 |
13 | file_gw0.write_text(
14 | """{"nodeid": "test_1.py::test_foo", "start": 0.5}
15 | {"nodeid": "test_1.py::test_foo", "start": 0.5, "finish": 1.0, "outcome": "passed"}
16 | {"nodeid": "test_1.py::test_bar", "start": 1.0}
17 | {"nodeid": "test_1.py::test_bar", "start": 1.0, "finish": 1.5, "outcome": "passed"}""",
18 | encoding="utf-8",
19 | )
20 | file_gw1.write_text(
21 | """{"nodeid": "test_2.py::test_zz", "start": 0.5}
22 | {"nodeid": "test_2.py::test_zz", "start": 0.5, "finish": 1.0, "outcome": "passed"}
23 | {"nodeid": "test_3.py::test_foobar", "start": 1.0}
24 | {"nodeid": "test_3.py::test_foobar", "start": 1.0, "finish": 1.5, "outcome": "passed"}""",
25 | encoding="utf-8",
26 | )
27 | yield file_gw0, file_gw1
28 |
29 |
30 | def test_run_multiple_files_with_xdist(testdir, suite_replay_xdist):
31 | file_gw0, file_gw1 = suite_replay_xdist
32 | result = testdir.runpytest(
33 | "--replay",
34 | str(file_gw0),
35 | str(file_gw1),
36 | "-v",
37 | )
38 | assert result.ret == 0
39 | assert result.parseoutcomes() == {"passed": 4}
40 | stdout = result.stdout.str()
41 | assert "created: 2/2 workers" in stdout
42 | assert re.search(r"\[gw1\] .* PASSED test_2\.py::test_zz", stdout)
43 | assert re.search(r"\[gw0\] .* PASSED test_1\.py::test_foo", stdout)
44 | assert re.search(r"\[gw1\] .* PASSED test_3\.py::test_foobar", stdout)
45 | assert re.search(r"\[gw0\] .* PASSED test_1\.py::test_bar", stdout)
46 |
47 |
48 | @pytest.mark.parametrize(
49 | "extra_args",
50 | [
51 | ["-n", "2"],
52 | ["-n=2"],
53 | ["--dist", "loadgroup"],
54 | ["--dist=loadgroup"],
55 | ["--maxprocesses", "2"],
56 | ["--maxprocesses=2"],
57 | ["--numprocesses", "2"],
58 | ["--numprocesses=2"],
59 | ["-n", "2", "--dist", "loadgroup"],
60 | ],
61 | )
62 | def test_exception_multiple_replay_files(testdir, suite_replay_xdist, extra_args):
63 | file_gw0, file_gw1 = suite_replay_xdist
64 | result = testdir.runpytest("--replay", str(file_gw0), str(file_gw1), *extra_args)
65 | assert result.ret == 4
66 |
--------------------------------------------------------------------------------
/CHANGELOG.rst:
--------------------------------------------------------------------------------
1 | 1.7.1
2 | =====
3 |
4 | *2025-12-23*
5 |
6 | * Fix parsing of custom command-line options from other plugins (`#105`_).
7 |
8 | .. _`#105`: https://github.com/ESSS/pytest-replay/issues/105
9 |
10 |
11 | 1.7.0
12 | =====
13 |
14 | *2025-12-09*
15 |
16 | * Support replaying tests from multiple replay files. When multiple files are
17 | passed to ``--replay``, the run is automatically distributed across workers
18 | using ``pytest-xdist``.
19 | * Internal: rename ``replay_file`` to ``replay_files``.
20 |
21 | 1.6.0
22 | =====
23 |
24 | *2025-02-05*
25 |
26 | * Add support for Python 3.13.
27 | * Dropped support for EOL Python 3.8.
28 | * Change build to use ``pyproject.toml``.
29 | * Allow customization of metadata in replay file (`#78`_).
30 |
31 | .. _`#78`: https://github.com/ESSS/pytest-replay/issues/78
32 |
33 |
34 | 1.5.3
35 | =====
36 |
37 | *2024-11-29*
38 |
39 | * Ignore empty and blank lines in the replay file (`#70`_).
40 |
41 | .. _`#70`: https://github.com/ESSS/pytest-replay/issues/70
42 |
43 | 1.5.2
44 | =====
45 |
46 | *2024-09-03*
47 |
48 | * Fix test outcome reported in the replay file (`#64`_).
49 |
50 | .. _`#64`: https://github.com/ESSS/pytest-replay/issues/64
51 |
52 | 1.5.1
53 | =====
54 |
55 | *2024-01-11*
56 |
57 | * Dropped support for Python 3.6 and 3.7 (EOL).
58 | * Added official support for Python 3.10, 3.11 and 3.12.
59 | * Test execution order using ``--replay`` now follows the recorded order, not the collection order, as was always intended (`#52`_).
60 |
61 | .. _`#52`: https://github.com/ESSS/pytest-replay/pull/53
62 |
63 | 1.4.0
64 | =====
65 |
66 | *2021-06-09*
67 |
68 | * Introduce new ``--replay-skip-cleanup`` option that skips the cleanup before running the command. This allows keeping previously generated replay files when running new commands.
69 |
70 | 1.3.0
71 | =====
72 |
73 | *2020-12-09*
74 |
75 | * Replay files can now contain comments (``#`` or ``//``), making it easy to comment out tests when narrowing down the set of tests to find a culprit.
76 |
77 |
78 | 1.2.1
79 | =====
80 |
81 | *2020-08-24*
82 |
83 | * Add proper support when running with ``xdist`` in a frozen executable.
84 |
85 | 1.2.0
86 | =====
87 |
88 | *2019-11-14*
89 |
90 | * Change the output format to be able to record more information. Each line is now
91 | a ``json`` object containing the node id, start time, end time and outcome of each
92 | test.
93 |
94 | 1.1.0
95 | =====
96 |
97 | *2019-11-11*
98 |
99 | * Introduce new ``--replay-base-name`` option that lets users configure a different name for the replay file. Defaults to ``.pytest-replay``.
100 |
101 | 1.0.0
102 | =====
103 |
104 | * Drop support for Python 2.
105 |
106 | 0.2.2
107 | =====
108 |
109 | * Normal runs and ``xdist`` runs no longer clean up each other's files.
110 |
111 | 0.2.1
112 | =====
113 |
114 | * Fix ``IOError`` crash when tests change the current working directory in the middle
115 | of the testing session.
116 |
117 | 0.2.0
118 | =====
119 |
120 | * Replace the shell scripts with plain text files and add a new
121 | ``--replay`` flag which accepts the generated files to re-run the tests.
122 |
123 | 0.1.1
124 | =====
125 |
126 | * Escape node ids in the generated shell scripts.
127 |
128 | 0.1.0
129 | =====
130 |
131 | * Initial release.
132 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | =============
2 | pytest-replay
3 | =============
4 |
5 |
6 | .. image:: http://img.shields.io/pypi/v/pytest-replay.svg
7 | :target: https://pypi.python.org/pypi/pytest-replay
8 |
9 | .. image:: https://anaconda.org/conda-forge/pytest-replay/badges/version.svg
10 | :target: https://anaconda.org/conda-forge/pytest-replay
11 |
12 | .. image:: https://github.com/ESSS/pytest-replay/workflows/test/badge.svg
13 | :target: https://github.com/ESSS/pytest-replay/actions?query=workflow%3Atest
14 |
15 | .. image:: https://img.shields.io/pypi/pyversions/pytest-replay.svg
16 | :target: https://pypi.python.org/pypi/pytest-replay
17 |
18 | .. image:: https://img.shields.io/badge/code%20style-black-000000.svg
19 | :target: https://github.com/psf/black
20 |
21 |
22 | Saves previous test runs and allows re-executing them to reproduce crashes or flaky tests
23 |
24 | ----
25 |
26 | This `pytest`_ plugin was generated with `Cookiecutter`_ along with `@hackebrot`_'s `Cookiecutter-pytest-plugin`_ template.
27 |
28 |
29 | Features
30 | --------
31 |
32 | This plugin helps reproduce random or flaky behavior when running tests with xdist. ``pytest-xdist`` executes tests
33 | in a non-predictable order, which makes it hard to reproduce behavior seen in CI locally because there is no convenient
34 | way to track which test executed in which worker.
35 |
36 | This plugin records the node ids executed by each worker in the directory given by the ``--replay-record-dir`` flag,
37 | and the ``--replay`` flag can be used to re-run the tests from a previous run. For example::
38 |
39 | $ pytest -n auto --replay-record-dir=build/tests/replay
40 |
41 | This will generate files where each line is a ``json`` object with the following
42 | content: node id, start time, end time and outcome. Note that each node id usually
43 | appears twice: a line is written as soon as the test starts, so that if the test
44 | crashes abruptly there is still a record that it started. After the test finishes,
45 | ``pytest-replay`` adds another ``json`` line with the complete information.
46 | This is also useful to analyze concurrent tests which might have some kind of
47 | race condition and interfere with each other, since the recorded timings show
48 | which tests were running at the same time.
49 |
50 | For example, worker ``gw1`` will generate a file
51 | ``.pytest-replay-gw1.txt`` with contents like this::
52 |
53 | {"nodeid": "test_foo.py::test[1]", "start": 0.000}
54 | {"nodeid": "test_foo.py::test[1]", "start": 0.000, "finish": 1.5, "outcome": "passed"}
55 | {"nodeid": "test_foo.py::test[3]", "start": 1.5}
56 | {"nodeid": "test_foo.py::test[3]", "start": 1.5, "finish": 2.5, "outcome": "passed"}
57 | {"nodeid": "test_foo.py::test[5]", "start": 2.5}
58 | {"nodeid": "test_foo.py::test[5]", "start": 2.5, "finish": 3.5, "outcome": "passed"}
59 | {"nodeid": "test_foo.py::test[7]", "start": 3.5}
60 | {"nodeid": "test_foo.py::test[7]", "start": 3.5, "finish": 4.5, "outcome": "passed"}
61 | {"nodeid": "test_foo.py::test[8]", "start": 4.5}
62 | {"nodeid": "test_foo.py::test[8]", "start": 4.5, "finish": 5.5, "outcome": "passed"}
63 |
64 |
65 | If there is a crash or a flaky failure in the tests of the worker ``gw1``, one can take that file from the CI server and
66 | execute the tests in the same order with::
67 |
68 | $ pytest --replay=.pytest-replay-gw1.txt
69 |
70 | Hopefully this will make it easier to reproduce the problem and fix it.
71 |
72 |
73 | Replaying Multiple Files in Parallel
74 | -------------------------------------
75 |
76 | *Version added: 1.7*
77 |
78 | When you have multiple replay files from a distributed test run
79 | (such as ``.pytest-replay-gw0.txt``, ``.pytest-replay-gw1.txt``),
80 | you can replay them all at once in parallel with ``pytest-xdist`` installed.
81 | This is useful when you want to reproduce the exact
82 | execution environment that occurred during a CI run with multiple workers.
83 |
84 | Simply pass multiple replay files to the ``--replay`` option::
85 |
86 | $ pytest --replay .pytest-replay-gw0.txt .pytest-replay-gw1.txt
87 |
88 | ``pytest-replay`` will automatically:
89 |
90 | * Configure pytest-xdist with the appropriate number of workers (one per replay file)
91 | * Assign each replay file to a dedicated worker using xdist groups
92 | * Execute tests in parallel while maintaining the order within each replay file
93 |
94 | **Note:** Multiple replay files require ``pytest-xdist`` to be installed.
95 | If you try to use multiple files without xdist,
96 | ``pytest-replay`` will show an error message.
97 |
98 | **Important:** When using multiple replay files, you cannot manually specify xdist options like ``-n``, ``--dist``,
99 | ``--numprocesses``, or ``--maxprocesses``, as these are automatically configured based on the number of replay files provided.
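
Internally (an implementation detail of the current version, see ``pytest_load_initial_conftests``
in ``src/pytest_replay/__init__.py``), this is roughly equivalent to letting ``pytest-replay``
append::

    -n <number-of-replay-files> --dist loadgroup

to the command line, with the tests from each file marked with a dedicated ``xdist_group`` so
that they stay together on the same worker.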
100 |
101 |
102 | Additional metadata
103 | -------------------
104 |
105 | *Version added: 1.6*
106 |
107 | In cases where it is necessary to add new metadata to the replay file to make a test reproducible, ``pytest-replay``
108 | provides a fixture called ``replay_metadata`` that allows new information to be added through its ``metadata``
109 | attribute.
110 |
111 | Example:
112 |
113 | .. code-block:: python
114 |
115 | import pytest
116 | import numpy as np
117 | import random
118 |
119 | @pytest.fixture
120 | def rng(replay_metadata):
121 | seed = replay_metadata.metadata.setdefault("seed", random.randint(0, 100))
122 | return np.random.default_rng(seed=seed)
123 |
124 | def test_random(rng):
125 | data = rng.standard_normal((100, 100))
126 | assert data.shape == (100, 100)
127 |
128 |
129 | When used with ``pytest-replay``, this generates a replay file similar to:
130 |
131 | .. code-block:: json
132 |
133 | {"nodeid": "test_bar.py::test_random", "start": 0.000}
134 | {"nodeid": "test_bar.py::test_random", "start": 0.000, "finish": 1.5, "outcome": "passed", "metadata": {"seed": 12}}
135 |
136 |
137 | FAQ
138 | ~~~
139 |
140 | 1. ``pytest`` has its own `cache <https://docs.pytest.org/en/stable/how-to/cache.html>`_, why use a different mechanism?
141 |
142 | The internal cache saves its data using ``json``, which is not suitable in the event of a crash because the file
143 | will not be readable.
144 |
145 | 2. Shouldn't the ability to select tests from a file be part of the ``pytest`` core?
146 |
147 | Sure, but let's try to use this a bit as a separate plugin before proposing
148 | its inclusion into the core.
149 |
150 | Installation
151 | ------------
152 |
153 | You can install ``pytest-replay`` via `pip`_ from `PyPI`_::
154 |
155 | $ pip install pytest-replay
156 |
157 | Or with conda::
158 |
159 | $ conda install -c conda-forge pytest-replay
160 |
161 |
162 | Contributing
163 | ------------
164 |
165 | Contributions are very welcome.
166 |
167 | Tests can be run with `tox`_ if you are using a native Python installation.
168 |
169 | To run tests with `conda <https://conda.io>`_, first create a virtual environment and execute tests from there
170 | (conda with Python 3.9+ in the root environment)::
171 |
172 | $ python -m venv .env
173 | $ .env\scripts\activate
174 | $ pip install -e . pytest-xdist
175 | $ pytest tests
176 |
177 |
178 | Releases
179 | ~~~~~~~~
180 |
181 | Follow these steps to make a new release:
182 |
183 | 1. Create a new branch ``release-X.Y.Z`` from ``master``;
184 | 2. Update ``CHANGELOG.rst``;
185 | 3. Open a PR;
186 | 4. After it is **green** and **approved**, push a new tag in the format ``X.Y.Z``;
187 |
188 | GitHub Actions will deploy to PyPI automatically.
189 |
190 | Afterwards, update the recipe in `conda-forge/pytest-replay-feedstock <https://github.com/conda-forge/pytest-replay-feedstock>`_.
191 |
192 |
193 | License
194 | -------
195 |
196 | Distributed under the terms of the `MIT`_ license.
197 |
198 |
199 | Issues
200 | ------
201 |
202 | If you encounter any problems, please `file an issue`_ along with a detailed description.
203 |
204 | .. _`Cookiecutter`: https://github.com/audreyr/cookiecutter
205 | .. _`@hackebrot`: https://github.com/hackebrot
206 | .. _`MIT`: http://opensource.org/licenses/MIT
210 | .. _`cookiecutter-pytest-plugin`: https://github.com/pytest-dev/cookiecutter-pytest-plugin
211 | .. _`file an issue`: https://github.com/ESSS/pytest-replay/issues
212 | .. _`pytest`: https://github.com/pytest-dev/pytest
213 | .. _`tox`: https://tox.readthedocs.io/en/latest/
214 | .. _`pip`: https://pypi.python.org/pypi/pip/
215 | .. _`PyPI`: https://pypi.python.org/pypi
216 |
--------------------------------------------------------------------------------
/src/pytest_replay/__init__.py:
--------------------------------------------------------------------------------
1 | import collections
2 | import dataclasses
3 | import json
4 | import os
5 | import time
6 | from dataclasses import asdict
7 | from glob import glob
8 | from pathlib import Path
9 | from typing import Any
10 | from typing import Optional
11 |
12 | import pytest
13 |
14 |
15 | def pytest_addoption(parser):
16 | group = parser.getgroup("replay")
17 | group.addoption(
18 | "--replay-record-dir",
19 | action="store",
20 | dest="replay_record_dir",
21 | default=None,
22 | help="Directory to write record files to reproduce runs.",
23 | )
24 | group.addoption(
25 | "--replay",
26 | action="extend",
27 | nargs="*",
28 | type=Path,
29 | dest="replay_files",
30 | default=[],
31 |         help="Use one or more replay files to run only the tests recorded in them.",
32 | )
33 | group.addoption(
34 | "--replay-base-name",
35 | action="store",
36 | dest="base_name",
37 | default=".pytest-replay",
38 | help="Base name for the output file.",
39 | )
40 | group.addoption(
41 | "--replay-skip-cleanup",
42 | action="store_true",
43 | dest="skip_cleanup",
44 | default=False,
45 | help="Skips cleanup scripts before running (does not remove previously "
46 | "generated replay files).",
47 | )
48 |
49 |
50 | @dataclasses.dataclass
51 | class ReplayTestInfo:
52 | nodeid: str
53 | start: float = 0.0
54 | finish: Optional[float] = None
55 | outcome: Optional[str] = None
56 | metadata: dict[str, Any] = dataclasses.field(default_factory=dict)
57 | xdist_group: Optional[str] = None
58 |
59 | def to_clean_dict(self) -> dict[str, Any]:
60 | return {k: v for k, v in asdict(self).items() if v}
61 |
62 |
63 | class _ReplayTestInfoDefaultDict(collections.defaultdict):
64 | def __missing__(self, key):
65 | self[key] = ReplayTestInfo(nodeid=key)
66 | return self[key]
67 |
68 |
69 | class ReplayPlugin:
70 | def __init__(self, config):
71 | self.dir = config.getoption("replay_record_dir")
72 | self.base_script_name = config.getoption("base_name")
73 | if self.dir:
74 | self.dir = os.path.abspath(self.dir)
75 | nprocs = config.getoption("numprocesses", 0)
76 | self.running_xdist = nprocs is not None and nprocs > 1
77 | self.xdist_worker_name = os.environ.get("PYTEST_XDIST_WORKER", "")
78 | self.ext = ".txt"
79 | self.written_nodeids = set()
80 | skip_cleanup = config.getoption("skip_cleanup", False)
81 | if not skip_cleanup:
82 | self.cleanup_scripts()
83 | self.nodes = _ReplayTestInfoDefaultDict()
84 | self.session_start_time = config.replay_start_time
85 |
86 | @pytest.fixture(scope="function")
87 | def replay_metadata(self, request):
88 | return self.nodes[request.node.nodeid]
89 |
90 | def cleanup_scripts(self):
91 | if self.xdist_worker_name:
92 | # only cleanup scripts on the master node
93 | return
94 | if self.dir:
95 | if os.path.isdir(self.dir):
96 | if self.running_xdist:
97 | mask = os.path.join(
98 | self.dir, self.base_script_name + "-*" + self.ext
99 | )
100 | else:
101 | mask = os.path.join(self.dir, self.base_script_name + self.ext)
102 | for fn in glob(mask):
103 | os.remove(fn)
104 | else:
105 | os.makedirs(self.dir)
106 |
107 | def pytest_runtest_logstart(self, nodeid):
108 | if self.running_xdist and not self.xdist_worker_name:
109 | # only workers report running tests when running in xdist
110 | return
111 | if self.dir:
112 | self.nodes[nodeid].start = time.perf_counter() - self.session_start_time
113 | json_content = json.dumps(self.nodes[nodeid].to_clean_dict())
114 | self.append_test_to_script(nodeid, json_content)
115 |
116 | @pytest.hookimpl(hookwrapper=True)
117 | def pytest_runtest_makereport(self, item):
118 | report = yield
119 | result = report.get_result()
120 | if self.dir:
121 | self.nodes[item.nodeid].outcome = (
122 | self.nodes[item.nodeid].outcome or result.outcome
123 | )
124 | current = self.nodes[item.nodeid].outcome
125 | if not result.passed and current != "failed":
126 | # do not overwrite a failed outcome with a skipped one
127 | self.nodes[item.nodeid].outcome = result.outcome
128 |
129 | if result.when == "teardown":
130 | self.nodes[item.nodeid].finish = (
131 | time.perf_counter() - self.session_start_time
132 | )
133 | json_content = json.dumps(self.nodes[item.nodeid].to_clean_dict())
134 | self.append_test_to_script(item.nodeid, json_content)
135 |
136 | def pytest_collection_modifyitems(self, items, config):
137 | replay_files = config.getoption("replay_files")
138 | if not replay_files:
139 | return
140 |
141 | enable_xdist = len(replay_files) > 1
142 |
143 | # Use a dict to deduplicate the node ids while keeping the order.
144 | nodeids = {}
145 | for num, single_rep in enumerate(replay_files):
146 | with open(single_rep, encoding="UTF-8") as f:
147 | for line in f.readlines():
148 | stripped = line.strip()
149 | # Ignore blank lines and comments. (#70)
150 | if stripped and not stripped.startswith(("#", "//")):
151 | node_info = json.loads(stripped)
152 | nodeid = node_info["nodeid"]
153 | if enable_xdist:
154 | node_info["xdist_group"] = f"replay-gw{num}"
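                            # Only the line written after a test finishes carries the complete
                            # record (outcome, metadata); seed self.nodes from it so the
                            # replay_metadata fixture can restore it during the replay run.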
155 | if "finish" in node_info:
156 | self.nodes[nodeid] = ReplayTestInfo(**node_info)
157 | nodeids[nodeid] = None
158 |
159 | items_dict = {item.nodeid: item for item in items}
160 | remaining = []
161 | # Make sure to respect the order from the JSON file (#52).
162 | for nodeid in nodeids:
163 |             item = items_dict.pop(nodeid, None)
164 | if item:
165 | if xdist_group := self.nodes[nodeid].xdist_group:
166 | item.add_marker(pytest.mark.xdist_group(name=xdist_group))
167 | remaining.append(item)
168 | deselected = list(items_dict.values())
169 |
170 | if deselected:
171 | config.hook.pytest_deselected(items=deselected)
172 |
173 | items[:] = remaining
174 |
175 | def append_test_to_script(self, nodeid, line):
176 | suffix = "-" + self.xdist_worker_name if self.xdist_worker_name else ""
177 | fn = os.path.join(self.dir, self.base_script_name + suffix + self.ext)
178 | with open(fn, "a", encoding="UTF-8") as f:
179 | f.write(line + "\n")
180 | f.flush()
181 | self.written_nodeids.add(nodeid)
182 |
183 |
184 | class DeferPlugin:
185 | def pytest_configure_node(self, node):
186 | node.workerinput["replay_start_time"] = node.config.replay_start_time
187 |
188 |
189 | @pytest.hookimpl(tryfirst=True)
190 | def pytest_load_initial_conftests(early_config, parser, args):
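    # Runs before the command line is parsed, so xdist options can still be
    # appended to args below when multiple replay files are passed.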
191 | # Check both plugin names: "xdist" (normal install) and "xdist.plugin" (frozen executables with -p flag)
192 | is_xdist_enabled = early_config.pluginmanager.has_plugin(
193 | "xdist"
194 | ) or early_config.pluginmanager.has_plugin("xdist.plugin")
195 | replay_files = parser.parse_known_args(args).replay_files
196 |
197 | if len(replay_files) > 1 and not is_xdist_enabled:
198 | raise pytest.UsageError(
199 | "Cannot use --replay with multiple files without pytest-xdist installed."
200 | )
201 | if len(replay_files) > 1:
202 | if any(
203 | map(
204 | lambda x: any(
205 | x == arg or x.startswith(f"{arg}=")
206 | for arg in ("-n", "--dist", "--numprocesses", "--maxprocesses")
207 | ),
208 | args,
209 | )
210 | ):
211 | raise pytest.UsageError(
212 |                 "Cannot use --replay with multiple files together with -n/--numprocesses, --dist or --maxprocesses."
213 | )
214 | args.extend(["-n", str(len(replay_files)), "--dist", "loadgroup"])
215 |
216 |
217 | def pytest_configure(config):
218 | if config.getoption("replay_record_dir") or config.getoption("replay_files"):
219 | if hasattr(config, "workerinput"):
220 | config.replay_start_time = config.workerinput["replay_start_time"]
221 | else:
222 | config.replay_start_time = time.perf_counter()
223 | # check for xdist and xdist.plugin: the former is the name of the plugin in normal
224 | # circumstances, the latter happens when xdist is loaded explicitly using '-p' in
225 | # a frozen executable
226 | if config.pluginmanager.has_plugin("xdist") or config.pluginmanager.has_plugin(
227 | "xdist.plugin"
228 | ):
229 | config.pluginmanager.register(DeferPlugin())
230 | config.pluginmanager.register(ReplayPlugin(config), "replay-writer")
231 |
232 |
233 | def pytest_report_header(config):
234 | if config.getoption("replay_record_dir"):
235 | return "replay dir: {}".format(config.getoption("replay_record_dir"))
236 |
--------------------------------------------------------------------------------
/tests/test_replay.py:
--------------------------------------------------------------------------------
1 | import itertools as it
2 | import json
3 | import re
4 | from pathlib import Path
5 |
6 | import pytest
7 |
8 |
9 | @pytest.mark.parametrize(
10 | "extra_option", [(None, ".pytest-replay"), ("--replay-base-name", "NEW-BASE-NAME")]
11 | )
12 | def test_normal_execution(suite, testdir, extra_option, monkeypatch):
13 | """Ensure scripts are created and the tests are executed when using --replay."""
14 |
15 | class MockTime:
16 | fake_time = 0.0
17 |
18 | @classmethod
19 | def perf_counter(cls):
20 | cls.fake_time += 1.0
21 | return cls.fake_time
22 |
23 | monkeypatch.setattr("pytest_replay.time", MockTime)
24 |
25 | extra_arg, base_name = extra_option
26 | dir = testdir.tmpdir / "replay"
27 | options = ["test_1.py", f"--replay-record-dir={dir}"]
28 |
29 | if extra_arg:
30 | options.append(f"{extra_arg}={base_name}")
31 |
32 | result = testdir.runpytest(*options)
33 |
34 | result.stdout.fnmatch_lines(f"*replay dir: {dir}")
35 |
36 | replay_file = dir / f"{base_name}.txt"
37 | contents = replay_file.readlines(True)
38 | contents = [json.loads(line.strip()) for line in contents]
39 | assert len(contents) == 4
40 | assert contents[0] == {"nodeid": "test_1.py::test_foo", "start": 1.0}
41 | assert contents[1] == {
42 | "nodeid": "test_1.py::test_foo",
43 | "start": 1.0,
44 | "finish": 2.0,
45 | "outcome": "passed",
46 | }
47 | assert contents[2] == {"nodeid": "test_1.py::test_bar", "start": 3.0}
48 | assert contents[3] == {
49 | "nodeid": "test_1.py::test_bar",
50 | "start": 3.0,
51 | "finish": 4.0,
52 | "outcome": "passed",
53 | }
54 | assert result.ret == 0
55 | result = testdir.runpytest(f"--replay={replay_file}")
56 | assert result.ret == 0
57 | result.stdout.fnmatch_lines(["test_1.py*100%*", "*= 2 passed, 2 deselected in *="])
58 |
59 |
60 | @pytest.mark.parametrize("comment_format", ["#", "//"])
61 | @pytest.mark.parametrize("name_to_comment, deselected", [("foo", 2), ("zz", 1)])
62 | def test_line_comments(suite, testdir, comment_format, name_to_comment, deselected):
63 | """Check line comments"""
64 |
65 | replay_dir = testdir.tmpdir / "replay"
66 | result = testdir.runpytest(f"--replay-record-dir={replay_dir}")
67 | replay_file = replay_dir / ".pytest-replay.txt"
68 |
69 | contents = replay_file.readlines(True)
70 | contents = [line.strip() for line in contents]
71 | contents = [
72 | (comment_format + line) if name_to_comment in line else line
73 | for line in contents
74 | ]
75 |     replay_file_commented = replay_dir / ".pytest-replay_commented.txt"
76 | replay_file_commented.write_text("\n".join(contents), encoding="utf-8")
77 |
78 | result = testdir.runpytest(f"--replay={replay_file_commented}")
79 | assert result.ret == 0
80 | passed = 4 - deselected
81 | result.stdout.fnmatch_lines([f"*= {passed} passed, {deselected} deselected in *="])
82 |
83 |
84 | @pytest.mark.parametrize("do_crash", [True, False])
85 | def test_crash(testdir, do_crash):
86 | testdir.makepyfile(
87 | test_crash="""
88 | import os
89 | def test_crash():
90 | if {do_crash}:
91 | os._exit(1)
92 | def test_normal():
93 | pass
94 | """.format(
95 | do_crash=do_crash
96 | )
97 | )
98 | dir = testdir.tmpdir / "replay"
99 | result = testdir.runpytest_subprocess(f"--replay-record-dir={dir}")
100 |
101 | contents = (dir / ".pytest-replay.txt").read()
102 | test_id = "test_crash.py::test_normal"
103 | if do_crash:
104 | assert test_id not in contents
105 | assert result.ret != 0
106 | else:
107 | assert test_id in contents
108 | assert result.ret == 0
109 |
110 |
111 | def test_xdist(testdir):
112 | testdir.makepyfile(
113 | """
114 | import pytest
115 | @pytest.mark.parametrize('i', range(10))
116 | def test(i):
117 | pass
118 | """
119 | )
120 | dir = testdir.tmpdir / "replay"
121 | procs = 2
122 | testdir.runpytest_subprocess("-n", str(procs), f"--replay-record-dir={dir}")
123 |
124 | files = dir.listdir()
125 | assert len(files) == procs
126 | test_ids = set()
127 | for f in files:
128 | test_ids.update({json.loads(x.strip())["nodeid"] for x in f.readlines()})
129 | expected_ids = {f"test_xdist.py::test[{x}]" for x in range(10)}
130 | assert test_ids == expected_ids
131 |
132 |
133 | @pytest.mark.parametrize("reverse", [True, False])
134 | def test_alternate_serial_parallel_does_not_erase_runs(suite, testdir, reverse):
135 | """xdist and normal runs should not erase each other's files."""
136 | command_lines = [
137 | ("-n", "2", "--replay-record-dir=replay"),
138 | ("--replay-record-dir=replay",),
139 | ]
140 | if reverse:
141 | command_lines.reverse()
142 | for command_line in command_lines:
143 | result = testdir.runpytest_subprocess(*command_line)
144 | assert result.ret == 0
145 | assert {x.basename for x in (testdir.tmpdir / "replay").listdir()} == {
146 | ".pytest-replay.txt",
147 | ".pytest-replay-gw0.txt",
148 | ".pytest-replay-gw1.txt",
149 | }
150 |
151 |
152 | def test_skip_cleanup_does_not_erase_replay_files(suite, testdir):
153 | """--replay-skip-cleanup will not erase replay files, appending data on next run."""
154 | command_lines = [
155 | ("-n", "2", "--replay-record-dir=replay"),
156 | ("-n", "2", "--replay-record-dir=replay", "--replay-skip-cleanup"),
157 | ]
158 |
159 | expected_node_ids = [
160 | "test_1.py::test_foo",
161 | "test_1.py::test_foo",
162 | "test_1.py::test_bar",
163 | "test_1.py::test_bar",
164 | ]
165 |
166 | dir = testdir.tmpdir / "replay"
167 | expected = expected_node_ids[:]
168 | for command_line in command_lines:
169 | result = testdir.runpytest_subprocess(*command_line)
170 | assert result.ret == 0
171 | assert {x.basename for x in dir.listdir()} == {
172 | ".pytest-replay-gw0.txt",
173 | ".pytest-replay-gw1.txt",
174 | }
175 |
176 | replay_file = dir / ".pytest-replay-gw0.txt"
177 | contents = [json.loads(line)["nodeid"] for line in replay_file.readlines()]
178 | assert contents == expected
179 | # Next run will expect same tests appended again.
180 | expected.extend(expected_node_ids)
181 |
182 |
183 | def test_cwd_changed(testdir):
184 |     """Ensure that the plugin works even if some tests change the cwd."""
185 | testdir.tmpdir.join("subdir").ensure(dir=1)
186 | testdir.makepyfile(
187 | """
188 | import os
189 | def test_1():
190 | os.chdir('subdir')
191 | def test_2():
192 | pass
193 | """
194 | )
195 | dir = testdir.tmpdir / "replay"
196 | result = testdir.runpytest_subprocess("--replay-record-dir={}".format("replay"))
197 | replay_file = dir / ".pytest-replay.txt"
198 | contents = {json.loads(line)["nodeid"] for line in replay_file.readlines()}
199 | expected = {"test_cwd_changed.py::test_1", "test_cwd_changed.py::test_2"}
200 | assert contents == expected
201 | assert result.ret == 0
202 |
203 |
204 | @pytest.mark.usefixtures("suite")
205 | def test_execution_different_order(testdir):
206 | """Ensure tests execute in the order defined by the JSON file, not collection (#52)."""
207 | dir = testdir.tmpdir / "replay"
208 | options = [f"--replay-record-dir={dir}"]
209 | result = testdir.runpytest(*options)
210 |
211 | replay_file = dir / ".pytest-replay.txt"
212 |
213 | with replay_file.open("r+") as f:
214 | content = f.readlines()
215 |
216 | # pairwise shuffle of replay file
217 | pairs = [(content[i], content[i + 1]) for i in range(0, len(content), 2)]
218 | pairs = [pairs[2], pairs[0], pairs[3], pairs[1]]
219 | content = list(it.chain.from_iterable(pairs))
220 |
221 | f.seek(0)
222 | f.writelines(content)
223 |
224 | result = testdir.runpytest(f"--replay={replay_file}", "-v")
225 | assert result.ret == 0
226 | result.stdout.fnmatch_lines(
227 | [
228 | "test_2.py::test_zz*25%*",
229 | "test_1.py::test_foo*50%*",
230 | "test_3.py::test_foobar*75%*",
231 | "test_1.py::test_bar*100%*",
232 | ],
233 | consecutive=True,
234 | )
235 |
236 |
237 | @pytest.mark.usefixtures("suite")
238 | def test_filter_out_tests_not_in_file(testdir):
239 | """Tests not found in the JSON file should not run."""
240 | dir = testdir.tmpdir / "replay"
241 | options = [f"--replay-record-dir={dir}", "-k", "foo"]
242 | result = testdir.runpytest(*options)
243 |
244 | replay_file = dir / ".pytest-replay.txt"
245 |
246 | result = testdir.runpytest(f"--replay={replay_file}", "-v")
247 | assert result.ret == 0
248 | result.stdout.fnmatch_lines(
249 | [
250 | "test_1.py::test_foo*50%*",
251 | "test_3.py::test_foobar*100%*",
252 | ],
253 | consecutive=True,
254 | )
255 |
256 |
257 | def test_metadata(pytester, tmp_path):
258 | pytester.makepyfile(
259 | """
260 | import pytest
261 |
262 | @pytest.fixture
263 | def seed(replay_metadata):
264 | assert replay_metadata.metadata == {}
265 | replay_metadata.metadata["seed"] = seed = 1234
266 | return seed
267 |
268 | def test_foo(seed):
269 | assert seed == 1234
270 | """
271 | )
272 | dir = tmp_path / "replay"
273 | result = pytester.runpytest(f"--replay-record-dir={dir}")
274 | assert result.ret == 0
275 |
276 |     # Rewrite the fixture to always return the metadata, as written previously.
277 | pytester.makepyfile(
278 | """
279 | import pytest
280 |
281 | @pytest.fixture
282 | def seed(replay_metadata):
283 | return replay_metadata.metadata["seed"]
284 |
285 | def test_foo(seed):
286 | assert seed == 1234
287 | """
288 | )
289 | result = pytester.runpytest(f"--replay={dir / '.pytest-replay.txt'}")
290 | assert result.ret == 0
291 |
292 |
293 | def test_replay_file_outcome_is_correct(testdir):
294 | """Tests that the outcomes in the replay file are correct."""
295 | testdir.makepyfile(
296 | test_module="""
297 | import pytest
298 |
299 | def test_success():
300 | pass
301 |
302 | def test_failure():
303 | assert False
304 |
305 | @pytest.fixture
306 | def failing_teardown_fixture():
307 | yield
308 | assert False
309 |
310 | def test_failure_fixture_teardown(failing_teardown_fixture):
311 | assert True
312 |
313 | @pytest.fixture
314 | def failing_setup_fixture():
315 | assert False
316 |
317 | def test_failure_fixture_setup(failing_setup_fixture):
318 | assert True
319 | """
320 | )
321 | dir = testdir.tmpdir / "replay"
322 | result = testdir.runpytest_subprocess(f"--replay-record-dir={dir}")
323 | assert result.ret != 0
324 |
325 | contents = [json.loads(s) for s in (dir / ".pytest-replay.txt").read().splitlines()]
326 | outcomes = {r["nodeid"]: r["outcome"] for r in contents if "outcome" in r}
327 | assert outcomes == {
328 | "test_module.py::test_success": "passed",
329 | "test_module.py::test_failure": "failed",
330 | "test_module.py::test_failure_fixture_teardown": "failed",
331 | "test_module.py::test_failure_fixture_setup": "failed",
332 | }
333 |
334 |
335 | def test_replay_file_outcome_is_correct_xdist(testdir):
336 | """Tests that the outcomes in the replay file are correct when running in parallel."""
337 | testdir.makepyfile(
338 | test_module="""
339 | import pytest
340 |
341 | @pytest.mark.parametrize('i', range(10))
342 | def test_val(i):
343 | assert i < 5
344 | """
345 | )
346 | dir = testdir.tmpdir / "replay"
347 | procs = 2
348 | result = testdir.runpytest_subprocess(f"--replay-record-dir={dir}", f"-n {procs}")
349 | assert result.ret != 0
350 |
351 | contents = [
352 | s
353 | for n in range(procs)
354 | for s in (dir / f".pytest-replay-gw{n}.txt").read().splitlines()
355 | ]
356 | pattern = re.compile(r"test_val\[(\d+)\]")
357 | for content in contents:
358 | parsed = json.loads(content)
359 | if "outcome" not in parsed:
360 | continue
361 |
362 | i = int(pattern.search(parsed["nodeid"]).group(1))
363 | if i < 5:
364 | assert parsed["outcome"] == "passed", i
365 | else:
366 | assert parsed["outcome"] == "failed", i
367 |
368 |
369 | def test_outcomes_in_replay_file(testdir):
370 |     """Check how outcomes are handled in the report hook when the various
371 |     phases yield failure or skipped."""
372 | testdir.makepyfile(
373 | test_module="""
374 | import pytest
375 |
376 | @pytest.fixture()
377 | def skip_setup():
378 | pytest.skip("skipping")
379 | yield
380 |
381 | @pytest.fixture()
382 | def skip_teardown():
383 | yield
384 | pytest.skip("skipping")
385 |
386 | @pytest.fixture()
387 | def fail_setup():
388 | assert False
389 |
390 | @pytest.fixture()
391 | def fail_teardown():
392 | yield
393 | assert False
394 |
395 | def test_skip_fail(skip_setup, fail_teardown):
396 | pass
397 |
398 | def test_fail_skip(fail_setup, skip_teardown):
399 | pass
400 |
401 | def test_skip_setup(skip_setup):
402 | pass
403 |
404 | def test_skip_teardown(skip_teardown):
405 | pass
406 |
407 | def test_test_fail_skip_teardown(skip_teardown):
408 | assert False
409 | """
410 | )
411 | dir = testdir.tmpdir / "replay"
412 | testdir.runpytest_subprocess(f"--replay-record-dir={dir}")
413 |
414 | contents = [json.loads(s) for s in (dir / ".pytest-replay.txt").read().splitlines()]
415 | outcomes = {r["nodeid"]: r["outcome"] for r in contents if "outcome" in r}
416 | assert outcomes == {
417 | "test_module.py::test_skip_fail": "skipped",
418 | "test_module.py::test_fail_skip": "failed",
419 | "test_module.py::test_skip_setup": "skipped",
420 | "test_module.py::test_skip_teardown": "skipped",
421 | "test_module.py::test_test_fail_skip_teardown": "failed",
422 | }
423 |
424 |
425 | @pytest.mark.usefixtures("suite")
426 | def test_empty_or_blank_lines(testdir):
427 |     """Empty or blank lines in replay files should be ignored."""
428 | dir = testdir.tmpdir / "replay"
429 | options = [f"--replay-record-dir={dir}"]
430 | result = testdir.runpytest(*options)
431 | replay_file: Path = dir / ".pytest-replay.txt"
432 |
433 | with replay_file.open("r+") as f:
434 | content = f.readlines()
435 |
436 | # Add empty line
437 | content.insert(1, "\n")
438 | # Add blank line
439 | content.insert(1, " \n")
440 | # Add empty line
441 | content.append("\n")
442 | # Add mixed blank line
443 | content.append("\t \n")
444 | f.seek(0)
445 | f.writelines(content)
446 |
447 | result = testdir.runpytest(f"--replay={replay_file}", "-v")
448 | assert result.ret == 0
449 |
450 |
451 | def test_custom_command_line_options(testdir):
452 | """Custom command-line options from other plugins should not break pytest-replay (#105)."""
453 | testdir.makeconftest(
454 | """
455 | def pytest_addoption(parser):
456 | parser.addoption(
457 | '--custom-option',
458 | action='store_true',
459 | default=False,
460 | help='A custom command-line option'
461 | )
462 | """
463 | )
464 | testdir.makepyfile(
465 | """
466 | def test_with_custom_option(request):
467 | assert request.config.getoption('custom_option') is True
468 | """
469 | )
470 | record_dir = testdir.tmpdir / "replay"
471 | result = testdir.runpytest(f"--replay-record-dir={record_dir}", "--custom-option")
472 | assert result.ret == 0
473 |
474 | replay_file = record_dir / ".pytest-replay.txt"
475 | result = testdir.runpytest(f"--replay={replay_file}", "--custom-option")
476 | assert result.ret == 0
477 |
--------------------------------------------------------------------------------