├── src
│   └── pytest_subtests
│       ├── py.typed
│       ├── __init__.py
│       └── plugin.py
├── tests
│   ├── conftest.py
│   └── test_subtests.py
├── pytest.ini
├── .github
│   ├── dependabot.yml
│   └── workflows
│       ├── deploy.yml
│       └── test.yml
├── tox.ini
├── pyproject.toml
├── .pre-commit-config.yaml
├── LICENSE
├── RELEASING.rst
├── setup.cfg
├── .gitignore
├── CHANGELOG.rst
└── README.rst

--------------------------------------------------------------------------------
/src/pytest_subtests/py.typed:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | pytest_plugins = "pytester"
2 |
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | addopts = -ra
3 | testpaths = tests
4 |
--------------------------------------------------------------------------------
/src/pytest_subtests/__init__.py:
--------------------------------------------------------------------------------
1 | from .plugin import SubTests
2 | 
3 | __all__ = ["SubTests"]
4 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: github-actions
4 |   directory: /
5 |   schedule:
6 |     interval: weekly
7 |     time: "03:00"
8 |   open-pull-requests-limit: 10
9 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | envlist = py39,py310,py311,py312,py313,pytest7
3 | 
4 | [testenv]
5 | deps =
6 |     pytest-xdist>=3.3.0
7 |     pytest7: pytest ~=7.4
8 | 
9 | commands =
10 |     pytest {posargs:tests}
11 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = [
3 |     "setuptools",
4 |     "setuptools-scm[toml]>=6.2.3",
5 | ]
6 | build-backend = "setuptools.build_meta"
7 | 
8 | [tool.mypy]
9 | disallow_untyped_defs = true
10 | warn_unreachable = true
11 | warn_unused_configs = true
12 | warn_unused_ignores = true
13 | 
14 | [tool.setuptools_scm]
15 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | -   repo: https://github.com/psf/black-pre-commit-mirror
3 |     rev: 25.9.0
4 |     hooks:
5 |     -   id: black
6 |         args: [--safe, --quiet]
7 | -   repo: https://github.com/pre-commit/pre-commit-hooks
8 |     rev: v6.0.0
9 |     hooks:
10 |     -   id: trailing-whitespace
11 |     -   id: end-of-file-fixer
12 |     -   id: check-yaml
13 |     -   id: debug-statements
14 | -   repo: https://github.com/asottile/reorder-python-imports
15 |     rev: v3.16.0
16 |     hooks:
17 |     -   id: reorder-python-imports
18 | -   repo: https://github.com/pre-commit/mirrors-mypy
19 |     rev: v1.18.2
20 |     hooks:
21 |     -   id: mypy
22 |         files: ^(src|tests)
23 |         args: []
24 |         additional_dependencies: [attrs>=19.2.0, pytest>=7, typing-extensions]
25 | -   repo: local
26 |     hooks:
27 |     -   id: rst
28 |         name: rst
29 |         entry: rst-lint --encoding utf-8
30 |         files: ^(CHANGELOG.rst|RELEASING.rst|README.rst)$
31 |         language: python
32 |         additional_dependencies: [pygments, restructuredtext_lint]
33 |
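# A minimal sketch of running these hooks locally (assumes ``pre-commit`` itself
# is installed, e.g. via ``pip install pre-commit``; both commands below are
# standard pre-commit usage, not specific to this repository):
#
#   pre-commit install          # run the configured hooks on every commit
#   pre-commit run --all-files  # or run them once over the whole repository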
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | 
2 | The MIT License (MIT)
3 | 
4 | Copyright (c) 2019 Bruno Oliveira
5 | 
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 | 
13 | The above copyright notice and this permission notice shall be included in
14 | all copies or substantial portions of the Software.
15 | 
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 | THE SOFTWARE.
23 |
--------------------------------------------------------------------------------
/RELEASING.rst:
--------------------------------------------------------------------------------
1 | =========================
2 | Releasing pytest-subtests
3 | =========================
4 | 
5 | This document describes the steps to make a new ``pytest-subtests`` release.
6 | 
7 | Version
8 | -------
9 | 
10 | ``main`` should always be green and a potential release candidate. ``pytest-subtests`` follows
11 | semantic versioning, so given that the current version is ``X.Y.Z``, to find the next version number
12 | one needs to look at the ``CHANGELOG.rst`` file:
13 | 
14 | - If there is any new feature, then we must make a new **minor** release: the next
15 |   release will be ``X.Y+1.0``.
16 | 
17 | - Otherwise it is just a **bug fix** release: ``X.Y.Z+1``.
18 | 
19 | 
20 | Steps
21 | -----
22 | 
23 | To publish a new release ``X.Y.Z``, the steps are as follows:
24 | 
25 | #. Create a new branch named ``release-X.Y.Z`` from the latest ``main``.
26 | 
27 | #. Update the ``CHANGELOG.rst`` file with the new release information.
28 | 
29 | #. Commit and push the branch to ``upstream`` and open a PR.
30 | 
31 | #. Once the PR is **green** and **approved**, start the ``deploy`` workflow:
32 | 
33 |    .. code-block:: console
34 | 
35 |        gh workflow run deploy.yml -R pytest-dev/pytest-subtests --ref release-X.Y.Z --field version=X.Y.Z
36 | 
37 |    The PR will be automatically merged.
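
Locally, the first three steps above might look like this (a sketch only; it assumes
the ``pytest-dev/pytest-subtests`` remote is named ``upstream`` and uses ``1.2.3`` as
a stand-in for the real version):

.. code-block:: console

    git fetch upstream
    git checkout -b release-1.2.3 upstream/main
    # update CHANGELOG.rst, then:
    git commit -a -m "Prepare release 1.2.3"
    git push upstream release-1.2.3
    # open a PR for the release-1.2.3 branch on GitHub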
38 | -------------------------------------------------------------------------------- /.github/workflows/deploy.yml: -------------------------------------------------------------------------------- 1 | name: deploy 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | version: 7 | description: 'Release version' 8 | required: true 9 | default: '1.2.3' 10 | 11 | jobs: 12 | 13 | package: 14 | runs-on: ubuntu-latest 15 | env: 16 | SETUPTOOLS_SCM_PRETEND_VERSION: ${{ github.event.inputs.version }} 17 | 18 | steps: 19 | - uses: actions/checkout@v5 20 | 21 | - name: Build and Check Package 22 | uses: hynek/build-and-inspect-python-package@v2.14 23 | 24 | deploy: 25 | needs: package 26 | runs-on: ubuntu-latest 27 | environment: deploy 28 | permissions: 29 | id-token: write # For PyPI trusted publishers. 30 | contents: write # For tag. 31 | 32 | steps: 33 | - uses: actions/checkout@v5 34 | 35 | - name: Download Package 36 | uses: actions/download-artifact@v6 37 | with: 38 | name: Packages 39 | path: dist 40 | 41 | - name: Publish package to PyPI 42 | uses: pypa/gh-action-pypi-publish@v1.13.0 43 | with: 44 | attestations: true 45 | 46 | - name: GitHub Release 47 | env: 48 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 49 | run: | 50 | gh release create v${{ github.event.inputs.version }} --target=${{ github.ref_name }} --title v${{ github.event.inputs.version }} 51 | gh pr merge ${{ github.ref_name }} --merge 52 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = pytest-subtests 3 | description = unittest subTest() support and subtests fixture 4 | long_description = file: README.rst 5 | long_description_content_type = text/x-rst 6 | url = https://github.com/pytest-dev/pytest-subtests 7 | author = Bruno Oliveira 8 | license = MIT 9 | license_file = LICENSE 10 | classifiers = 11 | Development Status :: 4 - Beta 12 | Framework :: Pytest 13 | Intended Audience :: Developers 14 | Topic :: Software Development :: Testing 15 | Programming Language :: Python 16 | Programming Language :: Python :: 3 17 | Programming Language :: Python :: 3.9 18 | Programming Language :: Python :: 3.10 19 | Programming Language :: Python :: 3.11 20 | Programming Language :: Python :: 3.12 21 | Programming Language :: Python :: 3.13 22 | Programming Language :: Python :: Implementation :: CPython 23 | Operating System :: OS Independent 24 | License :: OSI Approved :: MIT License 25 | Typing :: Typed 26 | keywords = test, unittest, pytest 27 | 28 | [options] 29 | install_requires = 30 | attrs>=19.2.0 31 | pytest>=7.4 32 | python_requires = >=3.9 33 | packages = find: 34 | package_dir = 35 | = src 36 | setup_requires = 37 | setuptools 38 | setuptools-scm>=6.0 39 | 40 | [options.packages.find] 41 | where = src 42 | 43 | [options.entry_points] 44 | pytest11 = 45 | subtests = pytest_subtests.plugin 46 | 47 | [options.package_data] 48 | pytest_subtests = py.typed 49 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: test 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - "test-me-*" 8 | 9 | pull_request: 10 | branches: 11 | - "*" 12 | 13 | 14 | # Cancel running jobs for the same workflow and branch. 
15 | concurrency: 16 | group: ${{ github.workflow }}-${{ github.ref }} 17 | cancel-in-progress: true 18 | 19 | jobs: 20 | 21 | package: 22 | runs-on: ubuntu-latest 23 | steps: 24 | - uses: actions/checkout@v5 25 | - name: Build and Check Package 26 | uses: hynek/build-and-inspect-python-package@v2.14 27 | 28 | test: 29 | needs: [package] 30 | runs-on: ${{ matrix.os }} 31 | strategy: 32 | fail-fast: false 33 | matrix: 34 | os: ["ubuntu-latest", "windows-latest"] 35 | python: ["3.9", "3.10", "3.11", "3.12", "3.13"] 36 | tox_env: ["py"] 37 | include: 38 | - os: "ubuntu-latest" 39 | python: "3.9" 40 | tox_env: "pytest7-py" 41 | 42 | steps: 43 | - uses: actions/checkout@v5 44 | 45 | - name: Download Package 46 | uses: actions/download-artifact@v6 47 | with: 48 | name: Packages 49 | path: dist 50 | 51 | - name: Set up Python 52 | uses: actions/setup-python@v6 53 | with: 54 | python-version: ${{ matrix.python }} 55 | 56 | - name: Install tox 57 | run: | 58 | python -m pip install --upgrade pip 59 | python -m pip install --upgrade tox 60 | 61 | - name: Test 62 | shell: bash 63 | run: | 64 | tox run -e ${{ matrix.tox_env }} --installpkg `find dist/*.tar.gz` 65 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | db.sqlite3 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # mypy 104 | .mypy_cache/ 105 | 106 | # PyCharm. 107 | .idea/ 108 | -------------------------------------------------------------------------------- /CHANGELOG.rst: -------------------------------------------------------------------------------- 1 | CHANGELOG 2 | ========= 3 | 4 | 0.15.0 5 | ------ 6 | 7 | *2025-10-20* 8 | 9 | * Added experimental ``--no-subtests-reports`` CLI option. This disables 10 | subtests output unless it's a failed subtest. (`#198`_) 11 | 12 | .. 
_#198: https://github.com/pytest-dev/pytest-subtests/pull/198 13 | 14 | 0.14.2 15 | ------ 16 | 17 | *2025-06-13* 18 | 19 | * Print output "dots" for successful unittest subtests (`#164`_). 20 | * Improved reporting in case subtests raise `pytest.xfail` (`#194`_). 21 | 22 | .. _#164: https://github.com/pytest-dev/pytest-subtests/issues/164 23 | .. _#194: https://github.com/pytest-dev/pytest-subtests/pull/194 24 | 25 | 0.14.1 26 | ------ 27 | 28 | *2024-12-09* 29 | 30 | * Fix ``self.instance._outcome`` is ``None`` case in #173 (`#174`_). 31 | 32 | .. _#174: https://github.com/pytest-dev/pytest-subtests/pull/174 33 | 34 | 0.14.0 35 | ------ 36 | 37 | *2024-12-07* 38 | 39 | * Add support for Python 3.13. 40 | 41 | * Dropped support for EOL Python 3.8. 42 | 43 | * Fixed output when using ``TestCase.skipTest`` (`#169`_). 44 | 45 | * Fixed ``pytest`` requirement to ``>=7.3`` (`#159`_). 46 | 47 | .. _#159: https://github.com/pytest-dev/pytest-subtests/issues/159 48 | .. _#169: https://github.com/pytest-dev/pytest-subtests/pull/169 49 | 50 | 0.13.1 51 | ------ 52 | 53 | *2024-07-16* 54 | 55 | * Fixed bug were an extra test would execute when ``-x/--exitfirst`` was used (`#139`_). 56 | 57 | .. _#139: https://github.com/pytest-dev/pytest-subtests/pull/139 58 | 59 | 0.13.0 60 | ------ 61 | 62 | *2024-07-07* 63 | 64 | * Dropped support for EOL Python 3.7. 65 | * Added support for ``-x/--exitfirst`` (`#134`_). 66 | * Hide the traceback inside the ``SubTests.test()`` method (`#131`_). 67 | 68 | .. _#131: https://github.com/pytest-dev/pytest-subtests/pull/131 69 | .. _#134: https://github.com/pytest-dev/pytest-subtests/pull/134 70 | 71 | 0.12.1 72 | ------ 73 | 74 | *2024-03-07* 75 | 76 | * Fixed compatibility with upcoming pytest ``8.1.x``. (`#125`_). 77 | 78 | .. _#125: https://github.com/pytest-dev/pytest-subtests/issues/125 79 | 80 | 0.12.0 81 | ------ 82 | 83 | *2024-03-06* 84 | 85 | * Python 3.12 is now officially supported (`#113`_). 86 | * Added typing support (`#115`_). 87 | * ``SubTests`` can be imported from ``pytest_subtests`` to type-annotate the ``subtests`` fixture. 88 | 89 | .. _#113: https://github.com/pytest-dev/pytest-subtests/pull/113 90 | .. _#115: https://github.com/pytest-dev/pytest-subtests/pull/115 91 | 92 | 93 | 0.11.0 94 | ------ 95 | 96 | *2023-05-15* 97 | 98 | * Logging is displayed for failing subtests (`#92`_) 99 | * Passing subtests no longer turn the pytest output to yellow (as if warnings have been issued) (`#86`_). Thanks to `Andrew-Brock`_ for providing the solution. 100 | * Now the ``msg`` contents of a subtest is displayed when running pytest with ``-v`` (`#6`_). 101 | 102 | .. _#6: https://github.com/pytest-dev/pytest-subtests/issues/6 103 | .. _#86: https://github.com/pytest-dev/pytest-subtests/issues/86 104 | .. _#92: https://github.com/pytest-dev/pytest-subtests/issues/87 105 | 106 | .. _`Andrew-Brock`: https://github.com/Andrew-Brock 107 | 108 | 0.10.0 109 | ------ 110 | 111 | *2022-02-15* 112 | 113 | * Added experimental support for suppressing subtest output dots in non-verbose mode with ``--no-subtests-shortletter`` -- this allows the native pytest column calculations to not be disrupted and minimizes unneeded output for large CI systems. 114 | 115 | 0.9.0 116 | ----- 117 | 118 | *2022-10-28* 119 | 120 | * Python 3.11 is officially supported. 121 | * Dropped support for Python 3.6. 
122 | 123 | 0.8.0 124 | ----- 125 | 126 | *2022-05-26* 127 | 128 | * Now passing subtests are shown in the test run summary at the end (for example: ``10 failed, 1 passed, 10 subtests passed in 0.10s``) (`#70`_). 129 | 130 | .. _#70: https://github.com/pytest-dev/pytest-subtests/pull/70 131 | 132 | 0.7.0 133 | ----- 134 | 135 | *2022-02-13* 136 | 137 | * Fixed support for pytest 7.0, and ``pytest>=7.0`` is now required. 138 | 139 | 140 | 0.6.0 141 | ----- 142 | 143 | *2022-01-15* 144 | 145 | * ``pytest>=6.0`` is now required. 146 | * Added official support for Python 3.10. 147 | * Dropped support for Python 3.5. 148 | * Users no longer need to configure a warnings filter for the internal ``A private pytest class or function was used`` pytest warning (`#52`_). 149 | * **Experimental**: Use ``SUBPASS`` and ``,`` for passed subtests instead of general ``PASSED``, 150 | ``SUBFAIL`` and ``u`` for failed ones instead of ``FAILED`` (`#30`_). 151 | 152 | .. _#30: https://github.com/pytest-dev/pytest-subtests/pull/30 153 | .. _#52: https://github.com/pytest-dev/pytest-subtests/pull/52 154 | 155 | 0.5.0 156 | ----- 157 | 158 | *2021-05-29* 159 | 160 | * Added support for ``pytest.mark.xfail`` (`#40`_). 161 | 162 | .. _#40: https://github.com/pytest-dev/pytest-subtests/pull/40 163 | 164 | 0.4.0 165 | ----- 166 | 167 | *2020-12-13* 168 | 169 | * Added support for ``--pdb`` (`#22`_). 170 | 171 | .. _#22: https://github.com/pytest-dev/pytest-subtests/issues/22 172 | 173 | 0.3.2 174 | ----- 175 | 176 | *2020-08-01* 177 | 178 | * Fixed pytest 6.0 support. 179 | 180 | 0.3.1 181 | ----- 182 | 183 | *2020-05-20* 184 | 185 | * Fixed pytest 5.4 support. 186 | 187 | 0.3.0 188 | ----- 189 | 190 | *2020-01-22* 191 | 192 | * Dropped support for Python 3.4. 193 | * ``subtests`` now correctly captures and displays stdout/stderr (`#18`_). 194 | 195 | .. _#18: https://github.com/pytest-dev/pytest-subtests/issues/18 196 | 197 | 0.2.1 198 | ----- 199 | 200 | *2019-04-04* 201 | 202 | * Fixed verbose output reporting on Linux (`#7`_). 203 | 204 | .. _#7: https://github.com/pytest-dev/pytest-subtests/issues/7 205 | 206 | 0.2.0 207 | ----- 208 | 209 | *2019-04-03* 210 | 211 | * Subtests are correctly reported with ``pytest-xdist>=1.28``. 212 | 213 | 0.1.0 214 | ----- 215 | 216 | *2019-04-01* 217 | 218 | * First release to PyPI. 219 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | =============== 2 | pytest-subtests 3 | =============== 4 | 5 | unittest ``subTest()`` support and ``subtests`` fixture. 6 | 7 | .. image:: https://img.shields.io/pypi/v/pytest-subtests.svg 8 | :target: https://pypi.org/project/pytest-subtests 9 | :alt: PyPI version 10 | 11 | .. image:: https://img.shields.io/conda/vn/conda-forge/pytest-subtests.svg 12 | :target: https://anaconda.org/conda-forge/pytest-subtests 13 | 14 | .. image:: https://img.shields.io/pypi/pyversions/pytest-subtests.svg 15 | :target: https://pypi.org/project/pytest-subtests 16 | :alt: Python versions 17 | 18 | .. image:: https://github.com/pytest-dev/pytest-subtests/workflows/test/badge.svg 19 | :target: https://github.com/pytest-dev/pytest-subtests/actions 20 | 21 | .. 
image:: https://img.shields.io/badge/code%20style-black-000000.svg 22 | :target: https://github.com/ambv/black 23 | 24 | IMPORTANT 25 | --------- 26 | 27 | This plugin has been integrated directly into pytest ``9.0``, so the plugin itself will no longer be maintained and the repository will be archived. 28 | 29 | 30 | Features 31 | -------- 32 | 33 | * Adds support for `TestCase.subTest `__. 34 | 35 | * New ``subtests`` fixture, providing similar functionality for pure pytest tests. 36 | 37 | 38 | Installation 39 | ------------ 40 | 41 | You can install ``pytest-subtests`` via `pip`_ from `PyPI`_:: 42 | 43 | $ pip install pytest-subtests 44 | 45 | 46 | 47 | Usage 48 | ----- 49 | 50 | unittest subTest() example 51 | ^^^^^^^^^^^^^^^^^^^^^^^^^^ 52 | 53 | .. code-block:: python 54 | 55 | import unittest 56 | 57 | 58 | class T(unittest.TestCase): 59 | def test_foo(self): 60 | for i in range(5): 61 | with self.subTest("custom message", i=i): 62 | self.assertEqual(i % 2, 0) 63 | 64 | 65 | if __name__ == "__main__": 66 | unittest.main() 67 | 68 | 69 | **Output** 70 | 71 | .. code-block:: 72 | 73 | λ pytest .tmp\test-unit-subtest.py 74 | ======================== test session starts ======================== 75 | ... 76 | collected 1 item 77 | 78 | .tmp\test-unit-subtest.py FF. [100%] 79 | 80 | ============================= FAILURES ============================== 81 | _________________ T.test_foo [custom message] (i=1) _________________ 82 | 83 | self = 84 | 85 | def test_foo(self): 86 | for i in range(5): 87 | with self.subTest('custom message', i=i): 88 | > self.assertEqual(i % 2, 0) 89 | E AssertionError: 1 != 0 90 | 91 | .tmp\test-unit-subtest.py:9: AssertionError 92 | _________________ T.test_foo [custom message] (i=3) _________________ 93 | 94 | self = 95 | 96 | def test_foo(self): 97 | for i in range(5): 98 | with self.subTest('custom message', i=i): 99 | > self.assertEqual(i % 2, 0) 100 | E AssertionError: 1 != 0 101 | 102 | .tmp\test-unit-subtest.py:9: AssertionError 103 | ================ 2 failed, 1 passed in 0.07 seconds ================= 104 | 105 | 106 | ``subtests`` fixture example 107 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 108 | 109 | .. code-block:: python 110 | 111 | def test(subtests): 112 | for i in range(5): 113 | with subtests.test(msg="custom message", i=i): 114 | assert i % 2 == 0 115 | 116 | 117 | **Output** 118 | 119 | .. code-block:: 120 | 121 | λ pytest .tmp\test-subtest.py 122 | ======================== test session starts ======================== 123 | ... 124 | collected 1 item 125 | 126 | .tmp\test-subtest.py .F.F.. [100%] 127 | 128 | ============================= FAILURES ============================== 129 | ____________________ test [custom message] (i=1) ____________________ 130 | 131 | def test(subtests): 132 | for i in range(5): 133 | with subtests.test(msg='custom message', i=i): 134 | > assert i % 2 == 0 135 | E assert (1 % 2) == 0 136 | 137 | .tmp\test-subtest.py:4: AssertionError 138 | ____________________ test [custom message] (i=3) ____________________ 139 | 140 | def test(subtests): 141 | for i in range(5): 142 | with subtests.test(msg='custom message', i=i): 143 | > assert i % 2 == 0 144 | E assert (3 % 2) == 0 145 | 146 | .tmp\test-subtest.py:4: AssertionError 147 | ================ 2 failed, 1 passed in 0.07 seconds ================= 148 | 149 | Contributing 150 | ------------ 151 | Contributions are very welcome. Tests can be run with `tox`_. 
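
For example, to run the suite against the Python interpreter already on your ``PATH``
(a sketch; any environment from the ``envlist`` in ``tox.ini``, such as ``py313``, works too)::

    $ pip install tox
    $ tox -e py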
152 | 153 | License 154 | ------- 155 | 156 | Distributed under the terms of the `MIT`_ license, "pytest-subtests" is free and open source software 157 | 158 | 159 | Issues 160 | ------ 161 | 162 | If you encounter any problems, please `file an issue`_ along with a detailed description. 163 | 164 | .. _`Cookiecutter`: https://github.com/audreyr/cookiecutter 165 | .. _`@hackebrot`: https://github.com/hackebrot 166 | .. _`MIT`: http://opensource.org/licenses/MIT 167 | .. _`cookiecutter-pytest-plugin`: https://github.com/pytest-dev/cookiecutter-pytest-plugin 168 | .. _`file an issue`: https://github.com/pytest-dev/pytest-subtests/issues 169 | .. _`pytest`: https://github.com/pytest-dev/pytest 170 | .. _`tox`: https://tox.readthedocs.io/en/latest/ 171 | .. _`pip`: https://pypi.org/project/pip/ 172 | .. _`PyPI`: https://pypi.org/project/pytest-subtests/ 173 | 174 | ---- 175 | 176 | This `pytest`_ plugin was generated with `Cookiecutter`_ along with `@hackebrot`_'s `cookiecutter-pytest-plugin`_ template. 177 | -------------------------------------------------------------------------------- /src/pytest_subtests/plugin.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import sys 4 | import time 5 | from contextlib import contextmanager 6 | from contextlib import ExitStack 7 | from contextlib import nullcontext 8 | from typing import Any 9 | from typing import Callable 10 | from typing import ContextManager 11 | from typing import Generator 12 | from typing import Iterator 13 | from typing import Mapping 14 | from typing import TYPE_CHECKING 15 | from unittest import TestCase 16 | 17 | import attr 18 | import pluggy 19 | import pytest 20 | from _pytest._code import ExceptionInfo 21 | from _pytest.capture import CaptureFixture 22 | from _pytest.capture import FDCapture 23 | from _pytest.capture import SysCapture 24 | from _pytest.fixtures import SubRequest 25 | from _pytest.logging import catching_logs 26 | from _pytest.logging import LogCaptureHandler 27 | from _pytest.outcomes import OutcomeException 28 | from _pytest.reports import TestReport 29 | from _pytest.runner import CallInfo 30 | from _pytest.runner import check_interactive_exception 31 | from _pytest.unittest import TestCaseFunction 32 | 33 | 34 | if TYPE_CHECKING: 35 | from types import TracebackType 36 | 37 | from typing import Literal 38 | 39 | 40 | def pytest_addoption(parser: pytest.Parser) -> None: 41 | group = parser.getgroup("subtests") 42 | group.addoption( 43 | "--no-subtests-shortletter", 44 | action="store_true", 45 | dest="no_subtests_shortletter", 46 | default=False, 47 | help="Disables subtest output 'dots' in non-verbose mode (EXPERIMENTAL)", 48 | ) 49 | group.addoption( 50 | "--no-subtests-reports", 51 | action="store_true", 52 | dest="no_subtests_reports", 53 | default=False, 54 | help="Disables subtest output unless it's a failed subtest (EXPERIMENTAL)", 55 | ) 56 | 57 | 58 | @attr.s 59 | class SubTestContext: 60 | msg: str | None = attr.ib() 61 | kwargs: dict[str, Any] = attr.ib() 62 | 63 | 64 | @attr.s(init=False) 65 | class SubTestReport(TestReport): # type: ignore[misc] 66 | context: SubTestContext = attr.ib() 67 | 68 | @property 69 | def head_line(self) -> str: 70 | _, _, domain = self.location 71 | return f"{domain} {self.sub_test_description()}" 72 | 73 | def sub_test_description(self) -> str: 74 | parts = [] 75 | if isinstance(self.context.msg, str): 76 | parts.append(f"[{self.context.msg}]") 77 | if self.context.kwargs: 78 | 
params_desc = ", ".join( 79 | f"{k}={v!r}" for (k, v) in sorted(self.context.kwargs.items()) 80 | ) 81 | parts.append(f"({params_desc})") 82 | return " ".join(parts) or "()" 83 | 84 | def _to_json(self) -> dict: 85 | data = super()._to_json() 86 | del data["context"] 87 | data["_report_type"] = "SubTestReport" 88 | data["_subtest.context"] = attr.asdict(self.context) 89 | return data 90 | 91 | @classmethod 92 | def _from_json(cls, reportdict: dict[str, Any]) -> SubTestReport: 93 | report = super()._from_json(reportdict) 94 | context_data = reportdict["_subtest.context"] 95 | report.context = SubTestContext( 96 | msg=context_data["msg"], kwargs=context_data["kwargs"] 97 | ) 98 | return report 99 | 100 | @classmethod 101 | def _from_test_report(cls, test_report: TestReport) -> SubTestReport: 102 | return super()._from_json(test_report._to_json()) 103 | 104 | 105 | def _addSkip(self: TestCaseFunction, testcase: TestCase, reason: str) -> None: 106 | from unittest.case import _SubTest # type: ignore[attr-defined] 107 | 108 | if isinstance(testcase, _SubTest): 109 | self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] 110 | if self._excinfo is not None: 111 | exc_info = self._excinfo[-1] 112 | self.addSubTest(testcase.test_case, testcase, exc_info) # type: ignore[attr-defined] 113 | else: 114 | # For python < 3.11: the non-subtest skips have to be added by `_originaladdSkip` only after all subtest 115 | # failures are processed by `_addSubTest`. (`self.instance._outcome` has no attribute `skipped/errors` anymore.) 116 | # For python < 3.11, we also need to check if `self.instance._outcome` is `None` (this happens if the test 117 | # class/method is decorated with `unittest.skip`, see #173). 118 | if sys.version_info < (3, 11) and self.instance._outcome is not None: 119 | subtest_errors = [ 120 | x 121 | for x, y in self.instance._outcome.errors 122 | if isinstance(x, _SubTest) and y is not None 123 | ] 124 | if len(subtest_errors) == 0: 125 | self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] 126 | else: 127 | self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] 128 | 129 | 130 | def _addSubTest( 131 | self: TestCaseFunction, 132 | test_case: Any, 133 | test: TestCase, 134 | exc_info: tuple[type[BaseException], BaseException, TracebackType] | None, 135 | ) -> None: 136 | msg = test._message if isinstance(test._message, str) else None # type: ignore[attr-defined] 137 | call_info = make_call_info( 138 | ExceptionInfo(exc_info, _ispytest=True) if exc_info else None, 139 | start=0, 140 | stop=0, 141 | duration=0, 142 | when="call", 143 | ) 144 | report = self.ihook.pytest_runtest_makereport(item=self, call=call_info) 145 | sub_report = SubTestReport._from_test_report(report) 146 | sub_report.context = SubTestContext(msg, dict(test.params)) # type: ignore[attr-defined] 147 | self.ihook.pytest_runtest_logreport(report=sub_report) 148 | if check_interactive_exception(call_info, sub_report): 149 | self.ihook.pytest_exception_interact( 150 | node=self, call=call_info, report=sub_report 151 | ) 152 | 153 | # For python < 3.11: add non-subtest skips once all subtest failures are processed by # `_addSubTest`. 
154 | if sys.version_info < (3, 11): 155 | from unittest.case import _SubTest # type: ignore[attr-defined] 156 | 157 | non_subtest_skip = [ 158 | (x, y) 159 | for x, y in self.instance._outcome.skipped 160 | if not isinstance(x, _SubTest) 161 | ] 162 | subtest_errors = [ 163 | (x, y) 164 | for x, y in self.instance._outcome.errors 165 | if isinstance(x, _SubTest) and y is not None 166 | ] 167 | # Check if we have non-subtest skips: if there are also sub failures, non-subtest skips are not treated in 168 | # `_addSubTest` and have to be added using `_originaladdSkip` after all subtest failures are processed. 169 | if len(non_subtest_skip) > 0 and len(subtest_errors) > 0: 170 | # Make sure we have processed the last subtest failure 171 | last_subset_error = subtest_errors[-1] 172 | if exc_info is last_subset_error[-1]: 173 | # Add non-subtest skips (as they could not be treated in `_addSkip`) 174 | for testcase, reason in non_subtest_skip: 175 | self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] 176 | 177 | 178 | def pytest_configure(config: pytest.Config) -> None: 179 | TestCaseFunction.addSubTest = _addSubTest # type: ignore[attr-defined] 180 | TestCaseFunction.failfast = False # type: ignore[attr-defined] 181 | # This condition is to prevent `TestCaseFunction._originaladdSkip` being assigned again in a subprocess from a 182 | # parent python process where `addSkip` is already `_addSkip`. A such case is when running tests in 183 | # `test_subtests.py` where `pytester.runpytest` is used. Without this guard condition, `_originaladdSkip` is 184 | # assigned to `_addSkip` which is wrong as well as causing an infinite recursion in some cases. 185 | if not hasattr(TestCaseFunction, "_originaladdSkip"): 186 | TestCaseFunction._originaladdSkip = TestCaseFunction.addSkip # type: ignore[attr-defined] 187 | TestCaseFunction.addSkip = _addSkip # type: ignore[method-assign] 188 | 189 | # Hack (#86): the terminal does not know about the "subtests" 190 | # status, so it will by default turn the output to yellow. 191 | # This forcibly adds the new 'subtests' status. 192 | import _pytest.terminal 193 | 194 | new_types = tuple( 195 | f"subtests {outcome}" for outcome in ("passed", "failed", "skipped") 196 | ) 197 | # We need to check if we are not re-adding because we run our own tests 198 | # with pytester in-process mode, so this will be called multiple times. 
199 | if new_types[0] not in _pytest.terminal.KNOWN_TYPES: 200 | _pytest.terminal.KNOWN_TYPES = _pytest.terminal.KNOWN_TYPES + new_types # type: ignore[assignment] 201 | 202 | _pytest.terminal._color_for_type.update( 203 | { 204 | f"subtests {outcome}": _pytest.terminal._color_for_type[outcome] 205 | for outcome in ("passed", "failed", "skipped") 206 | if outcome in _pytest.terminal._color_for_type 207 | } 208 | ) 209 | 210 | 211 | def pytest_unconfigure() -> None: 212 | if hasattr(TestCaseFunction, "addSubTest"): 213 | del TestCaseFunction.addSubTest 214 | if hasattr(TestCaseFunction, "failfast"): 215 | del TestCaseFunction.failfast 216 | if hasattr(TestCaseFunction, "_originaladdSkip"): 217 | TestCaseFunction.addSkip = TestCaseFunction._originaladdSkip # type: ignore[method-assign] 218 | del TestCaseFunction._originaladdSkip 219 | 220 | 221 | @pytest.fixture 222 | def subtests(request: SubRequest) -> Generator[SubTests, None, None]: 223 | capmam = request.node.config.pluginmanager.get_plugin("capturemanager") 224 | if capmam is not None: 225 | suspend_capture_ctx = capmam.global_and_fixture_disabled 226 | else: 227 | suspend_capture_ctx = nullcontext 228 | yield SubTests(request.node.ihook, suspend_capture_ctx, request) 229 | 230 | 231 | @attr.s 232 | class SubTests: 233 | ihook: pluggy.HookRelay = attr.ib() 234 | suspend_capture_ctx: Callable[[], ContextManager] = attr.ib() 235 | request: SubRequest = attr.ib() 236 | 237 | @property 238 | def item(self) -> pytest.Item: 239 | return self.request.node 240 | 241 | def test( 242 | self, 243 | msg: str | None = None, 244 | **kwargs: Any, 245 | ) -> _SubTestContextManager: 246 | """ 247 | Context manager for subtests, capturing exceptions raised inside the subtest scope and handling 248 | them through the pytest machinery. 249 | 250 | Usage: 251 | 252 | .. code-block:: python 253 | 254 | with subtests.test(msg="subtest"): 255 | assert 1 == 1 256 | """ 257 | return _SubTestContextManager( 258 | self.ihook, 259 | msg, 260 | kwargs, 261 | request=self.request, 262 | suspend_capture_ctx=self.suspend_capture_ctx, 263 | ) 264 | 265 | 266 | @attr.s(auto_attribs=True) 267 | class _SubTestContextManager: 268 | """ 269 | Context manager for subtests, capturing exceptions raised inside the subtest scope and handling 270 | them through the pytest machinery. 271 | 272 | Note: initially this logic was implemented directly in SubTests.test() as a @contextmanager, however 273 | it is not possible to control the output fully when exiting from it due to an exception when 274 | in --exitfirst mode, so this was refactored into an explicit context manager class (#134). 
275 | """ 276 | 277 | ihook: pluggy.HookRelay 278 | msg: str | None 279 | kwargs: dict[str, Any] 280 | suspend_capture_ctx: Callable[[], ContextManager] 281 | request: SubRequest 282 | 283 | def __enter__(self) -> None: 284 | __tracebackhide__ = True 285 | 286 | self._start = time.time() 287 | self._precise_start = time.perf_counter() 288 | self._exc_info = None 289 | 290 | self._exit_stack = ExitStack() 291 | self._captured_output = self._exit_stack.enter_context( 292 | capturing_output(self.request) 293 | ) 294 | self._captured_logs = self._exit_stack.enter_context( 295 | capturing_logs(self.request) 296 | ) 297 | 298 | def __exit__( 299 | self, 300 | exc_type: type[Exception] | None, 301 | exc_val: Exception | None, 302 | exc_tb: TracebackType | None, 303 | ) -> bool: 304 | __tracebackhide__ = True 305 | try: 306 | if exc_val is not None: 307 | exc_info = ExceptionInfo.from_exception(exc_val) 308 | else: 309 | exc_info = None 310 | finally: 311 | self._exit_stack.close() 312 | 313 | precise_stop = time.perf_counter() 314 | duration = precise_stop - self._precise_start 315 | stop = time.time() 316 | 317 | call_info = make_call_info( 318 | exc_info, start=self._start, stop=stop, duration=duration, when="call" 319 | ) 320 | report = self.ihook.pytest_runtest_makereport( 321 | item=self.request.node, call=call_info 322 | ) 323 | sub_report = SubTestReport._from_test_report(report) 324 | sub_report.context = SubTestContext(self.msg, self.kwargs.copy()) 325 | 326 | self._captured_output.update_report(sub_report) 327 | self._captured_logs.update_report(sub_report) 328 | 329 | with self.suspend_capture_ctx(): 330 | self.ihook.pytest_runtest_logreport(report=sub_report) 331 | 332 | if check_interactive_exception(call_info, sub_report): 333 | self.ihook.pytest_exception_interact( 334 | node=self.request.node, call=call_info, report=sub_report 335 | ) 336 | 337 | if exc_val is not None: 338 | if self.request.session.shouldfail: 339 | return False 340 | return True 341 | 342 | 343 | def make_call_info( 344 | exc_info: ExceptionInfo[BaseException] | None, 345 | *, 346 | start: float, 347 | stop: float, 348 | duration: float, 349 | when: Literal["collect", "setup", "call", "teardown"], 350 | ) -> CallInfo: 351 | return CallInfo( 352 | None, 353 | exc_info, 354 | start=start, 355 | stop=stop, 356 | duration=duration, 357 | when=when, 358 | _ispytest=True, 359 | ) 360 | 361 | 362 | @contextmanager 363 | def capturing_output(request: SubRequest) -> Iterator[Captured]: 364 | option = request.config.getoption("capture", None) 365 | 366 | # capsys or capfd are active, subtest should not capture. 
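    # If the test itself requested capsys/capfd, that fixture already owns capturing,
    # and starting another CaptureFixture here would compete with it for the same
    # streams; in that case (and also when capturing is disabled via ``-s``),
    # ``fixture`` stays ``None`` below and the subtest does not capture on its own.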
367 | capman = request.config.pluginmanager.getplugin("capturemanager") 368 | capture_fixture_active = getattr(capman, "_capture_fixture", None) 369 | 370 | if option == "sys" and not capture_fixture_active: 371 | with ignore_pytest_private_warning(): 372 | fixture = CaptureFixture(SysCapture, request) 373 | elif option == "fd" and not capture_fixture_active: 374 | with ignore_pytest_private_warning(): 375 | fixture = CaptureFixture(FDCapture, request) 376 | else: 377 | fixture = None 378 | 379 | if fixture is not None: 380 | fixture._start() 381 | 382 | captured = Captured() 383 | try: 384 | yield captured 385 | finally: 386 | if fixture is not None: 387 | out, err = fixture.readouterr() 388 | fixture.close() 389 | captured.out = out 390 | captured.err = err 391 | 392 | 393 | @contextmanager 394 | def capturing_logs( 395 | request: SubRequest, 396 | ) -> Iterator[CapturedLogs | NullCapturedLogs]: 397 | logging_plugin = request.config.pluginmanager.getplugin("logging-plugin") 398 | if logging_plugin is None: 399 | yield NullCapturedLogs() 400 | else: 401 | handler = LogCaptureHandler() 402 | handler.setFormatter(logging_plugin.formatter) 403 | 404 | captured_logs = CapturedLogs(handler) 405 | with catching_logs(handler): 406 | yield captured_logs 407 | 408 | 409 | @contextmanager 410 | def ignore_pytest_private_warning() -> Generator[None, None, None]: 411 | import warnings 412 | 413 | with warnings.catch_warnings(): 414 | warnings.filterwarnings( 415 | "ignore", 416 | "A private pytest class or function was used.", 417 | category=pytest.PytestDeprecationWarning, 418 | ) 419 | yield 420 | 421 | 422 | @attr.s 423 | class Captured: 424 | out = attr.ib(default="", type=str) 425 | err = attr.ib(default="", type=str) 426 | 427 | def update_report(self, report: pytest.TestReport) -> None: 428 | if self.out: 429 | report.sections.append(("Captured stdout call", self.out)) 430 | if self.err: 431 | report.sections.append(("Captured stderr call", self.err)) 432 | 433 | 434 | class CapturedLogs: 435 | def __init__(self, handler: LogCaptureHandler) -> None: 436 | self._handler = handler 437 | 438 | def update_report(self, report: pytest.TestReport) -> None: 439 | report.sections.append(("Captured log call", self._handler.stream.getvalue())) 440 | 441 | 442 | class NullCapturedLogs: 443 | def update_report(self, report: pytest.TestReport) -> None: 444 | pass 445 | 446 | 447 | def pytest_report_to_serializable(report: pytest.TestReport) -> dict[str, Any] | None: 448 | if isinstance(report, SubTestReport): 449 | return report._to_json() 450 | return None 451 | 452 | 453 | def pytest_report_from_serializable(data: dict[str, Any]) -> SubTestReport | None: 454 | if data.get("_report_type") == "SubTestReport": 455 | return SubTestReport._from_json(data) 456 | return None 457 | 458 | 459 | @pytest.hookimpl(tryfirst=True) 460 | def pytest_report_teststatus( 461 | report: pytest.TestReport, 462 | config: pytest.Config, 463 | ) -> tuple[str, str, str | Mapping[str, bool]] | None: 464 | if report.when != "call" or not isinstance(report, SubTestReport): 465 | return None 466 | 467 | outcome = report.outcome 468 | description = report.sub_test_description() 469 | no_output = ("", "", "") 470 | 471 | if hasattr(report, "wasxfail"): 472 | if config.option.no_subtests_reports and outcome != "skipped": 473 | return no_output 474 | elif outcome == "skipped": 475 | category = "xfailed" 476 | short = "y" # x letter is used for regular xfail, y for subtest xfail 477 | status = "SUBXFAIL" 478 | elif outcome == "passed": 479 
| category = "xpassed" 480 | short = "Y" # X letter is used for regular xpass, Y for subtest xpass 481 | status = "SUBXPASS" 482 | else: 483 | # This should not normally happen, unless some plugin is setting wasxfail without 484 | # the correct outcome. Pytest expects the call outcome to be either skipped or passed in case of xfail. 485 | # Let's pass this report to the next hook. 486 | return None 487 | short = "" if config.option.no_subtests_shortletter else short 488 | return f"subtests {category}", short, f"{description} {status}" 489 | 490 | if config.option.no_subtests_reports and outcome != "failed": 491 | return no_output 492 | elif report.passed: 493 | short = "" if config.option.no_subtests_shortletter else "," 494 | return f"subtests {outcome}", short, f"{description} SUBPASS" 495 | elif report.skipped: 496 | short = "" if config.option.no_subtests_shortletter else "-" 497 | return outcome, short, f"{description} SUBSKIP" 498 | elif outcome == "failed": 499 | short = "" if config.option.no_subtests_shortletter else "u" 500 | return outcome, short, f"{description} SUBFAIL" 501 | 502 | return None 503 | -------------------------------------------------------------------------------- /tests/test_subtests.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import sys 4 | from pathlib import Path 5 | from typing import Literal 6 | 7 | import pytest 8 | 9 | IS_PY311 = sys.version_info[:2] >= (3, 11) 10 | 11 | 12 | @pytest.mark.parametrize("mode", ["normal", "xdist"]) 13 | class TestFixture: 14 | """ 15 | Tests for ``subtests`` fixture. 16 | """ 17 | 18 | @pytest.fixture 19 | def simple_script(self, pytester: pytest.Pytester) -> None: 20 | pytester.makepyfile( 21 | """ 22 | def test_foo(subtests): 23 | for i in range(5): 24 | with subtests.test(msg="custom", i=i): 25 | assert i % 2 == 0 26 | """ 27 | ) 28 | 29 | def test_simple_terminal_normal( 30 | self, 31 | simple_script: None, 32 | pytester: pytest.Pytester, 33 | mode: Literal["normal", "xdist"], 34 | ) -> None: 35 | if mode == "normal": 36 | result = pytester.runpytest() 37 | expected_lines = ["collected 1 item"] 38 | else: 39 | assert mode == "xdist" 40 | pytest.importorskip("xdist") 41 | result = pytester.runpytest("-n1") 42 | expected_lines = ["1 worker [1 item]"] 43 | 44 | expected_lines += [ 45 | "* test_foo [[]custom[]] (i=1) *", 46 | "* test_foo [[]custom[]] (i=3) *", 47 | "* 2 failed, 1 passed, 3 subtests passed in *", 48 | ] 49 | result.stdout.fnmatch_lines(expected_lines) 50 | 51 | def test_simple_terminal_verbose( 52 | self, 53 | simple_script: None, 54 | pytester: pytest.Pytester, 55 | mode: Literal["normal", "xdist"], 56 | ) -> None: 57 | if mode == "normal": 58 | result = pytester.runpytest("-v") 59 | expected_lines = [ 60 | "*collected 1 item", 61 | "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=0) SUBPASS *100%*", 62 | "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=1) SUBFAIL *100%*", 63 | "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=2) SUBPASS *100%*", 64 | "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=3) SUBFAIL *100%*", 65 | "test_simple_terminal_verbose.py::test_foo [[]custom[]] (i=4) SUBPASS *100%*", 66 | "test_simple_terminal_verbose.py::test_foo PASSED *100%*", 67 | ] 68 | else: 69 | assert mode == "xdist" 70 | pytest.importorskip("xdist") 71 | result = pytester.runpytest("-n1", "-v") 72 | expected_lines = [ 73 | "1 worker [1 item]", 74 | "*gw0*100%* 
test_simple_terminal_verbose.py::test_foo*", 75 | "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", 76 | "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", 77 | "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", 78 | "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", 79 | "*gw0*100%* test_simple_terminal_verbose.py::test_foo*", 80 | ] 81 | 82 | expected_lines += [ 83 | "* test_foo [[]custom[]] (i=1) *", 84 | "* test_foo [[]custom[]] (i=3) *", 85 | "* 2 failed, 1 passed, 3 subtests passed in *", 86 | ] 87 | result.stdout.fnmatch_lines(expected_lines) 88 | 89 | def test_skip( 90 | self, pytester: pytest.Pytester, mode: Literal["normal", "xdist"] 91 | ) -> None: 92 | pytester.makepyfile( 93 | """ 94 | import pytest 95 | def test_foo(subtests): 96 | for i in range(5): 97 | with subtests.test(msg="custom", i=i): 98 | if i % 2 == 0: 99 | pytest.skip('even number') 100 | """ 101 | ) 102 | if mode == "normal": 103 | result = pytester.runpytest() 104 | expected_lines = ["collected 1 item"] 105 | else: 106 | assert mode == "xdist" 107 | pytest.importorskip("xdist") 108 | result = pytester.runpytest("-n1") 109 | expected_lines = ["1 worker [1 item]"] 110 | expected_lines += ["* 1 passed, 3 skipped, 2 subtests passed in *"] 111 | result.stdout.fnmatch_lines(expected_lines) 112 | 113 | def test_xfail( 114 | self, pytester: pytest.Pytester, mode: Literal["normal", "xdist"] 115 | ) -> None: 116 | pytester.makepyfile( 117 | """ 118 | import pytest 119 | def test_foo(subtests): 120 | for i in range(5): 121 | with subtests.test(msg="custom", i=i): 122 | if i % 2 == 0: 123 | pytest.xfail('even number') 124 | """ 125 | ) 126 | if mode == "normal": 127 | result = pytester.runpytest() 128 | expected_lines = ["collected 1 item"] 129 | else: 130 | assert mode == "xdist" 131 | pytest.importorskip("xdist") 132 | result = pytester.runpytest("-n1") 133 | expected_lines = ["1 worker [1 item]"] 134 | expected_lines += ["* 1 passed, 2 subtests passed, 3 subtests xfailed in *"] 135 | result.stdout.fnmatch_lines(expected_lines) 136 | 137 | def test_typing_exported( 138 | self, pytester: pytest.Pytester, mode: Literal["normal", "xdist"] 139 | ) -> None: 140 | pytester.makepyfile( 141 | """ 142 | from pytest_subtests import SubTests 143 | 144 | def test_typing_exported(subtests: SubTests) -> None: 145 | assert isinstance(subtests, SubTests) 146 | """ 147 | ) 148 | if mode == "normal": 149 | result = pytester.runpytest() 150 | expected_lines = ["collected 1 item"] 151 | else: 152 | assert mode == "xdist" 153 | pytest.importorskip("xdist") 154 | result = pytester.runpytest("-n1") 155 | expected_lines = ["1 worker [1 item]"] 156 | expected_lines += ["* 1 passed *"] 157 | result.stdout.fnmatch_lines(expected_lines) 158 | 159 | def test_no_subtests_reports( 160 | self, pytester: pytest.Pytester, mode: Literal["normal", "xdist"] 161 | ) -> None: 162 | pytester.makepyfile( 163 | """ 164 | import pytest 165 | 166 | def test_foo(subtests): 167 | for i in range(5): 168 | with subtests.test(msg="custom", i=i): 169 | pass 170 | """ 171 | ) 172 | # Without `--no-subtests-reports`, subtests are reported normally. 173 | result = pytester.runpytest("-v") 174 | result.stdout.fnmatch_lines( 175 | [ 176 | "*collected 1 item*", 177 | "test_no_subtests_reports.py::test_foo * (i=0) SUBPASS*", 178 | "*test_no_subtests_reports.py::test_foo PASSED*", 179 | "* 1 passed, 5 subtests passed in*", 180 | ] 181 | ) 182 | 183 | # With `--no-subtests-reports`, passing subtests are no longer reported. 
184 | result = pytester.runpytest("-v", "--no-subtests-reports") 185 | result.stdout.fnmatch_lines( 186 | [ 187 | "*collected 1 item*", 188 | "*test_no_subtests_reports.py::test_foo PASSED*", 189 | "* 1 passed in*", 190 | ] 191 | ) 192 | result.stdout.no_fnmatch_line("*SUBPASS*") 193 | 194 | # Rewrite the test file so the tests fail. Even with the flag, failed subtests are still reported. 195 | pytester.makepyfile( 196 | """ 197 | import pytest 198 | 199 | def test_foo(subtests): 200 | for i in range(5): 201 | with subtests.test(msg="custom", i=i): 202 | assert False 203 | """ 204 | ) 205 | result = pytester.runpytest("-v", "--no-subtests-reports") 206 | result.stdout.fnmatch_lines( 207 | [ 208 | "*collected 1 item*", 209 | "test_no_subtests_reports.py::test_foo * (i=0) SUBFAIL*", 210 | "*test_no_subtests_reports.py::test_foo PASSED*", 211 | "* 5 failed, 1 passed in*", 212 | ] 213 | ) 214 | 215 | 216 | class TestSubTest: 217 | """ 218 | Test Test.subTest functionality. 219 | """ 220 | 221 | @pytest.fixture 222 | def simple_script(self, pytester: pytest.Pytester) -> Path: 223 | return pytester.makepyfile( 224 | """ 225 | from unittest import TestCase, main 226 | 227 | class T(TestCase): 228 | 229 | def test_foo(self): 230 | for i in range(5): 231 | with self.subTest(msg="custom", i=i): 232 | self.assertEqual(i % 2, 0) 233 | 234 | if __name__ == '__main__': 235 | main() 236 | """ 237 | ) 238 | 239 | @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) 240 | def test_simple_terminal_normal( 241 | self, 242 | simple_script: Path, 243 | pytester: pytest.Pytester, 244 | runner: Literal["unittest", "pytest-normal", "pytest-xdist"], 245 | ) -> None: 246 | suffix = ".test_foo" if IS_PY311 else "" 247 | if runner == "unittest": 248 | result = pytester.run(sys.executable, simple_script) 249 | result.stderr.fnmatch_lines( 250 | [ 251 | f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=1)", 252 | "AssertionError: 1 != 0", 253 | f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=3)", 254 | "AssertionError: 1 != 0", 255 | "Ran 1 test in *", 256 | "FAILED (failures=2)", 257 | ] 258 | ) 259 | else: 260 | if runner == "pytest-normal": 261 | result = pytester.runpytest(simple_script) 262 | expected_lines = ["collected 1 item"] 263 | else: 264 | assert runner == "pytest-xdist" 265 | pytest.importorskip("xdist") 266 | result = pytester.runpytest(simple_script, "-n1") 267 | expected_lines = ["1 worker [1 item]"] 268 | result.stdout.fnmatch_lines( 269 | expected_lines 270 | + [ 271 | "* T.test_foo [[]custom[]] (i=1) *", 272 | "E * AssertionError: 1 != 0", 273 | "* T.test_foo [[]custom[]] (i=3) *", 274 | "E * AssertionError: 1 != 0", 275 | "* 2 failed, 1 passed, 3 subtests passed in *", 276 | ] 277 | ) 278 | 279 | @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) 280 | def test_simple_terminal_verbose( 281 | self, 282 | simple_script: Path, 283 | pytester: pytest.Pytester, 284 | runner: Literal["unittest", "pytest-normal", "pytest-xdist"], 285 | ) -> None: 286 | suffix = ".test_foo" if IS_PY311 else "" 287 | if runner == "unittest": 288 | result = pytester.run(sys.executable, simple_script, "-v") 289 | result.stderr.fnmatch_lines( 290 | [ 291 | f"test_foo (__main__.T{suffix}) ... 
", 292 | f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=1)", 293 | "AssertionError: 1 != 0", 294 | f"FAIL: test_foo (__main__.T{suffix}) [custom] (i=3)", 295 | "AssertionError: 1 != 0", 296 | "Ran 1 test in *", 297 | "FAILED (failures=2)", 298 | ] 299 | ) 300 | else: 301 | if runner == "pytest-normal": 302 | result = pytester.runpytest(simple_script, "-v") 303 | expected_lines = [ 304 | "*collected 1 item", 305 | "test_simple_terminal_verbose.py::T::test_foo [[]custom[]] (i=1) SUBFAIL *100%*", 306 | "test_simple_terminal_verbose.py::T::test_foo [[]custom[]] (i=3) SUBFAIL *100%*", 307 | "test_simple_terminal_verbose.py::T::test_foo PASSED *100%*", 308 | ] 309 | else: 310 | assert runner == "pytest-xdist" 311 | pytest.importorskip("xdist") 312 | result = pytester.runpytest(simple_script, "-n1", "-v") 313 | expected_lines = [ 314 | "1 worker [1 item]", 315 | "*gw0*100%* SUBFAIL test_simple_terminal_verbose.py::T::test_foo*", 316 | "*gw0*100%* SUBFAIL test_simple_terminal_verbose.py::T::test_foo*", 317 | "*gw0*100%* PASSED test_simple_terminal_verbose.py::T::test_foo*", 318 | ] 319 | result.stdout.fnmatch_lines( 320 | expected_lines 321 | + [ 322 | "* T.test_foo [[]custom[]] (i=1) *", 323 | "E * AssertionError: 1 != 0", 324 | "* T.test_foo [[]custom[]] (i=3) *", 325 | "E * AssertionError: 1 != 0", 326 | "* 2 failed, 1 passed, 3 subtests passed in *", 327 | ] 328 | ) 329 | 330 | @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) 331 | def test_skip( 332 | self, 333 | pytester: pytest.Pytester, 334 | runner: Literal["unittest", "pytest-normal", "pytest-xdist"], 335 | ) -> None: 336 | p = pytester.makepyfile( 337 | """ 338 | from unittest import TestCase, main 339 | 340 | class T(TestCase): 341 | 342 | def test_foo(self): 343 | for i in range(5): 344 | with self.subTest(msg="custom", i=i): 345 | if i % 2 == 0: 346 | self.skipTest('even number') 347 | 348 | if __name__ == '__main__': 349 | main() 350 | """ 351 | ) 352 | if runner == "unittest": 353 | result = pytester.runpython(p) 354 | result.stderr.fnmatch_lines(["Ran 1 test in *", "OK (skipped=3)"]) 355 | else: 356 | pytest.xfail("Not producing the expected results (#5)") 357 | result = pytester.runpytest(p) # type:ignore[unreachable] 358 | result.stdout.fnmatch_lines( 359 | ["collected 1 item", "* 3 skipped, 1 passed in *"] 360 | ) 361 | 362 | @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) 363 | @pytest.mark.xfail(reason="Not producing the expected results (#5)") 364 | def test_xfail( 365 | self, 366 | pytester: pytest.Pytester, 367 | runner: Literal["unittest", "pytest-normal", "pytest-xdist"], 368 | ) -> None: 369 | p = pytester.makepyfile( 370 | """ 371 | import pytest 372 | from unittest import expectedFailure, TestCase, main 373 | 374 | class T(TestCase): 375 | @expectedFailure 376 | def test_foo(self): 377 | for i in range(5): 378 | with self.subTest(msg="custom", i=i): 379 | if i % 2 == 0: 380 | raise pytest.xfail('even number') 381 | 382 | if __name__ == '__main__': 383 | main() 384 | """ 385 | ) 386 | if runner == "unittest": 387 | result = pytester.runpython(p) 388 | result.stderr.fnmatch_lines(["Ran 1 test in *", "OK (expected failures=3)"]) 389 | else: 390 | result = pytester.runpytest(p) 391 | result.stdout.fnmatch_lines( 392 | ["collected 1 item", "* 3 xfailed, 1 passed in *"] 393 | ) 394 | 395 | @pytest.mark.parametrize("runner", ["pytest-normal"]) 396 | def test_only_original_skip_is_called( 397 | self, 398 | pytester: pytest.Pytester, 399 | monkeypatch: 
pytest.MonkeyPatch, 400 | runner: Literal["pytest-normal"], 401 | ) -> None: 402 | """Regression test for #173.""" 403 | monkeypatch.setenv("COLUMNS", "200") 404 | p = pytester.makepyfile( 405 | """ 406 | import unittest 407 | from unittest import TestCase, main 408 | 409 | @unittest.skip("skip this test") 410 | class T(unittest.TestCase): 411 | def test_foo(self): 412 | assert 1 == 2 413 | 414 | if __name__ == '__main__': 415 | main() 416 | """ 417 | ) 418 | result = pytester.runpytest(p, "-v", "-rsf") 419 | result.stdout.fnmatch_lines( 420 | ["SKIPPED [1] test_only_original_skip_is_called.py:6: skip this test"] 421 | ) 422 | 423 | @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) 424 | def test_skip_with_failure( 425 | self, 426 | pytester: pytest.Pytester, 427 | monkeypatch: pytest.MonkeyPatch, 428 | runner: Literal["unittest", "pytest-normal", "pytest-xdist"], 429 | ) -> None: 430 | monkeypatch.setenv("COLUMNS", "200") 431 | p = pytester.makepyfile( 432 | """ 433 | import pytest 434 | from unittest import expectedFailure, TestCase, main 435 | 436 | class T(TestCase): 437 | def test_foo(self): 438 | for i in range(10): 439 | with self.subTest("custom message", i=i): 440 | if i < 4: 441 | self.skipTest(f"skip subtest i={i}") 442 | assert i < 4 443 | 444 | if __name__ == '__main__': 445 | main() 446 | """ 447 | ) 448 | if runner == "unittest": 449 | result = pytester.runpython(p) 450 | if sys.version_info < (3, 11): 451 | result.stderr.re_match_lines( 452 | [ 453 | r"FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=4\).*", 454 | r"FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=9\).*", 455 | r"Ran 1 test in .*", 456 | r"FAILED \(failures=6, skipped=4\)", 457 | ] 458 | ) 459 | else: 460 | result.stderr.re_match_lines( 461 | [ 462 | r"FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=4\).*", 463 | r"FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=9\).*", 464 | r"Ran 1 test in .*", 465 | r"FAILED \(failures=6, skipped=4\)", 466 | ] 467 | ) 468 | elif runner == "pytest-normal": 469 | result = pytester.runpytest(p, "-v", "-rsf") 470 | result.stdout.re_match_lines( 471 | [ 472 | r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=0\) SUBSKIP \(skip subtest i=0\) .*", 473 | r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=3\) SUBSKIP \(skip subtest i=3\) .*", 474 | r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", 475 | r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=9\) SUBFAIL .*", 476 | "test_skip_with_failure.py::T::test_foo PASSED .*", 477 | r"[custom message] (i=0) SUBSKIP [1] test_skip_with_failure.py:5: skip subtest i=0", 478 | r"[custom message] (i=0) SUBSKIP [1] test_skip_with_failure.py:5: skip subtest i=3", 479 | r"[custom message] (i=4) SUBFAIL test_skip_with_failure.py::T::test_foo - AssertionError: assert 4 < 4", 480 | r"[custom message] (i=9) SUBFAIL test_skip_with_failure.py::T::test_foo - AssertionError: assert 9 < 4", 481 | r".* 6 failed, 1 passed, 4 skipped in .*", 482 | ] 483 | ) 484 | else: 485 | pytest.xfail("Not producing the expected results (#5)") 486 | result = pytester.runpytest(p) # type:ignore[unreachable] 487 | result.stdout.fnmatch_lines( 488 | ["collected 1 item", "* 3 skipped, 1 passed in *"] 489 | ) 490 | 491 | @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) 492 | def test_skip_with_failure_and_non_subskip( 493 | self, 494 | pytester: pytest.Pytester, 495 | monkeypatch: 
pytest.MonkeyPatch, 496 | runner: Literal["unittest", "pytest-normal", "pytest-xdist"], 497 | ) -> None: 498 | monkeypatch.setenv("COLUMNS", "200") 499 | p = pytester.makepyfile( 500 | """ 501 | import pytest 502 | from unittest import expectedFailure, TestCase, main 503 | 504 | class T(TestCase): 505 | def test_foo(self): 506 | for i in range(10): 507 | with self.subTest("custom message", i=i): 508 | if i < 4: 509 | self.skipTest(f"skip subtest i={i}") 510 | assert i < 4 511 | self.skipTest(f"skip the test") 512 | 513 | if __name__ == '__main__': 514 | main() 515 | """ 516 | ) 517 | if runner == "unittest": 518 | result = pytester.runpython(p) 519 | if sys.version_info < (3, 11): 520 | result.stderr.re_match_lines( 521 | [ 522 | r"FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=4\).*", 523 | r"FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=9\).*", 524 | r"Ran 1 test in .*", 525 | r"FAILED \(failures=6, skipped=5\)", 526 | ] 527 | ) 528 | else: 529 | result.stderr.re_match_lines( 530 | [ 531 | r"FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=4\).*", 532 | r"FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=9\).*", 533 | r"Ran 1 test in .*", 534 | r"FAILED \(failures=6, skipped=5\)", 535 | ] 536 | ) 537 | elif runner == "pytest-normal": 538 | result = pytester.runpytest(p, "-v", "-rsf") 539 | # The `(i=0)` is not correct but it's given by pytest `TerminalReporter` without `--no-fold-skipped` 540 | result.stdout.re_match_lines( 541 | [ 542 | r"test_skip_with_failure_and_non_subskip.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", 543 | r"test_skip_with_failure_and_non_subskip.py::T::test_foo SKIPPED \(skip the test\)", 544 | r"\[custom message\] \(i=0\) SUBSKIP \[1\] test_skip_with_failure_and_non_subskip.py:5: skip subtest i=3", 545 | r"\[custom message\] \(i=0\) SUBSKIP \[1\] test_skip_with_failure_and_non_subskip.py:5: skip the test", 546 | r"\[custom message\] \(i=4\) SUBFAIL test_skip_with_failure_and_non_subskip.py::T::test_foo", 547 | r".* 6 failed, 5 skipped in .*", 548 | ] 549 | ) 550 | # Check with `--no-fold-skipped` (which gives the correct information). 
551 | if sys.version_info >= (3, 10) and pytest.version_tuple[:2] >= (8, 3): 552 | result = pytester.runpytest(p, "-v", "--no-fold-skipped", "-rsf") 553 | result.stdout.re_match_lines( 554 | [ 555 | r"test_skip_with_failure_and_non_subskip.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", 556 | r"test_skip_with_failure_and_non_subskip.py::T::test_foo SKIPPED \(skip the test\).*", 557 | r"\[custom message\] \(i=3\) SUBSKIP test_skip_with_failure_and_non_subskip.py::T::test_foo - Skipped: skip subtest i=3", 558 | r"SKIPPED test_skip_with_failure_and_non_subskip.py::T::test_foo - Skipped: skip the test", 559 | r"\[custom message\] \(i=4\) SUBFAIL test_skip_with_failure_and_non_subskip.py::T::test_foo", 560 | r".* 6 failed, 5 skipped in .*", 561 | ] 562 | ) 563 | else: 564 | pytest.xfail("Not producing the expected results (#5)") 565 | result = pytester.runpytest(p) # type:ignore[unreachable] 566 | result.stdout.fnmatch_lines( 567 | ["collected 1 item", "* 3 skipped, 1 passed in *"] 568 | ) 569 | 570 | 571 | class TestCapture: 572 | def create_file(self, pytester: pytest.Pytester) -> None: 573 | pytester.makepyfile( 574 | """ 575 | import sys 576 | def test(subtests): 577 | print() 578 | print('start test') 579 | 580 | with subtests.test(i='A'): 581 | print("hello stdout A") 582 | print("hello stderr A", file=sys.stderr) 583 | assert 0 584 | 585 | with subtests.test(i='B'): 586 | print("hello stdout B") 587 | print("hello stderr B", file=sys.stderr) 588 | assert 0 589 | 590 | print('end test') 591 | assert 0 592 | """ 593 | ) 594 | 595 | def test_capturing(self, pytester: pytest.Pytester) -> None: 596 | self.create_file(pytester) 597 | result = pytester.runpytest() 598 | result.stdout.fnmatch_lines( 599 | [ 600 | "*__ test (i='A') __*", 601 | "*Captured stdout call*", 602 | "hello stdout A", 603 | "*Captured stderr call*", 604 | "hello stderr A", 605 | "*__ test (i='B') __*", 606 | "*Captured stdout call*", 607 | "hello stdout B", 608 | "*Captured stderr call*", 609 | "hello stderr B", 610 | "*__ test __*", 611 | "*Captured stdout call*", 612 | "start test", 613 | "end test", 614 | ] 615 | ) 616 | 617 | def test_no_capture(self, pytester: pytest.Pytester) -> None: 618 | self.create_file(pytester) 619 | result = pytester.runpytest("-s") 620 | result.stdout.fnmatch_lines( 621 | [ 622 | "start test", 623 | "hello stdout A", 624 | "uhello stdout B", 625 | "uend test", 626 | "*__ test (i='A') __*", 627 | "*__ test (i='B') __*", 628 | "*__ test __*", 629 | ] 630 | ) 631 | result.stderr.fnmatch_lines(["hello stderr A", "hello stderr B"]) 632 | 633 | @pytest.mark.parametrize("fixture", ["capsys", "capfd"]) 634 | def test_capture_with_fixture( 635 | self, pytester: pytest.Pytester, fixture: Literal["capsys", "capfd"] 636 | ) -> None: 637 | pytester.makepyfile( 638 | rf""" 639 | import sys 640 | 641 | def test(subtests, {fixture}): 642 | print('start test') 643 | 644 | with subtests.test(i='A'): 645 | print("hello stdout A") 646 | print("hello stderr A", file=sys.stderr) 647 | 648 | out, err = {fixture}.readouterr() 649 | assert out == 'start test\nhello stdout A\n' 650 | assert err == 'hello stderr A\n' 651 | """ 652 | ) 653 | result = pytester.runpytest() 654 | result.stdout.fnmatch_lines( 655 | [ 656 | "*1 passed*", 657 | ] 658 | ) 659 | 660 | 661 | class TestLogging: 662 | def create_file(self, pytester: pytest.Pytester) -> None: 663 | pytester.makepyfile( 664 | """ 665 | import logging 666 | 667 | def test_foo(subtests): 668 | logging.info("before") 669 | 670 | with subtests.test("sub1"): 671 
| print("sub1 stdout") 672 | logging.info("sub1 logging") 673 | 674 | with subtests.test("sub2"): 675 | print("sub2 stdout") 676 | logging.info("sub2 logging") 677 | assert False 678 | """ 679 | ) 680 | 681 | def test_capturing(self, pytester: pytest.Pytester) -> None: 682 | self.create_file(pytester) 683 | result = pytester.runpytest("--log-level=INFO") 684 | result.stdout.fnmatch_lines( 685 | [ 686 | "*___ test_foo [[]sub2[]] __*", 687 | "*-- Captured stdout call --*", 688 | "sub2 stdout", 689 | "*-- Captured log call ---*", 690 | "INFO root:test_capturing.py:12 sub2 logging", 691 | "*== short test summary info ==*", 692 | ] 693 | ) 694 | 695 | def test_caplog(self, pytester: pytest.Pytester) -> None: 696 | pytester.makepyfile( 697 | """ 698 | import logging 699 | 700 | def test(subtests, caplog): 701 | caplog.set_level(logging.INFO) 702 | logging.info("start test") 703 | 704 | with subtests.test("sub1"): 705 | logging.info("inside %s", "subtest1") 706 | 707 | assert len(caplog.records) == 2 708 | assert caplog.records[0].getMessage() == "start test" 709 | assert caplog.records[1].getMessage() == "inside subtest1" 710 | """ 711 | ) 712 | result = pytester.runpytest() 713 | result.stdout.fnmatch_lines( 714 | [ 715 | "*1 passed*", 716 | ] 717 | ) 718 | 719 | def test_no_logging(self, pytester: pytest.Pytester) -> None: 720 | pytester.makepyfile( 721 | """ 722 | import logging 723 | 724 | def test(subtests): 725 | logging.info("start log line") 726 | 727 | with subtests.test("sub passing"): 728 | logging.info("inside %s", "passing log line") 729 | 730 | with subtests.test("sub failing"): 731 | logging.info("inside %s", "failing log line") 732 | assert False 733 | 734 | logging.info("end log line") 735 | """ 736 | ) 737 | result = pytester.runpytest("-p no:logging") 738 | result.stdout.fnmatch_lines( 739 | [ 740 | "*1 passed*", 741 | ] 742 | ) 743 | result.stdout.no_fnmatch_line("*root:test_no_logging.py*log line*") 744 | 745 | 746 | class TestDebugging: 747 | """Check --pdb support for subtests fixture and TestCase.subTest.""" 748 | 749 | class _FakePdb: 750 | """ 751 | Fake debugger class implementation that tracks which methods were called on it. 
752 | """ 753 | 754 | quitting: bool = False 755 | calls: list[str] = [] 756 | 757 | def __init__(self, *_: object, **__: object) -> None: 758 | self.calls.append("init") 759 | 760 | def reset(self) -> None: 761 | self.calls.append("reset") 762 | 763 | def interaction(self, *_: object) -> None: 764 | self.calls.append("interaction") 765 | 766 | @pytest.fixture(autouse=True) 767 | def cleanup_calls(self) -> None: 768 | self._FakePdb.calls.clear() 769 | 770 | def test_pdb_fixture( 771 | self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch 772 | ) -> None: 773 | pytester.makepyfile( 774 | """ 775 | def test(subtests): 776 | with subtests.test(): 777 | assert 0 778 | """ 779 | ) 780 | self.runpytest_and_check_pdb(pytester, monkeypatch) 781 | 782 | def test_pdb_unittest( 783 | self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch 784 | ) -> None: 785 | pytester.makepyfile( 786 | """ 787 | from unittest import TestCase 788 | class Test(TestCase): 789 | def test(self): 790 | with self.subTest(): 791 | assert 0 792 | """ 793 | ) 794 | self.runpytest_and_check_pdb(pytester, monkeypatch) 795 | 796 | def runpytest_and_check_pdb( 797 | self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch 798 | ) -> None: 799 | # Install the fake pdb implementation in pytest_subtests so we can reference 800 | # it in the command line (any module would do). 801 | import pytest_subtests 802 | 803 | monkeypatch.setattr(pytest_subtests, "_CustomPdb", self._FakePdb, raising=False) 804 | result = pytester.runpytest("--pdb", "--pdbcls=pytest_subtests:_CustomPdb") 805 | 806 | # Ensure pytest entered in debugging mode when encountering the failing 807 | # assert. 808 | result.stdout.fnmatch_lines("*entering PDB*") 809 | assert self._FakePdb.calls == ["init", "reset", "interaction"] 810 | 811 | 812 | def test_exitfirst(pytester: pytest.Pytester) -> None: 813 | """ 814 | Validate that when passing --exitfirst the test exits after the first failed subtest. 815 | """ 816 | pytester.makepyfile( 817 | """ 818 | def test_foo(subtests): 819 | with subtests.test("sub1"): 820 | assert False 821 | 822 | with subtests.test("sub2"): 823 | assert False 824 | """ 825 | ) 826 | result = pytester.runpytest("--exitfirst") 827 | assert result.parseoutcomes()["failed"] == 2 828 | result.stdout.fnmatch_lines( 829 | [ 830 | "*[[]sub1[]] SUBFAIL test_exitfirst.py::test_foo - assert False*", 831 | "FAILED test_exitfirst.py::test_foo - assert False", 832 | "* stopping after 2 failures*", 833 | ], 834 | consecutive=True, 835 | ) 836 | result.stdout.no_fnmatch_line("*sub2*") # sub2 not executed. 837 | --------------------------------------------------------------------------------