├── tests ├── __init__.py ├── args │ ├── __init__.py │ ├── parse │ │ ├── __init__.py │ │ └── test_args.py │ ├── regex │ │ ├── __init__.py │ │ └── test_args.py │ ├── cfparse │ │ ├── __init__.py │ │ └── test_args.py │ └── test_common.py ├── steps │ ├── __init__.py │ ├── test_given.py │ ├── test_keyword.py │ ├── test_unicode.py │ └── test_docstring.py ├── datatable │ ├── __init__.py │ └── test_datatable.py ├── feature │ ├── __init__.py │ ├── test_no_scenario.py │ ├── test_same_function_name.py │ ├── test_wrong.py │ ├── test_alias.py │ ├── test_outline_empty_values.py │ ├── test_description.py │ ├── test_scenarios.py │ ├── test_background.py │ ├── test_rule_example_format.py │ ├── test_feature_base_dir.py │ ├── test_tags.py │ ├── test_cucumber_json.py │ └── test_report.py ├── generation │ ├── __init__.py │ └── test_generate_missing.py ├── library │ └── __init__.py ├── parser │ ├── __init__.py │ ├── test.feature │ └── test_errors.py ├── scripts │ ├── __init__.py │ ├── test_migrate.py │ ├── test_main.py │ └── test_generate.py ├── utils.py ├── conftest.py └── test_hooks.py ├── src └── pytest_bdd │ ├── py.typed │ ├── __init__.py │ ├── types.py │ ├── templates │ └── test.py.mak │ ├── exceptions.py │ ├── hooks.py │ ├── scripts.py │ ├── feature.py │ ├── compat.py │ ├── parsers.py │ ├── utils.py │ ├── plugin.py │ ├── gherkin_terminal_reporter.py │ ├── steps.py │ ├── generation.py │ ├── cucumber_json.py │ ├── reporting.py │ └── gherkin_parser.py ├── .vscode └── extensions.json ├── docs ├── index.rst ├── Makefile └── conf.py ├── pytest.ini ├── .editorconfig ├── .readthedocs.yaml ├── .envrc ├── CONTRIBUTING.md ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── workflows │ └── main.yml ├── .gitignore ├── .pre-commit-config.yaml ├── LICENSE.txt ├── AUTHORS.rst ├── tox.ini └── pyproject.toml /tests/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /src/pytest_bdd/py.typed: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/args/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/steps/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/args/parse/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/args/regex/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/datatable/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/feature/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/generation/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/library/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/parser/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /tests/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/args/cfparse/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | "recommendations": [ 3 | "charliermarsh.ruff" 4 | ] 5 | } 6 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../README.rst 2 | 3 | .. include:: ../AUTHORS.rst 4 | 5 | .. include:: ../CHANGES.rst 6 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | testpaths = tests 3 | filterwarnings = 4 | # only ignore errors from the pytest_bdd package 5 | error:::(src)?\.pytest_bdd.* 6 | -------------------------------------------------------------------------------- /tests/utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import pytest 4 | from packaging.utils import Version 5 | 6 | # We leave this here for the future as an easy way to do feature-based testing. 
7 | PYTEST_VERSION = Version(pytest.__version__) 8 | -------------------------------------------------------------------------------- /src/pytest_bdd/__init__.py: -------------------------------------------------------------------------------- 1 | """pytest-bdd public API.""" 2 | 3 | from __future__ import annotations 4 | 5 | from pytest_bdd.scenario import scenario, scenarios 6 | from pytest_bdd.steps import given, step, then, when 7 | 8 | __all__ = ["given", "when", "step", "then", "scenario", "scenarios"] 9 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # EditorConfig is awesome: https://EditorConfig.org 2 | 3 | # top-most EditorConfig file 4 | root = true 5 | 6 | # Unix-style newlines with a newline ending every file 7 | [*] 8 | end_of_line = lf 9 | insert_final_newline = true 10 | charset = utf-8 11 | 12 | [*.py] 13 | indent_style = space 14 | indent_size = 4 15 | 16 | [*.yml] 17 | indent_style = space 18 | indent_size = 2 19 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | version: 2 6 | 7 | build: 8 | os: ubuntu-24.04 9 | tools: 10 | python: "3" 11 | 12 | sphinx: 13 | configuration: docs/conf.py 14 | formats: 15 | - epub 16 | - pdf 17 | - htmlzip 18 | 19 | python: 20 | install: 21 | - method: pip 22 | path: . 
23 | -------------------------------------------------------------------------------- /src/pytest_bdd/types.py: -------------------------------------------------------------------------------- 1 | """Common type definitions.""" 2 | 3 | from __future__ import annotations 4 | 5 | import typing 6 | 7 | if typing.TYPE_CHECKING: 8 | from typing_extensions import Literal 9 | 10 | GIVEN: Literal["given"] = "given" 11 | WHEN: Literal["when"] = "when" 12 | THEN: Literal["then"] = "then" 13 | 14 | STEP_TYPES = (GIVEN, WHEN, THEN) 15 | 16 | STEP_TYPE_BY_PARSER_KEYWORD = { 17 | "Context": GIVEN, 18 | "Action": WHEN, 19 | "Outcome": THEN, 20 | } 21 | -------------------------------------------------------------------------------- /.envrc: -------------------------------------------------------------------------------- 1 | # config file for `direnv`: https://direnv.net 2 | # load the poetry virtual environment when entering the project directory 3 | 4 | strict_env 5 | 6 | if [[ ! -f "pyproject.toml" ]]; then 7 | log_error 'No pyproject.toml found. Use `poetry new` or `poetry init` to create one first.' 8 | exit 2 9 | fi 10 | 11 | local VENV="$(poetry env info --path)" 12 | if [[ -z $VENV || ! -d $VENV/bin ]]; then 13 | log_error 'No poetry virtual environment found. Use `poetry install` to create one first.' 
14 | exit 2 15 | fi 16 | 17 | source_env "$VENV/bin/activate" 18 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import pytest 4 | 5 | pytest_plugins = "pytester" 6 | 7 | 8 | def pytest_generate_tests(metafunc): 9 | if "pytest_params" in metafunc.fixturenames: 10 | parametrizations = [ 11 | pytest.param([], id="no-import-mode"), 12 | pytest.param(["--import-mode=prepend"], id="--import-mode=prepend"), 13 | pytest.param(["--import-mode=append"], id="--import-mode=append"), 14 | pytest.param(["--import-mode=importlib"], id="--import-mode=importlib"), 15 | ] 16 | metafunc.parametrize("pytest_params", parametrizations) 17 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # How to setup development environment 2 | - Install poetry: https://python-poetry.org/docs/#installation 3 | - (Optional) Install pre-commit: https://pre-commit.com/#install 4 | - Run `poetry install` to install dependencies 5 | - Run `pre-commit install` to install pre-commit hooks 6 | 7 | # How to run tests 8 | - Run `poetry run pytest` 9 | - or run `tox` 10 | # How to make a release 11 | 12 | ```shell 13 | python -m pip install --upgrade build twine 14 | 15 | # cleanup the ./dist folder 16 | rm -rf ./dist 17 | 18 | # Build the distributions 19 | python -m build 20 | 21 | # Upload them 22 | 23 | twine upload dist/* 24 | ``` 25 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 
11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Write a [Minimal, Reproducible Example](https://stackoverflow.com/help/minimal-reproducible-example) to show the bug. 15 | 16 | **Expected behavior** 17 | A clear and concise description of what you expected to happen. 18 | 19 | **Additional context** 20 | Add any other context about the problem here. 21 | 22 | **Version** 23 | - pytest version: 24 | - pytest-bdd version: 25 | - OS: 26 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 
12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.rej 2 | *.py[cod] 3 | /.env 4 | *.orig 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Packages 10 | *.egg 11 | *.egg-info 12 | dist 13 | build 14 | _build 15 | eggs 16 | parts 17 | bin 18 | var 19 | sdist 20 | develop-eggs 21 | .installed.cfg 22 | lib 23 | lib64 24 | 25 | # Installer logs 26 | pip-log.txt 27 | 28 | # Unit test / coverage reports 29 | htmlcov/ 30 | .tox/ 31 | .nox/ 32 | .coverage 33 | .coverage.* 34 | .cache 35 | coverage.xml 36 | *.cover 37 | *.py,cover 38 | .hypothesis/ 39 | .pytest_cache/ 40 | cover/ 41 | 42 | # Translations 43 | *.mo 44 | 45 | # Mr Developer 46 | .mr.developer.cfg 47 | .project 48 | .pydevproject 49 | .pytest_cache 50 | .ropeproject 51 | 52 | # Sublime 53 | /*.sublime-* 54 | 55 | #PyCharm 56 | /.idea 57 | -------------------------------------------------------------------------------- /src/pytest_bdd/templates/test.py.mak: -------------------------------------------------------------------------------- 1 | % if features: 2 | """${ features[0].name or features[0].rel_filename } feature tests.""" 3 | 4 | from pytest_bdd import ( 5 | given, 6 | scenario, 7 | then, 8 | when, 9 | ) 10 | 11 | 12 | % endif 13 | % for scenario in sorted(scenarios, key=lambda scenario: scenario.name): 14 | @scenario('${scenario.feature.rel_filename}', ${ make_string_literal(scenario.name)}) 15 | def test_${ make_python_name(scenario.name)}(): 16 | ${make_python_docstring(scenario.name)} 17 | 18 | 19 | % endfor 20 | % 
for step in steps: 21 | @${step.type}(${ make_string_literal(step.name)}) 22 | def _(): 23 | ${make_python_docstring(step.name)} 24 | raise NotImplementedError 25 | % if not loop.last: 26 | 27 | 28 | % endif 29 | % endfor 30 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | # See https://pre-commit.com for more information 2 | # See https://pre-commit.com/hooks.html for more hooks 3 | repos: 4 | - repo: https://github.com/pre-commit/pre-commit-hooks 5 | rev: "cef0300fd0fc4d2a87a85fa2093c6b283ea36f4b" # frozen: v5.0.0 6 | hooks: 7 | - id: trailing-whitespace 8 | - id: end-of-file-fixer 9 | - id: check-yaml 10 | - id: check-added-large-files 11 | - repo: https://github.com/astral-sh/ruff-pre-commit 12 | rev: "895ebb389825c29bd4e0addcf7579d6c69d199cc" # frozen: v0.9.6 13 | hooks: 14 | - id: ruff 15 | args: [ --fix ] 16 | - id: ruff-format 17 | - repo: https://github.com/python-poetry/poetry 18 | rev: "bd500dd3bdfaec3de6894144c9cedb3a9358be84" # frozen: 2.0.1 19 | hooks: 20 | - id: poetry-check 21 | args: ["--lock"] 22 | -------------------------------------------------------------------------------- /tests/feature/test_no_scenario.py: -------------------------------------------------------------------------------- 1 | """Test no scenarios defined in the feature file.""" 2 | 3 | from __future__ import annotations 4 | 5 | import textwrap 6 | 7 | 8 | def test_no_scenarios(pytester): 9 | """Test no scenarios defined in the feature file.""" 10 | features = pytester.mkdir("features") 11 | features.joinpath("test.feature").write_text( 12 | textwrap.dedent( 13 | """ 14 | Given foo 15 | When bar 16 | Then baz 17 | """ 18 | ), 19 | encoding="utf-8", 20 | ) 21 | pytester.makepyfile( 22 | textwrap.dedent( 23 | """ 24 | 25 | from pytest_bdd import scenarios 26 | 27 | scenarios('features') 28 | """ 29 | ) 30 | ) 31 | result = 
pytester.runpytest() 32 | result.stdout.fnmatch_lines(["*FeatureError: Step definition outside of a Scenario or a Background.*"]) 33 | -------------------------------------------------------------------------------- /tests/feature/test_same_function_name.py: -------------------------------------------------------------------------------- 1 | """Function name same as step name.""" 2 | 3 | from __future__ import annotations 4 | 5 | import textwrap 6 | 7 | 8 | def test_when_function_name_same_as_step_name(pytester): 9 | pytester.makefile( 10 | ".feature", 11 | same_name=textwrap.dedent( 12 | """\ 13 | Feature: Function name same as step name 14 | Scenario: When function name same as step name 15 | When something 16 | """ 17 | ), 18 | ) 19 | pytester.makepyfile( 20 | textwrap.dedent( 21 | """\ 22 | from pytest_bdd import when, scenario 23 | 24 | @scenario("same_name.feature", "When function name same as step name") 25 | def test_same_name(): 26 | pass 27 | 28 | @when("something") 29 | def _(): 30 | return "something" 31 | """ 32 | ) 33 | ) 34 | result = pytester.runpytest() 35 | result.assert_outcomes(passed=1) 36 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (C) 2013-2014 Oleg Pidsadnyi, Anatoly Bubenkov and others 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
6 | 7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 8 | -------------------------------------------------------------------------------- /AUTHORS.rst: -------------------------------------------------------------------------------- 1 | Authors 2 | ======= 3 | 4 | `Oleg Pidsadnyi `_ 5 | original idea, initial implementation and further improvements 6 | `Anatoly Bubenkov `_ 7 | key implementation idea and realization, many new features and improvements 8 | 9 | These people have contributed to `pytest-bdd`, in alphabetical order: 10 | 11 | * `Adam Coddington `_ 12 | * `Albert-Jan Nijburg `_ 13 | * `Alessio Bogon `_ 14 | * `Andrey Makhnach `_ 15 | * `Aron Curzon `_ 16 | * `Dmitrijs Milajevs `_ 17 | * `Dmitry Kolyagin `_ 18 | * `Florian Bruhin `_ 19 | * `Floris Bruynooghe `_ 20 | * `Harro van der Klauw `_ 21 | * `Hugo van Kemenade `_ 22 | * `Kyle Adams `_ 23 | * `Laurence Rowe `_ 24 | * `Leonardo Santagada `_ 25 | * `Milosz Sliwinski `_ 26 | * `Michiel Holtkamp `_ 27 | * `Robin Pedersen `_ 28 | * `Sergey Kraynev `_ 29 | -------------------------------------------------------------------------------- /tests/steps/test_given.py: -------------------------------------------------------------------------------- 1 | """Given tests.""" 2 | 3 | from __future__ import annotations 4 | 5 | import textwrap 6 | 7 | 8 | def test_given_injection(pytester): 9 | pytester.makefile( 10 | ".feature", 11 | given=textwrap.dedent( 12 | """\ 13 | Feature: Given 14 | Scenario: Test given fixture injection 15 | Given I have injecting given 16 | Then foo should be "injected foo" 17 | """ 
18 | ), 19 | ) 20 | pytester.makepyfile( 21 | textwrap.dedent( 22 | """\ 23 | import pytest 24 | from pytest_bdd import given, then, scenario 25 | 26 | @scenario("given.feature", "Test given fixture injection") 27 | def test_given(): 28 | pass 29 | 30 | @given("I have injecting given", target_fixture="foo") 31 | def _(): 32 | return "injected foo" 33 | 34 | 35 | @then('foo should be "injected foo"') 36 | def _(foo): 37 | assert foo == "injected foo" 38 | 39 | """ 40 | ) 41 | ) 42 | result = pytester.runpytest() 43 | result.assert_outcomes(passed=1) 44 | -------------------------------------------------------------------------------- /tests/scripts/test_migrate.py: -------------------------------------------------------------------------------- 1 | """Test code generation command.""" 2 | 3 | from __future__ import annotations 4 | 5 | import os 6 | import sys 7 | import textwrap 8 | 9 | from pytest_bdd.scripts import main 10 | 11 | PATH = os.path.dirname(__file__) 12 | 13 | 14 | def test_migrate(monkeypatch, capsys, pytester): 15 | """Test if the code is migrated by a given file mask.""" 16 | tests = pytester.mkpydir("tests") 17 | 18 | tests.joinpath("test_foo.py").write_text( 19 | textwrap.dedent( 20 | ''' 21 | """Foo bar tests.""" 22 | from pytest_bdd import scenario 23 | 24 | test_foo = scenario('foo_bar.feature', 'Foo bar') 25 | ''' 26 | ) 27 | ) 28 | 29 | monkeypatch.setattr(sys, "argv", ["", "migrate", str(tests)]) 30 | main() 31 | out, err = capsys.readouterr() 32 | out = "\n".join(sorted(out.splitlines())) 33 | expected = textwrap.dedent( 34 | """ 35 | migrated: {0}/test_foo.py 36 | skipped: {0}/__init__.py""".format(str(tests))[1:] 37 | ) 38 | assert out == expected 39 | assert tests.joinpath("test_foo.py").read_text() == textwrap.dedent( 40 | ''' 41 | """Foo bar tests.""" 42 | from pytest_bdd import scenario 43 | 44 | @scenario('foo_bar.feature', 'Foo bar') 45 | def test_foo(): 46 | pass 47 | ''' 48 | ) 49 | 
-------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # For the full list of built-in configuration values, see the documentation: 4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 5 | 6 | # -- Project information ----------------------------------------------------- 7 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information 8 | from __future__ import annotations 9 | 10 | from importlib import metadata as _metadata 11 | 12 | project = "pytest-bdd" 13 | copyright = "2013, Oleg Pidsadnyi" 14 | author = "Oleg Pidsadnyi" 15 | # The full version, including alpha/beta/rc tags. 16 | release = _metadata.version("pytest-bdd") 17 | 18 | # -- General configuration --------------------------------------------------- 19 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration 20 | 21 | templates_path = ["_templates"] 22 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] 23 | 24 | # -- Options for HTML output ------------------------------------------------- 25 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output 26 | 27 | html_theme = "alabaster" 28 | html_static_path = ["_static"] 29 | 30 | html_sidebars = { 31 | "**": [ 32 | "about.html", 33 | "searchfield.html", 34 | # 'navigation.html', 35 | "relations.html", 36 | # 'donate.html', 37 | "localtoc.html", 38 | ] 39 | } 40 | -------------------------------------------------------------------------------- /tests/steps/test_keyword.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import textwrap 4 | 5 | 6 | def test_asterisk_keyword(pytester): 7 | pytester.makefile( 8 | ".feature", 9 | asterisk=textwrap.dedent( 10 | """\ 11 | Feature: 
Step continuation 12 | Scenario: Asterisk steps 13 | Given I am out shopping 14 | * I have eggs 15 | * I have milk 16 | * I have butter 17 | When I check my list 18 | Then I don't need anything 19 | """ 20 | ), 21 | ) 22 | pytester.makepyfile( 23 | textwrap.dedent( 24 | """\ 25 | import pytest 26 | from pytest_bdd import given, when, then, scenario 27 | 28 | @scenario("asterisk.feature", "Asterisk steps") 29 | def test_asterisk_steps(): 30 | pass 31 | 32 | @given("I am out shopping") 33 | def _(): 34 | pass 35 | 36 | 37 | @given("I have eggs") 38 | def _(): 39 | pass 40 | 41 | 42 | @given("I have milk") 43 | def _(): 44 | pass 45 | 46 | 47 | @given("I have butter") 48 | def _(): 49 | pass 50 | 51 | 52 | @when("I check my list") 53 | def _(): 54 | pass 55 | 56 | 57 | @then("I don't need anything") 58 | def _(): 59 | pass 60 | 61 | """ 62 | ) 63 | ) 64 | result = pytester.runpytest() 65 | result.assert_outcomes(passed=1) 66 | -------------------------------------------------------------------------------- /src/pytest_bdd/exceptions.py: -------------------------------------------------------------------------------- 1 | """pytest-bdd Exceptions.""" 2 | 3 | from __future__ import annotations 4 | 5 | 6 | class StepImplementationError(Exception): 7 | """Step implementation error.""" 8 | 9 | 10 | class ScenarioIsDecoratorOnly(Exception): 11 | """Scenario can be only used as decorator.""" 12 | 13 | 14 | class ScenarioValidationError(Exception): 15 | """Base class for scenario validation.""" 16 | 17 | 18 | class ScenarioNotFound(ScenarioValidationError): 19 | """Scenario Not Found.""" 20 | 21 | 22 | class StepDefinitionNotFoundError(Exception): 23 | """Step definition not found.""" 24 | 25 | 26 | class NoScenariosFound(Exception): 27 | """No scenarios found.""" 28 | 29 | 30 | class GherkinParseError(Exception): 31 | """Base class for all Gherkin parsing errors.""" 32 | 33 | def __init__(self, message: str, line: int, line_content: str, filename: str) -> None: 34 | 
super().__init__(message) 35 | self.message = message 36 | self.line = line 37 | self.line_content = line_content 38 | self.filename = filename 39 | 40 | def __str__(self) -> str: 41 | return f"{self.message}\nLine number: {self.line}\nLine: {self.line_content}\nFile: {self.filename}" 42 | 43 | 44 | class FeatureError(GherkinParseError): 45 | pass 46 | 47 | 48 | class BackgroundError(GherkinParseError): 49 | pass 50 | 51 | 52 | class ScenarioError(GherkinParseError): 53 | pass 54 | 55 | 56 | class StepError(GherkinParseError): 57 | pass 58 | 59 | 60 | class RuleError(GherkinParseError): 61 | pass 62 | 63 | 64 | class TokenError(GherkinParseError): 65 | pass 66 | -------------------------------------------------------------------------------- /tests/feature/test_wrong.py: -------------------------------------------------------------------------------- 1 | """Test wrong feature syntax.""" 2 | 3 | from __future__ import annotations 4 | 5 | import textwrap 6 | 7 | 8 | def test_multiple_features_single_file(pytester): 9 | """Test validation error when multiple features are placed in a single file.""" 10 | pytester.makefile( 11 | ".feature", 12 | wrong=textwrap.dedent( 13 | """\ 14 | Feature: Feature One 15 | 16 | Background: 17 | Given I have A 18 | And I have B 19 | 20 | Scenario: Do something with A 21 | When I do something with A 22 | Then something about B 23 | 24 | Feature: Feature Two 25 | 26 | Background: 27 | Given I have A 28 | 29 | Scenario: Something that just needs A 30 | When I do something else with A 31 | Then something else about B 32 | 33 | Scenario: Something that needs B again 34 | Given I have B 35 | When I do something else with B 36 | Then something else about A and B 37 | """ 38 | ), 39 | ) 40 | pytester.makepyfile( 41 | textwrap.dedent( 42 | """\ 43 | import pytest 44 | from pytest_bdd import then, scenario 45 | 46 | @scenario("wrong.feature", "Do something with A") 47 | def test_wrong(): 48 | pass 49 | 50 | """ 51 | ) 52 | ) 53 | result = 
pytester.runpytest() 54 | result.assert_outcomes(errors=1) 55 | result.stdout.fnmatch_lines("*FeatureError: Multiple features are not allowed in a single feature file.*") 56 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = py{3.9,3.10,3.11}-pytest{7.0,7.1,7.2,7.3,7.4,8.0,8.1,8.2,8.3,latest}-gherkin_official{29,30,31,32,33,34,35,36,37,latest}-coverage 3 | py{3.12,3.13,3.14}-pytest{7.3,7.4,8.0,8.1,8.2,8.3,latest}-gherkin_official{29,30,31,32,33,34,35,36,37,latest}-coverage 4 | py3.12-pytestlatest-xdist-coverage 5 | mypy 6 | 7 | [testenv] 8 | parallel_show_output = true 9 | setenv = 10 | coverage: _PYTEST_CMD=coverage run -m pytest 11 | xdist: _PYTEST_MORE_ARGS=-n3 -rfsxX 12 | deps = 13 | gherkin_officiallatest: gherkin-official 14 | gherkin_official37: gherkin-official~=37.0.0 15 | gherkin_official36: gherkin-official~=36.0.0 16 | gherkin_official35: gherkin-official~=35.0.0 17 | gherkin_official34: gherkin-official~=34.0.0 18 | gherkin_official33: gherkin-official~=33.0.0 19 | gherkin_official32: gherkin-official~=32.0.0 20 | gherkin_official31: gherkin-official~=31.0.0 21 | gherkin_official30: gherkin-official~=30.0.0 22 | gherkin_official29: gherkin-official~=29.0.0 23 | 24 | pytestlatest: pytest 25 | pytest8.1: pytest~=8.1.0 26 | pytest8.0: pytest~=8.0.0 27 | pytest7.4: pytest~=7.4.0 28 | pytest7.3: pytest~=7.3.0 29 | pytest7.2: pytest~=7.2.0 30 | pytest7.1: pytest~=7.1.0 31 | pytest7.0: pytest~=7.0.0 32 | 33 | coverage: coverage[toml] 34 | xdist: pytest-xdist 35 | commands = {env:_PYTEST_CMD:pytest} {env:_PYTEST_MORE_ARGS:} {posargs:-vvl} 36 | 37 | [testenv:mypy] 38 | deps = 39 | poetry==2.0.0 40 | poetry-plugin-export 41 | allowlist_externals = sh 42 | commands_pre = 43 | sh -c "\ 44 | poetry export --only=dev --format requirements.txt > requirements.txt && \ 45 | pip install -r requirements.txt && \ 46 | :" 47 | 
commands = mypy 48 | -------------------------------------------------------------------------------- /tests/feature/test_alias.py: -------------------------------------------------------------------------------- 1 | """Test step alias when decorated multiple times.""" 2 | 3 | from __future__ import annotations 4 | 5 | import textwrap 6 | 7 | 8 | def test_step_alias(pytester): 9 | pytester.makefile( 10 | ".feature", 11 | alias=textwrap.dedent( 12 | """\ 13 | Feature: Step aliases 14 | Scenario: Multiple step aliases 15 | Given I have an empty list 16 | And I have foo (which is 1) in my list 17 | # Alias of the "I have foo (which is 1) in my list" 18 | And I have bar (alias of foo) in my list 19 | 20 | When I do crash (which is 2) 21 | And I do boom (alias of crash) 22 | Then my list should be [1, 1, 2, 2] 23 | """ 24 | ), 25 | ) 26 | 27 | pytester.makepyfile( 28 | textwrap.dedent( 29 | """\ 30 | import pytest 31 | from pytest_bdd import given, when, then, scenario 32 | 33 | @scenario("alias.feature", "Multiple step aliases") 34 | def test_alias(): 35 | pass 36 | 37 | 38 | @given("I have an empty list", target_fixture="results") 39 | def _(): 40 | return [] 41 | 42 | 43 | @given("I have foo (which is 1) in my list") 44 | @given("I have bar (alias of foo) in my list") 45 | def _(results): 46 | results.append(1) 47 | 48 | 49 | @when("I do crash (which is 2)") 50 | @when("I do boom (alias of crash)") 51 | def _(results): 52 | results.append(2) 53 | 54 | 55 | @then("my list should be [1, 1, 2, 2]") 56 | def _(results): 57 | assert results == [1, 1, 2, 2] 58 | """ 59 | ) 60 | ) 61 | result = pytester.runpytest() 62 | result.assert_outcomes(passed=1) 63 | -------------------------------------------------------------------------------- /tests/feature/test_outline_empty_values.py: -------------------------------------------------------------------------------- 1 | """Scenario Outline with empty example values tests.""" 2 | 3 | from __future__ import annotations 4 | 5 | 
def test_scenario_with_empty_example_values(pytester):
    """Empty cells in an Examples table are passed through as empty strings.

    ``parsers.parse`` could not match an empty value, which is why the STEPS
    conftest uses ``parsers.re`` with non-greedy groups.  The "#" cell is a
    literal table value (not a comment), so the dumped objects must be
    exactly ["#", "", ""].
    """
    pytester.makefile(
        ".feature",
        outline=textwrap.dedent(
            """\
            Feature: Outline
                Scenario Outline: Outlined with empty example values
                    Given there are <start> cucumbers
                    When I eat <eat> cucumbers
                    Then I should have <left> cucumbers

                    Examples:
                    | start | eat | left |
                    | #     |     |      |
            """
        ),
    )
    pytester.makeconftest(textwrap.dedent(STEPS))

    pytester.makepyfile(
        textwrap.dedent(
            """\
            from pytest_bdd.utils import dump_obj
            from pytest_bdd import scenario
            import json

            @scenario("outline.feature", "Outlined with empty example values")
            def test_outline():
                pass
            """
        )
    )
    # -s so the dumped objects reach stdout for collect_dumped_objects().
    result = pytester.runpytest("-s")
    result.assert_outcomes(passed=1)
    assert collect_dumped_objects(result) == ["#", "", ""]
something 17 | I want something 18 | Because it will be cool 19 | 20 | 21 | Some description goes here. 22 | 23 | Scenario: Description 24 | Also, the scenario can have a description. 25 | 26 | It goes here between the scenario name 27 | and the first step. 28 | Given I have a bar 29 | """ 30 | ), 31 | ) 32 | 33 | pytester.makepyfile( 34 | textwrap.dedent( 35 | r''' 36 | import textwrap 37 | from pytest_bdd import given, scenario 38 | from pytest_bdd.scenario import scenario_wrapper_template_registry 39 | 40 | @scenario("description.feature", "Description") 41 | def test_description(): 42 | pass 43 | 44 | 45 | @given("I have a bar") 46 | def _(): 47 | return "bar" 48 | 49 | def test_feature_description(): 50 | scenario = scenario_wrapper_template_registry[test_description] 51 | assert scenario.feature.description == textwrap.dedent( 52 | "In order to achieve something\nI want something\nBecause it will be cool\n\n\nSome description goes here." 53 | ) 54 | 55 | def test_scenario_description(): 56 | scenario = scenario_wrapper_template_registry[test_description] 57 | assert scenario.description == textwrap.dedent( 58 | "Also, the scenario can have a description.\n\nIt goes here between the scenario name\nand the first step.""" 59 | ) 60 | ''' 61 | ) 62 | ) 63 | 64 | result = pytester.runpytest() 65 | result.assert_outcomes(passed=3) 66 | -------------------------------------------------------------------------------- /src/pytest_bdd/hooks.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from collections.abc import Callable 4 | 5 | import pytest 6 | from _pytest.fixtures import FixtureRequest 7 | 8 | from pytest_bdd.parser import Feature, Scenario, Step 9 | 10 | """Pytest-bdd pytest hooks.""" 11 | 12 | 13 | def pytest_bdd_before_scenario(request: FixtureRequest, feature: Feature, scenario: Scenario) -> object: 14 | """Called before scenario is executed.""" 15 | 16 | 17 | def 
pytest_bdd_after_scenario(request: FixtureRequest, feature: Feature, scenario: Scenario) -> object: 18 | """Called after scenario is executed.""" 19 | 20 | 21 | def pytest_bdd_before_step( 22 | request: FixtureRequest, feature: Feature, scenario: Scenario, step: Step, step_func: Callable[..., object] 23 | ) -> object: 24 | """Called before step function is set up.""" 25 | 26 | 27 | def pytest_bdd_before_step_call( 28 | request: FixtureRequest, 29 | feature: Feature, 30 | scenario: Scenario, 31 | step: Step, 32 | step_func: Callable[..., object], 33 | step_func_args: dict[str, object], 34 | ) -> object: 35 | """Called before step function is executed.""" 36 | 37 | 38 | def pytest_bdd_after_step( 39 | request: FixtureRequest, 40 | feature: Feature, 41 | scenario: Scenario, 42 | step: Step, 43 | step_func: Callable[..., object], 44 | step_func_args: dict[str, object], 45 | ) -> object: 46 | """Called after step function is successfully executed.""" 47 | 48 | 49 | def pytest_bdd_step_error( 50 | request: FixtureRequest, 51 | feature: Feature, 52 | scenario: Scenario, 53 | step: Step, 54 | step_func: Callable[..., object], 55 | step_func_args: dict[str, object], 56 | exception: Exception, 57 | ) -> object: 58 | """Called when step function failed to execute.""" 59 | 60 | 61 | def pytest_bdd_step_func_lookup_error( 62 | request: FixtureRequest, feature: Feature, scenario: Scenario, step: Step, exception: Exception 63 | ) -> object: 64 | """Called when step lookup failed.""" 65 | 66 | 67 | @pytest.hookspec(firstresult=True) 68 | def pytest_bdd_apply_tag(tag: str, function: Callable[..., object]) -> object: 69 | """Apply a tag (from a ``.feature`` file) to the given scenario. 70 | 71 | The default implementation does the equivalent of 72 | ``getattr(pytest.mark, tag)(function)``, but you can override this hook and 73 | return ``True`` to do more sophisticated handling of tags. 
74 | """ 75 | -------------------------------------------------------------------------------- /tests/scripts/test_main.py: -------------------------------------------------------------------------------- 1 | """Main command.""" 2 | 3 | from __future__ import annotations 4 | 5 | import os 6 | import sys 7 | import textwrap 8 | 9 | from pytest_bdd.scripts import main 10 | 11 | PATH = os.path.dirname(__file__) 12 | 13 | 14 | def test_main(monkeypatch, capsys): 15 | """Test if main command shows help when called without the subcommand.""" 16 | monkeypatch.setattr(sys, "argv", ["pytest-bdd"]) 17 | monkeypatch.setattr(sys, "exit", lambda x: x) 18 | main() 19 | out, err = capsys.readouterr() 20 | assert "usage: pytest-bdd [-h]" in err 21 | assert "pytest-bdd: error:" in err 22 | 23 | 24 | def test_step_definitions_found_using_main(pytester): 25 | """Issue 173: Ensure step definitions are found when using pytest.main.""" 26 | pytester.makefile( 27 | ".feature", 28 | outline=textwrap.dedent( 29 | """\ 30 | Feature: Outlined Scenarios 31 | 32 | Scenario Outline: Outlined given, when, then 33 | Given there are cucumbers 34 | When I eat cucumbers 35 | Then I should have cucumbers 36 | 37 | Examples: 38 | | start | eat | left | 39 | | 12 | 5 | 7 | 40 | """ 41 | ), 42 | ) 43 | 44 | pytester.makepyfile( 45 | textwrap.dedent( 46 | """\ 47 | from pytest_bdd import given, when, then, parsers, scenarios 48 | 49 | scenarios(".") 50 | 51 | @given(parsers.parse("there are {start:d} cucumbers"), target_fixture="cucumbers") 52 | def _(start): 53 | assert isinstance(start, int) 54 | return {"start": start} 55 | 56 | 57 | @when(parsers.parse("I eat {eat:g} cucumbers")) 58 | def _(cucumbers, eat): 59 | assert isinstance(eat, float) 60 | cucumbers["eat"] = eat 61 | 62 | 63 | @then(parsers.parse("I should have {left} cucumbers")) 64 | def _(cucumbers, left): 65 | assert isinstance(left, str) 66 | assert cucumbers["start"] - cucumbers["eat"] == int(left) 67 | """ 68 | ) 69 | ) 70 | 71 | 
pytester.makepyfile( 72 | main=textwrap.dedent( 73 | """\ 74 | import pytest 75 | import os 76 | 77 | # Programmatically run pytest 78 | if __name__ == "__main__": 79 | pytest.main([os.path.abspath("test_step_definitions_found_using_main.py")]) 80 | """ 81 | ) 82 | ) 83 | 84 | result = pytester.runpython(pytester.path / "main.py") 85 | result.assert_outcomes(passed=1, failed=0) 86 | -------------------------------------------------------------------------------- /src/pytest_bdd/scripts.py: -------------------------------------------------------------------------------- 1 | """pytest-bdd scripts.""" 2 | 3 | from __future__ import annotations 4 | 5 | import argparse 6 | import glob 7 | import os.path 8 | import re 9 | 10 | from .generation import generate_code, parse_feature_files 11 | 12 | MIGRATE_REGEX = re.compile(r"\s?(\w+)\s=\sscenario\((.+)\)", flags=re.MULTILINE) 13 | 14 | 15 | def migrate_tests(args: argparse.Namespace) -> None: 16 | """Migrate outdated tests to the most recent form.""" 17 | path = args.path 18 | for file_path in glob.iglob(os.path.join(os.path.abspath(path), "**", "*.py"), recursive=True): 19 | migrate_tests_in_file(file_path) 20 | 21 | 22 | def migrate_tests_in_file(file_path: str) -> None: 23 | """Migrate all bdd-based tests in the given test file.""" 24 | try: 25 | with open(file_path, "r+") as fd: 26 | content = fd.read() 27 | new_content = MIGRATE_REGEX.sub(r"\n@scenario(\2)\ndef \1():\n pass\n", content) 28 | if new_content != content: 29 | # the regex above potentially causes the end of the file to 30 | # have an extra newline 31 | new_content = new_content.rstrip("\n") + "\n" 32 | fd.seek(0) 33 | fd.write(new_content) 34 | print(f"migrated: {file_path}") 35 | else: 36 | print(f"skipped: {file_path}") 37 | except OSError: 38 | pass 39 | 40 | 41 | def check_existense(file_name: str) -> str: 42 | """Check file or directory name for existence.""" 43 | if not os.path.exists(file_name): 44 | raise argparse.ArgumentTypeError(f"{file_name} 
def main() -> None:
    """Command-line entry point: build the argument parser and dispatch.

    Defines two sub-commands, ``generate`` (print generated test code for
    feature files) and ``migrate`` (rewrite outdated test bindings in place).
    """
    root_parser = argparse.ArgumentParser(prog="pytest-bdd")
    commands = root_parser.add_subparsers(help="sub-command help", dest="command")
    commands.required = True

    generate = commands.add_parser("generate", help="generate help")
    generate.add_argument(
        "files",
        metavar="FEATURE_FILE",
        type=check_existense,
        nargs="+",
        help="Feature files to generate test code with",
    )
    generate.set_defaults(func=print_generated_code)

    migrate = commands.add_parser("migrate", help="migrate help")
    migrate.add_argument("path", metavar="PATH", help="Migrate outdated tests to the most recent form")
    migrate.set_defaults(func=migrate_tests)

    args = root_parser.parse_args()
    handler = getattr(args, "func", None)
    if handler is not None:
        handler(args)
pytester.mkdir("features") 28 | features.joinpath("test.feature").write_text( 29 | textwrap.dedent( 30 | """ 31 | Feature: Test scenarios 32 | Scenario: Test scenario 33 | Given I have a bar 34 | """ 35 | ), 36 | "utf-8", 37 | ) 38 | subfolder = features.joinpath("subfolder") 39 | subfolder.mkdir() 40 | subfolder.joinpath("test.feature").write_text( 41 | textwrap.dedent( 42 | """ 43 | Feature: Test scenarios 44 | Scenario: Test subfolder scenario 45 | Given I have a bar 46 | 47 | Scenario: Test failing subfolder scenario 48 | Given I have a failing bar 49 | 50 | Scenario: Test already bound scenario 51 | Given I have a bar 52 | 53 | Scenario: Test scenario 54 | Given I have a bar 55 | """ 56 | ), 57 | "utf-8", 58 | ) 59 | pytester.makepyfile( 60 | """ 61 | import pytest 62 | from pytest_bdd import scenarios, scenario 63 | 64 | @scenario('features/subfolder/test.feature', 'Test already bound scenario') 65 | def test_already_bound(): 66 | pass 67 | 68 | scenarios('features') 69 | """ 70 | ) 71 | result = pytester.runpytest_subprocess("-v", "-s", *pytest_params) 72 | result.assert_outcomes(passed=4, failed=1) 73 | result.stdout.fnmatch_lines(["*collected 5 items"]) 74 | result.stdout.fnmatch_lines(["*test_test_subfolder_scenario *bar!", "PASSED"]) 75 | result.stdout.fnmatch_lines(["*test_test_scenario *bar!", "PASSED"]) 76 | result.stdout.fnmatch_lines(["*test_test_failing_subfolder_scenario *FAILED"]) 77 | result.stdout.fnmatch_lines(["*test_already_bound *bar!", "PASSED"]) 78 | result.stdout.fnmatch_lines(["*test_test_scenario_1 *bar!", "PASSED"]) 79 | 80 | 81 | def test_scenarios_none_found(pytester, pytest_params): 82 | """Test scenarios shortcut when no scenarios found.""" 83 | testpath = pytester.makepyfile( 84 | """ 85 | import pytest 86 | from pytest_bdd import scenarios 87 | 88 | scenarios('.') 89 | """ 90 | ) 91 | result = pytester.runpytest_subprocess(testpath, *pytest_params) 92 | result.assert_outcomes(errors=1) 93 | 
result.stdout.fnmatch_lines(["*NoScenariosFound*"]) 94 | -------------------------------------------------------------------------------- /tests/feature/test_background.py: -------------------------------------------------------------------------------- 1 | """Test feature background.""" 2 | 3 | from __future__ import annotations 4 | 5 | import textwrap 6 | 7 | FEATURE = '''\ 8 | Feature: Background support 9 | 10 | Background: 11 | Given foo has a value "bar" 12 | And a background step with docstring: 13 | """ 14 | one 15 | two 16 | """ 17 | 18 | 19 | Scenario: Basic usage 20 | Then foo should have value "bar" 21 | 22 | Scenario: Background steps are executed first 23 | Given foo has no value "bar" 24 | And foo has a value "dummy" 25 | 26 | Then foo should have value "dummy" 27 | And foo should not have value "bar" 28 | ''' 29 | 30 | STEPS = r"""\ 31 | import re 32 | import pytest 33 | from pytest_bdd import given, then, parsers 34 | 35 | @pytest.fixture 36 | def foo(): 37 | return {} 38 | 39 | 40 | @given("a background step with docstring:") 41 | def _(foo, docstring): 42 | assert docstring == "one\ntwo" 43 | 44 | 45 | @given('foo has a value "bar"') 46 | def _(foo): 47 | foo["bar"] = "bar" 48 | return foo["bar"] 49 | 50 | 51 | @given('foo has a value "dummy"') 52 | def _(foo): 53 | foo["dummy"] = "dummy" 54 | return foo["dummy"] 55 | 56 | 57 | @given('foo has no value "bar"') 58 | def _(foo): 59 | assert foo["bar"] 60 | del foo["bar"] 61 | 62 | 63 | @then('foo should have value "bar"') 64 | def _(foo): 65 | assert foo["bar"] == "bar" 66 | 67 | 68 | @then('foo should have value "dummy"') 69 | def _(foo): 70 | assert foo["dummy"] == "dummy" 71 | 72 | 73 | @then('foo should not have value "bar"') 74 | def _(foo): 75 | assert "bar" not in foo 76 | 77 | """ 78 | 79 | 80 | def test_background_basic(pytester): 81 | """Test feature background.""" 82 | pytester.makefile(".feature", background=textwrap.dedent(FEATURE)) 83 | 84 | 
def test_background_check_order(pytester):
    """Test feature background to ensure that background steps are executed first.

    The scenario deletes the value that only the background sets ("bar") and
    then installs its own ("dummy"); it can therefore only pass when the
    background Given steps ran before the scenario's own Given steps.
    """

    pytester.makefile(".feature", background=textwrap.dedent(FEATURE))

    pytester.makeconftest(textwrap.dedent(STEPS))

    pytester.makepyfile(
        textwrap.dedent(
            """\
            from pytest_bdd import scenario

            @scenario("background.feature", "Background steps are executed first")
            def test_background():
                pass

            """
        )
    )
    result = pytester.runpytest()
    result.assert_outcomes(passed=1)
def get_feature(base_path: str, filename: str, encoding: str = "utf-8") -> Feature:
    """Get a feature by the filename.

    :param str base_path: Base feature directory.
    :param str filename: Filename of the feature file.
    :param str encoding: Feature file encoding.

    :return: `Feature` instance from the parsed feature cache.

    :note: The features are parsed on the execution of the test and
        stored in the global variable cache to improve the performance
        when multiple scenarios are referencing the same file.
    """
    __tracebackhide__ = True
    full_name = os.path.abspath(os.path.join(base_path, filename))
    feature = features.get(full_name)
    # Explicit None check: the cache miss condition must not depend on the
    # truthiness of a parsed Feature object (a falsy Feature would otherwise
    # be re-parsed and re-stored on every call).
    if feature is None:
        feature = FeatureParser(base_path, filename, encoding).parse()
        features[full_name] = feature
    return feature
"""Compatibility shims for the range of pytest versions supported by pytest-bdd.

pytest 8.1 changed the fixture-manager internals these helpers touch
(``getfixturedefs`` takes a node instead of a node id, and fixture
registration goes through the private ``_register_fixture`` API), so each
helper is defined twice and the variant matching the installed pytest is
selected at import time.
"""

from __future__ import annotations

from collections.abc import Sequence
from importlib.metadata import version

from _pytest.fixtures import FixtureDef, FixtureManager, FixtureRequest
from _pytest.nodes import Node
from packaging.version import parse as parse_version

# Installed pytest version; used to choose between the branches below.
pytest_version = parse_version(version("pytest"))

__all__ = ["getfixturedefs", "inject_fixture"]

if pytest_version.release >= (8, 1):

    def getfixturedefs(
        fixturemanager: FixtureManager, fixturename: str, node: Node
    ) -> Sequence[FixtureDef[object]] | None:
        """Return the fixture definitions for ``fixturename`` visible from ``node``."""
        return fixturemanager.getfixturedefs(fixturename, node)

    def inject_fixture(request: FixtureRequest, arg: str, value: object) -> None:
        """Inject fixture into pytest fixture request.

        :param request: pytest fixture request
        :param arg: argument name
        :param value: argument value
        """
        # Ensure there's a fixture definition for the argument
        request._fixturemanager._register_fixture(
            name=arg,
            func=lambda: value,
            nodeid=request.node.nodeid,
        )
        # Note the fixture we just registered will have a lower priority
        # if there was already one registered, so we need to force its value
        # to the one we want to inject.
        fixture_def = request._get_active_fixturedef(arg)
        fixture_def.cached_result = (value, None, None)  # type: ignore

else:

    def getfixturedefs(
        fixturemanager: FixtureManager, fixturename: str, node: Node
    ) -> Sequence[FixtureDef[object]] | None:
        """Return the fixture definitions for ``fixturename`` visible from ``node``.

        Pre-8.1 pytest expects the node id, not the node object.
        """
        return fixturemanager.getfixturedefs(fixturename, node.nodeid)  # type: ignore

    def inject_fixture(request: FixtureRequest, arg: str, value: object) -> None:
        """Inject fixture into pytest fixture request.

        :param request: pytest fixture request
        :param arg: argument name
        :param value: argument value
        """
        # Hand-build a function-scoped FixtureDef holding the value.
        fd = FixtureDef(
            fixturemanager=request._fixturemanager,  # type: ignore
            baseid=None,
            argname=arg,
            func=lambda: value,
            scope="function",
            params=None,
        )
        fd.cached_result = (value, 0, None)

        # Remember what we are about to shadow so the finalizer can undo it.
        old_fd = request._fixture_defs.get(arg)
        add_fixturename = arg not in request.fixturenames

        def fin() -> None:
            # Undo the injection when the test finishes: drop our definition,
            # restore the previous one (if any), and remove the name from the
            # fixture closure again if we were the ones who added it.
            request._fixturemanager._arg2fixturedefs[arg].remove(fd)

            if old_fd is not None:
                request._fixture_defs[arg] = old_fd

            if add_fixturename:
                request._pyfuncitem._fixtureinfo.names_closure.remove(arg)

        request.addfinalizer(fin)

        # inject fixture definition
        request._fixturemanager._arg2fixturedefs.setdefault(arg, []).append(fd)

        # inject fixture value in request cache
        request._fixture_defs[arg] = fd
        if add_fixturename:
            request._pyfuncitem._fixtureinfo.names_closure.append(arg)
pytest 79 | from pytest_bdd import parsers, given, when, then, scenario 80 | 81 | @pytest.fixture 82 | def arguments(): 83 | return dict() 84 | 85 | 86 | @scenario("arguments.feature", "Argument in when") 87 | def test_arguments(): 88 | pass 89 | 90 | 91 | @given(parsers.parse("I have an argument {arg:Number}", extra_types=dict(Number=int))) 92 | def _(arguments, arg): 93 | arguments["arg"] = arg 94 | 95 | 96 | @when(parsers.parse("I get argument {arg:d}")) 97 | def _(arguments, arg): 98 | arguments["arg"] = arg 99 | 100 | 101 | @then(parsers.parse("My argument should be {arg:d}")) 102 | def _(arguments, arg): 103 | assert arguments["arg"] == arg 104 | 105 | """ 106 | ) 107 | ) 108 | result = pytester.runpytest() 109 | result.assert_outcomes(passed=1) 110 | -------------------------------------------------------------------------------- /tests/steps/test_unicode.py: -------------------------------------------------------------------------------- 1 | """Tests for testing cases when we have unicode in feature file.""" 2 | 3 | from __future__ import annotations 4 | 5 | import textwrap 6 | 7 | 8 | def test_steps_in_feature_file_have_unicode(pytester): 9 | pytester.makefile( 10 | ".feature", 11 | unicode=textwrap.dedent( 12 | """\ 13 | Feature: Юнікодні символи 14 | 15 | Scenario: Кроки в .feature файлі містять юнікод 16 | Given у мене є рядок який містить 'якийсь контент' 17 | Then I should see that the string equals to content 'якийсь контент' 18 | 19 | Scenario: Given names have unicode types 20 | Given I have an alias with a unicode type for foo 21 | Then foo should be "foo" 22 | """ 23 | ), 24 | ) 25 | 26 | pytester.makepyfile( 27 | textwrap.dedent( 28 | """\ 29 | import sys 30 | import pytest 31 | from pytest_bdd import parsers, given, then, scenario 32 | 33 | @scenario("unicode.feature", "Кроки в .feature файлі містять юнікод") 34 | def test_unicode(): 35 | pass 36 | 37 | @pytest.fixture 38 | def string(): 39 | return {"content": ""} 40 | 41 | 42 | 
"""Tests for testing cases when we have unicode in feature file."""

from __future__ import annotations

import textwrap


def test_steps_in_feature_file_have_unicode(pytester):
    """Unicode (Ukrainian) step text in the .feature file is matched by `parsers.parse`."""
    pytester.makefile(
        ".feature",
        unicode=textwrap.dedent(
            """\
            Feature: Юнікодні символи

                Scenario: Кроки в .feature файлі містять юнікод
                    Given у мене є рядок який містить 'якийсь контент'
                    Then I should see that the string equals to content 'якийсь контент'

                Scenario: Given names have unicode types
                    Given I have an alias with a unicode type for foo
                    Then foo should be "foo"
            """
        ),
    )

    # NOTE(review): only the first scenario is bound via @scenario in the generated
    # module below, so "Given names have unicode types" is never collected, and the
    # bare `given(...)` call there discards the decorator it returns (a no-op) —
    # confirm whether the second scenario was meant to be exercised.
    pytester.makepyfile(
        textwrap.dedent(
            """\
            import sys
            import pytest
            from pytest_bdd import parsers, given, then, scenario

            @scenario("unicode.feature", "Кроки в .feature файлі містять юнікод")
            def test_unicode():
                pass

            @pytest.fixture
            def string():
                return {"content": ""}


            @given(parsers.parse(u"у мене є рядок який містить '{content}'"))
            def _(content, string):
                string["content"] = content


            given(u"I have an alias with a unicode type for foo", target_fixture="foo")


            @then(parsers.parse("I should see that the string equals to content '{content}'"))
            def _(content, string):
                assert string["content"] == content
            """
        )
    )
    result = pytester.runpytest()
    result.assert_outcomes(passed=1)


def test_steps_in_py_file_have_unicode(pytester):
    """Step definitions registered from the .py file may themselves contain unicode text."""
    pytester.makefile(
        ".feature",
        unicode=textwrap.dedent(
            """\
            Feature: Юнікодні символи

                Scenario: Steps in .py file have unicode
                    Given there is an other string with content 'якийсь контент'
                    Then I should see that the other string equals to content 'якийсь контент'
            """
        ),
    )

    pytester.makepyfile(
        textwrap.dedent(
            """\
            import pytest
            from pytest_bdd import given, then, scenario

            @scenario("unicode.feature", "Steps in .py file have unicode")
            def test_unicode():
                pass

            @pytest.fixture
            def string():
                return {"content": ""}


            @given("there is an other string with content 'якийсь контент'")
            def _(string):
                string["content"] = u"с каким-то контентом"

            @then("I should see that the other string equals to content 'якийсь контент'")
            def _(string):
                assert string["content"] == u"с каким-то контентом"

            """
        )
    )
    result = pytester.runpytest()
    result.assert_outcomes(passed=1)
"""Step arguments tests."""

from __future__ import annotations

import textwrap


def test_every_step_takes_param_with_the_same_name(pytester):
    """Test every step takes param with the same name.

    Same as the `parsers.parse` variant, but using the cardinality-aware
    `parsers.cfparse` parser for every step.
    """
    pytester.makefile(
        ".feature",
        arguments=textwrap.dedent(
            """\
            Feature: Step arguments
                Scenario: Every step takes a parameter with the same name
                    Given I have 1 Euro
                    When I pay 2 Euro
                    And I pay 1 Euro
                    Then I should have 0 Euro
                    And I should have 999999 Euro

            """
        ),
    )

    pytester.makepyfile(
        textwrap.dedent(
            """\
            import pytest
            from pytest_bdd import parsers, given, when, then, scenario

            @scenario("arguments.feature", "Every step takes a parameter with the same name")
            def test_arguments():
                pass

            @pytest.fixture
            def values():
                return [1, 2, 1, 0, 999999]


            @given(parsers.cfparse("I have {euro:d} Euro"))
            def _(euro, values):
                assert euro == values.pop(0)


            @when(parsers.cfparse("I pay {euro:d} Euro"))
            def _(euro, values, request):
                assert euro == values.pop(0)


            @then(parsers.cfparse("I should have {euro:d} Euro"))
            def _(euro, values):
                assert euro == values.pop(0)

            """
        )
    )
    result = pytester.runpytest()
    result.assert_outcomes(passed=1)


def test_argument_in_when(pytester):
    """Test step arguments in when steps.

    Exercises `extra_types` so a custom `{arg:Number}` cfparse type converts to int.
    """
    pytester.makefile(
        ".feature",
        arguments=textwrap.dedent(
            """\
            Feature: Step arguments
                Scenario: Argument in when
                    Given I have an argument 1
                    When I get argument 5
                    Then My argument should be 5
            """
        ),
    )

    pytester.makepyfile(
        textwrap.dedent(
            """\
            import pytest
            from pytest_bdd import parsers, given, when, then, scenario

            @scenario("arguments.feature", "Argument in when")
            def test_arguments():
                pass


            @pytest.fixture
            def arguments():
                return dict()


            @given(parsers.cfparse("I have an argument {arg:Number}", extra_types=dict(Number=int)))
            def _(arguments, arg):
                arguments["arg"] = arg


            @when(parsers.cfparse("I get argument {arg:d}"))
            def _(arguments, arg):
                arguments["arg"] = arg


            @then(parsers.cfparse("My argument should be {arg:d}"))
            def _(arguments, arg):
                assert arguments["arg"] == arg

            """
        )
    )
    result = pytester.runpytest()
    result.assert_outcomes(passed=1)
"""Step parsers."""

from __future__ import annotations

import abc
import re as base_re
from typing import Any, TypeVar, cast, overload

import parse as base_parse
from parse_type import cfparse as base_cfparse


class StepParser(abc.ABC):
    """Parser of the individual step."""

    def __init__(self, name: str) -> None:
        self.name = name

    @abc.abstractmethod
    def parse_arguments(self, name: str) -> dict[str, Any] | None:
        """Get step arguments from the given step name.

        :return: `dict` of step arguments, or None if the name does not match.
        """
        ...

    @abc.abstractmethod
    def is_matching(self, name: str) -> bool:
        """Match given name with the step name."""
        ...


class re(StepParser):
    """Regex step parser."""

    def __init__(self, name: str, *args: Any, **kwargs: Any) -> None:
        """Compile regex."""
        super().__init__(name)
        self.regex = base_re.compile(self.name, *args, **kwargs)

    def parse_arguments(self, name: str) -> dict[str, str] | None:
        """Get step arguments.

        :return: `dict` of step arguments, or None if the name does not match.
        """
        match = self.regex.fullmatch(name)
        if match is None:
            return None
        return match.groupdict()

    def is_matching(self, name: str) -> bool:
        """Match given name with the step name."""
        return bool(self.regex.fullmatch(name))


class parse(StepParser):
    """parse step parser."""

    def __init__(self, name: str, *args: Any, **kwargs: Any) -> None:
        """Compile parse expression."""
        super().__init__(name)
        self.parser = base_parse.compile(self.name, *args, **kwargs)

    def parse_arguments(self, name: str) -> dict[str, Any] | None:
        """Get step arguments.

        :return: `dict` of step arguments, or None if the name does not match.
        """
        # `Parser.parse` returns None when the name does not match; previously
        # this raised AttributeError on `.named`. Return None instead, matching
        # the StepParser contract and the behavior of the `re` parser.
        match = self.parser.parse(name)
        if match is None:
            return None
        return cast(dict[str, Any], match.named)

    def is_matching(self, name: str) -> bool:
        """Match given name with the step name."""
        try:
            return bool(self.parser.parse(name))
        except ValueError:
            return False


class cfparse(parse):
    """cfparse step parser."""

    def __init__(self, name: str, *args: Any, **kwargs: Any) -> None:
        """Compile parse expression."""
        # Deliberately skip `parse.__init__` (it would compile a plain parse
        # matcher); initialize StepParser and build a cardinality-aware parser.
        super(parse, self).__init__(name)
        self.parser = base_cfparse.Parser(self.name, *args, **kwargs)


class string(StepParser):
    """Exact string step parser."""

    def parse_arguments(self, name: str) -> dict[str, Any]:
        """No parameters are available for simple string step.

        :return: `dict` of step arguments (always empty).
        """
        return {}

    def is_matching(self, name: str) -> bool:
        """Match given name with the step name."""
        return self.name == name


TStepParser = TypeVar("TStepParser", bound=StepParser)


@overload
def get_parser(step_name: str) -> string: ...


@overload
def get_parser(step_name: TStepParser) -> TStepParser: ...


def get_parser(step_name: str | StepParser) -> StepParser:
    """Get parser by given name.

    Plain strings are wrapped in the exact-match `string` parser; anything that
    is already a StepParser is returned unchanged.
    """

    if isinstance(step_name, StepParser):
        return step_name

    return string(step_name)
112 | 113 | 114 | def get_parser(step_name: str | StepParser) -> StepParser: 115 | """Get parser by given name.""" 116 | 117 | if isinstance(step_name, StepParser): 118 | return step_name 119 | 120 | return string(step_name) 121 | -------------------------------------------------------------------------------- /tests/args/test_common.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import textwrap 4 | 5 | from pytest_bdd.utils import collect_dumped_objects 6 | 7 | 8 | def test_reuse_same_step_different_converters(pytester): 9 | pytester.makefile( 10 | ".feature", 11 | arguments=textwrap.dedent( 12 | """\ 13 | Feature: Reuse same step with different converters 14 | Scenario: Step function should be able to be decorated multiple times with different converters 15 | Given I have a foo with int value 42 16 | And I have a foo with str value 42 17 | And I have a foo with float value 42 18 | When pass 19 | Then pass 20 | """ 21 | ), 22 | ) 23 | 24 | pytester.makepyfile( 25 | textwrap.dedent( 26 | r""" 27 | import pytest 28 | from pytest_bdd import parsers, given, when, then, scenarios 29 | from pytest_bdd.utils import dump_obj 30 | 31 | scenarios("arguments.feature") 32 | 33 | @given(parsers.re(r"^I have a foo with int value (?P.*?)$"), converters={"value": int}) 34 | @given(parsers.re(r"^I have a foo with str value (?P.*?)$"), converters={"value": str}) 35 | @given(parsers.re(r"^I have a foo with float value (?P.*?)$"), converters={"value": float}) 36 | def _(value): 37 | dump_obj(value) 38 | return value 39 | 40 | 41 | @then("pass") 42 | @when("pass") 43 | def _(): 44 | pass 45 | """ 46 | ) 47 | ) 48 | result = pytester.runpytest("-s") 49 | result.assert_outcomes(passed=1) 50 | 51 | [int_value, str_value, float_value] = collect_dumped_objects(result) 52 | assert type(int_value) is int 53 | assert int_value == 42 54 | 55 | assert type(str_value) is str 56 | assert str_value == "42" 57 | 58 
| assert type(float_value) is float 59 | assert float_value == 42.0 60 | 61 | 62 | def test_string_steps_dont_take_precedence(pytester): 63 | """Test that normal steps don't take precedence over the other steps.""" 64 | pytester.makefile( 65 | ".feature", 66 | arguments=textwrap.dedent( 67 | """\ 68 | Feature: Step precedence 69 | Scenario: String steps don't take precedence over other steps 70 | Given I have a foo with value 42 71 | When pass 72 | Then pass 73 | """ 74 | ), 75 | ) 76 | pytester.makeconftest( 77 | textwrap.dedent( 78 | """ 79 | from pytest_bdd import given, when, then, parsers 80 | from pytest_bdd.utils import dump_obj 81 | 82 | 83 | @given("I have a foo with value 42") 84 | def _(): 85 | dump_obj("str") 86 | return 42 87 | 88 | 89 | @then("pass") 90 | @when("pass") 91 | def _(): 92 | pass 93 | """ 94 | ) 95 | ) 96 | 97 | pytester.makepyfile( 98 | textwrap.dedent( 99 | r""" 100 | import pytest 101 | from pytest_bdd import parsers, given, when, then, scenarios 102 | from pytest_bdd.utils import dump_obj 103 | 104 | scenarios("arguments.feature") 105 | 106 | @given(parsers.re(r"^I have a foo with value (?P.*?)$")) 107 | def _(value): 108 | dump_obj("re") 109 | return 42 110 | 111 | """ 112 | ) 113 | ) 114 | result = pytester.runpytest("-s") 115 | result.assert_outcomes(passed=1) 116 | 117 | [which] = collect_dumped_objects(result) 118 | assert which == "re" 119 | -------------------------------------------------------------------------------- /src/pytest_bdd/utils.py: -------------------------------------------------------------------------------- 1 | """Various utility functions.""" 2 | 3 | from __future__ import annotations 4 | 5 | import base64 6 | import pickle 7 | import re 8 | from inspect import getframeinfo, signature 9 | from sys import _getframe 10 | from typing import TYPE_CHECKING, Callable, TypeVar, cast, overload 11 | from weakref import WeakKeyDictionary 12 | 13 | if TYPE_CHECKING: 14 | from _pytest.config import Config 15 | from 
"""Various utility functions."""

from __future__ import annotations

import base64
import pickle
import re
from inspect import getframeinfo, signature
from sys import _getframe
from typing import TYPE_CHECKING, Callable, TypeVar, cast, overload
from weakref import WeakKeyDictionary

if TYPE_CHECKING:
    from _pytest.config import Config
    from _pytest.pytester import RunResult

T = TypeVar("T")
K = TypeVar("K")
V = TypeVar("V")

# Stack of active pytest Config objects, pushed/popped by the plugin hooks.
CONFIG_STACK: list[Config] = []


def get_required_args(func: Callable[..., object]) -> list[str]:
    """Get a list of argument that are required for a function.

    :param func: The function to inspect.

    :return: A list of argument names.
    """
    required: list[str] = []
    for param in signature(func).parameters.values():
        if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty:
            required.append(param.name)
    return required


def get_caller_module_locals(stacklevel: int = 1) -> dict[str, object]:
    """Get the caller module locals dictionary.

    We use sys._getframe instead of inspect.stack(0) because the latter is way slower, since it iterates over
    all the frames in the stack.
    """
    caller_frame = _getframe(stacklevel + 1)
    return caller_frame.f_locals


def get_caller_module_path(depth: int = 2) -> str:
    """Get the caller module path.

    We use sys._getframe instead of inspect.stack(0) because the latter is way slower, since it iterates over
    all the frames in the stack.
    """
    return getframeinfo(_getframe(depth), context=0).filename


# Markers wrapping each pickled payload emitted by `dump_obj`.
_DUMP_START = "_pytest_bdd_>>>"
_DUMP_END = "<<<_pytest_bdd_"


def dump_obj(*objects: object) -> None:
    """Dump objects to stdout so that they can be inspected by the test suite."""
    for obj in objects:
        payload = base64.b64encode(pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL)).decode("ascii")
        print(f"{_DUMP_START}{payload}{_DUMP_END}")


def collect_dumped_objects(result: RunResult) -> list:
    """Parse all the objects dumped with `dump_object` from the result.

    Note: You must run the result with output to stdout enabled.
    For example, using ``pytester.runpytest("-s")``.
    """
    output = str(result.stdout)
    return [
        pickle.loads(base64.b64decode(payload))
        for payload in re.findall(rf"{_DUMP_START}(.*?){_DUMP_END}", output)
    ]


def setdefault(obj: object, name: str, default: T) -> T:
    """Just like dict.setdefault, but for objects."""
    missing = object()
    current = getattr(obj, name, missing)
    if current is missing:
        setattr(obj, name, default)
        return default
    return cast(T, current)


def identity(x: T) -> T:
    """Return the argument."""
    return x


@overload
def registry_get_safe(registry: WeakKeyDictionary[K, V], key: object, default: T) -> V | T: ...
@overload
def registry_get_safe(registry: WeakKeyDictionary[K, V], key: object, default: None = None) -> V | None: ...


def registry_get_safe(registry: WeakKeyDictionary[K, V], key: object, default: T | None = None) -> T | V | None:
    """Get a value from a registry, or None if the key is not in the registry.

    It ensures that this works even if the key cannot be weak-referenced (normally this would raise a TypeError).
    """
    try:
        return registry.get(key, default)  # type: ignore[arg-type]
    except TypeError:
        # Non-weak-referenceable keys can never have been stored in the registry.
        return None
102 | """ 103 | try: 104 | return registry.get(key, default) # type: ignore[arg-type] 105 | except TypeError: 106 | return None 107 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pytest-bdd" 3 | version = "8.1.0" 4 | description = "BDD for pytest" 5 | authors = [ 6 | {name="Oleg Pidsadnyi", email="oleg.pidsadnyi@gmail.com"}, 7 | {name="Anatoly Bubenkov", email="bubenkoff@gmail.com"}, 8 | ] 9 | maintainers = [ 10 | {name="Alessio Bogon", email="778703+youtux@users.noreply.github.com"}, 11 | ] 12 | license = "MIT" 13 | readme = "README.rst" 14 | classifiers = [ 15 | "Development Status :: 6 - Mature", 16 | "Intended Audience :: Developers", 17 | "License :: OSI Approved :: MIT License", 18 | "Operating System :: POSIX", 19 | "Operating System :: Microsoft :: Windows", 20 | "Operating System :: MacOS :: MacOS X", 21 | "Topic :: Software Development :: Testing", 22 | "Topic :: Software Development :: Libraries", 23 | "Topic :: Utilities", 24 | "Programming Language :: Python :: 3", 25 | "Programming Language :: Python :: 3.9", 26 | "Programming Language :: Python :: 3.10", 27 | "Programming Language :: Python :: 3.11", 28 | "Programming Language :: Python :: 3.12", 29 | "Programming Language :: Python :: 3.13", 30 | ] 31 | requires-python = ">=3.9" 32 | dependencies = [ 33 | "Mako", 34 | "parse", 35 | "parse-type", 36 | "pytest>=7.0.0", 37 | "typing-extensions", 38 | "packaging", 39 | "gherkin-official>=29.0.0", 40 | ] 41 | 42 | [project.urls] 43 | homepage = "https://pytest-bdd.readthedocs.io/" 44 | documentation = "https://pytest-bdd.readthedocs.io/" 45 | repository = "https://github.com/pytest-dev/pytest-bdd" 46 | 47 | [project.entry-points."pytest11"] 48 | "pytest-bdd" = "pytest_bdd.plugin" 49 | 50 | [project.scripts] 51 | "pytest-bdd" = "pytest_bdd.scripts:main" 52 | 53 | 
[tool.poetry.group.dev.dependencies] 54 | tox = ">=4.11.3" 55 | mypy = ">=1.6.0" 56 | types-setuptools = ">=68.2.0.0" 57 | pytest-xdist = ">=3.3.1" 58 | coverage = {extras = ["toml"], version = ">=6.5.0"} 59 | Pygments = ">=2.13.0" # for code-block highlighting 60 | sphinx = "*" 61 | sphinx-autobuild = "*" 62 | 63 | [tool.poetry.group.build.dependencies] 64 | build = "*" 65 | twine = "*" 66 | 67 | [build-system] 68 | requires = ["poetry-core==2.2.1"] 69 | build-backend = "poetry.core.masonry.api" 70 | 71 | [tool.ruff] 72 | line-length = 120 73 | target-version = "py39" 74 | lint.select = [ 75 | "B", # flake8-bugbear 76 | "BLE", # flake8-blind-except 77 | "C4", # flake8-comprehensions 78 | "E4", # pycodestyle - error - import 79 | "E7", # pycodestyle - error - statement 80 | "E9", # pycodestyle - error - runtime 81 | "F", # pyflakes 82 | "I", # isort 83 | "ISC", # flake8-implicit-str-concat 84 | "PERF", # perflint 85 | "UP", # pyupgrade 86 | ] 87 | lint.ignore = [ 88 | # Covered by formatter 89 | "ISC001" # single-line-implicit-string-concatenation 90 | ] 91 | lint.isort.required-imports = [ 92 | "from __future__ import annotations", 93 | ] 94 | 95 | [tool.coverage.report] 96 | exclude_lines = [ 97 | "if TYPE_CHECKING:", 98 | "if typing\\.TYPE_CHECKING:", 99 | ] 100 | [tool.coverage.html] 101 | show_contexts = true 102 | 103 | [tool.coverage.run] 104 | branch = true 105 | # `parallel` will cause each tox env to put data into a different file, so that we can combine them later 106 | parallel = true 107 | source = ["pytest_bdd", "tests"] 108 | dynamic_context = "test_function" 109 | 110 | [tool.coverage.paths] 111 | # treat these directories as the same when combining 112 | # the first item is going to be the canonical dir 113 | source = [ 114 | "src/pytest_bdd", 115 | ".tox/*/lib/python*/site-packages/pytest_bdd", 116 | ] 117 | 118 | 119 | [tool.mypy] 120 | python_version = "3.9" 121 | warn_return_any = true 122 | warn_unused_configs = true 123 | files = 
# This is a comment
Feature: User login

    As a registered user
    I want to be able to log in
    So that I can access my account

    Background:
        # Background steps run before each scenario
        Given the login page is open

    # Scenario within the rule
    Scenario: Successful login with valid credentials
        Given the user enters a valid username
        And the user enters a valid password
        When the user clicks the login button
        Then the user should see the dashboard

    Scenario Outline: Unsuccessful login with invalid credentials
        Given the user enters "<username>" as username
        And the user enters "<password>" as password
        When the user clicks the login button
        Then the user should see an error message "<error_message>"

        # Examples table provides data for the scenario outline
        Examples:
            | username    | password  | error_message                |
            | invalidUser | wrongPass | Invalid username or password |
            | user123     | incorrect | Invalid username or password |

    Scenario: Login with empty username
        Given the user enters an empty username
        And the user enters a valid password
        When the user clicks the login button
        Then the user should see an error message "Username cannot be empty"

    Scenario: Login with empty password
        Given the user enters a valid username
        And the user enters an empty password
        When the user clicks the login button
        Then the user should see an error message "Password cannot be empty"

    Scenario: Login with SQL injection attempt
        Given the user enters "admin' OR '1'='1" as username
        And the user enters "password" as password
        When the user clicks the login button
        Then the user should see an error message "Invalid username or password"

    @login @critical
    Scenario: Login button disabled for empty fields
        Given the user has not entered any username or password
        Then the login button should be disabled

    # Tags can be used to categorize scenarios
    @smoke
    Scenario: Login page loads correctly
        Given the login page is loaded
        Then the login form should be visible

    # Using Data Tables for more complex data
    Scenario: Login with multiple sets of credentials
        Given the following users are registered:
            | username | password |
            | user1    | pass1    |
            | user2    | pass2    |
            | user3    | pass3    |
        When the user tries to log in with the following credentials:
            | username | password  |
            | user1    | pass1     |
            | user2    | wrongPass |
        Then the login attempts should result in:
            | username | result  |
            | user1    | success |
            | user2    | failure |

    # Using Doc Strings for multi-line text
    Scenario: Check login error message with detailed explanation
        Given the user enters invalid credentials
        When the user clicks the login button
        Then the user should see the following error message:
            """
            Your login attempt was unsuccessful.
            Please check your username and password and try again.
            If the problem persists, contact support.
            """

    @some-tag
    Rule: a sale cannot happen if there is no stock
        # Unhappy path
        Example: No chocolates left
            Given the customer has 100 cents
            And there are no chocolate bars in stock
            When the customer tries to buy a 1 cent chocolate bar
            Then the sale should not happen

    Rule: A sale cannot happen if the customer does not have enough money
        # Unhappy path
        Example: Not enough money
            Given the customer has 100 cents
            And there are chocolate bars in stock
            When the customer tries to buy a 125 cent chocolate bar
            Then the sale should not happen

        # Happy path
        Example: Enough money
            Given the customer has 100 cents
            And there are chocolate bars in stock
            When the customer tries to buy a 75 cent chocolate bar
            Then the sale should happen
from __future__ import annotations

import textwrap


def test_rule_example_format(pytester):
    """Rules with their own Backgrounds and Examples are collected and run.

    The feature Background runs before every example (5 runs), while the
    Addition rule's Background runs only for that rule's two examples.
    """
    pytester.makefile(
        ".feature",
        rule_example=textwrap.dedent(
            """\
            Feature: Calculator

                In order to perform basic arithmetic operations
                As a user
                I want to use a calculator

                Background:
                    Given I have got my calculator ready

                Scenario: I check the calculator powers on
                    Given I press the power button
                    Then the screen turns on

                Rule: Addition
                    In order to add two numbers
                    As a user, I want the calculator to give me the sum.

                    Background:
                        Given I check the add button is working

                    Example: Adding two positive numbers
                        Given the first number is 3
                        And the second number is 5
                        When I press add
                        Then the result should be 8

                    Example: Adding a positive number and a negative number
                        Given the first number is 7
                        And the second number is -2
                        When I press add
                        Then the result should be 5

                Rule: Subtraction
                    In order to subtract one number from another
                    As a user, I want the calculator to give me the difference.

                    Example: Subtracting a smaller number from a larger number
                        Given the first number is 10
                        And the second number is 4
                        When I press subtract
                        Then the result should be 6

                    Example: Subtracting a larger number from a smaller number
                        Given the first number is 3
                        And the second number is 7
                        When I press subtract
                        Then the result should be -4
            """
        ),
    )

    pytester.makepyfile(
        textwrap.dedent(
            """\
            import pytest
            from pytest_bdd import given, when, then, parsers, scenarios


            scenarios("rule_example.feature")


            @given("I have got my calculator ready")
            def _():
                print("Calculator ready!")

            @given("I check the add button is working")
            def _():
                print("Add button check.")

            @given("I press the power button")
            def _():
                pass

            @then("the screen turns on")
            def _():
                pass

            @given(parsers.parse("the first number is {first_number:d}"), target_fixture="first_number")
            def _(first_number):
                return first_number

            @given(parsers.parse("the second number is {second_number:d}"), target_fixture="second_number")
            def _(second_number):
                return second_number

            @when("I press add", target_fixture="result")
            def _(first_number, second_number):
                return first_number + second_number

            @when("I press subtract", target_fixture="result")
            def _(first_number, second_number):
                return first_number - second_number

            @then(parsers.parse("the result should be {expected_result:d}"))
            def _(result, expected_result):
                assert result == expected_result
            """
        )
    )
    # "-s" so the print() calls from the Background steps reach stdout.
    result = pytester.runpytest("-s")
    result.assert_outcomes(passed=5)

    def get_line_partial_match_count(pattern: str):
        # Count stdout lines containing `pattern` (substring match, not regex).
        return len([line for line in result.stdout.lines if pattern in line])

    assert get_line_partial_match_count("Calculator ready!") == 5
    assert get_line_partial_match_count("Add button check.") == 2
import cucumber_json, generation, gherkin_terminal_reporter, given, reporting, then, when 12 | from .utils import CONFIG_STACK 13 | 14 | if TYPE_CHECKING: 15 | from _pytest.config import Config, PytestPluginManager 16 | from _pytest.config.argparsing import Parser 17 | from _pytest.fixtures import FixtureRequest 18 | from _pytest.nodes import Item 19 | from _pytest.runner import CallInfo 20 | from pluggy._result import _Result 21 | 22 | from .parser import Feature, Scenario, Step 23 | 24 | 25 | P = ParamSpec("P") 26 | T = TypeVar("T") 27 | 28 | 29 | def pytest_addhooks(pluginmanager: PytestPluginManager) -> None: 30 | """Register plugin hooks.""" 31 | from pytest_bdd import hooks 32 | 33 | pluginmanager.add_hookspecs(hooks) 34 | 35 | 36 | @given("trace") 37 | @when("trace") 38 | @then("trace") 39 | def _() -> None: 40 | """Enter pytest's pdb trace.""" 41 | pytest.set_trace() 42 | 43 | 44 | @pytest.fixture 45 | def _pytest_bdd_example() -> dict: 46 | """The current scenario outline parametrization. 47 | 48 | This is used internally by pytest_bdd. 49 | 50 | If no outline is used, we just return an empty dict to render 51 | the current template without any actual variable. 52 | Otherwise, pytest_bdd will add all the context variables in this fixture 53 | from the example definitions in the feature file. 
54 | """ 55 | return {} 56 | 57 | 58 | def pytest_addoption(parser: Parser) -> None: 59 | """Add pytest-bdd options.""" 60 | add_bdd_ini(parser) 61 | cucumber_json.add_options(parser) 62 | generation.add_options(parser) 63 | gherkin_terminal_reporter.add_options(parser) 64 | 65 | 66 | def add_bdd_ini(parser: Parser) -> None: 67 | parser.addini("bdd_features_base_dir", "Base features directory.") 68 | 69 | 70 | @pytest.hookimpl(trylast=True) 71 | def pytest_configure(config: Config) -> None: 72 | """Configure all subplugins.""" 73 | CONFIG_STACK.append(config) 74 | cucumber_json.configure(config) 75 | gherkin_terminal_reporter.configure(config) 76 | 77 | 78 | def pytest_unconfigure(config: Config) -> None: 79 | """Unconfigure all subplugins.""" 80 | if CONFIG_STACK: 81 | CONFIG_STACK.pop() 82 | cucumber_json.unconfigure(config) 83 | 84 | 85 | @pytest.hookimpl(hookwrapper=True) 86 | def pytest_runtest_makereport(item: Item, call: CallInfo) -> Generator[None, _Result, None]: 87 | outcome = yield 88 | reporting.runtest_makereport(item, call, outcome.get_result()) 89 | 90 | 91 | @pytest.hookimpl(tryfirst=True) 92 | def pytest_bdd_before_scenario(request: FixtureRequest, feature: Feature, scenario: Scenario) -> None: 93 | reporting.before_scenario(request, feature, scenario) 94 | 95 | 96 | @pytest.hookimpl(tryfirst=True) 97 | def pytest_bdd_step_error( 98 | request: FixtureRequest, 99 | feature: Feature, 100 | scenario: Scenario, 101 | step: Step, 102 | step_func: Callable[..., object], 103 | step_func_args: dict[str, object], 104 | exception: Exception, 105 | ) -> None: 106 | reporting.step_error(request, feature, scenario, step, step_func, step_func_args, exception) 107 | 108 | 109 | @pytest.hookimpl(tryfirst=True) 110 | def pytest_bdd_before_step( 111 | request: FixtureRequest, 112 | feature: Feature, 113 | scenario: Scenario, 114 | step: Step, 115 | step_func: Callable[..., object], 116 | ) -> None: 117 | reporting.before_step(request, feature, scenario, step, 
step_func) 118 | 119 | 120 | @pytest.hookimpl(tryfirst=True) 121 | def pytest_bdd_after_step( 122 | request: FixtureRequest, 123 | feature: Feature, 124 | scenario: Scenario, 125 | step: Step, 126 | step_func: Callable[..., object], 127 | step_func_args: dict[str, object], 128 | ) -> None: 129 | reporting.after_step(request, feature, scenario, step, step_func, step_func_args) 130 | 131 | 132 | def pytest_cmdline_main(config: Config) -> int | None: 133 | return generation.cmdline_main(config) 134 | 135 | 136 | def pytest_bdd_apply_tag(tag: str, function: Callable[P, T]) -> Callable[P, T]: 137 | mark = getattr(pytest.mark, tag) 138 | marked = mark(function) 139 | return cast(Callable[P, T], marked) 140 | -------------------------------------------------------------------------------- /tests/test_hooks.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import textwrap 4 | 5 | from pytest_bdd.utils import collect_dumped_objects 6 | 7 | 8 | def test_conftest_module_evaluated_twice(pytester): 9 | """Regression test for https://github.com/pytest-dev/pytest-bdd/issues/62""" 10 | pytester.makeconftest("") 11 | 12 | subdir = pytester.mkpydir("subdir") 13 | subdir.joinpath("conftest.py").write_text( 14 | textwrap.dedent( 15 | r""" 16 | def pytest_pyfunc_call(pyfuncitem): 17 | print('\npytest_pyfunc_call hook') 18 | 19 | def pytest_generate_tests(metafunc): 20 | print('\npytest_generate_tests hook') 21 | """ 22 | ) 23 | ) 24 | 25 | subdir.joinpath("test_foo.py").write_text( 26 | textwrap.dedent( 27 | r""" 28 | from pytest_bdd import scenario 29 | 30 | @scenario('foo.feature', 'Some scenario') 31 | def test_foo(): 32 | pass 33 | """ 34 | ) 35 | ) 36 | 37 | subdir.joinpath("foo.feature").write_text( 38 | textwrap.dedent( 39 | r""" 40 | Feature: The feature 41 | Scenario: Some scenario 42 | """ 43 | ) 44 | ) 45 | 46 | result = pytester.runpytest("-s") 47 | 48 | assert 
result.stdout.lines.count("pytest_pyfunc_call hook") == 1 49 | assert result.stdout.lines.count("pytest_generate_tests hook") == 1 50 | 51 | 52 | def test_item_collection_does_not_break_on_non_function_items(pytester): 53 | """Regression test for https://github.com/pytest-dev/pytest-bdd/issues/317""" 54 | pytester.makeconftest( 55 | """ 56 | import pytest 57 | 58 | @pytest.mark.tryfirst 59 | def pytest_collection_modifyitems(session, config, items): 60 | try: 61 | item_creator = CustomItem.from_parent # Only available in pytest >= 5.4.0 62 | except AttributeError: 63 | item_creator = CustomItem 64 | 65 | items[:] = [item_creator(name=item.name, parent=item.parent) for item in items] 66 | 67 | class CustomItem(pytest.Item): 68 | def runtest(self): 69 | assert True 70 | """ 71 | ) 72 | 73 | pytester.makepyfile( 74 | """ 75 | def test_convert_me_to_custom_item_and_assert_true(): 76 | assert False 77 | """ 78 | ) 79 | 80 | result = pytester.runpytest() 81 | result.assert_outcomes(passed=1) 82 | 83 | 84 | def test_pytest_bdd_after_scenario_called_after_scenario(pytester): 85 | """Regression test for https://github.com/pytest-dev/pytest-bdd/pull/577""" 86 | 87 | pytester.makefile( 88 | ".feature", 89 | foo=textwrap.dedent( 90 | """\ 91 | Feature: A feature 92 | Scenario: Scenario 1 93 | Given foo 94 | When bar 95 | Then baz 96 | 97 | Scenario: Scenario 2 98 | When bar 99 | Then baz 100 | """ 101 | ), 102 | ) 103 | 104 | pytester.makepyfile( 105 | """ 106 | import pytest 107 | from pytest_bdd import given, when, then, scenarios 108 | 109 | 110 | scenarios("foo.feature") 111 | 112 | 113 | @given("foo") 114 | @when("bar") 115 | @then("baz") 116 | def _(): 117 | pass 118 | """ 119 | ) 120 | 121 | pytester.makeconftest( 122 | """ 123 | from pytest_bdd.utils import dump_obj 124 | 125 | def pytest_bdd_after_scenario(request, feature, scenario): 126 | dump_obj([feature, scenario]) 127 | """ 128 | ) 129 | 130 | result = pytester.runpytest("-s") 131 | 
result.assert_outcomes(passed=2) 132 | 133 | hook_calls = collect_dumped_objects(result) 134 | assert len(hook_calls) == 2 135 | [(feature, scenario_1), (feature_2, scenario_2)] = hook_calls 136 | assert feature.name == feature_2.name == "A feature" 137 | 138 | assert scenario_1.name == "Scenario 1" 139 | assert scenario_2.name == "Scenario 2" 140 | 141 | 142 | def test_pytest_unconfigure_without_configure(pytester): 143 | """ 144 | Simulate a plugin forcing an exit during configuration before bdd is configured 145 | https://github.com/pytest-dev/pytest-bdd/issues/362 146 | """ 147 | pytester.makeconftest( 148 | """ 149 | import pytest 150 | 151 | def pytest_configure(config): 152 | pytest.exit("Exit during configure", 0) 153 | """ 154 | ) 155 | 156 | result = pytester.runpytest() 157 | assert result.ret == 0 158 | -------------------------------------------------------------------------------- /tests/feature/test_feature_base_dir.py: -------------------------------------------------------------------------------- 1 | """Test feature base dir.""" 2 | 3 | from __future__ import annotations 4 | 5 | import os 6 | 7 | import pytest 8 | 9 | NOT_EXISTING_FEATURE_PATHS = [".", "/does/not/exist/"] 10 | 11 | 12 | @pytest.mark.parametrize("base_dir", NOT_EXISTING_FEATURE_PATHS) 13 | def test_feature_path_not_found(pytester, base_dir): 14 | """Test feature base dir.""" 15 | prepare_testdir(pytester, base_dir) 16 | 17 | result = pytester.runpytest("-k", "test_not_found_by_ini") 18 | result.assert_outcomes(passed=2) 19 | 20 | 21 | def test_feature_path_ok(pytester): 22 | base_dir = "features" 23 | prepare_testdir(pytester, base_dir) 24 | 25 | result = pytester.runpytest("-k", "test_ok_by_ini") 26 | result.assert_outcomes(passed=2) 27 | 28 | 29 | def test_feature_path_ok_running_outside_rootdir(pytester): 30 | base_dir = "features" 31 | prepare_testdir(pytester, base_dir) 32 | 33 | old_dir = os.getcwd() 34 | os.chdir("/") 35 | try: 36 | result = pytester.runpytest(pytester.path, 
"-k", "test_ok_by_ini") 37 | result.assert_outcomes(passed=2) 38 | finally: 39 | os.chdir(old_dir) 40 | 41 | 42 | def test_feature_path_by_param_not_found(pytester): 43 | """As param takes precedence even if ini config is correct it should fail 44 | if passed param is incorrect""" 45 | base_dir = "features" 46 | prepare_testdir(pytester, base_dir) 47 | 48 | result = pytester.runpytest("-k", "test_not_found_by_param") 49 | result.assert_outcomes(passed=4) 50 | 51 | 52 | @pytest.mark.parametrize("base_dir", NOT_EXISTING_FEATURE_PATHS) 53 | def test_feature_path_by_param_ok(pytester, base_dir): 54 | """If ini config is incorrect but param path is fine it should be able 55 | to find features""" 56 | prepare_testdir(pytester, base_dir) 57 | 58 | result = pytester.runpytest("-k", "test_ok_by_param") 59 | result.assert_outcomes(passed=2) 60 | 61 | 62 | def prepare_testdir(pytester, ini_base_dir): 63 | pytester.makeini( 64 | f""" 65 | [pytest] 66 | bdd_features_base_dir={ini_base_dir} 67 | """ 68 | ) 69 | 70 | feature_file = pytester.mkdir("features").joinpath("steps.feature") 71 | feature_file.write_text( 72 | """ 73 | Feature: Feature path 74 | Scenario: When scenario found 75 | Given found 76 | """ 77 | ) 78 | 79 | pytester.makepyfile( 80 | f""" 81 | import os.path 82 | 83 | import pytest 84 | 85 | from pytest_bdd import scenario, scenarios 86 | 87 | FEATURE = 'steps.feature' 88 | 89 | 90 | @pytest.fixture(params=[ 91 | 'When scenario found', 92 | ]) 93 | def scenario_name(request): 94 | return request.param 95 | 96 | 97 | @pytest.mark.parametrize( 98 | 'multiple', [True, False] 99 | ) 100 | def test_not_found_by_ini(scenario_name, multiple): 101 | with pytest.raises(IOError) as exc: 102 | if multiple: 103 | scenarios(FEATURE) 104 | else: 105 | scenario(FEATURE, scenario_name) 106 | assert os.path.abspath(os.path.join('{ini_base_dir}', FEATURE)) in str(exc.value) 107 | 108 | 109 | @pytest.mark.parametrize( 110 | 'multiple', [True, False] 111 | ) 112 | def 
test_ok_by_ini(scenario_name, multiple): 113 | # Shouldn't raise any exception 114 | if multiple: 115 | scenarios(FEATURE) 116 | else: 117 | scenario(FEATURE, scenario_name) 118 | 119 | 120 | @pytest.mark.parametrize( 121 | 'multiple', [True, False] 122 | ) 123 | @pytest.mark.parametrize( 124 | 'param_base_dir', [ 125 | '.', 126 | '/does/not/exist/', 127 | ] 128 | ) 129 | def test_not_found_by_param(scenario_name, param_base_dir, multiple): 130 | with pytest.raises(IOError) as exc: 131 | if multiple: 132 | scenarios(FEATURE, features_base_dir=param_base_dir) 133 | else: 134 | scenario(FEATURE, scenario_name, features_base_dir=param_base_dir) 135 | assert os.path.abspath(os.path.join(param_base_dir, FEATURE)) in str(exc.value) 136 | 137 | 138 | @pytest.mark.parametrize( 139 | 'multiple', [True, False] 140 | ) 141 | def test_ok_by_param(scenario_name, multiple): 142 | # Shouldn't raise any exception no matter of bdd_features_base_dir in ini 143 | if multiple: 144 | scenarios(FEATURE, features_base_dir='features') 145 | else: 146 | scenario(FEATURE, scenario_name, features_base_dir='features') 147 | 148 | """ 149 | ) 150 | -------------------------------------------------------------------------------- /tests/generation/test_generate_missing.py: -------------------------------------------------------------------------------- 1 | """Code generation and assertion tests.""" 2 | 3 | from __future__ import annotations 4 | 5 | import itertools 6 | import textwrap 7 | 8 | from pytest_bdd.scenario import get_python_name_generator 9 | 10 | 11 | def test_python_name_generator(): 12 | """Test python name generator function.""" 13 | assert list(itertools.islice(get_python_name_generator("Some name"), 3)) == [ 14 | "test_some_name", 15 | "test_some_name_1", 16 | "test_some_name_2", 17 | ] 18 | 19 | 20 | def test_generate_missing(pytester): 21 | """Test generate missing command.""" 22 | pytester.makefile( 23 | ".feature", 24 | generation=textwrap.dedent( 25 | """\ 26 | Feature: 
Missing code generation 27 | 28 | Background: 29 | Given I have a foobar 30 | 31 | Scenario: Scenario tests which are already bound to the tests stay as is 32 | Given I have a bar 33 | 34 | 35 | Scenario: Code is generated for scenarios which are not bound to any tests 36 | Given I have a bar 37 | 38 | 39 | Scenario: Code is generated for scenario steps which are not yet defined(implemented) 40 | Given I have a custom bar 41 | """ 42 | ), 43 | ) 44 | 45 | pytester.makepyfile( 46 | textwrap.dedent( 47 | """\ 48 | import functools 49 | 50 | from pytest_bdd import scenario, given 51 | 52 | scenario = functools.partial(scenario, "generation.feature") 53 | 54 | @given("I have a bar") 55 | def _(): 56 | return "bar" 57 | 58 | @scenario("Scenario tests which are already bound to the tests stay as is") 59 | def test_foo(): 60 | pass 61 | 62 | @scenario("Code is generated for scenario steps which are not yet defined(implemented)") 63 | def test_missing_steps(): 64 | pass 65 | """ 66 | ) 67 | ) 68 | 69 | result = pytester.runpytest("--generate-missing", "--feature", "generation.feature") 70 | result.assert_outcomes(passed=0, failed=0, errors=0) 71 | assert not result.stderr.str() 72 | assert result.ret == 0 73 | 74 | result.stdout.fnmatch_lines( 75 | ['Scenario "Code is generated for scenarios which are not bound to any tests" is not bound to any test *'] 76 | ) 77 | 78 | result.stdout.fnmatch_lines( 79 | [ 80 | 'Step Given "I have a custom bar" is not defined in the scenario ' 81 | '"Code is generated for scenario steps which are not yet defined(implemented)" *' 82 | ] 83 | ) 84 | 85 | result.stdout.fnmatch_lines(['Background step Given "I have a foobar" is not defined*']) 86 | 87 | result.stdout.fnmatch_lines(["Please place the code above to the test file(s):"]) 88 | 89 | 90 | def test_generate_missing_with_step_parsers(pytester): 91 | """Test that step parsers are correctly discovered and won't be part of the missing steps.""" 92 | pytester.makefile( 93 | ".feature", 94 | 
generation=textwrap.dedent( 95 | """\ 96 | Feature: Missing code generation with step parsers 97 | 98 | Scenario: Step parsers are correctly discovered 99 | Given I use the string parser without parameter 100 | And I use parsers.parse with parameter 1 101 | And I use parsers.re with parameter 2 102 | And I use parsers.cfparse with parameter 3 103 | """ 104 | ), 105 | ) 106 | 107 | pytester.makepyfile( 108 | textwrap.dedent( 109 | """\ 110 | import functools 111 | 112 | from pytest_bdd import scenarios, given, parsers 113 | 114 | scenarios("generation.feature") 115 | 116 | @given("I use the string parser without parameter") 117 | def _(): 118 | return None 119 | 120 | @given(parsers.parse("I use parsers.parse with parameter {param}")) 121 | def _(param): 122 | return param 123 | 124 | @given(parsers.re(r"^I use parsers.re with parameter (?P.*?)$")) 125 | def _(param): 126 | return param 127 | 128 | @given(parsers.cfparse("I use parsers.cfparse with parameter {param:d}")) 129 | def _(param): 130 | return param 131 | """ 132 | ) 133 | ) 134 | 135 | result = pytester.runpytest("--generate-missing", "--feature", "generation.feature") 136 | result.assert_outcomes(passed=0, failed=0, errors=0) 137 | assert not result.stderr.str() 138 | assert result.ret == 0 139 | 140 | output = str(result.stdout) 141 | 142 | assert "I use the string parser" not in output 143 | assert "I use parsers.parse" not in output 144 | assert "I use parsers.re" not in output 145 | assert "I use parsers.cfparse" not in output 146 | -------------------------------------------------------------------------------- /src/pytest_bdd/gherkin_terminal_reporter.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import typing 4 | 5 | from _pytest.terminal import TerminalReporter 6 | 7 | from .reporting import test_report_context_registry 8 | 9 | if typing.TYPE_CHECKING: 10 | from _pytest.config import Config 11 | from 
_pytest.config.argparsing import Parser 12 | from _pytest.reports import TestReport 13 | 14 | 15 | def add_options(parser: Parser) -> None: 16 | group = parser.getgroup("terminal reporting", "reporting", after="general") 17 | group._addoption( 18 | "--gherkin-terminal-reporter", 19 | action="store_true", 20 | dest="gherkin_terminal_reporter", 21 | default=False, 22 | help="enable gherkin output", 23 | ) 24 | 25 | 26 | def configure(config: Config) -> None: 27 | if config.option.gherkin_terminal_reporter: 28 | # Get the standard terminal reporter plugin and replace it with our 29 | current_reporter = config.pluginmanager.getplugin("terminalreporter") 30 | if current_reporter.__class__ != TerminalReporter: 31 | raise Exception( 32 | "gherkin-terminal-reporter is not compatible with any other terminal reporter." 33 | "You can use only one terminal reporter." 34 | f"Currently '{current_reporter.__class__}' is used." 35 | f"Please decide to use one by deactivating {current_reporter.__class__} " 36 | "or gherkin-terminal-reporter." 
37 | ) 38 | gherkin_reporter = GherkinTerminalReporter(config) 39 | config.pluginmanager.unregister(current_reporter) 40 | config.pluginmanager.register(gherkin_reporter, "terminalreporter") 41 | if config.pluginmanager.getplugin("dsession"): 42 | raise Exception("gherkin-terminal-reporter is not compatible with 'xdist' plugin.") 43 | 44 | 45 | class GherkinTerminalReporter(TerminalReporter): # type: ignore[misc] 46 | def __init__(self, config: Config) -> None: 47 | super().__init__(config) 48 | self.current_rule: str | None = None 49 | 50 | def pytest_runtest_logreport(self, report: TestReport) -> None: 51 | rep = report 52 | res = self.config.hook.pytest_report_teststatus(report=rep, config=self.config) 53 | cat, letter, word = res 54 | 55 | if not letter and not word: 56 | # probably passed setup/teardown 57 | return None 58 | 59 | if isinstance(word, tuple): 60 | word, word_markup = word 61 | elif rep.passed: 62 | word_markup = {"green": True} 63 | elif rep.failed: 64 | word_markup = {"red": True} 65 | elif rep.skipped: 66 | word_markup = {"yellow": True} 67 | feature_markup = {"blue": True} 68 | scenario_markup = word_markup 69 | rule_markup = {"purple": True} 70 | 71 | try: 72 | scenario = test_report_context_registry[report].scenario 73 | except KeyError: 74 | scenario = None 75 | 76 | if self.verbosity <= 0 or scenario is None: 77 | return super().pytest_runtest_logreport(rep) 78 | 79 | rule = scenario.get("rule") 80 | indent = " " if rule else "" 81 | 82 | if self.verbosity == 1: 83 | self.ensure_newline() 84 | self._tw.write(f"{scenario['feature']['keyword']}: ", **feature_markup) 85 | self._tw.write(scenario["feature"]["name"], **feature_markup) 86 | self._tw.write("\n") 87 | 88 | if rule and rule["name"] != self.current_rule: 89 | self._tw.write(f" {rule['keyword']}: ", **rule_markup) 90 | self._tw.write(rule["name"], **rule_markup) 91 | self._tw.write("\n") 92 | self.current_rule = rule["name"] 93 | 94 | self._tw.write(f"{indent} {scenario['keyword']}: 
", **scenario_markup) 95 | self._tw.write(scenario["name"], **scenario_markup) 96 | self._tw.write(" ") 97 | self._tw.write(word, **word_markup) 98 | self._tw.write("\n") 99 | elif self.verbosity > 1: 100 | self.ensure_newline() 101 | self._tw.write(f"{scenario['feature']['keyword']}: ", **feature_markup) 102 | self._tw.write(scenario["feature"]["name"], **feature_markup) 103 | self._tw.write("\n") 104 | 105 | if rule and rule["name"] != self.current_rule: 106 | self._tw.write(f" {rule['keyword']}: ", **rule_markup) 107 | self._tw.write(rule["name"], **rule_markup) 108 | self._tw.write("\n") 109 | self.current_rule = rule["name"] 110 | 111 | self._tw.write(f"{indent} {scenario['keyword']}: ", **scenario_markup) 112 | self._tw.write(scenario["name"], **scenario_markup) 113 | self._tw.write("\n") 114 | for step in scenario["steps"]: 115 | self._tw.write(f"{indent} {step['keyword']} {step['name']}\n", **scenario_markup) 116 | self._tw.write(f"{indent} {word}", **word_markup) 117 | self._tw.write("\n\n") 118 | 119 | self.stats.setdefault(cat, []).append(rep) 120 | -------------------------------------------------------------------------------- /tests/args/regex/test_args.py: -------------------------------------------------------------------------------- 1 | """Step arguments tests.""" 2 | 3 | from __future__ import annotations 4 | 5 | import textwrap 6 | 7 | 8 | def test_every_steps_takes_param_with_the_same_name(pytester): 9 | pytester.makefile( 10 | ".feature", 11 | arguments=textwrap.dedent( 12 | """\ 13 | Feature: Step arguments 14 | Scenario: Every step takes a parameter with the same name 15 | Given I have 1 Euro 16 | When I pay 2 Euro 17 | And I pay 1 Euro 18 | Then I should have 0 Euro 19 | And I should have 999999 Euro 20 | 21 | """ 22 | ), 23 | ) 24 | 25 | pytester.makepyfile( 26 | textwrap.dedent( 27 | r""" 28 | import pytest 29 | from pytest_bdd import parsers, given, when, then, scenario 30 | 31 | @scenario("arguments.feature", "Every step takes a 
parameter with the same name") 32 | def test_arguments(): 33 | pass 34 | 35 | @pytest.fixture 36 | def values(): 37 | return [1, 2, 1, 0, 999999] 38 | 39 | @given(parsers.re(r"I have (?P\d+) Euro"), converters=dict(euro=int)) 40 | def _(euro, values): 41 | assert euro == values.pop(0) 42 | 43 | 44 | @when(parsers.re(r"I pay (?P\d+) Euro"), converters=dict(euro=int)) 45 | def _(euro, values, request): 46 | assert euro == values.pop(0) 47 | 48 | 49 | @then(parsers.re(r"I should have (?P\d+) Euro"), converters=dict(euro=int)) 50 | def _(euro, values): 51 | assert euro == values.pop(0) 52 | 53 | """ 54 | ) 55 | ) 56 | result = pytester.runpytest() 57 | result.assert_outcomes(passed=1) 58 | 59 | 60 | def test_exact_match(pytester): 61 | """Test that parsers.re does an exact match (fullmatch) of the whole string. 62 | 63 | This tests exists because in the past we only used re.match, which only finds a match at the beginning 64 | of the string, so if there were any more characters not matching at the end, they were ignored""" 65 | 66 | pytester.makefile( 67 | ".feature", 68 | arguments=textwrap.dedent( 69 | """\ 70 | Feature: Step arguments 71 | Scenario: Every step takes a parameter with the same name 72 | Given I have 2 Euro 73 | # Step that should not be found: 74 | When I pay 1 Euro by mistake 75 | Then I should have 1 Euro left 76 | """ 77 | ), 78 | ) 79 | 80 | pytester.makepyfile( 81 | textwrap.dedent( 82 | r""" 83 | import pytest 84 | from pytest_bdd import parsers, given, when, then, scenarios 85 | 86 | scenarios("arguments.feature") 87 | 88 | @given(parsers.re(r"I have (?P\d+) Euro"), converters={"amount": int}, target_fixture="wallet") 89 | def _(amount): 90 | return {"EUR": amount} 91 | 92 | 93 | # Purposefully using a re that will not match the step "When I pay 1 Euro and 50 cents" 94 | @when(parsers.re(r"I pay (?P\d+) Euro"), converters={"amount": int}) 95 | def _(amount, wallet): 96 | wallet["EUR"] -= amount 97 | 98 | 99 | @then(parsers.re(r"I should have 
(?P\d+) Euro left"), converters={"amount": int}) 100 | def _(amount, wallet): 101 | assert wallet["EUR"] == amount 102 | 103 | """ 104 | ) 105 | ) 106 | result = pytester.runpytest() 107 | result.assert_outcomes(failed=1) 108 | result.stdout.fnmatch_lines( 109 | '*StepDefinitionNotFoundError: Step definition is not found: When "I pay 1 Euro by mistake"*' 110 | ) 111 | 112 | 113 | def test_argument_in_when(pytester): 114 | pytester.makefile( 115 | ".feature", 116 | arguments=textwrap.dedent( 117 | """\ 118 | Feature: Step arguments 119 | Scenario: Argument in when, step 1 120 | Given I have an argument 1 121 | When I get argument 5 122 | Then My argument should be 5 123 | """ 124 | ), 125 | ) 126 | 127 | pytester.makepyfile( 128 | textwrap.dedent( 129 | r""" 130 | import pytest 131 | from pytest_bdd import parsers, given, when, then, scenario 132 | 133 | 134 | @pytest.fixture 135 | def arguments(): 136 | return dict() 137 | 138 | 139 | @scenario("arguments.feature", "Argument in when, step 1") 140 | def test_arguments(): 141 | pass 142 | 143 | @given(parsers.re(r"I have an argument (?P\d+)")) 144 | def _(arguments, arg): 145 | arguments["arg"] = arg 146 | 147 | 148 | @when(parsers.re(r"I get argument (?P\d+)")) 149 | def _(arguments, arg): 150 | arguments["arg"] = arg 151 | 152 | 153 | @then(parsers.re(r"My argument should be (?P\d+)")) 154 | def _(arguments, arg): 155 | assert arguments["arg"] == arg 156 | 157 | """ 158 | ) 159 | ) 160 | result = pytester.runpytest() 161 | result.assert_outcomes(passed=1) 162 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Main testing workflow 2 | 3 | on: 4 | push: 5 | pull_request: 6 | workflow_dispatch: 7 | 8 | env: 9 | POETRY_VERSION: "2.2.1" 10 | 11 | jobs: 12 | build: 13 | name: Build package 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: actions/checkout@v4 17 | - 
name: Set up Python 18 | uses: actions/setup-python@v5 19 | with: 20 | cache: "pip" 21 | - name: Install poetry 22 | run: python3 -m pip install poetry==${{ env.POETRY_VERSION }} 23 | - name: Install build dependencies 24 | run: poetry install --only build 25 | - name: Build a binary wheel and a source tarball 26 | run: poetry run python -m build 27 | - name: Check the distribution files with `twine` 28 | run: poetry run twine check --strict dist/* 29 | - name: Upload artifact 30 | id: artifact-upload-step 31 | uses: actions/upload-artifact@v4 32 | with: 33 | name: dist-files 34 | path: dist/* 35 | if-no-files-found: error 36 | compression-level: 0 # They are already compressed 37 | test-run: 38 | runs-on: ubuntu-latest 39 | needs: build 40 | strategy: 41 | fail-fast: false 42 | matrix: 43 | include: 44 | - python-version: "3.9" 45 | toxfactor: py3.9 46 | ignore-typecheck-outcome: false 47 | ignore-test-outcome: false 48 | - python-version: "3.10" 49 | toxfactor: py3.10 50 | ignore-typecheck-outcome: false 51 | ignore-test-outcome: false 52 | - python-version: "3.11" 53 | toxfactor: py3.11 54 | ignore-typecheck-outcome: false 55 | ignore-test-outcome: false 56 | - python-version: "3.12" 57 | toxfactor: py3.12 58 | ignore-typecheck-outcome: false 59 | ignore-test-outcome: false 60 | - python-version: "3.13" 61 | toxfactor: py3.13 62 | ignore-typecheck-outcome: false 63 | ignore-test-outcome: false 64 | 65 | steps: 66 | - uses: actions/checkout@v4 67 | 68 | - name: Set up Python ${{ matrix.python-version }} 69 | uses: actions/setup-python@v4 70 | id: setup-python 71 | with: 72 | python-version: ${{ matrix.python-version }} 73 | 74 | - name: Install poetry 75 | run: | 76 | # When upgrading poetry, remember to upgrade poetry-core in `pyproject.toml` (`build-system.requires`) 77 | python -m pip install poetry==${{ env.POETRY_VERSION }} 78 | 79 | - name: Configure poetry 80 | run: | 81 | python -m poetry config virtualenvs.in-project true 82 | 83 | - name: Cache the 
virtualenv 84 | id: poetry-dependencies-cache 85 | uses: actions/cache@v3 86 | with: 87 | path: ./.venv 88 | key: ${{ runner.os }}-venv-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }} 89 | 90 | - name: Install dev dependencies 91 | if: steps.poetry-dependencies-cache.outputs.cache-hit != 'true' 92 | run: | 93 | python -m poetry install --only=dev 94 | 95 | - name: Download artifact 96 | uses: actions/download-artifact@v4 97 | with: 98 | name: dist-files 99 | path: dist/ 100 | 101 | - name: Type checking 102 | # Ignore errors for older pythons 103 | continue-on-error: ${{ matrix.ignore-typecheck-outcome }} 104 | run: | 105 | source .venv/bin/activate 106 | tox -e mypy 107 | 108 | - name: Test with tox 109 | continue-on-error: ${{ matrix.ignore-test-outcome }} 110 | run: | 111 | source .venv/bin/activate 112 | coverage erase 113 | # Using `--parallel 4` as it's the number of CPUs in the GitHub Actions runner 114 | # Using `installpkg dist/*.tar.gz` because we want to install the pre-built package (want to test against that) 115 | tox run-parallel -f ${{ matrix.toxfactor }} --parallel 4 --parallel-no-spinner --parallel-live --installpkg dist/*.whl 116 | coverage combine 117 | coverage xml 118 | 119 | - uses: codecov/codecov-action@v4 120 | with: 121 | # Explicitly using the token to avoid Codecov rate limit errors 122 | # See https://community.codecov.com/t/upload-issues-unable-to-locate-build-via-github-actions-api/3954 123 | token: ${{ secrets.CODECOV_TOKEN }} 124 | fail_ci_if_error: false 125 | verbose: true # optional (default = false) 126 | 127 | pypi-publish: 128 | name: Upload release to PyPI 129 | runs-on: ubuntu-latest 130 | environment: 131 | name: pypi 132 | url: https://pypi.org/p/pytest-bdd 133 | permissions: 134 | id-token: write # IMPORTANT: this permission is mandatory for trusted publishing 135 | if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') 136 | needs: 137 | - "test-run" 138 | - "build" 
139 | steps: 140 | - name: Download artifact 141 | uses: actions/download-artifact@v4 142 | with: 143 | name: dist-files 144 | path: dist/ 145 | - name: Publish package distributions to PyPI 146 | uses: pypa/gh-action-pypi-publish@release/v1 147 | -------------------------------------------------------------------------------- /tests/feature/test_tags.py: -------------------------------------------------------------------------------- 1 | """Test tags.""" 2 | 3 | from __future__ import annotations 4 | 5 | import textwrap 6 | 7 | 8 | def test_tags_selector(pytester): 9 | """Test tests selection by tags.""" 10 | pytester.makefile( 11 | ".ini", 12 | pytest=textwrap.dedent( 13 | """ 14 | [pytest] 15 | markers = 16 | feature_tag_1 17 | feature_tag_2 18 | scenario_tag_01 19 | scenario_tag_02 20 | scenario_tag_10 21 | scenario_tag_20 22 | """ 23 | ), 24 | ) 25 | pytester.makefile( 26 | ".feature", 27 | test=""" 28 | @feature_tag_1 @feature_tag_2 29 | Feature: Tags 30 | 31 | @scenario_tag_01 @scenario_tag_02 32 | Scenario: Tags 33 | Given I have a bar 34 | 35 | @rule_tag_01 36 | Rule: Rule tag 37 | 38 | @scenario_tag_10 @scenario_tag_20 39 | Scenario: Tags 2 40 | Given I have a bar 41 | 42 | """, 43 | ) 44 | pytester.makepyfile( 45 | """ 46 | import pytest 47 | from pytest_bdd import given, scenarios 48 | 49 | @given('I have a bar') 50 | def _(): 51 | return 'bar' 52 | 53 | scenarios('test.feature') 54 | """ 55 | ) 56 | result = pytester.runpytest("-m", "scenario_tag_10 and not scenario_tag_01", "-vv") 57 | outcomes = result.parseoutcomes() 58 | assert outcomes["passed"] == 1 59 | assert outcomes["deselected"] == 1 60 | 61 | result = pytester.runpytest("-m", "scenario_tag_01 and not scenario_tag_10", "-vv").parseoutcomes() 62 | assert result["passed"] == 1 63 | assert result["deselected"] == 1 64 | 65 | result = pytester.runpytest("-m", "feature_tag_1", "-vv").parseoutcomes() 66 | assert result["passed"] == 2 67 | 68 | result = pytester.runpytest("-m", "feature_tag_10", 
def test_tags_after_background_issue_160(pytester):
    """Make sure using a tag after background works.

    Regression test for issue #160: a scenario tagged *after* a Background
    section must still be selectable with ``-m``; the untagged sibling
    scenario is deselected.
    """
    # Register the marker so strict-marker configurations don't warn/fail.
    pytester.makefile(
        ".ini",
        pytest=textwrap.dedent(
            """
            [pytest]
            markers = tag
            """
        ),
    )
    pytester.makefile(
        ".feature",
        test="""
            Feature: Tags after background

                Background:
                    Given I have a bar

                @tag
                Scenario: Tags
                    Given I have a baz

                Scenario: Tags 2
                    Given I have a baz
            """,
    )
    pytester.makepyfile(
        """
        import pytest
        from pytest_bdd import given, scenarios

        @given('I have a bar')
        def _():
            return 'bar'

        @given('I have a baz')
        def _():
            return 'baz'

        scenarios('test.feature')
        """
    )
    # Exactly one scenario carries @tag; the other must be deselected.
    result = pytester.runpytest("-m", "tag", "-vv").parseoutcomes()
    assert result["passed"] == 1
    assert result["deselected"] == 1
def test_multiline_tags(pytester):
    """Tags written on separate lines above a scenario must all be applied as markers."""
    pytester.makefile(
        ".feature",
        test="""
            Feature: Scenario with tags over multiple lines

                @tag1
                @tag2
                Scenario: Tags
                    Given I have a foo

                Scenario: Second
                    Given I have a baz
            """,
    )
    pytester.makepyfile(
        """
        from pytest_bdd import given, scenarios

        @given('I have a foo')
        def _():
            pass

        @given('I have a baz')
        def _():
            pass

        scenarios('test.feature')
        """
    )
    # Either of the two stacked tags selects the same (single) scenario.
    result = pytester.runpytest("-m", "tag1", "-vv")
    result.assert_outcomes(passed=1, deselected=1)

    result = pytester.runpytest("-m", "tag2", "-vv")
    result.assert_outcomes(passed=1, deselected=1)
# Import from the installed package, consistent with the other tests in this
# repository (e.g. tests/scripts/test_generate.py uses `from pytest_bdd.scripts
# import main`, and the inline conftest below uses `from pytest_bdd.utils
# import dump_obj`). Importing via the `src.` filesystem path would create a
# second, distinct module object for the same code.
from pytest_bdd.utils import collect_dumped_objects


def test_steps_with_docstrings(pytester):
    """Steps followed by an indented triple-quoted block receive it via the ``docstring`` fixture.

    Also checks that the docstring content is dedented regardless of whether
    the block is indented less or more than the step line itself.
    """
    pytester.makefile(
        ".feature",
        docstring=textwrap.dedent(
            '''
            Feature: Docstring

                Scenario: Step with plain docstring as multiline step
                    Given a step has a docstring
                        """
                        This is a given docstring
                        """

                    When a step provides a docstring with lower indentation
                """
                This is a when docstring
                """

                    And this step has no docstring

                    Then this step has a greater indentation
                            """
                            This is a then docstring
                            """
            '''
        ),
    )

    pytester.makeconftest(
        textwrap.dedent(
            r"""
            from pytest_bdd import given, when, then
            from pytest_bdd.utils import dump_obj


            @given("a step has a docstring")
            def _(docstring):
                given_docstring = docstring
                dump_obj(given_docstring)


            @when("a step provides a docstring with lower indentation")
            def _(docstring):
                when_docstring = docstring
                dump_obj(when_docstring)


            @when("this step has no docstring")
            def _():
                pass


            @then("this step has a greater indentation")
            def _(docstring):
                then_docstring = docstring
                dump_obj(then_docstring)
            """
        )
    )

    pytester.makepyfile(
        textwrap.dedent(
            """\
            from pytest_bdd import scenarios

            scenarios("docstring.feature")
            """
        )
    )

    result = pytester.runpytest("-s")
    result.assert_outcomes(passed=1)

    # Each step implementation dumped the docstring it received, in order.
    docstrings = collect_dumped_objects(result)
    assert docstrings == ["This is a given docstring", "This is a when docstring", "This is a then docstring"]
def test_docstring_argument_in_step_impl_is_optional(pytester):
    """A step implementation may omit the ``docstring`` argument even when the feature step has one."""
    pytester.makefile(
        ".feature",
        optional_docstring_arg=textwrap.dedent(
            '''\
            Feature: Missing docstring

                Scenario: Docstring arg is missing for a step definition
                    Given this step has a docstring
                        """
                        This is a given docstring
                        """

                    When this step has a docstring but no docstring argument
                        """
                        This is a when docstring
                        """

                    Then the test passes
            '''
        ),
    )
    # The "when" implementation deliberately takes no `docstring` parameter
    # even though the feature step provides one — this must not be an error.
    pytester.makeconftest(
        textwrap.dedent(
            """\
            from pytest_bdd import given, when, then


            @given("this step has a docstring")
            def _(docstring):
                print(docstring)


            @when("this step has a docstring but no docstring argument")
            def _():
                pass


            @then("the test passes")
            def _():
                pass

            """
        )
    )

    pytester.makepyfile(
        textwrap.dedent(
            """\
            from pytest_bdd import scenarios

            scenarios("optional_docstring_arg.feature")
            """
        )
    )
    result = pytester.runpytest("-s")
    result.assert_outcomes(passed=1)
def test_generate(pytester, monkeypatch, capsys):
    """Test if the code is generated by a given feature.

    Invokes the ``generate`` sub-command of the pytest-bdd CLI (via its
    ``main`` entry point) and compares the emitted test skeleton verbatim.
    """

    features = pytester.mkdir("scripts")
    feature = features.joinpath("generate.feature")
    feature.write_text(
        textwrap.dedent(
            """\
            Feature: Code generation

                Scenario: Given and when using the same fixture should not evaluate it twice
                    Given I have an empty list
                    And 1 have a fixture (appends 1 to a list) in reuse syntax

                    When I use this fixture

                    Then my list should be [1]
            """
        ),
        "utf-8",
    )

    # Simulate `pytest-bdd generate <feature>` being run from the command line.
    monkeypatch.setattr(sys, "argv", ["", "generate", str(feature)])
    main()
    out, err = capsys.readouterr()
    # Generated givens are sorted alphabetically, not in feature order.
    assert out == textwrap.dedent(
        '''\
        """Code generation feature tests."""

        from pytest_bdd import (
            given,
            scenario,
            then,
            when,
        )


        @scenario('scripts/generate.feature', 'Given and when using the same fixture should not evaluate it twice')
        def test_given_and_when_using_the_same_fixture_should_not_evaluate_it_twice():
            """Given and when using the same fixture should not evaluate it twice."""


        @given('1 have a fixture (appends 1 to a list) in reuse syntax')
        def _():
            """1 have a fixture (appends 1 to a list) in reuse syntax."""
            raise NotImplementedError


        @given('I have an empty list')
        def _():
            """I have an empty list."""
            raise NotImplementedError


        @when('I use this fixture')
        def _():
            """I use this fixture."""
            raise NotImplementedError


        @then('my list should be [1]')
        def _():
            """my list should be [1]."""
            raise NotImplementedError

        '''
    )
def test_unicode_characters(pytester):
    """Test generating code for a feature containing non-ASCII characters.

    The generator must emit the unicode step text verbatim in both the
    decorator argument and the generated docstring.
    """
    # NOTE: the original docstring claimed Python 2 compatibility as the
    # purpose; that is obsolete in this py3-only codebase. The unused
    # `monkeypatch` fixture parameter was also dropped.
    pytester.makefile(
        ".feature",
        unicode_characters=textwrap.dedent(
            """\
            Feature: Generating unicode characters

                Scenario: Calculating the circumference of a circle
                    Given We have a circle
                    When We want to know its circumference
                    Then We calculate 2 * ℼ * 𝑟
            """
        ),
    )

    result = pytester.run("pytest-bdd", "generate", "unicode_characters.feature")
    expected_output = textwrap.dedent(
        '''\
        """Generating unicode characters feature tests."""

        from pytest_bdd import (
            given,
            scenario,
            then,
            when,
        )


        @scenario('unicode_characters.feature', 'Calculating the circumference of a circle')
        def test_calculating_the_circumference_of_a_circle():
            """Calculating the circumference of a circle."""


        @given('We have a circle')
        def _():
            """We have a circle."""
            raise NotImplementedError


        @when('We want to know its circumference')
        def _():
            """We want to know its circumference."""
            raise NotImplementedError


        @then('We calculate 2 * ℼ * 𝑟')
        def _():
            """We calculate 2 * ℼ * 𝑟."""
            raise NotImplementedError
        '''
    )
    assert str(result.stdout) == expected_output
def given(
    name: str | StepParser,
    converters: dict[str, Callable[[str], object]] | None = None,
    target_fixture: str | None = None,
    stacklevel: int = 1,
) -> Callable[[Callable[P, T]], Callable[P, T]]:
    """Given step decorator.

    Thin wrapper around :func:`step` with the step type fixed to ``"given"``.

    :param name: Step name or a parser object.
    :param converters: Optional `dict` of the argument or parameter converters in form
        {<param_name>: <converter function>}.
    :param target_fixture: Target fixture name to replace by steps definition function.
    :param stacklevel: Stack level to find the caller frame. This is used when injecting the step definition fixture.

    :return: Decorator function for the step.
    """
    return step(
        name,
        "given",
        converters=converters,
        target_fixture=target_fixture,
        stacklevel=stacklevel,
    )
def then(
    name: str | StepParser,
    converters: dict[str, Callable[[str], object]] | None = None,
    target_fixture: str | None = None,
    stacklevel: int = 1,
) -> Callable[[Callable[P, T]], Callable[P, T]]:
    """Then step decorator.

    Thin wrapper around :func:`step` with the step type fixed to ``"then"``.

    :param name: Step name or a parser object.
    :param converters: Optional `dict` of the argument or parameter converters in form
        {<param_name>: <converter function>}.
    :param target_fixture: Target fixture name to replace by steps definition function.
    :param stacklevel: Stack level to find the caller frame. This is used when injecting the step definition fixture.

    :return: Decorator function for the step.
    """
    return step(
        name,
        "then",
        converters=converters,
        target_fixture=target_fixture,
        stacklevel=stacklevel,
    )
def find_unique_name(name: str, seen: Iterable[str]) -> str:
    """Return a name that does not collide with any name in *seen*.

    The original name is returned unchanged when it is free; otherwise the
    suffixed variants ``{name}_1``, ``{name}_2``, ... are tried in order and
    the first unused one is returned.

    Example:
        >>> find_unique_name("foo", ["foo", "foo_1"])
        'foo_2'
    """
    taken = set(seen)
    if name not in taken:
        return name

    # Probe suffixed candidates with an increasing counter.
    candidates = (f"{name}_{suffix}" for suffix in count(1))
    for candidate in candidates:
        if candidate not in taken:
            return candidate

    # Unreachable (count(1) is infinite); kept to satisfy mypy.
    raise RuntimeError("Unable to find a unique name")
def generate_code(features: list[Feature], scenarios: list[ScenarioTemplate], steps: list[Step]) -> str:
    """Generate test code for the given filenames.

    :param features: Parsed features the missing scenarios belong to.
    :param scenarios: Scenario templates that are not yet bound to a test.
    :param steps: Steps that have no matching step definition.
    :return: Rendered Python source for the missing tests.
    """
    # De-duplicate steps and order them by step type before rendering.
    grouped_steps = group_steps(steps)
    template = template_lookup.get_template("test.py.mak")
    code = template.render(
        features=features,
        scenarios=scenarios,
        steps=grouped_steps,
        make_python_name=make_python_name,
        make_python_docstring=make_python_docstring,
        make_string_literal=make_string_literal,
    )
    # Mako's render() is untyped; cast for mypy.
    return cast(str, code)
def _find_step_fixturedef(
    fixturemanager: FixtureManager, item: Node, step: Step
) -> Sequence[FixtureDef[object]] | None:
    """Find step fixturedef.

    Temporarily injects the fixture definitions for *step* into the fixture
    manager (scoped to the context manager) so that the lookup by the
    step's fixture name can succeed, then resolves that name for *item*.
    Returns None when no matching step definition exists.
    """
    with inject_fixturedefs_for_step(step=step, fixturemanager=fixturemanager, node=item):
        bdd_name = get_step_fixture_name(step=step)
        return getfixturedefs(fixturemanager, bdd_name, item)
def group_steps(steps: list[Step]) -> list[Step]:
    """Group steps by type.

    De-duplicates steps by name (the first occurrence in (type, name) order
    wins) and returns them ordered by step type as defined in STEP_TYPES,
    alphabetically by name within each type.
    """
    # Sorting by the (type, name) pair is equivalent to the sort-by-type /
    # group-by-type / sort-each-group-by-name pipeline, since sorted() is stable.
    ordered = sorted(steps, key=lambda s: (s.type, s.name))

    # dict preserves insertion order; setdefault keeps the first step seen
    # under each name, mirroring the "seen" set de-duplication.
    unique_by_name: dict = {}
    for candidate in ordered:
        unique_by_name.setdefault(candidate.name, candidate)

    grouped = list(unique_by_name.values())
    # Stable re-sort into the canonical step-type order.
    grouped.sort(key=lambda s: STEP_TYPES.index(s.type))
    return grouped
step.background is None: 207 | steps.remove(step) 208 | grouped_steps = group_steps(steps) 209 | print_missing_code(scenarios, grouped_steps) 210 | 211 | if scenarios or steps: 212 | session.exitstatus = 100 213 | -------------------------------------------------------------------------------- /src/pytest_bdd/cucumber_json.py: -------------------------------------------------------------------------------- 1 | """Cucumber json output formatter.""" 2 | 3 | from __future__ import annotations 4 | 5 | import json 6 | import math 7 | import os 8 | import time 9 | from typing import TYPE_CHECKING, Literal, TypedDict 10 | 11 | from typing_extensions import NotRequired 12 | 13 | from .reporting import FeatureDict, ScenarioReportDict, StepReportDict, test_report_context_registry 14 | 15 | if TYPE_CHECKING: 16 | from _pytest.config import Config 17 | from _pytest.config.argparsing import Parser 18 | from _pytest.reports import TestReport 19 | from _pytest.terminal import TerminalReporter 20 | 21 | 22 | class ResultElementDict(TypedDict): 23 | status: Literal["passed", "failed", "skipped"] 24 | duration: int # in nanoseconds 25 | error_message: NotRequired[str] 26 | 27 | 28 | class TagElementDict(TypedDict): 29 | name: str 30 | line: int 31 | 32 | 33 | class MatchElementDict(TypedDict): 34 | location: str 35 | 36 | 37 | class StepElementDict(TypedDict): 38 | keyword: str 39 | name: str 40 | line: int 41 | match: MatchElementDict 42 | result: ResultElementDict 43 | 44 | 45 | class ScenarioElementDict(TypedDict): 46 | keyword: str 47 | id: str 48 | name: str 49 | line: int 50 | description: str 51 | tags: list[TagElementDict] 52 | type: Literal["scenario"] 53 | steps: list[StepElementDict] 54 | 55 | 56 | class FeatureElementDict(TypedDict): 57 | keyword: str 58 | uri: str 59 | name: str 60 | id: str 61 | line: int 62 | description: str 63 | language: str 64 | tags: list[TagElementDict] 65 | elements: list[ScenarioElementDict] 66 | 67 | 68 | class FeaturesDict(TypedDict): 69 | 
    def _get_result(self, step: StepReportDict, report: TestReport, error_message: bool = False) -> ResultElementDict:
        """Get scenario test run result.

        :param step: `Step` step we get result for
        :param report: pytest `Report` object
        :param error_message: When True, include the report's longrepr text
            for a failed step (callers set this only for the first failing
            step of a scenario); otherwise a failed step gets an empty
            "error_message" value.
        :return: `dict` in form {"status": "<passed|failed|skipped>", ["error_message": "<message>"]}
        """
        status: Literal["passed", "failed", "skipped"]
        res_message = None
        if report.outcome == "passed" or not step["failed"]:  # ignore setup/teardown
            status = "passed"
        elif report.outcome == "failed":
            status = "failed"
            # Empty string (not None) keeps the "error_message" key present
            # for failed steps that are not the first failure.
            res_message = str(report.longrepr) if error_message else ""
        elif report.outcome == "skipped":
            status = "skipped"
        else:
            raise ValueError(f"Unknown test outcome {report.outcome}")
        res: ResultElementDict = {"status": status, "duration": int(math.floor((10**9) * step["duration"]))}  # nanosec
        if res_message is not None:
            res["error_message"] = res_message
        return res
136 | :return: `list` of `dict` in the form of: 137 | [ 138 | { 139 | "name": "", 140 | "line": 2, 141 | } 142 | ] 143 | """ 144 | return [{"name": tag, "line": item["line_number"] - 1} for tag in item["tags"]] 145 | 146 | def pytest_runtest_logreport(self, report: TestReport) -> None: 147 | try: 148 | scenario = test_report_context_registry[report].scenario 149 | except KeyError: 150 | # skip reporting for non-bdd tests 151 | return 152 | 153 | if not scenario["steps"] or report.when != "call": 154 | # skip if there isn't a result or scenario has no steps 155 | return 156 | 157 | def stepmap(step: StepReportDict) -> StepElementDict: 158 | error_message = False 159 | if step["failed"] and not scenario.setdefault("failed", False): 160 | scenario["failed"] = True 161 | error_message = True 162 | 163 | step_name = step["name"] 164 | 165 | return { 166 | "keyword": step["keyword"], 167 | "name": step_name, 168 | "line": step["line_number"], 169 | "match": {"location": ""}, 170 | "result": self._get_result(step, report, error_message), 171 | } 172 | 173 | if scenario["feature"]["filename"] not in self.features: 174 | self.features[scenario["feature"]["filename"]] = { 175 | "keyword": scenario["feature"]["keyword"], 176 | "uri": scenario["feature"]["rel_filename"], 177 | "name": scenario["feature"]["name"] or scenario["feature"]["rel_filename"], 178 | "id": scenario["feature"]["rel_filename"].lower().replace(" ", "-"), 179 | "line": scenario["feature"]["line_number"], 180 | "description": scenario["feature"]["description"], 181 | "language": scenario["feature"]["language"], 182 | "tags": self._serialize_tags(scenario["feature"]), 183 | "elements": [], 184 | } 185 | 186 | self.features[scenario["feature"]["filename"]]["elements"].append( 187 | { 188 | "keyword": scenario["keyword"], 189 | "id": test_report_context_registry[report].name, 190 | "name": scenario["name"], 191 | "line": scenario["line_number"], 192 | "description": scenario["description"], 193 | "tags": 
self._serialize_tags(scenario), 194 | "type": "scenario", 195 | "steps": [stepmap(step) for step in scenario["steps"]], 196 | } 197 | ) 198 | 199 | def pytest_sessionstart(self) -> None: 200 | self.suite_start_time = time.time() 201 | 202 | def pytest_sessionfinish(self) -> None: 203 | with open(self.logfile, "w", encoding="utf-8") as logfile: 204 | logfile.write(json.dumps(list(self.features.values()))) 205 | 206 | def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None: 207 | terminalreporter.write_sep("-", f"generated json file: {self.logfile}") 208 | -------------------------------------------------------------------------------- /tests/parser/test_errors.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import textwrap 4 | 5 | 6 | def test_multiple_features_error(pytester): 7 | """Test multiple features in a single feature file.""" 8 | features = pytester.mkdir("features") 9 | features.joinpath("test.feature").write_text( 10 | textwrap.dedent( 11 | """ 12 | Feature: First Feature 13 | Scenario: First Scenario 14 | Given a step 15 | 16 | Feature: Second Feature 17 | Scenario: Second Scenario 18 | Given another step 19 | """ 20 | ), 21 | encoding="utf-8", 22 | ) 23 | pytester.makepyfile( 24 | textwrap.dedent( 25 | """ 26 | from pytest_bdd import scenarios 27 | 28 | scenarios('features') 29 | """ 30 | ) 31 | ) 32 | 33 | result = pytester.runpytest() 34 | result.stdout.fnmatch_lines(["*FeatureError: Multiple features are not allowed in a single feature file.*"]) 35 | 36 | 37 | def test_step_outside_scenario_or_background_error(pytester): 38 | """Test step outside of a Scenario or Background.""" 39 | features = pytester.mkdir("features") 40 | features.joinpath("test.feature").write_text( 41 | textwrap.dedent( 42 | """ 43 | Feature: Invalid Feature 44 | # Step not inside a scenario or background 45 | Given a step that is not inside a scenario or background 46 | 47 
def test_step_outside_scenario_or_background_error(pytester):
    """Test step outside of a Scenario or Background."""
    features = pytester.mkdir("features")
    features.joinpath("test.feature").write_text(
        textwrap.dedent(
            """
            Feature: Invalid Feature
                # Step not inside a scenario or background
                Given a step that is not inside a scenario or background

                Scenario: A valid scenario
                    Given a step inside a scenario

            """
        ),
        encoding="utf-8",
    )

    pytester.makepyfile(
        textwrap.dedent(
            """
            from pytest_bdd import scenarios, given

            @given("a step inside a scenario")
            def step_inside_scenario():
                pass

            scenarios('features')
            """
        )
    )

    result = pytester.runpytest()

    # Expect the FeatureError for the step outside of scenario or background
    result.stdout.fnmatch_lines(["*FeatureError: Step definition outside of a Scenario or a Background.*"])


def test_multiple_backgrounds_error(pytester):
    """Test multiple backgrounds in a single feature."""
    features = pytester.mkdir("features")
    features.joinpath("test.feature").write_text(
        textwrap.dedent(
            """
            Feature: Feature with multiple backgrounds
                Background: First background
                    Given a first background step

                Background: Second background
                    Given a second background step

                Scenario: A valid scenario
                    Given a step in the scenario
            """
        ),
        encoding="utf-8",
    )
    pytester.makepyfile(
        textwrap.dedent(
            """
            from pytest_bdd import scenarios

            scenarios('features')
            """
        )
    )

    result = pytester.runpytest()
    result.stdout.fnmatch_lines(
        ["*BackgroundError: Multiple 'Background' sections detected. Only one 'Background' is allowed per feature.*"]
    )


def test_misplaced_scenario_error(pytester):
    """Test misplaced or incorrect Scenario keywords."""
    features = pytester.mkdir("features")
    features.joinpath("test.feature").write_text(
        textwrap.dedent(
            """
            Scenario: First scenario
                Given a step

            Scenario: Misplaced scenario
                Given another step
                When I have something wrong
            """
        ),
        encoding="utf-8",
    )
    pytester.makepyfile(
        textwrap.dedent(
            """
            from pytest_bdd import scenarios, given, when

            @given("a step")
            def a_step():
                pass

            @given("another step")
            def another_step():
                pass

            @when("I have something wrong")
            def something_wrong():
                pass

            scenarios('features')
            """
        )
    )

    result = pytester.runpytest()

    # A ScenarioError is reported because the scenarios appear without a Feature section.
    # (The previous comment claimed no ScenarioError would be raised, contradicting the assertion below.)
    result.stdout.fnmatch_lines(
        [
            "*ScenarioError: Misplaced or incorrect 'Scenario' keyword. Ensure it's correctly placed. There might be a missing Feature section.*"
        ]
    )


def test_misplaced_rule_error(pytester):
    """Test misplaced or incorrectly formatted Rule."""
    features = pytester.mkdir("features")
    features.joinpath("test.feature").write_text(
        textwrap.dedent(
            """
            Rule: Misplaced rule
                Feature: Feature with misplaced rule
                    Scenario: A scenario inside a rule
                        Given a step
            """
        ),
        encoding="utf-8",
    )
    pytester.makepyfile(
        textwrap.dedent(
            """
            from pytest_bdd import given, scenarios

            scenarios('features')

            @given("a step")
            def a_step():
                pass
            """
        )
    )

    result = pytester.runpytest()
    result.stdout.fnmatch_lines(
        ["*RuleError: Misplaced or incorrectly formatted 'Rule'. Ensure it follows the feature structure.*"]
    )


def test_improper_step_error(pytester):
    """Test improper step without keyword."""
    features = pytester.mkdir("features")
    features.joinpath("test.feature").write_text(
        textwrap.dedent(
            """
            Feature: Feature with improper step
                Scenario: Scenario with improper step
                    Given a valid step
                    InvalidStep I have an invalid step
            """
        ),
        encoding="utf-8",
    )
    pytester.makepyfile(
        textwrap.dedent(
            """
            from pytest_bdd import scenarios

            scenarios('features')
            """
        )
    )

    result = pytester.runpytest()
    result.stdout.fnmatch_lines(["*TokenError: Unexpected token found. Check Gherkin syntax near the reported error.*"])


def test_improper_initial_keyword(pytester):
    """Test first step using incorrect initial keyword."""
    features = pytester.mkdir("features")
    features.joinpath("test.feature").write_text(
        textwrap.dedent(
            """
            Feature: Incorrect initial keyword

                Scenario: No initial Given, When or Then
                    And foo
            """
        ),
        encoding="utf-8",
    )
    pytester.makepyfile(
        textwrap.dedent(
            """
            from pytest_bdd import given, then, scenarios

            scenarios('features')

            @given("foo")
            def foo():
                pass

            @then("bar")
            def bar():
                pass
            """
        )
    )
    # Note: ``then`` must be imported above; the previous version used ``@then`` without
    # importing it, which would raise NameError if execution ever got past ``scenarios()``.

    result = pytester.runpytest()
    result.stdout.fnmatch_lines(
        ["*StepError: First step in a scenario or background must start with 'Given', 'When' or 'Then', but got And.*"]
    )
5 | """ 6 | 7 | from __future__ import annotations 8 | 9 | import time 10 | from dataclasses import dataclass 11 | from typing import TYPE_CHECKING, Callable, TypedDict 12 | from weakref import WeakKeyDictionary 13 | 14 | from typing_extensions import NotRequired 15 | 16 | if TYPE_CHECKING: 17 | from _pytest.fixtures import FixtureRequest 18 | from _pytest.nodes import Item 19 | from _pytest.reports import TestReport 20 | from _pytest.runner import CallInfo 21 | 22 | from .parser import Feature, Scenario, Step 23 | 24 | scenario_reports_registry: WeakKeyDictionary[Item, ScenarioReport] = WeakKeyDictionary() 25 | test_report_context_registry: WeakKeyDictionary[TestReport, ReportContext] = WeakKeyDictionary() 26 | 27 | 28 | class FeatureDict(TypedDict): 29 | keyword: str 30 | name: str 31 | filename: str 32 | rel_filename: str 33 | language: str 34 | line_number: int 35 | description: str 36 | tags: list[str] 37 | 38 | 39 | class RuleDict(TypedDict): 40 | keyword: str 41 | name: str 42 | description: str 43 | tags: list[str] 44 | 45 | 46 | class StepReportDict(TypedDict): 47 | name: str 48 | type: str 49 | keyword: str 50 | line_number: int 51 | failed: bool 52 | duration: float 53 | 54 | 55 | class ScenarioReportDict(TypedDict): 56 | steps: list[StepReportDict] 57 | keyword: str 58 | name: str 59 | line_number: int 60 | tags: list[str] 61 | feature: FeatureDict 62 | description: str 63 | rule: NotRequired[RuleDict] 64 | failed: NotRequired[bool] 65 | 66 | 67 | class StepReport: 68 | """Step execution report.""" 69 | 70 | failed: bool = False 71 | stopped: float | None = None 72 | 73 | def __init__(self, step: Step) -> None: 74 | """Step report constructor. 75 | 76 | :param pytest_bdd.parser.Step step: Step. 77 | """ 78 | self.step = step 79 | self.started = time.perf_counter() 80 | 81 | def serialize(self) -> StepReportDict: 82 | """Serialize the step execution report. 83 | 84 | :return: Serialized step execution report. 
85 | """ 86 | return { 87 | "name": self.step.name, 88 | "type": self.step.type, 89 | "keyword": self.step.keyword, 90 | "line_number": self.step.line_number, 91 | "failed": self.failed, 92 | "duration": self.duration, 93 | } 94 | 95 | def finalize(self, failed: bool) -> None: 96 | """Stop collecting information and finalize the report. 97 | 98 | :param bool failed: Whether the step execution is failed. 99 | """ 100 | self.stopped = time.perf_counter() 101 | self.failed = failed 102 | 103 | @property 104 | def duration(self) -> float: 105 | """Step execution duration. 106 | 107 | :return: Step execution duration. 108 | :rtype: float 109 | """ 110 | if self.stopped is None: 111 | return 0 112 | 113 | return self.stopped - self.started 114 | 115 | 116 | class ScenarioReport: 117 | """Scenario execution report.""" 118 | 119 | def __init__(self, scenario: Scenario) -> None: 120 | """Scenario report constructor. 121 | 122 | :param pytest_bdd.parser.Scenario scenario: Scenario. 123 | """ 124 | self.scenario: Scenario = scenario 125 | self.step_reports: list[StepReport] = [] 126 | 127 | @property 128 | def current_step_report(self) -> StepReport: 129 | """Get current step report. 130 | 131 | :return: Last or current step report. 132 | :rtype: pytest_bdd.reporting.StepReport 133 | """ 134 | return self.step_reports[-1] 135 | 136 | def add_step_report(self, step_report: StepReport) -> None: 137 | """Add new step report. 138 | 139 | :param step_report: New current step report. 140 | :type step_report: pytest_bdd.reporting.StepReport 141 | """ 142 | self.step_reports.append(step_report) 143 | 144 | def serialize(self) -> ScenarioReportDict: 145 | """Serialize scenario execution report in order to transfer reporting from nodes in the distributed mode. 146 | 147 | :return: Serialized report. 
148 | """ 149 | scenario = self.scenario 150 | feature = scenario.feature 151 | 152 | serialized: ScenarioReportDict = { 153 | "steps": [step_report.serialize() for step_report in self.step_reports], 154 | "keyword": scenario.keyword, 155 | "name": scenario.name, 156 | "line_number": scenario.line_number, 157 | "tags": sorted(scenario.tags), 158 | "description": scenario.description, 159 | "feature": { 160 | "keyword": feature.keyword, 161 | "name": feature.name, 162 | "filename": feature.filename, 163 | "rel_filename": feature.rel_filename, 164 | "language": feature.language, 165 | "line_number": feature.line_number, 166 | "description": feature.description, 167 | "tags": sorted(feature.tags), 168 | }, 169 | } 170 | 171 | if scenario.rule: 172 | rule_dict: RuleDict = { 173 | "keyword": scenario.rule.keyword, 174 | "name": scenario.rule.name, 175 | "description": scenario.rule.description, 176 | "tags": sorted(scenario.rule.tags), 177 | } 178 | serialized["rule"] = rule_dict 179 | 180 | return serialized 181 | 182 | def fail(self) -> None: 183 | """Stop collecting information and finalize the report as failed.""" 184 | self.current_step_report.finalize(failed=True) 185 | remaining_steps = self.scenario.steps[len(self.step_reports) :] 186 | 187 | # Fail the rest of the steps and make reports. 
188 | for step in remaining_steps: 189 | report = StepReport(step=step) 190 | report.finalize(failed=True) 191 | self.add_step_report(report) 192 | 193 | 194 | @dataclass 195 | class ReportContext: 196 | scenario: ScenarioReportDict 197 | name: str 198 | 199 | 200 | def runtest_makereport(item: Item, call: CallInfo, rep: TestReport) -> None: 201 | """Store item in the report object.""" 202 | try: 203 | scenario_report: ScenarioReport = scenario_reports_registry[item] 204 | except KeyError: 205 | return 206 | 207 | test_report_context_registry[rep] = ReportContext(scenario=scenario_report.serialize(), name=item.name) 208 | 209 | 210 | def before_scenario(request: FixtureRequest, feature: Feature, scenario: Scenario) -> None: 211 | """Create scenario report for the item.""" 212 | scenario_reports_registry[request.node] = ScenarioReport(scenario=scenario) 213 | 214 | 215 | def step_error( 216 | request: FixtureRequest, 217 | feature: Feature, 218 | scenario: Scenario, 219 | step: Step, 220 | step_func: Callable[..., object], 221 | step_func_args: dict[str, object], 222 | exception: Exception, 223 | ) -> None: 224 | """Finalize the step report as failed.""" 225 | scenario_reports_registry[request.node].fail() 226 | 227 | 228 | def before_step( 229 | request: FixtureRequest, 230 | feature: Feature, 231 | scenario: Scenario, 232 | step: Step, 233 | step_func: Callable[..., object], 234 | ) -> None: 235 | """Store step start time.""" 236 | scenario_reports_registry[request.node].add_step_report(StepReport(step=step)) 237 | 238 | 239 | def after_step( 240 | request: FixtureRequest, 241 | feature: Feature, 242 | scenario: Scenario, 243 | step: Step, 244 | step_func: Callable, 245 | step_func_args: dict, 246 | ) -> None: 247 | """Finalize the step report as successful.""" 248 | scenario_reports_registry[request.node].current_step_report.finalize(failed=False) 249 | -------------------------------------------------------------------------------- 
/tests/datatable/test_datatable.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import textwrap 4 | 5 | from src.pytest_bdd.utils import collect_dumped_objects 6 | 7 | 8 | def test_steps_with_datatables(pytester): 9 | pytester.makefile( 10 | ".feature", 11 | datatable=textwrap.dedent( 12 | """\ 13 | Feature: Manage user accounts 14 | 15 | Scenario: Creating a new user with roles and permissions 16 | Given the following user details: 17 | | name | email | age | 18 | | John | john@example.com | 30 | 19 | | Alice | alice@example.com | 25 | 20 | 21 | When the user is assigned the following roles: 22 | | role | description | 23 | | Admin | Full access to the system | 24 | | Contributor | Can add content | 25 | 26 | And this step has no datatable 27 | 28 | Then the user should have the following permissions: 29 | | permission | allowed | 30 | | view dashboard | true | 31 | | edit content | true | 32 | | delete content | false | 33 | """ 34 | ), 35 | ) 36 | pytester.makeconftest( 37 | textwrap.dedent( 38 | """\ 39 | from pytest_bdd import given, when, then 40 | from pytest_bdd.utils import dump_obj 41 | 42 | 43 | @given("the following user details:") 44 | def _(datatable): 45 | given_datatable = datatable 46 | dump_obj(given_datatable) 47 | 48 | 49 | @when("the user is assigned the following roles:") 50 | def _(datatable): 51 | when_datatable = datatable 52 | dump_obj(when_datatable) 53 | 54 | 55 | @when("this step has no datatable") 56 | def _(): 57 | pass 58 | 59 | 60 | @then("the user should have the following permissions:") 61 | def _(datatable): 62 | then_datatable = datatable 63 | dump_obj(then_datatable) 64 | 65 | """ 66 | ) 67 | ) 68 | pytester.makepyfile( 69 | textwrap.dedent( 70 | """\ 71 | from pytest_bdd import scenario 72 | 73 | @scenario("datatable.feature", "Creating a new user with roles and permissions") 74 | def test_datatable(): 75 | pass 76 | """ 77 | ) 78 | ) 79 | 80 | 
result = pytester.runpytest("-s") 81 | result.assert_outcomes(passed=1) 82 | 83 | datatables = collect_dumped_objects(result) 84 | assert datatables[0] == [ 85 | ["name", "email", "age"], 86 | ["John", "john@example.com", "30"], 87 | ["Alice", "alice@example.com", "25"], 88 | ] 89 | assert datatables[1] == [ 90 | ["role", "description"], 91 | ["Admin", "Full access to the system"], 92 | ["Contributor", "Can add content"], 93 | ] 94 | assert datatables[2] == [ 95 | ["permission", "allowed"], 96 | ["view dashboard", "true"], 97 | ["edit content", "true"], 98 | ["delete content", "false"], 99 | ] 100 | 101 | 102 | def test_datatable_argument_in_step_impl_is_optional(pytester): 103 | pytester.makefile( 104 | ".feature", 105 | optional_arg_datatable=textwrap.dedent( 106 | """\ 107 | Feature: Missing data table 108 | 109 | Scenario: Data table is missing for a step 110 | Given this step has a data table: 111 | | name | email | age | 112 | | John | john@example.com | 30 | 113 | | Alice | alice@example.com | 25 | 114 | 115 | When this step has no data table but tries to use the datatable argument 116 | Then an error is thrown 117 | """ 118 | ), 119 | ) 120 | pytester.makeconftest( 121 | textwrap.dedent( 122 | """\ 123 | from pytest_bdd import given, when, then 124 | 125 | 126 | @given("this step has a data table:") 127 | def _(datatable): 128 | print(datatable) 129 | 130 | 131 | @when("this step has no data table but tries to use the datatable argument") 132 | def _(datatable): 133 | print(datatable) 134 | 135 | 136 | @then("an error is thrown") 137 | def _(datatable): 138 | pass 139 | 140 | """ 141 | ) 142 | ) 143 | 144 | pytester.makepyfile( 145 | textwrap.dedent( 146 | """\ 147 | from pytest_bdd import scenarios 148 | 149 | scenarios("optional_arg_datatable.feature") 150 | """ 151 | ) 152 | ) 153 | result = pytester.runpytest("-s") 154 | result.assert_outcomes(failed=1) 155 | result.stdout.fnmatch_lines(["*fixture 'datatable' not found*"]) 156 | 157 | 158 | def 
def test_steps_with_datatable_missing_argument_in_step(pytester):
    """A step definition may omit the ``datatable`` argument even when the step has a table."""
    feature_text = textwrap.dedent(
        """\
        Feature: Missing datatable

            Scenario: Datatable arg is missing for a step definition
                Given this step has a datatable
                    | name | email            | age |
                    | John | john@example.com | 30  |

                When this step has a datatable but no datatable argument
                    | name | email            | age |
                    | John | john@example.com | 30  |

                Then the test passes
        """
    )
    pytester.makefile(".feature", missing_datatable_arg=feature_text)
    pytester.makepyfile(
        textwrap.dedent(
            """\
            from pytest_bdd import scenario

            @scenario("missing_datatable_arg.feature", "Datatable arg is missing for a step definition")
            def test_datatable():
                pass
            """
        )
    )
    pytester.makeconftest(
        textwrap.dedent(
            """\
            from pytest_bdd import given, when, then


            @given("this step has a datatable")
            def _(datatable):
                print(datatable)


            @when("this step has a datatable but no datatable argument")
            def _():
                pass


            @then("the test passes")
            def _():
                pass

            """
        )
    )
    result = pytester.runpytest("-s")
    result.assert_outcomes(passed=1)


def test_datatable_step_argument_is_reserved_and_cannot_be_used(pytester):
    """Using ``{datatable}`` as a parsed argument name is rejected with a helpful error."""
    feature_text = textwrap.dedent(
        """\
        Feature: Reserved datatable argument

            Scenario: Reserved datatable argument
                Given this step has a {datatable} argument
                Then the test fails
        """
    )
    pytester.makefile(".feature", reserved_datatable_arg=feature_text)

    pytester.makepyfile(
        textwrap.dedent(
            """\
            from pytest_bdd import scenario, given, then, parsers

            @scenario("reserved_datatable_arg.feature", "Reserved datatable argument")
            def test_datatable():
                pass


            @given(parsers.parse("this step has a {datatable} argument"))
            def _(datatable):
                pass


            @then("the test fails")
            def _():
                pass
            """
        )
    )

    result = pytester.runpytest()
    result.assert_outcomes(failed=1)
    result.stdout.fnmatch_lines(
        [
            "*Step 'this step has a {datatable} argument' defines argument names that are reserved: 'datatable'. "
            "Please use different names.*"
        ]
    )
def test_step_trace(pytester):
    """End-to-end check of the cucumber json produced for passing, failing and outlined scenarios."""
    pytester.makefile(
        ".ini",
        pytest=textwrap.dedent(
            """
            [pytest]
            markers =
                scenario-passing-tag
                scenario-failing-tag
                scenario-outline-passing-tag
                feature-tag
            """
        ),
    )
    pytester.makefile(
        ".feature",
        test=textwrap.dedent(
            """
            @feature-tag
            Feature: One passing scenario, one failing scenario
                This is a feature description

                @scenario-passing-tag
                Scenario: Passing
                    This is a scenario description

                    Given a passing step
                    And some other passing step

                @scenario-failing-tag
                Scenario: Failing
                    Given a passing step
                    And a failing step

                @scenario-outline-passing-tag
                Scenario Outline: Passing outline
                    Given type <type> and value <value>

                    Examples: example1
                    | type  | value |
                    | str   | hello |
                    | int   | 42    |
                    | float | 1.0   |
            """
        ),
    )
    pytester.makepyfile(
        textwrap.dedent(
            """
            import pytest
            from pytest_bdd import given, when, scenario, parsers

            @given('a passing step')
            def _():
                return 'pass'

            @given('some other passing step')
            def _():
                return 'pass'

            @given('a failing step')
            def _():
                raise Exception('Error')

            @given(parsers.parse('type {type} and value {value}'))
            def _():
                return 'pass'

            @scenario('test.feature', 'Passing')
            def test_passing():
                pass

            @scenario('test.feature', 'Failing')
            def test_failing():
                pass

            @scenario('test.feature', 'Passing outline')
            def test_passing_outline():
                pass
            """
        )
    )
    result, jsonobject = runandparse(pytester)
    result.assert_outcomes(passed=4, failed=1)

    assert result.ret

    def step_element(keyword, line, name, status="passed", **extra):
        # Build one expected cucumber "step" element; extras end up in its "result".
        return {
            "keyword": keyword,
            "line": line,
            "match": {"location": ""},
            "name": name,
            "result": {"status": status, "duration": OfType(int), **extra},
        }

    # The three outline examples produce structurally identical elements.
    outline_elements = [
        {
            "description": "",
            "keyword": "Scenario Outline",
            "tags": [{"line": 17, "name": "scenario-outline-passing-tag"}],
            "steps": [step_element("Given", 19, f"type {type_} and value {value}")],
            "line": 18,
            "type": "scenario",
            "id": f"test_passing_outline[{type_}-{value}]",
            "name": "Passing outline",
        }
        for type_, value in [("str", "hello"), ("int", "42"), ("float", "1.0")]
    ]

    expected = [
        {
            "description": "This is a feature description",
            "elements": [
                {
                    "description": "This is a scenario description",
                    "id": "test_passing",
                    "keyword": "Scenario",
                    "line": 6,
                    "name": "Passing",
                    "steps": [
                        step_element("Given", 9, "a passing step"),
                        step_element("And", 10, "some other passing step"),
                    ],
                    "tags": [{"name": "scenario-passing-tag", "line": 5}],
                    "type": "scenario",
                },
                {
                    "description": "",
                    "id": "test_failing",
                    "keyword": "Scenario",
                    "line": 13,
                    "name": "Failing",
                    "steps": [
                        step_element("Given", 14, "a passing step"),
                        step_element("And", 15, "a failing step", status="failed", error_message=OfType(str)),
                    ],
                    "tags": [{"name": "scenario-failing-tag", "line": 12}],
                    "type": "scenario",
                },
                *outline_elements,
            ],
            "id": os.path.join("test_step_trace0", "test.feature"),
            "keyword": "Feature",
            "language": "en",
            "line": 2,
            "name": "One passing scenario, one failing scenario",
            "tags": [{"name": "feature-tag", "line": 1}],
            "uri": os.path.join(pytester.path.name, "test.feature"),
        }
    ]

    assert jsonobject == expected
def test_step_trace(pytester):
    """Check the serialized scenario attached to test reports for plain and outlined scenarios."""
    pytester.makefile(
        ".ini",
        pytest=textwrap.dedent(
            """
            [pytest]
            markers =
                feature-tag
                scenario-passing-tag
                scenario-failing-tag
            """
        ),
    )
    feature = pytester.makefile(
        ".feature",
        test=textwrap.dedent(
            """
            @feature-tag
            Feature: One passing scenario, one failing scenario

                @scenario-passing-tag
                Scenario: Passing
                    Given a passing step
                    And some other passing step

                @scenario-failing-tag
                Scenario: Failing
                    Given a passing step
                    And a failing step

                Scenario Outline: Outlined
                    Given there are <start> cucumbers
                    When I eat <eat> cucumbers
                    Then I should have <left> cucumbers

                    Examples:
                    | start | eat | left |
                    | 12    | 5   | 7    |
                    | 5     | 4   | 1    |
            """
        ),
    )
    relpath = feature.relative_to(pytester.path.parent)
    pytester.makepyfile(
        textwrap.dedent(
            """
            import pytest
            from pytest_bdd import given, when, then, scenarios, parsers

            @given('a passing step')
            def _():
                return 'pass'

            @given('some other passing step')
            def _():
                return 'pass'

            @given('a failing step')
            def _():
                raise Exception('Error')

            @given(parsers.parse('there are {start:d} cucumbers'), target_fixture="cucumbers")
            def _(start):
                assert isinstance(start, int)
                return {"start": start}


            @when(parsers.parse('I eat {eat:g} cucumbers'))
            def _(cucumbers, eat):
                assert isinstance(eat, float)
                cucumbers['eat'] = eat


            @then(parsers.parse('I should have {left} cucumbers'))
            def _(cucumbers, left):
                assert isinstance(left, str)
                assert cucumbers['start'] - cucumbers['eat'] == int(left)


            scenarios('test.feature')
            """
        )
    )
    result = pytester.inline_run("-vvl")
    assert result.ret

    # All scenarios belong to the same feature, so share the expected feature dict.
    expected_feature = {
        "description": "",
        "keyword": "Feature",
        "language": "en",
        "filename": str(feature),
        "line_number": 2,
        "name": "One passing scenario, one failing scenario",
        "rel_filename": str(relpath),
        "tags": ["feature-tag"],
    }

    def step_dict(keyword, line_number, name, type_, failed=False):
        # Build one expected serialized step report.
        return {
            "duration": OfType(float),
            "failed": failed,
            "keyword": keyword,
            "line_number": line_number,
            "name": name,
            "type": type_,
        }

    report = result.matchreport("test_passing", when="call")
    scenario = test_report_context_registry[report].scenario
    assert scenario == {
        "feature": expected_feature,
        "keyword": "Scenario",
        "line_number": 5,
        "name": "Passing",
        "description": "",
        "steps": [
            step_dict("Given", 6, "a passing step", "given"),
            step_dict("And", 7, "some other passing step", "given"),
        ],
        "tags": ["scenario-passing-tag"],
    }

    report = result.matchreport("test_failing", when="call")
    scenario = test_report_context_registry[report].scenario
    assert scenario == {
        "feature": expected_feature,
        "keyword": "Scenario",
        "line_number": 10,
        "name": "Failing",
        "description": "",
        "steps": [
            step_dict("Given", 11, "a passing step", "given"),
            step_dict("And", 12, "a failing step", "given", failed=True),
        ],
        "tags": ["scenario-failing-tag"],
    }

    # Both outline examples share the same structure; only the parameters differ.
    for params, (start, eat, left) in [("12-5-7", (12, 5, 7)), ("5-4-1", (5, 4, 1))]:
        report = result.matchreport(f"test_outlined[{params}]", when="call")
        scenario = test_report_context_registry[report].scenario
        assert scenario == {
            "feature": expected_feature,
            "keyword": "Scenario Outline",
            "line_number": 14,
            "name": "Outlined",
            "description": "",
            "steps": [
                step_dict("Given", 15, f"there are {start} cucumbers", "given"),
                step_dict("When", 16, f"I eat {eat} cucumbers", "when"),
                step_dict("Then", 17, f"I should have {left} cucumbers", "then"),
            ],
            "tags": [],
        }
serialization of the complex types.""" 284 | if not pytestconfig.pluginmanager.has_plugin("xdist"): 285 | pytest.skip("Execnet not installed") 286 | 287 | import execnet.gateway_base 288 | 289 | pytester.makefile( 290 | ".feature", 291 | test=textwrap.dedent( 292 | """ 293 | Feature: Report serialization containing parameters of complex types 294 | 295 | Scenario Outline: Complex 296 | Given there is a coordinate 297 | 298 | Examples: 299 | | point | 300 | | 10,20 | 301 | """ 302 | ), 303 | ) 304 | pytester.makepyfile( 305 | textwrap.dedent( 306 | """ 307 | import pytest 308 | from pytest_bdd import given, when, then, scenario, parsers 309 | 310 | class Point: 311 | 312 | def __init__(self, x, y): 313 | self.x = x 314 | self.y = y 315 | 316 | @classmethod 317 | def parse(cls, value): 318 | return cls(*(int(x) for x in value.split(','))) 319 | 320 | class Alien(object): 321 | pass 322 | 323 | @given( 324 | parsers.parse('there is a coordinate {point}'), 325 | target_fixture="point", 326 | converters={"point": Point.parse}, 327 | ) 328 | def given_there_is_a_point(point): 329 | assert isinstance(point, Point) 330 | return point 331 | 332 | 333 | @pytest.mark.parametrize('alien', [Alien()]) 334 | @scenario('test.feature', 'Complex') 335 | def test_complex(alien): 336 | pass 337 | 338 | """ 339 | ) 340 | ) 341 | result = pytester.inline_run("-vvl") 342 | report = result.matchreport("test_complex[10,20-alien0]", when="call") 343 | assert report.passed 344 | 345 | report_context = test_report_context_registry[report] 346 | assert execnet.gateway_base.dumps(report_context.name) 347 | assert execnet.gateway_base.dumps(report_context.scenario) 348 | -------------------------------------------------------------------------------- /src/pytest_bdd/gherkin_parser.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import linecache 4 | import re 5 | import textwrap 6 | import typing 7 | from 
# Known gherkin parser failure modes: each entry maps a regex over the raw
# parser error text to the pytest-bdd exception type and user-facing message.
# Ordered most-specific first; the final catch-all matches any token error.
ERROR_PATTERNS = [
    (
        re.compile(r"expected:.*got 'Feature.*'"),
        exceptions.FeatureError,
        "Multiple features are not allowed in a single feature file.",
    ),
    (
        re.compile(r"expected:.*got '(?:Given|When|Then|And|But).*'"),
        exceptions.FeatureError,
        "Step definition outside of a Scenario or a Background.",
    ),
    (
        re.compile(r"expected:.*got 'Background.*'"),
        exceptions.BackgroundError,
        "Multiple 'Background' sections detected. Only one 'Background' is allowed per feature.",
    ),
    (
        re.compile(r"expected:.*got 'Scenario.*'"),
        exceptions.ScenarioError,
        "Misplaced or incorrect 'Scenario' keyword. Ensure it's correctly placed. There might be a missing Feature section.",
    ),
    (
        re.compile(r"expected:.*got 'Given.*'"),
        exceptions.StepError,
        "Improper step keyword detected. Ensure correct order and indentation for steps (Given, When, Then, etc.).",
    ),
    (
        re.compile(r"expected:.*got 'Rule.*'"),
        exceptions.RuleError,
        "Misplaced or incorrectly formatted 'Rule'. Ensure it follows the feature structure.",
    ),
    (
        re.compile(r"expected:.*got '.*'"),
        exceptions.TokenError,
        "Unexpected token found. Check Gherkin syntax near the reported error.",
    ),
]


@dataclass
class Location:
    """A (column, line) position within the feature file."""

    column: int
    line: int

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> Self:
        """Build a location from a raw gherkin AST mapping."""
        return cls(data["column"], data["line"])


@dataclass
class Comment:
    """A ``#`` comment attached to the gherkin document."""

    location: Location
    text: str

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> Self:
        """Build a comment from a raw gherkin AST mapping."""
        where = Location.from_dict(data["location"])
        return cls(location=where, text=data["text"])


@dataclass
class Cell:
    """A single cell of a table row."""

    location: Location
    value: str

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> Self:
        """Build a cell; backslashes in the value are escaped via ``_to_raw_string``."""
        where = Location.from_dict(data["location"])
        return cls(location=where, value=_to_raw_string(data["value"]))


@dataclass
class Row:
    """One row of a data table or examples table."""

    id: str
    location: Location
    cells: list[Cell]

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> Self:
        """Build a row (and its cells) from a raw gherkin AST mapping."""
        parsed_cells = [Cell.from_dict(c) for c in data["cells"]]
        return cls(id=data["id"], location=Location.from_dict(data["location"]), cells=parsed_cells)


@dataclass
class ExamplesTable:
    """An ``Examples:`` block of a scenario outline."""

    location: Location
    tags: list[Tag]
    name: str | None = None
    table_header: Row | None = None
    table_body: list[Row] | None = field(default_factory=list)

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> Self:
        """Build an examples table from a raw gherkin AST mapping."""
        raw_header = data.get("tableHeader")
        return cls(
            location=Location.from_dict(data["location"]),
            tags=[Tag.from_dict(t) for t in data["tags"]],
            name=data.get("name"),
            table_header=Row.from_dict(raw_header) if raw_header else None,
            table_body=[Row.from_dict(r) for r in data.get("tableBody", [])],
        )


@dataclass
class DataTable:
    """An inline step data table."""

    location: Location
    rows: list[Row]

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> Self:
        """Build a data table from a raw gherkin AST mapping."""
        parsed_rows = [Row.from_dict(r) for r in data.get("rows", [])]
        return cls(location=Location.from_dict(data["location"]), rows=parsed_rows)

    def raw(self) -> Sequence[Sequence[object]]:
        """Return the table as plain nested lists of cell values."""
        return [[c.value for c in r.cells] for r in self.rows]


@dataclass
class DocString:
    """A triple-quoted step doc string."""

    content: str
    delimiter: str
    location: Location

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> Self:
        """Build a doc string; the content is dedented."""
        return cls(
            content=textwrap.dedent(data["content"]),
            delimiter=data["delimiter"],
            location=Location.from_dict(data["location"]),
        )


@dataclass
class Step:
    """A single Given/When/Then step, with optional table or doc string."""

    id: str
    location: Location
    keyword: str
    keyword_type: str
    text: str
    datatable: DataTable | None = None
    docstring: DocString | None = None

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> Self:
        """Build a step from a raw gherkin AST mapping; the keyword is stripped."""
        raw_table = data.get("dataTable")
        raw_doc = data.get("docString")
        return cls(
            id=data["id"],
            location=Location.from_dict(data["location"]),
            keyword=data["keyword"].strip(),
            keyword_type=data["keywordType"],
            text=data["text"],
            datatable=DataTable.from_dict(raw_table) if raw_table else None,
            docstring=DocString.from_dict(raw_doc) if raw_doc else None,
        )


@dataclass
class Tag:
    """An ``@tag`` attached to a feature, scenario or examples table."""

    id: str
    location: Location
    name: str

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> Self:
        """Build a tag from a raw gherkin AST mapping."""
        return cls(data["id"], Location.from_dict(data["location"]), data["name"])


@dataclass
class Scenario:
    """A ``Scenario`` or ``Scenario Outline`` with its steps, tags and examples."""

    id: str
    location: Location
    keyword: str
    name: str
    description: str
    steps: list[Step]
    tags: list[Tag]
    examples: list[ExamplesTable] = field(default_factory=list)

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> Self:
        """Build a scenario from a raw gherkin AST mapping."""
        return cls(
            id=data["id"],
            location=Location.from_dict(data["location"]),
            keyword=data["keyword"],
            name=data["name"],
            description=data["description"],
            steps=[Step.from_dict(s) for s in data["steps"]],
            tags=[Tag.from_dict(t) for t in data["tags"]],
            examples=[ExamplesTable.from_dict(ex) for ex in data["examples"]],
        )


@dataclass
class Rule:
    """A ``Rule:`` grouping of scenarios/backgrounds inside a feature."""

    id: str
    location: Location
    keyword: str
    name: str
    description: str
    tags: list[Tag]
    children: list[Child]

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> Self:
        """Build a rule (including nested children) from a raw gherkin AST mapping."""
        return cls(
            id=data["id"],
            location=Location.from_dict(data["location"]),
            keyword=data["keyword"],
            name=data["name"],
            description=data["description"],
            tags=[Tag.from_dict(t) for t in data["tags"]],
            children=[Child.from_dict(ch) for ch in data["children"]],
        )


@dataclass
class Background:
    """A ``Background:`` section with its shared steps."""

    id: str
    location: Location
    keyword: str
    name: str
    description: str
    steps: list[Step]

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> Self:
        """Build a background from a raw gherkin AST mapping."""
        return cls(
            id=data["id"],
            location=Location.from_dict(data["location"]),
            keyword=data["keyword"],
            name=data["name"],
            description=data["description"],
            steps=[Step.from_dict(s) for s in data["steps"]],
        )


@dataclass
class Child:
    """A feature/rule child: exactly one of background, rule or scenario is set."""

    background: Background | None = None
    rule: Rule | None = None
    scenario: Scenario | None = None

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> Self:
        """Build a child wrapper; absent variants stay ``None``."""
        raw_background = data.get("background")
        raw_rule = data.get("rule")
        raw_scenario = data.get("scenario")
        return cls(
            background=Background.from_dict(raw_background) if raw_background else None,
            rule=Rule.from_dict(raw_rule) if raw_rule else None,
            scenario=Scenario.from_dict(raw_scenario) if raw_scenario else None,
        )
@dataclass
class Feature:
    """A parsed ``Feature:`` section, including all of its children."""

    location: Location
    language: str
    keyword: str
    tags: list[Tag]
    name: str
    description: str
    children: list[Child]

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> Self:
        """Build a feature from a raw gherkin AST mapping."""
        return cls(
            location=Location.from_dict(data["location"]),
            language=data["language"],
            keyword=data["keyword"],
            tags=[Tag.from_dict(tag) for tag in data["tags"]],
            name=data["name"],
            description=data["description"],
            children=[Child.from_dict(child) for child in data["children"]],
        )


@dataclass
class GherkinDocument:
    """Top-level result of parsing a single feature file."""

    feature: Feature
    comments: list[Comment]

    @classmethod
    def from_dict(cls, data: Mapping[str, Any]) -> Self:
        """Build the document wrapper from the raw gherkin parser output."""
        return cls(
            feature=Feature.from_dict(data["feature"]),
            comments=[Comment.from_dict(comment) for comment in data["comments"]],
        )


def _to_raw_string(normal_string: str) -> str:
    """Escape backslashes so the value survives later string processing."""
    return normal_string.replace("\\", "\\\\")


def get_gherkin_document(abs_filename: str, encoding: str = "utf-8") -> GherkinDocument:
    """Parse the feature file at ``abs_filename`` into a :class:`GherkinDocument`.

    On a parser failure, the raw gherkin error is mapped to a pytest-bdd
    exception via :func:`handle_gherkin_parser_error`; if no known pattern
    matches, a generic ``GherkinParseError`` is raised instead.
    """
    with open(abs_filename, encoding=encoding) as f:
        feature_file_text = f.read()

    try:
        gherkin_data = Parser().parse(feature_file_text)
    except CompositeParserException as e:
        message = e.args[0]
        # Report against the first error's location; look it up once and reuse it.
        line = e.errors[0].location["line"]
        line_content = linecache.getline(abs_filename, line).rstrip("\n")
        filename = abs_filename
        handle_gherkin_parser_error(message, line, line_content, filename, e)
        # If no patterns matched, raise a generic GherkinParserError
        raise exceptions.GherkinParseError(f"Unknown parsing error: {message}", line, line_content, filename) from e

    # At this point, the `gherkin_data` should be valid if no exception was raised
    return GherkinDocument.from_dict(gherkin_data)


def handle_gherkin_parser_error(
    raw_error: str, line: int, line_content: str, filename: str, original_exception: Exception | None = None
) -> None:
    """Map the error message to a specific exception type and raise it.

    Returns normally only when no pattern in ``ERROR_PATTERNS`` matches any
    line of ``raw_error``; the caller is expected to raise a fallback then.
    """
    # A composite error may span several lines; check each against all patterns.
    error_lines = raw_error.splitlines()

    for error_line in error_lines:
        for pattern, exception_class, message in ERROR_PATTERNS:
            if pattern.search(error_line):
                # Chain the original parser exception when available so its
                # traceback remains visible; otherwise raise plainly.
                if original_exception:
                    raise exception_class(message, line, line_content, filename) from original_exception
                else:
                    raise exception_class(message, line, line_content, filename)