├── tests ├── __init__.py ├── conftest.py └── test_jsonreport.py ├── pytest_jsonreport ├── __init__.py ├── serialize.py └── plugin.py ├── .coveragerc ├── .gitignore ├── .github └── workflows │ └── main.yml ├── tox.ini ├── LICENSE ├── setup.py ├── README.md └── sample_report.json /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /pytest_jsonreport/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | branch = True 3 | source = pytest_jsonreport 4 | 5 | [paths] 6 | source = 7 | pytest_jsonreport 8 | .tox/*/lib/python*/site-packages/pytest_jsonreport 9 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | *.py[cod] 3 | *$py.class 4 | *.egg-info/ 5 | .coverage 6 | .cache/ 7 | .report.json 8 | TODO 9 | .tox 10 | build/ 11 | dist/ 12 | .pytest_cache/ 13 | .coverage.* 14 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | workflow_dispatch: 9 | 10 | jobs: 11 | test: 12 | runs-on: ubuntu-latest 13 | strategy: 14 | matrix: 15 | python-version: ['3.7', '3.8', '3.9', '3.10'] 16 | steps: 17 | - uses: actions/checkout@v2 18 | - name: Set up Python ${{ matrix.python-version }} 19 | uses: actions/setup-python@v2 20 | with: 21 | python-version: ${{ matrix.python-version }} 22 | - name: Set up tox 23 | run: | 24 | pip install --upgrade pip tox 25 | - name: Run tox 26 | run: | 27 | tox -e py 28 | lint: 29 | runs-on: ubuntu-latest 30 | steps: 31 | - uses: actions/checkout@v2 32 | - name: Set up Python 33 | uses: actions/setup-python@v2 34 | - name: Set up tox 35 | run: | 36 | pip install --upgrade pip tox 37 | - name: Run tox 38 | run: | 39 | tox -e lint 40 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = py{37,38,39,310},lint,coverage-report 3 | 4 | [testenv] 5 | deps = 6 | coverage 7 | pytest 8 | pytest-xdist 9 | flaky 10 | commands = 11 | coverage run --parallel -m pytest -v {posargs} 12 | 13 | [testenv:coverage-report] 14 | basepython = python3.10 15 | skip_install = true 16 | deps = coverage 17 | commands = 18 | coverage combine 19 | coverage report 20 | 21 | [testenv:lint] 22 | deps = 23 | flake8 24 | pylint 25 | commands = 26 | flake8 --max-line-length 100 27 | pylint --rcfile tox.ini pytest_jsonreport/ 28 | 29 | [testenv:release] 30 | deps = 31 | wheel 32 | twine 33 | commands = 34 | rm -rf *.egg-info build/ dist/ 35 | python setup.py bdist_wheel sdist 36 | twine upload -r pypi dist/* 37 | rm -rf *.egg-info build/ dist/ 38 | 39 | [pylint] 40 | disable = 41 | missing-docstring, 42 | invalid-name, 43 | unused-argument, 44 | too-few-public-methods, 45 | too-many-public-methods, 46 | protected-access, 47 | no-self-use, 48 | too-many-instance-attributes, 49 | fixme, 50 | consider-using-f-string, 51 | 
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Arminius 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from os import path 2 | import sys 3 | 4 | from setuptools import setup 5 | 6 | # Open encoding isn't available for Python 2.7 (sigh) 7 | if sys.version_info < (3, 0): 8 | from io import open 9 | 10 | 11 | this_directory = path.abspath(path.dirname(__file__)) 12 | with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f: 13 | long_description = f.read() 14 | 15 | 16 | setup( 17 | name='pytest-json-report', 18 | description='A pytest plugin to report test results as JSON files', 19 | long_description=long_description, 20 | long_description_content_type='text/markdown', 21 | packages=['pytest_jsonreport'], 22 | author='numirias', 23 | author_email='numirias@users.noreply.github.com', 24 | version='1.5.0', 25 | url='https://github.com/numirias/pytest-json-report', 26 | license='MIT', 27 | install_requires=[ 28 | 'pytest>=3.8.0', 29 | 'pytest-metadata', 30 | ], 31 | entry_points={ 32 | 'pytest11': [ 33 | 'pytest_jsonreport = pytest_jsonreport.plugin', 34 | ] 35 | }, 36 | classifiers=[ 37 | 'Development Status :: 4 - Beta', 38 | 'Programming Language :: Python', 39 | 'Programming Language :: Python :: 3', 40 | 'Programming Language :: Python :: 3.7', 41 | 'Programming Language :: Python :: 3.8', 42 | 'Programming Language :: Python :: 3.9', 43 | 'Programming Language :: Python :: 3.10', 44 | 'Framework :: Pytest', 45 | ], 46 | ) 47 | -------------------------------------------------------------------------------- /pytest_jsonreport/serialize.py: -------------------------------------------------------------------------------- 1 | """Functions for making test data JSON-serializable. 
2 | 3 | """ 4 | from collections import Counter 5 | import json 6 | 7 | 8 | def serializable(obj): 9 | """Return whether `obj` is JSON-serializable.""" 10 | try: 11 | json.dumps(obj) 12 | except (TypeError, OverflowError): 13 | return False 14 | return True 15 | 16 | 17 | def make_collector(report, result): 18 | """Return JSON-serializable collector node.""" 19 | collector = { 20 | 'nodeid': report.nodeid, 21 | # This is the outcome of the collection, not the test outcome 22 | 'outcome': report.outcome, 23 | 'result': result, 24 | } 25 | if report.longrepr: 26 | # The collection report doesn't provide crash details, so we can only 27 | # add the message, but no traceback etc. 28 | collector['longrepr'] = str(report.longrepr) 29 | return collector 30 | 31 | 32 | def make_collectitem(item): 33 | """Return JSON-serializable collection item.""" 34 | json_item = { 35 | 'nodeid': item.nodeid, 36 | 'type': item.__class__.__name__, 37 | } 38 | try: 39 | location = item.location 40 | except AttributeError: 41 | pass 42 | else: 43 | json_item['lineno'] = location[1] 44 | return json_item 45 | 46 | 47 | def make_testitem(nodeid, keywords, location): 48 | """Return JSON-serializable test item.""" 49 | item = { 50 | 'nodeid': nodeid, 51 | 'lineno': location[1], 52 | # The outcome will be overridden in case of failure 53 | 'outcome': 'passed', 54 | } 55 | if keywords: 56 | item['keywords'] = keywords 57 | return item 58 | 59 | 60 | def make_teststage(report, stdout, stderr, log, omit_traceback): 61 | """Return JSON-serializable test stage (setup/call/teardown).""" 62 | stage = { 63 | 'duration': report.duration, 64 | 'outcome': report.outcome, 65 | } 66 | crash = getattr(report.longrepr, 'reprcrash', None) 67 | if crash is not None: 68 | stage['crash'] = make_fileloc(crash) 69 | if not omit_traceback: 70 | try: 71 | stage['traceback'] = [make_fileloc(x.reprfileloc) for x in 72 | report.longrepr.reprtraceback.reprentries] 73 | except AttributeError: 74 | # Happens if no detailed tb entries are available (e.g. due to 75 | # `--tb=native`, see `_pytest._code.code.ReprTracebackNative`). 76 | # Then we can't provide any tb info beyond the raw error text 77 | # in `longrepr`, so just pass quietly. 78 | pass 79 | if stdout: 80 | stage['stdout'] = stdout 81 | if stderr: 82 | stage['stderr'] = stderr 83 | if log: 84 | stage['log'] = log 85 | # Error representation string (attr is computed property, so get only once) 86 | longrepr = report.longreprtext 87 | if longrepr: 88 | stage['longrepr'] = longrepr 89 | return stage 90 | 91 | 92 | def make_fileloc(loc): 93 | """Return JSON-serializable file location representation. 94 | 95 | See `_pytest._code.code.ReprFileLocation`. 
96 | """ 97 | return { 98 | 'path': loc.path, 99 | 'lineno': loc.lineno, 100 | 'message': loc.message, 101 | } 102 | 103 | 104 | def make_summary(tests, **kwargs): 105 | """Return JSON-serializable test result summary.""" 106 | summary = Counter([t['outcome'] for t in tests.values()]) 107 | summary['total'] = sum(summary.values()) 108 | summary.update(kwargs) 109 | return summary 110 | 111 | 112 | def make_warning(warning_message, when): 113 | # `warning_message` is a stdlib warnings.WarningMessage object 114 | return { 115 | 'message': str(warning_message.message), 116 | 'category': warning_message.category.__name__, 117 | 'when': when, 118 | 'filename': warning_message.filename, 119 | 'lineno': warning_message.lineno 120 | } 121 | 122 | 123 | def make_report(**kwargs): 124 | return dict(kwargs) 125 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | import pytest 4 | 5 | pytest_plugins = 'pytester' 6 | miss_map = { 7 | 'V': 'Different values', 8 | 'K': 'Different keys', 9 | 'T': 'Different types', 10 | } 11 | # Some test cases borrowed from github.com/mattcl/pytest-json 12 | FILE = """ 13 | from __future__ import print_function 14 | import sys 15 | import pytest 16 | 17 | 18 | @pytest.fixture 19 | def setup_teardown_fixture(request): 20 | print('setup') 21 | print('setuperr', file=sys.stderr) 22 | def fn(): 23 | print('teardown') 24 | print('teardownerr', file=sys.stderr) 25 | request.addfinalizer(fn) 26 | 27 | @pytest.fixture 28 | def fail_setup_fixture(request): 29 | assert False 30 | 31 | @pytest.fixture 32 | def fail_teardown_fixture(request): 33 | def fn(): 34 | assert False 35 | request.addfinalizer(fn) 36 | 37 | 38 | def test_pass(): 39 | assert True 40 | 41 | def test_fail_with_fixture(setup_teardown_fixture): 42 | print('call') 43 | print('callerr', file=sys.stderr) 44 | assert False 45 | 46 | @pytest.mark.xfail(reason='testing xfail') 47 | def test_xfail(): 48 | assert False 49 | 50 | @pytest.mark.xfail(reason='testing xfail') 51 | def test_xfail_but_passing(): 52 | assert True 53 | 54 | def test_fail_during_setup(fail_setup_fixture): 55 | assert True 56 | 57 | def test_fail_during_teardown(fail_teardown_fixture): 58 | assert True 59 | 60 | @pytest.mark.skipif(True, reason='testing skip') 61 | def test_skip(): 62 | assert False 63 | 64 | def test_fail_nested(): 65 | def baz(o=1): 66 | c = 3 67 | return 2 - c - None 68 | def bar(m, n=5): 69 | b = 2 70 | print(m) 71 | print('bar') 72 | return baz() 73 | def foo(): 74 | a = 1 75 | print('foo') 76 | v = [bar(x) for x in range(3)] 77 | return v 78 | foo() 79 | 80 | @pytest.mark.parametrize('x', [1, 2]) 81 | def test_parametrized(x): 82 | assert x == 1 83 | """ 84 | 85 | 86 | @pytest.fixture 87 | def misc_testdir(testdir): 88 | testdir.makepyfile(FILE) 89 | return testdir 90 | 91 | 92 | @pytest.fixture 93 | def json_data(make_json): 94 | return make_json() 95 | 96 | 97 | @pytest.fixture 98 | def tests(json_data): 99 | return tests_only(json_data) 100 | 101 | 102 | def tests_only(json_data): 103 | return {test['nodeid'].split('::')[-1][5:]: test for test in 104 | json_data['tests']} 105 | 106 | 107 | # Each test run should work with and without xdist (-n specifies workers) 108 | @pytest.fixture(params=[0, 1, 4]) 109 | def num_processes(request): 110 | return request.param 111 | 112 | 113 | @pytest.fixture 114 | def make_json(num_processes, testdir): 115 | def func(content=FILE, 
args=['-vv', '--json-report', '-n=%d' %
116 |                        num_processes], path='.report.json'):
117 |         testdir.makepyfile(content)
118 |         testdir.runpytest(*args)
119 |         with open(str(testdir.tmpdir / path)) as f:
120 |             data = json.load(f)
121 |         return data
122 |     return func
123 | 
124 | 
125 | @pytest.fixture
126 | def match_reports():
127 |     def f(a, b):
128 |         diffs = list(diff(normalize_report(a), normalize_report(b)))
129 |         if not diffs:
130 |             return True
131 |         for kind, path, a_, b_ in diffs:
132 |             path_str = '.'.join(path)
133 |             kind_str = miss_map[kind]
134 |             if kind == 'V':
135 |                 print(kind_str, path_str)
136 |                 print('\t', a_)
137 |                 print('\t', b_)
138 |             else:
139 |                 print(kind_str + ':', path_str, a_, b_)
140 |         return False
141 |     return f
142 | 
143 | 
144 | def normalize_report(report):
145 |     report['created'] = 0
146 |     report['duration'] = 0
147 |     # xdist doesn't report successful node collections
148 |     report['collectors'] = []
149 | 
150 |     for test in report['tests']:
151 |         for stage_name in ('setup', 'call', 'teardown'):
152 |             try:
153 |                 stage = test[stage_name]
154 |             except KeyError:
155 |                 continue
156 |             stage['duration'] = 0
157 |             if 'longrepr' not in stage:
158 |                 stage['longrepr'] = ''
159 |     return report
160 | 
161 | 
162 | def diff(a, b, path=None):
163 |     """Return differences between reports a and b."""
164 |     if path is None:
165 |         path = []
166 |     # We can't compare "longrepr" because they may be different between runs
167 |     # with and without workers, so skip it
168 |     if path and path[-1] == 'longrepr':
169 |         return
170 |     if type(a) != type(b):
171 |         yield ('T', path, a, b)
172 |         return
173 |     if type(a) == dict:
174 |         a_keys = sorted(a.keys())
175 |         b_keys = sorted(b.keys())
176 |         if a_keys != b_keys:
177 |             yield ('K', path, a_keys, b_keys)
178 |             return
179 |         for ak, bk in zip(a_keys, b_keys):
180 |             for item in diff(a[ak], b[bk], path + [str(ak)]):
181 |                 yield item
182 |         return
183 |     if type(a) == list:
184 |         for i, (ai, bi) in enumerate(zip(a, b)):
185 |             for item in diff(ai, bi, path + [str(i)]):
186 |                 yield item
187 |         return
188 |     if a != b:
189 |         yield ('V', path, repr(a), repr(b))
190 |     return
191 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Pytest JSON Report
2 | 
3 | [![CI](https://github.com/numirias/pytest-json-report/actions/workflows/main.yml/badge.svg)](https://github.com/numirias/pytest-json-report/actions/workflows/main.yml)
4 | [![PyPI Version](https://img.shields.io/pypi/v/pytest-json-report.svg)](https://pypi.python.org/pypi/pytest-json-report)
5 | [![Python Versions](https://img.shields.io/pypi/pyversions/pytest-json-report.svg)](https://pypi.python.org/pypi/pytest-json-report)
6 | 
7 | This pytest plugin creates test reports as JSON. This makes it easy to process test results in other applications.
8 | 
9 | It can report a summary, test details, captured output, logs, exception tracebacks and more. Additionally, you can use the available fixtures and hooks to [add metadata](#metadata) and [customize](#modifying-the-report) the report as you like.
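
For example, a downstream script might consume the report along these lines (a minimal sketch, assuming the report was written to the default `.report.json` path described under [Usage](#usage)):

```python
import json

# Load the report produced by `pytest --json-report`
with open('.report.json') as f:
    report = json.load(f)

# The summary maps outcomes to counts, e.g. {"passed": 2, "failed": 1, "total": 3}
print(report['summary'])

# Each entry under "tests" describes a single test item
for test in report.get('tests', []):
    print(test['nodeid'], test['outcome'])
```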
10 | 11 | ## Table of contents 12 | 13 | * [Installation](#installation) 14 | * [Options](#options) 15 | * [Usage](#usage) 16 | * [Metadata](#metadata) 17 | * [Modifying the report](#modifying-the-report) 18 | * [Direct invocation](#direct-invocation) 19 | * [Format](#format) 20 | * [Summary](#summary) 21 | * [Environment](#environment) 22 | * [Collectors](#collectors) 23 | * [Tests](#tests) 24 | * [Test stage](#test-stage) 25 | * [Log](#log) 26 | * [Warnings](#warnings) 27 | * [Related tools](#related-tools) 28 | 29 | ## Installation 30 | 31 | ``` 32 | pip install pytest-json-report --upgrade 33 | ``` 34 | 35 | ## Options 36 | 37 | | Option | Description | 38 | | --- | --- | 39 | | `--json-report` | Create JSON report | 40 | | `--json-report-file=PATH` | Target path to save JSON report (use "none" to not save the report) | 41 | | `--json-report-summary` | Just create a summary without per-test details | 42 | | `--json-report-omit=FIELD_LIST` | List of fields to omit in the report (choose from: `collectors`, `log`, `traceback`, `streams`, `warnings`, `keywords`) | 43 | | `--json-report-indent=LEVEL` | Pretty-print JSON with specified indentation level | 44 | | `--json-report-verbosity=LEVEL` | Set verbosity (default is value of `--verbosity`) | 45 | 46 | ## Usage 47 | 48 | Just run pytest with `--json-report`. The report is saved in `.report.json` by default. 49 | 50 | ```bash 51 | $ pytest --json-report -v tests/ 52 | $ cat .report.json 53 | {"created": 1518371686.7981803, ... "tests":[{"nodeid": "test_foo.py", "outcome": "passed", ...}, ...]} 54 | ``` 55 | 56 | If you just need to know how many tests passed or failed and don't care about details, you can produce a summary only: 57 | 58 | ```bash 59 | $ pytest --json-report --json-report-summary 60 | ``` 61 | 62 | Many fields can be omitted to keep the report size small. E.g., this will leave out keywords and stdout/stderr output: 63 | 64 | ```bash 65 | $ pytest --json-report --json-report-omit keywords streams 66 | ``` 67 | 68 | If you don't like to have the report saved, you can specify `none` as the target file name: 69 | 70 | ```bash 71 | $ pytest --json-report --json-report-file none 72 | ``` 73 | 74 | ## Advanced usage 75 | 76 | ### Metadata 77 | 78 | The easiest way to add your own metadata to a test item is by using the `json_metadata` [test fixture](https://docs.pytest.org/en/stable/fixture.html): 79 | 80 | ```python 81 | def test_something(json_metadata): 82 | json_metadata['foo'] = {"some": "thing"} 83 | json_metadata['bar'] = 123 84 | ``` 85 | 86 | Or use the `pytest_json_runtest_metadata` [hook](https://docs.pytest.org/en/stable/reference.html#hooks) (in your `conftest.py`) to add metadata based on the current test run. The dict returned will automatically be merged with any existing metadata. E.g., this adds the start and stop time of each test's `call` stage: 87 | 88 | ```python 89 | def pytest_json_runtest_metadata(item, call): 90 | if call.when != 'call': 91 | return {} 92 | return {'start': call.start, 'stop': call.stop} 93 | ``` 94 | 95 | Also, you could add metadata using [pytest-metadata's `--metadata` switch](https://github.com/pytest-dev/pytest-metadata#additional-metadata) which will add metadata to the report's `environment` section, but not to a specific test item. You need to make sure all your metadata is JSON-serializable. 
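
For example, this run (a sketch; the `build` key is just an illustrative name) would add a `build` entry to the report's [environment](#environment) section:

```bash
$ pytest --json-report --metadata build 1234
```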
96 | 97 | ### A note on hooks 98 | 99 | If you're using a `pytest_json_*` hook although the plugin is not installed or not active (not using `--json-report`), pytest doesn't recognize it and may fail with an internal error like this: 100 | ``` 101 | INTERNALERROR> pluggy.manager.PluginValidationError: unknown hook 'pytest_json_runtest_metadata' in plugin 102 | ``` 103 | You can avoid this by declaring the hook implementation optional: 104 | 105 | ```python 106 | import pytest 107 | @pytest.hookimpl(optionalhook=True) 108 | def pytest_json_runtest_metadata(item, call): 109 | ... 110 | ``` 111 | 112 | ### Modifying the report 113 | 114 | You can modify the entire report before it's saved by using the `pytest_json_modifyreport` hook. 115 | 116 | Just implement the hook in your `conftest.py`, e.g.: 117 | 118 | ```python 119 | def pytest_json_modifyreport(json_report): 120 | # Add a key to the report 121 | json_report['foo'] = 'bar' 122 | # Delete the summary from the report 123 | del json_report['summary'] 124 | ``` 125 | 126 | After `pytest_sessionfinish`, the report object is also directly available to script via `config._json_report.report`. So you can access it using some built-in hook: 127 | 128 | ```python 129 | def pytest_sessionfinish(session): 130 | report = session.config._json_report.report 131 | print('exited with', report['exitcode']) 132 | ``` 133 | 134 | If you *really* want to change how the result of a test stage run is turned into JSON, you can use the `pytest_json_runtest_stage` hook. It takes a [`TestReport`](https://docs.pytest.org/en/latest/reference.html#_pytest.runner.TestReport) and returns a JSON-serializable dict: 135 | 136 | ```python 137 | def pytest_json_runtest_stage(report): 138 | return {'outcome': report.outcome} 139 | ``` 140 | 141 | ### Direct invocation 142 | 143 | You can use the plugin when invoking `pytest.main()` directly from code: 144 | 145 | ```python 146 | import pytest 147 | from pytest_jsonreport.plugin import JSONReport 148 | 149 | plugin = JSONReport() 150 | pytest.main(['--json-report-file=none', 'test_foo.py'], plugins=[plugin]) 151 | ``` 152 | 153 | You can then access the `report` object: 154 | 155 | ```python 156 | print(plugin.report) 157 | ``` 158 | 159 | And save the report manually: 160 | 161 | ```python 162 | plugin.save_report('/tmp/my_report.json') 163 | ``` 164 | 165 | 166 | ## Format 167 | 168 | The JSON report contains metadata of the session, a summary, collectors, tests and warnings. You can find a sample report in [`sample_report.json`](sample_report.json). 169 | 170 | | Key | Description | 171 | | --- | --- | 172 | | `created` | Report creation date. (Unix time) | 173 | | `duration` | Session duration in seconds. | 174 | | `exitcode` | Process exit code as listed [in the pytest docs](https://docs.pytest.org/en/latest/usage.html#possible-exit-codes). The exit code is a quick way to tell if any tests failed, an internal error occurred, etc. | 175 | | `root` | Absolute root path from which the session was started. | 176 | | `environment` | [Environment](#environment) entry. | 177 | | `summary` | [Summary](#summary) entry. | 178 | | `collectors` | [Collectors](#collectors) entry. (absent if `--json-report-summary` or if no collectors) | 179 | | `tests` | [Tests](#tests) entry. (absent if `--json-report-summary`) | 180 | | `warnings` | [Warnings](#warnings) entry. 
(absent if `--json-report-summary` or if no warnings) | 181 | 182 | #### Example 183 | 184 | ```python 185 | { 186 | "created": 1518371686.7981803, 187 | "duration": 0.1235666275024414, 188 | "exitcode": 1, 189 | "root": "/path/to/tests", 190 | "environment": ENVIRONMENT, 191 | "summary": SUMMARY, 192 | "collectors": COLLECTORS, 193 | "tests": TESTS, 194 | "warnings": WARNINGS, 195 | } 196 | ``` 197 | 198 | ### Summary 199 | 200 | Number of outcomes per category and the total number of test items. 201 | 202 | | Key | Description | 203 | | --- | --- | 204 | | `collected` | Total number of tests collected. | 205 | | `total` | Total number of tests run. | 206 | | `deselected` | Total number of tests deselected. (absent if number is 0) | 207 | | `` | Number of tests with that outcome. (absent if number is 0) | 208 | 209 | #### Example 210 | 211 | ```python 212 | { 213 | "collected": 10, 214 | "passed": 2, 215 | "failed": 3, 216 | "xfailed": 1, 217 | "xpassed": 1, 218 | "error": 2, 219 | "skipped": 1, 220 | "total": 10 221 | } 222 | ``` 223 | 224 | ### Environment 225 | 226 | The environment section is provided by [pytest-metadata](https://github.com/pytest-dev/pytest-metadata). All metadata given by that plugin will be added here, so you need to make sure it is JSON-serializable. 227 | 228 | #### Example 229 | 230 | ```python 231 | { 232 | "Python": "3.6.4", 233 | "Platform": "Linux-4.56.78-9-ARCH-x86_64-with-arch", 234 | "Packages": { 235 | "pytest": "3.4.0", 236 | "py": "1.5.2", 237 | "pluggy": "0.6.0" 238 | }, 239 | "Plugins": { 240 | "json-report": "0.4.1", 241 | "xdist": "1.22.0", 242 | "metadata": "1.5.1", 243 | "forked": "0.2", 244 | "cov": "2.5.1" 245 | }, 246 | "foo": "bar", # Custom metadata entry passed via pytest-metadata 247 | } 248 | ``` 249 | 250 | ### Collectors 251 | 252 | A list of collector nodes. These are useful to check what tests are available without running them, or to debug an error during test discovery. 253 | 254 | | Key | Description | 255 | | --- | --- | 256 | | `nodeid` | ID of the collector node. ([See docs](https://docs.pytest.org/en/latest/example/markers.html#node-id)) The root node has an empty node ID. | 257 | | `outcome` | Outcome of the collection. (Not the test outcome!) | 258 | | `result` | Nodes collected by the collector. | 259 | | `longrepr` | Representation of the collection error. (absent if no error occurred) | 260 | 261 | The `result` is a list of the collected nodes: 262 | 263 | | Key | Description | 264 | | --- | --- | 265 | | `nodeid` | ID of the node. | 266 | | `type` | Type of the collected node. | 267 | | `lineno` | Line number. (absent if not applicable) | 268 | | `deselected` | `true` if the test is deselected. (absent if not deselected) | 269 | 270 | #### Example 271 | 272 | ```python 273 | [ 274 | { 275 | "nodeid": "", 276 | "outcome": "passed", 277 | "result": [ 278 | { 279 | "nodeid": "test_foo.py", 280 | "type": "Module" 281 | } 282 | ] 283 | }, 284 | { 285 | "nodeid": "test_foo.py", 286 | "outcome": "passed", 287 | "result": [ 288 | { 289 | "nodeid": "test_foo.py::test_pass", 290 | "type": "Function", 291 | "lineno": 24, 292 | "deselected": true 293 | }, 294 | ... 295 | ] 296 | }, 297 | { 298 | "nodeid": "test_bar.py", 299 | "outcome": "failed", 300 | "result": [], 301 | "longrepr": "/usr/lib/python3.6 ... invalid syntax" 302 | }, 303 | ... 304 | ] 305 | ``` 306 | 307 | ### Tests 308 | 309 | A list of test nodes. Each completed test stage produces a stage object (`setup`, `call`, `teardown`) with its own `outcome`. 
310 | 311 | | Key | Description | 312 | | --- | --- | 313 | | `nodeid` | ID of the test node. | 314 | | `lineno` | Line number where the test starts. | 315 | | `keywords` | List of keywords and markers associated with the test. | 316 | | `outcome` | Outcome of the test run. | 317 | | `{setup, call, teardown}` | [Test stage](#test-stage) entry. To find the error in a failed test you need to check all stages. (absent if stage didn't run) | 318 | | `metadata` | [Metadata](#metadata) item. (absent if no metadata) | 319 | 320 | #### Example 321 | 322 | ```python 323 | [ 324 | { 325 | "nodeid": "test_foo.py::test_fail", 326 | "lineno": 50, 327 | "keywords": [ 328 | "test_fail", 329 | "test_foo.py", 330 | "test_foo0" 331 | ], 332 | "outcome": "failed", 333 | "setup": TEST_STAGE, 334 | "call": TEST_STAGE, 335 | "teardown": TEST_STAGE, 336 | "metadata": { 337 | "foo": "bar", 338 | } 339 | }, 340 | ... 341 | ] 342 | ``` 343 | 344 | 345 | ### Test stage 346 | 347 | A test stage item. 348 | 349 | | Key | Description | 350 | | --- | --- | 351 | | `duration` | Duration of the test stage in seconds. | 352 | | `outcome` | Outcome of the test stage. (can be different from the overall test outcome) | 353 | | `crash` | Crash entry. (absent if no error occurred) | 354 | | `traceback` | List of traceback entries. (absent if no error occurred; affected by `--tb` option) | 355 | | `stdout` | Standard output. (absent if none available) | 356 | | `stderr` | Standard error. (absent if none available) | 357 | | `log` | [Log](#log) entry. (absent if none available) | 358 | | `longrepr` | Representation of the error. (absent if no error occurred; format affected by `--tb` option) | 359 | 360 | #### Example 361 | 362 | ```python 363 | { 364 | "duration": 0.00018835067749023438, 365 | "outcome": "failed", 366 | "crash": { 367 | "path": "/path/to/tests/test_foo.py", 368 | "lineno": 54, 369 | "message": "TypeError: unsupported operand type(s) for -: 'int' and 'NoneType'" 370 | }, 371 | "traceback": [ 372 | { 373 | "path": "test_foo.py", 374 | "lineno": 65, 375 | "message": "" 376 | }, 377 | { 378 | "path": "test_foo.py", 379 | "lineno": 63, 380 | "message": "in foo" 381 | }, 382 | { 383 | "path": "test_foo.py", 384 | "lineno": 63, 385 | "message": "in " 386 | }, 387 | { 388 | "path": "test_foo.py", 389 | "lineno": 54, 390 | "message": "TypeError" 391 | } 392 | ], 393 | "stdout": "foo\nbar\n", 394 | "stderr": "baz\n", 395 | "log": LOG, 396 | "longrepr": "def test_fail_nested():\n ..." 397 | } 398 | ``` 399 | 400 | ### Log 401 | 402 | A list of log records. The fields of a log record are the [`logging.LogRecord` attributes](https://docs.python.org/3/library/logging.html#logrecord-attributes), with the exception that the fields `exc_info` and `args` are always empty and `msg` contains the formatted log message. 403 | 404 | You can apply [`logging.makeLogRecord()`](https://docs.python.org/3/library/logging.html#logging.makeLogRecord) on a log record to convert it back to a `logging.LogRecord` object. 
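
For instance, a post-processing script might rebuild the records roughly like this (a sketch, assuming the default `.report.json` path and that logs weren't omitted via `--json-report-omit log`):

```python
import json
import logging

with open('.report.json') as f:
    report = json.load(f)

for test in report.get('tests', []):
    # Log records are stored per stage and are absent if nothing was logged
    for entry in test.get('call', {}).get('log', []):
        record = logging.makeLogRecord(entry)
        print(record.name, record.levelname, record.getMessage())
```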
405 | 
406 | #### Example
407 | 
408 | ```python
409 | [
410 |     {
411 |         "name": "root",
412 |         "msg": "This is a warning.",
413 |         "args": null,
414 |         "levelname": "WARNING",
415 |         "levelno": 30,
416 |         "pathname": "/path/to/tests/test_foo.py",
417 |         "filename": "test_foo.py",
418 |         "module": "test_foo",
419 |         "exc_info": null,
420 |         "exc_text": null,
421 |         "stack_info": null,
422 |         "lineno": 8,
423 |         "funcName": "foo",
424 |         "created": 1519772464.291738,
425 |         "msecs": 291.73803329467773,
426 |         "relativeCreated": 332.90839195251465,
427 |         "thread": 140671803118912,
428 |         "threadName": "MainThread",
429 |         "processName": "MainProcess",
430 |         "process": 31481
431 |     },
432 |     ...
433 | ]
434 | ```
435 | 
436 | 
437 | ### Warnings
438 | 
439 | A list of warnings that occurred during the session. (See the [pytest docs on warnings](https://docs.pytest.org/en/latest/warnings.html).)
440 | 
441 | | Key | Description |
442 | | --- | --- |
443 | | `filename` | File name. |
444 | | `lineno` | Line number. |
445 | | `message` | Warning message. |
446 | | `category` | Warning category (class name, e.g. `PytestCollectionWarning`). |
447 | | `when` | When the warning was captured. (`"config"`, `"collect"` or `"runtest"` as listed [here](https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_warning_captured)) |
448 | 
449 | #### Example
450 | 
451 | ```python
452 | [
453 |     {
454 |         "message": "cannot collect test class 'TestFoo' because it has a __init__ constructor",
455 |         "category": "PytestCollectionWarning",
456 |         "when": "collect",
457 |         "filename": "/path/to/tests/test_foo.py",
458 |         "lineno": 3
459 |     }
460 | ]
461 | ```
462 | 
463 | ## Related tools
464 | 
465 | - [pytest-json](https://github.com/mattcl/pytest-json) has some great features but appears to be unmaintained. I borrowed some ideas and test cases from there.
466 | 
467 | - [tox has a switch](http://tox.readthedocs.io/en/latest/example/result.html) to create a JSON report including a test result summary. However, it just provides the overall outcome without any per-test details.
468 | 
--------------------------------------------------------------------------------
/pytest_jsonreport/plugin.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from collections import OrderedDict
3 | from contextlib import contextmanager
4 | import json
5 | import logging
6 | import os
7 | import time
8 | import warnings
9 | 
10 | import pytest
11 | import _pytest.hookspec
12 | 
13 | from . import serialize
14 | 
15 | 
16 | class JSONReportBase:
17 | 
18 |     def __init__(self, config=None):
19 |         self._config = config
20 |         self._logger = logging.getLogger()
21 | 
22 |     def pytest_configure(self, config):
23 |         # When the plugin is used directly from code, it may have been
24 |         # initialized without a config.
25 | if self._config is None: 26 | self._config = config 27 | if not hasattr(config, '_json_report'): 28 | self._config._json_report = self 29 | # If the user sets --tb=no, always omit the traceback from the report 30 | if self._config.option.tbstyle == 'no' and \ 31 | not self._must_omit('traceback'): 32 | self._config.option.json_report_omit.append('traceback') 33 | 34 | def pytest_addhooks(self, pluginmanager): 35 | pluginmanager.add_hookspecs(Hooks) 36 | 37 | @pytest.hookimpl(hookwrapper=True) 38 | def pytest_runtest_protocol(self, item, nextitem): 39 | item._json_report_extra = {} 40 | yield 41 | del item._json_report_extra 42 | 43 | @contextmanager 44 | def _capture_log(self, item, when): 45 | handler = LoggingHandler() 46 | self._logger.addHandler(handler) 47 | try: 48 | yield 49 | finally: 50 | self._logger.removeHandler(handler) 51 | item._json_report_extra[when]['log'] = handler.records 52 | 53 | @pytest.hookimpl(hookwrapper=True) 54 | def pytest_runtest_setup(self, item): 55 | item._json_report_extra['setup'] = {} 56 | if self._must_omit('log'): 57 | yield 58 | else: 59 | with self._capture_log(item, 'setup'): 60 | yield 61 | 62 | @pytest.hookimpl(hookwrapper=True) 63 | def pytest_runtest_call(self, item): 64 | item._json_report_extra['call'] = {} 65 | if self._must_omit('log'): 66 | yield 67 | else: 68 | with self._capture_log(item, 'call'): 69 | yield 70 | 71 | @pytest.hookimpl(hookwrapper=True) 72 | def pytest_runtest_teardown(self, item): 73 | item._json_report_extra['teardown'] = {} 74 | if self._must_omit('log'): 75 | yield 76 | else: 77 | with self._capture_log(item, 'teardown'): 78 | yield 79 | 80 | @pytest.hookimpl(hookwrapper=True) 81 | def pytest_runtest_makereport(self, item, call): 82 | # Hook runtest_makereport to access the item *and* the report 83 | report = (yield).get_result() 84 | if not self._must_omit('streams'): 85 | streams = {key: val for when_, key, val in item._report_sections if 86 | when_ == report.when and key in ['stdout', 'stderr']} 87 | item._json_report_extra[call.when].update(streams) 88 | for dict_ in self._config.hook.pytest_json_runtest_metadata(item=item, 89 | call=call): 90 | if not dict_: 91 | continue 92 | item._json_report_extra.setdefault('metadata', {}).update(dict_) 93 | self._validate_metadata(item) 94 | # Attach the JSON details to the report. If this is an xdist worker, 95 | # the details will be serialized and relayed with the other attributes 96 | # of the report. 
97 | report._json_report_extra = item._json_report_extra 98 | 99 | @staticmethod 100 | def _validate_metadata(item): 101 | """Ensure that `item` has JSON-serializable metadata, otherwise delete 102 | it.""" 103 | if 'metadata' not in item._json_report_extra: 104 | return 105 | if not serialize.serializable(item._json_report_extra['metadata']): 106 | warnings.warn( 107 | 'Metadata of {} is not JSON-serializable.'.format(item.nodeid)) 108 | del item._json_report_extra['metadata'] 109 | 110 | def _must_omit(self, key): 111 | return key in self._config.option.json_report_omit 112 | 113 | 114 | class JSONReport(JSONReportBase): 115 | """The JSON report pytest plugin.""" 116 | 117 | def __init__(self, *args, **kwargs): 118 | JSONReportBase.__init__(self, *args, **kwargs) 119 | self._start_time = None 120 | self._json_tests = OrderedDict() 121 | self._json_collectors = [] 122 | self._json_warnings = [] 123 | self._num_deselected = 0 124 | self._terminal_summary = '' 125 | # Min verbosity required to print to terminal 126 | self._terminal_min_verbosity = 0 127 | self.report = None 128 | 129 | def pytest_sessionstart(self, session): 130 | self._start_time = time.time() 131 | 132 | def pytest_collectreport(self, report): 133 | if self._must_omit('collectors'): 134 | return 135 | json_result = [] 136 | for item in report.result: 137 | json_item = serialize.make_collectitem(item) 138 | item._json_collectitem = json_item 139 | json_result.append(json_item) 140 | self._json_collectors.append(serialize.make_collector(report, 141 | json_result)) 142 | 143 | def pytest_deselected(self, items): 144 | self._num_deselected += len(items) 145 | if self._must_omit('collectors'): 146 | return 147 | for item in items: 148 | try: 149 | item._json_collectitem['deselected'] = True 150 | # Happens when the item has not been collected before (i.e. didn't 151 | # go through `pytest_collectreport`), e.g. due to `--last-failed` 152 | except AttributeError: 153 | continue 154 | 155 | @pytest.hookimpl(hookwrapper=True) 156 | def pytest_collection_modifyitems(self, items): 157 | yield 158 | if self._must_omit('collectors'): 159 | return 160 | for item in items: 161 | try: 162 | del item._json_collectitem 163 | except AttributeError: 164 | pass 165 | 166 | def pytest_runtest_logreport(self, report): 167 | # The `_json_report_extra` attr may have been lost, e.g. 
when the 168 | # original report object got replaced due to a crashed xdist worker (#75) 169 | if not hasattr(report, '_json_report_extra'): 170 | report._json_report_extra = {} 171 | 172 | nodeid = report.nodeid 173 | try: 174 | json_testitem = self._json_tests[nodeid] 175 | except KeyError: 176 | json_testitem = serialize.make_testitem( 177 | nodeid, 178 | # report.keywords is a dict (for legacy reasons), but we just 179 | # need the keys 180 | None if self._must_omit('keywords') else list(report.keywords), 181 | report.location, 182 | ) 183 | self._json_tests[nodeid] = json_testitem 184 | metadata = report._json_report_extra.get('metadata') 185 | if metadata: 186 | json_testitem['metadata'] = metadata 187 | # Add user properties in teardown stage if attribute exists and is non-empty 188 | if report.when == 'teardown' and getattr(report, 'user_properties', None): 189 | user_properties = [{str(key): val} for key, val in report.user_properties] 190 | if serialize.serializable(user_properties): 191 | json_testitem['user_properties'] = user_properties 192 | else: 193 | warnings.warn('User properties of {} are not JSON-serializable.'.format(nodeid)) 194 | 195 | # Update total test outcome, if necessary. The total outcome can be 196 | # different from the outcome of the setup/call/teardown stage. 197 | outcome = self._config.hook.pytest_report_teststatus( 198 | report=report, config=self._config)[0] 199 | if outcome not in ['passed', '']: 200 | json_testitem['outcome'] = outcome 201 | json_testitem[report.when] = \ 202 | self._config.hook.pytest_json_runtest_stage(report=report) 203 | 204 | @pytest.hookimpl(trylast=True) 205 | def pytest_json_runtest_stage(self, report): 206 | stage_details = report._json_report_extra.get(report.when, {}) 207 | return serialize.make_teststage( 208 | report, 209 | # TODO Can we use pytest's BaseReport.capstdout/err/log here? 
210 | stage_details.get('stdout'), 211 | stage_details.get('stderr'), 212 | stage_details.get('log'), 213 | self._must_omit('traceback'), 214 | ) 215 | 216 | @pytest.hookimpl(tryfirst=True) 217 | def pytest_sessionfinish(self, session): 218 | summary_data = { 219 | # Need to add deselected count to get correct number of collected 220 | # tests (see pytest-dev/pytest#9614) 221 | 'collected': session.testscollected + self._num_deselected 222 | } 223 | if self._num_deselected: 224 | summary_data['deselected'] = self._num_deselected 225 | 226 | json_report = serialize.make_report( 227 | created=time.time(), 228 | duration=time.time() - self._start_time, 229 | exitcode=session.exitstatus, 230 | root=str(session.fspath), 231 | environment=getattr(self._config, '_metadata', {}), 232 | summary=serialize.make_summary(self._json_tests, **summary_data), 233 | ) 234 | if not self._config.option.json_report_summary: 235 | if self._json_collectors: 236 | json_report['collectors'] = self._json_collectors 237 | json_report['tests'] = list(self._json_tests.values()) 238 | if self._json_warnings: 239 | json_report['warnings'] = self._json_warnings 240 | 241 | self._config.hook.pytest_json_modifyreport(json_report=json_report) 242 | # After the session has finished, other scripts may want to use report 243 | # object directly 244 | self.report = json_report 245 | path = self._config.option.json_report_file 246 | if path: 247 | try: 248 | self.save_report(path) 249 | except OSError as e: 250 | self._terminal_summary = 'could not save report: {}'.format(e) 251 | else: 252 | self._terminal_summary = 'report saved to: {}'.format(path) 253 | else: 254 | self._terminal_summary = 'report auto-save skipped' 255 | self._terminal_min_verbosity = 1 256 | 257 | def save_report(self, path): 258 | """Save the JSON report to `path`. 259 | 260 | Raises an exception if saving failed. 261 | """ 262 | if self.report is None: 263 | raise Exception('could not save report: no report available') 264 | # Create path if it doesn't exist 265 | dirname = os.path.dirname(path) 266 | if dirname: 267 | try: 268 | os.makedirs(dirname) 269 | # Mimick FileExistsError for py2.7 compatibility 270 | except OSError as e: 271 | import errno # pylint: disable=import-outside-toplevel 272 | if e.errno != errno.EEXIST: 273 | raise 274 | with open(path, 'w', encoding='utf-8') as f: 275 | json.dump( 276 | self.report, 277 | f, 278 | default=str, 279 | indent=self._config.option.json_report_indent, 280 | ) 281 | 282 | def pytest_warning_recorded(self, warning_message, when): 283 | if self._config is None: 284 | # If pytest is invoked directly from code, it may try to capture 285 | # warnings before the config is set. 
286 | return 287 | if not self._must_omit('warnings'): 288 | self._json_warnings.append( 289 | serialize.make_warning(warning_message, when)) 290 | 291 | # Warning hook fallback (warning_recorded is available from pytest>=6) 292 | if not hasattr(_pytest.hookspec, 'pytest_warning_recorded'): 293 | pytest_warning_captured = pytest_warning_recorded 294 | del pytest_warning_recorded 295 | 296 | def pytest_terminal_summary(self, terminalreporter): 297 | if self._terminal_min_verbosity > ( 298 | self._config.option.json_report_verbosity if 299 | self._config.option.json_report_verbosity is not None else 300 | terminalreporter.verbosity): 301 | return 302 | terminalreporter.write_sep('-', 'JSON report') 303 | terminalreporter.write_line(self._terminal_summary) 304 | 305 | 306 | class JSONReportWorker(JSONReportBase): 307 | 308 | pass 309 | 310 | 311 | class LoggingHandler(logging.Handler): 312 | 313 | def __init__(self): 314 | super().__init__() 315 | self.records = [] 316 | 317 | def emit(self, record): 318 | d = dict(record.__dict__) 319 | d['msg'] = record.getMessage() 320 | d['args'] = None 321 | d['exc_info'] = None 322 | d.pop('message', None) 323 | self.records.append(d) 324 | 325 | 326 | class Hooks: 327 | 328 | def pytest_json_modifyreport(self, json_report): 329 | """Called after building JSON report and before saving it. 330 | 331 | Plugins can use this hook to modify the report before it's saved. 332 | """ 333 | 334 | @pytest.hookspec(firstresult=True) 335 | def pytest_json_runtest_stage(self, report): 336 | """Return a dict used as the JSON representation of `report` (the 337 | `_pytest.runner.TestReport` of the current test stage). 338 | 339 | Called from `pytest_runtest_logreport`. Plugins can use this hook to 340 | overwrite how the result of a test stage run gets turned into JSON. 341 | """ 342 | 343 | def pytest_json_runtest_metadata(self, item, call): 344 | """Return a dict which will be added to the current test item's JSON 345 | metadata. 346 | 347 | Called from `pytest_runtest_makereport`. Plugins can use this hook to 348 | add metadata based on the current test run. 349 | """ 350 | 351 | 352 | @pytest.fixture 353 | def json_metadata(request): 354 | """Fixture to add metadata to the current test item.""" 355 | try: 356 | return request.node._json_report_extra.setdefault('metadata', {}) 357 | except AttributeError: 358 | if not request.config.option.json_report: 359 | # The user didn't request a JSON report, so the plugin didn't 360 | # prepare a metadata context. We return a dummy dict, so the 361 | # fixture can be used as expected without causing internal errors. 
362 | return {} 363 | raise 364 | 365 | 366 | def pytest_addoption(parser): 367 | group = parser.getgroup('jsonreport', 'reporting test results as JSON') 368 | group.addoption( 369 | '--json-report', default=False, action='store_true', 370 | help='create JSON report') 371 | group.addoption( 372 | '--json-report-file', default='.report.json', 373 | # The case-insensitive string "none" will make the value None 374 | type=lambda x: None if x.lower() == 'none' else x, 375 | help='target path to save JSON report (use "none" to not save the ' 376 | 'report)') 377 | group.addoption( 378 | '--json-report-omit', default=[], nargs='+', help='list of fields to ' 379 | 'omit in the report (choose from: collectors, log, traceback, ' 380 | 'streams, warnings, keywords)') 381 | group.addoption( 382 | '--json-report-summary', default=False, 383 | action='store_true', help='only create a summary without per-test ' 384 | 'details') 385 | group.addoption( 386 | '--json-report-indent', type=int, help='pretty-print JSON with ' 387 | 'specified indentation level') 388 | group._addoption( 389 | '--json-report-verbosity', type=int, help='set verbosity (default is ' 390 | 'value of --verbosity)') 391 | 392 | 393 | def pytest_configure(config): 394 | if not config.option.json_report: 395 | return 396 | if hasattr(config, 'workerinput'): 397 | Plugin = JSONReportWorker 398 | else: 399 | Plugin = JSONReport 400 | plugin = Plugin(config) 401 | config._json_report = plugin 402 | config.pluginmanager.register(plugin) 403 | 404 | 405 | def pytest_unconfigure(config): 406 | plugin = getattr(config, '_json_report', None) 407 | if plugin is not None: 408 | del config._json_report 409 | config.pluginmanager.unregister(plugin) 410 | -------------------------------------------------------------------------------- /sample_report.json: -------------------------------------------------------------------------------- 1 | { 2 | "created": 1548185151.2609472, 3 | "duration": 0.04589128494262695, 4 | "exitcode": 1, 5 | "root": "/tmp/pytest-of-user/pytest-40/test_create_report0", 6 | "environment": { 7 | "Python": "3.7.1", 8 | "Platform": "Linux-1.23.45-arch1-1-ARCH-x86_64-with-arch", 9 | "Packages": { 10 | "pytest": "4.1.1", 11 | "py": "1.7.0", 12 | "pluggy": "0.8.1" 13 | }, 14 | "Plugins": { 15 | "xdist": "1.26.0", 16 | "metadata": "1.8.0", 17 | "json-report": "0.8.0", 18 | "forked": "1.0.1" 19 | } 20 | }, 21 | "summary": { 22 | "passed": 2, 23 | "failed": 3, 24 | "xfailed": 1, 25 | "xpassed": 1, 26 | "error": 2, 27 | "skipped": 1, 28 | "total": 10 29 | }, 30 | "collectors": [ 31 | { 32 | "nodeid": "", 33 | "outcome": "passed", 34 | "result": [ 35 | { 36 | "nodeid": "test_create_report.py", 37 | "type": "Module" 38 | } 39 | ] 40 | }, 41 | { 42 | "nodeid": "test_create_report.py", 43 | "outcome": "passed", 44 | "result": [ 45 | { 46 | "nodeid": "test_create_report.py::test_pass", 47 | "type": "Function", 48 | "lineno": 25 49 | }, 50 | { 51 | "nodeid": "test_create_report.py::test_fail_with_fixture", 52 | "type": "Function", 53 | "lineno": 28 54 | }, 55 | { 56 | "nodeid": "test_create_report.py::test_xfail", 57 | "type": "Function", 58 | "lineno": 33 59 | }, 60 | { 61 | "nodeid": "test_create_report.py::test_xfail_but_passing", 62 | "type": "Function", 63 | "lineno": 37 64 | }, 65 | { 66 | "nodeid": "test_create_report.py::test_fail_during_setup", 67 | "type": "Function", 68 | "lineno": 41 69 | }, 70 | { 71 | "nodeid": "test_create_report.py::test_fail_during_teardown", 72 | "type": "Function", 73 | "lineno": 44 74 | }, 75 | { 76 | 
"nodeid": "test_create_report.py::test_skip", 77 | "type": "Function", 78 | "lineno": 47 79 | }, 80 | { 81 | "nodeid": "test_create_report.py::test_fail_nested", 82 | "type": "Function", 83 | "lineno": 51 84 | }, 85 | { 86 | "nodeid": "test_create_report.py::test_parametrized[1]", 87 | "type": "Function", 88 | "lineno": 67 89 | }, 90 | { 91 | "nodeid": "test_create_report.py::test_parametrized[2]", 92 | "type": "Function", 93 | "lineno": 67 94 | } 95 | ] 96 | } 97 | ], 98 | "tests": [ 99 | { 100 | "nodeid": "test_create_report.py::test_pass", 101 | "lineno": 25, 102 | "outcome": "passed", 103 | "keywords": [ 104 | "test_create_report0", 105 | "test_create_report.py", 106 | "test_pass" 107 | ], 108 | "setup": { 109 | "duration": 0.00013637542724609375, 110 | "outcome": "passed" 111 | }, 112 | "call": { 113 | "duration": 0.00010704994201660156, 114 | "outcome": "passed" 115 | }, 116 | "teardown": { 117 | "duration": 0.0004982948303222656, 118 | "outcome": "passed" 119 | } 120 | }, 121 | { 122 | "nodeid": "test_create_report.py::test_fail_with_fixture", 123 | "lineno": 28, 124 | "outcome": "failed", 125 | "keywords": [ 126 | "test_create_report0", 127 | "test_create_report.py", 128 | "test_fail_with_fixture" 129 | ], 130 | "setup": { 131 | "duration": 0.0001995563507080078, 132 | "outcome": "passed", 133 | "stdout": "setup\n", 134 | "stderr": "setuperr\n" 135 | }, 136 | "call": { 137 | "duration": 0.00015997886657714844, 138 | "outcome": "failed", 139 | "crash": { 140 | "path": "/tmp/pytest-of-user/pytest-40/test_create_report0/test_create_report.py", 141 | "lineno": 32, 142 | "message": "assert False" 143 | }, 144 | "traceback": [ 145 | { 146 | "path": "test_create_report.py", 147 | "lineno": 32, 148 | "message": "AssertionError" 149 | } 150 | ], 151 | "stdout": "call\n", 152 | "stderr": "callerr\n", 153 | "longrepr": "setup_teardown_fixture = None\n\n def test_fail_with_fixture(setup_teardown_fixture):\n print('call')\n print('callerr', file=sys.stderr)\n> assert False\nE assert False\n\ntest_create_report.py:32: AssertionError" 154 | }, 155 | "teardown": { 156 | "duration": 0.00015306472778320312, 157 | "outcome": "passed", 158 | "stdout": "teardown\n", 159 | "stderr": "teardownerr\n" 160 | } 161 | }, 162 | { 163 | "nodeid": "test_create_report.py::test_xfail", 164 | "lineno": 33, 165 | "outcome": "xfailed", 166 | "keywords": [ 167 | "test_create_report0", 168 | "test_create_report.py", 169 | "pytestmark", 170 | "test_xfail", 171 | "xfail" 172 | ], 173 | "setup": { 174 | "duration": 0.0004124641418457031, 175 | "outcome": "passed" 176 | }, 177 | "call": { 178 | "duration": 0.00012755393981933594, 179 | "outcome": "skipped", 180 | "crash": { 181 | "path": "/tmp/pytest-of-user/pytest-40/test_create_report0/test_create_report.py", 182 | "lineno": 36, 183 | "message": "assert False" 184 | }, 185 | "traceback": [ 186 | { 187 | "path": "test_create_report.py", 188 | "lineno": 36, 189 | "message": "AssertionError" 190 | } 191 | ], 192 | "longrepr": "@pytest.mark.xfail(reason='testing xfail')\n def test_xfail():\n> assert False\nE assert False\n\ntest_create_report.py:36: AssertionError" 193 | }, 194 | "teardown": { 195 | "duration": 0.00011134147644042969, 196 | "outcome": "passed" 197 | } 198 | }, 199 | { 200 | "nodeid": "test_create_report.py::test_xfail_but_passing", 201 | "lineno": 37, 202 | "outcome": "xpassed", 203 | "keywords": [ 204 | "test_create_report0", 205 | "test_create_report.py", 206 | "pytestmark", 207 | "xfail", 208 | "test_xfail_but_passing" 209 | ], 210 | "setup": { 211 | 
"duration": 0.00011181831359863281, 212 | "outcome": "passed" 213 | }, 214 | "call": { 215 | "duration": 9.918212890625e-05, 216 | "outcome": "passed" 217 | }, 218 | "teardown": { 219 | "duration": 9.632110595703125e-05, 220 | "outcome": "passed" 221 | } 222 | }, 223 | { 224 | "nodeid": "test_create_report.py::test_fail_during_setup", 225 | "lineno": 41, 226 | "outcome": "error", 227 | "keywords": [ 228 | "test_fail_during_setup", 229 | "test_create_report0", 230 | "test_create_report.py" 231 | ], 232 | "setup": { 233 | "duration": 0.00017833709716796875, 234 | "outcome": "failed", 235 | "crash": { 236 | "path": "/tmp/pytest-of-user/pytest-40/test_create_report0/test_create_report.py", 237 | "lineno": 17, 238 | "message": "assert False" 239 | }, 240 | "traceback": [ 241 | { 242 | "path": "test_create_report.py", 243 | "lineno": 17, 244 | "message": "AssertionError" 245 | } 246 | ], 247 | "longrepr": "request = >\n\n @pytest.fixture\n def fail_setup_fixture(request):\n> assert False\nE assert False\n\ntest_create_report.py:17: AssertionError" 248 | }, 249 | "teardown": { 250 | "duration": 0.00012302398681640625, 251 | "outcome": "passed" 252 | } 253 | }, 254 | { 255 | "nodeid": "test_create_report.py::test_fail_during_teardown", 256 | "lineno": 44, 257 | "outcome": "error", 258 | "keywords": [ 259 | "test_create_report0", 260 | "test_create_report.py", 261 | "test_fail_during_teardown" 262 | ], 263 | "setup": { 264 | "duration": 0.00017595291137695312, 265 | "outcome": "passed" 266 | }, 267 | "call": { 268 | "duration": 0.00010180473327636719, 269 | "outcome": "passed" 270 | }, 271 | "teardown": { 272 | "duration": 0.00014543533325195312, 273 | "outcome": "failed", 274 | "crash": { 275 | "path": "/tmp/pytest-of-user/pytest-40/test_create_report0/test_create_report.py", 276 | "lineno": 22, 277 | "message": "assert False" 278 | }, 279 | "traceback": [ 280 | { 281 | "path": "test_create_report.py", 282 | "lineno": 22, 283 | "message": "AssertionError" 284 | } 285 | ], 286 | "longrepr": "def fn():\n> assert False\nE assert False\n\ntest_create_report.py:22: AssertionError" 287 | } 288 | }, 289 | { 290 | "nodeid": "test_create_report.py::test_skip", 291 | "lineno": 47, 292 | "outcome": "skipped", 293 | "keywords": [ 294 | "skipif", 295 | "test_create_report0", 296 | "test_create_report.py", 297 | "pytestmark", 298 | "test_skip" 299 | ], 300 | "setup": { 301 | "duration": 9.298324584960938e-05, 302 | "outcome": "skipped", 303 | "longrepr": "('test_create_report.py', 47, 'Skipped: testing skip')" 304 | }, 305 | "teardown": { 306 | "duration": 9.393692016601562e-05, 307 | "outcome": "passed" 308 | } 309 | }, 310 | { 311 | "nodeid": "test_create_report.py::test_fail_nested", 312 | "lineno": 51, 313 | "outcome": "failed", 314 | "keywords": [ 315 | "test_fail_nested", 316 | "test_create_report0", 317 | "test_create_report.py" 318 | ], 319 | "setup": { 320 | "duration": 0.00010728836059570312, 321 | "outcome": "passed" 322 | }, 323 | "call": { 324 | "duration": 0.00012683868408203125, 325 | "outcome": "failed", 326 | "crash": { 327 | "path": "/tmp/pytest-of-user/pytest-40/test_create_report0/test_create_report.py", 328 | "lineno": 55, 329 | "message": "TypeError: unsupported operand type(s) for -: 'int' and 'NoneType'" 330 | }, 331 | "traceback": [ 332 | { 333 | "path": "test_create_report.py", 334 | "lineno": 66, 335 | "message": "" 336 | }, 337 | { 338 | "path": "test_create_report.py", 339 | "lineno": 64, 340 | "message": "in foo" 341 | }, 342 | { 343 | "path": "test_create_report.py", 344 | 
"lineno": 64, 345 | "message": "in " 346 | }, 347 | { 348 | "path": "test_create_report.py", 349 | "lineno": 60, 350 | "message": "in bar" 351 | }, 352 | { 353 | "path": "test_create_report.py", 354 | "lineno": 55, 355 | "message": "TypeError" 356 | } 357 | ], 358 | "stdout": "foo\n0\nbar\n", 359 | "longrepr": "def test_fail_nested():\n def baz(o=1):\n c = 3\n return 2 - c - None\n def bar(m, n=5):\n b = 2\n print(m)\n print('bar')\n return baz()\n def foo():\n a = 1\n print('foo')\n v = [bar(x) for x in range(3)]\n return v\n> foo()\n\ntest_create_report.py:66: \n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \ntest_create_report.py:64: in foo\n v = [bar(x) for x in range(3)]\ntest_create_report.py:64: in \n v = [bar(x) for x in range(3)]\ntest_create_report.py:60: in bar\n return baz()\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \n\no = 1\n\n def baz(o=1):\n c = 3\n> return 2 - c - None\nE TypeError: unsupported operand type(s) for -: 'int' and 'NoneType'\n\ntest_create_report.py:55: TypeError" 360 | }, 361 | "teardown": { 362 | "duration": 0.00011205673217773438, 363 | "outcome": "passed" 364 | } 365 | }, 366 | { 367 | "nodeid": "test_create_report.py::test_parametrized[1]", 368 | "lineno": 67, 369 | "outcome": "passed", 370 | "keywords": [ 371 | "test_create_report0", 372 | "test_create_report.py", 373 | "test_parametrized[1]", 374 | "pytestmark", 375 | "1", 376 | "parametrize" 377 | ], 378 | "setup": { 379 | "duration": 0.0001621246337890625, 380 | "outcome": "passed" 381 | }, 382 | "call": { 383 | "duration": 0.00010347366333007812, 384 | "outcome": "passed" 385 | }, 386 | "teardown": { 387 | "duration": 0.00011467933654785156, 388 | "outcome": "passed" 389 | } 390 | }, 391 | { 392 | "nodeid": "test_create_report.py::test_parametrized[2]", 393 | "lineno": 67, 394 | "outcome": "failed", 395 | "keywords": [ 396 | "2", 397 | "test_create_report0", 398 | "test_create_report.py", 399 | "pytestmark", 400 | "parametrize", 401 | "test_parametrized[2]" 402 | ], 403 | "setup": { 404 | "duration": 0.0001544952392578125, 405 | "outcome": "passed" 406 | }, 407 | "call": { 408 | "duration": 0.00020122528076171875, 409 | "outcome": "failed", 410 | "crash": { 411 | "path": "/tmp/pytest-of-user/pytest-40/test_create_report0/test_create_report.py", 412 | "lineno": 70, 413 | "message": "assert 2 == 1" 414 | }, 415 | "traceback": [ 416 | { 417 | "path": "test_create_report.py", 418 | "lineno": 70, 419 | "message": "AssertionError" 420 | } 421 | ], 422 | "longrepr": "x = 2\n\n @pytest.mark.parametrize('x', [1, 2])\n def test_parametrized(x):\n> assert x == 1\nE assert 2 == 1\n\ntest_create_report.py:70: AssertionError" 423 | }, 424 | "teardown": { 425 | "duration": 0.00012826919555664062, 426 | "outcome": "passed" 427 | } 428 | } 429 | ] 430 | } 431 | -------------------------------------------------------------------------------- /tests/test_jsonreport.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os.path 3 | import sys 4 | import pytest 5 | 6 | from pytest_jsonreport.plugin import JSONReport 7 | from .conftest import tests_only, FILE 8 | 9 | 10 | def test_arguments_in_help(misc_testdir): 11 | res = misc_testdir.runpytest('--help') 12 | res.stdout.fnmatch_lines([ 13 | '*json-report*', 14 | '*json-report-file*', 15 | ]) 16 | 17 | 18 | def test_no_report(misc_testdir): 19 | misc_testdir.runpytest() 20 | assert not (misc_testdir.tmpdir / '.report.json').exists() 21 | 22 
| 23 | def test_create_report(misc_testdir): 24 | misc_testdir.runpytest('--json-report') 25 | assert (misc_testdir.tmpdir / '.report.json').exists() 26 | 27 | 28 | def test_create_report_file_from_arg(misc_testdir): 29 | misc_testdir.runpytest('--json-report', '--json-report-file=arg.json') 30 | assert (misc_testdir.tmpdir / 'arg.json').exists() 31 | 32 | 33 | def test_create_no_report(misc_testdir): 34 | misc_testdir.runpytest('--json-report', '--json-report-file=NONE') 35 | assert not (misc_testdir.tmpdir / '.report.json').exists() 36 | 37 | 38 | def test_terminal_summary(misc_testdir): 39 | res = misc_testdir.runpytest('--json-report') 40 | res.stdout.fnmatch_lines(['-*JSON report*-', '*report saved*.report.json*']) 41 | 42 | res = misc_testdir.runpytest('--json-report', '--json-report-file=./') 43 | res.stdout.fnmatch_lines(['*could not save report*']) 44 | 45 | res = misc_testdir.runpytest('--json-report', '--json-report-file=NONE') 46 | res.stdout.no_fnmatch_line('-*JSON report*-') 47 | 48 | res = misc_testdir.runpytest('--json-report', '--json-report-file=NONE', '-v') 49 | res.stdout.fnmatch_lines(['*auto-save skipped*']) 50 | 51 | res = misc_testdir.runpytest('--json-report', '-q') 52 | res.stdout.no_fnmatch_line('-*JSON report*-') 53 | 54 | res = misc_testdir.runpytest('--json-report', '-q', '--json-report-verbosity=0') 55 | res.stdout.fnmatch_lines(['-*JSON report*-']) 56 | 57 | res = misc_testdir.runpytest( 58 | '--json-report', '--json-report-file=NONE', '-vv', '--json-report-verbosity=0') 59 | res.stdout.no_fnmatch_line('-*JSON report*-') 60 | 61 | 62 | def test_report_keys(num_processes, make_json): 63 | data = make_json() 64 | keys = set([ 65 | 'created', 'duration', 'environment', 'collectors', 'tests', 'summary', 66 | 'root', 'exitcode' 67 | ]) 68 | if num_processes > 0: 69 | # xdist only reports failing collectors 70 | keys.remove('collectors') 71 | assert set(data) == keys 72 | assert isinstance(data['created'], float) 73 | assert isinstance(data['duration'], float) 74 | assert os.path.isabs(data['root']) 75 | assert data['exitcode'] == 1 76 | 77 | 78 | def test_report_collectors(num_processes, make_json): 79 | collectors = make_json().get('collectors', []) 80 | if num_processes > 0: 81 | # xdist only reports failing collectors 82 | assert len(collectors) == 0 83 | return 84 | assert len(collectors) == 2 85 | assert all(c['outcome'] == 'passed' for c in collectors) 86 | assert collectors[0] == { 87 | 'nodeid': '', 88 | 'outcome': 'passed', 89 | 'result': [ 90 | { 91 | 'nodeid': 'test_report_collectors.py', 92 | 'type': 'Module', 93 | } 94 | ] 95 | } 96 | assert { 97 | 'nodeid': 'test_report_collectors.py::test_pass', 98 | 'type': 'Function', 99 | 'lineno': 25, 100 | } in collectors[1]['result'] 101 | 102 | 103 | def test_report_failed_collector(num_processes, make_json): 104 | data = make_json(""" 105 | syntax error 106 | def test_foo(): 107 | assert True 108 | """) 109 | 110 | collectors = data['collectors'] 111 | assert data['tests'] == [] 112 | if num_processes == 0: 113 | assert collectors[0]['outcome'] == 'passed' 114 | assert collectors[1]['outcome'] == 'failed' 115 | assert collectors[1]['result'] == [] 116 | assert 'longrepr' in collectors[1] 117 | else: 118 | # xdist only reports failing collectors 119 | assert collectors[0]['outcome'] == 'failed' 120 | assert collectors[0]['result'] == [] 121 | assert 'longrepr' in collectors[0] 122 | 123 | 124 | def test_report_failed_collector2(num_processes, make_json): 125 | data = make_json(""" 126 | import nonexistent 
127 | def test_foo(): 128 | pass 129 | """) 130 | collectors = data['collectors'] 131 | # xdist only reports failing collectors 132 | idx = 1 if num_processes == 0 else 0 133 | assert collectors[idx]['longrepr'].startswith('ImportError') 134 | 135 | 136 | def test_report_item_keys(tests): 137 | assert set(tests['pass']) == set([ 138 | 'nodeid', 'lineno', 'outcome', 'keywords', 'setup', 'call', 139 | 'teardown']) 140 | 141 | 142 | def test_report_outcomes(tests): 143 | assert len(tests) == 10 144 | assert tests['pass']['outcome'] == 'passed' 145 | assert tests['fail_with_fixture']['outcome'] == 'failed' 146 | assert tests['xfail']['outcome'] == 'xfailed' 147 | assert tests['xfail_but_passing']['outcome'] == 'xpassed' 148 | assert tests['fail_during_setup']['outcome'] == 'error' 149 | assert tests['fail_during_teardown']['outcome'] == 'error' 150 | assert tests['skip']['outcome'] == 'skipped' 151 | 152 | 153 | def test_report_summary(make_json): 154 | assert make_json()['summary'] == { 155 | 'total': 10, 156 | 'passed': 2, 157 | 'failed': 3, 158 | 'skipped': 1, 159 | 'xpassed': 1, 160 | 'xfailed': 1, 161 | 'error': 2, 162 | 'collected': 10, 163 | } 164 | 165 | 166 | def test_report_longrepr(tests): 167 | assert 'assert False' in tests['fail_with_fixture']['call']['longrepr'] 168 | 169 | 170 | def test_report_crash_and_traceback(tests): 171 | assert 'traceback' not in tests['pass']['call'] 172 | call = tests['fail_nested']['call'] 173 | assert call['crash']['path'].endswith('test_report_crash_and_traceback.py') 174 | assert call['crash']['lineno'] == 55 175 | assert call['crash']['message'].startswith('TypeError: unsupported ') 176 | traceback = [ 177 | { 178 | 'path': 'test_report_crash_and_traceback.py', 179 | 'lineno': 66, 180 | 'message': '' 181 | }, 182 | { 183 | 'path': 'test_report_crash_and_traceback.py', 184 | 'lineno': 64, 185 | 'message': 'in foo' 186 | }, 187 | { 188 | 'path': 'test_report_crash_and_traceback.py', 189 | 'lineno': 64, 190 | 'message': 'in <listcomp>' 191 | }, 192 | { 193 | 'path': 'test_report_crash_and_traceback.py', 194 | 'lineno': 60, 195 | 'message': 'in bar' 196 | }, 197 | { 198 | 'path': 'test_report_crash_and_traceback.py', 199 | 'lineno': 55, 200 | 'message': 'TypeError' 201 | } 202 | ] 203 | if sys.version_info < (3,): 204 | del traceback[2] 205 | assert call['traceback'] == traceback 206 | 207 | 208 | def test_report_traceback_styles(make_json): 209 | """Handle different traceback styles (`--tb=...`).""" 210 | code = ''' 211 | def test_raise(): assert False 212 | def test_raise_nested(): f = lambda: g; f() 213 | ''' 214 | for style in ('long', 'short'): 215 | data = make_json(code, ['--json-report', '--tb=%s' % style]) 216 | for i in (0, 1): 217 | assert isinstance(data['tests'][i]['call']['traceback'], list) 218 | 219 | for style in ('native', 'line', 'no'): 220 | data = make_json(code, ['--json-report', '--tb=%s' % style]) 221 | for i in (0, 1): 222 | assert 'traceback' not in data['tests'][i]['call'] 223 | 224 | 225 | def test_report_item_deselected(make_json): 226 | data = make_json(""" 227 | import pytest 228 | @pytest.mark.good 229 | def test_first(): 230 | pass 231 | @pytest.mark.bad 232 | def test_second(): 233 | pass 234 | """, ['--json-report', '-m', 'not bad']) 235 | assert data['summary']['collected'] == 2 236 | assert data['summary']['total'] == 1 237 | assert data['summary']['deselected'] == 1 238 | assert not data['collectors'][1]['result'][0].get('deselected') 239 | assert data['collectors'][1]['result'][1].get('deselected') 240 | 241 | 242 | def 
test_no_traceback(make_json): 243 | data = make_json(FILE, ['--json-report', '--json-report-omit=traceback']) 244 | tests_ = tests_only(data) 245 | assert 'traceback' not in tests_['fail_nested']['call'] 246 | 247 | 248 | def test_pytest_no_traceback(make_json): 249 | data = make_json(FILE, ['--json-report', '--tb=no']) 250 | tests_ = tests_only(data) 251 | assert 'traceback' not in tests_['fail_nested']['call'] 252 | 253 | 254 | def test_no_streams(make_json): 255 | data = make_json(FILE, ['--json-report', '--json-report-omit=streams']) 256 | call = tests_only(data)['fail_with_fixture']['call'] 257 | assert 'stdout' not in call 258 | assert 'stderr' not in call 259 | 260 | 261 | def test_summary_only(make_json): 262 | data = make_json(FILE, ['--json-report', '--json-report-summary']) 263 | assert 'summary' in data 264 | assert 'tests' not in data 265 | assert 'collectors' not in data 266 | assert 'warnings' not in data 267 | 268 | 269 | def test_report_streams(tests): 270 | test = tests['fail_with_fixture'] 271 | assert test['setup']['stdout'] == 'setup\n' 272 | assert test['setup']['stderr'] == 'setuperr\n' 273 | assert test['call']['stdout'] == 'call\n' 274 | assert test['call']['stderr'] == 'callerr\n' 275 | assert test['teardown']['stdout'] == 'teardown\n' 276 | assert test['teardown']['stderr'] == 'teardownerr\n' 277 | assert 'stdout' not in tests['pass']['call'] 278 | assert 'stderr' not in tests['pass']['call'] 279 | 280 | 281 | def test_record_property(make_json, num_processes): 282 | data = make_json(""" 283 | def test_record_property(record_property): 284 | record_property('foo', 42) 285 | record_property('bar', ['baz', {'x': 'y'}]) 286 | record_property('foo', 43) 287 | record_property(123, 456) 288 | 289 | def test_record_property_empty(record_property): 290 | assert True 291 | 292 | def test_record_property_unserializable(record_property): 293 | record_property('foo', b'somebytes') 294 | """) 295 | tests_ = tests_only(data) 296 | assert tests_['record_property']['user_properties'] == \ 297 | [{'foo': 42}, {'bar': ['baz', {'x': 'y'}]}, {'foo': 43}, {'123': 456}] 298 | assert 'user_properties' not in tests_['record_property_empty'].keys() 299 | # TODO Relay warnings correctly when using xdist 300 | if num_processes == 0: 301 | assert len(data['warnings']) == 1 and ( 302 | 'not JSON-serializable' in data['warnings'][0]['message']) 303 | 304 | 305 | def test_json_metadata(make_json): 306 | data = make_json(""" 307 | def test_metadata1(json_metadata): 308 | json_metadata['x'] = 'foo' 309 | json_metadata['y'] = [1, {'a': 2}] 310 | 311 | def test_metadata2(json_metadata): 312 | json_metadata['z'] = 1 313 | assert False 314 | 315 | def test_unused_metadata(json_metadata): 316 | assert True 317 | 318 | def test_empty_metadata(json_metadata): 319 | json_metadata.update({}) 320 | 321 | def test_unserializable_metadata(json_metadata): 322 | json_metadata['a'] = object() 323 | 324 | import pytest 325 | @pytest.fixture 326 | def stage(json_metadata): 327 | json_metadata['a'] = 1 328 | yield 329 | json_metadata['c'] = 3 330 | 331 | def test_multi_stage_metadata(json_metadata, stage): 332 | json_metadata['b'] = 2 333 | """) 334 | tests_ = tests_only(data) 335 | assert tests_['metadata1']['metadata'] == {'x': 'foo', 'y': [1, {'a': 2}]} 336 | assert tests_['metadata2']['metadata'] == {'z': 1} 337 | assert 'metadata' not in tests_['unused_metadata'] 338 | assert 'metadata' not in tests_['empty_metadata'] 339 | assert 'metadata' not in tests_['unserializable_metadata'] 340 | assert 
len(data['warnings']) == 1 and ( 341 | 'test_unserializable_metadata is not JSON-serializable' in 342 | data['warnings'][0]['message']) 343 | assert \ 344 | tests_['multi_stage_metadata']['metadata'] == {'a': 1, 'b': 2, 'c': 3} 345 | 346 | 347 | def test_metadata_fixture_without_report_flag(testdir): 348 | """Using the json_metadata fixture without --json-report should not raise 349 | internal errors.""" 350 | testdir.makepyfile(''' 351 | def test_metadata(json_metadata): 352 | json_metadata['x'] = 'foo' 353 | ''') 354 | res = testdir.runpytest() 355 | assert res.ret == 0 356 | assert not (testdir.tmpdir / '.report.json').exists() 357 | 358 | 359 | def test_environment_via_metadata_plugin(make_json): 360 | data = make_json('', ['--json-report', '--metadata', 'x', 'y']) 361 | assert 'Python' in data['environment'] 362 | assert data['environment']['x'] == 'y' 363 | 364 | 365 | def test_modifyreport_hook(testdir, make_json): 366 | testdir.makeconftest(""" 367 | def pytest_json_modifyreport(json_report): 368 | json_report['foo'] = 'bar' 369 | del json_report['summary'] 370 | """) 371 | data = make_json(""" 372 | def test_foo(): 373 | assert False 374 | """) 375 | assert data['foo'] == 'bar' 376 | assert 'summary' not in data 377 | 378 | 379 | def test_runtest_stage_hook(testdir, make_json): 380 | testdir.makeconftest(""" 381 | def pytest_json_runtest_stage(report): 382 | return {'outcome': report.outcome} 383 | """) 384 | data = make_json(""" 385 | def test_foo(): 386 | assert False 387 | """) 388 | test = data['tests'][0] 389 | assert test['setup'] == {'outcome': 'passed'} 390 | assert test['call'] == {'outcome': 'failed'} 391 | assert test['teardown'] == {'outcome': 'passed'} 392 | 393 | 394 | def test_runtest_metadata_hook(testdir, make_json): 395 | testdir.makeconftest(""" 396 | def pytest_json_runtest_metadata(item, call): 397 | if call.when != 'call': 398 | return {} 399 | return {'id': item.nodeid, 'start': call.start, 'stop': call.stop} 400 | """) 401 | data = make_json(""" 402 | def test_foo(): 403 | assert False 404 | """) 405 | test = data['tests'][0] 406 | assert test['metadata']['id'].endswith('::test_foo') 407 | assert isinstance(test['metadata']['start'], float) 408 | assert isinstance(test['metadata']['stop'], float) 409 | 410 | 411 | def test_warnings(make_json, num_processes): 412 | warnings = make_json(""" 413 | class TestFoo: 414 | def __init__(self): 415 | pass 416 | def test_foo(self): 417 | assert True 418 | """)['warnings'] 419 | assert len(warnings) == max(1, num_processes) 420 | assert set(warnings[0]) == { 421 | 'category', 'filename', 'lineno', 'message', 'when' 422 | } 423 | assert warnings[0]['category'] in ( 424 | 'PytestCollectionWarning', 425 | 'PytestWarning' 426 | ) 427 | assert warnings[0]['filename'].endswith('.py') 428 | assert warnings[0]['lineno'] == 1 429 | assert warnings[0]['when'] == 'collect' 430 | assert '__init__' in warnings[0]['message'] 431 | 432 | 433 | def test_process_report(testdir, make_json): 434 | testdir.makeconftest(""" 435 | def pytest_sessionfinish(session): 436 | assert session.config._json_report.report['exitcode'] == 0 437 | """) 438 | testdir.makepyfile(""" 439 | def test_foo(): 440 | assert True 441 | """) 442 | res = testdir.runpytest('--json-report') 443 | assert res.ret == 0 444 | 445 | 446 | def test_indent(testdir, make_json): 447 | testdir.runpytest('--json-report') 448 | with open(str(testdir.tmpdir / '.report.json')) as f: 449 | assert len(f.readlines()) == 1 450 | testdir.runpytest('--json-report', 
'--json-report-indent=4') 451 | with open(str(testdir.tmpdir / '.report.json')) as f: 452 | assert f.readlines()[1].startswith(' "') 453 | 454 | 455 | def test_logging(make_json): 456 | data = make_json(""" 457 | import logging 458 | import pytest 459 | 460 | @pytest.fixture 461 | def fixture(request): 462 | logging.info('log info') 463 | def f(): 464 | logging.warn('log warn') 465 | request.addfinalizer(f) 466 | 467 | def test_foo(fixture): 468 | logging.error('log error') 469 | try: 470 | raise 471 | except (RuntimeError, TypeError): # TypeError is raised in Py 2.7 472 | logging.getLogger().debug('log %s', 'debug', exc_info=True) 473 | """, ['--json-report', '--log-level=DEBUG']) 474 | 475 | test = data['tests'][0] 476 | assert test['setup']['log'][0]['msg'] == 'log info' 477 | assert test['call']['log'][0]['msg'] == 'log error' 478 | assert test['call']['log'][1]['msg'] == 'log debug' 479 | assert test['teardown']['log'][0]['msg'] == 'log warn' 480 | 481 | record = logging.makeLogRecord(test['call']['log'][1]) 482 | assert record.getMessage() == record.msg == 'log debug' 483 | 484 | 485 | def test_no_logs(make_json): 486 | data = make_json(""" 487 | import logging 488 | def test_foo(): 489 | logging.error('log error') 490 | """, ['--json-report']) 491 | assert 'log' in data['tests'][0]['call'] 492 | 493 | data = make_json(""" 494 | import logging 495 | def test_foo(): 496 | logging.error('log error') 497 | """, ['--json-report', '--json-report-omit=log']) 498 | assert 'log' not in data['tests'][0]['call'] 499 | 500 | 501 | def test_no_keywords(make_json): 502 | data = make_json() 503 | assert 'keywords' in data['tests'][0] 504 | 505 | data = make_json(args=['--json-report', '--json-report-omit=keywords']) 506 | assert 'keywords' not in data['tests'][0] 507 | 508 | 509 | def test_no_collectors(make_json, num_processes): 510 | data = make_json() 511 | if num_processes == 0: 512 | # xdist only reports failing collectors 513 | assert 'collectors' in data 514 | 515 | data = make_json(args=['--json-report', '--json-report-omit=collectors']) 516 | assert 'collectors' not in data 517 | 518 | 519 | def test_no_warnings(make_json, num_processes): 520 | assert 'warnings' not in make_json(""" 521 | class TestFoo: 522 | def __init__(self): 523 | pass 524 | def test_foo(self): 525 | assert True 526 | """, args=['--json-report', '--json-report-omit=warnings']) 527 | 528 | 529 | def test_direct_invocation(testdir): 530 | test_file = testdir.makepyfile(""" 531 | def test_foo(): 532 | assert True 533 | """) 534 | plugin = JSONReport() 535 | res = pytest.main([test_file.strpath], plugins=[plugin]) 536 | assert res == 0 537 | assert plugin.report['exitcode'] == 0 538 | assert plugin.report['summary']['total'] == 1 539 | 540 | report_path = testdir.tmpdir / 'foo_report.json' 541 | assert not report_path.exists() 542 | plugin.save_report(str(report_path)) 543 | assert report_path.exists() 544 | 545 | 546 | def test_xdist(make_json, match_reports): 547 | r1 = make_json(FILE, ['--json-report']) 548 | r2 = make_json(FILE, ['--json-report', '-n=1']) 549 | r3 = make_json(FILE, ['--json-report', '-n=4']) 550 | assert match_reports(r1, r2) 551 | assert match_reports(r2, r3) 552 | 553 | 554 | def test_bug_31(make_json): 555 | data = make_json(''' 556 | from flaky import flaky 557 | 558 | FLAKY_RUNS = 0 559 | 560 | @flaky 561 | def test_flaky_pass(): 562 | assert 1 + 1 == 2 563 | 564 | @flaky 565 | def test_flaky_fail(): 566 | global FLAKY_RUNS 567 | FLAKY_RUNS += 1 568 | assert FLAKY_RUNS == 2 569 | ''') 570 | 
assert set(data['summary'].items()) == { 571 | ('total', 2), 572 | ('passed', 2), 573 | ('collected', 2), 574 | } 575 | 576 | 577 | def test_bug_37(testdir): 578 | """#37: Report is not accessible via config._json_report when pytest is run 579 | from code via pytest.main(). 580 | """ 581 | test_file = testdir.makepyfile(""" 582 | def test_foo(): 583 | assert True 584 | """) 585 | testdir.makeconftest(""" 586 | def pytest_sessionfinish(session): 587 | assert session.config._json_report.report['exitcode'] == 0 588 | """) 589 | plugin = JSONReport() 590 | pytest.main([test_file.strpath], plugins=[plugin]) 591 | 592 | 593 | def test_bug_41(misc_testdir): 594 | """#41: Create report file path if it doesn't exist.""" 595 | misc_testdir.runpytest('--json-report', '--json-report-file=x/report.json') 596 | assert (misc_testdir.tmpdir / 'x/report.json').exists() 597 | 598 | 599 | def test_bug_69(testdir): 600 | """#69: Handle deselection of test items that have not been collected.""" 601 | fn = testdir.makepyfile(''' 602 | def test_pass(): 603 | assert True 604 | def test_fail(): 605 | assert False 606 | ''').strpath 607 | assert testdir.runpytest('--json-report', fn).ret == 1 608 | # In this second run, `--last-failed` causes `test_pass` to not be 609 | # *collected* but still explicitly *deselected*, so we assert there is no 610 | # internal error caused by trying to access the collector obj. 611 | assert testdir.runpytest('--json-report', '--last-failed', fn).ret == 1 612 | 613 | 614 | def test_bug_75(make_json, num_processes): 615 | """#75: Check that a crashing xdist worker doesn't kill the whole test run.""" 616 | if num_processes < 1: 617 | pytest.skip("This test only makes sense with xdist.") 618 | 619 | data = make_json(''' 620 | import pytest 621 | import os 622 | 623 | @pytest.mark.parametrize("n", range(10)) 624 | def test_crash_one_worker(n): 625 | if n == 0: 626 | os._exit(1) 627 | ''') 628 | assert data['exitcode'] == 1 629 | assert data['summary']['passed'] == 9 630 | assert data['summary']['failed'] == 1 631 | --------------------------------------------------------------------------------
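
A minimal usage sketch, assuming only the plugin behaviour exercised by the test suite above: the hook names pytest_json_runtest_metadata and pytest_json_modifyreport come from test_runtest_metadata_hook and test_modifyreport_hook, and the JSONReport / plugin.report / save_report API from test_direct_invocation. The file names, the metadata and report keys, the test path and the output path below are illustrative, not part of the repository.

    # conftest.py (illustrative file name) -- hooks as exercised in the tests above

    def pytest_json_runtest_metadata(item, call):
        # Attach per-test timing metadata; skip the setup/teardown stages.
        if call.when != 'call':
            return {}
        return {'id': item.nodeid, 'start': call.start, 'stop': call.stop}

    def pytest_json_modifyreport(json_report):
        # Post-process the report dict before it is saved ('note' is an illustrative key).
        json_report['note'] = 'adjusted in conftest.py'


    # run_report.py (illustrative file name) -- direct invocation as in test_direct_invocation

    import pytest
    from pytest_jsonreport.plugin import JSONReport

    plugin = JSONReport()
    exit_code = pytest.main(['tests/'], plugins=[plugin])  # test path is illustrative
    print(exit_code, plugin.report['summary'])             # report dict is available in memory
    plugin.save_report('my_report.json')                   # output path is illustrative

With direct invocation the report is built even though --json-report is not passed, and it is also reachable from hooks via session.config._json_report, matching the assertions in test_direct_invocation, test_process_report and test_bug_37.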