├── setup.cfg ├── tests ├── conftest.py ├── __init__.py ├── test_pytest_fold_2.py ├── test_pytest_fold_xpass_xfail.py ├── test_pytest_fold_warning.py └── test_pytest_fold_1.py ├── pytest.ini ├── pytest_fold ├── __init__.py ├── stash │ ├── tuit2.py │ └── tuit.py ├── observations ├── old │ ├── tui-hover.py │ ├── tuit2.py │ ├── tui_asciimatics.py │ └── tuit.py ├── tui_textual2.py ├── tui_pytermtk.py ├── plugin.py ├── tui_textual1.py └── utils.py ├── requirements.txt ├── LICENSE ├── .gitignore ├── RELEASE_INSTRUCTIONS ├── setup.py └── README.md /setup.cfg: -------------------------------------------------------------------------------- 1 | [bdist_wheel] 2 | universal=1 3 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | pytest_plugins = ["pytester"] 2 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | ; addopts = -rA -v 3 | testpaths = 4 | tests 5 | ; log_cli = True 6 | ; log_cli_level = WARNING 7 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from single_source import get_version 3 | 4 | __version__ = get_version(__name__, Path(__file__).parent.parent / "setup.py") 5 | -------------------------------------------------------------------------------- /pytest_fold/__init__.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from single_source import get_version 3 | 4 | __version__ = get_version(__name__, Path(__file__).parent.parent / "setup.py") 5 | -------------------------------------------------------------------------------- /requirements.txt: 
-------------------------------------------------------------------------------- 1 | asciimatics==1.13.0 2 | attrs==21.4.0 3 | Faker==13.0.0 4 | future==0.18.2 5 | iniconfig==1.1.1 6 | packaging==21.3 7 | Pillow==9.0.1 8 | pluggy==1.0.0 9 | py==1.11.0 10 | pyfiglet==0.8.post1 11 | pyparsing==3.0.7 12 | pyTermTk==0.9.0a43 13 | pytest>=6.2.5 14 | single-source==0.2.0 15 | strip-ansi==0.1.1 16 | textual==0.1.17 17 | toml==0.10.2 18 | wcwidth==0.2.5 19 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2021 Jeff Wright 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 
23 | -------------------------------------------------------------------------------- /tests/test_pytest_fold_2.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import logging 3 | import sys 4 | 5 | LOG_LEVELS = ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL") 6 | logger = logging.getLogger() 7 | logger.setLevel(logging.NOTSET) 8 | logger.propagate = True 9 | stdout_handler = logging.StreamHandler(sys.stdout) 10 | logger.addHandler(stdout_handler) 11 | logging.getLogger("faker").setLevel(logging.ERROR) 12 | 13 | 14 | @pytest.fixture 15 | def error_fixture(): 16 | assert 0 17 | 18 | 19 | def test_i_ok(): 20 | print("ok") 21 | 22 | 23 | def test_ii_fail(): 24 | assert 0 25 | 26 | 27 | def test_iii_error(error_fixture): 28 | pass 29 | 30 | 31 | def test_iv_skip(): 32 | pytest.skip("skipping this test") 33 | 34 | 35 | def test_v_xfail(): 36 | pytest.xfail("xfailing this test") 37 | 38 | 39 | def test_vi_fail_compare_dicts_for_pytest_icdiff(): 40 | listofStrings = ["Hello", "hi", "there", "at", "this"] 41 | listofInts = [7, 10, 45, 23, 77] 42 | assert len(listofStrings) == len(listofInts) 43 | assert listofStrings == listofInts 44 | 45 | 46 | @pytest.mark.xfail(reason="always xfail") 47 | def test_vi_xpass(): 48 | pass 49 | -------------------------------------------------------------------------------- /tests/test_pytest_fold_xpass_xfail.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import faker 3 | import logging 4 | import random 5 | import sys 6 | import warnings 7 | 8 | LOG_LEVELS = ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL") 9 | logger = logging.getLogger() 10 | logger.setLevel(logging.NOTSET) 11 | logger.propagate = True 12 | stdout_handler = logging.StreamHandler(sys.stdout) 13 | logger.addHandler(stdout_handler) 14 | logging.getLogger("faker").setLevel(logging.ERROR) 15 | 16 | 17 | def test_xfail_by_inline(): 18 | logger.debug("Debug 
level log line") 19 | logger.info("info level log line") 20 | logger.warning("Warning level log line") 21 | logger.error("Error level log line") 22 | logger.critical("Critical level log line") 23 | pytest.xfail("xfailing this test with 'pytest.xfail()'") 24 | 25 | assert False 26 | 27 | 28 | @pytest.mark.xfail(reason="Here's my reason for xfail: None") 29 | def test_xfail_by_decorator(): 30 | logger.debug("Debug level log line") 31 | logger.info("info level log line") 32 | logger.warning("Warning level log line") 33 | logger.error("Error level log line") 34 | logger.critical("Critical level log line") 35 | 36 | assert False 37 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Pyfold 2 | console_output.fold 3 | console_output.pickle 4 | textual.log 5 | 6 | # Byte-compiled / optimized / DLL files 7 | __pycache__/ 8 | *.py[cod] 9 | *$py.class 10 | 11 | # C extensions 12 | *.so 13 | 14 | # Distribution / packaging 15 | .Python 16 | env/ 17 | venv/ 18 | .venv/ 19 | build/ 20 | develop-eggs/ 21 | dist/ 22 | downloads/ 23 | eggs/ 24 | .eggs/ 25 | lib/ 26 | lib64/ 27 | parts/ 28 | sdist/ 29 | var/ 30 | *.egg-info/ 31 | .installed.cfg 32 | *.egg 33 | 34 | # PyInstaller 35 | # Usually these files are written by a python script from a template 36 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
37 | *.manifest 38 | *.spec 39 | 40 | # Installer logs 41 | pip-log.txt 42 | pip-delete-this-directory.txt 43 | 44 | # Unit test / coverage reports 45 | htmlcov/ 46 | .tox/ 47 | .coverage 48 | .coverage.* 49 | .cache 50 | nosetests.xml 51 | coverage.xml 52 | *,cover 53 | .hypothesis/ 54 | 55 | # Pytest and plugins 56 | .pytest_cache 57 | report.html 58 | 59 | # Translations 60 | *.mo 61 | *.pot 62 | 63 | # Django stuff: 64 | *.log 65 | local_settings.py 66 | 67 | # Flask instance folder 68 | instance/ 69 | 70 | # Sphinx documentation 71 | docs/_build/ 72 | 73 | # MkDocs documentation 74 | /site/ 75 | 76 | # PyBuilder 77 | target/ 78 | 79 | # IPython Notebook 80 | .ipynb_checkpoints 81 | 82 | # pyenv 83 | .python-version 84 | 85 | # Sublime projects/workspaces 86 | *.sublime-* 87 | 88 | # IDEs 89 | .vscode 90 | -------------------------------------------------------------------------------- /RELEASE_INSTRUCTIONS: -------------------------------------------------------------------------------- 1 | RELEASE_INSTRUCTIONS 2 | -------------------- 3 | 1 Verify package installs and runs correctly locally (from toplevel): 4 | cd 5 | pip install -e . 
6 | 7 | 2 Install/upgrade build & release tools: 8 | pip install --upgrade setuptools wheel twine 9 | 10 | 3 Clear out old builds: 11 | rm dist/* 12 | 13 | 4 Build: 14 | python setup.py sdist bdist_wheel 15 | 16 | 5 Publish to TestPyPi: 17 | python -m twine upload --repository testpypi dist/* 18 | 19 | 6 Test the installation in a fresh directory: 20 | cd ~ 21 | mkdir pytest-fold-testing 22 | cd pytest-fold-testing 23 | pyenv local 3.9.9 24 | python -m venv venv 25 | source venv/bin/activate 26 | python -m pip install --index-url https://test.pypi.org/simple/ pytest-fold 27 | mkdir -p ./tests/ && cp ../pytest-fold/tests/* ./tests/ 28 | 29 | 7 Verify PyTermTk TUI: 30 | pytest --fold 31 | 32 | 8 Verify Textual TUIs: 33 | pytest --fold --ft=t1 34 | pytest --fold --ft=t2 35 | 36 | 9 Verify No-TUI: 37 | pytest --fold --ft=n 38 | 39 | 10 Make any adjustments required, then git-commit and push, then start over at step 1 40 | 41 | 11 Publish to Pypi: 42 | cd 43 | python -m twine upload dist/* 44 | 45 | 12 Test the installation as per Step 6 46 | 47 | 13 Run a test run to make sure it works (verify TUI): 48 | pytest --fold 49 | 50 | App A Encode screencasts to /webm for posting on GitHub 51 | ffmpeg -i ".mp4" -b:v 0 -crf 30 -pass 1 -an -f webm -y /dev/null 52 | ffmpeg -i "filename.mp4" -b:v 0 -crf 30 -pass 2 outputfile.webm 53 | -------------------------------------------------------------------------------- /tests/test_pytest_fold_warning.py: -------------------------------------------------------------------------------- 1 | import faker 2 | import random 3 | import warnings 4 | 5 | 6 | def fake_data(min: int = 30, max: int = 120) -> str: 7 | return faker.Faker().text(random.randint(min, max)) 8 | 9 | 10 | def test_one_warning_fails(): 11 | warnings.warn(Warning(fake_data(50, 200))) 12 | warnings.warn(UserWarning(fake_data(55, 205))) 13 | warnings.warn(DeprecationWarning(fake_data(55, 205))) 14 | warnings.warn(SyntaxWarning(fake_data(55, 205))) 15 | 
warnings.warn(RuntimeWarning(fake_data(55, 205))) 16 | warnings.warn(FutureWarning(fake_data(55, 205))) 17 | warnings.warn(PendingDeprecationWarning(fake_data(55, 205))) 18 | warnings.warn(ImportWarning(fake_data(55, 205))) 19 | warnings.warn(UnicodeWarning(fake_data(55, 205))) 20 | warnings.warn(BytesWarning(fake_data(55, 205))) 21 | warnings.warn(ResourceWarning(fake_data(55, 205))) 22 | warnings.warn((fake_data(55, 205))) 23 | assert False 24 | 25 | 26 | def test_two_warning_passes(): 27 | warnings.warn(Warning(fake_data(50, 200))) 28 | warnings.warn(UserWarning(fake_data(55, 205))) 29 | warnings.warn(DeprecationWarning(fake_data(55, 205))) 30 | warnings.warn(SyntaxWarning(fake_data(55, 205))) 31 | warnings.warn(RuntimeWarning(fake_data(55, 205))) 32 | warnings.warn(FutureWarning(fake_data(55, 205))) 33 | warnings.warn(PendingDeprecationWarning(fake_data(55, 205))) 34 | warnings.warn(ImportWarning(fake_data(55, 205))) 35 | warnings.warn(UnicodeWarning(fake_data(55, 205))) 36 | warnings.warn(BytesWarning(fake_data(55, 205))) 37 | warnings.warn(ResourceWarning(fake_data(55, 205))) 38 | warnings.warn((fake_data(55, 205))) 39 | assert True 40 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import os 5 | import codecs 6 | from setuptools import setup, find_packages 7 | 8 | 9 | def read(fname): 10 | file_path = os.path.join(os.path.dirname(__file__), fname) 11 | return codecs.open(file_path, encoding="utf-8").read() 12 | 13 | 14 | setup( 15 | name="pytest-fold", 16 | version="0.8.4", 17 | author="Jeff Wright", 18 | author_email="jeff.washcloth@gmail.com", 19 | license="MIT", 20 | url="https://github.com/jeffwright13/pytest-fold", 21 | description="Capture Pytest output and when test run is complete, drop user into interactive text user interface", 22 | 
long_description=read("README.md"), 23 | long_description_content_type="text/markdown", 24 | # packages=["pytest_fold"], 25 | packages=find_packages(), 26 | py_modules=["pytest_fold"], 27 | python_requires=">=3.8", 28 | install_requires=[ 29 | "Faker>=13.0.0", 30 | "pytest>=6.2.5", 31 | "pyTermTk>=0.9.0a43", 32 | "single-source>=0.2.0", 33 | "strip-ansi>=0.1.1", 34 | "textual>=0.1.17", 35 | ], 36 | classifiers=[ 37 | "Framework :: Pytest", 38 | "Development Status :: 4 - Beta", 39 | "Intended Audience :: Developers", 40 | "Topic :: Software Development :: Testing", 41 | "Programming Language :: Python :: 3.8", 42 | "Programming Language :: Python :: 3.9", 43 | "Programming Language :: Python :: 3.10", 44 | "Operating System :: OS Independent", 45 | "License :: OSI Approved :: MIT License", 46 | ], 47 | keywords="pytest testing fold output logs fail pytermtk asciimatics textual single-source", 48 | entry_points={ 49 | "pytest11": ["pytest_fold = pytest_fold.plugin"], 50 | "console_scripts": [ 51 | "tuitxt = pytest_fold.tui_textual1:main", 52 | "tuitxt2 = pytest_fold.tui_textual2:main", 53 | "tuitk = pytest_fold.tui_pytermtk:main", 54 | ], 55 | }, 56 | ) 57 | -------------------------------------------------------------------------------- /pytest_fold/stash/tuit2.py: -------------------------------------------------------------------------------- 1 | from rich.console import RenderableType 2 | from rich.text import Text 3 | from textual import events 4 | from textual.app import App 5 | from textual.reactive import Reactive 6 | from textual.views import DockView 7 | from textual.widgets import Header, Footer, TreeControl, ScrollView, TreeClick 8 | from pytest_fold.utils import OUTCOMES, Results 9 | 10 | 11 | class PytestFoldApp(App): 12 | 13 | async def on_load(self, event: events.Load) -> None: 14 | await self.bind("q", "quit", "Quit") 15 | 16 | # Get test result sections 17 | self.test_results = Results() 18 | self.summary_results = 
self.test_results.Sections["LAST_LINE"].content.replace( 19 | "=", "" 20 | ) 21 | self.unmarked_output = self.test_results.unmarked_output 22 | self.marked_output = self.test_results.marked_output 23 | 24 | async def on_mount(self) -> None: 25 | header1 = Header(style="bold white on black") 26 | header1.title = self.summary_results 27 | await self.view.dock(header1, edge="top", size=1) 28 | 29 | self.body = ScrollView() 30 | self.dock_view = DockView() 31 | 32 | tree = TreeControl("pytest --fold", {}) 33 | for results_key in OUTCOMES: 34 | await tree.add(tree.root.id, results_key, {"results": self.test_results.tests_failures}) 35 | await tree.root.expand() 36 | 37 | await self.view.dock(ScrollView(tree), edge="left", size=48, name="sidebar") 38 | await self.view.dock(self.dock_view) 39 | await self.dock_view.dock(self.body, edge="top", size=48) 40 | 41 | async def handle_tree_click(self, message: TreeClick[dict]) -> None: 42 | """Called in response to a tree click.""" 43 | label = self.text = message.node.label 44 | self.text = message.node.data.get("results")[label] 45 | 46 | text: RenderableType 47 | text = Text.from_ansi(self.text) 48 | await self.body.update(text) 49 | 50 | 51 | def main(): 52 | PytestFoldApp(title="pytest --fold results").run() 53 | 54 | 55 | if __name__ == "__main__": 56 | main() 57 | -------------------------------------------------------------------------------- /pytest_fold/observations: -------------------------------------------------------------------------------- 1 | - There are only three outcomes as far as Pytest is concerned: 'passed', 'failed', 'skipped' 2 | - Most tests are made up of three phases ("when"): 'setup', 'call', 'teardown' 3 | - The actual test outcome is shown during the 'call' phase: 4 | - for 'passed' tests, all three phases show outcome = 'passed' 5 | - for 'failed' tests, only the 'call' phase shows outcome = 'failed'; 'setup' and 'teardown' show 'passed' 6 | - for 'failed' tests, the traceback info is located in 
the "longreprtext" field (but stripped of ANSI codes) 7 | - Some tests only have two phases: 8 | - for 'skipped' tests, only 'setup' and 'teardown' exist IF they were marked as skip with the `pytest.mark.skip` decorator 9 | - for 'skipped' tests, all three phases exist IF they were forced to skip with the `pytest.skip` statement inside the test 10 | - for 'skipped' tests, 'setup' phase has outcome = 'skipped' 11 | 12 | - For skipped tests, check this logic: 13 | if hasattr(report, "wasxfail"): 14 | if report.skipped: 15 | return "xfailed", "x", "XFAIL" 16 | elif report.passed: 17 | return "xpassed", "X", "XPASS" 18 | 19 | 20 | - Tests with a warning are 'passed' tests (outcome='passed' for all three phases) 21 | - stdout, stderr and stdlog info is contained in the 'call' phase only 22 | - If a test results in 'error', no stdout/stderr/stdlog will be shown, just the error msg 23 | - When a test passes despite being expected to fail (marked with pytest.mark.xfail), it’s an xpass and will be reported in the test summary 24 | - It *appears* as if Pytest marks an Xpass test with outcome=passed, and keyword: xfail. This is the only example of a test with keyword: xfail that i have seen. 
25 | 26 | - All the above notwithstanding, pytest still marks tests with a final category of one of the following, and then sends to console: 27 | KNOWN_TYPES = ( 28 | "failed", 29 | "passed", 30 | "skipped", 31 | "deselected", 32 | "xfailed", 33 | "xpassed", 34 | "warnings", 35 | "error", 36 | ) 37 | 38 | - Console prints the following sections, delineated with separator line consisting of multiple "=" characters, with information embedded in middle of "=" characters (see terminal.py): 39 | session_start: self.write_sep("=", "test session starts", bold=True) 40 | summary_warnings: self.write_sep("=", "warnings summary", yellow=True, bold=False) 41 | summary_errors: self.write_sep("=", "ERRORS") 42 | summary_failures: self.write_sep("=", "FAILURES") 43 | summary_passes: self.write_sep("=", "PASSES") 44 | summary_stats: self.write_sep("=", msg, fullwidth=fullwidth, **main_markup) 45 | self.write_sep("=", "short test summary info") 46 | 47 | WILL POSSIBLY NEED: 48 | FIRSTLINE 49 | LAST_LINE 50 | WARNINGS_SUMMARY 51 | SHORT_TEST_SUMMARY_INFO 52 | TEST_TEST_SESSION_STARTSS 53 | -------------------------------------------------------------------------------- /pytest_fold/old/tui-hover.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from collections import Counter 3 | from pathlib import Path 4 | from rich.panel import Panel 5 | from rich.text import Text 6 | from textual import events 7 | from textual.app import App 8 | from textual.widget import Widget 9 | from textual.widgets import Header, Footer 10 | from textual.reactive import Reactive 11 | 12 | from ck_widgets_lv import ListViewUo 13 | 14 | from pytest_fold.utils import MARKERS, OUTFILE, sectionize 15 | 16 | 17 | class ResultsData: 18 | """ 19 | Class to read in results from a 'pytest --fold' session (which inserts markers 20 | around each failed test), and sectionize the results into individual sections for 21 | display on the TUI 22 
| """ 23 | 24 | def __init__(self, path: Path = OUTFILE) -> None: 25 | self.results_file = path 26 | self.sections = [] 27 | self.parsed_sections = [] 28 | 29 | def _sectionize_results(self) -> None: 30 | with open(self.results_file, "r") as results_file: 31 | results_lines = results_file.readlines() 32 | self.sections = sectionize(results_lines) 33 | 34 | def get_results(self) -> list: 35 | self._sectionize_results() 36 | return self.sections 37 | 38 | 39 | class Hover(Widget): 40 | mouse_over = Reactive(False) 41 | folded = Reactive(False) 42 | 43 | def __init__(self, size: tuple = (0, 0), text: str = "") -> None: 44 | super().__init__(size) 45 | self.text = text 46 | self.panel = Panel(self.text) 47 | self.folded = True 48 | self.collapsed_size = 3 49 | self.full_size = Counter(self.text.plain)["\n"] + 5 50 | 51 | def render(self) -> Panel: 52 | return Panel( 53 | self.text, 54 | style=("italic" if not self.mouse_over else "bold"), 55 | height=self.collapsed_size if self.folded else self.full_size, 56 | ) 57 | 58 | def on_enter(self) -> None: 59 | self.mouse_over = True 60 | 61 | def on_click(self) -> None: 62 | self.folded = not (self.folded) 63 | 64 | def on_leave(self) -> None: 65 | self.mouse_over = False 66 | 67 | 68 | class HoverApp(App): 69 | async def on_load(self, event: events.Load) -> None: 70 | await self.bind("t", "view.toggle('topbar')", "Pytest Fold") 71 | await self.bind("q", "quit", "Quit") 72 | 73 | async def on_mount(self) -> None: 74 | await self.view.dock(Header(), edge="top", size=1) 75 | await self.view.dock(Footer(), edge="bottom") 76 | 77 | sections = ResultsData().get_results() 78 | hovers = [ 79 | Hover(text=Text.from_ansi(section["content"])) for section in sections 80 | ] 81 | await self.view.dock(ListViewUo(widgets=hovers, edge="top")) 82 | 83 | 84 | def main(): 85 | HoverApp.run(log="textual.log") 86 | # HoverApp.run(css_file="tuit.css", watch_css=True, log="textual.log") 87 | 88 | 89 | if __name__ == "__main__": 90 | main() 91 | # 
92 | -------------------------------------------------------------------------------- /pytest_fold/old/tuit2.py: -------------------------------------------------------------------------------- 1 | import re 2 | from pathlib import Path 3 | 4 | from rich.console import RenderableType 5 | from rich.text import Text 6 | from rich import print 7 | from rich.panel import Panel 8 | from rich.style import Style 9 | 10 | from textual import events 11 | from textual.app import App 12 | from textual.reactive import Reactive 13 | 14 | from textual.views import DockView 15 | from textual.widgets import ( 16 | Header, 17 | Footer, 18 | TreeControl, 19 | ScrollView, 20 | TreeClick, 21 | Placeholder, 22 | ) 23 | 24 | from pytest_fold.utils import MARKERS, OUTFILE, sectionize 25 | 26 | 27 | class ResultsData: 28 | """ 29 | Class to read in results from a 'pytest --fold' session (which inserts markers 30 | around each failed test), and sectionize the results into individual sections for 31 | display on the TUI. Relies on utils.py. 
32 | """ 33 | 34 | def __init__(self, path: Path = OUTFILE) -> None: 35 | self.results_file = path 36 | self.sections = [] 37 | self.parsed_sections = [] 38 | 39 | def _sectionize_results(self) -> None: 40 | with open(self.results_file, "r") as results_file: 41 | results_lines = results_file.readlines() 42 | self.sections = sectionize(results_lines) 43 | 44 | def get_results(self) -> list: 45 | self._sectionize_results() 46 | return self.sections 47 | 48 | def get_results_dict(self) -> dict: 49 | self.results = self.get_results() 50 | d = {} 51 | for section in self.results: 52 | if section["test_title"]: 53 | d[section["test_title"]] = section["content"] 54 | else: 55 | d[section["name"]] = section["content"] 56 | return d 57 | 58 | 59 | class PytestFoldApp(App): 60 | 61 | results = ResultsData().get_results_dict() 62 | 63 | async def on_load(self, event: events.Load) -> None: 64 | await self.bind("b", "view.toggle('sidebar')", "Toggle sidebar") 65 | await self.bind("q", "quit", "Quit") 66 | 67 | async def on_mount(self) -> None: 68 | footer_title = re.sub("=", "", self.results["LASTLINE"]) 69 | await self.view.dock(Header(tall=False), edge="top", size=1) 70 | await self.view.dock(Footer(), edge="bottom") 71 | 72 | self.body = ScrollView() 73 | self.dock_view = DockView() 74 | self.placeholder = Placeholder() 75 | 76 | tree = TreeControl("pytest --fold", {}) 77 | for results_key in self.results.keys(): 78 | await tree.add(tree.root.id, results_key, {"results": self.results}) 79 | await tree.root.expand() 80 | 81 | await self.view.dock(ScrollView(tree), edge="left", size=48, name="sidebar") 82 | await self.view.dock(self.dock_view) 83 | await self.dock_view.dock(self.body, edge="top", size=48) 84 | 85 | async def handle_tree_click(self, message: TreeClick[dict]) -> None: 86 | """Called in response to a tree click.""" 87 | label = self.text = message.node.label 88 | self.text = message.node.data.get("results")[label] 89 | 90 | text: RenderableType 91 | text = 
Text.from_ansi(self.text) 92 | await self.body.update(text) 93 | 94 | 95 | def main(): 96 | PytestFoldApp(title="pytest --fold results").run() 97 | 98 | 99 | if __name__ == "__main__": 100 | main() 101 | -------------------------------------------------------------------------------- /pytest_fold/tui_textual2.py: -------------------------------------------------------------------------------- 1 | from rich.console import RenderableType 2 | from rich.text import Text 3 | from textual import events 4 | from textual.app import App 5 | from textual.views import DockView 6 | from textual.widgets import Header, Footer, TreeControl, ScrollView, TreeClick 7 | from pytest_fold.utils import Results 8 | 9 | TREE_WIDTH = 30 10 | 11 | SECTIONS = { 12 | "PASSES": "bold green underline", 13 | "FAILURES": "bold red underline", 14 | "ERRORS": "bold magenta underline", 15 | "WARNINGS_SUMMARY": "bold yellow underline", 16 | } 17 | 18 | CATEGORIES = { 19 | "PASSES": "bold green underline", 20 | "FAILURES": "bold red underline", 21 | "ERRORS": "bold magenta underline", 22 | "SKIPPED": "bold cyan underline", 23 | "XFAILS": "bold indian_red underline", 24 | "XPASSES": "bold chartreuse1 underline", 25 | } 26 | 27 | 28 | class PytestFoldApp(App): 29 | async def on_load(self, event: events.Load) -> None: 30 | # Load results from OUTFILE; bind actions to heaader/footer widgets 31 | self.test_results = Results() 32 | self.summary_results = self.test_results.Sections["LAST_LINE"].content.replace( 33 | "=", "" 34 | ) 35 | self.unmarked_output = self.test_results.unmarked_output 36 | self.marked_output = self.test_results.marked_output 37 | await self.bind("b", "view.toggle('sidebar')", "Toggle Tree") 38 | await self.bind("q", "quit", "Quit") 39 | await self.bind("~", None, f"{self.summary_results}") 40 | 41 | async def on_mount(self) -> None: 42 | # Create and dock header and footer widgets 43 | header = Header(style="bold white on black") 44 | header.title = self.summary_results 45 | await 
self.view.dock(header, edge="top", size=1) 46 | 47 | footer = Footer() 48 | await self.view.dock(footer, edge="bottom") 49 | 50 | tree = TreeControl("TEST RESULTS:", {}) 51 | 52 | for category in CATEGORIES: 53 | category_text = Text(category) 54 | category_text.stylize(CATEGORIES[category]) 55 | await tree.add( 56 | tree.root.id, 57 | category_text, 58 | {"results": eval(f"self.test_results.tests_{category.lower()}")}, 59 | ) 60 | for testname in eval(f"self.test_results.tests_{category.lower()}"): 61 | _test_text = Text(testname) 62 | _test_text.stylize("italic") 63 | await tree.add(tree.root.id, _test_text, {}) 64 | 65 | await tree.root.expand() 66 | 67 | # Create and dock the results header tree, and individual results 68 | self.body = ScrollView() 69 | self.dock_view = DockView() 70 | await self.view.dock( 71 | ScrollView(tree), edge="left", size=TREE_WIDTH, name="sidebar" 72 | ) 73 | await self.view.dock(self.dock_view) 74 | await self.dock_view.dock(self.body, edge="top") 75 | 76 | async def handle_tree_click(self, message: TreeClick[dict]) -> None: 77 | # Display results in body when category header is clicked; 78 | # but don't try processing the category titles 79 | label = message.node.label 80 | if label.plain in CATEGORIES: 81 | return 82 | 83 | for category in CATEGORIES: 84 | try: 85 | test_category = f"tests_{category.lower()}" 86 | self.text = eval( 87 | f"self.test_results.{test_category}[message.node.label.plain]" 88 | ) 89 | except: 90 | pass 91 | 92 | text: RenderableType 93 | text = Text.from_ansi(self.text) 94 | await self.body.update(text) 95 | 96 | 97 | def main(): 98 | app = PytestFoldApp() 99 | app.run() 100 | 101 | 102 | if __name__ == "__main__": 103 | main() 104 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # NOTE: THIS REPO DEPRECATED. USE [pytest-tui](https://github.com/jeffwright13/pytest-tui) INSTEAD. 
2 | 3 | # pytest-fold 4 | ## A Pytest plugin to make console output more manageable 5 | 6 | ### Using PyTermTk: 7 | ![output2](https://user-images.githubusercontent.com/4308435/162344632-552f1284-51a4-46c4-b389-0352636f65bb.gif) 8 | 9 | 10 | ### ...using Textual TUI: 11 | ![ezgif com-gif-maker](https://user-images.githubusercontent.com/4308435/154848960-391fd62f-4859-4d2b-8d03-9f55f4b04cad.gif) 12 | 13 | ## Introduction 14 | Do you run long Pytest campaigns and get lots of failures? And then spend the next 10 minutes scrolling back in your console to find the one traceback that you're interested in drilling down into? Well, maybe `pytest-fold` can help. `pytest-fold` is a plugin that captures the output from your Pytest test runs, then automatically launches an interactive Text User Interface (TUI) where all your test results are categorized by (a) outcome [Pass|Fail|Error|Skipped|Xpass|Xfail], and (b) output section [Summary|Full|Errors|Passes|Failures|Warnings]. The intent is to make it easier for you to find the specific result you want so you can examine it without all the other results getting in your way. 15 | 16 | ## Features 17 | - Choice of two TUIs: Textual and PyTermTk 18 | - Ability to immediately launch TUIs with existing data using console scripts 19 | - ANSI text markup support - whatever the output on your console looks like is how things are going to show up in the TUI 20 | - Mouse and keyboard support (including scrolling) 21 | - Support for all output formats/modes: 22 | - `-v`, `-vv`, `--no-header`, `--showlocals`, `--color=` 23 | - all variants of `--tb` except "native" 24 | - Support for other, simple output-manipulating plugins: 25 | - `pytest-clarity` 26 | - `pytest-emoji` 27 | - `pytest-icdiff` 28 | - etc. 
29 | - Not supported: plugins that take over the console in other ways, like 30 | - `pytest-sugar` 31 | - `pytest-emoji-output` 32 | 33 | ## Requirements 34 | - Pytest >= 6.2.5 35 | - Python >= 3.8 36 | 37 | ## Installation 38 | `pip install pytest-fold` 39 | 40 | ## Usage 41 | 42 | From top-level directory: 43 | 44 | * `pytest --fold` 45 | 46 | Or, if you want to get technical about it: 47 | 48 | * `pytest --fold [--fold-tui textual1|textual2|pytermtk|none] ` 49 | 50 | See 'pytest --help' for more info. 51 | 52 | To quit the Textual TUI, either click the Quit button, or press `Q`. To quit the PyTermTk TUI, click the Quit button in the upper right. 53 | 54 | If you have already exited the TUI and would like to re-enter it with the same data generated from the last Pytest run, simply type: 55 | 56 | * `termtxt` (to launch Textual) 57 | * `termtk` (to launch PyTermTk) 58 | 59 | You can also run with the `--fold` option enabled but bypass auto-launch of the TUI with the `--ft=n` option. 60 | 61 | ## Known Limitations / Issues 62 | - Rudimentary user interfaces that need a lot of love: 63 | - Textual interface can be slow, esp. if run within an IDE 64 | - PyTermTk interface sometimes gets corrupted if resized 65 | - Not fully tested with all combinations of output formats. Probably some use-cases where things won't work 100% right. 66 | - `pytest-fold` does not mark stderr or stdout sections for folding. It is assumed that the tester is interested in seeing such output. 67 | - `pytest-fold` is currently incompatible with `--tb=native` and will cause an INTERNALERROR if run together. (TODO: Fix this.) 68 | 69 | ## Contributing 70 | Contributions are very welcome. If you are slick with user interfaces, I would love some help there. 71 | Please run pyflakes and black on any code before submitting a PR. 72 | 73 | ## License 74 | Distributed under the terms of the `MIT`_ license, "pytest-fold" is free and open source software. 
75 | 76 | ## Issues 77 | If you encounter any problems, have feedback or requests, or anything else, please [file an issue](https://github.com/jeffwright13/pytest-fold/issues/new), along with a detailed description. 78 | -------------------------------------------------------------------------------- /pytest_fold/stash/tuit.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from rich.console import RenderableType 3 | from rich.text import Text 4 | from textual import events 5 | from textual.app import App 6 | from textual.views import DockView 7 | from textual.widgets import Header, TreeControl, ScrollView, TreeClick 8 | from pytest_fold.utils import OUTFILE, sectionize, Results 9 | 10 | TREE_WIDTH = 30 11 | SECTIONS = { 12 | "FIRSTLINE": "bold blue underline", 13 | "FAILURES": "bold red underline", 14 | "ERRORS": "bold magenta underline", 15 | "WARNINGS_SUMMARY": "bold yellow underline", 16 | "TERMINAL_SUMMARY": "bold green underline", 17 | "LASTLINE": "bold blue underline", 18 | } 19 | 20 | 21 | class ResultsData: 22 | """ 23 | Class to read in results from a 'pytest --fold' session (which inserts markers 24 | around each failed test), and sectionize the results into individual sections for 25 | display on the TUI. Relies on utils.py. 
26 | """ 27 | 28 | def __init__(self, path: Path = OUTFILE) -> None: 29 | self.results_file = path 30 | self.sections = [] 31 | self.parsed_sections = [] 32 | 33 | def _sectionize_results(self) -> None: 34 | with open(self.results_file, "r") as results_file: 35 | results_lines = results_file.readlines() 36 | self.sections = sectionize(results_lines) 37 | 38 | def get_results(self) -> list: 39 | self._sectionize_results() 40 | return self.sections 41 | 42 | def get_results_dict(self) -> dict: 43 | self.results = self.get_results() 44 | d = {} 45 | for section in self.results: 46 | if section["test_title"]: 47 | d[section["test_title"]] = section["content"] 48 | else: 49 | d[section["name"]] = section["content"] 50 | return d 51 | 52 | 53 | class FoldApp(App): 54 | """ 55 | Textual class inherited from App 56 | Provides docking and data population for test session headers and results 57 | """ 58 | 59 | async def on_load(self, event: events.Load) -> None: 60 | # Load results from OUTFILE; bind actions to heaader/footer widgets 61 | self.results = ResultsData().get_results_dict() 62 | self.summary_text = ( 63 | Text.from_ansi(self.results["LASTLINE"]).markup.replace("=", "").strip() 64 | ) 65 | await self.bind("b", "view.toggle('sidebar')", "Toggle sidebar") 66 | await self.bind("q", "quit", "Quit") 67 | 68 | async def on_mount(self) -> None: 69 | # Create and dock header and footer widgets 70 | self.title = self.summary_text 71 | header1 = Header(tall=False, style="white on black underline") 72 | header2 = Header(tall=False, style="white on black", clock = False) 73 | await self.view.dock(header1, edge="top", size=1) 74 | await self.view.dock(header2, edge="bottom", size=1) 75 | 76 | # Stylize the results-tree section headers 77 | tree = TreeControl("SESSION RESULTS:", {}) 78 | for results_key in self.results.keys(): 79 | await tree.add(tree.root.id, Text(results_key), {"results": self.results}) 80 | for k, v in SECTIONS.items(): 81 | if 
tree.nodes[tree.id].label.plain == k: 82 | tree.nodes[tree.id].label.stylize(v) 83 | continue 84 | else: 85 | tree.nodes[tree.id].label.stylize("italic") 86 | await tree.root.expand() 87 | 88 | # Create and dock the results header tree, and individual results 89 | self.body = ScrollView() 90 | self.sections = DockView() 91 | await self.view.dock( 92 | ScrollView(tree), edge="left", size=TREE_WIDTH, name="sidebar" 93 | ) 94 | await self.view.dock(self.sections) 95 | await self.sections.dock(self.body, edge="top") 96 | 97 | async def handle_tree_click(self, message: TreeClick[dict]) -> None: 98 | # Display results in body when section header is clicked 99 | label = message.node.label 100 | self.text = message.node.data.get("results")[label._text[0]] 101 | text: RenderableType 102 | text = Text.from_ansi(self.text) 103 | await self.body.update(text) 104 | 105 | 106 | def main(): 107 | app = FoldApp() 108 | app.run() 109 | 110 | 111 | if __name__ == "__main__": 112 | main() 113 | -------------------------------------------------------------------------------- /pytest_fold/tui_pytermtk.py: -------------------------------------------------------------------------------- 1 | from pytest_fold.utils import OUTCOMES, Results 2 | 3 | import TermTk as ttk 4 | 5 | 6 | class TkTui: 7 | def __init__(self) -> None: 8 | self.test_results = Results() 9 | self.summary_results = ( 10 | self.test_results.Sections["LAST_LINE"] 11 | .content.replace("=", "") 12 | .replace("\n", "") 13 | ) 14 | 15 | # Create root TTk object 16 | self.root = ttk.TTk(layout=ttk.TTkGridLayout()) 17 | 18 | def create_top_frame(self) -> None: 19 | self.top_frame = ttk.TTkFrame( 20 | border=True, 21 | layout=ttk.TTkHBoxLayout(), 22 | ) 23 | self.top_label = ttk.TTkLabel( 24 | parent=self.top_frame, text=ttk.TTkString(self.summary_results) 25 | ) 26 | self.root.layout().addWidget(self.top_frame, 0, 0) 27 | 28 | def create_quit_button(self) -> None: 29 | self.quit_button = ttk.TTkButton(text="Quit", border=True, 
maxSize=(6, 3)) 30 | self.quit_button.clicked.connect(self.root.quit) 31 | self.root.layout().addWidget(self.quit_button, 0, 1) 32 | 33 | def create_tab_widget(self) -> None: 34 | # Create tabs with results from individual sections 35 | self.tab_widget = ttk.TTkTabWidget(border=False) 36 | # self.tab_widget.setPadding(3, 0, 0, 0) 37 | self.root.layout().addWidget(self.tab_widget, 1, 0, 1, 2) 38 | 39 | def create_section_tabs(self) -> None: 40 | text = ( 41 | self.test_results.Sections["TEST_SESSION_STARTS"].content 42 | + self.test_results.Sections["SHORT_TEST_SUMMARY"].content 43 | ) 44 | tab_label = "Summary" 45 | text_area = ttk.TTkTextEdit(parent=self.tab_widget) 46 | # text_area.lineWrapMode == TTkK.WidgetWidth 47 | text_area.setText(text) 48 | text_areas = {tab_label: text_area} 49 | self.tab_widget.addTab(text_area, f" {tab_label} ") 50 | 51 | text = self.test_results.unmarked_output 52 | tab_label = "Full Output" 53 | text_area = ttk.TTkTextEdit(parent=self.tab_widget) 54 | text_areas[tab_label] = text_area 55 | text_area.setText(text) 56 | self.tab_widget.addTab(text_area, f" {tab_label} ") 57 | 58 | # text = self.test_results.Sections["PASSES_SECTION"].content 59 | # tab_label = "Passes Section" 60 | # text_area = ttk.TTkTextEdit(parent=self.tab_widget) 61 | # text_area.setText(text) 62 | # text_areas[tab_label] = text_area 63 | # self.tab_widget.addTab(text_area, f" {tab_label}") 64 | 65 | # text = self.test_results.Sections["FAILURES_SECTION"].content 66 | # tab_label = "Failures Section" 67 | # text_area = ttk.TTkTextEdit(parent=self.tab_widget) 68 | # text_area.setText(text) 69 | # text_areas[tab_label] = text_area 70 | # self.tab_widget.addTab(text_area, f" {tab_label}") 71 | 72 | text = self.test_results.Sections["ERRORS_SECTION"].content 73 | tab_label = "Errors" 74 | text_area = ttk.TTkTextEdit(parent=self.tab_widget) 75 | text_area.setText(text) 76 | text_areas[tab_label] = text_area 77 | self.tab_widget.addTab(text_area, f" {tab_label} ") 78 | 
79 | text = self.test_results.Sections["WARNINGS_SUMMARY"].content 80 | tab_label = "Warnings" 81 | text_area = ttk.TTkTextEdit(parent=self.tab_widget) 82 | text_area.setText(text) 83 | text_areas[tab_label] = text_area 84 | self.tab_widget.addTab(text_area, f" {tab_label} ") 85 | 86 | def create_test_result_tabs(self) -> None: 87 | # Create tabs with results from individual sections 88 | 89 | for outcome in OUTCOMES: 90 | tab_label = outcome 91 | 92 | results_list = ttk.TTkList() 93 | results_view = ttk.TTkTextEdit() 94 | results_view.setLineWrapMode(ttk.TTkK.WidgetWidth) 95 | results_view.setWordWrapMode(ttk.TTkK.WrapAnywhere) 96 | 97 | @ttk.pyTTkSlot(str) 98 | def callback( 99 | test_name: str, rlist=results_list, rview=results_view 100 | ) -> None: 101 | ttk.TTkLog.info(f"Clicked test: {test_name}") 102 | rview.clear() 103 | for label in rlist.selectedLabels(): 104 | rview.append(self.test_results.tests_all[label]) 105 | 106 | width = 10 107 | for result in eval(f"self.test_results.tests_{outcome.lower()}"): 108 | results_list.addItem(result) 109 | results_list.textClicked.connect(callback) 110 | width = max(width, len(result)) 111 | 112 | results_splitter = ttk.TTkSplitter() 113 | results_splitter.addWidget(results_list, width) 114 | results_splitter.addWidget(results_view) 115 | 116 | self.tab_widget.addTab(results_splitter, f" {tab_label} ") 117 | 118 | 119 | def main(): 120 | tui = TkTui() 121 | 122 | tui.create_top_frame() 123 | tui.create_quit_button() 124 | tui.create_tab_widget() 125 | tui.create_section_tabs() 126 | tui.create_test_result_tabs() 127 | 128 | tui.root.mainloop() 129 | 130 | 131 | if __name__ == "__main__": 132 | main() 133 | -------------------------------------------------------------------------------- /pytest_fold/old/tui_asciimatics.py: -------------------------------------------------------------------------------- 1 | from collections import Counter 2 | from pathlib import Path 3 | from asciimatics.exceptions import 
ResizeScreenError, StopApplication 4 | from asciimatics.event import KeyboardEvent 5 | from asciimatics.parsers import AnsiTerminalParser 6 | from asciimatics.screen import Screen 7 | from asciimatics.scene import Scene 8 | from asciimatics.widgets import Frame, TextBox, Layout, CheckBox, Button 9 | from pytest_fold.utils import MARKERS, OUTFILE, sectionize 10 | 11 | 12 | DEBUG = True 13 | 14 | 15 | class ResultsData: 16 | """ 17 | Class to read in results from a 'pytest --fold' session (which inserts markers 18 | around each failed test), and sectionize the results into individual sections for 19 | display on the TUI 20 | """ 21 | 22 | def __init__(self, path: Path = OUTFILE) -> None: 23 | self.results_file = path 24 | self.sections = [] 25 | self.parsed_sections = [] 26 | 27 | def _sectionize_results(self) -> None: 28 | with open(self.results_file, "r") as results_file: 29 | results_lines = results_file.readlines() 30 | self.sections = sectionize(results_lines) 31 | 32 | def get_results(self) -> list: 33 | self._sectionize_results() 34 | return self.sections 35 | 36 | 37 | class ResultsLayout(Layout): 38 | """ 39 | This Layout handles both folded and unfolded results. 
There are two columns: 40 | 1) a checkbox (height:1) to fold/unfold the results textbox 41 | 2) a textbox (height:[1 | N]) to display data; height:1 => "folded" results, 42 | height:N => "unfolded" results 43 | """ 44 | 45 | def __init__( 46 | self, 47 | screen: Screen, 48 | folded: bool = True, 49 | textboxheight: int = 4, 50 | value: str = "No data!", 51 | ) -> None: 52 | # 4 to make room for '[ ]' plus a space; -6 to offset from end of line in frame 53 | super(ResultsLayout, self).__init__(columns=[4, screen.width - 6]) 54 | self.screen = screen 55 | self.textboxheight = textboxheight 56 | self.value = value 57 | self.folded = folded 58 | self.parser = AnsiTerminalParser() 59 | 60 | def add_widgets(self) -> None: 61 | cb = CheckBox(text="", on_change=self._toggle_checkbox) 62 | self.add_widget(cb, column=0) 63 | ht = 1 if self.folded else self.textboxheight 64 | tb = TextBox( 65 | height=ht, 66 | line_wrap=False, 67 | readonly=True, 68 | as_string=True, 69 | parser=self.parser, 70 | ) 71 | tb.value = self.value[: self.screen.width] if self.folded else self.value 72 | self.add_widget(tb, column=1) 73 | 74 | def _toggle_checkbox(self) -> None: 75 | self.folded = not self.folded 76 | self.clear_widgets() 77 | self.add_widgets() 78 | self._frame.fix() 79 | 80 | 81 | class QuitterLayout(Layout): 82 | """ 83 | Layout class to quit the whole application 84 | """ 85 | 86 | def __init__(self, screen: Screen) -> None: 87 | # 4 to make room for '[ ]' plus a space; -6 to offset from end of line in frame 88 | super(QuitterLayout, self).__init__(columns=[4, screen.width - 6]) 89 | 90 | def add_widgets(self) -> None: 91 | self.add_widget(Button(text="Quit (Ctrl-X)", on_click=self._quit), 1) 92 | 93 | def _quit(self) -> None: 94 | raise StopApplication("User requested exit by clicking 'Quit'") 95 | 96 | 97 | class ResultsFrame(Frame): 98 | """ 99 | Asciimatics Frame class to display layouts & their widgets 100 | """ 101 | 102 | def __init__(self, screen: Screen) -> None: 103 | 
super(ResultsFrame, self).__init__( 104 | screen=screen, 105 | height=screen.height, 106 | width=screen.width, 107 | can_scroll=True, 108 | hover_focus=True, 109 | ) 110 | 111 | # Snarf data from results file, sectionize, then add Layout for the resulting 112 | # sections to the ResultsFrame 113 | results_data = ResultsData() 114 | sections = results_data.get_results() 115 | 116 | for section in sections: 117 | c = Counter(section["content"]) 118 | 119 | if section["name"] in [ 120 | MARKERS["pytest_fold_test_session_starts"], 121 | MARKERS["pytest_fold_failures_section"], 122 | MARKERS["pytest_fold_last_line"], 123 | ]: 124 | # Unfolded layouts: first & last sections, and "--- FAILURES ---" banner 125 | self.add_layout( 126 | ResultsLayout( 127 | screen=screen, 128 | folded=False, 129 | textboxheight=c["\n"] + 1, 130 | value=section["content"], 131 | ) 132 | ) 133 | else: 134 | # Individual folded layouts, one per failure section from Pytest run 135 | self.add_layout( 136 | ResultsLayout( 137 | screen=screen, 138 | folded=True, 139 | textboxheight=c["\n"] + 1, 140 | value=section["content"], 141 | ) 142 | ) 143 | 144 | # Last layout sections is the Quitter 145 | self.add_layout(QuitterLayout(screen)) 146 | 147 | # Add widgets to all layouts (needs to be done after layouts are added to frame) 148 | for layout in self._layouts: 149 | layout.add_widgets() 150 | 151 | # Set color theme; fix the layouts and calculate locations of all widgets 152 | self.set_theme("monochrome") 153 | self.fix() 154 | 155 | 156 | def global_shortcuts(event): 157 | # Event handler for global keys, used here to quit app with Ctrl keys 158 | if isinstance(event, KeyboardEvent): 159 | code = event.key_code 160 | # Stop on ctrl+q or ctrl+x 161 | if code in (17, 24): 162 | raise StopApplication(f"User terminated app with {code}") 163 | 164 | 165 | def demo(screen: Screen, scene: Scene) -> None: 166 | scenes = [Scene([ResultsFrame(screen)], duration=-1)] 167 | screen.play( 168 | scenes, 169 | 
stop_on_resize=True, 170 | start_scene=scenes[0], 171 | allow_int=True, 172 | unhandled_input=global_shortcuts, 173 | ) 174 | 175 | 176 | def main(): 177 | last_scene = None 178 | while True: 179 | try: 180 | Screen.wrapper(demo, catch_interrupt=True, arguments=[last_scene]) 181 | quit() 182 | except ResizeScreenError as e: 183 | last_scene = e.scene 184 | 185 | 186 | if __name__ == "__main__": 187 | main() 188 | -------------------------------------------------------------------------------- /tests/test_pytest_fold_1.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import faker 3 | import logging 4 | import random 5 | import sys 6 | import warnings 7 | 8 | LOG_LEVELS = ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL") 9 | logger = logging.getLogger() 10 | logger.setLevel(logging.NOTSET) 11 | logger.propagate = True 12 | stdout_handler = logging.StreamHandler(sys.stdout) 13 | logger.addHandler(stdout_handler) 14 | logging.getLogger("faker").setLevel(logging.ERROR) 15 | 16 | 17 | @pytest.fixture 18 | def error_fixture(): 19 | assert 0 20 | 21 | 22 | def test_a_ok(): 23 | print("ok") 24 | 25 | 26 | def test_b_fail(): 27 | assert 0 28 | 29 | 30 | def test_c_error(error_fixture): 31 | pass 32 | 33 | 34 | def test_d_skip(): 35 | pytest.skip("skipping this test") 36 | 37 | 38 | def test_e_xfail(): 39 | pytest.xfail("xfailing this test") 40 | 41 | 42 | @pytest.mark.xfail(reason="always xfail") 43 | def test_f_xpass(): 44 | pass 45 | 46 | 47 | @pytest.mark.parametrize("test_input, expected", [("3+5", 8), ("2+4", 6), ("6*9", 42)]) 48 | def test_g_eval_parameterized(test_input, expected): 49 | assert eval(test_input) == expected 50 | 51 | 52 | @pytest.fixture 53 | def log_testname(): 54 | logger.info(f"Running test {__name__}...") 55 | logger.info("Setting test up...") 56 | logger.info("Executing test...") 57 | # logger.info(faker.Faker().text(random.randint(50, 200))) 58 | logger.info("Tearing test down...") 59 | 60 
| 61 | def fake_data(min: int = 30, max: int = 120) -> str: 62 | return faker.Faker().text(random.randint(min, max)) 63 | 64 | 65 | def test_1_passes_and_has_logging_output(log_testname): 66 | logger.critical(fake_data()) 67 | logger.error(fake_data()) 68 | logger.warning(fake_data()) 69 | logger.info(fake_data()) 70 | logger.debug(fake_data()) 71 | assert True 72 | 73 | 74 | def test_2_fails_and_has_logging_output(log_testname): 75 | logger.critical(fake_data()) 76 | logger.error(fake_data()) 77 | logger.warning(fake_data()) 78 | logger.info(fake_data()) 79 | logger.debug(fake_data()) 80 | assert 0 == 1 81 | 82 | 83 | def test_3_fails(log_testname): 84 | assert 0 85 | 86 | 87 | def test_4_passes(log_testname): 88 | assert True 89 | 90 | 91 | @pytest.mark.skip 92 | def test_5_marked_SKIP(log_testname): 93 | assert 1 94 | 95 | 96 | @pytest.mark.xfail 97 | def test_6_marked_xfail_but_passes(log_testname): 98 | assert 1 99 | 100 | 101 | @pytest.mark.xfail 102 | def test_7_marked_xfail_and_fails(log_testname): 103 | assert 0 104 | 105 | 106 | # Method and its test that causes warnings 107 | def api_v1(log_testname): 108 | warnings.warn(UserWarning("api v1, should use functions from v2")) 109 | return 1 110 | 111 | 112 | def test_8_causes_a_warning(log_testname): 113 | assert api_v1() == 1 114 | 115 | 116 | # # These tests are helpful in showing how pytest deals with various types 117 | # # of output (stdout, stderr, log) 118 | def test_9_lorem_fails(capsys): 119 | lorem = """"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. 
120 | 121 | Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur? 122 | 123 | At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium voluptatum deleniti atque corrupti quos dolores et quas molestias excepturi sint occaecati cupiditate non provident, similique sunt in culpa qui officia deserunt mollitia animi, id est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio. Nam libero tempore, cum soluta nobis est eligendi optio cumque nihil impedit quo minus id quod maxime placeat facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet ut et voluptates repudiandae sint et molestiae non recusandae. 
Itaque earum rerum hic tenetur a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis doloribus asperiores repellat.""" 124 | print(lorem) 125 | assert False 126 | 127 | 128 | def test_10_fail_capturing(capsys): 129 | print("FAIL this stdout is captured") 130 | print("FAIL this stderr is captured", file=sys.stderr) 131 | logger.warning("FAIL this log is captured") 132 | with capsys.disabled(): 133 | print("FAIL stdout not captured, going directly to sys.stdout") 134 | print("FAIL stderr not captured, going directly to sys.stderr", file=sys.stderr) 135 | logger.warning("FAIL is this log captured?") 136 | print("FAIL this stdout is also captured") 137 | print("FAIL this stderr is also captured", file=sys.stderr) 138 | logger.warning("FAIL this log is also captured") 139 | assert False 140 | 141 | 142 | def test_11_pass_capturing(capsys): 143 | print("\nPASS this stdout is captured") 144 | print("PASS this stderr is captured", file=sys.stderr) 145 | logger.warning("PASS this log is captured") 146 | with capsys.disabled(log_testname): 147 | print( 148 | "PASS stdout not captured (capsys disabled), going directly to sys.stdout" 149 | ) 150 | print( 151 | "PASS stderr not captured (capsys disabled), going directly to sys.stderr", 152 | file=sys.stderr, 153 | ) 154 | logger.warning("is this log captured?") 155 | print("PASS this stdout is also captured") 156 | print("PASS this stderr is also captured", file=sys.stderr) 157 | logger.warning("PASS this log is also captured") 158 | assert True 159 | 160 | 161 | def test_12_fails_and_has_stdout(capsys): 162 | print("this test fails") 163 | assert 0 == 1 164 | 165 | 166 | def test_13_passes_and_has_stdout(capsys): 167 | print("this test passes") # stdout is consumed by pytest 168 | assert True 169 | 170 | 171 | # These 2 tests can intentionally cause an error - useful for testing output of 172 | # folding - if the fixture is commented out, the test throws an error at setup. 
173 | # 174 | # @pytest.fixture() 175 | # def fixture_for_fun(log_testname): 176 | # pass 177 | 178 | 179 | def test_14_causes_error_pass_stderr_stdout_stdlog(fixture_for_fun): 180 | print("PASS this stdout is captured") 181 | print("PASS this stderr is captured", file=sys.stderr) 182 | logger.warning("PASS this log is captured") 183 | assert 1 184 | 185 | 186 | def test_15_causes_error_fail_stderr_stdout_stdlog(fixture_for_fun): 187 | print("FAIL this stdout is captured") 188 | print("FAIL this stderr is captured", file=sys.stderr) 189 | logger.warning("FAIL this log is captured") 190 | assert 0 191 | -------------------------------------------------------------------------------- /pytest_fold/old/tuit.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | from pathlib import Path 3 | from rich.console import RenderableType 4 | from rich.text import Text 5 | from textual import events 6 | from textual.app import App 7 | from textual.views import DockView 8 | from textual.widgets import Header, Footer, TreeControl, ScrollView, TreeClick 9 | from pytest_fold.utils import OUTFILE, PICKLEFILE, sectionize 10 | 11 | TREE_WIDTH = 30 12 | SECTIONS = { 13 | "FIRSTLINE": "bold blue underline", 14 | "FAILURES": "bold red underline", 15 | "ERRORS": "bold magenta underline", 16 | "WARNINGS": "bold yellow underline", 17 | "SUMMARY": "bold green underline", 18 | "LAST_LINE": "bold blue underline", 19 | } 20 | 21 | 22 | class ResultsData: 23 | """ 24 | Class to read in results from a 'pytest --fold' session (which inserts markers 25 | around each failed test), and sectionize the results into individual sections for 26 | display on the TUI. Relies on utils.py. 
27 | """ 28 | 29 | def __init__( 30 | self, results_file_path: Path = OUTFILE, pickle_file_path: Path = PICKLEFILE 31 | ) -> None: 32 | self.results_file = results_file_path 33 | self.pass_file = pickle_file_path 34 | self.sections = [] 35 | self.parsed_sections = [] 36 | 37 | def _sectionize_results(self) -> None: 38 | with open(self.results_file, "r") as results_file: 39 | results_lines = results_file.readlines() 40 | self.sections = sectionize(results_lines) 41 | 42 | def get_results(self) -> list: 43 | self._sectionize_results() 44 | return self.sections 45 | 46 | def get_results_dict(self) -> dict: 47 | self.results = self.get_results() 48 | d = {} 49 | for section in self.results: 50 | if section["test_title"]: 51 | d[section["test_title"]] = section["content"] 52 | else: 53 | d[section["name"]] = section["content"] 54 | return d 55 | 56 | def read_passes(self) -> None: 57 | with open(self.pass_file, "rb") as pass_file: 58 | passes = pickle.load(pass_file) 59 | return passes 60 | 61 | 62 | class FoldFooter(Footer): 63 | # Override default Footer method 'make_key_text' to allow customizations 64 | def make_key_text(self) -> Text: 65 | """Create text containing all the keys.""" 66 | text = Text( 67 | style="bold encircle white on black", 68 | no_wrap=True, 69 | overflow="ellipsis", 70 | justify="center", 71 | end="", 72 | ) 73 | for binding in self.app.bindings.shown_keys: 74 | key_display = ( 75 | binding.key.upper() 76 | if binding.key_display is None 77 | else binding.key_display 78 | ) 79 | hovered = self.highlight_key == binding.key 80 | key_text = Text.assemble( 81 | (f" {key_display} ", "reverse" if hovered else "default on default"), 82 | f" {binding.description} ", 83 | meta={"@click": f"app.press('{binding.key}')", "key": binding.key}, 84 | ) 85 | text.append_text(key_text) 86 | return text 87 | 88 | 89 | class FoldApp(App): 90 | """ 91 | Textual class inherited from App 92 | Provides docking and data population for test session headers and results 93 
    async def on_load(self, event: events.Load) -> None:
        """Bind keys and load parsed results before the UI mounts."""
        # Populate footer with quit and toggle info
        await self.bind("t", "view.toggle('results_tree')", "Toggle Results Tree |")
        await self.bind("q", "quit", "Quit")

        # Load results from OUTFILE; bind actions to header/footer widgets.
        # LAST_LINE holds pytest's final summary line; strip its '=' padding
        # so it can be used as the header/title text.
        results_data = ResultsData()
        self.results = results_data.get_results_dict()
        self.summary_text = (
            Text.from_ansi(self.results["LAST_LINE"]).markup.replace("=", "").strip()
        )

        # Load passed-test capture data (pickled by the plugin) from PICKLEFILE
        self.passes = results_data.read_passes()
tree.nodes[tree.id].label.stylize("encircle red") 137 | await tree.add(tree.root.id, Text("PASSES"), {}) 138 | tree.nodes[tree.id].label.stylize("bold green underline") 139 | for item in self.passes: 140 | await tree.add( 141 | tree.root.id, 142 | Text(item.title), 143 | { 144 | "item": { 145 | "caplog": item.caplog, 146 | "capstderr": item.capstderr, 147 | "capstdout": item.capstdout, 148 | } 149 | }, 150 | ) 151 | tree.nodes[tree.id].label.stylize("cyan") 152 | await tree.root.expand() 153 | 154 | # Create and dock the results tree 155 | self.sections = DockView() 156 | await self.view.dock( 157 | ScrollView(tree), edge="left", size=TREE_WIDTH, name="results_tree" 158 | ) 159 | await self.view.dock(self.sections) 160 | 161 | # Create and dock the test result ('body') view 162 | self.body = ScrollView() 163 | await self.sections.dock(self.body, edge="top") 164 | 165 | async def handle_tree_click(self, message: TreeClick[dict]) -> None: 166 | # Display results in body when section header is clicked 167 | label = message.node.label.plain 168 | if label in ("ERRORS", "FAILURES", "PASSES"): 169 | return 170 | try: 171 | self.text = message.node.data.get("results")[label] 172 | except TypeError: 173 | caplog = message.node.data.get("item")["caplog"] 174 | capstderr = message.node.data.get("item")["capstderr"] 175 | capstdout = message.node.data.get("item")["capstdout"] 176 | self.text = caplog + capstderr + capstdout 177 | if len(self.text) == 0: 178 | self.text = "<>" 179 | except Exception as e: 180 | return 181 | text: RenderableType 182 | text = Text.from_ansi(self.text) 183 | await self.body.update(text) 184 | 185 | 186 | def main(): 187 | # Instantiate app and run it 188 | app = FoldApp() 189 | app.run() 190 | 191 | 192 | if __name__ == "__main__": 193 | main() 194 | -------------------------------------------------------------------------------- /pytest_fold/plugin.py: -------------------------------------------------------------------------------- 1 | import re 
2 | import pickle 3 | import tempfile 4 | import pytest 5 | 6 | from _pytest.config import Config 7 | from _pytest._io.terminalwriter import TerminalWriter 8 | from _pytest.reports import TestReport 9 | from pytest_fold.tui_pytermtk import main as tuitk 10 | from pytest_fold.tui_textual1 import main as tuitxt1 11 | from pytest_fold.tui_textual2 import main as tuitxt2 12 | from pytest_fold.utils import ( 13 | test_session_starts_matcher, 14 | errors_section_matcher, 15 | failures_section_matcher, 16 | warnings_summary_matcher, 17 | passes_section_matcher, 18 | short_test_summary_matcher, 19 | lastline_matcher, 20 | MARKERS, 21 | REPORTFILE, 22 | MARKEDTERMINALOUTPUTFILE, 23 | UNMARKEDTERMINALOUTPUTFILE, 24 | ) 25 | 26 | 27 | # Don't collect tests from any of these files 28 | collect_ignore = [ 29 | "setup.py", 30 | "plugin.py", 31 | ] 32 | 33 | # A list of TestReport objects generated by Pytest during test run. 34 | # Each TestReport represents a single test's operation during one of 35 | # Pytest's three phases: setup | call | teardown 36 | reports = [] 37 | 38 | 39 | def pytest_addoption(parser): 40 | """Define the plugin's option flags as presented by Pytest""" 41 | group = parser.getgroup("fold") 42 | group.addoption( 43 | "--fold", 44 | action="store_true", 45 | help="fold failed test output sections", 46 | ) 47 | group.addoption( 48 | "--fold-tui", 49 | "--ft", 50 | action="store", 51 | default="pytermtk", 52 | help="specify user interface ('pytermtk' ' k' | 'textual1' 't1' | 'textual2' 't2' | 'none' 'n')", 53 | choices=["pytermtk", "k", "textual1", "t1", "textual2", "t2", "none", "n"], 54 | ) 55 | 56 | 57 | def pytest_report_teststatus(report: TestReport, config: Config): 58 | """Construct list(s) of individial TestReport instances""" 59 | reports.append(report) 60 | 61 | 62 | @pytest.hookimpl(trylast=True) 63 | def pytest_configure(config: Config) -> None: 64 | """ 65 | Write console output to a file for use by TUI 66 | This code works by looking at every 
line sent by Pytest to the terminal, 67 | and based on its category, marking or not marking it 68 | """ 69 | config.option.verbose = ( 70 | 1 # force verbose mode for easier parsing of final test results 71 | ) 72 | config.option.reportchars = ( 73 | "A" # force "display all" mode so all results can be shown 74 | ) 75 | 76 | if config.option.fold: 77 | tr = config.pluginmanager.getplugin("terminalreporter") 78 | if tr is not None: 79 | # identify and mark the very first line of terminal output 80 | try: 81 | config._pyfoldfirsttime 82 | except AttributeError: 83 | config._pyfoldfirsttime = True 84 | 85 | config._pyfold_unmarked_outputfile = tempfile.TemporaryFile("wb+") 86 | config._pyfold_marked_outputfile = tempfile.TemporaryFile("wb+") 87 | oldwrite = tr._tw.write 88 | 89 | # identify and mark each results section 90 | def tee_write(s, **kwargs): 91 | if re.search(test_session_starts_matcher, s): 92 | config._pyfold_marked_outputfile.write( 93 | (MARKERS["pytest_fold_test_session_starts"] + "\n").encode( 94 | "utf-8" 95 | ) 96 | ) 97 | 98 | if re.search(errors_section_matcher, s): 99 | config._pyfold_marked_outputfile.write( 100 | (MARKERS["pytest_fold_errors_section"] + "\n").encode("utf-8") 101 | ) 102 | 103 | if re.search(failures_section_matcher, s): 104 | config._pyfold_marked_outputfile.write( 105 | (MARKERS["pytest_fold_failures_section"] + "\n").encode("utf-8") 106 | ) 107 | 108 | if re.search(warnings_summary_matcher, s): 109 | config._pyfold_marked_outputfile.write( 110 | (MARKERS["pytest_fold_warnings_summary"] + "\n").encode("utf-8") 111 | ) 112 | 113 | if re.search(passes_section_matcher, s): 114 | config._pyfold_marked_outputfile.write( 115 | (MARKERS["pytest_fold_passes_section"] + "\n").encode("utf-8") 116 | ) 117 | 118 | if re.search(short_test_summary_matcher, s): 119 | config._pyfold_marked_outputfile.write( 120 | (MARKERS["pytest_fold_short_test_summary"] + "\n").encode( 121 | "utf-8" 122 | ) 123 | ) 124 | 125 | if re.search(lastline_matcher, 
s): 126 | config._pyfold_marked_outputfile.write( 127 | (MARKERS["pytest_fold_last_line"] + "\n").encode("utf-8") 128 | ) 129 | 130 | # Write this line's text along with its markup info to console 131 | oldwrite(s, **kwargs) 132 | 133 | # Mark up this line's text by passing it to an instance of TerminalWriter's 134 | # 'markup' method. Do not pass "flush" to the method or it will throw an error. 135 | s1 = s 136 | kwargs.pop("flush") if "flush" in kwargs.keys() else None 137 | s1 = TerminalWriter().markup(s, **kwargs) 138 | 139 | # Encode the marked up line so it can be written to the config object. 140 | # The Pytest config object can be used by plugins for conveying staeful 141 | # info across an entire test run session. 142 | if isinstance(s1, str): 143 | marked_up = s1.encode("utf-8") 144 | config._pyfold_marked_outputfile.write(marked_up) 145 | 146 | # Write this line's original (unmarked) text to unmarked file 147 | s_orig = s 148 | kwargs.pop("flush") if "flush" in kwargs.keys() else None 149 | s_orig = TerminalWriter().markup(s, **kwargs) 150 | if isinstance(s_orig, str): 151 | unmarked_up = s_orig.encode("utf-8") 152 | config._pyfold_unmarked_outputfile.write(unmarked_up) 153 | 154 | # Write to both terminal/console and tempfiles: 155 | # _pyfold_marked_outputfile, _pyfold_unmarked_outputfile 156 | tr._tw.write = tee_write 157 | 158 | 159 | def pytest_unconfigure(config: Config): 160 | """ 161 | Write terminal and test results info to files for use by TUI 162 | """ 163 | # Write terminal output to file 164 | if hasattr(config, "_pyfold_marked_outputfile"): 165 | # get terminal contents, then write file 166 | config._pyfold_marked_outputfile.seek(0) 167 | markedsessionlog = config._pyfold_marked_outputfile.read() 168 | config._pyfold_marked_outputfile.close() 169 | 170 | if hasattr(config, "_pyfold_unmarked_outputfile"): 171 | # get terminal contents, then write file 172 | config._pyfold_unmarked_outputfile.seek(0) 173 | unmarkedsessionlog = 
config._pyfold_unmarked_outputfile.read() 174 | config._pyfold_unmarked_outputfile.close() 175 | 176 | # Undo our patching in the terminal reporter 177 | config.pluginmanager.getplugin("terminalreporter") 178 | 179 | # Write marked-up results to file 180 | with open(MARKEDTERMINALOUTPUTFILE, "wb") as marked_file: 181 | marked_file.write(markedsessionlog) 182 | 183 | # Write un-marked-up results to file 184 | with open(UNMARKEDTERMINALOUTPUTFILE, "wb") as unmarked_file: 185 | unmarked_file.write(unmarkedsessionlog) 186 | 187 | # Write the reports list to file 188 | with open(REPORTFILE, "wb") as report_file: 189 | pickle.dump(reports, report_file) 190 | 191 | # Launch the TUI 192 | if config.getoption("--fold") == True: 193 | pyfold_tui(config) 194 | 195 | 196 | def pyfold_tui(config: Config) -> None: 197 | """ 198 | Final code invocation after Pytest run has completed. 199 | This method calls the Pyfold TUI to display final results. 200 | """ 201 | # disable capturing while TUI runs to avoid error `redirected stdin is pseudofile, has 202 | # no fileno()`; adapted from https://githubmemory.com/repo/jsbueno/terminedia/issues/25 203 | if not config.getoption("--fold"): 204 | return 205 | capmanager = config.pluginmanager.getplugin("capturemanager") 206 | try: 207 | capmanager.suspend_global_capture(in_=True) 208 | finally: 209 | if config.getoption("--ft") in ["k", "pytermtk"]: 210 | tuitk() 211 | elif config.getoption("--ft") in ["t1", "textual1"]: 212 | tuitxt1() 213 | elif config.getoption("--ft") in ["t2", "textual2"]: 214 | tuitxt2() 215 | elif config.getoption("--ft") not in ["n", "none"]: 216 | print(f"Incorrect choice for fold-tui: {config.getoption('--ft')}") 217 | capmanager.resume_global_capture() 218 | -------------------------------------------------------------------------------- /pytest_fold/tui_textual1.py: -------------------------------------------------------------------------------- 1 | from rich.console import RenderableType 2 | from rich.text 
# ---------------------------------------------------------------------------
# pytest_fold/tui_textual1.py
#
# Textual (v0.1.x) TUI: renders the marked-up pytest-fold terminal output as
# collapsible per-outcome trees plus a scrollable detail pane.
# ---------------------------------------------------------------------------
from rich.console import RenderableType
from rich.text import Text
from textual import events
from textual.app import App
from textual import messages
from textual.views import DockView, GridView
from textual.widgets import Header, Footer, TreeControl, ScrollView, TreeClick
from pytest_fold.utils import Results


class FoldFooter(Footer):
    # Override default Footer method 'make_key_text' to allow customizations
    def make_key_text(self) -> Text:
        """Create text containing all the keys."""
        text = Text(
            style="bold encircle white on black",
            no_wrap=True,
            overflow="ellipsis",
            justify="center",
            end="",
        )
        # One clickable "<KEY> <description>" chunk per shown key binding
        for binding in self.app.bindings.shown_keys:
            key_display = (
                binding.key.upper()
                if binding.key_display is None
                else binding.key_display
            )
            # Invert the key cap's colors while the mouse hovers over it
            hovered = self.highlight_key == binding.key
            key_text = Text.assemble(
                (f" {key_display} ", "reverse" if hovered else "default on default"),
                f" {binding.description} ",
                meta={"@click": f"app.press('{binding.key}')", "key": binding.key},
            )
            text.append_text(key_text)
        return text


class FoldApp(App):
    """
    Textual class inherited from App
    Provides docking and data population for test session headers and results
    """

    async def action_toggle_tree(self, names: list) -> None:
        # Show or hide the named result tree(s); accepts one name or a list
        # self.trees = {child.name: child for child in self.children}
        if type(names) == str:
            names = [names]
        for name in names:
            widget = self.view.named_widgets[
                name
            ]  # <= self here is View; see end of view.py
            widget.visible = not widget.visible  # <= 'visible' is attr on Widget class
        # Ask textual to re-run layout so the toggled widgets (dis)appear
        await self.post_message(messages.Layout(self))

    async def on_load(self, event: events.Load) -> None:
        # Populate footer with quit and toggle info
        await self.bind("u", "toggle_tree('unmarked')", "Unmarked Output ⁞")
        await self.bind("1", "toggle_tree('summary')", "Summary ⁞")
        await self.bind("e", "toggle_tree('error_tree')", "Error ⁞")
        await self.bind("f", "toggle_tree('fail_tree')", "Fail ⁞")
        await self.bind("p", "toggle_tree('pass_tree')", "Pass ⁞")
        await self.bind("s", "toggle_tree('skip_tree')", "Skipped ⁞")
        await self.bind("y", "toggle_tree('xpass_tree')", "Xpass ⁞")
        await self.bind("z", "toggle_tree('xfail_tree')", "Xfail ⁞")
        await self.bind(
            "a",
            "toggle_tree(['unmarked', 'summary', 'error_tree', 'pass_tree', 'fail_tree', 'skip_tree', 'xpass_tree', 'xfail_tree'])",
            "Toggle All ⁞",
        )
        await self.bind("q", "quit", "Quit")

        # Get test result sections (parses the files plugin.py wrote at exit)
        self.test_results = Results()
        # Strip the '=' banner padding off pytest's final summary line so it
        # can serve as the window/header title
        self.summary_results = self.test_results.Sections["LAST_LINE"].content.replace(
            "=", ""
        )
        self.unmarked_output = self.test_results.unmarked_output
        self.marked_output = self.test_results.marked_output
        print("")

    async def on_mount(self) -> None:
        # Create and dock header and footer widgets
        self.title = self.summary_results
        header1 = Header(style="bold white on black")
        header1.title = self.summary_results
        await self.view.dock(header1, edge="top", size=1)
        footer = FoldFooter()
        await self.view.dock(footer, edge="bottom")

        # Stylize the results-tree section headers
        self.fail_tree = TreeControl(
            Text("Failures:", style="bold red underline"),
            {"results": self.test_results.Sections["FAILURES_SECTION"].content},
            name="fail_tree",
        )
        self.pass_tree = TreeControl(
            Text("Passes:", style="bold green underline"), {}, name="pass_tree"
        )
        self.error_tree = TreeControl(
            Text("Errors:", style="bold magenta underline"), {}, name="error_tree"
        )
        self.skip_tree = TreeControl(
            Text("Skips:", style="bold red underline"), {}, name="skip_tree"
        )
        self.xpass_tree = TreeControl(
            Text("Xpasses:", style="bold green underline"), {}, name="xpass_tree"
        )
        self.xfail_tree = TreeControl(
            Text("Xfails:", style="bold magenta underline"), {}, name="xfail_tree"
        )
        self.unmarked = TreeControl(
            Text("Full Output", style="dark_slate_gray2 underline"),
            {"results": self.test_results.unmarked_output},
            name="unmarked",
        )
        self.summary = TreeControl(
            Text("Summary", style="bold white underline"),
            {"results": self.test_results.Sections["TEST_SESSION_STARTS"].content},
            name="summary",
        )

        # Add one leaf node per test; each node's data carries the per-outcome
        # {testname: result-text} dict so clicks can look the text up by label
        for failed in self.test_results.tests_failures:
            await self.fail_tree.add(
                self.fail_tree.root.id,
                Text(failed),
                {"results": self.test_results.tests_failures},
            )
        for passed in self.test_results.tests_passes:
            await self.pass_tree.add(
                self.pass_tree.root.id,
                Text(passed),
                {"results": self.test_results.tests_passes},
            )
        for errored in self.test_results.tests_errors:
            await self.error_tree.add(
                self.error_tree.root.id,
                Text(errored),
                {"results": self.test_results.tests_errors},
            )
        for skipped in self.test_results.tests_skipped:
            await self.skip_tree.add(
                self.skip_tree.root.id,
                Text(skipped),
                {"results": self.test_results.tests_skipped},
            )
        for xpassed in self.test_results.tests_xpasses:
            await self.xpass_tree.add(
                self.xpass_tree.root.id,
                Text(xpassed),
                {"results": self.test_results.tests_xpasses},
            )
        for xfailed in self.test_results.tests_xfails:
            await self.xfail_tree.add(
                self.xfail_tree.root.id,
                Text(xfailed),
                {"results": self.test_results.tests_xfails},
            )

        # Start with every tree expanded
        await self.fail_tree.root.expand()
        await self.pass_tree.root.expand()
        await self.error_tree.root.expand()
        await self.skip_tree.root.expand()
        await self.xpass_tree.root.expand()
        await self.xfail_tree.root.expand()
        await self.unmarked.root.expand()
        await self.summary.root.expand()

        # Create and dock the results trees; each gets just enough rows for
        # its nodes plus a 2-line border/header allowance
        await self.view.dock(
            ScrollView(self.summary),
            edge="top",
            size=len(self.summary.nodes) + 2,
            name="summary",
        )
        await self.view.dock(
            ScrollView(self.pass_tree),
            edge="top",
            size=len(self.pass_tree.nodes) + 2,
            name="pass_tree",
        )
        await self.view.dock(
            ScrollView(self.fail_tree),
            edge="top",
            size=len(self.fail_tree.nodes) + 2,
            name="fail_tree",
        )
        await self.view.dock(
            ScrollView(self.error_tree),
            edge="top",
            size=len(self.error_tree.nodes) + 2,
            name="error_tree",
        )
        await self.view.dock(
            ScrollView(self.skip_tree),
            edge="top",
            size=len(self.skip_tree.nodes) + 2,
            name="skip_tree",
        )
        await self.view.dock(
            ScrollView(self.xfail_tree),
            edge="top",
            size=len(self.xfail_tree.nodes) + 2,
            name="xfail_tree",
        )
        await self.view.dock(
            ScrollView(self.xpass_tree),
            edge="top",
            size=len(self.xpass_tree.nodes) + 2,
            name="xpass_tree",
        )
        await self.view.dock(
            ScrollView(self.unmarked),
            edge="top",
            size=len(self.unmarked.nodes) + 2,
            name="unmarked",
        )

        self.dockview = DockView()
        self.gridview = GridView()
        await self.view.dock(self.dockview)

        # Create and dock the test result ('body') view
        self.body = ScrollView()
        self.body.border = 1
        self.body.border_style = "green"
        await self.dockview.dock(self.body, edge="right")

    async def handle_tree_click(self, message: TreeClick[dict]) -> None:
        label = message.node.label.plain

        # Click the category headers to toggle on/off (future;
        # right now, just ignore those clicks)
        if label in (
            "Failures:",
            "Passes:",
            "Errors:",
            "Skipped:",
            "Xpasses:",
            "Xfails:",
        ):
            return

        # Display results when test name is clicked; the two root nodes carry
        # their full text directly, leaf nodes are looked up by test name
        if "Full Output" in label or "Summary" in label:
            self.text = message.node.data.get("results")
        else:
            self.text = message.node.data.get("results")[label]
        print("")

        # Render the ANSI-encoded result text into the detail pane
        text: RenderableType
        text = Text.from_ansi(self.text)
        await self.body.update(text)


def main():
    """Entry point: construct and run the pytest-fold TUI app."""
    app = FoldApp()
    app.run()


if __name__ == "__main__":
    main()


# ---------------------------------------------------------------------------
# pytest_fold/utils.py
#
# Parsing helpers: section regexes, fold markers, and the result-aggregation
# classes consumed by the TUIs.
# ---------------------------------------------------------------------------
import itertools
import re
import pickle
from dataclasses import dataclass
from pathlib import Path
from strip_ansi import strip_ansi
from typing import Match, Pattern

# Files generated by plugin.py
REPORTFILE = Path.cwd() / "report_objects.bin"
MARKEDTERMINALOUTPUTFILE = Path.cwd() / "marked_output.bin"
UNMARKEDTERMINALOUTPUTFILE = Path.cwd() / "unmarked_output.bin"

# regex matching patterns for Pytest sections
test_session_starts_matcher = re.compile(r"^==.*\stest session starts\s==+")
errors_section_matcher = re.compile(r"^==.*\sERRORS\s==+")
failures_section_matcher = re.compile(r"^==.*\sFAILURES\s==+")
warnings_summary_matcher = re.compile(r"^==.*\swarnings summary\s.*==+")
passes_section_matcher = re.compile(r"^==.*\sPASSES\s==+")
short_test_summary_matcher = re.compile(r"^==.*\sshort test summary info\s.*==+")
lastline_matcher = re.compile(r"^==.*in\s\d+.\d+s.*=+")
# These two pick failed/passed test names out of ANSI-colored banner lines
ansi_failed_test_name_matcher = re.compile(r"\x1b\[31m\x1b\[1m__+\W(\S+)\W__+\x1b\[0m")
ansi_passed_test_name_matcher = re.compile(r"\x1b\[32m\x1b\[1m__+\W(\S+)\W__+\x1b\[0m")
section_name_matcher = re.compile(r"~~>PYTEST_FOLD_(\w+)")
standard_test_matcher = re.compile(
    r".*\::(\S+)\s(PASSED|FAILED|ERROR|SKIPPED|XFAIL|XPASS)"
)
# (continues the live-log regex matcher definitions)
live_log_testname_matcher = re.compile(r".*::(\S+)", re.MULTILINE)
live_log_outcome_matcher = re.compile(
    r"^(PASSED|FAILED|ERROR|SKIPPED|XFAIL|XPASS)\W.+(\[\W?.*?\])", re.MULTILINE
)

from dataclasses import field  # noqa: E402  (file-top dataclasses import lacks `field`)

# Sentinel strings that plugin.py writes into the captured terminal stream to
# delimit each Pytest output section; MarkedSections parses them back out
MARKERS = {
    "pytest_fold_test_session_starts": "~~>PYTEST_FOLD_TEST_SESSION_STARTS<~~",
    "pytest_fold_errors_section": "~~>PYTEST_FOLD_ERRORS_SECTION<~~",
    "pytest_fold_failures_section": "~~>PYTEST_FOLD_FAILURES_SECTION<~~",
    "pytest_fold_warnings_summary": "~~>PYTEST_FOLD_WARNINGS_SUMMARY<~~",
    "pytest_fold_passes_section": "~~>PYTEST_FOLD_PASSES_SECTION<~~",
    "pytest_fold_short_test_summary": "~~>PYTEST_FOLD_SHORT_TEST_SUMMARY<~~",
    "pytest_fold_last_line": "~~>PYTEST_FOLD_LAST_LINE<~~",
}


# Outcome category labels displayed by the TUIs
OUTCOMES = (
    "Failures",
    "Passes",
    "Errors",
    "Skipped",
    "Xfails",
    "Xpasses",
)


@dataclass
class SectionInfo:
    """Info relevant to each Pytest output section"""

    name: str = ""
    label: str = ""
    # Fix: a compiled regex is a Pattern, not a Match (original annotation);
    # None until a matcher is assigned
    matcher: Pattern = None
    content: str = ""
    outcome: str = ""


@dataclass
class TestInfo:
    """Info relevant for a single test"""

    title: str = ""
    category: str = ""
    outcome: str = ""
    caplog: str = ""
    capstderr: str = ""
    capstdout: str = ""
    text: str = ""
    # Fix: the original default was the tuple `()` despite the `set`
    # annotation; mutable defaults require default_factory
    keywords: set = field(default_factory=set)
class Results:
    """
    This class holds all pertinent information for a given Pytest test run.
    """

    def __init__(self):
        # Raw pytest TestReport objects, accumulated by _process_reports()
        self.reports = []
        # {test_title: ANSI-encoded traceback text}; populated by
        # _get_test_results(). (Fix: the original re-assigned this to {} at
        # the *end* of __init__, clobbering the tracebacks gathered below.)
        self.failed_tracebacks = {}

        self.Sections = self._init_sections()
        self.unmarked_output = self._get_unmarked_output()
        self.marked_output = MarkedSections(self.Sections)
        self.test_results = self._get_test_results()

        # This code presents categorized test results
        self._categorize_tests()
        self._update_testinfo_category()

        self.tests_errors = self._get_result_by_outcome("ERROR")
        self.tests_passes = self._get_result_by_outcome("PASSED")
        self.tests_failures = self._get_result_by_outcome("FAILED")
        self.tests_skipped = self._get_result_by_outcome("SKIPPED")
        self.tests_xfails = self._get_result_by_outcome("XFAIL")
        self.tests_xpasses = self._get_result_by_outcome("XPASS")

        # Single {testname: result-text} dict spanning every outcome
        self.tests_all = {}
        self.tests_all.update(self.tests_errors)
        self.tests_all.update(self.tests_passes)
        self.tests_all.update(self.tests_failures)
        self.tests_all.update(self.tests_skipped)
        self.tests_all.update(self.tests_xfails)
        self.tests_all.update(self.tests_xpasses)

    def _init_sections(self) -> dict:
        """Initialize SectionInfo dataclass instances, one per Pytest output section."""
        return {
            "TEST_SESSION_STARTS": SectionInfo(
                name="TEST_SESSION_STARTS",
                label="Session Start",
                matcher=test_session_starts_matcher,
            ),
            "ERRORS_SECTION": SectionInfo(
                name="ERRORS_SECTION",
                label="Errors",
                matcher=errors_section_matcher,
                outcome="",
            ),
            "FAILURES_SECTION": SectionInfo(
                name="FAILURES_SECTION",
                label="Failures",
                matcher=failures_section_matcher,
            ),
            "WARNINGS_SUMMARY": SectionInfo(
                name="WARNINGS_SUMMARY",
                label="Warnings",
                matcher=warnings_summary_matcher,
            ),
            "PASSES_SECTION": SectionInfo(
                name="PASSES_SECTION", label="Passes", matcher=passes_section_matcher
            ),
            "SHORT_TEST_SUMMARY": SectionInfo(
                name="SHORT_TEST_SUMMARY",
                label="Short Test Summary",
                matcher=short_test_summary_matcher,
            ),
            "LAST_LINE": SectionInfo(
                name="LAST_LINE", label=None, matcher=lastline_matcher
            ),
        }

    def _get_unmarked_output(
        self, unmarked_file_path: Path = UNMARKEDTERMINALOUTPUTFILE
    ) -> str:
        """Return the full Pytest terminal output as one string.

        (Fix: return annotation was `list`; file.read() yields a str. The
        file content was encoded as UTF-8 by plugin.py's tee_write, hence
        the explicit encoding.)
        """
        with open(unmarked_file_path, "r", encoding="utf-8") as umfile:
            return umfile.read()

    def _get_test_results(self) -> list:
        """
        Process TestReport objects from Pytest output; remove duplicates;
        extract ANSI-encoded traceback info for failures.
        """
        self.failed_tracebacks = self._get_tracebacks(
            "FAILURES_SECTION", ansi_failed_test_name_matcher
        )
        self.passed_tracebacks = self._get_tracebacks(
            "PASSES_SECTION", ansi_passed_test_name_matcher
        )
        processed_reports = self._process_reports()
        # De-duplicate by title, keeping the last TestInfo seen for each
        return list({item.title: item for item in processed_reports}.values())

    def _get_tracebacks(self, section_name: str, regex: Pattern) -> dict:
        """Get ANSI-coded traceback text for each test in the given section,
        as a dictionary: {'test_title': 'ansi-encoded traceback text'}."""
        testname = ""
        tracebacks = {}

        lines = re.split("\n", self.Sections[section_name].content)
        for line in lines:
            result = re.search(regex, line)
            if result:
                # A new test's banner line: start collecting under its name
                testname = result.groups()[0]
                tracebacks[testname] = ""
            else:
                # Lines before the first banner belong to no test; skip them
                if not testname:
                    continue
                tracebacks[testname] = tracebacks[testname] + "\n" + line

        return tracebacks

    def _process_reports(self) -> list:
        """Extract individual test results from full list of Pytest's TestReport instances"""
        test_infos = []
        for report in self._unpickle():
            test_info = TestInfo()
            self.reports.append(report)

            # populate the TestInfo instance with pertinent data from report
            test_info.outcome = report.outcome
            test_info.caplog = report.caplog
            test_info.capstderr = report.capstderr
            test_info.capstdout = report.capstdout
            test_info.title = report.head_line
            test_info.keywords = set(report.keywords)

            test_infos.append(test_info)
        return test_infos
    def _update_testinfo_category(self):
        """Attach ANSI-coded traceback text to each failed test's TestInfo."""
        # NOTE(review): itertools.product pairs every report with every
        # test_info without matching them by title; the gating conditions all
        # read test_info, so the pairing looks harmless — but confirm that
        # `report.when == "call"` is really meant to apply to any report.
        for report, test_info in itertools.product(self.reports, self.test_results):
            # for failed test cases, we want the ANSI coded output, not longreprtext;
            # longreprtext has no ANSI codes and all text will be rendered w/o markup
            if (
                test_info.category == "FAILED"
                and report.when == "call"
                and test_info.title in self.failed_tracebacks
            ):
                test_info.text = self.failed_tracebacks[test_info.title]
            # if test_info.category == "PASSED" and report.when == "call" and test_info.title in self.passed_tracebacks:
            #     test_info.text = self.passed_tracebacks[test_info.title]

    def _update_test_result_by_testname(self, title: str, result: str) -> None:
        """Assign `result` as the category of every TestInfo whose title matches."""
        for test_result in self.test_results:
            if title == test_result.title:
                test_result.category = result
240 | """ 241 | look_for_live_log_outcome = False 242 | 243 | for line in self.Sections["TEST_SESSION_STARTS"].content.split("\n"): 244 | stripped_line = strip_ansi(line).rstrip() 245 | 246 | # Start out by looking for non-live-log results 247 | standard_match = re.search(standard_test_matcher, stripped_line) 248 | if standard_match: 249 | title = standard_match.groups()[0] 250 | outcome = standard_match.groups()[1] 251 | if title and outcome: 252 | self._update_test_result_by_testname(title, outcome) 253 | title = outcome = None 254 | continue 255 | 256 | # If the line doesn't match non-live-log format, look for live-log matches; 257 | # outcomes and testnames in separate searches 258 | live_log_testname_match = re.search( 259 | live_log_testname_matcher, stripped_line 260 | ) 261 | if live_log_testname_match: 262 | title = live_log_testname_match.groups()[0].strip() 263 | look_for_live_log_outcome = True 264 | continue 265 | 266 | live_log_outcome_match = re.search(live_log_outcome_matcher, stripped_line) 267 | if look_for_live_log_outcome and live_log_outcome_match: 268 | outcome = live_log_outcome_match.groups()[0].strip() 269 | look_for_live_log_outcome = False 270 | self._update_test_result_by_testname(title, outcome) 271 | title = outcome = None 272 | 273 | def _get_result_by_outcome(self, outcome: str) -> None: 274 | # dict of {testname: log+stderr+stdout) for each test, per-outcome 275 | if outcome == "FAILED": 276 | return { 277 | test_result.title: test_result.text 278 | for test_result in self.test_results 279 | if test_result.category == outcome 280 | } 281 | else: 282 | return { 283 | test_result.title: test_result.text 284 | + test_result.caplog 285 | + test_result.capstderr 286 | + test_result.capstdout 287 | for test_result in self.test_results 288 | if test_result.category == outcome 289 | } 290 | 291 | def _unpickle(self): 292 | """Unpack pickled Pytest TestReport objects from file""" 293 | with open(REPORTFILE, "rb") as rfile: 294 | return 
    def _unpickle(self):
        """Unpack pickled Pytest TestReport objects from file"""
        # NOTE: pickle.load is only safe here because REPORTFILE was written
        # by this same plugin during the run; never point it at untrusted data
        with open(REPORTFILE, "rb") as rfile:
            return pickle.load(rfile)


class MarkedSections:
    """
    This class processes a Pytest output file that has been marked by pytest-fold,
    and identifies its sections. Pytest defines the following possible sections in
    its console output (not all show by default; they are dictated by option settings,
    e.g. in pytest.ini, on cmd line, etc.):

    "=== test session starts ==="
    "=== ERRORS ==="
    "=== FAILURES ==="
    "=== warnings summary ==="
    "=== PASSES ==="
    "=== short test summary info ==="
    "==== failed passed skipped xfailed xpassed warnings errors in 1.23s ==="
    """

    def __init__(
        self, Sections: dict, marked_file_path: Path = MARKEDTERMINALOUTPUTFILE
    ) -> None:
        # `Sections` is the caller's {name: SectionInfo} dict; _sectionize
        # mutates it in place, so the caller sees the parsed content too
        self.Sections = Sections
        self._marked_lines = self._get_marked_lines(marked_file_path)
        self._sections = self._sectionize(self._marked_lines)
        # (stray debug print retained)
        print("")

    def get_section(self, name: str) -> SectionInfo:
        # return marked section, or if not found (e.g. didn't occur in output),
        # return a blank SectionInfo with no section content
        if name in self.Sections:
            return next(
                (section for section in self._sections if name == section.name),
                SectionInfo(),
            )
        else:
            raise NameError(f"Cannot retrieve section by name: '{name}'")
didn't occur in output), 323 | # return blank dict w/ /no section content 324 | if name in self.Sections: 325 | return next( 326 | (section for section in self._sections if name == section.name), 327 | SectionInfo(), 328 | ) 329 | else: 330 | raise NameError(f"Cannot retrieve section by name: '{name}'") 331 | 332 | def _get_marked_lines( 333 | self, marked_file_path: Path = MARKEDTERMINALOUTPUTFILE 334 | ) -> list: 335 | """Return a list of all lines from the marked output file""" 336 | with open(MARKEDTERMINALOUTPUTFILE, "r") as mfile: 337 | return mfile.readlines() 338 | 339 | def _line_is_a_marker(self, line: str) -> bool: 340 | """Determine if the current line is a marker, or part of Pytest output""" 341 | return ( 342 | line.strip() 343 | in ( 344 | MARKERS["pytest_fold_test_session_starts"], 345 | MARKERS["pytest_fold_errors_section"], 346 | MARKERS["pytest_fold_failures_section"], 347 | MARKERS["pytest_fold_passes_section"], 348 | MARKERS["pytest_fold_warnings_summary"], 349 | MARKERS["pytest_fold_short_test_summary"], 350 | MARKERS["pytest_fold_last_line"], 351 | ) 352 | if line.strip() 353 | else False 354 | ) 355 | 356 | def _sectionize(self, lines: list) -> dict: 357 | """ 358 | Parse marked lines from test run console output; 359 | build dictionary of SectionInfo objects 360 | """ 361 | section_name = "" 362 | 363 | for line in lines: 364 | if self._line_is_a_marker(line): 365 | if MARKERS["pytest_fold_last_line"] in line: 366 | continue 367 | section_name = re.search(section_name_matcher, line).groups()[0] 368 | self.Sections[section_name].content = r"" 369 | elif section_name: 370 | self.Sections[section_name].content += line 371 | self.Sections["LAST_LINE"].content = lines[-1] 372 | return self.Sections 373 | --------------------------------------------------------------------------------