├── .gitignore
├── .isort.cfg
├── .pre-commit-config.yaml
├── CHANGELOG.md
├── LICENSE
├── README.md
├── conftest.py
├── demo-tests
│   ├── conftest.py
│   ├── html_post_process.py
│   ├── test_0.py
│   ├── test_1.py
│   ├── test_2.py
│   ├── test_basic.py
│   ├── test_class.py
│   ├── test_errors.py
│   ├── test_fold_pytesthtml.py
│   ├── test_fold_regex.py
│   ├── test_hoefling.py
│   ├── test_issue_1004.py
│   ├── test_logging.py
│   ├── test_random_results.py
│   ├── test_regex.py
│   ├── test_rerun_fixed.py
│   ├── test_rerun_random.py
│   ├── test_single_xpass_xfail.py
│   ├── test_sleep.py
│   ├── test_warnings.py
│   └── test_xpass_xfail.py
├── log_config.py
├── misc
│   ├── RELEASE_INSTRUCTIONS
│   └── outcome_questions.txt
├── noxfile.py
├── pytest.ini
├── pytest_tui
│   ├── __init__.py
│   ├── html_gen.py
│   ├── log_experiments
│   │   ├── debug_context.py
│   │   ├── debug_html_logger.py
│   │   ├── foldable_loggers.py
│   │   ├── test_debug_logger_html.py
│   │   ├── test_me.py
│   │   └── tui_logger.py
│   ├── plugin.py
│   ├── resources
│   │   ├── scripts.js
│   │   └── styles.css
│   ├── stuff
│   │   ├── __main__.py
│   │   ├── devnotes.md
│   │   ├── nonprintable_characters.md
│   │   ├── nonprintable_characters.txt
│   │   └── tui_regexes_npc.txt
│   ├── tree_control.py
│   ├── tui_gen.py
│   └── utils.py
├── reqts
│   ├── requirements-dev.in
│   ├── requirements-dev.txt
│   ├── requirements.in
│   └── requirements.txt
├── setup.cfg
├── setup.py
├── test_error.py
└── testing
    ├── bash
    │   ├── test.sh
    │   └── tui_expect.tcl
    ├── pytester
    │   ├── examples
    │   │   ├── example_regex.txt
    │   │   ├── test_0.py
    │   │   ├── test_empty.py
    │   │   └── test_pass.py
    │   ├── ideas.md
    │   ├── test_plugin_options.py
    │   └── test_tui_with_pytester.py
    ├── python
    │   ├── conftest.ini
    │   └── test_pytest_tui.py
    ├── robot
    │   ├── Resources
    │   │   ├── common.resource
    │   │   └── vars.py
    │   └── Tests
    │       ├── 001_test_basic.robot
    │       ├── 002_test_tui.robot
    │       └── 003_test_tuih.robot
    └── sb
        ├── conftest.py
        └── test_html_report.py


/.gitignore:
--------------------------------------------------------------------------------
 1 | # Pytest-tui
 2 | *.bin
 3 | console_output.fold
 4 | console_output.pickle
 5 | textual.log
 6 | 
 7 | # Byte-compiled / optimized / DLL files
 8 | __pycache__/
 9 | *.py[cod]
10 | *$py.class
11 | 
12 | # C extensions
13 | *.so
14 | 
15 | # Distribution / packaging
16 | .Python
17 | env/
18 | venv*/
19 | .venv*/
20 | build/
21 | develop-eggs/
22 | dist/
23 | downloads/
24 | eggs/
25 | .eggs/
26 | lib/
27 | lib64/
28 | parts/
29 | sdist/
30 | var/
31 | *.egg-info/
32 | .installed.cfg
33 | *.egg
34 | 
35 | # PyInstaller
36 | # Usually these files are written by a python script from a template
37 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
38 | *.manifest
39 | *.spec
40 | 
41 | # Installer logs
42 | pip-log.txt
43 | pip-delete-this-directory.txt
44 | 
45 | # Unit test / coverage reports
46 | htmlcov/
47 | .tox/
48 | .coverage
49 | .coverage.*
50 | .cache
51 | nosetests.xml
52 | coverage.xml
53 | *.cover
54 | .hypothesis/
55 | 
56 | # Robot Framework
57 | log.html
58 | report.html
59 | output.xml
60 | 
61 | # Pytest and plugins
62 | .pytest_cache
63 | report.html
64 | tui_files/
65 | 
66 | # Translations
67 | *.mo
68 | *.pot
69 | 
70 | # Django stuff:
71 | *.log
72 | local_settings.py
73 | 
74 | # Flask instance folder
75 | instance/
76 | 
77 | # Sphinx documentation
78 | docs/_build/
79 | 
80 | # MkDocs documentation
81 | /site/
82 | 
83 | # PyBuilder
84 | target/
85 | 
86 | # IPython Notebook
87 | .ipynb_checkpoints
88 | 
89 | # pyenv
90 | .python-version
91 | 
92 | # Sublime projects/workspaces
93 | *.sublime-*
94 | 
95 | # IDEs
96 | .vscode/
97 | .idea/
98 | 


--------------------------------------------------------------------------------
/.isort.cfg:
--------------------------------------------------------------------------------
1 | [settings]
2 | profile=black
3 | 


--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 |   - repo: https://github.com/psf/black
3 |     rev: 22.10.0
4 |     hooks:
5 |       - id: black
6 |         args: [--preview]
7 |         additional_dependencies: ['click==8.0.4']
8 |         language_version: python3
9 | 


--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
  1 | # Changelog
  2 | 
  3 | All notable changes to this project will be documented in this file.
  4 | 
  5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
  6 | and this project tries to adhere to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
  7 | 
  8 | ## UNRELEASED
  9 | 
 10 | ## [2.1.0] - 2023-12-07
 11 | - Fixed minor issue with the start/stop regex folding algorithm, where the start and stop markers were being printed in the output.
 12 | - Added foldable output to individual test cases.
 13 | - Fixed issue 110, where the tui_files/ folder was getting created all over the place.
 14 | - Changed the `requirements/` directory to `reqts/` to avoid confusion with the `requirements.txt` file.
 15 | 
 16 | ## [2.0.0] - 2023-06-12
 17 | - Removed the `--tui-fold-level` option; it is replaced by the modified `--tui-regexfile` implementation, which allows regex(es) to be specified in a file rather than on the command line.
 18 | - Fixed Issue 100 (obviated by the change in fold behavior above).
 19 | - Changed the output files folder to ./tui_files.
 20 | - Added new buttons to the About tab to remove/restore/invert colors.
 21 | - Introduced tests that exercise the plugin itself, using pytest's 'pytester' fixture.
 22 | - New buttons in the HTML report to invert/remove/restore colors.
 23 | - Added the Faker library to the standard dependencies.
 24 | 
 25 | ## [1.10.0] - 2023-04-12
 26 | - Added new folding mode `--tui-fold-regex`. This mode allows specifying regexes marking the beginning and end of a section that the user wants to fold. See the README for details.
 27 | - Bumped the `rich` library version to 12.6.0 to allow use of the SeleniumBase library during testing.
 28 | - Fixed issue 100, where if the user did not specify the `--tui-fold-level` option, the HTML report would not render individual test cases correctly (they wouldn't open/close when clicked). `--tui-fold-level` is still supported, but now if the user does not specify it, the level defaults to WARNING, and the new Folded Output section is displayed anyway.
 29 | - Added comprehensive HTML page testing using SeleniumBase.
 30 | - Fixed minor issue with the bash test script not installing the faker lib.
 31 | - Fixed exception issue when specifying a non-default output filename for the HTML report.
 32 | 
 33 | ## [1.9.1] - 2023-03-30
 34 | - Re-implemented the folding feature for the HTML report. This version doesn't rely on the user having to do anything with their tests other than smartly partition their log statements into the proper debug levels for their application (i.e. no clunky logfile shenanigans). The folding feature automatically folds all consecutive log statements that are below a configurable level. This is controlled with the new command line option '--tui-fold-level'. Also, there is a new 'Actions' button in the HTML which folds/unfolds all fold sections.
 35 | - Fixed sticky issue with topbar.
 36 | - Pinned requirements with pip-tools.
 37 | 
 38 | ## [1.9.0] - 2023-03-20
 39 | - Added initial implementation for foldable HTML lines (see docstring on class TuiLogger in tui_logger.py for details). This feature is experimental.
 40 | - Minor improvements to HTML report "About" page.
 41 | 
 42 | ## [1.8.0] - 2023-01-30
 43 | - New command-line option `--tui-reportfile`, which allows the user to specify the name of the HTML file produced when the console script `tuih` is run.
 44 | 
 45 | ## [1.7.2] - 2023-01-29
 46 | - Fixed Issue 94 (KeyError when user env has no JAVA_HOME).
 47 | 
 48 | ## [1.7.1] - 2023-01-28
 49 | - Fixed Issue 84 (dataclass exception with Python 3.11).
 50 | 
 51 | ## [1.7.0] - 2023-01-12
 52 | 
 53 | - Implemented internal tracking of Rerun tests.
 54 | - Added Rerun test sections to the HTML page.
 55 | - Added duration to the existing start and stop times for the test session.
 56 | - Changed look 'n' feel of the About page in the HTML report (uses accordion buttons now).
 57 | - Moved initialization of pytest-tui-specific Config object attributes from `pytest_sessionstart` to `pytest_cmdline_main`, as that method seems to be visited by the code earlier. This is to prevent an AttributeError seen while testing the latest build: "AttributeError: 'Config' object has no attribute '_tui_test_results'."
 58 | - Internal implementation of pickling now uses a single file for TestReport and Section data. The pickled data is in the form of a dict, and also has some date/time info in it.
 59 | - Tweaked and formatted a bunch of the tests in /demo-tests.
 60 | ## [1.6.1] - 2022-09-23
 61 | 
 62 | - Added a '*' .gitignore to tui_files/ so that when people run pytest --tui in other directories they don't see the tui_files/ dir.
 63 | 
 64 | ## [1.6.0] - 2022-09-20
 65 | 
 66 | - Addressed issue 63:
 67 |   - Fixed tuih and tui console scripts not running
 68 |   - Removed autolaunch feature (TUI and HTML pages must now be launched by the user using 'tui' or 'tuih')
 69 |   - Removed config script 'tuiconf'
 70 | - Pinned dependencies.
 71 | - Added pytest-metadata to dependencies.
 72 | - Removed bullet from dependencies.
 73 | 
 74 | ## [1.5.0] - 2022-09-16
 75 | 
 76 | - Added "All Tests" results tab.
 77 | - Removed "nth child" CSS code which was masking result button highlighting on hover.
 78 | - Added start_time and outcome to each test result button in HTML.
 79 | 
 80 | ## [1.4.3] - 2022-09-14
 81 | 
 82 | - Fixed yet another crash issue when `pytest` was invoked by itself: `INTERNALERROR> AttributeError: 'Config' object has no attribute '_tui_test_results'`
 83 | - Removed some unnecessary imports in setup.py
 84 | 
 85 | ## [1.4.2] - 2022-09-14
 86 | 
 87 | - Fixed issue 66: refactored globals in plugin.py to reside within the pytest Config object, rendering impossible the previous weirdness where attempts were made to close files that either didn't exist or were already closed.
 88 | 
 89 | ## [1.4.1] - 2022-09-07
 90 | 
 91 | - Fixed issue where if pytest was invoked only with the `--version` flag, a `ResourceWarning: unclosed file` message was generated.
 92 | 
 93 | ## [1.4.0] - 2022-09-04
 94 | 
 95 | - Opened up HTML real estate with a dropdown containing console output sections (which are presumably lesser-used).
 96 | - Made autolaunch False by default for both TUI and HTML.
 97 | 
 98 | ## [1.3.3] - 2022-09-04
 99 | 
100 | - Fixed error msg re: open file at end of run when pytest is run w/o the --tui option.
101 | - Fixed persistent non-wrapping `<pre>` text in HTML output.
102 | 
103 | ## [1.3.2] - 2022-08-31
104 | 
105 | - Fixed issue where 'passes_section' was being rendered even if no Passed testcases.
106 | 
107 | ## [1.3.1] - 2022-08-27
108 | 
109 | - Tweaked colors.
110 | - Cleaned up CSS a bit.
111 | 
112 | ## [1.3.0] - 2022-08-27
113 | 
114 | - Changed to output HTML as one file, with all included CSS and JS content. This makes it portable when sharing results files.
115 | - Removed unused "Reruns" section. Reruns are still supported, just not broken out individually. This is more in line with how pytest treats the Reruns section anyway. Reruns are categorized P/F/S/XP/XF just as normal tests are.
116 | - Removed duplicate 'lastline' in About section.
117 | - Implemented dynamic inclusion/removal of section and results tabs, depending on whether they have content or not.
118 | - Added 'sticky' CSS styling to top-bar buttons. The top bar now shows up even when scrolling down long pages.
119 | - Fixed a few persistent ANSI issues: no blue markup (was missing CSS for `\x1b[94m`, 'bright blue'); and non-marked-up section content.
120 | 
121 | ## [1.2.1] - 2022-08-22
122 | 
123 | - Revamped console-line categorization algorithm to more easily accommodate variations in user environments
124 | - Added support for `live log` sections
125 | - Fixed multiple bugs (although several remain)
126 | 
127 | ## [1.2.0] - 2022-08-19
128 | 
129 | - Working/revamped HTML file output, with more modern look/feel.
130 | - Fixed tui1 (Textual) so that it works with the new internal implementation.
131 | - Retiring tui2 (PyTermTk) for now.
132 | - Reordered/removed some menu items in tuiconf to fit new content/choices.
133 | - Changed output files folder to ./tui_files.
134 | 
135 | ## [1.1.3] - 2022-08-13
136 | 
137 | - Fixed bug where if config file existed but was empty, an exception would occur on launching HTML file.
138 | 
139 | ## [1.1.2] - 2022-08-10
140 | 
141 | - Added chronological results section.
142 | - Cleaned up HTML.
143 | 
144 | ## [1.1.1] - 2022-08-08
145 | 
146 | - Added TUI autolaunch config variable (default False).
147 | - Cleaned up HTML, and added Metadata show/hide button.
148 | 
149 | ## [1.1.0] - 2022-08-07
150 | 
151 | - Refactored HTML code to fix several small but annoying issues.
152 | - Added new configuration console script `tuiconf` that allows user to change/store settings for:
153 |   - TUI choice (`tui1` (Textual) or `tui2` (PyTermTk))
154 |   - HTML "light" or "dark" page coloring scheme
155 |   - HTML autolaunch (y or n)
156 |   - Custom coloring capability for HTML scheme
157 | - Removed old `tui1` and `tui2` console scripts, replacing with a single `tui` version that launches the configured TUI as set using `tuiconf`
158 | - Added new console script `tuih`, which creates and optionally launches the HTML output from the last testrun session.
159 | - Changed name of output file folder to /pytest_tui_files. This is where the .bin and .html files now reside.
160 | - Fixed issue where if no tests were run and either TUI was launched, they would crash.
161 | - Replaced previous dummy environment button data with actual environment data from pytest's output.
162 | 
163 | ## [1.0.1] - 2022-08-02
164 | 
165 | - Implemented threading in final testing stage so that HTML and TUI can be launched at the same time.
166 | - Fixed --co bug where TUIs were called although no tests had been run.
167 | - Added this changelog.
168 | 
169 | ## [1.0.0] - 2022-08-01
170 | 
171 | - Refactored individual results classification to use pytest's 'short test summary info' section, instead of TestReport outcome. This ensures that pytest-tui results are always the same as those of pytest itself.
172 | - Implemented basic HTML report.
173 | - Updated folder structure to place .bin and .html output files in /release.
174 | 


--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
 1 | 
 2 | The MIT License (MIT)
 3 | 
 4 | Copyright (c) 2022 Jeff Wright
 5 | 
 6 | Permission is hereby granted, free of charge, to any person obtaining a copy
 7 | of this software and associated documentation files (the "Software"), to deal
 8 | in the Software without restriction, including without limitation the rights
 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 | 
13 | The above copyright notice and this permission notice shall be included in
14 | all copies or substantial portions of the Software.
15 | 
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 | THE SOFTWARE.
23 | 


--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
  5 | 
  6 | # pytest-tui
  7 | ## A pytest plugin for viewing test run results, with console scripts to launch a Text User Interface (TUI) or an HTML page
  8 | 
  9 | ### TUI:
 10 | ![2022-05-01 19 25 19](https://user-images.githubusercontent.com/4308435/166174159-b442a5b5-416d-42a0-badd-7401e9980e47.gif)
 11 | 
 12 | ### HTML:
 13 | ![2022-08-27 08 07 11](https://user-images.githubusercontent.com/4308435/187034046-312b1ee8-0f7b-49a1-994f-9c38a9d3941c.gif)
 14 | 
 15 | ### Log Folding:
 16 | ![2023-04-11 23 56 57](https://user-images.githubusercontent.com/4308435/231364763-132e8c35-cb61-4172-9686-176d84c038ca.gif)
 17 | 
 18 | ## Introduction
 19 | When you run Pytest campaigns that produce a lot of terminal output (e.g. with many tests, very detailed output, or with multiple failures), the standard Pytest output can make it difficult to examine the results. You end up scrolling way back in the terminal, looking for that one test you want to examine more closely. Pytest-tui provides a Text User Interface (TUI) and an HTML page that aim to make it easier to find the information you're looking for.
 20 | 
 21 | Test results are categorized in the same way Pytest does it:
 22 | 
 23 | - By outcome: `[Pass|Fail|Error|Skipped|Xpass|Xfail]`
 24 | - By output section: `[Summary|Full|Errors|Passes|Failures|Warnings]`
 25 | 
 26 | The intent is to make it easier for you to find the specific results you want, so you can examine them without all the other results getting in your way.
 27 | 
 28 | How does it work in practice? Easy. You just run your Pytest campaigns like you normally would, adding the command line option `--tui` (`pytest --tui`). Your test session will proceed as it always does (always in verbose mode), showing you the familiar terminal output while running. Then, at the end of the session, a TUI or an HTML page can be launched via the included console scripts (`tui` and/or `tuih`). The results are displayed on-screen or in-browser for you to examine. When you're done, just exit the TUI to go back to the terminal, or close the HTML page. Don't worry about losing your test session data. Results are stored to local disk and you can always relaunch the TUI or HTML page using those same console scripts.
 29 | 
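For example, a typical session looks like this (the `demo-tests/` path is just an illustration):

```shell
pytest --tui demo-tests/   # run as usual; results are also saved to ./tui_files/
tui                        # re-open the results in the terminal TUI
tuih                       # regenerate/launch the HTML report in your browser
```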
 30 | Output sections and individual test results are expandable/collapsible, and test summary statistics are displayed for convenience. Both the TUI and the HTML page retain the original pytest ANSI-encoded color output, lending a familiar look and feel.
 31 | 
 32 | ## Features
 33 | - **New** in 1.10.0: Regex-based folding on the HTML page, configurable by user-provided regex! See the "Python Regex Folding" section below.
 34 | - **New** in 1.9.1: Log message folding on the HTML page, configurable by log level. See the "Python Log Message Folding" section below.
 35 | - Launch either or both of the [Textual](https://github.com/Textualize/textual) TUI or the HTML page using built-in console scripts
 36 | - ANSI text markup support - whatever the output on your console looks like is how things are going to show up in the TUI
 37 | - Mouse and keyboard support (including scrolling)
 38 | - Support for all output formats/modes:
 39 |   - `-v`, `-vv`, `--no-header`, `--showlocals`, `--color=`
 40 |   - all variants of `--tb` except "native"
 41 |   - "live-log" (aka log_cli)
 42 | - Support for other, simple output-manipulating plugins:
 43 |   - `pytest-clarity`
 44 |   - `pytest-emoji`
 45 |   - `pytest-icdiff`
 46 |   - `pytest-rerunfailures`
 47 |   - etc.
 48 | - Not supported: plugins that take over the console in other ways, like
 49 |   - `pytest-sugar`
 50 |   - `pytest-emoji-output`
 51 |   - `pytest-timestamp`
 52 | - Untested:
 53 |   - `pytest-xdist`
 54 |   - `loguru`
 55 | 
 56 | ## Requirements
 57 | - Pytest >= 6.2.5
 58 | - Python >= 3.8 (but see "Known Limitations/Issues" below if you want to run 3.10+)
 59 | 
 60 | ## Installation
 61 | 
 62 | For most users, simply issue the command `pip install pytest-tui` and you are good to go.
 63 | 
 64 | For those wishing to install via requirements files, they are located in the /reqts/ directory.
 65 | 
 66 | ## Usage
 67 | 
 68 | ### Running Your Tests
 69 | 
 70 | Pretty much just run pytest like you always do, adding the `--tui` option to the list of command line options:
 71 | 
 72 | `pytest --tui`
 73 | 
 74 | In some environments, where the working directory for pytest has been changed from the default, it may be necessary to cd into the working directory in order to successfully launch the TUI or HTML. Basically, you need to be in the parent directory of wherever the `/tui_files` folder has been placed by the plugin after a test run. This is a known issue and will be fixed at some point.
 75 | 
 76 | ### Sample / Demo Tests
 77 | 
 78 | If you would like some dummy tests that will allow you to take pytest-tui for a test drive, copy all the files at https://github.com/jeffwright13/pytest-tui/tree/main/demo-tests into a folder called `demo-tests/` where your test environment resides. You will need the additional libraries listed in /reqts/requirements-dev.txt, so install them (`pip install -r reqts/requirements-dev.txt`). Then:
 79 | 
 80 | `pytest demo-tests/`
 81 | 
 82 | ### Looking at Results After Quitting TUI
 83 | 
 84 | If you have already exited the TUI and would like to re-enter it with the same data generated from the last Pytest run, simply type `tui`. To re-launch the HTML page using your default browser, issue the command `tuih`.
 85 | 
 86 | ### TUI Copy/Paste
 87 | 
 88 | On Linux terminals, you can typically press and hold the SHIFT key on your keyboard to temporarily bypass the TUI and access the terminal's native mouse copy/paste functionality (commonly, click-drag-release or double-click to select text, middle-click to paste). This copy/paste works with the terminal's selection buffer, as opposed to the TUI's buffer.
 89 | 
 90 | On Windows, use the ALT key while click-dragging the mouse. Mac users can get the same effect with the Option key.
 91 | 
 92 | ### Generating and viewing the HTML File
 93 | 
 94 | The HTML output file is located at `/tui_files/html_report.html`. It is automatically generated when a test run is completed with the `--tui` option, and can also be generated manually by invoking the `tuih` console script on the command line.
 95 | 
 96 | ### Visibility
 97 | 
 98 | Sometimes it can be difficult to read the terminal output when rendered on the HTML report. Pytest embeds ANSI color codes in its output, which are interpreted by a terminal program to display various colors for text. Pytest-tui takes these ANSI color codes and translates them to HTML (using the [ansi2html](https://pypi.org/project/ansi2html/) library). Because the default color scheme for the HTML report is a light background with dark text, it can be difficult to see some of the colors. To address this, there are three buttons that can help. The first ("Toggle Background") allows you to toggle the background color of all console output. This should result in a page that closely resembles the output you would get in a standard terminal environment (assuming you have white text on a black background). The other two buttons, "Invert Colors" and "Remove/Restore Colors", are a bit more drastic, in that they affect all text in the report. Experiment and see what works for you. Also note that if you have your browser set to dark mode, or have a theme that changes the default color scheme, this can also affect the visibility of the text.
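The translation pytest-tui performs is essentially what the `ansi2html` package does on its own. Here is a minimal standalone sketch (not the plugin's actual code, and the sample string is made up):

```python
from ansi2html import Ansi2HTMLConverter

# Turn ANSI-colored terminal text into an HTML document with styled spans.
conv = Ansi2HTMLConverter()
html = conv.convert("\x1b[31mFAILED\x1b[0m demo-tests/test_0.py::test0_fail_1")
print(html)  # the red "FAILED" survives as a CSS-styled span
```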
 99 | ### "Folding" output in the HTML report
100 | 
101 | New in 1.11.0 is the integrated "folding" feature, which automatically rolls up any lines of your test's output that match a regex (or regexes) specified in a file given on the command line. This lets you match specific lines of pytest's console output and 'fold' (hide) them.
102 | 
103 | The folding feature is activated by passing the `--tui-regexfile` option (see `pytest --help`), and setting the path of a file containing the desired regex or regexes.
104 | 
105 | The file itself must contain plain text (UTF-8 encoded) with either a single regex, specified on a single line of the file, or two 'marker' patterns, specified on two consecutive lines of the file. If there is a single line in the file, that line is assumed to contain a regular expression, and the folding action is applied to any line of pytest's console output that matches it. Consecutive lines that match are folded into the same section. If there are two lines in the regex file, the first line is assumed to be a start marker and the second line a stop marker; the folding action is applied to all lines between the start and stop markers.
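For illustration, here are two hypothetical regex files (the patterns are invented, not shipped with the plugin). A single-pattern file that folds every DEBUG log line:

```text
^DEBUG
```

And a two-line start/stop marker file, which folds everything between lines matching the two patterns:

```text
=== FOLD START ===
=== FOLD STOP ===
```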
106 | 
107 | Ideas and tips for folding:
108 | - Run all tests with DEBUG level logging, but only view those DEBUG messages when necessary. I find this option particularly helpful when trying to debug a test that is only failing intermittently.
109 | - Mark certain sections of a test's output with a pair of start/end markers. If you have test output that is very chatty, but you only want to see it when you need to, this is a good option. For example, if you have a test that is making a bunch of API calls, and you want to see the output of those calls, but only when the test fails, you can mark the start and stop of the API calls with a pair of markers, and then fold them away when you don't need to see them.
110 | - Use the non-printable characters 'ZWS' and 'ZWJ' ([Zero Width Space](https://en.wikipedia.org/wiki/Zero-width_space) / [Zero Width Joiner](https://en.wikipedia.org/wiki/Zero-width_joiner)) as start and stop markers, as sketched below. The visual impact on the output is minimal (only inserts one visible space), and the regex pattern is very unlikely to match anything else in the output. The repo contains a file called `nonprintable_characters.txt` that contains combinations of these characters, which can be used as a starting point for your own regexes.
111 | 
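As a sketch of that last tip (an illustrative test, not part of the repo; `demo-tests/test_0.py` uses the same idea), the markers can be emitted with explicit escape sequences and then matched by the patterns in your regex file:

```python
ZWS = "\u200b"  # Zero Width Space
ZWJ = "\u200d"  # Zero Width Joiner

def test_chatty_api(logger):
    logger.info(ZWS * 3)  # start marker: fold from here...
    logger.info("very chatty API output, hidden until unfolded")
    logger.info(ZWJ * 3)  # ...to here: stop marker
```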
112 | ## Known Limitations / Issues
113 | 
114 | - Python support for 3.10+ is not guaranteed. Changes were made to the `importlib.metadata` library that are not backwards-compatible, and may result in exceptions when attempting to run. I have not had the chance to chase this down definitively, so until such a time that I fully understand the issue, I recommend using Python 3.8 or 3.9. Of course, YMMV...give it a try, and let me know how things go. :-)
115 | - User interfaces need work:
116 |   - Overall layouts need optimization (I am definitely not a UX guy)
117 |   - Textual interface may be sluggish, esp. if run within an IDE
118 |   - All code here is like a sausage factory: pleasant enough, until you look inside - do so at your own peril!
119 | - Not fully tested with all combinations of output formats. Probably some use-cases where things won't work 100% right.
120 | - `pytest-tui` is currently incompatible with pytest command line option `--tb=native`, and will cause an INTERNALERROR if the two are used together.
121 | - HTML page cannot offer clickable links to local filesystem. This is one of the workflows I depend on when using iTerm2...traceback lines with a `file://` URL to a locally-hosted resource are clickable, and open up my IDE to that line in that file. Unfortunately, web browsers are much more security-minded than terminal apps, and actions like this are strictly disallowed.
122 | 
123 | ## History
124 | 
125 | This project was originally envisioned to only show test failures, and allow the user to 'fold' the details of the failed tests by clicking a line so that the details would alternately show/hide. In fact, the original repo was called `pytest-fold`. As development progressed, it became clear that what was really needed was a real TUI, one that organized the output in such a way that all of pytest's output was available in a more streamlined way.
126 | 
127 | Several TUIs (using different TUI libraries) have been cycled through this project. The Textual interface is the only one currently supported, since some internal optimization has been done to make the results simpler to consume. However, other TUIs should be able to be integrated without too much work (e.g. Asciimatics, PyTermTk, PyTermGUI, etc.). The same would be true of a GUI. Contact the author if you have a desire to implement one of these. The results of any given test run are collected and sorted in such a way that it should be relatively simple to take them and put them into the presentation mode of choice.
128 | 
129 | The HTML feature was put into place because of some minor limitations the author found in the available HTML plugins (miscounted totals in some corner cases, no color-coded output, inability to show output from the pytest `live logs` option). There is no intent to replace existing HTML plugins, but if you like this one, please do spread the word. :-)
130 | 
131 | ## Reporting Issues
132 | 
133 | If you encounter any problems, have feedback or requests, or anything else, please [file an issue](https://github.com/jeffwright13/pytest-tui/issues/new), along with a detailed description.
134 | 
135 | ## Contributing
136 | 
137 | Contributions are welcome. Please run pyflakes, isort and black on any code before submitting a PR.
138 | 
139 | I have tried to make the TUIs and the HTML page as clean as possible, but I am not a UI expert and I am sure many improvements could be made. If you are slick with user interfaces, I would love some help!
140 | 
141 | ## License
142 | 
143 | Distributed under the terms of the MIT license, "pytest-tui" is free and open source software.
144 | 


--------------------------------------------------------------------------------
/conftest.py:
--------------------------------------------------------------------------------
1 | pytest_plugins = ["pytester"]
2 | 


--------------------------------------------------------------------------------
/demo-tests/conftest.py:
--------------------------------------------------------------------------------
  1 | import pytest
  2 | import random
  3 | 
  4 | 
  5 | import logging
  6 | import logging.config
  7 | import time
  8 | from datetime import datetime, timezone
  9 | 
 10 | 
 11 | class UTCFormatter(logging.Formatter):
 12 |     def formatTime(self, record, datefmt=None):
 13 |         dt = datetime.fromtimestamp(record.created, tz=timezone.utc)
 14 |         t = dt.strftime("%Y-%m-%d %H:%M:%S")
 15 |         return f"{t} UTC"
 16 | 
 17 | 
 18 | @pytest.fixture(scope="module")
 19 | def logger():
 20 |     logging_config = {
 21 |         "version": 1,
 22 |         "disable_existing_loggers": False,
 23 |         "incremental": False,
 24 |         "formatters": {
 25 |             "default": {
 26 |                 "format": "%(levelname)-8s %(asctime)s %(name)-10s %(message)s",
 27 |                 "()": UTCFormatter,  # Use custom UTCFormatter
 28 |             },
 29 |         },
 30 |         "handlers": {
 31 |             "console": {
 32 |                 "class": "logging.StreamHandler",
 33 |                 "level": "INFO",
 34 |                 "formatter": "default",
 35 |                 "stream": "ext://sys.stderr",
 36 |             },
 37 |         },
 38 |         "root": {
 39 |             "level": "INFO",
 40 |             "handlers": ["console"],
 41 |         },
 42 |         "loggers": {
 43 |             "my_logger": {"level": "DEBUG"},  # Set the logger level to DEBUG
 44 |         },
 45 |     }
 46 | 
 47 |     logging.config.dictConfig(logging_config)
 48 |     logging.Formatter.converter = time.gmtime
 49 |     logger = logging.getLogger("my_logger")
 50 |     logger.setLevel(logging.DEBUG)
 51 |     return logger
 52 | 
 53 | 
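For orientation, here is an illustrative test (not part of this file) showing how the demo tests consume the fixture above; pytest injects it by parameter name:

```python
def test_uses_shared_logger(logger):
    # INFO and above reach the stderr console handler configured above
    logger.info("hello from the demo suite")
```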
 54 | data = [
 55 |     "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
 56 |     "Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.",
 57 |     (
 58 |         "Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut"
 59 |         " aliquip ex ea commodo consequat."
 60 |     ),
 61 |     (
 62 |         "Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore"
 63 |         " eu fugiat nulla pariatur."
 64 |     ),
 65 |     (
 66 |         "Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia"
 67 |         " deserunt mollit anim id est laborum."
 68 |     ),
 69 |     "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
 70 |     (
 71 |         "Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium"
 72 |         " doloremque laudantium."
 73 |     ),
 74 |     (
 75 |         "Totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi"
 76 |         " architecto beatae vitae dicta sunt explicabo."
 77 |     ),
 78 |     "Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit.",
 79 |     "Sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt.",
 80 |     (
 81 |         "Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur,"
 82 |         " adipisci velit."
 83 |     ),
 84 |     "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
 85 |     "Nunc eget lorem ac lectus eleifend blandit.",
 86 |     "Aenean feugiat urna nec nulla ultrices consequat.",
 87 |     "Proin placerat odio a justo bibendum, ut dapibus nulla commodo.",
 88 |     "Vivamus et ultrices nunc, vitae tempus neque.",
 89 |     "Donec eget purus nec quam pretium mollis quis ut velit.",
 90 |     "Nam eu lacus euismod, sodales ipsum non, lacinia purus.",
 91 |     "Mauris varius sapien sed turpis congue, ac ullamcorper tortor tincidunt.",
 92 |     "Nam sit amet nisl vel purus dignissim blandit.",
 93 |     "Vivamus volutpat tristique ante quis vestibulum.",
 94 |     "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
 95 |     (
 96 |         "Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac"
 97 |         " turpis egestas."
 98 |     ),
 99 |     "Duis ut commodo risus, non fringilla justo.",
100 |     "Praesent commodo commodo est, at maximus metus bibendum vitae.",
101 |     "Donec eu justo ut massa posuere semper sit amet quis arcu.",
102 |     "Proin eu est vel risus varius commodo id ut enim.",
103 |     (
104 |         "Morbi ornare, nisi vel consectetur bibendum, nibh elit mollis quam, ac"
105 |         " vestibulum velit est at turpis."
106 |     ),
107 |     (
108 |         "Donec finibus, sapien eget facilisis ultricies, velit risus faucibus lorem,"
109 |         " euismod efficitur quam mauris ut turpis."
110 |     ),
111 |     "Ut commodo augue ut eros malesuada, vitae elementum risus suscipit.",
112 |     "Nam hendrerit sapien vitae lorem sagittis, quis hendrerit ex scelerisque.",
113 |     "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
114 |     "Nullam tincidunt sem eu turpis efficitur mollis.",
115 |     "Duis nec turpis interdum, dapibus justo non, fringilla lorem.",
116 |     "Praesent feugiat vitae ante eu pharetra.",
117 |     "Mauris malesuada metus ac augue dictum fringilla.",
118 |     "Praesent in metus feugiat, gravida mi ac, sagittis nisl.",
119 |     "Cras vulputate semper sapien, ac faucibus enim volutpat a.",
120 |     (
121 |         "Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere"
122 |         " cubilia Curae; Nulla facilisi."
123 |     ),
124 |     (
125 |         "Suspendisse vestibulum, purus eu sollicitus.pytestLorem ipsum dolor sit amet,"
126 |         " consectetur adipiscing elit."
127 |     ),
128 |     (
129 |         "Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium"
130 |         " doloremque laudantium."
131 |     ),
132 |     (
133 |         "At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis"
134 |         " praesentium voluptatum deleniti atque corrupti quos dolores et quas molestias"
135 |         " excepturi sint occaecati cupiditate non provident, similique sunt in culpa"
136 |         " qui officia deserunt mollitia animi, id est laborum et dolorum fuga."
137 |     ),
138 |     "Et harum quidem rerum facilis est et expedita distinctio.",
139 |     (
140 |         "Nam libero tempore, cum soluta nobis est eligendi optio cumque nihil impedit"
141 |         " quo minus id quod maxime placeat facere possimus, omnis voluptas assumenda"
142 |         " est, omnis dolor repellendus."
143 |     ),
144 |     (
145 |         "Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus"
146 |         " saepe eveniet ut et voluptates repudiandae sint et molestiae non recusandae."
147 |     ),
148 |     (
149 |         "Itaque earum rerum hic tenetur a sapiente delectus, ut aut reiciendis"
150 |         " voluptatibus maiores alias consequatur aut perferendis doloribus asperiores"
151 |         " repellat."
152 |     ),
153 |     (
154 |         "Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore"
155 |         " eu fugiat nulla pariatur."
156 |     ),
157 |     (
158 |         "Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia"
159 |         " deserunt mollit anim id est laborum."
160 |     ),
161 |     (
162 |         "Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium"
163 |         " doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore"
164 |         " veritatis et quasi architecto beatae vitae dicta sunt explicabo."
165 |     ),
166 |     (
167 |         "Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit,"
168 |         " sed quia consequuntur magni dolores eos qui ratione voluptatem sequi"
169 |         " nesciunt."
170 |     ),
171 |     (
172 |         "Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur,"
173 |         " adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et"
174 |         " dolore magnam aliquam quaerat voluptatem."
175 |     ),
176 |     (
177 |         "Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit"
178 |         " laboriosam, nisi ut aliquid ex ea commodi consequatur."
179 |     ),
180 |     (
181 |         "Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam"
182 |         " nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas"
183 |         " nulla pariatur?"
184 |     ),
185 |     (
186 |         "At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis"
187 |         " prameris."
188 |     ),
189 | ]
190 | 
191 | 
192 | @pytest.fixture()
193 | def fake_data() -> str:
194 |     num = random.randint(2, 6)
195 |     fake = [random.choice(data) for _ in range(num)]
196 |     return " ".join(fake)
197 | 


--------------------------------------------------------------------------------
/demo-tests/html_post_process.py:
--------------------------------------------------------------------------------
 1 | import re
 2 | import sys
 3 | 
 4 | 
 5 | def post_process_html_report(html_report_path):
 6 |     with open(html_report_path, "r", encoding="utf-8") as file:
 7 |         content = file.read()
 8 | 
 9 |     # This regex pattern needs to match the log output format precisely
10 |     pattern = re.compile(
11 |         r"\[DETAILS\](.*?)\[SUMMARY\](.*?)\[/SUMMARY\](.*?)\[/DETAILS\]", re.DOTALL
12 |     )
13 | 
14 |     # Replace the pattern with the HTML details/summary tags
15 |     def replace_with_tags(match):
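        # Hypothetical input/output for the pattern above: a line such as
        #   [DETAILS]x[SUMMARY]API calls[/SUMMARY]chatty output[/DETAILS]
        # is rewritten to
        #   <details><summary>API calls</summary>chatty output</details>
        # (group(1), anything between [DETAILS] and [SUMMARY], is discarded)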
16 |         return f"
{match.group(2)}{match.group(3)}
" 17 | 18 | content = pattern.sub(replace_with_tags, content) 19 | 20 | with open(html_report_path, "w", encoding="utf-8") as file: 21 | file.write(content) 22 | 23 | 24 | if __name__ == "__main__": 25 | # Check for command line arguments for the report path 26 | if len(sys.argv) < 2: 27 | print("Usage: python script.py ") 28 | sys.exit(1) 29 | 30 | report_path = sys.argv[1] 31 | post_process_html_report(report_path) 32 | -------------------------------------------------------------------------------- /demo-tests/test_0.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import random 3 | import warnings 4 | import sys 5 | import pytest 6 | 7 | 8 | # 3 consecutive ZWS 9 | ZWS_X3 = r"""​​​""" 10 | # 1 BOM followed by 1 ZWS 11 | BOM_ZWS = r"""​""" 12 | # 3 consecutive ZWJ 13 | ZWJ_X3 = r"""‍‍‍""" 14 | # 1 BOM followed by 1 ZWJ 15 | BOM_ZWJ = r"""‍""" 16 | 17 | 18 | def test0_10_fail_capturing(capsys, fake_data, logger): 19 | logger.info(ZWS_X3) 20 | print("FAIL this stdout is captured") 21 | print("FAIL this stderr is captured", file=sys.stderr) 22 | logger.warning("FAIL this log is captured") 23 | with capsys.disabled(): 24 | print("FAIL stdout not captured, going directly to sys.stdout") 25 | print("FAIL stderr not captured, going directly to sys.stderr", file=sys.stderr) 26 | logger.warning("FAIL is this log captured?") 27 | print("FAIL this stdout is also captured") 28 | print("FAIL this stderr is also captured", file=sys.stderr) 29 | logger.warning("FAIL this log is also captured") 30 | logger.critical(fake_data) 31 | logger.error(fake_data) 32 | logger.warning(fake_data) 33 | logger.info(fake_data) 34 | logger.debug(fake_data) 35 | logger.info(ZWJ_X3) 36 | assert False 37 | 38 | 39 | def test0_pass_1(logger): 40 | print("Test Pass 1!") 41 | logger.info(ZWS_X3) 42 | logger.critical("CRITICAL") 43 | logger.error("ERROR") 44 | logger.warning("WARNING") 45 | logger.info("INFO") 46 | logger.debug("DEBUG") 47 | logger.info(ZWJ_X3) 48 | assert True 49 | 50 | 51 | def test0_pass_2_logs(logger): 52 | print("Test Pass 2!") 53 | logger.info(ZWS_X3) 54 | logger.critical("CRITICAL") 55 | logger.error("ERROR") 56 | logger.warning("WARNING") 57 | logger.info("INFO") 58 | logger.debug("DEBUG") 59 | logger.info(ZWJ_X3) 60 | assert True 61 | 62 | 63 | @pytest.fixture 64 | def error_fixt(logger): 65 | raise Exception("Error in fixture") 66 | 67 | 68 | def test0_pass_3_error_in_fixture(error_fixt): 69 | print("Test Pass 3!") 70 | assert True 71 | 72 | 73 | def test0_fail_1(logger): 74 | print("Test Fail 1!") 75 | assert 1 == 2 76 | 77 | 78 | pytest.mark.skip(reason="Skipping this test with decorator.") 79 | 80 | 81 | def test0_skip(logger): 82 | assert True 83 | 84 | 85 | @pytest.mark.xfail() 86 | def test0_xfail(logger): 87 | print("Test 0 XFail") 88 | logger.info(ZWS_X3) 89 | logger.critical("CRITICAL") 90 | logger.error("ERROR") 91 | logger.warning("WARNING") 92 | logger.info("INFO") 93 | logger.debug("DEBUG") 94 | logger.info(ZWJ_X3) 95 | assert False 96 | 97 | 98 | @pytest.mark.xfail() 99 | def test0_xpass(logger): 100 | # logger.name = __name__ 101 | print("Test 0 XPass") 102 | # logger.info(ZWS_X3) 103 | # logger.critical("CRITICAL") 104 | # logger.error("ERROR") 105 | logger.warning("WARNING") 106 | logger.info("INFO") 107 | logger.debug("DEBUG") 108 | logger.info(ZWJ_X3) 109 | assert True 110 | 111 | 112 | # Method and its test that causes warnings 113 | def api_v1(): 114 | warnings.warn(UserWarning("api v1, should use functions from v2")) 
115 | return 1 116 | 117 | 118 | def test0_warning(): 119 | assert api_v1() == 1 120 | 121 | 122 | @pytest.mark.flaky(reruns=5) 123 | def test_flaky_3(): 124 | assert random.choice([True, False]) 125 | -------------------------------------------------------------------------------- /demo-tests/test_1.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import random 3 | import sys 4 | import warnings 5 | 6 | import faker 7 | import pytest 8 | 9 | LOG_LEVELS = [ 10 | logging.DEBUG, 11 | logging.INFO, 12 | logging.WARNING, 13 | logging.ERROR, 14 | logging.CRITICAL, 15 | ] 16 | 17 | 18 | def log_making_fixture(fake_data, logger): 19 | for _ in range(random.randint(1, 10)): 20 | logger.log(random.choice(LOG_LEVELS), fake_data()) 21 | logger.log(random.choice(LOG_LEVELS), fake_data()) 22 | logger.log(random.choice(LOG_LEVELS), fake_data()) 23 | logger.log(random.choice(LOG_LEVELS), fake_data()) 24 | pass 25 | 26 | 27 | def test_random_logs(fake_data, logger): 28 | log_making_fixture() 29 | 30 | 31 | @pytest.fixture 32 | def error_fixture(fake_data, logger): 33 | logger.critical(fake_data) 34 | logger.error(fake_data()) 35 | logger.warning(fake_data()) 36 | logger.info(fake_data()) 37 | logger.debug(fake_data()) 38 | assert 0 39 | 40 | 41 | def test_a_ok(fake_data, logger): 42 | print("This test doesn't have much to say, but it passes - ok!!") 43 | logger.critical(fake_data) 44 | logger.error(fake_data()) 45 | logger.warning(fake_data()) 46 | logger.info(fake_data()) 47 | logger.debug(fake_data()) 48 | pass 49 | 50 | 51 | def test_b_fail(fake_data, logger): 52 | logger.critical(fake_data) 53 | logger.error(fake_data()) 54 | logger.warning(fake_data()) 55 | logger.info(fake_data()) 56 | logger.debug(fake_data()) 57 | assert 0 58 | 59 | 60 | def test_c_error(error_fixture, logger): 61 | print("This test should be marked as an Error.") 62 | logger.critical(fake_data) 63 | logger.error(fake_data()) 64 | logger.warning(fake_data()) 65 | logger.info(fake_data()) 66 | logger.debug(fake_data()) 67 | pass 68 | 69 | 70 | def test_d1_skip_inline(fake_data, logger): 71 | logger.critical(fake_data) 72 | logger.error(fake_data()) 73 | logger.warning(fake_data()) 74 | logger.info(fake_data()) 75 | logger.debug(fake_data()) 76 | pytest.skip("Skipping this test with inline call to 'pytest.skip()'.") 77 | 78 | 79 | pytest.mark.skip(reason="Skipping this test with decorator.") 80 | 81 | 82 | def test_d2_skip(fake_data, logger): 83 | logger.critical(fake_data) 84 | logger.error(fake_data()) 85 | logger.warning(fake_data()) 86 | logger.info(fake_data()) 87 | logger.debug(fake_data()) 88 | 89 | 90 | def test_d3_skip_decorator(fake_data, logger): 91 | logger.critical(fake_data) 92 | logger.error(fake_data()) 93 | logger.warning(fake_data()) 94 | logger.info(fake_data()) 95 | logger.debug(fake_data()) 96 | pytest.skip("Skipping this test with inline call to 'pytest.skip()'.") 97 | 98 | 99 | def test_e1_xfail_by_inline_and_has_reason(fake_data, logger): 100 | logger.critical(fake_data) 101 | logger.error(fake_data()) 102 | logger.warning(fake_data()) 103 | logger.info(fake_data()) 104 | logger.debug(fake_data()) 105 | pytest.xfail("Marked as Xfail with inline call to 'pytest.xfail()'.") 106 | 107 | 108 | @pytest.mark.xfail(reason="Marked as Xfail with decorator.") 109 | def test_e2_xfail_by_decorator_and_has_reason(fake_data, logger): 110 | logger.critical(fake_data) 111 | logger.error(fake_data()) 112 | logger.warning(fake_data()) 113 | logger.info(fake_data()) 114 
| logger.debug(fake_data()) 115 | pytest.xfail("Marked as Xfail with decorator.") 116 | 117 | 118 | def test_f1_xfails_by_inline_even_though_assertTrue_happens_before_pytestDotXfail( 119 | fake_data, logger 120 | ): 121 | logger.critical(fake_data) 122 | logger.error(fake_data()) 123 | logger.warning(fake_data()) 124 | logger.info(fake_data()) 125 | logger.debug(fake_data()) 126 | assert True 127 | pytest.xfail("Marked as Xfail with inline call to 'pytest.xfail()'.") 128 | 129 | 130 | @pytest.mark.xfail(reason="Marked as Xfail with decorator.") 131 | def test_f2_xpass_by_xfail_decorator_and_has_reason(fake_data, logger): 132 | print("This test is marked Xfail by use of decorator '@pytest.mark.xfail'.") 133 | print("However, because its outcome is a PASS, it is classified as XPass instead.") 134 | logger.critical(fake_data) 135 | logger.error(fake_data()) 136 | logger.warning(fake_data()) 137 | logger.info(fake_data()) 138 | logger.debug(fake_data()) 139 | pass 140 | 141 | 142 | @pytest.mark.parametrize("test_input, expected", [("3+5", 8), ("2+4", 6), ("6*9", 42)]) 143 | def test_g_eval_parameterized(test_input, expected, logger): 144 | print(f"Testing {test_input} == {expected}") 145 | logger.critical(fake_data) 146 | logger.error(fake_data()) 147 | logger.warning(fake_data()) 148 | logger.info(fake_data()) 149 | logger.debug(fake_data()) 150 | assert eval(test_input) == expected 151 | 152 | 153 | @pytest.fixture 154 | def log_testname(fake_data, logger): 155 | logger.info(f"Running test {__name__}...") 156 | logger.info("Setting test up...") 157 | logger.info("Executing test...") 158 | logger.info(faker.Faker().text(random.randint(50, 200))) 159 | logger.info("Tearing test down...") 160 | 161 | 162 | def test_1_passes_and_has_logging_output(fake_data, logger): 163 | logger.critical(fake_data) 164 | logger.error(fake_data()) 165 | logger.warning(fake_data()) 166 | logger.info(fake_data()) 167 | logger.debug(fake_data()) 168 | assert True 169 | 170 | 171 | def test_2_fails_and_has_logging_output(fake_data, logger): 172 | logger.critical(fake_data) 173 | logger.error(fake_data()) 174 | logger.warning(fake_data()) 175 | logger.info(fake_data()) 176 | logger.debug(fake_data()) 177 | assert 0 == 1 178 | 179 | 180 | def test_3_fails(fake_data, logger): 181 | logger.critical(fake_data) 182 | logger.error(fake_data()) 183 | logger.warning(fake_data()) 184 | logger.info(fake_data()) 185 | logger.debug(fake_data()) 186 | assert 0 187 | 188 | 189 | def test_4_passes(log_testname, logger): 190 | logger.critical(fake_data) 191 | logger.error(fake_data()) 192 | logger.warning(fake_data()) 193 | logger.info(fake_data()) 194 | logger.debug(fake_data()) 195 | assert True 196 | 197 | 198 | @pytest.mark.skip 199 | def test_5_marked_SKIP(log_testname, logger): 200 | logger.critical(fake_data) 201 | logger.error(fake_data()) 202 | logger.warning(fake_data()) 203 | logger.info(fake_data()) 204 | logger.debug(fake_data()) 205 | assert 1 206 | 207 | 208 | @pytest.mark.xfail 209 | def test_6_marked_xfail_by_decorator_but_passes_and_has_no_reason(log_testname, logger): 210 | logger.critical(fake_data) 211 | logger.error(fake_data()) 212 | logger.warning(fake_data()) 213 | logger.info(fake_data()) 214 | logger.debug(fake_data()) 215 | assert 1 216 | 217 | 218 | @pytest.mark.xfail 219 | def test_7_marked_xfail_by_decorator_and_fails_and_has_no_reason(log_testname, logger): 220 | logger.critical(fake_data) 221 | logger.error(fake_data()) 222 | logger.warning(fake_data()) 223 | logger.info(fake_data()) 224 | 
logger.debug(fake_data()) 225 | assert 0 226 | 227 | 228 | # Method and its test that causes warnings 229 | def api_v1(log_testname, logger): 230 | warnings.warn(UserWarning("api v1, should use functions from v2")) 231 | logger.critical(fake_data) 232 | logger.error(fake_data()) 233 | logger.warning(fake_data()) 234 | logger.info(fake_data()) 235 | logger.debug(fake_data()) 236 | return 1 237 | 238 | 239 | def test_8_causes_a_warning(log_testname, logger): 240 | logger.critical(fake_data) 241 | logger.error(fake_data()) 242 | logger.warning(fake_data()) 243 | logger.info(fake_data()) 244 | logger.debug(fake_data()) 245 | assert api_v1() == 1 246 | 247 | 248 | # # These tests are helpful in showing how pytest deals with various types 249 | # # of output (stdout, stderr, log) 250 | def test_9_lorem_fails(capsys, logger): 251 | lorem = """"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. 252 | 253 | Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur? 254 | 255 | At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium voluptatum deleniti atque corrupti quos dolores et quas molestias excepturi sint occaecati cupiditate non provident, similique sunt in culpa qui officia deserunt mollitia animi, id est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio. Nam libero tempore, cum soluta nobis est eligendi optio cumque nihil impedit quo minus id quod maxime placeat facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet ut et voluptates repudiandae sint et molestiae non recusandae. 
Itaque earum rerum hic tenetur a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis doloribus asperiores repellat.""" 256 | print(lorem) 257 | assert False 258 | 259 | 260 | def test_10_fail_capturing(fake_data, capsys, logger): 261 | print("FAIL this stdout is captured") 262 | print("FAIL this stderr is captured", file=sys.stderr) 263 | logger.warning("FAIL this log is captured") 264 | with capsys.disabled(logger): 265 | print("FAIL stdout not captured, going directly to sys.stdout") 266 | print("FAIL stderr not captured, going directly to sys.stderr", file=sys.stderr) 267 | logger.warning("FAIL is this log captured?") 268 | print("FAIL this stdout is also captured") 269 | print("FAIL this stderr is also captured", file=sys.stderr) 270 | logger.warning("FAIL this log is also captured") 271 | logger.critical(fake_data) 272 | logger.error(fake_data()) 273 | logger.warning(fake_data()) 274 | logger.info(fake_data()) 275 | logger.debug(fake_data()) 276 | assert False 277 | 278 | 279 | def test_10b_failed_capturing(fake_data, capsys, logger): 280 | print("FAILED this stdout is captured") 281 | print("FAILED this stderr is captured", file=sys.stderr) 282 | logger.warning("FAILED this log is captured") 283 | with capsys.disabled(logger): 284 | print("FAILED stdout not captured, going directly to sys.stdout") 285 | print( 286 | "FAILED stderr not captured, going directly to sys.stderr", file=sys.stderr 287 | ) 288 | logger.warning("FAIL is this log captured?") 289 | print("FAILED this stdout is also captured") 290 | print("FAILED this stderr is also captured", file=sys.stderr) 291 | logger.warning("FAILED this log is also captured") 292 | logger.critical(fake_data) 293 | logger.error(fake_data()) 294 | logger.warning(fake_data()) 295 | logger.info(fake_data()) 296 | logger.debug(fake_data()) 297 | assert False 298 | 299 | 300 | def test_11_pass_capturing(fake_data, capsys, logger): 301 | print("\nPASS this stdout is captured") 302 | print("PASS this stderr is captured", file=sys.stderr) 303 | logger.warning("PASS this log is captured") 304 | with capsys.disabled(log_testname, logger): 305 | print( 306 | "PASS stdout not captured (capsys disabled), going directly to sys.stdout" 307 | ) 308 | print( 309 | "PASS stderr not captured (capsys disabled), going directly to sys.stderr", 310 | file=sys.stderr, 311 | ) 312 | logger.warning("is this log captured?") 313 | print("PASS this stdout is also captured") 314 | print("PASS this stderr is also captured", file=sys.stderr) 315 | logger.warning("PASS this log is also captured") 316 | logger.critical(fake_data) 317 | logger.error(fake_data()) 318 | logger.warning(fake_data()) 319 | logger.info(fake_data()) 320 | logger.debug(fake_data()) 321 | assert True 322 | 323 | 324 | def test_12_fails_and_has_stdout(fake_data, logger): 325 | print("this test fails") 326 | logger.critical(fake_data) 327 | logger.error(fake_data()) 328 | logger.warning(fake_data()) 329 | logger.info(fake_data()) 330 | logger.debug(fake_data()) 331 | assert 0 == 1 332 | 333 | 334 | def test_13_passes_and_has_stdout(fake_data, logger): 335 | print( 336 | "This test passes. This message is a 'print' and is consumed by Pytest via" 337 | " stdout." 
338 | ) # stdout is consumed by pytest 339 | logger.critical(fake_data) 340 | logger.error(fake_data()) 341 | logger.warning(fake_data()) 342 | logger.info(fake_data()) 343 | logger.debug(fake_data()) 344 | assert True 345 | 346 | 347 | # These 2 tests can intentionally cause an error - useful for testing; 348 | # if the fixture is commented out, the test throws an error at setup. 349 | # 350 | # @pytest.fixture() 351 | # def fixture_for_fun(log_testname, logger): 352 | # pass 353 | 354 | 355 | def test_14_causes_error_pass_stderr_stdout_stdlog(fake_data, fixture_for_fun, logger): 356 | print("PASS this stdout is captured") 357 | print("PASS this stderr is captured", file=sys.stderr) 358 | logger.warning("PASS this log is captured") 359 | logger.critical(fake_data) 360 | logger.error(fake_data()) 361 | logger.warning(fake_data()) 362 | logger.info(fake_data()) 363 | logger.debug(fake_data()) 364 | assert 1 365 | 366 | 367 | def test_15_causes_error_fail_stderr_stdout_stdlog(fake_data, fixture_for_fun, logger): 368 | print("FAIL this stdout is captured") 369 | print("FAIL this stderr is captured", file=sys.stderr) 370 | logger.warning("FAIL this log is captured") 371 | logger.critical(fake_data) 372 | logger.error(fake_data()) 373 | logger.warning(fake_data()) 374 | logger.info(fake_data()) 375 | logger.debug(fake_data()) 376 | assert 0 377 | 378 | 379 | def test_16_fail_compare_dicts_for_pytest_icdiff(logger): 380 | listofStrings = ["Hello", "hi", "there", "look", "at", "this"] 381 | listofInts = [7, 10, 45, 23, 18, 77] 382 | assert len(listofStrings) == len(listofInts) 383 | assert listofStrings == listofInts 384 | 385 | 386 | import random 387 | import time 388 | 389 | import pytest 390 | 391 | 392 | @pytest.mark.flaky(reruns=0) 393 | def test_flaky_0(logger): 394 | # time.sleep(random.uniform(0.1, 0.75)) 395 | assert random.choice([True, False]) 396 | 397 | 398 | @pytest.mark.flaky(reruns=1) 399 | def test_flaky_1(logger): 400 | # time.sleep(random.uniform(0.1, 0.75)) 401 | assert random.choice([True, False]) 402 | 403 | 404 | @pytest.mark.flaky(reruns=2) 405 | def test_flaky_2(logger): 406 | # time.sleep(random.uniform(0.1, 0.75)) 407 | assert random.choice([True, False]) 408 | 409 | 410 | @pytest.mark.flaky(reruns=3) 411 | def test_flaky_3(logger): 412 | # time.sleep(random.uniform(0.1, 0.75)) 413 | assert random.choice([True, False]) 414 | 415 | 416 | @pytest.mark.flaky(reruns=2) 417 | def test_flaky_always_fail(logger): 418 | # time.sleep(random.uniform(0.1, 0.75)) 419 | assert False 420 | 421 | 422 | @pytest.mark.flaky(reruns=2) 423 | def test_flaky_always_pass(logger): 424 | # time.sleep(random.uniform(0.1, 0.75)) 425 | assert True 426 | -------------------------------------------------------------------------------- /demo-tests/test_2.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | LOG_LEVELS = ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL") 4 | 5 | # These tests have the same name as in testfile test_1.py 6 | # Used for testing ability to handle duplicate test names 7 | # across different files 8 | 9 | 10 | @pytest.fixture 11 | def error_fixture(fake_data, logger): 12 | logger.critical(fake_data) 13 | logger.error(fake_data) 14 | logger.warning(fake_data) 15 | logger.info(fake_data) 16 | logger.debug(fake_data) 17 | assert 0 18 | 19 | 20 | def test_a_ok(fake_data, logger): 21 | print("This test doesn't have much to say, but it passes - ok!!") 22 | logger.critical(fake_data) 23 | logger.error(fake_data) 24 | 
logger.warning(fake_data) 25 | logger.info(fake_data) 26 | logger.debug(fake_data) 27 | 28 | 29 | def test_b_fail(fake_data, logger): 30 | logger.critical(fake_data) 31 | logger.error(fake_data()) 32 | logger.warning(fake_data()) 33 | logger.info(fake_data()) 34 | logger.debug(fake_data()) 35 | assert 0 36 | 37 | 38 | def test_c_error(fake_data, error_fixture, logger): 39 | print("This test should be marked as an Error.") 40 | logger.critical(fake_data) 41 | logger.error(fake_data()) 42 | logger.warning(fake_data()) 43 | logger.info(fake_data()) 44 | logger.debug(fake_data()) 45 | pass 46 | 47 | 48 | def test_d1_skip(fake_data, logger): 49 | logger.critical(fake_data) 50 | logger.error(fake_data()) 51 | logger.warning(fake_data()) 52 | logger.info(fake_data()) 53 | logger.debug(fake_data()) 54 | pytest.skip("Skipping this test with inline call to 'pytest.skip()'.") 55 | 56 | 57 | 58 | 59 | @pytest.mark.skip(reason="Skipping this test with decorator.") 60 | def test_d2_skip(fake_data, logger): 61 | logger.critical(fake_data) 62 | logger.error(fake_data()) 63 | logger.warning(fake_data()) 64 | logger.info(fake_data()) 65 | logger.debug(fake_data()) 66 | 67 | 68 | def test_d3_skip(fake_data, logger): 69 | logger.critical(fake_data) 70 | logger.error(fake_data()) 71 | logger.warning(fake_data()) 72 | logger.info(fake_data()) 73 | logger.debug(fake_data()) 74 | pytest.skip("Skipping this test with inline call to 'pytest.skip()'.") 75 | 76 | 77 | def test_e1(fake_data, logger): 78 | logger.critical(fake_data) 79 | logger.error(fake_data()) 80 | logger.warning(fake_data()) 81 | logger.info(fake_data()) 82 | logger.debug(fake_data()) 83 | 84 | 85 | @pytest.mark.xfail(reason="Marked as Xfail with decorator.") 86 | def test_e2(fake_data, logger): 87 | logger.critical(fake_data) 88 | logger.error(fake_data()) 89 | logger.warning(fake_data()) 90 | logger.info(fake_data()) 91 | logger.debug(fake_data()) 92 | 93 | 94 | def test_f1(fake_data, logger): 95 | logger.critical(fake_data) 96 | logger.error(fake_data()) 97 | logger.warning(fake_data()) 98 | logger.info(fake_data()) 99 | logger.debug(fake_data()) 100 | assert True 101 | -------------------------------------------------------------------------------- /demo-tests/test_basic.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import random 3 | import warnings 4 | 5 | import pytest 6 | 7 | 8 | def test_basic_pass_1(fake_data, logger): 9 | logger.debug(fake_data) 10 | logger.debug(fake_data) 11 | logger.debug(fake_data) 12 | logger.debug(fake_data) 13 | logger.debug(fake_data) 14 | logger.debug(fake_data) 15 | logger.debug(fake_data) 16 | logger.debug(fake_data) 17 | logger.debug(fake_data) 18 | logger.debug(fake_data) 19 | logger.debug(fake_data) 20 | assert True 21 | 22 | 23 | @pytest.fixture 24 | def error_fixt(fake_data, logger): 25 | raise Exception("Error in fixture") 26 | 27 | 28 | def test_basic_pass_3_error_in_fixture(error_fixt, logger, fake_data): 29 | logger.debug(fake_data) 30 | logger.debug(fake_data) 31 | logger.debug(fake_data) 32 | logger.debug(fake_data) 33 | logger.debug(fake_data) 34 | logger.debug(fake_data) 35 | logger.debug(fake_data) 36 | logger.debug(fake_data) 37 | logger.debug(fake_data) 38 | logger.debug(fake_data) 39 | logger.debug(fake_data) 40 | assert True 41 | 42 | 43 | def test_basic_fail_1(fake_data, logger): 44 | logger.debug(fake_data) 45 | logger.debug(fake_data) 46 | logger.debug(fake_data) 47 | logger.debug(fake_data) 48 | logger.debug(fake_data) 49 | 
logger.debug(fake_data) 50 | logger.debug(fake_data) 51 | logger.debug(fake_data) 52 | logger.debug(fake_data) 53 | logger.debug(fake_data) 54 | logger.debug(fake_data) 55 | assert 1 == 2 56 | 57 | 58 | 59 | 60 | @pytest.mark.skip(reason="Skipping this test with decorator.") 61 | def test_basic_skip(fake_data, logger): 62 | logger.debug(fake_data) 63 | logger.debug(fake_data) 64 | logger.debug(fake_data) 65 | logger.debug(fake_data) 66 | logger.debug(fake_data) 67 | logger.debug(fake_data) 68 | logger.debug(fake_data) 69 | logger.debug(fake_data) 70 | logger.debug(fake_data) 71 | logger.debug(fake_data) 72 | logger.debug(fake_data) 73 | assert True 74 | 75 | 76 | @pytest.mark.xfail() 77 | def test_basic_xfail(fake_data, logger): 78 | logger.debug(fake_data) 79 | logger.debug(fake_data) 80 | logger.debug(fake_data) 81 | logger.debug(fake_data) 82 | logger.debug(fake_data) 83 | logger.debug(fake_data) 84 | logger.debug(fake_data) 85 | logger.debug(fake_data) 86 | logger.debug(fake_data) 87 | logger.debug(fake_data) 88 | logger.debug(fake_data) 89 | assert False 90 | 91 | 92 | @pytest.mark.xfail() 93 | def test_basic_xpass(fake_data, logger): 94 | logger.debug(fake_data) 95 | logger.debug(fake_data) 96 | logger.debug(fake_data) 97 | logger.debug(fake_data) 98 | logger.debug(fake_data) 99 | logger.debug(fake_data) 100 | logger.debug(fake_data) 101 | logger.debug(fake_data) 102 | logger.debug(fake_data) 103 | logger.debug(fake_data) 104 | logger.debug(fake_data) 105 | assert True 106 | 107 | 108 | # Method and its test that causes warnings 109 | def api_v1(): 110 | warnings.warn(UserWarning("api v1, should use functions from v2")) 111 | return 1 112 | 113 | 114 | def test_basic_warning(): 115 | assert api_v1() == 1 116 | -------------------------------------------------------------------------------- /demo-tests/test_class.py: -------------------------------------------------------------------------------- 1 | class TestClass1: 2 | def test_one(self): 3 | x = "this" 4 | assert "h" in x 5 | 6 | def test_two(self): 7 | x = "hello" 8 | assert hasattr(x, "check") 9 | 10 | 11 | # Path: test_class2.py 12 | 13 | 14 | class TestClass2: 15 | def test_one(self): 16 | x = "that" 17 | assert "e" in x 18 | 19 | def test_two(self): 20 | x = "goodbye" 21 | assert x == "hello" 22 | -------------------------------------------------------------------------------- /demo-tests/test_errors.py: -------------------------------------------------------------------------------- 1 | import random 2 | import warnings 3 | 4 | import faker 5 | 6 | 7 | def fake_data(min: int = 30, max: int = 120) -> str: 8 | return faker.Faker().text(random.randint(min, max)) 9 | 10 | 11 | def test_1_fails_with_warnings(): 12 | print("This test fails with warnings. 
See Warnings section for info.") 13 | warnings.warn(Warning(fake_data(50, 200))) 14 | warnings.warn(UserWarning(fake_data(55, 205))) 15 | warnings.warn(DeprecationWarning(fake_data(55, 205))) 16 | warnings.warn(SyntaxWarning(fake_data(55, 205))) 17 | warnings.warn(RuntimeWarning(fake_data(55, 205))) 18 | warnings.warn(FutureWarning(fake_data(55, 205))) 19 | warnings.warn(PendingDeprecationWarning(fake_data(55, 205))) 20 | warnings.warn(ImportWarning(fake_data(55, 205))) 21 | warnings.warn(UnicodeWarning(fake_data(55, 205))) 22 | warnings.warn(BytesWarning(fake_data(55, 205))) 23 | warnings.warn(ResourceWarning(fake_data(55, 205))) 24 | warnings.warn((fake_data(55, 205))) 25 | assert False 26 | 27 | 28 | def test_2_passes_with_warnings(): 29 | print("This test passes, but with warnings. See Warnings section for info.") 30 | warnings.warn(Warning(fake_data(50, 200))) 31 | warnings.warn(UserWarning(fake_data(55, 205))) 32 | warnings.warn(DeprecationWarning(fake_data(55, 205))) 33 | warnings.warn(SyntaxWarning(fake_data(55, 205))) 34 | warnings.warn(RuntimeWarning(fake_data(55, 205))) 35 | warnings.warn(FutureWarning(fake_data(55, 205))) 36 | warnings.warn(PendingDeprecationWarning(fake_data(55, 205))) 37 | warnings.warn(ImportWarning(fake_data(55, 205))) 38 | warnings.warn(UnicodeWarning(fake_data(55, 205))) 39 | warnings.warn(BytesWarning(fake_data(55, 205))) 40 | warnings.warn(ResourceWarning(fake_data(55, 205))) 41 | warnings.warn((fake_data(55, 205))) 42 | assert True 43 | -------------------------------------------------------------------------------- /demo-tests/test_fold_pytesthtml.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import random 3 | import warnings 4 | 5 | import pytest 6 | 7 | 8 | def test_0_single(logger): 9 | logger.info( 10 | "[DETAILS][SUMMARY]Summary of the iteration[/SUMMARY]Verbose logs for iteration" 11 | " x: ...[/DETAILS]" 12 | ) 13 | print("Test has run.") 14 | -------------------------------------------------------------------------------- /demo-tests/test_fold_regex.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import random 3 | import warnings 4 | 5 | import pytest 6 | 7 | 8 | @pytest.fixture() 9 | def regex(): 10 | return r""" *->""" 11 | 12 | 13 | def test_0_regex_single(regex): 14 | print("Pre-foldy stuff...") 15 | print("Test Pass 1!") 16 | print(regex) 17 | print("More pre-foldy stuff...") 18 | print("This line marks the middle of the RegEx fold.") 19 | print(regex) 20 | print("This line marks the middle of the RegEx fold.") 21 | print("This line marks the middle of the RegEx fold.") 22 | print("This line marks the middle of the RegEx fold.") 23 | print(regex) 24 | print(regex) 25 | print(regex) 26 | print(regex) 27 | print(regex) 28 | print(regex) 29 | print(regex) 30 | print(regex) 31 | print(regex) 32 | print(regex) 33 | print("Middle of the foldy stuff...") 34 | print("This line marks the middle of the RegEx fold.") 35 | print("This line marks the middle of the RegEx fold.") 36 | print("This line marks the middle of the RegEx fold.") 37 | print("Middle of the foldy stuff...") 38 | print("This line marks the middle of the RegEx fold.") 39 | print("This line marks the middle of the RegEx fold.") 40 | print("Middle of the foldy stuff...") 41 | print("This line marks the middle of the RegEx fold.") 42 | print("This line marks the middle of the RegEx fold.") 43 | print("Middle of the foldy stuff...") 44 | print("This line marks the 
middle of the RegEx fold.") 45 | print("This line marks the middle of the RegEx fold.") 46 | print("This line marks the middle of the RegEx fold.") 47 | print("Post-foldy stuff...") 48 | print("More post-foldy stuff...") 49 | assert True 50 | 51 | 52 | def test_0_regex_double(regex): 53 | print("Pre-foldy stuff...") 54 | print("Test Pass 1!") 55 | print(regex) 56 | print("More pre-foldy stuff...") 57 | print(f"​​​This line marks the beginning of the RegEx fold.") 58 | print("This line marks the middle of the RegEx fold.") 59 | print(regex) 60 | print("This line marks the middle of the RegEx fold.") 61 | print("This line marks the middle of the RegEx fold.") 62 | print("This line marks the middle of the RegEx fold.") 63 | print(regex) 64 | print(regex) 65 | print(regex) 66 | print(regex) 67 | print(regex) 68 | print(regex) 69 | print(regex) 70 | print(regex) 71 | print(regex) 72 | print(regex) 73 | print("Middle of the foldy stuff...") 74 | print("This line marks the middle of the RegEx fold.") 75 | print("This line marks the middle of the RegEx fold.") 76 | print("Middle of the foldy stuff...") 77 | print("This line marks the middle of the RegEx fold.") 78 | print("This line marks the middle of the RegEx fold.") 79 | print(f"​This line marks the end of the RegEx fold.") 80 | print("Post-foldy stuff...") 81 | print("More post-foldy stuff...") 82 | assert True 83 | 84 | 85 | def test_0_regex_logs(): 86 | print("This line is DEBUG level.") 87 | print("This line is INFO level.") 88 | print("This line is WARNING level.") 89 | print("This line is ERROR level.") 90 | print("This line is CRITICAL level.") 91 | print("This line is NOT ANY level.") 92 | print("This line is DEBUG level #1.") 93 | print("This line is DEBUG level #2.") 94 | print("This line is DEBUG level #3.") 95 | print("This line is DEBUG level #4.") 96 | print("This line is DEBUG level #5.") 97 | print("This line is NOT ANY level.") 98 | print("This line is INFO level #1.") 99 | print("This line is INFO level #2.") 100 | print("This line is INFO level #3.") 101 | print("This line is DEBUG level #1.") 102 | print("This line is DEBUG level #2.") 103 | print("This line is DEBUG level #3.") 104 | print("This line is NOT ANY level.") 105 | print("This line is WARNING level #1.") 106 | print("This line is WARNING level #2.") 107 | print("This line is WARNING level #3.") 108 | print("This line is INFO level #1.") 109 | print("This line is INFO level #2.") 110 | print("This line is INFO level #3.") 111 | print("This line is DEBUG level #1.") 112 | print("This line is DEBUG level #2.") 113 | print("This line is DEBUG level #3.") 114 | print("This line is NOT ANY level.") 115 | assert True 116 | 117 | 118 | def test_0_regex_logs_firstline(): 119 | print("This line is DEBUG level.") 120 | print("This line is INFO level.") 121 | print("This line is WARNING level.") 122 | print("This line is ERROR level.") 123 | print("This line is CRITICAL level.") 124 | print("This line is NOT ANY level.") 125 | print("This line is DEBUG level #1.") 126 | print("This line is DEBUG level #2.") 127 | print("This line is DEBUG level #3.") 128 | print("This line is DEBUG level #4.") 129 | print("This line is DEBUG level #5.") 130 | print("This line is NOT ANY level.") 131 | print("This line is INFO level #1.") 132 | print("This line is INFO level #2.") 133 | print("This line is INFO level #3.") 134 | print("This line is DEBUG level #1.") 135 | print("This line is DEBUG level #2.") 136 | print("This line is DEBUG level #3.") 137 | print("This line is NOT ANY 
level.") 138 | print("This line is WARNING level #1.") 139 | print("This line is WARNING level #2.") 140 | print("This line is WARNING level #3.") 141 | print("This line is INFO level #1.") 142 | print("This line is INFO level #2.") 143 | print("This line is INFO level #3.") 144 | print("This line is DEBUG level #1.") 145 | print("This line is DEBUG level #2.") 146 | print("This line is DEBUG level #3.") 147 | print("This line is NOT ANY level.") 148 | 149 | print("DEBUG level.") 150 | print("INFO level.") 151 | print("WARNING level.") 152 | print("ERROR level.") 153 | print("CRITICAL level.") 154 | print("NOT ANY level.") 155 | print("DEBUG level #1.") 156 | print("DEBUG level #2.") 157 | print("This line is DEBUG level #3.") 158 | print("DEBUG level #4.") 159 | print("DEBUG level #5.") 160 | print("NOT ANY level.") 161 | print("INFO level #1.") 162 | print("This line is INFO level #2.") 163 | print("This line is INFO level #3.") 164 | print("DEBUG level #1.") 165 | print("DEBUG level #2.") 166 | print("This line is DEBUG level #3.") 167 | print("This line is NOT ANY level.") 168 | print("This line is WARNING level #1.") 169 | print("This line is WARNING level #2.") 170 | print("This line is WARNING level #3.") 171 | print("This line is INFO level #1.") 172 | print("This line is INFO level #2.") 173 | print("This line is INFO level #3.") 174 | print("This line is DEBUG level #1.") 175 | print("DEBUG level #2.") 176 | print("DEBUG level #3.") 177 | print("This line is NOT ANY level.") 178 | assert True 179 | -------------------------------------------------------------------------------- /demo-tests/test_hoefling.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import pytest 4 | 5 | 6 | LOREM = """ 7 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec rutrum molestie arcu, id consectetur nisl commodo luctus. Curabitur ac eros efficitur, bibendum nibh volutpat, lobortis arcu. Nam gravida condimentum felis eu porttitor. Fusce at mi et purus condimentum facilisis et nec felis. Vivamus aliquet, elit eu sagittis bibendum, elit velit scelerisque tellus, et ornare lectus nulla eget diam. Mauris eleifend lectus vel ipsum vehicula malesuada. Ut vitae arcu ac elit bibendum elementum. Aliquam quis sagittis justo. Maecenas sit amet sodales velit. 8 | 9 | Curabitur vel felis finibus, auctor ligula ut, tempus leo. Aenean turpis lectus, aliquet non euismod a, sagittis non nisl. Nulla pretium ultricies augue ut egestas. Mauris vel ex nec lorem rutrum varius. Phasellus laoreet elit eu volutpat accumsan. Morbi justo ligula, accumsan sed efficitur sit amet, ornare vel massa. Proin a tempor risus, at imperdiet augue. Cras sed felis sagittis, pellentesque dui vel, luctus nunc. Sed sed elementum nibh. 10 | 11 | Sedsodalesauctorlaoreet.Pellentesqueinaccumsanleo, idultriciesarcu. Inegestas,arcuidtristiquepulvinar, nullasapienpharetraerat, amollisrisustortoractellus. Quisquetemporodioquislacusmaximus, vitaeconguejustomattis. Nuncsollicitudinaloremetvestibulum.Etiamquispretiumvelit. Nullavelduisitametnunclobortisviverra. Proinconsequat, purusetlaoreetfeugiat, risusvelitsagittismassa, acimperdietlectusdiamsitametodio. Vestibulumalaciniaquam. 
12 | """ 13 | 14 | 15 | def test_1(logger): 16 | logger.critical(LOREM) 17 | logger.error(LOREM) 18 | logger.warning(LOREM) 19 | logger.info(LOREM) 20 | logger.debug(LOREM) 21 | assert False 22 | 23 | 24 | def test_2(logger): 25 | logger.critical(LOREM) 26 | logger.error(LOREM) 27 | logger.warning(LOREM) 28 | logger.info(LOREM) 29 | logger.debug(LOREM) 30 | raise RuntimeError("call error") 31 | 32 | 33 | @pytest.fixture 34 | def f(logger): 35 | logger.critical(LOREM) 36 | logger.error(LOREM) 37 | logger.warning(LOREM) 38 | logger.info(LOREM) 39 | logger.debug(LOREM) 40 | raise RuntimeError("setup error") 41 | 42 | 43 | def test_3(f): 44 | logger.critical(LOREM) 45 | logger.error(LOREM) 46 | logger.warning(LOREM) 47 | logger.info(LOREM) 48 | logger.debug(LOREM) 49 | assert True 50 | -------------------------------------------------------------------------------- /demo-tests/test_issue_1004.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.fixture 5 | def bad(): 6 | yield 7 | raise Exception 8 | 9 | 10 | def test_foo(bad): 11 | assert True 12 | 13 | 14 | def test_foo2(bad): 15 | assert False 16 | 17 | 18 | @pytest.fixture 19 | def good(): 20 | yield 21 | pass 22 | 23 | 24 | def test_foo(good): 25 | assert True 26 | 27 | 28 | def test_foo2(good): 29 | assert False 30 | -------------------------------------------------------------------------------- /demo-tests/test_logging.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import random 3 | import warnings 4 | 5 | import pytest 6 | 7 | 8 | logging.basicConfig( 9 | format="%(asctime)s%(levelname)s:%(message)s", 10 | level=logging.DEBUG, 11 | datefmt="%m/%d/%Y %I:%M:%S %p", 12 | ) 13 | 14 | 15 | def fake_data() -> str: 16 | sentence = f"{random.choice(data.split('.'))}{random.choice(['.', ';', '?'])}" 17 | if sentence in [".", ";", "?"]: 18 | sentence = fake_data() 19 | if sentence.endswith(";"): 20 | sentence += fake_data() 21 | return sentence 22 | 23 | 24 | def fake_logs(level, num=1) -> None: 25 | for _ in range(num): 26 | fake = fake_data() 27 | exec(f"logging.{level}('{fake}')") 28 | 29 | 30 | def random_bulk_logs() -> None: 31 | levels = ["debug", "info", "warning", "error", "critical"] 32 | fake_logs(random.choice(levels), random.randint(4, 16)) 33 | 34 | 35 | def test_random_bulk_logs(): 36 | for _ in range(8): 37 | random_bulk_logs() 38 | assert True 39 | 40 | 41 | data = """Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.. Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. 
Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur. Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur. At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium voluptatum deleniti atque corrupti quos dolores et quas molestias excepturi sint occaecati cupiditate non provident, similique sunt in culpa qui officia deserunt mollitia animi, id est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio. Nam libero tempore, cum soluta nobis est eligendi optio cumque nihil impedit quo minus id quod maxime placeat facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet ut et voluptates repudiandae sint et molestiae non recusandae. Itaque earum rerum hic tenetur a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis doloribus asperiores repellat.""" 42 | -------------------------------------------------------------------------------- /demo-tests/test_random_results.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import random 3 | from dataclasses import dataclass 4 | 5 | import faker 6 | import pytest 7 | 8 | OUTCOMES = [ 9 | "failed", 10 | "passed", 11 | "skipped", 12 | "xfailed", 13 | "xpassed", 14 | "warning", 15 | "error", 16 | "rerun", 17 | ] 18 | WEIGHTS = [0.15, 0.60, 0.05, 0.03, 0.02, 0.07, 0.03, 0.05] 19 | 20 | 21 | @pytest.fixture 22 | def random_result_loglevel(faker): 23 | @dataclass 24 | class Result: 25 | outcome: str 26 | log_msg: str 27 | log_level: str 28 | 29 | choice = random.choices(OUTCOMES, WEIGHTS)[0] 30 | if choice == "passed": 31 | return Result( 32 | outcome=choice, log_msg=f"Passed: {faker.sentence()}", log_level="info" 33 | ) 34 | elif choice == "failed": 35 | return Result( 36 | outcome=choice, log_msg=f"Failed: {faker.paragraph()}", log_level="error" 37 | ) 38 | elif choice == "skipped": 39 | return Result( 40 | outcome=choice, log_msg=f"Skipped: {faker.sentence()}", log_level="info" 41 | ) 42 | elif choice == "xfailed": 43 | return Result( 44 | outcome=choice, log_msg=f"XFailed: {faker.sentence()}", log_level="info" 45 | ) 46 | elif choice == "xpassed": 47 | return Result( 48 | outcome=choice, log_msg=f"XPassed: {faker.sentence()}", log_level="info" 49 | ) 50 | elif choice == "warning": 51 | return Result( 52 | outcome=choice, log_msg=f"Warning: {faker.sentence()}", log_level="warning" 53 | ) 54 | elif choice == "error": 55 | return Result( 56 | outcome=choice, log_msg=f"Error: {faker.sentence()}", log_level="error" 57 | ) 58 | elif choice == "rerun": 59 | return Result( 60 | outcome=choice, log_msg=f"Rerun: {faker.sentence()}", log_level="info" 61 | ) 62 | 63 | 64 | def random_result_regex( 65 | pattern: str, seed: int = 0, num: int = 10, rarity: int = 100 66 | ) -> str: 67 | fake = faker.Faker() 68 | ret = "" 69 | random.seed(seed) 70 | for _ in range(10): 71 | text = random.choice( 72 | [f"{fake.sentence()}", f"{fake.paragraph()}", f"{fake.text()}"] 73 | ) 74 | for j, word in enumerate(text.split(" ")): 75 | if j % rarity == random.randint(1, rarity): 76 | ret += f" { pattern }{word} " 77 | else: 78 | ret += f" {word}" 79 | return ret 80 | 81 | 82 | def test_0(logger): 83 | logger.info(random_result_regex(" *-> ", 13, 
10, 30)) 84 | assert True 85 | 86 | 87 | def test_1(logger): 88 | logger.info(random_result_regex(" *-> ", 13, 10, 30)) 89 | assert True 90 | -------------------------------------------------------------------------------- /demo-tests/test_regex.py: -------------------------------------------------------------------------------- 1 | import random 2 | import pytest 3 | import logging 4 | import faker 5 | 6 | fake = faker.Faker() 7 | 8 | 9 | def generate_random_text_with_pattern(pattern, num_lines, pattern_interval): 10 | lines = [] 11 | for i in range(num_lines): 12 | if i % pattern_interval == 0: 13 | line = f"This line contains the pattern: {pattern}" 14 | line += "\n" 15 | line += f"This line contains no pattern" 16 | line += "\n" 17 | line += f"This line contains the pattern: {pattern}" 18 | line += "\n" 19 | line += f"This line contains the pattern: {pattern}" 20 | line += "\n" 21 | line += f"This line contains the pattern: {pattern}" 22 | line += "\n" 23 | else: 24 | line = "Random text line" 25 | lines.append(line) 26 | return lines 27 | 28 | 29 | # @pytest.mark.parametrize( 30 | # "pattern, num_lines, pattern_interval, expected", 31 | # [ 32 | # (" *->", 10, 3), 33 | 34 | # def test_random_text_with_pattern(capsys, logger): 35 | # 36 | # pattern = " *->" 37 | # num_lines = random.randint(10,100) 38 | # pattern_interval = 3 39 | 40 | # lines = generate_random_text_with_pattern(pattern, num_lines, pattern_interval) 41 | 42 | # for line in lines: 43 | # print(line) 44 | # logger.info(line) 45 | 46 | # captured = capsys.readouterr() 47 | # console_output = captured.out 48 | 49 | # assert pattern in console_output 50 | -------------------------------------------------------------------------------- /demo-tests/test_rerun_fixed.py: -------------------------------------------------------------------------------- 1 | import random 2 | import time 3 | 4 | import pytest 5 | 6 | new_outcome = False 7 | outcome = new_outcome 8 | 9 | 10 | @pytest.mark.flaky(reruns=2) 11 | def test_flaky_1(): 12 | global outcome, new_outcome 13 | outcome = new_outcome  # pick up the flag set by the previous (failed) attempt 14 | new_outcome = True  # the rerun reads True and passes 15 | assert outcome 16 | 17 | 18 | new_outcome_2 = False 19 | outcome_2 = new_outcome_2 20 | 21 | 22 | @pytest.mark.flaky(reruns=2) 23 | def test_flaky_2(): 24 | global outcome_2, new_outcome_2 25 | outcome_2 = new_outcome_2  # same fail-once-then-pass pattern, with its own flags 26 | new_outcome_2 = True 27 | assert outcome_2 28 | -------------------------------------------------------------------------------- /demo-tests/test_rerun_random.py: -------------------------------------------------------------------------------- 1 | import random 2 | import time 3 | 4 | import pytest 5 | 6 | 7 | @pytest.mark.flaky(reruns=0) 8 | def test_flaky_0(): 9 | # time.sleep(random.uniform(0.1, 0.75)) 10 | assert random.choice([True, False]) 11 | 12 | 13 | @pytest.mark.flaky(reruns=1) 14 | def test_flaky_1(): 15 | # time.sleep(random.uniform(0.1, 0.75)) 16 | assert random.choice([True, False]) 17 | 18 | 19 | @pytest.mark.flaky(reruns=2) 20 | def test_flaky_2(): 21 | # time.sleep(random.uniform(0.1, 0.75)) 22 | assert random.choice([True, False]) 23 | 24 | 25 | @pytest.mark.flaky(reruns=3) 26 | def test_flaky_3(): 27 | # time.sleep(random.uniform(0.1, 0.75)) 28 | assert random.choice([True, False]) 29 | 30 | 31 | @pytest.mark.flaky(reruns=2) 32 | def test_flaky_always_fail(): 33 | # time.sleep(random.uniform(0.1, 0.75)) 34 | assert False 35 | 36 | 37 | @pytest.mark.flaky(reruns=2) 38 | def test_flaky_always_pass(): 39 | # time.sleep(random.uniform(0.1, 0.75)) 40 | assert True 41 | 
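42 | 43 | 44 | # NOTE: the following test is a minimal, deterministic sketch of the rerun 45 | # mechanism the random tests above exercise. It is a hypothetical addition 46 | # (not part of the original demo suite) and assumes the pytest-rerunfailures 47 | # plugin that supplies @pytest.mark.flaky is installed, as elsewhere in this 48 | # file. Module-level state survives reruns within a single pytest session, 49 | # so the first attempt fails and the rerun passes. 50 | _attempts = {"count": 0} 51 | 52 | 53 | @pytest.mark.flaky(reruns=1) 54 | def test_flaky_passes_on_rerun(): 55 | _attempts["count"] += 1 56 | # Attempt 1: count == 1 -> assertion fails; rerun: count == 2 -> passes. 57 | assert _attempts["count"] > 1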
-------------------------------------------------------------------------------- /demo-tests/test_single_xpass_xfail.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import warnings 3 | 4 | import pytest 5 | 6 | 7 | @pytest.mark.xfail() 8 | def test0_xfail(logger): 9 | print("Test 0 XFail") 10 | logger.critical("CRITICAL") 11 | logger.error("ERROR") 12 | logger.warning("WARNING") 13 | logger.info("INFO") 14 | logger.debug("DEBUG") 15 | warnings.warn(Warning("You have been warned!")) 16 | assert False 17 | 18 | 19 | @pytest.mark.xfail() 20 | def test0_xpass(logger): 21 | print("Test 0 XPass") 22 | logger.critical("CRITICAL") 23 | logger.error("ERROR") 24 | logger.warning("WARNING") 25 | logger.info("INFO") 26 | logger.debug("DEBUG") 27 | warnings.warn(Warning("You have been warned!")) 28 | assert True 29 | -------------------------------------------------------------------------------- /demo-tests/test_sleep.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import random 3 | import warnings 4 | import time 5 | 6 | import pytest 7 | 8 | 9 | @pytest.mark.timeout(0) 10 | def test_timeout_sleep(logger): 11 | logger.warning("Sleeping 1...") 12 | time.sleep(1) 13 | # logger.warning("Sleeping 3...") 14 | # time.sleep(1) 15 | # logger.warning("Sleeping 3...") 16 | # time.sleep(1) 17 | # logger.warning("Sleeping 3...") 18 | # time.sleep(1) 19 | # logger.warning("Sleeping 3...") 20 | # time.sleep(1) 21 | assert True 22 | -------------------------------------------------------------------------------- /demo-tests/test_warnings.py: -------------------------------------------------------------------------------- 1 | import random 2 | import warnings 3 | 4 | import faker 5 | 6 | 7 | def fake_data(min: int = 30, max: int = 120) -> str: 8 | return faker.Faker().text(random.randint(min, max)) 9 | 10 | 11 | def test_1_fails_with_warnings(): 12 | print("This test fails with warnings. See Warnings section for info.") 13 | warnings.warn(Warning(fake_data(50, 200))) 14 | warnings.warn(UserWarning(fake_data(55, 205))) 15 | warnings.warn(DeprecationWarning(fake_data(55, 205))) 16 | warnings.warn(SyntaxWarning(fake_data(55, 205))) 17 | warnings.warn(RuntimeWarning(fake_data(55, 205))) 18 | warnings.warn(FutureWarning(fake_data(55, 205))) 19 | warnings.warn(PendingDeprecationWarning(fake_data(55, 205))) 20 | warnings.warn(ImportWarning(fake_data(55, 205))) 21 | warnings.warn(UnicodeWarning(fake_data(55, 205))) 22 | warnings.warn(BytesWarning(fake_data(55, 205))) 23 | warnings.warn(ResourceWarning(fake_data(55, 205))) 24 | warnings.warn((fake_data(55, 205))) 25 | assert False 26 | 27 | 28 | def test_2_passes_with_warnings(): 29 | print("This test passes, but with warnings. 
See Warnings section for info.") 30 | warnings.warn(Warning(fake_data(50, 200))) 31 | warnings.warn(UserWarning(fake_data(55, 205))) 32 | warnings.warn(DeprecationWarning(fake_data(55, 205))) 33 | warnings.warn(SyntaxWarning(fake_data(55, 205))) 34 | warnings.warn(RuntimeWarning(fake_data(55, 205))) 35 | warnings.warn(FutureWarning(fake_data(55, 205))) 36 | warnings.warn(PendingDeprecationWarning(fake_data(55, 205))) 37 | warnings.warn(ImportWarning(fake_data(55, 205))) 38 | warnings.warn(UnicodeWarning(fake_data(55, 205))) 39 | warnings.warn(BytesWarning(fake_data(55, 205))) 40 | warnings.warn(ResourceWarning(fake_data(55, 205))) 41 | warnings.warn((fake_data(55, 205))) 42 | assert True 43 | -------------------------------------------------------------------------------- /demo-tests/test_xpass_xfail.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import random 3 | import sys 4 | import warnings 5 | 6 | import faker 7 | import pytest 8 | 9 | LOG_LEVELS = ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL") 10 | 11 | 12 | def test_xfail_by_inline(logger): 13 | logger.debug("Debug level log line") 14 | logger.info("info level log line") 15 | logger.warning("Warning level log line") 16 | logger.error("Error level log line") 17 | logger.critical("Critical level log line") 18 | pytest.xfail("xfailing this test with 'pytest.xfail()'") 19 | 20 | assert False 21 | 22 | 23 | @pytest.mark.xfail(reason="Here's my reason for xfail: None") 24 | def test_xfail_by_decorator(logger): 25 | logger.debug("Debug level log line") 26 | logger.info("info level log line") 27 | logger.warning("Warning level log line") 28 | logger.error("Error level log line") 29 | logger.critical("Critical level log line") 30 | 31 | assert False 32 | -------------------------------------------------------------------------------- /log_config.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import logging.config 3 | from datetime import datetime, timezone 4 | 5 | 6 | class UTCFormatter(logging.Formatter): 7 | def formatTime(self, record, datefmt=None): 8 | dt = datetime.fromtimestamp(record.created, tz=timezone.utc) 9 | t = dt.strftime("%Y-%m-%d %H:%M:%S") 10 | return f"{t} UTC" 11 | 12 | 13 | logging_config = { 14 | "version": 1, 15 | "disable_existing_loggers": False, 16 | "formatters": { 17 | "utc": { 18 | "format": " %(asctime)s %(levelname)-8s %(name)-12s %(message)s", 19 | "()": UTCFormatter, # special nomenclature to use custom formatter 20 | }, 21 | }, 22 | "handlers": { 23 | "console": { 24 | "class": "logging.StreamHandler", 25 | "level": "INFO", 26 | "formatter": "utc", # Use the UTCFormatter here 27 | "stream": "ext://sys.stderr", 28 | }, 29 | }, 30 | "root": { 31 | "level": "INFO", 32 | "handlers": ["console"], 33 | }, 34 | "loggers": {}, 35 | } 36 | 37 | # The "utc" formatter is attached to the console handler by dictConfig() below, 38 | # so it does not need to be set manually on the root logger's handlers. 39 | 40 | for logger_name, logger_config in logging_config["loggers"].items(): 41 | logger_config["handlers"] = ["console"] 42 | logger_config["propagate"] = False # Avoid double logging 43 | 44 | logging.config.dictConfig(logging_config) 45 | -------------------------------------------------------------------------------- /misc/RELEASE_INSTRUCTIONS: -------------------------------------------------------------------------------- 1 | RELEASE_INSTRUCTIONS 2 | -------------------- 3 | 4 | - First (VERY IMPORTANT) verify 
your Python and Pip versions so you know what you're building with. This is especially important if you're using pyenv to manage multiple Python versions. Current recommendation is latest Python 3.9 (3.9.16). 5 | $ `which python` --version 6 | $ `which pip` --version 7 | 8 | If one of these points to a version different than the one you think you're dealing with, you can specify it manually on the command line like this: 9 | $ python3.9 --version 10 | $ pip3.9 --version 11 | 12 | - Verify package installs from source and runs correctly in editable mode: 13 | $ git clone git@github.com:jeffwright13/pytest-tui.git dev-dir 14 | $ cd dev-dir 15 | $ pyenv local 3.9.16 16 | $ python -m venv venv 17 | $ source venv/bin/activate 18 | $ pip install pip-tools 19 | $ pip-compile --no-emit-index-url reqts/requirements.in && pip-compile --no-emit-index-url reqts/requirements-dev.in 20 | $ pip install -r reqts/requirements.txt 21 | $ pip install -e . 22 | $ pytest --tui 23 | $ tui 24 | $ tuih 25 | 26 | - Install/upgrade build & release tools: 27 | $ `which pip` install --upgrade setuptools wheel twine 28 | 29 | - Clear out old builds: 30 | $ rm dist/* 31 | 32 | - Build: 33 | $ `which python` setup.py sdist bdist_wheel 34 | 35 | - Publish to TestPyPI: 36 | $ `which python` -m twine upload --repository testpypi dist/* 37 | 38 | - Test the installation in a fresh directory by running the 'test.sh' bash script. 39 | Verify TUI and HTML page for each iteration/Python version. 40 | $ cd ~/coding/pytest-tui (or the top level of the repo) 41 | $ deactivate 42 | $ pyenv local 3.8.10 3.9.9 43 | : 44 | $ ./testing/bash/test.sh --version 3.8.10 45 | $ ./testing/bash/test.sh --version 3.9.9 46 | $ source venv/bin/activate 47 | $ cd testing/robot 48 | $ robot . 49 | 50 | - Verify basic functionality without plugin: 51 | $ pytest --version 52 | $ pytest --co 53 | $ pytest -k test0 54 | 55 | - Verify basic functionality with plugin: 56 | $ pytest --tui --version 57 | $ pytest --tui --co 58 | $ pytest --tui -k test0 59 | $ tui 60 | $ tuih 61 | 62 | - Publish to PyPI: 63 | $ deactivate 64 | $ cd ~/coding/dev-dir 65 | $ source venv/bin/activate 66 | $ python -m twine upload dist/* 67 | -------------------------------------------------------------------------------- /misc/outcome_questions.txt: -------------------------------------------------------------------------------- 1 | What is the difference between an Outcome (which, per comments in /pytest/src/_pytest/reports.py, can only be one of Pass/Fail/Skip), and a result (which could be any of those, PLUS XF, XP, Err, Warn, or even Rerun or possibly something else if a plugin is installed)? 2 | There are multiple third-party plugins that don't get the same tallies as the console stats: pytest-html, pytest-json, pytest-tui. Their tallies don't agree with what Pytest itself claims on the final `=== short test summary info ===` line in the console at the end of a test run. And if anything should be correct, it's that, right? 
To wit: 3 | console: 17 failed, 19 passed, 5 skipped, 7 xfailed, 4 xpassed, 21 warnings, 9 errors 4 | pytest-html: 17 failed, 17 passed, 5 skipped, 7 xfailed, 4 xpassed, 8 errors () 5 | pytest-json: 16 failed, 17 passed, 5 skipped, 7 xfailed, 4 xpassed, 9 errors (57) 6 | pytest-tui: 16 failed, 17 passed, 5 skipped, 7 xfailed, 4 xpassed, 9 errors 7 | Discrepancies: 8 | pytest-tui misses FAILED: 9 | test_issue_1004.py::test_foo3 <= https://github.com/pytest-dev/pytest/issues/1004 10 | pytest-tui misses PASSED: 11 | test_hoefling.py::test_4 12 | Confusion on this is not just mine: 13 | https://stackoverflow.com/questions/51711988/how-can-i-access-the-overall-test-result-of-a-pytest-test-run-during-runtime 14 | ... 15 | According to outcomes.py, an outcome could be Xfail? Or at least it has a class dedicated to it just as do Fail and Skipped? And where is the class for Pass? 16 | -------------------------------------------------------------------------------- /noxfile.py: -------------------------------------------------------------------------------- 1 | import nox 2 | 3 | # nox.options.sessions = ["install_requirements", "install_requirements_plus_package", "install_requirements_dev", "install_requirements_dev_plus_package", "test_with_pytest", "tests"] 4 | 5 | nox.options.sessions = [ 6 | "install_requirements_user", 7 | "install_requirements_user_plus_pytest_tui_package", 8 | "install_requirements_dev", 9 | "install_requirements_dev_plus_pytest_tui_package", 10 | "test_with_pytest_tui", 11 | "test_with_pytest_tui_logfold", 12 | "test_with_pytest_tui_regexfold", 13 | "test_with_selenium_base_logfold", 14 | "test_with_selenium_base_regexfold", 15 | ] 16 | 17 | 18 | 19 | @nox.session(python=["3.8", "3.9", "3.10", "3.11"], tags=["install", "basic"]) 20 | def install_requirements_user(session): 21 | session.install("-r", "reqts/requirements.txt") 22 | 23 | 24 | @nox.session(python=["3.8", "3.9", "3.10", "3.11"], tags=["install"]) 25 | def install_requirements_user_plus_pytest_tui_package(session): 26 | install_requirements_user(session) 27 | session.install(".") 28 | 29 | 30 | @nox.session(python=["3.8", "3.9", "3.10", "3.11"], tags=["install", "dev"]) 31 | def install_requirements_dev(session): 32 | session.install("-r", "reqts/requirements-dev.txt") 33 | 34 | 35 | @nox.session(python=["3.8", "3.9", "3.10", "3.11"], tags=["install", "dev"]) 36 | def install_requirements_dev_plus_pytest_tui_package(session): 37 | install_requirements_dev(session) 38 | session.install(".") 39 | 40 | 41 | @nox.session(python=["3.8", "3.9", "3.10", "3.11"], tags=["tui"]) 42 | def test_with_pytest_tui(session): 43 | session.install("-r", "reqts/requirements-dev.txt") 44 | session.install(".") 45 | session.run("pytest", "demo-tests/", "--tui") 46 | 47 | 48 | @nox.session(python=["3.8", "3.9", "3.10", "3.11"], tags=["tui", "fold", "log"]) 49 | def test_with_pytest_tui_logfold(session): 50 | install_requirements_dev_plus_pytest_tui_package(session) 51 | session.run("pytest", "demo-tests/", "--tui", "--tui-fold-level=debug") 52 | 53 | 54 | @nox.session(python=["3.8", "3.9", "3.10", "3.11"], tags=["tui", "fold", "log"]) 55 | def test_with_selenium_base_logfold(session): 56 | test_with_pytest_tui_logfold(session) 57 | session.install("--upgrade", "-Iv", "rich==13.3.0") 58 | session.install("seleniumbase") 59 | session.run("pytest", "testing/sb") 60 | 61 | 62 | @nox.session(python=["3.8", "3.9", "3.10", "3.11"], tags=["tui", "fold", "regex"]) 63 | def test_with_pytest_tui_regexfold(session): 64 | 
install_requirements_dev_plus_pytest_tui_package(session) 65 | session.run("pytest", "demo-tests/", "--tui", "--tui-fold-regex=​​​;​") # ZWS & ZWJ 66 | 67 | 68 | @nox.session(python=["3.8", "3.9", "3.10", "3.11"], tags=["tui", "fold", "regex"]) 69 | def test_with_selenium_base_regexfold(session): 70 | test_with_pytest_tui_regexfold(session) 71 | session.install("--upgrade", "-Iv", "rich==13.3.0") 72 | session.install("seleniumbase") 73 | session.run("pytest", "testing/sb") 74 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | minversion = 6.2.5 3 | addopts = --ignore=tui_files --ignore=sb/ --ignore=stuff --ignore=misc --ignore=log_experiments 4 | norecursedirs = tui_files/* sb/* stuff/* misc/* log_experiments/* 5 | 6 | ; log_format = %(levelname)-8s %(asctime)s %(name)-30s %(message)s 7 | ; log_date_format = %Y-%m-%d %H:%M:%S 8 | 9 | ; ; set this to True for 'live log' output 10 | ; log_cli = True 11 | ; log_cli_level = DEBUG 12 | 13 | ; pytest-tui options 14 | ; tui = False 15 | ; tui_html = tuireport.html 16 | ; tui_regexfile = [] 17 | 18 | markers = 19 | test_tui_with_pytester: mark tests used for testing the pytest-tui plugin itself 20 | -------------------------------------------------------------------------------- /pytest_tui/__init__.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from single_source import get_version 4 | 5 | __version__ = get_version(__name__, Path(__file__).parent.parent / "setup.py") 6 | -------------------------------------------------------------------------------- /pytest_tui/log_experiments/debug_context.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | 4 | class debug_context: 5 | """Debug context to trace any function calls inside the context""" 6 | 7 | def __init__(self, name): 8 | self.name = name 9 | 10 | def __enter__(self): 11 | print("Entering Debug Decorated func") 12 | # Set the trace function to the trace_calls function 13 | # So all events are now traced 14 | sys.settrace(self.trace_calls) 15 | 16 | def __exit__(self, *args, **kwargs): 17 | # Stop tracing all events 18 | sys.settrace(None) 19 | 20 | def trace_calls(self, frame, event, arg): 21 | # We want to only trace our call to the decorated function 22 | if event != "call": 23 | return 24 | elif frame.f_code.co_name != self.name: 25 | return 26 | # return the trace function to use when you go into that 27 | # function call 28 | return self.trace_lines 29 | 30 | def trace_lines(self, frame, event, arg): 31 | # If you want to print local variables each line 32 | # keep the check for the event 'line' 33 | # If you want to print local variables only on return 34 | # check only for the 'return' event 35 | if event not in ["line", "return"]: 36 | return 37 | co = frame.f_code 38 | func_name = co.co_name 39 | line_no = frame.f_lineno 40 | filename = co.co_filename 41 | local_vars = frame.f_locals 42 | print(" {0} {1} {2} locals: {3}".format(func_name, event, line_no, local_vars)) 43 | 44 | 45 | def debug_decorator(func): 46 | """Debug decorator to call the function within the debug context""" 47 | 48 | def decorated_func(*args, **kwargs): 49 | with debug_context(func.__name__): 50 | return_value = func(*args, **kwargs) 51 | return return_value 52 | 53 | return decorated_func 54 | 55 | 56 | if __name__ == "__main__": 57 | 58 | 
@debug_decorator 59 | def testing(): 60 | a = 10 61 | b = 20 62 | c = a + b 63 | 64 | testing() 65 | -------------------------------------------------------------------------------- /pytest_tui/log_experiments/debug_html_logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Any, Tuple, Union 3 | 4 | from pytest_tui.utils import TUI_FOLD_TITLE_BEGIN, TUI_FOLD_TITLE_END 5 | 6 | 7 | class DebugLogger(logging.getLoggerClass()): 8 | """A logger that logs everything DEBUG level and above""" 9 | 10 | def __init__(self, name: str, level: int = logging.DEBUG) -> None: 11 | self.level = level 12 | super().__init__(name, level) 13 | self.foldable_message = "" 14 | 15 | def send_foldable_message(self) -> None: 16 | super()._log(self.level, self.foldable_message, None, None) 17 | 18 | def title(self, title: str = None) -> None: 19 | t = title or "Folded Message" 20 | self.foldable_message += f"
{t}" 21 | # message += f"{msg}
" 22 | 23 | def content(self, msg: str = None, end: bool = False) -> None: 24 | self.foldable_message += f"{msg}" 25 | if end: 26 | self.foldable_message += "" 27 | self.send_foldable_message() 28 | 29 | def inv_start(self, msg: str = None) -> None: 30 | self.foldable_message += f"
{msg}" 31 | 32 | 33 | class DebugLogHandler(logging.Handler): 34 | """A handler that only logs DEBUG level messages""" 35 | 36 | def __init__(self, level: int = logging.DEBUG) -> None: 37 | self.level = level 38 | super().__init__(level=level) 39 | 40 | def emit(self, record: logging.LogRecord) -> None: 41 | if record.levelno != logging.DEBUG: 42 | return 43 | else: 44 | print(record.msg) 45 | 46 | 47 | class DebugLoggers: 48 | """ 49 | Custom logger class to add HTML markup to debug log messages. 50 | 51 | Attributes: 52 | debug_logger_class: custom class to use for handling debug msgs 53 | 54 | Methods: 55 | localize(name=__name__, level=logging.WARNING): 56 | localize the loggers to the current module 57 | get_loggers(): 58 | return the loggers, ready to use 59 | 60 | Usage: 61 | >>> from pytest_tui.debug_html_logger import DebugLoggers 62 | >>> debug_loggers = DebugLoggers() 63 | >>> debug_loggers.localize(name=__name__, ) 64 | >>> debug_logger = debug_loggers.get_loggers() 65 | 66 | >>> debug_logger.debug(msg) # should show up as folded in HTML reports 67 | """ 68 | 69 | # def __init__(self): 70 | # self.debug_logger_class = DebugLogger 71 | 72 | # def localize(self, name: str=__name__, level: Union[str, int]=logging.DEBUG): 73 | # self.debug_logger_class.name = name 74 | # self.debug_logger_class.setLevel(10) 75 | # self.debug_logger_class.addHandler(DebugLogHandler) 76 | 77 | # def get_logger(self): 78 | # logging.setLoggerClass(DebugLogger) 79 | # logger = logging.getLogger(logging.getLoggerClass().__name__) 80 | # logger.addHandler(DebugLogHandler) 81 | 82 | def __init__(self): 83 | logging.setLoggerClass(DebugLogger) 84 | self.logger = logging.getLogger("DebugLogger") 85 | self.logger.setLevel(logging.DEBUG) 86 | self.logger.addHandler(DebugLogHandler()) 87 | 88 | def get_debug_logger(self): 89 | return self.logger 90 | -------------------------------------------------------------------------------- /pytest_tui/log_experiments/foldable_loggers.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Any, Tuple, Union 3 | 4 | from pytest_tui.utils import TUI_FOLD_TITLE_BEGIN, TUI_FOLD_TITLE_END 5 | 6 | 7 | class FoldableLogger(logging.getLoggerClass()): 8 | """A logger that logs everything DEBUG level and above""" 9 | 10 | def __init__(self, name: str, level: int) -> None: 11 | self.level = level 12 | super().__init__(name, level) 13 | self.foldable_message = "" 14 | 15 | def send_foldable_message(self) -> None: 16 | super()._log(self.level, self.foldable_message, None, None) 17 | 18 | def title(self, title: str = None) -> None: 19 | t = title or "Folded Message" 20 | self.foldable_message += f"
{t}" 21 | # message += f"{msg}
" 22 | 23 | def content(self, msg: str = None, end: bool = False) -> None: 24 | self.foldable_message += f"{msg}" 25 | if end: 26 | self.foldable_message += "
" 27 | self.send_foldable_message() 28 | 29 | def inv_start(self, msg: str = None) -> None: 30 | self.foldable_message += f"
{msg}" 31 | 32 | 33 | class FoldableLogHandler(logging.Handler): 34 | """A handler that only logs DEBUG level messages""" 35 | 36 | def __init__(self, level: int) -> None: 37 | self.level = level 38 | super().__init__(level=level) 39 | 40 | def emit(self, record: logging.LogRecord) -> None: 41 | if record.levelno != self.level: 42 | return 43 | else: 44 | print(record.msg) 45 | 46 | 47 | class FoldableLoggers: 48 | """ 49 | Custom logger class to add HTML markup to log messages. 50 | 51 | Attributes: 52 | debug_logger_class: custom class to use for handling debug msgs 53 | 54 | Methods: 55 | localize(name=__name__, level=logging.WARNING): 56 | localize the loggers to the current module 57 | get_loggers(): 58 | return the loggers, ready to use 59 | 60 | Usage: 61 | >>> from pytest_tui.debug_html_logger import DebugLoggers 62 | >>> debug_loggers = DebugLoggers() 63 | >>> debug_loggers.localize(name=__name__, ) 64 | >>> debug_logger = debug_loggers.get_loggers() 65 | 66 | >>> debug_logger.debug(msg) # should show up as folded in HTML reports 67 | """ 68 | 69 | # def __init__(self): 70 | # self.debug_logger_class = DebugLogger 71 | 72 | # def localize(self, name: str=__name__, level: Union[str, int]=logging.DEBUG): 73 | # self.debug_logger_class.name = name 74 | # self.debug_logger_class.setLevel(10) 75 | # self.debug_logger_class.addHandler(DebugLogHandler) 76 | 77 | # def get_logger(self): 78 | # logging.setLoggerClass(DebugLogger) 79 | # logger = logging.getLogger(logging.getLoggerClass().__name__) 80 | # logger.addHandler(DebugLogHandler) 81 | 82 | def __init__(self): 83 | logging.setLoggerClass(FoldableLogger) 84 | self.logger = logging.getLogger("FoldableLogger") 85 | self.logger.setLevel(logging.DEBUG) 86 | self.logger.addHandler(FoldableLogHandler()) 87 | 88 | def get_debug_logger(self): 89 | return self.logger 90 | -------------------------------------------------------------------------------- /pytest_tui/log_experiments/test_debug_logger_html.py: -------------------------------------------------------------------------------- 1 | # import logging 2 | 3 | # logger = logging.getLogger(__name__) 4 | 5 | # from pytest_tui.debug_html_logger import DebugLogger, DebugLoggers, DebugLogHandler 6 | 7 | # # debug_loggers = DebugLoggers() 8 | # # debug_loggers.localize(name=__name__) 9 | # # debug_logger = debug_loggers.get_debug_loggers() 10 | 11 | # debug_logger = DebugLoggers().get_debug_logger() 12 | # print() 13 | 14 | 15 | # def lorem() -> str: 16 | # return """Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. 17 | 18 | # Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. 
Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci.""" 19 | 20 | 21 | # def calculate(a, b, c) -> float: 22 | # """Calculate the three-phase power factor of a, b and c""" 23 | # return (a**3 / 100 + b**3 / 100 + c**3 / 100) ** 0.5 24 | 25 | 26 | # def test_debug_logger_html(): 27 | # """Test debug logger with HTML report.""" 28 | # logger.warning("Hello, world!") 29 | # logger.warning(lorem()) 30 | # debug_logger.title() 31 | # debug_logger.content(lorem()) 32 | # debug_logger.content(calculate(1, 2, 3)) 33 | # debug_logger.content(lorem(), end=True) 34 | # assert True 35 | -------------------------------------------------------------------------------- /pytest_tui/log_experiments/test_me.py: -------------------------------------------------------------------------------- 1 | # from haggis.logs import add_logging_level 2 | 3 | # from pytest_tui.utils import ( 4 | # TUI_FOLD_CONTENT_BEGIN, 5 | # TUI_FOLD_CONTENT_END, 6 | # TUI_FOLD_TITLE, 7 | # ) 8 | # import logging 9 | # logger = logging.getLogger() 10 | 11 | # def test_me(): 12 | # print("Test me!") 13 | # logger.critical("CRITICAL") 14 | # logger.error("ERROR") 15 | # logger.warning("WARNING") 16 | # logger.info("INFO") 17 | # logger.debug("DEBUG") 18 | # assert True 19 | 20 | 21 | # add_logging_level("TUI_FOLD", logging.DEBUG - 5) 22 | 23 | # def test0_verbose_1(): 24 | # logger.tui_fold(TUI_FOLD_TITLE) 25 | # logger.info("===> * Running next test iteration * <===") 26 | # logger.tui_fold(TUI_FOLD_CONTENT_BEGIN) 27 | # logger.info("st Next iteration: setting soc on SiteEss to 50%, current_soc is 50%") 28 | # logger.info("st Sending P & Q signals of -4.84 MW, -4.84 MVAr using CommandRealPowerSignal and CommandReactivePowerSignal") 29 | # logger.info("t Setting soc on SiteEss to 10% - current effectiveSoc is 50%") 30 | # logger.info("commanded_p is below ac_charge_plimit, set to ac_charge_plimit: -4.84 MW") 31 | # logger.info("P is preferred over Q, adjust q_limit") 32 | # logger.info("q_limit calculated to be 0 VAr") 33 | # logger.info("commanded_q is below -q_limit, set to 0 VAr") 34 | # logger.info("PowerDevice.totalReactivePowerSetPoint is 0 VAr; expected 0 VAr") 35 | # logger.info("PowerDevice.totalRealPowerSetPoint is -4.84 MW; expected -9.68 MW") 36 | # logger.info("PowerDevice.totalRealPowerSetPoint is -4.84 MW; expected -9.68 MW") 37 | # logger.info("PowerDevice.totalRealPowerSetPoint is -4.84 MW; expected -9.68 MW") 38 | # logger.info("PowerDevice.totalRealPowerSetPoint is -4.84 MW; expected -9.68 MW") 39 | # logger.info("PowerDevice.totalRealPowerSetPoint is -4.84 MW; expected -9.68 MW") 40 | # logger.info("PowerDevice.totalRealPowerSetPoint is -4.84 MW; expected -9.68 MW") 41 | # logger.info("PowerDevice.totalRealPowerSetPoint is -9.68 MW; expected -9.68 MW") 42 | # logger.info("Verifying site responds with expected power, using input params:") 43 | # logger.info(" Site rated real power: 4.84 MW") 44 | # logger.info(" Site rated reactive power: 4.84 MVAr") 45 | # logger.info(" Site ac discharge real power limit: 4.84 MW") 46 | # logger.info(" Site ac charge real power limit: 4.84 MW") 47 | # logger.info(" Commanded real power: -9.68 MW") 48 | # logger.info(" Commanded reactive power: 0 VAr") 49 | # logger.info("Commanded Real Power was at or below site's acChargePowerLimit of -4.84 MW, so expecting -4.84 MW") 50 | # logger.info("Commanded Reactive Power was within Site limits, so expecting Q of 0 VAr") 51 | # logger.info("Verified site real power -4.84 MW is within 5% of expected real power 
-4.84 MW") 52 | # logger.info("Verified site reactive power 0 VAr is within 5% of expected reactive power 0 VAr") 53 | # logger.info("True") 54 | # logger.tui_fold(TUI_FOLD_CONTENT_END) 55 | -------------------------------------------------------------------------------- /pytest_tui/log_experiments/tui_logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from pytest_tui.utils import ( 4 | TUI_FOLD_CONTENT_BEGIN, 5 | TUI_FOLD_CONTENT_END, 6 | TUI_FOLD_TITLE_BEGIN, 7 | TUI_FOLD_TITLE_END, 8 | ) 9 | 10 | # import logging 11 | # loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict] 12 | 13 | # from haggis import logs as haggis_logs 14 | # haggis_logs.add_logging_level("FOLD", logging.WARNING + 1) 15 | # logging.getLogger(__name__) 16 | 17 | 18 | # class CustomHandler(logging.Handler): 19 | # def __init__(self): 20 | # super().__init__() 21 | 22 | # def emit(self, record): 23 | # log_entry = self.format(record) 24 | # # Here you could define how you want to handle the log entry, 25 | # # for example, writing to a file, sending an email, or printing to the console. 26 | # print(f"GO AHEAD {log_entry}") 27 | 28 | 29 | class TitleLogger(logging.getLoggerClass()): 30 | """Custom logger class to add TUI fold title formatting to log messages.""" 31 | 32 | def __init__(self, name, level=logging.ERROR): 33 | super().__init__(name, level) 34 | # self.addHandler(CustomHandler) 35 | 36 | def _log(self, level, msg, args, exc_info=None): 37 | msg = TUI_FOLD_TITLE_BEGIN + msg + TUI_FOLD_TITLE_END 38 | super()._log(level, msg, args, exc_info) 39 | 40 | 41 | class ContentBeginLogger(logging.getLoggerClass()): 42 | """Custom logger class to add TUI fold content-begin formatting to log messages.""" 43 | 44 | def __init__(self, name, level=logging.ERROR): 45 | super().__init__(name, level) 46 | # self.addHandler(CustomHandler) 47 | 48 | def _log(self, level, msg, args, exc_info=None): 49 | msg = TUI_FOLD_CONTENT_BEGIN + msg 50 | super()._log(level, msg, args, exc_info) 51 | 52 | 53 | class ContentLogger(logging.getLoggerClass()): 54 | """Custom logger class to add TUI fold interim content formatting to log messages. 55 | """ 56 | 57 | def __init__(self, name, level=logging.ERROR): 58 | super().__init__(name, level) 59 | # self.addHandler(CustomHandler) 60 | 61 | def _log(self, level, msg, args, exc_info=None): 62 | super()._log(level, msg, args, exc_info) 63 | 64 | 65 | class ContentEndLogger(logging.getLoggerClass()): 66 | """Custom logger class to add TUI fold content-end formatting to log messages.""" 67 | 68 | def __init__(self, name, level=logging.ERROR): 69 | super().__init__(name, level) 70 | # self.addHandler(CustomHandler) 71 | 72 | def _log(self, level, msg, args, exc_info=None): 73 | msg = msg + TUI_FOLD_CONTENT_END 74 | super()._log(level, msg, args, exc_info) 75 | 76 | 77 | class TuiLoggers: 78 | """ 79 | Custom logger class to add TUI fold formatting to log messages. 80 | This formatting is consumed by html_gen.py in order to generate 81 | a folded line for each log message usng the TUI fold feature. 
82 | 83 | Attributes: 84 | title_logger (TitleLogger): logger for TUI fold title 85 | content_begin_logger (ContentBeginLogger): logger for TUI fold content-begin 86 | content_logger (ContentLogger): logger for TUI fold interim content 87 | content_end_logger (ContentEndLogger): logger for TUI fold content-end 88 | 89 | Methods: 90 | localize(name=__name__, level=logging.WARNING): localize the loggers to the current module 91 | get_tui_loggers(): return the loggers 92 | 93 | Usage: 94 | >>> from pytest_tui.tui_logger import TuiLoggers 95 | >>> tui_loggers = TuiLoggers() 96 | >>> tui_loggers.localize(name=__name__, ) 97 | >>> title_logger, content_begin_logger, content_logger, content_end_logger = tui_loggers.get_tui_loggers() 98 | 99 | >>> logger.info(msg) # non-folding, regular log message, if defined in test 100 | >>> title_logger.warning("Title") # 101 | >>> content_begin_logger.warning("Content Begin") # non-folding, regular log message 102 | >>> content_logger.warning("Content") # non-folding, regular log message 103 | >>> content_end_logger.warning("Content End") # non-folding, regular log message 104 | """ 105 | 106 | def __init__(self): 107 | self.title_logger = TitleLogger 108 | self.content_begin_logger = ContentBeginLogger 109 | self.content_logger = ContentLogger 110 | self.content_end_logger = ContentEndLogger 111 | 112 | def localize(self, name=__name__, level=logging.WARNING): 113 | for tui_logger in self.get_tui_loggers(): 114 | tui_logger.name = name 115 | tui_logger.setLevel(level) 116 | 117 | def get_tui_loggers(self): 118 | tui_loggers = [] 119 | for tui_logger in ( 120 | TitleLogger, 121 | ContentBeginLogger, 122 | ContentLogger, 123 | ContentEndLogger, 124 | ): 125 | logging.setLoggerClass(tui_logger) 126 | tui_loggers.append(logging.getLogger(logging.getLoggerClass().__name__)) 127 | return tui_loggers 128 | -------------------------------------------------------------------------------- /pytest_tui/resources/scripts.js: -------------------------------------------------------------------------------- 1 | function openTab(evt, tabName) { var i, tabcontent, tablinks; tabcontent = document.getElementsByClassName("tabcontent"); for (i = 0; i < tabcontent.length; i++) { tabcontent[i].style.display = "none"; } tablinks = document.getElementsByClassName("tablinks"); for (i = 0; i < tablinks.length; i++) { tablinks[i].className = tablinks[i].className.replace(" active", ""); } document.getElementById(tabName).style.display = "block"; evt.currentTarget.className += " active"; } 2 | 3 | var coll = document.getElementsByClassName("collapsible"); var i; for (i = 0; i < coll.length; i++) { coll[i].addEventListener("click", function() { this.classList.toggle("active"); var content = this.nextElementSibling; if (content.style.display === "block") { content.style.display = "none"; } else { content.style.display = "block"; } }); } 4 | 5 | document.getElementById("defaultOpen").click(); 6 | 7 | var i, acc = document.getElementsByClassName("accordion-open"); for (i=0; i{t.hidden=!t.hidden}))} 14 | 15 | function toggleDetailsElements(){const t=document.querySelectorAll("details");t.forEach((t=>{t.hidden=!t.hidden}))} 16 | 17 | function toggleAllDetails() { const details = document.getElementsByTagName("details"); for (let i = 0; i < details.length; i++) { if (details[i].hasAttribute("open")) { details[i].removeAttribute("open"); } else { details[i].setAttribute("open", ""); } } } 18 | 19 | function removeColor() { var all = document.getElementsByTagName("*"); for (var i=0, 
max=all.length; i < max; i++) { all[i].style.color = 'initial'; all[i].style.backgroundColor = 'initial'; } } 20 | 21 | var originalColors = new Map(); function removeOrRestoreColor() { if (originalColors.size === 0) { var all = document.getElementsByTagName("*"); for (var i=0, max=all.length; i < max; i++) { var computedStyle = window.getComputedStyle(all[i]); originalColors.set(all[i], computedStyle.color); all[i].style.color = "black"; all[i].style.backgroundColor = "white"; } } else { originalColors.forEach((color, element) => { element.style.color = color; }); originalColors.clear(); } } 22 | 23 | var overrideStyleSheet = null; function removeOrRestoreColor() { if (overrideStyleSheet === null) { overrideStyleSheet = document.createElement('style'); document.head.appendChild(overrideStyleSheet); var sheet = overrideStyleSheet.sheet; sheet.insertRule("* { color: black !important; background-color: white !important; }", 0); } else { overrideStyleSheet.parentNode.removeChild(overrideStyleSheet); overrideStyleSheet = null; } } 24 | 25 | function invertColors() { document.body.classList.toggle('invert-colors'); } 26 | 27 | var originalColor = document.body.style.backgroundColor; function toggleBackground() { var body = document.getElementsByTagName("body")[0]; var currentColor = body.style.backgroundColor; if (currentColor === "" || currentColor === "white") { body.style.backgroundColor = "black"; } else if (currentColor === "black") { body.style.backgroundColor = originalColor; } else { body.style.backgroundColor = "white"; } } 28 | 29 | var preSection = document.getElementById("preSection"); var originalBackgroundColor = preSection.style.backgroundColor; var originalTextColor = preSection.style.color; function togglePreBackground() { preSection.classList.toggle("pre-bg-black"); if (preSection.classList.contains("pre-bg-black")) { preSection.style.backgroundColor = originalBackgroundColor; preSection.style.color = originalTextColor; } else { preSection.style.backgroundColor = "black"; preSection.style.color = "white"; } } 30 | 31 | var isBackgroundBlack = false; function togglePreBackground() { var preElements = document.querySelectorAll("pre"); for (var i = 0; i < preElements.length; i++) { if (isBackgroundBlack) { preElements[i].style.backgroundColor = "#E6E6E6"; preElements[i].style.color = "#000000"; } else { preElements[i].style.backgroundColor = "#000000"; preElements[i].style.color = "#FFFFFF"; } } isBackgroundBlack = !isBackgroundBlack; } 32 | -------------------------------------------------------------------------------- /pytest_tui/stuff/__main__.py: -------------------------------------------------------------------------------- 1 | # import configparser 2 | # import platform 3 | # import subprocess 4 | # import sys 5 | 6 | # from blessed import Terminal 7 | # from bullet import Bullet, Input, YesNo, colors 8 | # from rich import print 9 | 10 | # from pytest_tui.tui import main as tui 11 | # from pytest_tui.utils import CONFIGFILE 12 | 13 | 14 | # class DefaultConfig: 15 | # def __init__(self): 16 | # self.tui_autolaunch = False 17 | # self.html_autolaunch = False 18 | 19 | 20 | # class Cli: 21 | # def __init__(self): 22 | # self.term = Terminal() 23 | # self.default_config = DefaultConfig() 24 | # self.config_parser = configparser.ConfigParser() 25 | # try: 26 | # self.config_parser.read(CONFIGFILE) 27 | # except Exception: 28 | # self.apply_default_config() 29 | 30 | # def _clear_terminal(self) -> None: 31 | # if platform.system() == "Windows": 32 | # subprocess.Popen("cls", 
shell=True).communicate() 33 | # else: 34 | # print("\033c", end="") 35 | 36 | # def _enter_to_continue(self) -> None: 37 | # print("\nPress ENTER to continue...") 38 | # with self.term.cbreak(): 39 | # while not self.term.inkey(timeout=0.01).lower(): 40 | # pass 41 | # self._clear_terminal() 42 | 43 | # def _prompt(self) -> str: 44 | # return "==> pytest-tui configuration menu <==\n" 45 | 46 | # def menu_items(self) -> dict: 47 | # return { 48 | # "Display current config settings": self.display_current_config, 49 | # "Apply default config settings": self.apply_default_config_plus_enter, 50 | # "Set TUI autolaunch option": self.set_tui_autolaunch, 51 | # "Set HTML autolaunch option": self.set_html_autolaunch, 52 | # "Quit": self.quit, 53 | # } 54 | 55 | # def read_config_file(self) -> None: 56 | # try: 57 | # self.config_parser.read(CONFIGFILE) 58 | # except Exception: 59 | # self.apply_default_config() 60 | # if not ( 61 | # self.config_parser.has_section("TUI") 62 | # and self.config_parser.has_section("HTML") 63 | # ): 64 | # self.apply_default_config() 65 | # self.tui_autolaunch = self.config_parser.getboolean("TUI", "tui_autolaunch") 66 | # self.html_autolaunch = self.config_parser.getboolean("HTML", "html_autolaunch") 67 | 68 | # def apply_default_config_plus_enter(self) -> None: 69 | # """Wrapper around 'apply_default_config' to allow for Enter prompt afterwards.""" 70 | # self.apply_default_config() 71 | # self._enter_to_continue() 72 | 73 | # def apply_default_config(self) -> None: 74 | # """Generate default config, store in local config_parser instance, and write it to file.""" 75 | # if not self.config_parser.has_section("TUI"): 76 | # self.config_parser.add_section("TUI") 77 | # self.config_parser.set( 78 | # "TUI", "tui_autolaunch", str(self.default_config.tui_autolaunch) 79 | # ) 80 | # if not self.config_parser.has_section("HTML"): 81 | # self.config_parser.add_section("HTML") 82 | # self.config_parser.set( 83 | # "HTML", "html_autolaunch", str(self.default_config.html_autolaunch) 84 | # ) 85 | # self.write_current_config_to_file() 86 | 87 | # def display_current_config(self) -> None: 88 | # """Print the current config settings to the terminal.""" 89 | # self._clear_terminal() 90 | # for section in self.config_parser.sections(): 91 | # print(f"{section}:") 92 | # for option in self.config_parser.options(section): 93 | # print(f" {option}: {self.config_parser.get(section, option)}") 94 | # self._enter_to_continue() 95 | 96 | # def write_current_config_to_file(self) -> None: 97 | # """Write the current config settings to the config file.""" 98 | # with open(CONFIGFILE, "w+") as configfile: 99 | # self.config_parser.write(configfile) 100 | 101 | # def set_tui_autolaunch(self) -> None: 102 | # self._clear_terminal() 103 | # tui_autolaunch = YesNo( 104 | # "Autolaunch TUI when test session is complete: " 105 | # ).launch() 106 | # if not self.config_parser.has_section("TUI"): 107 | # self.config_parser.add_section("TUI") 108 | # self.config_parser.set("TUI", "tui_autolaunch", str(tui_autolaunch)) 109 | # self.write_current_config_to_file() 110 | # self._enter_to_continue() 111 | 112 | # def set_html_autolaunch(self) -> None: 113 | # self._clear_terminal() 114 | # html_autolaunch = YesNo("Auto-launch HTML when generated: ").launch() 115 | # if not self.config_parser.has_section("HTML"): 116 | # self.config_parser.add_section("HTML") 117 | # self.config_parser.set("HTML", "html_autolaunch", str(html_autolaunch)) 118 | # self.write_current_config_to_file() 119 | # 
self._enter_to_continue() 120 | 121 | # def quit(self) -> None: 122 | # self._clear_terminal() 123 | # print("Exiting...") 124 | # sys.exit() 125 | 126 | # def run(self) -> None: 127 | # self._clear_terminal() 128 | # self.cli = Bullet( 129 | # # prompt = self._prompt(), 130 | # choices=list(self.menu_items().keys()), 131 | # bullet="==> ", 132 | # word_color=colors.bright(colors.foreground["white"]), 133 | # word_on_switch=colors.bright(colors.foreground["black"]), 134 | # background_color=colors.bright(colors.background["black"]), 135 | # background_on_switch=colors.bright(colors.background["white"]), 136 | # ) 137 | # self.menu_item = self.cli.launch() 138 | # while True: 139 | # self._clear_terminal() 140 | # self.menu_items()[self.menu_item]() 141 | # self.menu_item = self.cli.launch() 142 | 143 | 144 | # def tui_run(): 145 | # tui() 146 | 147 | 148 | # def tui_launch(): 149 | # tuicli = Cli() 150 | # tuicli.read_config_file() 151 | # if tuicli.config_parser["TUI"].get("tui_autolaunch") == "True": 152 | # tui() 153 | 154 | 155 | # def tui_config(): 156 | # tuicli = Cli() 157 | # tuicli.read_config_file() 158 | # tuicli.run() 159 | 160 | 161 | # if __name__ == "__main__": 162 | # tui_config() 163 | -------------------------------------------------------------------------------- /pytest_tui/stuff/devnotes.md: -------------------------------------------------------------------------------- 1 | ### Foldable (collapsible) HTML using logging class overrides to mark up certain lines ### 2 | 3 | Summary: 4 | - See pytest_tui/tui_logger.py for class definitions 5 | - Used in some tests in /demo_tests (e.g. test_tui_logger.py, test_me.py) 6 | - html_gen.py has been augmented to process the inline markup and display it 7 | - To run, simply `pytest --tui demo_tests/test_tui_logger.py`, then `tuih` to view the HTML output (currently written to separate outfile NEW.html) 8 | - Issues: 9 | - there are issues (e.g. losing
 markup within the foldable sections)
10 |  - would like to use invisible chars so no one can see the inline markup when monitoring standard console or output files
11 |  - have to figure out how to deal with pytest's own output handling (stdlog/stdout/stderr)
12 | 
13 | Test Ideas:
14 | - in IPython, instantiate a basic logger and check logging messages of various levels with various log levels configured on the logger
15 | - try with pytest and see how things change:
16 |   - default behavior
17 |   - -rA
18 |   - live logs
19 | - do the same as above with subclassed tui_logger instances
20 | - try with spread code; nested-call code
21 | 
22 | 
23 | NOTES 2022-03-19
24 | ================
25 | - File pytest_tui/tui_logger.py contains the TuiLogger class, which is a subclass of logging.Logger
26 | - Also included is the TuiLoggers class, aggregating the various TuiLogger classes, along with class methods used to initialize the custom classes in a test (see the usage sketch below these notes):
27 |   -
28 | 
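A minimal usage sketch, distilled from the docstring in tui_logger.py. The import path is an assumption here: these notes reference pytest_tui/tui_logger.py, but the module currently lives under pytest_tui/log_experiments/.

```python
import logging

# Assumed import path; adjust if the module is moved out of log_experiments/.
from pytest_tui.log_experiments.tui_logger import TuiLoggers


def test_folded_output():
    tui_loggers = TuiLoggers()
    tui_loggers.localize(name=__name__, level=logging.WARNING)
    title, begin, content, end = tui_loggers.get_tui_loggers()

    title.warning("Iteration setup")       # becomes the clickable fold title
    begin.warning("begin folded content")  # marks the start of the folded region
    content.warning("an interim line")     # ordinary line inside the fold
    end.warning("end folded content")      # marks the end of the folded region
    assert True
```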


--------------------------------------------------------------------------------
/pytest_tui/stuff/nonprintable_​​characters.md:
--------------------------------------------------------------------------------
 1 | ​​​​
 2 |  ​​
 3 | ​​​
 4 | 
 5 | 
 6 | 
 7 | # 3 consecutive ZWS
 8 | # TUI_FOLD_TITLE_BEGIN = r"""​​​"""
 9 | # # 1 BOM followed by 1 ZWS
10 | # TUI_FOLD_TITLE_END = r"""​"""
11 | # # 3 consecutive ZWJ
12 | # TUI_FOLD_CONTENT_BEGIN = r"""‍‍‍"""
13 | # # 1 BOM followed by 1 ZWJ
14 | # TUI_FOLD_CONTENT_END = r"""‍"""
15 | 
16 | 
17 | 
18 | # Zero Width Space (ZWS): U+200B
19 | "​"
20 | # Zero Width Joiner (ZWJ): U+200D
21 | "‍"
22 | # Byte Order Mark (BOM): U+FEFF
23 | ""
24 | # Object Replacement Character: U+FFFC
25 | ""
26 | 
27 | # 3 consecutive ZWS
28 | TUI_FOLD_TITLE_BEGIN = r"""​​​"""
29 | 
30 | # 1 BOM followed by 1 ZWS
31 | TUI_FOLD_TITLE_END = r"""​"""
32 | 
33 | # 3 consecutive ZWJ
34 | TUI_FOLD_CONTENT_BEGIN = r"""‍‍‍"""
35 | 
36 | # 1 BOM followed by 1 ZWJ
37 | TUI_FOLD_CONTENT_END = r"""‍"""
38 | 
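Because all four markers are zero-width, they are invisible in most editors. A small hypothetical helper (not part of the repo) can render them as codepoints for verification:

```python
# The four fold markers, written as escapes so they are visible in source.
TUI_FOLD_TITLE_BEGIN = "\u200b\u200b\u200b"    # 3 consecutive ZWS
TUI_FOLD_TITLE_END = "\ufeff\u200b"            # 1 BOM followed by 1 ZWS
TUI_FOLD_CONTENT_BEGIN = "\u200d\u200d\u200d"  # 3 consecutive ZWJ
TUI_FOLD_CONTENT_END = "\ufeff\u200d"          # 1 BOM followed by 1 ZWJ


def show_codepoints(s: str) -> str:
    """Render each character of a string as a 'U+XXXX' codepoint."""
    return " ".join(f"U+{ord(ch):04X}" for ch in s)


print(show_codepoints(TUI_FOLD_TITLE_BEGIN))  # U+200B U+200B U+200B
print(show_codepoints(TUI_FOLD_CONTENT_END))  # U+FEFF U+200D
```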


--------------------------------------------------------------------------------
/pytest_tui/stuff/nonprintable_​​characters.txt:
--------------------------------------------------------------------------------
 1 | ​​​​
 2 |  ​​
 3 | ​​​
 4 | 
 5 | 
 6 | 
 7 | # Zero Width Space (ZWS): U+200B
 8 | "​"
 9 | # Zero Width Joiner (ZWJ): U+200D
10 | "‍"
11 | # Byte Order Mark (BOM): U+FEFF
12 | ""
13 | # Object Replacement Character: U+FFFC
14 | ""
15 | 
16 | # 3 consecutive ZWS
17 | ZWS_X3 = r"""​​​"""
18 | 
19 | # 1 BOM followed by 1 ZWS
20 | BOM_ZWS = r"""​"""
21 | 
22 | # 3 consecutive ZWJ
23 | ZWJ_X3 = r"""‍‍‍"""
24 | 
25 | # 1 BOM followed by 1 ZWJ
26 | BOM_ZWJ = r"""‍"""
27 | 


--------------------------------------------------------------------------------
/pytest_tui/stuff/tui_regexes_npc.txt:
--------------------------------------------------------------------------------
1 | r"""​​​"""
2 | r"""‍‍‍"""
3 | 


--------------------------------------------------------------------------------
/pytest_tui/tree_control.py:
--------------------------------------------------------------------------------
  1 | from __future__ import annotations
  2 | 
  3 | from typing import Generic, Iterator, NewType, TypeVar
  4 | 
  5 | import rich.repr
  6 | from rich.console import RenderableType
  7 | from rich.padding import PaddingDimensions
  8 | from rich.text import Text, TextType
  9 | from rich.tree import Tree
 10 | from textual import events
 11 | from textual._types import MessageTarget
 12 | from textual.message import Message
 13 | from textual.messages import CursorMove
 14 | from textual.reactive import Reactive
 15 | from textual.widget import Widget
 16 | 
 17 | NodeID = NewType("NodeID", int)
 18 | 
 19 | 
 20 | NodeDataType = TypeVar("NodeDataType")
 21 | 
 22 | 
 23 | @rich.repr.auto
 24 | class TreeNode(Generic[NodeDataType]):
 25 |     def __init__(
 26 |         self,
 27 |         parent: TreeNode[NodeDataType] | None,
 28 |         node_id: NodeID,
 29 |         control: TreeControl,
 30 |         tree: Tree,
 31 |         label: TextType,
 32 |         data: NodeDataType,
 33 |     ) -> None:
 34 |         self.parent = parent
 35 |         self.id = node_id
 36 |         self._control = control
 37 |         self._tree = tree
 38 |         self.label = label
 39 |         self.data = data
 40 |         self.loaded = False
 41 |         self._expanded = False
 42 |         self._empty = False
 43 |         self._tree.expanded = False
 44 |         self.children: list[TreeNode] = []
 45 | 
 46 |     def __rich_repr__(self) -> rich.repr.Result:
 47 |         yield "id", self.id
 48 |         yield "label", self.label
 49 |         yield "data", self.data
 50 | 
 51 |     @property
 52 |     def control(self) -> TreeControl:
 53 |         return self._control
 54 | 
 55 |     @property
 56 |     def empty(self) -> bool:
 57 |         return self._empty
 58 | 
 59 |     @property
 60 |     def expanded(self) -> bool:
 61 |         return self._expanded
 62 | 
 63 |     @property
 64 |     def is_cursor(self) -> bool:
 65 |         return self.control.cursor == self.id and self.control.show_cursor
 66 | 
 67 |     @property
 68 |     def tree(self) -> Tree:
 69 |         return self._tree
 70 | 
 71 |     @property
 72 |     def next_node(self) -> TreeNode[NodeDataType] | None:
 73 |         """The next node in the tree, or None if at the end."""
 74 | 
 75 |         if self.expanded and self.children:
 76 |             return self.children[0]
 77 |         else:
 78 |             sibling = self.next_sibling
 79 |             if sibling is not None:
 80 |                 return sibling
 81 | 
 82 |             node = self
 83 |             while True:
 84 |                 if node.parent is None:
 85 |                     return None
 86 |                 sibling = node.parent.next_sibling
 87 |                 if sibling is not None:
 88 |                     return sibling
 89 |                 else:
 90 |                     node = node.parent
 91 | 
 92 |     @property
 93 |     def previous_node(self) -> TreeNode[NodeDataType] | None:
 94 |         """The previous node in the tree, or None if at the end."""
 95 | 
 96 |         sibling = self.previous_sibling
 97 |         if sibling is not None:
 98 | 
 99 |             def last_sibling(node) -> TreeNode[NodeDataType]:
100 |                 if node.expanded and node.children:
101 |                     return last_sibling(node.children[-1])
102 |                 else:
103 |                     return (
104 |                         node.children[-1] if (node.children and node.expanded) else node
105 |                     )
106 | 
107 |             return last_sibling(sibling)
108 | 
109 |         if self.parent is None:
110 |             return None
111 |         return self.parent
112 | 
113 |     @property
114 |     def next_sibling(self) -> TreeNode[NodeDataType] | None:
115 |         """The next sibling, or None if last sibling."""
116 |         if self.parent is None:
117 |             return None
118 |         iter_siblings = iter(self.parent.children)
119 |         try:
120 |             for node in iter_siblings:
121 |                 if node is self:
122 |                     return next(iter_siblings)
123 |         except StopIteration:
124 |             pass
125 |         return None
126 | 
127 |     @property
128 |     def previous_sibling(self) -> TreeNode[NodeDataType] | None:
129 |         """Previous sibling or None if first sibling."""
130 |         if self.parent is None:
131 |             return None
132 |         iter_siblings = iter(self.parent.children)
133 |         sibling: TreeNode[NodeDataType] | None = None
134 | 
135 |         for node in iter_siblings:
136 |             if node is self:
137 |                 return sibling
138 |             sibling = node
139 |         return None
140 | 
141 |     async def expand(self, expanded: bool = True) -> None:
142 |         self._expanded = expanded
143 |         self._tree.expanded = expanded
144 |         self._control.refresh(layout=True)
145 | 
146 |     async def toggle(self) -> None:
147 |         await self.expand(not self._expanded)
148 | 
149 |     async def add(self, label: TextType, data: NodeDataType) -> None:
150 |         await self._control.add(self.id, label, data=data)
151 |         self._control.refresh(layout=True)
152 |         self._empty = False
153 | 
154 |     def __rich__(self) -> RenderableType:
155 |         return self._control.render_node(self)
156 | 
157 | 
158 | @rich.repr.auto
159 | class TreeClick(Generic[NodeDataType], Message, bubble=True):
160 |     def __init__(self, sender: MessageTarget, node: TreeNode[NodeDataType]) -> None:
161 |         self.node = node
162 |         super().__init__(sender)
163 | 
164 |     def __rich_repr__(self) -> rich.repr.Result:
165 |         yield "node", self.node
166 | 
167 | 
168 | class TreeControl(Generic[NodeDataType], Widget):
169 |     def __init__(
170 |         self,
171 |         label: TextType,
172 |         data: NodeDataType,
173 |         *,
174 |         name: str | None = None,
175 |         padding: PaddingDimensions = (1, 1),
176 |     ) -> None:
177 |         self.data = data
178 | 
179 |         self.underlined = False
180 | 
181 |         self.id = NodeID(0)
182 |         self.nodes: dict[NodeID, TreeNode[NodeDataType]] = {}
183 |         self._tree = Tree(label)
184 |         self.root: TreeNode[NodeDataType] = TreeNode(
185 |             None, self.id, self, self._tree, label, data
186 |         )
187 | 
188 |         self._tree.label = self.root
189 |         self.nodes[NodeID(self.id)] = self.root
190 |         super().__init__(name=name)
191 |         self.padding = padding
192 | 
193 |     hover_node: Reactive[NodeID | None] = Reactive(None)
194 |     cursor: Reactive[NodeID] = Reactive(NodeID(0), layout=True)
195 |     cursor_line: Reactive[int] = Reactive(0, repaint=False)
196 |     show_cursor: Reactive[bool] = Reactive(False, layout=True)
197 | 
198 |     def watch_show_cursor(self, value: bool) -> None:
199 |         self.emit_no_wait(CursorMove(self, self.cursor_line))
200 | 
201 |     def watch_cursor_line(self, value: int) -> None:
202 |         if self.show_cursor:
203 |             self.emit_no_wait(CursorMove(self, value + self.gutter.top))
204 | 
205 |     async def add(
206 |         self,
207 |         node_id: NodeID,
208 |         label: TextType,
209 |         data: NodeDataType,
210 |     ) -> None:
211 |         parent = self.nodes[node_id]
212 |         self.id = NodeID(self.id + 1)
213 |         child_tree = parent._tree.add(label)
214 |         child_node: TreeNode[NodeDataType] = TreeNode(
215 |             parent, self.id, self, child_tree, label, data
216 |         )
217 |         parent.children.append(child_node)
218 |         child_tree.label = child_node
219 |         self.nodes[self.id] = child_node
220 | 
221 |         self.refresh(layout=True)
222 | 
223 |     def find_cursor(self) -> int | None:
224 |         """Find the line location for the cursor node."""
225 | 
226 |         node_id = self.cursor
227 |         line = 0
228 | 
229 |         stack: list[Iterator[TreeNode[NodeDataType]]]
230 |         stack = [iter([self.root])]
231 | 
232 |         pop = stack.pop
233 |         push = stack.append
234 |         while stack:
235 |             iter_children = pop()
236 |             try:
237 |                 node = next(iter_children)
238 |             except StopIteration:
239 |                 continue
240 |             else:
241 |                 if node.id == node_id:
242 |                     return line
243 |                 line += 1
244 |                 push(iter_children)
245 |                 if node.children and node.expanded:
246 |                     push(iter(node.children))
247 |         return None
248 | 
249 |     def render(self) -> RenderableType:
250 |         return self._tree
251 | 
252 |     def render_node(self, node: TreeNode[NodeDataType]) -> RenderableType:
253 |         label = (
254 |             Text(node.label, no_wrap=True, overflow="ellipsis")
255 |             if isinstance(node.label, str)
256 |             else node.label
257 |         )
258 |         if node.id == self.hover_node:
259 |             label.stylize("underline")
260 |         else:
261 |             label.stylize("not underline")
262 |         label.apply_meta({"@click": f"click_label({node.id})", "tree_node": node.id})
263 |         return label
264 | 
265 |     async def action_click_label(self, node_id: NodeID) -> None:
266 |         node = self.nodes[node_id]
267 |         self.cursor = node.id
268 |         self.cursor_line = self.find_cursor() or 0
269 |         self.show_cursor = False
270 |         await self.post_message(TreeClick(self, node))
271 | 
272 |     async def on_mouse_move(self, event: events.MouseMove) -> None:
273 |         self.hover_node = event.style.meta.get("tree_node")
274 | 
275 |     async def on_key(self, event: events.Key) -> None:
276 |         await self.dispatch_key(event)
277 | 
278 |     async def key_down(self, event: events.Key) -> None:
279 |         event.stop()
280 |         await self.cursor_down()
281 | 
282 |     async def key_up(self, event: events.Key) -> None:
283 |         event.stop()
284 |         await self.cursor_up()
285 | 
286 |     async def key_enter(self, event: events.Key) -> None:
287 |         cursor_node = self.nodes[self.cursor]
288 |         event.stop()
289 |         await self.post_message(TreeClick(self, cursor_node))
290 | 
291 |     async def cursor_down(self) -> None:
292 |         if not self.show_cursor:
293 |             self.show_cursor = True
294 |             return
295 |         cursor_node = self.nodes[self.cursor]
296 |         next_node = cursor_node.next_node
297 |         if next_node is not None:
298 |             self.cursor_line += 1
299 |             self.cursor = next_node.id
300 | 
301 |     async def cursor_up(self) -> None:
302 |         if not self.show_cursor:
303 |             self.show_cursor = True
304 |             return
305 |         cursor_node = self.nodes[self.cursor]
306 |         previous_node = cursor_node.previous_node
307 |         if previous_node is not None:
308 |             self.cursor_line -= 1
309 |             self.cursor = previous_node.id
310 | 
311 | 
312 | if __name__ == "__main__":
313 |     from textual.app import App
314 | 
315 |     class TreeApp(App):
316 |         async def on_mount(self, event: events.Mount) -> None:
317 |             await self.view.dock(TreeControl("Tree Root", data="foo"))
318 | 
319 |         async def handle_tree_click(self, message: TreeClick) -> None:
320 |             if message.node.empty:
321 |                 await message.node.add("foo")
322 |                 await message.node.add("bar")
323 |                 await message.node.add("baz")
324 |                 await message.node.expand()
325 |             else:
326 |                 await message.node.toggle()
327 | 
328 |     TreeApp.run(log="textual.log")
329 | 
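The next_node / previous_node properties above implement cursor movement as a parent-pointer walk rather than a full tree traversal. A stand-alone sketch of the forward rule, using a hypothetical SimpleNode class (not part of the repo): descend into an expanded node's first child, otherwise take the next sibling, otherwise climb until some ancestor has one.

```python
class SimpleNode:
    """Toy tree node illustrating the next_node traversal rule."""

    def __init__(self, label, children=None, expanded=True):
        self.label = label
        self.expanded = expanded
        self.children = children or []
        self.parent = None
        for child in self.children:
            child.parent = self

    @property
    def next_sibling(self):
        if self.parent is None:
            return None
        siblings = self.parent.children
        idx = siblings.index(self)
        return siblings[idx + 1] if idx + 1 < len(siblings) else None

    @property
    def next_node(self):
        if self.expanded and self.children:
            return self.children[0]  # descend into first child
        node = self
        while node is not None:      # climb until an ancestor has a next sibling
            if node.next_sibling is not None:
                return node.next_sibling
            node = node.parent
        return None


root = SimpleNode("root", [SimpleNode("a", [SimpleNode("a1")]), SimpleNode("b")])
node, order = root, []
while node is not None:
    order.append(node.label)
    node = node.next_node
print(order)  # ['root', 'a', 'a1', 'b']
```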


--------------------------------------------------------------------------------
/pytest_tui/tui_gen.py:
--------------------------------------------------------------------------------
  1 | from sys import exit
  2 | from typing import Dict
  3 | 
  4 | from rich.console import RenderableType
  5 | from rich.panel import Panel
  6 | from rich.text import Text
  7 | from textual import events
  8 | from textual.app import App
  9 | from textual.widget import Widget
 10 | from textual.widgets import ScrollView, TreeClick
 11 | 
 12 | from pytest_tui.tree_control import TreeControl
 13 | from pytest_tui.utils import Results
 14 | 
 15 | TREE_WIDTH = 120
 16 | SECTIONS = {
 17 |     "PASSES": "bold green underline",
 18 |     "FAILURES": "bold red underline",
 19 |     "ERRORS": "bold magenta underline",
 20 |     "WARNINGS_SUMMARY": "bold yellow underline",
 21 | }
 22 | CATEGORIES = {
 23 |     "PASSES": "bold green underline",
 24 |     "FAILURES": "bold red underline",
 25 |     "ERRORS": "bold magenta underline",
 26 |     "SKIPPED": "bold cyan underline",
 27 |     "XFAILS": "bold indian_red underline",
 28 |     "XPASSES": "bold chartreuse1 underline",
 29 | }
 30 | 
 31 | 
 32 | class Tab(Widget):
 33 |     def __init__(
 34 |         self,
 35 |         label: str,
 36 |         style: str,
 37 |         content_type: str,  # either 'section' or 'tree'
 38 |     ) -> None:
 39 |         super().__init__()
 40 |         self.label = label
 41 |         self.content_type = content_type
 42 |         self.rich_text = Text(label, style=style)
 43 | 
 44 | 
 45 | class Tabs(Widget):
 46 |     def __init__(
 47 |         self,
 48 |         tabs: Dict[str, Tab],
 49 |     ) -> None:
 50 |         super().__init__()
 51 |         self.tabs = tabs
 52 | 
 53 |     async def action_clicked_tab(self, label: str) -> None:
 54 |         # Handle tabs being clicked
 55 |         if label == "Quit":
 56 |             quit()
 57 | 
 58 |         body = self.parent.parent.body
 59 |         results = self.parent.parent.results
 60 |         section_content = {
 61 |             "Summary": results.tui_sections.lastline.content
 62 |             + results.tui_sections.test_session_starts.content
 63 |             + results.tui_sections.short_test_summary.content,
 64 |             "Warnings": results.tui_sections.warnings_summary.content,
 65 |             "Errors": results.tui_sections.errors.content,
 66 |             "Full Output": results.terminal_output,
 67 |         }
 68 |         tree_names = {
 69 |             "Passes": "passes_tree",
 70 |             "Failures": "failures_tree",
 71 |             "Skipped": "skipped_tree",
 72 |             "Xfails": "xfails_tree",
 73 |             "Xpasses": "xpasses_tree",
 74 |         }
 75 | 
 76 |         # Render the clicked tab with bold underline
 77 |         for tab_name in self.tabs:
 78 |             if tab_name == label:
 79 |                 self.tabs[tab_name].rich_text.stylize("bold underline")
 80 |             else:
 81 |                 self.tabs[tab_name].rich_text.stylize("not bold not underline")
 82 |             self.refresh()
 83 | 
 84 |         # Render section info
 85 |         if self.tabs[label].content_type == "section":
 86 |             self.parent.parent.body.visible = True
 87 |             await body.update(Text.from_ansi(section_content[label]))
 88 |         # Render tree info
 89 |         elif self.tabs[label].content_type == "tree":
 90 |             self.parent.parent.view.refresh()
 91 |             self.tree_name = tree_names[label]
 92 |             await body.update(eval(f"self.parent.parent.{self.tree_name}"))
 93 | 
 94 |     def render(self) -> RenderableType:
 95 |         # Build up renderable Text instance from a series of Tabs;
 96 |         # this simulates a tabbed widget as a workaround until Textual's
 97 |         # Tabs object has been released
 98 |         text = Text()
 99 |         text.append("│ ")
100 |         for tab_name in self.tabs:
101 |             text.append(self.tabs[tab_name].rich_text)
102 |             text.append(" │ ")
103 |             self.tabs[tab_name].rich_text.on(click=f"clicked_tab('{tab_name}')")
104 |         return Panel(text, height=3)
105 | 
106 | 
107 | class TuiApp(App):
108 |     async def on_load(self, event: events.Load) -> None:
109 |         # Get test result sections
110 |         self.results = Results()
111 |         if not (self.results.tui_sections and self.results.tui_test_results):
112 |             exit()
113 |         self.summary_results = self.results.tui_sections.lastline.content.replace(
114 |             "=", ""
115 |         )
116 |         await self.bind("q", "quit", "Quit")
117 | 
118 |     async def on_mount(self) -> None:
119 |         tabs = {
120 |             "Summary": Tab("Summary", "cyan bold underline", content_type="section"),
121 |             "Passes": Tab("Passes", "green", content_type="tree"),
122 |             "Failures": Tab("Failures", "red", content_type="tree"),
123 |             "Skipped": Tab("Skipped", "yellow", content_type="tree"),
124 |             "Xfails": Tab("Xfails", "yellow", content_type="tree"),
125 |             "Xpasses": Tab("Xpasses", "yellow", content_type="tree"),
126 |             "Warnings": Tab("Warnings", "yellow", content_type="section"),
127 |             "Errors": Tab("Errors", "magenta", content_type="section"),
128 |             "Full Output": Tab("Full Output", "cyan", content_type="section"),
129 |             "Quit": Tab("Quit (Q)", "white", content_type="quit"),
130 |         }
131 |         self.tabs = Tabs(tabs)
132 |         await self.view.dock(self.tabs, edge="top", size=3)
133 | 
134 |         # Body (to display result sections or result trees)
135 |         self.body = ScrollView(
136 |             Text.from_ansi(
137 |                 self.results.tui_sections.lastline.content
138 |                 + self.results.tui_sections.test_session_starts.content
139 |                 + self.results.tui_sections.short_test_summary.content
140 |             ),
141 |             auto_width=True,
142 |         )
143 |         await self.view.dock(self.body)
144 | 
145 |         # Define the results trees
146 |         self.failures_tree = TreeControl(
147 |             Text("Failures:", style="bold red underline"),
148 |             {},
149 |             name="failures_tree",
150 |         )
151 |         self.passes_tree = TreeControl(
152 |             Text("Passes:", style="bold green underline"), {}, name="passes_tree"
153 |         )
154 |         self.errors_tree = TreeControl(
155 |             Text("Errors:", style="bold magenta underline"), {}, name="errors_tree"
156 |         )
157 |         self.skipped_tree = TreeControl(
158 |             Text("Skipped:", style="bold yellow underline"), {}, name="skipped_tree"
159 |         )
160 |         self.xpasses_tree = TreeControl(
161 |             Text("Xpasses:", style="bold yellow underline"), {}, name="xpasses_tree"
162 |         )
163 |         self.xfails_tree = TreeControl(
164 |             Text("Xfails:", style="bold yellow underline"), {}, name="xfails_tree"
165 |         )
166 |         for result in self.results.tui_test_results.all_failures():
167 |             await self.failures_tree.add(
168 |                 self.failures_tree.root.id,
169 |                 Text(result.fqtn),
170 |                 {
171 |                     "results": (
172 |                         f"{result.longreprtext or result.capstdout + result.capstderr + result.caplog}"
173 |                     )
174 |                 },
175 |             )
176 |         for result in self.results.tui_test_results.all_passes():
177 |             await self.passes_tree.add(
178 |                 self.passes_tree.root.id,
179 |                 Text(result.fqtn),
180 |                 {
181 |                     "results": (
182 |                         f"{result.longreprtext or result.capstdout + result.capstderr + result.caplog}"
183 |                     )
184 |                 },
185 |             )
186 |         for result in self.results.tui_test_results.all_errors():
187 |             await self.errors_tree.add(
188 |                 self.errors_tree.root.id,
189 |                 Text(result.fqtn),
190 |                 {
191 |                     "results": (
192 |                         f"{result.longreprtext or result.capstdout + result.capstderr + result.caplog}"
193 |                     )
194 |                 },
195 |             )
196 |         for result in self.results.tui_test_results.all_skipped():
197 |             await self.skipped_tree.add(
198 |                 self.skipped_tree.root.id,
199 |                 Text(result.fqtn),
200 |                 {
201 |                     "results": (
202 |                         f"{result.longreprtext or result.capstdout + result.capstderr + result.caplog}"
203 |                     )
204 |                 },
205 |             )
206 |         for result in self.results.tui_test_results.all_xpasses():
207 |             await self.xpasses_tree.add(
208 |                 self.xpasses_tree.root.id,
209 |                 Text(result.fqtn),
210 |                 {
211 |                     "results": (
212 |                         f"{result.longreprtext or result.capstdout + result.capstderr + result.caplog}"
213 |                     )
214 |                 },
215 |             )
216 |         for result in self.results.tui_test_results.all_xfails():
217 |             await self.xfails_tree.add(
218 |                 self.xfails_tree.root.id,
219 |                 Text(result.fqtn),
220 |                 {
221 |                     "results": (
222 |                         f"{result.longreprtext or result.capstdout + result.capstderr + result.caplog}"
223 |                     )
224 |                 },
225 |             )
226 | 
227 |         await self.failures_tree.root.expand()
228 |         await self.passes_tree.root.expand()
229 |         await self.errors_tree.root.expand()
230 |         await self.skipped_tree.root.expand()
231 |         await self.xpasses_tree.root.expand()
232 |         await self.xfails_tree.root.expand()
233 | 
234 |     async def handle_tree_click(self, message: TreeClick[dict]) -> None:
235 |         # Display results in body when category header is clicked;
236 |         # but don't try processing the category titles
237 |         label = message.node.label
238 |         if label.plain.upper().rstrip(":") in CATEGORIES:
239 |             return
240 |         category = message.sender.name[: -len("_tree")]  # strip the "_tree" suffix
241 |         all_tests_in_category = eval(
242 |             f"self.results.tui_test_results.all_{category.lower()}()"
243 |         )
244 |         for test in all_tests_in_category:
245 |             if test.fqtn == label.plain:
246 |                 self.text = Text.from_ansi(
247 |                     f"{test.longreprtext or test.capstdout + test.capstderr + test.caplog}"
248 |                 )
249 |                 break
250 | 
251 |         await self.body.update(self.text.markup)
252 | 
253 | 
254 | def main():
255 |     app = TuiApp()
256 |     app.run()
257 | 
258 | 
259 | if __name__ == "__main__":
260 |     main()
261 | 
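Both eval() calls in this file (the tree lookup in Tabs.action_clicked_tab and the category dispatch in TuiApp.handle_tree_click) build an attribute or method name from a string, so getattr could express the same lookup without evaluating code. A stand-alone, behavior-equivalent sketch; the inline results object exists only for illustration:

```python
from pytest_tui.utils import TuiTestResult, TuiTestResults

results = TuiTestResults(
    test_results=[TuiTestResult(fqtn="test_a.py::test_pass", outcome="PASSED")]
)
category = "passes"
# Equivalent to eval(f"results.all_{category.lower()}()"), without eval:
accessor = getattr(results, f"all_{category.lower()}")
print([t.fqtn for t in accessor()])  # ['test_a.py::test_pass']
```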


--------------------------------------------------------------------------------
/pytest_tui/utils.py:
--------------------------------------------------------------------------------
  1 | import ast
  2 | import pickle
  3 | import re
  4 | from dataclasses import dataclass, field
  5 | from datetime import datetime
  6 | from pathlib import Path
  7 | from typing import Dict, List
  8 | 
  9 | # Files generated by plugin.py
 10 | PYTEST_TUI_FILES_DIR = Path.cwd().resolve() / "tui_files"
 11 | TUI_RESULTS_FILE = PYTEST_TUI_FILES_DIR / "tui_results.pickle"
 12 | TUI_RESULT_OBJECTS_FILE = PYTEST_TUI_FILES_DIR / "tui_result_objects.pickle"
 13 | TUI_SECTIONS_FILE = PYTEST_TUI_FILES_DIR / "tui_sections.pickle"
 14 | TERMINAL_OUTPUT_FILE = PYTEST_TUI_FILES_DIR / "terminal_output.ansi"
 15 | DEFAULT_HTML_FILE = PYTEST_TUI_FILES_DIR / "html_report.html"
 16 | 
 17 | # regex matching patterns for Pytest sections
 18 | # live_log_sessionstart_matcher = re.compile(r"^==.*\s live log sessionstart\s==+")
 19 | test_session_starts_matcher = re.compile(r"^==.*\stest session starts\s==+")
 20 | test_session_starts_results_grabber = re.compile(r"(collected\s\d+\sitems[\s\S]+)")
 21 | test_session_starts_test_matcher = r"^(.*::.*)"
 22 | errors_section_matcher = re.compile(r"^==.*\sERRORS\s==+")
 23 | failures_section_matcher = re.compile(r"^==.*\sFAILURES\s==+")
 24 | warnings_summary_matcher = re.compile(r"^==.*\swarnings summary\s.*==+")
 25 | passes_section_matcher = re.compile(r"^==.*\sPASSES\s==+")
 26 | rerun_test_summary_matcher = re.compile(r"^==.*\srerun test summary info\s.*==+")
 27 | short_test_summary_matcher = re.compile(r"^==.*\sshort test summary info\s.*==+")
 28 | short_test_summary_test_matcher = re.compile(
 29 |     r"^(PASSED|FAILED|ERROR|SKIPPED|XFAIL|XPASS|RERUN)\s+(?:\[\d+\]\s)?(\S+)(?:.*)?$"
 30 | )
 31 | warnings_summary_test_matcher = re.compile(r"^([^\n]+:{1,2}[^\n]+)\n([^\n]+\n)+")
 32 | lastline_matcher = re.compile(r"^==.*in\s\d+\.\d+s.*=+")
 33 | section_name_matcher = re.compile(r"~~>PYTEST_TUI_(\w+)")
 34 | standard_test_matcher = re.compile(
 35 |     r"(.*\::\S+)\s(PASSED|FAILED|ERROR|SKIPPED|XFAIL|XPASS|RERUN)"
 36 | )
 37 | 
 38 | 
 39 | OUTCOMES = (
 40 |     "Failures",
 41 |     "Passes",
 42 |     "Errors",
 43 |     "Skipped",
 44 |     "Xfails",
 45 |     "Xpasses",
 46 |     "Reruns",
 47 | )
 48 | 
 49 | 
 50 | @dataclass
 51 | class TuiTestResult:
 52 |     fqtn: str = ""
 53 |     outcome: str = ""
 54 |     start_time: datetime = None
 55 |     duration: float = 0.0
 56 |     caplog: str = ""
 57 |     capstderr: str = ""
 58 |     capstdout: str = ""
 59 |     longreprtext: str = ""
 60 | 
 61 |     @staticmethod
 62 |     def categories():
 63 |         return [
 64 |             "fqtn",
 65 |             "outcome",
 66 |             "start_time",
 67 |             "duration",
 68 |             "caplog",
 69 |             "capstderr",
 70 |             "capstdout",
 71 |             "longreprtext",
 72 |         ]
 73 | 
 74 |     def to_list(self):
 75 |         return [
 76 |             self.fqtn,
 77 |             self.outcome,
 78 |             self.start_time,
 79 |             self.duration,
 80 |             self.caplog,
 81 |             self.capstderr,
 82 |             self.capstdout,
 83 |             self.longreprtext,
 84 |         ]
 85 | 
 86 |     def to_dict(self):
 87 |         return {
 88 |             "fqtn": self.fqtn,
 89 |             "outcome": self.outcome,
 90 |             "start_time": self.start_time,
 91 |             "duration": self.duration,
 92 |             "caplog": self.caplog,
 93 |             "capstderr": self.capstderr,
 94 |             "capstdout": self.capstdout,
 95 |             "longreprtext": self.longreprtext,
 96 |         }
 97 | 
 98 | 
 99 | @dataclass
100 | class TuiTestResults:
101 |     test_results: List[TuiTestResult] = field(default_factory=list)
102 | 
103 |     @staticmethod
104 |     def categories() -> List[str]:
105 |         return TuiTestResult.categories()
106 | 
107 |     def to_list(self) -> List[TuiTestResult]:
108 |         return list(self.test_results)
109 | 
110 |     def to_dict(self) -> Dict[str, TuiTestResult]:
111 |         return {test_result.fqtn: test_result for test_result in self.test_results}
112 | 
113 |     def to_dict_dict(self) -> Dict[str, Dict[str, TuiTestResult]]:
114 |         return {
115 |             test_result.fqtn: test_result.to_dict() for test_result in self.test_results
116 |         }
117 | 
118 |     def all_by_time(self) -> List[TuiTestResult]:
119 |         return sorted(self.test_results, key=lambda x: x.start_time)
120 | 
121 |     def all_by_fqtn(self) -> List[TuiTestResult]:
122 |         return sorted(self.test_results, key=lambda x: x.fqtn)
123 | 
124 |     def all_by_outcome(self) -> List[TuiTestResult]:
125 |         return sorted(self.test_results, key=lambda x: x.outcome)
126 | 
127 |     def all_by_outcome_then_time(self) -> List[TuiTestResult]:
128 |         return sorted(self.test_results, key=lambda x: (x.outcome, x.start_time))
129 | 
130 |     def all_tests(self) -> List[TuiTestResult]:
131 |         return list(self.test_results)
132 | 
133 |     def all_failures(self) -> List[TuiTestResult]:
134 |         return [
135 |             test_result
136 |             for test_result in self.test_results
137 |             if test_result.outcome == "FAILED"
138 |         ]
139 | 
140 |     def all_passes(self) -> List[TuiTestResult]:
141 |         return [
142 |             test_result
143 |             for test_result in self.test_results
144 |             if test_result.outcome == "PASSED"
145 |         ]
146 | 
147 |     def all_skipped(self) -> List[TuiTestResult]:
148 |         return [
149 |             test_result
150 |             for test_result in self.test_results
151 |             if test_result.outcome == "SKIPPED"
152 |         ]
153 | 
154 |     def all_xfails(self) -> List[TuiTestResult]:
155 |         return [
156 |             test_result
157 |             for test_result in self.test_results
158 |             if test_result.outcome == "XFAIL"
159 |         ]
160 | 
161 |     def all_xpasses(self) -> List[TuiTestResult]:
162 |         return [
163 |             test_result
164 |             for test_result in self.test_results
165 |             if test_result.outcome == "XPASS"
166 |         ]
167 | 
168 |     def all_errors(self) -> List[TuiTestResult]:
169 |         return [
170 |             test_result
171 |             for test_result in self.test_results
172 |             if test_result.outcome == "ERROR"
173 |         ]
174 | 
175 |     def all_reruns(self) -> List[TuiTestResult]:
176 |         return [
177 |             test_result
178 |             for test_result in self.test_results
179 |             if test_result.outcome == "RERUN"
180 |         ]
181 | 
182 | 
183 | @dataclass
184 | class TuiSection:
185 |     name: str = ""
186 |     content: str = ""
187 | 
188 | 
189 | @dataclass
190 | class TuiSections:
191 |     live_log_sessionstart: TuiSection
192 |     test_session_starts: TuiSection
193 |     errors: TuiSection
194 |     failures: TuiSection
195 |     passes: TuiSection
196 |     warnings_summary: TuiSection
197 |     rerun_test_summary: TuiSection
198 |     short_test_summary: TuiSection
199 |     lastline: TuiSection
200 | 
201 | 
202 | @dataclass
203 | class TuiRerunTestGroup:
204 |     # A 'rerun test group' consists of a single test that has been run multiple times with the
205 |     # 'pytest-rerunfailures' plugin.
206 |     # 'fqtn': fully-qualified test name (same for all tests in a TuiRerunTestGroup);
207 |     # 'final_outcome': final outcome of the test;
208 |     # 'final_test': TuiTestResult object for the final test (with outcome != RERUN);
209 |     # 'forerunners': list of TuiTestResult objects for all tests that preceded the final outcome.
210 |     fqtn: str = ""
211 |     final_outcome: str = ""
212 |     final_test: TuiTestResult = None
213 |     forerunners: List[TuiTestResult] = field(default_factory=list)
214 |     full_test_list: List[TuiTestResult] = field(default_factory=list)
215 | 
216 | 
217 | class Results:
218 |     """
219 |     This class holds all pertinent information for a given Pytest test run.
220 |     """
221 | 
222 |     def __init__(self):
223 |         """Top-level class attributes: TuiTestResults, TuiSections, and full console output w/ ANSI
224 |         """
225 |         self.tui_test_info = self._unpickle_tui_test_info()
226 |         self.tui_session_start_time = self.tui_test_info["session_start_time"]
227 |         self.tui_session_end_time = self.tui_test_info["session_end_time"]
228 |         self.tui_session_duration = self.tui_test_info["session_duration"]
229 |         self.tui_test_results = self.tui_test_info["tui_test_results"]
230 |         self.tui_rerun_test_groups = self.tui_test_info["tui_rerun_test_groups"]
231 |         self.tui_sections = self.tui_test_info["tui_sections"]
232 |         self.tui_htmlfile = self.tui_test_info["tui_htmlfile"]
233 |         self.tui_regexfile = self.tui_test_info["tui_regexfile"]
234 |         self.terminal_output = self._get_terminal_output()
235 | 
236 |     def _unpickle_tui_test_info(self):
237 |         """Unpack pickled results file"""
238 |         try:
239 |             with open(TUI_RESULTS_FILE, "rb") as rfile:
240 |                 return pickle.load(rfile)
241 |         except FileNotFoundError as e:
242 |             raise FileNotFoundError(
243 |                 f"Cannot find {TUI_RESULTS_FILE}. Have you run pytest with the '--tui'"
244 |                 " option yet?"
245 |             ) from e
246 | 
247 |     def _get_terminal_output(self, file_path: Path = TERMINAL_OUTPUT_FILE) -> str:
248 |         """Get full Pytest terminal output"""
249 |         try:
250 |             with open(file_path, "r") as file:
251 |                 return file.read()
252 |         except FileNotFoundError as e:
253 |             raise FileNotFoundError(
254 |                 f"Cannot find {file_path}. Have you run pytest with the '--tui' option"
255 |                 " yet?"
256 |             ) from e
257 | 
258 | 
259 | def create_tui_files_directory():
260 |     tui_files_dir = Path.cwd().resolve() / "tui_files"
261 |     tui_files_dir.mkdir(exist_ok=True)
262 |     return tui_files_dir
263 | 
264 | 
265 | # def get_regex(tui_regexfile: Path) -> List[str]:
266 | #     """Read regex file and return list of regexes"""
267 | #     try:
268 | #         with open(tui_regexfile, "r") as file:
269 | #             # lines = [ast.literal_eval(line) for line in file.readlines() if line]
270 | #             lines = [eval(line) for line in file.readlines() if line]
271 | #             return [line.rstrip() for line in lines if line]
272 | #     except FileNotFoundError as e:
273 | #         print(e)
274 | #         return []
275 | 
276 | 
277 | # TUI_REGEXES = get_regex(Path.cwd().resolve() / "tui_regexes.txt")
278 | # # ^^^ really bad idea, since it can fail even if --tui option is not specified
279 | 
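For orientation, a minimal consumption sketch for the Results class, assuming a prior `pytest --tui` run has populated ./tui_files (otherwise FileNotFoundError is raised, as coded above):

```python
from pytest_tui.utils import Results

results = Results()  # unpickles ./tui_files/tui_results.pickle
print(results.tui_sections.lastline.content)          # final summary line
for test in results.tui_test_results.all_failures():  # TuiTestResult objects
    print(test.fqtn, test.duration)
```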


--------------------------------------------------------------------------------
/reqts/requirements-dev.in:
--------------------------------------------------------------------------------
 1 | -c requirements.txt
 2 | 
 3 | black==23.3.0
 4 | flake8==6.0.0
 5 | isort==5.12.0
 6 | nox==2022.11.21
 7 | pre-commit==3.2.0
 8 | pyflakes==3.0.0
 9 | # seleniumbase==4.9.11
10 | # seleniumbase==4.13.21
11 | 


--------------------------------------------------------------------------------
/reqts/requirements-dev.txt:
--------------------------------------------------------------------------------
 1 | #
 2 | # This file is autogenerated by pip-compile with Python 3.9
 3 | # by the following command:
 4 | #
 5 | #    pip-compile --no-emit-index-url reqts/requirements-dev.in
 6 | #
 7 | argcomplete==2.1.2
 8 |     # via nox
 9 | black==23.3.0
10 |     # via -r reqts/requirements-dev.in
11 | cfgv==3.3.1
12 |     # via pre-commit
13 | click==8.1.3
14 |     # via black
15 | colorlog==6.7.0
16 |     # via nox
17 | distlib==0.3.6
18 |     # via virtualenv
19 | filelock==3.12.1
20 |     # via virtualenv
21 | flake8==6.0.0
22 |     # via -r reqts/requirements-dev.in
23 | identify==2.5.24
24 |     # via pre-commit
25 | isort==5.12.0
26 |     # via -r reqts/requirements-dev.in
27 | mccabe==0.7.0
28 |     # via flake8
29 | mypy-extensions==1.0.0
30 |     # via black
31 | nodeenv==1.8.0
32 |     # via pre-commit
33 | nox==2022.11.21
34 |     # via -r reqts/requirements-dev.in
35 | packaging==23.1
36 |     # via
37 |     #   -c reqts/requirements.txt
38 |     #   black
39 |     #   nox
40 | pathspec==0.11.1
41 |     # via black
42 | platformdirs==3.5.3
43 |     # via
44 |     #   black
45 |     #   virtualenv
46 | pre-commit==3.2.0
47 |     # via -r reqts/requirements-dev.in
48 | pycodestyle==2.10.0
49 |     # via flake8
50 | pyflakes==3.0.0
51 |     # via
52 |     #   -r reqts/requirements-dev.in
53 |     #   flake8
54 | pyyaml==6.0
55 |     # via pre-commit
56 | tomli==2.0.1
57 |     # via
58 |     #   -c reqts/requirements.txt
59 |     #   black
60 | typing-extensions==4.6.3
61 |     # via black
62 | virtualenv==20.23.0
63 |     # via
64 |     #   nox
65 |     #   pre-commit
66 | 
67 | # The following packages are considered to be unsafe in a requirements file:
68 | # setuptools
69 | 


--------------------------------------------------------------------------------
/reqts/requirements.in:
--------------------------------------------------------------------------------
1 | ansi2html==1.8.0
2 | faker==18.3.1
3 | json2table==1.1.5
4 | pytest-metadata==2.0.4
5 | single-source
6 | strip-ansi
7 | textual==0.1.18
8 | 


--------------------------------------------------------------------------------
/reqts/requirements.txt:
--------------------------------------------------------------------------------
 1 | #
 2 | # This file is autogenerated by pip-compile with Python 3.9
 3 | # by the following command:
 4 | #
 5 | #    pip-compile --no-emit-index-url reqts/requirements.in
 6 | #
 7 | ansi2html==1.8.0
 8 |     # via -r reqts/requirements.in
 9 | commonmark==0.9.1
10 |     # via rich
11 | exceptiongroup==1.1.1
12 |     # via pytest
13 | faker==18.3.1
14 |     # via -r reqts/requirements.in
15 | iniconfig==2.0.0
16 |     # via pytest
17 | json2table==1.1.5
18 |     # via -r reqts/requirements.in
19 | packaging==23.1
20 |     # via pytest
21 | pluggy==1.0.0
22 |     # via pytest
23 | pygments==2.15.1
24 |     # via rich
25 | pytest==7.3.2
26 |     # via pytest-metadata
27 | pytest-metadata==2.0.4
28 |     # via -r reqts/requirements.in
29 | python-dateutil==2.8.2
30 |     # via faker
31 | rich==12.6.0
32 |     # via textual
33 | single-source==0.3.0
34 |     # via -r reqts/requirements.in
35 | six==1.16.0
36 |     # via python-dateutil
37 | strip-ansi==0.1.1
38 |     # via -r reqts/requirements.in
39 | textual==0.1.18
40 |     # via -r reqts/requirements.in
41 | tomli==2.0.1
42 |     # via pytest
43 | 


--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [bdist_wheel]
2 | universal=1
3 | 


--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python
 2 | # -*- coding: utf-8 -*-
 3 | 
 4 | import codecs
 5 | import os
 6 | 
 7 | from setuptools import find_packages, setup
 8 | 
 9 | 
10 | def read(fname):
11 |     file_path = os.path.join(os.path.dirname(__file__), fname)
12 |     return codecs.open(file_path, encoding="utf-8").read()
13 | 
14 | 
15 | setup(
16 |     name="pytest-tui",
17 |     version="2.1.0",
18 |     author="Jeff Wright",
19 |     author_email="jeff.washcloth@gmail.com",
20 |     license="MIT",
21 |     url="https://github.com/jeffwright13/pytest-tui",
22 |     description="Text User Interface (TUI) and HTML report for Pytest test runs",
23 |     long_description=read("README.md"),
24 |     long_description_content_type="text/markdown",
25 |     packages=find_packages(),
26 |     py_modules=["pytest_tui"],
27 |     python_requires=">=3.8",
28 |     install_requires=[
29 |         "ansi2html==1.8.0",
30 |         "faker==18.3.1",
31 |         "json2table==1.1.5",
32 |         "pytest-metadata==2.0.4",
33 |         "single-source==0.3.0",
34 |         "strip-ansi==0.1.1",
35 |         "textual==0.1.18",
36 |     ],
37 |     setup_requires=["setuptools_scm"],
38 |     include_package_data=True,
39 |     classifiers=[
40 |         "Framework :: Pytest",
41 |         "Development Status :: 4 - Beta",
42 |         "Intended Audience :: Developers",
43 |         "Topic :: Software Development :: Testing",
44 |         "Programming Language :: Python :: 3.8",
45 |         "Programming Language :: Python :: 3.9",
46 |         "Operating System :: OS Independent",
47 |         "License :: OSI Approved :: MIT License",
48 |     ],
49 |     keywords="pytest pytest-plugin testing tui textual html",
50 |     entry_points={
51 |         "pytest11": ["pytest_tui = pytest_tui.plugin"],
52 |         "console_scripts": [
53 |             "tui = pytest_tui.tui_gen:main",
54 |             "tuih = pytest_tui.html_gen:main",
55 |         ],
56 |     },
57 | )
58 | 


--------------------------------------------------------------------------------
/test_error.py:
--------------------------------------------------------------------------------
 1 | import pytest
 2 | 
 3 | 
 4 | @pytest.fixture
 5 | def error_fixt():
 6 |     raise Exception("Error in fixture")
 7 | 
 8 | 
 9 | def test0_pass_3_error_in_fixture(error_fixt):
10 |     print("Test Pass 3!")
11 |     assert True
12 | 


--------------------------------------------------------------------------------
/testing/bash/test.sh:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | 
 3 | function usage() {
 4 |   printf "\nUsage: test.sh [--version PYTHON_VERSION] [--help]\n"
 5 |   exit 1
 6 | }
 7 | 
 8 | clean_up() {
 9 |   test -d "$1" && rm -rf "$1"
10 | }
11 | 
12 | while [[ $# -gt 0 ]]; do
13 |   case $1 in
14 |     --version) shift; pyversion="$1" ;;
15 |     --help) usage ;;
16 |     --) shift; break ;;
17 |     -*) echo "Unknown flag '$1'" 1>&2; usage ;;
18 |     *) break ;;
19 |   esac
20 |   shift
21 | done
22 | 
23 | printf "%s %s\n" "$0" "$pyversion"
24 | 
25 | tmpdir=$( mktemp -d -t pytest-tui )
26 | printf "Creating temporary directory %s\n" "$tmpdir"
27 | cd "$tmpdir" || exit
28 | 
29 | # Use Python version specified on command line
30 | printf "Creating virtual Python environment with Python version %s\n" "$pyversion"
31 | pyenv local "$pyversion"
32 | 
33 | # Verify Python version being used is one being tested
34 | [[ $pyversion == $(python --version | awk '{print $2}') ]] || { echo "Python version being used is not the one being tested - are you running this script from a virtual environment? Exiting..."; exit 1; }
35 | 
36 | python -m venv venv
37 | source ./venv/bin/activate
38 | 
39 | printf "Upgrading build tools\n"
40 | pip install --upgrade pip setuptools wheel
41 | 
42 | printf "Installing pytest-tui from Test-PyPi\n"
43 | pip install -i https://test.pypi.org/simple/ pytest-tui
44 | pip install pytest-rerunfailures
45 | pip install faker
46 | 
47 | printf "Cloning pytest-tui so we can use its demo-tests\n"
48 | git clone git@github.com:jeffwright13/pytest-tui.git
49 | mkdir demo-tests
50 | cp pytest-tui/demo-tests/* ./demo-tests/
51 | clean_up pytest-tui
52 | ls -la demo-tests/
53 | rm -f conftest.py
54 | 
55 | printf "Executing pytest-tui\n"
56 | pytest --tui --tui-htmlfile=hacked.html
57 | 
58 | printf "Launching TUI and verifying content\n"
59 | expect <(cat <<'EOD'
60 |   spawn tui
61 |   # interact
62 |   expect {"Summary"}
63 |   # expect {"Passes"}
64 |   # expect {"Failures"}
65 |   # expect {"Skipped"}
66 |   # expect {"Xfails"}
67 |   # expect {"Xpasses"}
68 |   # expect {"Warnings"}
69 |   # expect {"Errors"}
70 |   # expect {"Full Output"}
71 |   # expect {"Quit (Q)"}
72 |   sleep 5
73 |   send "q"
74 |   exit
75 | EOD
76 | )
77 | # Recover from any ANSI corruption that may have occurred as a result of running pytest-tui
78 | reset
79 | 
80 | printf "Launching HTML\n"
81 | tuih
82 | printf "Check for exisence of output file '.tui_files/hacked.html..."
83 | FILE=tui_files/hacked.html
84 | if [ -f "$FILE" ]; then
85 |     echo "$FILE exists."
86 | else
87 |     echo "$FILE does not exist!"
88 | fi
89 | 
90 | 
91 | clean_up "$tmpdir"
92 | printf "Script finished"
93 | 


--------------------------------------------------------------------------------
/testing/bash/tui_expect.tcl:
--------------------------------------------------------------------------------
 1 | spawn tui
 2 | expect {
 3 |   "│ Summary │ Passes │ Failures │ Skipped │ Xfails │ Xpasses │ Warnings │ Errors │ Full Output │ Quit (Q) │"
 4 |   {
 5 |     send "q"
 6 |     sleep 5
 7 |     exit
 8 |   }
 9 | }
10 | 
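11 | # Usage (assumed invocation; expects a prior `pytest --tui` run to have
12 | # produced results in the current directory):
13 | #   expect testing/bash/tui_expect.tcl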


--------------------------------------------------------------------------------
/testing/pytester/examples/example_regex.txt:
--------------------------------------------------------------------------------
1 | r"""[DEBUG|INFO]"""
2 | 


--------------------------------------------------------------------------------
/testing/pytester/examples/test_0.py:
--------------------------------------------------------------------------------
 1 | import logging
 2 | import random
 3 | import warnings
 4 | 
 5 | import pytest
 6 | 
 7 | logger = logging.getLogger()
 8 | 
 9 | 
10 | def test0_pass_1():
11 |     print("Test Pass 1!")
12 |     assert True
13 | 
14 | 
15 | def test0_pass_2_logs():
16 |     print("Test Pass 2!")
17 |     logger.critical("CRITICAL")
18 |     logger.error("ERROR")
19 |     logger.warning("WARNING")
20 |     logger.info("INFO")
21 |     logger.debug("DEBUG")
22 |     assert True
23 | 
24 | 
25 | @pytest.fixture
26 | def error_fixt():
27 |     raise Exception("Error in fixture")
28 | 
29 | 
30 | def test0_pass_3_error_in_fixture(error_fixt):
31 |     print("Test Pass 3!")
32 |     assert True
33 | 
34 | 
35 | def test0_fail_1():
36 |     print("Test Fail 1!")
37 |     assert 1 == 2
38 | 
39 | 
40 | @pytest.mark.skip(reason="Skipping this test with decorator.")
41 | 
42 | 
43 | def test0_skip():
44 |     assert True
45 | 
46 | 
47 | @pytest.mark.xfail()
48 | def test0_xfail():
49 |     print("Test 0 XFail")
50 |     logger.critical("CRITICAL")
51 |     logger.error("ERROR")
52 |     logger.warning("WARNING")
53 |     logger.info("INFO")
54 |     logger.debug("DEBUG")
55 |     assert False
56 | 
57 | 
58 | @pytest.mark.xfail()
59 | def test0_xpass():
60 |     print("Test 0 XPass")
61 |     logger.critical("CRITICAL")
62 |     logger.error("ERROR")
63 |     logger.warning("WARNING")
64 |     logger.info("INFO")
65 |     logger.debug("DEBUG")
66 |     assert True
67 | 
68 | 
69 | # Method and its test that causes warnings
70 | def api_v1():
71 |     warnings.warn(UserWarning("api v1, should use functions from v2"))
72 |     return 1
73 | 
74 | 
75 | def test0_warning():
76 |     assert api_v1() == 1
77 | 
78 | 
79 | @pytest.mark.flaky(reruns=5)
80 | def test_flaky_3():
81 |     assert random.choice([True, False])
82 | 


--------------------------------------------------------------------------------
/testing/pytester/examples/test_empty.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 


--------------------------------------------------------------------------------
/testing/pytester/examples/test_pass.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | 
4 | def test_pass():
5 |     assert True
6 | 


--------------------------------------------------------------------------------
/testing/pytester/ideas.md:
--------------------------------------------------------------------------------
 1 | # Plugin Test Ideas
 2 | ## Command-line options verification
 3 | - invoke `pytest --help` and verify that the plugin is listed (see the sketch at the end of this file)
 4 | - invoke `pytest --tui --help` and verify that the plugin is listed
 5 | - invoke `pytest --co` and verify a collected list of tests is displayed
 6 | - invoke `pytest --co --tui` and verify a collected list of tests is displayed
 7 | - invoke `pytest --tui-html` and verify error msg (requires `--tui`)
 8 | - invoke `pytest --tui-regexfile` and verify error msg (requires `--tui`)
 9 | - invoke `pytest --tui` and verify:
10 |   - default console output is displayed
11 | 
12 | ## ini options verification
13 | - --ignore flag ignores tests in certain dirs
14 | - test with live logging `log_cli`
15 | 
16 | ## Execution of tests
17 | - invoke `pytest` and verify that the plugin is not invoked
18 | 
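19 | ## Example sketch
20 | 
21 | A minimal `pytester`-based sketch of the first idea above (test name and match
22 | pattern are illustrative only, not committed code):
23 | 
24 | ```python
25 | def test_help_lists_plugin(pytester):
26 |     """`pytest --help` output should mention the plugin's --tui option."""
27 |     result = pytester.runpytest("--help")
28 |     result.stdout.fnmatch_lines(["*--tui*"])
29 | ```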


--------------------------------------------------------------------------------
/testing/pytester/test_plugin_options.py:
--------------------------------------------------------------------------------
 1 | # import pytest
 2 | 
 3 | # @pytest.mark.test_tui_with_pytester
 4 | # def test_plugin_options(pytester):
 5 | #     """Test the plugin options."""
 6 | #     # Run pytest without any options
 7 | #     result = pytester.runpytest()
 8 | 
 9 | #     # Assert that the plugin options are not set
10 | #     assert result.ret == 0
11 | #     assert result.parseopt("_tui") is None
12 | #     assert result.parseopt("_tui_htmlfile") is None
13 | #     assert result.parseopt("_tui_regexfile") is None
14 | 
15 | #     # Run pytest with the --tui option
16 | #     result = pytester.runpytest("--tui")
17 | 
18 | #     # Assert that the --tui option is set
19 | #     assert result.ret == 0
20 | #     assert result.parseopt("_tui").value == True
21 | #     assert result.parseopt("_tui_htmlfile") is None
22 | #     assert result.parseopt("_tui_regexfile") is None
23 | 
24 | #     # Run pytest with the --tui-html option and a custom file path
25 | #     html_file = "custom_html_report.html"
26 | #     result = pytester.runpytest(f"--tui-html={html_file}")
27 | 
28 | #     # Assert that the --tui-html option is set with the correct file path
29 | #     assert result.ret == 0
30 | #     assert result.parseopt("_tui").value == True
31 | #     assert result.parseopt("_tui_htmlfile").value == html_file
32 | #     assert result.parseopt("_tui_regexfile") is None
33 | 
34 | #     # Run pytest with the --tui-regexfile option and a custom file path
35 | #     regex_file = "custom_regex.txt"
36 | #     result = pytester.runpytest(f"--tui-regexfile={regex_file}")
37 | 
38 | #     # Assert that the --tui-regexfile option is set with the correct file path
39 | #     assert result.ret == 0
40 | #     assert result.parseopt("_tui").value == True
41 | #     assert result.parseopt("_tui_htmlfile") is None
42 | #     assert result.parseopt("_tui_regexfile").value == regex_file
43 | 
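44 | # NOTE: pytest's `RunResult` has no `parseopt` method, so the sketch above
45 | # would fail even if uncommented. A working equivalent (kept commented out
46 | # here as well) would parse the config directly, as test_tui_with_pytester.py
47 | # does:
48 | #
49 | #     cfg = pytester.parseconfig("--tui")
50 | #     assert cfg.getoption("_tui") is True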


--------------------------------------------------------------------------------
/testing/pytester/test_tui_with_pytester.py:
--------------------------------------------------------------------------------
  1 | import pytest
  2 | from pathlib import Path
  3 | from _pytest.config import ExitCode
  4 | 
  5 | 
  6 | class Defaults:
  7 |     def __init__(self):
  8 |         self.tui = None
  9 |         self.htmlfile = Path(f"{Path.cwd()}/tui_files/html_report.html")
 10 |         self.regexfile = None
 11 | 
 12 | 
 13 | class Consts(Defaults):
 14 |     def __init__(self):
 15 |         super().__init__()
 16 |         self.tui = True
 17 | 
 18 | 
 19 | class NonDefaults:
 20 |     def __init__(self):
 21 |         self.tui = True
 22 |         self.htmlfile = Path(f"{Path.cwd()}/test_files/test_report.html")
 23 |         self.regexfile = Path(f"{Path.cwd()}/test_files/test_regex.txt")
 24 | 
 25 | 
 26 | class Examples:
 27 |     def __init__(self):
 28 |         self.htmlfile = Path(f"{Path.cwd()}/tui_files/example_html_report.html")
 29 |         self.regexfile = Path(f"{Path.cwd()}/tui_files/example_regex.txt")
 30 | 
 31 | 
 32 | @pytest.fixture
 33 | def defaults():
 34 |     return Defaults()
 35 | 
 36 | 
 37 | @pytest.fixture
 38 | def consts():
 39 |     return Consts()
 40 | 
 41 | 
 42 | @pytest.fixture
 43 | def nondefaults():
 44 |     return NonDefaults()
 45 | 
 46 | 
 47 | @pytest.fixture
 48 | def examples():
 49 |     return Examples()
 50 | 
 51 | 
 52 | @pytest.mark.test_tui_with_pytester
 53 | def test_help_menu_has_tui_info(pytester):
 54 |     """Verifies that the --tui option exists in the help menu."""
 55 |     result = pytester.runpytest("--help")
 56 |     assert any("tui" in line for line in result.outlines)
 57 |     result = pytester.runpytest("--help", "--tui")
 58 |     assert any("tui" in line for line in result.outlines)
 59 | 
 60 | 
 61 | @pytest.mark.test_tui_with_pytester
 62 | def test_verify_commandline_options_missing(pytester, defaults):
 63 |     """Verifies that when --tui is not passed on the command line, it is None when args
 64 |     are parsed; and when --tui is passed on the command line, it is True ."""
 65 |     # test_path = pytester.copy_example("testing/pytester/examples/test_pass.py")
 66 |     # result = pytester.runpytest()
 67 | 
 68 |     cfg = pytester.parseconfig()
 69 | 
 70 |     assert cfg.getoption("_tui") == defaults.tui
 71 |     assert cfg.getoption("_tui_htmlfile") == defaults.htmlfile
 72 |     assert cfg.getoption("_tui_regexfile") == defaults.regexfile
 73 | 
 74 | 
 75 | def test_verify_commandline_options_invalid_option(pytester):
 76 |     """Verifies the response to an invalid command line option."""
 77 | 
 78 |     with pytest.raises(SystemExit):
 79 |         pytester.parseconfig("--tui", "--invalid-option")
 80 | 
 81 | 
 82 | @pytest.mark.test_tui_with_pytester
 83 | def test_illegal_input_options(pytester):
 84 |     """Verifies that illegal combinations of command line options raise appropriate errors.
 85 |     """
 86 | 
 87 |     # Case when both --tui-htmlfile and --tui-regexfile are provided without a value
 88 |     with pytest.raises(SystemExit):
 89 |         pytester.parseconfig("--tui", "--tui-htmlfile=", "--tui-regexfile=")
 90 | 
 91 |     # Case when --tui is not provided but --tui-htmlfile and --tui-regexfile are
 92 |     with pytest.raises(SystemExit):
 93 |         pytester.parseconfig(
 94 |             "--tui-htmlfile=test_report.html", "--tui-regexfile=test_regex.txt"
 95 |         )
 96 | 
 97 |     # Add other illegal combinations as needed
 98 | 
 99 | 
100 | @pytest.mark.test_tui_with_pytester
101 | def test_verify_commandline_options_tui_only(pytester, consts):
102 |     """Verifies that when only '--tui' is passsed on command line, it is True"""
103 |     cfg = pytester.parseconfig("--tui")
104 | 
105 |     assert cfg.getoption("_tui") == consts.tui
106 |     assert cfg.getoption("_tui_htmlfile") == consts.htmlfile
107 |     assert cfg.getoption("_tui_regexfile") == consts.regexfile
108 | 
109 | 
110 | @pytest.mark.test_tui_with_pytester
111 | def test_verify_commandline_options_tui_regexfile(pytester, defaults, nondefaults):
112 |     """Verifies that when '--tui' and '--tui-htmlfile' are passsed on command line, '_tui'
113 |     is True and '_tui_htmlfile' is as specified."""
114 | 
115 |     cfg = pytester.parseconfig("--tui", "--tui-regexfile=")
116 |     # cfg = pytester.parseconfig("--tui", f"--tui-regexfile={regexfile}")
117 | 
118 |     assert cfg.getoption("_tui") == nondefaults.tui
119 |     assert cfg.getoption("_tui_htmlfile") == defaults.htmlfile
120 |     assert cfg.getoption("_tui_regexfile") == ""
121 |     # assert cfg.getoption("_tui_regexfile") == examples.regexfile
122 | 
123 | 
124 | # TODO: This test is failing.  Fix it.
125 | @pytest.mark.xfail(reason="Not implemented yet")
126 | @pytest.mark.test_tui_with_pytester
127 | def test_verify_commandline_options_tui_htmlfile(pytester, examples, nondefaults):
128 |     """Verifies that when '--tui' and '--tui-htmlfile' are passsed on command line, '_tui'
129 |     is True and '_tui_htmlfile' is as specified."""
130 | 
131 |     cfg = pytester.parseconfig(
132 |         "--tui",
133 |         f"--tui-htmlfile={examples.htmlfile}",
134 |         f"--tui-regexfile={examples.regexfile}",
135 |     )
136 | 
137 |     assert cfg.getoption("_tui") == nondefaults.tui
138 |     assert cfg.getoption("_tui_htmlfile") == examples.htmlfile
139 |     assert cfg.getoption("_tui_regexfile") == examples.regexfile
140 | 
141 | 
142 | @pytest.mark.test_tui_with_pytester
143 | def test_run_with_empty_testfile(pytester):
144 |     """Verifies that the pytest-tui plugin handles being paseed an empty test file.
145 |     By emmpty, we mean a test file that has no tests in it."""
146 | 
147 |     test_path = pytester.copy_example("testing/pytester/examples/test_empty.py")
148 |     result = pytester.runpytest("--tui")
149 | 
150 |     assert any("== no tests ran" in outline for outline in result.outlines)
151 |     assert result.ret == ExitCode.NO_TESTS_COLLECTED
152 |     result.assert_outcomes(failed=0, passed=0)
153 | 
154 | 
155 | @pytest.mark.test_tui_with_pytester
156 | def test_verify_commandline_options_tui_htmlfile_regexfile(pytester, examples):
157 |     """Verifies that when '--tui', '--tui-htmlfile', and '--tui-regexfile' are passed on the command line,
158 |     '_tui' is True, '_tui_htmlfile' is as specified, and '_tui_regexfile' is as specified.
159 |     """
160 | 
161 |     cfg = pytester.parseconfig(
162 |         "--tui",
163 |         f"--tui-htmlfile={examples.htmlfile}",
164 |         f"--tui-regexfile={examples.regexfile}",
165 |     )
166 | 
167 |     assert cfg.getoption("_tui") is True
168 |     assert cfg.getoption("_tui_htmlfile") == examples.htmlfile
169 |     assert cfg.getoption("_tui_regexfile") == examples.regexfile
170 | 
171 | 
172 | @pytest.mark.test_tui_with_pytester
173 | def test_verify_commandline_options_tui_htmlfile_no_value(pytester, consts):
174 |     """Verifies that when '--tui' and '--tui-htmlfile' are passed on the command line without a value,
175 |     '_tui' is True and '_tui_htmlfile' is the default value."""
176 | 
177 |     cfg = pytester.parseconfig("--tui", "--tui-htmlfile=")
178 | 
179 |     assert cfg.getoption("_tui") is True
180 |     assert cfg.getoption("_tui_htmlfile") == consts.htmlfile
181 |     assert cfg.getoption("_tui_regexfile") is None
182 | 
183 | 
184 | @pytest.mark.test_tui_with_pytester
185 | def test_verify_commandline_options_tui_regexfile_no_value(pytester, consts):
186 |     """Verifies that when '--tui' and '--tui-regexfile' are passed on the command line without a value,
187 |     '_tui' is True and '_tui_regexfile' is the default value."""
188 | 
189 |     cfg = pytester.parseconfig("--tui", "--tui-regexfile=")
190 | 
191 |     assert cfg.getoption("_tui") is True
192 |     assert cfg.getoption("_tui_htmlfile") is None
193 |     assert cfg.getoption("_tui_regexfile") == consts.regexfile
194 | 
195 | 
196 | @pytest.mark.test_tui_with_pytester
197 | def test_verify_commandline_options_regexfile_does_not_exist(pytester, consts):
198 |     """Verifies that when '--tui' and '--tui-regexfile' are passed on the command line without a value,
199 |     '_tui' is True and '_tui_regexfile' is the default value."""
200 | 
201 |     cfg = pytester.parseconfig("--tui", "--tui-regexfile=does_not_exist.txt")
202 | 
203 |     assert cfg.getoption("_tui") is True
204 |     assert cfg.getoption("_tui_htmlfile") is None
205 |     assert cfg.getoption("_tui_regexfile") == consts.regexfile
206 | 
207 | 
208 | @pytest.mark.test_tui_with_pytester
209 | def test_verify_commandline_options_tui_htmlfile_regexfile_no_value(pytester, consts):
210 |     """Verifies that when '--tui', '--tui-htmlfile', and '--tui-regexfile' are passed on the command line without a value,
211 |     '_tui' is True, '_tui_htmlfile' is the default value, and '_tui_regexfile' is the default value.
212 |     """
213 | 
214 |     cfg = pytester.parseconfig("--tui", "--tui-htmlfile=", "--tui-regexfile=")
215 | 
216 |     assert cfg.getoption("_tui") is True
217 |     assert cfg.getoption("_tui_htmlfile") == consts.htmlfile
218 |     assert cfg.getoption("_tui_regexfile") == consts.regexfile
219 | 
220 | 
221 | '''
222 | @pytest.mark.test_tui_with_pytester
223 | def test_true_assertion(pytester):
224 |     pytester.makepyfile(
225 |         """
226 |         def test_foo():
227 |             assert True
228 |         """
229 |     )
230 |     result = pytester.runpytest()
231 |     result.assert_outcomes(failed=0, passed=1)
232 | 
233 | 
234 | @pytest.mark.test_tui_with_pytester
235 | def test_false_assertion(pytester):
236 |     pytester.makepyfile(
237 |         """
238 |         def test_foo():
239 |             assert False
240 |         """
241 |     )
242 |     result = pytester.runpytest()
243 |     result.assert_outcomes(failed=1, passed=0)
244 | '''
245 | 


--------------------------------------------------------------------------------
/testing/python/conftest.ini:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jeffwright13/pytest-tui/8b476d6be6b6d2b3afddf26f934c9e0b3e76c28b/testing/python/conftest.ini


--------------------------------------------------------------------------------
/testing/python/test_pytest_tui.py:
--------------------------------------------------------------------------------
 1 | def test_true_assertion(pytester):
 2 |     pytester.makepyfile(
 3 |         """
 4 |         def test_foo():
 5 |             assert True
 6 |     """
 7 |     )
 8 |     result = pytester.runpytest()
 9 |     result.assert_outcomes(failed=0, passed=1)
10 | 
11 | 
12 | def test_false_assertion(pytester):
13 |     pytester.makepyfile(
14 |         """
15 |         def test_foo():
16 |             assert False
17 |     """
18 |     )
19 |     result = pytester.runpytest()
20 |     result.assert_outcomes(failed=1, passed=0)
21 | 
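22 | 
23 | 
24 | # Sketch of a plugin-specific smoke test (assumes pytest-tui is installed in
25 | # the environment running these tests; left commented out as unverified):
26 | # def test_tui_smoke(pytester):
27 | #     pytester.makepyfile("def test_foo(): assert True")
28 | #     result = pytester.runpytest("--tui")
29 | #     result.assert_outcomes(passed=1)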


--------------------------------------------------------------------------------
/testing/robot/Resources/common.resource:
--------------------------------------------------------------------------------
1 | *** Settings ***
2 | Variables     ./vars.py
3 | Library       Collections
4 | Library       DebugLibrary
5 | Library       SeleniumLibrary
6 | 


--------------------------------------------------------------------------------
/testing/robot/Resources/vars.py:
--------------------------------------------------------------------------------
 1 | ##############################
 2 | ## Robot Framework Settings ##
 3 | ##############################
 4 | RF_DEFAULT_TIMEOUT = 60
 5 | RF_OPTIONAL_POPUP_TIMEOUT = 15
 6 | RF_DEBUG_TRUE = True
 7 | 
 8 | #########################
 9 | ## pytest-tui settings ##
10 | #########################
11 | REPO_BASE_DIR = "~/coding/pytest-tui/"
12 | 
13 | #######################
14 | ## Selenium Settings ##
15 | #######################
16 | BROWSER = "chrome"
17 | # BROWSER = "edge"
18 | # BROWSER = "firefox"
19 | 


--------------------------------------------------------------------------------
/testing/robot/Tests/001_test_basic.robot:
--------------------------------------------------------------------------------
 1 | *** Settings ***
 2 | Resource    ../Resources/common.resource
 3 | Library     Process
 4 | 
 5 | *** Test Cases ***
 6 | Print Environment Variables
 7 |     Log To Console    %{PYTHON_VERSION}
 8 |     Log To Console    %{PYTEST_TUI_VERSION}
 9 | 
10 | Verify Python Version
11 |     ${result} =         Run Process         python          --version
12 |     Should Contain      ${result.stdout}    %{PYTHON_VERSION}
13 | 
14 | Verify Pytest-Tui Option Shows in Pytest Help
15 |     ${result} =         Run Process         pytest          --help
16 |     Should Contain      ${result.stdout}    tui:
17 |     Should Contain      ${result.stdout}    --tui
18 |     Should Contain      ${result.stdout}    Enable the pytest-tui plugin.
19 | 
20 | Verify Pytest-Tui Version
21 |     ${result} =         Run Process         pytest          -VV         --co
22 |     Should Contain      ${result.stdout}    %{PYTEST_TUI_VERSION}
23 | 
24 | Verify Basic Test Run
25 |     ${result} =         Run Process         pytest          --tui      -k   test_0
26 |     Log To Console      ${result.stdout}
27 |     Should Contain      ${result.stdout}    short test summary info
28 | 
29 | *** Comments ***
30 | Verify Full Test Run
31 |     ${result} =         Run Process         pytest          --tui
32 |     # Log To Console      ${result.stdout}
33 |     Should Contain      ${result.stdout}    short test summary info
34 | 


--------------------------------------------------------------------------------
/testing/robot/Tests/002_test_tui.robot:
--------------------------------------------------------------------------------
 1 | *** Settings ***
 2 | Resource    ../Resources/common.resource
 3 | Library     Process
 4 | Library     SeleniumLibrary
 5 | 
 6 | *** Test Cases ***
 7 | Verify Basic Test Run
 8 |     ${result} =         Run Process         pytest          --tui      -k   test_0
 9 |     Should Contain      ${result.stdout}    == short test summary info ==
10 | 
11 | Verify Script Launch: 'tui'
12 |     ${result} =         Run Process         tui         timeout=10sec
13 |     Log To Console      ${result.stdout}
14 |     Should Contain      ${result.stdout}    ${HEADER}
15 | 
16 | Quit TUI
17 |     Press Keys          None    q
18 | 
19 | 
20 | *** Variables ***
21 | ${HEADER} =    │ Summary │ Passes │ Failures │ Skipped │ Xfails │ Xpasses │ Warnings │ Errors │ Full Output │ Quit (Q) │
22 | 
23 | 
24 | *** Comments ***
25 | 


--------------------------------------------------------------------------------
/testing/robot/Tests/003_test_tuih.robot:
--------------------------------------------------------------------------------
 1 | *** Settings ***
 2 | Resource    ../Resources/common.resource
 3 | Library     Process
 4 | Library     SeleniumLibrary
 5 | 
 6 | *** Test Cases ***
 7 | Verify Script Launch: 'tuih'
 8 |     ${result} =         Run Process         python      %{TOX_ROOT}/pytest_tui/html_gen.py    timeout=1min
 9 |     Should Be Equal As Integers         ${result.rc}    0
10 | 
11 | *** Variables ***
12 | 
13 | 
14 | *** Comments ***
15 | 


--------------------------------------------------------------------------------
/testing/sb/conftest.py:
--------------------------------------------------------------------------------
 1 | import pytest
 2 | import faker
 3 | import random
 4 | 
 5 | 
 6 | @pytest.fixture
 7 | def fake_data(min_chars: int = 30, max_chars: int = 60) -> str:
 8 |     return faker.Faker().text(random.randint(min_chars, max_chars))
 9 | 
10 | 
11 | @pytest.fixture
12 | def pom_vars():
13 |     return {
14 |         "TITLE": "Test Run Results",
15 |         "ABOUT_TAB": "#defaultOpen",
16 |         "ABOUT_TAB_FINAL_TEST_SUMMARY_BUTTON": "#About > button:nth-child(3)",
17 |         "ABOUT_TAB_FINAL_TEST_SUMMARY_EXPANDED_TEXT": "short test summary info",
18 |         "ABOUT_TAB_LIVE_TEST_SESSION_SUMMARY_BUTTON": "#About > button:nth-child(6)",
19 |         "ABOUT_TAB_LIVE_TEST_SESSION_SUMMARY_EXPANDED_TEXT_1": "==",
20 |         "ABOUT_TAB_LIVE_TEST_SESSION_SUMMARY_EXPANDED_TEXT_2": "collected",
21 |         "ABOUT_TAB_TEST_EXECUTION_INFO_BUTTON": "#About > button:nth-child(9)",
22 |         "ABOUT_TAB_TEST_EXECUTION_INFO_EXPANDED_TEXT": "Test run started",
23 |         "ABOUT_TAB_ENVIRONMENT_BUTTON": "#About > button:nth-child(12)",
24 |         "ABOUT_TAB_ENVIRONMENT_EXPANDED_TEXT": "Plugins",
25 |         "ALL_TESTS_TAB": "body > div > span.tab > div > span:nth-child(2) > button",
26 |         "ALL_TESTS_TAB_FIRST_TEST": "#All\ Tests > button:nth-child(1)",
27 |         "ALL_TESTS_TAB_FIRST_TEST_RESULT": "#All\ Tests > div:nth-child(2) > pre",
28 |         "FAILURES_TAB": "body > div > span.tab > div > span:nth-child(3) > button",
29 |         "FAILURES_TAB_FIRST_TEST": "#Failures > button:nth-child(1)",
30 |         "FAILURES_TAB_FIRST_TEST_RESULT": "#Failures > div:nth-child(2) > pre",
31 |         "PASSES_TAB": "body > div > span.tab > div > span:nth-child(4) > button",
32 |         "PASSES_TAB_FIRST_TEST": "#Passes > button:nth-child(1)",
33 |         "PASSES_TAB_FIRST_TEST_RESULT": "#Passes > div:nth-child(2) > pre",
34 |         "SKIPPED_TAB": "body > div > span.tab > div > span:nth-child(5) > button",
35 |         "SKIPPED_TAB_FIRST_TEST": "#Skipped > button:nth-child(1)",
36 |         "SKIPPED_TAB_FIRST_TEST_RESULT": "#Skipped > div:nth-child(2) > pre",
37 |         "XFAILS_TAB": "body > div > span.tab > div > span:nth-child(6) > button",
38 |         "XFAILS_TAB_FIRST_TEST": "#Xfails > button:nth-child(1)",
39 |         "XFAILS_TAB_FIRST_TEST_RESULT": "#Xfails > div:nth-child(2) > pre",
40 |         "XPASSES_TAB": "body > div > span.tab > div > span:nth-child(7) > button",
41 |         "XPASSES_TAB_FIRST_TEST": "#Xpasses > button:nth-child(1)",
42 |         "XPASSES_TAB_FIRST_TEST_RESULT": "#Xpasses > div:nth-child(2) > pre",
43 |         "RERUNS_TAB": "body > div > span.tab > div > span:nth-child(8) > button",
44 |         "RERUNS_TAB_FIRST_TEST": "#Reruns > button:nth-child(1)",
45 |         "RERUNS_TAB_FIRST_TEST_RESULT": "#Reruns > div:nth-child(2) > pre",
46 |         "FULL_OUTPUT_TAB": "body > div > span.tab > div > span:nth-child(11) > button",
47 |         "FULL_OUTPUT_TAB_EXPANDED_TEXT": "#Full\ Output > pre",
48 |         "FOLDED_OUTPUT_TAB": (
49 |             "body > div > span.tab > div > span:nth-child(12) > button"
50 |         ),
51 |         # "FOLDED_OUTPUT_TAB_SUMMARY_SECTION": (
52 |         "OUTPUT_SECTIONS_TAB": (
53 |             "body > div > span.tab > div > span:nth-child(9) > button"
54 |         ),
55 |         "OUTPUT_SECTIONS_TAB_SUMMARY_SECTION": (
56 |             "body > div > span.tab > div > span:nth-child(9) > span > span:nth-child(1)"
57 |             " > button"
58 |         ),
59 |         "OUTPUT_SECTIONS_TAB_SUMMARY_SECTION_TEXT": "short test summary info",
60 |         "OUTPUT_SECTIONS_TAB_FAILURES_SECTION": (
61 |             "body > div > span.tab > div > span:nth-child(9) > span > span:nth-child(2)"
62 |             " > button"
63 |         ),
64 |         "OUTPUT_SECTIONS_TAB_FAILURES_SECTION_TEXT": "= FAILURES =",
65 |         "OUTPUT_SECTIONS_TAB_PASSES_SECTION": (
66 |             "body > div > span.tab > div > span:nth-child(9) > span > span:nth-child(3)"
67 |             " > button"
68 |         ),
69 |         "OUTPUT_SECTIONS_TAB_PASSES_SECTION_TEXT": "= PASSES =",
70 |         "OUTPUT_SECTIONS_TAB_WARNINGS_SECTION": (
71 |             "body > div > span.tab > div > span:nth-child(9) > span > span:nth-child(4)"
72 |             " > button"
73 |         ),
74 |         "OUTPUT_SECTIONS_TAB_WARNINGS_SECTION_TEXT": "= warnings summary =",
75 |         "OUTPUT_SECTIONS_TAB_ERRORS_SECTION": (
76 |             "body > div > span.tab > div > span:nth-child(9) > span > span:nth-child(5)"
77 |             " > button"
78 |         ),
79 |         "OUTPUT_SECTIONS_TAB_ERRORS_SECTION_TEXT": "= ERRORS =",
80 |         "OUTPUT_SECTIONS_TAB_RERUNS_SECTION": (
81 |             "body > div > span.tab > div > span:nth-child(9) > span > span:nth-child(6)"
82 |             " > button"
83 |         ),
84 |         "OUTPUT_SECTIONS_TAB_RERUNS_SECTION_TEXT": "= rerun test summary info =",
85 |         "FOLD_ACTIONS_TAB": "body > div > span.tab > div > span:nth-child(10) > button",
86 |         "FOLD_ACTIONS_TAB_FOLD_UNFOLD_ACTION": (
87 |             "body > div > span.tab > div > span:nth-child(10) > span >"
88 |             " span:nth-child(1) > button"
89 |         ),
90 |         "FOLD_ACTIONS_TAB_FOLD_UNFOLD_ACTION_TEXT": "Fold/Unfold Action",
91 |         "FOLD_ACTIONS_TAB_SHOW_HIDE_ACTION": "#toggle-details",
92 |         "FOLD_ACTIONS_TAB_SHOW_HIDE_ACTION_TEXT": "Folded",
93 |     }
94 | 


--------------------------------------------------------------------------------
/testing/sb/test_html_report.py:
--------------------------------------------------------------------------------
  1 | import pytest
  2 | 
  3 | WORKDIR = "/Users/jwr003/coding/pytest-tui"
  4 | 
  5 | 
  6 | def test_html_report_about_tab(sb, pom_vars: dict[str, str]):
  7 |     sb.open(f"file:///{WORKDIR}/tui_files/html_report.html")
  8 |     sb.assert_title(pom_vars["TITLE"])
  9 |     sb.assert_element(f"{pom_vars['ABOUT_TAB']}:contains('About')")
 10 | 
 11 |     sb.assert_element(
 12 |         f"{pom_vars['ABOUT_TAB_FINAL_TEST_SUMMARY_BUTTON']}:contains('Final Test"
 13 |         " Summary')"
 14 |     )
 15 |     sb.assert_text_not_visible(pom_vars["ABOUT_TAB_FINAL_TEST_SUMMARY_EXPANDED_TEXT"])
 16 |     sb.click(pom_vars["ABOUT_TAB_FINAL_TEST_SUMMARY_BUTTON"])
 17 |     sb.assert_text_visible(pom_vars["ABOUT_TAB_FINAL_TEST_SUMMARY_EXPANDED_TEXT"])
 18 |     sb.click(pom_vars["ABOUT_TAB_FINAL_TEST_SUMMARY_BUTTON"])
 19 |     sb.assert_text_not_visible(pom_vars["ABOUT_TAB_FINAL_TEST_SUMMARY_EXPANDED_TEXT"])
 20 | 
 21 |     sb.assert_element(
 22 |         f"{pom_vars['ABOUT_TAB_LIVE_TEST_SESSION_SUMMARY_BUTTON']}:contains('Live Test"
 23 |         " Session Summary')"
 24 |     )
 25 |     sb.assert_text_not_visible(
 26 |         pom_vars["ABOUT_TAB_LIVE_TEST_SESSION_SUMMARY_EXPANDED_TEXT_1"]
 27 |     )
 28 |     sb.assert_text_not_visible(
 29 |         pom_vars["ABOUT_TAB_LIVE_TEST_SESSION_SUMMARY_EXPANDED_TEXT_2"]
 30 |     )
 31 |     sb.click(pom_vars["ABOUT_TAB_LIVE_TEST_SESSION_SUMMARY_BUTTON"])
 32 |     sb.assert_text_visible(
 33 |         pom_vars["ABOUT_TAB_LIVE_TEST_SESSION_SUMMARY_EXPANDED_TEXT_1"]
 34 |     )
 35 |     sb.assert_text_visible(
 36 |         pom_vars["ABOUT_TAB_LIVE_TEST_SESSION_SUMMARY_EXPANDED_TEXT_2"]
 37 |     )
 38 |     sb.click(pom_vars["ABOUT_TAB_LIVE_TEST_SESSION_SUMMARY_BUTTON"])
 39 |     sb.assert_text_not_visible(
 40 |         pom_vars["ABOUT_TAB_LIVE_TEST_SESSION_SUMMARY_EXPANDED_TEXT_1"]
 41 |     )
 42 |     sb.assert_text_not_visible(
 43 |         pom_vars["ABOUT_TAB_LIVE_TEST_SESSION_SUMMARY_EXPANDED_TEXT_2"]
 44 |     )
 45 | 
 46 |     sb.assert_element(pom_vars["ABOUT_TAB_TEST_EXECUTION_INFO_BUTTON"])
 47 |     sb.assert_text_visible(pom_vars["ABOUT_TAB_TEST_EXECUTION_INFO_EXPANDED_TEXT"])
 48 |     sb.click(pom_vars["ABOUT_TAB_TEST_EXECUTION_INFO_BUTTON"])
 49 |     sb.click(pom_vars["ABOUT_TAB_TEST_EXECUTION_INFO_BUTTON"])
 50 |     sb.assert_text_not_visible(pom_vars["ABOUT_TAB_TEST_EXECUTION_INFO_EXPANDED_TEXT"])
 51 |     sb.click(pom_vars["ABOUT_TAB_TEST_EXECUTION_INFO_BUTTON"])
 52 |     sb.assert_text_visible(pom_vars["ABOUT_TAB_TEST_EXECUTION_INFO_EXPANDED_TEXT"])
 53 | 
 54 |     sb.assert_element(pom_vars["ABOUT_TAB_ENVIRONMENT_BUTTON"])
 55 |     sb.assert_text_visible(pom_vars["ABOUT_TAB_ENVIRONMENT_EXPANDED_TEXT"])
 56 |     sb.click(pom_vars["ABOUT_TAB_ENVIRONMENT_BUTTON"])
 57 |     sb.click(pom_vars["ABOUT_TAB_ENVIRONMENT_BUTTON"])
 58 |     sb.assert_text_not_visible(pom_vars["ABOUT_TAB_ENVIRONMENT_EXPANDED_TEXT"])
 59 |     sb.click(pom_vars["ABOUT_TAB_ENVIRONMENT_BUTTON"])
 60 |     sb.assert_text_visible(pom_vars["ABOUT_TAB_ENVIRONMENT_EXPANDED_TEXT"])
 61 | 
 62 | 
 63 | def test_html_report_all_tests_tab(sb, pom_vars: dict[str, str]):
 64 |     sb.open(f"file:///{WORKDIR}/tui_files/html_report.html")
 65 |     sb.assert_title(pom_vars["TITLE"])
 66 |     sb.assert_element(f"{pom_vars['ALL_TESTS_TAB']}:contains('All Tests')")
 67 | 
 68 |     sb.click(f"{pom_vars['ALL_TESTS_TAB']}")
 69 |     sb.assert_element_not_visible(pom_vars["ALL_TESTS_TAB_FIRST_TEST_RESULT"])
 70 |     sb.click(f"{pom_vars['ALL_TESTS_TAB_FIRST_TEST']}")
 71 |     sb.assert_element_visible(pom_vars["ALL_TESTS_TAB_FIRST_TEST_RESULT"])
 72 |     sb.click(f"{pom_vars['ALL_TESTS_TAB_FIRST_TEST']}")
 73 |     sb.assert_element_not_visible(pom_vars["ALL_TESTS_TAB_FIRST_TEST_RESULT"])
 74 | 
 75 | 
 76 | def test_html_report_failures_tab(sb, pom_vars: dict[str, str]):
 77 |     sb.open(f"file:///{WORKDIR}/tui_files/html_report.html")
 78 |     sb.assert_title(pom_vars["TITLE"])
 79 |     sb.assert_element(f"{pom_vars['FAILURES_TAB']}:contains('Failures')")
 80 |     sb.click(f"{pom_vars['FAILURES_TAB']}")
 81 |     sb.assert_element_not_visible(pom_vars["FAILURES_TAB_FIRST_TEST_RESULT"])
 82 |     sb.click(f"{pom_vars['FAILURES_TAB_FIRST_TEST']}")
 83 |     sb.assert_element_visible(pom_vars["FAILURES_TAB_FIRST_TEST_RESULT"])
 84 |     sb.click(f"{pom_vars['FAILURES_TAB_FIRST_TEST']}")
 85 |     sb.assert_element_not_visible(pom_vars["FAILURES_TAB_FIRST_TEST_RESULT"])
 86 | 
 87 | 
 88 | def test_html_report_test_passes_tab(sb, pom_vars: dict[str, str]):
 89 |     sb.open(f"file:///{WORKDIR}/tui_files/html_report.html")
 90 |     sb.assert_title(pom_vars["TITLE"])
 91 |     sb.assert_element(f"{pom_vars['PASSES_TAB']}:contains('Passes')")
 92 |     sb.click(f"{pom_vars['PASSES_TAB']}")
 93 |     sb.assert_element_not_visible(pom_vars["PASSES_TAB_FIRST_TEST_RESULT"])
 94 |     sb.click(f"{pom_vars['PASSES_TAB_FIRST_TEST']}")
 95 |     sb.assert_element_visible(pom_vars["PASSES_TAB_FIRST_TEST_RESULT"])
 96 |     sb.click(f"{pom_vars['PASSES_TAB_FIRST_TEST']}")
 97 |     sb.assert_element_not_visible(pom_vars["PASSES_TAB_FIRST_TEST_RESULT"])
 98 | 
 99 | 
100 | def test_html_report_test_skipped_tab(sb, pom_vars: dict[str, str]):
101 |     sb.open(f"file:///{WORKDIR}/tui_files/html_report.html")
102 |     sb.assert_title(pom_vars["TITLE"])
103 |     sb.assert_element(f"{pom_vars['SKIPPED_TAB']}:contains('Skipped')")
104 |     sb.click(f"{pom_vars['SKIPPED_TAB']}")
105 |     sb.assert_element_not_visible(pom_vars["SKIPPED_TAB_FIRST_TEST_RESULT"])
106 |     sb.click(f"{pom_vars['SKIPPED_TAB_FIRST_TEST']}")
107 |     sb.assert_element_visible(pom_vars["SKIPPED_TAB_FIRST_TEST_RESULT"])
108 |     sb.click(f"{pom_vars['SKIPPED_TAB_FIRST_TEST']}")
109 |     sb.assert_element_not_visible(pom_vars["SKIPPED_TAB_FIRST_TEST_RESULT"])
110 | 
111 | 
112 | def test_html_report_test_xfails_tab(sb, pom_vars: dict[str, str]):
113 |     sb.open(f"file:///{WORKDIR}/tui_files/html_report.html")
114 |     sb.assert_title(pom_vars["TITLE"])
115 |     sb.assert_element(f"{pom_vars['XFAILS_TAB']}:contains('Xfails')")
116 |     sb.click(f"{pom_vars['XFAILS_TAB']}")
117 |     sb.assert_element_not_visible(pom_vars["XFAILS_TAB_FIRST_TEST_RESULT"])
118 |     sb.click(f"{pom_vars['XFAILS_TAB_FIRST_TEST']}")
119 |     sb.assert_element_visible(pom_vars["XFAILS_TAB_FIRST_TEST_RESULT"])
120 |     sb.click(f"{pom_vars['XFAILS_TAB_FIRST_TEST']}")
121 |     sb.assert_element_not_visible(pom_vars["XFAILS_TAB_FIRST_TEST_RESULT"])
122 | 
123 | 
124 | def test_html_report_test_xpasses_tab(sb, pom_vars: dict[str, str]):
125 |     sb.open(f"file:///{WORKDIR}/tui_files/html_report.html")
126 |     sb.assert_title(pom_vars["TITLE"])
127 |     sb.assert_element(f"{pom_vars['XPASSES_TAB']}:contains('Xpasses')")
128 |     sb.click(f"{pom_vars['XPASSES_TAB']}")
129 |     sb.assert_element_not_visible(pom_vars["XPASSES_TAB_FIRST_TEST_RESULT"])
130 |     sb.click(f"{pom_vars['XPASSES_TAB_FIRST_TEST']}")
131 |     sb.assert_element_visible(pom_vars["XPASSES_TAB_FIRST_TEST_RESULT"])
132 |     sb.click(f"{pom_vars['XPASSES_TAB_FIRST_TEST']}")
133 |     sb.assert_element_not_visible(pom_vars["XPASSES_TAB_FIRST_TEST_RESULT"])
134 | 
135 | 
136 | def test_html_report_test_reruns_tab(sb, pom_vars: dict[str, str]):
137 |     sb.open(f"file:///{WORKDIR}/tui_files/html_report.html")
138 |     sb.assert_title(pom_vars["TITLE"])
139 |     sb.assert_element(f"{pom_vars['RERUNS_TAB']}:contains('Reruns')")
140 |     sb.click(f"{pom_vars['RERUNS_TAB']}")
141 |     sb.assert_element_not_visible(pom_vars["RERUNS_TAB_FIRST_TEST_RESULT"])
142 |     sb.click(f"{pom_vars['RERUNS_TAB_FIRST_TEST']}")
143 |     sb.assert_element_visible(pom_vars["RERUNS_TAB_FIRST_TEST_RESULT"])
144 |     sb.click(f"{pom_vars['RERUNS_TAB_FIRST_TEST']}")
145 |     sb.assert_element_not_visible(pom_vars["RERUNS_TAB_FIRST_TEST_RESULT"])
146 | 
147 | 
148 | def test_html_report_test_full_output_tab(sb, pom_vars: dict[str, str]):
149 |     sb.open(f"file:///{WORKDIR}/tui_files/html_report.html")
150 |     sb.assert_title(pom_vars["TITLE"])
151 |     sb.assert_element(f"{pom_vars['FULL_OUTPUT_TAB']}:contains('Full Output')")
152 |     sb.click(f"{pom_vars['FULL_OUTPUT_TAB']}")
153 |     sb.assert_element(
154 |         f"{pom_vars['FULL_OUTPUT_TAB_EXPANDED_TEXT']}:contains('== test session starts"
155 |         " ==')"
156 |     )
157 |     sb.assert_element(
158 |         f"{pom_vars['FULL_OUTPUT_TAB_EXPANDED_TEXT']}:contains('== ERRORS ==')"
159 |     )
160 |     sb.assert_element(
161 |         f"{pom_vars['FULL_OUTPUT_TAB_EXPANDED_TEXT']}:contains('== FAILURES ==')"
162 |     )
163 |     sb.assert_element(
164 |         f"{pom_vars['FULL_OUTPUT_TAB_EXPANDED_TEXT']}:contains('== PASSES ==')"
165 |     )
166 |     sb.assert_element(
167 |         f"{pom_vars['FULL_OUTPUT_TAB_EXPANDED_TEXT']}:contains('== short test summary"
168 |         " info ==')"
169 |     )
170 |     sb.assert_element(
171 |         f"{pom_vars['FULL_OUTPUT_TAB_EXPANDED_TEXT']}:contains('== rerun test summary"
172 |         " info ==')"
173 |     )
174 | 
175 | 
176 | def test_html_report_test_folded_output_tab(sb, pom_vars: dict[str, str]):
177 |     sb.open(f"file:///{WORKDIR}/tui_files/html_report.html")
178 |     sb.assert_title(pom_vars["TITLE"])
179 |     sb.assert_element(f"{pom_vars['FOLDED_OUTPUT_TAB']}:contains('Folded Output')")
180 | 
181 | 
182 | def test_html_report_test_output_sections_tab(sb, pom_vars: dict[str, str]):
183 |     sb.open(f"file:///{WORKDIR}/tui_files/html_report.html")
184 |     sb.assert_title(pom_vars["TITLE"])
185 |     sb.assert_element(f"{pom_vars['OUTPUT_SECTIONS_TAB']}:contains('Output Sections')")
186 | 
187 |     sb.assert_element_not_visible(pom_vars["OUTPUT_SECTIONS_TAB_SUMMARY_SECTION"])
188 |     sb.assert_element_not_visible(pom_vars["OUTPUT_SECTIONS_TAB_FAILURES_SECTION"])
189 |     sb.assert_element_not_visible(pom_vars["OUTPUT_SECTIONS_TAB_PASSES_SECTION"])
190 |     sb.assert_element_not_visible(pom_vars["OUTPUT_SECTIONS_TAB_WARNINGS_SECTION"])
191 |     sb.assert_element_not_visible(pom_vars["OUTPUT_SECTIONS_TAB_ERRORS_SECTION"])
192 |     sb.assert_element_not_visible(pom_vars["OUTPUT_SECTIONS_TAB_RERUNS_SECTION"])
193 | 
194 |     sb.hover(f"{pom_vars['OUTPUT_SECTIONS_TAB']}")
195 |     sb.assert_element_visible(pom_vars["OUTPUT_SECTIONS_TAB_SUMMARY_SECTION"])
196 |     sb.assert_element_visible(pom_vars["OUTPUT_SECTIONS_TAB_FAILURES_SECTION"])
197 |     sb.assert_element_visible(pom_vars["OUTPUT_SECTIONS_TAB_PASSES_SECTION"])
198 |     sb.assert_element_visible(pom_vars["OUTPUT_SECTIONS_TAB_WARNINGS_SECTION"])
199 |     sb.assert_element_visible(pom_vars["OUTPUT_SECTIONS_TAB_ERRORS_SECTION"])
200 |     sb.assert_element_visible(pom_vars["OUTPUT_SECTIONS_TAB_RERUNS_SECTION"])
201 | 
202 |     sb.hover(f"{pom_vars['OUTPUT_SECTIONS_TAB_SUMMARY_SECTION']}")
203 |     sb.click(f"{pom_vars['OUTPUT_SECTIONS_TAB_SUMMARY_SECTION']}")
204 |     sb.assert_text(pom_vars["OUTPUT_SECTIONS_TAB_SUMMARY_SECTION_TEXT"])
205 | 
206 |     sb.hover(f"{pom_vars['OUTPUT_SECTIONS_TAB']}")
207 |     sb.hover(f"{pom_vars['OUTPUT_SECTIONS_TAB_FAILURES_SECTION']}")
208 |     sb.click(f"{pom_vars['OUTPUT_SECTIONS_TAB_FAILURES_SECTION']}")
209 |     sb.assert_text(pom_vars["OUTPUT_SECTIONS_TAB_FAILURES_SECTION_TEXT"])
210 | 
211 |     sb.hover(f"{pom_vars['OUTPUT_SECTIONS_TAB']}")
212 |     sb.hover(f"{pom_vars['OUTPUT_SECTIONS_TAB_PASSES_SECTION']}")
213 |     sb.click(f"{pom_vars['OUTPUT_SECTIONS_TAB_PASSES_SECTION']}")
214 |     sb.assert_text(pom_vars["OUTPUT_SECTIONS_TAB_PASSES_SECTION_TEXT"])
215 | 
216 |     sb.hover(f"{pom_vars['OUTPUT_SECTIONS_TAB']}")
217 |     sb.hover(f"{pom_vars['OUTPUT_SECTIONS_TAB_WARNINGS_SECTION']}")
218 |     sb.click(f"{pom_vars['OUTPUT_SECTIONS_TAB_WARNINGS_SECTION']}")
219 |     sb.assert_text(pom_vars["OUTPUT_SECTIONS_TAB_WARNINGS_SECTION_TEXT"])
220 | 
221 |     sb.hover(f"{pom_vars['OUTPUT_SECTIONS_TAB']}")
222 |     sb.click(f"{pom_vars['OUTPUT_SECTIONS_TAB_ERRORS_SECTION']}")
223 |     sb.assert_text(pom_vars["OUTPUT_SECTIONS_TAB_ERRORS_SECTION_TEXT"])
224 | 
225 |     sb.hover(f"{pom_vars['OUTPUT_SECTIONS_TAB']}")
226 |     sb.hover(f"{pom_vars['OUTPUT_SECTIONS_TAB_RERUNS_SECTION']}")
227 |     sb.click(f"{pom_vars['OUTPUT_SECTIONS_TAB_RERUNS_SECTION']}")
228 |     sb.assert_text(pom_vars["OUTPUT_SECTIONS_TAB_RERUNS_SECTION_TEXT"])
229 | 
230 | 
231 | def test_html_report_test_fold_actions_tab(sb, pom_vars: dict[str, str]):
232 |     sb.open(f"file:///{WORKDIR}/tui_files/html_report.html")
233 |     sb.click(pom_vars["FOLDED_OUTPUT_TAB"])
234 |     sb.scroll_into_view("Summary", by="tag name")
235 |     sb.assert_element_not_visible(pom_vars["FOLD_ACTIONS_TAB_FOLD_UNFOLD_ACTION"])
236 |     sb.assert_element_not_visible(pom_vars["FOLD_ACTIONS_TAB_SHOW_HIDE_ACTION"])
237 |     sb.hover(f"{pom_vars['FOLD_ACTIONS_TAB']}")
238 |     sb.find_element(pom_vars["FOLD_ACTIONS_TAB_FOLD_UNFOLD_ACTION"])
239 |     sb.assert_element_visible(pom_vars["FOLD_ACTIONS_TAB_FOLD_UNFOLD_ACTION"])
240 |     sb.find_element(pom_vars["FOLD_ACTIONS_TAB_SHOW_HIDE_ACTION"])
241 |     sb.assert_element_visible(pom_vars["FOLD_ACTIONS_TAB_SHOW_HIDE_ACTION"])
242 |     sb.click(pom_vars["FOLD_ACTIONS_TAB_FOLD_UNFOLD_ACTION"])
243 |     sb.wait_for_element_clickable("Summary", by="tag name")
244 |     sb.click(pom_vars["FOLD_ACTIONS_TAB_SHOW_HIDE_ACTION"])
245 |     sb.click(pom_vars["FOLD_ACTIONS_TAB_SHOW_HIDE_ACTION"])
246 |     sb.wait_for_element_not_visible("Summary", by="tag name")
247 | 
248 | 
249 | def main():
250 |     return pytest.main(["-x", f"{WORKDIR}/demo-tests", "--tui", "-vv", "--co"])
251 | 
252 | 
253 | if __name__ == "__main__":
254 |     main()
255 | 


--------------------------------------------------------------------------------