├── .github
├── CODEOWNERS
├── pull_request_template.md
└── workflows
│ └── main.yml
├── .gitignore
├── .pre-commit-config.yaml
├── .travis.yml
├── CHANGELOG.txt
├── LICENSE
├── MANIFEST.in
├── README.rst
├── codecov.yml
├── html
├── archive_body.html
├── archive_row.html
├── floating_error.html
├── screenshot_details.html
├── suite_row.html
├── template.html
└── test_row.html
├── html_page
├── __init__.py
├── archive_body.py
├── archive_row.py
├── floating_error.py
├── page_decor.py
├── screenshot_details.py
├── suite_row.py
├── template.py
└── test_row.py
├── pytest.ini
├── pytest_html_reporter
├── __init__.py
├── const_vars.py
├── html_reporter.py
├── plugin.py
├── time_converter.py
└── util.py
├── requirements.txt
├── setup.py
├── tests
├── functional
│ ├── Readme.md
│ ├── test_approx.py
│ ├── test_autouse.py
│ ├── test_fixture.py
│ ├── test_mark.py
│ ├── test_parameterize.py
│ ├── test_screenshot.py
│ ├── test_selenium.py
│ ├── test_simple.py
│ ├── test_skip_xfail_xpass.py
│ ├── test_usefixtures.py
│ └── test_yield_fixture.py
└── unit
│ ├── helper.py
│ ├── test_mvc_pages.py
│ ├── test_plugin.py
│ ├── test_time_converter.py
│ └── test_util.py
└── tox.ini
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | # Library
2 | /pytest_html_reporter/ @prashanth-sams
3 | /tests/ @prashanth-sams
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | # Merge checklist
2 | - [ ] TravisCI tests passed
3 | - [ ] Documentation
--------------------------------------------------------------------------------
/.github/workflows/main.yml:
--------------------------------------------------------------------------------
1 | on:
2 | pull_request:
3 | branches: [ master ]
4 | name: CI
5 | env:
6 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
7 | jobs:
8 | lint:
9 | runs-on: ubuntu-latest
10 | steps:
11 | - uses: actions/checkout@v2
12 | - uses: cclauss/Find-Python-syntax-errors-action@master
13 | unit-test:
14 | runs-on: ubuntu-latest
15 | steps:
16 | - uses: actions/checkout@v2
17 | - name: Install Dependencies
18 | run: |
19 | pip3 install -r requirements.txt
20 | - name: Run Tests
21 | run: |
22 | python3 -m py.test --cov ./pytest_html_reporter/ tests/unit/
23 | coveralls --service=github
24 | bash <(curl -s https://codecov.io/bash)
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea/
2 | venv/
3 | .DS_Store
4 | */**/.DS_Store
5 | */.DS_Store
6 | .pytest_cache/
7 | pytest_report.html
8 | pytest_html_reporter/__pycache__/
9 | pytest_html_reporter.egg-info/
10 | tests/__pycache__/
11 | dist/
12 | test_draft/
13 | report/
14 | __pycache__/
15 | .coverage
16 | archive
17 | output.json
18 | pytest_html_report.html
19 | pytest_screenshots
20 | coverage.xml
21 | build
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 |
3 | - repo: https://gitlab.com/pycqa/flake8
4 | rev: 3.7.7
5 | hooks:
6 | - id: flake8
7 | exclude: docs
8 | language_version: python3
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | python:
3 | - "3.8"
4 | env:
5 | jobs:
6 | - TRAVIS_PULL_REQUEST=false
7 | branches:
8 | only:
9 | - master
10 | install: pip3 install -r requirements.txt
11 | script: python3 -m py.test --cov ./pytest_html_reporter/ tests/unit/
12 | after_success:
13 | - coveralls
14 | - bash <(curl -s https://codecov.io/bash)
--------------------------------------------------------------------------------
/CHANGELOG.txt:
--------------------------------------------------------------------------------
1 | Change Log
2 | ==========
3 |
4 | 0.2.9 (13/02/2022)
5 | -------------------
6 | - Fixed desktop css inconsistencies
7 |
8 | 0.2.8 (11/02/2022)
9 | -------------------
10 | - Fixed all the code related inconsistencies
11 |
12 | 0.2.7 (11/02/2022)
13 | -------------------
14 | - Added custom title on Dashboard
15 |
16 | 0.2.6 (25/04/2021)
17 | -------------------
18 | - Dashboard Execution Time in H:M:S Hour:Minute:Second format
19 | - Shortened long error text to open in a dialog box
20 |
21 | 0.2.5 (20/04/2021)
22 | -------------------
23 | - Fixed incorrect execution time in the test metrics
24 |
25 | 0.2.4 (19/04/2021)
26 | -------------------
27 | - Added support to download the current report (dashboard) in pdf format
28 |
29 | 0.2.3 (28/09/2020)
30 | -------------------
31 | - Fixed UI bugs in dashboard and Archives layout
32 | - Added error details on failed test cases
33 |
34 | 0.2.2 (13/09/2020)
35 | -------------------
36 | - Codecov
37 | - Added unit tests for plugin
38 | - UI Alignment fixes
39 |
40 | 0.2.1 (12/09/2020)
41 | -------------------
42 | - Hot Fix css issue
43 |
44 | 0.1.9 (12/09/2020)
45 | -------------------
46 | - Optimized library size
47 | - Added unit tests
48 |
49 | 0.1.8 (12/09/2020)
50 | -------------------
51 | - Replaced report path CLI
52 | - Coverage measurement
53 |
54 | 0.1.7 (08/09/2020)
55 | -------------------
56 | - Screenshots on failure
57 | - Updated loader
58 |
59 | 0.1.6 (29/08/2020)
60 | -------------------
61 | - pytest-rerunfailures library support
62 |
63 | 0.1.5 (23/08/2020)
64 | -------------------
65 | - Added report icon
66 | - Updated tooltips for trends
67 | - Added reusable CSS file
68 | - Fixed Test suite details
69 |
70 | 0.0.1 (13/07/2020)
71 | -------------------
72 | - First Release
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | The MIT License (MIT)
3 |
4 | Copyright (c) 2020 Prashanth Sams
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining
7 | a copy of this software and associated documentation files (the
8 | "Software"), to deal in the Software without restriction, including
9 | without limitation the rights to use, copy, modify, merge, publish,
10 | distribute, sublicense, and/or sell copies of the Software, and to
11 | permit persons to whom the Software is furnished to do so, subject to
12 | the following conditions:
13 |
14 | The above copyright notice and this permission notice shall be
15 | included in all copies or substantial portions of the Software.
16 |
17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
21 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE
2 | include README.rst
3 |
4 | global-include *.py
5 | prune tests/
6 | prune test_draft/
7 | prune venv/
8 | exclude CHANGELOG.txt
9 | exclude requirements.txt
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | =====================
2 | pytest-html-reporter
3 | =====================
4 |
5 | .. image:: https://badges.gitter.im/prashanth-sams/pytest-html-reporter.svg
6 | :alt: Join the chat at https://gitter.im/prashanth-sams/pytest-html-reporter
7 | :target: https://gitter.im/prashanth-sams/pytest-html-reporter?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
8 |
9 | .. image:: https://badge.fury.io/py/pytest-html-reporter.svg
10 | :target: https://badge.fury.io/py/pytest-html-reporter
11 | :alt: PyPI version
12 |
13 | .. image:: https://travis-ci.com/prashanth-sams/pytest-html-reporter.svg?branch=master
14 | :target: https://travis-ci.com/prashanth-sams/pytest-html-reporter
15 | :alt: Build Status
16 |
17 | .. image:: https://coveralls.io/repos/github/prashanth-sams/pytest-html-reporter/badge.svg?branch=master
18 | :target: https://coveralls.io/github/prashanth-sams/pytest-html-reporter?branch=master
19 |
20 | .. image:: https://pepy.tech/badge/pytest-html-reporter
21 | :target: https://pepy.tech/project/pytest-html-reporter
22 | :alt: Downloads
23 |
24 |
25 | ..
26 |
27 | Generates a static html report based on ``pytest`` framework
28 |
29 |
30 | .. image:: https://i.imgur.com/4TYia5j.png
31 | :alt: pytest-html-reporter
32 |
33 | Features
34 | ------------
35 | * Generic information
36 |
37 | - Overview
38 | - Trends
39 | - Suite Highlights
40 | - Test suite details
41 | * Archives / History
42 | * Screenshots on failure
43 | * Test Rerun support
44 |
45 | Installation
46 | ------------
47 |
48 | .. code-block:: console
49 |
50 | $ pip3 install pytest-html-reporter
51 |
52 |
53 | Usage
54 | ------------
55 |
56 | By default, the filename used is ``pytest_html_report.html`` and path chosen is ``report``; you can skip both or
57 | either one of them if not needed::
58 |
59 | $ pytest tests/
60 |
61 |
62 | ..
63 |
64 | Custom path, filename, and title
65 |
66 | Add ``--html-report`` tag followed by path location and filename to customize the report location and filename::
67 |
68 | $ pytest tests/ --html-report=./report
69 | $ pytest tests/ --html-report=./report/report.html
70 |
71 | Add ``--title`` tag followed by the report title::
72 |
73 | $ pytest tests/ --html-report=./report --title='PYTEST REPORT'
74 |
75 | Add ``--archive-count`` tag followed by an integer to limit the number of builds shown in the ``Archives`` section::
76 |
77 | $ pytest tests/ --archive-count 7
78 | $ pytest tests/ --html-report=./report --archive-count 7
79 |
80 | ..
81 |
82 | pytest.ini
83 |
84 | Alternate option is to add this snippet in the ``pytest.ini`` file::
85 |
86 | [pytest]
87 | addopts = -vs -rf --html-report=./report --title='PYTEST REPORT'
88 |
89 | **Note:** If you fail to provide the ``--html-report`` tag, it considers your project's home directory as the base
90 |
91 | screenshots on failure
92 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^
93 |
94 | Import ``attach`` from the library and call it with the selenium command as given below::
95 |
96 | from pytest_html_reporter import attach
97 |
98 | ...
99 | attach(data=self.driver.get_screenshot_as_png())
100 |
101 | .. image:: https://img.shields.io/badge/Attach_screenshot_snippet-000?style=for-the-badge&logo=ko-fi&logoColor=white
102 | :target: https://gist.github.com/prashanth-sams/f0cc2102fc3619b11748e0cbda22598b
103 |
104 |
105 | .. image:: https://i.imgur.com/1HSYkdC.gif
106 |
107 |
108 | Is there a demo available for this package?
109 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
110 |
111 | Yes, you can use this demo as an example, https://github.com/prashanth-sams/pytest-html-reporter::
112 |
113 | $ pytest tests/functional/
--------------------------------------------------------------------------------
/codecov.yml:
--------------------------------------------------------------------------------
1 | coverage:
2 | status:
3 | project:
4 | default:
5 | target: 0%
6 | threshold: 70%
7 |
8 | patch: false
9 |
10 | precision: 3
11 |
12 | comment:
13 | layout: "diff"
14 | behavior: default
15 | require_changes: true
--------------------------------------------------------------------------------
/html/archive_body.html:
--------------------------------------------------------------------------------
1 |
758 |
759 |
760 |
761 |
762 |
763 |
764 |
765 |
766 |
767 | Time taken %(execution_time)%
768 |
769 |
770 |
771 |
776 |
779 |
780 | %(total)%
781 |
782 |
783 | TEST CASES
784 |
785 |
786 |
791 |
825 |
826 |
827 |
828 |
829 |
830 |
831 |
832 |
833 |
834 |
835 |
839 |
840 |
841 |
842 |
843 |
844 |
845 |
846 |
847 |
848 |
854 |
855 |
856 |
857 |
876 |
877 |
878 |
879 |
880 |
886 |
894 |
895 |
896 |
897 |
898 |
899 | Suite |
900 | Pass |
901 | Fail |
902 | Skip |
903 | xPass |
904 | xFail |
905 | Error |
906 | Rerun |
907 |
908 |
909 |
910 | %(suite_metrics_row)%
911 |
912 |
913 |
916 |
917 |
918 |
919 |
920 |
921 | Suite |
922 | Test Case |
923 | Status |
924 | Time (s) |
925 | Error Message |
926 |
927 |
928 |
929 | %(test_metrics_row)%
930 |
931 |
932 |
935 |
936 |
937 |
938 | %(archive_status)%
939 |
940 |
941 |
942 | %(archive_body_content)%
943 |
944 |
945 |
946 |
947 |
948 |
949 |
950 |
951 |
952 | %(attach_screenshot_details)%
953 |
954 |
955 |
956 |
957 |
958 |
959 |
960 |
961 |
1035 |
1116 |
1126 |
1142 |
1163 |
1167 |
1170 |
1268 |
1269 |
1327 |
1328 |
1419 |
1480 |
1500 |
1501 |
1502 |
--------------------------------------------------------------------------------
/html/test_row.html:
--------------------------------------------------------------------------------
1 |
2 | %(sname)% |
3 | %(name)% |
4 | %(stat)% |
5 | %(dur)% |
6 |
7 | %(msg)%
8 | %(floating_error_text)%
9 | |
10 |
--------------------------------------------------------------------------------
/html_page/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/prashanth-sams/pytest-html-reporter/f126a72353540a226c118cb7c238fe07389bd0b8/html_page/__init__.py
--------------------------------------------------------------------------------
/html_page/archive_body.py:
--------------------------------------------------------------------------------
1 | from html_page.page_decor import html_page
2 |
3 |
# The docstring below is functional: html_page() strips it and loads the
# matching template from ``html/archive_body.html`` -- do not edit it.
@html_page
class ArchiveBody:
    """
    archive_body
    """
--------------------------------------------------------------------------------
/html_page/archive_row.py:
--------------------------------------------------------------------------------
1 | from html_page.page_decor import html_page
2 |
3 |
# The docstring below is functional: html_page() strips it and loads the
# matching template from ``html/archive_row.html`` -- do not edit it.
@html_page
class ArchiveRow:
    """
    archive_row
    """
--------------------------------------------------------------------------------
/html_page/floating_error.py:
--------------------------------------------------------------------------------
1 | from html_page.page_decor import html_page
2 |
3 |
# The docstring below is functional: html_page() strips it and loads the
# matching template from ``html/floating_error.html`` -- do not edit it.
@html_page
class FloatingError:
    """
    floating_error
    """
--------------------------------------------------------------------------------
/html_page/page_decor.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 | import sys
4 |
5 |
def rindex(lst, value):
    """Return the index of the LAST occurrence of *value* in *lst*.

    Mirrors ``list.index`` semantics: raises ValueError when *value*
    is absent.
    """
    reversed_offset = lst[::-1].index(value)
    return len(lst) - 1 - reversed_offset
8 |
9 |
def html_page(cls):
    """Class decorator that turns *cls* into an HTML-template wrapper.

    The decorated class's docstring (stripped) names a template file under
    the project's ``html/`` directory.  Every ``%(name)%`` placeholder in
    that file becomes an instance attribute, substituted when the instance
    is rendered via ``str()``.
    """
    def __init__(self, *args, **kwargs):
        # Placeholders may only be supplied by keyword.
        if args:
            raise ValueError("HTML code behind class receives keyword only parameters. Positional are not allowed.")

        # These functions are defined outside any class body, so Python's
        # name mangling does NOT apply: the attribute really is "__content".
        self.__content = None
        self.__inline_attributes = list()
        self.__inline_code_snippets = list()

        # Each %(placeholder)% becomes an attribute; unsupplied keywords
        # default to "" so rendering never raises for missing values.
        for inline_attribute in self.inline_attributes:
            if inline_attribute in dict(kwargs).keys():
                setattr(self, inline_attribute, kwargs[inline_attribute])
            else:
                setattr(self, inline_attribute, "")

    def __str__(self):
        # Render: substitute attribute values, then evaluate $(...)$ code
        # snippets.  NOTE(review): eval() on template-embedded code is a
        # code-execution risk if templates ever come from untrusted input.
        res = self.content
        for inln in self.inline_attributes:
            res = res.replace(f"%({inln})%", getattr(self, inln))

        for cd_snpt in self.inline_code_snippets:
            res = res.replace(f"$({cd_snpt})$", eval(cd_snpt))

        return res

    @property
    def content(self):
        # Lazily load the template.  The project root is located by finding
        # the last "pytest-html-reporter" component in this file's path.
        # NOTE(review): this breaks if the checkout/install directory has a
        # different name -- confirm against packaging layout.
        if not self.__content:
            fname_list = os.path.abspath(__file__).split(os.path.sep)
            fname = os.path.join(*fname_list[:rindex(fname_list, "pytest-html-reporter") + 1]) \
                if sys.platform.startswith("win") or sys.platform == "cygwin" \
                else os.path.join(os.path.sep, *fname_list[:rindex(fname_list, "pytest-html-reporter") + 1])

            # The class docstring doubles as the template filename.
            with open(os.path.join(fname, "html", f"{cls.__doc__.strip()}.html")) as html:
                self.__content = html.read()

        return self.__content

    @property
    def inline_attributes(self):
        # All %(name)% placeholders found in the template.
        # NOTE(review): pattern is a non-raw string; prefer r"..." for regex.
        if not self.__inline_attributes:
            self.__inline_attributes = re.findall("%\((.+?)\)%", self.content)

        return self.__inline_attributes

    @property
    def inline_code_snippets(self):
        # NOTE(review): the trailing "$" here is the end-of-string anchor,
        # not a literal dollar sign, so this does NOT match the "$(...)$"
        # form substituted in __str__ -- likely intended "\$\((.+?)\)\$".
        # As written, snippets are effectively never collected; confirm
        # before changing, since a fix would activate the eval() path.
        if not self.__inline_code_snippets:
            self.__inline_code_snippets = re.findall("\$\((.+?)\)$", self.content)

        return self.__inline_code_snippets

    def format(self, **params):
        # NOTE(review): ``self.template`` is never set by this decorator;
        # calling format() would raise AttributeError -- confirm whether
        # this method is dead code.
        return self.template.format(**params)

    def replace(self, one, another):
        # str.replace over the fully rendered template.
        return str(self).replace(one, another)

    # Graft the behavior onto the decorated class.
    cls.__init__ = __init__
    cls.__str__ = __str__
    cls.content = content
    cls.inline_attributes = inline_attributes
    cls.inline_code_snippets = inline_code_snippets
    cls.format = format
    cls.replace = replace

    return cls
77 |
--------------------------------------------------------------------------------
/html_page/screenshot_details.py:
--------------------------------------------------------------------------------
1 | from html_page.page_decor import html_page
2 |
3 |
# The docstring below is functional: html_page() strips it and loads the
# matching template from ``html/screenshot_details.html`` -- do not edit it.
@html_page
class ScreenshotDetails:
    """
    screenshot_details
    """
--------------------------------------------------------------------------------
/html_page/suite_row.py:
--------------------------------------------------------------------------------
1 | from html_page.page_decor import html_page
2 |
3 |
# The docstring below is functional: html_page() strips it and loads the
# matching template from ``html/suite_row.html`` -- do not edit it.
@html_page
class SuiteRow:
    """
    suite_row
    """
--------------------------------------------------------------------------------
/html_page/template.py:
--------------------------------------------------------------------------------
1 | from html_page.page_decor import html_page
2 |
3 |
# The docstring below is functional: html_page() strips it and loads the
# matching template from ``html/template.html`` -- do not edit it.
@html_page
class HtmlTemplate:
    """
    template
    """
--------------------------------------------------------------------------------
/html_page/test_row.py:
--------------------------------------------------------------------------------
1 | from html_page.page_decor import html_page
2 |
3 |
# The docstring below is functional: html_page() strips it and loads the
# matching template from ``html/test_row.html`` -- do not edit it.
@html_page
class TestRow:
    """
    test_row
    """
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | markers =
3 | slow: mark as slow tests
4 | fast: mark as fast tests
5 | addopts = -vs -rf
--------------------------------------------------------------------------------
/pytest_html_reporter/__init__.py:
--------------------------------------------------------------------------------
1 | from .util import screenshot as attach
--------------------------------------------------------------------------------
/pytest_html_reporter/const_vars.py:
--------------------------------------------------------------------------------
class ConfigVars:
    """Process-wide mutable state shared by the reporting hooks.

    Every attribute is class-level, so all readers and writers (the hooks
    in ``html_reporter.py`` and helpers in ``util.py``) see the same
    values.  NOTE(review): the list/dict attributes are shared mutable
    state for the whole interpreter -- they are never reset per session.
    """
    # overall counters for the whole run
    _total = _executed = 0
    _pass = _fail = 0
    _skip = _error = 0
    _xpass = _xfail = 0
    # _a* / _as* counters feed the Archives section -- presumably
    # per-archived-run and per-archived-suite totals; verify in util.py
    _apass = _afail = 0
    _askip = _aerror = 0
    _axpass = _axfail = 0
    _astotal = 0
    _aspass = 0
    _asfail = 0
    _asskip = 0
    _aserror = 0
    _asxpass = 0
    _asxfail = 0
    _asrerun = 0
    # context of the test currently being reported
    _current_error = ""
    _suite_name = None
    _test_name = None
    _scenario = []
    _test_suite_name = []
    _test_pass_list = []
    _test_fail_list = []
    _test_skip_list = []
    _test_xpass_list = []
    _test_xfail_list = []
    _test_error_list = []
    _test_status = None
    # timing: epoch seconds while running; _execution_time is later
    # replaced by a formatted string in pytest_terminal_summary
    _start_execution_time = 0
    _execution_time = _duration = 0
    # accumulated HTML fragments for the report body
    _test_metrics_content = ""
    _suite_metrics_content = ""
    _previous_suite_name = "None"
    _initial_trigger = True
    # per-suite counters (flushed when the suite changes)
    _spass_tests = 0
    _sfail_tests = 0
    _sskip_tests = 0
    _serror_tests = 0
    _srerun_tests = 0
    _sxfail_tests = 0
    _sxpass_tests = 0
    _suite_length = 0
    # archive template buffers and CLI-controlled archive limit
    _archive_tab_content = ""
    _archive_body_content = ""
    _archive_count = ""
    archive_pass = 0
    archive_fail = 0
    archive_skip = 0
    archive_xpass = 0
    archive_xfail = 0
    archive_error = 0
    archives = {}
    # suite-highlight aggregation (max-failure suite detection)
    highlights = {}
    p_highlights = {}
    max_failure_suite_name = ''
    max_failure_suite_name_final = ''
    max_failure_suite_count = 0
    similar_max_failure_suite_count = 0
    max_failure_total_tests = 0
    max_failure_percent = ''
    # trend-chart series (labels + pass/fail/skip counts per build)
    trends_label = []
    tpass = []
    tfail = []
    tskip = []
    # rerun tracking state
    _previous_test_name = ''
    _suite_error = 0
    _suite_fail = 0
    _pvalue = 0
    # screenshot capture state (set via util.screenshot / "attach")
    screen_base = ''
    screen_img = None
    _attach_screenshot_details = ''
    # dashboard title; README documents a --title option that overrides this
    _title = 'PYTEST REPORT'
--------------------------------------------------------------------------------
/pytest_html_reporter/html_reporter.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import json
3 | import logging
4 | import os
5 | import time
6 | import shutil
7 | from datetime import date, datetime
8 | from os.path import isfile, join
9 |
10 | import pytest
11 |
12 | from html_page.archive_body import ArchiveBody
13 | from html_page.archive_row import ArchiveRow
14 | from html_page.floating_error import FloatingError
15 | from html_page.screenshot_details import ScreenshotDetails
16 | from html_page.suite_row import SuiteRow
17 | from html_page.template import HtmlTemplate
18 | from html_page.test_row import TestRow
19 | from pytest_html_reporter.util import suite_highlights, generate_suite_highlights, max_rerun
20 | from pytest_html_reporter.time_converter import time_converter
21 | from pytest_html_reporter.const_vars import ConfigVars
22 |
23 |
24 | class HTMLReporter(object):
    def __init__(self, path, archive_count, config):
        """Create the reporter plugin.

        :param path: report output directory, or a full ``*.html`` file path.
        :param archive_count: max archived builds to keep ('' disables pruning).
        :param config: the pytest ``Config`` object.
        """
        # Skeleton for the JSON payload written alongside the HTML report.
        self.json_data = {'content': {'suites': {0: {'status': {}, 'tests': {0: {}}, }, }}}
        self.path = path
        self.archive_count = archive_count
        self.config = config
        # Rerun counting is meaningful only when pytest-rerunfailures is
        # installed; None marks "no rerun support".
        has_rerun = config.pluginmanager.hasplugin("rerunfailures")
        self.rerun = 0 if has_rerun else None
32 |
    def pytest_runtest_teardown(self, item, nextitem):
        """Record the finished test's name and duration, then emit its row."""
        ConfigVars._test_name = item.name

        _test_end_time = time.time()
        # Duration measured from the timestamp set in pytest_runtest_setup.
        ConfigVars._duration = _test_end_time - ConfigVars._start_execution_time

        # Rerun bookkeeping only applies when pytest-rerunfailures is active.
        if (self.rerun is not None) and (max_rerun() is not None): self.previous_test_name(ConfigVars._test_name)
        self._test_names(ConfigVars._test_name)
        self.append_test_metrics_row()
42 |
43 | def previous_test_name(self, _test_name):
44 | if ConfigVars._previous_test_name == _test_name:
45 | self.rerun += 1
46 | else:
47 | ConfigVars._scenario.append(_test_name)
48 | self.rerun = 0
49 | ConfigVars._previous_test_name = _test_name
50 |
    def pytest_runtest_setup(item):
        # Record the wall-clock start; teardown computes _duration from it.
        # NOTE(review): declared without ``self`` -- as a bound plugin
        # method the exposed signature is still ``(item)``, so pytest can
        # invoke it; confirm before "fixing" by adding ``self``.
        ConfigVars._start_execution_time = time.time()
53 |
    def pytest_sessionfinish(self, session):
        """Flush metrics for the final suite once the whole session ends."""
        if ConfigVars._suite_name is not None: self.append_suite_metrics_row(ConfigVars._suite_name)
56 |
    def archive_data(self, base, filename):
        """Move the previous run's ``output.json`` into ``<base>/archive``.

        Only acts when an earlier report (*filename*) already exists under
        *base*; the archived copy is suffixed with the current start time
        so successive builds never collide.
        """
        path = os.path.join(base, filename)

        if os.path.isfile(path) is True:
            os.makedirs(base + '/archive', exist_ok=True)
            f = 'output.json'

            if isfile(join(base, f)):
                fname = os.path.splitext(f)
                os.rename(base + '/' + f, os.path.join(base + '/archive', fname[0] + '_' +
                                                       str(ConfigVars._start_execution_time) + fname[1]))
68 |
    @property
    def report_path(self):
        """Return ``(directory, filename)`` for the HTML report.

        When ``self.path`` ends in ``.html`` its last component is taken as
        the filename; otherwise the default ``pytest_html_report.html`` is
        used.  The absolute directory is also cached on
        ``HTMLReporter.base_path`` for other helpers.
        NOTE(review): the path is split on '/' literally; backslash-style
        Windows paths would not be parsed -- confirm supported platforms.
        """
        if '.html' in self.path:
            # '.html' still present in the parent portion means a bare
            # filename (no directory) was given -> use the current dir.
            path = '.' if '.html' in self.path.rsplit('/', 1)[0] else self.path.rsplit('/', 1)[0]
            if path == '': path = '.'
            logfile = os.path.expanduser(os.path.expandvars(path))
            HTMLReporter.base_path = os.path.abspath(logfile)
            return os.path.abspath(logfile), self.path.split('/')[-1]
        else:
            logfile = os.path.expanduser(os.path.expandvars(self.path))
            HTMLReporter.base_path = os.path.abspath(logfile)
            return os.path.abspath(logfile), 'pytest_html_report.html'
81 |
    def remove_old_archives(self):
        """Prune the ``archive`` directory per ``--archive-count``.

        ``archive_count == '0'`` wipes the directory entirely; otherwise
        the oldest files (by mtime) are removed so only the newest
        ``archive_count - 1`` remain (the current run was archived just
        before this call -- TODO confirm intended off-by-one).
        NOTE(review): the directory is derived from ``self.path`` directly,
        unlike ``report_path`` which strips a trailing ``*.html`` component
        -- confirm behavior when ``--html-report`` points at a file.
        """
        archive_dir = os.path.abspath(os.path.expanduser(os.path.expandvars(self.path))) + '/archive'

        if self.archive_count != '':
            if int(self.archive_count) == 0:
                if os.path.isdir(archive_dir):
                    shutil.rmtree(archive_dir)
                return

            archive_count = int(self.archive_count) - 1
            if os.path.isdir(archive_dir):
                archives = os.listdir(archive_dir)
                # Oldest first, so the loop below deletes the oldest extras.
                archives.sort(key=lambda f: os.path.getmtime(os.path.join(archive_dir, f)))
                for i in range(0, len(archives) - archive_count):
                    os.remove(os.path.join(archive_dir, archives[i]))
97 |
98 | @pytest.hookimpl(hookwrapper=True)
99 | def pytest_terminal_summary(self, terminalreporter, exitstatus, config):
100 |
101 | yield
102 | _execution_time = time.time() - terminalreporter._sessionstarttime
103 |
104 | if ConfigVars._execution_time < 60:
105 | ConfigVars._execution_time = str(round(_execution_time, 2)) + " secs"
106 | else:
107 | _execution_time = str(time.strftime("%H:%M:%S", time.gmtime(round(_execution_time)))) + " Hrs"
108 | ConfigVars._total = ConfigVars._pass + ConfigVars._fail + ConfigVars._xpass + ConfigVars._xfail + ConfigVars._skip + ConfigVars._error
109 |
110 | if ConfigVars._suite_name is not None:
111 | base = self.report_path[0]
112 | path = os.path.join(base, self.report_path[1])
113 |
114 | os.makedirs(base, exist_ok=True)
115 | self.archive_data(base, self.report_path[1])
116 |
117 | # generate json file
118 | self.generate_json_data(base)
119 |
120 | # generate trends
121 | self.update_trends(base)
122 |
123 | # generate archive template
124 | self.remove_old_archives()
125 | self.update_archives_template(base) if self.archive_count != '0' else None
126 |
127 | # generate suite highlights
128 | generate_suite_highlights()
129 |
130 | # generate html report
131 | live_logs_file = open(path, 'w')
132 | message = self.renew_template_text('https://i.imgur.com/LRSRHJO.png')
133 | live_logs_file.write(message)
134 | live_logs_file.close()
135 |
    @pytest.hookimpl(tryfirst=True, hookwrapper=True)
    def pytest_runtest_makereport(self, item, call):
        """Classify each test-report phase and update the global counters.

        Wraps pytest's report construction; the resulting report is
        inspected to bump pass/fail/skip/xfail/xpass/error counts and to
        capture the error text shown in the HTML report.
        """
        outcome = yield
        rep = outcome.get_result()
        # Suite name == the test file's node id (portion before '::').
        ConfigVars._suite_name = rep.nodeid.split("::")[0]

        if ConfigVars._initial_trigger:
            self.update_previous_suite_name()
            self.set_initial_trigger()

        # A suite change means the previous suite's metrics can be flushed.
        if str(ConfigVars._previous_suite_name) != str(ConfigVars._suite_name):
            self.append_suite_metrics_row(ConfigVars._previous_suite_name)
            self.update_previous_suite_name()
        else:
            self.update_counts(rep)

        if rep.when == "call" and rep.passed:
            if hasattr(rep, "wasxfail"):
                self.increment_xpass()
                self.update_test_status("xPASS")
                self.update_test_error("")
            else:
                self.increment_pass()
                self.update_test_status("PASS")
                self.update_test_error("")

        if rep.failed:
            if getattr(rep, "when", None) == "call":
                if hasattr(rep, "wasxfail"):
                    # NOTE(review): a *failed* call carrying wasxfail is
                    # counted as xPASS here, mirroring the passed branch --
                    # confirm this classification is intended.
                    self.increment_xpass()
                    self.update_test_status("xPASS")
                    self.update_test_error("")
                else:
                    self.increment_fail()
                    self.update_test_status("FAIL")
                    if rep.longrepr:
                        longerr = ""
                        # Keep only the exception ("E ...") lines of the
                        # traceback for the report's error column.
                        for line in rep.longreprtext.splitlines():
                            exception = line.startswith("E ")
                            if exception:
                                longerr += line + "\n"
                        self.update_test_error(longerr.replace("E ", ""))
            else:
                # Failure outside the call phase (setup/teardown) -> ERROR.
                self.increment_error()
                self.update_test_status("ERROR")
                if rep.longrepr:
                    longerr = ""
                    for line in rep.longreprtext.splitlines():
                        longerr += line + "\n"
                    self.update_test_error(longerr)

        if rep.skipped:
            if hasattr(rep, "wasxfail"):
                self.increment_xfail()
                self.update_test_status("xFAIL")
                if rep.longrepr:
                    longerr = ""
                    for line in rep.longreprtext.splitlines():
                        exception = line.startswith("E ")
                        if exception:
                            longerr += line + "\n"
                    self.update_test_error(longerr.replace("E ", ""))
            else:
                self.increment_skip()
                self.update_test_status("SKIP")
                if rep.longrepr:
                    longerr = ""
                    for line in rep.longreprtext.splitlines():
                        longerr += line + "\n"
                    self.update_test_error(longerr)
207 |
208 | def append_test_metrics_row(self):
209 |
210 | test_row_text = TestRow(
211 | sname=str(ConfigVars._suite_name),
212 | name=str(ConfigVars._test_name),
213 | stat=str(ConfigVars._test_status),
214 | dur=str(round(ConfigVars._duration, 2)),
215 | msg=str(ConfigVars._current_error[:50]),
216 | runt=str(time.time()).replace('.', '')
217 | )
218 |
219 | floating_error_text = FloatingError(full_msg=str(ConfigVars._current_error), runt = str(time.time()).replace('.', ''))
220 |
221 | if (self.rerun is not None) and (max_rerun() is not None):
222 | if (ConfigVars._test_status == 'FAIL') or (ConfigVars._test_status == 'ERROR'): ConfigVars._pvalue += 1
223 |
224 | if (ConfigVars._pvalue == max_rerun() + 1) or (ConfigVars._test_status == 'PASS'):
225 | if ((ConfigVars._test_status == 'FAIL') or (ConfigVars._test_status == 'ERROR')) and (
226 | ConfigVars.screen_base != ''): self.generate_screenshot_data()
227 |
228 | if len(ConfigVars._current_error) < 49:
229 | test_row_text.floating_error_text = str('')
230 | else:
231 | test_row_text.floating_error_text = str(floating_error_text)
232 | test_row_text.full_msg = str(ConfigVars._current_error)
233 |
234 | ConfigVars._test_metrics_content += str(test_row_text)
235 | ConfigVars._pvalue = 0
236 |
237 | elif (self.rerun is not None) and (
238 | (ConfigVars._test_status == 'xFAIL') or (ConfigVars._test_status == 'xPASS') or (
239 | ConfigVars._test_status == 'SKIP')):
240 |
241 | if len(ConfigVars._current_error) < 49:
242 | test_row_text.floating_error_text = ''
243 | else:
244 | test_row_text.floating_error_text = str(floating_error_text)
245 | test_row_text.full_msg = str(ConfigVars._current_error)
246 |
247 | ConfigVars._test_metrics_content += str(test_row_text)
248 |
249 | elif (self.rerun is None) or (max_rerun() is None):
250 | if ((ConfigVars._test_status == 'FAIL') or (ConfigVars._test_status == 'ERROR')) and (
251 | ConfigVars.screen_base != ''): self.generate_screenshot_data()
252 |
253 | if len(ConfigVars._current_error) < 49:
254 | test_row_text.floating_error_text = ''
255 | else:
256 | test_row_text.floating_error_text = str(floating_error_text)
257 | test_row_text.full_msg = str(ConfigVars._current_error)
258 |
259 | logging.warning(f"Test Metrics Row: {test_row_text}")
260 |
261 | ConfigVars._test_metrics_content += str(test_row_text)
262 |
263 | self.json_data['content']['suites'].setdefault(len(ConfigVars._test_suite_name), {})['suite_name'] = str(
264 | ConfigVars._suite_name)
265 | self.json_data['content']['suites'].setdefault(len(ConfigVars._test_suite_name), {}).setdefault('tests',
266 | {}).setdefault(
267 | len(ConfigVars._scenario) - 1, {})['status'] = str(ConfigVars._test_status)
268 | self.json_data['content']['suites'].setdefault(len(ConfigVars._test_suite_name), {}).setdefault('tests',
269 | {}).setdefault(
270 | len(ConfigVars._scenario) - 1, {})['message'] = str(ConfigVars._current_error)
271 | self.json_data['content']['suites'].setdefault(len(ConfigVars._test_suite_name), {}).setdefault('tests',
272 | {}).setdefault(
273 | len(ConfigVars._scenario) - 1, {})['test_name'] = str(ConfigVars._test_name)
274 |
275 | if (self.rerun is not None) and (max_rerun() is not None):
276 | self.json_data['content']['suites'].setdefault(len(ConfigVars._test_suite_name), {}).setdefault('tests',
277 | {}).setdefault(
278 | len(ConfigVars._scenario) - 1, {})['rerun'] = str(self.rerun)
279 | else:
280 | self.json_data['content']['suites'].setdefault(len(ConfigVars._test_suite_name), {}).setdefault('tests',
281 | {}).setdefault(
282 | len(ConfigVars._scenario) - 1, {})['rerun'] = '0'
283 |
284 | def generate_screenshot_data(self):
285 |
286 | os.makedirs(ConfigVars.screen_base + '/pytest_screenshots', exist_ok=True)
287 |
288 | _screenshot_name = round(time.time())
289 | _screenshot_suite_name = ConfigVars._suite_name.split('/')[-1:][0].replace('.py', '')
290 | _screenshot_test_name = ConfigVars._test_name
291 | if len(ConfigVars._test_name) >= 19: ConfigVars._screenshot_test_name = ConfigVars._test_name[-17:]
292 | _screenshot_error = ConfigVars._current_error
293 |
294 | ConfigVars.screen_img.save(
295 | ConfigVars.screen_base + '/pytest_screenshots/' + str(_screenshot_name) + '.png'
296 | )
297 |
298 | # attach screenshots
299 | self.attach_screenshots(_screenshot_name, _screenshot_suite_name, _screenshot_test_name, _screenshot_error)
300 | _screenshot_name = ''
301 | _screenshot_suite_name = ''
302 | _screenshot_test_name = ''
303 | _screenshot_error = ''
304 |
    def append_suite_metrics_row(self, name):
        """Finalize one suite: persist its counters into ``json_data``, render its
        HTML suite-metrics row, and reset the per-suite counters.

        :param name: the suite's file path (reduced to a bare name for display).
        """
        self._test_names(ConfigVars._test_name, clear='yes')
        self._test_suites(name)

        # Persist the per-suite counters into the JSON payload.
        self.json_data['content']['suites'].setdefault(len(ConfigVars._test_suite_name) - 1, {}).setdefault('status',
                                                                                                            {})[
            'total_pass'] = int(ConfigVars._spass_tests)
        self.json_data['content']['suites'].setdefault(len(ConfigVars._test_suite_name) - 1, {}).setdefault('status',
                                                                                                            {})[
            'total_skip'] = int(ConfigVars._sskip_tests)
        self.json_data['content']['suites'].setdefault(len(ConfigVars._test_suite_name) - 1, {}).setdefault('status',
                                                                                                            {})[
            'total_xpass'] = int(ConfigVars._sxpass_tests)
        self.json_data['content']['suites'].setdefault(len(ConfigVars._test_suite_name) - 1, {}).setdefault('status',
                                                                                                            {})[
            'total_xfail'] = int(ConfigVars._sxfail_tests)

        # Sum reruns across the suite's tests when pytest-rerunfailures is active.
        if (self.rerun is not None) and (max_rerun() is not None):
            _base_suite = self.json_data['content']['suites'].setdefault(len(ConfigVars._test_suite_name) - 1, {})[
                'tests']
            for i in _base_suite:
                ConfigVars._srerun_tests += int(_base_suite[int(i)]['rerun'])

            self.json_data['content']['suites'].setdefault(len(ConfigVars._test_suite_name) - 1, {}).setdefault(
                'status', {})[
                'total_rerun'] = int(ConfigVars._srerun_tests)
        else:
            self.json_data['content']['suites'].setdefault(len(ConfigVars._test_suite_name) - 1, {}).setdefault(
                'status', {})[
                'total_rerun'] = 0

        # Derive fail/error totals from each recorded per-test status.
        for i in self.json_data['content']['suites'].setdefault(len(ConfigVars._test_suite_name) - 1, {})['tests']:
            if 'ERROR' in \
                    self.json_data['content']['suites'].setdefault(len(ConfigVars._test_suite_name) - 1, {})['tests'][
                        i]['status']:
                ConfigVars._suite_error += 1
            elif 'FAIL' == \
                    self.json_data['content']['suites'].setdefault(len(ConfigVars._test_suite_name) - 1, {})['tests'][
                        i][
                        'status']:
                ConfigVars._suite_fail += 1

        self.json_data['content']['suites'].setdefault(len(ConfigVars._test_suite_name) - 1, {}).setdefault('status',
                                                                                                            {})[
            'total_fail'] = ConfigVars._suite_fail
        self.json_data['content']['suites'].setdefault(len(ConfigVars._test_suite_name) - 1, {}).setdefault('status',
                                                                                                            {})[
            'total_error'] = ConfigVars._suite_error

        # Render the suite's row for the suite-metrics table.
        suite_row_text = SuiteRow(
            sname=str(name),
            spass=str(ConfigVars._spass_tests),
            sfail=str(ConfigVars._suite_fail),
            sskip=str(ConfigVars._sskip_tests),
            sxpass=str(ConfigVars._sxpass_tests),
            sxfail=str(ConfigVars._sxfail_tests),
            serror=str(ConfigVars._suite_error),
            srerun=str(ConfigVars._srerun_tests)
        )

        ConfigVars._suite_metrics_content += str(suite_row_text)

        # Feed the per-suite chart series.
        self._test_passed(int(ConfigVars._spass_tests))
        self._test_failed(int(ConfigVars._suite_fail))
        self._test_skipped(int(ConfigVars._sskip_tests))
        self._test_xpassed(int(ConfigVars._sxpass_tests))
        self._test_xfailed(int(ConfigVars._sxfail_tests))
        self._test_error(int(ConfigVars._suite_error))

        # Reset per-suite counters for the next suite.
        ConfigVars._spass_tests = 0
        ConfigVars._sfail_tests = 0
        ConfigVars._sskip_tests = 0
        ConfigVars._sxpass_tests = 0
        ConfigVars._sxfail_tests = 0
        ConfigVars._serror_tests = 0
        ConfigVars._srerun_tests = 0
        ConfigVars._suite_fail = 0
        ConfigVars._suite_error = 0
383 |
    def set_initial_trigger(self):
        """Clear the one-shot ``_initial_trigger`` flag."""
        ConfigVars._initial_trigger = False
386 |
    def update_previous_suite_name(self):
        """Record the current suite name as the previous one (presumably used by
        the caller to detect suite transitions — confirm against the hook)."""
        ConfigVars._previous_suite_name = ConfigVars._suite_name
389 |
390 | def update_counts(self, rep):
391 | if rep.when == "call" and rep.passed:
392 | if hasattr(rep, "wasxfail"):
393 | ConfigVars._sxpass_tests += 1
394 | else:
395 | ConfigVars._spass_tests += 1
396 |
397 | if rep.failed:
398 | if getattr(rep, "when", None) == "call":
399 | if hasattr(rep, "wasxfail"):
400 | ConfigVars._sxpass_tests += 1
401 | else:
402 | ConfigVars._sfail_tests += 1
403 | else:
404 | pass
405 |
406 | if rep.skipped:
407 | if hasattr(rep, "wasxfail"):
408 | ConfigVars._sxfail_tests += 1
409 | else:
410 | ConfigVars._sskip_tests += 1
411 |
    def update_test_error(self, msg):
        """Record the most recent test's error message."""
        ConfigVars._current_error = msg
414 |
    def update_test_status(self, status):
        """Record the most recent test's status string (e.g. 'FAIL', 'ERROR')."""
        ConfigVars._test_status = status
417 |
    def increment_xpass(self):
        """Increment the ``ConfigVars._xpass`` counter."""
        ConfigVars._xpass += 1
420 |
    def increment_xfail(self):
        """Increment the ``ConfigVars._xfail`` counter."""
        ConfigVars._xfail += 1
423 |
    def increment_pass(self):
        """Increment the ``ConfigVars._pass`` counter."""
        ConfigVars._pass += 1
426 |
    def increment_fail(self):
        """Increment the ``ConfigVars._fail`` counter."""
        ConfigVars._fail += 1
429 |
    def increment_skip(self):
        """Increment the ``ConfigVars._skip`` counter."""
        ConfigVars._skip += 1
432 |
    def increment_error(self):
        """Increment both the run-level and the per-suite error counters."""
        ConfigVars._error += 1
        ConfigVars._serror_tests += 1
436 |
437 | def _date(self):
438 | return date.today().strftime("%B %d, %Y")
439 |
440 | def _test_suites(self, name):
441 | ConfigVars._test_suite_name.append(name.split('/')[-1].replace('.py', ''))
442 |
443 | def _test_names(self, name, **kwargs):
444 | if (self.rerun is None) or (max_rerun() is None): ConfigVars._scenario.append(name)
445 | try:
446 | if kwargs['clear'] == 'yes': ConfigVars._scenario = []
447 | except Exception:
448 | pass
449 |
    def _test_passed(self, value):
        """Append one suite's pass total to the per-suite chart series."""
        ConfigVars._test_pass_list.append(value)
452 |
    def _test_failed(self, value):
        """Append one suite's fail total to the per-suite chart series."""
        ConfigVars._test_fail_list.append(value)
455 |
    def _test_skipped(self, value):
        """Append one suite's skip total to the per-suite chart series."""
        ConfigVars._test_skip_list.append(value)
458 |
    def _test_xpassed(self, value):
        """Append one suite's xpass total to the per-suite chart series."""
        ConfigVars._test_xpass_list.append(value)
461 |
    def _test_xfailed(self, value):
        """Append one suite's xfail total to the per-suite chart series."""
        ConfigVars._test_xfail_list.append(value)
464 |
    def _test_error(self, value):
        """Append one suite's error total to the per-suite chart series."""
        ConfigVars._test_error_list.append(value)
467 |
468 | def renew_template_text(self, logo_url):
469 | template_text = HtmlTemplate(
470 | custom_logo=logo_url,
471 | execution_time=str(ConfigVars._execution_time),
472 | title=ConfigVars._title,
473 | total=str(
474 | ConfigVars._aspass + ConfigVars._asfail + ConfigVars._asskip + ConfigVars._aserror + ConfigVars._asxpass + ConfigVars._asxfail),
475 | executed=str(ConfigVars._executed),
476 | _pass=str(ConfigVars._aspass),
477 | fail=str(ConfigVars._asfail),
478 | skip=str(ConfigVars._asskip),
479 | error=str(ConfigVars._aserror),
480 | xpass=str(ConfigVars._asxpass),
481 | xfail=str(ConfigVars._asxfail),
482 | rerun=str(ConfigVars._asrerun),
483 | suite_metrics_row=str(ConfigVars._suite_metrics_content),
484 | test_metrics_row=str(ConfigVars._test_metrics_content),
485 | date=str(self._date()),
486 | test_suites=str(ConfigVars._test_suite_name),
487 | test_suite_length=str(len(ConfigVars._test_suite_name)),
488 | test_suite_pass=str(ConfigVars._test_pass_list),
489 | test_suites_fail=str(ConfigVars._test_fail_list),
490 | test_suites_skip=str(ConfigVars._test_skip_list),
491 | test_suites_xpass=str(ConfigVars._test_xpass_list),
492 | test_suites_xfail=str(ConfigVars._test_fail_list),
493 | test_suites_error=str(ConfigVars._test_error_list),
494 | archive_status=str(ConfigVars._archive_tab_content),
495 | archive_body_content=str(ConfigVars._archive_body_content),
496 | archive_count=str(ConfigVars._archive_count),
497 | archives=str(ConfigVars.archives),
498 | max_failure_suite_name_final=str(ConfigVars.max_failure_suite_name_final),
499 | max_failure_suite_count=str(ConfigVars.max_failure_suite_count),
500 | similar_max_failure_suite_count=str(ConfigVars.similar_max_failure_suite_count),
501 | max_failure_total_tests=str(ConfigVars.max_failure_total_tests),
502 | max_failure_percent=str(ConfigVars.max_failure_percent),
503 | trends_label=str(ConfigVars.trends_label),
504 | tpass=str(ConfigVars.tpass),
505 | tfail=str(ConfigVars.tfail),
506 | tskip=str(ConfigVars.tskip),
507 | attach_screenshot_details=str(ConfigVars._attach_screenshot_details)
508 | )
509 |
510 | # template_text = template_text.replace("__executed_by__", str(platform.uname()[1]))
511 | # template_text = template_text.replace("__os_name__", str(platform.uname()[0]))
512 | # template_text = template_text.replace("__python_version__", str(sys.version.split(' ')[0]))
513 | # template_text = template_text.replace("__generated_date__", str(datetime.datetime.now().strftime("%b %d %Y, %H:%M")))
514 |
515 | return str(template_text)
516 |
    def generate_json_data(self, base):
        """Aggregate all suite results into ``json_data`` and write ``<base>/output.json``.

        :param base: directory that receives ``output.json``.
        """
        self.json_data['date'] = self._date()
        self.json_data['start_time'] = ConfigVars._start_execution_time
        self.json_data['total_suite'] = len(ConfigVars._test_suite_name)

        # Overall run status: FAIL as soon as any suite reports fails/errors.
        suite = self.json_data['content']['suites']
        for i in suite:
            for k in self.json_data['content']['suites'][i]['status']:
                if (k == 'total_fail' or k == 'total_error') and self.json_data['content']['suites'][i]['status'][k] != 0:
                    self.json_data['status'] = "FAIL"
                    break
                else:
                    continue

            # 'status' only exists once a failure was found; a KeyError here means
            # everything so far passed — mark PASS after the last suite.
            try:
                if self.json_data['status'] == "FAIL": break
            except KeyError:
                if len(ConfigVars._test_suite_name) == i + 1: self.json_data['status'] = "PASS"

        # Accumulate run-wide totals from every suite's status block.
        for i in suite:
            for k in self.json_data['content']['suites'][i]['status']:
                if k == 'total_pass':
                    ConfigVars._aspass += self.json_data['content']['suites'][i]['status'][k]
                elif k == 'total_fail':
                    ConfigVars._asfail += self.json_data['content']['suites'][i]['status'][k]
                elif k == 'total_skip':
                    ConfigVars._asskip += self.json_data['content']['suites'][i]['status'][k]
                elif k == 'total_error':
                    ConfigVars._aserror += self.json_data['content']['suites'][i]['status'][k]
                elif k == 'total_xpass':
                    ConfigVars._asxpass += self.json_data['content']['suites'][i]['status'][k]
                elif k == 'total_xfail':
                    ConfigVars._asxfail += self.json_data['content']['suites'][i]['status'][k]
                elif k == 'total_rerun':
                    ConfigVars._asrerun += self.json_data['content']['suites'][i]['status'][k]

        # Note: reruns are intentionally excluded from the grand total.
        ConfigVars._astotal = ConfigVars._aspass + ConfigVars._asfail + ConfigVars._asskip + ConfigVars._aserror + ConfigVars._asxpass + ConfigVars._asxfail

        self.json_data.setdefault('status_list', {})['pass'] = str(ConfigVars._aspass)
        self.json_data.setdefault('status_list', {})['fail'] = str(ConfigVars._asfail)
        self.json_data.setdefault('status_list', {})['skip'] = str(ConfigVars._asskip)
        self.json_data.setdefault('status_list', {})['error'] = str(ConfigVars._aserror)
        self.json_data.setdefault('status_list', {})['xpass'] = str(ConfigVars._asxpass)
        self.json_data.setdefault('status_list', {})['xfail'] = str(ConfigVars._asxfail)
        self.json_data.setdefault('status_list', {})['rerun'] = str(ConfigVars._asrerun)
        self.json_data['total_tests'] = str(ConfigVars._astotal)

        with open(base + '/output.json', 'w') as outfile:
            json.dump(self.json_data, outfile)
566 |
567 | def update_archives_template(self, base):
568 | f = glob.glob(base + '/archive/*.json')
569 | cf = glob.glob(base + '/output.json')
570 | if len(f) > 0:
571 | ConfigVars._archive_count = len(f) + 1
572 | self.load_archive(cf, value='current')
573 |
574 | f.sort(reverse=True)
575 | self.load_archive(f, value='history')
576 | else:
577 | ConfigVars._archive_count = 1
578 | self.load_archive(cf, value='current')
579 |
580 | def load_archive(self, f, value):
581 | def state(data):
582 | if data == 'fail':
583 | return 'times', '#fc6766'
584 | elif data == 'pass':
585 | return 'check', '#98cc64'
586 |
587 | for i, val in enumerate(f):
588 | with open(val) as json_file:
589 | data = json.load(json_file)
590 |
591 | suite_highlights(data)
592 | archive_row_text = ArchiveRow(astate=state(data['status'].lower())[0],
593 | astate_color=state(data['status'].lower())[1])
594 | if value == "current":
595 | archive_row_text.astatus = 'build #' + str(ConfigVars._archive_count)
596 | archive_row_textacount = str(ConfigVars._archive_count)
597 | else:
598 | archive_row_text.astatus = 'build #' + str(len(f) - i)
599 | archive_row_text.acount = str(len(f) - i)
600 |
601 | adate = datetime.strptime(
602 | data['date'].split(None, 1)[0][:1 + 2:] + ' ' +
603 | data['date'].split(None, 1)[1].replace(',', ''), "%b %d %Y"
604 | )
605 |
606 | atime = \
607 | "".join(list(filter(lambda x: ':' in x, time.ctime(float(data['start_time'])).split(' ')))).rsplit(
608 | ':',
609 | 1)[0]
610 | archive_row_text.adate = str(adate.date()) + ' | ' + str(time_converter(atime))
611 | ConfigVars._archive_tab_content += str(archive_row_text)
612 |
613 | _archive_body_text = ArchiveBody(
614 | total_tests=data['total_tests'],
615 | date=data['date'].upper(),
616 | _pass=data['status_list']['pass'],
617 | fail=data['status_list']['fail'],
618 | skip=data['status_list']['skip'],
619 | xpass=data['status_list']['xpass'],
620 | xfail=data['status_list']['xfail'],
621 | error=data['status_list']['error'],
622 | status=data['status'].lower()
623 | )
624 |
625 | if value == "current":
626 | _archive_body_text.iloop = str(i)
627 | _archive_body_text.acount = str(ConfigVars._archive_count)
628 | else:
629 | _archive_body_text.iloop = str(i + 1)
630 | _archive_body_text.acount = str(len(f) - i)
631 |
632 | try:
633 | _archive_body_text.rerun = data['status_list']['rerun']
634 | except KeyError:
635 | _archive_body_text.rerun = '0'
636 |
637 | index = i
638 | if value != "current": index = i + 1
639 | ConfigVars.archives.setdefault(str(index), {})['pass'] = data['status_list']['pass']
640 | ConfigVars.archives.setdefault(str(index), {})['fail'] = data['status_list']['fail']
641 | ConfigVars.archives.setdefault(str(index), {})['skip'] = data['status_list']['skip']
642 | ConfigVars.archives.setdefault(str(index), {})['xpass'] = data['status_list']['xpass']
643 | ConfigVars.archives.setdefault(str(index), {})['xfail'] = data['status_list']['xfail']
644 | ConfigVars.archives.setdefault(str(index), {})['error'] = data['status_list']['error']
645 |
646 | try:
647 | ConfigVars.archives.setdefault(str(index), {})['rerun'] = data['status_list']['rerun']
648 | except KeyError:
649 | ConfigVars.archives.setdefault(str(index), {})['rerun'] = '0'
650 |
651 | ConfigVars.archives.setdefault(str(index), {})['total'] = data['total_tests']
652 | ConfigVars._archive_body_content += str(_archive_body_text)
653 |
654 | def update_trends(self, base):
655 |
656 | f2 = glob.glob(base + '/output.json')
657 | with open(f2[0]) as json_file:
658 | data = json.load(json_file)
659 | adate = datetime.strptime(
660 | data['date'].split(None, 1)[0][:1 + 2:] + ' ' +
661 | data['date'].split(None, 1)[1].replace(',', ''), "%b %d %Y"
662 | )
663 | atime = \
664 | "".join(list(filter(lambda x: ':' in x, time.ctime(float(data['start_time'])).split(' ')))).rsplit(
665 | ':',
666 | 1)[0]
667 | ConfigVars.trends_label.append(
668 | str(time_converter(atime)).upper() + ' | ' + str(adate.date().strftime("%b")) + ' '
669 | + str(adate.date().strftime("%d")))
670 |
671 | ConfigVars.tpass.append(data['status_list']['pass'])
672 | ConfigVars.tfail.append(int(data['status_list']['fail']) + int(data['status_list']['error']))
673 | ConfigVars.tskip.append(data['status_list']['skip'])
674 |
675 | f = glob.glob(base + '/archive' + '/*.json')
676 | f.sort(reverse=True)
677 |
678 | for i, val in enumerate(f):
679 | with open(val) as json_file:
680 | data = json.load(json_file)
681 |
682 | adate = datetime.strptime(
683 | data['date'].split(None, 1)[0][:1 + 2:] + ' ' +
684 | data['date'].split(None, 1)[1].replace(',', ''), "%b %d %Y"
685 | )
686 | atime = \
687 | "".join(list(filter(lambda x: ':' in x, time.ctime(float(data['start_time'])).split(' ')))).rsplit(
688 | ':',
689 | 1)[0]
690 | ConfigVars.trends_label.append(
691 | str(time_converter(atime)).upper() + ' | ' + str(adate.date().strftime("%b")) + ' '
692 | + str(adate.date().strftime("%d")))
693 |
694 | ConfigVars.tpass.append(data['status_list']['pass'])
695 | ConfigVars.tfail.append(int(data['status_list']['fail']) + int(data['status_list']['error']))
696 | ConfigVars.tskip.append(data['status_list']['skip'])
697 |
698 | if i == 4: break
699 |
700 | def attach_screenshots(self, screen_name, test_suite, test_case, test_error):
701 |
702 | _screenshot_details = ScreenshotDetails(
703 | screen_name=str(screen_name),
704 | ts=str(test_suite),
705 | tc=str(test_case),
706 | te=str(test_error)
707 | )
708 |
709 | if len(test_case) == 17: test_case = '..' + test_case
710 |
711 | ConfigVars._attach_screenshot_details += str(_screenshot_details)
712 |
--------------------------------------------------------------------------------
/pytest_html_reporter/plugin.py:
--------------------------------------------------------------------------------
1 | from pytest_html_reporter.html_reporter import HTMLReporter
2 | from pytest_html_reporter.util import clean_screenshots, custom_title
3 |
4 |
def pytest_addoption(parser):
    """Register this plugin's command-line options under 'report generator'."""
    group = parser.getgroup("report generator")

    # (flag, dest, default, help)
    option_specs = (
        ("--html-report", "path", ".", "path to generate html report"),
        ("--title", "title", "PYTEST REPORT", "customize report title"),
        ("--archive-count", "archive_count", "",
         "set maximum build count to display in the archives section"),
    )
    for flag, dest, default, help_text in option_specs:
        group.addoption(
            flag,
            action="store",
            dest=dest,
            default=default,
            help=help_text,
        )
31 |
32 |
def pytest_configure(config):
    """Wire the reporter into pytest: clear stale screenshots, apply the custom
    title, then build and register the HTMLReporter plugin instance."""
    report_path = config.getoption("path")
    clean_screenshots(report_path)

    custom_title(config.getoption("title"))

    config._html = HTMLReporter(report_path, config.getoption("archive_count"), config)
    config.pluginmanager.register(config._html)
44 |
45 |
46 |
--------------------------------------------------------------------------------
/pytest_html_reporter/time_converter.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime as dt
2 |
3 |
def time_converter(time):
    """Convert a 24-hour ``HH:MM`` string to a 12-hour ``h:mm am/pm`` string.

    Examples: ``'13:05' -> '1:05 pm'``, ``'09:15' -> '9:15 am'``,
    ``'00:30' -> '12:30 am'``, ``'12:30' -> '12:30 pm'``.
    """
    midday_dt = dt.strptime('12:00', '%H:%M')
    time_dt = dt.strptime(time, '%H:%M')

    if time_dt >= midday_dt:
        if time_dt >= dt.strptime('13:00', '%H:%M'):
            hours, minutes = clamp_to_twelve(time_dt, midday_dt)
            # BUG FIX: minutes were rendered unpadded ('1:5 pm'); zero-pad them
            # to match the am branch below.
            time = f'{hours}:{minutes:02d}'
        time += ' pm'
    else:
        if time_dt < dt.strptime('10:00', '%H:%M'):
            time = time[1:]  # drop the leading zero: '09:15' -> '9:15'
        if is_midnight(time_dt):
            # 00:xx wraps to 12:xx; clamp_to_twelve yields 12 via timedelta math.
            hours, minutes = clamp_to_twelve(time_dt, midday_dt)
            time = f'{hours}:{minutes:02d}'
        time += ' am'
    return time


def clamp_to_twelve(time_dt, midday_dt):
    """Return ``[hours, minutes]`` of *time_dt* relative to noon (12-hour clock)."""
    clamp_dt = time_dt - midday_dt
    minutes, seconds = divmod(clamp_dt.seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return [hours, minutes]


def is_midnight(time_dt):
    """True for times within the midnight hour (00:00-00:59)."""
    return dt.strptime('00:00', '%H:%M') <= time_dt <= dt.strptime('00:59', '%H:%M')
32 |
--------------------------------------------------------------------------------
/pytest_html_reporter/util.py:
--------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 | import sys
4 | from collections import Counter
5 | from io import BytesIO
6 | from PIL import Image
7 |
8 | from pytest_html_reporter.const_vars import ConfigVars
9 |
10 |
def suite_highlights(data):
    """Tally per-suite pass/fail run history for the "max failure" highlight.

    Suites with zero failures count into ``ConfigVars.p_highlights``; suites
    with at least one failure count into ``ConfigVars.highlights``.
    """
    for i in data['content']['suites']:
        suite = data['content']['suites'][i]
        suite_name = suite['suite_name']
        if suite['status']['total_fail'] == 0:
            ConfigVars.p_highlights[suite_name] = ConfigVars.p_highlights.get(suite_name, 0) + 1
        else:
            ConfigVars.highlights[suite_name] = ConfigVars.highlights.get(suite_name, 0) + 1
26 |
27 |
def generate_suite_highlights():
    """Compute the most-failing suite's stats for the report highlights panel."""
    if not ConfigVars.highlights:
        ConfigVars.max_failure_suite_name_final = 'No failures in History'
        ConfigVars.max_failure_suite_count = 0
        ConfigVars.max_failure_percent = '0'
        return

    worst = max(ConfigVars.highlights, key=ConfigVars.highlights.get)
    worst_count = ConfigVars.highlights[worst]
    ConfigVars.max_failure_suite_name = worst
    ConfigVars.max_failure_suite_count = worst_count

    # Total runs of the worst suite = its passing runs (if any) + failing runs.
    ConfigVars.max_failure_total_tests = ConfigVars.p_highlights.get(worst, 0) + worst_count

    ConfigVars.max_failure_percent = (worst_count / ConfigVars.max_failure_total_tests) * 100

    # Shorten over-long names to fit the panel.
    if len(worst) > 25:
        ConfigVars.max_failure_suite_name_final = ".." + worst[-23:]
    else:
        ConfigVars.max_failure_suite_name_final = worst

    # Detect several suites sharing the same (top) failure count.
    fail_count_frequency = Counter(ConfigVars.highlights.values())
    most_common = max(fail_count_frequency.values())
    if most_common > 1:
        ConfigVars.similar_max_failure_suite_count = most_common
52 |
53 |
def max_rerun():
    """Return the pytest-rerunfailures count from sys.argv, or None when absent.

    Understands both ``--reruns=N`` and ``--reruns N`` forms; the first argv
    entry containing 'reruns' wins.
    """
    for pos, arg in enumerate(sys.argv):
        if 'reruns' not in arg:
            continue
        try:
            if "=" in arg:
                return int(arg.split('=')[1])
            return int(sys.argv[pos + 1])
        except IndexError:
            # Flag given as the last argument with no value.
            return None
    return None
64 |
65 |
def screenshot(data=None):
    """Register a failure screenshot for the report.

    :param data: raw PNG bytes (e.g. ``driver.get_screenshot_as_png()``).
    """
    # Imported here to avoid a circular import with html_reporter.
    from pytest_html_reporter.html_reporter import HTMLReporter

    ConfigVars.screen_base = HTMLReporter.base_path
    ConfigVars.screen_img = Image.open(BytesIO(data))
71 |
72 |
def clean_screenshots(path):
    """Delete the ``pytest_screenshots`` directory under *path*, if present."""
    base = os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
    target = base + '/pytest_screenshots'
    if os.path.isdir(target):
        shutil.rmtree(target)
77 |
78 |
def custom_title(title):
    """Store the report title; titles over 29 chars are cut to 26 plus '...'."""
    ConfigVars._title = (title[:26] + '...') if len(title) > 29 else title
81 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | pytest
2 | pytest-cov
3 | coveralls
4 | selenium
5 | twine
6 | pytest-xdist
7 | beautifulsoup4
8 | pytest-rerunfailures
9 | Pillow
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | import codecs
6 | from setuptools import setup, find_packages
7 |
8 |
def read(fname):
    """Return the UTF-8 text of *fname*, resolved relative to this file."""
    file_path = os.path.join(os.path.dirname(__file__), fname)
    # Use a context manager so the handle is closed promptly (the original
    # leaked the file object returned by codecs.open).
    with codecs.open(file_path, encoding="utf-8") as fh:
        return fh.read()
12 |
13 |
# Distribution metadata for pytest-html-reporter.
setup(
    name="pytest-html-reporter",
    version="0.3.0",
    author="Prashanth Sams",
    author_email="sams.prashanth@gmail.com",
    maintainer="Prashanth Sams",
    maintainer_email="sams.prashanth@gmail.com",
    license="MIT",
    url="https://github.com/prashanth-sams/pytest-html-reporter",
    description="Generates a static html report based on pytest framework",
    long_description=read("README.rst"),
    keywords=["pytest", "py.test", "html", "reporter", "report"],
    packages=find_packages(),
    python_requires=">=3.5",
    install_requires=["pytest", "Pillow"],
    classifiers=[
        "Framework :: Pytest",
        "Topic :: Software Development :: Testing",
        "Programming Language :: Python",
        "Operating System :: OS Independent",
        "License :: OSI Approved :: MIT License",
    ],
    entry_points={
        # Register with pytest's "pytest11" entry-point group so the plugin
        # loads automatically once installed.
        "pytest11": [
            "reporter = pytest_html_reporter.plugin",
        ],
    },
)
42 |
--------------------------------------------------------------------------------
/tests/functional/Readme.md:
--------------------------------------------------------------------------------
1 | # pytest bank
2 | > pytest exercises
3 |
4 | ### Feature
5 | - [x] Basic
6 | - [x] Fixture #mock-data
7 | - [x] UseFixture #background-teardown
8 | - [x] Autouse
9 | - [x] Mark #Tags
10 | - [x] Parameterize #data-driven
11 | - [x] Yield #hooks
12 | - [x] Skip tests
13 |
14 | ### Pytest Runner
15 |
16 | | Type | Command |
17 | | -------------- | --------- |
| Generic run | `pytest -v -s tests/functional/test_yield_fixture.py` |
| Run specific test case | `pytest -v -s tests/functional/test_yield_fixture.py::test_fail` |
| Run tagged tests | `pytest -v -s tests/functional/test_mark.py -m 'slow'` |
21 |
--------------------------------------------------------------------------------
/tests/functional/test_approx.py:
--------------------------------------------------------------------------------
1 | from pytest import approx
2 |
3 |
def test_aprox():
    """Floating-point comparison via pytest.approx on scalars, tuples, and dicts."""
    assert 0.1 + 0.2 == approx(0.3)
    assert (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))
    assert {"a": 0.1 + 0.2, "b": 0.2 + 0.4} == approx({"a": 0.3, "b": 0.6})
8 |
--------------------------------------------------------------------------------
/tests/functional/test_autouse.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 |
class DB:
    """Toy in-memory 'database' that tracks which test is currently active."""

    def __init__(self):
        self.data = []

    def begin(self, test_name):
        """Open a 'transaction' for *test_name*."""
        self.data.append(test_name)

    def rollback(self):
        """Undo the most recent begin()."""
        self.data.pop()
13 |
14 |
@pytest.fixture(scope="module")
def db():
    """One shared DB instance for the entire module."""
    return DB()
18 |
19 |
class TestDB:
    @pytest.fixture(autouse=True)
    def transact(self, request, db):
        # Runs around every test in the class: begin a 'transaction' named after
        # the test, then roll it back afterwards.
        db.begin(request.function.__name__)
        yield
        db.rollback()

    def test_dbx1(self, db):
        assert db.data == ["test_dbx1"]

    def test_dbx2(self, db):
        assert db.data == ["test_dbx2"]
32 |
--------------------------------------------------------------------------------
/tests/functional/test_fixture.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 |
@pytest.fixture
def mock_data():
    """Static sample record: [name, profession, history]."""
    name = "Jesus"
    profession = "Saving world"
    history = "Created everything we know and what we see, you and me"
    return [name, profession, history]
10 |
11 |
def test_fixture_pass(mock_data):
    # mock_data[0] is the name field.
    assert mock_data[0] == "Jesus"
14 |
15 |
def test_fixture_fail(mock_data):
    # Fails by design: mock_data[1] is the profession, not the name.
    assert mock_data[1] == "Jesus"
18 |
--------------------------------------------------------------------------------
/tests/functional/test_mark.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 |
@pytest.mark.slow
def test_fixture_pass():
    """Tagged 'slow'; selected via ``pytest -m slow``."""
    assert 5 == 5
7 |
8 |
@pytest.mark.fast
def test_fixture_fail():
    """Tagged 'fast'; fails by design to exercise FAIL reporting."""
    assert 5 == 6
12 |
--------------------------------------------------------------------------------
/tests/functional/test_parameterize.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 |
@pytest.mark.parametrize("a, b", [(1, 1), (1, 2)])
def test_fixture_pass(a, b):
    # (1, 1) passes; (1, 2) fails by design.
    assert a == b
7 |
--------------------------------------------------------------------------------
/tests/functional/test_screenshot.py:
--------------------------------------------------------------------------------
1 | from selenium import webdriver
2 | import unittest
3 | from pytest_html_reporter import attach
4 | import pytest
5 |
6 |
@pytest.mark.skip(reason="skipping screenshot tests")
class TestClass(unittest.TestCase):
    """Selenium failure scenarios exercising screenshot capture (skipped by default)."""

    def __init__(self, driver):
        # NOTE(review): unittest normally passes the test method name here;
        # calling the parameter `driver` is misleading, but harmless while skipped.
        super().__init__(driver)

    def setUp(self):
        global driver
        self.driver = webdriver.Chrome()

    def test_demo(self):
        # Each test deliberately fails so tearDown captures a screenshot.
        self.driver.get("http://devopsqa.wordpress.com/")
        assert 5 == 4

    def test_demo_2(self):
        self.driver.get("http://devopsqa.wordpress.com/")
        assert 5 == 4

    def test_demo_3(self):
        self.driver.get("http://devopsqa.wordpress.com/")
        assert 5 == 4

    def test_demo_4(self):
        self.driver.get("http://devopsqa.wordpress.com/")
        assert 5 == 4

    def test_demo_5(self):
        self.driver.get("http://devopsqa.wordpress.com/")
        assert 5 == 4

    def test_demo_6(self):
        self.driver.get("http://devopsqa.wordpress.com/")
        assert 5 == 4

    def test_demo_7(self):
        self.driver.get("http://devopsqa.wordpress.com/")
        assert 5 == 4

    def test_demo_8(self):
        self.driver.get("http://devopsqa.wordpress.com/")
        assert 5 == 4

    def test_demo_9(self):
        self.driver.get("http://devopsqa.wordpress.com/")
        assert 5 == 4

    def tearDown(self):
        self.screenshot_on_failure()
        self.driver.close()
        self.driver.quit()

    def screenshot_on_failure(self):
        # Attach a screenshot for each errored/failed outcome of this test.
        for self._testMethodName, error in self._outcome.errors:
            if error:
                attach(data=self.driver.get_screenshot_as_png())
61 |
62 |
# Allow running this file directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
--------------------------------------------------------------------------------
/tests/functional/test_selenium.py:
--------------------------------------------------------------------------------
1 | import time as t
2 | from selenium import webdriver
3 | import pytest
4 |
5 |
@pytest.fixture
def background():
    """Open a remote Chrome session on google.ae; close and quit it after the test."""

    global driver

    driver = webdriver.Remote(
        command_executor="https://0.0.0.0:8080/wd/hub",
        desired_capabilities={"browserName": "chrome", "javascriptEnabled": True},
    )
    driver.get("https://google.ae")
    yield
    driver.close()
    driver.quit()
19 |
20 |
@pytest.mark.usefixtures("background")
class TestClass:
    def test_demo(self):
        # Type into the search box; `driver` comes from the fixture's global.
        driver.find_element_by_css_selector('[aria-label="Search"]').send_keys(
            "Jesus is coming soon!"
        )
        t.sleep(5)
28 |
--------------------------------------------------------------------------------
/tests/functional/test_simple.py:
--------------------------------------------------------------------------------
def test_pass():
    """Trivial passing test."""
    pass
3 |
4 |
def test_fail():
    """Fails by design to populate the FAIL count."""
    raise Exception("fail")
7 |
8 |
def test_failx2():
    """Second designed failure."""
    raise Exception("fail")
11 |
--------------------------------------------------------------------------------
/tests/functional/test_skip_xfail_xpass.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 |
def test_skip():
    """Skipped at runtime via pytest.skip."""
    pytest.skip("skip this test")
6 |
7 |
@pytest.mark.xfail
def test_xfail():
    """Expected failure (reported as xfail)."""
    assert 5 == 3
11 |
12 |
@pytest.mark.xfail
def test_xpass():
    """Passes despite the xfail mark (reported as xpass)."""
    pass
16 |
--------------------------------------------------------------------------------
/tests/functional/test_usefixtures.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | """
4 | # can be used globally as well
5 | # content of pytest.ini
6 | [pytest]
7 | usefixtures = cleandir
8 | """
9 |
10 |
@pytest.fixture
def cleandir():
    """Print before/after markers around each test that uses this fixture."""
    print("before executing test")
    yield
    print("after executing test")
16 |
17 |
@pytest.mark.usefixtures("cleandir")
class TestClass:
    """Two passing tests that both run under the cleandir fixture."""

    def test_fixture_pass(self):
        print("executing tests x1")

    def test_fixture_fail(self):
        print("executing tests x2")
27 |
--------------------------------------------------------------------------------
/tests/functional/test_yield_fixture.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 |
@pytest.fixture
def setup():
    """No-op yield fixture: no setup work, no teardown work.

    ``pytest.yield_fixture`` is deprecated and was removed in pytest 6.2+;
    a plain ``pytest.fixture`` supports yield-style fixtures directly.
    """
    yield
7 |
8 |
9 | def test_pass(setup):
10 | assert True
11 |
12 |
13 | def test_fail(setup):
14 | assert False
15 |
--------------------------------------------------------------------------------
/tests/unit/helper.py:
--------------------------------------------------------------------------------
1 | import random
2 | import string
3 |
4 |
def get_random_string(sz=10):
    """Return a random string of `sz` ASCII letters (default 10)."""
    letters = string.ascii_letters
    return ''.join(random.choice(letters) for _ in range(sz))
7 |
def get_random_number(limit=10000):
    """Return a random integer in the inclusive range [0, limit]."""
    # randrange(limit + 1) is equivalent to randint(0, limit)
    return random.randrange(limit + 1)
--------------------------------------------------------------------------------
/tests/unit/test_mvc_pages.py:
--------------------------------------------------------------------------------
1 | import re
2 | from datetime import date
3 |
4 | from bs4 import BeautifulSoup
5 |
6 | from html_page.archive_body import ArchiveBody
7 | from html_page.archive_row import ArchiveRow
8 | from html_page.floating_error import FloatingError
9 | from html_page.screenshot_details import ScreenshotDetails
10 | from html_page.suite_row import SuiteRow
11 | from html_page.template import HtmlTemplate
12 | from html_page.test_row import TestRow
13 | from tests.unit.helper import get_random_number, get_random_string
14 |
15 |
def test_archive_body():
    """ArchiveBody should interpolate every counter, date, status and loop id."""
    # Random stand-ins for each placeholder in the archive-body template.
    acount = str(get_random_number())
    _date = str(date.today())
    iloop = str(get_random_number())
    total_tests = str(get_random_number())
    _pass = str(get_random_number())
    fail = str(get_random_number())
    skip = str(get_random_number())
    xpass = str(get_random_number())
    xfail = str(get_random_number())
    error = str(get_random_number())
    status = get_random_string()

    _archive_body_text = ArchiveBody(
        total_tests=total_tests,
        date=_date,
        _pass=_pass,
        fail=fail,
        skip=skip,
        xpass=xpass,
        xfail=xfail,
        error=error,
        status=status,
        acount=acount,
        iloop=iloop
    )

    soup = BeautifulSoup(str(_archive_body_text), "html.parser")

    # The header renders as "Build #<acount>"; strip the decoration before comparing.
    acount_s = soup.find("h4", class_="archive-header")
    assert acount_s.text.strip().replace("#", "").replace("Build ", "") == acount

    _date_s = soup.find("div", class_="archive-date")
    assert _date_s.text.strip() == _date

    # Each counter <div> holds the value on its first line followed by its
    # label (e.g. "PASSED"); locate the div by label, compare the value line.
    _pass_s = soup.find(lambda tag: tag.name == "div" and "PASSED" in tag.text, class_="counter")
    assert _pass_s.text.strip().split("\n")[0] == _pass

    fail_s = soup.find(lambda tag: tag.name == "div" and "FAIL" in tag.text, class_="counter")
    assert fail_s.text.strip().split("\n")[0] == fail

    skip_s = soup.find(lambda tag: tag.name == "div" and "SKIPPED" in tag.text, class_="counter")
    assert skip_s.text.strip().split("\n")[0] == skip

    xpass_s = soup.find(lambda tag: tag.name == "div" and "XPASSED" in tag.text, class_="counter")
    assert xpass_s.text.strip().split("\n")[0] == xpass

    xfail_s = soup.find(lambda tag: tag.name == "div" and "XFAILED" in tag.text, class_="counter")
    assert xfail_s.text.strip().split("\n")[0] == xfail

    error_s = soup.find(lambda tag: tag.name == "div" and "ERROR" in tag.text, class_="counter")
    assert error_s.text.strip().split("\n")[0] == error

    # The run status is encoded in the section's CSS class list.
    status_s = soup.find("section", id="statistic")["class"]
    assert status_s == [f"statistic-section-{status}", "one-page-section"]

    # Per-iteration elements carry the loop index in their ids.
    assert soup.find("div", id=f"archive-container-{iloop}")
    assert soup.find("div", id=f"archive-label-{iloop}")
    assert soup.find("canvas", id=f"archive-chart-{iloop}")
75 |
76 |
def test_archive_row():
    """ArchiveRow should render the build link, state icon, status and date."""
    acount = str(get_random_number())
    astate = get_random_string()
    astate_color = f"#{get_random_number()}"
    astatus = get_random_string()
    adate = str(date.today())

    row_markup = str(
        ArchiveRow(acount=acount, astate=astate, astate_color=astate_color, astatus=astatus, adate=adate)
    )
    soup = BeautifulSoup(row_markup, "html.parser")

    # Anchor targets the per-build list item; the icon class encodes the state.
    assert soup.find("a", href=f"#list-item-{acount}")
    assert soup.find("i")["class"] == ["fa", f"fa-{astate}"]
    spans = soup.findAll("span")
    assert spans[0].text.strip() == astatus
    assert spans[1].text.strip() == adate
90 |
91 |
def test_floating_error():
    """FloatingError should link a truncation marker to a modal with the full message."""
    runt = str(get_random_number())
    full_msg = get_random_string()

    markup = str(FloatingError(runt=runt, full_msg=full_msg))
    soup = BeautifulSoup(markup, "html.parser")

    # The "(...)" link points at the modal for this run id.
    link = soup.find("a", href=f"#myModal-{runt}")
    assert link
    assert link.text == "(...)"

    # The modal itself exists and carries the complete error text.
    assert soup.find("div", id=f"myModal-{runt}")
    assert soup.find("p").text.strip() == full_msg
109 |
110 |
def test_screenshot_details():
    """ScreenshotDetails should render the screenshot link, captions and descriptions.

    Fix: the regex patterns are now raw f-strings — in a non-raw literal,
    "\\s" is an invalid escape sequence (SyntaxWarning on Python 3.12+,
    slated to become an error). The match semantics are unchanged.
    """
    screen_name = get_random_string()
    ts = get_random_string()
    tc = get_random_string()
    te = get_random_string()

    screenshot_details = ScreenshotDetails(ts=ts, tc=tc, te=te,
                                           screen_name=screen_name)
    soup = BeautifulSoup(str(screenshot_details), "html.parser")

    # Link carries the image path as href, background style and caption.
    screenshot_link = soup.find("a", class_="video")
    screen_path = f"pytest_screenshots/{screen_name}.png"
    assert screenshot_link["href"] == screen_path
    assert screenshot_link["style"] == f"background-image: url('{screen_path}');"
    assert screenshot_link["data-caption"] == f"SUITE: {ts} :: SCENARIO: {tc}"

    # Hover overlay shows the scenario and error text.
    tc_row = soup.find(class_="video-hover-desc video-hover-small")
    assert tc_row.findAll("span")[0].text.strip() == tc
    assert tc_row.findAll("span")[1].text.strip() == te

    # Text description: suite name followed (across whitespace) by the error.
    ts_p = soup.find("p", class_="text-desc")
    assert re.search(rf"{ts}[\n\s]+{te}", ts_p.text.strip()), ts_p.text.strip()
    assert ts_p.find("strong").text.strip() == ts

    # Detail pane repeats the scenario heading and the suite/error text.
    video_description = soup.find("div", id="Video-desc-01")
    assert video_description.find("h2").text.strip() == tc
    assert re.search(rf"{ts}[\n\s]+{te}", video_description.find("p").text.strip())
    assert video_description.find("strong").text.strip() == ts
139 |
140 |
def test_suite_row():
    """SuiteRow should emit one <td> per suite metric, in declaration order."""
    sname = get_random_string()
    # Seven numeric metrics, generated in the same order as the table columns.
    counts = [str(get_random_number()) for _ in range(7)]
    spass, sfail, sskip, sxpass, sxfail, serror, srerun = counts

    row = SuiteRow(sname=sname, spass=spass, sfail=sfail, sskip=sskip,
                   sxpass=sxpass, sxfail=sxfail, serror=serror, srerun=srerun)
    soup = BeautifulSoup(str(row), "html.parser")

    expected_cells = [sname] + counts
    for cell, expected in zip(soup.findAll("td"), expected_cells):
        assert cell.text.strip() == expected
157 |
158 |
def test_test_row():
    """TestRow should render suite, test, status, duration and the error cell.

    Fix: the regex pattern is now a raw f-string — in a non-raw literal,
    "\\s" is an invalid escape sequence (SyntaxWarning on Python 3.12+,
    slated to become an error). The match semantics are unchanged.
    """
    sname = get_random_string()
    name = get_random_string()
    stat = get_random_string()
    dur = str(get_random_number())
    msg = get_random_string()
    floating_error_text = get_random_string()

    test_row = TestRow(sname=sname, name=name, stat=stat, dur=dur, msg=msg, floating_error_text=floating_error_text)
    soup = BeautifulSoup(str(test_row), "html.parser")

    cells = soup.findAll("td")

    # All but the last cell hold the plain values, in column order.
    for node, expected in zip(cells[:-1], [sname, name, stat, dur]):
        assert node.text.strip() == expected

    # Last cell combines the short message with the floating-error markup.
    assert re.search(rf"{msg}[\s\n]*{floating_error_text}", cells[-1].text.strip())
176 |
def test_template():
    """HtmlTemplate should interpolate every placeholder into the final page."""
    # Random stand-ins for each template placeholder.
    custom_logo = get_random_string()
    execution_time = str(get_random_number())
    title = get_random_string()
    total = str(get_random_number())
    executed = str(get_random_number())
    _pass = str(get_random_number())
    fail = str(get_random_number())
    skip = str(get_random_number())
    error = str(get_random_number())
    xpass = str(get_random_number())
    xfail = str(get_random_number())
    rerun = str(get_random_number())
    suite_metrics_row = get_random_string()
    test_metrics_row = get_random_string()
    date = str(get_random_number())  # NOTE(review): shadows datetime.date imported at module level
    test_suites = str(get_random_number())
    test_suite_length = str(get_random_number())
    test_suite_pass = get_random_string()
    test_suites_fail = get_random_string()
    test_suites_skip = str(get_random_number())
    test_suites_xpass = str(get_random_number())
    test_suites_xfail = str(get_random_number())
    test_suites_error = str(get_random_number())
    archive_status = str(get_random_number())
    archive_body_content = get_random_string()
    archive_count = str(get_random_number())
    archives = str(get_random_number())
    max_failure_suite_name_final = get_random_string()
    max_failure_suite_count = str(get_random_number())
    similar_max_failure_suite_count = str(get_random_number())
    max_failure_total_tests = str(get_random_number())
    max_failure_percent = str(get_random_number())
    trends_label = get_random_string()
    tpass = str(get_random_number())
    tfail = str(get_random_number())
    tskip = str(get_random_number())
    attach_screenshot_details = get_random_string()

    template_page = HtmlTemplate(
        custom_logo=custom_logo,
        execution_time=execution_time,
        title=title,
        total=total,
        executed=executed,
        _pass=_pass,
        fail=fail,
        skip=skip,
        error=error,
        xpass=xpass,
        xfail=xfail,
        rerun=rerun,
        suite_metrics_row=suite_metrics_row,
        test_metrics_row=test_metrics_row,
        date=date,
        test_suites=test_suites,
        test_suite_length=test_suite_length,
        test_suite_pass=test_suite_pass,
        test_suites_fail=test_suites_fail,
        test_suites_skip=test_suites_skip,
        test_suites_xpass=test_suites_xpass,
        test_suites_xfail=test_suites_xfail,
        test_suites_error=test_suites_error,
        archive_status=archive_status,
        archive_body_content=archive_body_content,
        archive_count=archive_count,
        archives=archives,
        max_failure_suite_name_final=max_failure_suite_name_final,
        max_failure_suite_count=max_failure_suite_count,
        similar_max_failure_suite_count=similar_max_failure_suite_count,
        max_failure_total_tests=max_failure_total_tests,
        max_failure_percent=max_failure_percent,
        trends_label=trends_label,
        tpass=tpass,
        tfail=tfail,
        tskip=tskip,
        attach_screenshot_details=attach_screenshot_details
    )

    soup = BeautifulSoup(str(template_page), "html.parser")

    ### Checking if code-behind parts are really interpolated

    # The failure-percent gradient is the last <style> block; compare it
    # against the expected CSS with the percentage interpolated.
    last_style_block = soup.findAll("style")[-1]
    style_block = f""".progress-bar.downloading {{
    background: -webkit-linear-gradient(left, #fc6665 {max_failure_percent}%,#50597b {max_failure_percent}%); /* Chrome10+,Safari5.1+ */
    background: -ms-linear-gradient(left, #fc6665 {max_failure_percent}%,#50597b {max_failure_percent}%); /* IE10+ */
    background: linear-gradient(to right, #fc6665 {max_failure_percent}%,#50597b {max_failure_percent}%); /* W3C */
}}"""

    assert last_style_block.text.strip() == style_block

    # Header widgets: logo, execution time, title, date, total count.
    wrimagecard = soup.find("img", id="wrimagecard")
    assert wrimagecard["src"] == custom_logo

    time_taken_label = soup.find("span", class_="time__taken")
    assert time_taken_label.text.strip() == f"Time taken {execution_time}"

    header_title = soup.find("div", class_="header__title")
    assert header_title.text.strip() == title

    header_date = soup.find("span", class_="header__date")
    assert header_date.text.strip() == date

    total_count = soup.find("span", class_="total__count")
    assert total_count.text.strip() == total

    # Footer metric cells appear in fixed order: pass, fail, skip, xpass, xfail, error, rerun.
    test_metrics = soup.findAll("div", class_="footer-section__data")
    for metric, val in zip(test_metrics, (_pass, fail, skip, xpass, xfail, error, rerun)):
        assert metric.text.strip() == val

    test_suite_length_label = soup.find("div", class_="col-md-8 card border-right").find("div").find("div")
    assert re.search(f"Test Suite\\n\\s+{test_suite_length}", test_suite_length_label.text.strip())

    # "Max failure" dashboard card: tooltip holds the suite name; the
    # percentage paragraph holds "<count> /<total> Times".
    max_failure_dashboard = soup.find("div", class_="col-md-4 card border-left")
    assert max_failure_dashboard.find("div", class_="tooltip bs-tooltip-top tooltip-dark").find("div", class_="tooltip-inner").text.strip() == max_failure_suite_name_final
    assert max_failure_dashboard.find("p", class_="percentage").text.strip() == f"{max_failure_suite_count} /{max_failure_total_tests} Times"

    suite_metrics_table = soup.findAll("table", id="sm")

    for tbl in suite_metrics_table:
        assert tbl.find("tbody").text.strip() == suite_metrics_row

    archive_status_label = soup.find("div", id="list-example")
    assert archive_status_label.text.strip() == archive_status

    archive_body_content_label = soup.find("div", id="archives").findAll("div")[-1]
    assert archive_body_content_label.text.strip() == archive_body_content

    attach_screenshot_details_label = soup.find("div", id="main-content").find("div").find("div")
    assert attach_screenshot_details_label.text.strip() == attach_screenshot_details

    # Inline chart scripts must embed the interpolated data arrays/labels.
    scripts = soup.findAll("script")
    assert [script for script in scripts if f"var x = parseInt({total});" in script.text]
    assert [script for script in scripts if f"data: [{_pass}, {fail}, {skip}, {xpass}, {xfail}, {error}]," in script.text]
    assert [script for script in scripts if f"var passPercent = Math.round(({_pass} / {total}) * 100)" in script.text]
    assert [script for script in scripts if f"for(var i=0; i<={archive_count}; i++)" in script.text and f"var archives = {archives};" in script.text]
    assert [
        script for script in scripts
        if f"labels: {test_suites}," in script.text
        and f"data: {test_suite_pass}" in script.text
        and f"data: {test_suites_fail}" in script.text
        and f"data: {test_suites_skip}" in script.text
        and f"data: {test_suites_xpass}" in script.text
        and f"data: {test_suites_xfail}" in script.text
        and f"data: {test_suites_error}" in script.text
    ]
    assert [script for script in scripts if f"labels : {trends_label}," in script.text
        and f"data : {tpass}" in script.text
        and f"data : {tfail}" in script.text
        and f"data : {tskip}" in script.text
    ]
329 |
--------------------------------------------------------------------------------
/tests/unit/test_plugin.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 |
4 | myPath = os.path.dirname(os.path.abspath(__file__))
5 | sys.path.insert(0, myPath + '/../../')
6 | from pytest_html_reporter.html_reporter import HTMLReporter
7 |
8 |
def test_report_path():
    """report_path must yield a (directory, file name) pair per configured path."""

    def resolved(index):
        # Invoke the property descriptor against the class, as the plugin does.
        return HTMLReporter.report_path.__get__(HTMLReporter)[index]

    # Default: current directory -> default report file name.
    HTMLReporter.path = "."
    assert len(resolved(0)) >= 5
    assert resolved(1) == "pytest_html_report.html"

    # Explicit file path -> the file component is preserved.
    HTMLReporter.path = "./report/test.html"
    assert len(resolved(0)) >= 5
    assert resolved(1) == "test.html"
17 |
--------------------------------------------------------------------------------
/tests/unit/test_time_converter.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 |
4 | myPath = os.path.dirname(os.path.abspath(__file__))
5 | sys.path.insert(0, myPath + '/../../')
6 | from pytest_html_reporter.time_converter import *
7 | import datetime
8 |
9 |
def test_time_converter():
    """time_converter should format 24-hour 'HH:MM' as 12-hour 'h:MM am/pm'."""
    # Plain assert replaces the if/pass/else/raise anti-pattern; the message
    # is preserved as the assertion's failure text.
    assert time_converter("18:31") == '6:31 pm', "invalid method: time_converter"
15 |
16 |
def test_clamp_to_twelve():
    """clamp_to_twelve should map 18:40 against a midday anchor to [6, 40]."""
    time_dt = datetime.datetime(1900, 1, 1, 18, 40)
    midday_dt = datetime.datetime(1900, 1, 1, 12, 0)

    # Plain assert replaces the if/pass/else/raise anti-pattern.
    assert clamp_to_twelve(time_dt, midday_dt) == [6, 40], "invalid method: clamp_to_twelve"
25 |
26 |
def test_is_midnight():
    """is_midnight should be falsy for an 18:40 timestamp."""
    time_dt = datetime.datetime(1900, 1, 1, 18, 40)

    # Plain assert replaces the if/pass/else/raise anti-pattern.
    assert not is_midnight(time_dt), "invalid method: is_midnight"
33 | raise Exception("invalid method: is_midnight")
34 |
--------------------------------------------------------------------------------
/tests/unit/test_util.py:
--------------------------------------------------------------------------------
1 | from pytest_html_reporter.util import max_rerun
2 |
3 |
def test_max_rerun_none():
    """With no rerun option configured, max_rerun() returns None."""
    result = max_rerun()
    assert result is None
6 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | envlist = py{37,py3}{,-ansi2html}, linting
3 |
4 | [testenv]
5 | setenv = PYTHONDONTWRITEBYTECODE=1
6 | deps =
7 | Pillow
8 | flake8-noqa
9 | py{37,py3}-ansi2html: ansi2html
10 | commands = pytest -v -r a {posargs}
11 |
12 | [testenv:linting]
13 | skip_install = True
14 | basepython = python3
15 | deps = pre-commit
16 | commands = pre-commit run --all-files --show-diff-on-failure
17 |
18 | [flake8]
19 | max-line-length = 120
20 | exclude = .eggs,.tox
21 |
22 | [pytest]
23 | testpaths = tests/unit/
--------------------------------------------------------------------------------