├── .gitignore
├── CHANGELOG
├── LICENSE
├── MANIFEST.in
├── README.md
├── doc
│   └── en
│       └── index.md
├── examples
│   ├── basic.py
│   ├── group.py
│   └── inherit.py
├── ptest
│   ├── __init__.py
│   ├── assertion.py
│   ├── config.py
│   ├── decorator.py
│   ├── enumeration.py
│   ├── exception.py
│   ├── htmltemplate
│   │   ├── index.html
│   │   ├── report.css
│   │   └── report.js
│   ├── main.py
│   ├── plistener.py
│   ├── plogger.py
│   ├── reporter.py
│   ├── screen_capturer.py
│   ├── test_executor.py
│   ├── test_filter.py
│   ├── test_finder.py
│   ├── test_suite.py
│   └── util.py
└── setup.py

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
 1 | # Byte-compiled / optimized / DLL files
 2 | __pycache__/
 3 | *.py[cod]
 4 | 
 5 | # C extensions
 6 | *.so
 7 | 
 8 | # Distribution / packaging
 9 | .Python
10 | env/
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | *.egg-info/
23 | .installed.cfg
24 | *.egg
25 | 
26 | # PyInstaller
27 | # Usually these files are written by a python script from a template
28 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
29 | *.manifest
30 | *.spec
31 | 
32 | # Installer logs
33 | pip-log.txt
34 | pip-delete-this-directory.txt
35 | 
36 | # Unit test / coverage reports
37 | htmlcov/
38 | .tox/
39 | .coverage
40 | .coverage.*
41 | .cache
42 | nosetests.xml
43 | coverage.xml
44 | *,cover
45 | 
46 | # Translations
47 | *.mo
48 | *.pot
49 | 
50 | # Django stuff:
51 | *.log
52 | 
53 | # Sphinx documentation
54 | docs/_build/
55 | 
56 | # PyBuilder
57 | target/
58 | 
59 | # Pycharm
60 | .idea/
--------------------------------------------------------------------------------
/CHANGELOG:
--------------------------------------------------------------------------------
 1 | ## Change Log
 2 | 2.0.3 (compared to 2.0.2)
 3 | 
 4 | - Drop support for Python 3.5.
 5 | 
 6 | 2.0.2 (compared to 2.0.1)
 7 | 
 8 | - Drop support for Python 3.4.
 9 | 
10 | 2.0.1 (compared to 2.0.0)
11 | 
12 | - Avoid f-strings to stay compatible with Python < 3.6.
13 | 
14 | 2.0.0 (compared to 1.9.5)
15 | 
16 | - Retire Python 2.x.
17 | - Support logging images via preporter.
18 | 
19 | 1.9.5 (compared to 1.9.4)
20 | 
21 | - Support empty data_provider.
22 | 
23 | 1.9.4 (compared to 1.9.3)
24 | 
25 | - Fix a race condition issue.
26 | 
27 | 1.9.3 (compared to 1.9.2)
28 | 
29 | - Fix ptest breaking when too many test cases run in parallel.
30 | 
31 | 1.9.2 (compared to 1.9.1)
32 | 
33 | - Support callable subject in assert_that assertion.
34 | 
35 | 1.9.1 (compared to 1.9.0)
36 | 
37 | - Add timestamps for logs in the html report.
38 | 
39 | 1.9.0 (compared to 1.8.2)
40 | 
41 | - Add splitter "-" for long command names.
42 | 
43 | 1.8.2 (compared to 1.8.1)
44 | 
45 | - Support namespace packages.
46 | 
47 | 1.8.1 (compared to 1.8.0)
48 | 
49 | - Add is_all_in, is_any_in, is_none_in to assert_that assertion.
50 | 
51 | 1.8.0 (compared to 1.7.7)
52 | 
53 | - Support coroutine tests.
54 | 
55 | - Support logging extra screenshots via preporter.
56 | 
57 | - Optimize the html report.
58 | 
59 | 1.7.7 (compared to 1.7.6)
60 | 
61 | - Optimize assertions.
62 | 
63 | 1.7.6 (compared to 1.7.5)
64 | 
65 | - Add command option -f (--filter) to filter tests.
66 | 
67 | 1.7.5 (compared to 1.7.4)
68 | 
69 | - Support customizing data name for @Test.
70 | 
71 | 1.7.4 (compared to 1.7.3)
72 | 
73 | - Display module info in the html report.
74 | 
75 | 1.7.3 (compared to 1.7.2)
76 | 
77 | - Support displaying webdriver's logs in the html report.
 78 | 
 79 | 1.7.2 (compared to 1.7.1)
 80 | 
 81 | - Fix @Test data provider issue in python 3.
 82 | 
 83 | - Fix @Test timeout issue.
 84 | 
 85 | 1.7.1 (compared to 1.7.0)
 86 | 
 87 | - Improve performance of data provider.
 88 | 
 89 | - Fix an issue with reading property files.
 90 | 
 91 | 1.7.0 (compared to 1.6.0)
 92 | 
 93 | - Support data provider for @Test.
 94 | 
 95 | - Fix an encoding issue.
 96 | 
 97 | 1.6.0 (compared to 1.5.3)
 98 | 
 99 | - Add meets() in ObjSubject of assert_that assertion.
100 | 
101 | - Support taking screenshots for multiple selenium webdrivers.
102 | 
103 | 1.5.3 (compared to 1.5.2)
104 | 
105 | - Fix an issue where the "enabled" attribute of @Test didn't work.
106 | 
107 | 1.5.2 (compared to 1.5.1)
108 | 
109 | - Fix an installation issue by adding the CHANGELOG file.
110 | 
111 | 1.5.1 (compared to 1.4.3)
112 | 
113 | - Add documentation for ptest: https://github.com/KarlGong/ptest/wiki/documentation
114 | 
115 | - Add "assert_that" assertion.
116 | 
117 | - Ignore the test group if no group features are used.
118 | 
119 | - Support run_group for @TestClass.
120 | 
121 | - Support expected_exceptions for @Test.
122 | 
123 | 1.4.3 (compared to 1.4.2)
124 | 
125 | - Add command option -m (--merge-xunit-xmls) to merge the xunit result xmls.
126 | 
127 | 1.4.2 (compared to 1.4.1)
128 | 
129 | - Add detailed information for screenshots.
130 | 
131 | 1.4.1 (compared to 1.4.0)
132 | 
133 | - The instance variables defined in @BeforeSuite, @BeforeClass, @BeforeGroup can be accessed by other test fixtures.
134 | 
135 | - Support custom args in test fixtures.
136 | 
137 | - Add option (--python-paths) to specify additional python paths.
138 | 
139 | 1.4.0 (compared to 1.3.2)
140 | 
141 | - Support @BeforeSuite, @BeforeClass, @BeforeGroup, @AfterSuite, @AfterClass, @AfterGroup.
142 | 
143 | - Support timeout for test fixtures.
144 | 
145 | - Redesign the html report.
146 | 
147 | 1.3.2 (compared to 1.3.1)
148 | 
149 | - Add cmd line entry points for py3.
150 | 
151 | - All temp data is stored in the temp folder.
152 | 
153 | 1.3.1 (compared to 1.3.0)
154 | 
155 | - Add the examples folder.
156 | 
157 | - Support declaring additional arguments in test methods.
158 | 
159 | 1.3.0 (compared to 1.2.2)
160 | 
161 | - Support py3.
162 | 
163 | - No extra package is needed to capture screenshots.
164 | 
165 | 1.2.2 (compared to 1.2.1)
166 | 
167 | - Support default value for config.get_property().
168 | 
169 | - Add filter for test case status in html report.
170 | 
171 | 1.2.1 (compared to 1.2.0)
172 | 
173 | - Support multiple test listeners.
174 | 
175 | 1.2.0 (compared to 1.1.1)
176 | 
177 | - Support run/debug in Pycharm via a ptest plugin.
178 | 
179 | - Support filtering test cases by group.
180 | 
181 | 1.1.0 (compared to 1.0.4)
182 | 
183 | - No extra code is needed to capture screenshots for selenium tests.
184 | 
185 | - Add always_run attribute to @Test.
186 | 
187 | - Add command option --disable-screenshot to disable taking screenshots for failed test fixtures.
188 | 
189 | - Support groups in test classes.
190 | 
191 | 1.0.4 (compared to 1.0.3)
192 | 
193 | - Support capturing screenshots for non-selenium tests.
194 | 
195 | - Optimize the html report.
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |                                  Apache License
2 |                            Version 2.0, January 2004
3 |                         http://www.apache.org/licenses/
4 | 
5 |    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 | 
7 |    1. Definitions.
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 
180 |    To apply the Apache License to your work, attach the following
181 |    boilerplate notice, with the fields enclosed by brackets "{}"
182 |    replaced with your own identifying information. (Don't include
183 |    the brackets!) The text should be enclosed in the appropriate
184 |    comment syntax for the file format. We also recommend that a
185 |    file or class name and description of purpose be included on the
186 |    same "printed page" as the copyright notice for easier
187 |    identification within third-party archives.
188 | 
189 |    Copyright 2015-present Karl Gong
190 | 
191 |    Licensed under the Apache License, Version 2.0 (the "License");
192 |    you may not use this file except in compliance with the License.
193 |    You may obtain a copy of the License at
194 | 
195 |        http://www.apache.org/licenses/LICENSE-2.0
196 | 
197 |    Unless required by applicable law or agreed to in writing, software
198 |    distributed under the License is distributed on an "AS IS" BASIS,
199 |    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 |    See the License for the specific language governing permissions and
201 |    limitations under the License.
202 | 
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE
2 | include README.md
3 | include CHANGELOG
4 | include setup.py
5 | graft examples
6 | graft doc
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | # ptest
 2 | 
 3 | [![image](https://img.shields.io/pypi/v/ptest.svg)](https://pypi.org/project/ptest)
 4 | [![image](https://img.shields.io/pypi/pyversions/ptest.svg)](https://pypi.org/project/ptest)
 5 | 
 6 | ptest is a lightweight test framework for Python. With ptest, you can tag test
 7 | classes & test cases by decorators, execute test cases by command line,
 8 | and get clear reports.
 9 | 
10 | Find the latest version on github: <https://github.com/KarlGong/ptest>
11 | or PyPI: <https://pypi.org/project/ptest>
12 | 
13 | The documentation is on github wiki:
14 | <https://github.com/KarlGong/ptest/wiki/documentation>
15 | 
16 | ## Installation
17 | 
18 | The latest stable release is available on PyPI and can be installed with
19 | `pip`.
20 | 
21 |     $ pip install ptest
22 | 
23 | ## Pycharm Plugin
24 | 
25 | A Pycharm plugin for ptest has been released. It is now easy to run/debug
26 | ptest within the IDE using the standard run configuration. Find the
27 | latest version on JetBrains:
28 | 
29 | ## Best Practice
30 | 
31 | First, create a Python file: *c:\folder\mytest.py*
32 | 
33 | You can tag a test class, tests, before methods and after methods by adding the
34 | decorators @TestClass, @Test, @BeforeMethod and @AfterMethod.
35 | 
36 | ```python
37 | # c:\folder\mytest.py
38 | from ptest.decorator import TestClass, Test, BeforeMethod, AfterMethod
39 | from ptest.assertion import assert_equals, fail, assert_not_none
40 | from ptest.plogger import preporter
41 | from ptest import config
42 | 
43 | @TestClass(run_mode="parallel")  # the test cases in this class will be executed by multiple threads
44 | class PTestClass:
45 |     @BeforeMethod(description="Prepare test data.")
46 |     def before(self):
47 |         preporter.info("setting expected result.")
48 |         self.expected = 10
49 | 
50 |     @Test(tags=["regression", "smoke"])
51 |     def test1(self):
52 |         assert_equals(10, self.expected)  # pass
53 | 
54 |     @Test(tags="smoke, nightly")
55 |     def test2(self):
56 |         assert_not_none(config.get_property("key"))  # assert the property defined via -D<key>=<value> in cmd line
57 | 
58 |     @Test(enabled=False)  # won't be run
59 |     def test3(self):
60 |         fail("failed")
61 | 
62 |     @AfterMethod(always_run=True, description="Clean up")
63 |     def after(self):
64 |         preporter.info("cleaning up")
65 | ```
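Besides the simple assertions used above, ptest also ships a fluent `assert_that` API (defined in `ptest/assertion.py`). A minimal illustration of chaining — the subjects here are made-up examples, not part of the framework:

```python
from ptest.assertion import assert_that

# chain several assertions on one subject; named() labels it in failure messages
assert_that([1, 2, 3]).named("numbers").has_length(3).contains(2)
assert_that("ptest").starts_with("p").is_equal_to_ignoring_case("PTEST")
# a callable subject can be asserted to raise a given exception
assert_that(lambda: 1 / 0).raises_exception(ZeroDivisionError)
# will() re-evaluates the callable until the chained assertion passes or times out (in ms)
assert_that(lambda: 1 + 1).will(interval=100, timeout=1000).is_equal_to(2)
```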
66 | 
67 | Then execute all the test cases in module *mytest.py* with 2
68 | threads. Use `-w` to specify the workspace, `-t` to specify the target,
69 | and `-n` to specify the number of test executors (threads). In this case,
70 | the workspace is *c:\folder*, the target is *mytest*, and the number of test
71 | executors is *2*.
72 | 
73 | *Note:* If you are using Windows, please confirm that
74 | **%python_installation_dir%\Scripts** (e.g., C:\Python35\Scripts,
75 | C:\Python37\Scripts) is added to the PATH environment variable.
76 | 
77 |     $ ptest -w c:\folder -t mytest -n 2
78 | 
79 | The target can be a package/module/class/method. If the target is a
80 | package/module/class, all the test cases under the target will be executed.
81 | For example, to execute only the test *test1* in this
82 | module:
83 | 
84 |     $ ptest -w c:\folder -t mytest.PTestClass.test1
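Properties can be passed to tests on the command line in the `-D<key>=<value>` form (see `ptest/config.py`) and read back via `config.get_property()`. For instance, the property asserted by *test2* above could be supplied like this — the key and value here are just placeholders:

    $ ptest -w c:\folder -t mytest -n 2 -Dkey=value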
85 | 
86 | For more options, please use `-h`.
87 | 
88 |     $ ptest -h
89 | 
90 | For more code examples, please refer to the `examples` folder in source
91 | distribution or visit
92 | <https://github.com/KarlGong/ptest/tree/master/examples>
93 | 
94 | ## Contact me
95 | 
96 | For information and suggestions you can contact me at
97 | 
--------------------------------------------------------------------------------
/examples/basic.py:
--------------------------------------------------------------------------------
 1 | from ptest.decorator import TestClass, Test, BeforeMethod, AfterMethod, BeforeSuite, BeforeClass, BeforeGroup, \
 2 |     AfterGroup, AfterClass, AfterSuite
 3 | from ptest.assertion import assert_equals, fail, assert_not_none
 4 | from ptest.plogger import preporter
 5 | from ptest import config
 6 | 
 7 | 
 8 | # the test cases in this class will be executed by multiple threads
 9 | @TestClass(run_mode="parallel")
10 | class PTestClass:
11 |     @BeforeSuite()
12 |     def before_suite(self):
13 |         preporter.info("before suite")
14 | 
15 |     @BeforeClass()
16 |     def before_class(self):
17 |         preporter.info("before class")
18 | 
19 |     @BeforeGroup()
20 |     def before_group(self):
21 |         preporter.info("before group")
22 | 
23 |     @BeforeMethod(description="Prepare test data.")
24 |     def before(self):
25 |         preporter.info("setting expected result.")
26 |         self.expected = 10
27 | 
28 |     @Test(tags=["regression", "smoke"])
29 |     def test1(self):
30 |         assert_equals(10, self.expected)  # pass
31 | 
32 |     @Test(tags="smoke, nightly")
33 |     def test2(self):
34 |         # assert the property defined via -D<key>=<value> in cmd line
35 |         assert_not_none(config.get_property("key"))
36 | 
37 |     @Test(enabled=False)  # won't be run
38 |     def test3(self):
39 |         fail("failed")
40 | 
41 |     # always_run means that the @AfterMethod will be run even if the @BeforeMethod failed
42 |     @AfterMethod(always_run=True, description="Clean up")
43 |     def after(self):
44 |         preporter.info("cleaning up")
45 | 
46 |     @AfterGroup()
47 |     def after_group(self):
48 |         preporter.info("after group")
49 | 
50 |     @AfterClass()
51 |     def after_class(self):
52 |         preporter.info("after class")
53 | 
54 |     @AfterSuite()
55 |     def after_suite(self):
56 |         preporter.info("after suite")
57 | 
--------------------------------------------------------------------------------
/examples/group.py:
--------------------------------------------------------------------------------
 1 | from ptest.decorator import TestClass, Test, BeforeMethod, AfterMethod
 2 | from ptest.assertion import assert_equals
 3 | from ptest.plogger import preporter
 4 | 
 5 | CN_GROUP = "CN"
 6 | US_GROUP = "US"
 7 | 
 8 | 
 9 | @TestClass()
10 | class PTestClass:
11 |     @BeforeMethod(group=CN_GROUP)
12 |     def before_cn(self):
13 |         self.expected = "cn"
14 | 
15 |     @BeforeMethod(group=US_GROUP)
16 |     def before_us(self):
17 |         self.expected = "us"
18 | 
19 |     @Test(group=CN_GROUP)
20 |     def test_cn(self):
21 |         assert_equals("cn", self.expected)
22 | 
23 |     @Test(group=US_GROUP)
24 |     def test_us(self):
25 |         assert_equals("us", self.expected)
26 | 
27 |     @AfterMethod(group=CN_GROUP)
28 |     def after_cn(self):
29 |         preporter.info("cleaning up")
30 | 
31 |     @AfterMethod(group=US_GROUP)
32 |     def after_us(self):
33 |         preporter.info("cleaning up")
34 | 
--------------------------------------------------------------------------------
/examples/inherit.py:
--------------------------------------------------------------------------------
 1 | from ptest.decorator import TestClass, Test, BeforeMethod, AfterMethod
 2 | from ptest.assertion import assert_equals
 3 | from ptest.plogger import preporter
 4 | 
 5 | 
 6 | class TestBase:
 7 |     @BeforeMethod()
 8 |     def before(self):
preporter.info("setting expected result.") 10 | self.expected = 10 11 | 12 | @AfterMethod(always_run=True) 13 | def after(self): 14 | preporter.info("cleaning up") 15 | 16 | 17 | @TestClass() 18 | class PTestClass(TestBase): 19 | @Test() 20 | def test1(self): 21 | assert_equals(10, self.expected) # pass 22 | 23 | @Test() 24 | def test2(self): 25 | assert_equals(20, self.expected) # failed 26 | -------------------------------------------------------------------------------- /ptest/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "2.0.3" -------------------------------------------------------------------------------- /ptest/assertion.py: -------------------------------------------------------------------------------- 1 | import re 2 | import time 3 | from datetime import datetime, date 4 | from numbers import Number 5 | 6 | from typing import Any, List, Set, Union, Iterable, Callable, Tuple, Dict, Type 7 | 8 | 9 | # ------------------------------------------- 10 | # ------------ simple assertion ------------- 11 | # ------------------------------------------- 12 | def assert_true(actual: Any, msg: str = ""): 13 | if actual is not True: 14 | __raise_error(msg, f"Expected: , Actual: <{actual}>.") 15 | 16 | 17 | def assert_false(actual: Any, msg: str = ""): 18 | if actual is not False: 19 | __raise_error(msg, f"Expected: , Actual: <{actual}>.") 20 | 21 | 22 | def assert_none(actual: Any, msg: str = ""): 23 | if actual is not None: 24 | __raise_error(msg, f"Expected: , Actual: <{actual}>.") 25 | 26 | 27 | def assert_not_none(actual: Any, msg: str = ""): 28 | if actual is None: 29 | __raise_error(msg, f"Expected: NOT , Actual: .") 30 | 31 | 32 | def assert_equals(actual: Any, expected: Any, msg: str = ""): 33 | if not actual == expected: 34 | __raise_error(msg, f"Expected: <{expected}>, Actual: <{actual}>.") 35 | 36 | 37 | def assert_not_equals(actual: Any, not_expected: Any, msg: str = ""): 38 | if actual == not_expected: 39 | __raise_error(msg, f"Expected: NOT <{not_expected}>, Actual: <{actual}>.") 40 | 41 | 42 | def assert_list_equals(actual_list: List, expected_list: List, msg: str = ""): 43 | if len(actual_list) != len(expected_list): 44 | __raise_error(msg, f"size of expected list <{expected_list}> is: <{len(expected_list)}>, but size of actual list <{actual_list}> is: <{len(actual_list)}>.") 45 | for i, element in enumerate(actual_list): 46 | if not element == expected_list[i]: 47 | __raise_error(msg, f"element of expected list <{expected_list}> is: <{expected_list[i]}>, but element of actual list <{actual_list}> is: <{actual_list[i]}>") 48 | 49 | 50 | def assert_list_elements_equal(actual_list: List, expected_list: List, msg: str = ""): 51 | diff_elements = [element for element in actual_list if not element in expected_list] 52 | if len(diff_elements) != 0: 53 | __raise_error(msg, f"expected list <{expected_list}> doesn't contain elements <{diff_elements}> from actual list <{actual_list}>.") 54 | diff_elements = [element for element in expected_list if not element in actual_list] 55 | if len(diff_elements) != 0: 56 | __raise_error(msg, f"actual list <{actual_list}> doesn't contain elements <{diff_elements}> from expected list <{expected_list}>.") 57 | 58 | 59 | def assert_set_contains(superset: Set, subset: Set, msg: str = ""): 60 | diff_elements = [element for element in subset if not element in superset] 61 | if len(diff_elements) != 0: 62 | __raise_error(msg, f"Superset <{superset}> doesn't contain elements 
<{diff_elements}> from subset <{subset}>.") 63 | 64 | 65 | def fail(msg: str = ""): 66 | __raise_error("", msg) 67 | 68 | 69 | def __raise_error(msg: str, error_msg: str): 70 | if msg: 71 | raise_msg = f"{msg}\n{error_msg}" 72 | else: 73 | raise_msg = error_msg 74 | raise AssertionError(raise_msg) 75 | 76 | 77 | # ------------------------------------------- 78 | # --------- "assert that" assertion --------- 79 | # ------------------------------------------- 80 | 81 | SUBJECT_TYPE_MAP = {} 82 | 83 | AllSubjects = Union["_Subject", "_ObjSubject", "_NoneSubject", "_StringSubject", "_BoolSubject", "_NumericSubject", "_ListOrTupleSubject", 84 | "_SetSubject", "_DictSubject", "_DateTimeSubject", "_DateSubject", "_CallableSubject"] 85 | 86 | IterableSubjects = Union["_Subject", "_ObjSubject", "_IterableSubject", "_StringSubject", "_ListOrTupleSubject", "_SetSubject", 87 | "_DictSubject"] 88 | 89 | 90 | def assert_that(subject: Any) -> AllSubjects: 91 | if subject is None: 92 | return _NoneSubject(subject) 93 | if isinstance(subject, str): 94 | return _StringSubject(subject) 95 | if isinstance(subject, bool): 96 | return _BoolSubject(subject) 97 | if isinstance(subject, Number): 98 | return _NumericSubject(subject) 99 | if isinstance(subject, (list, tuple)): 100 | return _ListOrTupleSubject(subject) 101 | if isinstance(subject, set): 102 | return _SetSubject(subject) 103 | if isinstance(subject, dict): 104 | return _DictSubject(subject) 105 | if isinstance(subject, datetime): 106 | return _DateTimeSubject(subject) 107 | if isinstance(subject, date): 108 | return _DateSubject(subject) 109 | if callable(subject): 110 | return _CallableSubject(subject) 111 | for subject_type, subject_class in SUBJECT_TYPE_MAP.items(): 112 | if isinstance(subject, subject_type): 113 | return subject_class(subject) 114 | return _ObjSubject(subject) 115 | 116 | 117 | # this method is used to format the elements 118 | def _format_elements(elements: Union[List, Tuple]) -> str: 119 | return f"<{str(elements)[1:-1]}>" 120 | 121 | 122 | # this method is used to format the obj 123 | def _format(obj: Any) -> str: 124 | return f"{type(obj).__name__} <{obj}>" 125 | 126 | 127 | class _Subject(object): 128 | def __init__(self, subject): 129 | self._subject = subject 130 | self._subject_name = None 131 | self._msg = None 132 | 133 | def named(self, name: str) -> AllSubjects: 134 | """ 135 | Give a name to this subject. 136 | """ 137 | self._subject_name = str(name) 138 | return self 139 | 140 | def with_message(self, message: str) -> AllSubjects: 141 | """ 142 | Set custom message for this assertion. 143 | """ 144 | self._msg = str(message) 145 | return self 146 | 147 | def _raise_error(self, error_msg: str, error: Type[Exception] = AssertionError): 148 | if self._msg: 149 | error_msg = f"{self._msg}\n{error_msg}" 150 | raise error(error_msg) 151 | 152 | def __str__(self): 153 | if self._subject_name is None: 154 | return _format(self._subject) 155 | else: 156 | return f"{_format(self._subject)} named \"{self._subject_name}\"" 157 | 158 | 159 | class _ObjSubject(_Subject): 160 | def __init__(self, subject): 161 | _Subject.__init__(self, subject) 162 | 163 | def is_instance_of(self, class_or_type_or_tuple) -> AllSubjects: 164 | """ 165 | Fails unless this subject is an instance of given class or type or tuple of types. 166 | 167 | Assert whether an object is an instance of a class or of a subclass thereof. 168 | With a type as second argument, return whether that is the object's type. 
169 |         The form using a tuple, is_instance_of(x, (A, B, ...)), is a shortcut for
170 |         is_instance_of(x, A) or is_instance_of(x, B) or ... (etc.).
171 |         """
172 |         if not isinstance(self._subject, class_or_type_or_tuple):
173 |             self._raise_error(f"Unexpectedly that {self} is not instance of {_format_elements(class_or_type_or_tuple)}.")
174 |         return self
175 | 
176 |     def is_type_of(self, type_) -> AllSubjects:
177 |         """
178 |         Fails unless this subject is of given type.
179 |         """
180 |         if type(self._subject) is not type_:
181 |             self._raise_error(f"Unexpectedly that {self} is not type of <{type_}>.")
182 |         return self
183 | 
184 |     def is_equal_to(self, other_obj: Any) -> AllSubjects:
185 |         """
186 |         Fails unless this subject is equal to other obj.
187 |         """
188 |         if not self._subject == other_obj:
189 |             self._raise_error(f"Unexpectedly that {self} is not equal to {_format(other_obj)}.")
190 |         return self
191 | 
192 |     def is_not_equal_to(self, other_obj: Any) -> AllSubjects:
193 |         """
194 |         Fails unless this subject is not equal to other obj.
195 |         """
196 |         if self._subject == other_obj:
197 |             self._raise_error(f"Unexpectedly that {self} is equal to {_format(other_obj)}.")
198 |         return self
199 | 
200 |     def is_same_as(self, other_obj: Any) -> AllSubjects:
201 |         """
202 |         Fails unless this subject is identical to other obj.
203 |         """
204 |         if self._subject is not other_obj:
205 |             self._raise_error(f"Unexpectedly that {self} is not identical to {_format(other_obj)}.")
206 |         return self
207 | 
208 |     def is_not_same_as(self, other_obj: Any) -> AllSubjects:
209 |         """
210 |         Fails unless this subject is not identical to other obj.
211 |         """
212 |         if self._subject is other_obj:
213 |             self._raise_error(f"Unexpectedly that {self} is identical to {_format(other_obj)}.")
214 |         return self
215 | 
216 |     def is_none(self) -> AllSubjects:
217 |         """
218 |         Fails unless this subject is None.
219 |         """
220 |         if self._subject is not None:
221 |             self._raise_error(f"Unexpectedly that {self} is not <None>.")
222 |         return self
223 | 
224 |     def is_not_none(self) -> AllSubjects:
225 |         """
226 |         Fails unless this subject is not None.
227 |         """
228 |         if self._subject is None:
229 |             self._raise_error(f"Unexpectedly that {self} is <None>.")
230 |         return self
231 | 
232 |     def is_in(self, iterable: Iterable) -> AllSubjects:
233 |         """
234 |         Fails unless this subject is equal to one element in the given iterable.
235 |         """
236 |         if self._subject not in iterable:
237 |             self._raise_error(f"Unexpectedly that {self} is not in {_format(iterable)}.")
238 |         return self
239 | 
240 |     def is_not_in(self, iterable: Iterable) -> AllSubjects:
241 |         """
242 |         Fails unless this subject is not equal to any element in the given iterable.
243 |         """
244 |         if self._subject in iterable:
245 |             self._raise_error(f"Unexpectedly that {self} is in {_format(iterable)}.")
246 |         return self
247 | 
248 |     def meets(self, func: Callable[[Any], Any]) -> AllSubjects:
249 |         """
250 |         Fails unless this subject meets the given function.
251 |         Note: The function must accept one argument.
252 | 
253 |         For Example:
254 |             assert_that(99).meets(lambda x: x > 0)
255 | 
256 |             def is_positive(num):
257 |                 return num > 0
258 |             assert_that(99).meets(is_positive)
259 |         """
260 |         if not func(self._subject):
261 |             self._raise_error(f"Unexpectedly that {self} doesn't meet function <{func.__name__}>.")
262 |         return self
263 | 
264 |     def attr(self, attribute_name: str) -> AllSubjects:
265 |         """
266 |         Assert the attribute of this subject. If the attribute does not exist, raise AttributeError.
267 |         """
268 |         if not hasattr(self._subject, attribute_name):
269 |             self._raise_error(f"Unexpectedly that {self} doesn't have attribute <{attribute_name}>.", error=AttributeError)
270 |         return assert_that(getattr(self._subject, attribute_name))
271 | 
272 |     def has_attr(self, attribute_name: str) -> AllSubjects:
273 |         """
274 |         Fails unless this subject has the given attribute.
275 |         """
276 |         if not hasattr(self._subject, attribute_name):
277 |             self._raise_error(f"Unexpectedly that {self} doesn't have attribute <{attribute_name}>.")
278 |         return self
279 | 
280 |     def does_not_have_attr(self, attribute_name: str) -> AllSubjects:
281 |         """
282 |         Fails unless this subject doesn't have the given attribute.
283 |         """
284 |         if hasattr(self._subject, attribute_name):
285 |             self._raise_error(f"Unexpectedly that {self} has attribute <{attribute_name}>.")
286 |         return self
287 | 
288 |     def __getattr__(self, item):
289 |         self._raise_error(f"Cannot perform assertion \"{item}\" for {self}.", error=AttributeError)
290 | 
291 | 
292 | class _NoneSubject(_ObjSubject):
293 |     def __init__(self, subject):
294 |         _ObjSubject.__init__(self, subject)
295 | 
296 | 
297 | class _BoolSubject(_ObjSubject):
298 |     def __init__(self, subject):
299 |         _ObjSubject.__init__(self, subject)
300 | 
301 |     def is_true(self) -> "_BoolSubject":
302 |         """
303 |         Fails unless this subject is true.
304 |         """
305 |         if self._subject is not True:
306 |             self._raise_error(f"Unexpectedly that {self} is not <True>.")
307 |         return self
308 | 
309 |     def is_false(self) -> "_BoolSubject":
310 |         """
311 |         Fails unless this subject is false.
312 |         """
313 |         if self._subject is not False:
314 |             self._raise_error(f"Unexpectedly that {self} is not <False>.")
315 |         return self
316 | 
317 | 
318 | class _NumericSubject(_ObjSubject):
319 |     def __init__(self, subject):
320 |         _ObjSubject.__init__(self, subject)
321 | 
322 |     def is_less_than(self, other_number: Number) -> "_NumericSubject":
323 |         """
324 |         Fails unless this subject is less than other number.
325 |         """
326 |         if self._subject >= other_number:
327 |             self._raise_error(f"Unexpectedly that {self} is not less than {_format(other_number)}.")
328 |         return self
329 | 
330 |     def is_greater_than(self, other_number: Number) -> "_NumericSubject":
331 |         """
332 |         Fails unless this subject is greater than other number.
333 |         """
334 |         if self._subject <= other_number:
335 |             self._raise_error(f"Unexpectedly that {self} is not greater than {_format(other_number)}.")
336 |         return self
337 | 
338 |     def is_less_than_or_equal_to(self, other_number: Number) -> "_NumericSubject":
339 |         """
340 |         Fails unless this subject is less than or equal to other number.
341 |         """
342 |         if self._subject > other_number:
343 |             self._raise_error(f"Unexpectedly that {self} is greater than {_format(other_number)}.")
344 |         return self
345 | 
346 |     def is_at_most(self, other_number: Number) -> "_NumericSubject":
347 |         """
348 |         Fails unless this subject is less than or equal to other number.
349 |         """
350 |         return self.is_less_than_or_equal_to(other_number)
351 | 
352 |     def is_greater_than_or_equal_to(self, other_number: Number) -> "_NumericSubject":
353 |         """
354 |         Fails unless this subject is greater than or equal to other number.
355 |         """
356 |         if self._subject < other_number:
357 |             self._raise_error(f"Unexpectedly that {self} is less than {_format(other_number)}.")
358 |         return self
359 | 
360 |     def is_at_least(self, other_number: Number) -> "_NumericSubject":
361 |         """
362 |         Fails unless this subject is greater than or equal to other number.
363 | """ 364 | return self.is_greater_than_or_equal_to(other_number) 365 | 366 | def is_zero(self) -> "_NumericSubject": 367 | """ 368 | Fails unless this subject is zero (0). 369 | """ 370 | if self._subject != 0: 371 | self._raise_error(f"Unexpectedly that {self} is not <0>.") 372 | return self 373 | 374 | def is_not_zero(self) -> "_NumericSubject": 375 | """ 376 | Fails unless this subject is not zero (0). 377 | """ 378 | if self._subject == 0: 379 | self._raise_error(f"Unexpectedly that {self} is <0>.") 380 | return self 381 | 382 | def is_positive(self) -> "_NumericSubject": 383 | """ 384 | Fails unless this subject is positive. 385 | """ 386 | if self._subject <= 0: 387 | self._raise_error(f"Unexpectedly that {self} is not positive.") 388 | return self 389 | 390 | def is_negative(self) -> "_NumericSubject": 391 | """ 392 | Fails unless this subject is negative. 393 | """ 394 | if self._subject >= 0: 395 | self._raise_error(f"Unexpectedly that {self} is not negative.") 396 | return self 397 | 398 | def is_between(self, low: Number, high: Number) -> "_NumericSubject": 399 | """ 400 | Fails unless this subject is between low and high. 401 | 402 | Note: low and high are included 403 | """ 404 | if self._subject < low or self._subject > high: 405 | self._raise_error(f"Unexpectedly that {self} is not between low {_format(low)} and high {_format(high)}.") 406 | return self 407 | 408 | 409 | class _IterableSubject(_ObjSubject): 410 | def __init__(self, subject): 411 | _ObjSubject.__init__(self, subject) 412 | 413 | def is_empty(self) -> IterableSubjects: 414 | """ 415 | Fails unless this subject is empty. 416 | """ 417 | if len(self._subject) != 0: 418 | self._raise_error(f"Unexpectedly that {self} is not empty.") 419 | return self 420 | 421 | def is_not_empty(self) -> IterableSubjects: 422 | """ 423 | Fails unless this subject is not empty. 424 | """ 425 | if len(self._subject) == 0: 426 | self._raise_error(f"Unexpectedly that {self} is empty.") 427 | return self 428 | 429 | def has_length(self, expected_length: int) -> IterableSubjects: 430 | """ 431 | Fails unless this subject has the given length. 432 | """ 433 | if not len(self._subject) == expected_length: 434 | self._raise_error(f"Unexpectedly that {self} doesn't have a length of <{expected_length}>. It is <{len(self._subject)}>.") 435 | return self 436 | 437 | def contains(self, obj: Any) -> IterableSubjects: 438 | """ 439 | Fails unless this subject contains the given object. 440 | """ 441 | if obj not in self._subject: 442 | self._raise_error(f"Unexpectedly that {self} doesn't contain {_format(obj)}.") 443 | return self 444 | 445 | def does_not_contain(self, obj: Any) -> IterableSubjects: 446 | """ 447 | Fails unless this subject doesn't contain the given object. 448 | """ 449 | if obj in self._subject: 450 | self._raise_error(f"Unexpectedly that {self} contains {_format(obj)}.") 451 | return self 452 | 453 | def contains_all_in(self, iterable: Iterable) -> IterableSubjects: 454 | """ 455 | Fails unless this subject contains all the elements in the given iterable. 456 | """ 457 | uncontained_objs = [obj for obj in iterable if obj not in self._subject] 458 | if uncontained_objs: 459 | self._raise_error(f"Unexpectedly that {self} doesn't contain elements {_format_elements(uncontained_objs)} in {_format(iterable)}.") 460 | return self 461 | 462 | def is_all_in(self, iterable: Iterable) -> IterableSubjects: 463 | """ 464 | Fails unless all the elements in this subject are in the given iterable. 
465 |         """
466 |         uncontained_objs = [obj for obj in self._subject if obj not in iterable]
467 |         if uncontained_objs:
468 |             self._raise_error(f"Unexpectedly that {self} has elements {_format_elements(uncontained_objs)} not in {_format(iterable)}.")
469 |         return self
470 | 
471 |     def contains_any_in(self, iterable: Iterable) -> IterableSubjects:
472 |         """
473 |         Fails unless this subject contains any element in the given iterable.
474 |         """
475 |         contained_objs = [obj for obj in iterable if obj in self._subject]
476 |         if not contained_objs:
477 |             self._raise_error(f"Unexpectedly that {self} doesn't contain any element in {_format(iterable)}.")
478 |         return self
479 | 
480 |     def is_any_in(self, iterable: Iterable) -> IterableSubjects:
481 |         """
482 |         Fails unless any element in this subject is in the given iterable.
483 |         """
484 |         contained_objs = [obj for obj in self._subject if obj in iterable]
485 |         if not contained_objs:
486 |             self._raise_error(f"Unexpectedly that {self} doesn't have any element in {_format(iterable)}.")
487 |         return self
488 | 
489 |     def contains_none_in(self, iterable: Iterable) -> IterableSubjects:
490 |         """
491 |         Fails unless this subject doesn't contain any element in the given iterable.
492 |         """
493 |         contained_objs = [obj for obj in iterable if obj in self._subject]
494 |         if contained_objs:
495 |             self._raise_error(f"Unexpectedly that {self} contains elements {_format_elements(contained_objs)} in {_format(iterable)}.")
496 |         return self
497 | 
498 |     def is_none_in(self, iterable: Iterable) -> IterableSubjects:
499 |         """
500 |         Fails unless no element in this subject is in the given iterable.
501 |         """
502 |         contained_objs = [obj for obj in self._subject if obj in iterable]
503 |         if contained_objs:
504 |             self._raise_error(f"Unexpectedly that {self} has elements {_format_elements(contained_objs)} in {_format(iterable)}.")
505 |         return self
506 | 
507 |     def length(self) -> "_NumericSubject":
508 |         """
509 |         Assert the length of this subject.
510 |         """
511 |         return _NumericSubject(len(self._subject))
512 | 
513 |     def each(self) -> AllSubjects:
514 |         """
515 |         For each obj in this subject.
516 | """ 517 | return _IterableEachSubject(self._subject) 518 | 519 | 520 | class _IterableEachSubject(object): 521 | def __init__(self, iterable_subject): 522 | self._iterable_subject = iterable_subject 523 | 524 | def __getattr__(self, item): 525 | if item in ["length", "index", "key", "attr"]: 526 | def each_attr(*args, **kwargs): 527 | iterable_subject = [] 528 | for subject in self._iterable_subject: 529 | iterable_subject.append(getattr(assert_that(subject), item)(*args, **kwargs)._subject) 530 | return _IterableEachSubject(iterable_subject) 531 | 532 | return each_attr 533 | elif item in ["each", "each_key", "each_value"]: 534 | def each_each(*args, **kwargs): 535 | iterable_subject = [] 536 | for iterable in self._iterable_subject: 537 | for subject in getattr(assert_that(iterable), item)(*args, **kwargs)._iterable_subject: 538 | iterable_subject.append(subject) 539 | return _IterableEachSubject(iterable_subject) 540 | 541 | return each_each 542 | else: 543 | def assert_each(*args, **kwargs): 544 | for subject in self._iterable_subject: 545 | getattr(assert_that(subject), item)(*args, **kwargs) 546 | return self 547 | 548 | return assert_each 549 | 550 | 551 | class _StringSubject(_IterableSubject): 552 | def __init__(self, subject): 553 | _IterableSubject.__init__(self, subject) 554 | 555 | def is_equal_to_ignoring_case(self, string: str) -> "_StringSubject": 556 | """ 557 | Fails unless this string is equal to other string ignoring case. 558 | """ 559 | if not self._subject.lower() == string.lower(): 560 | self._raise_error(f"Unexpectedly that {self} is not equal to {_format(string)} ignoring case.") 561 | return self 562 | 563 | def is_blank(self) -> "_StringSubject": 564 | """ 565 | Fails unless this string is blank. 566 | """ 567 | if len(self._subject.strip()) != 0: 568 | self._raise_error(f"Unexpectedly that {self} is not blank.") 569 | return self 570 | 571 | def is_not_blank(self) -> "_StringSubject": 572 | """ 573 | Fails unless this string is not blank. 574 | """ 575 | if len(self._subject.strip()) == 0: 576 | self._raise_error(f"Unexpectedly that {self} is blank.") 577 | return self 578 | 579 | def starts_with(self, prefix: str) -> "_StringSubject": 580 | """ 581 | Fails unless this string starts with the given string. 582 | """ 583 | if not self._subject.startswith(prefix): 584 | self._raise_error(f"Unexpectedly that {self} doesn't start with {_format(prefix)}.") 585 | return self 586 | 587 | def ends_with(self, suffix: str) -> "_StringSubject": 588 | """ 589 | Fails unless this string ends with the given string. 590 | """ 591 | if not self._subject.endswith(suffix): 592 | self._raise_error(f"Unexpectedly that {self} doesn't end with {_format(suffix)}.") 593 | return self 594 | 595 | def matches(self, regex: str) -> "_StringSubject": 596 | """ 597 | Fails unless this string matches the given regex. 598 | 599 | Note: If you want to match the entire string, just include anchors in the regex pattern. 600 | """ 601 | if not re.compile(regex).search(self._subject): 602 | self._raise_error(f"Unexpectedly that {self} doesn't match regex <{regex}>.") 603 | return self 604 | 605 | def does_not_match(self, regex: str) -> "_StringSubject": 606 | """ 607 | Fails unless this string doesn't match the given regex. 608 | 609 | Note: If you want to match the entire string, just include anchors in the regex pattern. 
610 | """ 611 | if re.compile(regex).search(self._subject): 612 | self._raise_error(f"Unexpectedly that {self} matches regex <{regex}>.") 613 | return self 614 | 615 | def is_alpha(self) -> "_StringSubject": 616 | """ 617 | Fails unless this string contains only alphabetic chars. 618 | """ 619 | if not self._subject.isalpha(): 620 | self._raise_error(f"Unexpectedly that {self} doesn't contain only alphabetic chars.") 621 | return self 622 | 623 | def is_digit(self) -> "_StringSubject": 624 | """ 625 | Fails unless this string contains only digits. 626 | """ 627 | if not self._subject.isdigit(): 628 | self._raise_error(f"Unexpectedly that {self} doesn't contain only digits.") 629 | return self 630 | 631 | def is_lower(self) -> "_StringSubject": 632 | """ 633 | Fails unless this string contains only lowercase chars. 634 | """ 635 | if not self._subject == self._subject.lower(): 636 | self._raise_error(f"Unexpectedly that {self} doesn't contain only lowercase chars.") 637 | return self 638 | 639 | def is_upper(self) -> "_StringSubject": 640 | """ 641 | Fails unless this string contains only uppercase chars. 642 | """ 643 | if not self._subject == self._subject.upper(): 644 | self._raise_error(f"Unexpectedly that {self} doesn't contain only uppercase chars.") 645 | return self 646 | 647 | 648 | class _ListOrTupleSubject(_IterableSubject): 649 | def __init__(self, subject): 650 | _IterableSubject.__init__(self, subject) 651 | 652 | def has_same_elements_as(self, other_list_or_tuple: Union[List, Tuple]) -> "_ListOrTupleSubject": 653 | """ 654 | Fails unless this list/tuple has the same elements as other list/tuple. 655 | """ 656 | uncontained_objs = [obj for obj in other_list_or_tuple if obj not in self._subject] 657 | if uncontained_objs: 658 | self._raise_error(f"Unexpectedly that {self} doesn't contain elements {_format_elements(uncontained_objs)} in {_format(other_list_or_tuple)}.") 659 | 660 | uncontained_objs = [obj for obj in self._subject if obj not in other_list_or_tuple] 661 | if uncontained_objs: 662 | self._raise_error(f"Unexpectedly that {self} contains elements {_format_elements(uncontained_objs)} not in {_format(other_list_or_tuple)}.") 663 | return self 664 | 665 | def contains_duplicates(self) -> "_ListOrTupleSubject": 666 | """ 667 | Fails unless this list/tuple contains duplicate elements. 668 | """ 669 | if len(self._subject) == len(set(self._subject)): 670 | self._raise_error(f"Unexpectedly that {self} doesn't contain duplicate elements.") 671 | return self 672 | 673 | def does_not_contain_duplicates(self) -> "_ListOrTupleSubject": 674 | """ 675 | Fails unless this list/tuple doesn't contain duplicate elements. 676 | """ 677 | element_counter = {} 678 | for element in self._subject: 679 | if element in element_counter: 680 | element_counter[element] += 1 681 | else: 682 | element_counter[element] = 1 683 | duplicates = [element for element, count in element_counter.items() if count > 1] 684 | if duplicates: 685 | self._raise_error(f"Unexpectedly that {self} contains duplicate elements {_format_elements(duplicates)}.") 686 | return self 687 | 688 | def index(self, index: int) -> AllSubjects: 689 | """ 690 | Assert the obj of this list/tuple by index. If index doesn't exist, raise IndexError. 
691 | """ 692 | if index >= len(self._subject) or index < 0: 693 | self._raise_error(f"Unexpectedly that {self} has no object of index <{index}>.", error=IndexError) 694 | return assert_that(self._subject[index]) 695 | 696 | 697 | class _SetSubject(_IterableSubject): 698 | def __init__(self, subject): 699 | _IterableSubject.__init__(self, subject) 700 | 701 | def is_super_of(self, other_set: Set) -> "_SetSubject": 702 | """ 703 | Fails unless this set is a superset of other set. 704 | """ 705 | uncontained_objs = [obj for obj in other_set if obj not in self._subject] 706 | if uncontained_objs: 707 | self._raise_error(f"Unexpectedly that {self} doesn't contain elements {_format_elements(uncontained_objs)} in {_format(other_set)}.") 708 | return self 709 | 710 | def is_sub_of(self, other_set: Set) -> "_SetSubject": 711 | """ 712 | Fails unless this set is a subset of other set. 713 | """ 714 | uncontained_objs = [obj for obj in self._subject if obj not in other_set] 715 | if uncontained_objs: 716 | self._raise_error(f"Unexpectedly that {self} contains elements {_format_elements(uncontained_objs)} not in {_format(other_set)}.") 717 | return self 718 | 719 | 720 | class _DictSubject(_IterableSubject): 721 | def __init__(self, subject): 722 | _IterableSubject.__init__(self, subject) 723 | 724 | def contains_key(self, key: Any) -> "_DictSubject": 725 | """ 726 | Fails unless this dict contains the given key. 727 | """ 728 | if key not in self._subject: 729 | self._raise_error(f"Unexpectedly that {self} doesn't contain key {_format(key)}.") 730 | return self 731 | 732 | def does_not_contain_key(self, key: Any) -> "_DictSubject": 733 | """ 734 | Fails unless this dict doesn't contain the given key. 735 | """ 736 | if key in self._subject: 737 | self._raise_error(f"Unexpectedly that {self} contains key {_format(key)}.") 738 | return self 739 | 740 | def contains_value(self, value: Any) -> "_DictSubject": 741 | """ 742 | Fails unless this dict contains the given value. 743 | """ 744 | if value not in self._subject.values(): 745 | self._raise_error(f"Unexpectedly that {self} doesn't contain value {_format(value)}.") 746 | return self 747 | 748 | def does_not_contain_value(self, value: Any) -> "_DictSubject": 749 | """ 750 | Fails unless this dict doesn't contain the given value. 751 | """ 752 | if value in self._subject.values(): 753 | self._raise_error(f"Unexpectedly that {self} contains value {_format(value)}.") 754 | return self 755 | 756 | def contains_entry(self, key: Any, value: Any) -> "_DictSubject": 757 | """ 758 | Fails unless this dict contains the given entry. 759 | """ 760 | if (key, value) not in self._subject.items(): 761 | self._raise_error(f"Unexpectedly that {self} doesn't contain entry, key: {_format(key)}, value: {_format(value)}.") 762 | return self 763 | 764 | def does_not_contain_entry(self, key: Any, value: Any) -> "_DictSubject": 765 | """ 766 | Fails unless this dict doesn't contain the given entry. 767 | """ 768 | if (key, value) in self._subject.items(): 769 | self._raise_error(f"Unexpectedly that {self} contains entry, key: {_format(key)}, value: {_format(value)}.") 770 | return self 771 | 772 | def is_super_of(self, other_dict: Dict) -> "_DictSubject": 773 | """ 774 | Fails unless this dict contains all the entries in other dict. 
775 |         """
776 |         uncontained_entries = [entry for entry in other_dict.items() if entry not in self._subject.items()]
777 |         if uncontained_entries:
778 |             self._raise_error(f"Unexpectedly that {self} doesn't contain entries {_format_elements(uncontained_entries)} in {_format(other_dict)}.")
779 |         return self
780 | 
781 |     def is_sub_of(self, other_dict: Dict) -> "_DictSubject":
782 |         """
783 |         Fails unless all the entries in this dict are in other dict.
784 |         """
785 |         uncontained_entries = [entry for entry in self._subject.items() if entry not in other_dict.items()]
786 |         if uncontained_entries:
787 |             self._raise_error(f"Unexpectedly that {self} contains entries {_format_elements(uncontained_entries)} not in {_format(other_dict)}.")
788 |         return self
789 | 
790 |     def key(self, key: Any) -> AllSubjects:
791 |         """
792 |         Assert the value of this dict by key. If key doesn't exist, raise KeyError.
793 |         """
794 |         if key not in self._subject:
795 |             self._raise_error(f"Unexpectedly that {self} doesn't contain key {_format(key)}.", error=KeyError)
796 |         return assert_that(self._subject[key])
797 | 
798 |     def each(self) -> AllSubjects:
799 |         """
800 |         For each entry in this dict.
801 |         """
802 |         return _IterableEachSubject(self._subject.items())
803 | 
804 |     def each_key(self) -> AllSubjects:
805 |         """
806 |         For each key in this dict.
807 |         """
808 |         return _IterableEachSubject(self._subject.keys())
809 | 
810 |     def each_value(self) -> AllSubjects:
811 |         """
812 |         For each value in this dict.
813 |         """
814 |         return _IterableEachSubject(self._subject.values())
815 | 
816 | 
817 | class _DateSubject(_ObjSubject):
818 |     def __init__(self, subject):
819 |         _ObjSubject.__init__(self, subject)
820 | 
821 |     def is_before(self, other_date: date) -> "_DateSubject":
822 |         """
823 |         Fails unless this date is before other date.
824 |         """
825 |         if self._subject >= other_date:
826 |             self._raise_error(f"Unexpectedly that {self} is not before {_format(other_date)}.")
827 |         return self
828 | 
829 |     def is_after(self, other_date: date) -> "_DateSubject":
830 |         """
831 |         Fails unless this date is after other date.
832 |         """
833 |         if self._subject <= other_date:
834 |             self._raise_error(f"Unexpectedly that {self} is not after {_format(other_date)}.")
835 |         return self
836 | 
837 | 
838 | class _DateTimeSubject(_ObjSubject):
839 |     def __init__(self, subject):
840 |         _ObjSubject.__init__(self, subject)
841 | 
842 |     def is_before(self, other_datetime: datetime) -> "_DateTimeSubject":
843 |         """
844 |         Fails unless this datetime is before other datetime.
845 |         """
846 |         if self._subject >= other_datetime:
847 |             self._raise_error(f"Unexpectedly that {self} is not before {_format(other_datetime)}.")
848 |         return self
849 | 
850 |     def is_after(self, other_datetime: datetime) -> "_DateTimeSubject":
851 |         """
852 |         Fails unless this datetime is after other datetime.
853 |         """
854 |         if self._subject <= other_datetime:
855 |             self._raise_error(f"Unexpectedly that {self} is not after {_format(other_datetime)}.")
856 |         return self
857 | 
858 | 
859 | class _CallableSubject(_ObjSubject):
860 |     def __init__(self, subject):
861 |         _ObjSubject.__init__(self, subject)
862 |         self._args = []
863 |         self._kwargs = {}
864 | 
865 |     def with_args(self, *args, **kwargs) -> "_CallableSubject":
866 |         self._args = args
867 |         self._kwargs = kwargs
868 |         return self
869 | 
870 |     def raises_exception(self, exception_class: Type[Exception]):
871 |         """
872 |         Fails unless this callable raises the given exception.
873 | """ 874 | try: 875 | self._subject(*self._args, **self._kwargs) 876 | except Exception as e: 877 | if not issubclass(e.__class__, exception_class): 878 | self._raise_error(f"Unexpectedly that {self} raises wrong exception <{e.__class__.__module__}.{e.__class__.__name__}>.") 879 | else: 880 | self._raise_error(f"Unexpectedly that {self} doesn't raise exception.") 881 | 882 | def will(self, interval: int = 1000, timeout: int = 30000) -> AllSubjects: 883 | """ 884 | Failed if this callable's result doesn't match following assertions until timing out. 885 | 886 | :param interval: interval of asserting, in milliseconds 887 | :param timeout: timeout of asserting, in milliseconds 888 | """ 889 | return _CallableWillSubject(self._subject, interval, timeout, self._args, self._kwargs) 890 | 891 | 892 | class _CallableWillSubject(object): 893 | def __init__(self, subject, interval, timeout, args, kwargs): 894 | self._subject = subject 895 | self._interval = interval 896 | self._timeout = timeout 897 | self._args = args 898 | self._kwargs = kwargs 899 | 900 | def __getattr__(self, item): 901 | if item in ["length", "index", "key", "attr", "each", "each_key", "each_value"]: 902 | raise AttributeError(f"Cannot call \"{item}\" in callable-will assertion.") 903 | 904 | def wrapper(*args, **kwargs): 905 | start_time = time.time() * 1000.0 906 | 907 | last_exception = {"value": None} 908 | 909 | try: 910 | getattr(assert_that(self._subject(*self._args, **self._kwargs)), item)(*args, **kwargs) 911 | except AssertionError as e: 912 | last_exception["value"] = e 913 | else: 914 | return self 915 | 916 | while (time.time() * 1000.0 - start_time) <= self._timeout: 917 | time.sleep(self._interval / 1000.0) 918 | try: 919 | getattr(assert_that(self._subject(*self._args, **self._kwargs)), item)(*args, **kwargs) 920 | except AssertionError as e: 921 | last_exception["value"] = e 922 | else: 923 | return self 924 | 925 | raise AssertionError(f"Callable's result doesn't match expected until timing out, last assertion error is:\n{last_exception['value']}") 926 | 927 | return wrapper 928 | -------------------------------------------------------------------------------- /ptest/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | import platform 3 | import re 4 | from optparse import OptionParser, OptionGroup 5 | 6 | from typing import List 7 | 8 | from . import __version__ 9 | 10 | _properties = {} 11 | _options = {} 12 | 13 | 14 | def get_option(option: str) -> str: 15 | try: 16 | return _options[option] 17 | except KeyError: 18 | return None 19 | 20 | 21 | def get_property(key: str, default: str = None) -> str: 22 | """ 23 | Get property value. 24 | If no property found, default value will be returned. 25 | """ 26 | try: 27 | return _properties[key] 28 | except KeyError: 29 | return default 30 | 31 | 32 | def get_int_property(key: str, default: int = None) -> int: 33 | """ 34 | Get property value and convert it to int. 35 | If no property found, default value will be returned. 36 | """ 37 | try: 38 | return int(_properties[key]) 39 | except KeyError: 40 | return default 41 | 42 | 43 | def get_float_property(key: str, default: float = None) -> float: 44 | """ 45 | Get property value and convert it to float. 46 | If no property found, default value will be returned. 
47 | """ 48 | try: 49 | return float(_properties[key]) 50 | except KeyError: 51 | return default 52 | 53 | 54 | def get_boolean_property(key: str, default: bool = None) -> bool: 55 | """ 56 | Get property value and convert it to boolean. 57 | If no property found, default value will be returned. 58 | """ 59 | try: 60 | value = _properties[key] 61 | if value.lower() == "true": 62 | return True 63 | elif value.lower() == "false": 64 | return False 65 | raise ValueError("could not convert string to boolean: %s" % value) 66 | except KeyError: 67 | return default 68 | 69 | 70 | def get_list_property(key: str, default: List[str] = None, sep: str = ",") -> List[str]: 71 | """ 72 | Get property value and convert it to list. 73 | If no property found, default value will be returned. 74 | """ 75 | try: 76 | return _properties[key].split(sep) 77 | except KeyError: 78 | return default 79 | 80 | 81 | def load(args): 82 | option_args, property_args = __load_args(args) 83 | _parse_options(option_args) 84 | _load_properties_from_file() 85 | _parse_properties(property_args) 86 | 87 | 88 | def _load_properties_from_file(): 89 | property_file = get_option("property_file") 90 | if property_file is not None: 91 | file_object = open(property_file, encoding="utf-8") 92 | try: 93 | property_regex_str = r"^([^;#].*?)=(.*?)$" 94 | property_regex = re.compile(property_regex_str) 95 | for line in file_object: 96 | property_match = property_regex.search(line.strip()) 97 | if property_match: 98 | _properties[property_match.group(1)] = property_match.group(2) 99 | finally: 100 | file_object.close() 101 | 102 | 103 | def __load_args(args): 104 | property_args = [] 105 | option_args = [] 106 | property_regex_str = r"^-D(.*?)=(.*?)$" # the format of property definition must be -D= 107 | property_regex = re.compile(property_regex_str) 108 | for arg in args: 109 | property_match = property_regex.search(arg) 110 | if property_match: 111 | property_args.append(arg) 112 | else: 113 | option_args.append(arg) 114 | return option_args, property_args 115 | 116 | 117 | def _parse_properties(property_args): 118 | property_regex_str = r"^-D(.*?)=(.*?)$" # the format of property definition must be -D= 119 | property_regex = re.compile(property_regex_str) 120 | for arg in property_args: 121 | property_match = property_regex.search(arg) 122 | _properties[property_match.group(1)] = property_match.group(2) 123 | 124 | 125 | def _parse_options(option_args): 126 | parser = OptionParser(usage="ptest [options] [properties]", version="ptest %s for Python %s" % (__version__, platform.python_version()), 127 | description="ptest is a light test framework for Python.") 128 | 129 | # path and property 130 | parser.add_option("-w", "--workspace", action="store", dest="workspace", default=".", metavar="dir", 131 | help="Specify the workspace dir (relative to working directory). Default is current working directory.") 132 | parser.add_option("-P", "--python-paths", action="store", dest="python_paths", default=None, metavar="paths", 133 | help="Specify the additional locations (relative to workspace) where to search test libraries from when they are imported. " 134 | "Multiple paths can be given by separating them with a comma.") 135 | parser.add_option("-p", "--property-file", action="store", dest="property_file", default=None, metavar="file", 136 | help="Specify the .ini property file (relative to workspace). " 137 | "The properties in property file will be overwritten by user defined properties in cmd line. 
" 138 | "Get property via get_property() in module ptest.config.") 139 | 140 | # running 141 | parser.add_option("-R", "--run-failed", action="store", dest="run_failed", default=None, metavar="file", 142 | help="Specify the junit result xml path (relative to workspace) and run the failed/skipped test cases in it.") 143 | parser.add_option("-t", "--targets", action="store", dest="test_targets", default=None, metavar="targets", 144 | help="Specify the path of test targets, separated by comma. Test target can be package/module/class/method. " 145 | "The target path format is: package[.module[.class[.method]]] " 146 | "NOTE: ptest ONLY searches modules under --workspace, --python-paths and sys.path") 147 | parser.add_option("-f", "--filter", action="store", dest="test_filter", default=None, metavar="class", 148 | help="Specify the path of test filter class, select test cases to run by the specified filter. " 149 | "The test filter class should implement class TestFilter in ptest.testfilter " 150 | "The filter path format is: package.module.class " 151 | "NOTE: ptest ONLY searches modules under --workspace, --python-paths and sys.path") 152 | parser.add_option("-i", "--include-tags", action="store", dest="include_tags", default=None, metavar="tags", 153 | help="Select test cases to run by tags, separated by comma.") 154 | parser.add_option("-e", "--exclude-tags", action="store", dest="exclude_tags", default=None, metavar="tags", 155 | help="Select test cases not to run by tags, separated by comma. These test cases are not run even if included with --include-tags.") 156 | parser.add_option("-g", "--include-groups", action="store", dest="include_groups", default=None, metavar="groups", 157 | help="Select test cases to run by groups, separated by comma.") 158 | parser.add_option("-n", "--test-executor-number", action="store", dest="test_executor_number", metavar="int", 159 | default=1, help="Specify the number of test executors. Default value is 1.") 160 | 161 | # output 162 | parser.add_option("-o", "--output-dir", action="store", dest="output_dir", default="test-output", metavar="dir", 163 | help="Specify the output dir (relative to workspace).") 164 | parser.add_option("-r", "--report-dir", action="store", dest="report_dir", default="html-report", metavar="dir", 165 | help="Specify the html report dir (relative to output dir).") 166 | parser.add_option("-x", "--junit-xml", action="store", dest="junit_xml", default="junit-results.xml", 167 | metavar="file", help="Specify the junit result xml path (relative to output dir).") 168 | 169 | # miscellaneous 170 | parser.add_option("-l", "--listeners", action="store", dest="test_listeners", default=None, metavar="class", 171 | help="Specify the path of test listener classes, separated by comma. " 172 | "The listener class should implement class TestListener in ptest.plistener " 173 | "The listener path format is: package.module.class " 174 | "NOTE: 1. ptest ONLY searches modules under --workspace, --python-paths and sys.path " 175 | "2. 
The listener class must be thread safe if you set -n(--test-executor-number) greater than 1.") 176 | parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, 177 | help="Set ptest console to verbose mode.") 178 | parser.add_option("--temp", action="store", dest="temp", default="ptest-temp", metavar="dir", 179 | help="Specify the temp dir (relative to workspace).") 180 | parser.add_option("--disable-screenshot", action="store_true", dest="disable_screenshot", default=False, 181 | help="Disable taking screenshot for preporter.") 182 | 183 | # tool 184 | parser.add_option("-m", "--merge-junit-xmls", action="store", dest="merge_junit_xmls", default=None, metavar="files", 185 | help="Merge the junit result xmls (relative to workspace). Multiple files can be given by separating them with a comma. " 186 | "Use --to to specify the path of merged junit result xml.") 187 | parser.add_option("--to", action="store", dest="to", default=None, metavar='path', 188 | help="Specify the 'to' destination (relative to workspace).") 189 | 190 | # user defined properties 191 | parser.add_option_group( 192 | OptionGroup(parser, "User defined properties", 193 | "Define properties via -D<key>=<value>. Get defined property via get_property() in module ptest.config.")) 194 | 195 | options, unknown_args = parser.parse_args(option_args) 196 | 197 | # only one of the main options can be specified 198 | main_options = [options.test_targets, options.run_failed, options.merge_junit_xmls] 199 | specified_options_count = len([option for option in main_options if option is not None]) 200 | if specified_options_count == 0: 201 | parser.error("You must specify one of the following options: -t(--targets), -R(--run-failed), -m(--merge-junit-xmls).") 202 | elif specified_options_count > 1: 203 | parser.error("You can ONLY specify one of the following options: -t(--targets), -R(--run-failed), -m(--merge-junit-xmls).") 204 | 205 | # check '--to' 206 | if options.merge_junit_xmls is not None and options.to is None: 207 | parser.error("You must use --to to specify the path of merged junit result xml (--merge-junit-xmls).") 208 | 209 | # split multiple values by comma 210 | def split(option_value): 211 | return None if option_value is None else option_value.split(",") 212 | 213 | options.python_paths = split(options.python_paths) 214 | options.test_targets = split(options.test_targets) 215 | options.include_tags = split(options.include_tags) 216 | options.exclude_tags = split(options.exclude_tags) 217 | options.include_groups = split(options.include_groups) 218 | options.test_listeners = split(options.test_listeners) 219 | options.merge_junit_xmls = split(options.merge_junit_xmls) 220 | 221 | # convert to full path for options 222 | def join_path(base_path, sub_path): 223 | return os.path.abspath(os.path.join(base_path, sub_path)) 224 | 225 | options.workspace = join_path(os.getcwd(), options.workspace) 226 | options.python_paths = None if options.python_paths is None else [join_path(options.workspace, path) for path in options.python_paths] 227 | options.property_file = None if options.property_file is None else join_path(options.workspace, options.property_file) 228 | 229 | options.run_failed = None if options.run_failed is None else join_path(options.workspace, options.run_failed) 230 | options.output_dir = join_path(options.workspace, options.output_dir) 231 | options.report_dir = join_path(options.output_dir, options.report_dir) 232 | options.junit_xml = join_path(options.output_dir, options.junit_xml) 233 | 
options.temp = join_path(options.workspace, options.temp) 234 | 235 | options.merge_junit_xmls = None if options.merge_junit_xmls is None else [join_path(options.workspace, path) for path in 236 | options.merge_junit_xmls] 237 | options.to = None if options.to is None else join_path(options.workspace, options.to) 238 | 239 | _options.update(options.__dict__) 240 | -------------------------------------------------------------------------------- /ptest/decorator.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | import os 3 | import re 4 | from typing import Union, List, Tuple, Type, Dict, Callable, Iterable, Any 5 | from urllib.parse import urljoin, unquote 6 | from urllib.request import pathname2url 7 | 8 | from .enumeration import PDecoratorType, TestClassRunMode 9 | 10 | 11 | def TestClass(enabled: bool = True, run_mode: Union[str, TestClassRunMode] = "singleline", run_group: str = None, description: str = "", **custom_args): 12 | """ 13 | The TestClass decorator marks a class as a test class. 14 | 15 | :param enabled: enable or disable this test class. 16 | :param run_mode: the run mode of all the test cases in this test class. If set to "parallel", all the test cases can be run by multiple threads. 17 | If set to "singleline", all the test cases will be run by only one thread. 18 | :param run_group: the run group of this test class. If a run group is specified, all the test classes in the same run group will be run one by one. 19 | If not, this test class belongs to its own run group. 20 | :param description: the description of this test class. 21 | :param custom_args: the custom arguments of this test class. 22 | """ 23 | 24 | def tracer(cls): 25 | cls.__pd_type__ = PDecoratorType.TestClass 26 | cls.__enabled__ = enabled 27 | if isinstance(run_mode, str) and (run_mode.lower() in [TestClassRunMode.SingleLine.value, TestClassRunMode.Parallel.value]): 28 | cls.__run_mode__ = TestClassRunMode(run_mode.lower()) 29 | elif isinstance(run_mode, TestClassRunMode): 30 | cls.__run_mode__ = run_mode 31 | else: 32 | raise ValueError("Run mode <%s> is not supported. Please use <%s> or <%s>." % ( 33 | run_mode, TestClassRunMode.Parallel.value, TestClassRunMode.SingleLine.value)) 34 | cls.__run_group__ = None if run_group is None else str(run_group) 35 | cls.__description__ = description 36 | cls.__custom_args__ = custom_args 37 | return cls 38 | 39 | return tracer 40 | 41 | 42 | def BeforeSuite(enabled: bool = True, description: str = "", timeout: int = 0, **custom_args): 43 | """ 44 | The BeforeSuite test fixture is executed before the test suite starts. 45 | 46 | :param enabled: enable or disable this test fixture. 47 | :param description: the description of this test fixture. 48 | :param timeout: the timeout of this test fixture (in seconds). 49 | :param custom_args: the custom arguments of this test fixture. 50 | """ 51 | 52 | def handle_func(func): 53 | func.__pd_type__ = PDecoratorType.BeforeSuite 54 | func.__enabled__ = enabled 55 | func.__description__ = description 56 | func.__timeout__ = timeout 57 | func.__custom_args__ = custom_args 58 | func.__location__ = __get_location(func) 59 | func.__parameters_count__ = __get_parameters_count_of_test_configuration(func) 60 | return func 61 | 62 | return handle_func 63 | 64 | 65 | def BeforeClass(enabled: bool = True, description: str = "", timeout: int = 0, **custom_args): 66 | """ 67 | The BeforeClass test fixture is executed before the test class starts.
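For example (a minimal sketch; the class name and URL are hypothetical)::
    @TestClass()
    class LoginTest:
        @BeforeClass()
        def prepare(self):
            self.base_url = "http://localhost:8080"  # hypothetical test server, visible to the other fixtures and tests of this class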
68 | 69 | :param enabled: enable or disable this test fixture. 70 | :param description: the description of this test fixture. 71 | :param timeout: the timeout of this test fixture (in seconds). 72 | :param custom_args: the custom arguments of this test fixture. 73 | """ 74 | 75 | def handle_func(func): 76 | func.__pd_type__ = PDecoratorType.BeforeClass 77 | func.__enabled__ = enabled 78 | func.__description__ = description 79 | func.__timeout__ = timeout 80 | func.__custom_args__ = custom_args 81 | func.__location__ = __get_location(func) 82 | func.__parameters_count__ = __get_parameters_count_of_test_configuration(func) 83 | return func 84 | 85 | return handle_func 86 | 87 | 88 | def BeforeGroup(enabled: bool = True, group: str = "DEFAULT", description: str = "", timeout: int = 0, **custom_args): 89 | """ 90 | The BeforeGroup test fixture is executed before the test group starts. 91 | 92 | :param enabled: enable or disable this test fixture. 93 | :param group: the group that this test fixture belongs to. 94 | :param description: the description of this test fixture. 95 | :param timeout: the timeout of this test fixture (in seconds). 96 | :param custom_args: the custom arguments of this test fixture. 97 | """ 98 | 99 | def handle_func(func): 100 | func.__pd_type__ = PDecoratorType.BeforeGroup 101 | func.__enabled__ = enabled 102 | func.__group__ = group 103 | func.__description__ = description 104 | func.__timeout__ = timeout 105 | func.__custom_args__ = custom_args 106 | func.__location__ = __get_location(func) 107 | func.__parameters_count__ = __get_parameters_count_of_test_configuration(func) 108 | return func 109 | 110 | return handle_func 111 | 112 | 113 | def BeforeMethod(enabled: bool = True, group: str = "DEFAULT", description: str = "", timeout: int = 0, **custom_args): 114 | """ 115 | The BeforeMethod test fixture is executed before each test starts. 116 | 117 | :param enabled: enable or disable this test fixture. 118 | :param group: the group that this test fixture belongs to. 119 | :param description: the description of this test fixture. 120 | :param timeout: the timeout of this test fixture (in seconds). 121 | :param custom_args: the custom arguments of this test fixture. 122 | """ 123 | 124 | def handle_func(func): 125 | func.__pd_type__ = PDecoratorType.BeforeMethod 126 | func.__enabled__ = enabled 127 | func.__group__ = group 128 | func.__description__ = description 129 | func.__timeout__ = timeout 130 | func.__custom_args__ = custom_args 131 | func.__location__ = __get_location(func) 132 | func.__parameters_count__ = __get_parameters_count_of_test_configuration(func) 133 | return func 134 | 135 | return handle_func 136 | 137 | 138 | def Test(enabled: bool = True, 139 | tags: Union[str, List[str], Tuple[str, ...]] = [], 140 | expected_exceptions: Union[Type[Exception], List[Type[Exception]], Tuple[Type[Exception], ...], Dict[Type[Exception], str]] = None, 141 | data_provider: Iterable = None, 142 | data_name: Callable[[int, Any], str] = None, 143 | group: str = "DEFAULT", 144 | description: str = "", 145 | timeout: int = 0, 146 | **custom_args): 147 | """ 148 | The Test decorator marks a method as a test. 149 | 150 | :param enabled: enable or disable this test. 151 | :param tags: the tags of this test. It can be string (separated by comma) or list or tuple. 152 | :param expected_exceptions: the expected exceptions of this test. 153 | If no exception or a different one is thrown, this test will be marked as failed.
154 | The possible values of this parameter are:: 155 | Exception Class: 156 | expected_exceptions=AttributeError 157 | Exception Class list or tuple: 158 | expected_exceptions=[AttributeError, IndexError] 159 | expected_exceptions=(AttributeError, IndexError) 160 | Exception Class and regular expression of expected message dict: 161 | expected_exceptions={AttributeError: '.*object has no attribute.*'} 162 | Note: If you want to match the entire exception message, just include anchors in the regex pattern. 163 | :param data_provider: the data provider for this test; it must be iterable. 164 | The following test will be run four times with the test data supplied by the data provider. 165 | @Test(data_provider=[(1, 1, 2), (2, 3, 5), (4, 5, 9), (9, 9, 18)]) 166 | def test_add(self, number1, number2, sum_): 167 | assert_that(number1 + number2).is_equal_to(sum_) 168 | :param data_name: the data name function of this test. 169 | Note: If no data_provider is specified, data_name will be ignored. 170 | For example: 171 | @Test(data_provider=["foo", "bar"], data_name=lambda index, params: params[0]) 172 | def test_something(self, name): 173 | assert_that(name).is_not_none() 174 | The test names are test_something#foo and test_something#bar. 175 | :param group: the group that this test belongs to. 176 | :param description: the description of this test. 177 | :param timeout: the timeout of this test (in seconds). 178 | :param custom_args: the custom arguments of this test. 179 | """ 180 | 181 | def handle_func(func): 182 | func.__pd_type__ = PDecoratorType.Test 183 | func.__enabled__ = enabled 184 | func.__group__ = group 185 | func.__description__ = description 186 | # deal with tags 187 | if not tags: 188 | func.__tags__ = [] 189 | else: 190 | if isinstance(tags, str): 191 | tag_list = tags.split(",") 192 | elif isinstance(tags, (list, tuple)): 193 | tag_list = tags 194 | else: 195 | raise ValueError( 196 | "Tags type %s is not supported. Please use string (separated by comma) or list or tuple." % type(tags)) 197 | func.__tags__ = sorted([str(tag).strip() for tag in tag_list if str(tag).strip()]) 198 | # deal with expected exceptions 199 | if not expected_exceptions: 200 | func.__expected_exceptions__ = None 201 | else: 202 | exceptions = {} 203 | if inspect.isclass(expected_exceptions): 204 | if issubclass(expected_exceptions, Exception): 205 | exceptions[expected_exceptions] = None 206 | else: 207 | raise ValueError("Expected exception should be a subclass of Exception.") 208 | elif isinstance(expected_exceptions, (tuple, list)): 209 | for exception in expected_exceptions: 210 | if issubclass(exception, Exception): 211 | exceptions[exception] = None 212 | else: 213 | raise ValueError("Expected exception should be a subclass of Exception.") 214 | elif isinstance(expected_exceptions, dict): 215 | for exception, message in expected_exceptions.items(): 216 | if issubclass(exception, Exception): 217 | exceptions[exception] = re.compile(message) 218 | else: 219 | raise ValueError("Expected exception should be a subclass of Exception.") 220 | else: 221 | raise ValueError("Expected exceptions type %s is not supported. Please use class or list or tuple or dict." 
222 | % type(expected_exceptions)) 223 | func.__expected_exceptions__ = exceptions 224 | 225 | func.__timeout__ = timeout 226 | func.__custom_args__ = custom_args 227 | func.__location__ = __get_location(func) 228 | func.__parameters_count__ = len(inspect.signature(func).parameters) 229 | # for data provider 230 | # normal zipped unzipped mocked 231 | # __parameters__ None None None not None 232 | # __data_index__ None None None not None 233 | # __data_provider__ None not None not None not None 234 | # __funcs__ [func] [] [mocks] [mock] 235 | func.__parameters__ = None 236 | func.__data_index__ = None 237 | func.__data_provider__ = None 238 | func.__funcs__ = [func] 239 | if data_provider is not None: 240 | func.__data_provider__ = data_provider 241 | func.__funcs__ = [] 242 | func.__data_name__ = data_name or (lambda index, params: index + 1) 243 | if len(inspect.signature(func.__data_name__).parameters) != 2: 244 | raise TypeError("Data name function must be declared with 2 parameters.") 245 | return func 246 | 247 | return handle_func 248 | 249 | 250 | def AfterMethod(enabled: bool = True, always_run: bool = True, group: str = "DEFAULT", description: str = "", timeout: int = 0, **custom_args): 251 | """ 252 | The AfterMethod test fixture is executed after each test finishes. 253 | 254 | :param enabled: enable or disable this test fixture. 255 | :param always_run: if set to true, this test fixture will be run even if the @BeforeMethod failed. Otherwise, this test fixture will be skipped. 256 | :param group: the group that this test fixture belongs to. 257 | :param description: the description of this test fixture. 258 | :param timeout: the timeout of this test fixture (in seconds). 259 | :param custom_args: the custom arguments of this test fixture. 260 | """ 261 | 262 | def handle_func(func): 263 | func.__pd_type__ = PDecoratorType.AfterMethod 264 | func.__enabled__ = enabled 265 | func.__always_run__ = always_run 266 | func.__group__ = group 267 | func.__description__ = description 268 | func.__timeout__ = timeout 269 | func.__custom_args__ = custom_args 270 | func.__location__ = __get_location(func) 271 | func.__parameters_count__ = __get_parameters_count_of_test_configuration(func) 272 | return func 273 | 274 | return handle_func 275 | 276 | 277 | def AfterGroup(enabled: bool = True, always_run: bool = True, group: str = "DEFAULT", description: str = "", timeout: int = 0, **custom_args): 278 | """ 279 | The AfterGroup test fixture is executed after the test group finishes. 280 | 281 | :param enabled: enable or disable this test fixture. 282 | :param always_run: if set to true, this test fixture will be run even if the @BeforeGroup failed. Otherwise, this test fixture will be skipped. 283 | :param group: the group that this test fixture belongs to. 284 | :param description: the description of this test fixture. 285 | :param timeout: the timeout of this test fixture (in seconds). 286 | :param custom_args: the custom arguments of this test fixture. 
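For example (a minimal sketch pairing @BeforeGroup with @AfterGroup; the group name and resource are hypothetical)::
    @BeforeGroup(group="smoke")
    def open_resource(self):
        self.resource = open("smoke-data.txt")

    @AfterGroup(group="smoke")
    def close_resource(self):
        self.resource.close()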
287 | """ 288 | 289 | def handle_func(func): 290 | func.__pd_type__ = PDecoratorType.AfterGroup 291 | func.__enabled__ = enabled 292 | func.__always_run__ = always_run 293 | func.__group__ = group 294 | func.__description__ = description 295 | func.__timeout__ = timeout 296 | func.__custom_args__ = custom_args 297 | func.__location__ = __get_location(func) 298 | func.__parameters_count__ = __get_parameters_count_of_test_configuration(func) 299 | return func 300 | 301 | return handle_func 302 | 303 | 304 | def AfterClass(enabled: bool = True, always_run: bool = True, description: str = "", timeout: int = 0, **custom_args): 305 | """ 306 | The AfterClass test fixture, it will be executed after test class finished. 307 | 308 | :param enabled: enable or disable this test fixture. 309 | :param always_run: if set to true, this test fixture will be run even if the @BeforeClass is failed. Otherwise, this test fixture will be skipped. 310 | :param description: the description of this test fixture. 311 | :param timeout: the timeout of this test fixture (in seconds). 312 | :param custom_args: the custom arguments of this test fixture. 313 | """ 314 | 315 | def handle_func(func): 316 | func.__pd_type__ = PDecoratorType.AfterClass 317 | func.__enabled__ = enabled 318 | func.__always_run__ = always_run 319 | func.__description__ = description 320 | func.__timeout__ = timeout 321 | func.__custom_args__ = custom_args 322 | func.__location__ = __get_location(func) 323 | func.__parameters_count__ = __get_parameters_count_of_test_configuration(func) 324 | return func 325 | 326 | return handle_func 327 | 328 | 329 | def AfterSuite(enabled: bool = True, always_run: bool = True, description: str = "", timeout: int = 0, **custom_args): 330 | """ 331 | The AfterSuite test fixture, it will be executed after test suite finished. 332 | 333 | :param enabled: enable or disable this test fixture. 334 | :param always_run: if set to true, this test fixture will be run even if the @BeforeSuite is failed. Otherwise, this test fixture will be skipped. 335 | :param description: the description of this test fixture. 336 | :param timeout: the timeout of this test fixture (in seconds). 337 | :param custom_args: the custom arguments of this test fixture. 338 | """ 339 | 340 | def handle_func(func): 341 | func.__pd_type__ = PDecoratorType.AfterSuite 342 | func.__enabled__ = enabled 343 | func.__always_run__ = always_run 344 | func.__description__ = description 345 | func.__timeout__ = timeout 346 | func.__custom_args__ = custom_args 347 | func.__location__ = __get_location(func) 348 | func.__parameters_count__ = __get_parameters_count_of_test_configuration(func) 349 | return func 350 | 351 | return handle_func 352 | 353 | 354 | def __get_location(func): 355 | file_path = os.path.abspath(inspect.getfile(func)) 356 | _, line_no = inspect.getsourcelines(func) 357 | return urljoin("file:", "%s:%s" % (unquote(pathname2url(file_path)), line_no)) 358 | 359 | 360 | def __get_parameters_count_of_test_configuration(func): 361 | parameters_count = len(inspect.signature(func).parameters) 362 | if parameters_count not in [1, 2]: 363 | raise TypeError("%s() cannot be declared with %s parameters. " 364 | "Please declare with 1 or 2 parameters (including self)." 
% (func.__name__, parameters_count)) 365 | return parameters_count 366 | -------------------------------------------------------------------------------- /ptest/enumeration.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | class TestClassRunMode(Enum): 5 | SingleLine = "singleline" 6 | Parallel = "parallel" 7 | 8 | 9 | class PDecoratorType(Enum): 10 | BeforeSuite = "BeforeSuite" 11 | AfterSuite = "AfterSuite" 12 | TestClass = "TestClass" 13 | BeforeClass = "BeforeClass" 14 | AfterClass = "AfterClass" 15 | BeforeGroup = "BeforeGroup" 16 | AfterGroup = "AfterGroup" 17 | Test = "Test" 18 | BeforeMethod = "BeforeMethod" 19 | AfterMethod = "AfterMethod" 20 | 21 | 22 | class TestFixtureStatus(Enum): 23 | NOT_RUN = "not_run" 24 | RUNNING = "running" 25 | PASSED = "passed" 26 | SKIPPED = "skipped" 27 | FAILED = "failed" 28 | 29 | 30 | class TestCaseStatus(Enum): 31 | NOT_RUN = "not_run" 32 | RUNNING = "running" 33 | PASSED = "passed" 34 | SKIPPED = "skipped" 35 | FAILED = "failed" 36 | -------------------------------------------------------------------------------- /ptest/exception.py: -------------------------------------------------------------------------------- 1 | class PTestException(Exception): 2 | pass 3 | 4 | 5 | class ScreenshotError(PTestException): 6 | pass 7 | -------------------------------------------------------------------------------- /ptest/htmltemplate/index.html: --------------------------------------------------------------------------------
[The HTML markup of this template was lost when this dump was generated; only text fragments survive. Recoverable content: the page title "ptest html report", a meta line "Generated by ptest {version} at {current_time}", a "{system_info}" placeholder, and a "Test Results" panel heading. The rest of the template (filter buttons, navigation tree, detail pane, script includes) is not recoverable.]
-------------------------------------------------------------------------------- /ptest/htmltemplate/report.css: -------------------------------------------------------------------------------- 1 | body { 2 | font-family: Menlo, Monaco, Consolas, "Courier New", monospace; 3 | font-size: 13px; 4 | } 5 | 6 | #meta { 7 | margin-top: 5px; 8 | text-align: right; 9 | float: right; 10 | } 11 | 12 | #system-info { 13 | color: #666666; 14 | } 15 | 16 | #splitter { 17 | position: absolute; 18 | cursor: col-resize; 19 | top: 0; 20 | right: -10px; 21 | height: 100%; 22 | width: 10px; 23 | } 24 | 25 | .all { 26 | background-color: #43a3d0 !important; 27 | } 28 | 29 | .passed { 30 | background-color: #5FB65F !important; 31 | } 32 | 33 | .failed { 34 | background-color: #FF6E6E !important; 35 | } 36 | 37 | .skipped { 38 | background-color: #FFB626 !important; 39 | } 40 | 41 | .panel { 42 | background-color: #fff; 43 | border: 1px solid #ddd; 44 | border-radius: 4px; 45 | box-shadow: 0 1px 1px rgba(0, 0, 0, .05); 46 | } 47 | 48 | .panel-heading { 49 | color: #333; 50 | background-color: #f5f5f5; 51 | padding: 10px 15px; 52 | border-bottom: 1px solid #ddd; 53 | border-top-left-radius: 3px; 54 | border-top-right-radius: 3px; 55 | } 56 | 57 | .panel-body { 58 | overflow-x: auto; 59 | padding: 15px; 60 | } 61 | 62 | .navigation { 63 | width: 350px; 64 | position: fixed; 65 | top: 76px; 66 | bottom: 2px; 67 | } 68 | 69 | .navigation .panel-heading { 70 | overflow: hidden; 71 | display: flex; 72 | } 73 | 74 | .filter-btn { 75 | display: inline-block; 76 | margin: 0 5px; 77 | padding: 5px 6px; 78 | color: #fff; 79 | font-weight: bold; 80 | text-align: center; 81 | vertical-align: middle; 82 | cursor: pointer; 83 | background-image: none; 84 | border: 1px solid transparent; 85 | border-radius: 4px; 86 | opacity: 0.4; 87 | } 88 | 89 | .filter-btn:hover { 90 | opacity: 0.6; 91 | } 92 | 93 | .filter-btn.selected { 94 | opacity: 1; 95 | border: 1px solid #43a3d0; 96 | box-shadow: 0 1px 1px #43a3d0; 97 | } 98 | 99 | .filter-btn .badge { 100 | margin-left: 4px; 101 | padding: 2px 6px; 102 | border-radius: 10px; 103 | background-color: #fff; 104 | } 105 | 106 | .filter-btn.all .badge { 107 | color: #43a3d0; 108 | } 109 | 110 | .filter-btn.passed .badge { 111 | color: #5FB65F; 112 | } 113 | 114 | .filter-btn.failed .badge { 115 | color: #FF6E6E; 116 | } 117 | 118 | .filter-btn.skipped .badge { 119 | color: #FFB626; 120 | } 121 | 122 | .navigation .panel-body{ 123 | overflow-y: auto; 124 | height: calc(100% - 78px); 125 | } 126 | 127 | .navigation .panel-body:hover .toolbar { 128 | visibility: visible; 129 | } 130 | 131 | .navigation .toolbar { 132 | top: 50px; 133 | right: 18px; 134 | padding: 5px; 135 | visibility: hidden; 136 | position: absolute; 137 | z-index: 1; 138 | background-color: #fff; 139 | } 140 | 141 | .navigation .toolbar .expand-all, 142 | .navigation .toolbar .collapse-all { 143 | cursor: pointer; 144 | color: #43A3D0; 145 | text-decoration: underline; 146 | } 147 | 148 | .tree { 149 | margin-top: -6px; 150 | } 151 | 152 | .tree > ul { 153 | padding: 0; 154 | } 155 | 156 | .tree > ul ul { 157 | margin-left: -18px; 158 | } 159 | 160 | .tree .item { 161 | position: relative; 162 | cursor: pointer; 163 | border: 1px solid rgb(204, 204, 204); 164 | border-radius: 5px; 165 | width: 220px; 166 | height: 26px; 167 | box-shadow: 0 1px 1px rgba(0, 0, 0, .05); 168 | } 169 | 170 | .tree .item:after { 171 | content: ""; 172 | display: block; 173 | pointer-events: none; 174 | width: 100%; 175 | 
height: 100%; 176 | margin-left: 26px; 177 | } 178 | 179 | .tree .item:hover { 180 | border: 1px solid #BAE0F3; 181 | box-shadow: 0 1px 1px #BAE0F3; 182 | } 183 | 184 | .tree .item.selected { 185 | border: 1px solid #43a3d0; 186 | box-shadow: 0 1px 1px #43a3d0; 187 | } 188 | 189 | .tree .item .sign { 190 | height: 18px; 191 | float: left; 192 | padding: 4px 10px; 193 | border-top-left-radius: 4px; 194 | border-bottom-left-radius: 4px; 195 | width: 7px; 196 | } 197 | 198 | .tree .item .name { 199 | float: left; 200 | margin-top: 5px; 201 | padding-left: 4px; 202 | overflow: hidden; 203 | text-overflow: ellipsis; 204 | white-space: nowrap; 205 | width: 186px; 206 | } 207 | 208 | .tree .item .type { 209 | position: absolute; 210 | left: 23px; 211 | top: -8px; 212 | font-size: 12px; 213 | padding: 0 3px; 214 | color: #aaa; 215 | background-color: #fff; 216 | } 217 | 218 | .tree .item .number { 219 | display: none; 220 | position: absolute; 221 | right: 10px; 222 | top: -8px; 223 | font-size: 12px; 224 | padding: 0 3px; 225 | color: #43a3d0; 226 | background-color: #fff; 227 | } 228 | 229 | .tree .item:hover .number { 230 | display: block; 231 | } 232 | 233 | .tree .item .rate-container { 234 | bottom: 0; 235 | left: 26px; 236 | width: 194px; 237 | height: 3px; 238 | position: absolute; 239 | } 240 | 241 | .tree .item .rate { 242 | float: left; 243 | height: 100%; 244 | } 245 | 246 | .tree li { 247 | list-style-type: none; 248 | margin: 0; 249 | padding: 10px 5px 0 5px; 250 | position: relative 251 | } 252 | 253 | .tree li::before, .tree li::after { 254 | content: ''; 255 | left: -8px; 256 | position: absolute; 257 | right: auto 258 | } 259 | 260 | .tree li::before { 261 | border-left: 1px solid #999; 262 | bottom: 50px; 263 | height: 100%; 264 | top: 0; 265 | width: 1px 266 | } 267 | 268 | .tree li::after { 269 | border-top: 1px solid #999; 270 | height: 20px; 271 | top: 23px; 272 | width: 13px 273 | } 274 | 275 | .tree > ul > li::before, .tree > ul > li::after { 276 | border: 0; 277 | } 278 | 279 | .tree li:last-child::before { 280 | height: 23px; 281 | } 282 | 283 | .detail { 284 | width: calc(100% - 360px); 285 | float: left; 286 | position: relative; 287 | margin-top: 12px; 288 | margin-left: 360px; 289 | bottom: 2px; 290 | } 291 | 292 | .detail>.panel-heading { 293 | box-sizing: border-box; 294 | position: absolute; 295 | width: 100%; 296 | min-height: 48px; 297 | z-index: 1; 298 | } 299 | 300 | .detail>.panel-heading.fixed { 301 | position: fixed; 302 | top: 0; 303 | width: calc(100% - 376px); 304 | } 305 | 306 | .detail>.panel-heading .text { 307 | display: inline-block; 308 | font-size: 16px; 309 | font-weight: bold; 310 | word-wrap: break-word; 311 | word-break: break-all; 312 | } 313 | 314 | .detail>.panel-heading .badge { 315 | display: inline-block; 316 | margin-left: 12px; 317 | padding-right: 4px; 318 | padding-left: 4px; 319 | color: #fff; 320 | font-weight: bold; 321 | border: 4px solid transparent; 322 | border-radius: 12px; 323 | } 324 | 325 | .detail>.panel-body { 326 | margin-top: 48px; 327 | box-sizing: border-box; 328 | } 329 | 330 | .detail td { 331 | padding-top: 2px; 332 | padding-bottom: 2px; 333 | } 334 | 335 | .detail td:first-child { 336 | padding-right: 20px; 337 | } 338 | 339 | .overview { 340 | margin-bottom: 15px; 341 | } 342 | 343 | .overview .tag { 344 | margin-right: 10px; 345 | padding-right: 3px; 346 | padding-left: 3px; 347 | color: #fff; 348 | background-color: #5bc0de; 349 | border: 2px solid transparent; 350 | border-radius: 5px; 351 | } 352 | 353 | 
.overview .group { 354 | margin-right: 10px; 355 | padding-right: 3px; 356 | padding-left: 3px; 357 | color: #fff; 358 | background-color: #777; 359 | border: 2px solid transparent; 360 | border-radius: 5px; 361 | } 362 | 363 | .test-fixture { 364 | margin-bottom: 20px; 365 | } 366 | 367 | .test-fixture .panel-heading { 368 | color: #fff; 369 | font-weight: bold; 370 | padding: 5px 17px; 371 | opacity: 0.9; 372 | } 373 | 374 | .test-fixture .panel-heading:hover { 375 | opacity: 1; 376 | } 377 | 378 | .test-fixture .logs p { 379 | margin: 2px 0; 380 | } 381 | 382 | .test-fixture .logs .log-level { 383 | font-weight: bold; 384 | text-transform: uppercase; 385 | color: #777; 386 | } 387 | 388 | .test-fixture .logs .debug { 389 | color: #749A06; 390 | } 391 | 392 | .test-fixture .logs .info { 393 | color: #00A3E8; 394 | } 395 | 396 | .test-fixture .logs .warning { 397 | color: #ECBA04; 398 | } 399 | 400 | .test-fixture .logs .error { 401 | color: #FF0000 402 | } 403 | 404 | .test-fixture .logs .critical { 405 | color: #CE2400; 406 | } 407 | 408 | .test-fixture .images { 409 | border-left: 1px #979797 dashed; 410 | margin: 8px 0 8px 20px; 411 | display: flex; 412 | } 413 | 414 | .test-fixture .image:first-child { 415 | margin-left: 20px; 416 | } 417 | 418 | .test-fixture .image { 419 | margin-right: 10px; 420 | } 421 | 422 | .test-fixture .image .link { 423 | display: inline-block; 424 | padding: 1px; 425 | line-height: 0; 426 | border-radius: 4px; 427 | transition: background-color 0.5s ease-out; 428 | } 429 | 430 | .test-fixture .image .link:hover { 431 | background-color: #4ae; 432 | transition: none; 433 | } 434 | 435 | .test-fixture .image img{ 436 | height: 40px; 437 | border: 1px #c3e3fc solid; 438 | border-radius: 3px; 439 | } 440 | 441 | /* 442 | * dashboard 443 | */ 444 | .dashboard { 445 | margin-bottom: 15px; 446 | } 447 | 448 | .dashboard:after { 449 | content: ""; 450 | display: block; 451 | clear: both; 452 | } 453 | 454 | .dashboard .pieChart { 455 | float: left; 456 | width: 200px; 457 | height: 200px; 458 | margin: 10px; 459 | } 460 | 461 | .dashboard .legend-group { 462 | float: left; 463 | margin-top: 20px; 464 | margin-left: 40px; 465 | 466 | } 467 | 468 | .dashboard .legend { 469 | height: 18px; 470 | } 471 | 472 | .dashboard .legend .icon { 473 | float: left; 474 | width: 13px; 475 | height: 13px; 476 | } 477 | 478 | .dashboard .legend .text { 479 | float: left; 480 | margin-top: -1px; 481 | margin-left: 4px; 482 | } 483 | 484 | /* 485 | * Lightbox v2.8.1 486 | */ 487 | 488 | .lightboxOverlay { 489 | position: absolute; 490 | top: 0; 491 | left: 0; 492 | z-index: 9999; 493 | background-color: black; 494 | opacity: 0.8; 495 | display: none; 496 | } 497 | 498 | .lightbox { 499 | position: fixed; 500 | top: 50px; 501 | left: 0; 502 | width: 100%; 503 | z-index: 10000; 504 | text-align: center; 505 | line-height: 0; 506 | font-weight: normal; 507 | } 508 | 509 | .lightbox .lb-image { 510 | display: block; 511 | height: auto; 512 | max-width: inherit; 513 | border-radius: 3px; 514 | } 515 | 516 | .lightbox a img { 517 | border: none; 518 | } 519 | 520 | .lb-outerContainer { 521 | position: relative; 522 | background-color: white; 523 | width: 250px; 524 | height: 250px; 525 | margin: 0 auto; 526 | border-radius: 4px; 527 | } 528 | 529 | .lb-outerContainer:after { 530 | content: ""; 531 | display: table; 532 | clear: both; 533 | } 534 | 535 | .lb-container { 536 | height: 100%; 537 | width: 100%; 538 | padding: 4px; 539 | } 540 | 541 | .lb-loader { 542 | position: absolute; 543 
| top: 43%; 544 | left: 0; 545 | height: 25%; 546 | width: 100%; 547 | text-align: center; 548 | line-height: 0; 549 | } 550 | 551 | .lb-cancel { 552 | display: block; 553 | width: 32px; 554 | height: 32px; 555 | margin: 0 auto; 556 | } 557 | 558 | .lb-nav { 559 | position: absolute; 560 | top: 0; 561 | left: 0; 562 | height: 100%; 563 | width: 100%; 564 | z-index: 10; 565 | } 566 | 567 | .lb-container > .nav { 568 | left: 0; 569 | } 570 | 571 | .lb-nav a { 572 | outline: none; 573 | height: 100%; 574 | cursor: pointer; 575 | display: block; 576 | opacity: 0; 577 | transition: opacity 0.6s; 578 | } 579 | 580 | .lb-nav a.lb-prev { 581 | width: 25%; 582 | left: 0; 583 | float: left; 584 | background: url('data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAtCAYAAADsvzj/AAAFF0lEQVR4Ac2ZW0xcVRSGPTNnhlPKcCsUAeeChkEVxhutDQwzMANaqamNWgpaH+yDIaZp1cRHbgH0gTsxkmDCI/hiRAqgD5qYRgKQ8II6TE00wfgGAcIdKeM/ydrNZIezxxg9m518gRxWmn6s9a9zhvNQJBL5T/gfjokwA5Uw0zWFeHBOugiTsAArfSWZky+iABVowAZSwRkiDSTRz1iHlJMmogATsIDTIAPYgRs8SeTTtXSQSLVKFNkivIQKksDDJFCsquqLmqZdAa/i+yCuPQ1cJHOKjdpJEWGdsIFs8BQoy83NvTEzMzO3t7f318HBweHc3Nxdj8dznWQeIWmpIryENUaiCPgdDsfN+fn5XyLcWV5eDlmt1gBqHgOpbAHIFmESySAHeECF0+m8hd/+vcgxZ3d39wBj9grqCkA6iaiyRBRunJhEpcvl+nBhYeG3iM7Z2dnZgkg1ZSgNqLI6wgebSVTZ7faPlpaW/tSTWF9f36ivr+9AbQkF3iZRhAs2dSInJ+eDUCj0h0Biq7S09BPUBkEhyAKJssKusE6QRCGoQLDfn56eDulJrK6ubgeDwS7UXgTPAztIkXUfUbhxKgLlyMRtBPtXPYm1tbXdqqoqJnEOOGhbJQCTkSJ8sJlEMNoJrFhdicPDw6PKyspe1FaD85yE2YBnLUGwSSIrK+s2bnZLehIbGxubfr+/B7WXSMJJ42QlCcVAES7YJJGdnR0dp7BgnLZKSko6qBPngIvrBEkYIKIT7PLoOKET4TjB7kbty+A8SaRxmcAxQEQn2BUI9q3Z2dl7gk7sINhRiZeoE87jMmGECB/s3JhgR8dJV2Jzc3Pb5/N1UieKKdgsEyaAY5wIk2Dj5GHBRifCgmBHb3adLBNsO3HBNkxEAWZwCmSCx4EPwb4ZJ9jbCHYXSRQDpyDYhomoNFIOUIRMvINO/KQnsbKyshMIBD5D7RVwgQWblzBahD2Sp5jN5jzM+9uLi4s/60mEw+FNbKcvUH8DVIECcAZoXLCliaRaLBbX8PBwb0RwRkZGfkftx+BdUM4+KInDbdxoWUCKoih5CQkJgYGBgS/xs6PjRPb394+ampp+RP174CIoBGcpYypQZIqYY+4dz4DLvb29Y6LONDY2fou6OuAF+SCDZCgj8kQSQDqNihfU9vX1TYlkGhoa7qDuDVBKMpQVrjMG30fYCs6gAHuRmdqurq5JkUxLS8sEaq+CMq4zJGOgCB2Fk8kHJSaTqaazs3Pi2MzQaWtrm0RtDfDFyCQyGUNFOJlEkMlkwLWenp5vRDKtra1TNGYsM5mcjKEifGeYjBfUQUaYmebm5omYzLjFC8C4zyNqTGfcNDZ1/2ABjKHudZLXkTFARJAZN/CqqnqNMqN7Ojo6vqMF4ONkVFmvFUQLQNiZ7u7u76PZAn6S4TJjrIhoAdT+iwXAdQYYKCJaAG/iPhNvAYyj7jXwAngUpAGrDBF+ATCZAuBXFOX60NDQ3TiPM1/hyfoyPf7kgNNSXyvwmSGZMk3T3hocHPwhzlPzJLLFnpZT5PztV5wZNyilbTZFmTnZrxU4GZWXATV4ap4kmeNELlEticjsSHyZq/39/V/j374P2Lk/Pj5+BznxUuDlj1acJ4B8cAH/4er29vbPR0dH58fGxubx/ac2my1Ab3iz5Yc9/gJIB05QCJ4Fz9FXD3gC5HIfi+WKCGQ0GpuzwA7yCDtdS+b/SCFfRPwaQqPxSSaS6JrlwUjR+RtEvCM0ct4sLQAAAABJRU5ErkJggg==') left 50% no-repeat; 585 | } 586 | 587 | .lb-nav a.lb-prev:hover { 588 | opacity: 1; 589 | } 590 | 591 | .lb-nav a.lb-next { 592 | width: 25%; 593 | right: 0; 594 | float: right; 595 | background: 
url('data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAtCAYAAADsvzj/AAAFDUlEQVR4Ac2ZS0xcVRjHvTN3hisw0GIRZ3AeLWHQWqdVsRqgA86AUmpqoy20Whd2YYhprJq45BVAF7yJkQQTluDGiEhBF5qYRsIjYYMKQxNNMO4gQHgjZfxP8pF8ufEe0qQ5pyf5BTKcWfzyff/vnHt5xLQ0wgbsQCfswEY80BWPxx8I5sUlHMBJP0nm4RfRWAUMkAqOgseII8AFDNqjPYwiGuEAySADeEEuOEkE6bNjIIX22riQchHWSo+SRACc1nU9ahjGG+ASfn8Vn+WT0BNUMV0so04kFTwJTodCoeuTk5N3dnd397a3t/8dHx+fzM7OvoG/nQPPADdwscqoF2HBPgJynE5nZGFhYTZuWlNTU3/4fL6b2FMMnmUyTpJRLqKTSAbIQyu9vrW1tRv/n4Uqzfv9/g+x7xUQAh6QxmVUV0SnKRWESMXm5uZ63GJNT0//GQgEPsHeUibD20xTLeKioBdUV1e3rKysrFrJzM3N/eP1ej/F3jImIxgAcsOeDLLAKRAtLCz8HDKWlZmdnf3b4/F8zCojGADyz5F04AUvgPJoNNq2tLS0YSUzNjY2iwHwEWXmFHCzymiqRGwgiaaXD7wIysvKytqWl5e3rGQwAO4iM7ewt4SmmYfLqLpr2U0yZ0FFaWlp597e3r6VDEbzXapMlGQEA0COiEYyTmozP8lcKC4u7lhdXV2zksGhOZeVlXWLy5gHgDwRJsMqE6A2qygoKGhBm60L2izmdruZjGkAyBShxTNzlGTOgvMYAO2iAYDKxKjNSgQDQI6IRWb8VJnXMADaUZlNK5mJiYl5DAC6AQgGgCwRWjaWGR/IB+fD4XDr2trahqDN5lEZ3mbZ5gEgW4QPAD6aK3BotmIArAsqE2MDIMTajGTkinAZ3mb5NAAS58zGIQPgJvaGwVMgk5597ECTLcJl+AB4GVyKRCJfLi4uijLzGzLzHrWYj1pMVyXCB4BBz/J5oAzcwDT7OhaLWZ4zMzMzvyNX79rt9uOUNyewqRSxsbzk0Jh9H3w2MDDwV1yw+vv7Ox0OR4C+q1REAzr1+ON0TpSDD+rq6n7d2dmxusbs9/T0fJOUlBTRNO2gIg6lGSGJYyAXFIFrtbW1P4oq0dnZOYR9F8EZdqaoCDtVgrJBEoXgck1Nzfciia6urlHsu0rSOSADJEkXYRK8EufAlYaGhtsiiba2thFk4kAij75Po1fiOcIkkplEGFQ2NTWNCBz2W1tbb9tstkrsLaDvcQlN5hWFS2SyTFxubGwcFUl0dHT8gH1VTCITJHMJWSLmYAcPMlFfXy9sJ0gkMnGNpEnCXAkJIhYSReAtBHvosGCTRBgEWSV0qc8jPNhMIgyutLS0/CSSSGRC1/Uqkg5aZUKGiDkTQVAMqtrb238+RGJUHGyZb1F4Je4/2FfFwZYr4qRb7QnwEngTwR4+5JxIZOJtcbDlv2lMAR5wBjfUi7h2fCuS6Ovru6Np2nVqvzwmQcFW9+43HeSg10twix0RSfT29v5iGMY7dMLniTOh+N8KghN7lKZTIQgKMiG/IkwkCJELFiL7uMWOYE+lWUL8elRNa51APoqGh4cTN9p7TOJed3f3d4nz5P4l1ITdDU66XK5Ic3PzF0NDQ1ODg4NT+P0rCFbQM3qu4MRWLsIfX7PB0yAEngPP089TwA8yBMFWKmJ+qZBGj7FecJzw0mfpwBBLqBexseAbIBWkESnAEPybQLnIf4JfIzSb+FymAAAAAElFTkSuQmCC') right 50% no-repeat; 596 | } 597 | 598 | .lb-nav a.lb-next:hover { 599 | opacity: 1; 600 | } 601 | 602 | 603 | .lb-nav a.lb-rotate { 604 | width: 50%; 605 | float: left; 606 | background: 
url('data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAACXBIWXMAAAsTAAALEwEAmpwYAAAABGdBTUEAALGOfPtRkwAAACBjSFJNAAB6JQAAgIMAAPn/AACA6QAAdTAAAOpgAAA6mAAAF2+SX8VGAAAN5klEQVR42mL8//8/w3AAAAHExDBMAEAAscAYTo6O1DCPEYlmgtKMSOL/0fA/JHGKAEAAsVDJ8SDMDDUPhFmR2MxIMQ9y8F8g/gPFv5DY/6CYLE8BBBALDocxITmOAckR6JbBHM8GxBxAzAnEXFCaAyrOguSRf0ge+AnE36H4GxD/gIr/hnqWJA8BBBALFk/AQpQdimFqfqNZxAiV5wZiHiDmB2JeKIZ5hg3qWWYsHvkB9cAXIP6EhJE9RbSHAALwUS4pAMIwEBV0kQpq/PT+F2z8VKrFdmECWRQXDgzZDGQehHxBai0nxXo1FCBBF2XNCsTIntgoeQBAa+3CmgfEzhjTNqyKv+OTUr5CiIf3Jzm3EpGLMW4KIHNV7wp4F2f3C/QKIBYssQEKTWEgFmdmZhYRExeXZGJkZHz69OlbqEe+QD3FCg19QaC0sCwQaGhqKmpqasoBgaCEhAQPPz8/OxsbGyvQHHBS/QsEv379+vP58+efT548+XLnzp0Pt27denzj+vX7r169egSUfgNUB8KvoPR7qJ0/CcUOQAAxwuoRYKnFBA1hMSCW5eXlVXF1c3O2tLQ0/g0MyQMHDlzas3v3pX///iF7hFtRUVHOw8PDSF9fX0FOXl4QGCOspKRtkNm3b9/+cO7cuUe7d+069/jx49tA4ddA/ByIX0A99BGa3P7g8gxAALGg1Sls0OQkoqmlZRgXF+fDx8cHSmYMSkpK0uzs7CJbNm++AvT8X0FBQS5PLy9tVxcXbVlgDDAxMTGSU9qwsrKyaGlpiQBjUhgYaIrbtm27un/fvpMfPnzghqYOdmgy/ghN1r+xeQYggNDzCBs0k/ICk4YMzBMgICYmxgf0mCUw+rkeP3r0MTomRtvQ0FAa6BCqVKrA5MmorKwsmJmZaWViYiK/aNGiAzdv3LiEVGDA7MHqGYAAQs8jzLBi9P+/f2zolgkLC3MlJiYaAu1kAMYIB4gmBsCSLzHqgeUCEzBmgJEsFzB1yhSB06dPHwcGHjPUfchVAEqeAQggbB4B1wvAvMCMzSKgZziIcPR/EA1M/6DM/RdEQx3JDMz8YAz1FCMuz0lLS/OWlZe7zp07l2PH9u0HQAUFUmX6F5pn/sLUAwQQC5YamgmMiQ1uNAAtkT48fPjw7YsXLz4C0/rXnz9//gZ5jA2YDoHJlQuYbPkVFBWFgAWdABBw4jILKMeRlJRk+wuof/fu3b+hSeonUl0GL5YBAogFR3ODJAAMrH/AOuHbpUuXXly8cOHx7Tt3Hr95/fo1MDa+/vnz5weocIAGPzOwUGADFhpcQkJCIsASTxpY2smZmprKAot5Hmxmg5JwQmKi3fv37z+cOXMGVol+h3oI1rxhAAggittaQE/8v379+rsN69ffvHr16tNPnz69/P79+3NosfkJaulvpAqX7du3b1xAh/E8e/bs3pUrV8QvXryo6uDoqGVmZiYDyiPodkhKSvIACxeHd0Bw7949WF32Fbn2BwggXB5hZCIyZQFDGEx//fbt98uXLz9Ci8l30HrgNdQzv6BJgBla/4BLRqCHBYD4JdB9b+7evfsCqN/M1dVVhYeHB6OgARbPEkA501mzZj0DxvAHaO0P88wfgADClbSITmKg3ApMIvyRkZEawMrwx5HDh19DM+ZXpCbHd2h6ZoLFCtQzoPbZO2AS/HL//v2vq1et+gnKSz4+PmrAAgHFbUA+k42trdaJkyevAZMvyMy3UM+AzP4LEEC4MjtJeYWbm5tVT09PAtgaMBERFv69b9++t8Ckw4rURvuJ1NCEJTFYEvkMTfe/gIXD/+3bt3MAmzYcTk5OSujlDajZ42Bvr3fp4sXbQA8LQptIoBj/CRBATLgyOjAzspOaX4AxIxIYFGRpbWOjAUxyXEiVGQNax+oP1HNfoCH7FIgfAvHje3fv3tyzZ8/l27duvUM3H9hmYzQ0MlJWV1eXhTZSeaE1PxNAAKFnLHjHB+goAXIyv5SUlIiKioo00CPInSpssfsfqT4A5auXIA8BQ/rFrZs37x48ePAOrP5BBsAWBq+llZU61BM80CTKDBBA6B75B0sKomJiHKR6AlQLfvz48ROw6H0LrFD/IJX1OLVA5X9Bkwgodl4DzXh+7fr1R8+ePv0ENOc/WtuMCdjuk4J6gBvWFgMIIBYsngCl10/A0PgGTOffgemfA1SpAfsNf3A1M4AlHNhBX75+/QosSi8dPXr0LNABH5FqX0Ke+QtNauB+CVDvhzdv3ry+cfPmW3EJCVAfhxmplGQEFsfC0KYUOyz5AgQQukdg0fx6186dO4AVlxSwkyR55+7dN8CSBZGRmJj+I3nnPxMz8z9gdPwEhuQbYJP8DrB2fwwNXZytVSye+QO1H1wIAMPkE7CF8B4YoHLIHoG283iQutigQoUJIIBY0Az7CfUIC7A5vRva++NGyrTIIyAMWLqvX6D63yJ1iv4QmTL/IRUCP3//+vX908eP34HNm7/A0hBFIbSeYUbOgwABhOyRv0iO+Qc18B1Sv50Jh0dgofkbqR/+Fa0Z8Z8Ez8Bbt3+ATZ9/QIylhcyMNvTECBBA6BXiH6Tk8A0pJhiRmtH/cWRYWOv0N1ILldTRECakugzYQWZiZIJ0k1Ed+ecPsrlgNwEEEAuWTPcPKb2iV4z/CaTz/2gWkDrqCav1QQ1lDmCS4gLmU4zuxJcvX34hBRS4BQwQQCx4HMRARPOFWqOFyJ06cBcXCHilpKX50BuRoCYMqBRFypegFPAPIIBYSLCEGWkEkQnLqOEfAsUsodiADUMJAEsmAWDLQlhVVVUY1AlDr6tePH/+FlYowFrAAAHEQoQnWJBGEWE1KQvUI+iZ+zdyr40MT4DqBxFgqSSmrKIiKy8vL4A+qAHMH/+ATflnUHu/wdpxAAHEQsATsCY3H9QS0EAcLyjegX2KX1CDkIvbLySMEDIieYIXOgwFqrHFgU0cZQcHBxVg/sBwH7Bh+eXosWM3oXZ9gVW6AAGEzyPMSJaIg7rRwAwoYWxsrK2nr68CzHBfjx8/fvU+MHigajignanPSMUuthFCRqSMjewJUENQRlpGBtTJ0tXR0RHFKJuBzRVgy+H+zRs3HkOb8DC7/gEEEAuBfMEObWVKAJsqipaWluaeXl7mampqMsCa/pewkJDcmjVrjjx//vw+NJNyI3nmO44BaWakQW+QJ0RAnUCQJ4SFhVU8PT1NHR0dFZmxFLuvXr36un/fvgvAbAJLATCP/AcIIBYCaRdkGR+wxSlrYWlp4+fra6OkrCwO64O4uLrq//v/nw3YzT0J9Aw3MMT4ocnvPZpn/iGZyYqU30AtBxFg5pYA2qHg5eVlBvSIBjCPsGMbFzhy5MiNS5cu3YYG1jto3gQ3gQAC
iIWYjK5vYKAZGBhoDcx84sgKgOU8u6+vry6w7cO7d+9eUB/iLrC99RLYPnqP1GH6iVQAwGID1FfhBRatgnx8fOLA1qySo5OTDjBfKHJycmIdcr1+/frrnTt3ngQG1kvo2DC8dwiSBwggogYf3r979wGUlEBlOHrrF1Q8AntzykBPCp09e1b+3LlzDx7cv/8U2Oj7AAzFb0CM4hEgYAdiLmCMCsjIyEgCO0oKZqamCiqqqkK4Brlev379bemSJQfv3rlzG2k8+DPSWAADQACxEGj3gNtPZ86cuQ9sTp9JS0sTB8UCNsWg4U4FBQUBW1tbVWD/GzSY8Pr5s2cfgDH0DRhDv0G2sTAzswgICHADm+ECwEgQBSZTYWCS4sY2coI0TvZr8eLFR0+ePHkOyH0G9ch75NgAAYAAIuQRWHf03e5du04CQ1EoJibGCtsoB6wrCupXAzE3sHSTBRbRf4D9mN+gqQTQ/AgbsLkBbJKzAUtvFtBgAr6RRvDIzNevv5YsXnxi544dh4HcJ9Au8WukqQ14IQIQQCwEOjs/oGnxFdAx3OvXrdsLbB78SklJsREREeHCMaoCpkBtJBAGJkd2ZAsZiRzBBHasvs2ePfvogf37jwAj9AFQ6BE0Nj7Cilxk9QABREyMfIJNbAINZAR2uH5/eP/+c3xCgp2GhoYIIYfBPEZKd/nGjRtvFixYcOj0qVOnoTHxCJqs3iHNYqHUTwABxEJEr+071AB4n+XUqVM/Hz58+AJYp1i4uLiog0YCyRwqRmkMvnz58svu3btvbdu69TiQfQealJ5CY+IdUnGO0WoACCDkGStimiqgekIUWoFJAPOEBLA5oeHu4WFkoK8vLScvz8MEG3okticFBKBpuPPnzz8Fxvb5W7duXQeWdC+gjoeNVn7E5wkQAAggYorf/0gGwHqRIEM/Ay38ePPmzXePHj26C2xaKGhraysBk5sUsPTil5KS4gLWCWzQ3hxKp+j79++/gCH+HViyfQQ2N55eu3bt/uPHj++Dhk+hDn8Fpd+jNXlwtt8AAoiYGEFvtsDHbpFmdEENSlBLlR9YPIuJAoGwiIigAD8/H7CE4mBlY2MBBjwDsPj6A3QsZFb37dt3oBH7T58+vYKOuHyAJp+3WFoGBBuhAAFEymj8f6QuLGyQ4CvUAaAKih/oID5gvfEYiLnu3LkDXjAAzDusQMwEzQfAFs3/3zjm2T9Cadi09C9S+vsAAUTOtAKyh5Cb8m+gjUZO5JUPQIezADHyyoe/aB75jkT/QIsBonudAAFE7vwI8qKYP2geYkVausGKNHiBvhYFeSUF8mAFWd1mgACixqIa9BhiRBsNYUDzCANSP+UfBYMVKAAggBiHy8IzgAADANYUKJxiYuoFAAAAAElFTkSuQmCC') 50% 50% no-repeat; 607 | } 608 | 609 | .lb-nav a.lb-rotate:hover { 610 | opacity:1 611 | } 612 | 613 | .lb-dataContainer { 614 | margin: 0 auto; 615 | padding-top: 5px; 616 | width: 100%; 617 | } 618 | 619 | .lb-data { 620 | padding: 0 4px; 621 | color: #ccc; 622 | } 623 | 624 | .lb-data .lb-details { 625 | text-align: left; 626 | line-height: 1.1em; 627 | } 628 | 629 | .lb-data .lb-caption { 630 | font-size: 13px; 631 | font-weight: bold; 632 | word-wrap: break-word; 633 | word-break: break-all; 634 | } 635 | 636 | .lb-data .lb-number { 637 | display: block; 638 | color: #999999; 639 | } 640 | -------------------------------------------------------------------------------- /ptest/main.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import os 3 | import shlex 4 | import traceback 5 | from xml.dom import minidom 6 | 7 | from .util import make_dirs, remove_tree 8 | 9 | 10 | def get_rerun_targets(xml_file: str): 11 | test_targets = [] 12 | doc = minidom.parse(xml_file) 13 | if doc.documentElement.nodeName == "testsuites": 14 | root = doc.documentElement 15 | else: 16 | root = doc 17 | for test_suite_node in root.getElementsByTagName("testsuite"): 18 | for test_case_node in test_suite_node.getElementsByTagName("testcase"): 19 | if test_case_node.getElementsByTagName("failure") or test_case_node.getElementsByTagName("skipped"): 20 | test_target = "%s.%s" % (test_case_node.getAttribute("classname"), test_case_node.getAttribute("name")) 21 | test_targets.append(test_target) 22 | return test_targets 23 | 24 | 25 | def merge_junit_xmls(xml_files: str, to_file: str): 26 | from .plogger import pconsole 27 | from .test_suite import default_test_suite 28 | 29 | pconsole.write_line("Start to merge junit result xmls...") 30 | 31 | test_case_results = {} 32 | 33 | for xml_file in xml_files: 34 | doc = minidom.parse(xml_file) 35 | if doc.documentElement.nodeName == "testsuites": 36 | root = doc.documentElement 37 | else: 38 | root = doc 39 | for test_suite_node in root.getElementsByTagName("testsuite"): 40 | for test_case_node in 
test_suite_node.getElementsByTagName("testcase"): 41 | test_case_name = "%s.%s" % (test_case_node.getAttribute("classname"), test_case_node.getAttribute("name")) 42 | test_case_status = 0 # passed 43 | if test_case_node.getElementsByTagName("failure"): 44 | test_case_status = 1 # failed 45 | elif test_case_node.getElementsByTagName("skipped"): 46 | test_case_status = 2 # skipped 47 | 48 | if test_case_name not in test_case_results or test_case_status < test_case_results[test_case_name]["status"]: 49 | test_case_results[test_case_name] = {"status": test_case_status, "node": test_case_node} 50 | 51 | doc = minidom.Document() 52 | test_suite_ele = doc.createElement("testsuite") 53 | doc.appendChild(test_suite_ele) 54 | test_suite_ele.setAttribute("name", default_test_suite.name) 55 | test_suite_ele.setAttribute("tests", str(len(test_case_results))) 56 | test_suite_ele.setAttribute("failures", str(len([result for result in test_case_results.values() if result["status"] == 1]))) 57 | test_suite_ele.setAttribute("skips", str(len([result for result in test_case_results.values() if result["status"] == 2]))) 58 | test_suite_ele.setAttribute("errors", "0") 59 | 60 | for test_case_result in test_case_results.values(): 61 | test_suite_ele.appendChild(test_case_result["node"]) 62 | 63 | if os.path.exists(to_file): 64 | pconsole.write_line("Cleaning old merged junit result xml...") 65 | os.remove(to_file) 66 | else: 67 | make_dirs(os.path.dirname(to_file)) 68 | 69 | f = open(to_file, mode="w", encoding="utf-8") 70 | try: 71 | doc.writexml(f, "\t", "\t", "\n", "utf-8") 72 | pconsole.write_line("Merged junit xml is generated at %s" % to_file) 73 | except Exception: 74 | pconsole.write_line("Failed to generate merged junit xml.\n%s" % traceback.format_exc()) 75 | finally: 76 | f.close() 77 | 78 | 79 | def main(args=None): 80 | import sys 81 | from . import config 82 | # load arguments 83 | if args is None: 84 | args = sys.argv[1:] 85 | elif not isinstance(args, (tuple, list)): 86 | if not isinstance(args, str): 87 | sys.stderr.write("ERROR: args <%s> is not a string or argument list." % args) 88 | return 89 | args = shlex.split(args) 90 | config.load(args) 91 | 92 | # merge junit result xmls 93 | junit_xmls = config.get_option("merge_junit_xmls") 94 | if junit_xmls is not None: 95 | merge_junit_xmls(junit_xmls, config.get_option("to")) 96 | return 97 | 98 | # run test 99 | from .test_filter import TestFilterGroup, TestIncludeTagsFilter, TestExcludeTagsFilter, TestIncludeGroupsFilter 100 | from . 
import test_executor, reporter, plistener 101 | from .test_finder import TestFinder 102 | from .test_suite import default_test_suite 103 | from .plogger import pconsole 104 | 105 | pconsole.write_line("Starting ptest...") 106 | 107 | # add workspace to python path 108 | workspace = config.get_option("workspace") 109 | sys.path.insert(0, workspace) 110 | pconsole.write_line("Workspace:") 111 | pconsole.write_line(" %s" % workspace) 112 | 113 | # add python_paths to python path 114 | python_paths = config.get_option("python_paths") 115 | if python_paths is not None: 116 | pconsole.write_line("Python paths:") 117 | for python_path in python_paths: 118 | sys.path.append(python_path) 119 | pconsole.write_line(" %s" % python_path) 120 | 121 | # test filter group 122 | test_filter_group = TestFilterGroup() 123 | 124 | include_tags = config.get_option("include_tags") 125 | if include_tags is not None: 126 | test_filter_group.append_filter(TestIncludeTagsFilter(include_tags)) 127 | 128 | exclude_tags = config.get_option("exclude_tags") 129 | if exclude_tags is not None: 130 | test_filter_group.append_filter(TestExcludeTagsFilter(exclude_tags)) 131 | 132 | include_groups = config.get_option("include_groups") 133 | if include_groups is not None: 134 | test_filter_group.append_filter(TestIncludeGroupsFilter(include_groups)) 135 | 136 | filter_path = config.get_option("test_filter") 137 | if filter_path is not None: 138 | splitted_filter_path = filter_path.split(".") 139 | filter_module = importlib.import_module(".".join(splitted_filter_path[:-1])) 140 | filter_class = getattr(filter_module, splitted_filter_path[-1]) 141 | test_filter_group.append_filter(filter_class()) 142 | 143 | if test_filter_group: 144 | pconsole.write_line("Test filters:") 145 | for test_filter in test_filter_group: 146 | pconsole.write_line(" %s" % test_filter) 147 | 148 | # get test targets 149 | test_targets = config.get_option("test_targets") 150 | if test_targets is not None: 151 | pconsole.write_line("Test targets:") 152 | for test_target in test_targets: 153 | test_finder = TestFinder(test_target, test_filter_group, default_test_suite) 154 | test_finder.find_tests() 155 | if test_finder.repeated_test_count: 156 | pconsole.write_line( 157 | " %s (%s tests found, %s repeated)" % (test_target, test_finder.found_test_count, test_finder.repeated_test_count)) 158 | else: 159 | pconsole.write_line(" %s (%s tests found)" % (test_target, test_finder.found_test_count)) 160 | else: 161 | # rerun failed/skipped test cases 162 | pconsole.write_line("Run failed/skipped tests in junit xml:") 163 | junit_xml = config.get_option("run_failed") 164 | test_targets = get_rerun_targets(junit_xml) 165 | found_test_count = 0 166 | for test_target in test_targets: 167 | test_finder = TestFinder(test_target, test_filter_group, default_test_suite) 168 | test_finder.find_tests() 169 | found_test_count += test_finder.found_test_count 170 | pconsole.write_line(" %s (%s tests found)" % (junit_xml, found_test_count)) 171 | 172 | # add test listeners 173 | listener_paths = config.get_option("test_listeners") 174 | if listener_paths is not None: 175 | pconsole.write_line("Test listeners:") 176 | for listener_path in listener_paths: 177 | pconsole.write_line(" %s" % listener_path) 178 | splitted_listener_path = listener_path.split(".") 179 | listener_module = importlib.import_module(".".join(splitted_listener_path[:-1])) 180 | listener_class = getattr(listener_module, splitted_listener_path[-1]) 181 | plistener.test_listeners.append(listener_class()) 182 | 
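    # A listener passed via -l(--listeners) could look like this (illustrative sketch;
    # the module and class names are hypothetical):
    #
    #     from ptest.plistener import TestListener
    #
    #     class TimingListener(TestListener):
    #         def on_test_case_finish(self, test_case):
    #             print("finished: %s" % test_case.full_name)
    #
    # and would be enabled with: ptest -t mytests -l mylisteners.TimingListener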
183 | # init test suite 184 | default_test_suite.init() 185 | test_cases = default_test_suite.test_cases 186 | 187 | # exit if no tests found 188 | if len(test_cases) == 0: 189 | pconsole.write_line("=" * 100) 190 | pconsole.write_line("No tests found. Please check your command line options.") 191 | return 192 | 193 | # add webdriver instance to test executor to support capturing screenshot for webdriver 194 | try: 195 | from selenium.webdriver.remote.webdriver import WebDriver 196 | except ImportError as ie: 197 | pass 198 | else: 199 | def add_web_driver(executor, web_driver): 200 | web_drivers = executor.get_property("web_drivers") 201 | if web_drivers is None: 202 | web_drivers = [] 203 | executor.update_properties({"web_drivers": web_drivers}) 204 | web_drivers.append(web_driver) 205 | 206 | def new_start_client(self): 207 | try: 208 | current_executor = test_executor.current_executor() 209 | add_web_driver(current_executor, self) 210 | add_web_driver(current_executor.parent_test_executor, self) 211 | add_web_driver(current_executor.parent_test_executor.parent_test_executor, self) 212 | except AttributeError as ae: 213 | pass 214 | 215 | def remove_web_driver(executor, web_driver): 216 | web_drivers = executor.get_property("web_drivers") 217 | if web_drivers: 218 | web_drivers.remove(web_driver) 219 | 220 | def new_stop_client(self): 221 | try: 222 | current_executor = test_executor.current_executor() 223 | remove_web_driver(current_executor, self) 224 | remove_web_driver(current_executor.parent_test_executor, self) 225 | remove_web_driver(current_executor.parent_test_executor.parent_test_executor, self) 226 | except AttributeError as ae: 227 | pass 228 | 229 | WebDriver.start_client = new_start_client 230 | WebDriver.stop_client = new_stop_client 231 | 232 | # print test names 233 | pconsole.write_line("=" * 100) 234 | pconsole.write_line("Start to run following %s tests:" % len(test_cases)) 235 | pconsole.write_line("-" * 30) 236 | for test_case in test_cases: 237 | pconsole.write_line(" %s" % test_case.full_name) 238 | pconsole.write_line("=" * 100) 239 | 240 | # clean and create temp dir 241 | temp_dir = config.get_option("temp") 242 | if os.path.exists(temp_dir): 243 | remove_tree(temp_dir, remove_root=False) 244 | else: 245 | make_dirs(temp_dir) 246 | 247 | # run test cases 248 | test_executor.TestSuiteExecutor(default_test_suite, int(config.get_option("test_executor_number"))).start_and_join() 249 | 250 | # log the test results 251 | status_count = default_test_suite.status_count 252 | pconsole.write_line("") 253 | pconsole.write_line("=" * 100) 254 | pconsole.write_line("Test finished in %.2fs." % default_test_suite.elapsed_time) 255 | pconsole.write_line("Total: %s, passed: %s, failed: %s, skipped: %s. Pass rate: %.1f%%." 
% ( 256 | status_count.total, status_count.passed, status_count.failed, status_count.skipped, default_test_suite.pass_rate)) 257 | 258 | # generate the test report 259 | pconsole.write_line("") 260 | pconsole.write_line("=" * 100) 261 | reporter.generate_junit_xml(config.get_option("junit_xml")) 262 | reporter.generate_html_report(config.get_option("report_dir")) 263 | 264 | # clean temp dir 265 | remove_tree(temp_dir) 266 | -------------------------------------------------------------------------------- /ptest/plistener.py: -------------------------------------------------------------------------------- 1 | import traceback 2 | 3 | from .test_suite import TestSuite, TestClass, TestGroup, TestCase 4 | 5 | 6 | class TestListener(object): 7 | def on_test_suite_start(self, test_suite: TestSuite): 8 | pass 9 | 10 | def on_test_suite_finish(self, test_suite: TestSuite): 11 | pass 12 | 13 | def on_test_class_start(self, test_class: TestClass): 14 | pass 15 | 16 | def on_test_class_finish(self, test_class: TestClass): 17 | pass 18 | 19 | def on_test_group_start(self, test_group: TestGroup): 20 | pass 21 | 22 | def on_test_group_finish(self, test_group: TestGroup): 23 | pass 24 | 25 | def on_test_case_start(self, test_case: TestCase): 26 | pass 27 | 28 | def on_test_case_finish(self, test_case: TestCase): 29 | pass 30 | 31 | 32 | class TestListenerGroup(object): 33 | def __init__(self): 34 | self.__test_listeners = [] 35 | self.__outer_test_listener = None 36 | 37 | def append(self, test_listener: TestListener): 38 | self.__test_listeners.append(test_listener) 39 | 40 | def set_outer_test_listener(self, outer_test_listener: TestListener): 41 | self.__outer_test_listener = outer_test_listener 42 | 43 | def on_test_suite_start(self, test_suite: TestSuite): 44 | if self.__outer_test_listener: 45 | self.__outer_test_listener.on_test_suite_start(test_suite) 46 | for test_listener in self.__test_listeners: 47 | try: 48 | test_listener.on_test_suite_start(test_suite) 49 | except Exception: 50 | from .plogger import pconsole 51 | pconsole.write_line("The test listener %s.%s raised exception:\n%s" 52 | % (test_listener.__class__.__module__, test_listener.__class__.__name__, traceback.format_exc())) 53 | 54 | def on_test_suite_finish(self, test_suite: TestSuite): 55 | for test_listener in self.__test_listeners: 56 | try: 57 | test_listener.on_test_suite_finish(test_suite) 58 | except Exception: 59 | from .plogger import pconsole 60 | pconsole.write_line("The test listener %s.%s raised exception:\n%s" 61 | % (test_listener.__class__.__module__, test_listener.__class__.__name__, traceback.format_exc())) 62 | if self.__outer_test_listener: 63 | self.__outer_test_listener.on_test_suite_finish(test_suite) 64 | 65 | def on_test_class_start(self, test_class: TestClass): 66 | if self.__outer_test_listener: 67 | self.__outer_test_listener.on_test_class_start(test_class) 68 | for test_listener in self.__test_listeners: 69 | try: 70 | test_listener.on_test_class_start(test_class) 71 | except Exception: 72 | from .plogger import pconsole 73 | pconsole.write_line("The test listener %s.%s raised exception:\n%s" 74 | % (test_listener.__class__.__module__, test_listener.__class__.__name__, traceback.format_exc())) 75 | 76 | def on_test_class_finish(self, test_class: TestClass): 77 | for test_listener in self.__test_listeners: 78 | try: 79 | test_listener.on_test_class_finish(test_class) 80 | except Exception: 81 | from .plogger import pconsole 82 | pconsole.write_line("The test listener %s.%s raised exception:\n%s" 83 | % 
(test_listener.__class__.__module__, test_listener.__class__.__name__, traceback.format_exc())) 84 | if self.__outer_test_listener: 85 | self.__outer_test_listener.on_test_class_finish(test_class) 86 | 87 | def on_test_group_start(self, test_group: TestGroup): 88 | if self.__outer_test_listener: 89 | self.__outer_test_listener.on_test_group_start(test_group) 90 | for test_listener in self.__test_listeners: 91 | try: 92 | test_listener.on_test_group_start(test_group) 93 | except Exception: 94 | from .plogger import pconsole 95 | pconsole.write_line("The test listener %s.%s raised exception:\n%s" 96 | % (test_listener.__class__.__module__, test_listener.__class__.__name__, traceback.format_exc())) 97 | 98 | def on_test_group_finish(self, test_group: TestGroup): 99 | for test_listener in self.__test_listeners: 100 | try: 101 | test_listener.on_test_group_finish(test_group) 102 | except Exception: 103 | from .plogger import pconsole 104 | pconsole.write_line("The test listener %s.%s raised exception:\n%s" 105 | % (test_listener.__class__.__module__, test_listener.__class__.__name__, traceback.format_exc())) 106 | if self.__outer_test_listener: 107 | self.__outer_test_listener.on_test_group_finish(test_group) 108 | 109 | def on_test_case_start(self, test_case: TestCase): 110 | if self.__outer_test_listener: 111 | self.__outer_test_listener.on_test_case_start(test_case) 112 | for test_listener in self.__test_listeners: 113 | try: 114 | test_listener.on_test_case_start(test_case) 115 | except Exception: 116 | from .plogger import pconsole 117 | pconsole.write_line("The test listener %s.%s raised exception:\n%s" 118 | % (test_listener.__class__.__module__, test_listener.__class__.__name__, traceback.format_exc())) 119 | 120 | def on_test_case_finish(self, test_case: TestCase): 121 | for test_listener in self.__test_listeners: 122 | try: 123 | test_listener.on_test_case_finish(test_case) 124 | except Exception: 125 | from .plogger import pconsole 126 | pconsole.write_line("The test listener %s.%s raised exception:\n%s" 127 | % (test_listener.__class__.__module__, test_listener.__class__.__name__, traceback.format_exc())) 128 | if self.__outer_test_listener: 129 | self.__outer_test_listener.on_test_case_finish(test_case) 130 | 131 | 132 | test_listeners = TestListenerGroup() 133 | -------------------------------------------------------------------------------- /ptest/plogger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | import uuid 5 | from datetime import datetime 6 | from typing import List 7 | 8 | from ptest.util import escape_filename 9 | 10 | from . 
import config 11 | 12 | 13 | class PConsole: 14 | def __init__(self, out): 15 | self.out = out 16 | 17 | def write(self, msg: str): 18 | self.out.write(str(msg)) 19 | 20 | def write_line(self, msg: str): 21 | self.out.write(str(msg) + "\n") 22 | 23 | 24 | pconsole = PConsole(sys.stdout) 25 | pconsole_err = PConsole(sys.stderr) 26 | 27 | 28 | class PReporter: 29 | def __init__(self): 30 | pass 31 | 32 | def debug(self, msg: str, screenshot: bool = False, images: List[bytes] = []): 33 | self.__log(logging.DEBUG, msg, screenshot, images) 34 | 35 | def info(self, msg: str, screenshot: bool = False, images: List[bytes] = []): 36 | self.__log(logging.INFO, msg, screenshot, images) 37 | 38 | def warn(self, msg: str, screenshot: bool = False, images: List[bytes] = []): 39 | self.__log(logging.WARN, msg, screenshot, images) 40 | 41 | def error(self, msg: str, screenshot: bool = False, images: List[bytes] = []): 42 | self.__log(logging.ERROR, msg, screenshot, images) 43 | 44 | def critical(self, msg: str, screenshot: bool = False, images: List[bytes] = []): 45 | self.__log(logging.CRITICAL, msg, screenshot, images) 46 | 47 | def __log(self, level: int, msg: str, screenshot: bool = False, images: List[bytes] = []): 48 | from . import test_executor, screen_capturer 49 | 50 | try: 51 | running_test_fixture = test_executor.current_executor().get_property("running_test_fixture") 52 | except AttributeError as e: 53 | pconsole.write_line("[%s] %s" % (logging.getLevelName(level), msg)) 54 | else: 55 | log = {"time": str(datetime.now()), "level": logging.getLevelName(level).lower(), "message": str(msg)} 56 | log_hash_code = str(uuid.uuid4()).split("-")[0] 57 | path_prefix = "%s-%s" % (escape_filename(running_test_fixture.full_name), log_hash_code) 58 | if screenshot and not config.get_option("disable_screenshot"): 59 | log["screenshots"] = screen_capturer.take_screenshots(path_prefix) 60 | if images: 61 | image_dicts = [] 62 | for index, image in enumerate(images): 63 | image_dict = { 64 | "path": "%s-image-%s.png" % (path_prefix, index + 1) 65 | } 66 | with open(os.path.join(config.get_option("temp"), image_dict["path"]), mode="wb") as f: 67 | f.write(image) 68 | image_dicts.append(image_dict) 69 | log["images"] = image_dicts 70 | 71 | running_test_fixture.logs.append(log) 72 | 73 | if config.get_option("verbose"): 74 | # output to pconsole 75 | message = "[%s] %s" % (running_test_fixture.full_name, msg) 76 | pconsole.write_line(message) 77 | 78 | 79 | preporter = PReporter() 80 | -------------------------------------------------------------------------------- /ptest/reporter.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import platform 4 | import shutil 5 | import traceback 6 | from datetime import datetime 7 | from xml.dom import minidom 8 | 9 | from typing import List 10 | 11 | from . 
import config, __version__ 12 | from .enumeration import TestCaseStatus 13 | from .plogger import pconsole 14 | from .test_suite import default_test_suite, TestSuite, TestGroup, TestClass, TestCase, TestFixture 15 | from .util import make_dirs, remove_tree, escape_html 16 | 17 | current_dir = os.path.dirname(os.path.abspath(__file__)) 18 | 19 | 20 | def generate_junit_xml(xml_file_path: str): 21 | pconsole.write_line("Generating junit report...") 22 | doc = minidom.Document() 23 | test_suite_ele = doc.createElement("testsuite") 24 | doc.appendChild(test_suite_ele) 25 | status_count = default_test_suite.status_count 26 | test_suite_ele.setAttribute("name", default_test_suite.name) 27 | test_suite_ele.setAttribute("tests", str(status_count.total)) 28 | test_suite_ele.setAttribute("failures", str(status_count.failed)) 29 | test_suite_ele.setAttribute("skips", str(status_count.skipped)) 30 | test_suite_ele.setAttribute("errors", "0") 31 | test_suite_ele.setAttribute("time", "%.3f" % default_test_suite.elapsed_time) 32 | test_suite_ele.setAttribute("timestamp", str(default_test_suite.start_time)) 33 | 34 | for test_case in default_test_suite.test_cases: 35 | test_case_ele = doc.createElement("testcase") 36 | test_suite_ele.appendChild(test_case_ele) 37 | test_case_ele.setAttribute("name", test_case.name) 38 | test_case_ele.setAttribute("classname", test_case.test_class.full_name) 39 | test_case_ele.setAttribute("time", "%.3f" % test_case.elapsed_time) 40 | if test_case.status == TestCaseStatus.SKIPPED: 41 | skipped_ele = doc.createElement("skipped") 42 | test_case_ele.appendChild(skipped_ele) 43 | skipped_ele.setAttribute("message", test_case.skip_message) 44 | elif test_case.status == TestCaseStatus.FAILED: 45 | failure_ele = doc.createElement("failure") 46 | test_case_ele.appendChild(failure_ele) 47 | failure_ele.setAttribute("message", test_case.failure_message) 48 | failure_ele.setAttribute("type", test_case.failure_type) 49 | failure_ele.appendChild(doc.createTextNode(test_case.stack_trace)) 50 | 51 | if os.path.exists(xml_file_path): 52 | pconsole.write_line("Cleaning old junit report...") 53 | os.remove(xml_file_path) 54 | else: 55 | make_dirs(os.path.dirname(xml_file_path)) 56 | 57 | f = open(xml_file_path, mode="w", encoding="utf-8") 58 | try: 59 | doc.writexml(f, "\t", "\t", "\n", "utf-8") 60 | pconsole.write_line("junit report is generated at %s" % xml_file_path) 61 | except Exception as e: 62 | pconsole.write_line("Failed to generate junit report.\n%s" % traceback.format_exc()) 63 | finally: 64 | f.close() 65 | 66 | 67 | def generate_html_report(report_dir: str): 68 | pconsole.write_line("Generating html report...") 69 | 70 | if os.path.exists(report_dir): 71 | pconsole.write_line("Cleaning old html report...") 72 | remove_tree(report_dir, remove_root=False) 73 | else: 74 | make_dirs(report_dir) 75 | 76 | html_template_dir = os.path.join(current_dir, "htmltemplate") 77 | 78 | # copy js and css files to report dir 79 | for fn in os.listdir(html_template_dir): 80 | file_full_path = os.path.join(html_template_dir, fn) 81 | _, file_ext = os.path.splitext(fn) 82 | if os.path.isfile(file_full_path) and file_ext in [".js", ".css"]: 83 | shutil.copy(file_full_path, report_dir) 84 | 85 | # copy screenshots from temp dir to report dir 86 | temp_dir = config.get_option("temp") 87 | for fn in os.listdir(temp_dir): 88 | file_full_path = os.path.join(temp_dir, fn) 89 | _, file_ext = os.path.splitext(fn) 90 | if os.path.isfile(file_full_path) and file_ext == ".png": 91 | 
shutil.copy(file_full_path, report_dir) 92 | 93 | with open(os.path.join(html_template_dir, "index.html"), encoding="utf-8") as f: 94 | index_page_template = f.read() 95 | 96 | current_time = datetime.now() 97 | system_info = "%s / Python %s / %s" % (platform.node(), platform.python_version(), platform.platform()) 98 | test_suite_json = json.dumps(_get_test_suite_dict(default_test_suite)) 99 | index_page_content = index_page_template.format(version=__version__, current_time=current_time, system_info=system_info, 100 | test_suite_json=test_suite_json) 101 | 102 | f = open(os.path.join(report_dir, "index.html"), mode="w", encoding="utf-8") 103 | try: 104 | f.write(index_page_content) 105 | pconsole.write_line("html report is generated at %s" % os.path.abspath(report_dir)) 106 | except Exception as e: 107 | pconsole.write_line("Failed to generate html report.\n%s" % traceback.format_exc()) 108 | finally: 109 | f.close() 110 | 111 | 112 | def _get_test_suite_dict(test_suite: TestSuite): 113 | test_suite_dict = { 114 | "name": escape_html(test_suite.name), 115 | "fullName": escape_html(test_suite.full_name), 116 | "type": "suite", 117 | "testModules": _get_test_module_dicts(test_suite.test_classes), 118 | "startTime": str(test_suite.start_time), 119 | "endTime": str(test_suite.end_time), 120 | "elapsedTime": test_suite.elapsed_time, 121 | "total": test_suite.status_count.total, 122 | "passed": test_suite.status_count.passed, 123 | "failed": test_suite.status_count.failed, 124 | "skipped": test_suite.status_count.skipped 125 | } 126 | if not test_suite.before_suite.is_empty: 127 | test_suite_dict["beforeSuite"] = _get_test_fixture_dict(test_suite.before_suite) 128 | if not test_suite.after_suite.is_empty: 129 | test_suite_dict["afterSuite"] = _get_test_fixture_dict(test_suite.after_suite) 130 | return test_suite_dict 131 | 132 | 133 | def _get_test_module_dicts(test_classes: List[TestClass]): 134 | root_test_module_dict = { 135 | "name": "root", 136 | "testModules": [] 137 | } 138 | 139 | def get_or_new_module(modules, module_full_name): 140 | for module in modules: 141 | if module_full_name == module["fullName"]: 142 | return module 143 | new_module = { 144 | "name": module_full_name.split(".")[-1], 145 | "fullName": module_full_name, 146 | "type": "module", 147 | "testModules": [], 148 | "testClasses": [], 149 | "total": 0, 150 | "passed": 0, 151 | "failed": 0, 152 | "skipped": 0 153 | } 154 | modules.append(new_module) 155 | modules.sort(key=lambda m: m["name"]) 156 | return new_module 157 | 158 | for test_class_dict in [_get_test_class_dict(test_class) for test_class in test_classes]: 159 | current_test_module_dict = root_test_module_dict 160 | splitted_full_name = test_class_dict["fullName"].split(".")[:-1] 161 | for i in range(len(splitted_full_name)): 162 | test_module_dict = get_or_new_module(current_test_module_dict["testModules"], ".".join(splitted_full_name[:i + 1])) 163 | test_module_dict["total"] += test_class_dict["total"] 164 | test_module_dict["passed"] += test_class_dict["passed"] 165 | test_module_dict["failed"] += test_class_dict["failed"] 166 | test_module_dict["skipped"] += test_class_dict["skipped"] 167 | current_test_module_dict = test_module_dict 168 | current_test_module_dict["testClasses"].append(test_class_dict) 169 | current_test_module_dict["testClasses"].sort(key=lambda c: c["name"]) 170 | return root_test_module_dict["testModules"] 171 | 172 | 173 | def _get_test_class_dict(test_class: TestClass): 174 | test_class_dict = { 175 | "name": test_class.name, 176 | 
"fullName": test_class.full_name, 177 | "type": "class", 178 | "runMode": test_class.run_mode.value, 179 | "runGroup": test_class.run_group, 180 | "description": test_class.description, 181 | "startTime": str(test_class.start_time), 182 | "endTime": str(test_class.end_time), 183 | "elapsedTime": test_class.elapsed_time, 184 | "total": test_class.status_count.total, 185 | "passed": test_class.status_count.passed, 186 | "failed": test_class.status_count.failed, 187 | "skipped": test_class.status_count.skipped 188 | } 189 | if test_class.is_group_feature_used: 190 | test_class_dict["testGroups"] = sorted([_get_test_group_dict(test_group) for test_group in test_class.test_groups], 191 | key=lambda g: g["name"]) 192 | else: 193 | test_class_dict["testCases"] = sorted([_get_test_case_dict(test_case) for test_case in test_class.test_cases], 194 | key=lambda c: c["name"]) 195 | 196 | if not test_class.before_class.is_empty: 197 | test_class_dict["beforeClass"] = _get_test_fixture_dict(test_class.before_class) 198 | if not test_class.after_class.is_empty: 199 | test_class_dict["afterClass"] = _get_test_fixture_dict(test_class.after_class) 200 | return test_class_dict 201 | 202 | 203 | def _get_test_group_dict(test_group: TestGroup): 204 | test_group_dict = { 205 | "name": escape_html(test_group.name), 206 | "fullName": escape_html(test_group.full_name), 207 | "type": "group", 208 | "testCases": sorted([_get_test_case_dict(test_case) for test_case in test_group.test_cases], key=lambda c: c["name"]), 209 | "startTime": str(test_group.start_time), 210 | "endTime": str(test_group.end_time), 211 | "elapsedTime": test_group.elapsed_time, 212 | "total": test_group.status_count.total, 213 | "passed": test_group.status_count.passed, 214 | "failed": test_group.status_count.failed, 215 | "skipped": test_group.status_count.skipped 216 | } 217 | if not test_group.before_group.is_empty: 218 | test_group_dict["beforeGroup"] = _get_test_fixture_dict(test_group.before_group) 219 | if not test_group.after_group.is_empty: 220 | test_group_dict["afterGroup"] = _get_test_fixture_dict(test_group.after_group) 221 | return test_group_dict 222 | 223 | 224 | def _get_test_case_dict(test_case: TestCase): 225 | test_case_dict = { 226 | "name": escape_html(test_case.name), 227 | "fullName": escape_html(test_case.full_name), 228 | "type": "case", 229 | "startTime": str(test_case.start_time), 230 | "endTime": str(test_case.end_time), 231 | "elapsedTime": test_case.elapsed_time, 232 | "status": test_case.status.value, 233 | "tags": test_case.tags, 234 | "group": test_case.group, 235 | "description": test_case.description, 236 | "test": _get_test_fixture_dict(test_case.test), 237 | } 238 | if not test_case.before_method.is_empty: 239 | test_case_dict["beforeMethod"] = _get_test_fixture_dict(test_case.before_method) 240 | if not test_case.after_method.is_empty: 241 | test_case_dict["afterMethod"] = _get_test_fixture_dict(test_case.after_method) 242 | return test_case_dict 243 | 244 | 245 | def _get_test_fixture_dict(test_fixture: TestFixture): 246 | test_fixture_dict = { 247 | "name": escape_html(test_fixture.name), 248 | "fullName": escape_html(test_fixture.full_name), 249 | "type": "fixture", 250 | "status": test_fixture.status.value, 251 | "fixtureType": test_fixture.fixture_type.value, 252 | "startTime": str(test_fixture.start_time), 253 | "endTime": str(test_fixture.end_time), 254 | "elapsedTime": test_fixture.elapsed_time, 255 | "logs": escape_html(test_fixture.logs), 256 | "description": test_fixture.description 257 | } 258 
| return test_fixture_dict 259 | -------------------------------------------------------------------------------- /ptest/screen_capturer.py: -------------------------------------------------------------------------------- 1 | import os 2 | from io import BytesIO 3 | 4 | from . import config 5 | from .exception import ScreenshotError 6 | 7 | # ---------------------------------------------------------------------- 8 | # -------- [ cross-platform multiple screenshots module ] -------------- 9 | # ---------------------------------------------------------------------- 10 | ''' 11 | Copyright (c) 2013-2015, Mickael 'Tiger-222' Schoentgen 12 | 13 | Permission to use, copy, modify, and distribute this software and its 14 | documentation for any purpose and without fee or royalty is hereby 15 | granted, provided that the above copyright notice appear in all copies 16 | and that both that copyright notice and this permission notice appear 17 | in supporting documentation or portions thereof, including 18 | modifications, that you make. 19 | ''' 20 | from struct import pack 21 | from platform import system 22 | from zlib import compress, crc32 23 | 24 | if system() == 'Darwin': 25 | try: 26 | import Quartz 27 | from LaunchServices import kUTTypePNG 28 | pyobjc_installed = True 29 | except ImportError: 30 | pyobjc_installed = False 31 | elif system() == 'Windows': 32 | from ctypes import c_void_p, create_string_buffer, sizeof, \ 33 | windll, Structure, POINTER, WINFUNCTYPE 34 | from ctypes.wintypes import BOOL, DOUBLE, DWORD, HBITMAP, HDC, \ 35 | HGDIOBJ, HWND, INT, LPARAM, LONG, RECT, UINT, WORD 36 | 37 | 38 | class BITMAPINFOHEADER(Structure): 39 | _fields_ = [('biSize', DWORD), ('biWidth', LONG), ('biHeight', LONG), 40 | ('biPlanes', WORD), ('biBitCount', WORD), 41 | ('biCompression', DWORD), ('biSizeImage', DWORD), 42 | ('biXPelsPerMeter', LONG), ('biYPelsPerMeter', LONG), 43 | ('biClrUsed', DWORD), ('biClrImportant', DWORD)] 44 | 45 | 46 | class BITMAPINFO(Structure): 47 | _fields_ = [('bmiHeader', BITMAPINFOHEADER), ('bmiColors', DWORD * 3)] 48 | 49 | 50 | class MSS(object): 51 | def enum_display_monitors(self, screen=0): 52 | raise NotImplementedError('MSS: subclasses need to implement this!') 53 | 54 | def get_pixels(self, monitor): 55 | raise NotImplementedError('MSS: subclasses need to implement this!') 56 | 57 | def save(self, output, screen=0): 58 | for i, monitor in enumerate(self.enum_display_monitors(screen)): 59 | if screen <= 0 or (screen > 0 and i + 1 == screen): 60 | self.save_img(data=self.get_pixels(monitor), 61 | width=monitor[b'width'], 62 | height=monitor[b'height'], 63 | output=output) 64 | 65 | def save_img(self, data, width, height, output): 66 | zcrc32 = crc32 67 | zcompr = compress 68 | len_sl = width * 3 69 | scanlines = b''.join( 70 | [pack(b'>B', 0) + data[y * len_sl:y * len_sl + len_sl] 71 | for y in range(height)]) 72 | 73 | magic = pack(b'>8B', 137, 80, 78, 71, 13, 10, 26, 10) 74 | 75 | # Header: size, marker, data, CRC32 76 | ihdr = [b'', b'IHDR', b'', b''] 77 | ihdr[2] = pack(b'>2I5B', width, height, 8, 2, 0, 0, 0) 78 | ihdr[3] = pack(b'>I', zcrc32(b''.join(ihdr[1:3])) & 0xffffffff) 79 | ihdr[0] = pack(b'>I', len(ihdr[2])) 80 | 81 | # Data: size, marker, data, CRC32 82 | idat = [b'', b'IDAT', b'', b''] 83 | idat[2] = zcompr(scanlines, 9) 84 | idat[3] = pack(b'>I', zcrc32(b''.join(idat[1:3])) & 0xffffffff) 85 | idat[0] = pack(b'>I', len(idat[2])) 86 | 87 | # Footer: size, marker, None, CRC32 88 | iend = [b'', b'IEND', b'', b''] 89 | iend[3] = pack(b'>I', 
zcrc32(iend[1]) & 0xffffffff) 90 | iend[0] = pack(b'>I', len(iend[2])) 91 | 92 | try: 93 | output.write(magic + b''.join(ihdr) + b''.join(idat) + b''.join(iend)) 94 | except: 95 | err = 'MSS: error writing data to "{0}".'.format(output) 96 | raise ScreenshotError(err) 97 | 98 | 99 | class MSSMac(MSS): 100 | def enum_display_monitors(self, screen=0): 101 | if screen == -1: 102 | rect = Quartz.CGRectInfinite 103 | yield ({ 104 | b'left': int(rect.origin.x), 105 | b'top': int(rect.origin.y), 106 | b'width': int(rect.size.width), 107 | b'height': int(rect.size.height) 108 | }) 109 | else: 110 | max_displays = 32 # Could be augmented, if needed ... 111 | rotations = {0.0: 'normal', 90.0: 'right', -90.0: 'left'} 112 | _, ids, _ = Quartz.CGGetActiveDisplayList(max_displays, None, None) 113 | for display in ids: 114 | rect = Quartz.CGRectStandardize(Quartz.CGDisplayBounds(display)) 115 | left, top = rect.origin.x, rect.origin.y 116 | width, height = rect.size.width, rect.size.height 117 | rot = Quartz.CGDisplayRotation(display) 118 | if rotations[rot] in ['left', 'right']: 119 | width, height = height, width 120 | yield ({ 121 | b'left': int(left), 122 | b'top': int(top), 123 | b'width': int(width), 124 | b'height': int(height) 125 | }) 126 | 127 | def get_pixels(self, monitor): 128 | width, height = monitor[b'width'], monitor[b'height'] 129 | left, top = monitor[b'left'], monitor[b'top'] 130 | rect = Quartz.CGRect((left, top), (width, height)) 131 | options = Quartz.kCGWindowListOptionOnScreenOnly 132 | winid = Quartz.kCGNullWindowID 133 | default = Quartz.kCGWindowImageDefault 134 | self.image = Quartz.CGWindowListCreateImage(rect, options, winid, default) 135 | if not self.image: 136 | raise ScreenshotError('MSS: CGWindowListCreateImage() failed.') 137 | return self.image 138 | 139 | def save_img(self, data, width, height, output): 140 | cf_data = Quartz.CFDataCreateMutable(Quartz.kCFAllocatorDefault, 0) 141 | dest = Quartz.CGImageDestinationCreateWithData(cf_data, kUTTypePNG, 1, None) 142 | if not dest: 143 | err = 'MSS: CGImageDestinationCreateWithURL() failed.' 
144 | raise ScreenshotError(err) 145 | 146 | Quartz.CGImageDestinationAddImage(dest, data, None) 147 | if not Quartz.CGImageDestinationFinalize(dest): 148 | raise ScreenshotError('MSS: CGImageDestinationFinalize() failed.') 149 | 150 | cf_data_bytes = Quartz.CFDataGetBytes(cf_data, Quartz.CFRangeMake(0, Quartz.CFDataGetLength(cf_data)), None) 151 | output.write(cf_data_bytes) 152 | 153 | 154 | class MSSWindows(MSS): 155 | def __init__(self): 156 | self._set_argtypes() 157 | self._set_restypes() 158 | 159 | def _set_argtypes(self): 160 | self.MONITORENUMPROC = WINFUNCTYPE(INT, DWORD, DWORD, POINTER(RECT), 161 | DOUBLE) 162 | windll.user32.GetSystemMetrics.argtypes = [INT] 163 | windll.user32.EnumDisplayMonitors.argtypes = [HDC, c_void_p, 164 | self.MONITORENUMPROC, 165 | LPARAM] 166 | windll.user32.GetWindowDC.argtypes = [HWND] 167 | windll.gdi32.CreateCompatibleDC.argtypes = [HDC] 168 | windll.gdi32.CreateCompatibleBitmap.argtypes = [HDC, INT, INT] 169 | windll.gdi32.SelectObject.argtypes = [HDC, HGDIOBJ] 170 | windll.gdi32.BitBlt.argtypes = [HDC, INT, INT, INT, INT, HDC, INT, INT, 171 | DWORD] 172 | windll.gdi32.DeleteObject.argtypes = [HGDIOBJ] 173 | windll.gdi32.GetDIBits.argtypes = [HDC, HBITMAP, UINT, UINT, c_void_p, 174 | POINTER(BITMAPINFO), UINT] 175 | 176 | def _set_restypes(self): 177 | windll.user32.GetSystemMetrics.restype = INT # ctypes expects the singular "restype" attribute 178 | windll.user32.EnumDisplayMonitors.restype = BOOL 179 | windll.user32.GetWindowDC.restype = HDC 180 | windll.gdi32.CreateCompatibleDC.restype = HDC 181 | windll.gdi32.CreateCompatibleBitmap.restype = HBITMAP 182 | windll.gdi32.SelectObject.restype = HGDIOBJ 183 | windll.gdi32.BitBlt.restype = BOOL 184 | windll.gdi32.GetDIBits.restype = INT 185 | windll.gdi32.DeleteObject.restype = BOOL 186 | 187 | def enum_display_monitors(self, screen=-1): 188 | if screen == -1: 189 | SM_XVIRTUALSCREEN, SM_YVIRTUALSCREEN = 76, 77 190 | SM_CXVIRTUALSCREEN, SM_CYVIRTUALSCREEN = 78, 79 191 | left = windll.user32.GetSystemMetrics(SM_XVIRTUALSCREEN) 192 | right = windll.user32.GetSystemMetrics(SM_CXVIRTUALSCREEN) 193 | top = windll.user32.GetSystemMetrics(SM_YVIRTUALSCREEN) 194 | bottom = windll.user32.GetSystemMetrics(SM_CYVIRTUALSCREEN) 195 | yield ({ 196 | b'left': int(left), 197 | b'top': int(top), 198 | b'width': int(right), 199 | b'height': int(bottom) 200 | }) 201 | else: 202 | 203 | def _callback(monitor, dc, rect, data): 204 | rct = rect.contents 205 | monitors.append({ 206 | b'left': int(rct.left), 207 | b'top': int(rct.top), 208 | b'width': int(rct.right - rct.left), 209 | b'height': int(rct.bottom - rct.top) 210 | }) 211 | return 1 212 | 213 | monitors = [] 214 | callback = self.MONITORENUMPROC(_callback) 215 | windll.user32.EnumDisplayMonitors(0, 0, callback, 0) 216 | for mon in monitors: 217 | yield mon 218 | 219 | def get_pixels(self, monitor): 220 | width, height = monitor[b'width'], monitor[b'height'] 221 | left, top = monitor[b'left'], monitor[b'top'] 222 | SRCCOPY = 0xCC0020 223 | DIB_RGB_COLORS = BI_RGB = 0 224 | srcdc = memdc = bmp = None 225 | 226 | try: 227 | bmi = BITMAPINFO() 228 | bmi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER) 229 | bmi.bmiHeader.biWidth = width 230 | bmi.bmiHeader.biHeight = -height # Why minus? 
A negative height requests a top-down DIB (origin at the upper left) 231 | bmi.bmiHeader.biPlanes = 1 # Always 1 232 | bmi.bmiHeader.biBitCount = 24 233 | bmi.bmiHeader.biCompression = BI_RGB 234 | buffer_len = height * width * 3 235 | self.image = create_string_buffer(buffer_len) 236 | srcdc = windll.user32.GetWindowDC(0) 237 | memdc = windll.gdi32.CreateCompatibleDC(srcdc) 238 | bmp = windll.gdi32.CreateCompatibleBitmap(srcdc, width, height) 239 | windll.gdi32.SelectObject(memdc, bmp) 240 | windll.gdi32.BitBlt(memdc, 0, 0, width, height, srcdc, left, top, 241 | SRCCOPY) 242 | bits = windll.gdi32.GetDIBits(memdc, bmp, 0, height, self.image, 243 | bmi, DIB_RGB_COLORS) 244 | if bits != height: 245 | raise ScreenshotError('MSS: GetDIBits() failed.') 246 | finally: 247 | # Clean up: release the window DC, delete the memory DC and the bitmap 248 | if srcdc: 249 | windll.user32.ReleaseDC(0, srcdc) 250 | if memdc: 251 | windll.gdi32.DeleteDC(memdc) 252 | if bmp: 253 | windll.gdi32.DeleteObject(bmp) 254 | 255 | # Replace pixel values: BGR to RGB 256 | self.image[2:buffer_len:3], self.image[0:buffer_len:3] = \ 257 | self.image[0:buffer_len:3], self.image[2:buffer_len:3] 258 | return self.image 259 | 260 | 261 | def mss(*args, **kwargs): 262 | mss_class = { 263 | 'Darwin': MSSMac, 264 | 'Windows': MSSWindows 265 | }[system()] 266 | 267 | return mss_class(*args, **kwargs) 268 | 269 | 270 | # ---------------------------------------------------------------------- 271 | # ----------- [ take screenshot for desktop & webdriver ] ------------- 272 | # ---------------------------------------------------------------------- 273 | def take_screenshots(path_prefix: str): 274 | screenshots = [] 275 | 276 | screenshot = { 277 | "source": "Desktop", 278 | "path": "%s-screenshot-0.png" % path_prefix 279 | } 280 | 281 | if system() == 'Darwin' and not pyobjc_installed: 282 | screenshot["error"] = "The package pyobjc is required to take desktop screenshots, please install it." 283 | else: 284 | try: 285 | output = BytesIO() 286 | mss().save(output=output, screen=-1) # -1 means all monitors 287 | value = output.getvalue() 288 | with open(os.path.join(config.get_option("temp"), screenshot["path"]), mode="wb") as f: 289 | f.write(value) 290 | except Exception as e: 291 | screenshot["error"] = str(e).strip() or "\n".join([str(arg) for arg in e.args]) 292 | 293 | screenshots.append(screenshot) 294 | 295 | from . 
import test_executor 296 | web_drivers = test_executor.current_executor().get_property("web_drivers") 297 | 298 | if web_drivers: 299 | for index, web_driver in enumerate(web_drivers): 300 | screenshot = { 301 | "source": "Web Driver", 302 | "path": "%s-screenshot-%s.png" % (path_prefix, index + 1) 303 | } 304 | 305 | try: 306 | screenshot["alert"] = web_driver.switch_to.alert.text 307 | except Exception as e: 308 | pass 309 | 310 | while True: 311 | try: 312 | web_driver.switch_to.alert.dismiss() 313 | except Exception as e: 314 | break 315 | 316 | try: 317 | screenshot["url"] = web_driver.current_url 318 | except Exception as e: 319 | pass 320 | 321 | try: 322 | screenshot["title"] = web_driver.title 323 | except Exception as e: 324 | pass 325 | 326 | try: 327 | value = web_driver.get_screenshot_as_png() 328 | with open(os.path.join(config.get_option("temp"), screenshot["path"]), mode="wb") as f: 329 | f.write(value) 330 | except Exception as e: 331 | screenshot["error"] = str(e).strip() or "\n".join([str(arg) for arg in e.args]) 332 | 333 | screenshots.append(screenshot) 334 | 335 | return screenshots -------------------------------------------------------------------------------- /ptest/test_executor.py: -------------------------------------------------------------------------------- 1 | import threading 2 | import time 3 | import traceback 4 | from copy import copy 5 | from datetime import datetime 6 | from functools import cmp_to_key 7 | 8 | from typing import List 9 | 10 | from .enumeration import TestCaseStatus, TestClassRunMode, TestFixtureStatus 11 | from .plistener import test_listeners 12 | from .plogger import preporter, pconsole, pconsole_err 13 | from .test_suite import AfterSuite, BeforeSuite, AfterClass, BeforeClass, BeforeGroup, AfterGroup, AfterMethod, BeforeMethod, Test, \ 14 | TestSuite, TestGroup, TestClass, TestCase, TestFixture 15 | from .util import call_function, kill_thread, format_thread_stack 16 | 17 | 18 | class TestExecutor(threading.Thread): 19 | def __init__(self, parent_test_executor: "TestExecutor", workers: int = 0): 20 | threading.Thread.__init__(self) 21 | self.parent_test_executor = parent_test_executor 22 | self.__properties = {} 23 | if self.parent_test_executor: 24 | for key, value in self.parent_test_executor.get_properties().items(): 25 | if isinstance(value, (list, tuple, set, dict)): 26 | self.__properties[key] = copy(value) 27 | else: 28 | self.__properties[key] = value 29 | self.workers = workers 30 | self.lock = threading.RLock() 31 | if self.workers == 0: 32 | self.acquire_worker() 33 | 34 | def _run(self): 35 | pass 36 | 37 | def run(self): 38 | try: 39 | self._run() 40 | finally: 41 | self.release_worker() 42 | 43 | def start_and_join(self): 44 | self.start() 45 | self.join() 46 | 47 | def update_properties(self, properties): 48 | self.__properties.update(properties) 49 | 50 | def clear_properties(self): 51 | self.__properties.clear() 52 | 53 | def get_property(self, key): 54 | try: 55 | return self.__properties[key] 56 | except KeyError: 57 | return None 58 | 59 | def get_properties(self): 60 | return self.__properties 61 | 62 | def allocate_worker(self, child_test_executor: "TestExecutor"): 63 | with self.lock: 64 | if self.workers > 0: 65 | self.workers -= 1 66 | child_test_executor.workers += 1 67 | return True 68 | else: 69 | return False 70 | 71 | def apply_worker(self): 72 | if self.parent_test_executor: 73 | with self.lock: 74 | if self.parent_test_executor.allocate_worker(self): 75 | return True 76 | else: 77 | if 
self.parent_test_executor.apply_worker(): 78 | return self.parent_test_executor.allocate_worker(self) 79 | else: 80 | return False 81 | else: 82 | with self.lock: 83 | return self.workers > 0 84 | 85 | def acquire_worker(self): 86 | while True: 87 | if self.apply_worker(): 88 | return 89 | else: 90 | time.sleep(1) 91 | 92 | def release_worker(self): 93 | if self.parent_test_executor: 94 | with self.parent_test_executor.lock: 95 | self.parent_test_executor.workers += self.workers 96 | self.workers = 0 97 | else: 98 | pass 99 | 100 | 101 | class TestSuiteExecutor(TestExecutor): 102 | def __init__(self, test_suite: TestSuite, workers: int): 103 | TestExecutor.__init__(self, None, workers) 104 | self.test_suite = test_suite 105 | 106 | def _run(self): 107 | before_suite_executor = TestFixtureExecutor(self, self.test_suite.before_suite) 108 | test_listeners.on_test_suite_start(self.test_suite) 109 | self.test_suite.start_time = datetime.now() 110 | before_suite_executor.start_and_join() 111 | 112 | test_class_run_group_executors = [] 113 | 114 | for test_class_run_group in self.test_suite.test_class_run_groups: 115 | test_class_run_group_executor = TestClassRunGroupExecutor(self, test_class_run_group) 116 | test_class_run_group_executors.append(test_class_run_group_executor) 117 | test_class_run_group_executor.start() 118 | 119 | for executor in test_class_run_group_executors: 120 | executor.join() 121 | 122 | after_suite_executor = TestFixtureExecutor(self, self.test_suite.after_suite) 123 | after_suite_executor.start_and_join() 124 | self.test_suite.end_time = datetime.now() 125 | test_listeners.on_test_suite_finish(self.test_suite) 126 | 127 | 128 | class TestClassRunGroupExecutor(TestExecutor): 129 | def __init__(self, test_suite_executor: TestSuiteExecutor, test_class_run_group: List[TestClass]): 130 | TestExecutor.__init__(self, test_suite_executor) 131 | self.test_class_run_group = test_class_run_group 132 | 133 | def _run(self): 134 | for test_class in self.test_class_run_group: 135 | TestClassExecutor(self, test_class).start_and_join() 136 | 137 | 138 | class TestClassExecutor(TestExecutor): 139 | def __init__(self, test_class_run_group_executor: TestClassRunGroupExecutor, test_class: TestClass): 140 | TestExecutor.__init__(self, test_class_run_group_executor) 141 | self.test_class = test_class 142 | 143 | def _run(self): 144 | before_class_executor = TestFixtureExecutor(self, self.test_class.before_class) 145 | test_listeners.on_test_class_start(self.test_class) 146 | self.test_class.start_time = datetime.now() 147 | before_class_executor.start_and_join() 148 | 149 | if self.test_class.run_mode == TestClassRunMode.SingleLine: 150 | for test_group in self.test_class.test_groups: 151 | TestGroupExecutor(self, test_group).start_and_join() 152 | else: 153 | test_group_executors = [] 154 | 155 | for test_group in self.test_class.test_groups: 156 | test_group_executor = TestGroupExecutor(self, test_group) 157 | test_group_executors.append(test_group_executor) 158 | test_group_executor.start() 159 | 160 | for executor in test_group_executors: 161 | executor.join() 162 | 163 | after_class_executor = TestFixtureExecutor(self, self.test_class.after_class) 164 | after_class_executor.start_and_join() 165 | self.test_class.end_time = datetime.now() 166 | test_listeners.on_test_class_finish(self.test_class) 167 | 168 | 169 | class TestGroupExecutor(TestExecutor): 170 | def __init__(self, test_class_executor: TestClassExecutor, test_group: TestGroup): 171 | TestExecutor.__init__(self, 
test_class_executor) 172 | self.test_group = test_group 173 | 174 | def _run(self): 175 | before_group_executor = TestFixtureExecutor(self, self.test_group.before_group) 176 | test_listeners.on_test_group_start(self.test_group) 177 | self.test_group.start_time = datetime.now() 178 | before_group_executor.start_and_join() 179 | 180 | if self.test_group.test_class.run_mode == TestClassRunMode.SingleLine: 181 | for test_case in self.test_group.test_cases: 182 | TestCaseExecutor(self, test_case).start_and_join() 183 | else: 184 | test_case_executors = [] 185 | 186 | for test_case in self.test_group.test_cases: 187 | test_case_executor = TestCaseExecutor(self, test_case) 188 | test_case_executors.append(test_case_executor) 189 | test_case_executor.start() 190 | 191 | for executor in test_case_executors: 192 | executor.join() 193 | 194 | after_group_executor = TestFixtureExecutor(self, self.test_group.after_group) 195 | after_group_executor.start_and_join() 196 | self.test_group.end_time = datetime.now() 197 | test_listeners.on_test_group_finish(self.test_group) 198 | 199 | 200 | class TestCaseExecutor(TestExecutor): 201 | def __init__(self, test_group_executor: TestGroupExecutor, test_case: TestCase): 202 | TestExecutor.__init__(self, test_group_executor) 203 | self.test_case = test_case 204 | 205 | def _run(self): 206 | before_method_executor = TestFixtureExecutor(self, self.test_case.before_method) 207 | test_listeners.on_test_case_start(self.test_case) 208 | self.test_case.start_time = datetime.now() 209 | before_method_executor.start_and_join() 210 | 211 | test_executor = TestFixtureExecutor(self, self.test_case.test) 212 | test_executor.start_and_join() 213 | 214 | logger_filler = "-" * (100 - len(self.test_case.full_name) - 6) 215 | if self.test_case.status == TestCaseStatus.PASSED: 216 | pconsole.write_line("%s%s|PASS|" % (self.test_case.full_name, logger_filler)) 217 | elif self.test_case.status == TestCaseStatus.FAILED: 218 | pconsole.write_line("%s%s|FAIL|" % (self.test_case.full_name, logger_filler)) 219 | elif self.test_case.status == TestCaseStatus.SKIPPED: 220 | pconsole.write_line("%s%s|SKIP|" % (self.test_case.full_name, logger_filler)) 221 | 222 | after_method_executor = TestFixtureExecutor(self, self.test_case.after_method) 223 | after_method_executor.start_and_join() 224 | self.test_case.end_time = datetime.now() 225 | test_listeners.on_test_case_finish(self.test_case) 226 | 227 | 228 | class TestFixtureExecutor(TestExecutor): 229 | def __init__(self, parent_test_executor: TestExecutor, test_fixture: TestFixture): 230 | TestExecutor.__init__(self, parent_test_executor) 231 | self.test_fixture = test_fixture 232 | 233 | def _run(self): 234 | if self.test_fixture.is_empty: return 235 | 236 | self.test_fixture.start_time = datetime.now() 237 | self.update_properties({"running_test_fixture": self.test_fixture}) 238 | 239 | failed_setup_fixture = self.test_fixture.context.get_failed_setup_fixture() 240 | if not failed_setup_fixture: 241 | self.run_test_fixture() 242 | elif isinstance(self.test_fixture, AfterSuite) and isinstance(failed_setup_fixture, BeforeSuite) and self.test_fixture.always_run: 243 | self.run_test_fixture() 244 | elif isinstance(self.test_fixture, AfterClass) and isinstance(failed_setup_fixture, BeforeClass) and self.test_fixture.always_run: 245 | self.run_test_fixture() 246 | elif isinstance(self.test_fixture, AfterGroup) and isinstance(failed_setup_fixture, BeforeGroup) and self.test_fixture.always_run: 247 | self.run_test_fixture() 248 | elif 
isinstance(self.test_fixture, AfterMethod) and isinstance(failed_setup_fixture, BeforeMethod) and self.test_fixture.always_run: 249 | self.run_test_fixture() 250 | else: 251 | self.skip_test_fixture(failed_setup_fixture) 252 | 253 | # spread the before fixture's instance attributes to the downstream test objects 254 | if isinstance(self.test_fixture, BeforeSuite): 255 | before_suite_dict = self.test_fixture.test_fixture_ref.__self__.__dict__ 256 | for test_class in self.test_fixture.test_suite.test_classes: 257 | test_class.test_class_ref.__dict__.update(before_suite_dict) 258 | for test_group in test_class.test_groups: 259 | test_group.test_class_ref.__dict__.update(before_suite_dict) 260 | for test_case in test_group.test_cases: 261 | test_case.test_case_ref.__self__.__dict__.update(before_suite_dict) 262 | elif isinstance(self.test_fixture, BeforeClass): 263 | before_class_dict = self.test_fixture.test_fixture_ref.__self__.__dict__ 264 | for test_group in self.test_fixture.test_class.test_groups: 265 | test_group.test_class_ref.__dict__.update(before_class_dict) 266 | for test_case in test_group.test_cases: 267 | test_case.test_case_ref.__self__.__dict__.update(before_class_dict) 268 | elif isinstance(self.test_fixture, BeforeGroup): 269 | before_group_dict = self.test_fixture.test_fixture_ref.__self__.__dict__ 270 | for test_case in self.test_fixture.test_group.test_cases: 271 | test_case.test_case_ref.__self__.__dict__.update(before_group_dict) 272 | 273 | self.update_properties({"running_test_fixture": None}) 274 | self.test_fixture.end_time = datetime.now() 275 | 276 | def run_test_fixture(self): 277 | self.test_fixture.status = TestFixtureStatus.RUNNING 278 | test_fixture_sub_executor = TestFixtureSubExecutor(self) 279 | test_fixture_sub_executor.start() 280 | if self.test_fixture.timeout > 0: 281 | test_fixture_sub_executor.join(self.test_fixture.timeout) 282 | if test_fixture_sub_executor.is_alive(): 283 | stack_trace = format_thread_stack(test_fixture_sub_executor) 284 | try: 285 | kill_thread(test_fixture_sub_executor) 286 | except Exception as e: 287 | pconsole_err.write_line(e) 288 | from .plogger import preporter 289 | self.test_fixture.status = TestFixtureStatus.FAILED 290 | self.test_fixture.failure_message = "Timed out executing this test fixture in %s seconds." % self.test_fixture.timeout 291 | self.test_fixture.failure_type = "TimeoutException" 292 | self.test_fixture.stack_trace = stack_trace 293 | preporter.error( 294 | "Failed with following message:\n%s\n%s" % (self.test_fixture.failure_message, self.test_fixture.stack_trace), True) 295 | else: 296 | test_fixture_sub_executor.join() 297 | 298 | def skip_test_fixture(self, caused_test_fixture: TestFixture): 299 | from .plogger import preporter 300 | self.test_fixture.status = TestFixtureStatus.SKIPPED 301 | self.test_fixture.skip_message = "@%s failed, so skipped." % caused_test_fixture.fixture_type.value 302 | preporter.warn("@%s failed, so skipped." 
% caused_test_fixture.fixture_type.value) 303 | 304 | 305 | class TestFixtureSubExecutor(TestExecutor): 306 | def __init__(self, test_fixture_executor: TestFixtureExecutor): 307 | TestExecutor.__init__(self, test_fixture_executor) 308 | self.test_fixture = test_fixture_executor.test_fixture 309 | self.setDaemon(True) 310 | 311 | def _run(self): 312 | if isinstance(self.test_fixture, Test): 313 | self.run_test() 314 | else: 315 | self.run_test_configuration() 316 | 317 | def run_test(self): 318 | if self.test_fixture.expected_exceptions: 319 | expected_exceptions = self.test_fixture.expected_exceptions 320 | expected_exceptions_names = str(["%s.%s" % (e.__module__, e.__name__) for e in expected_exceptions.keys()]) 321 | try: 322 | params = self.test_fixture.parameters or [] 323 | call_function(self.test_fixture.test_fixture_ref, *params) 324 | except Exception as e: 325 | exception = e.__class__ 326 | exception_name = "%s.%s" % (exception.__module__, exception.__name__) 327 | matched_exceptions = [expected_exception for expected_exception in expected_exceptions.keys() if 328 | issubclass(exception, expected_exception)] 329 | if matched_exceptions: 330 | def cmp_matched_exception(exception_a, exception_b): 331 | return -1 if issubclass(exception_a, exception_b) else 1 332 | 333 | matched_exception = sorted(matched_exceptions, key=cmp_to_key(cmp_matched_exception))[0] 334 | if not expected_exceptions[matched_exception] or expected_exceptions[matched_exception].search(str(e)): 335 | self.test_fixture.status = TestFixtureStatus.PASSED 336 | else: 337 | self.test_fixture.status = TestFixtureStatus.FAILED 338 | self.test_fixture.failure_message = "The exception <%s> was thrown with the wrong message: Expected message regex: <%s>, Actual message: <%s>." \ 339 | % (exception_name, expected_exceptions[matched_exception].pattern, str(e)) 340 | self.test_fixture.failure_type = "WrongExceptionMessageError" 341 | self.test_fixture.stack_trace = traceback.format_exc() 342 | preporter.error("Failed with following message:\n%s\n%s" % (self.test_fixture.failure_message, self.test_fixture.stack_trace), True) 343 | else: 344 | self.test_fixture.status = TestFixtureStatus.FAILED 345 | self.test_fixture.failure_message = "Expected exception: one of %s, Actual exception: <%s>." \ 346 | % (expected_exceptions_names, exception_name) 347 | self.test_fixture.failure_type = "WrongExceptionThrownError" 348 | self.test_fixture.stack_trace = traceback.format_exc() 349 | preporter.error("Failed with following message:\n%s\n%s" % (self.test_fixture.failure_message, self.test_fixture.stack_trace), True) 350 | else: 351 | self.test_fixture.status = TestFixtureStatus.FAILED 352 | self.test_fixture.failure_message = "Expected exception: one of %s, Actual: NO exception was thrown." 
\ 353 | % expected_exceptions_names 354 | self.test_fixture.failure_type = "NoExceptionThrownError" 355 | self.test_fixture.stack_trace = self.test_fixture.failure_message 356 | preporter.error("Failed with following message:\n%s" % self.test_fixture.failure_message, True) 357 | else: 358 | try: 359 | params = self.test_fixture.parameters or [] 360 | call_function(self.test_fixture.test_fixture_ref, *params) 361 | except Exception as e: 362 | self.test_fixture.status = TestFixtureStatus.FAILED 363 | self.test_fixture.failure_message = str(e).strip() or "\n".join([str(arg) for arg in e.args]) 364 | self.test_fixture.failure_type = "%s.%s" % (e.__class__.__module__, e.__class__.__name__) 365 | self.test_fixture.stack_trace = traceback.format_exc() 366 | preporter.error("Failed with following message:\n%s" % self.test_fixture.stack_trace, True) 367 | else: 368 | self.test_fixture.status = TestFixtureStatus.PASSED 369 | 370 | def run_test_configuration(self): 371 | try: 372 | params = {1: [], 2: [self.test_fixture.context]}[self.test_fixture.parameters_count] 373 | call_function(self.test_fixture.test_fixture_ref, *params) 374 | except Exception as e: 375 | self.test_fixture.status = TestFixtureStatus.FAILED 376 | self.test_fixture.failure_message = str(e).strip() or "\n".join([str(arg) for arg in e.args]) 377 | self.test_fixture.failure_type = "%s.%s" % (e.__class__.__module__, e.__class__.__name__) 378 | self.test_fixture.stack_trace = traceback.format_exc() 379 | preporter.error("Failed with following message:\n%s" % self.test_fixture.stack_trace, True) 380 | else: 381 | self.test_fixture.status = TestFixtureStatus.PASSED 382 | 383 | 384 | def current_executor(): 385 | return threading.currentThread() 386 | -------------------------------------------------------------------------------- /ptest/test_filter.py: -------------------------------------------------------------------------------- 1 | class TestFilter: 2 | def filter(self, test_ref): 3 | return True 4 | 5 | def __str__(self): 6 | return "Filter Class: %s.%s" % (self.__module__, self.__class__.__name__) 7 | 8 | 9 | class TestIncludeTagsFilter(TestFilter): 10 | def __init__(self, tags): 11 | self._tags = tags 12 | 13 | def filter(self, test_ref): 14 | return hasattr(test_ref, "__tags__") and len([val for val in self._tags if val in test_ref.__tags__]) != 0 15 | 16 | def __str__(self): 17 | return "Include Tags: %s" % ",".join(self._tags) 18 | 19 | 20 | class TestExcludeTagsFilter(TestFilter): 21 | def __init__(self, tags): 22 | self._tags = tags 23 | 24 | def filter(self, test_ref): 25 | return hasattr(test_ref, "__tags__") and len([val for val in self._tags if val in test_ref.__tags__]) == 0 26 | 27 | def __str__(self): 28 | return "Exclude Tags: %s" % ",".join(self._tags) 29 | 30 | 31 | class TestIncludeGroupsFilter(TestFilter): 32 | def __init__(self, groups): 33 | self._groups = groups 34 | 35 | def filter(self, test_ref): 36 | return hasattr(test_ref, "__group__") and test_ref.__group__ in self._groups 37 | 38 | def __str__(self): 39 | return "Include Groups: %s" % ",".join(self._groups) 40 | 41 | 42 | class TestFilterGroup: 43 | def __init__(self): 44 | self.__filters = [] 45 | 46 | def filter(self, test_ref): 47 | if not self.__filters: 48 | return True 49 | for ft in self.__filters: 50 | if not ft.filter(test_ref): 51 | return False 52 | return True 53 | 54 | def append_filter(self, test_filter): 55 | self.__filters.append(test_filter) 56 | 57 | def __len__(self): 58 | return len(self.__filters) 59 | 60 | def __getitem__(self, 
item): 61 | return self.__filters[item] 62 | -------------------------------------------------------------------------------- /ptest/test_finder.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import os 3 | import re 4 | 5 | from .enumeration import PDecoratorType 6 | from .test_filter import TestFilterGroup 7 | from .test_suite import TestSuite 8 | from .util import mock_func 9 | 10 | 11 | class TestFinder: 12 | def __init__(self, test_target: str, test_filter_group: TestFilterGroup, target_test_suite: TestSuite): 13 | self.test_target = test_target 14 | self.test_filter_group = test_filter_group 15 | self.target_test_suite = target_test_suite 16 | self.found_test_count = 0 17 | self.repeated_test_count = 0 18 | # test class / test case name filter 19 | self.test_class_name = None 20 | self.test_name = None 21 | self.test_data_name = None 22 | 23 | def find_tests(self): 24 | match_object = re.search(r"^(.*?)#(.*)$", self.test_target) 25 | if match_object: 26 | self.test_data_name = match_object.group(2) 27 | splitted_test_target = match_object.group(1).split(".") 28 | else: 29 | splitted_test_target = self.test_target.split(".") 30 | 31 | module_ref = None 32 | module_name_len = 0 33 | for i in range(len(splitted_test_target)): 34 | try: 35 | module_ref = importlib.import_module(".".join(splitted_test_target[:i + 1])) 36 | module_name_len = i + 1 37 | except ImportError as e: 38 | if splitted_test_target[i] in str(e): # the error message contains the missing module name (python 2.x showed only "bar" for target foo.bar) 39 | break 40 | raise 41 | 42 | if module_ref is None: 43 | raise ImportError("Test target <%s> is invalid.\nNo module named <%s>." % (self.test_target, splitted_test_target[0])) 44 | 45 | test_target_len = len(splitted_test_target) 46 | if module_name_len == test_target_len: 47 | if self.test_data_name is not None: 48 | raise ImportError("Test target <%s> is invalid.\n" 49 | "It looks like a test case with a data provider, " 50 | "but only the test module <%s> is provided." 51 | % (self.test_target, ".".join(splitted_test_target))) 52 | 53 | if hasattr(module_ref, "__path__"): 54 | # test target is a package 55 | self.find_tests_in_package(module_ref) 56 | else: 57 | # test target is a module 58 | self.find_tests_in_module(module_ref) 59 | elif module_name_len == test_target_len - 1: 60 | if self.test_data_name is not None: 61 | raise ImportError("Test target <%s> is invalid.\n" 62 | "It looks like a test case with a data provider, " 63 | "but only the test class <%s> is provided." 64 | % (self.test_target, ".".join(splitted_test_target))) 65 | 66 | if hasattr(module_ref, "__path__"): 67 | raise ImportError("Test target <%s> is invalid.\n" 68 | "The test class in the __init__.py of package <%s> will be ignored." 69 | % (self.test_target, ".".join(splitted_test_target[:module_name_len]))) 70 | 71 | # test target is a class 72 | self.test_class_name = splitted_test_target[-1] 73 | self.find_tests_in_module(module_ref) 74 | elif module_name_len == test_target_len - 2: 75 | if hasattr(module_ref, "__path__"): 76 | raise ImportError("Test target <%s> is invalid.\n" 77 | "The test case in the __init__.py of package <%s> will be ignored." 
78 | % (self.test_target, ".".join(splitted_test_target[:module_name_len]))) 79 | 80 | # test target is method 81 | self.test_class_name = splitted_test_target[-2] 82 | self.test_name = splitted_test_target[-1] 83 | self.find_tests_in_module(module_ref) 84 | else: 85 | raise ImportError("Test target <%s> is probably invalid.\nModule <%s> exists but module <%s> doesn't." % ( 86 | self.test_target, ".".join(splitted_test_target[:module_name_len]), ".".join(splitted_test_target[:module_name_len + 1]))) 87 | 88 | def find_tests_in_package(self, package_ref): 89 | package_name = package_ref.__name__ 90 | if hasattr(package_ref.__path__, "_path"): 91 | package_path = package_ref.__path__._path[0] # namespace package 92 | else: 93 | package_path = package_ref.__path__[0] # regular package 94 | for fn in os.listdir(package_path): 95 | file_path = os.path.join(package_path, fn) 96 | if os.path.isdir(file_path) and "." not in fn: 97 | self.find_tests_in_package(importlib.import_module(package_name + "." + fn)) 98 | elif os.path.isfile(file_path): 99 | file_name, file_ext = os.path.splitext(fn) 100 | if fn != "__init__.py" and file_ext == ".py": 101 | self.find_tests_in_module(importlib.import_module(package_name + "." + file_name)) 102 | 103 | def find_tests_in_module(self, module_ref): 104 | for module_element in dir(module_ref): 105 | test_class_cls = getattr(module_ref, module_element) 106 | if hasattr(test_class_cls, "__pd_type__") and test_class_cls.__pd_type__ == PDecoratorType.TestClass \ 107 | and hasattr(test_class_cls, "__enabled__") and test_class_cls.__enabled__ \ 108 | and test_class_cls.__module__ == module_ref.__name__ \ 109 | and (not self.test_class_name or self.test_class_name == test_class_cls.__name__): 110 | self.find_tests_in_class(test_class_cls) 111 | 112 | def find_tests_in_class(self, test_class_cls): 113 | for class_element in dir(test_class_cls): 114 | test_func = getattr(test_class_cls, class_element) 115 | if hasattr(test_func, "__pd_type__") and test_func.__pd_type__ == PDecoratorType.Test \ 116 | and hasattr(test_func, "__enabled__") and test_func.__enabled__: 117 | if self.test_name is None: 118 | for func in unzip_func(test_class_cls, test_func): 119 | if self.test_filter_group.filter(func): 120 | self.__add_test(test_class_cls, func) 121 | else: 122 | if self.test_data_name is not None and test_func.__data_provider__: 123 | if self.test_name == test_func.__name__: 124 | for func in unzip_func(test_class_cls, test_func): 125 | if "%s#%s" % (self.test_name, self.test_data_name) == func.__name__ and self.test_filter_group.filter(func): 126 | self.__add_test(test_class_cls, func) 127 | elif self.test_data_name is None and self.test_name == test_func.__name__: 128 | for func in unzip_func(test_class_cls, test_func): 129 | if self.test_filter_group.filter(func): 130 | self.__add_test(test_class_cls, func) 131 | 132 | def __add_test(self, test_class_cls, func): 133 | self.found_test_count += 1 134 | if not self.target_test_suite.add_test_case(test_class_cls, func): 135 | self.repeated_test_count += 1 136 | 137 | 138 | def unzip_func(test_class_cls, test_func): 139 | if not test_func.__funcs__: # zipped 140 | name_map = {} 141 | for index, data in enumerate(test_func.__data_provider__): 142 | if isinstance(data, (list, tuple)): 143 | parameters_count = len(data) 144 | parameters = data 145 | else: 146 | parameters_count = 1 147 | parameters = [data] 148 | if parameters_count == test_func.__parameters_count__ - 1: 149 | mock = mock_func(test_func) 150 | mock_name = ("%s#%s" 
150 |                 mock_name = ("%s#%s" % (test_func.__name__, test_func.__data_name__(index, parameters))).replace(",", "_").replace(" ", "_")
151 |                 if mock_name in name_map:
152 |                     name_map[mock_name] += 1
153 |                     mock.__name__ = "%s(%s)" % (mock_name, name_map[mock_name] - 1)
154 |                 else:
155 |                     name_map[mock_name] = 1
156 |                     mock.__name__ = mock_name
157 |                 mock.__parameters__ = parameters
158 |                 mock.__data_index__ = index
159 |                 mock.__funcs__ = [mock]
160 |                 mock.__test_class__ = test_class_cls
161 |                 test_func.__funcs__.append(mock)
162 |             else:
163 |                 raise TypeError("The data provider is trying to pass %s extra arguments but %s.%s() takes %s."
164 |                                 % (parameters_count, test_class_cls.__name__, test_func.__name__, test_func.__parameters_count__ - 1))
165 |     elif not test_func.__data_provider__:  # normal
166 |         test_func.__funcs__[0].__test_class__ = test_class_cls
167 |         if test_func.__parameters_count__ != 1:
168 |             raise TypeError(
169 |                 "Since no data provider is specified, %s.%s() cannot be declared with %s parameters. Please declare it with only 1 parameter (self)."
170 |                 % (test_class_cls.__name__, test_func.__name__, test_func.__parameters_count__))
171 |     return test_func.__funcs__
172 | 
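Note how unzip_func() above expands each data-provider row into its own test case named "<method name>#<data name>" (commas and spaces rewritten to underscores), which is exactly the form that find_tests_in_class() matches against self.test_name and self.test_data_name. A minimal illustrative sketch of such a test, not a file from this repo, assuming the @TestClass/@Test decorators exported by ptest/decorator.py and the data_provider keyword implied by __data_provider__:

from ptest.decorator import TestClass, Test  # import path assumed from the tree above

@TestClass()
class AdditionTest:
    # Each of the two data rows becomes one generated test case; with the default
    # data name these are addressable as test targets ending in "test_add#0" and "test_add#1".
    @Test(data_provider=[(1, 2, 3), (2, 3, 5)])
    def test_add(self, a, b, expected):
        # Each row carries parameters_count - 1 values (self excluded), matching
        # the check in unzip_func(); an uncaught AssertionError fails the test case.
        assert a + b == expected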
--------------------------------------------------------------------------------
/ptest/test_suite.py:
--------------------------------------------------------------------------------
  1 | import types
  2 | from functools import cmp_to_key
  3 | 
  4 | from .enumeration import PDecoratorType, TestFixtureStatus, TestClassRunMode, TestCaseStatus
  5 | 
  6 | SECOND_MICROSECOND_CONVERSION_FACTOR = 1000000.0
  7 | 
  8 | 
  9 | class StatusCount:
 10 |     def __init__(self):
 11 |         self.total = 0
 12 |         self.not_run = 0
 13 |         self.running = 0
 14 |         self.passed = 0
 15 |         self.failed = 0
 16 |         self.skipped = 0
 17 | 
 18 | 
 19 | class TestContainer:
 20 |     def __init__(self):
 21 |         self.start_time = None
 22 |         self.end_time = None
 23 |         self.test_cases = []
 24 | 
 25 |     @property
 26 |     def elapsed_time(self) -> float:
 27 |         time_delta = self.end_time - self.start_time
 28 |         return time_delta.seconds + time_delta.microseconds / SECOND_MICROSECOND_CONVERSION_FACTOR
 29 | 
 30 |     @property
 31 |     def status_count(self) -> StatusCount:
 32 |         count = StatusCount()
 33 |         for test_case in self.test_cases:
 34 |             count.total += 1
 35 |             if test_case.status == TestCaseStatus.NOT_RUN:
 36 |                 count.not_run += 1
 37 |             elif test_case.status == TestCaseStatus.RUNNING:
 38 |                 count.running += 1
 39 |             elif test_case.status == TestCaseStatus.PASSED:
 40 |                 count.passed += 1
 41 |             elif test_case.status == TestCaseStatus.FAILED:
 42 |                 count.failed += 1
 43 |             elif test_case.status == TestCaseStatus.SKIPPED:
 44 |                 count.skipped += 1
 45 |         return count
 46 | 
 47 |     @property
 48 |     def pass_rate(self) -> float:
 49 |         status_count = self.status_count
 50 |         return float(status_count.passed) * 100 / status_count.total
 51 | 
 52 | 
 53 | class TestSuite(TestContainer):
 54 |     def __init__(self, name):
 55 |         TestContainer.__init__(self)
 56 |         self.test_classes = []
 57 |         self.test_class_run_groups = []
 58 |         self.name = name
 59 |         self.full_name = name
 60 |         self.before_suite = BeforeSuite(self, None)
 61 |         self.after_suite = AfterSuite(self, None)
 62 | 
 63 |     def init(self):
 64 |         self.init_test_fixtures()
 65 |         self.init_test_class_run_groups()
 66 |         self.sort_test_class_run_groups()
 67 | 
 68 |     def init_test_fixtures(self):
 69 |         # reflect the before suite and after suite
 70 |         for test_class in self.test_classes:
 71 |             test_class_ref = test_class.test_class_ref.__class__()
 72 |             for element in dir(test_class_ref):
 73 |                 attr = getattr(test_class_ref, element)
 74 |                 if hasattr(attr, "__enabled__") and attr.__enabled__ \
 75 |                         and hasattr(attr, "__pd_type__"):
 76 |                     if attr.__pd_type__ == PDecoratorType.BeforeSuite:
 77 |                         self.before_suite = BeforeSuite(self, attr)
 78 |                     elif attr.__pd_type__ == PDecoratorType.AfterSuite:
 79 |                         self.after_suite = AfterSuite(self, attr)
 80 | 
 81 |     def init_test_class_run_groups(self):
 82 |         run_groups = {}
 83 |         run_group_index = 0
 84 |         for test_class in self.test_classes:
 85 |             if test_class.run_group is None:
 86 |                 run_groups[run_group_index] = [test_class]
 87 |                 run_group_index += 1
 88 |             elif test_class.run_group in run_groups:
 89 |                 run_groups[test_class.run_group].append(test_class)
 90 |             else:
 91 |                 run_groups[test_class.run_group] = [test_class]
 92 |         self.test_class_run_groups = run_groups.values()
 93 | 
 94 |     def sort_test_class_run_groups(self):
 95 |         run_groups = []
 96 |         # sort the test classes in each run group by their run mode
 97 |         for run_group in self.test_class_run_groups:
 98 |             run_groups.append(sorted(run_group, key=lambda test_class: test_class.run_mode.value, reverse=True))
 99 | 
100 |         # sort the test class run groups by their number of single-line test cases
101 |         def cmp_run_group(run_group_a, run_group_b):
102 |             single_line_count_a = single_line_count_b = parallel_count_a = parallel_count_b = 0
103 |             for test_class in run_group_a:
104 |                 if test_class.run_mode == TestClassRunMode.SingleLine:
105 |                     single_line_count_a += len(test_class.test_cases)
106 |                 else:
107 |                     parallel_count_a += len(test_class.test_cases)
108 | 
109 |             for test_class in run_group_b:
110 |                 if test_class.run_mode == TestClassRunMode.SingleLine:
111 |                     single_line_count_b += len(test_class.test_cases)
112 |                 else:
113 |                     parallel_count_b += len(test_class.test_cases)
114 | 
115 |             if single_line_count_a == single_line_count_b:
116 |                 return parallel_count_a - parallel_count_b
117 |             else:
118 |                 return single_line_count_a - single_line_count_b
119 | 
120 |         self.test_class_run_groups = sorted(run_groups, key=cmp_to_key(cmp_run_group), reverse=True)
121 | 
122 |     def get_failed_setup_fixture(self):
123 |         if self.before_suite.status == TestFixtureStatus.FAILED:
124 |             return self.before_suite
125 |         return None
126 | 
127 |     def get_test_class(self, full_name: str):
128 |         for test_class in self.test_classes:
129 |             if test_class.full_name == full_name:
130 |                 return test_class
131 |         return None
132 | 
133 |     def add_test_case(self, test_class_cls, test_case_func):
134 |         # since @TestClass can be inherited, set the full name here
135 |         test_class_cls.__full_name__ = "%s.%s" % (test_class_cls.__module__, test_class_cls.__name__)
136 |         test_class = self.get_test_class(test_class_cls.__full_name__)
137 |         if test_class is None:
138 |             test_class = TestClass(self, test_class_cls())
139 |             self.test_classes.append(test_class)
140 | 
141 |         test_group = test_class.get_test_group(test_case_func.__group__)
142 |         if test_group is None:
143 |             test_group = TestGroup(test_class, test_case_func.__group__, test_class_cls())
144 |             test_class.test_groups.append(test_group)
145 | 
146 |         test_case = test_group.get_test_case(test_case_func.__name__)
147 |         if test_case is None:
148 |             if hasattr(test_class_cls, test_case_func.__name__):  # normal
149 |                 test_case = TestCase(test_group, getattr(test_class_cls(), test_case_func.__name__))
150 |             else:  # mocked
151 |                 test_class_ref = test_class_cls()
152 |                 mock_method = types.MethodType(test_case_func, test_class_ref)
153 |                 setattr(test_class_ref, test_case_func.__name__, mock_method)
154 |                 test_case = TestCase(test_group, mock_method)
155 |             test_group.test_cases.append(test_case)
156 |             test_class.test_cases.append(test_case)
157 |             self.test_cases.append(test_case)
158 |             return True
159 |         return False
160 | 
161 | 
162 | class TestClass(TestContainer):
163 |     def __init__(self, test_suite: TestSuite, test_class_ref):
164 |         TestContainer.__init__(self)
165 |         self.test_suite = test_suite
166 |         self.test_class_ref = test_class_ref
167 |         self.test_groups = []
168 |         self.name = test_class_ref.__class__.__name__
169 |         self.full_name = test_class_ref.__full_name__
170 |         self.run_mode = test_class_ref.__run_mode__
171 |         self.run_group = test_class_ref.__run_group__
172 |         self.description = test_class_ref.__description__
173 |         self.custom_args = test_class_ref.__custom_args__
174 | 
175 |         self.before_class = BeforeClass(self, None)
176 |         self.after_class = AfterClass(self, None)
177 |         # reflect the before class and after class
178 |         for element in dir(test_class_ref):
179 |             attr = getattr(test_class_ref, element)
180 |             if hasattr(attr, "__enabled__") and attr.__enabled__ \
181 |                     and hasattr(attr, "__pd_type__"):
182 |                 if attr.__pd_type__ == PDecoratorType.BeforeClass:
183 |                     self.before_class = BeforeClass(self, attr)
184 |                 elif attr.__pd_type__ == PDecoratorType.AfterClass:
185 |                     self.after_class = AfterClass(self, attr)
186 | 
187 |     def get_failed_setup_fixture(self) -> "TestFixture":
188 |         setup_fixture = self.test_suite.get_failed_setup_fixture()
189 |         if setup_fixture:
190 |             return setup_fixture
191 |         if self.before_class.status == TestFixtureStatus.FAILED:
192 |             return self.before_class
193 |         return None
194 | 
195 |     def get_test_group(self, name: str) -> "TestGroup":
196 |         for test_group in self.test_groups:
197 |             if test_group.name == name:
198 |                 return test_group
199 |         return None
200 | 
201 |     @property
202 |     def is_group_feature_used(self) -> bool:
203 |         return not (len(self.test_groups) == 1 and self.test_groups[0].name == "DEFAULT"
204 |                     and self.test_groups[0].before_group.is_empty and self.test_groups[0].after_group.is_empty)
205 | 
206 | 
207 | class TestGroup(TestContainer):
208 |     def __init__(self, test_class: TestClass, name: str, test_class_ref):
209 |         TestContainer.__init__(self)
210 |         self.test_class = test_class
211 |         self.test_suite = self.test_class.test_suite
212 |         self.test_class_ref = test_class_ref
213 |         self.test_cases = []
214 |         self.name = name
215 |         self.full_name = "%s<%s>" % (test_class.full_name, name)
216 | 
217 |         self.before_group = BeforeGroup(self, None)
218 |         self.after_group = AfterGroup(self, None)
219 |         # reflect the before group and after group
220 |         for element in dir(test_class_ref):
221 |             attr = getattr(test_class_ref, element)
222 |             if hasattr(attr, "__enabled__") and attr.__enabled__ \
223 |                     and hasattr(attr, "__group__") and attr.__group__ == self.name \
224 |                     and hasattr(attr, "__pd_type__"):
225 |                 if attr.__pd_type__ == PDecoratorType.BeforeGroup:
226 |                     self.before_group = BeforeGroup(self, attr)
227 |                 elif attr.__pd_type__ == PDecoratorType.AfterGroup:
228 |                     self.after_group = AfterGroup(self, attr)
229 | 
230 |     def get_failed_setup_fixture(self) -> "TestFixture":
231 |         setup_fixture = self.test_class.get_failed_setup_fixture()
232 |         if setup_fixture:
233 |             return setup_fixture
234 |         if self.before_group.status == TestFixtureStatus.FAILED:
235 |             return self.before_group
236 |         return None
237 | 
238 |     def get_test_case(self, name: str) -> "TestCase":
239 |         for test_case in self.test_cases:
240 |             if test_case.name == name:
241 |                 return test_case
242 |         return None
243 | 
244 | 
245 | class TestCase:
246 |     def __init__(self, test_group: TestGroup, test_case_ref):
247 |         self.test_group = test_group
248 |         self.test_class = self.test_group.test_class
249 |         self.test_suite = self.test_class.test_suite
250 |         self.test_case_ref = test_case_ref
251 |         self.name = test_case_ref.__name__
252 |         self.full_name = "%s.%s" % (self.test_class.full_name, self.name)
253 |         self.start_time = None
254 |         self.end_time = None
255 | 
256 |         self.test = Test(self, test_case_ref)
257 | 
258 |         self.tags = self.test.tags
259 |         self.expected_exceptions = self.test.expected_exceptions
260 |         self.parameters = self.test.parameters
261 |         self.data_index = self.test.data_index
262 |         self.group = self.test.group
263 |         self.description = self.test.description
264 |         self.custom_args = self.test.custom_args
265 |         self.location = self.test.location
266 | 
267 |         self.before_method = BeforeMethod(self, None)
268 |         self.after_method = AfterMethod(self, None)
269 |         # reflect the before method and after method
270 |         for element in dir(test_case_ref.__self__):
271 |             attr = getattr(test_case_ref.__self__, element)
272 |             if hasattr(attr, "__enabled__") and attr.__enabled__ \
273 |                     and hasattr(attr, "__group__") and attr.__group__ == self.group \
274 |                     and hasattr(attr, "__pd_type__"):
275 |                 if attr.__pd_type__ == PDecoratorType.BeforeMethod:
276 |                     self.before_method = BeforeMethod(self, attr)
277 |                 elif attr.__pd_type__ == PDecoratorType.AfterMethod:
278 |                     self.after_method = AfterMethod(self, attr)
279 | 
280 |     def get_failed_setup_fixture(self) -> "TestFixture":
281 |         setup_fixture = self.test_group.get_failed_setup_fixture()
282 |         if setup_fixture:
283 |             return setup_fixture
284 |         if self.before_method.status == TestFixtureStatus.FAILED:
285 |             return self.before_method
286 |         return None
287 | 
288 |     @property
289 |     def failure_message(self) -> str:
290 |         return self.test.failure_message
291 | 
292 |     @property
293 |     def failure_type(self) -> str:
294 |         return self.test.failure_type
295 | 
296 |     @property
297 |     def stack_trace(self) -> str:
298 |         return self.test.stack_trace
299 | 
300 |     @property
301 |     def skip_message(self) -> str:
302 |         return self.test.skip_message
303 | 
304 |     @property
305 |     def status(self) -> TestCaseStatus:
306 |         status_map = {
307 |             TestFixtureStatus.NOT_RUN: TestCaseStatus.NOT_RUN,
308 |             TestFixtureStatus.RUNNING: TestCaseStatus.RUNNING,
309 |             TestFixtureStatus.PASSED: TestCaseStatus.PASSED,
310 |             TestFixtureStatus.SKIPPED: TestCaseStatus.SKIPPED,
311 |             TestFixtureStatus.FAILED: TestCaseStatus.FAILED,
312 |         }
313 |         return status_map[self.test.status]
314 | 
315 |     @property
316 |     def elapsed_time(self) -> float:
317 |         time_delta = self.end_time - self.start_time
318 |         return time_delta.seconds + time_delta.microseconds / SECOND_MICROSECOND_CONVERSION_FACTOR
319 | 
320 | 
321 | class TestFixture:
322 |     def __init__(self, context, test_fixture_ref, fixture_type: PDecoratorType):
323 |         self.context = context
324 |         self.fixture_type = fixture_type
325 |         self.is_empty = False
326 |         self.status = TestFixtureStatus.NOT_RUN
327 |         if test_fixture_ref is None:
328 |             self.is_empty = True
329 |             return
330 |         self.test_fixture_ref = test_fixture_ref
331 |         self.name = test_fixture_ref.__name__
332 |         self.full_name = ""
333 |         self.failure_message = ""
334 |         self.failure_type = ""
335 |         self.stack_trace = ""
336 |         self.skip_message = ""
337 |         self.start_time = None
338 |         self.end_time = None
339 |         self.logs = []
340 |         self.description = test_fixture_ref.__description__
341 |         self.timeout = test_fixture_ref.__timeout__
342 |         self.custom_args = test_fixture_ref.__custom_args__
343 |         self.location = test_fixture_ref.__location__
344 |         self.parameters_count = test_fixture_ref.__parameters_count__
345 | 
346 |     @property
347 |     def elapsed_time(self) -> float:
348 |         time_delta = self.end_time - self.start_time
349 |         return time_delta.seconds + time_delta.microseconds / SECOND_MICROSECOND_CONVERSION_FACTOR
350 | 
351 | 
352 | class BeforeSuite(TestFixture):
353 |     def __init__(self, test_suite: TestSuite, test_fixture_ref):
354 |         TestFixture.__init__(self, test_suite, test_fixture_ref, PDecoratorType.BeforeSuite)
355 |         self.test_suite = self.context
356 |         if not self.is_empty:
357 |             self.full_name = "%s@%s" % (test_suite.name, self.fixture_type.value)
358 | 
359 | 
360 | class BeforeClass(TestFixture):
361 |     def __init__(self, test_class: TestClass, test_fixture_ref):
362 |         TestFixture.__init__(self, test_class, test_fixture_ref, PDecoratorType.BeforeClass)
363 |         self.test_class = self.context
364 |         self.test_suite = self.test_class.test_suite
365 |         if not self.is_empty:
366 |             self.full_name = "%s@%s" % (test_class.full_name, self.fixture_type.value)
367 | 
368 | 
369 | class BeforeGroup(TestFixture):
370 |     def __init__(self, test_group: TestGroup, test_fixture_ref):
371 |         TestFixture.__init__(self, test_group, test_fixture_ref, PDecoratorType.BeforeGroup)
372 |         self.test_group = self.context
373 |         self.test_class = self.test_group.test_class
374 |         self.test_suite = self.test_group.test_suite
375 |         if not self.is_empty:
376 |             self.full_name = "%s@%s" % (test_group.full_name, self.fixture_type.value)
377 |             self.group = test_fixture_ref.__group__
378 | 
379 | 
380 | class BeforeMethod(TestFixture):
381 |     def __init__(self, test_case: TestCase, test_fixture_ref):
382 |         TestFixture.__init__(self, test_case, test_fixture_ref, PDecoratorType.BeforeMethod)
383 |         self.test_case = self.context
384 |         self.test_group = self.test_case.test_group
385 |         self.test_class = self.test_case.test_class
386 |         self.test_suite = self.test_case.test_suite
387 |         if not self.is_empty:
388 |             self.full_name = "%s@%s" % (test_case.full_name, self.fixture_type.value)
389 |             self.group = test_fixture_ref.__group__
390 | 
391 | 
392 | class Test(TestFixture):
393 |     def __init__(self, test_case: TestCase, test_fixture_ref):
394 |         TestFixture.__init__(self, test_case, test_fixture_ref, PDecoratorType.Test)
395 |         self.full_name = "%s@%s" % (test_case.full_name, self.fixture_type.value)
396 |         self.test_case = self.context
397 |         self.test_group = self.test_case.test_group
398 |         self.test_class = self.test_case.test_class
399 |         self.test_suite = self.test_case.test_suite
400 |         self.tags = test_fixture_ref.__tags__
401 |         self.expected_exceptions = test_fixture_ref.__expected_exceptions__
402 |         self.parameters = test_fixture_ref.__parameters__
403 |         self.data_index = test_fixture_ref.__data_index__
404 |         self.group = test_fixture_ref.__group__
405 | 
406 | 
407 | class AfterMethod(TestFixture):
408 |     def __init__(self, test_case: TestCase, test_fixture_ref):
409 |         TestFixture.__init__(self, test_case, test_fixture_ref, PDecoratorType.AfterMethod)
410 |         self.test_case = self.context
411 |         self.test_group = self.test_case.test_group
412 |         self.test_class = self.test_case.test_class
413 |         self.test_suite = self.test_case.test_suite
414 |         if not self.is_empty:
415 |             self.full_name = "%s@%s" % (test_case.full_name, self.fixture_type.value)
416 |             self.always_run = test_fixture_ref.__always_run__
417 |             self.group = test_fixture_ref.__group__
418 | 
419 | 
420 | class AfterGroup(TestFixture):
421 |     def __init__(self, test_group: TestGroup, test_fixture_ref):
422 |         TestFixture.__init__(self, test_group, test_fixture_ref, PDecoratorType.AfterGroup)
423 |         self.test_group = self.context
424 |         self.test_class = self.test_group.test_class
425 |         self.test_suite = self.test_group.test_suite
426 |         if not self.is_empty:
427 |             self.full_name = "%s@%s" % (test_group.full_name, self.fixture_type.value)
428 |             self.always_run = test_fixture_ref.__always_run__
429 |             self.group = test_fixture_ref.__group__
430 | 
431 | 
432 | class AfterClass(TestFixture):
433 |     def __init__(self, test_class: TestClass, test_fixture_ref):
434 |         TestFixture.__init__(self, test_class, test_fixture_ref, PDecoratorType.AfterClass)
435 |         self.test_class = self.context
436 |         self.test_suite = self.test_class.test_suite
437 |         if not self.is_empty:
438 |             self.full_name = "%s@%s" % (test_class.full_name, self.fixture_type.value)
439 |             self.always_run = test_fixture_ref.__always_run__
440 | 
441 | 
442 | class AfterSuite(TestFixture):
443 |     def __init__(self, test_suite: TestSuite, test_fixture_ref):
444 |         TestFixture.__init__(self, test_suite, test_fixture_ref, PDecoratorType.AfterSuite)
445 |         self.test_suite = self.context
446 |         if not self.is_empty:
447 |             self.full_name = "%s@%s" % (test_suite.name, self.fixture_type.value)
448 |             self.always_run = test_fixture_ref.__always_run__
449 | 
450 | 
451 | default_test_suite = TestSuite("DefaultSuite")
452 | 
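The run-group machinery above is driven by two attributes read from the decorated class, __run_mode__ and __run_group__. A hedged sketch of how they are typically set; the run_mode/run_group keyword spellings are assumptions inferred from this file and the CHANGELOG (run_group for @TestClass was added in 1.5.1), not verbatim from ptest/decorator.py:

from ptest.decorator import TestClass, Test

@TestClass(run_mode="singleline", run_group="db")  # cases in this class run one at a time
class DbWriterTest:
    @Test()
    def test_write(self):
        ...

@TestClass(run_mode="parallel", run_group="db")  # cases in this class may run concurrently
class DbReaderTest:
    @Test()
    def test_read(self):
        ...

Because both classes share run_group="db", init_test_class_run_groups() collects them into one run group, and sort_test_class_run_groups() schedules groups with more single-line test cases first so they do not become the long tail of the run.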
--------------------------------------------------------------------------------
/ptest/util.py:
--------------------------------------------------------------------------------
  1 | import asyncio
  2 | import ctypes
  3 | import errno
  4 | import os
  5 | import sys
  6 | import time
  7 | import traceback
  8 | import types
  9 | from threading import Thread
 10 | 
 11 | from typing import Any
 12 | 
 13 | 
 14 | def make_dirs(dir_path: str):
 15 |     try:
 16 |         os.makedirs(dir_path)
 17 |     except OSError as e:
 18 |         if e.errno != errno.EEXIST:  # be happy if someone already created the path
 19 |             raise
 20 | 
 21 | 
 22 | def remove_tree(dir_path: str, remove_root: bool = True):
 23 |     if os.path.exists(dir_path):
 24 |         for root, dirs, files in os.walk(dir_path, topdown=False):
 25 |             for name in files:
 26 |                 os.remove(os.path.join(root, name))
 27 |             for name in dirs:
 28 |                 os.rmdir(os.path.join(root, name))
 29 |         if remove_root is True:
 30 |             os.rmdir(dir_path)
 31 | 
 32 | 
 33 | def mock_func(func):
 34 |     fn = types.FunctionType(func.__code__, func.__globals__, func.__name__, func.__defaults__, func.__closure__)
 35 |     # in case func was given attrs (note this dict is a shallow copy):
 36 |     fn.__dict__.update(func.__dict__)
 37 |     return fn
 38 | 
 39 | 
 40 | def call_function(func, *args, **kwargs):
 41 |     if asyncio.iscoroutinefunction(func):
 42 |         loop = asyncio.new_event_loop()
 43 |         asyncio.set_event_loop(loop)
 44 |         return loop.run_until_complete(func.__call__(*args, **kwargs))
 45 |     return func.__call__(*args, **kwargs)
 46 | 
 47 | 
 48 | def kill_thread(thread: Thread):
 49 |     """
 50 |     Kill a python thread from another thread.
 51 | 
 52 |     :param thread: a threading.Thread instance
 53 |     """
 54 |     exc = ctypes.py_object(SystemExit)
 55 |     res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread.ident), exc)
 56 |     if res == 0:
 57 |         raise ValueError("nonexistent thread id")
 58 |     elif res > 1:
 59 |         # """if it returns a number greater than one, you're in trouble,
 60 |         # and you should call it again with exc=NULL to revert the effect"""
 61 |         ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)
 62 |         raise SystemError("PyThreadState_SetAsyncExc failed")
 63 | 
 64 |     start_time = time.time()
 65 |     while (time.time() - start_time) <= 30:
 66 |         if not thread.is_alive():  # is_alive(), since the camel-case isAlive() was removed in Python 3.9
 67 |             return
 68 |         time.sleep(1)
 69 | 
 70 |     raise SystemError("Timed out waiting for thread <%s> to be killed." % thread)
 71 | 
 72 | 
 73 | def format_thread_stack(thread: Thread):
 74 |     stack_code = ["Stack Trace:"]
 75 |     stack = sys._current_frames()[thread.ident]
 76 |     for file_name, line_no, name, line in traceback.extract_stack(stack):
 77 |         stack_code.append(" File: \"%s\", line %d, in %s" % (file_name, line_no, name))
 78 |         if line:
 79 |             stack_code.append(" %s" % (line.strip()))
 80 |     return "\n".join(stack_code)
 81 | 
 82 | 
 83 | def escape_html(obj: Any):
 84 |     if isinstance(obj, dict):
 85 |         return {key: escape_html(value) for key, value in obj.items()}
 86 |     if isinstance(obj, list):
 87 |         return [escape_html(item) for item in obj]
 88 |     if isinstance(obj, str):
 89 |         return obj.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;").replace(" ", "&nbsp;").replace('"', "&quot;") \
 90 |             .replace("\n", "<br/>")
") 91 | return obj 92 | 93 | 94 | def escape_filename(name: str): 95 | _name = name 96 | escape_list = [ 97 | ("%", "%25"), 98 | ("\\", "%5C"), 99 | ("/", "%2F"), 100 | (":", "%3A"), 101 | ("*", "%01"), 102 | ("?", "%3F"), 103 | ("\"", "%22"), 104 | ("<", "%3C"), 105 | (">", "%3E"), 106 | ("|", "%7C") 107 | ] 108 | for char, to in escape_list: 109 | _name = _name.replace(char, to) 110 | return _name 111 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import platform 2 | from os import path 3 | 4 | from setuptools import setup 5 | 6 | from ptest import __version__ 7 | 8 | current_dir = path.abspath(path.dirname(__file__)) 9 | # Get the long description from the relevant file 10 | with open(path.join(current_dir, "README.md"), encoding="utf-8") as f: 11 | long_description = f.read() 12 | with open(path.join(current_dir, "CHANGELOG"), encoding="utf-8") as f: 13 | long_description += "\n" + f.read() 14 | 15 | classifiers = ["License :: OSI Approved :: Apache Software License", 16 | "Topic :: Software Development :: Testing", 17 | "Operating System :: Microsoft :: Windows", 18 | "Operating System :: MacOS :: MacOS X"] + \ 19 | [("Programming Language :: Python :: %s" % x) for x in "3.6 3.7 3.8 3.9".split()] 20 | 21 | 22 | def make_cmdline_entry_points(): 23 | target = "ptest.main:main" 24 | entry_points = [] 25 | version = platform.python_version() 26 | entry_points.append("ptest=%s" % target) 27 | entry_points.append("ptest3=%s" % target) 28 | entry_points.append("ptest-%s=%s" % (version[:3], target)) 29 | return entry_points 30 | 31 | 32 | def main(): 33 | setup( 34 | name="ptest", 35 | description="light test framework for Python", 36 | long_description=long_description, 37 | long_description_content_type="text/markdown", 38 | version=__version__, 39 | keywords="test testing framework automation python runner", 40 | author="Karl Gong", 41 | author_email="karl.gong@outlook.com", 42 | url="https://github.com/KarlGong/ptest", 43 | license="Apache", 44 | entry_points={ 45 | "console_scripts": make_cmdline_entry_points(), 46 | }, 47 | classifiers=classifiers, 48 | packages=["ptest"], 49 | package_data={"ptest": ["htmltemplate/*.*"]}, 50 | python_requires=">=3.6", 51 | zip_safe=False, 52 | ) 53 | 54 | 55 | if __name__ == "__main__": 56 | main() 57 | --------------------------------------------------------------------------------