├── .github └── workflows │ └── checks.yml ├── .gitignore ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.rst ├── VERSION ├── betelgeuse ├── __init__.py ├── collector.py ├── config.py ├── default_config.py ├── parser.py └── source_generator.py ├── docs ├── Makefile ├── conf.py ├── config.rst ├── customfieldsvalues.rst └── index.rst ├── pytest.ini ├── release.sh ├── requirements-dev.txt ├── requirements.txt ├── sample_project ├── results │ └── sample-junit-result.xml └── tests │ ├── __init__.py │ ├── test_init.py │ └── test_login_example.py ├── setup.py └── tests ├── __init__.py ├── data ├── __init__.py ├── ignore_dir │ ├── __init__.py │ └── test_ignore_dir.py └── test_sample.py ├── test_betelgeuse.py ├── test_collector.py ├── test_parser.py └── test_source_generator.py /.github/workflows/checks.yml: -------------------------------------------------------------------------------- 1 | name: Betelgeuse Checks 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | jobs: 10 | build: 11 | 12 | runs-on: ubuntu-latest 13 | strategy: 14 | matrix: 15 | python-version: ['3.10', '3.11', '3.12'] 16 | 17 | steps: 18 | - uses: actions/checkout@v2 19 | - name: Set up Python ${{ matrix.python-version }} 20 | uses: actions/setup-python@v2 21 | with: 22 | python-version: ${{ matrix.python-version }} 23 | - name: Install dependencies 24 | run: | 25 | python -m pip install --upgrade pip 26 | pip install -r requirements.txt 27 | pip install -U -r requirements-dev.txt 28 | - name: Run checks 29 | run: | 30 | make all 31 | - name: Upload Coverage to Codecov 32 | uses: codecov/codecov-action@v1 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | 5 | # C extensions 6 | *.so 7 | 8 | # Distribution / packaging 9 | .Python 10 | env/ 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | *.egg-info/ 23 | .installed.cfg 24 | *.egg 25 | 26 | # PyInstaller 27 | # Usually these files are written by a python script from a template 28 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 29 | *.manifest 30 | *.spec 31 | 32 | # Installer logs 33 | pip-log.txt 34 | pip-delete-this-directory.txt 35 | 36 | # Unit test / coverage reports 37 | htmlcov/ 38 | .tox/ 39 | .coverage 40 | .coverage.* 41 | .cache 42 | nosetests.xml 43 | coverage.xml 44 | *,cover 45 | 46 | # Translations 47 | *.mo 48 | *.pot 49 | 50 | # Django stuff: 51 | *.log 52 | 53 | # Sphinx documentation 54 | docs/_build/ 55 | 56 | # PyBuilder 57 | target/ 58 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. 
By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 
76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. 
However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 
196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 
256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 
309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 
360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 
421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 
476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. 
You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 
583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | {one line to give the program's name and a brief idea of what it does.} 635 | Copyright (C) {year} {name of author} 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see . 
649 | 650 | Also add information on how to contact you by electronic and paper mail. 651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | {project} Copyright (C) {year} {fullname} 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | . 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | . 675 | 676 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE README.rst VERSION 2 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: test-coverage lint docs-html package 2 | 3 | docs-clean: 4 | @cd docs; $(MAKE) clean 5 | 6 | docs-html: 7 | @cd docs; $(MAKE) html 8 | 9 | init: 10 | pip install -r requirements.txt 11 | 12 | init-dev: 13 | pip install -r requirements-dev.txt 14 | 15 | lint: 16 | flake8 betelgeuse/ tests/ 17 | 18 | package: package-clean 19 | python setup.py --quiet sdist bdist_wheel 20 | 21 | package-clean: 22 | rm -rf build dist Betelgeuse.egg-info 23 | 24 | publish: package 25 | twine upload dist/* 26 | 27 | test-publish: 28 | python setup.py register -r testpypi 29 | python setup.py sdist upload -r testpypi 30 | python setup.py bdist_wheel upload -r testpypi 31 | 32 | test: 33 | py.test -vv tests 34 | 35 | test-coverage: 36 | py.test -vv --cov-report term --cov=betelgeuse tests 37 | 38 | test-watch: 39 | ptw tests 40 | 41 | .PHONY: all docs-clean docs-html init init-dev lint publish test-publish test \ 42 | test-coverage 43 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Betelgeuse 2 | ========== 3 | 4 | .. image:: https://codecov.io/gh/SatelliteQE/betelgeuse/branch/master/graph/badge.svg 5 | :target: https://codecov.io/gh/SatelliteQE/betelgeuse 6 | 7 | .. image:: https://readthedocs.org/projects/betelgeuse/badge/?version=latest 8 | :target: http://betelgeuse.readthedocs.org/en/latest/?badge=latest 9 | :alt: Documentation Status 10 | 11 | Betelgeuse reads standard Python test cases and generates XML files that are 12 | suited to be imported by Polarion importers. 
Possible generated XML files are: 13 | 14 | * Requirement Importer XML 15 | * Test Case Importer XML 16 | * Test Run Importer XML 17 | 18 | The `full documentation `_ is 19 | available on ReadTheDocs. 20 | -------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | 1.11.0 2 | -------------------------------------------------------------------------------- /betelgeuse/__init__.py: -------------------------------------------------------------------------------- 1 | """Betelgeuse. 2 | 3 | Betelgeuse is a Python program that reads standard Python test cases and 4 | generates XML files that are suited to be imported by Polarion importers. 5 | Possible generated XML files are: 6 | 7 | * Test Case Importer XML 8 | * Test Run Importer XML 9 | """ 10 | import itertools 11 | import json 12 | import logging 13 | import re 14 | import ssl 15 | import time 16 | from collections import Counter 17 | from xml.dom import minidom 18 | from xml.etree import ElementTree 19 | from xml.parsers.expat import ExpatError 20 | 21 | import click 22 | 23 | from betelgeuse import collector, config 24 | 25 | 26 | logging.captureWarnings(True) 27 | 28 | # Avoid SSL errors 29 | ssl._create_default_https_context = ssl._create_unverified_context 30 | 31 | INVALID_CHARS_REGEX = re.compile(r'[\\/.:"<>|~!@#$?%^&\'*()+`,=]') 32 | 33 | POLARION_STATUS = { 34 | 'error': 'failed', 35 | 'failure': 'failed', 36 | 'passed': 'passed', 37 | 'skipped': 'blocked', 38 | } 39 | 40 | TESTCASE_ATTRIBUTES_TO_FIELDS = { 41 | 'approver-ids': 'approvers', 42 | 'assignee-id': 'assignee', 43 | 'due-date': 'duedate', 44 | 'id': 'id', 45 | 'initial-estimate': 'initialestimate', 46 | 'status-id': 'status', 47 | } 48 | 49 | JUNIT_TEST_STATUS = ['error', 'failure', 'skipped'] 50 | 51 | # Cache for shared objects 52 | OBJ_CACHE = {'requirements': {}} 53 | 54 | 55 | def validate_key_value_option(ctx, param, value): 56 | """Validate an option that expects key=value formatted values.""" 57 | if value is None: 58 | return 59 | try: 60 | key, value = value.split('=', 1) 61 | return key, value 62 | except ValueError: 63 | raise click.BadParameter( 64 | '{} needs to be in format key=value'.format(param.name)) 65 | 66 | 67 | def load_custom_fields(custom_fields_opt): 68 | """Load the custom fields from the --custom-fields option. 69 | 70 | The --custom-fields option can receive either a string on the format 71 | ``key=value`` or a JSON string ``{"key":"value"}``, which will be loaded 72 | into a dictionary. 73 | 74 | If the value passed is not in JSON or key=value format it will be ignored. 75 | 76 | :param custom_fields_opt: A tuple of --custom-fields option. 77 | """ 78 | custom_fields = {} 79 | if not custom_fields_opt: 80 | return custom_fields 81 | for item in custom_fields_opt: 82 | if item.startswith('{'): 83 | custom_fields.update(json.loads(item)) 84 | elif '=' in item: 85 | key, value = item.split('=', 1) 86 | custom_fields[key.strip()] = value.strip() 87 | return custom_fields 88 | 89 | 90 | def map_steps(steps, expectedresults): 91 | """Map each step to its expected result. 92 | 93 | For example a docstring like:: 94 | 95 | '''My test 96 | 97 | :steps: 98 | 99 | 1. First step 100 | 2. Second step 101 | 3. Third step 102 | 103 | :expectedresults: 104 | 105 | 1. First step expected result. 106 | 2. Second step expected result. 107 | 3. Third step expected result. 
108 | ''' 109 | 110 | Will produce a return like:: 111 | 112 | [ 113 | ('First step', 'First step expected result.'), 114 | ('Second step', 'Second step expected result.'), 115 | ('Third step', 'Third step expected result.'), 116 | ] 117 | 118 | :param steps: unparsed string expected to contain either a list of steps or 119 | a single paragraph. 120 | :param expectedresults: unparsed string expected to contain either a 121 | list of expectedresults or a single paragraph. 122 | """ 123 | try: 124 | parsed_steps = minidom.parseString(steps.encode('utf-8')) 125 | parsed_expectedresults = minidom.parseString( 126 | expectedresults.encode('utf-8')) 127 | except ExpatError: 128 | return [(steps, expectedresults)] 129 | if (parsed_steps.firstChild.tagName == 'p' and 130 | parsed_expectedresults.firstChild.tagName == 'p'): 131 | parsed_steps = [parsed_steps.firstChild.toxml()] 132 | parsed_expectedresults = [ 133 | parsed_expectedresults.firstChild.toxml()] 134 | elif (parsed_steps.firstChild.tagName == 'ol' and 135 | parsed_expectedresults.firstChild.tagName == 'ol'): 136 | parsed_steps = [ 137 | element.firstChild.toxml() 138 | for element in parsed_steps.getElementsByTagName('li') 139 | ] 140 | parsed_expectedresults = [ 141 | element.firstChild.toxml() 142 | for element in parsed_expectedresults.getElementsByTagName('li') 143 | ] 144 | else: 145 | parsed_steps = [steps] 146 | parsed_expectedresults = [expectedresults] 147 | if len(parsed_steps) == len(parsed_expectedresults): 148 | return list(zip(parsed_steps, parsed_expectedresults)) 149 | else: 150 | return [(steps, expectedresults)] 151 | 152 | 153 | def parse_junit(path): 154 | """Parse a jUnit XML file. 155 | 156 | Given the following jUnit file:: 157 | 158 | 159 | 160 | 161 | ... 162 | 163 | 164 | ... 165 | 166 | 167 | ... 168 | 169 | 170 | 171 | The return will be:: 172 | 173 | [ 174 | {'classname': 'foo1', 'name': 'test_passed', 'status': 'passed'}, 175 | {'classname': 'foo2', 'message': '...', 'name': 'test_skipped', 176 | 'status': 'skipped'}, 177 | {'classname': 'foo3', 'name': 'test_failure', 'status': 'passed'}, 178 | {'classname': 'foo3', 'name': 'test_error', 'status': 'passed'} 179 | ] 180 | 181 | :param str path: Path to the jUnit XML file. 182 | :return: A list of dicts with information about every test 183 | case result. 184 | """ 185 | root = ElementTree.parse(path).getroot() 186 | result = [] 187 | for testcase in root.iter('testcase'): 188 | data = testcase.attrib 189 | # Check if the test has passed or else... 190 | status = [ 191 | element for element in list(testcase) 192 | if element.tag in JUNIT_TEST_STATUS 193 | ] 194 | # ... no status means the test has passed 195 | if status: 196 | data['status'] = status[0].tag 197 | data.update(status[0].attrib) 198 | else: 199 | data['status'] = u'passed' 200 | 201 | result.append(data) 202 | return result 203 | 204 | 205 | def parse_test_results(test_results): 206 | """Return the summary of test results by their status. 207 | 208 | :param test_results: A list of dicts with information about 209 | test results, such as those reported in a jUnit file. 210 | :return: A dictionary containing a summary for all test results 211 | provided by the ``test_results`` parameter, broken down by their 212 | status. 
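A minimal usage sketch chaining this helper with ``parse_junit`` (the path
points at the bundled sample results; the counts shown are illustrative)::

    results = parse_junit('sample_project/results/sample-junit-result.xml')
    summary = parse_test_results(results)
    # summary is a collections.Counter keyed by status, e.g.
    # Counter({'passed': 3, 'failure': 1, 'skipped': 1})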
213 | """ 214 | return Counter([test['status'] for test in test_results]) 215 | 216 | 217 | pass_config = click.make_pass_decorator(config.BetelgeuseConfig, ensure=True) 218 | 219 | 220 | @click.group() 221 | @click.option( 222 | '--config-module', 223 | envvar='BETELGEUSE_CONFIG_MODULE', 224 | help='Python import path to the config module. E.g. ' 225 | '"package.myconfig.module".', 226 | ) 227 | @click.version_option() 228 | @click.pass_context 229 | def cli(ctx, config_module): 230 | """Betelgeuse CLI command group.""" 231 | ctx.obj = config.BetelgeuseConfig(config_module) 232 | 233 | 234 | @cli.command('requirement') 235 | @click.option( 236 | '--approver', 237 | help='Whom the requirements will be approved by.', 238 | multiple=True, 239 | ) 240 | @click.option( 241 | '--assignee', 242 | help='Whom the requirements will be assigned to.', 243 | ) 244 | @click.option('--team', help='Team owning the requirement.',) 245 | @click.option( 246 | '--collect-ignore-path', 247 | help='Ignore path during test collection. ' 248 | 'This option can be specified multiple times.', 249 | multiple=True, 250 | type=click.Path(exists=True), 251 | ) 252 | @click.option( 253 | '--dry-run', 254 | help='Indicate to the importer to not make any change.', 255 | is_flag=True, 256 | ) 257 | @click.option( 258 | '--lookup-method', 259 | default='name', 260 | help='Indicates to the importer which lookup method to use. "id" for ' 261 | 'requirement id or "name" for requirement title.', 262 | type=click.Choice([ 263 | 'id', 264 | 'name', 265 | ]) 266 | ) 267 | @click.option( 268 | '--response-property', 269 | callback=validate_key_value_option, 270 | help='When defined, the impoter will mark all responses with the selector.' 271 | 'The format is "--response-property property_key=property_value".', 272 | ) 273 | @click.argument('source-code-path', type=click.Path(exists=True)) 274 | @click.argument('project') 275 | @click.argument('output-path') 276 | @pass_config 277 | def requirement( 278 | config, team, assignee, approver, collect_ignore_path, dry_run, 279 | lookup_method, response_property, source_code_path, project, 280 | output_path): 281 | """Generate an XML suited to be importer by the requirement importer. 282 | 283 | This will read the source code at SOURCE_CODE_PATH in order to capture the 284 | requirements and generate a XML file place at OUTPUT_PATH. The generated 285 | XML file will be ready to be imported by the XML Requirement Importer. 286 | 287 | The requirements will be created on the project ID provided by PROJECT and 288 | will be assigned to the Polarion user ID provided by USER. 289 | 290 | Other requirement importer options can be set by the various options this 291 | command accepts. Check their help for more information. 
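A command-line sketch (assuming the package's console script is installed as
``betelgeuse``; the user IDs and project ID below are illustrative)::

    betelgeuse requirement --assignee jdoe --approver jroe \
        --lookup-method name sample_project/tests MYPROJECT requirements.xml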
292 | """ 293 | requirements = ElementTree.Element('requirements') 294 | requirements.set('project-id', project) 295 | if response_property: 296 | response_properties = ElementTree.Element('response-properties') 297 | element = ElementTree.Element('response-property') 298 | element.set('name', response_property[0]) 299 | element.set('value', response_property[1]) 300 | response_properties.append(element) 301 | requirements.append(response_properties) 302 | properties = ElementTree.Element('properties') 303 | properties.append(create_xml_property( 304 | 'dry-run', 'true' if dry_run else 'false')) 305 | properties.append(create_xml_property( 306 | 'lookup-method', lookup_method)) 307 | requirements.append(properties) 308 | 309 | source_testcases = itertools.chain(*collector.collect_tests( 310 | source_code_path, collect_ignore_path).values()) 311 | cache = [] 312 | for testcase in source_testcases: 313 | update_testcase_fields(config, testcase) 314 | if ('requirement' in testcase.fields and 315 | testcase.fields['requirement'] not in cache): 316 | requirement_title = testcase.fields['requirement'] 317 | fields = {} 318 | if 'team' in testcase.fields: 319 | fields['team'] = testcase.fields['team'] 320 | if assignee: 321 | fields['assignee'] = assignee 322 | if approver: 323 | fields['approvers'] = approver 324 | requirement = collector.Requirement(requirement_title, fields) 325 | requirement.fields.update( 326 | get_requirement_field_values(config, requirement) 327 | ) 328 | for field in requirement.fields.keys(): 329 | transform_func = getattr( 330 | config, 331 | 'TRANSFORM_REQUIREMENT_{}_VALUE'.format(field.upper()), 332 | None 333 | ) 334 | if callable(transform_func): 335 | fields[field] = transform_func( 336 | requirement.fields[field], 337 | requirement 338 | ) 339 | cache.append(requirement_title) 340 | requirements.append( 341 | create_xml_requirement(config, requirement) 342 | ) 343 | 344 | et = ElementTree.ElementTree(requirements) 345 | et.write(output_path, encoding='utf-8', xml_declaration=True) 346 | 347 | 348 | @cli.command('test-results') 349 | @click.option( 350 | '--path', 351 | default='junit-results.xml', 352 | help='Path to the jUnit XML file.', 353 | type=click.Path(exists=True, dir_okay=False), 354 | ) 355 | def test_results(path): 356 | """Summary of tests from the jUnit XML file.""" 357 | test_summary = parse_test_results(parse_junit(path)) 358 | summary = '\n'.join( 359 | ['{0}: {1}'.format(*status) for status in test_summary.items()] 360 | ).title() 361 | click.echo(summary) 362 | 363 | 364 | def create_xml_property(name, value): 365 | """Create an XML property element and set its name and value attributes.""" 366 | element = ElementTree.Element('property') 367 | element.set('name', name) 368 | element.set('value', value) 369 | return element 370 | 371 | 372 | def get_field_values(config, testcase): 373 | """Return a dict of fields and their values. 374 | 375 | For each field missing a value try to get a default value from the config 376 | module and include the field and its value on the returned dict. 377 | 378 | Fields with values other than ``None`` are returned untouched. 379 | 380 | :param testcase: a ``collector.TestFunction`` instance. 381 | :returns: a dict with all fields populated, the ones already with values 382 | and the ones with default value on the config module. 
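For example, the config module may define defaults either as plain values or
as callables receiving the test case; any ``DEFAULT_<FIELD>_VALUE`` attribute
is picked up for the configured fields (the field names below are
illustrative)::

    # in the module passed via --config-module
    DEFAULT_STATUS_VALUE = 'approved'

    def DEFAULT_CASEAUTOMATION_VALUE(testcase):
        # callables are evaluated per test case, only when the field is unset
        return 'automated'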
383 | """ 384 | fields = testcase.fields.copy() 385 | for field in config.TESTCASE_FIELDS + config.TESTCASE_CUSTOM_FIELDS: 386 | value = fields.get(field) 387 | if value is None: 388 | default = getattr( 389 | config, 'DEFAULT_{}_VALUE'.format(field.upper()), None) 390 | if callable(default): 391 | default = default(testcase) 392 | if default is not None: 393 | fields[field] = default 394 | return fields 395 | 396 | 397 | def get_requirement_field_values(config, requirement): 398 | """Return a dict of requirement fields and their values. 399 | 400 | For each field missing a value, try to get a default value from the config 401 | module and include the field and its value on the returned dict. 402 | 403 | Fields with values other than ``None`` are returned untouched. 404 | 405 | :param requirement: requirement title. 406 | :returns: a dict with all fields populated, include requirement title. 407 | """ 408 | fields = requirement.fields.copy() 409 | for field in config.REQUIREMENT_FIELDS + config.REQUIREMENT_CUSTOM_FIELDS: 410 | value = fields.get(field) 411 | if value is None: 412 | default = getattr( 413 | config, 414 | 'DEFAULT_REQUIREMENT_{}_VALUE'.format(field.upper()), 415 | None, 416 | ) 417 | if callable(default): 418 | default = default(requirement) 419 | if default is not None: 420 | fields[field] = default 421 | return fields 422 | 423 | 424 | def update_testcase_fields(config, testcase): 425 | """Apply testcase fields default values and transformations.""" 426 | if testcase.docstring and not isinstance(testcase.docstring, str): 427 | testcase.docstring = testcase.docstring.decode('utf8') 428 | 429 | # Check if any field needs a default value 430 | testcase.fields = {k.lower(): v for k, v in testcase.fields.items()} 431 | testcase.fields.update(get_field_values(config, testcase)) 432 | 433 | # Apply the available transformations to the testcase fields 434 | for field in testcase.fields.keys(): 435 | transform_func = getattr( 436 | config, 'TRANSFORM_{}_VALUE'.format(field.upper()), None) 437 | if callable(transform_func): 438 | testcase.fields[field] = transform_func( 439 | testcase.fields[field], testcase) 440 | 441 | 442 | def create_xml_testcase(config, testcase, automation_script_format): 443 | """Create an XML testcase element. 444 | 445 | The element will be in the format to be used by the XML test case importer. 446 | """ 447 | update_testcase_fields(config, testcase) 448 | 449 | # If automation_script is not defined on the docstring generate one 450 | if 'automation_script' not in testcase.fields: 451 | testcase.fields['automation_script'] = automation_script_format.format( 452 | path=testcase.module_def.path, 453 | line_number=testcase.function_def.lineno, 454 | ) 455 | 456 | # With all field processing in place, it is time to generate the XML 457 | # testcase node 458 | element = ElementTree.Element('testcase') 459 | 460 | # Set the testcase element attributes 461 | for attribute, field in TESTCASE_ATTRIBUTES_TO_FIELDS.items(): 462 | value = testcase.fields.get(field) 463 | if value is not None: 464 | element.set(attribute, value) 465 | 466 | # Title and description require their own node 467 | for field in ('title', 'description'): 468 | value = testcase.fields.get(field) 469 | if value is not None: 470 | field_element = ElementTree.Element(field) 471 | field_element.text = value 472 | element.append(field_element) 473 | 474 | # Should the testcase be linked to a Requiment? 
475 | if 'requirement' in testcase.fields: 476 | linked_work_items = ElementTree.Element('linked-work-items') 477 | linked_work_item = ElementTree.Element('linked-work-item') 478 | linked_work_item.set('lookup-method', 'name') 479 | linked_work_item.set('role-id', 'verifies') 480 | linked_work_item.set('workitem-id', testcase.fields['requirement']) 481 | linked_work_items.append(linked_work_item) 482 | element.append(linked_work_items) 483 | 484 | # Steps and expected results will be mapped only if both are defined 485 | steps = testcase.fields.get('steps') 486 | expectedresults = testcase.fields.get('expectedresults') 487 | test_steps = None 488 | if steps and expectedresults: 489 | test_steps = ElementTree.Element('test-steps') 490 | for step, expectedresult in map_steps(steps, expectedresults): 491 | test_step = ElementTree.Element('test-step') 492 | test_step_column = ElementTree.Element('test-step-column') 493 | test_step_column.set('id', 'step') 494 | test_step_column.text = step 495 | test_step.append(test_step_column) 496 | test_step_column = ElementTree.Element('test-step-column') 497 | test_step_column.set('id', 'expectedResult') 498 | test_step_column.text = expectedresult 499 | test_step.append(test_step_column) 500 | test_steps.append(test_step) 501 | 502 | # Create the permutation parameter if needed 503 | if testcase.fields.get('parametrized') == 'yes': 504 | parameter = ElementTree.Element('parameter') 505 | parameter.set('name', 'pytest parameters') 506 | test_steps = test_steps or ElementTree.Element('test-steps') 507 | test_step = ElementTree.Element('test-step') 508 | test_step_column = ElementTree.Element('test-step-column') 509 | test_step_column.set('id', 'step') 510 | test_step_column.text = 'Iteration: ' 511 | test_step_column.append(parameter) 512 | test_step.append(test_step_column) 513 | test_step_column = ElementTree.Element('test-step-column') 514 | test_step_column.set('id', 'expectedResult') 515 | test_step_column.text = 'Pass' 516 | test_step.append(test_step_column) 517 | test_steps.append(test_step) 518 | 519 | if test_steps: 520 | element.append(test_steps) 521 | 522 | # Finally include the custom fields 523 | custom_fields = ElementTree.Element('custom-fields') 524 | for field in config.TESTCASE_CUSTOM_FIELDS: 525 | if field not in testcase.fields: 526 | continue 527 | custom_field = ElementTree.Element('custom-field') 528 | custom_field.set('content', testcase.fields[field]) 529 | custom_field.set('id', field) 530 | custom_fields.append(custom_field) 531 | element.append(custom_fields) 532 | return element 533 | 534 | 535 | def create_xml_requirement(config, requirement): 536 | """Create an XML requirement element. 537 | 538 | The element will be in the format to be used by the XML test case importer. 
539 | """
540 | element = ElementTree.Element('requirement')
541 |
542 | if 'assignee' in requirement.fields:
543 | element.set('assignee-id', requirement.fields['assignee'])
544 | element.set('status-id', 'approved')
545 | if 'approvers' in requirement.fields:
546 | element.set('approver-ids', ' '.join(
547 | f'{approver_id}:approved'
548 | for approver_id in requirement.fields['approvers']
549 | ))
550 | if requirement.fields['priority']:
551 | element.set('priority-id', requirement.fields['priority'])
552 | if requirement.fields['severity']:
553 | element.set('severity-id', requirement.fields['severity'])
554 |
555 | title_element = ElementTree.Element('title')
556 | title_element.text = requirement.title
557 | element.append(title_element)
558 |
559 | custom_fields = ElementTree.Element('custom-fields')
560 | for field in config.REQUIREMENT_CUSTOM_FIELDS:
561 | if field not in requirement.fields:
562 | continue
563 | custom_field = ElementTree.Element('custom-field')
564 | custom_field.set('content', requirement.fields[field])
565 | custom_field.set('id', field)
566 | custom_fields.append(custom_field)
567 | element.append(custom_fields)
568 |
569 | return element
570 |
571 |
572 | @cli.command('test-case')
573 | @click.option(
574 | '--automation-script-format',
575 | help=(r'The format for the automation-script field. The variables {path} '
576 | 'and {line_number} are available and will be expanded to the test '
577 | 'case module path and the line number where it is defined '
578 | 'respectively. Default: {path}#{line_number}'),
579 | default='{path}#{line_number}',
580 | )
581 | @click.option(
582 | '--collect-ignore-path',
583 | help='Ignore path during test collection. '
584 | 'This option can be specified multiple times.',
585 | multiple=True,
586 | type=click.Path(exists=True),
587 | )
588 | @click.option(
589 | '--dry-run',
590 | help='Indicate to the importer to not make any change.',
591 | is_flag=True,
592 | )
593 | @click.option(
594 | '--lookup-method',
595 | default='custom',
596 | help='Indicates to the importer which lookup method to use. "id" for work '
597 | 'item id, "custom" for custom id (default) or "name" for test case '
598 | 'title.',
599 | type=click.Choice([
600 | 'custom',
601 | 'id',
602 | 'name',
603 | ])
604 | )
605 | @click.option(
606 | '--lookup-method-custom-field-id',
607 | default='testCaseID',
608 | help='Indicates to the importer which field ID to use when using the '
609 | 'custom id lookup method.',
610 | )
611 | @click.option(
612 | '--response-property',
613 | callback=validate_key_value_option,
614 | help='When defined, the importer will mark all responses with the selector. '
615 | 'The format is "--response-property property_key=property_value".',
616 | )
617 | @click.argument('source-code-path', type=click.Path(exists=True))
618 | @click.argument('project')
619 | @click.argument('output-path')
620 | @pass_config
621 | def test_case(
622 | config, automation_script_format, collect_ignore_path, dry_run,
623 | lookup_method, lookup_method_custom_field_id, response_property,
624 | source_code_path, project, output_path):
625 | """Generate an XML file to be imported by the test-case importer.
626 |
627 | This will read the source code at SOURCE_CODE_PATH in order to capture the
628 | test cases and generate an XML file placed at OUTPUT_PATH. The generated XML
629 | file will be ready to be imported by the XML Test Case Importer.
630 |
631 | The test cases will be created on the project ID provided by the
632 | PROJECT argument.
633 |
634 | Other test case importer options can be set by the various options this
635 | command accepts. Check their help for more information.
636 | """
637 | testcases = ElementTree.Element('testcases')
638 | testcases.set('project-id', project)
639 | if response_property:
640 | response_properties = ElementTree.Element('response-properties')
641 | element = ElementTree.Element('response-property')
642 | element.set('name', response_property[0])
643 | element.set('value', response_property[1])
644 | response_properties.append(element)
645 | testcases.append(response_properties)
646 | properties = ElementTree.Element('properties')
647 | properties.append(create_xml_property(
648 | 'dry-run', 'true' if dry_run else 'false'))
649 | properties.append(create_xml_property(
650 | 'lookup-method', lookup_method))
651 | if lookup_method == 'custom':
652 | properties.append(create_xml_property(
653 | 'polarion-custom-lookup-method-field-id',
654 | lookup_method_custom_field_id
655 | ))
656 | testcases.append(properties)
657 |
658 | source_testcases = itertools.chain(*collector.collect_tests(
659 | source_code_path, collect_ignore_path, config=config).values())
660 | for testcase in source_testcases:
661 | testcases.append(
662 | create_xml_testcase(config, testcase, automation_script_format))
663 |
664 | et = ElementTree.ElementTree(testcases)
665 | et.write(output_path, encoding='utf-8', xml_declaration=True)
666 |
667 |
668 | @cli.command('test-run')
669 | @click.option(
670 | '--collect-ignore-path',
671 | help='Ignore path during test collection. '
672 | 'This option can be specified multiple times.',
673 | multiple=True,
674 | type=click.Path(exists=True),
675 | )
676 | @click.option(
677 | '--create-defects',
678 | help='Specify to make the importer create defects for failed tests.',
679 | is_flag=True,
680 | )
681 | @click.option(
682 | '--custom-fields',
683 | help='Indicates to the importer which custom fields should be set. '
684 | 'Expected format is either id=value or JSON format {"id":"value"}. This '
685 | 'option can be specified multiple times.',
686 | multiple=True,
687 | )
688 | @click.option(
689 | '--dry-run',
690 | help='Indicate to the importer to not make any change.',
691 | is_flag=True,
692 | )
693 | @click.option(
694 | '--lookup-method',
695 | default='custom',
696 | help='Indicates to the importer which lookup method to use. "id" for work '
697 | 'item id or "custom" for custom id (default).',
698 | type=click.Choice([
699 | 'id',
700 | 'custom',
701 | ])
702 | )
703 | @click.option(
704 | '--lookup-method-custom-field-id',
705 | default='testCaseID',
706 | help='Indicates to the importer which field ID to use when using the '
707 | 'custom id lookup method.',
708 | )
709 | @click.option(
710 | '--no-include-skipped',
711 | help='Specify to make the importer not import skipped tests.',
712 | is_flag=True,
713 | )
714 | @click.option(
715 | '--project-span-ids',
716 | help='A comma-separated list of project IDs used to set the project span '
717 | 'field on the test run.'
718 | )
719 | @click.option(
720 | '--response-property',
721 | callback=validate_key_value_option,
722 | help='When defined, the importer will mark all responses with the selector. '
723 | 'The format is "--response-property property_key=property_value".',
724 | )
725 | @click.option(
726 | '--status',
727 | default='finished',
728 | help='Define which status the test run should be set to: "Finished" (default) '
729 | 'or "In Progress"',
730 | type=click.Choice([
731 | 'finished',
732 | 'inprogress',
733 | ])
734 | )
735 | @click.option(
736 | '--test-run-group-id',
737 | help='Test Run GROUP ID to be created/updated.',
738 | )
739 | @click.option(
740 | '--test-run-id',
741 | default='test-run-{0}'.format(time.time()),
742 | help='Test Run ID to be created/updated.',
743 | )
744 | @click.option(
745 | '--test-run-template-id',
746 | help='Test Run template ID.'
747 | )
748 | @click.option(
749 | '--test-run-title',
750 | help='Test Run title.',
751 | )
752 | @click.option(
753 | '--test-run-type-id',
754 | help='Test Run type ID.'
755 | )
756 | @click.argument('junit-path', type=click.Path(exists=True, dir_okay=False))
757 | @click.argument('source-code-path', type=click.Path(exists=True))
758 | @click.argument('user')
759 | @click.argument('project')
760 | @click.argument('output-path')
761 | @pass_config
762 | def test_run(
763 | config, collect_ignore_path, create_defects, custom_fields, dry_run,
764 | lookup_method, lookup_method_custom_field_id, no_include_skipped,
765 | response_property, status, test_run_group_id, test_run_id,
766 | test_run_template_id, test_run_title, test_run_type_id, junit_path,
767 | project_span_ids, source_code_path, user, project, output_path):
768 | """Generate an XML file to be imported by the test-run importer.
769 |
770 | This will read the jUnit XML at JUNIT_PATH and the source code at
771 | SOURCE_CODE_PATH in order to generate an XML file placed at OUTPUT_PATH. The
772 | generated XML file will be ready to be imported by the XML Test Run
773 | Importer.
774 |
775 | The test run will be created on the project ID provided by PROJECT and
776 | will be assigned to the Polarion user ID provided by USER.
777 |
778 | Other test run options can be set by the various options this command
779 | accepts. Check their help for more information.
780 | """ 781 | test_run_id = re.sub(INVALID_CHARS_REGEX, '', test_run_id) 782 | testsuites = ElementTree.Element('testsuites') 783 | properties = ElementTree.Element('properties') 784 | custom_fields = load_custom_fields(custom_fields) 785 | custom_fields.update({ 786 | 'polarion-create-defects': 787 | 'true' if create_defects else 'false', 788 | 'polarion-dry-run': 789 | 'true' if dry_run else 'false', 790 | 'polarion-include-skipped': 791 | 'false' if no_include_skipped else 'true', 792 | }) 793 | if response_property: 794 | key = 'polarion-response-' + response_property[0] 795 | custom_fields[key] = response_property[1] 796 | custom_fields['polarion-lookup-method'] = lookup_method 797 | if lookup_method == 'custom': 798 | custom_fields['polarion-custom-lookup-method-field-id'] = ( 799 | lookup_method_custom_field_id 800 | ) 801 | custom_fields['polarion-project-id'] = project 802 | custom_fields['polarion-testrun-id'] = test_run_id 803 | custom_fields['polarion-testrun-status-id'] = status 804 | if project_span_ids: 805 | custom_fields['polarion-project-span-ids'] = project_span_ids 806 | if test_run_group_id: 807 | custom_fields['polarion-group-id'] = test_run_group_id 808 | if test_run_template_id: 809 | custom_fields['polarion-testrun-template-id'] = test_run_template_id 810 | if test_run_title: 811 | custom_fields['polarion-testrun-title'] = test_run_title 812 | if test_run_type_id: 813 | custom_fields['polarion-testrun-type-id'] = test_run_type_id 814 | custom_fields['polarion-user-id'] = user 815 | properties_names = ( 816 | 'polarion-create-defects', 817 | 'polarion-custom-lookup-method-field-id', 818 | 'polarion-dry-run', 819 | 'polarion-group-id', 820 | 'polarion-include-skipped', 821 | 'polarion-lookup-method', 822 | 'polarion-project-id', 823 | 'polarion-project-span-ids', 824 | 'polarion-testrun-id', 825 | 'polarion-testrun-status-id', 826 | 'polarion-testrun-template-id', 827 | 'polarion-testrun-title', 828 | 'polarion-testrun-type-id', 829 | 'polarion-user-id', 830 | ) 831 | for name, value in custom_fields.items(): 832 | if (not name.startswith('polarion-custom-') and 833 | not name.startswith('polarion-response-') and 834 | name not in properties_names): 835 | name = 'polarion-custom-{}'.format(name) 836 | properties.append(create_xml_property(name, value)) 837 | testsuites.append(properties) 838 | 839 | testcases = {} 840 | for test in itertools.chain(*collector.collect_tests( 841 | source_code_path, collect_ignore_path).values()): 842 | update_testcase_fields(config, test) 843 | testcases[test.junit_id] = test 844 | testsuite = ElementTree.parse(junit_path).getroot() 845 | if testsuite.tag == 'testsuites': 846 | testsuite = list(testsuite)[0] 847 | 848 | for testcase in testsuite.iterfind('testcase'): 849 | junit_test_case_id = '{0}.{1}'.format( 850 | testcase.get('classname'), testcase.get('name')) 851 | pytest_parameters = None 852 | if '[' in junit_test_case_id: 853 | junit_test_case_id, pytest_parameters = junit_test_case_id.split( 854 | '[', 1) 855 | pytest_parameters = pytest_parameters[:-1] 856 | source_test_case = testcases.get(junit_test_case_id) 857 | if not source_test_case: 858 | click.echo( 859 | 'Found {} on jUnit report but not on source code, skipping...' 
860 | .format(junit_test_case_id) 861 | ) 862 | continue 863 | test_properties = ElementTree.Element('properties') 864 | element = ElementTree.Element('property') 865 | element.set('name', 'polarion-testcase-id') 866 | element.set('value', source_test_case.fields['id']) 867 | test_properties.append(element) 868 | if (pytest_parameters and 869 | source_test_case.fields.get('parametrized') == 'yes'): 870 | element = ElementTree.Element('property') 871 | element.set('name', 'polarion-parameter-pytest parameters') 872 | element.set('value', pytest_parameters) 873 | test_properties.append(element) 874 | elif (pytest_parameters and 875 | source_test_case.fields.get('parametrized') != 'yes'): 876 | click.echo( 877 | '{} has a parametrized result of {} but its parametrized ' 878 | 'field is not set to yes. Only one result will be recorded.' 879 | .format(junit_test_case_id, pytest_parameters) 880 | ) 881 | testcase.append(test_properties) 882 | testsuites.append(testsuite) 883 | 884 | et = ElementTree.ElementTree(testsuites) 885 | et.write(output_path, encoding='utf-8', xml_declaration=True) 886 | -------------------------------------------------------------------------------- /betelgeuse/collector.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | """Tools to walk a path and collect test methods and functions.""" 3 | import ast 4 | import collections 5 | import fnmatch 6 | import os 7 | 8 | from betelgeuse.parser import parse_docstring 9 | from betelgeuse.parser import parse_markers 10 | from betelgeuse.source_generator import gen_source 11 | 12 | 13 | class Requirement(object): 14 | """Holds information about Requirements.""" 15 | 16 | def __init__(self, title, fields=None): 17 | """Title is require but initial fields values can be passed.""" 18 | self.title = title 19 | self.fields = fields.copy() if fields else {} 20 | 21 | 22 | class TestFunction(object): 23 | """Wrapper for ``ast.FunctionDef`` which parse docstring information.""" 24 | 25 | def __init__( 26 | self, function_def, parent_class=None, testmodule=None, 27 | config=None 28 | ): 29 | """``ast.FunctionDef`` instance used to extract information.""" 30 | #: The unparsed testcase docstring 31 | self.docstring = ast.get_docstring(function_def) 32 | #: The ``ast.FunctionDef`` representation of the testcase method or 33 | #: function 34 | self.function_def = function_def 35 | #: The testcase function or method name 36 | self.name = function_def.name 37 | if parent_class: 38 | #: If the testcase is a method then the parent class name will be 39 | #: set, otherwise it will be ``None`` 40 | self.parent_class = parent_class.name 41 | #: If the testcase is a method then the parent ``ast.ClasDef`` 42 | #: representation of the parent class will be set, otherwise it 43 | #: will be ``None`` 44 | self.parent_class_def = parent_class 45 | #: If test case is a method then the parent class docstring will be 46 | #: set, otherwise it will be ``None`` 47 | self.class_docstring = ast.get_docstring(self.parent_class_def) 48 | else: 49 | self.parent_class = None 50 | self.parent_class_def = None 51 | self.class_docstring = None 52 | #: The parent module path in Python import path notation 53 | self.testmodule = testmodule.path 54 | #: The parent module ``ast.Module`` representation. 
55 | self.module_def = testmodule
56 | #: The parent module docstring
57 | self.module_docstring = ast.get_docstring(self.module_def)
58 | #: The ``__init__.py`` path for the package containing the test module
59 | #: if it exists, or ``None`` otherwise
60 | self.pkginit = os.path.join(
61 | os.path.dirname(self.testmodule), '__init__.py')
62 | if os.path.exists(self.pkginit):
63 | #: If ``__init__.py`` module exists, this will be the
64 | #: ``ast.Module`` representation of that module, otherwise it will
65 | #: be ``None``
66 | self.pkginit_def = ast.parse(''.join(open(self.pkginit)))
67 | #: If ``__init__.py`` module exists, this will be the
68 | #: docstring of that module, otherwise it will be ``None``
69 | self.pkginit_docstring = ast.get_docstring(self.pkginit_def)
70 | else:
71 | self.pkginit = None
72 | self.pkginit_def = None
73 | self.pkginit_docstring = None
74 | #: The dictionary that will store the field values defined for the
75 | #: testcase. The field value resolution order is the test function or
76 | #: method docstring, the class docstring if it is a method, the module
77 | #: docstring and finally the ``__init__.py`` docstring if present. The
78 | #: search stops at the first value found.
79 | self.fields = {}
80 | #: The list of decorators applied to this testcase
81 | self.decorators = [
82 | gen_source(decorator)
83 | for decorator in self.function_def.decorator_list
84 | ]
85 | #: The list of decorators applied to this testcase's parent class. If
86 | #: this testcase doesn't have a parent class, then it will be
87 | #: ``None``
88 | self.class_decorators = None
89 | if self.parent_class_def:
90 | self.class_decorators = [
91 | gen_source(decorator)
92 | for decorator in self.parent_class_def.decorator_list
93 | ]
94 | self._parse_docstring()
95 | self._parse_markers(config)
96 | self.junit_id = self._generate_junit_id()
97 |
98 | if 'id' not in self.fields:
99 | self.fields['id'] = self.junit_id
100 |
101 | def _parse_markers(self, config=None):
102 | """Parse module, class and function markers."""
103 | markers = [self.module_def.marker_list,
104 | self.class_decorators,
105 | self.decorators]
106 | if markers:
107 | self.fields.update({'markers': parse_markers(markers, config)})
108 |
109 | def _parse_docstring(self):
110 | """Parse package, module, class and function docstrings."""
111 | if self.docstring is None:
112 | return
113 |
114 | # Parse package, module, class and function docstrings. Every loop
115 | # updates the already defined fields. The order of processing ensures
116 | # that the function docstring takes priority over the class, module
117 | # and package docstrings respectively.
118 | docstrings = [
119 | self.pkginit_docstring,
120 | self.module_docstring,
121 | self.class_docstring,
122 | self.docstring,
123 | ]
124 | for docstring in docstrings:
125 | if docstring and not isinstance(docstring, type(u'')):
126 | docstring = docstring.decode('utf-8')
127 | self.fields.update(parse_docstring(docstring))
128 |
129 | def _generate_junit_id(self):
130 | """Generate the jUnit ID for the test.
131 |
132 | It could be either ``path.to.module.test_name`` or
133 | ``path.to.module.ClassName.test_name`` if the test method is defined
134 | within a class.
135 | """ 136 | test_case_id_parts = [ 137 | self.testmodule.replace('/', '.').replace('.py', ''), 138 | self.name 139 | ] 140 | if self.parent_class is not None: 141 | test_case_id_parts.insert(-1, self.parent_class) 142 | return '.'.join(test_case_id_parts) 143 | 144 | 145 | def is_test_module(filename): 146 | """Indicate if ``filename`` match a test module file name.""" 147 | for pat in ('test_*.py', '*_test.py'): 148 | if fnmatch.fnmatch(filename, pat): 149 | return True 150 | return False 151 | 152 | 153 | def _module_markers(module_def): 154 | """Extract markers applied to testcases from the test module level. 155 | 156 | The markers list would be collected from the pytestmark global variable. 157 | """ 158 | markers = [] 159 | for node in module_def.body: 160 | if isinstance(node, ast.Assign): 161 | for target in node.targets: 162 | if isinstance(target, ast.Name) and target.id == 'pytestmark': 163 | if isinstance(node.value, ast.List): 164 | for item in node.value.elts: 165 | if isinstance(item, ast.Attribute): 166 | markers.append(item.attr) 167 | elif isinstance(node.value, ast.Attribute): 168 | markers.append(node.value.attr) 169 | return markers or None 170 | 171 | 172 | def _get_tests(path, config=None): 173 | """Collect tests for the test module located at ``path``.""" 174 | tests = [] 175 | with open(path) as handler: 176 | root = ast.parse(handler.read()) 177 | root.path = path # TODO improve how to pass the path to TestFunction 178 | # Updating test module with module level markers 179 | root.__dict__['marker_list'] = _module_markers(root) 180 | for node in ast.iter_child_nodes(root): 181 | if isinstance(node, ast.ClassDef): 182 | [ 183 | tests.append(TestFunction(subnode, node, root, config)) 184 | for subnode in ast.iter_child_nodes(node) 185 | if isinstance(subnode, ast.FunctionDef) and 186 | subnode.name.startswith('test_') 187 | ] 188 | elif (isinstance(node, ast.FunctionDef) and 189 | node.name.startswith('test_')): 190 | tests.append(TestFunction( 191 | node, testmodule=root, config=config)) 192 | return tests 193 | 194 | 195 | def collect_tests(path, ignore_paths=None, config=None): 196 | """Walk ``path`` and collect test methods and functions found. 197 | 198 | :param config: The config object of `config.BetelgeuseConfig` 199 | :param path: Either a file or directory path to look for test methods and 200 | functions. 201 | :return: A dict mapping a test module path and its test cases. 
202 | """ 203 | path = os.path.normpath(path) 204 | if not ignore_paths: 205 | ignore_paths = () 206 | tests = collections.OrderedDict() 207 | if os.path.isfile(path) and path not in ignore_paths: 208 | if is_test_module(os.path.basename(path)): 209 | tests[path] = _get_tests(path, config) 210 | return tests 211 | for dirpath, _, filenames in os.walk(path): 212 | if dirpath in ignore_paths: 213 | continue 214 | for filename in filenames: 215 | path = os.path.join(dirpath, filename) 216 | if path in ignore_paths: 217 | continue 218 | if is_test_module(filename): 219 | tests[path] = _get_tests(path, config) 220 | return tests 221 | -------------------------------------------------------------------------------- /betelgeuse/config.py: -------------------------------------------------------------------------------- 1 | """Betelgeuse configuration.""" 2 | import importlib 3 | 4 | from betelgeuse import default_config 5 | 6 | 7 | class ConfigModuleError(Exception): 8 | """Indicate issues dealing with the config module.""" 9 | 10 | 11 | class BetelgeuseConfig(object): 12 | """Configuration object for Betelgeuse.""" 13 | 14 | def __init__(self, config_module=None): 15 | """Initialize the configuration.""" 16 | self._config_module = None 17 | for config in dir(default_config): 18 | if config.isupper(): 19 | setattr(self, config, getattr(default_config, config)) 20 | if config_module is not None: 21 | try: 22 | self._config_module = importlib.import_module(config_module) 23 | except ImportError: 24 | raise ConfigModuleError( 25 | 'Config module "{}" can\'t be imported. Make sure it is ' 26 | 'on the Python path.' 27 | .format(config_module) 28 | ) 29 | 30 | for config in dir(self._config_module): 31 | if config.isupper(): 32 | setattr(self, config, getattr(self._config_module, config)) 33 | -------------------------------------------------------------------------------- /betelgeuse/default_config.py: -------------------------------------------------------------------------------- 1 | """Default Betelgeuse configuration.""" 2 | from betelgeuse import parser 3 | 4 | 5 | #################### 6 | # Helper functions # 7 | #################### 8 | 9 | def _get_default_description(testcase): 10 | """Return the default value for description field.""" 11 | return parser.parse_rst(testcase.docstring) 12 | 13 | 14 | def _get_default_title(testcase): 15 | """Return the default value for title field.""" 16 | return testcase.name 17 | 18 | 19 | def _get_default_caseposneg(testcase): 20 | """Return the default value for caseposneg custom field.""" 21 | return 'negative' if 'negative' in testcase.name.lower() else 'positive' 22 | 23 | 24 | def _transform_to_lower(value, requirement_or_testcase): 25 | """Transform a field value to lower case. 26 | 27 | This is used to transform both Requirement and Test Case field values. The 28 | ``requirement_or_testcase`` argument is defined but not used. 
29 | """ 30 | return value.lower() 31 | 32 | 33 | ###################### 34 | # Requirement Fields # 35 | ###################### 36 | 37 | #: Default testcase fields 38 | REQUIREMENT_FIELDS = ( 39 | 'approvers', 40 | 'assignee', 41 | 'categories', 42 | 'description', 43 | 'duedate', 44 | 'id', 45 | 'initialestimate', 46 | 'plannedin', 47 | 'priority', 48 | 'severity', 49 | 'status', 50 | 'title', 51 | ) 52 | 53 | #: Default requirement custom fields 54 | REQUIREMENT_CUSTOM_FIELDS = ( 55 | 'reqtype', 56 | ) 57 | 58 | ################### 59 | # Testcase Fields # 60 | ################### 61 | 62 | #: Default testcase fields 63 | TESTCASE_FIELDS = ( 64 | 'approvers', 65 | 'assignee', 66 | 'description', 67 | 'duedate', 68 | 'expectedresults', 69 | 'id', 70 | 'initialestimate', 71 | 'parametrized', 72 | 'requirement', 73 | 'status', 74 | 'steps', 75 | 'title', 76 | ) 77 | 78 | #: Default testcase custom fields 79 | TESTCASE_CUSTOM_FIELDS = ( 80 | 'arch', 81 | 'automation_script', 82 | 'caseautomation', 83 | 'casecomponent', 84 | 'caseimportance', 85 | 'caselevel', 86 | 'caseposneg', 87 | 'setup', 88 | 'subcomponent', 89 | 'subtype1', 90 | 'subtype2', 91 | 'tags', 92 | 'tcmsarguments', 93 | 'tcmsbug', 94 | 'tcmscaseid', 95 | 'tcmscategory', 96 | 'tcmscomponent', 97 | 'tcmsnotes', 98 | 'tcmsplan', 99 | 'tcmsreference', 100 | 'tcmsrequirement', 101 | 'tcmsscript', 102 | 'tcmstag', 103 | 'teardown', 104 | 'testtier', 105 | 'testtype', 106 | 'upstream', 107 | 'variant', 108 | ) 109 | 110 | ######################## 111 | # Default field values # 112 | ######################## 113 | 114 | # Test case fields default values 115 | DEFAULT_CASEAUTOMATION_VALUE = 'automated' 116 | DEFAULT_CASECOMPONENT_VALUE = '-' 117 | DEFAULT_CASEIMPORTANCE_VALUE = 'medium' 118 | DEFAULT_CASELEVEL_VALUE = 'component' 119 | DEFAULT_CASEPOSNEG_VALUE = _get_default_caseposneg 120 | DEFAULT_DESCRIPTION_VALUE = _get_default_description 121 | DEFAULT_PARAMETRIZED_VALUE = 'no' 122 | DEFAULT_SUBTYPE1_VALUE = '-' 123 | DEFAULT_TESTTYPE_VALUE = 'functional' 124 | DEFAULT_TITLE_VALUE = _get_default_title 125 | DEFAULT_UPSTREAM_VALUE = 'no' 126 | 127 | # Requirement fields default values 128 | DEFAULT_REQUIREMENT_PRIORITY_VALUE = 'high' 129 | DEFAULT_REQUIREMENT_SEVERITY_VALUE = 'should_have' 130 | DEFAULT_REQUIREMENT_STATUS_VALUE = 'approved' 131 | DEFAULT_REQUIREMENT_REQTYPE_VALUE = 'functional' 132 | 133 | #################################### 134 | # Value transformation definitions # 135 | #################################### 136 | 137 | # Test case value transform 138 | TRANSFORM_CASEAUTOMATION_VALUE = _transform_to_lower 139 | TRANSFORM_CASECOMPONENT_VALUE = _transform_to_lower 140 | TRANSFORM_CASEIMPORTANCE_VALUE = _transform_to_lower 141 | TRANSFORM_CASELEVEL_VALUE = _transform_to_lower 142 | TRANSFORM_CASEPOSNEG_VALUE = _transform_to_lower 143 | TRANSFORM_PARAMETRIZED_VALUE = _transform_to_lower 144 | TRANSFORM_SUBTYPE1_VALUE = _transform_to_lower 145 | TRANSFORM_TESTTYPE_VALUE = _transform_to_lower 146 | TRANSFORM_UPSTREAM_VALUE = _transform_to_lower 147 | 148 | # Requirement value transform 149 | TRANSFORM_REQUIREMENT_PRIORITY_VALUE = _transform_to_lower 150 | TRANSFORM_REQUIREMENT_SEVERITY_VALUE = _transform_to_lower 151 | TRANSFORM_REQUIREMENT_STATUS_VALUE = _transform_to_lower 152 | TRANSFORM_REQUIREMENT_REQTYPE_VALUE = _transform_to_lower 153 | -------------------------------------------------------------------------------- /betelgeuse/parser.py: 
-------------------------------------------------------------------------------- 1 | """Parsers for test docstrings.""" 2 | import re 3 | from collections import namedtuple 4 | from io import StringIO 5 | from xml.dom import minidom 6 | 7 | from docutils.core import publish_parts 8 | from docutils.parsers.rst import nodes, roles 9 | from docutils.readers import standalone 10 | from docutils.transforms import frontmatter 11 | from docutils.writers import html5_polyglot as writer 12 | 13 | 14 | RSTParseMessage = namedtuple('RSTParseMessage', 'line level message') 15 | 16 | 17 | class TableFieldListTranslator(writer.HTMLTranslator): 18 | """An HTML 5 translator which creates field lists as HTML tables.""" 19 | 20 | def visit_field_list(self, node): 21 | """Open the field list.""" 22 | # Keep simple paragraphs in the field_body to enable CSS 23 | # rule to start body on new line if the label is too long 24 | classes = 'field-list' 25 | if (self.is_compactable(node)): 26 | classes += ' simple' 27 | self.body.append(self.starttag(node, 'table', CLASS=classes)) 28 | 29 | def depart_field_list(self, node): 30 | """Close the field list.""" 31 | self.body.append('\n') 32 | 33 | def visit_field(self, node): 34 | """Open field.""" 35 | self.body.append(self.starttag( 36 | node, 'tr', '', CLASS=''.join(node.parent['classes']))) 37 | 38 | def depart_field(self, node): 39 | """Close field.""" 40 | self.body.append('\n') 41 | 42 | # as field is ignored, pass class arguments to field-name and field-body: 43 | 44 | def visit_field_name(self, node): 45 | """Open field name.""" 46 | self.body.append(self.starttag( 47 | node, 'th', '', CLASS=''.join(node.parent['classes']))) 48 | 49 | def depart_field_name(self, node): 50 | """Close field name.""" 51 | self.body.append('\n') 52 | 53 | def visit_field_body(self, node): 54 | """Open field body.""" 55 | self.body.append(self.starttag( 56 | node, 'td', '', CLASS=''.join(node.parent['classes']))) 57 | # prevent misalignment of following content if the field is empty: 58 | if not node.children: 59 | self.body.append('

') 60 | 61 | def depart_field_body(self, node): 62 | """Close field body.""" 63 | self.body.append('\n') 64 | 65 | 66 | class HTMLWriter(writer.Writer): 67 | """HTML writer which allows customizing the translator_class.""" 68 | 69 | def __init__(self, translator_class=None): 70 | """Initialize the writer and set the translator_class.""" 71 | writer.Writer.__init__(self) 72 | if translator_class is None: 73 | translator_class = writer.HTMLTranslator 74 | self.translator_class = translator_class 75 | 76 | 77 | class NoDocInfoReader(standalone.Reader): 78 | """Reader that does not do the DocInfo transformation. 79 | 80 | Extend standalone reader and drop the DocInfo transformation. Without that 81 | transformation, the first field list element will remain a field list and 82 | won't be converted to a docinfo element. 83 | """ 84 | 85 | def get_transforms(self): 86 | """Get default transforms without DocInfo.""" 87 | transforms = standalone.Reader.get_transforms(self) 88 | transforms.remove(frontmatter.DocInfo) 89 | return transforms 90 | 91 | 92 | def _register_roles(): 93 | """Register Python roles that Sphinx supports.""" 94 | for role in ( 95 | 'data', 'exc', 'func', 'class', 'const', 'attr', 'meth', 'mod', 96 | 'obj' 97 | ): 98 | roles.register_generic_role(role, nodes.raw) 99 | roles.register_generic_role('py:' + role, nodes.raw) 100 | 101 | 102 | def parse_rst(string, translator_class=None): 103 | """Parse a RST formatted string into HTML.""" 104 | if not string: 105 | return '' 106 | if not hasattr(_register_roles, '_roles_registered'): 107 | _register_roles() 108 | _register_roles._roles_registered = True 109 | 110 | warning_stream = StringIO() 111 | parts = publish_parts( 112 | string, 113 | reader=NoDocInfoReader(), 114 | settings_overrides={ 115 | 'embed_stylesheet': False, 116 | 'input_encoding': 'utf-8', 117 | 'syntax_highlight': 'short', 118 | 'warning_stream': warning_stream, 119 | }, 120 | writer=HTMLWriter(translator_class=translator_class), 121 | ) 122 | 123 | rst_parse_messages = [] 124 | for warning in warning_stream.getvalue().splitlines(): 125 | if not warning or ':' not in warning: 126 | continue 127 | warning = warning.split(' ', 2) 128 | rst_parse_messages.append(RSTParseMessage( 129 | line=warning[0].split(':')[1], 130 | level=warning[1].split('/')[0][1:].lower(), 131 | message=warning[2], 132 | )) 133 | warning_stream.close() 134 | 135 | # TODO: decide what to do with the rst parser warnings and errors 136 | return parts['html_body'] 137 | 138 | 139 | def parse_docstring(docstring=None): 140 | """Parse the docstring and return captured fields. 141 | 142 | For example in the following docstring (using single quote to demo):: 143 | 144 | '''Docstring content. 145 | 146 | More docstring content. 
147 | 148 | :field1: value1 149 | :field2: value2 150 | :field3: value3 151 | ''' 152 | 153 | Will return a dict with the following content:: 154 | 155 | { 156 | 'field1': 'value1', 157 | 'field2': 'value2', 158 | 'field3': 'value3', 159 | } 160 | """ 161 | if not docstring: 162 | return {} 163 | 164 | fields_dict = {} 165 | parsed_docstring = parse_rst(docstring) 166 | if isinstance(parsed_docstring, type(u'')): 167 | parsed_docstring = parsed_docstring.encode('utf-8') 168 | document = minidom.parseString(parsed_docstring) 169 | field_lists = [ 170 | element for element in document.getElementsByTagName('dl') 171 | if element.attributes.get('class') and 172 | 'field-list' in element.attributes.get('class').value 173 | ] 174 | for field_list in field_lists: 175 | field_names = field_list.getElementsByTagName('dt') 176 | field_values = field_list.getElementsByTagName('dd') 177 | for field_name, field_value in zip(field_names, field_values): 178 | field_name = field_name.firstChild.nodeValue.lower() 179 | output = '' 180 | if (len(field_value.childNodes) == 2 and 181 | field_value.childNodes[0].tagName == 'p'): 182 | # childNodes will have two items because the first item will be 183 | # an element and the second item will be a text u'\n' 184 | output = field_value.childNodes[0].firstChild.nodeValue 185 | else: 186 | for node in field_value.childNodes: 187 | output += node.toxml() 188 | field_value = output 189 | fields_dict[field_name] = field_value 190 | return fields_dict 191 | 192 | 193 | def parse_markers(all_markers=None, config=None): 194 | """Parse the markers from module, class and test level for a test. 195 | 196 | This removes the mark prepended words and also pops out the 197 | ignorable marker from the list received from the config object. 198 | 199 | :returns string: Comma separated list of markers from all levels for a test 200 | """ 201 | resolved_markers = [] 202 | ignore_list = getattr(config, 'MARKERS_IGNORE_LIST', None) 203 | 204 | def _process_marker(_marker): 205 | 206 | marker_name = re.findall( 207 | r'(?:pytest\.mark\.)?([^(\s()]+)(?=\s*\(|\s*$)', _marker) 208 | if marker_name: 209 | marker_name = marker_name[0] 210 | 211 | # ignoring the marker if in ignore list 212 | if ignore_list and any( 213 | re.fullmatch( 214 | ignore_word, marker_name 215 | ) for ignore_word in ignore_list 216 | ): 217 | return 218 | 219 | resolved_markers.append(marker_name) 220 | 221 | for sec_marker in all_markers: 222 | # If the marker is none 223 | if not sec_marker: 224 | continue 225 | elif isinstance(sec_marker, list): 226 | for marker in sec_marker: 227 | _process_marker(marker) 228 | else: 229 | _process_marker(sec_marker) 230 | 231 | return ', '.join(resolved_markers) 232 | -------------------------------------------------------------------------------- /betelgeuse/source_generator.py: -------------------------------------------------------------------------------- 1 | """Generates source code from an AST. 2 | 3 | Inpired by the ast.NodeVisitor and unparse.py (see 4 | https://github.com/python/cpython/blob/master/Tools/parser/unparse.py) 5 | """ 6 | import ast 7 | import io 8 | import sys 9 | 10 | # Large float and imaginary literals get turned into infinities in the AST. We 11 | # unparse those infinities to INFSTR. 
12 | INFSTR = '1e' + repr(sys.float_info.max_10_exp + 1) 13 | 14 | 15 | class SourceGenerator(): 16 | """Helper class to traverse the AST and generate the source code.""" 17 | 18 | def __init__(self, node): 19 | """Make the generated source code available on the ``source`` attr.""" 20 | self._source = io.StringIO('') 21 | self._visit(node) 22 | self.source = self._source.getvalue() 23 | self._source.close() 24 | 25 | def _visit(self, node): 26 | visitor = getattr(self, f'_visit_{node.__class__.__name__}'.lower()) 27 | visitor(node) 28 | 29 | def _iterate_seq(self, inter, f, seq): 30 | seq = iter(seq) 31 | try: 32 | f(next(seq)) 33 | except StopIteration: 34 | pass 35 | else: 36 | for x in seq: 37 | inter() 38 | f(x) 39 | 40 | def _visit_attribute(self, node): 41 | self._visit(node.value) 42 | self._source.write(f'.{node.attr}') 43 | 44 | def _visit_call(self, node): 45 | self._visit(node.func) 46 | self._source.write('(') 47 | comma = False 48 | for arg in node.args: 49 | if comma: 50 | self._source.write(', ') 51 | else: 52 | comma = True 53 | self._visit(arg) 54 | for keyword in node.keywords: 55 | if comma: 56 | self._source.write(', ') 57 | else: 58 | comma = True 59 | self._visit(keyword) 60 | self._source.write(')') 61 | 62 | def _write_constant(self, value): 63 | if isinstance(value, (float, complex)): 64 | # Substitute overflowing decimal literal for AST infinities. 65 | self._source.write(repr(value).replace('inf', INFSTR)) 66 | else: 67 | self._source.write(repr(value)) 68 | 69 | def _visit_constant(self, t): 70 | value = t.value 71 | if isinstance(value, tuple): 72 | self.write('(') 73 | if len(value) == 1: 74 | self._write_constant(value[0]) 75 | self.write(',') 76 | else: 77 | self._iterate_seq( 78 | lambda: self._source.write(', '), 79 | self._write_constant, 80 | value 81 | ) 82 | self.write(')') 83 | elif value is ...: 84 | self.write('...') 85 | else: 86 | if t.kind == 'u': 87 | self.write('u') 88 | self._write_constant(t.value) 89 | 90 | def _visit_name(self, node): 91 | self._source.write(node.id) 92 | 93 | def _visit_str(self, node): 94 | self._source.write(repr(node.s)) 95 | 96 | def _visit_tuple(self, node): 97 | self._source.write('(') 98 | if len(node.elts) == 1: 99 | self._visit(node.elts[0]) 100 | self._source.write(',') 101 | else: 102 | self._iterate_seq( 103 | lambda: self._source.write(', '), self._visit, node.elts) 104 | self._source.write(')') 105 | 106 | def _visit_bytes(self, node): 107 | self._source.write(repr(node.s)) 108 | 109 | def _visit_joinedstr(self, node): 110 | self._source.write('f') 111 | string = io.StringIO() 112 | self._fstring_joinedstr(node, string.write) 113 | self._source.write(repr(string.getvalue())) 114 | 115 | def _fstring_joinedstr(self, node, write): 116 | for value in node.values: 117 | meth = getattr(self, f'_fstring_{type(value).__name__}'.lower()) 118 | meth(value, write) 119 | 120 | def _fstring_str(self, node, write): 121 | value = node.s.replace('{', '{{').replace('}', '}}') 122 | write(value) 123 | 124 | def _fstring_constant(self, node, write): 125 | value = node.value.replace('{', '{{').replace('}', '}}') 126 | write(value) 127 | 128 | def _fstring_formattedvalue(self, node, write): 129 | write('{') 130 | expr = SourceGenerator(node.value).source 131 | if expr.startswith('{'): 132 | write(' ') # Separate pair of opening brackets as "{ {" 133 | write(expr) 134 | if node.conversion != -1: 135 | conversion = chr(node.conversion) 136 | write(f'!{conversion}') 137 | if node.format_spec: 138 | write(':') 139 | meth = getattr( 
140 | self, f'_fstring_{type(node.format_spec).__name__}'.lower()) 141 | meth(node.format_spec, write) 142 | write('}') 143 | 144 | def _visit_nameconstant(self, node): 145 | self._source.write(repr(node.value)) 146 | 147 | def _visit_num(self, node): 148 | # Substitute overflowing decimal literal for AST infinities. 149 | self._source.write(repr(node.n).replace('inf', INFSTR)) 150 | 151 | def _visit_list(self, node): 152 | self._source.write('[') 153 | self._iterate_seq( 154 | lambda: self._source.write(', '), self._visit, node.elts) 155 | self._source.write(']') 156 | 157 | def _visit_listcomp(self, node): 158 | self._source.write('[') 159 | self._visit(node.elt) 160 | for gen in node.generators: 161 | self._visit(gen) 162 | self._source.write(']') 163 | 164 | def _visit_generatorexp(self, node): 165 | self._source.write('(') 166 | self._visit(node.elt) 167 | for gen in node.generators: 168 | self._visit(gen) 169 | self._source.write(')') 170 | 171 | def _visit_setcomp(self, node): 172 | self._source.write('{') 173 | self._visit(node.elt) 174 | for gen in node.generators: 175 | self._visit(gen) 176 | self._source.write('}') 177 | 178 | def _visit_dictcomp(self, node): 179 | self._source.write('{') 180 | self._visit(node.key) 181 | self._source.write(': ') 182 | self._visit(node.value) 183 | for gen in node.generators: 184 | self._visit(gen) 185 | self._source.write('}') 186 | 187 | def _visit_comprehension(self, node): 188 | self._source.write(' for ') 189 | self._visit(node.target) 190 | self._source.write(' in ') 191 | self._visit(node.iter) 192 | for if_clause in node.ifs: 193 | self._source.write(' if ') 194 | self._visit(if_clause) 195 | 196 | def _visit_ifexp(self, node): 197 | self._source.write('(') 198 | self._visit(node.body) 199 | self._source.write(' if ') 200 | self._visit(node.test) 201 | self._source.write(' else ') 202 | self._visit(node.orelse) 203 | self._source.write(')') 204 | 205 | def _visit_set(self, node): 206 | self._source.write('{') 207 | self._iterate_seq( 208 | lambda: self._source.write(', '), self._visit, node.elts) 209 | self._source.write('}') 210 | 211 | def _visit_dict(self, node): 212 | self._source.write('{') 213 | 214 | def write_key_value_pair(k, v): 215 | self._visit(k) 216 | self._source.write(': ') 217 | self._visit(v) 218 | 219 | def write_item(item): 220 | k, v = item 221 | if k is None: 222 | # for dictionary unpacking operator in dicts {**{'y': 2}} see 223 | # PEP 448 for details 224 | self._source.write('**') 225 | self._visit(v) 226 | else: 227 | write_key_value_pair(k, v) 228 | self._iterate_seq( 229 | lambda: self._source.write(', '), 230 | write_item, 231 | zip(node.keys, node.values) 232 | ) 233 | self._source.write('}') 234 | 235 | unop = {'Invert': '~', 'Not': 'not', 'UAdd': '+', 'USub': '-'} 236 | 237 | def _visit_unaryop(self, node): 238 | self._source.write('(') 239 | self._source.write(self.unop[node.op.__class__.__name__]) 240 | self._source.write(' ') 241 | self._visit(node.operand) 242 | self._source.write(')') 243 | 244 | binop = { 245 | 'Add': '+', 'Sub': '-', 'Mult': '*', 'MatMult': '@', 'Div': '/', 'Mod': 246 | '%', 'LShift': '<<', 'RShift': '>>', 'BitOr': '|', 'BitXor': '^', 247 | 'BitAnd': '&', 'FloorDiv': '//', 'Pow': '**' 248 | } 249 | 250 | def _visit_binop(self, node): 251 | self._source.write('(') 252 | self._visit(node.left) 253 | self._source.write(' ' + self.binop[node.op.__class__.__name__] + ' ') 254 | self._visit(node.right) 255 | self._source.write(')') 256 | 257 | cmpops = { 258 | 'Eq': '==', 'NotEq': '!=', 
'Lt': '<', 'LtE': '<=', 'Gt': '>', 'GtE': 259 | '>=', 'Is': 'is', 'IsNot': 'is not', 'In': 'in', 'NotIn': 'not in' 260 | } 261 | 262 | def _visit_compare(self, node): 263 | self._source.write('(') 264 | self._visit(node.left) 265 | for o, e in zip(node.ops, node.comparators): 266 | self._source.write(' ' + self.cmpops[o.__class__.__name__] + ' ') 267 | self._visit(e) 268 | self._source.write(')') 269 | 270 | boolops = {ast.And: 'and', ast.Or: 'or'} 271 | 272 | def _visit_boolop(self, node): 273 | self._source.write('(') 274 | s = f' {self.boolops[node.op.__class__]} ' 275 | self._iterate_seq( 276 | lambda: self._source.write(s), self._visit, node.values) 277 | self._source.write(')') 278 | 279 | def _visit_subscript(self, node): 280 | self._visit(node.value) 281 | self._source.write('[') 282 | self._visit(node.slice) 283 | self._source.write(']') 284 | 285 | def _visit_starred(self, node): 286 | self._source.write('*') 287 | self._visit(node.value) 288 | 289 | def _visit_index(self, node): 290 | self._visit(node.value) 291 | 292 | def _visit_slice(self, node): 293 | if node.lower: 294 | self._visit(node.lower) 295 | self._source.write(':') 296 | if node.upper: 297 | self._visit(node.upper) 298 | if node.step: 299 | self._source.write(':') 300 | self._visit(node.step) 301 | 302 | def _visit_arg(self, node): 303 | self._source.write(node.arg) 304 | 305 | def _visit_arguments(self, node): 306 | first = True 307 | # normal arguments 308 | defaults = [None] * (len(node.args) - len(node.defaults)) 309 | defaults = defaults + node.defaults 310 | for a, d in zip(node.args, defaults): 311 | if first: 312 | first = False 313 | else: 314 | self._source.write(', ') 315 | self._visit(a) 316 | if d: 317 | self._source.write('=') 318 | self._visit(d) 319 | 320 | # varargs, or bare '*' if no varargs but keyword-only arguments present 321 | if node.vararg or node.kwonlyargs: 322 | if first: 323 | first = False 324 | else: 325 | self._source.write(', ') 326 | self._source.write('*') 327 | if node.vararg: 328 | self._source.write(node.vararg.arg) 329 | if node.vararg.annotation: 330 | self._source.write(': ') 331 | self._visit(node.vararg.annotation) 332 | 333 | # keyword-only arguments 334 | if node.kwonlyargs: 335 | for a, d in zip(node.kwonlyargs, node.kw_defaults): 336 | if first: 337 | first = False 338 | else: 339 | self._source.write(', ') 340 | self._visit(a) 341 | if d: 342 | self._source.write('=') 343 | self._visit(d) 344 | 345 | # kwargs 346 | if node.kwarg: 347 | if first: 348 | first = False 349 | else: 350 | self._source.write(', ') 351 | self._source.write(f'**{node.kwarg.arg}') 352 | if node.kwarg.annotation: 353 | self._source.write(': ') 354 | self._visit(node.kwarg.annotation) 355 | 356 | def _visit_keyword(self, node): 357 | if node.arg is None: 358 | self._source.write('**') 359 | else: 360 | self._source.write(node.arg) 361 | self._source.write('=') 362 | self._visit(node.value) 363 | 364 | def _visit_lambda(self, node): 365 | self._source.write('(') 366 | self._source.write('lambda ') 367 | self._visit(node.args) 368 | self._source.write(': ') 369 | self._visit(node.body) 370 | self._source.write(')') 371 | 372 | 373 | def gen_source(node): 374 | """Generate the source code based on the node AST.""" 375 | return SourceGenerator(node).source 376 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can 
set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " applehelp to make an Apple Help Book" 34 | @echo " devhelp to make HTML files and a Devhelp project" 35 | @echo " epub to make an epub" 36 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 37 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 38 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 39 | @echo " text to make text files" 40 | @echo " man to make manual pages" 41 | @echo " texinfo to make Texinfo files" 42 | @echo " info to make Texinfo files and run them through makeinfo" 43 | @echo " gettext to make PO message catalogs" 44 | @echo " changes to make an overview of all changed/added/deprecated items" 45 | @echo " xml to make Docutils-native XML files" 46 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 47 | @echo " linkcheck to check all external links for integrity" 48 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 49 | @echo " coverage to run coverage check of the documentation (if enabled)" 50 | 51 | clean: 52 | rm -rf $(BUILDDIR)/* 53 | 54 | html: 55 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 56 | @echo 57 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 58 | 59 | dirhtml: 60 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 61 | @echo 62 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 63 | 64 | singlehtml: 65 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 66 | @echo 67 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 68 | 69 | pickle: 70 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 71 | @echo 72 | @echo "Build finished; now you can process the pickle files." 73 | 74 | json: 75 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 76 | @echo 77 | @echo "Build finished; now you can process the JSON files." 
78 | 79 | htmlhelp: 80 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 81 | @echo 82 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 83 | ".hhp project file in $(BUILDDIR)/htmlhelp." 84 | 85 | qthelp: 86 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 87 | @echo 88 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 89 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 90 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Betelgeuse.qhcp" 91 | @echo "To view the help file:" 92 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Betelgeuse.qhc" 93 | 94 | applehelp: 95 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 96 | @echo 97 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 98 | @echo "N.B. You won't be able to view it unless you put it in" \ 99 | "~/Library/Documentation/Help or install it in your application" \ 100 | "bundle." 101 | 102 | devhelp: 103 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 104 | @echo 105 | @echo "Build finished." 106 | @echo "To view the help file:" 107 | @echo "# mkdir -p $$HOME/.local/share/devhelp/Betelgeuse" 108 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Betelgeuse" 109 | @echo "# devhelp" 110 | 111 | epub: 112 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 113 | @echo 114 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 115 | 116 | latex: 117 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 118 | @echo 119 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 120 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 121 | "(use \`make latexpdf' here to do that automatically)." 122 | 123 | latexpdf: 124 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 125 | @echo "Running LaTeX files through pdflatex..." 126 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 127 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 128 | 129 | latexpdfja: 130 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 131 | @echo "Running LaTeX files through platex and dvipdfmx..." 132 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 133 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 134 | 135 | text: 136 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 137 | @echo 138 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 139 | 140 | man: 141 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 142 | @echo 143 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 144 | 145 | texinfo: 146 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 147 | @echo 148 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 149 | @echo "Run \`make' in that directory to run these through makeinfo" \ 150 | "(use \`make info' here to do that automatically)." 151 | 152 | info: 153 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 154 | @echo "Running Texinfo files through makeinfo..." 155 | make -C $(BUILDDIR)/texinfo info 156 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 157 | 158 | gettext: 159 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 160 | @echo 161 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 162 | 163 | changes: 164 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 165 | @echo 166 | @echo "The overview file is in $(BUILDDIR)/changes." 
167 | 168 | linkcheck: 169 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 170 | @echo 171 | @echo "Link check complete; look for any errors in the above output " \ 172 | "or in $(BUILDDIR)/linkcheck/output.txt." 173 | 174 | doctest: 175 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 176 | @echo "Testing of doctests in the sources finished, look at the " \ 177 | "results in $(BUILDDIR)/doctest/output.txt." 178 | 179 | coverage: 180 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 181 | @echo "Testing of coverage in the sources finished, look at the " \ 182 | "results in $(BUILDDIR)/coverage/python.txt." 183 | 184 | xml: 185 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 186 | @echo 187 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 188 | 189 | pseudoxml: 190 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 191 | @echo 192 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 193 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | """Sphinx documentation generator configuration file. 3 | 4 | The full set of configuration options is listed on the Sphinx website: 5 | http://sphinx-doc.org/config.html 6 | """ 7 | from __future__ import unicode_literals 8 | 9 | import os 10 | import re 11 | import sys 12 | 13 | 14 | # Add the Betelgeuse root directory to the system path. This allows 15 | # references such as :mod:`betelgeuse.whatever` to be processed 16 | # correctly. 17 | ROOT_DIR = os.path.abspath(os.path.join( 18 | os.path.dirname(__file__), 19 | os.path.pardir 20 | )) 21 | sys.path.insert(0, ROOT_DIR) 22 | 23 | # We pass the raw version string to Version() to ensure it is compliant with 24 | # PEP 440. An InvalidVersion exception is raised if the version is 25 | # non-conformant, so the act of generating documentation serves as a unit test 26 | # for the contents of the `VERSION` file. 27 | # 28 | # We use the raw version string when generating documentation for the sake of 29 | # human friendliness: the meaning of '2016.02.18' is presumably more intuitive 30 | # than the meaning of '2016.2.18'. The regex enforcing this format allows 31 | # additional segments. This is done to allow multiple releases in a single day. 32 | # For example, 2016.02.18.3 is the fourth release in a given day. 
33 | with open(os.path.join(ROOT_DIR, 'VERSION')) as handle: 34 | VERSION = handle.read().strip() 35 | assert re.match(r'\d+\.\d+\.\d+', VERSION) is not None 36 | 37 | # Project information --------------------------------------------------- 38 | 39 | project = u'Betelgeuse' 40 | copyright = u'2015, Satellite QE' 41 | version = release = VERSION 42 | 43 | # General configuration -------------------------------------------------- 44 | 45 | author = 'Satellite QE' 46 | autodoc_default_flags = ['members'] 47 | exclude_patterns = ['_build'] 48 | extensions = [ 49 | 'sphinx.ext.autodoc', 50 | 'sphinx.ext.autosectionlabel', 51 | ] 52 | master_doc = 'index' 53 | nitpicky = True 54 | source_suffix = '.rst' 55 | 56 | # Options for format-specific output ------------------------------------- 57 | 58 | htmlhelp_basename = 'Betelgeusedoc' 59 | latex_documents = [( 60 | master_doc, 61 | project + '.tex', 62 | project + ' Documentation', 63 | author, 64 | 'manual' 65 | )] 66 | man_pages = [( 67 | master_doc, 68 | project.lower(), 69 | project + ' Documentation', 70 | [author], 71 | 1 72 | )] 73 | texinfo_documents = [( 74 | master_doc, 75 | project, 76 | project + ' Documentation', 77 | author, 78 | project, 79 | ('Betelgeuse reads standard Python test cases and generates XML files ' 80 | 'that are suited to be imported by Polarion importers.'), 81 | 'Miscellaneous' 82 | )] 83 | -------------------------------------------------------------------------------- /docs/config.rst: -------------------------------------------------------------------------------- 1 | =============================== 2 | Betelgeuse Configuration Module 3 | =============================== 4 | 5 | Betelgeuse ships with a :ref:`Default Configuration` which defines custom 6 | fields, default values for some fields and some transformations. 7 | 8 | You can configure Betelgeuse and provide your own custom fields, default values 9 | and transformations by providing the ``--config-module`` option. The value of 10 | ``--config-module`` should be in Python path syntax, e.g. 11 | ``mycustom.config_module``. Note that the config module should be on the Python 12 | `import search path`_. 13 | 14 | Some custom fields are enumerations and expect specific values. You can 15 | check the :doc:`Custom fields' values choices <customfieldsvalues>` document 16 | for a reference of the values allowed for some of the fields. Not all fields 17 | that are enumerations are listed in the document, only the ones that are not 18 | supposed to be customized. 19 | 20 | .. note:: 21 | 22 | Polarion allows each project to customize its own custom fields. If you 23 | find an error message while importing the test cases, first check if the 24 | value matches the case (choice values are case sensitive) and then check if 25 | your project has customized the enumeration for that field. 26 | 27 | .. _import search path: http://www.diveintopython3.net/your-first-python-program.html#importsearchpath 28 | 29 | Tutorial 30 | ======== 31 | 32 | This tutorial shows how to create a configuration module which adds two more 33 | custom fields by extending the default configuration custom fields list. In 34 | addition to that, it will provide a default value for each added custom field 35 | and a transformation function to do final processing on the first added 36 | field. 37 | 38 | Let's start by creating a new file named ``my_custom_config.py``. Then the custom configuration can be added.
39 | 40 | To provide two additional custom fields you need to read the custom 41 | fields that ship with Betelgeuse and extend them with the new ones. 42 | Betelgeuse's default configuration can be accessed by importing the module 43 | ``betelgeuse.default_config``. Add the following content to the 44 | ``my_custom_config.py`` file: 45 | 46 | .. code-block:: python 47 | 48 | from betelgeuse import default_config 49 | 50 | TESTCASE_CUSTOM_FIELDS = default_config.TESTCASE_CUSTOM_FIELDS + ['myfield1', 'myfield2'] 51 | 52 | With that in place, running ``betelgeuse --config-module my_custom_config 53 | test-case ...`` will make Betelgeuse include ``myfield1`` and ``myfield2`` in 54 | the generated XML if the test case docstring includes them. 55 | 56 | The next step is to provide a default value for each added field. Betelgeuse 57 | will look for an attribute called ``DEFAULT_{field_name.upper()}_VALUE`` for 58 | each field and, if it is defined, the default value will then be evaluated. The 59 | default value can be a plain string or a callable; for the latter it will be 60 | called with a test case object passed to it, see `Testcase objects`_ for 61 | information about the available attributes. 62 | 63 | The ``myfield1`` default value will be a plain string and the ``myfield2`` 64 | default value will be a callable that will return the current date. Add the 65 | following lines to the ``my_custom_config.py`` file: 66 | 67 | .. code-block:: python 68 | 69 | import datetime 70 | 71 | def get_default_myfield2(testcase): 72 | """Return the current date as string.""" 73 | return str(datetime.date.today()) 74 | 75 | 76 | DEFAULT_MYFIELD1_VALUE = 'custom value' 77 | DEFAULT_MYFIELD2_VALUE = get_default_myfield2 78 | 79 | With that, whenever the added fields are not specified on a test case 80 | docstring, the configured default values are going to be used. 81 | 82 | In addition to custom values, a transformation function can be defined and it 83 | will be called after assigning all the field values. A transformation function is 84 | useful, for example, to ensure lower or upper case on a value. Betelgeuse will 85 | look for an attribute called ``TRANSFORM_{field_name.upper()}_VALUE`` on the 86 | configuration module and, if defined, will call that function passing the value 87 | and the testcase object, see `Testcase objects`_ for information about the available 88 | attributes. 89 | 90 | Let's define a transformation function that will prefix the ``myfield1`` value with 91 | the value of ``myfield2``. Add the following lines to the 92 | ``my_custom_config.py`` file: 93 | 94 | .. code-block:: python 95 | 96 | def prefix_with_myfield2_value(value, testcase): 97 | """Prefix the value with the value of myfield2 field.""" 98 | return '{} {}'.format(testcase.fields['myfield2'], value) 99 | 100 | TRANSFORM_MYFIELD1_VALUE = prefix_with_myfield2_value 101 | 102 | With that the needed configuration is in place and Betelgeuse can be run:: 103 | 104 | betelgeuse --config-module my_custom_config test-case \ 105 | sample_project/tests \ 106 | PROJECT test-cases.xml 107 | 108 | It will generate the ``test-cases.xml`` file and the added fields should be set 109 | with the default values configured. It will use the default values since the 110 | new fields are not defined on any of the ``sample_project`` test cases. 111 | 112 | Starting at version `1.8.0`, Betelgeuse also supports configuring Requirements 113 | fields, custom fields, field default values and field transformation 114 | functions. All of these work in the same fashion as for Test Cases. The difference 115 | is that the default value attribute will be looked up as 116 | ``DEFAULT_REQUIREMENT_{field_name.upper()}_VALUE`` and 117 | ``TRANSFORM_REQUIREMENT_{field_name.upper()}_VALUE`` for the transformation 118 | function. 119 | 120 | As an example, to define a default value for the ``priority`` field, you can do 121 | so by having ``DEFAULT_REQUIREMENT_PRIORITY_VALUE = "medium"`` on your config 122 | file. If the default value is set 123 | as a callable, then it will be called passing the related ``Requirement`` 124 | object. A custom transformation function for the same 125 | field can be specified by having ``TRANSFORM_REQUIREMENT_PRIORITY_VALUE = 126 | my_custom_transform`` on the config. Note that transformation functions for 127 | Requirements' field values will receive the related field value as the first 128 | argument and the related ``Requirement`` object as the second.
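Putting those pieces together, a minimal sketch of such a Requirement configuration could look like the following. Only the ``DEFAULT_REQUIREMENT_*_VALUE`` and ``TRANSFORM_REQUIREMENT_*_VALUE`` attribute names come from Betelgeuse; the ``lowercase_priority`` helper is purely illustrative and not something Betelgeuse ships with:

.. code-block:: python

    def lowercase_priority(value, requirement):
        """Normalize the priority value to lower case.

        ``value`` is the current field value and ``requirement`` is the
        related Requirement object (illustrative transform only).
        """
        return value.lower()


    DEFAULT_REQUIREMENT_PRIORITY_VALUE = 'medium'
    TRANSFORM_REQUIREMENT_PRIORITY_VALUE = lowercase_priority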
129 | 130 | To pass the config module to the requirements command, run the following: 131 | 132 | .. code-block:: console 133 | 134 | $ betelgeuse --config-module my_custom_config requirement \ 135 | --assignee assignee \ 136 | --approver approver1 \ 137 | --approver approver2 \ 138 | sample_project/tests \ 139 | PROJECT \ 140 | betelgeuse-requirements.xml 141 | 142 | .. note:: 143 | 144 | Passing the options ``--assignee`` and ``--approver`` to the command above 145 | will set the values on the related fields before processing the 146 | configuration module. That means that the values will be overridden if the 147 | config module defines a default value for those fields. The value passed via 148 | command line will be available on the ``Requirement`` object passed to the 149 | default or transformation callables. For example, to access the value of 150 | assignee (before it is overridden), use 151 | ``requirement_obj.fields.get("assignee")``. 152 | 153 | Default Configuration 154 | ===================== 155 | 156 | The default configuration includes all the fields and custom fields that 157 | Betelgeuse will look for when parsing the source code. It also provides the 158 | default values and transformations for some of the fields. 159 | 160 | .. note:: 161 | 162 | The testcase fields are present on the configuration for information only. 163 | Each field requires specific processing, and because of that, Betelgeuse won't 164 | be able to process additional fields. 165 | 166 | If you override the ``betelgeuse.default_config.TESTCASE_FIELDS`` and 167 | remove some of the fields, they will not be processed and added to the 168 | generated XML. It is highly recommended to avoid overriding or extending 169 | this configuration. 170 | 171 | You can override or extend any of the defined information on your configuration 172 | module. Make sure to use valid values or your import will fail, since Betelgeuse 173 | does not validate the values against Polarion. 174 | 175 | .. literalinclude:: ../betelgeuse/default_config.py 176 | :linenos: 177 | 178 | Requirement objects 179 | =================== 180 | 181 | .. autoclass:: betelgeuse.collector.Requirement 182 | :members: 183 | 184 | Testcase objects 185 | ================ 186 | 187 | ..
autoclass:: betelgeuse.collector.TestFunction 188 | :members: 189 | -------------------------------------------------------------------------------- /docs/customfieldsvalues.rst: -------------------------------------------------------------------------------- 1 | Custom fields' values choices 2 | ============================= 3 | 4 | The table below maps the custom fields which expect some specific values. For 5 | `testtype`, `subtype1` and `subtype2` please refer to the next table. 6 | 7 | +----------------+--------------------+ 8 | | Field | Values | 9 | +================+====================+ 10 | | arch | i386 | 11 | | +--------------------+ 12 | | | x8664 | 13 | | +--------------------+ 14 | | | ppc64 | 15 | | +--------------------+ 16 | | | ppc64 | 17 | | +--------------------+ 18 | | | s390x | 19 | | +--------------------+ 20 | | | ia64 | 21 | +----------------+--------------------+ 22 | | caseautomation | automated | 23 | | +--------------------+ 24 | | | manualonly | 25 | | +--------------------+ 26 | | | notautomated | 27 | +----------------+--------------------+ 28 | | caseimportance | critical | 29 | | +--------------------+ 30 | | | high | 31 | | +--------------------+ 32 | | | medium | 33 | | +--------------------+ 34 | | | low | 35 | +----------------+--------------------+ 36 | | caselevel | component [#f1]_ | 37 | | +--------------------+ 38 | | | integration [#f2]_ | 39 | | +--------------------+ 40 | | | system [#f3]_ | 41 | | +--------------------+ 42 | | | acceptance [#f4]_ | 43 | +----------------+--------------------+ 44 | | caseposneg | positive | 45 | | +--------------------+ 46 | | | negative | 47 | +----------------+--------------------+ 48 | | upstream | yes | 49 | | +--------------------+ 50 | | | no | 51 | +----------------+--------------------+ 52 | | variant | server | 53 | | +--------------------+ 54 | | | workstation | 55 | | +--------------------+ 56 | | | client | 57 | +----------------+--------------------+ 58 | 59 | The following table maps the values that can go with each `testtype`, 60 | `subtype1` and `subtype2`. Depending on the value for one field only a limited 61 | set of values can be used on the others fields. 
62 | 63 | +---------------+------------------+----------------+ 64 | | testtype | subtype1 | subtype2 | 65 | +===============+==================+================+ 66 | | functional | \- | \- | 67 | +---------------+------------------+----------------+ 68 | | nonfunctional | \- | \- | 69 | + +------------------+----------------+ 70 | | | compliance | 508 | 71 | + + +----------------+ 72 | | | | commoncriteria | 73 | + + +----------------+ 74 | | | | fips | 75 | + + +----------------+ 76 | | | | whql | 77 | + +------------------+----------------+ 78 | | | documentation | help | 79 | + + +----------------+ 80 | | | | userguide | 81 | + +------------------+----------------+ 82 | | | i18nl10n | \- | 83 | + +------------------+----------------+ 84 | | | installability | \- | 85 | + +------------------+----------------+ 86 | | | interoperability | \- | 87 | + +------------------+----------------+ 88 | | | performance | load [#f5]_ | 89 | + + +----------------+ 90 | | | | stress [#f6]_ | 91 | + +------------------+----------------+ 92 | | | reliability | \- | 93 | + +------------------+----------------+ 94 | | | recoveryfailover | \- | 95 | + +------------------+----------------+ 96 | | | scalability | \- | 97 | + +------------------+----------------+ 98 | | | usability | \- | 99 | +---------------+------------------+----------------+ 100 | | structural | \- | \- | 101 | +---------------+------------------+----------------+ 102 | 103 | .. [#f1] Component testing (also known as unit, module or program testing) 104 | searches for defects in, and verifies the functioning of, software modules, 105 | programs, objects, classes, etc., that are separately testable. 106 | .. [#f2] Integration testing tests interfaces between components, interactions 107 | with different parts of a system. 108 | .. [#f3] In system testing, the test environment should correspond to the final 109 | target or production environment as much as possible in order to minimize 110 | the risk of environment-specific failures not being found in testing. 111 | .. [#f4] The goal in acceptance testing is to establish confidence in the 112 | system, parts of the system or specific non-functional characteristics of 113 | the system. 114 | .. [#f5] A type of performance testing conducted to evaluate the behavior of a 115 | component or system with increasing load, e.g. numbers of parallel users 116 | and/or numbers of transactions, to determine what load can be handled by 117 | the component or system. 118 | .. [#f6] A type of performance testing conducted to evaluate a system or 119 | component at or beyond the limits of its anticipated or specified 120 | workloads, or with reduced availability of resources such as access to 121 | memory or servers. 122 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | ======================== 2 | Betelgeuse documentation 3 | ======================== 4 | 5 | .. contents:: Topics 6 | :local: 7 | 8 | 9 | What is Betelgeuse? 10 | =================== 11 | 12 | Betelgeuse is a Python program that reads standard Python test cases and 13 | generates XML files that are suited to be imported by Polarion importers. 14 | Possible generated XML files are: 15 | 16 | * Requirement Importer XML 17 | * Test Case Importer XML 18 | * Test Run Importer XML 19 | 20 | Quick Start 21 | =========== 22 | 23 | 1. Install betelguese from pypi. 24 | 25 | .. 
code-block:: console 26 | 27 | $ pip install betelgeuse 28 | 29 | 2. Alternatively, you can install from source: 30 | 31 | .. code-block:: console 32 | 33 | $ git clone https://github.com/SatelliteQE/betelgeuse.git 34 | $ cd betelgeuse 35 | $ pip install -e . 36 | 37 | .. note:: It is always recommended to use a Python virtual environment. 38 | 39 | How does it work? 40 | ================= 41 | 42 | Assuming that you have a ``test_user.py`` file with the following content: 43 | 44 | .. code-block:: python 45 | 46 | import entities 47 | import unittest 48 | 49 | 50 | class EntitiesTest(unittest.TestCase): 51 | 52 | def test_positive_create_user(self): 53 | user = entities.User(name='David', age=20) 54 | self.assertEqual(user.name, 'David') 55 | self.assertEqual(user.age, 20) 56 | 57 | def test_positive_create_car(self): 58 | car = entities.Car(make='Honda', year=2016) 59 | self.assertEqual(car.make, 'Honda') 60 | self.assertEqual(car.year, 2016) 61 | 62 | Using the example above, Betelgeuse will recognize that there are 2 test cases 63 | available, and the following attributes will be derived: 64 | 65 | * Title: this attribute will be derived from the name of the test method itself: 66 | 67 | - test_positive_create_user 68 | - test_positive_create_car 69 | 70 | * ID: this attribute will be derived from the concatenation of the 71 | *module.test_name* or *module.ClassName.test_name* if the test method is 72 | defined within a class. In other words, *the Python import path* will be used 73 | to derive the ID. Using our example, the values generated would be: 74 | 75 | - test_user.EntitiesTest.test_positive_create_user 76 | - test_user.EntitiesTest.test_positive_create_car 77 | 78 | By default, the values automatically derived by Betelgeuse are not very 79 | flexible, especially when you rename an existing test case or move 80 | it to a different class or module. It is therefore recommended to use 81 | field list fields to provide a bit more information about the tests. 82 | 83 | .. code-block:: python 84 | 85 | import entities 86 | import unittest 87 | 88 | 89 | class EntitiesTest(unittest.TestCase): 90 | 91 | def test_positive_create_user(self): 92 | """Create a new user providing all expected attributes. 93 | 94 | :id: 1d73b8cc-a754-4637-8bae-d9d2aaf89003 95 | :title: Create a new user providing all expected attributes 96 | """ 97 | user = entities.User(name='David', age=20) 98 | self.assertEqual(user.name, 'David') 99 | self.assertEqual(user.age, 20) 100 | 101 | Now Betelgeuse can use the ``:title:`` field to derive a friendlier name for 102 | your test (instead of using *test_positive_create_user*) and a specific value 103 | for its ID. Other information can also be added to the docstring, and this 104 | can be handled by adding more fields (named after 105 | Polarion fields and custom fields). 106 | 107 | .. note:: 108 | 109 | 1. Make sure that your ``IDs`` are indeed unique per test case. 110 | 2. You can generate a unique UUID using the following code snippet. 111 | 112 | .. code-block:: python 113 | 114 | import uuid 115 | uuid.uuid4() 116 | 117 | How steps and expectedresults work together 118 | ------------------------------------------- 119 | 120 | Betelgeuse will look for some fields when parsing the test cases but there is 121 | a special case: when both ``steps`` and ``expectedresults`` are defined 122 | together. 123 | 124 | Betelgeuse will try to match both and create pairs of steps and expected 125 | results.
For example, in the following docstring: 126 | 127 | .. code-block:: python 128 | 129 | """Create a new user providing all expected attributes. 130 | 131 | :id: 1d73b8cc-a754-4637-8bae-d9d2aaf89003 132 | :steps: Create a user with name and email 133 | :expectedresults: User is created without any error being raised 134 | """ 135 | 136 | A pair consisting of the ``Create a user with name and email`` step and the ``User is created 137 | without any error being raised`` expected result will be created. If multiple 138 | steps and expected results are wanted, then a list can be used: 139 | 140 | .. code-block:: python 141 | 142 | """Create a new user providing all expected attributes. 143 | 144 | :id: 1d73b8cc-a754-4637-8bae-d9d2aaf89003 145 | :steps: 146 | 1. Open the user creation page 147 | 2. Fill name and email 148 | 3. Submit the form 149 | :expectedresults: 150 | 1. A page with a form with name and email will be displayed 151 | 2. The fields will be populated with the information filled in 152 | 3. User is created without any error being raised 153 | """ 154 | 155 | In the above example three pairs will be created. The first will match the 156 | first item on ``steps`` and the first item on ``expectedresults``, the second pair 157 | will be the second item on ``steps`` and the second item on 158 | ``expectedresults``, and so on and so forth. 159 | 160 | .. note:: 161 | 162 | If the number of items is not the same, then only one pair will be 163 | created. The step will be the HTML generated by the value of ``steps`` and 164 | the expected result will be the HTML generated by the value of 165 | ``expectedresults``. 166 | 167 | Usage Examples 168 | ============== 169 | 170 | .. note:: 171 | 172 | 1. To make Betelgeuse easier to understand, this repository already includes a 173 | ``sample_project`` folder. This folder contains sample tests and XML results which 174 | will help in setting up and testing Betelgeuse for your project. The sample 175 | commands used below also use this data. 176 | 177 | 2. Always run the test runner and Betelgeuse in the same directory to make 178 | sure that the test run ID mapping works fine. Otherwise Betelgeuse may 179 | report ID errors. More info can be found in the `test-run command`_ section. 180 | 181 | help command 182 | ------------ 183 | 184 | .. code-block:: console 185 | 186 | $ betelgeuse --help 187 | 188 | requirement command 189 | ------------------- 190 | 191 | The ``requirement`` command generates an XML file suited to be imported by the 192 | Requirement XML Importer. It reads the Python test suite source code and 193 | generates an XML file with all the information necessary for the Requirement XML 194 | Importer. 195 | 196 | .. code-block:: console 197 | 198 | $ betelgeuse requirement \ 199 | --assignee assignee \ 200 | --approver approver1 \ 201 | --approver approver2 \ 202 | sample_project/tests \ 203 | PROJECT \ 204 | betelgeuse-requirements.xml 205 | 206 | .. note:: 207 | 208 | Requirements must be created in order to link test cases to them. Make sure 209 | to import the requirements before the test cases. 210 | 211 | test-case command 212 | ----------------- 213 | 214 | The ``test-case`` command generates an XML file suited to be imported by the 215 | Test Case XML Importer. It reads the Python test suite source code and 216 | generates an XML file with all the information necessary for the Test Case XML 217 | Importer.
218 | 219 | The ``test-case`` command requires you to pass: 220 | 221 | * The path to the Python test suite source code 222 | * The Polarion project ID 223 | * The output XML file path (the file will be overwritten if it already exists) 224 | 225 | .. note:: 226 | 227 | Even though ``--response-property`` is optional, it is highly recommended 228 | to pass it because it will be easier to monitor the importer messages (which 229 | is not handled by Betelgeuse). 230 | 231 | The example below shows how to run the command: 232 | 233 | .. code-block:: console 234 | 235 | $ betelgeuse test-case \ 236 | --automation-script-format "https://github.com/SatelliteQE/betelgeuse/tree/master/{path}#L{line_number}" \ 237 | sample_project/tests \ 238 | PROJECT \ 239 | betelgeuse-test-cases.xml 240 | 241 | 242 | test-results command 243 | -------------------- 244 | 245 | Gives a nice summary of test cases/results in the given jUnit XML file. 246 | 247 | .. code-block:: console 248 | 249 | $ betelgeuse test-results --path \ 250 | sample_project/results/sample-junit-result.xml 251 | 252 | Passed: 1 253 | 254 | test-run command 255 | ---------------- 256 | 257 | The ``test-run`` command generates an XML file suited to be imported by the 258 | Test Run XML importer. It takes: 259 | 260 | * A valid xUnit XML file 261 | * A Python test suite where test case IDs can be found 262 | 263 | And generates a resulting XML file with all the information necessary for the 264 | Test Run XML importer. 265 | 266 | The ``test-run`` command only requires you to pass: 267 | 268 | * The path to the xUnit XML file 269 | * The path to the Python test suite source code 270 | * The Polarion user ID 271 | * The Polarion project ID 272 | * The output XML file path (the file will be overwritten if it already exists) 273 | 274 | .. note:: 275 | 276 | Even though ``--response-property`` is optional, it is highly recommended 277 | to pass it because it will be easier to monitor the importer messages (which 278 | is not handled by Betelgeuse). 279 | 280 | The example below shows how to run the ``test-run`` command: 281 | 282 | .. code-block:: console 283 | 284 | $ betelgeuse test-run \ 285 | --response-property property_key=property_value \ 286 | sample_project/results/sample-junit-result.xml \ 287 | sample_project/tests/ \ 288 | testuser \ 289 | PROJECT \ 290 | betelgeuse-test-run.xml 291 | 292 | Polarion custom fields can be set by using the ``--custom-fields`` option. 293 | There are two ways to define custom fields: 294 | 295 | ``key=value`` format 296 | This is a shortcut when you want to define plain strings as the value of a 297 | custom field. 298 | 299 | JSON format 300 | This approach is better suited when the type of the custom field matters. For 301 | example, if a custom field expects a boolean as a value. 302 | 303 | Example using ``key=value`` format: 304 | 305 | .. code-block:: console 306 | 307 | $ betelgeuse test-run \ 308 | --custom-fields arch=x8664 \ 309 | --custom-fields variant=server \ 310 | --response-property property_key=property_value \ 311 | sample_project/results/sample-junit-result.xml \ 312 | sample_project/tests/ \ 313 | testuser \ 314 | PROJECT \ 315 | betelgeuse-test-run.xml 316 | 317 | Example using JSON format: 318 | 319 | ..
code-block:: console 320 | 321 | $ betelgeuse test-run \ 322 | --custom-fields '{"isautomated":"true","arch":"x8664"}' \ 323 | --response-property property_key=property_value \ 324 | sample_project/results/sample-junit-result.xml \ 325 | sample_project/tests/ \ 326 | testuser \ 327 | PROJECT \ 328 | betelgeuse-test-run.xml 329 | 330 | .. warning:: 331 | 332 | Make sure to pass the the custom field ID (same as in Polarion) and its 333 | value. Also, pass custom field values as string since they will be 334 | converted to XML where there is no type information. 335 | 336 | Case Study - A real world sample Test Case 337 | =========================================== 338 | 339 | Field list fields can be used to provide more information about a test case. 340 | The more information one provides via these fields, the more accurate the data 341 | being imported into Polarion. For example: 342 | 343 | .. code-block:: python 344 | 345 | import entities 346 | import unittest 347 | 348 | class EntitiesTest(unittest.TestCase): 349 | 350 | def test_positive_create_user(self): 351 | """Create a new user providing all expected attributes. 352 | 353 | :id: 1d73b8cc-a754-4637-8bae-d9d2aaf89003 354 | :expectedresults: User is successfully created 355 | :requirement: User Management 356 | :caseautomation: Automated 357 | :caselevel: Acceptance 358 | :casecomponent: CLI 359 | :testtype: Functional 360 | :caseimportance: High 361 | :upstream: No 362 | """ 363 | user = entities.User(name='David', age=20) 364 | self.assertEqual(user.name, 'David') 365 | self.assertEqual(user.age, 20) 366 | 367 | When the above test case is collected, Betelgeuse will make use of all 9 fields 368 | provided and generates a more meaningful test case. 369 | 370 | Ok, this is cool. But wait, there is more! Betelgeuse will reuse fields defined 371 | in different levels, namely: 372 | 373 | - function level 374 | - class level 375 | - module level 376 | - package level 377 | 378 | This feature can be leveraged to minimize the amount of information that needs 379 | to be written for each test case. Since most of the time, test cases grouped in 380 | a module usually share the same generic information, one could move most of 381 | these fields to the ``module`` level and every single test case found by 382 | Betelgeuse will inherit these attributes. For example: 383 | 384 | 385 | .. code:: python 386 | 387 | """Test cases for entities. 388 | 389 | :caseautomation: Automated 390 | :casecomponent: CLI 391 | :caseimportance: High 392 | :caselevel: Acceptance 393 | :requirement: User Management 394 | :testtype: functional 395 | :upstream: no 396 | """ 397 | 398 | import entities 399 | import unittest 400 | 401 | 402 | class EntitiesTest(unittest.TestCase): 403 | 404 | def test_positive_create_user(self): 405 | """Create a new user providing all expected attributes. 406 | 407 | :id: 1d73b8cc-a754-4637-8bae-d9d2aaf89003 408 | :expectedresults: User is successfully created 409 | """ 410 | user = entities.User(name='David', age=20) 411 | self.assertEqual(user.name, 'David') 412 | self.assertEqual(user.age, 20) 413 | 414 | 415 | def test_positive_create_car(self): 416 | """Create a new car providing all expected attributes. 
417 | 418 | :id: 71b9b000-b978-4a95-b6f8-83c09ed39c01 419 | :caseimportance: Medium 420 | :expectedresults: Car is successfully created and has no owner 421 | """ 422 | car = entities.Car(make='Honda', year=2016) 423 | self.assertEqual(car.make, 'Honda') 424 | self.assertEqual(car.year, 2016) 425 | 426 | Now all discovered test cases will inherit the attributes defined at the module 427 | level. Furthermore, the test case attributes can be overridden at the *class 428 | level* or at the *test case level*. Using the example above, since 429 | ``test_positive_create_car`` has its own *caseimportance* field defined, 430 | Betelgeuse will use its value of *Medium* for this test case alone while all 431 | other test cases will have a value of *High*, derived from the module. 432 | 433 | Betelgeuse is able to handle ``pytest`` parametrized tests and, in order to do 434 | so, set the ``parametrized`` field to ``yes`` on all tests that make use of the 435 | ``@pytest.parametrize`` decorator or a parametrized fixture. With that, the 436 | ``test-case`` command will generate an XML that instructs the importer to set 437 | the test case as being parametrized. And the ``test-run`` command will generate 438 | an XML that instructs the importer to set the result as an iteration result, if 439 | it finds a test result with ``[]`` on its name and the 440 | ``parametrized`` set to ``yes`` on the source code. 441 | 442 | Advanced Usage 443 | ============== 444 | 445 | Betelgeuse allows configuring the field processing to your own needs, check the 446 | :doc:`Betelgeuse Configuration Module ` documentation for more 447 | information. 448 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = --ignore tests/data 3 | -------------------------------------------------------------------------------- /release.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Test Betelgeuse for sanity. If all is well, generate a new commit, tag it, and 4 | # print instructions for further steps to take. 5 | # 6 | # NOTE: This script should be run from the repository root directory. Also in 7 | # order to actually release to PyPI a proper `.pypirc` file should be already 8 | # setup, for more information check 9 | # https://docs.python.org/3/distutils/packageindex.html#the-pypirc-file 10 | # 11 | set -euo pipefail 12 | 13 | # Make sure local fork is updated 14 | git fetch -p --all 15 | git checkout master 16 | git merge --ff-only origin/master 17 | 18 | OLD_VERSION="$(git tag --list | sort -V | tail -n 1)" 19 | if [[ $# -gt 0 ]]; then 20 | NEW_VERSION="${1}" 21 | else 22 | MAJOR_VERSION="$(echo "${OLD_VERSION}" | cut -d . -f 1)" 23 | MINOR_VERSION="$(echo "${OLD_VERSION}" | cut -d . 
-f 2)" 24 | NEW_VERSION="${MAJOR_VERSION}.$((MINOR_VERSION + 1)).0" 25 | fi 26 | 27 | if [[ $(echo -e "${OLD_VERSION}\n${NEW_VERSION}" | sort -V | tail -n 1) = "${OLD_VERSION}" ]]; then 28 | echo "The version must be greater than \"${OLD_VERSION}\"" 29 | exit 1 30 | fi 31 | 32 | # Bump version number 33 | echo "${NEW_VERSION}" > VERSION 34 | 35 | # Generate the package 36 | make package-clean package 37 | 38 | # Sanity check Betelgeuse packages on Python 2 39 | venv="$(mktemp --directory)" 40 | python3 -m venv "${venv}" 41 | set +u 42 | source "${venv}/bin/activate" 43 | set -u 44 | for dist in dist/*; do 45 | ls "${dist}" 46 | pip install --quiet -U pip 47 | pip install --quiet "${dist}" 48 | python -c "import betelgeuse" 1>/dev/null 49 | make test 50 | pip uninstall --quiet --yes betelgeuse 51 | done 52 | set +u 53 | deactivate 54 | set -u 55 | rm -rf "${venv}" 56 | 57 | # Get the changes from last release and commit 58 | git add VERSION 59 | git commit -m "Release version ${NEW_VERSION}" \ 60 | -m "Shortlog of commits since last release:" \ 61 | -m "$(git shortlog ${OLD_VERSION}.. | sed 's/^./ &/')" 62 | 63 | # Tag with the new version 64 | git tag "${NEW_VERSION}" 65 | 66 | fmt <self = <test_login_example.LoginTestCase testMethod=test_login_2> 2 | 3 | def test_login_2(self): 4 | """This is an expected failure 5 | 6 | :id: 5adbfbe3-9594-46bb-b8b6-d8ef3dbca6b6 7 | 8 | :steps: 9 | 10 | 1. First Step 11 | 2. Second Step 12 | 13 | :expectedresults: 14 | 15 | 1. First Result 16 | 2. Second Result 17 | """ 18 | > self.fail('Expected failure') 19 | E AssertionError: Expected failure 20 | 21 | test_login_example.py:39: AssertionErrorsample_project/tests/test_login_example.py:40: <py._xmlgen.raw object at 0x7fdf36f6a110> -------------------------------------------------------------------------------- /sample_project/tests/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | :requirement: Global Requirement 3 | """ 4 | -------------------------------------------------------------------------------- /sample_project/tests/test_init.py: -------------------------------------------------------------------------------- 1 | """Tests for the fields defined on the __init__.py module.""" 2 | 3 | 4 | def test_global_requirement(self): 5 | """Test case to find if the global requirement will be pulled. 6 | 7 | :id: 3b658fe1-3d96-4ddb-bdf9-2abd950567c7 8 | 9 | :steps: Run Betelgeuse and check if this test case to collect this test 10 | case. 11 | 12 | :expectedresults: The test case must have the requirement defined as the 13 | Global Requirement from the ``__init__.py`` file. 14 | """ 15 | pass 16 | -------------------------------------------------------------------------------- /sample_project/tests/test_login_example.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | """Test class for Login 3 | 4 | :requirement: Importer Test 5 | """ 6 | 7 | import unittest 8 | 9 | 10 | class LoginTestCase(unittest.TestCase): 11 | """Tests for Login""" 12 | 13 | def test_login_1(self): 14 | """Check if a user is able to login with valid userid and password 15 | 16 | :id: 60e48736-43a9-11e6-bcaa-104a7da122d7 17 | 18 | :steps: Login to UI with valid userid and password 19 | 20 | :expectedresults: User is able to login successfully 21 | """ 22 | pass 23 | 24 | def test_login_2(self): 25 | """This is an expected failure 26 | 27 | :id: 5adbfbe3-9594-46bb-b8b6-d8ef3dbca6b6 28 | 29 | :steps: 30 | 31 | 1. First Step 32 | 2. 
Second Step 33 | 34 | :expectedresults: 35 | 36 | 1. First Result 37 | 2. Second Result 38 | """ 39 | self.fail('Expected failure') 40 | 41 | def test_login_3(self): 42 | """This is an expected skip 43 | 44 | :id: 76fdbb37-1b05-4f90-918e-d34e5e22ed7e 45 | """ 46 | self.skipTest('Expected skip') 47 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding=utf-8 3 | """A setuptools-based script for installing Betelgeuse.""" 4 | from setuptools import find_packages, setup 5 | 6 | with open('README.rst') as handle: 7 | LONG_DESCRIPTION = handle.read() 8 | 9 | with open('VERSION') as handle: 10 | VERSION = handle.read().strip() 11 | 12 | setup( 13 | name='Betelgeuse', 14 | author='Elyézer Rezende, Og Maciel', 15 | author_email='erezende@redhat.com, omaciel@redhat.com', 16 | version=VERSION, 17 | packages=find_packages(include=['betelgeuse', 'betelgeuse.*']), 18 | install_requires=['click', 'docutils'], 19 | # See https://pypi.python.org/pypi?%3Aaction=list_classifiers 20 | classifiers=[ 21 | 'Development Status :: 5 - Production/Stable', 22 | 'Intended Audience :: Developers', 23 | 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', 24 | 'Programming Language :: Python :: 3 :: Only', 25 | 'Programming Language :: Python :: 3', 26 | 'Programming Language :: Python :: 3.6', 27 | 'Programming Language :: Python :: 3.7', 28 | 'Programming Language :: Python :: 3.8', 29 | 'Programming Language :: Python :: 3.9', 30 | ], 31 | description=( 32 | 'Betelgeuse is a Python program that reads standard Python test cases ' 33 | 'and generates XML files that are suited to be imported by Polarion ' 34 | 'importers.' 35 | ), 36 | entry_points=""" 37 | [console_scripts] 38 | betelgeuse=betelgeuse:cli 39 | """, 40 | include_package_data=True, 41 | license='GPLv3', 42 | long_description=LONG_DESCRIPTION, 43 | package_data={'': ['LICENSE']}, 44 | url='https://github.com/SatelliteQE/betelgeuse', 45 | ) 46 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | """Tests for Betelgeuse.""" 2 | -------------------------------------------------------------------------------- /tests/data/__init__.py: -------------------------------------------------------------------------------- 1 | """Sample tests package.""" 2 | -------------------------------------------------------------------------------- /tests/data/ignore_dir/__init__.py: -------------------------------------------------------------------------------- 1 | """Package with tests to be ignored.""" 2 | -------------------------------------------------------------------------------- /tests/data/ignore_dir/test_ignore_dir.py: -------------------------------------------------------------------------------- 1 | """Tests to be ignored during collection.""" 2 | 3 | 4 | def test_ignore_1(): 5 | """Test ignore 1. 6 | 7 | :field1: value1 8 | :field2: value2 9 | """ 10 | 11 | 12 | def test_ignore_2(): 13 | """Test ignore 2. 
14 | 15 | :field1: value1 16 | :field2: value2 17 | """ 18 | -------------------------------------------------------------------------------- /tests/data/test_sample.py: -------------------------------------------------------------------------------- 1 | # encoding=utf-8 2 | """Sample test module.""" 3 | import unittest 4 | import pytest 5 | 6 | 7 | pytestmark = [pytest.mark.run_in_one_thread, pytest.mark.tier1] 8 | 9 | CONSTANT = 'contant-value' 10 | 11 | 12 | def decorator(func): 13 | """No-op decorator.""" 14 | return func 15 | 16 | 17 | decorator.mark = object() 18 | 19 | 20 | def decorator_with_args(*args, **kwargs): 21 | """No-op decorator that expects arguments.""" 22 | def inner(func): 23 | return func 24 | return inner 25 | 26 | 27 | def test_function(): 28 | """Test function. 29 | 30 | :field1: value1 31 | :field2: value2 32 | """ 33 | pass 34 | 35 | 36 | @decorator 37 | @decorator.mark.something 38 | @decorator_with_args([1, b'bytes', ('a', 'b'), None]) 39 | @decorator_with_args(*[True, (True or False) and True]) 40 | @decorator_with_args((f'{CONSTANT!r:5>} with literal {{ and }}',)) 41 | @decorator_with_args({1, 2, -3}) 42 | @decorator_with_args({'a': 1, 'b': 2, **{'c': 3}}) 43 | @decorator_with_args([1, 2][0], [1, 2][:1], [1, 2][0:], [1, 2][0:1:1]) 44 | @decorator_with_args([i for i in range(5) if i % 2 == 0]) 45 | @decorator_with_args((i for i in range(5))) 46 | @decorator_with_args({i for i in range(5)}) 47 | @decorator_with_args({k: v for k in 'abcde' for v in range(5)}) 48 | @decorator_with_args(1, 2, 3, a=1, b=2) 49 | @decorator_with_args( 50 | dict(a=1, b=2), 51 | dict(**{'a': 1}), 52 | vars(decorator.mark), 53 | lambda a, *args, b=1, **kwargs: (a, args, b, kwargs), 54 | lambda a, *, b=1: (a, b), 55 | lambda v: v if v else None, 56 | ) 57 | def test_decorated_test(): 58 | """Test decorated function. 59 | 60 | :field1: value1 61 | :field2: value2 62 | """ 63 | 64 | 65 | class TestCase(unittest.TestCase): 66 | """Test case.""" 67 | 68 | def test_method(self): 69 | """Test method. 70 | 71 | :field1: value1 72 | :field2: value2 73 | """ 74 | pass 75 | 76 | def test_without_docstring(self): # noqa: D102 77 | pass 78 | 79 | 80 | @pytest.mark.on_prem_provisioning 81 | class TestclasswithMarkers: 82 | """Class to verify tests markers are collected from class.""" 83 | 84 | @pytest.mark.skipif(2 == 3, reason='2 is not 3') 85 | @pytest.mark.osp 86 | def test_markers_sample(self): 87 | """Test for markers at test level.""" 88 | assert True 89 | -------------------------------------------------------------------------------- /tests/test_betelgeuse.py: -------------------------------------------------------------------------------- 1 | """Betelgeuse unit tests.""" 2 | import click 3 | import mock 4 | import operator 5 | import os 6 | import pytest 7 | import re 8 | 9 | from click.testing import CliRunner 10 | from betelgeuse import ( 11 | INVALID_CHARS_REGEX, 12 | cli, 13 | create_xml_property, 14 | create_xml_testcase, 15 | default_config, 16 | load_custom_fields, 17 | map_steps, 18 | parse_junit, 19 | parse_test_results, 20 | validate_key_value_option, 21 | ) 22 | from betelgeuse.config import BetelgeuseConfig 23 | from io import StringIO 24 | from xml.etree import ElementTree 25 | 26 | 27 | JUNIT_XML = """ 28 | 29 | 30 | 31 | 32 | ... 33 | 34 | 35 | ... 36 | 37 | 38 | ... 
39 | 40 | 41 | 42 | 43 | """ 44 | 45 | TEST_MODULE = ''' # noqa: Q000 46 | def test_something(): 47 | """This test something.""" 48 | 49 | def test_something_else(): 50 | """This test something else.""" 51 | ''' 52 | 53 | 54 | MULTIPLE_STEPS = """
    55 |
  1. First step

  2. 56 |
  3. Second step

  4. 57 |
  5. Third step

  6. 58 |
59 | """ 60 | 61 | MULTIPLE_EXPECTEDRESULTS = """
    62 |
  1. First step expected result.

  2. 63 |
  3. Second step expected result.

  4. 64 |
  5. Third step expected result.

  6. 65 |
66 | """ 67 | 68 | SINGLE_STEP = """

Single step

""" 69 | 70 | SINGLE_EXPECTEDRESULT = """

Single step expected result.

""" 71 | 72 | 73 | @pytest.fixture 74 | def cli_runner(): 75 | """Return a `click`->`CliRunner` object.""" 76 | return CliRunner() 77 | 78 | 79 | def test_load_custom_fields(): 80 | """Check if custom fields can be loaded using = notation.""" 81 | assert load_custom_fields(('isautomated=true',)) == { 82 | 'isautomated': 'true' 83 | } 84 | 85 | 86 | def test_load_custom_fields_empty(): 87 | """Check if empty value return empty dict for custom fields.""" 88 | assert load_custom_fields(('',)) == {} 89 | 90 | 91 | def test_load_custom_fields_none(): 92 | """Check if None value return empty dict for custom fields.""" 93 | assert load_custom_fields(None) == {} 94 | 95 | 96 | def test_load_custom_fields_json(): 97 | """Check if custom fields can be loaded using JSON data.""" 98 | assert load_custom_fields(('{"isautomated":true}',)) == { 99 | 'isautomated': True, 100 | } 101 | 102 | 103 | def test_map_single_step(): 104 | """Check if mapping single step works.""" 105 | mapped = [(SINGLE_STEP, SINGLE_EXPECTEDRESULT)] 106 | assert map_steps(SINGLE_STEP, SINGLE_EXPECTEDRESULT) == mapped 107 | 108 | 109 | def test_map_multiple_steps(): 110 | """Check if mapping multiple steps works.""" 111 | assert map_steps(MULTIPLE_STEPS, MULTIPLE_EXPECTEDRESULTS) == [ 112 | ('

First step

', '

First step expected result.

'), 113 | ('

Second step

', '

Second step expected result.

'), 114 | ('

Third step

', '

Third step expected result.

'), 115 | ] 116 | 117 | 118 | def test_get_multiple_steps_diff_items(): 119 | """Check if parsing multiple steps of different items works.""" 120 | multiple_steps = '\n'.join(MULTIPLE_STEPS.splitlines()[:-2] + ['\n']) 121 | assert map_steps( 122 | multiple_steps, MULTIPLE_EXPECTEDRESULTS) == [( 123 | '
    \n
  1. First step

  2. \n ' 124 | '
  3. Second step

  4. \n
\n', 125 | MULTIPLE_EXPECTEDRESULTS 126 | )] 127 | 128 | 129 | def test_parse_junit(): 130 | """Check if jUnit parsing works.""" 131 | junit_xml = StringIO(JUNIT_XML) 132 | assert parse_junit(junit_xml) == [ 133 | {'classname': 'foo1', 'name': 'test_passed', 'status': 'passed', 134 | 'line': '8', 'file': 'source.py'}, 135 | {'classname': 'foo1', 'name': 'test_passed_no_id', 'status': 'passed'}, 136 | {'classname': 'foo2', 'message': 'Skipped message', 137 | 'name': 'test_skipped', 'status': 'skipped'}, 138 | {'classname': 'foo3', 'name': 'test_failure', 139 | 'message': 'Failure message', 'status': 'failure', 'type': 'Type'}, 140 | {'classname': 'foo4', 'name': 'test_error', 'message': 'Error message', 141 | 'status': 'error', 'type': 'ExceptionName'}, 142 | {'classname': 'foo1', 'name': 'test_parametrized[a]', 143 | 'status': 'passed'}, 144 | {'classname': 'foo1', 'name': 'test_parametrized[b]', 145 | 'status': 'passed'}, 146 | ] 147 | junit_xml.close() 148 | 149 | 150 | def test_invalid_test_run_chars_regex(): 151 | """Check if invalid test run characters are handled.""" 152 | invalid_test_run_id = '\\/.:*"<>|~!@#$?%^&\'*()+`,=' 153 | assert re.sub(INVALID_CHARS_REGEX, '', invalid_test_run_id) == '' 154 | 155 | 156 | def test_parse_test_results(): 157 | """Check if parsing test results works.""" 158 | test_results = [ 159 | {'status': u'passed', 160 | 'name': 'test_positive_read', 161 | 'classname': 'tests.api.test_ReadTestCase', 162 | 'file': 'tests/api/test_foo.py', 163 | 'time': '4.13224601746', 164 | 'line': '521'}, 165 | {'status': u'passed', 166 | 'name': 'test_positive_delete', 167 | 'classname': 'tests.api.test_ReadTestCase', 168 | 'file': 'tests/api/test_foo.py', 169 | 'time': '4.13224601746', 170 | 'line': '538'}, 171 | {'status': u'failure', 172 | 'name': 'test_negative_read', 173 | 'classname': 'tests.api.test_ReadTestCase', 174 | 'file': 'tests/api/test_foo.py', 175 | 'time': '4.13224601746', 176 | 'line': '218'}, 177 | {'status': u'skipped', 178 | 'name': 'test_positive_update', 179 | 'classname': 'tests.api.test_ReadTestCase', 180 | 'file': 'tests/api/test_foo.py', 181 | 'time': '4.13224601746', 182 | 'line': '112'}, 183 | {'status': u'error', 184 | 'name': 'test_positive_create', 185 | 'classname': 'tests.api.test_ReadTestCase', 186 | 'file': 'tests/api/test_foo.py', 187 | 'time': '4.13224601746', 188 | 'line': '788'}, 189 | ] 190 | summary = parse_test_results(test_results) 191 | assert summary['passed'] == 2 192 | assert summary['failure'] == 1 193 | assert summary['skipped'] == 1 194 | assert summary['error'] == 1 195 | 196 | 197 | def test_test_results(cli_runner): 198 | """Check if test results command works.""" 199 | with cli_runner.isolated_filesystem(): 200 | with open('results.xml', 'w') as handler: 201 | handler.write(JUNIT_XML) 202 | result = cli_runner.invoke( 203 | cli, ['test-results', '--path', 'results.xml']) 204 | assert result.exit_code == 0 205 | assert 'Error: 1\n' in result.output 206 | assert 'Failure: 1\n' in result.output 207 | assert 'Passed: 4\n' in result.output 208 | assert 'Skipped: 1\n' in result.output 209 | 210 | 211 | def test_test_results_default_path(cli_runner): 212 | """Check if test results in the default path works.""" 213 | with cli_runner.isolated_filesystem(): 214 | with open('junit-results.xml', 'w') as handler: 215 | handler.write(JUNIT_XML) 216 | result = cli_runner.invoke(cli, ['test-results']) 217 | assert result.exit_code == 0 218 | assert 'Error: 1\n' in result.output 219 | assert 'Failure: 1\n' in result.output 220 | 
assert 'Passed: 4\n' in result.output 221 | assert 'Skipped: 1\n' in result.output 222 | 223 | 224 | def test_create_xml_property(): 225 | """Check if create_xml_property creates the expected XML tag.""" 226 | generated = ElementTree.tostring( 227 | create_xml_property('name', 'value'), 228 | encoding='unicode' 229 | ) 230 | assert generated == '' 231 | 232 | 233 | def test_create_xml_testcase(): 234 | """Check if create_xml_testcase creates the expected XML tag.""" 235 | testcase = mock.MagicMock() 236 | testcase.name = 'test_it_works' 237 | testcase.parent_class = 'FeatureTestCase' 238 | testcase.testmodule = 'tests/test_feature.py' 239 | testcase.docstring = 'Test feature docstring' 240 | testcase.fields = { 241 | field: field for field in 242 | default_config.TESTCASE_FIELDS + default_config.TESTCASE_CUSTOM_FIELDS 243 | } 244 | testcase.fields['parametrized'] = 'yes' 245 | config = BetelgeuseConfig() 246 | generated = ElementTree.tostring( 247 | create_xml_testcase(config, testcase, '{path}#{line_number}'), 248 | encoding='unicode' 249 | ) 250 | assert generated == ( 251 | 'title' 254 | 'description' 255 | '' 257 | 'steps' 258 | 'expectedresults' 259 | '' 260 | '' 261 | 'Iteration: ' 262 | '' 263 | 'Pass' 264 | '' 265 | '' 266 | '' 267 | '' 268 | '' 269 | '' 270 | '' 271 | '' 272 | '' 273 | '' 274 | '' 275 | '' 276 | '' 277 | '' 278 | '' 279 | '' 280 | '' 281 | '' 282 | '' 283 | '' 284 | '' 285 | '' 286 | '' 287 | '' 288 | '' 289 | '' 290 | '' 291 | '' 292 | '' 293 | '' 294 | '' 295 | ) 296 | 297 | 298 | def test_requirement(cli_runner): 299 | """Check if requirement command works.""" 300 | with cli_runner.isolated_filesystem(): 301 | with open('source.py', 'w') as handler: 302 | handler.write('') 303 | with mock.patch('betelgeuse.collector.collect_tests') as collect_tests: 304 | return_value_testcases = [] 305 | for index in range(5): 306 | t = mock.MagicMock() 307 | t.docstring = None 308 | t.fields = {'requirement': f'requirement{index}'} 309 | return_value_testcases.append(t) 310 | 311 | collect_tests.return_value = { 312 | 'source.py': return_value_testcases, 313 | } 314 | result = cli_runner.invoke( 315 | cli, 316 | [ 317 | 'requirement', 318 | '--approver', 'approver1', 319 | '--approver', 'approver2', 320 | '--assignee', 'assignee', 321 | '--dry-run', 322 | '--response-property', 'property_key=property_value', 323 | 'source.py', 324 | 'projectid', 325 | 'requirements.xml' 326 | ] 327 | ) 328 | assert result.exit_code == 0, result.output 329 | assert result.output.strip() == '' 330 | collect_tests.assert_called_once_with('source.py', ()) 331 | assert os.path.isfile('requirements.xml') 332 | root = ElementTree.parse('requirements.xml').getroot() 333 | assert root.tag == 'requirements' 334 | properties = root.find('properties') 335 | assert properties 336 | properties = [p.attrib for p in properties.findall('property')] 337 | expected = [ 338 | {'name': 'lookup-method', 'value': 'name'}, 339 | {'name': 'dry-run', 'value': 'true'}, 340 | ] 341 | for p in properties: 342 | assert p in expected 343 | for index, requirement in enumerate(root.findall('requirement')): 344 | children = [ 345 | ElementTree.tostring(child, encoding='unicode') 346 | for child in requirement 347 | ] 348 | assert children == [ 349 | f'requirement{index}', 350 | '' 351 | '' 352 | '' 353 | ] 354 | assert requirement.attrib == { 355 | 'approver-ids': 'approver1:approved approver2:approved', 356 | 'assignee-id': 'assignee', 357 | 'priority-id': 'high', 358 | 'severity-id': 'should_have', 359 | 'status-id': 
'approved', 360 | } 361 | 362 | 363 | def test_test_run(cli_runner): 364 | """Check if test run command works.""" 365 | with cli_runner.isolated_filesystem(): 366 | with open('junit_report.xml', 'w') as handler: 367 | handler.write(JUNIT_XML) 368 | with open('source.py', 'w') as handler: 369 | handler.write('') 370 | with mock.patch('betelgeuse.collector') as collector: 371 | testcases = [ 372 | {'name': 'test_passed', 'testmodule': 'foo1'}, 373 | {'name': 'test_passed_no_id', 'testmodule': 'foo1'}, 374 | {'name': 'test_skipped', 'testmodule': 'foo2'}, 375 | {'name': 'test_failure', 'testmodule': 'foo3'}, 376 | {'name': 'test_error', 'testmodule': 'foo4'}, 377 | {'name': 'test_parametrized', 'testmodule': 'foo1'}, 378 | ] 379 | return_value_testcases = [] 380 | for test in testcases: 381 | t = mock.MagicMock() 382 | t.docstring = '' 383 | t.name = test['name'] 384 | t.parent_class = None 385 | t.testmodule = test['testmodule'] 386 | t.fields = {'id': str(id(t))} 387 | if t.name == 'test_parametrized': 388 | t.fields['parametrized'] = 'yes' 389 | t.junit_id = f'{test["testmodule"]}.{test["name"]}' 390 | return_value_testcases.append(t) 391 | 392 | collector.collect_tests.return_value = { 393 | 'source.py': return_value_testcases, 394 | } 395 | result = cli_runner.invoke( 396 | cli, 397 | [ 398 | 'test-run', 399 | '--dry-run', 400 | '--no-include-skipped', 401 | '--create-defects', 402 | '--custom-fields', 'field=value', 403 | '--project-span-ids', 'project1, project2', 404 | '--response-property', 'key=value', 405 | '--status', 'inprogress', 406 | '--test-run-id', 'test-run-id', 407 | '--test-run-group-id', 'test-run-group-id', 408 | '--test-run-template-id', 'test-run-template-id', 409 | '--test-run-title', 'test-run-title', 410 | '--test-run-type-id', 'test-run-type-id', 411 | 'junit_report.xml', 412 | 'source.py', 413 | 'userid', 414 | 'projectid', 415 | 'importer.xml' 416 | ] 417 | ) 418 | assert result.exit_code == 0, result.output 419 | collector.collect_tests.assert_called_once_with('source.py', ()) 420 | assert os.path.isfile('importer.xml') 421 | root = ElementTree.parse('importer.xml').getroot() 422 | assert root.tag == 'testsuites' 423 | properties = root.find('properties') 424 | assert properties 425 | by_name = operator.itemgetter('name') 426 | properties = sorted( 427 | [p.attrib for p in properties.findall('property')], 428 | key=by_name 429 | ) 430 | 431 | expected = [ 432 | {'name': 'polarion-create-defects', 'value': 'true'}, 433 | {'name': 'polarion-custom-field', 'value': 'value'}, 434 | {'name': 'polarion-custom-lookup-method-field-id', 435 | 'value': 'testCaseID'}, 436 | {'name': 'polarion-dry-run', 'value': 'true'}, 437 | {'name': 'polarion-include-skipped', 'value': 'false'}, 438 | {'name': 'polarion-lookup-method', 'value': 'custom'}, 439 | {'name': 'polarion-project-id', 'value': 'projectid'}, 440 | {'name': 'polarion-project-span-ids', 441 | 'value': 'project1, project2'}, 442 | {'name': 'polarion-response-key', 'value': 'value'}, 443 | {'name': 'polarion-testrun-status-id', 'value': 'inprogress'}, 444 | {'name': 'polarion-testrun-id', 'value': 'test-run-id'}, 445 | {'name': 'polarion-group-id', 'value': 'test-run-group-id'}, 446 | {'name': 'polarion-testrun-template-id', 447 | 'value': 'test-run-template-id'}, 448 | {'name': 'polarion-testrun-title', 'value': 'test-run-title'}, 449 | {'name': 'polarion-testrun-type-id', 450 | 'value': 'test-run-type-id'}, 451 | {'name': 'polarion-user-id', 'value': 'userid'}, 452 | ] 453 | expected.sort(key=by_name) 454 | assert 
properties == expected 455 | testsuite = root.find('testsuite') 456 | assert testsuite 457 | for index, testcase in enumerate(testsuite.findall('testcase')): 458 | properties = testcase.find('properties') 459 | assert properties 460 | p = properties.findall('property') 461 | assert 0 < len(p) <= 2 462 | print(index) 463 | print(ElementTree.tostring(testcase)) 464 | 465 | if len(p) == 2: 466 | testcase_id = str(id(return_value_testcases[-1])) 467 | else: 468 | testcase_id = str(id(return_value_testcases[index])) 469 | 470 | assert p[0].attrib == { 471 | 'name': 'polarion-testcase-id', 472 | 'value': testcase_id, 473 | } 474 | 475 | if len(p) == 2: 476 | assert p[1].attrib['name'] == ( 477 | 'polarion-parameter-pytest parameters' 478 | ) 479 | assert p[1].attrib['value'] in ('a', 'b') 480 | 481 | 482 | def test_validate_key_value_option(): 483 | """Check if validate_key_value_option works.""" 484 | # None value will be passed when the option is not specified. 485 | for value, result in (('key=value=', ('key', 'value=')), (None, None)): 486 | assert validate_key_value_option( 487 | None, mock.MagicMock(), value) == result 488 | 489 | 490 | def test_validate_key_value_option_exception(): 491 | """Check if validate_key_value_option validates invalid values.""" 492 | option = mock.MagicMock() 493 | option.name = 'option_name' 494 | msg = 'option_name needs to be in format key=value' 495 | for value in ('value', ''): 496 | with pytest.raises(click.BadParameter) as excinfo: 497 | validate_key_value_option(None, option, value) 498 | assert excinfo.value.message == msg 499 | -------------------------------------------------------------------------------- /tests/test_collector.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | """Tests for :mod:`betelgeuse.collector`.""" 3 | import pytest 4 | 5 | from betelgeuse import collector 6 | 7 | 8 | @pytest.mark.parametrize( 9 | 'path', ('./tests/data', './tests/data/test_sample.py')) 10 | def test_collect_tests(path): 11 | """Check if ``collect_tests`` 'tests/data'collect tests.""" 12 | tests = collector.collect_tests(path) 13 | assert 'tests/data/test_sample.py' in tests 14 | assert len(tests['tests/data/test_sample.py']) == 5 15 | 16 | # Check if we are not doing a specific python module collection 17 | if path.endswith('.py'): 18 | return 19 | 20 | assert 'tests/data/ignore_dir/test_ignore_dir.py' in tests 21 | assert len(tests['tests/data/ignore_dir/test_ignore_dir.py']) == 2 22 | 23 | 24 | @pytest.mark.parametrize('ignore_path', ( 25 | 'tests/data/ignore_dir', 26 | 'tests/data/ignore_dir/test_ignore_dir.py' 27 | )) 28 | def test_collect_ignore_path(ignore_path): 29 | """Check if ``collect_tests`` don't collect tests on the ignore paths.""" 30 | tests = collector.collect_tests('tests/data', [ignore_path]) 31 | assert 'tests/data/ignore_dir/test_ignore_dir.py' not in tests 32 | assert 'tests/data/test_sample.py' in tests 33 | assert len(tests['tests/data/test_sample.py']) == 5 34 | 35 | 36 | @pytest.mark.parametrize('filename', ('test_module.py', 'module_test.py')) 37 | def test_is_test_module(filename): 38 | """Check ``is_test_module`` working for valid filenames.""" 39 | assert collector.is_test_module(filename) 40 | 41 | 42 | @pytest.mark.parametrize('filename', ('not_test_module.py', 'module.py')) 43 | def test_not_is_test_module(filename): 44 | """Check ``is_test_module`` working for invalid filenames.""" 45 | assert not collector.is_test_module(filename) 46 | 
-------------------------------------------------------------------------------- /tests/test_parser.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | """Tests for :mod:`betelgeuse.parser`.""" 3 | import pytest 4 | import mock 5 | 6 | from betelgeuse import parser 7 | 8 | 9 | def test_parse_docstring(): 10 | """Check ``parse_docstring`` parser result.""" 11 | docstring = """ 12 | :field1: value1 13 | :field2: value2 14 | :field3: 15 | * item 1 16 | * item 2 17 | """ 18 | assert parser.parse_docstring(docstring) == { 19 | 'field1': 'value1', 20 | 'field2': 'value2', 21 | 'field3': '
<ul class="simple">\n<li>item 1</li>\n' 22 | '<li>item 2</li>\n</ul>
\n', 23 | } 24 | 25 | 26 | @pytest.mark.parametrize('docstring', ('', None)) 27 | def test_parse_none_docstring(docstring): 28 | """Check ``parse_docstring`` returns empty dict on empty input.""" 29 | assert parser.parse_docstring(docstring) == {} 30 | 31 | 32 | def test_parse_docstring_special_characters(): 33 | """Check ``parse_docstring`` parser result.""" 34 | docstring = """ 35 | Description with an special character like é 36 | 37 | :field1: value with an special character like é 38 | """ 39 | assert parser.parse_docstring(docstring) == { 40 | u'field1': u'value with an special character like é', 41 | } 42 | 43 | 44 | @pytest.mark.parametrize('string', ('', None)) 45 | def test_parse_rst_empty_string(string): 46 | """Check ``parse_rst`` returns empty string on empty input.""" 47 | assert parser.parse_rst(string) == '' 48 | 49 | 50 | def test_parse_rst_translator_class(): 51 | """Check if ``parse_rst`` uses a custom translator_class.""" 52 | docstring = """ 53 | :field1: value1 54 | :field2: value2 55 | :field3: 56 | """ 57 | expected = ( 58 | '
<div class="document">\n' 59 | '<table>\n' 60 | '<tr>\n' 61 | '<td>field1</td>\n' 62 | '<td>value1\n' 63 | '</td></tr>\n' 64 | '<tr>\n' 65 | '<td>field2</td>\n' 66 | '<td>value2\n' 67 | '</td></tr>\n' 68 | '<tr>\n' 69 | '<td>field3</td>\n' 70 | '<td></td></tr>\n' 71 | '\n' 72 | '</table>\n' 73 | '</div>\n' 74 | '
\n' 75 | ) 76 | assert parser.parse_rst( 77 | docstring, parser.TableFieldListTranslator) == expected 78 | 79 | 80 | def test_parse_rst_special_characters(): 81 | """Check if ``parse_rst`` plays nice with special characters.""" 82 | assert parser.parse_rst(u'String with special character like é') == ( 83 | u'
<div class="document">\n' 84 | u'<p>String with special character like é</p>\n' 85 | u'</div>
\n' 86 | ) 87 | 88 | 89 | def test_parse_markers(): 90 | """ 91 | Test if the markers list is parsed. 92 | 93 | The list should be a comma-separated list of markers from all levels, 94 | after removing the 'pytest.mark' text and ignoring some markers. 95 | """ 96 | _mod_markers = 'pytest.mark.destructive' 97 | _class_markers = [ 98 | 'pytest.mark.on_prem_provisioning', 99 | "pytest.mark.usefixtures('cleandir')" 100 | ] 101 | _test_markers = [ 102 | "pytest.mark.parametrize('something', ['a', 'b'])", 103 | 'pytest.mark.skipif(not settings.robottelo.REPOS_HOSTING_URL)', 104 | 'pytest.mark.tier1' 105 | ] 106 | _all_markers = [_mod_markers, _class_markers, _test_markers] 107 | 108 | expected = 'destructive, on_prem_provisioning, tier1' 109 | config = mock.MagicMock() 110 | config.MARKERS_IGNORE_LIST = [ 111 | 'parametrize', 'skipif', 'usefixtures', 'skip_if_not_set'] 112 | assert parser.parse_markers(_all_markers, config=config) == expected 113 | -------------------------------------------------------------------------------- /tests/test_source_generator.py: -------------------------------------------------------------------------------- 1 | """Tests for :mod:`betelgeuse.source_generator`.""" 2 | from betelgeuse import collector 3 | import mock 4 | 5 | 6 | def test_source_generator(): 7 | """Check if ``collect_tests`` parses the decorators of collected tests.""" 8 | tests = collector.collect_tests('tests/data/test_sample.py') 9 | test_decorated_test = [ 10 | test for test in tests['tests/data/test_sample.py'] 11 | if test.name == 'test_decorated_test' 12 | ].pop() 13 | 14 | assert test_decorated_test.decorators == [ 15 | 'decorator', 16 | 'decorator.mark.something', 17 | "decorator_with_args([1, b'bytes', ('a', 'b'), None])", 18 | 'decorator_with_args(*[True, ((True or False) and True)])', 19 | "decorator_with_args((f'{CONSTANT!r:5>} with literal {{ and }}',))", 20 | 'decorator_with_args({1, 2, (- 3)})', 21 | "decorator_with_args({'a': 1, 'b': 2, **{'c': 3}})", 22 | 23 | 'decorator_with_args([1, 2][0], [1, 2][:1], [1, 2][0:], ' 24 | '[1, 2][0:1:1])', 25 | 26 | 'decorator_with_args([i for i in range(5) if ((i % 2) == 0)])', 27 | 'decorator_with_args((i for i in range(5)))', 28 | 'decorator_with_args({i for i in range(5)})', 29 | "decorator_with_args({k: v for k in 'abcde' for v in range(5)})", 30 | 'decorator_with_args(1, 2, 3, a=1, b=2)', 31 | 32 | 'decorator_with_args(' 33 | 'dict(a=1, b=2), ' 34 | "dict(**{'a': 1}), " 35 | 'vars(decorator.mark), ' 36 | '(lambda a, *args, b=1, **kwargs: (a, args, b, kwargs)), ' 37 | '(lambda a, *, b=1: (a, b)), ' 38 | '(lambda v: (v if v else None))' 39 | ')', 40 | ] 41 | 42 | 43 | def test_source_markers(): 44 | """Verify that the test collection collects test markers.""" 45 | config = mock.Mock() 46 | config.MARKERS_IGNORE_LIST = [ 47 | 'parametrize', 'skipif', 'usefixtures', 'skip_if_not_set'] 48 | tests = collector.collect_tests('tests/data/test_sample.py', config=config) 49 | marked_test = [ 50 | test for test in tests['tests/data/test_sample.py'] 51 | if test.name == 'test_markers_sample' 52 | ].pop() 53 | assert marked_test.fields['markers'] == ('run_in_one_thread, tier1, ' 54 | 'on_prem_provisioning, osp') 55 | 56 | 57 | def test_source_singular_module_marker(): 58 | """Verify that a single module-level marker is retrieved.""" 59 | mod_string = 'import pytest\n\npytestmark = pytest.mark.tier2' \ 60 | '\n\ndef test_sing():\n\tpass' 61 | with open('/tmp/test_singular.py', 'w') as tfile: 62 | tfile.writelines(mod_string) 63 | 64 | config = mock.Mock() 65 | config.MARKERS_IGNORE_LIST
= ['tier3'] 66 | tests = collector.collect_tests('/tmp/test_singular.py', config=config) 67 | marked_test = [ 68 | test for test in tests['/tmp/test_singular.py'] 69 | if test.name == 'test_sing' 70 | ].pop() 71 | assert marked_test.fields['markers'] == 'tier2' 72 | --------------------------------------------------------------------------------