├── .flake8 ├── .gitignore ├── .travis.yml ├── Dockerfile ├── Jenkinsfile ├── LICENSE ├── Makefile ├── README.rst ├── automation_tools ├── __init__.py ├── baseimage.py ├── beaker.py ├── bz.py ├── manifest.py ├── repository.py ├── satellite5.py ├── satellite6 │ ├── __init__.py │ ├── capsule.py │ ├── capsule_config.json.sample │ ├── hammer.py │ └── log.py └── utils.py ├── docs ├── Makefile ├── api.rst ├── conf.py └── index.rst ├── fabfile.py ├── misc ├── base_image_creation │ ├── create-base-image.sh │ ├── ks_rhel6_template │ ├── ks_rhel7_template │ └── ks_rhel8_template └── cleanup_scripts │ ├── clean_docker.sh │ └── clean_vms.sh ├── requirements-optional.txt ├── requirements.txt ├── setup.py └── tests ├── __init__.py └── test_log.py /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 99 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | 5 | # C extensions 6 | *.so 7 | 8 | # Distribution / packaging 9 | .Python 10 | env/ 11 | bin/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | eggs/ 16 | lib/ 17 | lib64/ 18 | parts/ 19 | sdist/ 20 | var/ 21 | *.egg-info/ 22 | .installed.cfg 23 | *.egg 24 | 25 | # Installer logs 26 | pip-log.txt 27 | pip-delete-this-directory.txt 28 | 29 | # Unit test / coverage reports 30 | htmlcov/ 31 | .tox/ 32 | .coverage 33 | .cache 34 | nosetests.xml 35 | coverage.xml 36 | 37 | # Translations 38 | *.mo 39 | 40 | # Mr Developer 41 | .mr.developer.cfg 42 | .project 43 | .pydevproject 44 | .settings/ 45 | 46 | # Rope 47 | .ropeproject 48 | 49 | # Django stuff: 50 | *.log 51 | *.pot 52 | 53 | # Sphinx documentation 54 | docs/_build/ 55 | -------------------------------------------------------------------------------- /.travis.yml: 
-------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - "2.7" 4 | - "3.6" 5 | install: 6 | - pip install flake8 7 | - pip install -r requirements.txt 8 | - pip install -r requirements-optional.txt coveralls 9 | script: 10 | - flake8 . 11 | - py.test --cov=automation_tools tests/ 12 | after_success: 13 | - coveralls 14 | notifications: 15 | irc: "chat.freenode.net#robottelo" 16 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:2 2 | MAINTAINER https://github.com/SatelliteQE 3 | 4 | RUN mkdir automation-tools 5 | COPY / /root/automation-tools 6 | RUN cd /root/automation-tools && pip install -r requirements.txt 7 | 8 | ENV HOME /root/automation-tools 9 | WORKDIR /root/automation-tools 10 | 11 | CMD ["python"] 12 | -------------------------------------------------------------------------------- /Jenkinsfile: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent { 3 | node { 4 | label 'sat6-rhel' 5 | } 6 | 7 | } 8 | stages { 9 | stage('build') { 10 | agent { 11 | node { 12 | label 'sat6-rhel' 13 | } 14 | 15 | } 16 | steps { 17 | sh 'make gitflake8' 18 | } 19 | } 20 | } 21 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 
12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. 
For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 
83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 
117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 
146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 
216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 
246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. 
If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 
309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 
336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 
360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. 
If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 
428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. 
If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 
486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 
512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. 
If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 
578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 
613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | {one line to give the program's name and a brief idea of what it does.} 635 | Copyright (C) {year} {name of author} 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see . 649 | 650 | Also add information on how to contact you by electronic and paper mail. 
651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | {project} Copyright (C) {year} {fullname} 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | . 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | . 675 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Commands -------------------------------------------------------------------- 2 | 3 | help: 4 | @echo "Please use \`make ' where is one of" 5 | @echo " docs to make documentation in the default format" 6 | @echo " docs-clean to remove docs and doc build artifacts" 7 | @echo " pyc-clean to delete all temporary artifacts" 8 | @echo " can-i-push? 
to check if local changes are suitable to push" 9 | @echo " install-commit-hook to install pre-commit hook to check if changes are suitable to push" 10 | @echo " gitflake8 to check flake8 styling only for modified files" 11 | 12 | docs: 13 | @cd docs; $(MAKE) html 14 | 15 | docs-clean: 16 | @cd docs; $(MAKE) clean 17 | 18 | pyc-clean: ## remove Python file artifacts 19 | $(info "Removing unused Python compiled files, caches and ~ backups...") 20 | find . -name '*.pyc' -exec rm -f {} + 21 | find . -name '*.pyo' -exec rm -f {} + 22 | find . -name '*~' -exec rm -f {} + 23 | find . -name '__pycache__' -exec rm -fr {} + 24 | 25 | gitflake8: 26 | $(info "Checking style and syntax errors with flake8 linter...") 27 | @which flake8 >> /dev/null || pip install flake8 28 | @flake8 $(shell git diff --name-only) --show-source 29 | 30 | can-i-push?: gitflake8 31 | $(info "!!! Congratulations your changes are good to fly, make a great PR! ${USER}++ !!!") 32 | 33 | install-commit-hook: 34 | $(info "Installing git pre-commit hook...") 35 | @touch .git/hooks/pre-commit 36 | @grep -q '^make can-i-push?' .git/hooks/pre-commit || echo "make can-i-push?" >> .git/hooks/pre-commit 37 | 38 | # Special Targets ------------------------------------------------------------- 39 | 40 | .PHONY: help docs docs-clean pyc-clean can-i-push? install-commit-hook gitflake8 41 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | automation-tools 2 | ================ 3 | 4 | .. image:: https://travis-ci.org/SatelliteQE/automation-tools.svg?branch=master 5 | :target: https://travis-ci.org/SatelliteQE/automation-tools 6 | 7 | .. 
def detect_imagename(os_url):
    """Task to detect image name by OS URL.

    Derives the name from the distro's COMPOSE_ID file (ascending 4 dirs
    for RHEL layouts, 3 for CentOS); falls back to a timestamp-based name
    when no COMPOSE_ID can be fetched or parsed.

    :param str os_url: URL of OS media to detect
    :return str: detected image name (without extension .img)

    """
    # BUGFIX: `image` was never initialized, raising NameError when both
    # wget attempts failed, and AttributeError when COMPOSE_ID did not
    # match the expected pattern. Initialize it and guard the regex match.
    image = None
    for updirs in (4, 3):  # 4 dir ascends for RHEL, 3 ascends for CentOS
        comp_id_path = updirs * '../' + 'COMPOSE_ID'
        comp_id = run('wget -q -O- {}'.format(urljoin(os_url, comp_id_path)), quiet=True)
        if comp_id.succeeded:
            match_comp = search(r'(\w+)-([\d\.]+)-(?:\w+-)?([\d\.]+)', comp_id)
            if match_comp:
                # e.g. RHEL-8.2-20200404.1 -> rhel82-20200404.1-base
                image = (match_comp.group(1).lower()
                         + match_comp.group(2).replace('.', '')
                         + '-' + match_comp.group(3) + '-base')
            break
    if not image:
        # no COMPOSE_ID found (or unparsable): timestamp-based fallback
        image = 'unknown-{}-base'.format(str(time.time()).split('.')[0])

    print(image)
    return image
def create_baseimage(os_url, image=None, auth_keys_url=None, dns_server=None, disable_ipv6=False):
    """Task to create standard base image using OS URL.

    :param str os_url: URL of OS media to install
    :param str image: Image name to be created (without extension .img)
    :param str auth_keys_url: authorized_keys file URL to be put in baseimage
    :param str dns_server: Custom DNS server to be set in baseimage
    :param bool disable_ipv6: Flag to have IPv6 networking disabled (=True) or enabled (=False)
    :return str: name of the created image

    """
    if not os_url.endswith('/'):
        os_url += '/'
    if isinstance(disable_ipv6, str):
        # command-line arguments arrive as strings
        disable_ipv6 = (disable_ipv6.lower() == 'true')

    # Detect OS version from media.repo, e.g. "name = Red Hat ... 8.2"
    media = run('wget -q -O- {}'.format(urljoin(os_url, 'media.repo')), warn_only=True)
    if media.succeeded:
        match_name = search(r'^name\s*=\s*(\D*)\s+([\d\.]*)', media, MULTILINE)
        if match_name:
            # BUGFIX: group(2) is a str; the comparisons below (`> 9`,
            # `not in [6, 7]`) need an int, so convert explicitly.
            os_ver = int(match_name.group(2).split('.')[0])
        else:
            os_ver = 8
    else:  # CentOS 7 has no media.repo
        os_ver = 7

    if os_ver > 9:  # map Fedora to RHEL8 kickstart
        os_ver = 8

    if not image:
        image = detect_imagename(os_url)

    # Prepare the kickstart file from the version-specific template
    put('misc/base_image_creation/ks_rhel{}_template'.format(os_ver), 'ks.cfg')
    run('sed -i "s|OS_URL|{}|g" ks.cfg'.format(os_url))
    if os_ver not in [6, 7]:  # for RHEL8+ derive AppStream URL from BaseOS URL
        run('sed -i "s|AS_URL|{}|g" ks.cfg'.format(os_url.replace('BaseOS', 'AppStream')))
    run(r'sed -i "s|ENCRYPT_PASS|\\$1\\$xyz\\$7xHVh4/yhE6P00NIXbWZA/|g" ks.cfg')
    run('sed -i "s|AUTH_KEYS_URL|{}|g" ks.cfg'.format(auth_keys_url))
    if not disable_ipv6:
        run('sed -i "/disable_ipv6/d" ks.cfg')
    if dns_server:
        run('sed -i "s|NAMESERVER|{}|g" ks.cfg'.format(dns_server))
    else:
        run(r'sed -i "\|/etc/resolv.conf|d" ks.cfg')

    # (Re)create the VM from the kickstart, let install run, then power off
    run('virsh undefine {}'.format(image), warn_only=True)
    run('virt-install --connect qemu:///system -n {img} -l {url} -w bridge:br0 '
        '--initrd-inject ks.cfg -x "ks=file:/ks.cfg console=tty0 console=ttyS0,115200" '
        '--disk path=/var/lib/libvirt/images/{img}.img,size=200,device=disk,bus=virtio,format=raw,'
        'sparse=true --memory 4096 --vcpus 2 --cpu host --check-cpu --accelerate --hvm --force '
        '--graphics vnc,listen=0.0.0.0 --clock offset=localtime'
        .format(img=image, url=os_url))
    time.sleep(30)
    run('virsh destroy {}'.format(image))

    return image
def deploy_baseimage(image, hypervisors=None):
    """Task to deploy specific image to set of hypervisors.

    The following environment variables affect this command:

    PROVISIONING_HOSTS
        Set of hypervisor FQDNs/IPs to deploy image to

    :param str image: Image name to be deployed (without extension .img)
    :param list hypervisors: Set of hypervisor FQDNs/IPs to deploy image to

    """
    # BUGFIX: default was a shared mutable list ([]). None and [] are both
    # falsy, so the env-var fallback below behaves exactly as before.
    hypervisors = hypervisors or os.environ.get('PROVISIONING_HOSTS', '')
    if isinstance(hypervisors, str):
        hypervisors = hypervisors.split()

    # Convert raw image to compressed qcow2 to shrink the transfer
    tmpimg = run('mktemp')
    run('qemu-img convert /var/lib/libvirt/images/{}.img -O qcow2 {}'.format(image, tmpimg))
    src_fqdn = run('hostname')

    for target in hypervisors:
        if target == src_fqdn:  # omit image hosting machine from deployment
            continue
        if env.forward_agent:  # set by `-A` fab option and ssh agent must be available
            run('scp -p {} {}:{}'.format(tmpimg, target, tmpimg), warn_only=True)
        else:  # no ssh agent, scp works only 3way
            get(tmpimg, tmpimg)
            with settings(host_string=target):
                put(tmpimg, tmpimg)

        with settings(host_string=target):
            # Expand back to raw on the target and clean up the transfer file
            run('qemu-img convert {} -O raw /var/lib/libvirt/images/{}.img'.format(tmpimg, image))
            run('rm -f {}'.format(tmpimg))

    run('rm -f {}'.format(tmpimg))


def deploy_baseimage_by_url(os_url, **kwargs):
    """Task to create standard base image using OS URL and deploy it to set
    of hypervisors.

    :param str os_url: URL of OS media to install

    """
    # remaining kwargs are forwarded to create_baseimage
    hypervisors = kwargs.pop('hypervisors', None)
    deploy_baseimage(create_baseimage(os_url, **kwargs), hypervisors=hypervisors)
def main():
    """Run :func:`beaker_jobid_to_system_info` and print the response."""
    pprint.pprint(beaker_jobid_to_system_info(open('a.xml')))


def _beaker_process_recipe(recipe):
    """Extract reservation information from a single Beaker recipe element.

    :param recipe: recipe (or guestrecipe) DOM element to process
    :return: dict with id, system, arch, distro, variant and reservation

    """
    attrs = recipe.attributes
    recipe_info = {
        'id': int(attrs['id'].value),
        'system': attrs['system'].value,
        'arch': attrs['arch'].value,
        'distro': attrs['distro'].value,
        'variant': attrs['variant'].value,
    }

    # Find the last /distribution/reservesys task, if any; when present
    # (and no <reservesys> tag exists) the status comes from that task.
    res_task_element = None
    for task in reversed(recipe.getElementsByTagName('task')):
        if task.attributes['name'].value == '/distribution/reservesys':
            res_task_element = task
            break

    # A <reservesys> element means status is taken from the recipe itself.
    res_tag = bool(recipe.getElementsByTagName('reservesys'))

    if res_task_element is not None and not res_tag:
        recipe_info['reservation'] = res_task_element.attributes['status'].value
    elif res_task_element is not None and res_tag:
        recipe_info['reservation'] = (
            'ERROR: Looks like the recipe for this system have too many '
            'methods to reserve. Do not know what happens.'
        )
    else:
        # tag-only or neither: the recipe-level status applies either way
        recipe_info['reservation'] = recipe.attributes['status'].value
    return recipe_info
@_silencer
def enable_repos(*args, **kwargs):
    """Enable repos passed as ``args`` using ``subscription-manager repos
    --enable``.

    For example::

        enable_repos('repo1', 'repo2')

    Will run the command ``subscription-manager repos --enable "repo1" --enable
    "repo2"``.

    If the keyword argument ``silent`` is ``True`` then the stdout output will
    be hidden.

    """
    # Build one --enable flag per requested repository
    enable_flags = ' '.join('--enable "{0}"'.format(repo) for repo in args)
    run('subscription-manager repos {0}'.format(enable_flags))
def create_custom_repos(**kwargs):
    """Create custom repofiles.

    Each ``kwargs`` item will result in one repository file created. Where the
    key is the repository filename and repository name, and the value is the
    repository URL.

    For example::

        create_custom_repos(custom_repo='http://repourl.domain.com/path')

    Will create a repository file named ``custom_repo.repo`` with the following
    contents::

        [custom_repo]
        name=custom_repo
        baseurl=http://repourl.domain.com/path
        enabled=1
        gpgcheck=0

    """
    # NOTE: docstring example previously showed `create_custom_repo`
    # (singular) which is not this function's name — fixed above.
    for name, url in kwargs.items():
        repo_file = StringIO()
        repo_file.write(
            u'[{name}]\n'
            u'name={name}\n'
            u'baseurl={url}\n'
            u'enabled=1\n'
            u'gpgcheck=0\n'
            .format(name=name, url=url)
        )
        # Upload the in-memory file object as the remote repo file
        put(local_path=repo_file,
            remote_path='/etc/yum.repos.d/{0}.repo'.format(name))
        repo_file.close()
def enable_satellite_repos(sat_version='6.8', cdn=False, beta=False, disable_enabled=True):
    """Enable repositories required to install Satellite 6

    :param sat_version: Indicates which satellite version should be installed,
        default: latest
    :param cdn: Indicates if the CDN Satellite 6 repo should be enabled or not
    :param beta: Indicates if the Beta Satellite 6 repo should be enabled or
        not. The Beta repo is available through the CDN and, if both ``cdn``
        and ``beta`` are ``True``, the beta repo will be used instead of the
        stable one.
    :param disable_enabled: If True, disable all repositories (including beaker
        repositories) before enabling repositories.

    """
    # Command-line arguments are passed in as strings
    if isinstance(cdn, str):
        cdn = (cdn.lower() == 'true')
    if isinstance(beta, str):
        beta = (beta.lower() == 'true')
    if isinstance(disable_enabled, str):
        disable_enabled = (disable_enabled.lower() == 'true')

    if disable_enabled is True:
        disable_beaker_repos(silent=True)
        disable_repos('*', silent=True, warn_only=True)

    os_version = distro_info()[1]
    if os_version > 7:
        # RHEL8+ repository naming scheme
        repos = [
            'rhel-{0}-for-x86_64-baseos-rpms',
            'rhel-{0}-for-x86_64-appstream-rpms',
            'ansible-{2}-for-rhel-{0}-x86_64-rpms',
        ]
    else:
        repos = [
            'rhel-{0}-server-rpms',
            'rhel-server-rhscl-{0}-rpms',
            'rhel-{0}-server-ansible-{2}-rpms',
        ]

    # Satellite 6.8+ requires a newer ansible stream
    if version(sat_version) > version(6.7):
        ansible_version = '2.9'
    else:
        ansible_version = '2.8'

    if beta:
        repos.append('rhel-server-{0}-satellite-6-beta-rpms')
        repos.append('rhel-{0}-server-satellite-maintenance-6-beta-rpms')
    elif cdn:
        repos.append('rhel-{0}-server-satellite-{1}-rpms')
        repos.append('rhel-{0}-server-satellite-maintenance-6-rpms')

    # format placeholders: {0}=os version, {1}=sat version, {2}=ansible version
    enable_repos(*[repo.format(os_version, sat_version, ansible_version) for repo in repos])
    run('yum repolist')


@_silencer
def disable_beaker_repos(**kwargs):
    """Disable beaker repositories.

    Sets ``enabled=0`` in the beaker repo files (except beaker-tasks) via
    ``sed`` and wipes the yum cache.

    If the keyword argument ``silent`` is ``True`` then the stdout output will
    be hidden.

    """
    # NOTE: previous docstring claimed yum-config-manager usage / moving repo
    # files to the home directory, which this implementation never did.
    # Clean up system if Beaker-based
    run('sed -i "s/^enabled=.*/enabled=0/" /etc/yum.repos.d/beaker-[^t]*.repo', warn_only=True)
    run('rm -rf /var/cache/yum*')
def manage_custom_repos(**kwargs):
    """Enable or disable custom repositories.

    The keyword key is the repository filename and the boolean value indicates
    if it should enable if ``True`` or disable if ``False``.

    """
    for name, enable in kwargs.items():
        # map True -> enabled=1, anything else -> enabled=0
        flag = 1 if enable is True else 0
        target = '/etc/yum.repos.d/{0}.repo'.format(name)
        run('sed -i -e "s/^enabled=.*/enabled={0}/" {1}'.format(flag, target))
def satellite5_product_install(create_vm=False, selinux_mode=None):
    """Task which installs only satellite5 product.

    If ``create_vm`` is True then ``vm_destroy`` and ``vm_create`` tasks will
    be run. Make sure to set the required environment variables for those
    tasks. Also, if one of the ``setup_ddns`` required environment variables
    is set then that task will run.

    :param bool create_vm: creates a virtual machine and then install the
        product on it.
    :param str selinux_mode: switches to specified selinux mode.

    """
    # Command-line arguments are passed in as strings.
    if isinstance(create_vm, str):
        create_vm = (create_vm.lower() == 'true')

    if selinux_mode is None:
        selinux_mode = os.environ.get('SELINUX_MODE', 'enforcing')

    if create_vm:
        target_image = os.environ.get('TARGET_IMAGE')
        if target_image is None:
            print('The TARGET_IMAGE environment variable should be defined')
            sys.exit(1)

        # Recreate the VM from scratch
        execute(vm_destroy, target_image, delete_image=True)
        execute(vm_create)

        if 'DDNS_HASH' in os.environ or 'DDNS_PACKAGE_URL' in os.environ:
            execute(setup_ddns, env['vm_domain'], env['vm_ip'], host=env['vm_ip'])

    # When creating a vm the vm_ip will be set, otherwise use the fabric host
    host = env.get('vm_ip', env['host'])

    # Register and subscribe machine to Red Hat, prepare it, then install
    execute(subscribe, host=host)
    execute(install_prerequisites, host=host)
    execute(setenforce, selinux_mode, host=host)
    execute(enable_satellite_repos, host=host)
    execute(update_packages, warn_only=True)
    execute(satellite5_installer, host=host)
def satellite5_installer():
    """Installs Satellite 5 from an ISO image.

    The following environment variables affect this command:

    RHN_USERNAME
        Red Hat Network username.
    RHN_PASSWORD
        Red Hat Network password.
    ISO_URL
        The URL where the ISO will be downloaded.
    SATELLITE_CERT_URL
        The URL where the activation certificate will be downloaded.

    """
    iso_url = os.environ.get('ISO_URL')
    if iso_url is None:
        print('Please provide a valid URL for the ISO image.')
        sys.exit(1)

    # Download and mount the ISO
    print('Downloading ISO...')
    iso_download(iso_url)
    run('umount ISO', warn_only=True)
    run('mkdir -p ISO')
    run('mount -t iso9660 -o loop *.iso ISO')

    # prepare the answer file
    opts = {
        'admin-email': os.environ.get('ADMIN_EMAIL', 'root@localhost'),
        'rhn-username': os.environ.get('RHN_USERNAME', ''),
        'rhn-password': os.environ.get('RHN_PASSWORD', ''),
        'rhn-profile-name': os.environ.get('RHN_PROFILE', ''),
        'rhn-http-proxy': os.environ.get('RHN_HTTP_PROXY', ''),
        'rhn-http-proxy-username':
            os.environ.get('RHN_HTTP_PROXY_USERNAME', ''),
        'rhn-http-proxy-password':
            os.environ.get('RHN_HTTP_PROXY_PASSWORD', ''),
        'ssl-set-org': os.environ.get('SSL_SET_ORG', 'Red Hat'),
        'ssl-set-org-unit': os.environ.get('SSL_SET_ORG_UNIT', 'Satellite QE'),
        'ssl-set-city': os.environ.get('SSL_SET_CITY', 'Brno'),
        'ssl-set-state': os.environ.get('SSL_SET_STATE', 'BRQ'),
        'ssl-set-country': os.environ.get('SSL_SET_COUNTRY', 'CZ'),
        'ssl-password': os.environ.get('SSL_PASSWORD', 'reset'),
        'satellite-cert-url': os.environ.get('SATELLITE_CERT_URL', '')
    }
    run(
        # BUGFIX: `cat < /tmp/answers.txt` *read* a (nonexistent) file,
        # leaving the trailing EOF terminator dangling and the answer lines
        # executed as shell commands. Write the file through a heredoc.
        'cat <<EOF > /tmp/answers.txt\n'
        'admin-email={admin-email}\n'
        'rhn-username={rhn-username}\n'
        'rhn-password={rhn-password}\n'
        'rhn-profile-name={rhn-profile-name}\n'
        'rhn-http-proxy={rhn-http-proxy}\n'
        'rhn-http-proxy-username={rhn-http-proxy-username}\n'
        'rhn-http-proxy-password={rhn-http-proxy-password}\n'
        'ssl-config-sslvhost=y\n'
        'ssl-set-org={ssl-set-org}\n'
        'ssl-set-org-unit={ssl-set-org-unit}\n'
        'ssl-set-city={ssl-set-city}\n'
        'ssl-set-state={ssl-set-state}\n'
        'ssl-set-country={ssl-set-country}\n'
        'ssl-set-email={admin-email}\n'
        'ssl-password={ssl-password}\n'
        'satellite-cert-file=/tmp/SATCERT\n'
        'enable-tftp=yes\n'
        'EOF\n'.format(**opts)
    )

    # download a certificate
    print('Downloading Certificate...')
    run('wget -nv -O /tmp/SATCERT {satellite-cert-url}'.format(**opts))
    # ...and run the installer script.
    with cd('ISO'):
        run('./install.pl --answer-file=/tmp/answers.txt --non-interactive '
            '--re-register --run-updater=yes --enable-tftp=yes')
    run('yum -y update')
    run('spacewalk-schema-upgrade -y')
    run('rhn-satellite restart')
24 | """ 25 | 26 | 27 | class Credentials(object): 28 | """Stores a server SSH credentials information. 29 | 30 | Usage:: 31 | 32 | cred1 = Credentials('admin', 'password') 33 | cred2 = Credentials(key_filename='/path/to/ssh.key') 34 | """ 35 | def __init__(self, user=None, password=None, key_filename=None): 36 | self.user = user 37 | self.password = password 38 | self.key_filename = key_filename 39 | 40 | 41 | class HostConfig(Credentials): 42 | """Stores a host's hostname and credentials information. 43 | 44 | Usage:: 45 | 46 | cred1 = HostConfig('host1.example.com', 'admin', 'password') 47 | cred2 = HostConfig( 48 | 'host2.example.com', key_filename='/path/to/ssh.key') 49 | """ 50 | def __init__(self, hostname=None, port=22, *args, **kwargs): 51 | super(HostConfig, self).__init__(*args, **kwargs) 52 | self.hostname = hostname 53 | self.port = port 54 | 55 | @property 56 | def host_string(self): 57 | """Return a host_string in the format expected by Fabric""" 58 | return '{0}@{1}:{2}'.format(self.user, self.hostname, self.port) 59 | 60 | 61 | class Config(object): 62 | """Configuration information provide easy access to configuration and some 63 | helper methods to identify if some configuration is present or not. 
64 | """ 65 | def __init__(self, path): 66 | self.path = path 67 | self.organization_label = None 68 | self.environment = None 69 | self.content_view = None 70 | self.activation_key = None 71 | self.admin_user = None 72 | self.admin_password = None 73 | self.defaults = None 74 | self.server = None 75 | self.capsules = [] 76 | self._key_filenames = set() 77 | 78 | self._parse() 79 | set_hammer_config(self.admin_user, self.admin_password) 80 | 81 | def _parse(self): 82 | """Parse the configuration and store the contents""" 83 | with open(self.path) as handler: 84 | data = json.load(handler) 85 | self.organization_label = data.get('organization-label') 86 | self.environment = data.get('environment') 87 | self.content_view = data.get('content-view') 88 | self.activation_key = data.get('activation-key') 89 | self.admin_user = data.get('admin-user') 90 | self.admin_password = data.get('admin-password') 91 | defaults = data.get('defaults') 92 | if defaults is not None and isinstance(defaults, dict): 93 | key_filename = defaults.get('key-filename') 94 | self._key_filenames.add(key_filename) 95 | self.defaults = Credentials( 96 | user=defaults.get('user'), 97 | password=defaults.get('password'), 98 | key_filename=key_filename, 99 | ) 100 | server = data.get('server') 101 | if server is not None and isinstance(server, dict): 102 | key_filename = server.get('key-filename') 103 | self._key_filenames.add(key_filename) 104 | self.server = HostConfig( 105 | hostname=server.get('hostname'), 106 | user=server.get('user', self.defaults.user), 107 | password=server.get('password', self.defaults.password), 108 | key_filename=key_filename, 109 | ) 110 | capsules = data.get('capsules') 111 | if capsules is not None and isinstance(capsules, list): 112 | for capsule in capsules: 113 | if capsule is not None and isinstance(capsule, dict): 114 | key_filename = capsule.get('key-filename') 115 | self._key_filenames.add(key_filename) 116 | self.capsules.append(HostConfig( 117 | 
hostname=capsule.get('hostname'), 118 | user=capsule.get('user', self.defaults.user), 119 | password=capsule.get( 120 | 'password', self.defaults.password), 121 | key_filename=key_filename, 122 | )) 123 | 124 | @property 125 | def key_filenames(self): 126 | """Return a list of collect key filenames or None if the list is 127 | empty. 128 | """ 129 | if self._key_filenames: 130 | return list(self._key_filenames) 131 | else: 132 | return None 133 | 134 | @property 135 | def passwords(self): 136 | """Return a dict in the format suited for Fabric usage in order to 137 | define passwords for hosts. 138 | """ 139 | passwords = {} 140 | if self.server.password and not self.server.key_filename: 141 | passwords[self.server.host_string] = self.server.password 142 | for capsule in self.capsules: 143 | if capsule.password and not capsule.key_filename: 144 | passwords[capsule.host_string] = capsule.password 145 | return passwords 146 | 147 | 148 | def _get_config(): 149 | """Get the capsule configuration if available in the fabric environment 150 | else raise ``ImproperlyConfigured``. 151 | """ 152 | config = env.get('capsule_config') 153 | if config is None: 154 | raise ImproperlyConfigured( 155 | 'Make sure to run load_capsule_config task.') 156 | return config 157 | 158 | 159 | @task 160 | def load_capsule_config(path): 161 | env['capsule_config'] = Config(path) 162 | 163 | 164 | @task 165 | def get_oauth_info(): 166 | """Get oauth_consumer_key, oauth_consumer_secret and pulp_oauth_secret 167 | information. 
168 | 169 | :return: Tuple containing (oauth_consumer_key, oauth_consumer_secret, 170 | pulp_oauth_secret) 171 | """ 172 | result = run('grep oauth_consumer /etc/foreman/settings.yaml', quiet=True) 173 | for line in result.splitlines(): 174 | if 'oauth_consumer_key' in line: 175 | oauth_consumer_key = line.split(': ')[1].strip() 176 | if 'oauth_consumer_secret' in line: 177 | oauth_consumer_secret = line.split(': ')[1].strip() 178 | result = run('grep "^oauth_secret" /etc/pulp/server.conf', quiet=True) 179 | pulp_oauth_secret = result.split(': ')[1].strip() 180 | print( 181 | 'oauth_consumer_key: {0}\n' 182 | 'oauth_consumer_secret: {1}\n' 183 | 'pulp_oauth_secret: {2}' 184 | .format(oauth_consumer_key, oauth_consumer_secret, pulp_oauth_secret) 185 | ) 186 | return (oauth_consumer_key, oauth_consumer_secret, pulp_oauth_secret) 187 | 188 | 189 | @task 190 | def generate_capsule_certs(capsule_hostname, force=False): 191 | """Generate certificates for a capsule. 192 | 193 | Run ``capsule-certs-generate --capsule-fqdn --certs-tar 194 | "-certs.tar"`` in order to generate them. 195 | 196 | The resulting tarbal will be store on the working directory of the remote 197 | host. 198 | 199 | :param str capsule_hostname: The fully qualified domain name for the 200 | capsule. 201 | :param bool force: Force creation of the capsule cert even if it is 202 | already created. 
203 | """ 204 | cert_path = '{0}-certs.tar'.format(capsule_hostname) 205 | result = run('[ -f {0} ]'.format(cert_path), quiet=True) 206 | if result.failed or force: 207 | run('capsule-certs-generate -v --foreman-proxy-fqdn {0} ' 208 | '--certs-tar {1} --certs-update-all' 209 | .format(capsule_hostname, cert_path)) 210 | return cert_path 211 | 212 | 213 | @task 214 | def register_capsule(): 215 | """Register the capsule on the Satellite 6 server.""" 216 | config = _get_config() 217 | run( 218 | 'yum -y localinstall ' 219 | 'http://{0}/pub/katello-ca-consumer-latest.noarch.rpm' 220 | .format(config.server.hostname), 221 | warn_only=True 222 | ) 223 | if config.activation_key: 224 | run( 225 | 'subscription-manager register ' 226 | '--org={0} --activationkey={1} --force' 227 | .format(config.organization_label, config.activation_key) 228 | ) 229 | elif config.content_view: 230 | run( 231 | 'subscription-manager register --username {0} --auto-attach ' 232 | '--force --password {1} --org {2} --environment {3} ' 233 | .format( 234 | config.admin_user, 235 | config.admin_password, 236 | config.organization_label, 237 | config.content_view, 238 | ) 239 | ) 240 | else: 241 | raise ImproperlyConfigured( 242 | 'An activation key or content_view name is required.') 243 | run('yum repolist') 244 | 245 | 246 | @task 247 | def capsule_installer( 248 | capsule_fqdn, cert_path, oauth_consumer_key, 249 | oauth_consumer_secret, pulp_oauth_secret): 250 | """Install and run capsule-installer.""" 251 | config = _get_config() 252 | run('yum -y install satellite-capsule') 253 | run( 254 | 'foreman-installer -v --scenario capsule ' 255 | '--certs-tar {cert_path} ' 256 | '--foreman-base-url "https://{parent_fqdn}" ' 257 | '--oauth-consumer-key "{oauth_consumer_key}" ' 258 | '--oauth-consumer-secret "{oauth_consumer_secret}" ' 259 | '--parent-fqdn "{parent_fqdn}" ' 260 | '--pulp-oauth-secret "{pulp_oauth_secret}" ' 261 | '--register-in-foreman true ' 262 | '--trusted-hosts "{capsule_fqdn}" ' 
        '--trusted-hosts "{parent_fqdn}"'
        .format(
            capsule_fqdn=capsule_fqdn,
            cert_path=cert_path,
            oauth_consumer_key=oauth_consumer_key,
            oauth_consumer_secret=oauth_consumer_secret,
            parent_fqdn=config.server.hostname,
            pulp_oauth_secret=pulp_oauth_secret,
        )
    )


@task
def setup_capsule_content(
        activation_key_name,
        content_view_name,
        organization_id,
        product_name,
        rhel_repo_name,
        rhel_repo_url,
        satellite_capsule_repo_name,
        satellite_capsule_repo_url,
        satellite_tools_repo_name,
        satellite_tools_repo_url):
    """Setup the content used to provision a capsule.


    :param activation_key_name: name of the activation key which will be
        created
    :param content_view_name: name of the content view which will be created
    :param organization_id: organization where all entities will be created
    :param product_name: name of the product which will be created
    :param rhel_repo_name: name of the RHEL repository which will be created
    :param rhel_repo_url: URL of the RHEL repository which will be created
    :param satellite_capsule_repo_name: name of the capsule repository which
        will be created
    :param satellite_capsule_repo_url: URL of the capsule repository which will
        be created
    :param satellite_tools_repo_name: name of the satellite tools repository
        which will be created
    :param satellite_tools_repo_url: URL of the satellite tools repository
        which will be created
    """
    # Create the product and its three repositories (RHEL, capsule and
    # satellite tools)...
    hammer_product_create(product_name, organization_id)
    hammer_repository_create(
        rhel_repo_name, organization_id, product_name, rhel_repo_url)
    hammer_repository_create(
        satellite_capsule_repo_name,
        organization_id,
        product_name,
        satellite_capsule_repo_url
    )
    hammer_repository_create(
        satellite_tools_repo_name,
        organization_id,
        product_name,
        satellite_tools_repo_url
    )
    # ...synchronize them...
    hammer_repository_synchronize(
        rhel_repo_name, organization_id, product_name)
    hammer_repository_synchronize(
        satellite_capsule_repo_name, organization_id, product_name)
    hammer_repository_synchronize(
        satellite_tools_repo_name, organization_id, product_name)
    # ...and expose them through a published content view.
    hammer_content_view_create(
        content_view_name, organization_id)
    hammer_content_view_add_repository(
        content_view_name, organization_id, product_name, rhel_repo_name)
    hammer_content_view_add_repository(
        content_view_name,
        organization_id,
        product_name,
        satellite_capsule_repo_name
    )
    hammer_content_view_add_repository(
        content_view_name,
        organization_id,
        product_name,
        satellite_tools_repo_name
    )
    hammer_content_view_publish(content_view_name, organization_id)
    # Extract field 8 of the CSV subscription listing for this product.
    # NOTE(review): the value fetched here looks like a subscription or
    # product *id*, yet it is passed below as the third argument of
    # hammer_activation_key_add_subscription(), whose parameter is named
    # "product_name" and is used to search the subscription list by name
    # -- confirm which side is correct before relying on this.
    product_id = run(
        "hammer --csv subscription list --organization-id='{0}' "
        "--search='name=\"{1}\"' | awk -F, 'NR>1{{print$8}}'"
        .format(organization_id, product_name),
        quiet=True
    )
    hammer_activation_key_create(
        activation_key_name, organization_id, content_view_name)
    hammer_activation_key_add_subscription(
        activation_key_name, organization_id, product_id)


@task
def setup_capsules(path):
    """Read the configuration at ``path``, create the capsules and start
    content synchronization on them.
    """
    load_capsule_config(path)
    config = env.capsule_config
    server = config.server.host_string

    # Let Fabric know how to log into the hosts
    env.passwords = config.passwords
    env.key_filename = config.key_filenames

    # The oauth information is needed for every capsule register. Cache this
    # information.
    with settings(host_string=server):
        oauth_info = get_oauth_info()

    # Register each capsule on the server
    for capsule in config.capsules:
        # Generate the capsule certs on the Satellite server and copy the
        # tarball through the local machine...
        with settings(host_string=server):
            cert_path = generate_capsule_certs(capsule.hostname)
            get(remote_path=cert_path, local_path=cert_path)

        # ...then register and install each capsule from the tarball.
        with settings(host_string=capsule.host_string):
            register_capsule()
            put(local_path=cert_path)
            capsule_installer(capsule.hostname, cert_path, *oauth_info)
--------------------------------------------------------------------------------
/automation_tools/satellite6/capsule_config.json.sample:
--------------------------------------------------------------------------------
{
    "organization-label": "Default-Organization",
    "environment": "dev",
    "content-view": "mycv",
    "activation-key": "capsule-ak",
    "admin-user": "admin",
    "admin-password": "changeme",
    "defaults": {
        "user": "root",
        "password": "rootpass",
        "key-filename": "path/to/ssh.key"
    },
    "server": {
        "hostname": "sat.example.com",
        "user": "root",
        "password": "rootpass",
        "key-filename": "path/to/ssh.key"
    },
    "capsules": [
        {
            "hostname": "capsule1.example.com",
            "password": "rootpass",
            "key-filename": "path/to/ssh.key"
        }
    ]
}
--------------------------------------------------------------------------------
/automation_tools/satellite6/hammer.py:
--------------------------------------------------------------------------------
"""Tasks for helping to execute hammer commands on satellite"""
from fabric.api import env, run, task
import json


# Thin containers used by hammer() so that succeeded/failed/return_code
# metadata can be attached as attributes to parsed JSON results.
class _AttributeDict(dict):
    """Simple dict subclass to allow arbitrary attribute access."""
    pass


class _AttributeList(list):
    """Simple List subclass to allow arbitrary attribute access."""
    pass


class ImproperlyConfigured(Exception):
    """Indicates
that the Hammer configuration is improperly configured 18 | - for example, hammer configuration is not set or missing. 19 | """ 20 | 21 | 22 | def _lower_dict_keys(d): 23 | """Helper for ensuring that all dictionary keys are lowercase.""" 24 | if isinstance(d, list): 25 | return [_lower_dict_keys(v) for v in d] 26 | elif isinstance(d, dict): 27 | return dict((k.lower(), _lower_dict_keys(v)) for k, v in d.iteritems()) 28 | else: 29 | return d 30 | 31 | 32 | def get_attribute_value(hammer_result, search_key, attribute): 33 | """Gets the attribute value from hammer_reult using the search key 34 | 35 | e.g. Run hammer() def for 'capsule list' and get hammer_result then 36 | search capsules 'id' attribute by 'capsule name' search key. 37 | 38 | :param list/dict hammer_result: hammer result from hammer() defination 39 | :param str attribute: The attribute name of which value to be fetched 40 | :param str search_key: The search_key whose attribute to be fetched 41 | :return Returns a string/list/dict as attribute value 42 | """ 43 | if isinstance(hammer_result, list): 44 | key_index = None 45 | for i in range(len(hammer_result)): 46 | if search_key in hammer_result[i].values(): 47 | key_index = i 48 | break 49 | else: 50 | raise KeyError( 51 | 'Unable to find search_key {} in given hammer ' 52 | 'result to get attribute value'.format(search_key)) 53 | return hammer_result[key_index][attribute] 54 | elif isinstance(hammer_result, dict): 55 | if search_key not in hammer_result.values(): 56 | raise KeyError( 57 | 'Unable to find search_key {} in given hammer ' 58 | 'result to get attribute value'.format(search_key)) 59 | return hammer_result[attribute] 60 | else: 61 | raise TypeError('hammer data is not one of type list/dict.') 62 | 63 | 64 | def set_hammer_config(user=None, password=None): 65 | """Sets the hammer admin username and password fabric env. 
variables to run 66 | hammer commands""" 67 | env['hammer_user'] = 'admin' if not user else user 68 | env['hammer_password'] = 'changeme' if not password else password 69 | 70 | 71 | @task 72 | def hammer(command, output='json'): 73 | """Run hammer -u -p --output . 74 | 75 | This method has a dependency on set_hammer_config function. 76 | 77 | :param str command: The hammer subcommand to run 78 | :param str output: The command output type which hammer supports, 79 | by default json 80 | :return: if output is json, then returns a JSON decoded object containing 81 | the result of the command. The returned object will exhibit ``failed`` 82 | and ``succeeded`` boolean attributes specifying whether the command 83 | failed or succeeded, and will also include the return code as the 84 | ``return_code`` attribute. 85 | Else, returns a string of given output type representation of hammer 86 | command output. 87 | """ 88 | output = output.lower() 89 | command_result = run( 90 | 'hammer --username {0} --password {1} --output {2} {3}' 91 | .format( 92 | env.get('hammer_user'), 93 | env.get('hammer_password'), 94 | output, 95 | command), 96 | quiet=True 97 | ) 98 | if output == 'json': 99 | try: 100 | data = json.loads(command_result) 101 | except ValueError: 102 | data = command_result 103 | result = _lower_dict_keys(data) 104 | if isinstance(result, list): 105 | result = _AttributeList(result) 106 | elif isinstance(result, dict): 107 | result = _AttributeDict(result) 108 | result.succeeded = command_result.succeeded 109 | result.failed = command_result.failed 110 | result.return_code = command_result.return_code 111 | return result 112 | elif output in ['base', 'table', 'silent', 'csv', 'yaml', 'json']: 113 | return command_result 114 | else: 115 | raise ValueError('Invalid output type \'{}\' has provided to get ' 116 | 'hammer output.'.format(output)) 117 | 118 | 119 | @task 120 | def hammer_capsule_lcenvs(capsule_id): 121 | """Get the available lifecycle environments of a 
capsule. 122 | 123 | :param capsule_id: The capsule ID to get the availables lifecycle 124 | environments. 125 | :returns: A list of lifecycle environment dictonaries. For example: 126 | ``[{u'organization': u'Default Organization', u'id': 1, 127 | u'name': u'Library'}]``. 128 | :rtype: list 129 | """ 130 | return hammer( 131 | 'capsule content available-lifecycle-environments --id {0}' 132 | .format(capsule_id), 133 | ) 134 | 135 | 136 | @task 137 | def hammer_capsule_add_lcenv(capsule_id, lcenv_id): 138 | """Add the lifecycle environment to the capsule. 139 | 140 | :param capsule_id: The capsule ID to add the lifecycle environment. 141 | :param lcenv_id: The lifecycle environment ID to add to the capsule. 142 | """ 143 | return hammer( 144 | 'capsule content add-lifecycle-environment ' 145 | '--environment-id {0} --id {1}' 146 | .format(lcenv_id, capsule_id) 147 | ) 148 | 149 | 150 | @task 151 | def hammer_product_create(name, organization_id): 152 | """Create a product 153 | 154 | :param name: name of the product 155 | :param organization_id: organization where the product will be created 156 | """ 157 | return hammer( 158 | 'product create --name "{0}" --organization-id "{1}"' 159 | .format(name, organization_id) 160 | ) 161 | 162 | 163 | @task 164 | def hammer_repository_create(name, organization_id, product_name, url): 165 | """Create a repository 166 | 167 | :param name: name of the repository 168 | :param organization_id: organization where the repository will be created 169 | :param product_name: name of the product which the repository belongs 170 | :param url: repository source URL 171 | """ 172 | return hammer( 173 | 'repository create --name "{0}" ' 174 | '--content-type "yum" ' 175 | '--organization-id "{1}" ' 176 | '--product "{2}" ' 177 | '--url "{3}"' 178 | .format(name, organization_id, product_name, url) 179 | ) 180 | 181 | 182 | @task 183 | def hammer_repository_set_enable(name, product, organization_id, arch): 184 | """Enables a Redhat 
Repository 185 | 186 | :param name: Name of the repository 187 | :param product: Name of the Product where repository is listed 188 | :param organization_id: Organization where the repository will be enabled 189 | :param arch: The architecture x86_64 or i386 or ia64 190 | """ 191 | return hammer( 192 | 'repository-set enable --name "{0}" ' 193 | '--product "{1}" ' 194 | '--organization-id {2} ' 195 | '--basearch "{3}"'.format( 196 | name, product, organization_id, arch) 197 | ) 198 | 199 | 200 | @task 201 | def hammer_repository_synchronize(name, organization_id, product_name): 202 | """Synchronize a repository 203 | 204 | :param name: name of the repository to synchronize 205 | :param organization_id: organization_id where the repository was created 206 | :param product_name: product name which the repository belongs 207 | """ 208 | return hammer( 209 | 'repository synchronize --name "{0}" ' 210 | '--organization-id "{1}" ' 211 | '--product "{2}"' 212 | .format(name, organization_id, product_name) 213 | ) 214 | 215 | 216 | @task 217 | def hammer_content_view_create(name, organization_id): 218 | """Create a content view 219 | 220 | :param name: name of the content view 221 | :param organization_id: organization where the content view will be created 222 | """ 223 | return hammer( 224 | 'content-view create --name "{0}" --organization-id "{1}"' 225 | .format(name, organization_id) 226 | ) 227 | 228 | 229 | @task 230 | def hammer_content_view_add_repository( 231 | name, organization_id, product_name, repository_name): 232 | """Add a repository to a content view 233 | 234 | :param name: name of the content view which the repository will be added 235 | :param organization_id: organization where the content view, product and 236 | repository were created 237 | :param product_name: name of the product where the repository was created 238 | :param repository_name: repository name which will be added to the content 239 | view 240 | """ 241 | return hammer( 242 | 
'content-view add-repository --name "{0}" ' 243 | '--organization-id "{1}" ' 244 | '--product "{2}" ' 245 | '--repository "{3}"' 246 | .format(name, organization_id, product_name, repository_name) 247 | ) 248 | 249 | 250 | @task 251 | def hammer_content_view_publish(name, organization_id): 252 | """Publish a content view 253 | 254 | :param name: name of the content view which will be published 255 | :param organization_id: organization where the content view was created 256 | """ 257 | return hammer( 258 | 'content-view publish --name "{0}" --organization-id "{1}"' 259 | .format(name, organization_id) 260 | ) 261 | 262 | 263 | @task 264 | def hammer_content_view_promote_version( 265 | cv_name, cv_ver_id, lc_env_id, organization_id): 266 | """Promotes a content view version 267 | 268 | :param cv_name: name of the content view which will be published 269 | :param cv_ver_id: CV Version id to be promoted 270 | :param lc_env_id: LC Environment id onto which cv version to be promoted 271 | :param organization_id: organization where the content view was created 272 | """ 273 | return hammer('content-view version promote --content-view {0} --id {1} ' 274 | '--to-lifecycle-environment-id {2} --organization-id 1' 275 | .format(cv_name, cv_ver_id, lc_env_id)) 276 | 277 | 278 | @task 279 | def hammer_activation_key_create( 280 | name, organization_id, content_view_name, 281 | lifecycle_environment_name='Library'): 282 | """Create an activation key 283 | 284 | :param name: name of the acktivation key which will be created 285 | :param organization_id: organization where the activation key will be 286 | created 287 | :param content_view_name: content view name which will be linked to the 288 | activation key 289 | :param lifecycle_environment_name: lifecycle environment name which will be 290 | linked to the activation key 291 | """ 292 | return hammer( 293 | 'activation-key create --name "{0}" ' 294 | '--content-view "{1}" ' 295 | '--lifecycle-environment "{2}" ' 296 | 
'--organization-id "{3}"' 297 | .format( 298 | name, 299 | content_view_name, 300 | lifecycle_environment_name, 301 | organization_id 302 | ) 303 | ) 304 | 305 | 306 | @task 307 | def hammer_activation_key_add_subscription( 308 | name, organization_id, product_name): 309 | """Add a subscription to an activation key 310 | 311 | :param name: name of the activation key which the subscription will be 312 | added 313 | :param organization_id: organization where the activation key was created 314 | :param product_name: product name whose subscription will be added to the 315 | activation key 316 | """ 317 | subscription_id = get_product_subscription_id( 318 | organization_id, product_name) 319 | return hammer( 320 | 'activation-key add-subscription --name "{0}" ' 321 | '--organization-id "{1}" ' 322 | '--subscription-id "{2}"' 323 | .format(name, organization_id, subscription_id) 324 | ) 325 | 326 | 327 | @task 328 | def hammer_capsule_list(): 329 | """Get the list of all Satellite capsules. 330 | 331 | :returns: A list of (capsule_id, capsule_name) tuples. For example: 332 | ``[{u'url': u'https://capsule1.example.com:9090', u'id': 1, 333 | u'name': u'capsule1.example.com'}]``. 334 | :rtype: list 335 | """ 336 | return hammer('capsule list') 337 | 338 | 339 | @task 340 | def hammer_activation_key_content_override( 341 | ak_name, content_label, value, org_id): 342 | """Override Content value in Product Content of Actiavaton Key. 
343 | 344 | :param ak_name: AK name in which contnets to be overrided 345 | :param content_label: Content name of to be overrided 346 | :param value: True/False for override to yes/no 347 | :param org_id: The organization to which AK belongs 348 | """ 349 | ak_id = get_attribute_value( 350 | hammer('activation-key list --organization-id {}'.format(org_id)), 351 | ak_name, 352 | 'id' 353 | ) 354 | return hammer( 355 | 'activation-key content-override --id {0} ' 356 | '--content-label {1} --value {2}'.format( 357 | ak_id, content_label, value)) 358 | 359 | 360 | def sync_capsule_content(capsule, sync=False): 361 | """Start content synchronization in the capsule. 362 | 363 | If The content synchronization is asynchronous, check the capsule 364 | logs to see when it have finished. 365 | 366 | :param dict capsule: A capsule dictionary containing ``id`` and ``name`` 367 | :param bool sync: Synchronize synchronously 368 | 369 | """ 370 | if capsule['id'] == 1: 371 | print('Skipping default capsule...') 372 | return 373 | lcenvs = hammer_capsule_lcenvs(capsule['id']) 374 | for lcenv in lcenvs: 375 | hammer_capsule_add_lcenv(capsule['id'], lcenv['id']) 376 | hammer('capsule content synchronize --id {0} {1}' 377 | .format(capsule['id'], '' if sync else '--async')) 378 | 379 | 380 | def get_product_subscription_id(organization_id, product_name): 381 | """Returns products subscription id 382 | 383 | :param string organization_id: Organization Id in which product is created 384 | :param string product_name: Product name of which subscription id to return 385 | """ 386 | return get_attribute_value( 387 | hammer('subscription list --organization-id {}'.format(organization_id)), 388 | product_name, 389 | 'id') 390 | 391 | 392 | def attach_subscription_to_host_from_satellite( 393 | organization_id, product_name, hostname): 394 | """Attaches product subscription to content host from satellite 395 | 396 | :param string organization_id: Organization Id in which product is created 397 
| :param string product_name: Product name which to be added to content host 398 | :param string hostname: The hostname into which the product subscription 399 | will be added 400 | """ 401 | subscription_id = get_product_subscription_id( 402 | organization_id, product_name) 403 | return hammer('host subscription attach --subscription-id {0} ' 404 | '--host {1}'.format(subscription_id, hostname)) 405 | 406 | 407 | def hammer_determine_cv_and_env_from_ak(ak_name, organization_id): 408 | """Determines Content View and Lifecycle Environment from 409 | Activation Key 410 | 411 | :param string ak_name: Activation key name 412 | :param int organization_id: Organization id in which ak created 413 | :returns dictionary containing cv and lenv as keys with names as 414 | their values 415 | """ 416 | data = hammer('activation-key info --name {0} --organization-id ' 417 | '{1}'.format(ak_name, organization_id)) 418 | if not isinstance(data, (dict, list)): 419 | raise KeyError( 420 | 'Wrong Activation key provided for determining CV and Env') 421 | return get_attribute_value(data, ak_name, 'content view'), \ 422 | get_attribute_value(data, ak_name, 'lifecycle environment') 423 | 424 | 425 | def get_latest_cv_version(cv_name): 426 | """Calculates the latest CV version to be published 427 | 428 | :param string cv_name : Name of the CV for which version is to be 429 | calculated 430 | :return int : Calculated version to be created for CV 431 | """ 432 | cv_version_data = hammer( 433 | 'content-view version list --content-view {} ' 434 | '--organization-id 1'.format(cv_name)) 435 | latest_cv_ver = sorted([float(data['name'].split( 436 | '{} '.format(cv_name))[1]) for data in cv_version_data]).pop() 437 | return get_attribute_value(cv_version_data, '{0} {1}'.format( 438 | cv_name, latest_cv_ver), 'id') 439 | -------------------------------------------------------------------------------- /automation_tools/satellite6/log.py: 
-------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import unicode_literals 3 | 4 | from functools import partial 5 | from itertools import cycle 6 | from time import strftime 7 | 8 | import os 9 | from fabric.api import execute, run 10 | 11 | ERROR_TOKENS = ( 12 | 'ERROR', 13 | 'EXCEPTION', 14 | r'returned 1 instead of one of \[0\]', 15 | 'Could not find the inverse association for repository', 16 | 'undefined method' 17 | ) 18 | 19 | LOG_FILES = ( 20 | '/var/log/foreman-installer/satellite.log', 21 | '/var/log/satellite-installer/satellite-installer.log', 22 | '/var/log/capsule-installer/capsule-installer.log', 23 | '/var/log/foreman-installer/capsule.log', 24 | '/var/log/foreman/production.log', 25 | '/var/log/foreman-proxy/proxy.log', 26 | '/var/log/candlepin/candlepin.log', 27 | '/var/log/messages', 28 | '/var/log/mongodb/mongodb.log', 29 | '/var/log/tomcat/catalina.out' 30 | ) 31 | 32 | 33 | class LogAnalyzer(object): 34 | """Context Manager to analyze changes in logs during some process. 35 | Example: 36 | 37 | >>> from automation_tools.satellite6.log import LogAnalyzer 38 | >>> with LogAnalyzer('root@sathost.redhat.com'): 39 | ... print('Running some process, could be Satellite Upgrade') 40 | ... 
41 | [root@sathost.redhat.com] Executing task 'get_line_count' 42 | Running some process, could be Satellite Upgrade 43 | #### Analyzing logs of host root@sathost.redhat.com 44 | [root@sathost.redhat.com] Executing task 'get_line_count' 45 | [root@sathost.redhat.com] Executing task 'fetch_appended_log_lines' 46 | 47 | """ 48 | 49 | def __init__(self, host, log_files=LOG_FILES): 50 | """Initializes context manager with Satellite hostname 51 | 52 | :param host: str the hostname 53 | """ 54 | self.host = host 55 | self.log_state = dict(zip(log_files, cycle([0]))) 56 | 57 | def __enter__(self): 58 | """ 59 | Fetch current line count for Satellite log files 60 | :return: LogAnalyzer 61 | """ 62 | self._update_log_files_state() 63 | return self 64 | 65 | def __exit__(self, exc_type, exc_val, exc_tb): 66 | """Analyzes log files checking if some error occurred since last 67 | log_state update 68 | """ 69 | print('#### Analyzing logs of host %s' % self.host) 70 | self._update_log_files_state() 71 | now = partial(strftime, '%H:%M:%S.%s') 72 | 73 | def fetch_appended_log_error_lines(): 74 | grep_expressions = ' '.join( 75 | r'-e "{}"'.format(token) for token in ERROR_TOKENS) 76 | cmd_template = 'tail -n {lines} {file} | grep {expressions} {file}' 77 | for log_file, lines_appended in self.log_state.items(): 78 | if lines_appended > 0: 79 | cmd = cmd_template.format( 80 | lines=lines_appended, 81 | file=log_file, 82 | expressions=grep_expressions 83 | ) 84 | _print_wrapper('### Analyzing %s:' % log_file) 85 | _print_wrapper('{} - Running {}'.format(now(), cmd)) 86 | content = run(cmd, quiet=True) 87 | _print_wrapper('{} - errors fetched'.format(now())) 88 | if not content: 89 | _print_wrapper('### No errors found') 90 | else: 91 | _print_wrapper('## Errors found:') 92 | _print_wrapper(content) 93 | 94 | execute(fetch_appended_log_error_lines, host=self.host) 95 | 96 | def _update_log_files_state(self): 97 | """Update log_dct with adding delta from current number of lines of 98 
| each item and the last provided by dct. 99 | 100 | So this method can be used to check how many lines were appended on 101 | a file during some processes and used to tail them. If log_state is 102 | None a new dict is created with initial values been 0 (zero). 103 | """ 104 | 105 | def get_line_count(): 106 | for log_file, old_value in self.log_state.items(): 107 | try: 108 | current_value = int( 109 | run( 110 | 'wc -l < {}'.format(log_file), 111 | quiet=True 112 | ) 113 | ) 114 | except ValueError: 115 | self.log_state[log_file] = 0 116 | else: 117 | self.log_state[log_file] = current_value - old_value 118 | 119 | execute(get_line_count, host=self.host) 120 | 121 | 122 | def _save_full_log(host, log_file_path, content): 123 | """Save full log on upgrade-diff-logs dir 124 | 125 | :param host: str with host name 126 | :param log_file_path: file name path 127 | :param content: content to be saved 128 | """ 129 | dir_path = os.path.abspath('upgrade-diff-logs') 130 | if not os.path.exists(dir_path): 131 | os.mkdir(dir_path) 132 | log_file_name = os.path.split(log_file_path)[-1] 133 | log_file_name = '%s-%s' % (host, log_file_name) 134 | file_path = os.path.join(dir_path, log_file_name) 135 | with open(file_path, 'wb') as log_file: 136 | log_file.write(content) 137 | _print_wrapper('## Full upgrade logs saved on %s' % file_path) 138 | 139 | 140 | def _print_wrapper(s): 141 | """Just a wrapper to make mocking easier on tests""" 142 | if isinstance(s, bytes): 143 | print(s) 144 | else: # is unicode 145 | print(s.encode('utf-8')) 146 | -------------------------------------------------------------------------------- /automation_tools/utils.py: -------------------------------------------------------------------------------- 1 | """Utilities tasks and functions""" 2 | from __future__ import print_function 3 | 4 | import os 5 | import re 6 | import sys 7 | import subprocess 8 | import time 9 | 10 | from bs4 import BeautifulSoup 11 | from fabric.api import env, run, 
warn_only 12 | 13 | from six.moves.urllib.request import urlopen 14 | 15 | 16 | def distro_info(): 17 | """Task which figures out the distro information based on the 18 | /etc/redhat-release file 19 | 20 | A ``(distro, major_version)`` tuple is returned if called as a function. 21 | For RHEL X.Y.Z it will return ``('rhel', X)``. For Fedora X it will return 22 | ``('fedora', X)``. Be aware that the major_version is an integer. 23 | 24 | """ 25 | # Create/manage host cache 26 | cache = env.get('distro_info_cache') 27 | host = env['host'] 28 | if cache is None: 29 | cache = env['distro_info_cache'] = {} 30 | 31 | if host not in cache: 32 | # Grab the information and store on cache 33 | release_info = run('cat /etc/redhat-release', quiet=True) 34 | if release_info.failed: 35 | print('Failed to read /etc/redhat-release file') 36 | sys.exit(1) 37 | 38 | # Discover the distro 39 | if release_info.startswith('Red Hat Enterprise Linux'): 40 | distro = 'rhel' 41 | elif release_info.startswith('Fedora'): 42 | distro = 'fedora' 43 | else: 44 | distro = None 45 | 46 | # Discover the version 47 | match = re.search(r' ([0-9.]+) ', release_info) 48 | if match is not None: 49 | parts = match.group(1).split('.') 50 | # extract the major version 51 | major_version = int(parts[0]) 52 | # extract the minor version 53 | if len(parts) > 1: 54 | minor_version = int(parts[1]) 55 | else: 56 | minor_version = None 57 | else: 58 | major_version = minor_version = None 59 | 60 | if distro is None or major_version is None: 61 | print('Was not possible to fetch distro information') 62 | sys.exit(1) 63 | 64 | cache[host] = distro, major_version, minor_version 65 | 66 | distro, major_version, minor_version = cache[host] 67 | print('{0} {1} {2}'.format(distro, major_version, minor_version)) 68 | return distro, major_version, minor_version 69 | 70 | 71 | def update_packages(*args, **kwargs): 72 | """Updates all system packages or only ones specified by `args` 73 | 74 | Use this if you want to simply 
update all packages or some on system. 75 | Possibly useful for when doing upgrades, etc. 76 | 77 | """ 78 | if len(args) > 0: 79 | arguments = ' '.join(args) 80 | else: 81 | arguments = '' 82 | 83 | run( 84 | 'yum update -y {0}'.format(arguments), 85 | quiet=kwargs.get('quiet', False), 86 | warn_only=kwargs.get('warn_only', False), 87 | ) 88 | 89 | 90 | def run_command(cmd=None): 91 | """ Task to run only sane commands 92 | :param str cmd: command to be run 93 | 94 | """ 95 | if cmd: 96 | run(cmd) 97 | 98 | 99 | def version(satver='0'): 100 | satver = str(satver) 101 | return tuple(map(int, ('9999' if 'nightly' in satver else satver).split('.'))) 102 | 103 | 104 | def get_discovery_image(): 105 | """ Task for getting unattended foreman-discovery ISO image 106 | :return: foreman-discovery-image iso under /var/lib/libvirt/images/ 107 | """ 108 | if os.environ.get('BASE_URL') is not None: 109 | url = os.environ.get('BASE_URL') + '/Packages/' 110 | soup = BeautifulSoup(urlopen(url).read()) 111 | for link in soup.findAll('a'): 112 | if re.search(r'foreman-discovery-image-\d+', link.string): 113 | discovery_image = link.string 114 | try: 115 | run("wget -O /tmp/" + discovery_image + " " + url + discovery_image) 116 | run('cd /tmp/ ; rpm2cpio ' + discovery_image + '|cpio -idmv') 117 | run('cp /tmp/usr/share/foreman-discovery-image/' 118 | + discovery_image.split('.el')[0] + '.iso /tmp/') 119 | run('cp /tmp/usr/bin/discovery-remaster /tmp/') 120 | run('/tmp/discovery-remaster /tmp/' + discovery_image.split('.el')[0] 121 | + '.iso "fdi.pxgw=' + os.environ.get('GATEWAY') + 122 | ' fdi.pxdns=$(cat /etc/resolv.conf|grep -i "^nameserver"|' 123 | 'head -n1|cut -d " " -f2) proxy.url=https://' 124 | + os.environ.get('IPADDR') + 125 | ':9090 proxy.type=proxy fdi.pxfactname1=myfact ' 126 | 'fdi.pxfactvalue1=somevalue fdi.pxauto=1" /var/lib/libvirt/images/' 127 | + os.environ.get('DISCOVERY_ISO')) 128 | size = run('du -h "/var/lib/libvirt/images/"' + 129 | 
os.environ.get('DISCOVERY_ISO') 130 | + ' | cut -f1 | tr -d [:alpha:]') 131 | if int(size) < 150: 132 | raise Exception("Generated ISO size is less than 150M!" 133 | " Check if ISO is corrupted.") 134 | finally: 135 | run('rm /tmp/foreman-discovery-image* /tmp/discovery-remaster ' 136 | '/tmp/usr -rvf') 137 | else: 138 | print("Skipping...URL for discovery image not found!") 139 | 140 | 141 | def get_packages_name(html): 142 | soup = BeautifulSoup(html) 143 | anchors = soup.findAll('a') 144 | links = [] 145 | for a in anchors: 146 | links.append(a['href']) 147 | links = list(filter(lambda k: 'rpm' in k, links)) 148 | return links 149 | 150 | 151 | def get_packages(url, package_name): 152 | run('wget -P packages/ ' + url + package_name) 153 | 154 | 155 | def compare_builds(url1, url2): 156 | """ Task to to compare packages in two different release engineering builds 157 | and verify rpm signature. 158 | :return: Check Package Versions in both builds are same and all packages 159 | under RCM_COMPOSE_URL are signed! 
160 | """ 161 | signature = os.getenv('SIGNATURE') 162 | signature_list = signature.split(',') 163 | flag = flag1 = flag2 = 0 164 | list1 = get_packages_name(urlopen(url1).read()) 165 | list1.sort() 166 | list2 = get_packages_name(urlopen(url2).read()) 167 | list2.sort() 168 | with warn_only(): 169 | try: 170 | run('mkdir packages') 171 | for pkg in range(len(list2)): 172 | get_packages(url2, list2[pkg]) 173 | for pkg in range(len(list2)): 174 | if 'NOT OK' not in run('rpm -K packages/' + list1[pkg]): 175 | flag1 = flag1 + 1 176 | package_sign = run('rpm -qpi packages/' + list2[pkg] + 177 | '| grep "Signature :" | rev | cut -d" " -f1 | rev') 178 | if package_sign in signature_list: 179 | flag2 = flag2 + 1 180 | else: 181 | print('Signatures from ' + str(signature_list) + 182 | ' not matched for ' + list2[pkg]) 183 | else: 184 | print(list2[pkg] + 'package is not signed') 185 | finally: 186 | run('rm packages -rf') 187 | 188 | print("========================= Overall Report ======================") 189 | 190 | print( 191 | "There are " + str(len(list1)) + " packages in " + url1 + " and " 192 | + str(len(list2)) + " packages in " + url2 193 | ) 194 | 195 | for pkg in range(len(list1)): 196 | if list1[pkg] == list2[pkg]: 197 | flag = flag + 1 198 | else: 199 | print( 200 | "The version of package " + list1[pkg] + 201 | " from build1 is not similar to version of package " + list2[ 202 | pkg] 203 | + " from build2." 
204 | ) 205 | 206 | if flag == len(list1) - 1: 207 | print("Versions in both builds are same") 208 | else: 209 | print(str((len(list1)) - flag) + " packages version found mismatched!") 210 | 211 | if flag1 == len(list1): 212 | print("All packages are signed!") 213 | else: 214 | print(str(len(list1) - flag1) + 'packages are not signed!!') 215 | 216 | if flag2 == len(list1): 217 | print("Signature matched for all packages!!") 218 | else: 219 | print('Signatures from ' + str(signature_list) + ' for ' 220 | + str(len(list1) - flag2) + ' packages not matched!!') 221 | print("================================================================") 222 | 223 | 224 | def host_cmd_check(cmd, timeout=7): 225 | """Helper to run commands and poll until returncode 0 or timeout 226 | :param cmd: A string. The cmd you want to poll for. 227 | :param int timeout: The polling timeout in minutes. 228 | """ 229 | timeup = time.time() + int(timeout) * 60 230 | while True: 231 | command = subprocess.Popen( 232 | '{0}'.format(cmd), 233 | stdout=subprocess.PIPE, 234 | stderr=subprocess.PIPE, 235 | shell=True 236 | ) 237 | output = command.communicate() 238 | print(output) 239 | # Checking the return code of ping is 0 240 | if time.time() > timeup: 241 | print('Running {0} timed out for host '.format(cmd)) 242 | return False 243 | if command.returncode == 0: 244 | return True, output 245 | else: 246 | time.sleep(5) 247 | 248 | 249 | def host_ssh_availability_check(host): 250 | """This ensures the given host has ssh up and running.. 251 | :param host: A string. The IP or hostname of host. 252 | """ 253 | _, ip = host_pings(host) 254 | print('Checking SSH availability') 255 | _, output = host_cmd_check('nc -vn {0} 22 <<< \'\''.format(ip)) 256 | return output 257 | 258 | 259 | def host_pings(host): 260 | """This ensures the given IP/hostname pings succesfully. 261 | :param host: A string. The IP or hostname of host. 
262 | """ 263 | _, output = host_cmd_check('ping -c1 {0}; echo $?'.format(host)) 264 | output = str(output[0]) 265 | ip = output[output.find("(") + 1:output.find(")")] 266 | status, _ = host_cmd_check('ping -c1 {0} | ' 267 | 'grep \'1 received\''.format(host)) 268 | return status, ip 269 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " devhelp to make HTML files and a Devhelp project" 34 | @echo " epub to make an epub" 35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 36 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 38 | @echo " text to make text files" 39 | @echo " man to make manual pages" 40 | @echo " texinfo to make Texinfo files" 41 | @echo " info to make Texinfo files and run them through makeinfo" 42 | @echo " gettext to make PO message catalogs" 43 | @echo " changes to make an overview of all changed/added/deprecated items" 44 | @echo " xml to make Docutils-native XML files" 45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 46 | @echo " linkcheck to check all external links for integrity" 47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 48 | @echo " coverage to run coverage check of the documentation (if enabled)" 49 | 50 | clean: 51 | rm -rf $(BUILDDIR)/* 52 | 53 | html: 54 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 55 | @echo 56 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 57 | 58 | dirhtml: 59 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 60 | @echo 61 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 
62 | 63 | singlehtml: 64 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 65 | @echo 66 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 67 | 68 | pickle: 69 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 70 | @echo 71 | @echo "Build finished; now you can process the pickle files." 72 | 73 | json: 74 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 75 | @echo 76 | @echo "Build finished; now you can process the JSON files." 77 | 78 | htmlhelp: 79 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 80 | @echo 81 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 82 | ".hhp project file in $(BUILDDIR)/htmlhelp." 83 | 84 | qthelp: 85 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 86 | @echo 87 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 88 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 89 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/automation-tools.qhcp" 90 | @echo "To view the help file:" 91 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/automation-tools.qhc" 92 | 93 | devhelp: 94 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 95 | @echo 96 | @echo "Build finished." 97 | @echo "To view the help file:" 98 | @echo "# mkdir -p $$HOME/.local/share/devhelp/automation-tools" 99 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/automation-tools" 100 | @echo "# devhelp" 101 | 102 | epub: 103 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 104 | @echo 105 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 106 | 107 | latex: 108 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 109 | @echo 110 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 111 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 112 | "(use \`make latexpdf' here to do that automatically)." 
113 | 114 | latexpdf: 115 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 116 | @echo "Running LaTeX files through pdflatex..." 117 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 118 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 119 | 120 | latexpdfja: 121 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 122 | @echo "Running LaTeX files through platex and dvipdfmx..." 123 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 124 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 125 | 126 | text: 127 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 128 | @echo 129 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 130 | 131 | man: 132 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 133 | @echo 134 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 135 | 136 | texinfo: 137 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 138 | @echo 139 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 140 | @echo "Run \`make' in that directory to run these through makeinfo" \ 141 | "(use \`make info' here to do that automatically)." 142 | 143 | info: 144 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 145 | @echo "Running Texinfo files through makeinfo..." 146 | make -C $(BUILDDIR)/texinfo info 147 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 148 | 149 | gettext: 150 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 151 | @echo 152 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 153 | 154 | changes: 155 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 156 | @echo 157 | @echo "The overview file is in $(BUILDDIR)/changes." 158 | 159 | linkcheck: 160 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 161 | @echo 162 | @echo "Link check complete; look for any errors in the above output " \ 163 | "or in $(BUILDDIR)/linkcheck/output.txt." 
164 | 165 | doctest: 166 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 167 | @echo "Testing of doctests in the sources finished, look at the " \ 168 | "results in $(BUILDDIR)/doctest/output.txt." 169 | 170 | coverage: 171 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 172 | @echo "Testing of coverage in the sources finished, look at the " \ 173 | "results in $(BUILDDIR)/coverage/python.txt." 174 | 175 | xml: 176 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 177 | @echo 178 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 179 | 180 | pseudoxml: 181 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 182 | @echo 183 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 184 | -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | API Documentation 2 | ================= 3 | 4 | :mod:`fabfile` 5 | ------------------------- 6 | 7 | .. automodule:: fabfile 8 | :members: 9 | :undoc-members: 10 | 11 | 12 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """Configuration file for the Sphinx documentation generator. 3 | 4 | For more information, see http://sphinx-doc.org/ 5 | 6 | """ 7 | # 8 | # automation-tools documentation build configuration file, created by 9 | # sphinx-quickstart on Thu Oct 16 15:57:56 2014. 10 | # 11 | # This file is execfile()d with the current directory set to its 12 | # containing dir. 13 | # 14 | # Note that not all possible configuration values are present in this 15 | # autogenerated file. 16 | # 17 | # All configuration values have a default; values that are commented out 18 | # serve to show the default. 
19 | 20 | # (invalid-name) pylint:disable=C0103 21 | 22 | import os 23 | import sys 24 | 25 | # If extensions (or modules to document with autodoc) are in another directory, 26 | # add these directories to sys.path here. If the directory is relative to the 27 | # documentation root, use os.path.abspath to make it absolute, like shown here. 28 | sys.path.insert(0, os.path.abspath('..')) 29 | 30 | # -- General configuration ------------------------------------------------ 31 | 32 | # If your documentation needs a minimal Sphinx version, state it here. 33 | # needs_sphinx = '1.0' 34 | 35 | # Add any Sphinx extension module names here, as strings. They can be 36 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 37 | # ones. 38 | extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode'] 39 | 40 | # Add any paths that contain templates here, relative to this directory. 41 | templates_path = ['_templates'] 42 | 43 | # The suffix of source filenames. 44 | source_suffix = '.rst' 45 | 46 | # The encoding of source files. 47 | # source_encoding = 'utf-8-sig' 48 | 49 | # The master toctree document. 50 | master_doc = 'index' 51 | 52 | # General information about the project. 53 | project = u'automation-tools' 54 | copyright = u'2014, Elyezer Rezende' # redefined-builtin pylint:disable=W0622 55 | 56 | # The version info for the project you're documenting, acts as replacement for 57 | # |version| and |release|, also used in various other places throughout the 58 | # built documents. 59 | # 60 | # The short X.Y version. 61 | version = '0.0.1' 62 | # The full version, including alpha/beta/rc tags. 63 | release = '0.0.1' 64 | 65 | # The language for content autogenerated by Sphinx. Refer to documentation 66 | # for a list of supported languages. 67 | # 68 | # This is also used if you do content translation via gettext catalogs. 69 | # Usually you set "language" from the command line for these cases. 
70 | language = None 71 | 72 | # There are two options for replacing |today|: either, you set today to some 73 | # non-false value, then it is used: 74 | # today = '' 75 | # Else, today_fmt is used as the format for a strftime call. 76 | # today_fmt = '%B %d, %Y' 77 | 78 | # List of patterns, relative to source directory, that match files and 79 | # directories to ignore when looking for source files. 80 | exclude_patterns = ['_build'] 81 | 82 | # The reST default role (used for this markup: `text`) to use for all 83 | # documents. 84 | # default_role = None 85 | 86 | # If true, '()' will be appended to :func: etc. cross-reference text. 87 | # add_function_parentheses = True 88 | 89 | # If true, the current module name will be prepended to all description 90 | # unit titles (such as .. function::). 91 | # add_module_names = True 92 | 93 | # If true, sectionauthor and moduleauthor directives will be shown in the 94 | # output. They are ignored by default. 95 | # show_authors = False 96 | 97 | # The name of the Pygments (syntax highlighting) style to use. 98 | pygments_style = 'sphinx' 99 | 100 | # A list of ignored prefixes for module index sorting. 101 | # modindex_common_prefix = [] 102 | 103 | # If true, keep warnings as "system message" paragraphs in the built documents. 104 | # keep_warnings = False 105 | 106 | # If true, Sphinx will warn about all references where the target cannot be 107 | # found. 108 | nitpicky = True 109 | 110 | # A list of (type, target) tuples (by default empty) that should be ignored 111 | # when generating warnings in “nitpicky mode”. 112 | # nitpick_ignore = [('py:obj', 'bool')] 113 | 114 | 115 | # -- Options for HTML output ---------------------------------------------- 116 | 117 | # The theme to use for HTML and HTML Help pages. See the documentation for 118 | # a list of builtin themes. 119 | html_theme = 'default' 120 | 121 | # Theme options are theme-specific and customize the look and feel of a theme 122 | # further. 
For a list of options available for each theme, see the 123 | # documentation. 124 | # html_theme_options = {} 125 | 126 | # Add any paths that contain custom themes here, relative to this directory. 127 | # html_theme_path = [] 128 | 129 | # The name for this set of Sphinx documents. If None, it defaults to 130 | # " v documentation". 131 | # html_title = None 132 | 133 | # A shorter title for the navigation bar. Default is the same as html_title. 134 | # html_short_title = None 135 | 136 | # The name of an image file (relative to this directory) to place at the top 137 | # of the sidebar. 138 | # html_logo = None 139 | 140 | # The name of an image file (within the static path) to use as favicon of the 141 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 142 | # pixels large. 143 | # html_favicon = None 144 | 145 | # Add any paths that contain custom static files (such as style sheets) here, 146 | # relative to this directory. They are copied after the builtin static files, 147 | # so a file named "default.css" will overwrite the builtin "default.css". 148 | # html_static_path = ['_static'] 149 | 150 | # Add any extra paths that contain custom files (such as robots.txt or 151 | # .htaccess) here, relative to this directory. These files are copied 152 | # directly to the root of the documentation. 153 | # html_extra_path = [] 154 | 155 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 156 | # using the given strftime format. 157 | # html_last_updated_fmt = '%b %d, %Y' 158 | 159 | # If true, SmartyPants will be used to convert quotes and dashes to 160 | # typographically correct entities. 161 | # html_use_smartypants = True 162 | 163 | # Custom sidebar templates, maps document names to template names. 164 | # html_sidebars = {} 165 | 166 | # Additional templates that should be rendered to pages, maps page names to 167 | # template names. 
168 | # html_additional_pages = {} 169 | 170 | # If false, no module index is generated. 171 | # html_domain_indices = True 172 | 173 | # If false, no index is generated. 174 | # html_use_index = True 175 | 176 | # If true, the index is split into individual pages for each letter. 177 | # html_split_index = False 178 | 179 | # If true, links to the reST sources are added to the pages. 180 | # html_show_sourcelink = True 181 | 182 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 183 | # html_show_sphinx = True 184 | 185 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 186 | # html_show_copyright = True 187 | 188 | # If true, an OpenSearch description file will be output, and all pages will 189 | # contain a tag referring to it. The value of this option must be the 190 | # base URL from which the finished HTML is served. 191 | # html_use_opensearch = '' 192 | 193 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 194 | # html_file_suffix = None 195 | 196 | # Language to be used for generating the HTML full-text search index. 197 | # Sphinx supports the following languages: 198 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 199 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' 200 | # html_search_language = 'en' 201 | 202 | # A dictionary with options for the search language support, empty by default. 203 | # Now only 'ja' uses this config value 204 | # html_search_options = {'type': 'default'} 205 | 206 | # The name of a javascript file (relative to the configuration directory) that 207 | # implements a search results scorer. If empty, the default will be used. 208 | # html_search_scorer = 'scorer.js' 209 | 210 | # Output file base name for HTML help builder. 211 | htmlhelp_basename = 'automation-toolsdoc' 212 | 213 | # -- Options for LaTeX output --------------------------------------------- 214 | 215 | latex_elements = { 216 | # The paper size ('letterpaper' or 'a4paper'). 
217 | # 'papersize': 'letterpaper', 218 | 219 | # The font size ('10pt', '11pt' or '12pt'). 220 | # 'pointsize': '10pt', 221 | 222 | # Additional stuff for the LaTeX preamble. 223 | # 'preamble': '', 224 | 225 | # Latex figure (float) alignment 226 | # 'figure_align': 'htbp', 227 | } 228 | 229 | # Grouping the document tree into LaTeX files. List of tuples 230 | # (source start file, target name, title, 231 | # author, documentclass [howto, manual, or own class]). 232 | latex_documents = [( 233 | u'index', u'automation-tools.tex', u'automation-tools Documentation', 234 | u'Elyezer Rezende', u'manual' 235 | )] 236 | 237 | # The name of an image file (relative to this directory) to place at the top of 238 | # the title page. 239 | # latex_logo = None 240 | 241 | # For "manual" documents, if this is true, then toplevel headings are parts, 242 | # not chapters. 243 | # latex_use_parts = False 244 | 245 | # If true, show page references after internal links. 246 | # latex_show_pagerefs = False 247 | 248 | # If true, show URL addresses after external links. 249 | # latex_show_urls = False 250 | 251 | # Documents to append as an appendix to all manuals. 252 | # latex_appendices = [] 253 | 254 | # If false, no module index is generated. 255 | # latex_domain_indices = True 256 | 257 | 258 | # -- Options for manual page output --------------------------------------- 259 | 260 | # One entry per manual page. List of tuples 261 | # (source start file, name, description, authors, manual section). 262 | man_pages = [ 263 | ('index', 'automation-tools', u'automation-tools Documentation', 264 | [u'Elyezer Rezende'], 1) 265 | ] 266 | 267 | # If true, show URL addresses after external links. 268 | # man_show_urls = False 269 | 270 | 271 | # -- Options for Texinfo output ------------------------------------------- 272 | 273 | # Grouping the document tree into Texinfo files. 
List of tuples 274 | # (source start file, target name, title, author, 275 | # dir menu entry, description, category) 276 | texinfo_documents = [( 277 | u'index', u'automation-tools', u'automation-tools Documentation', 278 | u'Elyezer Rezende', u'automation-tools', 279 | u'One line description of project.', u'Miscellaneous' 280 | )] 281 | 282 | # Documents to append as an appendix to all manuals. 283 | # texinfo_appendices = [] 284 | 285 | # If false, no module index is generated. 286 | # texinfo_domain_indices = True 287 | 288 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 289 | # texinfo_show_urls = 'footnote' 290 | 291 | # If true, do not generate a @detailmenu in the "Top" node's menu. 292 | # texinfo_no_detailmenu = False 293 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | automation-tools documentation 2 | ============================== 3 | 4 | .. toctree:: 5 | :hidden: 6 | 7 | api 8 | 9 | A set of tools to help automating virtual machines to install Foreman and test 10 | it using `Robottelo `_. 11 | 12 | Installation 13 | ============ 14 | 15 | Install the following packages on your Operating System:: 16 | 17 | sudo yum -y install libcurl-devel libxml-devel libxslt-devel 18 | 19 | Automation tools depends on ``pycurl`` being installed, but installing it, 20 | specially on a virtual environment, is not straight forward. 
You must run the 21 | following script in order to have ``pycurl`` installed properly:: 22 | 23 | # Make sure pip is updated 24 | pip install -U pip 25 | 26 | # Install pycurl using the proper cryptography library 27 | if [ "$(curl --version | grep NSS 2>/dev/null)" ]; then 28 | pip install --compile --install-option="--with-nss" pycurl 29 | else 30 | pip install --compile --install-option="--with-openssl" pycurl 31 | fi 32 | 33 | Finally, python packages listed in `requirements.txt` must be installed before 34 | automation-tools can be used:: 35 | 36 | pip install -r requirements.txt 37 | 38 | If you prefer, you can install both ``pycurl`` and automation-tools 39 | `requirements.txt` running just one command:: 40 | 41 | PYCURL_SSL_LIBRARY=$(curl -V | sed -n 's/.*\(NSS\|OpenSSL\).*/\L\1/p') \ 42 | pip install -r requirements.txt 43 | 44 | Usage examples 45 | ============== 46 | 47 | Virtual Machine Management 48 | -------------------------- 49 | 50 | To create a virtual machine will be needed a base image, to list all available 51 | base images:: 52 | 53 | fab -H root@example.com vm_list_base 54 | 55 | Creating a virtual machine:: 56 | 57 | VM_RAM=512 VM_CPU=1 VM_DOMAIN=domain.example.com SOURCE_IMAGE=rhel7-base \ 58 | TARGET_IMAGE=test01 fab -H root@example.com vm_create 59 | 60 | Destroying a virtual machine:: 61 | 62 | fab -H root@example.com vm_destroy:test01,delete_image=True 63 | 64 | Listing virtual machines:: 65 | 66 | fab -H root@example.com vm_list 67 | 68 | Subscription Management 69 | ----------------------- 70 | 71 | Subscribe:: 72 | 73 | RHN_USERNAME=user@example.com RHN_PASSWORD=mysecret \ 74 | RHN_POOLID=poolid fab -H root@example.com subscribe 75 | 76 | Unsubscribe:: 77 | 78 | fab -H root@example.com unsubscribe 79 | 80 | Satellite Installation 81 | ---------------------- 82 | 83 | To install a compose build:: 84 | 85 | RHN_USERNAME=user@example.com RHN_PASSWORD=mysecret \ 86 | RHN_POOLID=poolid BASE_URL=http://example.com/Satellite/x86_64/os/ \ 87 
| fab -H root@example.com \ 88 | product_install:satellite6-downstream 89 | 90 | To install a nightly build:: 91 | 92 | RHN_USERNAME=user@example.com RHN_PASSWORD=mysecret \ 93 | RHN_POOLID=poolid \ 94 | fab -H root@example.com \ 95 | product_install:satellite6-upstream 96 | 97 | 98 | To install from the CDN:: 99 | 100 | RHN_USERNAME=user@example.com RHN_PASSWORD=mysecret \ 101 | RHN_POOLID=poolid \ 102 | fab -H root@example.com \ 103 | product_install:satellite6-cdn 104 | 105 | 106 | Product install task will set the admin password to `changeme`. 107 | 108 | SAM Installation 109 | ---------------- 110 | 111 | To install a nightly build:: 112 | 113 | RHN_USERNAME=user@example.com RHN_PASSWORD=mysecret \ 114 | RHN_POOLID=poolid \ 115 | fab -H root@example.com \ 116 | product_install:sam-upstream 117 | 118 | Product install task will set the admin password to `changeme`. 119 | 120 | Miscellaneous 121 | ------------- 122 | 123 | Fabric will use your default ssh key, but if you want to specify a different 124 | one, use the `-i` option:: 125 | 126 | fab -i path/to/my_ssh_key task 127 | 128 | Documentation 129 | ------------- 130 | 131 | You can generate the documentation for automation-tools as follows, so long 132 | as you have `Sphinx`_ and make installed:: 133 | 134 | cd docs 135 | make html 136 | 137 | .. 
_Sphinx: http://sphinx-doc.org/index.html 138 | -------------------------------------------------------------------------------- /fabfile.py: -------------------------------------------------------------------------------- 1 | """Module which publish all automation-tools tasks""" 2 | from automation_tools import ( # noqa: F401 3 | add_repo, 4 | cdn_install, 5 | clean_rhsm, 6 | cleanup_idm, 7 | client_registration_test, 8 | configure_osp, 9 | configure_ad_external_auth, 10 | configure_idm_external_auth, 11 | configure_realm, 12 | configure_sonarqube, 13 | configure_telemetry, 14 | create_personal_git_repo, 15 | downstream_install, 16 | enroll_ad, 17 | enroll_idm, 18 | errata_upgrade, 19 | fix_hostname, 20 | fix_qdrouterd_listen_to_ipv6, 21 | foreman_debug, 22 | generate_capsule_certs, 23 | install_errata, 24 | install_katello_agent, 25 | install_prerequisites, 26 | iso_download, 27 | iso_install, 28 | partition_disk, 29 | performance_tuning, 30 | product_install, 31 | remove_katello_agent, 32 | run_errata, 33 | set_service_check_status, 34 | set_yum_debug_level, 35 | setup_abrt, 36 | setup_alternate_capsule_ports, 37 | setup_avahi_discovery, 38 | setup_bfa_prevention, 39 | setup_capsule, 40 | setup_ddns, 41 | setup_default_capsule, 42 | setup_external_capsule, 43 | setup_default_docker, 44 | setup_default_libvirt, 45 | setup_default_subnet, 46 | setup_email_notification, 47 | setup_fake_manifest_certificate, 48 | setup_firewall, 49 | setup_foreman_discovery, 50 | setup_capsule_firewall, 51 | setup_python_code_coverage, 52 | setup_satellite_firewall, 53 | setup_libvirt_key, 54 | setup_local_rex_key, 55 | setup_proxy, 56 | setup_ruby_code_coverage, 57 | setup_rubysys_code_coverage, 58 | setup_rubytfm_code_coverage, 59 | setup_vm_provisioning, 60 | setup_http_proxy, 61 | subscribe, 62 | subscribe_dogfood, 63 | unsubscribe, 64 | update_basic_packages, 65 | update_rhsm_stage, 66 | upstream_install, 67 | vm_create, 68 | vm_destroy, 69 | vm_list, 70 | vm_list_base, 71 | ) 
72 | from automation_tools.baseimage import ( # noqa: F401 73 | create_baseimage, 74 | deploy_baseimage, 75 | deploy_baseimage_by_url, 76 | detect_imagename, 77 | ) 78 | from automation_tools.manifest import ( # noqa: F401 79 | download_manifest, 80 | refresh_manifest, 81 | relink_manifest, 82 | validate_manifest, 83 | ) 84 | from automation_tools.repository import ( # noqa: F401 85 | create_custom_repos, 86 | delete_custom_repos, 87 | disable_beaker_repos, 88 | disable_repos, 89 | enable_repos, 90 | enable_satellite_repos, 91 | manage_custom_repos, 92 | ) 93 | from automation_tools.satellite5 import ( # noqa: F401 94 | satellite5_installer, 95 | satellite5_product_install, 96 | ) 97 | from automation_tools.utils import ( # noqa: F401 98 | compare_builds, 99 | distro_info, 100 | get_discovery_image, 101 | run_command, 102 | update_packages, 103 | host_ssh_availability_check, 104 | ) 105 | -------------------------------------------------------------------------------- /misc/base_image_creation/create-base-image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ "$1" == "-h" ] ; then 3 | echo " Usage: `basename $0` [-h]" 4 | echo " *******Example*******" 5 | echo " Enter the Operating System version. [6,7,8]" 6 | echo " 6" 7 | echo " Enter the url of the Operating System." 8 | echo " http://xxxxxx.com/Server/x86_64/os/" 9 | echo " Enter the base image name.(Ex: rhel68 or rhel73)" 10 | echo " rhel68" 11 | echo " Enter the password for the base image." 12 | echo " password" 13 | echo " Enter the Authorized keys url for the base image. (Hosted authorized_keys file with jenkins key)" 14 | echo " http://xxxxx.com/xxx/authorized_keys" 15 | echo " Do you want to disable IPv6 in base image? (Y/n)" 16 | echo " y" 17 | echo " Enter custom DNS server if you want to set it in base image" 18 | echo " 10.x.xxx.xx" 19 | exit 0 20 | fi 21 | echo "Enter the Operating System version. 
[6,7,8]" 22 | read os_version 23 | echo "Enter the url of the Operating System." 24 | read os_url 25 | echo "Enter the base image name.(Ex: rhel68 or rhel73)" 26 | read base_image 27 | echo "Enter the password for the base image." 28 | read pass 29 | echo "Enter the Authorized keys url for the base image. (Hosted authorized_keys file with jenkins key)" 30 | read auth_url 31 | echo "Do you want to disable IPv6 in base image? (Y/n)" 32 | read disable_ipv6 33 | echo "Enter custom DNS server if you want to set it in base image" 34 | read dns_server 35 | 36 | if [[ $os_version =~ (6|7|8) ]] ; then 37 | cp ks_rhel${os_version}_template /root/base-image.ks 38 | else 39 | echo "OS Version can only be 6, 7 or 8" 40 | exit 41 | fi 42 | 43 | if [[ $base_image == *"beta"* ]] ; then 44 | sed -i "s/enabled=0/enabled=1/g" /root/base-image.ks 45 | fi 46 | 47 | if [[ $disable_ipv6 =~ ^(n|N|no|No)$ ]] ; then 48 | sed -i "/disable_ipv6/d" /root/base-image.ks 49 | fi 50 | 51 | # | is used as $os_url also could contain '/'. 
52 | sed -i "s|OS_URL|$os_url|g" /root/base-image.ks 53 | sed -i "s|AS_URL|${os_url/BaseOS/AppStream}|g" /root/base-image.ks 54 | PASS=`openssl passwd -1 -salt xyz $pass` 55 | sed -i "s|ENCRYPT_PASS|'$PASS'|g" /root/base-image.ks 56 | sed -i "s|AUTH_KEYS_URL|$auth_url|g" /root/base-image.ks 57 | if [[ -n $dns_server ]] ; then 58 | sed -i "s|NAMESERVER|$dns_server|g" /root/base-image.ks 59 | else 60 | sed -i "/NAMESERVER/d" /root/base-image.ks 61 | fi 62 | 63 | virt-install --connect=qemu:///system \ 64 | --network=bridge:br0 \ 65 | --initrd-inject=/root/base-image.ks \ 66 | --extra-args="ks=file:/base-image.ks console=tty0 console=ttyS0,115200" \ 67 | --name=${base_image}-base \ 68 | --disk path=/var/lib/libvirt/images/${base_image}-base.img,size=200,device=disk,bus=virtio,format=raw,sparse=true \ 69 | --ram 4096 \ 70 | --vcpus=2 \ 71 | --check-cpu \ 72 | --accelerate \ 73 | --hvm \ 74 | --location=$os_url \ 75 | --cpu host \ 76 | --graphics vnc,listen=0.0.0.0 \ 77 | --clock offset=localtime \ 78 | --force 79 | 80 | # The argument `--cpu host` enables nested virtualization and is required to setup sat6 vms with provisioning support. 81 | -------------------------------------------------------------------------------- /misc/base_image_creation/ks_rhel6_template: -------------------------------------------------------------------------------- 1 | # Kickstart file automatically generated by anaconda. 
2 | #version=DEVEL 3 | install 4 | url --url=OS_URL 5 | lang en_US.UTF-8 6 | keyboard us 7 | network --onboot yes --device eth0 --bootproto dhcp --noipv6 8 | rootpw --iscrypted ENCRYPT_PASS 9 | firewall --service=ssh 10 | authconfig --enableshadow --passalgo=sha512 11 | selinux --enforcing 12 | timezone America/New_York 13 | bootloader --location=mbr --driveorder=vda --append="crashkernel=auto rhgb quiet" 14 | # The following is the partition information you requested 15 | # Note that any partitions you deleted are not expressed 16 | # here so unless you clear all partitions first, this is 17 | # not guaranteed to work 18 | zerombr 19 | clearpart --all 20 | 21 | # No need to resize after the Os Install, Whatever be the disk size everything resides on / 22 | part /boot --fstype=ext4 --size=500 23 | part pv.253002 --grow --size=1 24 | volgroup vg_dhcp201 --pesize=4096 pv.253002 25 | logvol swap --name=lv_swap --vgname=vg_dhcp201 --grow --size=5984 --maxsize=5984 26 | logvol / --fstype=ext4 --name=lv_root --vgname=vg_dhcp201 --grow --size=1024 27 | 28 | 29 | repo --name="Red Hat Enterprise Linux" --baseurl=OS_URL --cost=100 30 | 31 | %packages 32 | @base 33 | @core 34 | wget 35 | yum-utils 36 | avahi-tools 37 | avahi 38 | qemu-guest-agent 39 | %end 40 | 41 | %post --logfile /var/log/baseimage-postinstall.log 42 | 43 | ssh-keygen -t rsa -N "" -f /root/.ssh/id_dsa 44 | curl -sS -o /root/.ssh/authorized_keys AUTH_KEYS_URL 45 | restorecon -rv /root/.ssh/authorized_keys 46 | sysctl -w net.ipv6.conf.all.disable_ipv6=1 >> /etc/sysctl.conf 47 | sed -Ei 's/^(search.*)/\1\nnameserver NAMESERVER/' /etc/resolv.conf 48 | echo 'UseDNS no' >> /etc/ssh/sshd_config 49 | 50 | cat < /etc/yum.repos.d/rhel6.repo 51 | [rhel6] 52 | name=rhel6 53 | baseurl=OS_URL 54 | enabled=0 55 | gpgcheck=0 56 | EOL 57 | 58 | chmod +x /etc/rc.local 59 | cat << EOF >> /etc/rc.local 60 | ( 61 | [ -f /root/setup_configured ] && echo exiting && exit 62 | iptables -I INPUT -d 224.0.0.251/32 -p udp -m udp 
--dport 5353 -m conntrack --ctstate NEW -j ACCEPT 63 | service iptables save 64 | ntpdate clock.redhat.com 65 | rm -f /etc/udev/rules.d/70-persistent-net.rules 66 | touch /root/setup_configured 67 | ) 2>&1 | tee -a /var/log/baseimage-firstboot.log 68 | EOF 69 | 70 | %end 71 | 72 | reboot 73 | -------------------------------------------------------------------------------- /misc/base_image_creation/ks_rhel7_template: -------------------------------------------------------------------------------- 1 | #version=RHEL7 2 | # System authorization information 3 | auth --enableshadow --passalgo=sha512 4 | # Use network installation 5 | url --url=OS_URL 6 | # Run the Setup Agent on first boot 7 | firstboot --enable 8 | ignoredisk --only-use=vda 9 | # Keyboard layouts 10 | keyboard --vckeymap=us --xlayouts='us' 11 | # System language 12 | lang en_US.UTF-8 13 | # Network information 14 | network --bootproto=dhcp --device=eth0 --ipv6=auto --activate 15 | # Root password 16 | rootpw --iscrypted ENCRYPT_PASS 17 | # System timezone 18 | timezone --utc America/New_York 19 | # System bootloader configuration 20 | bootloader --location=mbr --boot-drive=vda 21 | #autopart --type=lvm 22 | # Partition clearing information 23 | clearpart --all --initlabel --drives=vda 24 | # No need to resize after the Os Install, Whatever be the disk size everything resides on / 25 | part /boot --fstype=ext4 --size=500 26 | part pv.253002 --grow --size=1 27 | volgroup vg_dhcp201 --pesize=4096 pv.253002 28 | logvol swap --name=lv_swap --vgname=vg_dhcp201 --grow --size=5984 --maxsize=5984 29 | logvol / --fstype=ext4 --name=lv_root --vgname=vg_dhcp201 --grow --size=1024 30 | 31 | repo --name="Red Hat Enterprise Linux" --baseurl=OS_URL --cost=100 32 | 33 | %packages 34 | @core 35 | @base 36 | wget 37 | yum-utils 38 | avahi 39 | qemu-guest-agent 40 | %end 41 | 42 | %post --logfile /var/log/baseimage-postinstall.log 43 | 44 | ssh-keygen -t rsa -N "" -f /root/.ssh/id_dsa 45 | curl -sS -o 
/root/.ssh/authorized_keys AUTH_KEYS_URL 46 | sysctl -w net.ipv6.conf.all.disable_ipv6=1 >> /etc/sysctl.conf 47 | restorecon /etc/resolv.conf 48 | sed -Ei 's/^(search.*)/\1\nnameserver NAMESERVER/' /etc/resolv.conf && chattr +i /etc/resolv.conf 49 | echo 'UseDNS no' >> /etc/ssh/sshd_config 50 | 51 | cat < /etc/yum.repos.d/rhel7.repo 52 | [rhel7] 53 | name=rhel7 54 | baseurl=OS_URL 55 | enabled=0 56 | gpgcheck=0 57 | EOL 58 | 59 | chmod +x /etc/rc.local 60 | cat << EOF >> /etc/rc.local 61 | ( 62 | [ -f /root/setup_configured ] && echo exiting && exit 63 | firewall-cmd --add-service mdns 64 | firewall-cmd --runtime-to-permanent 65 | systemctl stop chronyd 66 | ntpdate clock.redhat.com 67 | systemctl start chronyd 68 | touch /root/setup_configured 69 | ) 2>&1 | tee -a /var/log/baseimage-firstboot.log 70 | EOF 71 | 72 | %end 73 | 74 | reboot 75 | -------------------------------------------------------------------------------- /misc/base_image_creation/ks_rhel8_template: -------------------------------------------------------------------------------- 1 | #version=RHEL8 2 | # System authorization information 3 | auth --enableshadow --passalgo=sha512 4 | # Use network installation 5 | url --url=OS_URL 6 | # Run the Setup Agent on first boot 7 | firstboot --enable 8 | ignoredisk --only-use=vda 9 | # Keyboard layouts 10 | keyboard --vckeymap=us --xlayouts='us' 11 | # System language 12 | lang en_US.UTF-8 13 | # Network information 14 | network --bootproto=dhcp --device=ens3 --ipv6=auto --activate 15 | # Root password 16 | rootpw --iscrypted ENCRYPT_PASS 17 | # System timezone 18 | timezone --utc America/New_York 19 | # System bootloader configuration 20 | bootloader --location=mbr --boot-drive=vda 21 | #autopart --type=lvm 22 | # Partition clearing information 23 | clearpart --all --initlabel --drives=vda 24 | # No need to resize after the Os Install, Whatever be the disk size everything resides on / 25 | part /boot --fstype=ext4 --size=500 26 | part pv.253002 --grow 
--size=1 27 | volgroup vg_dhcp201 --pesize=4096 pv.253002 28 | logvol swap --name=lv_swap --vgname=vg_dhcp201 --grow --size=5984 --maxsize=5984 29 | logvol / --fstype=ext4 --name=lv_root --vgname=vg_dhcp201 --grow --size=1024 30 | 31 | repo --name="AppStream" --baseurl=AS_URL --cost=100 32 | 33 | %packages 34 | @core 35 | @base 36 | wget 37 | yum-utils 38 | avahi 39 | qemu-guest-agent 40 | kexec-tools 41 | %end 42 | 43 | %post --logfile /var/log/baseimage-postinstall.log 44 | 45 | ssh-keygen -t rsa -N "" -f /root/.ssh/id_dsa 46 | curl -sS -o /root/.ssh/authorized_keys AUTH_KEYS_URL 47 | sysctl -w net.ipv6.conf.all.disable_ipv6=1 >> /etc/sysctl.conf 48 | restorecon /etc/resolv.conf 49 | sed -Ei 's/^(search.*)/\1\nnameserver NAMESERVER/' /etc/resolv.conf && chattr +i /etc/resolv.conf 50 | echo 'UseDNS no' >> /etc/ssh/sshd_config 51 | 52 | cat < /etc/yum.repos.d/rhel8.repo 53 | [rhel8] 54 | name=rhel8 55 | baseurl=OS_URL 56 | enabled=0 57 | gpgcheck=0 58 | EOL 59 | 60 | chmod +x /etc/rc.local 61 | cat << EOF >> /etc/rc.local 62 | ( 63 | [ -f /root/setup_configured ] && echo exiting && exit 64 | firewall-cmd --add-service mdns 65 | firewall-cmd --runtime-to-permanent 66 | systemctl stop chronyd 67 | ntpdate clock.redhat.com 68 | systemctl start chronyd 69 | touch /root/setup_configured 70 | ) 2>&1 | tee -a /var/log/baseimage-firstboot.log 71 | EOF 72 | 73 | %end 74 | 75 | reboot 76 | -------------------------------------------------------------------------------- /misc/cleanup_scripts/clean_docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Define a timestamp function 3 | timestamp() { 4 | date +"%Y-%m-%d:%T" 5 | } 6 | echo "$(timestamp): Cleaning UP of Docker containers BEGINS" >> cleanup_docker.log 2>&1 7 | echo "$(timestamp): Stopping all the running docker containers" >> cleanup_docker.log 2>&1 8 | docker ps -a | grep 'days ago' | awk '{print $1}' | xargs --no-run-if-empty docker stop >> 
cleanup_docker.log 2>&1 9 | echo "$(timestamp): Removing all the docker containers" >> cleanup_docker.log 2>&1 10 | docker ps -a | grep 'days ago' | awk '{print $1}' | xargs --no-run-if-empty docker rm >> cleanup_docker.log 2>&1 11 | echo "$(timestamp): Cleaning UP of Docker containers ENDS" >> cleanup_docker.log 2>&1 12 | -------------------------------------------------------------------------------- /misc/cleanup_scripts/clean_vms.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Define a timestamp function 3 | timestamp() { 4 | date +"%Y-%m-%d:%T" 5 | } 6 | echo "$(timestamp): Cleaning UP of Virtual Machines BEGINS" >> /var/log/cleanup_vms.log 2>&1 7 | virsh list | grep -ve qe -ve Name | awk '{print $2}' > /root/vm_list.txt 8 | for i in `cat /root/vm_list.txt`; do virsh destroy $i; virsh undefine $i; virsh vol-delete --pool default /opt/robottelo/images/$i.img; done >> /var/log/cleanup_vms.log 2>&1 9 | echo "$(timestamp): Cleaning UP of Virtual Machines END" >> /var/log/cleanup_vms.log 2>&1 10 | -------------------------------------------------------------------------------- /requirements-optional.txt: -------------------------------------------------------------------------------- 1 | pytest==3.0.4 2 | pytest-cov==2.4.0 3 | coverage==4.2 4 | pytest-mock==1.5.0 5 | mock==2.0.0 -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # "Now your `pip install -r requirements.txt` will work just as before. It will 2 | # first install the library located at the file path `.` and then move on to 3 | # its abstract dependencies" 4 | # 5 | # See: https://caremad.io/2013/07/setup-vs-requirement/ 6 | 7 | --editable . 
8 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | from os import system, environ 4 | try: 5 | from setuptools import setup 6 | except ImportError: 7 | from distutils.core import setup 8 | 9 | with open('README.rst', 'r') as f: 10 | readme = f.read() 11 | 12 | if system('curl --version | grep NSS 2>/dev/null') != 0: 13 | environ['PYCURL_SSL_LIBRARY'] = 'openssl' 14 | system('pip install --compile --install-option="--with-openssl" pycurl') 15 | else: 16 | environ['PYCURL_SSL_LIBRARY'] = 'nss' 17 | system('pip install --compile --install-option="--with-nss" pycurl') 18 | 19 | setup( 20 | name='automation_tools', 21 | version='0.1.0', 22 | description='Tools to help automating testing Foreman with Robottelo.', 23 | long_description=readme, 24 | author=u'Elyézer Rezende', 25 | author_email='erezende@redhat.com', 26 | url='https://github.com/SatelliteQE/automation-tools', 27 | packages=['automation_tools', 'automation_tools/satellite6'], 28 | package_data={'': ['LICENSE']}, 29 | package_dir={'automation_tools': 'automation_tools'}, 30 | include_package_data=True, 31 | install_requires=[ 32 | 'beautifulsoup4', 33 | 'Fabric<2', 34 | 'lxml', 35 | 'pycurl', 36 | 'pytest', 37 | 'python-bugzilla==1.2.2', 38 | 'requests', 39 | 'robozilla', 40 | 'six', 41 | 'unittest2', 42 | ], 43 | license='GNU GPL v3.0', 44 | classifiers=[ 45 | 'Development Status :: 5 - Production/Stable', 46 | 'Intended Audience :: Developers', 47 | 'Natural Language :: English', 48 | 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', 49 | 'Programming Language :: Python', 50 | 'Programming Language :: Python :: 2.7', 51 | 'Programming Language :: Python :: 3.6', 52 | ], 53 | ) 54 | -------------------------------------------------------------------------------- /tests/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/SatelliteQE/automation-tools/de090390670a0c9cdb5148137a88d0ace00116de/tests/__init__.py -------------------------------------------------------------------------------- /tests/test_log.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import unicode_literals 3 | 4 | import pytest 5 | 6 | from automation_tools.satellite6.log import LogAnalyzer 7 | 8 | 9 | @pytest.fixture(autouse=True) 10 | def execute(mocker): 11 | """Mock fabric's execute function to avoid call through network""" 12 | execute_mock = mocker.patch('automation_tools.satellite6.log.execute') 13 | 14 | def execute_function_argmument(function, host): 15 | function() 16 | return host 17 | 18 | execute_mock.side_effect = execute_function_argmument 19 | return execute_mock 20 | 21 | 22 | def run_mock_helper(mocker, return_value): 23 | """Helper to mock run""" 24 | run_mock = mocker.patch('automation_tools.satellite6.log.run') 25 | run_mock.return_value = return_value 26 | return run_mock 27 | 28 | 29 | @pytest.fixture 30 | def run_50(mocker): 31 | """Mock fabric's run function to avoid call through network. return 32 | string 50 all the time run is executed""" 33 | return run_mock_helper(mocker, '50') 34 | 35 | 36 | @pytest.fixture 37 | def run_with_error(mocker): 38 | """Mock fabric's run function to avoid call through network. 
return 39 | string with file not available""" 40 | return run_mock_helper( 41 | mocker, 42 | '/bin/bash: /var/log/foreman-installer/satellite.log: No such file ' 43 | 'or directory') 44 | 45 | 46 | def test_log_analyzer_enter(run_50): 47 | """Check with __enter__ calls fabric functions to get log files state""" 48 | analyzer = LogAnalyzer('root@foo.bar') 49 | analyzer.__enter__() 50 | expected_state = { 51 | '/var/log/foreman-installer/satellite.log': 50, 52 | '/var/log/foreman-installer/capsule.log': 50, 53 | '/var/log/satellite-installer/satellite-installer.log': 50, 54 | '/var/log/capsule-installer/capsule-installer.log': 50, 55 | '/var/log/foreman/production.log': 50, 56 | '/var/log/foreman-proxy/proxy.log': 50, 57 | '/var/log/candlepin/candlepin.log': 50, 58 | '/var/log/messages': 50, 59 | '/var/log/mongodb/mongodb.log': 50, 60 | '/var/log/tomcat/catalina.out': 50 61 | } 62 | assert analyzer.log_state == expected_state 63 | assert run_50.call_count == len(expected_state) 64 | for log_file in expected_state: 65 | run_50.assert_any_call('wc -l < %s' % log_file, quiet=True) 66 | 67 | # Assertiing calling enter again will calculate delta 68 | run_50.return_value = 55 69 | 70 | analyzer._update_log_files_state() # noqa 71 | 72 | for lines_appended in analyzer.log_state.values(): 73 | assert 5 == lines_appended # result of 55 - 50 74 | 75 | 76 | def test_log_analyzer_exit(mocker): 77 | """Check exit get lines appended on log files""" 78 | analyzer = LogAnalyzer('root@foo.bar') 79 | 80 | # Mocking 81 | analyzer._update_log_files_state = mocker.Mock() # noqa 82 | 83 | # Defining log state with files with and without lines appended 84 | log_with_lines_appended = { 85 | '/var/log/candlepin/candlepin.log': 1, 86 | '/var/log/messages': 2, 87 | '/var/log/mongodb/mongodb.log': 3, 88 | '/var/log/tomcat/catalina.out': 4 89 | } 90 | 91 | log_without_lines_appended = { 92 | '/var/log/foreman-installer/satellite.log': 0, 93 | '/var/log/foreman/production.log': -1, 94 | 
'/var/log/foreman-proxy/proxy.log': 0, 95 | } 96 | 97 | analyzer.log_state.update(log_with_lines_appended) 98 | analyzer.log_state.update(log_without_lines_appended) 99 | 100 | # Defining context which will be returned for files with lines appended 101 | log_files_content = { 102 | '/var/log/candlepin/candlepin.log': 'foo', 103 | '/var/log/messages': 'bar', 104 | '/var/log/mongodb/mongodb.log': 'baz', 105 | '/var/log/tomcat/catalina.out': 'blah' 106 | } 107 | 108 | def tail_side_effect(tail_cmd, quiet): 109 | assert quiet 110 | for log_file, content in log_files_content.items(): 111 | if tail_cmd.endswith(log_file): 112 | return content 113 | 114 | run_mock = mocker.patch('automation_tools.satellite6.log.run') 115 | run_mock.side_effect = tail_side_effect 116 | 117 | analyzer.__exit__(None, None, None) 118 | 119 | analyzer._update_log_files_state.assert_called_once_with() # noqa 120 | 121 | assert run_mock.call_count == len(log_with_lines_appended) 122 | 123 | for log_file, lines_appended in log_with_lines_appended.items(): 124 | cmd = ( 125 | 'tail -n {lines} {file} | grep -e "ERROR" ' 126 | '-e "EXCEPTION" ' 127 | '-e "returned 1 instead of one of \\[0\\]" ' 128 | '-e "Could not find the inverse association for repository" ' 129 | '-e "undefined method" ' 130 | '{file}' 131 | ) 132 | run_mock.assert_any_call( 133 | cmd.format(lines=lines_appended, file=log_file), quiet=True) 134 | 135 | 136 | def test_log_analyzer_file_not_available(run_with_error): 137 | # Testing enter 138 | not_zero_state = { 139 | '/var/log/foreman-installer/satellite.log': 50, 140 | '/var/log/foreman/production.log': 50, 141 | '/var/log/foreman-proxy/proxy.log': 50, 142 | '/var/log/candlepin/candlepin.log': 50, 143 | '/var/log/messages': 50, 144 | '/var/log/mongodb/mongodb.log': 50, 145 | '/var/log/tomcat/catalina.out': 50 146 | } 147 | analyzer = LogAnalyzer('root@foo.bar') 148 | analyzer.log_state = dict(not_zero_state.items()) 149 | analyzer.__enter__() 150 | assert 
run_with_error.call_count == len(not_zero_state) 151 | for line_appended in analyzer.log_state.values(): 152 | assert 0 == line_appended 153 | 154 | # Testing exit 155 | run_with_error.reset_mock() 156 | analyzer.log_state = dict(not_zero_state.items()) 157 | analyzer.__exit__(None, None, None) 158 | assert run_with_error.call_count == len(not_zero_state) 159 | for line_appended in analyzer.log_state.values(): 160 | assert 0 == line_appended 161 | 162 | 163 | @pytest.fixture 164 | def print_mock(mocker): 165 | """Mock _print_wrapper function""" 166 | return mocker.patch('automation_tools.satellite6.log._print_wrapper') 167 | 168 | 169 | @pytest.fixture 170 | def save_log_mock(mocker): 171 | """Mock _print_wrapper function""" 172 | return mocker.patch('automation_tools.satellite6.log._save_full_log') 173 | --------------------------------------------------------------------------------