├── .gitignore
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── gluster-ansible-roles.spec
├── meta
│   └── README.md
├── playbooks
│   ├── README.md
│   ├── cluster-cleanup
│   │   ├── cleanup_vars.yml
│   │   └── volume_cleanup.yml
│   ├── gluster-cluster
│   │   ├── README.md
│   │   ├── cluster-vars.yml
│   │   └── gluster_volume.yml
│   └── hc-ansible-deployment
│       ├── README
│       ├── archive_config.yml
│       ├── archive_config_inventory.yml
│       ├── extra
│       │   └── 01_remove_engine_db_entry.yml
│       ├── gluster_inventory.yml
│       ├── gluster_network_inventory.yml
│       ├── hc_deployment.yml
│       ├── he_gluster_vars.json
│       ├── luks_tang_inventory.yml
│       ├── node_prep_inventory.yml
│       ├── node_replace_inventory.yml
│       ├── nx3_gluster_inventory.yml
│       ├── reconfigure_he_storage_inventory.yml
│       ├── single_node_gluster_inventory.yml
│       └── tasks
│           ├── add_hosts_storage_domains.yml
│           ├── backup.yml
│           ├── gluster_cleanup.yml
│           ├── gluster_deployment.yml
│           ├── gluster_network_setup.yml
│           ├── he_deployment.yml
│           ├── luks_device_cleanup.yml
│           ├── luks_tang_setup.yml
│           ├── reconfigure_he_storage.yml
│           ├── replace_node.yml
│           └── restore.yml
└── tests
    └── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | *~
2 | \#*#
3 | *.pyc
4 | *.pyo
5 | .*.swp
6 | .vagrant
7 | .tags*
8 | *.retry
9 | *.egg-info
10 | .eggs
11 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | Thank you for considering contributing to Gluster Ansible. This document explains how the repository is organized and how to submit contributions.
4 |
5 | **Table of Contents**
6 |
7 | - [Introduction](#introduction)
8 | - [Submitting contributions](#submitting-contributions)
9 |
10 | ## Introduction
11 |
12 | Your Ansible role should fit into one of the following categories:
13 |
14 | * [gluster.infra](https://github.com/gluster/gluster-ansible-infra)
15 | * [gluster.cluster](https://github.com/gluster/gluster-ansible-cluster)
16 | * [gluster.features](https://github.com/gluster/gluster-ansible-features)
17 | * [gluster.repositories](https://github.com/gluster/gluster-ansible-repositories)
18 | * [gluster.maintenance](https://github.com/gluster/gluster-ansible-maintenance)
19 |
20 | If your proposed contribution does not align with any of the above categories, feel free to send a mail to gluster-devel@; we will be happy to discuss it.
21 |
22 | ## Submitting contributions
23 |
24 | 1. Fork the repo and clone it locally.
25 | 2. Make your changes and commit. Review your changes and run tests before submitting:
26 |    1. Ensure that you write a README.md for your role explaining the variables supported by the role. An example role can be found [here](https://github.com/gluster/gluster-ansible-infra).
27 |    2. Test the role and provide at least one playbook which uses the role (see the sketch below).
28 | 3. [Open a Pull Request](https://help.github.com/articles/creating-a-pull-request/). Give it a meaningful title explaining the changes you are proposing, and then add further details in the description.
29 |
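30 | For step 2.2 above, a test playbook can be as small as the following sketch (the role name is a placeholder for your role):
31 |
32 | ```
33 | ---
34 | - name: Exercise the role under review
35 |   hosts: all
36 |   roles:
37 |     - gluster.my_new_role   # placeholder: substitute your role's name
38 | ```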
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 | {one line to give the program's name and a brief idea of what it does.}
635 | Copyright (C) {year} {name of author}
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see <https://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | {project} Copyright (C) {year} {fullname}
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/licenses/why-not-lgpl.html>.
675 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Gluster Ansible Roles
2 |
3 | The gluster-ansible project provides [Ansible](https://www.ansible.com/) roles to deploy, configure, and maintain GlusterFS clusters.
4 |
5 | The goal of gluster-ansible is to develop roles which will enable the user to:
6 |
7 | * subscribe to repositories which provide GlusterFS and related packages, and install the packages.
8 | * create a GlusterFS cluster (Replicate, Distributed-Replicate, Arbiter, etc.).
9 | * configure GlusterFS to enable features like NFS-Ganesha, CTDB, Geo-Replication, etc.
10 | * upgrade/downgrade the cluster
11 | * expand/shrink the cluster
12 |
13 | The roles are classified into the following categories, each of which may have sub-roles for specific tasks; these are explained in detail in their respective repositories.
14 |
15 | * [gluster.infra](https://github.com/gluster/gluster-ansible-infra) - helps the user get started with deploying the GlusterFS filesystem.
16 | * [gluster.cluster](https://github.com/gluster/gluster-ansible-cluster) - helps the user set up a GlusterFS cluster and manage gluster volume and peer operations.
17 | * [gluster.features](https://github.com/gluster/gluster-ansible-features) - implements GlusterFS use cases: nfs_ganesha, gluster_hc, ctdb, geo_replication.
18 | * [gluster.repositories](https://github.com/gluster/gluster-ansible-repositories) - helps the user register with RHSM and subscribe to repositories.
19 | * [gluster.maintenance](https://github.com/gluster/gluster-ansible-maintenance) - helps the user replace nodes and perform other maintenance activities.
20 |
21 | To contribute to the project, refer to [Contributing](CONTRIBUTING.md).
22 |
23 |
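24 | For illustration, the example playbook under playbooks/gluster-cluster applies the roles like this (backend setup first, then volume creation; the variables live in cluster-vars.yml next to it):
25 |
26 | ```
27 | ---
28 | - name: Setup backend and create a GlusterFS cluster
29 |   hosts: all
30 |   remote_user: root
31 |   vars_files:
32 |     - cluster-vars.yml
33 |   roles:
34 |     - gluster.infra
35 |     - gluster.cluster
36 | ```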
--------------------------------------------------------------------------------
/gluster-ansible-roles.spec:
--------------------------------------------------------------------------------
1 | %global rolesdir %{_sysconfdir}/ansible/roles/gluster.ansible
2 | %global docdir %{_datadir}/doc/gluster.ansible
3 | %global buildnum 27
4 |
5 | Name: gluster-ansible-roles
6 | Version: 1.0.5
7 | Release: %{buildnum}%{?dist}
8 | Summary: Ansible roles for GlusterFS deployment and management
9 |
10 | URL: https://github.com/gluster/gluster-ansible
11 | Source0: %{url}/archive/v%{version}-%{buildnum}.tar.gz#/%{name}-%{version}-%{buildnum}.tar.gz
12 | License: GPLv3
13 | BuildArch: noarch
14 |
15 | Requires: ansible-core >= 2.12
16 | Requires: gluster-ansible-infra >= 1.0.4
17 | Requires: gluster-ansible-features >= 1.0.5
18 | Requires: gluster-ansible-cluster >= 1.0
19 | Requires: gluster-ansible-repositories >= 1.0.1
20 | Requires: gluster-ansible-maintenance >= 1.0.1
21 |
22 | %description
23 | Collection of Ansible roles for deploying and managing GlusterFS clusters.
24 |
25 | %prep
26 | %autosetup -p1 -n gluster-ansible-%{version}-%{buildnum}
27 |
28 | %build
29 |
30 | %install
31 | mkdir -p %{buildroot}/%{rolesdir}
32 | cp -a playbooks/ %{buildroot}/%{rolesdir}
33 |
34 | mkdir -p %{buildroot}/%{docdir}
35 | install -p -m 644 README.md LICENSE %{buildroot}/%{docdir}
36 |
37 |
38 | %files
39 | %{rolesdir}
40 | %doc %{docdir}
41 |
42 | %license LICENSE
43 |
44 | %changelog
45 | * Thu May 05 2022 Sandro Bonazzola - 1.0.5-27
46 | - Rebase on v1.0.5-27
47 |
48 | * Fri Apr 01 2022 Sandro Bonazzola - 1.0.5-26
49 | - Rebase on v1.0.5-26
50 |
51 | * Mon Mar 4 2019 Sachidananda Urs 1.0.4-5
52 | - Rename the package
53 |
54 | * Fri Feb 22 2019 Sachidananda Urs 1.0.0-2
55 | - Update example playbooks to clean up failed deployments
56 |
57 | * Thu Feb 21 2019 Sachidananda Urs 1.0.0-1
58 | - Bump the version number, stable enough to call 1.0.0
59 |
60 | * Tue Oct 23 2018 Sachidananda Urs 0.5
61 | - Address the security concerns regarding plaintext passwords
62 |
63 | * Fri Oct 12 2018 Sachidananda Urs 0.4
64 | - Enhancements to examples
65 |
66 | * Mon Sep 24 2018 Sachidananda Urs 0.3
67 | - Added playbooks to illustrate end-to-end deployment
68 |
69 | * Fri Aug 31 2018 Sachidananda Urs 0.2
70 | - Added gluster-maintenance. Bug fixes across the roles
71 |
72 | * Tue Apr 24 2018 Sachidananda Urs 0.1
73 | - Initial release.
74 |
--------------------------------------------------------------------------------
/meta/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gluster/gluster-ansible/21aa88d0a57487302448e776e7f54616df7a558d/meta/README.md
--------------------------------------------------------------------------------
/playbooks/README.md:
--------------------------------------------------------------------------------
1 | # Playbooks for setting up GlusterFS and related use cases
2 |
3 | This directory contains playbooks and related documentation for setting up GlusterFS use cases.
4 |
--------------------------------------------------------------------------------
/playbooks/cluster-cleanup/cleanup_vars.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Volume(s) to stop and delete; append to the list for multiple volumes
3 | gluster_volumes:
4 |   - testvol
5 | # Brick mount points to unmount and reset
6 | gluster_infra_reset_mnt_paths:
7 |   - /mnt/brick0
8 |   - /mnt/brick1
9 | # LVM volume groups backing the bricks, to be removed
10 | gluster_infra_reset_volume_groups:
11 |   - vg_vdb
12 |   - vg_vdc
13 | # VDO volumes to remove (omit if VDO is not used)
14 | gluster_infra_reset_vdos:
15 |   - foo_1
16 |   - foo_2
17 |
--------------------------------------------------------------------------------
/playbooks/cluster-cleanup/volume_cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Reset the bricks
3 |   remote_user: root
4 |   gather_facts: no
5 |   hosts: all
6 |   vars_files:
7 |     - cleanup_vars.yml
8 |
9 |   pre_tasks:
10 |     - name: Stop Gluster volume(s)
11 |       # --mode=script suppresses the CLI's interactive confirmation prompt
12 |       command: "gluster --mode=script volume stop {{ item }} force"
13 |       # Alternatively, the gluster_volume module can stop the volume:
14 |       #  gluster_volume:
15 |       #    state: stopped
16 |       #    name: "{{ item }}"
17 |       with_items: "{{ gluster_volumes }}"
18 |       run_once: true
19 |
20 |     - name: Delete Gluster volume(s)
21 |       command: "gluster --mode=script volume delete {{ item }}"
22 |       # Alternatively, the gluster_volume module can delete the volume:
23 |       #  gluster_volume:
24 |       #    state: absent
25 |       #    name: "{{ item }}"
26 |       with_items: "{{ gluster_volumes }}"
27 |       run_once: true
28 |
29 |   roles:
30 |     - gluster.infra
31 |
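32 | # Example invocation (sketch; replace <inventory> with your hosts file):
33 | #   ansible-playbook -i <inventory> volume_cleanup.yml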
--------------------------------------------------------------------------------
/playbooks/gluster-cluster/README.md:
--------------------------------------------------------------------------------
1 | # How to deploy a Gluster cluster end-to-end?
2 |
3 | The cluster-vars.yml file contains variables for setting up the backend disks using
4 | an LVM thinpool and creating a Gluster volume. The example playbook creates a
5 | replica 2 volume with 4 bricks (2x2), but any volume type can be created.
6 | Please refer to: https://github.com/gluster/gluster-ansible-cluster
7 |
8 | ## Usage:
9 | To run the playbook, create an inventory file containing the hosts in question,
10 | edit it to suit your needs, and run the playbook:
11 |
12 | ```
13 | $ ansible-playbook -i path-to-inventory gluster_volume.yml
14 |
15 | ```
16 |
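17 | A minimal inventory for the 2x2 example could look like the following sketch
18 | (host names are placeholders; list your own four cluster nodes):
19 |
20 | ```
21 | all:
22 |   hosts:
23 |     host1.example.com:
24 |     host2.example.com:
25 |     host3.example.com:
26 |     host4.example.com:
27 | ```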
--------------------------------------------------------------------------------
/playbooks/gluster-cluster/cluster-vars.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Other supported disktypes are RAID5, RAID10, and JBOD
3 | # https://github.com/gluster/gluster-ansible-infra#backend_setup documents the
4 | # variables.
5 | gluster_infra_disktype: RAID6
6 | gluster_infra_diskcount: 12
7 | gluster_infra_stripe_unit_size: 128
8 |
9 | # The list has one item; if more disks are to be used, append to the list.
10 | # See: https://github.com/gluster/gluster-ansible-infra#volume-groups-variable
11 | # In the case of JBODs, a volume group can be created from more than one disk by
12 | # providing a comma-separated list of pvnames; see the module documentation and
13 | # example at:
14 | # https://docs.ansible.com/ansible/2.5/modules/lvg_module.html#examples
15 | gluster_infra_volume_groups:
16 |   - vgname: vg_vdb
17 |     pvname: /dev/vdb
18 |
19 | # On poolmetadatasize
20 | # Internally, a thin pool contains a separate metadata device that is used to
21 | # track the (dynamically) allocated regions of the thin LVs and snapshots. The
22 | # poolmetadatasize option below refers to the size of this pool metadata device.
23 | # The maximum possible size for a metadata LV is 16 GiB. Gluster Storage
24 | # recommends creating the metadata device of the maximum supported size. You can
25 | # allocate less than the maximum if space is a concern, but in that case you
26 | # should allocate a minimum of 0.5% of the pool size (for the 45G thinpool
27 | # below, 0.5% is ~0.23G, so the 1G configured here leaves ample headroom).
28 |
29 | gluster_infra_thinpools:
30 |   - vgname: 'vg_vdb'
31 |     thinpoolname: 'thinpool_vg_vdb'
32 |     thinpoolsize: '45G'
33 |     poolmetadatasize: '1G'
34 |
35 | gluster_infra_lv_logicalvols:
36 |   - vgname: vg_vdb
37 |     thinpool: thinpool_vg_vdb
38 |     lvname: gluster_lv_disk1
39 |     lvsize: 200G
40 |
41 | # Mount the devices
42 | gluster_infra_mount_devices:
43 |   - { path: '/gluster_bricks', vgname: vg_vdb, lvname: gluster_lv_disk1 }
44 |
45 |
46 | gluster_infra_fw_ports:
47 |   - 2049/tcp
48 |   - 54321/tcp
49 |   - 5900/tcp
50 |   - 5900-6923/tcp
51 |   - 5666/tcp
52 |   - 16514/tcp
53 | gluster_infra_fw_permanent: true
54 | gluster_infra_fw_state: enabled
55 | gluster_infra_fw_zone: public
56 | gluster_infra_fw_services:
57 |   - glusterfs
58 |
59 |
60 | # Variables for creating a GlusterFS volume.
61 | # groups['all'] uses all the hosts mentioned in the inventory file for the
62 | # cluster. If only a subset has to be used, change `all' to the name of the
63 | # relevant inventory section.
64 |
65 | gluster_cluster_hosts: "{{ groups['all'] }}"
66 | gluster_cluster_volume: testvol
67 | gluster_cluster_transport: 'tcp'
68 | gluster_cluster_force: 'yes'
69 | # The gluster_cluster_bricks variable should contain a comma-separated list of
70 | # directories which will form the bricks of the gluster volume. See
71 | # https://docs.ansible.com/ansible/latest/modules/gluster_volume_module.html;
72 | # gluster_cluster_bricks maps to the `bricks' variable in the module.
73 | gluster_cluster_bricks: '/gluster_bricks/testvol'
74 | gluster_cluster_replica_count: 2
75 |
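76 | # Note (standard Gluster behavior, not specific to this role): with four hosts
77 | # in the inventory and replica count 2, the volume above comes up as
78 | # distributed-replicate (2x2); consecutive bricks in host order form the
79 | # replica pairs.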
--------------------------------------------------------------------------------
/playbooks/gluster-cluster/gluster_volume.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Create a 2x2 gluster volume
3 | - name: Setup backend and create a GlusterFS cluster
4 |   hosts: all
5 |   remote_user: root
6 |   gather_facts: no
7 |   vars_files:
8 |     - cluster-vars.yml
9 |
10 |   roles:
11 |     - gluster.infra
12 |     - gluster.cluster
13 |
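14 | # Example invocation (see README.md in this directory; the inventory path is a placeholder):
15 | #   ansible-playbook -i <path-to-inventory> gluster_volume.yml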
--------------------------------------------------------------------------------
/playbooks/hc-ansible-deployment/README:
--------------------------------------------------------------------------------
1 | This directory contains playbooks for various purposes
2 |
3 | 1. HC Deployment
4 | ------------------
5 | HC deployment playbook which is using both gluster-ansible role and ovirt-ansible-hosted-engine-setup role.
6 | 1. Inventory file is gluster_inventory.yml has gluster related configurations template.
7 | Enable "VDO config" for VDO and disable Non-VDO part.
8 | 2. The json file he_gluster_vars.json has Hosted-Engine related configurations for gluster.
9 |
10 | Note: To deploy IPV6 based hostedengine, use the following in he_gluster_vars.json file:
11 | "he_mount_options": "backup-volfile-servers=host2-backend-network-FQDN:host3-backend-network-FQDN,xlator-option=transport.address-family=inet6"
12 |
13 | Replace appropriate host name with host1,host2 and host3 in inventory file and json file.
14 | Make sure for proper HC deployment both files ( gluster_inventory.yml and he_gluster_vars.json )
15 | need to be modified with accurate values like host,device,pvs,vgs,lvs,fqdn,passwords etc
16 |
17 | How to run:
18 | # cd hc-ansible-deployment
19 | # ansible-playbook -i gluster_inventory.yml hc_deployment.yml --extra-vars='@he_gluster_vars.json'
20 |
21 | For gluster deployment clean up:
22 | # ansible-playbook -i gluster_inventory.yml tasks/gluster_cleanup.yml
23 |
24 | For single node HC deployment, there is separate inventory called "single_node_gluster_inventory.yml".
25 | How to run:
26 | # cd hc-ansible-deployment
27 | # ansible-playbook -i single_node_gluster_inventory.yml hc_deployment.yml --extra-vars='@he_gluster_vars.json'
28 |
29 | ovirt_repo_release_rpm value should be particular release repo if you are installing role from ansible-galaxy
30 |
31 | -> The below variables are required only when user wants to install pkgs using subscription-manager.
32 | "ovirt_repositories_ovirt_release_rpm": "http://resources.ovirt.org/pub/yum-repo/ovirt-master-release.rpm"
33 | "ovirt_repositories_use_subscription_manager": false,
34 | "ovirt_repositories_ovirt_version": 4.2,
35 | "ovirt_repositories_target_host": "host1",
36 | "ovirt_repositories_rh_username": "Username to use for subscription manager",
37 | "ovirt_repositories_rh_password": "Password to use for subscription manager",
38 | "ovirt_repositories_pool_ids": ["List of pools ids to subscribe to"],
39 | "ovirt_repositories_pools": ["List of subscription pool names"],
40 | "ovirt_repositories_repos_backup_path": "Directory to backup the original repositories configuration",
41 | "ovirt_repositories_force_register": false,
42 | "ovirt_repositories_rhsm_server_hostname": "Hostname of the RHSM server. By default it's used from rhsm configuration"
43 | For more details please refer: https://github.com/oVirt/ovirt-ansible-repositories
44 |
45 | 2. NBDE setup with Clevis/Tang
46 | -------------------------------
47 | 1. Sample inventory file is luks_tang_inventory.yml
48 | 2. Create a backup copy of the template inventory file
49 | 3. Edit the inventory file right set of values
50 | 4. Encrypt the inventory with ansible-vault # ansible-vault encrypt luks_tang_inventory.yml
51 | 5. Run the playbook to configure NBDE with LUKS device and tang server:
52 | # cd hc-ansible-playbook
53 | # ansible-playbook -i luks_tang_inventory.yml tasks/luks_tang_setup.yml --tags luksencrypt,bindtang --ask-vault-pass
54 | 6. If blaclisting the devices is also required, include 'blacklistdevices' along with the tags
55 | # ansible-playbook -i luks_tang_inventory.yml tasks/luks_tang_setup.yml --tags blacklistdevices,luksencrypt,bindtang --ask-vault-pass
56 |
57 | 3. Host replacement
58 | --------------------
59 | Create two inventory:
60 | 1- node_prep_inventory.yml - This inventory contains information for host preparation.
61 | 2- node_replace_inventory.yml - This inventory contains information about node which needs to be replaced and other hosts
62 | Primarily there are two tasks:
63 | 1- Gluster host preparation. This task has the tag "preparehost"
64 | 2- Gluster Peer Membership Restoration. This task has tag "restorepeer"
65 | If both operation need to be performed then run:
66 | # ansible-playbook -i node_prep_inventory.yml -i node_replace_inventory.yml tasks/replace_node.yml
67 | Or using tags indivisual operation can be performed like:
68 | # ansible-playbook -i node_prep_inventory.yml -i node_replace_inventory.yml tasks/replace_node.yml --tags restorepeer
69 | Or
70 | ansible-playbook -i node_prep_inventory.yml -i node_replace_inventory.yml tasks/replace_node.yml --tags preparehost
71 |
72 | 4. Gluster logical network
73 | ---------------------------
74 | 1- Add appropriate values in gluster_network_inventory.yml
75 | 2- Run the playbook with the inventory, like:
76 | # ansible-playbook -i gluster_network_inventory.yml tasks/gluster_network_setup.yml
77 |
78 | 5. Backup of the host configuration files
79 | ------------------------------------------
80 | Backup of the host configuration files can be done concurrently on all the HC nodes.
81 | After running the backup playbook, make sure to copy the archive to a safe location (see the note below).
82 | To back up the configuration files on the HC nodes, run the following:
83 | # ansible-playbook -i archive_config_inventory.yml archive_config.yml --tags backupfiles
84 |
85 | Restoring the configuration should be done on a specific node only. So add only one host
86 | in the 'hosts' section of the inventory. Also make sure to unarchive the backup tar file
87 | previously backed up.
88 | To restore the configuration files on that node, run with the tag 'restorefiles':
89 | # ansible-playbook -i archive_config_inventory.yml archive_config.yml --tags restorefiles
90 |
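Note: tasks/backup.yml writes the archive to /root/rhvh-node-<hostname>-backup.tar.gz on
each node. One way to copy it off the host, for example:
# scp root@<host>:/root/rhvh-node-<hostname>-backup.tar.gz /path/to/safe/location/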
91 | 6. Reconfigure HostedEngine Storage
92 | ------------------------------------
93 | When the host acting as the gluster primary volfile server is replaced with a new host, the
94 | hosted-engine storage configuration needs to be updated. This task is handled with the
95 | reconfigure_he_storage.yml playbook. The inventory file is reconfigure_he_storage_inventory.yml.
96 |
97 | Run the playbook after populating the right set of values in reconfigure_he_storage_inventory.yml:
98 | # ansible-playbook -i reconfigure_he_storage_inventory.yml tasks/reconfigure_he_storage.yml
99 |
--------------------------------------------------------------------------------
/playbooks/hc-ansible-deployment/archive_config.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - import_playbook: tasks/backup.yml
3 | tags: backupfiles
4 | - import_playbook: tasks/restore.yml
5 | tags: restorefiles
6 |
--------------------------------------------------------------------------------
/playbooks/hc-ansible-deployment/archive_config_inventory.yml:
--------------------------------------------------------------------------------
1 | all:
2 | hosts:
3 | host1:
4 | host2:
5 | host3:
6 | vars:
7 | backup_dir: /archive
8 | nbde_setup: false
9 | upgrade: true
10 |
--------------------------------------------------------------------------------
/playbooks/hc-ansible-deployment/extra/01_remove_engine_db_entry.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Remove engine DB entry from storage_pool_iso_map
3 | command: >-
4 | /usr/share/ovirt-engine/dbscripts/engine-psql.sh -c "delete from storage_pool_iso_map where storage_id in (select id from storage_domain_static where (storage_name like 'engine'))"
5 | ignore_errors: yes
6 |
7 | - name: Remove engine DB entry from storage_server_connections
8 | command: >-
9 | /usr/share/ovirt-engine/dbscripts/engine-psql.sh -c "delete from storage_server_connections where connection like '%engine'"
10 | ignore_errors: yes
11 |
12 | - name: Remove hosted_storage DB entry from table storage_domain_dynamic
13 | command: >-
14 | /usr/share/ovirt-engine/dbscripts/engine-psql.sh -c "delete from storage_domain_dynamic where id in (select id from storage_domain_static where storage_name like 'engine%')"
15 | ignore_errors: yes
16 |
17 | - name: Remove hosted_storage DB entry from table storage_domain_static
18 | command: >-
19 | /usr/share/ovirt-engine/dbscripts/engine-psql.sh -c "delete from storage_domain_static where storage_name like 'engine%'"
20 | ignore_errors: yes
21 |
--------------------------------------------------------------------------------
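Note: the tasks above assume they are run on the Hosted Engine VM, where
/usr/share/ovirt-engine/dbscripts/engine-psql.sh is available. Since the file is a bare
task list, it needs a wrapper play; a minimal sketch (the 'engine' host alias is an
assumption, not part of the shipped inventories):

---
- hosts: engine   # hypothetical inventory alias for the Hosted Engine VM
  remote_user: root
  tasks:
    - import_tasks: extra/01_remove_engine_db_entry.yml
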
/playbooks/hc-ansible-deployment/gluster_inventory.yml:
--------------------------------------------------------------------------------
1 | hc_nodes:
2 | hosts:
3 | # Host1
4 | :
5 |
6 | # Blacklist multipath devices which are used for gluster bricks
7 | # If you omit blacklist_mpath_devices it means all devices will be whitelisted.
8 | # If the disks are not blacklisted, it is taken that a multipath configuration
9 | # exists on the server and one should provide the /dev/mapper/ path instead of /dev/sdx
10 | blacklist_mpath_devices:
11 | - sdb
12 | - sdc
13 |
14 | # Enable this section 'gluster_infra_vdo', if dedupe & compression is
15 | # required on that storage volume.
16 | # The variables refer to:
17 | # name - VDO volume name to be used
18 | # device - Disk on which the VDO volume is to be created
19 | # logicalsize - Logical size of the VDO volume. This value is 10 times
20 | # the size of the physical disk
21 | # emulate512 - When 'off', the VDO device is created as a 4KB block sized storage volume (4KN)
22 | # slabsize - VDO slab size. If VDO logical size >= 1000G then
23 | # slabsize is 32G else slabsize is 2G
24 | #
25 | # Following VDO values are as per recommendation and treated as constants:
26 | # blockmapcachesize - 128M
27 | # writepolicy - auto
28 | #
29 | # gluster_infra_vdo:
30 | # - { name: 'vdo_sdc', device: '/dev/sdc', logicalsize: '5000G', emulate512: 'off', slabsize: '32G',
31 | # blockmapcachesize: '128M', writepolicy: 'auto' }
32 | # - { name: 'vdo_sdd', device: '/dev/sdd', logicalsize: '3000G', emulate512: 'off', slabsize: '32G',
33 | # blockmapcachesize: '128M', writepolicy: 'auto' }
34 |
35 | # When dedupe and compression are enabled on the device,
36 | # use the pvname for that device as '/dev/mapper/<VDO volume name>'
37 | #
38 | # The variables refer to:
39 | # vgname - VG to be created on the disk
40 | # pvname - Physical disk (/dev/sdc) or VDO volume (/dev/mapper/vdo_sdc)
41 | gluster_infra_volume_groups:
42 | - vgname: gluster_vg_sdb
43 | pvname: /dev/sdb
44 | - vgname: gluster_vg_sdc
45 | pvname: /dev/mapper/vdo_sdc
46 | - vgname: gluster_vg_sdd
47 | pvname: /dev/mapper/vdo_sdd
48 |
49 | gluster_infra_mount_devices:
50 | - path: /gluster_bricks/engine
51 | lvname: gluster_lv_engine
52 | vgname: gluster_vg_sdb
53 | - path: /gluster_bricks/data
54 | lvname: gluster_lv_data
55 | vgname: gluster_vg_sdc
56 | - path: /gluster_bricks/vmstore
57 | lvname: gluster_lv_vmstore
58 | vgname: gluster_vg_sdd
59 |
60 | # 'thinpoolsize' is the sum of sizes of all LVs to be created on that VG
61 | # In the case of VDO enabled, 'thinpoolsize' is 10 times the sum of sizes
62 | # of all LVs to be created on that VG. Recommended values for
63 | # 'poolmetadatasize' is 16GB and that should be considered exclusive of
64 | # 'thinpoolsize'
65 | gluster_infra_thinpools:
66 | - {vgname: 'gluster_vg_sdc', thinpoolname: 'gluster_thinpool_sdc', thinpoolsize: '500G', poolmetadatasize: '16G'}
67 | - {vgname: 'gluster_vg_sdd', thinpoolname: 'gluster_thinpool_sdd', thinpoolsize: '500G', poolmetadatasize: '16G'}
68 |
69 | # Enable the following section if LVM cache is to be enabled
70 | # Following are the variables:
71 | # vgname - VG with the slow HDD device that needs caching
72 | # cachedisk - Comma-separated value of slow HDD and fast SSD
73 | # In this example, /dev/sdb is the slow HDD, /dev/sde is fast SSD
74 | # cachelvname - LV cache name
75 | # cachethinpoolname - Thinpool to which the fast SSD is to be attached
76 | # cachelvsize - Size of cache data LV. This is the SSD_size - (1/1000) of SSD_size
77 | # 1/1000th of SSD space will be used by cache LV meta
78 | # cachemode - writethrough or writeback
79 | # gluster_infra_cache_vars:
80 | # - vgname: gluster_vg_sdb
81 | # cachedisk: /dev/sdb,/dev/sde
82 | # cachelvname: cachelv_thinpool_sdb
83 | # cachethinpoolname: gluster_thinpool_sdb
84 | # cachelvsize: '250G'
85 | # cachemode: writethrough
86 |
87 | # Only the engine brick needs to be thickly provisioned
88 | # Engine brick requires 100GB of disk space
89 | gluster_infra_thick_lvs:
90 | - vgname: gluster_vg_sdb
91 | lvname: gluster_lv_engine
92 | size: 100G
93 |
94 | gluster_infra_lv_logicalvols:
95 | - vgname: gluster_vg_sdc
96 | thinpool: gluster_thinpool_sdc
97 | lvname: gluster_lv_data
98 | lvsize: 200G
99 | - vgname: gluster_vg_sdd
100 | thinpool: gluster_thinpool_sdd
101 | lvname: gluster_lv_vmstore
102 | lvsize: 200G
103 |
104 | #Host2
105 | :
106 |
107 | # Blacklist multipath devices which are used for gluster bricks
108 | # If you omit blacklist_mpath_devices it means all devices will be whitelisted.
109 | # If the disks are not blacklisted, it is taken that a multipath configuration
110 | # exists on the server and one should provide the /dev/mapper/ path instead of /dev/sdx
111 | blacklist_mpath_devices:
112 | - sdb
113 | - sdc
114 |
115 | # Enable this section 'gluster_infra_vdo', if dedupe & compression is
116 | # required on that storage volume.
117 | # The variables refer to:
118 | # name - VDO volume name to be used
119 | # device - Disk on which the VDO volume is to be created
120 | # logicalsize - Logical size of the VDO volume. This value is 10 times
121 | # the size of the physical disk
122 | # emulate512 - When 'off', the VDO device is created as a 4KB block sized storage volume (4KN)
123 | # slabsize - VDO slab size. If VDO logical size >= 1000G then
124 | # slabsize is 32G else slabsize is 2G
125 | #
126 | # Following VDO values are as per recommendation and treated as constants:
127 | # blockmapcachesize - 128M
128 | # writepolicy - auto
129 | #
130 | # gluster_infra_vdo:
131 | # - { name: 'vdo_sdc', device: '/dev/sdc', logicalsize: '5000G', emulate512: 'off', slabsize: '32G',
132 | # blockmapcachesize: '128M', writepolicy: 'auto' }
133 | # - { name: 'vdo_sdd', device: '/dev/sdd', logicalsize: '3000G', emulate512: 'off', slabsize: '32G',
134 | # blockmapcachesize: '128M', writepolicy: 'auto' }
135 |
136 | # When dedupe and compression are enabled on the device,
137 | # use the pvname for that device as '/dev/mapper/<VDO volume name>'
138 | #
139 | # The variables refer to:
140 | # vgname - VG to be created on the disk
141 | # pvname - Physical disk (/dev/sdc) or VDO volume (/dev/mapper/vdo_sdc)
142 | gluster_infra_volume_groups:
143 | - vgname: gluster_vg_sdb
144 | pvname: /dev/sdb
145 | - vgname: gluster_vg_sdc
146 | pvname: /dev/mapper/vdo_sdc
147 | - vgname: gluster_vg_sdd
148 | pvname: /dev/mapper/vdo_sdd
149 |
150 | gluster_infra_mount_devices:
151 | - path: /gluster_bricks/engine
152 | lvname: gluster_lv_engine
153 | vgname: gluster_vg_sdb
154 | - path: /gluster_bricks/data
155 | lvname: gluster_lv_data
156 | vgname: gluster_vg_sdc
157 | - path: /gluster_bricks/vmstore
158 | lvname: gluster_lv_vmstore
159 | vgname: gluster_vg_sdd
160 |
161 | # 'thinpoolsize' is the sum of sizes of all LVs to be created on that VG
162 | # In the case of VDO enabled, 'thinpoolsize' is 10 times the sum of sizes
163 | # of all LVs to be created on that VG. Recommended values for
164 | # 'poolmetadatasize' is 16GB and that should be considered exclusive of
165 | # 'thinpoolsize'
166 | gluster_infra_thinpools:
167 | - {vgname: 'gluster_vg_sdc', thinpoolname: 'gluster_thinpool_sdc', thinpoolsize: '500G', poolmetadatasize: '16G'}
168 | - {vgname: 'gluster_vg_sdd', thinpoolname: 'gluster_thinpool_sdd', thinpoolsize: '500G', poolmetadatasize: '16G'}
169 |
170 | # Enable the following section if LVM cache is to be enabled
171 | # Following are the variables:
172 | # vgname - VG with the slow HDD device that needs caching
173 | # cachedisk - Comma-separated value of slow HDD and fast SSD
174 | # In this example, /dev/sdb is the slow HDD, /dev/sde is fast SSD
175 | # cachelvname - LV cache name
176 | # cachethinpoolname - Thinpool to which the fast SSD is to be attached
177 | # cachelvsize - Size of cache data LV. This is the SSD_size - (1/1000) of SSD_size
178 | # 1/1000th of SSD space will be used by cache LV meta
179 | # cachemode - writethrough or writeback
180 | # gluster_infra_cache_vars:
181 | # - vgname: gluster_vg_sdb
182 | # cachedisk: /dev/sdb,/dev/sde
183 | # cachelvname: cachelv_thinpool_sdb
184 | # cachethinpoolname: gluster_thinpool_sdb
185 | # cachelvsize: '250G'
186 | # cachemode: writethrough
187 |
188 | # Only the engine brick needs to be thickly provisioned
189 | # Engine brick requires 100GB of disk space
190 | gluster_infra_thick_lvs:
191 | - vgname: gluster_vg_sdb
192 | lvname: gluster_lv_engine
193 | size: 100G
194 |
195 | gluster_infra_lv_logicalvols:
196 | - vgname: gluster_vg_sdc
197 | thinpool: gluster_thinpool_sdc
198 | lvname: gluster_lv_data
199 | lvsize: 200G
200 | - vgname: gluster_vg_sdd
201 | thinpool: gluster_thinpool_sdd
202 | lvname: gluster_lv_vmstore
203 | lvsize: 200G
204 |
205 | #Host3
206 | :
207 |
208 | # Blacklist multipath devices which are used for gluster bricks
209 | # If you omit blacklist_mpath_devices it means all devices will be whitelisted.
210 | # If the disks are not blacklisted, it is taken that a multipath configuration
211 | # exists on the server and one should provide the /dev/mapper/ path instead of /dev/sdx
212 | blacklist_mpath_devices:
213 | - sdb
214 | - sdd
215 |
216 | # Enable this section 'gluster_infra_vdo', if dedupe & compression is
217 | # required on that storage volume.
218 | # The variables refer to:
219 | # name - VDO volume name to be used
220 | # device - Disk on which the VDO volume is to be created
221 | # logicalsize - Logical size of the VDO volume. This value is 10 times
222 | # the size of the physical disk
223 | # emulate512 - When 'off', the VDO device is created as a 4KB block sized storage volume (4KN)
224 | # slabsize - VDO slab size. If VDO logical size >= 1000G then
225 | # slabsize is 32G else slabsize is 2G
226 | #
227 | # Following VDO values are as per recommendation and treated as constants:
228 | # blockmapcachesize - 128M
229 | # writepolicy - auto
230 | #
231 | # gluster_infra_vdo:
232 | # - { name: 'vdo_sdc', device: '/dev/sdc', logicalsize: '5000G', emulate512: 'off', slabsize: '32G',
233 | # blockmapcachesize: '128M', writepolicy: 'auto' }
234 | # - { name: 'vdo_sdd', device: '/dev/sdd', logicalsize: '3000G', emulate512: 'off', slabsize: '32G',
235 | # blockmapcachesize: '128M', writepolicy: 'auto' }
236 |
237 | # When dedupe and compression are enabled on the device,
238 | # use the pvname for that device as '/dev/mapper/<VDO volume name>'
239 | #
240 | # The variables refer to:
241 | # vgname - VG to be created on the disk
242 | # pvname - Physical disk (/dev/sdc) or VDO volume (/dev/mapper/vdo_sdc)
243 | gluster_infra_volume_groups:
244 | - vgname: gluster_vg_sdb
245 | pvname: /dev/sdb
246 | - vgname: gluster_vg_sdc
247 | pvname: /dev/mapper/vdo_sdc
248 | - vgname: gluster_vg_sdd
249 | pvname: /dev/mapper/vdo_sdd
250 |
251 | gluster_infra_mount_devices:
252 | - path: /gluster_bricks/engine
253 | lvname: gluster_lv_engine
254 | vgname: gluster_vg_sdb
255 | - path: /gluster_bricks/data
256 | lvname: gluster_lv_data
257 | vgname: gluster_vg_sdc
258 | - path: /gluster_bricks/vmstore
259 | lvname: gluster_lv_vmstore
260 | vgname: gluster_vg_sdd
261 |
262 | # 'thinpoolsize' is the sum of sizes of all LVs to be created on that VG
263 | # In the case of VDO enabled, 'thinpoolsize' is 10 times the sum of sizes
264 | # of all LVs to be created on that VG. Recommended values for
265 | # 'poolmetadatasize' is 16GB and that should be considered exclusive of
266 | # 'thinpoolsize'
267 | gluster_infra_thinpools:
268 | - {vgname: 'gluster_vg_sdc', thinpoolname: 'gluster_thinpool_sdc', thinpoolsize: '500G', poolmetadatasize: '16G'}
269 | - {vgname: 'gluster_vg_sdd', thinpoolname: 'gluster_thinpool_sdd', thinpoolsize: '500G', poolmetadatasize: '16G'}
270 |
271 | # Enable the following section if LVM cache is to be enabled
272 | # Following are the variables:
273 | # vgname - VG with the slow HDD device that needs caching
274 | # cachedisk - Comma-separated value of slow HDD and fast SSD
275 | # In this example, /dev/sdb is the slow HDD, /dev/sde is fast SSD
276 | # cachelvname - LV cache name
277 | # cachethinpoolname - Thinpool to which the fast SSD is to be attached
278 | # cachelvsize - Size of cache data LV. This is the SSD_size - (1/1000) of SSD_size
279 | # 1/1000th of SSD space will be used by cache LV meta
280 | # cachemode - writethrough or writeback
281 | # gluster_infra_cache_vars:
282 | # - vgname: gluster_vg_sdb
283 | # cachedisk: /dev/sdb,/dev/sde
284 | # cachelvname: cachelv_thinpool_sdb
285 | # cachethinpoolname: gluster_thinpool_sdb
286 | # cachelvsize: '250G'
287 | # cachemode: writethrough
288 |
289 | # Only the engine brick needs to be thickly provisioned
290 | # Engine brick requires 100GB of disk space
291 | gluster_infra_thick_lvs:
292 | - vgname: gluster_vg_sdb
293 | lvname: gluster_lv_engine
294 | size: 100G
295 |
296 | gluster_infra_lv_logicalvols:
297 | - vgname: gluster_vg_sdc
298 | thinpool: gluster_thinpool_sdc
299 | lvname: gluster_lv_data
300 | lvsize: 200G
301 | - vgname: gluster_vg_sdd
302 | thinpool: gluster_thinpool_sdd
303 | lvname: gluster_lv_vmstore
304 | lvsize: 200G
305 |
306 | # Common configurations
307 | vars:
308 | # In case of IPv6 based deployment "gluster_features_enable_ipv6" needs to be enabled; the below line needs to be uncommented, like:
309 | # gluster_features_enable_ipv6: true
310 |
311 | # Add the required hosts in the cluster. It can be 3, 6, 9 or 12 hosts
312 | cluster_nodes:
313 | -
314 | -
315 | -
316 |
317 | gluster_features_hci_cluster: "{{ cluster_nodes }}"
318 |
319 | # Create Gluster volumes for hyperconverged setup in 2 formats
320 | # format-1: Create bricks for gluster 1x3 replica volumes by default
321 | # on the first 3 hosts
322 | # format-2: Create bricks on the specified hosts, and it can create
323 | # nx3 distributed-replicated or distributed arbitrated
324 | # replicate volumes
325 | # Note: format-1 and format-2 are mutually exclusive, i.e. either
326 | # format-1 or format-2 is to be used. Don't mix the formats for
327 | # different volumes
328 |
329 | # Format-1 - Creates gluster 1x3 replicate or arbitrated replicate volume
330 | # - engine, vmstore, data with bricks on first 3 hosts
331 | gluster_features_hci_volumes:
332 | - volname: engine
333 | brick: /gluster_bricks/engine/engine
334 | arbiter: 0
335 | - volname: data
336 | brick: /gluster_bricks/data/data
337 | arbiter: 0
338 | - volname: vmstore
339 | brick: /gluster_bricks/vmstore/vmstore
340 | arbiter: 0
341 |
342 | # Format-2 - Allows creating nx3 volumes, with bricks on the specified hosts
343 | #gluster_features_hci_volumes:
344 | # - volname: engine
345 | # brick: /gluster_bricks/engine/engine
346 | # arbiter: 0
347 | # servers:
348 | # - host1
349 | # - host2
350 | # - host3
351 | #
352 | # # Following creates 2x3 'Data' gluster volume with bricks on host4,
353 | # # host5, host6, host7, host8, host9
354 | # - volname: data
355 | # brick: /gluster_bricks/data/data
356 | # arbiter: 0
357 | # servers:
358 | # - host4
359 | # - host5
360 | # - host6
361 | # - host7
362 | # - host8
363 | # - host9
364 | #
365 | # # Following creates 2x3 'vmstore' gluster volume with 2 bricks for
366 | # # each host
367 | # - volname: vmstore
368 | # brick: /gluster_bricks/vmstore1/vmstore1,/gluster_bricks/vmstore2/vmstore2
369 | # arbiter: 0
370 | # servers:
371 | # - host1
372 | # - host2
373 | # - host3
374 |
375 | # Firewall setup
376 | gluster_infra_fw_ports:
377 | - 2049/tcp
378 | - 54321/tcp
379 | - 5900-6923/tcp
380 | - 16514/tcp
381 | - 5666/tcp
382 | - 16514/tcp
383 | gluster_infra_fw_permanent: true
384 | gluster_infra_fw_state: enabled
385 | gluster_infra_fw_zone: public
386 | gluster_infra_fw_services:
387 | - glusterfs
388 | # Allowed values for 'gluster_infra_disktype' - RAID6, RAID5, JBOD
389 | gluster_infra_disktype: RAID6
390 |
391 | # 'gluster_infra_diskcount' is the number of data disks in the RAID set.
392 | # Note for JBOD its 1
393 | gluster_infra_diskcount: 10
394 |
395 | gluster_infra_stripe_unit_size: 256
396 | gluster_features_force_varlogsizecheck: false
397 | gluster_set_selinux_labels: true
398 |
399 | ## Auto add hosts vars
400 | gluster:
401 | hosts:
402 | :
403 | :
404 | vars:
405 | storage_domains:
406 | - {"name":"data","host":"host1-frontend-network-FQDN","address":"host1-backend-network-FQDN","path":"/data","function":"data","mount_options":"backup-volfile-servers=host2-backend-network-FQDN:host3-backend-network-FQDN"}
407 | - {"name":"vmstore","host":"host1-frontend-network-FQDN","address":"host1-backend-network-FQDN","path":"/vmstore","function":"data","mount_options":"backup-volfile-servers=host2-backend-network-FQDN:host3-backend-network-FQDN"}
408 |
409 | # In case of IPv6 based deployment an additional mount option is required, i.e. xlator-option="transport.address-family=inet6"; the storage_domains section above needs to be replaced with the one below.
410 | # Ex:
411 | #storage_domains:
412 | #- {"name":"data","host":"host1-frontend-network-FQDN","address":"host1-backend-network-FQDN","path":"/data","function":"data","mount_options":"backup-volfile-servers=host2-backend-network-FQDN:host3-backend-network-FQDN,xlator-option=transport.address-family=inet6"}
413 | #- {"name":"vmstore","host":"host1-frontend-network-FQDN","address":"host1-backend-network-FQDN","path":"/vmstore","function":"data","mount_options":"backup-volfile-servers=host2-backend-network-FQDN:host3-backend-network-FQDN,xlator-option=transport.address-family=inet6"}
414 |
--------------------------------------------------------------------------------
/playbooks/hc-ansible-deployment/gluster_network_inventory.yml:
--------------------------------------------------------------------------------
1 | all:
2 | hosts:
3 | localhost:
4 | vars:
5 | he_fqdn:
6 | he_admin_password:
7 | datacenter_name:
8 | cluster_name:
9 | boot_protocol:
10 | # Default version is v4
11 | version:
12 | mtu_value:
13 |
14 | # For dhcp boot_protocol
15 | cluster_nodes:
16 | - {host: 'host1-frontend-network-FQDN', interface: 'host1-network-interface-name'}
17 | - {host: 'host2-frontend-network-FQDN', interface: 'host2-network-interface-name'}
18 | - {host: 'host3-frontend-network-FQDN', interface: 'host3-network-interface-name'}
19 |
20 | # For static boot_protocol
21 | #cluster_nodes:
22 | #- {host: 'host1-frontend-network-FQDN', interface: 'host1-network-interface-name', address: , netmask: , gateway: }
23 | #- {host: 'host2-frontend-network-FQDN', interface: 'host2-network-interface-name', address: , netmask: , gateway: }
24 | #- {host: 'host3-frontend-network-FQDN', interface: 'host3-network-interface-name', address: , netmask: , gateway: }
25 |
--------------------------------------------------------------------------------
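For reference, a filled-in static cluster_nodes entry might look like the following
(the interface name and addresses are purely illustrative):

cluster_nodes:
  - {host: 'host1-frontend-network-FQDN', interface: 'eth0', address: '192.0.2.11', netmask: '255.255.255.0', gateway: '192.0.2.1'}
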
/playbooks/hc-ansible-deployment/hc_deployment.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - import_playbook: tasks/gluster_deployment.yml
3 | - import_playbook: tasks/he_deployment.yml
4 |
--------------------------------------------------------------------------------
/playbooks/hc-ansible-deployment/he_gluster_vars.json:
--------------------------------------------------------------------------------
1 | {
2 | "he_appliance_password": "encrypt-password-using-ansible-vault",
3 | "he_admin_password": "UI-password-for-login",
4 | "he_domain_type": "glusterfs",
5 | "he_fqdn": "FQDN-for-Hosted-Engine",
6 | "he_vm_mac_addr": "Valid MAC address",
7 | "he_default_gateway": "Valid Gateway",
8 | "he_mgmt_network": "ovirtmgmt",
9 | "he_storage_domain_name": "HostedEngine",
10 | "he_storage_domain_path": "/engine",
11 | "he_storage_domain_addr": "host1-backend-network-FQDN",
12 | "he_mount_options": "backup-volfile-servers=host2-backend-network-FQDN:host3-backend-network-FQDN",
13 | "he_bridge_if": "interface name for bridge creation",
14 | "he_enable_hc_gluster_service": true,
15 | "he_mem_size_MB": "16384",
16 | "he_cluster": "Default",
17 | "he_vcpus": 4
18 | }
19 |
--------------------------------------------------------------------------------
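The he_appliance_password placeholder indicates that the password is expected to be
protected with ansible-vault. One possible workflow (a suggestion, not the only option)
is to encrypt the whole vars file and prompt for the vault password at run time:
# ansible-vault encrypt he_gluster_vars.json
# ansible-playbook -i gluster_inventory.yml hc_deployment.yml --extra-vars='@he_gluster_vars.json' --ask-vault-pass
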
/playbooks/hc-ansible-deployment/luks_tang_inventory.yml:
--------------------------------------------------------------------------------
1 | hc_nodes:
2 | hosts:
3 | host1:
4 | # 1. Blacklist multipath devices which are used for gluster bricks.
5 | # The disks need not be blacklisted only when a multipath configuration
6 | # already exists on the server; in that case the value for 'devicename'
7 | # should be the /dev/mapper/ path instead of /dev/sdx
8 | # 2. When blacklisting the disks, include the BOOT/OS disk too
9 | # In the following example, sda is the BOOT/OS disk
10 | blacklist_mpath_devices:
11 | - sda
12 | - sdb
13 | - sdc
14 |
15 | gluster_infra_luks_devices:
16 | - devicename: /dev/sdb
17 | passphrase: test123
18 | - devicename: /dev/sdc
19 | passphrase: test1234
20 |
21 | rootpassphrase:
22 | rootdevice:
23 | networkinterface:
24 | # The below variables are required only if ip_config_method is static
25 | host_ip_addr:
26 | host_ip_prefix:
27 | host_net_gateway:
28 |
29 | host2:
30 | # 1. Blacklist multipath devices which are used for gluster bricks.
31 | # The disks need not be blacklisted only when a multipath configuration
32 | # already exists on the server; in that case the value for 'devicename'
33 | # should be the /dev/mapper/ path instead of /dev/sdx
34 | # 2. When blacklisting the disks, include the BOOT/OS disk too
35 | # In the following example, sda is the BOOT/OS disk
36 | blacklist_mpath_devices:
37 | - sda
38 | - sdb
39 | - sdc
40 |
41 | gluster_infra_luks_devices:
42 | - devicename: /dev/sdb
43 | passphrase: test123
44 | - devicename: /dev/sdc
45 | passphrase: test1234
46 |
47 | rootpassphrase:
48 | rootdevice:
49 | networkinterface:
50 | # The below variables are required only if ip_config_method is static
51 | host_ip_addr:
52 | host_ip_prefix:
53 | host_net_gateway:
54 |
55 | host3:
56 | # 1. Blacklist multipath devices which are used for gluster bricks.
57 | # The disks need not be blacklisted only when a multipath configuration
58 | # already exists on the server; in that case the value for 'devicename'
59 | # should be the /dev/mapper/ path instead of /dev/sdx
60 | # 2. When blacklisting the disks, include the BOOT/OS disk too
61 | # In the following example, sda is the BOOT/OS disk
62 | blacklist_mpath_devices:
63 | - sda
64 | - sdb
65 | - sdc
66 |
67 | gluster_infra_luks_devices:
68 | - devicename: /dev/sdd
69 | passphrase: test123
70 | - devicename: /dev/sde
71 | passphrase: test1234
72 |
73 | rootpassphrase:
74 | rootdevice:
75 | networkinterface:
76 | # The below variables are required only if ip_config_method is static
77 | host_ip_addr:
78 | host_ip_prefix:
79 | host_net_gateway:
80 |
81 | vars:
82 | ip_version:
83 | ip_config_method:
84 | # When tangd.socket is using a custom port other than 80,
85 | # specify the port in the url (see the filled-in example after this file)
86 | gluster_infra_tangservers:
87 | - url: http://:
88 | - url: http://:
89 |
--------------------------------------------------------------------------------
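For reference, a filled-in gluster_infra_tangservers list might look like the following
(the hostnames and the custom port are purely illustrative):

gluster_infra_tangservers:
  - url: http://tang1.example.com:8080
  - url: http://tang2.example.com:8080
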
/playbooks/hc-ansible-deployment/node_prep_inventory.yml:
--------------------------------------------------------------------------------
1 | # Section for Host Preparation Phase
2 | hc_nodes:
3 | hosts:
4 | # Host - The node which needs to be prepared for replacement
5 | :
6 |
7 | # Blacklist multipath devices which are used for gluster bricks
8 | # If you omit blacklist_mpath_devices it means all devices will be whitelisted.
9 | # If the disks are not blacklisted, it is taken that a multipath configuration
10 | # exists on the server and one should provide the /dev/mapper/ path instead of /dev/sdx
11 | blacklist_mpath_devices:
12 | - sdb
13 | - sdc
14 |
15 | # Enable this section 'gluster_infra_vdo', if dedupe & compression is
16 | # required on that storage volume.
17 | # The variables refer to:
18 | # name - VDO volume name to be used
19 | # device - Disk on which the VDO volume is to be created
20 | # logicalsize - Logical size of the VDO volume. This value is 10 times
21 | # the size of the physical disk
22 | # emulate512 - When 'off', the VDO device is created as a 4KB block sized storage volume (4KN)
23 | # slabsize - VDO slab size. If VDO logical size >= 1000G then
24 | # slabsize is 32G else slabsize is 2G
25 | #
26 | # Following VDO values are as per recommendation and treated as constants:
27 | # blockmapcachesize - 128M
28 | # writepolicy - auto
29 | #
30 | # gluster_infra_vdo:
31 | # - { name: 'vdo_sdc', device: '/dev/sdc', logicalsize: '5000G', emulate512: 'off', slabsize: '32G',
32 | # blockmapcachesize: '128M', writepolicy: 'auto' }
33 | # - { name: 'vdo_sdd', device: '/dev/sdd', logicalsize: '3000G', emulate512: 'off', slabsize: '32G',
34 | # blockmapcachesize: '128M', writepolicy: 'auto' }
35 |
36 | # When dedupe and compression are enabled on the device,
37 | # use the pvname for that device as '/dev/mapper/<VDO volume name>'
38 | #
39 | # The variables refer to:
40 | # vgname - VG to be created on the disk
41 | # pvname - Physical disk (/dev/sdc) or VDO volume (/dev/mapper/vdo_sdc)
42 | gluster_infra_volume_groups:
43 | - vgname: gluster_vg_sdb
44 | pvname: /dev/sdb
45 | - vgname: gluster_vg_sdc
46 | pvname: /dev/mapper/vdo_sdc
47 | - vgname: gluster_vg_sdd
48 | pvname: /dev/mapper/vdo_sdd
49 |
50 | gluster_infra_mount_devices:
51 | - path: /gluster_bricks/engine
52 | lvname: gluster_lv_engine
53 | vgname: gluster_vg_sdb
54 | - path: /gluster_bricks/data
55 | lvname: gluster_lv_data
56 | vgname: gluster_vg_sdc
57 | - path: /gluster_bricks/vmstore
58 | lvname: gluster_lv_vmstore
59 | vgname: gluster_vg_sdd
60 |
61 | # 'thinpoolsize' is the sum of sizes of all LVs to be created on that VG
62 | # In the case of VDO enabled, 'thinpoolsize' is 10 times the sum of sizes
63 | # of all LVs to be created on that VG. Recommended values for
64 | # 'poolmetadatasize' is 16GB and that should be considered exclusive of
65 | # 'thinpoolsize'
66 | gluster_infra_thinpools:
67 | - {vgname: 'gluster_vg_sdc', thinpoolname: 'gluster_thinpool_sdc', thinpoolsize: '500G', poolmetadatasize: '16G'}
68 | - {vgname: 'gluster_vg_sdd', thinpoolname: 'gluster_thinpool_sdd', thinpoolsize: '500G', poolmetadatasize: '16G'}
69 |
70 | # Enable the following section if LVM cache is to be enabled
71 | # Following are the variables:
72 | # vgname - VG with the slow HDD device that needs caching
73 | # cachedisk - Comma-separated value of slow HDD and fast SSD
74 | # In this example, /dev/sdb is the slow HDD, /dev/sde is fast SSD
75 | # cachelvname - LV cache name
76 | # cachethinpoolname - Thinpool to which the fast SSD is to be attached
77 | # cachelvsize - Size of cache data LV. This is the SSD_size - (1/1000) of SSD_size
78 | # 1/1000th of SSD space will be used by cache LV meta
79 | # cachemode - writethrough or writeback
80 | # gluster_infra_cache_vars:
81 | # - vgname: gluster_vg_sdb
82 | # cachedisk: /dev/sdb,/dev/sde
83 | # cachelvname: cachelv_thinpool_sdb
84 | # cachethinpoolname: gluster_thinpool_sdb
85 | # cachelvsize: '250G'
86 | # cachemode: writethrough
87 |
88 | # Only the engine brick needs to be thickly provisioned
89 | # Engine brick requires 100GB of disk space
90 | gluster_infra_thick_lvs:
91 | - vgname: gluster_vg_sdb
92 | lvname: gluster_lv_engine
93 | size: 100G
94 |
95 | gluster_infra_lv_logicalvols:
96 | - vgname: gluster_vg_sdc
97 | thinpool: gluster_thinpool_sdc
98 | lvname: gluster_lv_data
99 | lvsize: 200G
100 | - vgname: gluster_vg_sdd
101 | thinpool: gluster_thinpool_sdd
102 | lvname: gluster_lv_vmstore
103 | lvsize: 200G
104 |
105 | # Common configurations
106 | vars:
107 | # In case of IPv6 based deployment "gluster_features_enable_ipv6" needs to be enabled; the below line needs to be uncommented, like:
108 | # gluster_features_enable_ipv6: true
109 |
110 | # Firewall setup
111 | gluster_infra_fw_ports:
112 | - 2049/tcp
113 | - 54321/tcp
114 | - 5900-6923/tcp
115 | - 16514/tcp
116 | - 5666/tcp
117 | - 16514/tcp
118 | gluster_infra_fw_permanent: true
119 | gluster_infra_fw_state: enabled
120 | gluster_infra_fw_zone: public
121 | gluster_infra_fw_services:
122 | - glusterfs
123 | # Allowed values for 'gluster_infra_disktype' - RAID6, RAID5, JBOD
124 | gluster_infra_disktype: RAID6
125 |
126 | # 'gluster_infra_diskcount' is the number of data disks in the RAID set.
127 | # Note for JBOD its 1
128 | gluster_infra_diskcount: 10
129 |
130 | gluster_infra_stripe_unit_size: 256
131 | gluster_features_force_varlogsizecheck: false
132 | gluster_set_selinux_labels: true
133 |
--------------------------------------------------------------------------------
/playbooks/hc-ansible-deployment/node_replace_inventory.yml:
--------------------------------------------------------------------------------
1 | cluster_node:
2 | hosts:
3 | # This is the backend network FQDN of one of the active hosts. It can take the value of one of the hosts in gluster_maintenance_cluster_node.
4 | :
5 |
6 | vars:
7 | gluster_maintenance_old_node:
8 | gluster_maintenance_new_node:
9 | gluster_maintenance_cluster_node:
10 | gluster_maintenance_cluster_node_2:
11 |
12 | # Required for activating the host in RHV/oVirt Manager UI
13 | activate_host: true
14 | ovirt_engine_hostname:
15 | ovirt_engine_password:
16 | ovirt_cluster:
17 | gluster_maintenance_new_node_frontend:
18 |
--------------------------------------------------------------------------------
/playbooks/hc-ansible-deployment/nx3_gluster_inventory.yml:
--------------------------------------------------------------------------------
1 | hc_nodes:
2 | hosts:
3 | host1:
4 | gluster_infra_volume_groups:
5 | - vgname: gluster_vg_sdb
6 | pvname: /dev/sdb
7 | gluster_infra_mount_devices:
8 | - path: /gluster_bricks/engine
9 | lvname: gluster_lv_engine
10 | vgname: gluster_vg_sdb
11 | gluster_infra_thick_lvs:
12 | - vgname: gluster_vg_sdb
13 | lvname: gluster_lv_engine
14 | size: 10G
15 | host2:
16 | gluster_infra_volume_groups:
17 | - vgname: gluster_vg_sdb
18 | pvname: /dev/sdb
19 | gluster_infra_mount_devices:
20 | - path: /gluster_bricks/engine
21 | lvname: gluster_lv_engine
22 | vgname: gluster_vg_sdb
23 | gluster_infra_thick_lvs:
24 | - vgname: gluster_vg_sdb
25 | lvname: gluster_lv_engine
26 | size: 10G
27 |
28 | host3:
29 | gluster_infra_volume_groups:
30 | - vgname: gluster_vg_sdb
31 | pvname: /dev/sdb
32 | gluster_infra_mount_devices:
33 | - path: /gluster_bricks/engine
34 | lvname: gluster_lv_engine
35 | vgname: gluster_vg_sdb
36 | gluster_infra_thick_lvs:
37 | - vgname: gluster_vg_sdb
38 | lvname: gluster_lv_engine
39 | size: 10G
40 |
41 | host4:
42 | gluster_infra_volume_groups:
43 | - vgname: gluster_vg_sdb
44 | pvname: /dev/sdb
45 | gluster_infra_mount_devices:
46 | - path: /gluster_bricks/data
47 | lvname: gluster_lv_data
48 | vgname: gluster_vg_sdb
49 | - path: /gluster_bricks/vmstore
50 | lvname: gluster_lv_vmstore
51 | vgname: gluster_vg_sdb
52 | gluster_infra_thinpools:
53 | - vgname: gluster_vg_sdb
54 | thinpoolname: gluster_thinpool_gluster_vg_sdb
55 | thinpoolsize: 100G
56 | poolmetadatasize: 1G
57 | gluster_infra_lv_logicalvols:
58 | - vgname: gluster_vg_sdb
59 | thinpool: gluster_thinpool_gluster_vg_sdb
60 | lvname: gluster_lv_data
61 | lvsize: 10G
62 | - vgname: gluster_vg_sdb
63 | thinpool: gluster_thinpool_gluster_vg_sdb
64 | lvname: gluster_lv_vmstore
65 | lvsize: 10G
66 |
67 | host5:
68 | gluster_infra_volume_groups:
69 | - vgname: gluster_vg_sdb
70 | pvname: /dev/sdb
71 | gluster_infra_mount_devices:
72 | - path: /gluster_bricks/data
73 | lvname: gluster_lv_data
74 | vgname: gluster_vg_sdb
75 | - path: /gluster_bricks/vmstore
76 | lvname: gluster_lv_vmstore
77 | vgname: gluster_vg_sdb
78 | gluster_infra_thinpools:
79 | - vgname: gluster_vg_sdb
80 | thinpoolname: gluster_thinpool_gluster_vg_sdb
81 | thinpoolsize: 100G
82 | poolmetadatasize: 1G
83 | gluster_infra_lv_logicalvols:
84 | - vgname: gluster_vg_sdb
85 | thinpool: gluster_thinpool_gluster_vg_sdb
86 | lvname: gluster_lv_data
87 | lvsize: 10G
88 | - vgname: gluster_vg_sdb
89 | thinpool: gluster_thinpool_gluster_vg_sdb
90 | lvname: gluster_lv_vmstore
91 | lvsize: 10G
92 |
93 | host6:
94 | gluster_infra_volume_groups:
95 | - vgname: gluster_vg_sdb
96 | pvname: /dev/sdb
97 | gluster_infra_mount_devices:
98 | - path: /gluster_bricks/data
99 | lvname: gluster_lv_data
100 | vgname: gluster_vg_sdb
101 | - path: /gluster_bricks/vmstore
102 | lvname: gluster_lv_vmstore
103 | vgname: gluster_vg_sdb
104 | gluster_infra_thinpools:
105 | - vgname: gluster_vg_sdb
106 | thinpoolname: gluster_thinpool_gluster_vg_sdb
107 | thinpoolsize: 100G
108 | poolmetadatasize: 1G
109 | gluster_infra_lv_logicalvols:
110 | - vgname: gluster_vg_sdb
111 | thinpool: gluster_thinpool_gluster_vg_sdb
112 | lvname: gluster_lv_data
113 | lvsize: 10G
114 | - vgname: gluster_vg_sdb
115 | thinpool: gluster_thinpool_gluster_vg_sdb
116 | lvname: gluster_lv_vmstore
117 | lvsize: 10G
118 |
119 | vars:
120 | gluster_infra_disktype: RAID6
121 | gluster_infra_stripe_unit_size: 256
122 | gluster_infra_diskcount: 12
123 | gluster_set_selinux_labels: true
124 | gluster_infra_fw_ports:
125 | - 2049/tcp
126 | - 54321/tcp
127 | - 5900/tcp
128 | - 5900-6923/tcp
129 | - 5666/tcp
130 | - 16514/tcp
131 | gluster_infra_fw_permanent: true
132 | gluster_infra_fw_state: enabled
133 | gluster_infra_fw_zone: public
134 | gluster_infra_fw_services:
135 | - glusterfs
136 | gluster_features_force_varlogsizecheck: false
137 | cluster_nodes:
138 | - host1
139 | - host2
140 | - host3
141 | - host4
142 | - host5
143 | - host6
144 | gluster_features_hci_cluster: '{{ cluster_nodes }}'
145 |
146 | gluster_features_hci_volumes:
147 | - volname: engine
148 | brick: /gluster_bricks/engine/engine
149 | arbiter: 0
150 | servers:
151 | - host1
152 | - host2
153 | - host3
154 | - volname: data
155 | brick: /gluster_bricks/data/data
156 | arbiter: 0
157 | servers:
158 | - host4
159 | - host5
160 | - host6
161 | - volname: vmstore
162 | brick: /gluster_bricks/vmstore/vmstore
163 | arbiter: 1
164 | servers:
165 | - host4
166 | - host5
167 | - host6
168 |
--------------------------------------------------------------------------------
/playbooks/hc-ansible-deployment/reconfigure_he_storage_inventory.yml:
--------------------------------------------------------------------------------
1 | all:
2 | hosts:
3 | # Include only the active hosts in the cluster deployed with hosted-engine --deploy
4 | # Do not include old or new hosts in this cluster
5 | :
6 | :
7 |
8 | vars:
9 | gluster_maintenance_old_node:
10 | gluster_maintenance_new_node:
11 | ovirt_engine_hostname:
12 |
--------------------------------------------------------------------------------
/playbooks/hc-ansible-deployment/single_node_gluster_inventory.yml:
--------------------------------------------------------------------------------
1 | hc_nodes:
2 | hosts:
3 | # Host1
4 | :
5 |
6 | # Blacklist multipath devices which are used for gluster bricks
7 | # If you omit blacklist_mpath_devices it means all devices will be whitelisted.
8 | # If the disks are not blacklisted, it is taken that a multipath configuration
9 | # exists on the server and one should provide the /dev/mapper/ path instead of /dev/sdx
10 | blacklist_mpath_devices:
11 | - sdb
12 | - sdc
13 |
14 | # Enable this section 'gluster_infra_vdo', if dedupe & compression is
15 | # required on that storage volume.
16 | # The variables refer to:
17 | # name - VDO volume name to be used
18 | # device - Disk on which the VDO volume is to be created
19 | # logicalsize - Logical size of the VDO volume. This value is 10 times
20 | # the size of the physical disk
21 | # emulate512 - When 'off', the VDO device is created as a 4KB block sized storage volume (4KN)
22 | # slabsize - VDO slab size. If VDO logical size >= 1000G then
23 | # slabsize is 32G else slabsize is 2G
24 | #
25 | # Following VDO values are as per recommendation and treated as constants:
26 | # blockmapcachesize - 128M
27 | # writepolicy - auto
28 | #
29 | # gluster_infra_vdo:
30 | # - { name: 'vdo_sdc', device: '/dev/sdc', logicalsize: '5000G', emulate512: 'off', slabsize: '32G',
31 | # blockmapcachesize: '128M', writepolicy: 'auto' }
32 | # - { name: 'vdo_sdd', device: '/dev/sdd', logicalsize: '3000G', emulate512: 'off', slabsize: '32G',
33 | # blockmapcachesize: '128M', writepolicy: 'auto' }
34 |
35 | # When dedupe and compression are enabled on the device,
36 | # use the pvname for that device as '/dev/mapper/<VDO volume name>'
37 | #
38 | # The variables refer to:
39 | # vgname - VG to be created on the disk
40 | # pvname - Physical disk (/dev/sdc) or VDO volume (/dev/mapper/vdo_sdc)
41 | gluster_infra_volume_groups:
42 | - vgname: gluster_vg_sdb
43 | pvname: /dev/sdb
44 | - vgname: gluster_vg_sdc
45 | pvname: /dev/mapper/vdo_sdc
46 | - vgname: gluster_vg_sdd
47 | pvname: /dev/mapper/vdo_sdd
48 |
49 | gluster_infra_mount_devices:
50 | - path: /gluster_bricks/engine
51 | lvname: gluster_lv_engine
52 | vgname: gluster_vg_sdb
53 | - path: /gluster_bricks/data
54 | lvname: gluster_lv_data
55 | vgname: gluster_vg_sdc
56 | - path: /gluster_bricks/vmstore
57 | lvname: gluster_lv_vmstore
58 | vgname: gluster_vg_sdd
59 |
60 | # 'thinpoolsize' is the sum of sizes of all LVs to be created on that VG
61 | # In the case of VDO enabled, 'thinpoolsize' is 10 times the sum of sizes
62 | # of all LVs to be created on that VG. Recommended values for
63 | # 'poolmetadatasize' is 16GB and that should be considered exclusive of
64 | # 'thinpoolsize'
65 | gluster_infra_thinpools:
66 | - {vgname: 'gluster_vg_sdc', thinpoolname: 'gluster_thinpool_sdc', thinpoolsize: '500G', poolmetadatasize: '16G'}
67 | - {vgname: 'gluster_vg_sdd', thinpoolname: 'gluster_thinpool_sdd', thinpoolsize: '500G', poolmetadatasize: '16G'}
68 |
69 | # Enable the following section if LVM cache is to be enabled
70 | # Following are the variables:
71 | # vgname - VG with the slow HDD device that needs caching
72 | # cachedisk - Comma-separated value of slow HDD and fast SSD
73 | # In this example, /dev/sdb is the slow HDD, /dev/sde is fast SSD
74 | # cachelvname - LV cache name
75 | # cachethinpoolname - Thinpool to which the fast SSD is to be attached
76 | # cachelvsize - Size of cache data LV. This is the SSD_size - (1/1000) of SSD_size
77 | # 1/1000th of SSD space will be used by cache LV meta
78 | # cachemode - writethrough or writeback
79 | # gluster_infra_cache_vars:
80 | # - vgname: gluster_vg_sdb
81 | # cachedisk: /dev/sdb,/dev/sde
82 | # cachelvname: cachelv_thinpool_sdb
83 | # cachethinpoolname: gluster_thinpool_sdb
84 | # cachelvsize: '250G'
85 | # cachemode: writethrough
86 |
87 | # Only the engine brick needs to be thickly provisioned
88 | # Engine brick requires 100GB of disk space
89 | gluster_infra_thick_lvs:
90 | - vgname: gluster_vg_sdb
91 | lvname: gluster_lv_engine
92 | size: 100G
93 |
94 | gluster_infra_lv_logicalvols:
95 | - vgname: gluster_vg_sdc
96 | thinpool: gluster_thinpool_sdc
97 | lvname: gluster_lv_data
98 | lvsize: 200G
99 | - vgname: gluster_vg_sdd
100 | thinpool: gluster_thinpool_sdd
101 | lvname: gluster_lv_vmstore
102 | lvsize: 200G
103 |
104 | # Common configurations
105 | vars:
106 | # In case of IPv6 based deployment "gluster_features_enable_ipv6" needs to be enabled; the below line needs to be uncommented, like:
107 | # gluster_features_enable_ipv6: true
108 |
109 | cluster_nodes:
110 | -
111 | gluster_features_hci_volume_options:
112 | { group: 'distributed-virt',
113 | storage.owner-uid: '36',
114 | storage.owner-gid: '36',
115 | performance.strict-o-direct: 'on',
116 | network.remote-dio: 'off',
117 | network.ping-timeout: '20',
118 | }
119 |
120 | gluster_features_hci_cluster: "{{ cluster_nodes }}"
121 | gluster_features_hci_volumes:
122 | - volname: engine
123 | brick: /gluster_bricks/engine/engine
124 | - volname: data
125 | brick: /gluster_bricks/data/data
126 | - volname: vmstore
127 | brick: /gluster_bricks/vmstore/vmstore
128 |
129 | # Firewall setup
130 | gluster_infra_fw_ports:
131 | - 2049/tcp
132 | - 54321/tcp
133 | - 5900-6923/tcp
134 | - 16514/tcp
135 | - 5666/tcp
136 | - 16514/tcp
137 | gluster_infra_fw_permanent: true
138 | gluster_infra_fw_state: enabled
139 | gluster_infra_fw_zone: public
140 | gluster_infra_fw_services:
141 | - glusterfs
142 | # Allowed values for 'gluster_infra_disktype' - RAID6, RAID5, JBOD
143 | gluster_infra_disktype: JBOD
144 |
145 | # 'gluster_infra_diskcount' is the number of data disks in the RAID set.
146 | # Note for JBOD its 1
147 | gluster_infra_diskcount: 1
148 |
149 | gluster_infra_stripe_unit_size: 256
150 | gluster_features_force_varlogsizecheck: false
151 | gluster_set_selinux_labels: true
152 |
153 | ## Auto add storage domain vars
154 | gluster:
155 | hosts:
156 | :
157 |
158 | vars:
159 | storage_domains:
160 | - {"name":"data","host":"host1-frontend-network-fqdn","address":"host1-backend-network-fqdn","path":"/data","function":"data","mount_options":""}
161 | - {"name":"vmstore","host":"host1-frontend-network-fqdn","address":"host1-backend-network-fqdn","path":"/vmstore","function":"data","mount_options":""}
162 |
--------------------------------------------------------------------------------
/playbooks/hc-ansible-deployment/tasks/add_hosts_storage_domains.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | tasks:
4 | - name: Set Engine public key as authorized key without validating the TLS/SSL certificates
5 | connection: ssh
6 | authorized_key:
7 | user: root
8 | state: present
9 | # The certificate has to be verified by the user for its authenticity
10 | key: https://{{ he_fqdn }}/ovirt-engine/services/pki-resource?resource=engine-certificate&format=OPENSSH-PUBKEY
11 | validate_certs: False
12 | delegate_to: "{{ host }}"
13 | with_items: "{{ groups['gluster'] }}"
14 | loop_control:
15 | loop_var: host
16 | when: "'gluster' in groups"
17 |
18 | - name: Add additional gluster hosts to engine
19 | async: 50
20 | poll: 0
21 | ignore_errors: true
22 | ovirt_host:
23 | cluster: "{{ he_cluster }}"
24 | name: "{{ host }}"
25 | address: "{{ host }}"
26 | state: present
27 | public_key: true
28 | auth: "{{ ovirt_auth }}"
29 | hosted_engine: deploy
30 | with_items: "{{ groups['gluster'] }}"
31 | loop_control:
32 | loop_var: host
33 | when:
34 | - "'gluster' in groups"
35 | - add_additional_gluster_hosts | default(true)
36 |
37 | - name: "Add additional glusterfs storage domains"
38 | ignore_errors: true
39 | ovirt_storage_domain:
40 | name: "{{ sd.name }}"
41 | domain_function: "{{ sd.function }}"
42 | host: "{{ he_host_name }}"
43 | auth: "{{ ovirt_auth }}"
44 | data_center: "{{ datacenter_name }}"
45 | glusterfs:
46 | address: "{{ he_storage_domain_addr }}"
47 | mount_options: "{{ sd.mount_options }}"
48 | path: "{{ sd.path }}"
49 | with_items: "{{ hostvars[groups['gluster'][0]]['storage_domains'] }}"
50 | loop_control:
51 | loop_var: sd
52 | when: "'gluster' in groups"
53 |
--------------------------------------------------------------------------------
/playbooks/hc-ansible-deployment/tasks/backup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | tasks:
4 | - name: Check if backup dir is already available
5 | stat:
6 | path: "{{ backup_dir }}"
7 | register: result
8 |
9 | - fail:
10 | msg: Backup directory "{{backup_dir}}" exists, remove it and retry
11 | when: result.stat.isdir is defined
12 |
13 | - name: Create temporary backup directory
14 | file:
15 | path: "{{ backup_dir }}"
16 | state: directory
17 |
18 | - name: Get the hostname
19 | shell: uname -n
20 | register: hostname
21 |
22 | - name: Add hostname details to archive
23 | shell: echo {{ hostname.stdout }} > {{ backup_dir }}/hostname
24 |
25 | - name: Dump the IP configuration details
26 | shell: ip addr show > {{ backup_dir }}/ipconfig
27 |
28 | - name: Dump the IPv4 routing information
29 | shell: ip route > {{ backup_dir }}/ip4route
30 |
31 | - name: Dump the IPv6 routing information
32 | shell: ip -6 route > {{ backup_dir }}/ip6route
33 |
34 | - name: Get the disk layout information
35 | shell: lsblk > {{ backup_dir }}/lsblk
36 |
37 | - name: Get the mount information for reference
38 | shell: df -Th > {{ backup_dir }}/mount
39 |
40 | - name: Check for VDO configuration
41 | stat:
42 | path: /etc/vdoconf.yml
43 | register: vdoconfstat
44 |
45 | - name: Copy VDO configuration, if available
46 | shell: cp -a /etc/vdoconf.yml "{{backup_dir}}"
47 | when: vdoconfstat.stat.isreg is defined
48 |
49 | - name: Backup fstab
50 | shell: cp -a /etc/fstab "{{backup_dir}}"
51 |
52 | - name: Backup /etc/ssh directory
53 | shell: cp -a /etc/ssh "{{backup_dir}}"
54 |
55 | - name: Backup glusterd config directory
56 | shell: cp -a /var/lib/glusterd "{{backup_dir}}"
57 |
58 | - name: Backup /etc/crypttab, if NBDE is enabled
59 | shell: cp -a /etc/crypttab "{{ backup_dir }}"
60 | when: nbde_setup is defined and nbde_setup
61 |
62 | - name: Backup keyfiles used for LUKS decryption
63 | shell: cp -a /etc/sd*keyfile "{{ backup_dir }}"
64 | when: nbde_setup is defined and nbde_setup
65 |
66 | - name: Check for the inventory file generated from cockpit
67 | stat:
68 | path: /etc/ansible/hc_wizard_inventory.yml
69 | register: inventory
70 |
71 | - name: Copy the host inventory file generated from cockpit
72 | shell: cp /etc/ansible/hc_wizard_inventory.yml {{ backup_dir }}
73 | when: inventory.stat.isreg is defined
74 |
75 | - name: Create a tar.gz with all the contents
76 | archive:
77 | path: "{{ backup_dir }}/*"
78 | dest: /root/rhvh-node-{{ hostname.stdout }}-backup.tar.gz
79 |
--------------------------------------------------------------------------------
/playbooks/hc-ansible-deployment/tasks/gluster_cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Remove gluster setup
3 | hosts: hc_nodes
4 | remote_user: root
5 | gather_facts: False
6 | ignore_errors: True
7 | tasks:
8 |
9 | - name: Stop Gluster volume(s)
10 | ansible.builtin.expect:
11 | command: gluster volume stop {{ item.volname }} force
12 | responses:
13 | (.*)Do you want to continue?(.*): "y"
14 | run_once: True
15 | with_items: "{{ gluster_features_hci_volumes }}"
16 | when: gluster_features_hci_volumes is defined
17 |
18 | - name: Delete Gluster volume(s)
19 | ansible.builtin.expect:
20 | command: gluster volume delete {{ item.volname }}
21 | responses:
22 | (.*)Do you want to continue?(.*): "y"
23 | run_once: True
24 | with_items: "{{ gluster_features_hci_volumes }}"
25 | when: gluster_features_hci_volumes is defined
26 | tags:
27 | - delete_volumes
28 |
29 | #- name: Delete the volumes
30 | # gluster_volume:
31 | # state: absent
32 | # name: "{{ item.volname }}"
33 | # run_once: True
34 | # with_items: "{{ gluster_features_hci_volumes }}"
35 | # when: gluster_features_hci_volumes is defined
36 | # tags:
37 | # - delete_volumes
38 |
39 | # Remove the brick directories
40 | - name: Remove brick directories
41 | file:
42 | state: absent
43 | path: "{{ item.brick }}"
44 | with_items: "{{ gluster_features_hci_volumes }}"
45 | when: gluster_features_hci_volumes is defined
46 |
47 | - name: Unmount the disks
48 | mount:
49 | state: absent
50 | path: "{{ item.path }}"
51 | with_items: "{{ gluster_infra_mount_devices }}"
52 |
53 | - name: Wipe filesystem from LVs
54 | shell: wipefs -a /dev/{{ item.vgname }}/{{ item.lvname}}
55 | register: shell_output
56 | changed_when: shell_output.rc == 0
57 | failed_when: False
58 | with_items: "{{ gluster_infra_mount_devices }}"
59 |
60 | - name: Delete volume groups
61 | command: vgremove {{ item.vgname }} -y
62 | # lvg:
63 | # vg: "{{ item.vgname }}"
64 | # state: absent
65 | # force: yes
66 | with_items: "{{ gluster_infra_volume_groups }}"
67 | when: gluster_infra_volume_groups is defined
68 |
69 | - name: Remove PV
70 | shell: pvremove {{ item.pvname }} -f
71 | register: shell_output
72 | changed_when: shell_output.rc == 0
73 | with_items: "{{ gluster_infra_volume_groups }}"
74 | when: gluster_infra_volume_groups is defined
75 | failed_when: False
76 |
77 | - name: Remove Cache PV
78 | shell: pvremove {{ item.cachedisk }} -f
79 | register: shell_output
80 | changed_when: shell_output.rc == 0
81 | with_items: "{{ gluster_infra_cache_vars }}"
82 | when: gluster_infra_cache_vars is defined
83 | failed_when: False
84 |
85 | # Remove vdo devices if any
86 | - name: Remove VDO devices
87 | command: "vdo remove -n {{ item.name }} --force"
88 | #vdo:
89 | # name: "{{ item.name }}"
90 | # state: absent
91 | with_items: "{{ gluster_infra_vdo }}"
92 | when: gluster_infra_vdo is defined
93 |
94 | - name: Remove ansibleStatus file
95 | file:
96 | path: /usr/share/cockpit/ovirt-dashboard/ansible/ansibleStatus.conf
97 | state: absent
98 |
99 | - name: Get the list of hosts to be detached
100 | shell: gluster pool list | egrep -vw '(localhost|Hostname)' | awk '{print $2}'
101 | register: peernodes
102 | run_once: true
103 |
104 | - name: Delete a node from the trusted storage pool
105 | command: gluster peer detach {{ item }} --mode=script
106 | with_items: "{{ cluster_nodes }}"
107 | when: item != inventory_hostname and peernodes.stdout_lines|length > 0
108 | run_once: true
109 |
110 | - name: Remove specified device from blacklist
111 | blockinfile:
112 | path: /etc/multipath/conf.d/blacklist.conf
113 | marker: "# {mark} ANSIBLE MANAGED BLOCK {{ item }}"
114 | content: ""
115 | with_items: "{{ blacklist_mpath_devices }}"
116 | when: blacklist_mpath_devices is defined
117 |
118 | - name: Wipe devices
119 | shell: wipefs -a /dev/{{ item }}
120 | register: shell_output
121 | changed_when: shell_output.rc == 0
122 | failed_when: False
123 | with_items: "{{ blacklist_mpath_devices }}"
124 |
125 | - name: Reload multipathd
126 | shell: systemctl reload multipathd.service
127 | failed_when: False
128 | when: blacklist_mpath_devices is defined
129 |
130 | tags:
131 | - cleanup_bricks
132 |
--------------------------------------------------------------------------------
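Note: the volume stop/delete tasks above use the ansible.builtin.expect module, which
requires the pexpect Python library on the managed hosts. If it is missing, install it
first; the package name varies by distribution, for example:
# yum install python3-pexpect
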
/playbooks/hc-ansible-deployment/tasks/gluster_deployment.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # Gluster deployment start here
4 | - name: Setup backend and deploy HCI
5 | hosts: hc_nodes
6 | remote_user: root
7 | gather_facts: yes
8 | any_errors_fatal: true
9 |
10 | # We are moving some of the prerequisite checks from gluster.features role so
11 | # that we fail early even before backend setup
12 |
13 | pre_tasks:
14 | # Check if the hosts have valid hostnames
15 | - name: Check if valid hostnames are provided
16 | command:
17 | args:
18 | argv:
19 | - getent
20 | - ahosts
21 | - "{{ item }}"
22 | register: result
23 | with_items: "{{ groups['all'] }}"
24 | when: gluster_features_fqdn_check | default(true)
25 | delegate_to: localhost
26 | run_once: true
27 |
28 | - name: Check if provided hostnames are valid
29 | assert:
30 | that:
31 | - "result.results[0]['rc'] == 0"
32 | - "result.results[0]['stdout_lines'] | length > 0"
33 | fail_msg: "The given hostname is not a valid FQDN"
34 | when: gluster_features_fqdn_check | default(true)
35 |
36 | # Check if /var has enough disk. We check for 15G minimum.
37 | - name: Check if /var/log has enough disk space
38 | shell: df -m /var/log | awk '/[0-9]%/ {print $4}'
39 | register: disk_size
40 | when: gluster_features_force_varlogsizecheck | default(true)
41 |
42 | - name: "Check if the /var is greater than 15G"
43 | assert:
44 | that:
45 | - "disk_size.stdout|int > gluster_features_min_disk|default(15000)"
46 | fail_msg: "The size of /var should be greater than or equal to 15G"
47 | when: gluster_features_force_varlogsizecheck | default(true)
48 |
49 |
50 | - name: Check if block device is 512B
51 | shell: >
52 | blockdev --getss {{ item.pvname }} | grep -Po -q "512" && echo true || echo false
53 | register: is512
54 | with_items: "{{ gluster_infra_volume_groups }}"
55 |
56 | - name: Check if block device is 4KN
57 | shell: >
58 | blockdev --getss {{ item.pvname }} | grep -Po -q "4096" && echo true || echo false
59 | register: is4KN
60 | with_items: "{{ gluster_infra_volume_groups }}"
61 |
62 | - fail:
63 | msg: "Mix of 4K and 512 Block devices are not allowed"
64 | with_nested:
65 | - "{{ is512.results }}"
66 | - "{{ is4KN.results }}"
67 | when: item[0].stdout|bool and item[1].stdout|bool
68 |
69 |
70 | # oVirt currently requires the disks to have a logical block size of
71 | # 512 bytes. To disable the check set gluster_features_512B_check to
72 | # false. DELETE the below task once the oVirt limitation is fixed
73 | - name: Check if disks have logical block size of 512B
74 | command: blockdev --getss {{ item.pvname }}
75 | register: logical_blk_size
76 | when: gluster_infra_volume_groups is defined and
77 | item.pvname is not search("/dev/mapper") and
78 | gluster_features_512B_check|default(true)
79 | with_items: "{{ gluster_infra_volume_groups }}"
80 |
81 | - name: Check if logical block size is 512 bytes
82 | assert:
83 | that:
84 | - "item.stdout|int == 512"
85 | fail_msg: "The logical block size of disk is not 512 bytes"
86 | when: gluster_infra_volume_groups is defined and
87 | item.stdout is defined and
88 | gluster_features_512B_check|default(true)
89 | loop: "{{ logical_blk_size.results }}"
90 | loop_control:
91 | label: "Logical Block Size"
92 |
93 | - name: Check logical block size of VDO devices
94 | command: blockdev --getss {{ item.device }}
95 | register: logical_blk_size
96 | when: gluster_infra_vdo is defined and
97 | gluster_features_512B_check|default(true)
98 | with_items: "{{ gluster_infra_vdo }}"
99 |
100 | - name: Check if logical block size is 512 bytes
101 | assert:
102 | that:
103 | - "item.stdout|int == 512"
104 | fail_msg: "The logical block size of disk is not 512 bytes"
105 | when: gluster_infra_vdo is defined and
106 | item.stdout is defined and
107 | gluster_features_512B_check|default(true)
108 | loop: "{{ logical_blk_size.results }}"
109 | loop_control:
110 | label: "Logical Block Size"
111 |
112 | roles:
113 | - gluster.infra
114 | - gluster.features
115 |
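116 | # Illustrative sketch (not part of the deployment logic) of the backend
117 | # variables the pre-flight checks above consume; see gluster_inventory.yml
118 | # for the authoritative format. The device and VG names are assumptions:
119 | #
120 | #   gluster_infra_volume_groups:
121 | #     - vgname: gluster_vg_sdb
122 | #       pvname: /dev/sdb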
--------------------------------------------------------------------------------
/playbooks/hc-ansible-deployment/tasks/gluster_network_setup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | tasks:
4 |     - name: Obtain SSO token using username/password credentials
5 | ovirt_auth:
6 | url: https://{{ he_fqdn }}/ovirt-engine/api
7 | username: admin@internal
8 | password: "{{ he_admin_password }}"
9 |
10 | - name: Create oVirt gluster network
11 | ovirt_network:
12 | data_center: "{{ datacenter_name }}"
13 | name: gluster_net
14 | vm_network: false
15 | auth: "{{ ovirt_auth }}"
16 | mtu: "{{ mtu_value | default(1500) }}"
17 | clusters:
18 | - name: "{{ cluster_name }}"
19 | assigned: true
20 | required: false
21 | gluster: true
22 | migration: true
23 |
24 |     - name: Attach gluster_net to hosts with DHCP configuration
25 | ovirt_host_network:
26 | auth: "{{ ovirt_auth }}"
27 | name: "{{ item.host }}"
28 | save: true
29 | check: true
30 | interface: "{{ item.interface }}"
31 | networks:
32 | - name: gluster_net
33 | boot_protocol: "{{boot_protocol }}"
34 | version: "{{version | default('v4')}}"
35 | with_items: "{{ cluster_nodes }}"
36 | when: cluster_nodes is defined and boot_protocol == "dhcp"
37 |
38 |     - name: Attach gluster_net to hosts with static IP configuration
39 | ovirt_host_network:
40 | auth: "{{ ovirt_auth }}"
41 | name: "{{ item.host }}"
42 | save: true
43 | check: true
44 | interface: "{{ item.interface }}"
45 | networks:
46 | - name: gluster_net
47 | boot_protocol: "{{ boot_protocol }}"
48 | version: "{{ version | default('v4') }}"
49 | address: "{{ item.address }}"
50 | netmask: "{{ item.netmask }}"
51 | gateway: "{{ item.gateway }}"
52 | with_items: "{{ cluster_nodes }}"
53 | when: cluster_nodes is defined and boot_protocol == "static"
54 |
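55 | # Illustrative sketch of the inventory variables this play consumes; see
56 | # gluster_network_inventory.yml for the authoritative format. Hostnames and
57 | # addresses below are assumptions:
58 | #
59 | #   boot_protocol: static        # or "dhcp"
60 | #   version: v4
61 | #   mtu_value: 9000
62 | #   cluster_nodes:
63 | #     - host: host1.example.com
64 | #       interface: eth1
65 | #       address: 10.0.0.1
66 | #       netmask: 255.255.255.0
67 | #       gateway: 10.0.0.254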
--------------------------------------------------------------------------------
/playbooks/hc-ansible-deployment/tasks/he_deployment.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Deploy oVirt hosted engine
3 | hosts: localhost
4 | roles:
5 | - role: ovirt.ovirt.hosted_engine_setup
6 |
7 | - import_playbook: add_hosts_storage_domains.yml
8 |
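9 | # A hypothetical invocation, assuming the hosted-engine variables are kept in
10 | # the he_gluster_vars.json file shipped alongside these playbooks:
11 | #
12 | #   ansible-playbook -i ../gluster_inventory.yml he_deployment.yml \
13 | #     --extra-vars='@../he_gluster_vars.json'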
--------------------------------------------------------------------------------
/playbooks/hc-ansible-deployment/tasks/luks_device_cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Remove Multipath and NBDE setup
3 | hosts: hc_nodes
4 | remote_user: root
5 | gather_facts: False
6 | ignore_errors: True
7 | tasks:
8 |     - name: Remove the attached key files from the LUKS devices
9 | no_log: true
10 | shell: cryptsetup luksRemoveKey {{ item.devicename }} /etc/{{ item.devicename.split('/')[-1] }}_keyfile
11 | with_items: "{{ gluster_infra_luks_devices }}"
12 | when: gluster_infra_luks_devices is defined
13 |
14 |     - name: Erase all key slots on the LUKS devices
15 | no_log: true
16 | shell: echo 'YES' | cryptsetup erase {{ item.devicename }}
17 | with_items: "{{ gluster_infra_luks_devices }}"
18 | when: gluster_infra_luks_devices is defined
19 |
20 |     - name: Close the LUKS device mapping
21 | no_log: true
22 | shell: cryptsetup remove luks_{{ item.devicename.split('/')[-1] }}
23 | with_items: "{{ gluster_infra_luks_devices }}"
24 | when: gluster_infra_luks_devices is defined
25 |
26 |     - name: Delete the key files for the LUKS devices
27 | no_log: true
28 | file:
29 | path: /etc/{{ item.devicename.split('/')[-1] }}_keyfile
30 | state: absent
31 | with_items: "{{ gluster_infra_luks_devices }}"
32 | when: gluster_infra_luks_devices is defined
33 |
34 | - name: Remove specified device info from /etc/crypttab
35 | blockinfile:
36 | path: /etc/crypttab
37 | marker: "# {mark} Entry for {{ item.devicename }}"
38 | content: ""
39 | with_items: "{{ gluster_infra_luks_devices }}"
40 | when: gluster_infra_luks_devices is defined
41 |
42 |     - name: Erase all signatures from the device with wipefs
43 | shell: wipefs -a {{ item.devicename }}
44 | with_items: "{{ gluster_infra_luks_devices }}"
45 | when: gluster_infra_luks_devices is defined
46 |
47 |     - name: Find Clevis-bound key slots on the root device
48 | shell: clevis-luks-list -d {{ rootdevice }} | cut -d ':' -f1
49 | register: result
50 | when: gluster_infra_tangservers is defined
51 |
52 |     - name: Unbind the Tang server with Clevis
53 | shell: clevis-luks-unbind -d {{ rootdevice }} -s {{ item }} -f
54 | with_items: "{{ result['stdout_lines'] }}"
55 | when: gluster_infra_tangservers is defined and result['stdout_lines'] | length > 0
56 |
57 | - name: Remove /etc/dracut.conf.d/clevis.conf
58 | file:
59 | path: /etc/dracut.conf.d/clevis.conf
60 | state: absent
61 | when: gluster_infra_tangservers is defined
62 |
63 |     - name: Regenerate the initramfs with dracut to drop the Tang configuration
64 | command: dracut -vf --regenerate-all
65 | when: gluster_infra_tangservers is defined
66 |
67 | - name: Remove specified device from blacklist
68 | blockinfile:
69 | path: /etc/multipath/conf.d/blacklist.conf
70 | marker: "# {mark} ANSIBLE MANAGED BLOCK {{ item }}"
71 | content: ""
72 | with_items: "{{ blacklist_mpath_devices }}"
73 | when: blacklist_mpath_devices is defined
74 |
75 | - name: Reload multipathd
76 | shell: systemctl reload multipathd.service
77 | failed_when: False
78 | when: blacklist_mpath_devices is defined
79 |
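80 | # Illustrative example of the naming convention the tasks above assume: for a
81 | # device entry with devicename /dev/sdb, the key file is expected at
82 | # /etc/sdb_keyfile and the device-mapper name is luks_sdb, e.g.
83 | #
84 | #   gluster_infra_luks_devices:
85 | #     - devicename: /dev/sdb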
--------------------------------------------------------------------------------
/playbooks/hc-ansible-deployment/tasks/luks_tang_setup.yml:
--------------------------------------------------------------------------------
1 | # Configure clevis and tang for HCI
2 | # ansible-playbook -i inventory.yml luks_tang_setup.yml --tags luksencrypt,bindtang --ask-vault-pass
3 | # To blacklist multipath devices as well, add the "blacklistdevices" tag:
4 | # ansible-playbook -i inventory.yml luks_tang_setup.yml --tags blacklistdevices,luksencrypt,bindtang --ask-vault-pass
5 | - name: Encrypt devices using LUKS and bind Tang servers
6 | hosts: hc_nodes
7 | remote_user: root
8 | gather_facts: no
9 | any_errors_fatal: true
10 |
11 | pre_tasks:
12 | # Check if root device is encrypted
13 |     - name: Check whether the root device is a LUKS device
14 | shell: cryptsetup isLuks {{ rootdevice }}
15 | register: result
16 | ignore_errors: True
17 | when: gluster_infra_tangservers is defined
18 | run_once: true
19 | tags:
20 | - bindtang
21 |
22 |     - name: Fail if the root device is not encrypted
23 |       fail:
24 |         msg: "The given root device {{ rootdevice }} is not encrypted."
25 |       when: gluster_infra_tangservers is defined and result is failed
26 |       tags:
27 |         - bindtang
28 |
29 | roles:
30 | - gluster.infra
31 |
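32 | # Illustrative sketch of the variables this play relies on; see
33 | # luks_tang_inventory.yml for the authoritative format. The Tang URL and
34 | # device name below are assumptions:
35 | #
36 | #   rootdevice: /dev/sda2
37 | #   gluster_infra_tangservers:
38 | #     - url: http://tang1.example.com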
--------------------------------------------------------------------------------
/playbooks/hc-ansible-deployment/tasks/reconfigure_he_storage.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | tasks:
4 |     - name: Set the HostedEngine to global maintenance
5 | run_once: true
6 | shell: hosted-engine --set-maintenance --mode=global
7 | delegate_to: localhost
8 |
9 | - name: Stop the ovirt-engine service
10 | connection: ssh
11 | run_once: true
12 | service:
13 | name: ovirt-engine
14 | state: stopped
15 | delegate_to: "{{ ovirt_engine_hostname }}"
16 |
17 |     - name: Update the engine DB with the new hosted-engine storage server
18 | connection: ssh
19 | run_once: true
20 | shell: >
21 | /usr/share/ovirt-engine/dbscripts/engine-psql.sh -c "UPDATE
22 | storage_server_connections SET connection = '{{gluster_maintenance_new_node}}:/engine'
23 | WHERE id = (SELECT storage FROM storage_domains WHERE is_hosted_engine_storage = 't');"
24 | delegate_to: "{{ ovirt_engine_hostname }}"
25 |
26 | - name: Shutdown the HostedEngine VM
27 | shell: hosted-engine --vm-shutdown
28 | ignore_errors: true
29 |
30 |     - name: Wait for 10 seconds for the HostedEngine VM to shut down
31 | wait_for:
32 | timeout: 10
33 |
34 | - name: Set the shared storage config locally
35 | shell: hosted-engine --set-shared-config storage {{ gluster_maintenance_new_node }}:/engine --type=he_local
36 |
37 | - name: Set the shared storage config global
38 | shell: hosted-engine --set-shared-config storage {{ gluster_maintenance_new_node }}:/engine --type=he_shared
39 | run_once: true
40 | delegate_to: localhost
41 |
42 | - name: Disconnect storage on all the hosts
43 | shell: hosted-engine --disconnect-storage
44 |
45 | - name: Stop ovirt HA agent
46 | service:
47 | name: ovirt-ha-agent
48 | state: stopped
49 |
50 | - name: Stop ovirt HA broker
51 | service:
52 | name: ovirt-ha-broker
53 | state: stopped
54 |
55 | - name: Stop vdsm service
56 | ignore_errors: true
57 | service:
58 | name: vdsmd
59 | state: stopped
60 |
61 |     - name: Shut down the sanlock daemon
62 | shell: sanlock client shutdown -f 1
63 |
64 | - name: Unmount the engine fuse mount
65 | shell: umount /rhev/data-center/mnt/glusterSD/{{ gluster_maintenance_old_node }}:_engine
66 |
67 |     - name: Check for any remaining engine fuse mounts
68 | shell: df -Th | grep fuse.glusterfs | grep engine | wc -l
69 | register: count
70 |
71 | - name: Lazy unmount the engine fuse mount
72 | shell: umount -l /rhev/data-center/mnt/glusterSD/{{ gluster_maintenance_old_node }}:_engine
73 | when: count.stdout|int > 0
74 |
75 | - name: Delete the old engine storage domain mount directory
76 | file:
77 | path: /rhev/data-center/mnt/glusterSD/{{ gluster_maintenance_old_node }}:_engine
78 | state: absent
79 |
80 |     - name: Create the new engine storage domain mount directory
81 | file:
82 | path: /rhev/data-center/mnt/glusterSD/{{ gluster_maintenance_new_node }}:_engine
83 | state: directory
84 |
85 |     - name: Unlink all the vdsm storage links
86 |       shell: find /var/run/vdsm/storage -type l -delete
87 |
88 | - name: Replace old server name with new server name
89 | replace:
90 | path: /etc/ovirt-hosted-engine/hosted-engine.conf
91 | regexp: "{{ gluster_maintenance_old_node }}"
92 | replace: "{{ gluster_maintenance_new_node }}"
93 |
94 | - name: Start vdsm
95 | service:
96 | name: vdsmd
97 | state: started
98 |
99 |     - name: Connect the storage
100 | shell: hosted-engine --connect-storage
101 |
102 | - name: Start ovirt HA agent
103 | service:
104 | name: ovirt-ha-agent
105 | state: started
106 |
107 | - name: Start ovirt HA broker
108 | service:
109 | name: ovirt-ha-broker
110 | state: started
111 |
112 |     - name: Wait for 3 minutes for the HA agents to sync
113 | wait_for:
114 | timeout: 180
115 |
116 | - name: Remove Global maintenance
117 | shell: hosted-engine --set-maintenance --mode=none
118 | run_once: true
119 | delegate_to: localhost
120 |
121 |     - name: Start the HostedEngine VM
122 | shell: hosted-engine --vm-start
123 | run_once: true
124 | delegate_to: localhost
125 |
126 |     - name: Wait for 1 minute for the HostedEngine VM to come up
127 | wait_for:
128 | timeout: 60
129 |
130 | - name: Find out the host id of the old host
131 | shell: >
132 | hosted-engine --vm-status | grep "{{ gluster_maintenance_old_node }}" |
133 | grep -Eo 'id: [0-9]+' | cut -d ':' -f2
134 | register: idres
135 | run_once: true
136 | delegate_to: localhost
137 |
138 | - name: Remove the host id of the old host
139 | shell: hosted-engine --clean-metadata --host-id={{ idres.stdout|trim }} --force-clean
140 | delegate_to: localhost
141 | run_once: true
142 |
143 | - name: Restart oVirt HA broker service
144 | service:
145 | name: ovirt-ha-broker
146 | state: restarted
147 |
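148 | # A hypothetical invocation, assuming ovirt_engine_hostname,
149 | # gluster_maintenance_old_node and gluster_maintenance_new_node are defined in
150 | # the reconfigure_he_storage_inventory.yml shipped with these playbooks:
151 | #
152 | #   ansible-playbook -i ../reconfigure_he_storage_inventory.yml reconfigure_he_storage.yml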
--------------------------------------------------------------------------------
/playbooks/hc-ansible-deployment/tasks/replace_node.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: hc_nodes
3 | remote_user: root
4 | gather_facts: yes
5 | tasks:
6 | - block:
7 | - name: Gluster host preparation
8 | include_role:
9 | name: "{{ line_item }}"
10 | with_items:
11 | - gluster.infra
12 | - gluster.features
13 | loop_control:
14 | loop_var: line_item
15 | tags:
16 | - preparehost
17 |
18 | - hosts: cluster_node
19 | remote_user: root
20 | gather_facts: yes
21 | tasks:
22 | - block:
23 | - name: Gluster Peer Membership Restoration
24 | include_role:
25 | name: gluster.maintenance
26 | tags:
27 | - restorepeer
28 |
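29 | # A hypothetical invocation, assuming node_replace_inventory.yml (shipped with
30 | # these playbooks) defines the hc_nodes and cluster_node groups:
31 | #
32 | #   ansible-playbook -i ../node_replace_inventory.yml replace_node.yml --tags preparehost,restorepeer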
--------------------------------------------------------------------------------
/playbooks/hc-ansible-deployment/tasks/restore.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | tasks:
4 |     - name: Check that the backup_dir location is right
5 | stat:
6 | path: "{{ backup_dir }}/hostname"
7 | register: hostnamefile
8 |
9 |     - name: Fail if backup_dir is not right
10 |       fail:
11 |         msg: Set backup_dir to point to the extracted contents of the backup
12 | when: hostnamefile.stat.isreg is not defined
13 |
14 |     - name: Restore the NBDE configuration, if NBDE is enabled
15 | block:
16 | - name: Restore NBDE keyfiles, if available
17 | shell: cp "{{ backup_dir }}"/sd*keyfile /etc/
18 |
19 |         - name: Check for the crypttab file
20 | stat:
21 | path: "{{ backup_dir }}/crypttab"
22 | register: crypttabfile
23 |
24 | - name: Fail if crypttab file is missing in NBDE restore
25 | fail:
26 | msg: crypttab file is missing in the backup
27 | when: crypttabfile.stat.isreg is not defined
28 |
29 |         - name: Reconstruct crypttab entries from the backed-up crypttab
30 | shell: grep etc {{ backup_dir }}/crypttab >> /etc/crypttab
31 |
32 |         - name: Get the list of crypttab entries for bricks
33 |           shell: grep etc /etc/crypttab
34 | register: crypttab_entries
35 |
36 | - name: Open the LUKS devices
37 | shell: cryptsetup luksOpen {{ item.split()[1] }} {{ item.split()[0] }} -d {{ item.split()[2] }}
38 | loop: "{{ crypttab_entries.stdout_lines }}"
39 |
40 | when: nbde_setup is defined and nbde_setup
41 |
42 | - name: Check if vdo conf file exists
43 | stat:
44 | path: "{{backup_dir}}/vdoconf.yml"
45 | register: vdoconfres
46 |
47 | - name: Restore the VDO conf file
48 | copy:
49 | src: "{{backup_dir}}/vdoconf.yml"
50 | dest: /etc/vdoconf.yml
51 | when: vdoconfres.stat.isreg is defined
52 |
53 | - name: Get the VDO version
54 | shell: vdo status | grep 'vdo version' | cut -d':' -f2
55 | register: vdostatres
56 |
57 | - name: Register the version
58 | set_fact:
59 | vdovers: "{{ vdostatres.stdout | trim }}"
60 |
61 |     - name: Remove the readCache attribute from the VDO config for VDO versions >= 6.2
62 | lineinfile:
63 | path: /etc/vdoconf.yml
64 | regexp: readCache
65 | state: absent
66 | when: vdovers is version('6.2', '>=')
67 |
68 | - name: Restart VDO service
69 | service:
70 | name: vdo
71 | state: restarted
72 |
73 | - name: Check for the presence of fstab
74 | stat:
75 | path: "{{backup_dir}}/fstab"
76 | register: fstabres
77 |
78 | - fail:
79 |         msg: fstab is missing in the backup dir - "{{ backup_dir }}"
80 | when: fstabres.stat.isreg is not defined
81 |
82 |     - name: Extract the gluster brick entries from the backed-up fstab into a temp file
83 |       shell: grep gluster_bricks {{ backup_dir }}/fstab > /tmp/fstab
84 |
85 | - name: Append contents of temp fstab file to /etc/fstab
86 | shell: cat /tmp/fstab >> /etc/fstab
87 |
88 | - name: Reactivate all VGs
89 | shell: vgchange -a y
90 |
91 |     - name: Find out the list of gluster brick mountpoints
92 |       shell: cut -d ' ' -f2 /tmp/fstab
93 | register: glustermounts
94 |
95 | - name: Create mount directories
96 | file:
97 | path: "{{ item }}"
98 | state: directory
99 | with_items: "{{ glustermounts.stdout_lines }}"
100 |
101 | - name: Mount all the filesystems
102 | shell: mount -a
103 |
104 |     - name: Apply SELinux labels to the mount points
105 | shell: semanage fcontext -a -t glusterd_brick_t "{{ item }}"
106 | with_items: "{{ glustermounts.stdout_lines }}"
107 |
108 | - name: Restore context on the mount points
109 | shell: restorecon -Rv "{{ item }}"
110 | with_items: "{{ glustermounts.stdout_lines }}"
111 |
112 |     - name: Remove the temp fstab file
113 | file:
114 | path: /tmp/fstab
115 | state: absent
116 |
117 |     - name: Restore key files under /etc/ssh
118 |       shell: yes | cp "{{ backup_dir }}"/ssh/ssh_host* /etc/ssh/ 2>/dev/null
119 |
120 |     - name: Update the ownership of the host keys under /etc/ssh
121 | shell: chown -R root:ssh_keys /etc/ssh/ssh_host*key
122 |
123 |     - name: Reload the systemd daemon
124 | shell: systemctl daemon-reload
125 |
126 | - name: Restore gluster configuration post upgrade
127 | block:
128 | - name: Check for the presence of glusterd.info in the archive
129 | stat:
130 | path: "{{backup_dir}}/glusterd/glusterd.info"
131 | register: glusterdres
132 |
133 | - fail:
134 |             msg: glusterd.info is missing in the backup dir - {{ backup_dir }}
135 | when: glusterdres.stat.isreg is not defined
136 |
137 |         - name: Read the gluster UUID of the host from the backup
138 |           shell: grep -i uuid "{{ backup_dir }}/glusterd/glusterd.info"
139 | register: uuidres
140 |
141 | - lineinfile:
142 | path: /var/lib/glusterd/glusterd.info
143 | regex: UUID
144 | line: "{{uuidres.stdout}}"
145 |
146 | - name: Check for the presence of peers directory
147 | stat:
148 | path: "{{backup_dir}}/glusterd/peers"
149 | register: peerdir
150 |
151 | - fail:
152 | msg: gluster peer files are missing in the archive
153 | when: peerdir.stat.isdir is not defined
154 |
155 | - name: Copy the peer files
156 | copy:
157 | src: "{{backup_dir}}/glusterd/peers/"
158 | dest: /var/lib/glusterd/peers/
159 |
160 | - name: Restart glusterd service
161 | service:
162 | name: glusterd
163 | state: restarted
164 |
165 | - name: Wait for 5 seconds for glusterd to sync on all the nodes
166 | wait_for:
167 | timeout: 5
168 |
169 | - name: Get the list of volumes
170 | shell: gluster volume list
171 | register: volumes
172 |
173 |     - name: Trigger self-heal on all the volumes
174 | ignore_errors: yes
175 | shell: gluster volume heal "{{ item }}"
176 | with_items: "{{ volumes.stdout_lines }}"
177 | when: upgrade is defined and upgrade
178 |
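179 | # Illustrative example of the crypttab convention assumed by the "Open the
180 | # LUKS devices" task above: a backed-up entry such as
181 | #
182 | #   luks_sdb /dev/sdb /etc/sdb_keyfile
183 | #
184 | # is reopened as
185 | #
186 | #   cryptsetup luksOpen /dev/sdb luks_sdb -d /etc/sdb_keyfile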
--------------------------------------------------------------------------------
/tests/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gluster/gluster-ansible/21aa88d0a57487302448e776e7f54616df7a558d/tests/README.md
--------------------------------------------------------------------------------