├── .gitignore
├── LICENSE
├── README.md
├── TODO.md
├── scripts
│   ├── build-rpms.sh
│   ├── prepare-release.sh
│   ├── storhaug.spec
│   ├── timeout3.sh
│   ├── vagrant-refresh.sh
│   └── watch-ha.sh
├── src
│   ├── ganesha
│   ├── ganesha_trigger
│   ├── nfs-ha.conf.sample
│   ├── smb-ha.conf.sample
│   ├── storhaug
│   └── storhaug.conf.sample
└── vagrant-ansible
    ├── Vagrantfile
    ├── defaults.yaml
    └── playbooks
        ├── files
        │   ├── 99-no-dns.conf
        │   ├── CTDB
        │   ├── ctdb
        │   ├── export.conf.j2
        │   ├── ganesha.conf
        │   ├── hosts.j2
        │   ├── nfs
        │   ├── nodes.j2
        │   ├── resolv.conf.j2
        │   ├── smb.conf.j2
        │   ├── storhaug.conf.j2
        │   ├── vagrant
        │   └── vagrant.pub
        ├── group_vars
        │   ├── nfs_servers
        │   ├── smb_servers
        │   └── storhaug
        ├── raw-el7.yml
        ├── roles
        │   ├── common
        │   │   ├── defaults
        │   │   │   └── main.yml
        │   │   ├── handlers
        │   │   │   └── main.yml
        │   │   ├── tasks
        │   │   │   ├── main.yml
        │   │   │   ├── setup-Debian.yml
        │   │   │   └── setup-RedHat.yml
        │   │   └── templates
        │   │       ├── firewall.bash.j2
        │   │       └── firewall.j2
        │   ├── glusterfs
        │   │   ├── defaults
        │   │   │   └── main.yml
        │   │   ├── files
        │   │   │   └── glusterfs-epel.repo
        │   │   ├── tasks
        │   │   │   ├── main.yml
        │   │   │   ├── setup-Debian.yml
        │   │   │   └── setup-RedHat.yml
        │   │   └── vars
        │   │       ├── Debian.yml
        │   │       └── RedHat.yml
        │   └── storhaug
        │       ├── defaults
        │       │   └── main.yml
        │       ├── files
        │       │   └── storhaug.repo
        │       ├── handlers
        │       │   └── main.yml
        │       ├── tasks
        │       │   ├── conf-NFS.yml
        │       │   ├── conf-SMB.yml
        │       │   ├── main.yml
        │       │   ├── setup-AD.yml
        │       │   └── setup-RedHat.yml
        │       ├── templates
        │       │   └── smb.conf.j2
        │       └── vars
        │           ├── NFS.yml
        │           ├── SMB.yml
        │           └── main.yml
        └── storhaug.yml
/.gitignore:
--------------------------------------------------------------------------------
1 | hacking
2 | .vagrant/
3 | *.sw*
4 | custom*
5 | active_vms.yml
6 | active_vms.yaml
7 | vagrant.yaml*
8 | vagrant.yml*
9 | host_vars/
10 | repo/
11 | tags
12 | *.retry
13 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 2, June 1991
3 |
4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
6 | Everyone is permitted to copy and distribute verbatim copies
7 | of this license document, but changing it is not allowed.
8 |
9 | Preamble
10 |
11 | The licenses for most software are designed to take away your
12 | freedom to share and change it. By contrast, the GNU General Public
13 | License is intended to guarantee your freedom to share and change free
14 | software--to make sure the software is free for all its users. This
15 | General Public License applies to most of the Free Software
16 | Foundation's software and to any other program whose authors commit to
17 | using it. (Some other Free Software Foundation software is covered by
18 | the GNU Lesser General Public License instead.) You can apply it to
19 | your programs, too.
20 |
21 | When we speak of free software, we are referring to freedom, not
22 | price. Our General Public Licenses are designed to make sure that you
23 | have the freedom to distribute copies of free software (and charge for
24 | this service if you wish), that you receive source code or can get it
25 | if you want it, that you can change the software or use pieces of it
26 | in new free programs; and that you know you can do these things.
27 |
28 | To protect your rights, we need to make restrictions that forbid
29 | anyone to deny you these rights or to ask you to surrender the rights.
30 | These restrictions translate to certain responsibilities for you if you
31 | distribute copies of the software, or if you modify it.
32 |
33 | For example, if you distribute copies of such a program, whether
34 | gratis or for a fee, you must give the recipients all the rights that
35 | you have. You must make sure that they, too, receive or can get the
36 | source code. And you must show them these terms so they know their
37 | rights.
38 |
39 | We protect your rights with two steps: (1) copyright the software, and
40 | (2) offer you this license which gives you legal permission to copy,
41 | distribute and/or modify the software.
42 |
43 | Also, for each author's protection and ours, we want to make certain
44 | that everyone understands that there is no warranty for this free
45 | software. If the software is modified by someone else and passed on, we
46 | want its recipients to know that what they have is not the original, so
47 | that any problems introduced by others will not reflect on the original
48 | authors' reputations.
49 |
50 | Finally, any free program is threatened constantly by software
51 | patents. We wish to avoid the danger that redistributors of a free
52 | program will individually obtain patent licenses, in effect making the
53 | program proprietary. To prevent this, we have made it clear that any
54 | patent must be licensed for everyone's free use or not licensed at all.
55 |
56 | The precise terms and conditions for copying, distribution and
57 | modification follow.
58 |
59 | GNU GENERAL PUBLIC LICENSE
60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
61 |
62 | 0. This License applies to any program or other work which contains
63 | a notice placed by the copyright holder saying it may be distributed
64 | under the terms of this General Public License. The "Program", below,
65 | refers to any such program or work, and a "work based on the Program"
66 | means either the Program or any derivative work under copyright law:
67 | that is to say, a work containing the Program or a portion of it,
68 | either verbatim or with modifications and/or translated into another
69 | language. (Hereinafter, translation is included without limitation in
70 | the term "modification".) Each licensee is addressed as "you".
71 |
72 | Activities other than copying, distribution and modification are not
73 | covered by this License; they are outside its scope. The act of
74 | running the Program is not restricted, and the output from the Program
75 | is covered only if its contents constitute a work based on the
76 | Program (independent of having been made by running the Program).
77 | Whether that is true depends on what the Program does.
78 |
79 | 1. You may copy and distribute verbatim copies of the Program's
80 | source code as you receive it, in any medium, provided that you
81 | conspicuously and appropriately publish on each copy an appropriate
82 | copyright notice and disclaimer of warranty; keep intact all the
83 | notices that refer to this License and to the absence of any warranty;
84 | and give any other recipients of the Program a copy of this License
85 | along with the Program.
86 |
87 | You may charge a fee for the physical act of transferring a copy, and
88 | you may at your option offer warranty protection in exchange for a fee.
89 |
90 | 2. You may modify your copy or copies of the Program or any portion
91 | of it, thus forming a work based on the Program, and copy and
92 | distribute such modifications or work under the terms of Section 1
93 | above, provided that you also meet all of these conditions:
94 |
95 | a) You must cause the modified files to carry prominent notices
96 | stating that you changed the files and the date of any change.
97 |
98 | b) You must cause any work that you distribute or publish, that in
99 | whole or in part contains or is derived from the Program or any
100 | part thereof, to be licensed as a whole at no charge to all third
101 | parties under the terms of this License.
102 |
103 | c) If the modified program normally reads commands interactively
104 | when run, you must cause it, when started running for such
105 | interactive use in the most ordinary way, to print or display an
106 | announcement including an appropriate copyright notice and a
107 | notice that there is no warranty (or else, saying that you provide
108 | a warranty) and that users may redistribute the program under
109 | these conditions, and telling the user how to view a copy of this
110 | License. (Exception: if the Program itself is interactive but
111 | does not normally print such an announcement, your work based on
112 | the Program is not required to print an announcement.)
113 |
114 | These requirements apply to the modified work as a whole. If
115 | identifiable sections of that work are not derived from the Program,
116 | and can be reasonably considered independent and separate works in
117 | themselves, then this License, and its terms, do not apply to those
118 | sections when you distribute them as separate works. But when you
119 | distribute the same sections as part of a whole which is a work based
120 | on the Program, the distribution of the whole must be on the terms of
121 | this License, whose permissions for other licensees extend to the
122 | entire whole, and thus to each and every part regardless of who wrote it.
123 |
124 | Thus, it is not the intent of this section to claim rights or contest
125 | your rights to work written entirely by you; rather, the intent is to
126 | exercise the right to control the distribution of derivative or
127 | collective works based on the Program.
128 |
129 | In addition, mere aggregation of another work not based on the Program
130 | with the Program (or with a work based on the Program) on a volume of
131 | a storage or distribution medium does not bring the other work under
132 | the scope of this License.
133 |
134 | 3. You may copy and distribute the Program (or a work based on it,
135 | under Section 2) in object code or executable form under the terms of
136 | Sections 1 and 2 above provided that you also do one of the following:
137 |
138 | a) Accompany it with the complete corresponding machine-readable
139 | source code, which must be distributed under the terms of Sections
140 | 1 and 2 above on a medium customarily used for software interchange; or,
141 |
142 | b) Accompany it with a written offer, valid for at least three
143 | years, to give any third party, for a charge no more than your
144 | cost of physically performing source distribution, a complete
145 | machine-readable copy of the corresponding source code, to be
146 | distributed under the terms of Sections 1 and 2 above on a medium
147 | customarily used for software interchange; or,
148 |
149 | c) Accompany it with the information you received as to the offer
150 | to distribute corresponding source code. (This alternative is
151 | allowed only for noncommercial distribution and only if you
152 | received the program in object code or executable form with such
153 | an offer, in accord with Subsection b above.)
154 |
155 | The source code for a work means the preferred form of the work for
156 | making modifications to it. For an executable work, complete source
157 | code means all the source code for all modules it contains, plus any
158 | associated interface definition files, plus the scripts used to
159 | control compilation and installation of the executable. However, as a
160 | special exception, the source code distributed need not include
161 | anything that is normally distributed (in either source or binary
162 | form) with the major components (compiler, kernel, and so on) of the
163 | operating system on which the executable runs, unless that component
164 | itself accompanies the executable.
165 |
166 | If distribution of executable or object code is made by offering
167 | access to copy from a designated place, then offering equivalent
168 | access to copy the source code from the same place counts as
169 | distribution of the source code, even though third parties are not
170 | compelled to copy the source along with the object code.
171 |
172 | 4. You may not copy, modify, sublicense, or distribute the Program
173 | except as expressly provided under this License. Any attempt
174 | otherwise to copy, modify, sublicense or distribute the Program is
175 | void, and will automatically terminate your rights under this License.
176 | However, parties who have received copies, or rights, from you under
177 | this License will not have their licenses terminated so long as such
178 | parties remain in full compliance.
179 |
180 | 5. You are not required to accept this License, since you have not
181 | signed it. However, nothing else grants you permission to modify or
182 | distribute the Program or its derivative works. These actions are
183 | prohibited by law if you do not accept this License. Therefore, by
184 | modifying or distributing the Program (or any work based on the
185 | Program), you indicate your acceptance of this License to do so, and
186 | all its terms and conditions for copying, distributing or modifying
187 | the Program or works based on it.
188 |
189 | 6. Each time you redistribute the Program (or any work based on the
190 | Program), the recipient automatically receives a license from the
191 | original licensor to copy, distribute or modify the Program subject to
192 | these terms and conditions. You may not impose any further
193 | restrictions on the recipients' exercise of the rights granted herein.
194 | You are not responsible for enforcing compliance by third parties to
195 | this License.
196 |
197 | 7. If, as a consequence of a court judgment or allegation of patent
198 | infringement or for any other reason (not limited to patent issues),
199 | conditions are imposed on you (whether by court order, agreement or
200 | otherwise) that contradict the conditions of this License, they do not
201 | excuse you from the conditions of this License. If you cannot
202 | distribute so as to satisfy simultaneously your obligations under this
203 | License and any other pertinent obligations, then as a consequence you
204 | may not distribute the Program at all. For example, if a patent
205 | license would not permit royalty-free redistribution of the Program by
206 | all those who receive copies directly or indirectly through you, then
207 | the only way you could satisfy both it and this License would be to
208 | refrain entirely from distribution of the Program.
209 |
210 | If any portion of this section is held invalid or unenforceable under
211 | any particular circumstance, the balance of the section is intended to
212 | apply and the section as a whole is intended to apply in other
213 | circumstances.
214 |
215 | It is not the purpose of this section to induce you to infringe any
216 | patents or other property right claims or to contest validity of any
217 | such claims; this section has the sole purpose of protecting the
218 | integrity of the free software distribution system, which is
219 | implemented by public license practices. Many people have made
220 | generous contributions to the wide range of software distributed
221 | through that system in reliance on consistent application of that
222 | system; it is up to the author/donor to decide if he or she is willing
223 | to distribute software through any other system and a licensee cannot
224 | impose that choice.
225 |
226 | This section is intended to make thoroughly clear what is believed to
227 | be a consequence of the rest of this License.
228 |
229 | 8. If the distribution and/or use of the Program is restricted in
230 | certain countries either by patents or by copyrighted interfaces, the
231 | original copyright holder who places the Program under this License
232 | may add an explicit geographical distribution limitation excluding
233 | those countries, so that distribution is permitted only in or among
234 | countries not thus excluded. In such case, this License incorporates
235 | the limitation as if written in the body of this License.
236 |
237 | 9. The Free Software Foundation may publish revised and/or new versions
238 | of the General Public License from time to time. Such new versions will
239 | be similar in spirit to the present version, but may differ in detail to
240 | address new problems or concerns.
241 |
242 | Each version is given a distinguishing version number. If the Program
243 | specifies a version number of this License which applies to it and "any
244 | later version", you have the option of following the terms and conditions
245 | either of that version or of any later version published by the Free
246 | Software Foundation. If the Program does not specify a version number of
247 | this License, you may choose any version ever published by the Free Software
248 | Foundation.
249 |
250 | 10. If you wish to incorporate parts of the Program into other free
251 | programs whose distribution conditions are different, write to the author
252 | to ask for permission. For software which is copyrighted by the Free
253 | Software Foundation, write to the Free Software Foundation; we sometimes
254 | make exceptions for this. Our decision will be guided by the two goals
255 | of preserving the free status of all derivatives of our free software and
256 | of promoting the sharing and reuse of software generally.
257 |
258 | NO WARRANTY
259 |
260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
268 | REPAIR OR CORRECTION.
269 |
270 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
278 | POSSIBILITY OF SUCH DAMAGES.
279 |
280 | END OF TERMS AND CONDITIONS
281 |
282 | How to Apply These Terms to Your New Programs
283 |
284 | If you develop a new program, and you want it to be of the greatest
285 | possible use to the public, the best way to achieve this is to make it
286 | free software which everyone can redistribute and change under these terms.
287 |
288 | To do so, attach the following notices to the program. It is safest
289 | to attach them to the start of each source file to most effectively
290 | convey the exclusion of warranty; and each file should have at least
291 | the "copyright" line and a pointer to where the full notice is found.
292 |
293 | {description}
294 | Copyright (C) {year} {fullname}
295 |
296 | This program is free software; you can redistribute it and/or modify
297 | it under the terms of the GNU General Public License as published by
298 | the Free Software Foundation; either version 2 of the License, or
299 | (at your option) any later version.
300 |
301 | This program is distributed in the hope that it will be useful,
302 | but WITHOUT ANY WARRANTY; without even the implied warranty of
303 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
304 | GNU General Public License for more details.
305 |
306 | You should have received a copy of the GNU General Public License along
307 | with this program; if not, write to the Free Software Foundation, Inc.,
308 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
309 |
310 | Also add information on how to contact you by electronic and paper mail.
311 |
312 | If the program is interactive, make it output a short notice like this
313 | when it starts in an interactive mode:
314 |
315 | Gnomovision version 69, Copyright (C) year name of author
316 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
317 | This is free software, and you are welcome to redistribute it
318 | under certain conditions; type `show c' for details.
319 |
320 | The hypothetical commands `show w' and `show c' should show the appropriate
321 | parts of the General Public License. Of course, the commands you use may
322 | be called something other than `show w' and `show c'; they could even be
323 | mouse-clicks or menu items--whatever suits your program.
324 |
325 | You should also get your employer (if you work as a programmer) or your
326 | school, if any, to sign a "copyright disclaimer" for the program, if
327 | necessary. Here is a sample; alter the names:
328 |
329 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program
330 | `Gnomovision' (which makes passes at compilers) written by James Hacker.
331 |
332 | {signature of Ty Coon}, 1 April 1989
333 | Ty Coon, President of Vice
334 |
335 | This General Public License does not permit incorporating your program into
336 | proprietary programs. If your program is a subroutine library, you may
337 | consider it more useful to permit linking proprietary applications with the
338 | library. If this is what you want to do, use the GNU Lesser General
339 | Public License instead of this License.
340 |
341 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # storhaug
2 | Pacemaker-based HA solution for clustered storage platforms
3 |
4 | Currently this is a WIP content dump. If you want to get this up and running, don't hesitate to ask. :)
5 |
6 | The author will update the project documentation properly once he is not jetlagged. ( EDIT: 1 year later, jetlagged again. :( )
7 |
8 | Some quick notes:
9 | * This is currently primarily aimed at CentOS 6 and 7.
10 | * The project includes a vagrant+ansible environment to quickly set up a virtual storhaug cluster.
11 | * To be able to run this on a Fedora machine, install the following packages: `vagrant-libvirt ansible`
12 | * From the vagrant-ansible directory, run `vagrant status`. This will produce a default `vagrant.yaml` configuration file to define the VM environment. Review the settings, then run `vagrant up`. (A quickstart example is shown at the end of this README.)
13 | * If you're developing with this, it is highly recommended to do the following:
14 | * Install vagrant-cachier: `vagrant plugin install vagrant-cachier`
15 | * Use `scripts/vagrant-refresh.sh` to manipulate/update your VMs. This helps work around the problem where Ansible provisioning triggers before Vagrant has finished rsync operations.
16 |
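A minimal quickstart for the vagrant-ansible environment might look like the following (the generated `vagrant.yaml` and the required packages are described in the notes above; the exact VM definitions depend on your host):

```bash
cd vagrant-ansible
vagrant status          # the first run writes a default vagrant.yaml
$EDITOR vagrant.yaml    # review/adjust the VM definitions
vagrant up              # create and provision the cluster VMs
```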
--------------------------------------------------------------------------------
/TODO.md:
--------------------------------------------------------------------------------
1 | # storhaug: TODO
2 |
3 | NFS-GANESHA:
4 | * Detect config file location by distro (lsb_release?)
5 | * Move various functions to RAs
6 |
7 | VIPs:
8 | * Don't name their RAs by ipcount
9 | * Implement delete_virt_ip()
10 |
11 | Pacemaker:
12 | * use portblock
13 | * Implement use of conntrackd
14 | * store storhaug config in shared db
15 | * storhaug RA?
16 |
17 | Project:
18 | * Setup and package for Fedora
19 | * Documentation?!
20 |
--------------------------------------------------------------------------------
/scripts/build-rpms.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
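# Usage: build-rpms.sh VERSION [RELEASE] [SPECFILE]
# Creates the source tarball from src/, builds EL6 and EL7 source RPMs,
# rebuilds them with mock, and publishes the results as yum repositories
# under repo/el6 and repo/el7.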
3 | VERSION="${1}"
4 | HEADREL="${2:-1}"
5 | SPEC="${3:-scripts/storhaug.spec}"
6 |
7 | pushd `git rev-parse --show-toplevel`
8 |
9 | tar -czvf storhaug-${VERSION}.tar.gz --transform='s/^src/storhaug/' src/*
10 |
11 | rpmbuild -bs -D 'rhel 6' -D "_topdir `pwd`" -D "_sourcedir ." -D "dist .el6.centos" ${SPEC}
12 | rpmbuild -bs -D 'rhel 7' -D "_topdir `pwd`" -D "_sourcedir ." -D "dist .el7.centos" ${SPEC}
13 | rm -f storhaug-${VERSION}.tar.gz
14 |
15 | rm -rf repo
16 | mkdir -p repo/el6
17 | mkdir -p repo/el7
18 | sudo mock -r epel-6-x86_64 --resultdir repo/el6/ SRPMS/storhaug-${VERSION}-${HEADREL}.el6.centos.src.rpm
19 | sudo mock -r epel-7-x86_64 --resultdir repo/el7/ SRPMS/storhaug-${VERSION}-${HEADREL}.el7.centos.src.rpm
20 | createrepo repo/el6/
21 | createrepo repo/el7/
22 |
23 | rm -f repo/*/*.log
24 | rm -rf BUILD/ BUILDROOT/ RPMS/ SPECS/ SRPMS/
25 |
26 | popd
27 |
--------------------------------------------------------------------------------
/scripts/prepare-release.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
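# Usage: prepare-release.sh [NEW_TAG]
# Derives the version from NEW_TAG (e.g. v0.13) or, if none is given, from the
# most recent git tag; patches the version/release macros in
# scripts/storhaug.spec; if the changelog entry is new, opens it in vim for
# review; and finally runs build-rpms.sh.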
3 | NEW_TAG=$1
4 | NEW_VERSION=""
5 | if [[ "x$NEW_TAG" != "x" ]]; then
6 | NEW_VERSION="${NEW_TAG:1}"
7 | HEADREL=1
8 | fi
9 |
10 | SPEC="scripts/storhaug.spec"
11 |
12 | pushd `git rev-parse --show-toplevel`
13 |
14 | OLD_TAG=$(git describe --tags --abbrev=0 HEAD^1)
15 | OLD_VERSION="${OLD_TAG:1}"
16 | VERSION="${NEW_VERSION:-${OLD_VERSION}}"
17 | MIN="${VERSION#*.}"
18 | MAJ="${VERSION%.*}"
19 | RANGE="${OLD_TAG}.. -- src/"
20 | REVLIST=( $(git rev-list ${RANGE}) )
21 | RELEASE=$((${#REVLIST[@]}+1))
22 | HEADREL=${HEADREL:-${RELEASE}}
23 |
24 | sed -i "s/\\(define major_version \\).*/\\1${MAJ}/" ${SPEC}
25 | sed -i "s/\\(define minor_version \\).*/\\1${MIN}/" ${SPEC}
26 | sed -i "s/\\(define release \\)[^%]*\\(.*\\)/\\1${HEADREL}\\2/" ${SPEC}
27 |
28 | LOG="$(git log --pretty="tformat:* %cd %aN <%aE> - ${VERSION}-${HEADREL}%n%b" --date=local -1 ${REVLIST[$((RELEASE-1))]} | sed -r 's/[0-9]+:[0-9]+:[0-9]+ //'; for ((i=1;i<${#REVLIST[@]};i++)); do git log --format="* %cd %aN <%aE> - ${OLD_VERSION}-$((RELEASE-i))%n%b" --date=local -1 ${REVLIST[i]} | sed -r 's/[0-9]+:[0-9]+:[0-9]+ //'; done)"
29 |
30 | if ! grep -q "${VERSION}-${HEADREL}" ${SPEC}; then
31 | sed "/\%changelog/a ${LOG//$'\n'/\\n}\n" ${SPEC} | vim -c "file ${SPEC}.tmp" -c "/changelog" -
32 |
33 | if [ -f "${SPEC}.tmp" ]; then
34 | mv ${SPEC}.tmp ${SPEC}
35 | else
36 | echo "No changelog saved, aborting release..."
37 | exit
38 | fi
39 | fi
40 |
41 | ./scripts/build-rpms.sh ${VERSION} ${HEADREL} ${SPEC}
42 |
43 | popd
44 |
--------------------------------------------------------------------------------
/scripts/storhaug.spec:
--------------------------------------------------------------------------------
1 | %define major_version 0
2 | %define minor_version 13
3 | %define release 1%{?dist}
4 |
5 | Name: storhaug
6 | Summary: High-Availability Storage Server Add-on
7 | Version: %{major_version}.%{minor_version}
8 | Release: %{release}
9 | License: GPLv2+
10 | Group: Applications/System
11 | URL: http://www.redhat.com
12 | Vendor: Red Hat, Inc.
13 | BuildArch: noarch
14 | BuildRoot: %{_tmppath}/%{name}-%{version}-root
15 |
16 | Source0: %{name}-%{version}.tar.gz
17 |
18 | Requires: glusterfs-server
19 | %if %{defined rhel} && %{rhel} < 7
20 | Requires: cman
21 | Requires: pacemaker
22 | %else
23 | Requires: fence-agents-all
24 | %endif
25 | Requires: pcs
26 |
27 | %description
28 | High-Availability add-on for storage servers
29 |
30 | ### NFS (NFS-Ganesha)
31 | %package nfs
32 | Summary: storhaug NFS module
33 | Group: Applications/System
34 | Requires: storhaug = %{version}-%{release}
35 | Requires: nfs-ganesha
36 | Requires: nfs-ganesha-utils
37 |
38 | %description nfs
39 | High-Availability NFS add-on for storage servers
40 |
41 | ### SMB (Samba)
42 | %package smb
43 | Summary: storhaug SMB module
44 | Group: Applications/System
45 | Requires: storhaug = %{version}-%{release}
46 | Requires: ctdb >= 2.5
47 | Requires: samba
48 | Requires: samba-client
49 | Requires: samba-winbind
50 | Requires: samba-winbind-clients
51 |
52 | %description smb
53 | High-Availability SMB add-on for storage servers
54 |
55 |
56 | %prep
57 | %setup -q -n %{name}
58 |
59 | %install
60 | %{__rm} -rf %{buildroot}
61 |
62 | install -d -m 0755 %{buildroot}%{_sbindir}
63 | install -m 0700 storhaug %{buildroot}%{_sbindir}/storhaug
64 |
65 | sed -i 's/\%CONFDIR/\%{_sysconfdir}/' "%{buildroot}%{_sbindir}/storhaug"
66 |
67 | install -d -m 0700 %{buildroot}%{_sysconfdir}/sysconfig/storhaug.d
68 | install -m 0600 storhaug.conf.sample %{buildroot}%{_sysconfdir}/sysconfig/storhaug.conf
69 | install -m 0600 nfs-ha.conf.sample %{buildroot}%{_sysconfdir}/sysconfig/storhaug.d/nfs-ha.conf
70 | install -m 0600 smb-ha.conf.sample %{buildroot}%{_sysconfdir}/sysconfig/storhaug.d/smb-ha.conf
71 |
72 | install -d -m 0755 %{buildroot}%{_prefix}/lib/ocf/resource.d/heartbeat
73 | install -m 0755 ganesha %{buildroot}%{_prefix}/lib/ocf/resource.d/heartbeat/ganesha
74 | install -m 0755 ganesha_trigger %{buildroot}%{_prefix}/lib/ocf/resource.d/heartbeat/ganesha_trigger
75 |
76 | %post
77 | %if %{defined rhel} && %{rhel} < 7
78 | chkconfig corosync off
79 | chkconfig pacemaker on
80 | chkconfig pcsd on
81 | service pcsd start
82 | %else
83 | systemctl start pcsd.service
84 | systemctl enable pcsd.service
85 | %endif
86 |
87 |
88 | %post smb
89 | %if %{defined rhel} && %{rhel} < 7
90 | chkconfig ctdb off
91 | chkconfig smb off
92 | chkconfig nmb off
93 | chkconfig winbind off
94 | service ctdb stop
95 | service smb stop
96 | service nmb stop
97 | service winbind stop
98 | %else
99 | systemctl stop ctdb smb nmb winbind
100 | systemctl disable ctdb smb nmb winbind
101 | %endif
102 |
103 | %post nfs
104 | %if %{defined rhel} && %{rhel} < 7
105 | chkconfig nfs-server off
106 | chkconfig nfs-lock off
107 | service nfs-server stop
108 | service nfs-lock stop
109 | %else
110 | systemctl stop nfs-server nfs-lock
111 | systemctl disable nfs-server nfs-lock
112 | %endif
113 |
114 | %clean
115 | %{__rm} -rf %{buildroot}
116 |
117 | %files
118 | %defattr(-,root,root,-)
119 | %config(noreplace) %{_sysconfdir}/sysconfig/storhaug.conf
120 | %attr(755,root,root) %dir %{_sysconfdir}/sysconfig/storhaug.d/
121 | %{_sbindir}/storhaug
122 |
123 | %files nfs
124 | %defattr(-,root,root,-)
125 | %config(noreplace) %{_sysconfdir}/sysconfig/storhaug.d/nfs-ha.conf
126 | %{_prefix}/lib/ocf/resource.d/heartbeat/ganesha
127 | %{_prefix}/lib/ocf/resource.d/heartbeat/ganesha_trigger
128 |
129 | %files smb
130 | %defattr(-,root,root,-)
131 | %config(noreplace) %{_sysconfdir}/sysconfig/storhaug.d/smb-ha.conf
132 |
133 |
134 | %changelog
135 | * Fri Sep 30 2016 Jose A. Rivera - 0.13-1
136 | - Allow CTDB rec_lock to be optional.
137 | - Use crm_master to determine CTDB rec_master.
138 | - Remove CTDB lock file volume.
139 |
140 | * Sat Sep 24 2016 Jose A. Rivera - 0.12-1
141 | - Remove IP address parameter from trigger RA.
142 | - Trigger grace from notify action.
143 |
144 | * Sat Sep 24 2016 Jose A. Rivera - 0.11-6
145 | - Update shared state variable names
146 | - Fix erroneous directory names
147 | - Fix reverse reference links
148 |
149 | * Sat Sep 24 2016 Jose A. Rivera - 0.11-5
150 | - Properly detect and source only properly named config files.
151 |
152 | * Wed Sep 21 2016 Jose A. Rivera - 0.11-4
153 | - Missing copy functions.
154 | - Prepare function for move to RA.
155 | - Whitespace fixes.
156 |
157 | * Wed Sep 21 2016 Jose A. Rivera - 0.11-3
158 | - Allow configuration of the shared state FS type via
159 | HA_NFS_STATE_FS.
160 |
161 | * Wed Sep 21 2016 Jose A. Rivera - 0.11-2
162 | - Use helper functions to help simplify long functions.
163 |
164 | * Wed May 11 2016 Jose A. Rivera - 0.11-1
165 | - Overhaul addnode().
166 |
167 | * Wed May 11 2016 Jose A. Rivera - 0.10-4
168 | - Add hook for OCF_DEBUG_LIBRARY in RAs.
169 | - Cache local hostname.
170 | - Various cruft removals.
171 | - Improve cleanup, add cleanup-all.
172 | - Fix copy_config().
173 | - Don't be such a Red Hat.
174 |
175 | * Wed May 11 2016 Jose A. Rivera - 0.10-3
176 | - Parametize NFS-Ganesha config file.
177 | - Do /var/lib/nfs swap in NFS-Ganesha RA.
178 | - Parametize NFS-Ganesha shared state mountpoint.
179 |
180 | * Sat Mar 05 2016 Jose A. Rivera - 0.10-2
181 | - Shorten and clarify copyright notice
182 |
183 | * Mon Feb 29 2016 Jose A. Rivera - 0.10-1
184 | - Major reorganization of main script file
185 | - Provide HA for NFS-Ganesha, based on ganesha-ha
186 |
187 | * Mon Feb 29 2016 Jose A. Rivera - 0.9-2
188 | - Rename some variables and examples
189 | - Label service feature unimplemented
190 |
191 | * Fri Jan 29 2016 Jose A. Rivera - 0.9-1
192 | - Implement deterministic failover
193 | - Based on the version in ganesha-ha
194 |
195 | * Sun Jan 24 2016 Jose A. Rivera - 0.8-6
196 | - Remove uneccessary variables from setup_cluster()
197 | - Allow for configuration of GlusterFS mount points
198 | - Minor cleanups
199 |
200 | * Sat Jan 23 2016 Jose A. Rivera - 0.8-5
201 | - Add a logging function to streamline logging to syslog
202 |
203 | * Sat Jan 23 2016 Jose A. Rivera - 0.8-4
204 | - Largely cosmetic changes to bring storhaug more in line with Ganesha-HA
205 |
206 | * Sat Jan 23 2016 Jose A. Rivera - 0.8-3
207 | - Add an actual usage message under --help/-h
208 |
209 | * Wed Jan 20 2016 Jose A. Rivera - 0.8-2
210 | - Normalize whitespace in main storhaug file.
211 | - Change said file's mode to executable.
212 |
213 | * Mon Jan 18 2016 Jose A. Rivera - 0.8-1
214 | - Rename the project to storhaug
215 | - Remove CTDB RA from source
216 |
217 | * Mon Jan 18 2016 Jose A. Rivera - 0.7-2
218 | - Remove specfile and source tarball from source dir.
219 |
220 | * Mon Jan 18 2016 Jose A. Rivera - 0.7-1
221 | - Force cluster creation
222 | - Allow for definition of which nodes will be storage nodes
223 | - Enable direct-io for GlusterFS backend volumes
224 | - Temporarily comment out NFS functionality
225 |
226 | * Thu Nov 19 2015 Jose A. Rivera - 0.6-2
227 | - Add functionality for EL7
228 |
229 | * Thu Apr 23 2015 Jose A. Rivera - 0.6-1
230 | - Properly update CIB file during cluster creation
231 | - Better tempfile handling
232 | - Improve ganesha statedir creation
233 |
234 | * Wed Apr 15 2015 Jose A. Rivera - 0.5-1
235 | - Remove extraneous cleanup commands
236 |
237 | * Wed Apr 15 2015 Jose A. Rivera - 0.4-1
238 | - Add missing service
239 | - Add missing requires
240 |
241 | * Wed Apr 15 2015 Jose A. Rivera - 0.3-3
242 | - Fix installation config bug
243 |
244 | * Wed Apr 15 2015 Jose A. Rivera - 0.3-2
245 | - Don't install custom CTDB RA, update it in
246 | resource-agents package
247 |
248 | * Wed Apr 15 2015 Jose A. Rivera - 0.3-1
249 | - Add storage-ha script
250 | - Additional post-installation prep work
251 |
252 | * Wed Apr 15 2015 Jose A. Rivera - 0.2-2
253 | - Add Ganesha symlink
254 |
255 | * Mon Apr 13 2015 Jose A. Rivera - 0.2-1
256 | - Add config files
257 |
258 | * Wed Apr 08 2015 Jose A. Rivera - 0.1-1
259 | - Initial version
260 |
--------------------------------------------------------------------------------
/scripts/timeout3.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # The Bash shell script executes a command with a time-out.
4 | # Upon time-out expiration SIGTERM (15) is sent to the process. If the signal
5 | # is blocked, then the subsequent SIGKILL (9) terminates it.
6 | #
7 | # Based on the Bash documentation example.
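# A typical invocation (the option letter is assumed from the defaults below;
# check printUsage and the option parsing in this file for the exact flags):
#   ./timeout3.sh -t 30 ctdb status
# runs "ctdb status" and, if it is still running after 30 seconds, sends it
# SIGTERM followed (after the configured delay) by SIGKILL.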
8 |
9 | # Hello Chet,
10 | # please find attached a "little easier" :-) to comprehend
11 | # time-out example. If you find it suitable, feel free to include
12 | # anywhere: the very same logic as in the original examples/scripts, a
13 | # little more transparent implementation to my taste.
14 | #
15 | # Dmitry V Golovashkin
16 |
17 | scriptName="${0##*/}"
18 |
19 | declare -i DEFAULT_TIMEOUT=9
20 | declare -i DEFAULT_INTERVAL=1
21 | declare -i DEFAULT_DELAY=1
22 |
23 | # Timeout.
24 | declare -i timeout=DEFAULT_TIMEOUT
25 | # Interval between checks if the process is still alive.
26 | declare -i interval=DEFAULT_INTERVAL
27 | # Delay between posting the SIGTERM signal and destroying the process by SIGKILL.
28 | declare -i delay=DEFAULT_DELAY
29 |
30 | function printUsage() {
31 | cat < 0)); do
79 | sleep $interval
80 | kill -0 $$ || exit 0
81 | ((t -= interval))
82 | done
83 |
84 | # Be nice, post SIGTERM first.
85 | # The 'exit 0' below will be executed if any preceding command fails.
86 | kill -s SIGTERM $$ && kill -0 $$ || exit 0
87 | sleep $delay
88 | kill -s SIGKILL $$
89 | ) 2> /dev/null &
90 |
91 | exec "$@"
92 |
--------------------------------------------------------------------------------
/scripts/vagrant-refresh.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
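# Refresh the vagrant-ansible VMs.
#   -f  destroy the VMs and recreate them (then provision)
#   -p  re-run 'vagrant provision'
# Remaining arguments name the machines to act on (default: every machine
# currently running). Without -f, the source tree is rsynced to the machines
# and the storhaug packages are reinstalled on each of them.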
3 | DESTROY=false
4 | PROVISION=false
5 |
6 | cd `git rev-parse --show-toplevel`
7 | cd vagrant-ansible
8 |
9 | VSTATUS=$(vagrant status)
10 | VRC=$?
11 |
12 | if [ $VRC -ne 0 ]; then
13 | echo -n $VSTATUS
14 | exit $VRC
15 | fi
16 |
17 | MACHINES=( $(echo "$VSTATUS" | grep "running" | awk '{print $1}') )
18 | NUM_MACH=${#MACHINES[@]}
19 |
20 | if [[ "$1" == "-f" ]]; then
21 | DESTROY=true
22 | PROVISION=true
23 | shift
24 | elif [[ "$1" == "-p" ]]; then
25 | PROVISION=true
26 | shift
27 | fi
28 |
29 | set -- ${@:-${MACHINES[@]}}
30 |
31 | if [[ $DESTROY == true ]]; then
32 | vagrant destroy
33 | vagrant up --no-provision $@
34 | fi
35 |
36 | if [[ $PROVISION == true ]]; then
37 | vagrant provision ${1}
38 | fi
39 |
40 | if [[ $DESTROY != true ]]; then
41 | vagrant rsync $@
42 | while [[ $# -gt 0 ]]; do
43 | vagrant ssh $1 -c "sudo sh -c \"stty cols 80; yum -y makecache all; yum -y reinstall storhaug* | cat\"" | while read -r line; do
44 | echo -en "[$1] $line\r\n"
45 | done &
46 | shift
47 | done
48 | fi
49 |
50 | wait
51 |
--------------------------------------------------------------------------------
/scripts/watch-ha.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
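# Continuously display 'pcs status' for this node and, when /etc/ctdb/nodes
# exists, 'ctdb status' and 'ctdb getcapabilities', using timeout3.sh to keep
# a hung command from stalling the display.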
3 | CTDB_STATUS=""
4 | DIR=$( dirname "${BASH_SOURCE[0]}" )
5 | TIMEOUT="${DIR}/timeout3.sh"
6 |
7 | if [ -e /etc/ctdb/nodes ]; then
8 | CTDB_STATUS="echo; ${TIMEOUT} ctdb status && echo && ${TIMEOUT} ctdb getcapabilities;"
9 | fi
10 |
11 | watch -n1 "echo 'hostname: '`hostname`; echo; ${TIMEOUT} pcs status; ${CTDB_STATUS}"
12 |
--------------------------------------------------------------------------------
/src/ganesha:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # Copyright (c) 2014 David Vossel
4 | # Copyright (c) 2015 Jose A. Rivera
5 | # All Rights Reserved.
6 | #
7 | # This program is free software; you can redistribute it and/or modify
8 | # it under the terms of version 2 of the GNU General Public License as
9 | # published by the Free Software Foundation.
10 | #
11 | # This program is distributed in the hope that it would be useful, but
12 | # WITHOUT ANY WARRANTY; without even the implied warranty of
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
14 | #
15 | # Further, this software is distributed without any warranty that it is
16 | # free of the rightful claim of any third person regarding infringement
17 | # or the like. Any license provided herein, whether implied or
18 | # otherwise, applies only to this software file. Patent licenses, if
19 | # any, provided herein do not apply to combinations of this program with
20 | # other software, or any other product whatsoever.
21 | #
22 | # You should have received a copy of the GNU General Public License
23 | # along with this program; if not, write the Free Software Foundation,
24 | # Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
25 | #
26 |
27 | #######################################################################
28 | # Initialization:
29 |
30 | if [ -n "${OCF_DEBUG_LIBRARY}" ]; then
31 | . ${OCF_DEBUG_LIBRARY}
32 | else
33 | : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
34 | . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
35 | fi
36 |
37 | #######################################################################
38 |
39 | default_config="/etc/ganesha/ganesha.conf"
40 | default_pidfile="/var/run/ganesha.pid"
41 | default_state_fs="glusterfs"
42 | default_state_mnt="/var/run/ganesha/state"
43 | binary=$(which ganesha.nfsd 2> /dev/null)
44 |
45 | if [ -z "$binary" ]; then
46 | binary="/usr/bin/ganesha.nfsd"
47 | fi
48 |
49 | meta_data() {
50 | cat <
52 |
53 |
54 | 1.0
55 |
56 |
57 | The ganesha Resource Agent manages the ganesha nfs daemon in user space.
58 |
59 | ganesha agent
60 |
61 |
62 |
63 |
64 |
65 | Full path to ganesha config file
66 |
67 | config file
68 |
69 |
70 |
71 |
72 |
73 |
74 | Full path to ganesha pidfile.
75 |
76 | pidfile path
77 |
78 |
79 |
80 |
81 |
82 |
83 | The type of filesystem used for NFS-Ganesha's shared state.
84 |
85 | shared state FS type
86 |
87 |
88 |
89 |
90 |
91 |
92 | The directory where NFS-Ganesha's shared state will be mounted.
93 |
94 | shared state mountpoint
95 |
96 |
97 |
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 |
107 |
108 | END
109 | }
110 |
111 | #######################################################################
112 |
113 | ganesha_usage() {
114 | cat </dev/null | grep -a "${binary}" > /dev/null 2>&1
190 | if [ $? -eq 0 ];then
191 | return $OCF_SUCCESS
192 | fi
193 | # deleting stale pidfile
194 | rm -f $pidfile
195 | fi
196 | return $OCF_NOT_RUNNING
197 | }
198 |
199 | ganesha_validate() {
200 | check_binary $binary
201 |
202 | # TODO verify kernel NFS is not up.
203 |
204 | # verify config file exists
205 | if ! [ -f "${OCF_RESKEY_config}" ]; then
206 | ocf_log err "No configfile found at ${OCF_RESKEY_config}"
207 | return $OCF_ERR_ARGS
208 | fi
209 | return $OCF_SUCCESS
210 | }
211 |
212 | : ${OCF_RESKEY_config=${default_config}}
213 | : ${OCF_RESKEY_pidfile=${default_pidfile}}
214 | : ${OCF_RESKEY_state_fs=${default_state_fs}}
215 | : ${OCF_RESKEY_state_mnt=${default_state_mnt}}
216 |
217 | case $__OCF_ACTION in
218 | meta-data) meta_data
219 | exit $OCF_SUCCESS
220 | ;;
221 | start) ganesha_start;;
222 | stop) ganesha_stop;;
223 | monitor) ganesha_monitor;;
224 | validate-all) ganesha_validate;;
225 | usage|help) ganesha_usage
226 | exit $OCF_SUCCESS
227 | ;;
228 | *) ganesha_usage
229 | exit $OCF_ERR_UNIMPLEMENTED
230 | ;;
231 | esac
232 | rc=$?
233 | ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
234 | exit $rc
235 |
--------------------------------------------------------------------------------
/src/ganesha_trigger:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # Copyright (c) 2015 Jose A. Rivera
4 | # All Rights Reserved.
5 | #
6 | # This program is free software; you can redistribute it and/or modify
7 | # it under the terms of version 2 of the GNU General Public License as
8 | # published by the Free Software Foundation.
9 | #
10 | # This program is distributed in the hope that it would be useful, but
11 | # WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
13 | #
14 | # Further, this software is distributed without any warranty that it is
15 | # free of the rightful claim of any third person regarding infringement
16 | # or the like. Any license provided herein, whether implied or
17 | # otherwise, applies only to this software file. Patent licenses, if
18 | # any, provided herein do not apply to combinations of this program with
19 | # other software, or any other product whatsoever.
20 | #
21 | # You should have received a copy of the GNU General Public License
22 | # along with this program; if not, write the Free Software Foundation,
23 | # Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
24 | #
25 |
26 | #######################################################################
27 | # Initialization:
28 |
29 | if [ -n "${OCF_DEBUG_LIBRARY}" ]; then
30 | . ${OCF_DEBUG_LIBRARY}
31 | else
32 | : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
33 | . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
34 | fi
35 |
36 | #######################################################################
37 |
38 | meta_data() {
39 | cat <
41 |
42 |
43 | 1.0
44 |
45 |
46 | The ganesha_trigger Resource Agent triggers the TAKE_IP GRACE action of the
47 | ganesha nfs daemon.
48 |
49 | ganesha TAKE_IP trigger
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 | END
64 | }
65 |
66 | #######################################################################
67 | CMD=`basename $0`
68 |
69 | trigger_usage() {
70 | cat <
7 | # All Rights Reserved.
8 | #
9 | # License: GPLv2 or any later version.
10 | # See the file LICENSE or http://www.gnu.org/licenses/gpl-2.0.en.html#SEC1
11 | #
12 |
13 | HA_NUM_SERVERS=0
14 | HA_SERVERS=""
15 | SYS_CONFDIR="/etc/sysconfig"
16 | HA_CONF="${SYS_CONFDIR}/storhaug.conf"
17 | HA_CONF_INCDIR="${SYS_CONFDIR}/storhaug.d"
18 | HA_MNT_DIR="/run/storhaug"
19 | HA_SMB_MNT_DIR="lock"
20 | HA_NFS_MNT_DIR="state"
21 | STORAGE_SERVERS=""
22 | STORAGE_NUM_SERVERS=0
23 | DETERMINISTIC_FAILOVER=false
24 | SERVICE_MAN="DISTRO_NOT_FOUND"
25 | SECRET_PEM="${HA_CONF_INCDIR}/secret.pem"
26 |
27 | ### Utility functions
28 |
29 | _hostname=$(hostname)
30 |
31 | usage()
32 | {
33 | echo -e "Usage: `basename "$0"` [] []"
34 | echo -e "Manage a storhaug high-availability (HA) storage cluster."
35 | echo -e "\nGlobal OPTIONS:"
36 | echo -e " -h, --help Output this useful help message"
37 | echo -e "\nCOMMANDS:"
38 | echo -e " status Check the status of the cluster"
39 | echo -e " setup Setup a new cluster"
40 | echo -e " teardown Teardown an existing cluster"
41 | echo -e " cleanup Cleanup local cluster config"
42 | echo -e " cleanup-all Cleanup cluster config on all nodes"
43 | echo -e " add Add a node to the cluster"
44 | echo -e " delete, remove Remove a node from the cluster"
45 | echo -e "\nCommand ARGUMENTS:"
46 | echo -e " add Add hostname NODE to the cluster"
47 | echo -e " remove Remove hostname NODE from the cluster"
48 | echo -e " delete Synonym for 'remove'"
49 | echo -e "\n\nConfiguration is read from the following locations:"
50 | echo -e " ${HA_CONF}"
51 | echo -e " ${HA_CONF_INCDIR}/*.conf"
52 | }
53 |
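# An illustrative configuration sketch (variable names are the ones read by
# this script; the values below are examples only -- see storhaug.conf.sample
# for the authoritative template):
#
#   # /etc/sysconfig/storhaug.conf
#   HA_NAME="storhaug-demo"
#   HA_PASSWORD="hacluster"
#   HA_CLUSTER_NODES="node1,node2,node3"
#   STORAGE_NODES="node1,node2"        # optional, defaults to all cluster nodes
#   HA_VIPS="192.168.122.201 192.168.122.202"
#   HA_SERVICES="nfs smb"
#   DETERMINISTIC_FAILOVER="false"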
54 | parsebool()
55 | {
56 | case $(eval echo \${${1}}) in
57 | TRUE | True | true | YES | Yes | yes) eval "${1}"=true ;;
58 | FALSE | False | false | NO | No | no) eval "${1}"=false ;;
59 | *) storlog "ERR" "Couldn't parse boolean: ${1}=$(eval echo \${${1}})" ;;
60 | esac
61 | }
62 |
63 | storlog()
64 | {
65 | LEVEL=$1; shift
66 | case $LEVEL in
67 | ERR|ERROR)
68 | echo "ERROR: $1" >&2
69 | logger --tag="storhaug" -p "daemon.err" "$1"
70 | rm -rf $HA_CONF_secdir
71 | exit 1
72 | ;;
73 | WARN|WARNING)
74 | echo "WARNING: $1"
75 | logger --tag="storhaug" -p "daemon.warn" "$1"
76 | ;;
77 | INFO)
78 | echo "$1"
79 | logger --tag="storhaug" -p "daemon.info" "$1"
80 | ;;
81 | DEBUG)
82 | logger --tag="storhaug" -p "daemon.debug" "$1"
83 | ;;
84 | esac
85 | }
86 |
87 | sshdo()
88 | {
89 | if [ "${_hostname}" == "${1}" ] || \
90 | [ "${_hostname%%.*}" == "${1%%.*}" ] || \
91 | [ "localhost" == "${1}" ]; then
92 | ${2}
93 | else
94 | ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i ${SECRET_PEM} root@${1} "${2}"
95 | fi
96 | local _ret=$?
97 | if [ $_ret -ne 0 ]; then
98 | storlog "WARN" "Command failed on ${1}: ${2}"
99 | fi
100 | return $_ret
101 | }
102 |
103 | scpdo()
104 | {
105 | # avoid prompting for password, even with password-less scp
106 | # scp $host1:$file $host2:$file prompts for the password
107 | scp -3 -r -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i ${SECRET_PEM} ${1} ${2}
108 | local _ret=$?
109 | if [ $_ret -ne 0 ]; then
110 | storlog "WARN" "SCP failed from ${1} to ${2}"
111 | fi
112 | return $_ret
113 | }
114 |
115 | # Check that a symlink exists, create it otherwise.
116 | # Usage: ensure_ln TARGET LINK_NAME
117 | ensure_ln ()
118 | {
119 | if [ ! -L "${2}" ] ; then
120 | rm -rf "${2}"
121 | else
122 | _t=$(readlink "${2}")
123 | if [ "$_t" != "${1}" ] ; then
124 | rm -f "${2}"
125 | fi
126 | fi
127 | # This is not an "else". It also re-creates the link if it was
128 | # removed above!
129 | if [ ! -e "${2}" ]; then
130 | ln -sf "${1}" "${2}"
131 | fi
132 | }
133 |
134 | # Check that a directory exists, create it otherwise.
135 | # Only use on paths guaranteed to be directories.
136 | ensure_dir ()
137 | {
138 | if [ ! -d "${1}" ] ; then
139 | mkdir -p "${1}"
140 | fi
141 | }
142 |
143 | # Check that a file exists, touch it otherwise.
144 | # Only use on paths guaranteed to be regular files.
145 | ensure_file ()
146 | {
147 | if [ ! -e "${1}" ]; then
148 | touch "${1}"
149 | fi
150 | }
151 |
152 | ### General cluster functions
153 |
154 | check_cluster_exists()
155 | {
156 | local name=${1}
157 |
158 | if [ -e /var/run/corosync.pid ]; then
159 | local cluster_name=$(pcs status | grep "Cluster name:" | cut -d ' ' -f 3)
160 | if [ -n "${cluster_name}" ] && [ "${cluster_name}" = "${name}" ]; then
161 | storlog "ERR" "Cluster $name already exists, exiting"
162 | fi
163 | fi
164 | }
165 |
166 | determine_servers()
167 | {
168 | local cmd=${1}
169 | local num_servers=0
170 | local tmp_ifs=${IFS}
171 |
172 | if [[ "X${cmd}X" != "XsetupX" ]]; then
173 | local ha_servers=$(pcs status | grep "Online:" | grep -o '\[.*\]' | sed -e 's/\[//' | sed -e 's/\]//')
174 | IFS=$' '
175 | for server in ${ha_servers} ; do
176 | num_servers=$(expr ${num_servers} + 1)
177 | done
178 | IFS=${tmp_ifs}
179 | HA_NUM_SERVERS=${num_servers}
180 | HA_SERVERS="${ha_servers}"
181 | # TODO: Determine storage and vip servers from pcs status
182 | if [[ "x${STORAGE_NODES}" != "x" ]]; then
183 | STORAGE_SERVERS="${STORAGE_NODES//,/ }"
184 | STORAGE_NUM_SERVERS=$(wc -w <<< "${STORAGE_SERVERS}")
185 | else
186 | STORAGE_SERVERS=${HA_SERVERS}
187 | STORAGE_NUM_SERVERS=${HA_NUM_SERVERS}
188 | fi
189 | if [[ "x${HA_VIP_NODES}" != "x" ]]; then
190 | VIP_SERVERS="${HA_VIP_NODES//,/ }"
191 | fi
192 | else
193 | IFS=$','
194 | for server in ${HA_CLUSTER_NODES} ; do
195 | num_servers=$(expr ${num_servers} + 1)
196 | done
197 | IFS=${tmp_ifs}
198 | HA_NUM_SERVERS=${num_servers}
199 | HA_SERVERS="${HA_CLUSTER_NODES//,/ }"
200 | if [[ "x${STORAGE_NODES}" != "x" ]]; then
201 | STORAGE_SERVERS="${STORAGE_NODES//,/ }"
202 | STORAGE_NUM_SERVERS=$(wc -w <<< "${STORAGE_SERVERS}")
203 | else
204 | STORAGE_SERVERS=${HA_SERVERS}
205 | STORAGE_NUM_SERVERS=${HA_NUM_SERVERS}
206 | fi
207 | if [[ "x${HA_VIP_NODES}" != "x" ]]; then
208 | VIP_SERVERS="${HA_VIP_NODES//,/ }"
209 | fi
210 | fi
211 | }
212 |
213 | copy_config()
214 | {
215 | while [[ ${1} ]]; do
216 | if [ ${_hostname%%.*} != ${1%%.*} ]; then
217 | scpdo ${HA_CONF} ${1}:$(dirname ${HA_CONF})/
218 | scpdo ${HA_CONF_INCDIR} ${1}:$(dirname ${HA_CONF_INCDIR})/
219 | fi
220 | shift
221 | done
222 | }
223 |
224 | # TODO: Move to RA
225 | copy_nfs_config()
226 | {
227 | if [ ${_hostname%%.*} != ${1%%.*} ]; then
228 | scpdo ${HA_VOL_SERVER}:${HA_NFS_CONF} ${1}:${HA_NFS_CONF}
229 | scpdo ${HA_VOL_SERVER}:${HA_NFS_EXPDIR} ${1}:${HA_NFS_EXPDIR}
230 | fi
231 | }
232 |
233 | ### General resource functions
234 |
235 | clear_virt_ip_constraints()
236 | {
237 | local cibfile=${1}; shift
238 | pcs -f ${cibfile} constraint remove *vip* || \
239 | storlog "WARN" "Failed: pcs constraint remove *vip*"
240 | }
241 |
242 | do_create_virt_ip_constraints()
243 | {
244 | local cibfile=${1}; shift
245 | local ipcount=${1}; shift
246 | local primary=${1}; shift
247 | local weight="1000"
248 |
249 | # A set of location constraints to set the preferred order
250 | # for where a VIP should move
251 | while [[ ${1} ]]; do
252 | pcs -f ${cibfile} constraint location vip${ipcount} prefers ${1}=${weight} || \
253 | storlog "WARN" "Failed: pcs constraint location vip${ipcount} prefers ${1}=${weight}"
254 | weight=$(expr ${weight} + 1000)
255 | shift
256 | done
257 |
258 | # Set the highest preference for the VIP to its primary node
259 | pcs -f ${cibfile} constraint location vip${ipcount} prefers ${primary}=${weight} || \
260 | storlog "WARN" "Failed: pcs constraint location vip${ipcount} prefers ${primary}=${weight}"
261 | }
262 |
263 | create_virt_ip_constraints()
264 | {
265 | local cibfile=${1}; shift
266 | local ipcount=${1}; shift
267 | local srvcount=${ipcount}
268 | local primary=""
269 | local head=""
270 | local tail=""
271 |
272 | # build a list of failover peers, e.g. for a four node cluster, for node1,
273 | # the result is "node2 node3 node4"; for node2, "node3 node4 node1"
274 | # and so on.
275 | read -r -a servers <<< "${VIP_SERVERS:-$STORAGE_SERVERS}"
276 | while [ ${srvcount} -gt ${STORAGE_NUM_SERVERS} ]; do
277 | srvcount=$((srvcount - STORAGE_NUM_SERVERS))
278 | done
279 | primary=${servers[${srvcount}-1]}
280 | if [ ${STORAGE_NUM_SERVERS} -gt 1 ]; then
281 | head=${servers[@]:${srvcount}-${STORAGE_NUM_SERVERS}-1}
282 | tail=${servers[@]:${srvcount}}
283 | fi
284 |
285 | do_create_virt_ip_constraints ${cibfile} ${ipcount} ${primary} ${tail} ${head}
286 | }
287 |
288 | stack_resources_on_vip_smb()
289 | {
290 | local cibfile=${1}; shift
291 | local ipcount=${1}; shift
292 |
293 | pcs -f ${cibfile} constraint colocation add vip${ipcount} with ctdb-master INFINITY
294 | pcs -f ${cibfile} constraint order ctdb-master then vip${ipcount}
295 | }
296 |
297 | stack_resources_on_vip_nfs()
298 | {
299 | local cibfile=${1}; shift
300 | local ipcount=${1}; shift
301 |
302 | pcs -f ${cibfile} resource create vip${ipcount}_trigger ocf:heartbeat:ganesha_trigger \
303 | --clone vip${ipcount}_trigger-clone vip${ipcount}_trigger \
304 | meta interleave="true" globally-unique="false" notify="true" clone-max=1
305 |
306 | pcs -f ${cibfile} constraint colocation add vip${ipcount}_trigger-clone with vip${ipcount} INFINITY
307 | pcs -f ${cibfile} constraint order vip${ipcount} then vip${ipcount}_trigger-clone
308 |
309 | pcs -f ${cibfile} constraint colocation add vip${ipcount} with nfs-ganesha-clone INFINITY
310 | pcs -f ${cibfile} constraint order nfs-ganesha-clone then vip${ipcount}
311 | }
312 |
313 | stack_resources_on_vip()
314 | {
315 | local cibfile=${1}; shift
316 | local ipcount=${1}; shift
317 |
318 | for svc in ${HA_SERVICES}; do
319 | if [[ "${svc}" == "nfs" ]]; then
320 | stack_resources_on_vip_nfs ${cibfile} ${ipcount}
321 | elif [[ "${svc}" == "smb" ]]; then
322 | stack_resources_on_vip_smb ${cibfile} ${ipcount}
323 | fi
324 | done
325 | }
326 |
327 | create_virt_ip()
328 | {
329 | local cibfile=${1}; shift
330 | local ipcount=${1}; shift
331 | local ip=${1}; shift
332 |
333 | pcs -f ${cibfile} resource create vip${ipcount} ocf:heartbeat:IPaddr2 \
334 | params \
335 | ip=${ip} \
336 | flush_routes="true" \
337 | op monitor interval=60s \
338 | meta resource-stickiness="0"
339 |
340 | pcs -f ${cibfile} constraint location vip${ipcount} rule resource-discovery=exclusive score=0 role eq storage
341 |
342 | stack_resources_on_vip ${cibfile} ${ipcount}
343 | }
344 |
345 | ### Setup functions
346 |
347 | setup_cluster()
348 | {
349 | local unclean=""
350 |
351 | storlog "INFO" "Setting up cluster ${HA_NAME} on the following servers: ${servers}"
352 |
353 | pcs cluster auth ${HA_SERVERS} -u hacluster -p ${HA_PASSWORD} --force
354 | pcs cluster setup --force --name ${HA_NAME} ${HA_SERVERS} || storlog "ERR" "Failed to setup cluster ${HA_NAME}"
355 | pcs cluster start --all || storlog "ERR" "Failed to start cluster ${HA_NAME}"
356 |
357 | sleep 3
358 | unclean=$(pcs status | grep -u "UNCLEAN")
359 | while [[ "${unclean}X" = "UNCLEANX" ]]; do
360 | sleep 1
361 | unclean=$(pcs status | grep -u "UNCLEAN")
362 | done
363 | sleep 1
364 |
365 | local tmp_ifs=${IFS}
366 | IFS=$' '
367 | for server in ${STORAGE_SERVERS:-$HA_SERVERS} ; do
368 | pcs property set --node $server role=storage || \
369 | storlog "WARN" "Failed: pcs property set --node $server role=storage"
370 | done
371 | IFS=${tmp_ifs}
372 |
373 | if [ ${HA_NUM_SERVERS} -lt 3 ]; then
374 | pcs property set no-quorum-policy=ignore || \
375 | storlog "WARN" "Failed: pcs property set no-quorum-policy=ignore"
376 | fi
377 | pcs property set stonith-enabled=false || storlog "WARN" "Failed: pcs property set stonith-enabled=false"
378 | }
379 |
380 | setup_create_resources_shared_glusterfs()
381 | {
382 | local cibfile="$1"
383 |
384 | mkdir -p "${HA_MNT_DIR}/${HA_NFS_MNT_DIR}"
385 | pcs -f ${cibfile} resource create ganesha_state ocf:heartbeat:Filesystem \
386 | params \
387 | device="localhost:/${HA_NFS_VOL}" \
388 | directory="${HA_MNT_DIR}/${HA_NFS_MNT_DIR}" \
389 | fstype="glusterfs" \
390 | options="_netdev,defaults,direct-io-mode=enable,transport=tcp,xlator-option=*client*.ping-timeout=10" \
391 | --clone ganesha_state-clone ganesha_state meta interleave="true" clone-max="${STORAGE_NUM_SERVERS}"
392 |
393 | pcs -f ${cibfile} constraint location ganesha_state-clone rule resource-discovery=exclusive score=0 role eq storage
394 |
395 | pcs cluster cib-push ${cibfile} || storlog "ERR" "Failed to create filesystem resources."
396 | }
397 |
398 | # nothing needed for now
399 | setup_create_resources_shared_rados_kv()
400 | {
401 | :
402 | }
403 |
404 | setup_create_resources_nfs()
405 | {
406 | local cibfile="$1"
407 |
408 | # Shared volumes
409 | if [[ ${HA_NFS_TYPE} == "glusterfs" ]]; then
410 | setup_create_resources_shared_glusterfs ${cibfile}
411 | elif [[ ${HA_NFS_TYPE} == "rados_kv" ]]; then
412 | setup_create_resources_shared_rados_kv ${cibfile}
413 | fi
414 |
415 | # Ganesha
416 | pcs -f ${cibfile} resource create nfs-ganesha ocf:heartbeat:ganesha \
417 | params \
418 | config="${HA_NFS_CONF}" \
419 | state_fs="${HA_NFS_TYPE}" \
420 | state_mnt="${HA_MNT_DIR}/${HA_NFS_MNT_DIR}" \
421 | --clone nfs-ganesha-clone ganesha meta interleave="true" \
422 | globally-unique="false" \
423 | notify="true"
424 |
425 | if [[ ${HA_NFS_TYPE} == "glusterfs" ]]; then
426 | # Ganesha: We need our shared state FS
427 | pcs -f ${cibfile} constraint colocation add nfs-ganesha-clone with ganesha_state-clone INFINITY
428 | pcs -f ${cibfile} constraint order ganesha_state-clone then nfs-ganesha-clone INFINITY
429 | fi
430 |
431 | pcs cluster cib-push ${cibfile} || storlog "ERR" "Failed to create nfs service resources."
432 | }
433 |
434 | setup_create_resources_smb()
435 | {
436 | local cibfile="$1"
437 |
438 | # CTDB
439 | pcs -f ${cibfile} resource create ctdb ocf:heartbeat:CTDB \
440 | params \
441 | ctdb_socket="/var/run/ctdb/ctdbd.socket" \
442 | ctdb_manages_winbind="no" \
443 | ctdb_manages_samba="no" \
444 | ctdb_logfile="/var/log/log.ctdb" \
445 | op monitor interval="10" timeout="30" \
446 | op monitor interval="11" timeout="30" role="Master" \
447 | op start interval="0" timeout="90" \
448 | op stop interval="0" timeout="100" \
449 | op promote interval="0" timeout="30" \
450 | op demote interval="0" timeout="30" \
451 | --master meta interleave="true" globally-unique="false" notify="true" clone-max="${STORAGE_NUM_SERVERS}"
452 |
453 | # Samba
454 | pcs -f ${cibfile} resource create nmb systemd:nmb \
455 | op start timeout="60" interval="0" \
456 | op stop timeout="60" interval="0" \
457 | op monitor interval="60" timeout="60"
458 | pcs -f ${cibfile} resource create smb systemd:smb \
459 | op start timeout="60" interval="0" \
460 | op stop timeout="60" interval="0" \
461 | op monitor interval="60" timeout="60"
462 | pcs -f ${cibfile} resource group add samba-group nmb smb
463 | pcs -f ${cibfile} resource clone samba-group meta interleave="true" clone-max="${STORAGE_NUM_SERVERS}"
464 |
465 | # Samba: We need CTDB
466 | pcs -f ${cibfile} constraint colocation add samba-group-clone with ctdb-master INFINITY
467 | pcs -f ${cibfile} constraint order ctdb-master then samba-group-clone INFINITY
468 |
469 | pcs cluster cib-push ${cibfile} || storlog "ERR" "Failed to create smb service resources."
470 | }
471 |
472 | setup_create_resources_vip()
473 | {
474 | local cibfile="$1"
475 |
476 | # Virtual IPs
477 | local ipcount=0
478 | for ip in ${HA_VIPS}; do
479 | ((ipcount++))
480 | create_virt_ip ${cibfile} ${ipcount} ${ip}
481 | done
482 |
483 | if [[ ${DETERMINISTIC_FAILOVER} == true ]]; then
484 | for ((i=1;i<=${ipcount};i++)); do
485 | create_virt_ip_constraints ${cibfile} ${i}
486 | done
487 | fi
488 |
489 | pcs cluster cib-push ${cibfile} || storlog "ERR" "Failed to create virtual IP resources."
490 | }
491 |
492 | setup_create_resources()
493 | {
494 | local cibfile=$(mktemp --tmpdir=$HA_CONF_secdir)
495 |
496 | pcs cluster cib ${cibfile}
497 |
498 | for svc in ${HA_SERVICES}; do
499 | if [[ "${svc}" == "nfs" ]]; then
500 | setup_create_resources_nfs ${cibfile}
501 | elif [[ "${svc}" == "smb" ]]; then
502 | setup_create_resources_smb ${cibfile}
503 | fi
504 | done
505 | setup_create_resources_vip ${cibfile}
506 |
507 | rm -f ${cibfile}
508 | }
509 |
510 | setup_state_volume_glusterfs()
511 | {
512 | local mnt=$(mktemp -d --tmpdir=$HA_CONF_secdir)
513 | local dname=""
514 | local dirname=""
515 | local staterefs="${mnt}/nfs-ganesha/.noderefs"
516 |
517 | mount -t glusterfs ${HA_SERVER}:/${HA_NFS_VOL} ${mnt}
518 | ensure_dir ${staterefs}
519 |
520 | dname=${_hostname#*.}
521 |
522 | for srv in ${STORAGE_SERVERS:-$HA_SERVERS}; do
523 |
524 | if [[ "${srv}" == *${dname} ]]; then
525 | dirname=${srv}
526 | else
527 | dirname=${srv}.${dname}
528 | fi
529 |
530 | ensure_dir ${mnt}/nfs-ganesha/${dirname}/ganesha/v4recov
531 | ensure_dir ${mnt}/nfs-ganesha/${dirname}/ganesha/v4old
532 | ensure_dir ${mnt}/nfs-ganesha/${dirname}/statd/sm
533 | ensure_dir ${mnt}/nfs-ganesha/${dirname}/statd/sm.bak
534 | ensure_file ${mnt}/nfs-ganesha/${dirname}/state
535 | ensure_file ${mnt}/nfs-ganesha/${dirname}/statd/state
536 | ensure_ln ${HA_MNT_DIR}/${HA_NFS_MNT_DIR}/nfs-ganesha/${dirname} ${staterefs}/${dirname}
537 |
538 | for server in ${HA_SERVERS} ; do
539 | if [[ "${server}" == *${dname} ]]; then
540 | server=${server}
541 | else
542 | server=${server}.${dname}
543 | fi
544 | if [ ${server} != ${dirname} ]; then
545 | ensure_ln ${HA_MNT_DIR}/${HA_NFS_MNT_DIR}/nfs-ganesha/.noderefs/${server}/ganesha ${mnt}/nfs-ganesha/${dirname}/ganesha/${server}
546 | ensure_ln ${HA_MNT_DIR}/${HA_NFS_MNT_DIR}/nfs-ganesha/.noderefs/${server}/statd ${mnt}/nfs-ganesha/${dirname}/statd/${server}
547 | fi
548 | done
549 | shift
550 | done
551 |
552 | umount ${mnt}
553 | rmdir ${mnt}
554 | }
555 |
556 | # nothing needed for now
557 | setup_state_volume_rados_kv()
558 | {
559 | :
560 | }
561 |
562 | # TODO: Move to RA
563 | setup_state_volume()
564 | {
565 | if [[ ${HA_NFS_TYPE} == "glusterfs" ]]; then
566 | setup_state_volume_glusterfs
567 | elif [[ ${HA_NFS_TYPE} == "rados_kv" ]]; then
568 | setup_state_volume_rados_kv
569 | fi
570 | }
571 |
572 | ### Teardown functions
573 |
574 | teardown_cluster()
575 | {
576 | for server in ${HA_SERVERS} ; do
577 | if [[ ${HA_CLUSTER_NODES} != *${server}* ]]; then
578 | storlog "INFO" "${server} is not in config, removing"
579 | pcs cluster stop ${server} || storlog "WARN" "Failed: pcs cluster stop ${server}"
580 | pcs cluster node remove ${server} || storlog "WARN" "Failed: pcs cluster node remove ${server}"
581 | fi
582 | done
583 |
584 | # BZ 1193433 - pcs doesn't reload cluster.conf after modification
585 | # after teardown completes, a subsequent setup will appear to have
586 | # 'remembered' the deleted node. You can work around this by
587 | # issuing another
588 | # pcs cluster node remove $node
589 | # or
590 | # crm_node -f -R $server
591 | # or
592 | # cibadmin --delete --xml-text '
593 |
594 | pcs cluster stop --all || storlog "WARN" "Failed to stop cluster ${HA_NAME}"
595 |
596 | pcs cluster destroy || storlog "ERR" "Failed to destroy cluster ${HA_NAME}"
597 | }
598 |
599 | ### Cleanup functions
600 |
601 | cleanup_config()
602 | {
603 | local _cmd='eval "rm -rf ${SYS_CONFDIR}/cluster/cluster.conf*; \
604 | rm -rf /var/lib/pacemaker/cib/*; \
605 | rm -rf ${HA_NFS_EXPDIR}/*.conf; \
606 | sed -r -i -e '"'"'/^%include[[:space:]]+\".+\\.conf\"$/d'"'"' ${HA_NFS_CONF}"'
607 | sshdo "${1}" "${_cmd}"
608 | }
609 |
610 | ### AddNode functions
611 |
612 | addnode()
613 | {
614 | local node=${1}; shift
615 | local vip=${1}; shift
616 | local role=${1}; shift
617 |
618 | storlog "INFO" "Adding node ${node} to ${HA_NAME}"
619 |
620 | HA_CLUSTER_NODES="$HA_CLUSTER_NODES,$node"
621 | if [ "${role}" == *storage* ]; then
622 | STORAGE_NODES="$STORAGE_NODES,$node"
623 | fi
624 | if [ "x${vip}" != "x" ]; then
625 | HA_VIPS="${HA_VIPS} ${vip}"
626 | if [[ ${DETERMINISTIC_FAILOVER} == true && "x${HA_VIP_NODES}" != "x" ]]; then
627 | HA_VIP_NODES="${HA_VIP_NODES},${node}"
628 | fi
629 | fi
630 | determine_servers "add"
631 |
632 | pcs cluster node add ${node} || storlog "WARN" "Failed: pcs cluster node add ${node}"
633 | pcs cluster start ${node} || storlog "ERR" "Failed: pcs cluster start ${node}"
634 |
635 | if [ "${role}" == *storage* ]; then
636 | pcs property set --node ${node} role=storage || \
637 | storlog "WARN" "Failed: pcs property set --node ${node} role=storage"
638 | fi
639 |
640 | if [ "x${vip}" != "x" ]; then
641 | local cibfile=$(mktemp --tmpdir=$HA_CONF_secdir)
642 | pcs cluster cib ${cibfile}
643 |
644 | local ipcount=$(wc -w <<< "${HA_VIPS}")
645 | create_virt_ip ${cibfile} ${ipcount} ${vip}
646 | if [[ ${DETERMINISTIC_FAILOVER} == true ]]; then
647 | clear_virt_ip_constraints ${cibfile}
648 | for ((i=1;i<=${ipcount};i++)); do
649 | create_virt_ip_constraints ${cibfile} ${i}
650 | done
651 | fi
652 | pcs cluster cib-push ${cibfile} || \
653 | storlog "ERR" "Failed to add virtual IP resources."
654 | fi
655 |
656 | sed -i "s/\\(HA_CLUSTER_NODES=\\).*/\\1\"${HA_CLUSTER_NODES}\"/" ${HA_CONF}
657 | if [ "${role}" == *storage* ]; then
658 | if grep -q STORAGE_NODES ${HA_CONF}; then
659 | sed -i "s/\\(STORAGE_NODES=\\).*/\\1\"${STORAGE_NODES}\"/" ${HA_CONF}
660 | else
661 | echo "STORAGE_NODES=\"${STORAGE_NODES}\"" >> ${HA_CONF}
662 | fi
663 | fi
664 | if [ "x${vip}" != "x" ]; then
665 | sed -i "s/\\(HA_VIPS=\\).*/\\1\"${HA_VIPS}\"/" ${HA_CONF}
666 | if [[ ${DETERMINISTIC_FAILOVER} == true && "x${HA_VIP_NODES}" != "x" ]]; then
667 | sed -i "s/\\(HA_VIP_NODES=\\).*/\\1\"${HA_VIP_NODES}\"/" ${HA_CONF}
668 | fi
669 | fi
670 | }
671 |
672 | ### DeleteNode functions
673 |
674 | deletenode()
675 | {
676 | local node=${1}; shift
677 | local vip=${1}; shift
678 |
679 | storlog "INFO" "Deleting node ${node} from ${HA_NAME}"
680 |
681 | HA_CLUSTER_NODES="${HA_CLUSTER_NODES//$node}"
682 | if [[ ${DETERMINISTIC_FAILOVER} == true && "x${HA_VIP_NODES}" != "x" ]]; then
683 | HA_VIP_NODES="${HA_VIP_NODES//$node}"
684 | fi
685 | if [[ "x${STORAGE_NODES}" != "x" ]]; then
686 | STORAGE_NODES="${STORAGE_NODES//$node}"
687 | fi
688 | determine_servers "delete"
689 |
690 | pcs cluster node remove ${node} || storlog "ERR" "Failed: pcs cluster node remove ${node}"
691 |
692 | if [[ ${DETERMINISTIC_FAILOVER} == true ]]; then
693 | local cibfile=$(mktemp --tmpdir=$HA_CONF_secdir)
694 | pcs cluster cib ${cibfile}
695 |
696 | local ipcount=$(wc -w <<< "${HA_VIPS}")
697 | clear_virt_ip_constraints ${cibfile}
698 | # TODO: delete_virt_ip ${cibfile} ${ipcount} ${vip}
699 | for ((i=1;i<=${ipcount};i++)); do
700 | create_virt_ip_constraints ${cibfile} ${i}
701 | done
702 | pcs cluster cib-push ${cibfile} || storlog "ERR" "Failed to refresh deterministic failover."
703 | fi
704 |
705 | sed -i "s/\\(HA_CLUSTER_NODES=\\).*/\\1\"${HA_CLUSTER_NODES}\"/" ${HA_CONF}
706 | if [[ ${DETERMINISTIC_FAILOVER} == true && "x${HA_VIP_NODES}" != "x" ]]; then
707 | sed -i "s/\\(HA_VIP_NODES=\\).*/\\1\"${HA_VIP_NODES}\"/" ${HA_CONF}
708 | fi
709 | if grep -q STORAGE_NODES ${HA_CONF}; then
710 | sed -i "s/\\(STORAGE_NODES=\\).*/\\1\"${STORAGE_NODES}\"/" ${HA_CONF}
711 | fi
712 | }
713 |
714 | ### Refresh functions
715 |
716 | # TODO: Move to RA
717 | refresh_nfs_config()
718 | {
719 | local VOL=${1}; shift
720 | local HA_CONFDIR=${1}; shift
721 | local tganesha_vol_conf=$(mktemp)
722 |
723 | cp ${HA_NFS_EXPDIR}/export.$VOL.conf ${tganesha_vol_conf}
724 | local get_id="cat $HA_NFS_EXPDIR/export.$VOL.conf | grep Export_Id | cut -d \" \" -f8"
725 |
726 | if [ -e ${SECRET_PEM} ]; then
727 | while [[ ${1} ]]; do
728 | _id=$(sshdo ${current_host} "${get_id}")
729 | _out=$(sshdo ${current_host} "dbus-send --print-reply \
730 | --system --dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
731 | org.ganesha.nfsd.exportmgr.RemoveExport uint16:$_id 2>&1")
732 | _ret=$?
733 | storlog "DEBUG" "${_out}"
734 | if [ ${_ret} -ne 0 ]; then
735 | storlog "ERR" "RemoveExport failed on ${current_host}."
736 | fi
737 |
738 | sleep 1
739 | sed -i s/Export_Id.*/"Export_Id= $_id ;"/ ${tganesha_vol_conf}
740 | if [ ${_hostname%%.*} != ${1%%.*} ]; then
741 | scpdo ${tganesha_vol_conf} \
742 | ${current_host}:${HA_NFS_EXPDIR}/export.$VOL.conf
743 | fi
744 |
745 | _out=$(sshdo ${current_host} "dbus-send --system \
746 | --dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
747 | org.ganesha.nfsd.exportmgr.AddExport string:$HA_NFS_EXPDIR/export.$VOL.conf \
748 | string:\"EXPORT(Path=/$VOL)\"")
749 | _ret=$?
750 | storlog "DEBUG" "${_out}"
751 | if [ ${_ret} -ne 0 ]; then
752 | storlog "ERR" "AddExport failed on ${current_host}."
753 | fi
754 | storlog "DEBUG" "refresh-config completed for ${current_host}."
755 | shift
756 | done
757 | else
758 | storlog "ERR" "refresh-config failed: Passwordless ssh is not enabled."
759 | fi
760 | rm -f ${tganesha_vol_conf}
761 | }
762 |
763 | ### Mainline
764 |
765 | cmd=${1}; shift
766 | if [[ ${cmd} == *help || ${cmd} == "-h" ]]; then
767 | usage
768 | exit 0
769 | elif [[ ${cmd} == *status ]]; then
770 | exec pcs status
771 | exit 0
772 | fi
773 |
774 | HA_CONF_secdir=$(mktemp -d --tmpdir "$(basename $0).XXXXXXXXXX")
775 | HA_CONF_sec="$HA_CONF_secdir/sec.conf"
776 |
777 | # Filter all config files into secure format
778 | egrep '^#|^[^ ]*=[^;&]*' "$HA_CONF" > "$HA_CONF_sec"
779 | for conffile in `ls $HA_CONF_INCDIR/*.conf 2>/dev/null`; do
780 | egrep '^#|^[^ ]*=[^;&]*' "$conffile" >> "$HA_CONF_sec"
781 | done
782 |
783 | # Source/load the config
784 | . $HA_CONF_sec
785 |
786 | parsebool "DETERMINISTIC_FAILOVER"
787 |
788 | if [ -z "$HA_NFS_CONF" ]; then
789 | # Try loading the NFS-Ganesha config from various distro-specific locations
790 | if [ -f /etc/sysconfig/ganesha ]; then
791 | GANSYSCONF="/etc/sysconfig/ganesha"
792 | elif [ -f /etc/conf.d/ganesha ]; then
793 | GANSYSCONF="/etc/conf.d/ganesha"
794 | elif [ -f /etc/default/ganesha ]; then
795 | GANSYSCONF="/etc/default/ganesha"
796 | fi
797 |
798 | if [ -z "$GANSYSCONF" ]; then
799 | GANSYSOPTS=$(grep -s "OPTIONS" "$GANSYSCONF")
800 | if [ -n "$GANSYSOPTS" ] && grep -qs "-f" < "${GANSYSOPTS}"; then
801 | HA_NFS_CONF=$(sed -ne 's/^.*-f[= ]*([^\s]*)*/\1/p')
802 | fi
803 | fi
804 | fi
805 | HA_NFS_CONF="${HA_NFS_CONF:-/etc/ganesha/ganesha.conf}"
806 | HA_NFS_EXPDIR="${HA_NFS_EXPDIR:-$(dirname ${HA_NFS_CONF})/exports}"
807 |
808 | case "${cmd}" in
809 | setup | --setup)
810 | storlog "INFO" "Setting up ${HA_NAME}"
811 | check_cluster_exists ${HA_NAME}
812 | determine_servers "setup"
813 |
814 | if [ ${HA_NUM_SERVERS} -gt 1 ]; then
815 | setup_state_volume
816 | setup_cluster
817 | setup_create_resources
818 | copy_config ${HA_SERVERS}
819 | else
820 | storlog "ERR" "Insufficient servers for HA, aborting"
821 | fi
822 | ;;
823 | teardown | --teardown)
824 | storlog "INFO" "Tearing down ${HA_NAME}"
825 | determine_servers "teardown"
826 | teardown_cluster
827 | ;;
828 | cleanup | --cleanup)
829 | cleanup_config $_host
830 | ;;
831 | cleanup-all | --cleanup-all)
832 | for server in ${HA_SERVERS}; do
833 | cleanup_config $server
834 | done
835 | ;;
836 | add | --add)
837 | node=${1}; shift
838 | vip=${1}; shift
839 | role=${1}; shift
840 | copy_nfs_config ${node}
841 | addnode ${node} ${vip} ${role}
842 | copy_config ${HA_SERVERS}
843 | ;;
844 | delete | --delete | remove | --remove)
845 | node=${1}; shift
846 | deletenode ${node}
847 | copy_config ${HA_SERVERS}
848 | ;;
849 | *)
850 | storlog "ERR" "Unknown argument: ${cmd}"
851 | ;;
852 | esac
853 |
854 | rm -rf $HA_CONF_secdir
855 |
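A rough usage sketch of the command-line entry points handled by the case statement above; the node name, VIP and role below are placeholder values, not taken from the script:

    storhaug setup                          # build the pacemaker cluster and create all resources
    storhaug add server4 10.0.0.14 storage  # join a node, give it a VIP, mark it as a storage node
    storhaug delete server4                 # remove a node from the cluster
    storhaug status                         # shorthand for 'pcs status'
    storhaug cleanup-all                    # wipe cluster.conf, the CIB and export snippets on every server
    storhaug teardown                       # stop and destroy the cluster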
--------------------------------------------------------------------------------
/src/storhaug.conf.sample:
--------------------------------------------------------------------------------
1 | # Name of the HA cluster created.
2 | HA_NAME="storhaug"
3 |
4 | # Password of the hacluster user
5 | HA_PASSWORD="hacluster"
6 |
7 | # The server on which cluster-wide configuration is managed.
8 | # IP/Hostname
9 | HA_SERVER="server1"
10 |
11 | # The set of nodes that forms the HA cluster.
12 | # Comma-delimited IP/Hostname list
13 | HA_CLUSTER_NODES="server1,server2,server3,..."
14 |
15 | # [OPTIONAL] A subset of HA nodes that will serve as storage servers.
16 | # Comma-delimited IP/Hostname list
17 | STORAGE_NODES="server2,server3,..."
18 |
19 | # [OPTIONAL] Mount point for shared volumes used by HA resources.
20 | HA_MNT_DIR="/var/run/gluster"
21 |
22 | # Virtual IPs of each of the nodes specified above.
23 | # Whitespace-delimited IP address list
24 | HA_VIPS="10.x.x.x 10.x.x.x"
25 |
26 | # Deterministic Failover: If true, configures virtual IP failover to occur in
27 | # a deterministic order. The optional HA_VIP_NODES setting specifies which node
28 | # will serve as the primary/home node for the corresponding virtual IP address.
29 | # NOTE: These nodes should also be configured as storage nodes, either
30 | # implicitly in HA_CLUSTER_NODES or explicitly in STORAGE_NODES
31 | DETERMINISTIC_FAILOVER=false
32 | HA_VIP_NODES="server2,server3"
33 |
34 | # [NOT IMPLEMENTED YET] Managed access methods
35 | # Whitespace-delimited list. Valid values:
36 | # nfs
37 | # smb
38 | HA_SERVICES="nfs smb"
39 |
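As a worked example of how the add command rewrites this file (node name and address are placeholders; see addnode() in src/storhaug):

    storhaug add server4 10.x.x.4 storage
    # afterwards this file would contain, roughly:
    #   HA_CLUSTER_NODES="server1,server2,server3,...,server4"
    #   STORAGE_NODES="server2,server3,...,server4"
    #   HA_VIPS="10.x.x.x 10.x.x.x 10.x.x.4"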
--------------------------------------------------------------------------------
/vagrant-ansible/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | ENV['VAGRANT_DEFAULT_PROVIDER'] = 'libvirt'
5 |
6 | VAGRANTFILE_API_VERSION = "2"
7 | EL_VER = "7"
8 |
9 | require 'yaml'
10 | require 'io/console'
11 |
12 | projectdir = File.expand_path File.dirname(__FILE__)
13 |
14 | #==============================================================================
15 | #
16 | # Load settings if present, otherwise output sane defaults and exit
17 | #
18 |
19 | settings = {}
20 |
21 | f = File.join(projectdir, 'vagrant.yaml')
22 |
23 | if File.exists?(f)
24 | begin
25 | settings = YAML::load_file f
26 | if settings == false then raise end
27 | rescue
28 | retry
29 | end
30 | else
31 | f2 = File.join(projectdir, 'defaults.yaml')
32 | if File.exists?(f2)
33 | settings = YAML::load_file f2
34 | File.open(f, 'w') do |file|
35 | file.write settings.to_yaml
36 | end
37 | end
38 | puts "Wrote initial (default) config: [ #{f} ]"
39 | puts "Please verify settings and run your command again."
40 | puts "See defaults.yaml for documentation."
41 | exit
42 | end
43 |
44 | vms = settings[:vms]
45 | vms_common = settings[:vms_common]
46 | groups = settings[:groups]
47 | group_vars = settings[:group_vars]
48 | samba = settings[:samba]
49 | ganesha = settings[:ganesha]
50 | ctdb = settings[:ctdb]
51 | ad = settings[:ad]
52 | gluster = settings[:gluster]
53 | ha = settings[:ha]
54 |
55 | #==============================================================================
56 | #
57 | # Derive virtual disk device names and partition numbers
58 | #
59 |
60 | driveletters = ('b'..'z').to_a
61 |
62 | vms_common[:disks].each_with_index do |disk,disk_num|
63 | disk[:num] = disk_num
64 | disk[:dev_names] = {
65 | :libvirt => "vd#{driveletters[disk[:num]]}",
66 | }
67 | disk[:parts].each_with_index do |part,part_num|
68 | part[:num] = part_num + 1
69 | end
70 | end
71 |
72 | #==============================================================================
73 | #
74 | # Define required software for groups
75 | #
76 |
77 | group_defs = {
78 | :ha_servers => {
79 | :install_pkgs => " storhaug",
80 | :services => [],
81 | },
82 | :smb_servers => {
83 | :install_pkgs => " storhaug-smb",
84 | :services => [],
85 | },
86 | :gluster_servers => {
87 | :install_pkgs => " glusterfs-server glusterfs-client",
88 | :services => [ "glusterd" ],
89 | },
90 | :nfs_servers => {
91 | :install_pkgs => " storhaug-nfs",
92 | :services => [ "nfs-ganesha" ],
93 | },
94 | :clients => {
95 | :install_pkgs => " cifs-utils glusterfs-fuse",
96 | },
97 | }
98 | if gluster[:setup_gluster]
99 | group_defs[:smb_servers][:install_pkgs] << " samba-vfs-glusterfs"
100 | group_defs[:nfs_servers][:install_pkgs] << " nfs-ganesha-gluster glusterfs-ganesha"
101 | end
102 | if not ctdb[:setup_ctdb]
103 | group_defs[:smb_servers][:services].push "winbind"
104 | group_defs[:smb_servers][:services].push "smb"
105 | group_defs[:smb_servers][:services].push "nmb"
106 | else
107 | group_defs[:smb_servers][:install_pkgs] << " ctdb"
108 | group_defs[:smb_servers][:services].push "ctdb"
109 | end
110 |
111 | #==============================================================================
112 | #
113 | # active_vms - Keep track of currently running VMs, since vagrant won't tell
114 | # us directly.
115 | #
116 |
117 | active_vms = []
118 |
119 | f = File.join(projectdir, 'active_vms.yaml')
120 |
121 | if File.exists?(f)
122 | begin
123 | active_vms = YAML::load_file f
124 | if active_vms == false then raise end
125 | rescue
126 | retry
127 | end
128 | end
129 |
130 | if ARGV[0] == "up"
131 | cmd_names = ARGV.drop(1).delete_if { |x| x.start_with?("-") or active_vms.include?(x) }
132 | if cmd_names.length > 0 then
133 | active_vms.push(*cmd_names)
134 | else
135 | vms.each do |x|
136 | if not active_vms.include?(x[:name])
137 | active_vms.push x[:name]
138 | end
139 | end
140 | end
141 | elsif ARGV[0] == "destroy" or ARGV[0] == "halt"
142 | cmd_names = ARGV.drop(1).delete_if { |x| x.start_with?("-") or not active_vms.include?(x) }
143 | if cmd_names.length > 0 then
144 | active_vms.delete_if { |x| cmd_names.include?(x) }
145 | else
146 | active_vms = []
147 | end
148 | end
149 |
150 | File.open(f, 'w+') do |file|
151 | file.write active_vms.to_yaml
152 | end
153 |
154 | if ENV['VAGRANT_LOG'] == 'debug'
155 | p "active_vms: #{active_vms}"
156 | end
157 |
158 | #==============================================================================
159 | #
160 | # Build group listings
161 | #
162 |
163 | groups.each do |name,group|
164 | if group.include? "all"
165 | groups[name] = active_vms
166 | else
167 | group.each_with_index do |node,i|
168 | case node
169 | when "first"
170 | groups[name][i] = active_vms[0]
171 | when "last"
172 | groups[name][i] = active_vms[-1]
173 | when "not first"
174 | groups[name] = active_vms.count > 1 ? active_vms[1..-1] : [ active_vms[0] ]
175 | when "not last"
176 | groups[name] = active_vms.count > 1 ? active_vms[0..-2] : [ active_vms[0] ]
177 | when node.is_a?(Integer)
178 | groups[name][i] = active_vms[node]
179 | else
180 | groups[name][i] = node
181 | end
182 | end
183 | end
184 | end
185 | if ad[:setup_ad] and not groups.keys.include? :ad_server
186 | groups[:ad_server] = [ groups[:smb_servers][0] ]
187 | end
188 |
189 | if ENV['VAGRANT_LOG'] == 'debug'
190 | p "groups: #{groups}"
191 | end
192 |
193 | #==============================================================================
194 | #
195 | # Collect packages to install and services to run
196 | #
197 |
198 | install_pkgs = {}
199 | services = {}
200 | if active_vms.length > 0
201 | active_vms.each do |name|
202 | install_pkgs[name] = "yum yum-utils python python-dnf python-simplejson libselinux-python xfsprogs gnupg "
203 | if vms_common[:install_pkgs]
204 | install_pkgs[name] << " " + vms_common[:install_pkgs]
205 | end
206 |
207 | services[name] = []
208 | if vms_common[:services]
209 | services[name].push vms_common[:services]
210 | end
211 | end
212 | groups.each do |name,group|
213 | group.each do |node|
214 | if group_defs and group_defs[name]
215 | install_pkgs[node] << group_defs[name][:install_pkgs] if group_defs[name][:install_pkgs]
216 | services[node].push group_defs[name][:services] if group_defs[name][:services]
217 | end
218 | if group_vars and group_vars[name]
219 | install_pkgs[node] << " " + group_vars[name][:install_pkgs] if group_vars[name][:install_pkgs]
220 | services[node].push group_vars[name][:services] if group_vars[name][:services]
221 | end
222 | end
223 | end
224 | vms.each do |vm|
225 | if vm[:install_pkgs]
226 | install_pkgs[vm[:name]] << " " + vm[:install_pkgs]
227 | end
228 | if vm[:services]
229 | services[vm[:name]].push vm[:services]
230 | end
231 | end
232 | end
233 |
234 | #==============================================================================
235 | #
236 | # Vagrant config
237 | #
238 |
239 | Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
240 | config.ssh.insert_key = false
241 |
242 | config.vm.provider :libvirt do |libvirt|
243 | libvirt.storage_pool_name = vms_common[:storage_pool] ? vms_common[:storage_pool] : 'default'
244 | end
245 |
246 | vms.each do |machine|
247 | config.vm.define machine[:name] do |node|
248 | node.vm.box = vms_common[:box]
249 | node.vm.provider :libvirt do |domain|
250 | domain.memory = vms_common[:memory]
251 | domain.cpus = vms_common[:cpus]
252 | end
253 |
254 | if vms_common[:disks]
255 | vms_common[:disks].each do |disk|
256 | node.vm.provider :libvirt do |lv|
257 | lv.storage :file, :size => "#{disk[:size]}G", :device => "#{disk[:dev_names][:libvirt]}"
258 | disk[:dev] = disk[:dev_names][:libvirt]
259 | end
260 | end
261 | end
262 |
263 | if vms_common[:networks]
264 | vms_common[:networks].each_with_index do |net,i|
265 | netid = net[:netid]
266 | netopts = net.except(:netid)
267 | if machine[:networks] and i < machine[:networks].length
268 | netopts.merge!(machine[:networks][i])
269 | end
270 | node.vm.network netid, netopts
271 | end
272 | end
273 |
274 | if vms_common[:sync_folders]
275 | vms_common[:sync_folders].each do |sync|
276 | src = sync[:src]
277 | dest = sync[:dest]
278 | syncopts = sync.except(:src, :dest)
279 | node.vm.synced_folder src, dest, syncopts
280 | end
281 | end
282 | if machine[:sync_folders]
283 | machine[:sync_folders].each do |sync|
284 | src = sync[:src]
285 | dest = sync[:dest]
286 | syncopts = sync.except(:src, :dest)
287 | node.vm.synced_folder src, dest, syncopts
288 | end
289 | end
290 |
291 | end
292 | end
293 |
294 | if active_vms.length > 0 then
295 | config.vm.define active_vms[0], primary: true do |node|
296 | if ad[:setup_ad]
297 | print "AD Administrator password: "
298 | ad_passwd = STDIN.noecho(&:gets)
299 | end
300 |
301 | system 'mkdir', '-p', 'playbooks/host_vars/'
302 | active_vms.each do |node|
303 | host_vars = {}
304 | host_vars['install_pkgs'] = install_pkgs[node]
305 | host_vars['services'] = services[node]
306 | File.open('playbooks/host_vars/' + node.to_s, 'w+') do |file|
307 | file.write host_vars.to_yaml
308 | end
309 | end
310 |
311 | playbooks = []
312 | if ENV['RUN']
313 | playbooks.push(ENV['RUN'])
314 | else
315 | playbooks.push("playbooks/raw-#{vms_common[:os]}.yml")
316 | custom_pre_provision = ENV['CUSTOM_PRE'] ? ENV['CUSTOM_PRE'] : "playbooks/custom_pre.yml"
317 | if File.exists?(custom_pre_provision)
318 | playbooks.push(custom_pre_provision)
319 | end
320 | playbooks.push("playbooks/storhaug.yml")
321 | custom_post_provision = ENV['CUSTOM_POST'] ? ENV['CUSTOM_POST'] : "playbooks/custom_post.yml"
322 | if File.exists?(custom_post_provision)
323 | playbooks.push(custom_post_provision)
324 | end
325 | end
326 | playbooks.each do |playbook|
327 | node.vm.provision "ansible" do |ansible|
328 | if ENV['ANSIBLE_DEBUG']
329 | ansible.verbose = ENV['ANSIBLE_DEBUG']
330 | end
331 | ansible.playbook = playbook
332 | ansible.groups = {}
333 | groups.each do |name,group|
334 | ansible.groups[name.to_s] = group
335 | end
336 | ansible.extra_vars = {
337 | "el_ver" => EL_VER,
338 | "extra_disks" => vms_common[:disks],
339 | "vips" => ha[:virtual_ips],
340 | "ha_name" => ha[:ha_name],
341 | "samba" => samba,
342 | "ganesha" => ganesha,
343 | "ctdb" => ctdb,
344 | "ad" => ad,
345 | "gluster" => gluster,
346 | }
347 | if ad[:setup_ad]
348 | ansible.extra_vars['ad_passwd'] = ad_passwd
349 | end
350 | if vms_common[:extra_vars]
351 | ansible.extra_vars.merge! vms_common[:extra_vars]
352 | end
353 | if ENV['EXTRA_VARS']
354 | ansible.extra_vars.merge! eval ENV['EXTRA_VARS']
355 | end
356 | ansible.extra_vars['vagrant_home'] = ENV['VAGRANT_HOME'] ? ENV['VAGRANT_HOME'] : "~/.vagrant.d"
357 | ansible.limit = "all"
358 | end
359 | end
360 | end
361 | end
362 | end
363 |
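The provisioning run can be steered with the environment variables read above (RUN, CUSTOM_PRE, CUSTOM_POST, EXTRA_VARS, ANSIBLE_DEBUG, VAGRANT_HOME). A hedged invocation sketch; the variable name inside EXTRA_VARS is a made-up placeholder, and its value must be a Ruby hash literal because the Vagrantfile passes it to eval:

    ANSIBLE_DEBUG=vvv vagrant up node1 node2
    RUN=playbooks/storhaug.yml vagrant provision
    EXTRA_VARS='{ "my_extra_var" => "value" }' vagrant up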
--------------------------------------------------------------------------------
/vagrant-ansible/defaults.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | :vms:
3 | - :name: node1
4 | :networks:
5 | - :ip: 192.168.125.11
6 | - :ip: 192.168.126.11
7 | - :name: node2
8 | :networks:
9 | - :ip: 192.168.125.12
10 | - :ip: 192.168.126.12
11 | - :name: node3
12 | :networks:
13 | - :ip: 192.168.125.13
14 | - :ip: 192.168.126.13
15 | - :name: node4
16 | :networks:
17 | - :ip: 192.168.125.14
18 | - :ip: 192.168.126.14
19 | :vms_common:
20 | :box: centos/7
21 | :os: el7
22 | :memory: 2048
23 | :cpus: 2
24 | :networks:
25 | - :netid: :private_network
26 | - :netid: :private_network
27 | :disks:
28 | - :size: 2
29 | :parts:
30 | - :fs: xfs
31 | :mount: "/data"
32 | :name: data
33 | :size: 100%
34 | :sync_folders:
35 | - :src: "../src"
36 | :dest: "/shared/src"
37 | - :src: "../scripts"
38 | :dest: "/shared/scripts"
39 | - :src: "../repo"
40 | :dest: "/shared/repo"
41 | :type: rsync
42 | - :src: "playbooks"
43 | :dest: "/shared/playbooks"
44 | :type: rsync
45 | :groups:
46 | :ha_servers:
47 | - all
48 | :smb_servers:
49 | - all
50 | :nfs_servers:
51 | - all
52 | :gluster_servers:
53 | - all
54 | :ha:
55 | :ha_name: storhaug
56 | :virtual_ips:
57 | - 192.168.125.111
58 | - 192.168.125.112
59 | :samba:
60 | :setup_samba: true
61 | :shares:
62 | :share1:
63 | :comment: GlusterFS Share
64 | :path: "/"
65 | :read only: 'No'
66 | :guest ok: 'Yes'
67 | :kernel share modes: 'No'
68 | :vfs objects: glusterfs
69 | :glusterfs:loglevel: '10'
70 | :glusterfs:logfile: "/var/log/glusterfs/glusterfs-share.log"
71 | :glusterfs:volume: share
72 | :ctdb:
73 | :setup_ctdb: true
74 | :ad:
75 | :setup_ad: false
76 | :gluster:
77 | :setup_gluster: true
78 | :bricks_dir: "/data/bricks"
79 | :volumes:
80 | - :name: share
81 | :opts:
82 | :user.smb: disable
83 | :nfs.disable: 'true'
84 | :network.ping-timeout: '10'
85 | - :name: state
86 | :opts:
87 | :user.smb: disable
88 | :nfs.disable: 'true'
89 | :network.ping-timeout: '10'
90 | - :name: export_vol
91 | :opts:
92 | :user.smb: disable
93 | :nfs.disable: 'true'
94 | :network.ping-timeout: '10'
95 | :ganesha:
96 | :setup_ganesha: true
97 |
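These defaults are not read directly on later runs: per the Vagrantfile, the first vagrant command copies them to vagrant.yaml and exits so the settings can be reviewed. A minimal workflow sketch:

    vagrant up            # first run only writes vagrant.yaml from defaults.yaml, then exits
    vi vagrant.yaml       # adjust :memory:, :cpus:, node names, networks, etc.
    vagrant up            # brings the nodes up and runs the playbooks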
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/files/99-no-dns.conf:
--------------------------------------------------------------------------------
1 | [main]
2 | dns=none
3 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/files/CTDB:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # OCF Resource Agent for managing CTDB
4 | #
5 | # Copyright (c) 2009-2010 Novell Inc., Tim Serong
6 | # All Rights Reserved.
7 | #
8 | # This program is free software; you can redistribute it and/or modify
9 | # it under the terms of version 2 of the GNU General Public License as
10 | # published by the Free Software Foundation.
11 | #
12 | # This program is distributed in the hope that it would be useful, but
13 | # WITHOUT ANY WARRANTY; without even the implied warranty of
14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 | #
16 | # Further, this software is distributed without any warranty that it is
17 | # free of the rightful claim of any third person regarding infringement
18 | # or the like. Any license provided herein, whether implied or
19 | # otherwise, applies only to this software file. Patent licenses, if
20 | # any, provided herein do not apply to combinations of this program with
21 | # other software, or any other product whatsoever.
22 | #
23 | # You should have received a copy of the GNU General Public License
24 | # along with this program; if not, write the Free Software Foundation,
25 | # Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
26 | #
27 | #
28 | # OVERVIEW
29 | #
30 | # When run by itself, CTDB can handle IP failover and includes scripts
31 | # to manage various services (Samba, Winbind, HTTP, etc.). When run as
32 | # a resource in a Pacemaker cluster, this additional functionality
33 | # should not be used; instead one should define separate resources for
34 | # CTDB, Samba, Winbind, IP addresses, etc.
35 | #
36 | # As of 2010-11-17, there is no separate OCF Samba or Winbind RA, so
37 | # it is still possible to configure CTDB so that it manages these
38 | # resources itself. In future, once Samba and Winbind RAs are
39 | # available, this ability will be deprecated and ultimately removed.
40 | #
41 | # This RA intentionally provides no ability to configure CTDB such that
42 | # it manages IP failover, HTTP, NFS, etc.
43 | #
44 | #
45 | # TODO:
46 | # - ctdb_stop doesn't really support multiple independent CTDB instances,
47 | # unless they're running from distinct ctdbd binaries (it uses pkill
48 | # $OCF_RESKEY_ctdbd_binary if "ctdb stop" doesn't work, which it might
49 | # not under heavy load - this will kill all ctdbd instances on the
50 | # system). OTOH, running multiple CTDB instances per node is, well,
51 | # AFAIK, completely crazy. Can't run more than one in a vanilla CTDB
52 | # cluster, with the CTDB init script. So it might be nice to address
53 | # this for complete semantic correctness of the RA, but shouldn't
54 | # actually cause any trouble in real life.
55 | # - As much as possible, get rid of auto config generation
56 | # - Especially smb.conf
57 | # - Verify timeouts are sane
58 | # - Monitor differentiate between error and not running?
59 | # - Do we need to verify globally unique setting?
60 | # - Should set CTDB_NODES to ${HA_RSCTMP}/ctdb (generated based on
61 | # current nodes)
62 | # - Look at enabling set_ctdb_variables() if necessary.
63 | # - Probably possible for sysconfig file to not be restored if
64 | # CTDB dies unexpectedly.
65 | #
66 | #######################################################################
67 | # Initialization:
68 |
69 | : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
70 | . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
71 | CRM_MASTER="${HA_SBIN_DIR}/crm_master -l reboot"
72 |
73 | #######################################################################
74 | # Default parameter values:
75 |
76 | : ${OCF_RESKEY_ctdb_manages_samba:=no}
77 | : ${OCF_RESKEY_ctdb_manages_winbind:=no}
78 | : ${OCF_RESKEY_ctdb_service_smb:=""}
79 | : ${OCF_RESKEY_ctdb_service_nmb:=""}
80 | : ${OCF_RESKEY_ctdb_service_winbind:=""}
81 | : ${OCF_RESKEY_ctdb_samba_skip_share_check:=yes}
82 | : ${OCF_RESKEY_ctdb_monitor_free_memory:=100}
83 | : ${OCF_RESKEY_ctdb_start_as_disabled:=no}
84 | : ${OCF_RESKEY_ctdb_tunables:=""}
85 |
86 | : ${OCF_RESKEY_ctdb_config_dir:=/etc/ctdb}
87 | : ${OCF_RESKEY_ctdb_binary:=/usr/bin/ctdb}
88 | : ${OCF_RESKEY_ctdbd_binary:=/usr/sbin/ctdbd}
89 | : ${OCF_RESKEY_ctdb_socket:=/var/lib/ctdb/ctdb.socket}
90 | : ${OCF_RESKEY_ctdb_dbdir:=/var/lib/ctdb}
91 | : ${OCF_RESKEY_ctdb_logfile:=/var/log/ctdb/log.ctdb}
92 | : ${OCF_RESKEY_ctdb_debuglevel:=2}
93 |
94 | : ${OCF_RESKEY_smb_conf:=/etc/samba/smb.conf}
95 | : ${OCF_RESKEY_smb_passdb_backend:=tdbsam}
96 | : ${OCF_RESKEY_smb_idmap_backend:=tdb2}
97 |
98 | #######################################################################
99 |
100 | meta_data() {
101 | cat <<END
103 |
104 |
105 | 1.0
106 |
107 |
108 | This resource agent manages CTDB, allowing one to use Clustered Samba in a
109 | Linux-HA/Pacemaker cluster. You need a shared filesystem (e.g. OCFS2) on
110 | which the CTDB lock will be stored. Create /etc/ctdb/nodes containing a list
111 | of private IP addresses of each node in the cluster, then configure this RA
112 | as a clone. To have CTDB manage Samba, set ctdb_manages_samba="yes".
113 | Note that this option will be deprecated in future, in favour of configuring
114 | a separate Samba resource.
115 |
116 | For more information see http://linux-ha.org/wiki/CTDB_(resource_agent)
117 |
118 | CTDB Resource Agent
119 |
120 |
121 |
122 |
123 |
124 | The location of a shared lock file, common across all nodes.
125 | This must be on shared storage, e.g.: /shared-fs/samba/ctdb.lock
126 |
127 | CTDB shared lock file
128 |
129 |
130 |
131 |
132 |
133 | Should CTDB manage starting/stopping the Samba service for you?
134 | This will be deprecated in future, in favor of configuring a
135 | separate Samba resource.
136 |
137 | Should CTDB manage Samba?
138 |
139 |
140 |
141 |
142 |
143 | Should CTDB manage starting/stopping the Winbind service for you?
144 | This will be deprecated in future, in favor of configuring a
145 | separate Winbind resource.
146 |
147 | Should CTDB manage Winbind?
148 |
149 |
150 |
151 |
152 |
153 | Name of smb init script. Only necessary if CTDB is managing
154 | Samba directly. Will usually be auto-detected.
155 |
156 | Name of smb init script
157 |
158 |
159 |
160 |
161 |
162 | Name of nmb init script. Only necessary if CTDB is managing
163 | Samba directly. Will usually be auto-detected.
164 |
165 | Name of nmb init script
166 |
167 |
168 |
169 |
170 |
171 | Name of winbind init script. Only necessary if CTDB is managing
172 | Winbind directly. Will usually be auto-detected.
173 |
174 | Name of winbind init script
175 |
176 |
177 |
178 |
179 |
180 | If there are very many shares it may not be feasible to check that all
181 | of them are available during each monitoring interval. In that case
182 | this check can be disabled.
183 |
184 | Skip share check during monitor?
185 |
186 |
187 |
188 |
189 |
190 | If the amount of free memory drops below this value the node will
191 | become unhealthy and ctdb and all managed services will be shutdown.
192 | Once this occurs, the administrator needs to find the reason for the
193 | OOM situation, rectify it and restart ctdb with "service ctdb start".
194 |
195 | Minimum amount of free memory (MB)
196 |
197 |
198 |
199 |
200 |
201 | When set to yes, the CTDB node will start in DISABLED mode and not
202 | host any public ip addresses.
203 |
204 | Start CTDB disabled?
205 |
206 |
207 |
208 |
209 |
210 | This allows the setting of CTDB tunables as defined in
211 | "man ctdb-tunables". This is a whitespace-delimited list
212 | of "TunableName=VALUE" pairs.
213 |
214 | Define CTDB tunables
215 |
216 |
217 |
218 |
219 |
220 | The directory containing various CTDB configuration files.
221 | The "nodes" and "notify.sh" scripts are expected to be
222 | in this directory, as is the "events.d" subdirectory.
223 |
224 | CTDB config file directory
225 |
226 |
227 |
228 |
229 |
230 | Full path to the CTDB binary.
231 |
232 | CTDB binary path
233 |
234 |
235 |
236 |
237 |
238 | Full path to the CTDB cluster daemon binary.
239 |
240 | CTDB Daemon binary path
241 |
242 |
243 |
244 |
245 |
246 | Full path to the domain socket that ctdbd will create, used for
247 | local clients to attach and communicate with the ctdb daemon.
248 |
249 | CTDB socket location
250 |
251 |
252 |
253 |
254 |
255 | The directory to put the local CTDB database files in.
256 | Persistent database files will be put in ctdb_dbdir/persistent.
257 |
258 | CTDB database directory
259 |
260 |
261 |
262 |
263 |
264 | Full path to log file. To log to syslog instead, use the
265 | value "syslog".
266 |
267 | CTDB log file location
268 |
269 |
270 |
271 |
272 |
273 | What debug level to run at (0-10). Higher means more verbose.
274 |
275 | CTDB debug level
276 |
277 |
278 |
279 |
280 |
281 | Path to default samba config file. Only necessary if CTDB
282 | is managing Samba.
283 |
284 | Path to smb.conf
285 |
286 |
287 |
288 |
289 |
290 | The directory for smbd to use for storing such files as
291 | smbpasswd and secrets.tdb. Old versions of CTDB (prior to 1.0.50)
292 | required this to be on shared storage. This parameter should not
293 | be set for current versions of CTDB, and only remains in the RA
294 | for backwards compatibility.
295 |
296 | Samba private dir (deprecated)
297 |
298 |
299 |
300 |
301 |
302 | Which backend to use for storing user and possibly group
303 | information. Only necessary if CTDB is managing Samba.
304 |
305 | Samba passdb backend
306 |
307 |
308 |
309 |
310 |
311 | Which backend to use for SID/uid/gid mapping. Only necessary
312 | if CTDB is managing Samba.
313 |
314 | Samba idmap backend
315 |
316 |
317 |
318 |
319 |
320 | Which fileid:algorithm to use with vfs_fileid. The correct
321 | value depends on which clustered filesystem is in use, e.g.:
322 | for OCFS2, this should be set to "fsid". Only necessary if
323 | CTDB is managing Samba.
324 |
325 | Samba VFS fileid algorithm
326 |
327 |
328 |
329 |
330 |
331 |
332 |
333 |
334 |
335 |
336 |
337 |
338 |
339 |
340 |
341 |
342 | END
343 | }
344 |
345 | #######################################################################
346 |
347 | # Figure out path to /etc/sysconfig/ctdb (same logic as
348 | # loadconfig() from /etc/ctdb/functions
349 | if [ -f /etc/sysconfig/ctdb ]; then
350 | CTDB_SYSCONFIG=/etc/sysconfig/ctdb
351 | elif [ -f /etc/default/ctdb ]; then
352 | CTDB_SYSCONFIG=/etc/default/ctdb
353 | elif [ -f $OCF_RESKEY_ctdb_config_dir/ctdb ]; then
354 | CTDB_SYSCONFIG=$OCF_RESKEY_ctdb_config_dir/ctdb
355 | fi
356 |
357 | # Backup paths
358 | CTDB_SYSCONFIG_BACKUP=${CTDB_SYSCONFIG}.ctdb-ra-orig
359 |
360 | invoke_ctdb() {
361 | # CTDB's defaults are:
362 | local timeout=3
363 | local timelimit=120
364 | # ...but we override with the timeout for the current op:
365 | if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then
366 | timeout=$((OCF_RESKEY_CRM_meta_timeout/1000))
367 | timelimit=$((OCF_RESKEY_CRM_meta_timeout/1000))
368 | fi
369 | $OCF_RESKEY_ctdb_binary --socket=$OCF_RESKEY_ctdb_socket \
370 | -t $timeout -T $timelimit \
371 | "$@"
372 | local rc=$?
373 | if [ $rc != 0 ]; then
374 | ocf_log err "FAILED: $OCF_RESKEY_ctdb_binary " \
375 | "--socket=$OCF_RESKEY_ctdb_socket -t $timeout " \
376 | "-T $timelimit $@"
377 | fi
378 |
379 | return $rc
380 | }
381 |
382 | # Enable any event scripts that are explicitly required.
383 | # Any others will ultimately be invoked or not based on how they ship
384 | # with CTDB, but will generally have no effect, because the relevant
385 | # CTDB_MANAGES_* options won't be set in /etc/sysconfig/ctdb.
386 | enable_event_scripts() {
387 | local event_dir=$OCF_RESKEY_ctdb_config_dir/events.d
388 |
389 | if [ -f "${OCF_RESKEY_ctdb_config_dir}/public_addresses" ]; then
390 | chmod u+x $event_dir/10.interface
391 | else
392 | chmod a-x $event_dir/10.interface
393 | fi
394 | if [ -f "${OCF_RESKEY_ctdb_config_dir}/static-routes" ]; then
395 | chmod u+x $event_dir/11.routing
396 | else
397 | chmod a-x $event_dir/11.routing
398 | fi
399 | if ocf_is_true "$OCF_RESKEY_ctdb_manages_samba" || \
400 | ocf_is_true "$OCF_RESKEY_ctdb_manages_winbind"; then
401 | chmod u+x $event_dir/50.samba
402 | else
403 | chmod a-x $event_dir/50.samba
404 | fi
405 | }
406 |
407 | # This function has no effect (currently no way to set CTDB_SET_*)
408 | # but remains here in case we need it in future.
409 | set_ctdb_variables() {
410 | rv=$OCF_SUCCESS
411 | set | grep ^CTDB_SET_ | cut -d_ -f3- |
412 | while read v; do
413 | varname=`echo $v | cut -d= -f1`
414 | value=`echo $v | cut -d= -f2`
415 | invoke_ctdb setvar $varname $value || rv=$OCF_ERR_GENERIC
416 | done || rv=$OCF_ERR_GENERIC
417 | return $rv
418 | }
419 |
420 |
421 | # Add necessary settings to /etc/samba/smb.conf. In a perfect world,
422 | # we'd be able to generate a new, temporary, smb.conf file somewhere,
423 | # something like:
424 | # include = /etc/samba/smb.conf
425 | # [global]
426 | # clustering = yes
427 | # # ...etc...
428 | # Unfortunately, we can't do this, because there's no way to tell the
429 | # smb init script where the temporary config is, so we just edit
430 | # the default config file.
431 | init_smb_conf() {
432 | # Don't screw around with the config if CTDB isn't managing Samba!
433 | ocf_is_true "$OCF_RESKEY_ctdb_manages_samba" || return 0
434 |
435 | # replace these things in smb.conf
436 | local repl='# CTDB-RA:|passdb backend|clustering|idmap backend|private dir|ctdbd socket'
437 |
438 | local private_dir
439 | [ -n "$OCF_RESKEY_smb_private_dir" ] && private_dir="\tprivate dir = $OCF_RESKEY_smb_private_dir\n"
440 |
441 | local vfs_fileid
442 | local do_vfs=0
443 | if [ -n "$OCF_RESKEY_smb_fileid_algorithm" ]; then
444 | repl="${repl}|fileid:algorithm|fileid:mapping"
445 | vfs_fileid="\tfileid:algorithm = $OCF_RESKEY_smb_fileid_algorithm\n"
446 | if sed -n '/^[[:space:]]*\[global\]/,/^[[:space:]]*\[/p' $OCF_RESKEY_smb_conf | \
447 | grep -Eq '^[[:space:]]*vfs objects'; then
448 | # vfs objects already specified, will append fileid to existing line
449 | do_vfs=1
450 | else
451 | vfs_fileid="$vfs_fileid\tvfs objects = fileid\n"
452 | fi
453 | fi
454 | awk '
455 | /^[[:space:]]*\[/ { global = 0 }
456 | /^[[:space:]]*\[global\]/ { global = 1 }
457 | {
458 | if(global) {
459 | if ('$do_vfs' && $0 ~ /^[[:space:]]vfs objects/ && $0 !~ /fileid/) {
460 | print $0" fileid"
461 | } else if ($0 !~ /^[[:space:]]*('"$repl"')/) {
462 | print
463 | }
464 | } else {
465 | print
466 | }
467 | }' $OCF_RESKEY_smb_conf | sed "/^[[:space:]]*\[global\]/ a\\
468 | \t# CTDB-RA: Begin auto-generated section (do not change below)\n\
469 | \tpassdb backend = $OCF_RESKEY_smb_passdb_backend\n\
470 | \tclustering = yes\n\
471 | \tidmap backend = $OCF_RESKEY_smb_idmap_backend\n\
472 | \tctdbd socket = $OCF_RESKEY_ctdb_socket\n$private_dir$vfs_fileid\
473 | \t# CTDB-RA: End auto-generated section (do not change above)" > $OCF_RESKEY_smb_conf.$$
474 | mv -f $OCF_RESKEY_smb_conf.$$ $OCF_RESKEY_smb_conf
475 | }
476 |
477 |
478 | # Get rid of that section we added
479 | cleanup_smb_conf() {
480 | ocf_is_true "$OCF_RESKEY_ctdb_manages_samba" || return 0
481 |
482 | sed '/# CTDB-RA: Begin/,/# CTDB-RA: End/d' $OCF_RESKEY_smb_conf > $OCF_RESKEY_smb_conf.$$
483 | mv -f $OCF_RESKEY_smb_conf.$$ $OCF_RESKEY_smb_conf
484 | }
485 |
486 | append_ctdb_sysconfig() {
487 | [ -n "$2" ] && echo "$1=$2" >> $CTDB_SYSCONFIG
488 | }
489 |
490 | # Generate a new, minimal CTDB config file that's just enough
491 | # to get CTDB running as configured by the RA parameters.
492 | generate_ctdb_sysconfig() {
493 | # Backup existing sysconfig if we're not already using an auto-generated one
494 | grep -qa '# CTDB-RA: Auto-generated' $CTDB_SYSCONFIG || cp -p $CTDB_SYSCONFIG $CTDB_SYSCONFIG_BACKUP
495 | if [ $? -ne 0 ]; then
496 | ocf_log warn "Unable to backup $CTDB_SYSCONFIG to $CTDB_SYSCONFIG_BACKUP"
497 | fi
498 |
499 | ocf_log info "Generating new $CTDB_SYSCONFIG"
500 |
501 | # Note to maintainers and other random hackers:
502 | # Parameters may need to be set here, for CTDB event
503 | # scripts to pick up, or may need to be passed to ctdbd
504 | # when starting, or both. Be careful. The CTDB source
505 | # tree and manpages are your friends. As a concrete
506 | # example, setting CTDB_START_AS_DISABLED here is
507 | # completely useless, as this is actually a command line
508 | # argument for ctdbd; it's not used anywhere else.
509 |
510 | cat >$CTDB_SYSCONFIG <&1) || return $OCF_ERR_GENERIC
541 | if [ $? -ne 0 ]; then
542 | rc=$OCF_ERR_GENERIC
543 | else
544 | if echo $status | grep -Eqs 'RECMASTER: YES'; then
545 | if [ $rc -eq $OCF_SUCCESS ]; then
546 | rc=$OCF_RUNNING_MASTER
547 | else
548 | rc=$OCF_FAILED_MASTER
549 | fi
550 | fi
551 | fi
552 | return $rc
553 | }
554 |
555 | ctdb_usage() {
556 | cat </dev/null
576 | for pdbase in $(ls $persistent_db_dir/*.tdb.[0-9] 2>/dev/null) ; do
577 | /usr/bin/tdbdump $pdbase >/dev/null 2>/dev/null || {
578 | ocf_log err "Persistent database $pdbase is corrupted! CTDB will not start."
579 | return $OCF_ERR_GENERIC
580 | }
581 | done
582 |
583 | # Add necessary configuration to smb.conf
584 | init_smb_conf
585 | if [ $? -ne 0 ]; then
586 | ocf_log err "Failed to update $OCF_RESKEY_smb_conf."
587 | return $OCF_ERR_GENERIC
588 | fi
589 |
590 | # Generate new CTDB sysconfig
591 | generate_ctdb_sysconfig
592 | enable_event_scripts
593 |
594 | # Use logfile by default (and create the logdir if needed), or syslog if asked for
595 | local log_option
596 | if [ "$OCF_RESKEY_ctdb_logfile" = "syslog" ]; then
597 | log_option="--syslog"
598 | else
599 | log_option="--logfile=$OCF_RESKEY_ctdb_logfile"
600 | [ -d $(dirname "$OCF_RESKEY_ctdb_logfile") ] || \
601 | mkdir -p $(dirname "$OCF_RESKEY_ctdb_logfile")
602 | [ -f "$OCF_RESKEY_ctdb_logfile" ] || \
603 | touch "$OCF_RESKEY_ctdb_logfile"
604 | fi
605 | # public addresses file (should not be present, but need to set for correctness if it is)
606 | local pub_addr_option=""
607 | [ -f "${OCF_RESKEY_ctdb_config_dir}/public_addresses" ] && \
608 | pub_addr_option="--public-addresses=${OCF_RESKEY_ctdb_config_dir}/public_addresses"
609 | # create the socket/run dir, if needed
610 | [ -d $(dirname "$OCF_RESKEY_ctdb_socket") ] || mkdir -p $(dirname "$OCF_RESKEY_ctdb_socket")
611 |
612 | # start as disabled and reclock
613 | local start_as_disabled="--start-as-disabled"
614 | if [ "x$OCF_RESKEY_ctdb_recovery_lock" != "x" ]; then
615 | reclock="--reclock=$OCF_RESKEY_ctdb_recovery_lock"
616 | ocf_is_true "$OCF_RESKEY_ctdb_start_as_disabled" || start_as_disabled=""
617 | else
618 | reclock=""
619 | fi
620 |
621 | # Start her up
622 | $OCF_RESKEY_ctdbd_binary \
623 | --nlist=$OCF_RESKEY_ctdb_config_dir/nodes \
624 | --socket=$OCF_RESKEY_ctdb_socket \
625 | --dbdir=$OCF_RESKEY_ctdb_dbdir \
626 | --dbdir-persistent=$OCF_RESKEY_ctdb_dbdir/persistent \
627 | --event-script-dir=$OCF_RESKEY_ctdb_config_dir/events.d \
628 | --notification-script=$OCF_RESKEY_ctdb_config_dir/notify.sh \
629 | --transport=tcp \
630 | $reclock $start_as_disabled $log_option $pub_addr_option \
631 | -d $OCF_RESKEY_ctdb_debuglevel
632 | if [ $? -ne 0 ]; then
633 | # cleanup smb.conf
634 | cleanup_smb_conf
635 | ocf_log err "Failed to execute $OCF_RESKEY_ctdbd_binary."
636 | return $OCF_ERR_GENERIC
637 | else
638 | if [ "x$OCF_RESKEY_ctdb_recovery_lock" == "x" ]; then
639 | invoke_ctdb setrecmasterrole off || return $OCF_ERR_GENERIC
640 | ctdb_crm_master || return $OCF_ERR_GENERIC
641 | fi
642 | # Wait a bit for CTDB to stabilize
643 | # (until start times out if necessary)
644 | while true; do
645 | # Initial sleep is intentional (ctdb init script
646 | # has sleep after ctdbd start, but before invoking
647 | # ctdb to talk to it)
648 | sleep 1
649 | status=$(invoke_ctdb status 2>/dev/null)
650 | if [ $? -ne 0 ]; then
651 | # CTDB will be running, kill it before returning
652 | ctdb_stop
653 | return $OCF_ERR_GENERIC
654 | elif ! echo $status | grep -Eqs 'UNHEALTHY[^ ]* \(THIS'; then
655 | # Status does not say this node is unhealthy,
656 | # so we're good to go. Do a bit of final
657 | # setup and (hopefully) return success.
658 | set_ctdb_variables
659 | return $?
660 | fi
661 | done
662 | fi
663 |
664 | # ctdbd will (or can) actually still be running at this point, so kill it
665 | ctdb_stop
666 |
667 | ocf_log err "Timeout waiting for CTDB to stabilize"
668 | return $OCF_ERR_GENERIC
669 | }
670 |
671 |
672 | ctdb_stop() {
673 | # Do nothing if already stopped
674 | pkill -0 -f $OCF_RESKEY_ctdbd_binary || return $OCF_SUCCESS
675 |
676 | # Tell it to die nicely
677 | invoke_ctdb shutdown >/dev/null 2>&1
678 | rv=$?
679 | # No more Mr. Nice Guy
680 | count=0
681 | while pkill -0 -f $OCF_RESKEY_ctdbd_binary ; do
682 | sleep 1
683 | count=$(($count + 1))
684 | [ $count -gt 10 ] && {
685 | ocf_log info "killing ctdbd "
686 | pkill -9 -f $OCF_RESKEY_ctdbd_binary
687 | pkill -9 -f ${OCF_RESKEY_ctdb_config_dir}/events.d/
688 | }
689 | done
690 |
691 | # Cleanup smb.conf
692 | cleanup_smb_conf
693 |
694 | if [ "x$OCF_RESKEY_ctdb_recovery_lock" == "x" ]; then
695 | $CRM_MASTER -D
696 | fi
697 |
698 | # It was a clean shutdown, return success
699 | [ $rv -eq $OCF_SUCCESS ] && return $OCF_SUCCESS
700 |
701 | # Unclean shutdown, return success if there's no ctdbds left (we
702 | # killed them forcibly, but at least they're good and dead).
703 | pkill -0 -f $OCF_RESKEY_ctdbd_binary || return $OCF_SUCCESS
704 |
705 | # Problem: ctdb shutdown didn't work and neither did some vigorous
706 | # kill -9ing. Only thing to do is report failure.
707 | return $OCF_ERR_GENERIC
708 | }
709 |
710 |
711 | ctdb_monitor() {
712 | local status
713 | local rc=$OCF_SUCCESS
714 | # "ctdb status" exits non-zero if CTDB isn't running.
715 | # It can also exit non-zero if there's a timeout (ctdbd blocked,
716 | # stalled, massive load, or otherwise wedged). If it's actually
717 | # not running, STDERR will say "Errno:Connection refused(111)",
718 | # whereas if it's wedged, it'll say various other unpleasant things.
719 | status=$(invoke_ctdb status 2>&1) || rc=$OCF_NOT_RUNNING
720 | if [ $rc -ne $OCF_SUCCESS ]; then
721 | ocf_log info "CTDB status failed: $status"
722 | rc=$OCF_NOT_RUNNING
723 | else
724 | if echo $status | grep -Eqs 'DISABLED[^ ]* \(THIS'; then
725 | if crm_resource --resource ctdb --locate | grep -qs 'Master'; then
726 | invoke_ctdb enable || rc=$OCF_ERR_GENERIC
727 | fi
728 | elif ! echo $status | grep -Eqs '(OK|UNHEALTHY)[^ ]* \(THIS'; then
729 | ocf_log info "CTDB status: $(echo $status | grep -s '\(THIS NODE\)')"
730 | rc=$OCF_NOT_RUNNING
731 | fi
732 | fi
733 |
734 | if [ "x$OCF_RESKEY_ctdb_recovery_lock" == "x" ]; then
735 | rc=$(ctdb_master_return $rc)
736 | fi
737 |
738 | return $rc
739 | }
740 |
741 | ctdb_validate() {
742 | # Required binaries (full path to tdbdump is intentional, as that's
743 | # what's used in ctdb_start, which was lifted from the init script)
744 | for binary in pkill /usr/bin/tdbdump; do
745 | check_binary $binary
746 | done
747 |
748 | if [ -z "$CTDB_SYSCONFIG" ]; then
749 | ocf_log err "Can't find CTDB config file (expecting /etc/sysconfig/ctdb, /etc/default/ctdb or similar)"
750 | return $OCF_ERR_INSTALLED
751 | fi
752 |
753 | if ocf_is_true "$OCF_RESKEY_ctdb_manages_samba" && [ ! -f "$OCF_RESKEY_smb_conf" ]; then
754 | ocf_log err "Samba config file '$OCF_RESKEY_smb_conf' does not exist."
755 | return $OCF_ERR_INSTALLED
756 | fi
757 |
758 | if [ -f "${OCF_RESKEY_ctdb_config_dir}/public_addresses" ]; then
759 | ocf_log warn "CTDB file '${OCF_RESKEY_ctdb_config_dir}/public_addresses' exists - CTDB will try to manage IP failover!"
760 | fi
761 |
762 | if [ ! -f "$OCF_RESKEY_ctdb_config_dir/nodes" ]; then
763 | ocf_log err "$OCF_RESKEY_ctdb_config_dir/nodes does not exist."
764 | return $OCF_ERR_ARGS
765 | fi
766 |
767 | if [ -z "$OCF_RESKEY_ctdb_recovery_lock" ]; then
768 | ocf_log warn "ctdb_recovery_lock not specified - CTDB will not do split-brain prevention!"
769 | fi
770 |
771 | lock_dir=$(dirname "$OCF_RESKEY_ctdb_recovery_lock")
772 | touch "$lock_dir/$$" 2>/dev/null
773 | if [ $? != 0 ]; then
774 | ocf_log err "Directory for lock file '$OCF_RESKEY_ctdb_recovery_lock' does not exist, or is not writable."
775 | return $OCF_ERR_ARGS
776 | fi
777 | rm "$lock_dir/$$"
778 |
779 | return $OCF_SUCCESS
780 | }
781 |
782 |
783 | ctdb_notify() {
784 | local rc=$OCF_SUCCESS
785 | mode="${OCF_RESKEY_CRM_meta_notify_type}-${OCF_RESKEY_CRM_meta_notify_operation}"
786 | case "${mode}" in
787 | post-promote)
788 | if [ "x$OCF_RESKEY_CRM_meta_notify_master_resource" != "x" ]; then
789 | status=$(invoke_ctdb status 2>&1) || rc=$OCF_ERR_GENERIC
790 | if [ $rc -ne $OCF_ERR_GENERIC ] && echo $status | grep -Eqs 'DISABLED[^ ]* \(THIS'; then
791 | invoke_ctdb enable || rc=$OCF_ERR_GENERIC
792 | fi
793 | fi
794 | ;;
795 | esac
796 |
797 | if [ "x$OCF_RESKEY_ctdb_recovery_lock" == "x" ] && [ ${mode} != "*-start" ]; then
798 | ctdb_crm_master || rc=$OCF_ERR_GENERIC
799 | fi
800 |
801 | return $rc
802 | }
803 |
804 | ctdb_promote() {
805 | local rc
806 |
807 | # exit immediately if configuration is not valid
808 | ctdb_validate || exit $?
809 |
810 | # test the resource's current state
811 | ctdb_monitor
812 | rc=$?
813 | case "$rc" in
814 | "$OCF_SUCCESS")
815 | # Running as slave. Normal, expected behavior.
816 | ocf_log debug "Resource is currently running as Slave"
817 | ;;
818 | "$OCF_RUNNING_MASTER")
819 | # Already a master. Unexpected, but not a problem.
820 | ocf_log info "Resource is already running as Master"
821 | return $OCF_SUCCESS
822 | ;;
823 | "$OCF_NOT_RUNNING")
824 | # Currently not running. Need to start before promoting.
825 | ocf_log info "Resource is currently not running"
826 | ctdb_start
827 | ;;
828 | *)
829 | # Failed resource. Let the cluster manager recover.
830 | ocf_log err "Unexpected error, cannot promote"
831 | exit $rc
832 | ;;
833 | esac
834 |
835 | # actually promote the resource here (make sure to immediately
836 | # exit with an $OCF_ERR_ error code if anything goes seriously
837 | # wrong)
838 | invoke_ctdb setrecmasterrole on || exit $OCF_ERR_GENERIC
839 | invoke_ctdb enable || exit $OCF_ERR_GENERIC
840 |
841 | # After the resource has been promoted, check whether the
842 | # promotion worked. If the resource promotion is asynchronous, the
843 | # agent may spin on the monitor function here -- if the resource
844 | # does not assume the Master role within the defined timeout, the
845 | # cluster manager will consider the promote action failed.
846 | while true; do
847 | ctdb_master_return $OCF_SUCCESS
848 | if [ $? -eq $OCF_RUNNING_MASTER ]; then
849 | ocf_log debug "Resource promoted"
850 | break
851 | else
852 | ocf_log debug "Resource still awaiting promotion"
853 | sleep 1
854 | fi
855 | done
856 |
857 | # only return $OCF_SUCCESS if _everything_ succeeded as expected
858 | return $OCF_SUCCESS
859 | }
860 |
861 | ctdb_demote() {
862 | local rc
863 |
864 | # exit immediately if configuration is not valid
865 | ctdb_validate || exit $?
866 |
867 | # test the resource's current state
868 | ctdb_monitor
869 | rc=$?
870 | case "$rc" in
871 | "$OCF_RUNNING_MASTER")
872 | # Running as slave. Normal, expected behavior.
873 | ocf_log debug "Resource is currently running as Master"
874 | ;;
875 | "$OCF_SUCCESS")
876 | # Already a master. Unexpected, but not a problem.
877 | ocf_log info "Resource is already running as Slave"
878 | return $OCF_SUCCESS
879 | ;;
880 | "$OCF_NOT_RUNNING")
881 | # Currently not running. Need to start before promoting.
882 | ocf_log err "Resource is currently not running"
883 | exit $OCF_ERR_GENERIC
884 | ;;
885 | *)
886 | # Failed resource. Let the cluster manager recover.
887 | ocf_log err "Unexpected error, cannot promote"
888 | exit $rc
889 | ;;
890 | esac
891 |
892 | # actually demote the resource here (make sure to immediately
893 | # exit with an $OCF_ERR_ error code if anything goes seriously
894 | # wrong)
895 | invoke_ctdb setrecmasterrole off || exit $OCF_ERR_GENERIC
896 | invoke_ctdb enable || exit $OCF_ERR_GENERIC
897 |
898 | # After the resource has been demoted, check whether the
899 | # demotion worked. If the resource demotion is asynchronous, the
900 | # agent may spin on the monitor function here -- if the resource
901 | # does not assume the Slave role within the defined timeout, the
902 | # cluster manager will consider the demote action failed.
903 | while true; do
904 | ctdb_master_return $OCF_SUCCESS
905 | if [ $? -eq $OCF_RUNNING_MASTER ]; then
906 | ocf_log debug "Resource still awaiting demotion"
907 | sleep 1
908 | else
909 | ocf_log debug "Resource demoted"
910 | break
911 | fi
912 | done
913 |
914 | # only return $OCF_SUCCESS if _everything_ succeeded as expected
915 | return $OCF_SUCCESS
916 | }
917 |
918 | case $__OCF_ACTION in
919 | meta-data) meta_data
920 | exit $OCF_SUCCESS
921 | ;;
922 | start) ctdb_start;;
923 | stop) ctdb_stop;;
924 | monitor) ctdb_monitor;;
925 | validate-all) ctdb_validate;;
926 | notify) ctdb_notify;;
927 | promote) ctdb_promote;;
928 | demote) ctdb_demote;;
929 | usage|help) ctdb_usage
930 | exit $OCF_SUCCESS
931 | ;;
932 | *) ctdb_usage
933 | exit $OCF_ERR_UNIMPLEMENTED
934 | ;;
935 | esac
936 | rc=$?
937 | ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc"
938 | exit $rc
939 |
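940 | # Illustrative only (assumes the ocf-tester utility from the resource-agents
941 | # package is installed; the resource name below is an example): the agent can
942 | # be exercised outside Pacemaker with something like:
943 | #   ocf-tester -n ctdb-test -o ctdb_recovery_lock=/gluster/lock/lockfile \
944 | #       /usr/lib/ocf/resource.d/heartbeat/CTDB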
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/files/ctdb:
--------------------------------------------------------------------------------
1 | CTDB_NODES=/etc/ctdb/nodes
2 | CTDB_RECOVERY_LOCK=/gluster/lock/lockfile
3 | CTDB_MANAGES_SAMBA="no"
4 | CTDB_MANAGES_WINBIND="no"
5 |
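6 | # Note: CTDB requires the recovery lock file to reside on shared storage that
7 | # every node can reach (here, a GlusterFS-backed mount under /gluster/lock).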
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/files/export.conf.j2:
--------------------------------------------------------------------------------
1 | EXPORT{
2 | Export_Id = 90;
3 | Path="/{{ item.name }}";
4 | {% if item.FSAL|upper == "GLUSTER" %}
5 | FSAL {
6 | name = "GLUSTER";
7 | hostname="$(hostname -s)";
8 | volume="{{ item.name }}";
9 | }
10 | {% endif %}
11 | Access_type = RW;
12 | Squash = No_root_squash;
13 | Disable_ACL = TRUE;
14 | Pseudo="/{{ item.name }}";
15 | Protocols = "3,4" ;
16 | Transports = "UDP,TCP" ;
17 | SecType = "sys";
18 | Tag = "volume_export";
19 | }
20 |
21 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/files/ganesha.conf:
--------------------------------------------------------------------------------
1 | NFSV4 {
2 | Grace_Period = 20;
3 | Lease_Lifetime = 15;
4 | }
5 | LOG {
6 | Default_log_level = "DEBUG";
7 | }
8 | EXPORT {
9 | Export_Id = 2;
10 | Path = "/export_vol";
11 | Pseudo = "/export_vol";
12 | Access_type = RW;
13 | Disable_ACL = true;
14 | Squash = "No_root_squash";
15 | Protocols = "3", "4" ;
16 | Transports = "UDP","TCP";
17 | SecType = "sys";
18 | FSAL {
19 | name = "GLUSTER";
20 | hostname = "localhost";
21 | volume = "export_vol";
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/files/hosts.j2:
--------------------------------------------------------------------------------
1 | 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
2 | ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
3 |
4 | {% for host in groups['ha_servers'] %}
5 | {{hostvars[host]['ansible_eth1']['ipv4']['address']}} {{host}}
6 | {% endfor %}
7 |
8 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/files/nfs:
--------------------------------------------------------------------------------
1 | NFS_HOSTNAME=SAMBA
2 | STATD_PORT=595
3 | STATD_OUTGOING_PORT=596
4 | MOUNTD_PORT=597
5 | RQUOTAD_PORT=598
6 | LOCKD_TCPPORT=599
7 | LOCKD_UDPPORT=599
8 | STATD_HOSTNAME="$NFS_HOSTNAME -H /etc/ctdb/statd-callout -p 97"
9 | RPCNFSDARGS="-N 4"
10 |
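11 | # The statd/mountd/rquotad/lockd ports above are pinned to fixed values so
12 | # every node uses identical ports, keeping firewall rules and NFS lock
13 | # failover consistent across the cluster.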
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/files/nodes.j2:
--------------------------------------------------------------------------------
1 | {% for host in groups['smb_servers'] %}
2 | {{hostvars[host]['ansible_eth1']['ipv4']['address']}}
3 | {% endfor %}
4 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/files/resolv.conf.j2:
--------------------------------------------------------------------------------
1 | nameserver {{ ad['dns'] }}
2 | search {{ ad['domain'] }}
3 | domain {{ ad['domain'] }}
4 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/files/smb.conf.j2:
--------------------------------------------------------------------------------
1 | [global]
2 | netbios name = {{ ha_name|upper }}
3 | {% if ad['setup_ad'] %}
4 | realm = {{ ad['domain']|upper }}
5 | workgroup = {{ ad['domain']|upper|regex_replace('^([^\\.]*).*$', '\\1') }}
6 | security = ADS
7 | winbind enum users = Yes
8 | winbind enum groups = Yes
9 | winbind use default domain = Yes
10 | idmap config {{ ad['domain']|upper|regex_replace('^([^\\.]*).*$', '\\1') }}:range = 500-40000
11 | idmap config {{ ad['domain']|upper|regex_replace('^([^\\.]*).*$', '\\1') }}:backend = autorid
12 | idmap config *:range = 70001-80000
13 | idmap config * : backend = tdb
14 | {% else %}
15 | workgroup = WORKGROUP
16 | security = user
17 | {% endif %}
18 | {%- if ctdb['setup_ctdb'] %}
19 | clustering = Yes
20 | {% endif %}
21 | log file = /var/log/samba/log.%m
22 | max log size = 50
23 | server max protocol = SMB3
24 | load printers = No
25 | disable spoolss = Yes
26 | show add printer wizard = No
27 | stat cache = No
28 | printing = bsd
29 | cups options = raw
30 | print command = lpr -r -P'%p' %s
31 | lpq command = lpq -P'%p'
32 | lprm command = lprm -P'%p' %j
33 | map archive = No
34 | map readonly = no
35 | store dos attributes = Yes
36 | kernel share modes = No
37 | debug pid = yes
38 | {% if samba['config'] is defined and samba['config'] %}
39 | {% for opt in samba['config'] %}
40 | {{ opt }} = {{ samba['config'][opt] }}
41 | {% endfor %}
42 | {% endif %}
43 |
44 | {% if samba['shares'] %}
45 | {% for share in samba['shares'] %}
46 | [{{share}}]
47 | {% for opt in samba['shares'][share] %}
48 | {{ opt }} = {{ samba['shares'][share][opt] }}
49 | {% endfor %}
50 | {% endfor %}
51 | {% endif %}
52 |
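53 | {# Illustrative only: the samba variable consumed above is assumed to look
54 |    roughly like the following (share name and path are examples):
55 |    samba:
56 |      setup_samba: true
57 |      config:
58 |        log level: "1"
59 |      shares:
60 |        share0:
61 |          path: /gluster/share0
62 |          read only: "no"
63 | #}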
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/files/storhaug.conf.j2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 | # Name of the HA cluster created.
3 | HA_NAME="{{ ha_name }}"
4 |
5 | # Password of the hacluster user
6 | HA_PASSWORD="hacluster"
7 |
8 | # The server on which cluster-wide configuration is managed.
9 | # IP/Hostname
10 | HA_SERVER="{{ hostvars[groups['ha_servers'][0]]['ansible_hostname'] }}"
11 |
12 | # The set of nodes that forms the HA cluster.
13 | # Comma-delimited IP/Hostname list
14 | HA_CLUSTER_NODES="{%- for host in groups['ha_servers'] -%}{{hostvars[host]['ansible_hostname']}}{% if not loop.last %},{% endif %}{%- endfor -%}"
15 |
16 | # [OPTIONAL] A subset of HA nodes that will serve as storage servers.
17 | # Comma-delimited IP/Hostname list
18 | STORAGE_NODES="{%- for host in groups['gluster_servers'] -%}{{hostvars[host]['ansible_hostname']}}{% if not loop.last %},{% endif %}{%- endfor -%}"
19 |
20 | # Virtual IPs of each of the nodes specified above.
21 | # Whitespace-delimited IP address list
22 | HA_VIPS="{{ vips|join(' ') }}"
23 |
24 | # Managed access methods
25 | # Whitespace-delimited list. Valid values:
26 | # nfs
27 | # smb
28 | HA_SERVICES="smb"
29 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/files/vagrant:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | MIIEogIBAAKCAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzI
3 | w+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoP
4 | kcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2
5 | hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NO
6 | Td0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcW
7 | yLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQIBIwKCAQEA4iqWPJXtzZA68mKd
8 | ELs4jJsdyky+ewdZeNds5tjcnHU5zUYE25K+ffJED9qUWICcLZDc81TGWjHyAqD1
9 | Bw7XpgUwFgeUJwUlzQurAv+/ySnxiwuaGJfhFM1CaQHzfXphgVml+fZUvnJUTvzf
10 | TK2Lg6EdbUE9TarUlBf/xPfuEhMSlIE5keb/Zz3/LUlRg8yDqz5w+QWVJ4utnKnK
11 | iqwZN0mwpwU7YSyJhlT4YV1F3n4YjLswM5wJs2oqm0jssQu/BT0tyEXNDYBLEF4A
12 | sClaWuSJ2kjq7KhrrYXzagqhnSei9ODYFShJu8UWVec3Ihb5ZXlzO6vdNQ1J9Xsf
13 | 4m+2ywKBgQD6qFxx/Rv9CNN96l/4rb14HKirC2o/orApiHmHDsURs5rUKDx0f9iP
14 | cXN7S1uePXuJRK/5hsubaOCx3Owd2u9gD6Oq0CsMkE4CUSiJcYrMANtx54cGH7Rk
15 | EjFZxK8xAv1ldELEyxrFqkbE4BKd8QOt414qjvTGyAK+OLD3M2QdCQKBgQDtx8pN
16 | CAxR7yhHbIWT1AH66+XWN8bXq7l3RO/ukeaci98JfkbkxURZhtxV/HHuvUhnPLdX
17 | 3TwygPBYZFNo4pzVEhzWoTtnEtrFueKxyc3+LjZpuo+mBlQ6ORtfgkr9gBVphXZG
18 | YEzkCD3lVdl8L4cw9BVpKrJCs1c5taGjDgdInQKBgHm/fVvv96bJxc9x1tffXAcj
19 | 3OVdUN0UgXNCSaf/3A/phbeBQe9xS+3mpc4r6qvx+iy69mNBeNZ0xOitIjpjBo2+
20 | dBEjSBwLk5q5tJqHmy/jKMJL4n9ROlx93XS+njxgibTvU6Fp9w+NOFD/HvxB3Tcz
21 | 6+jJF85D5BNAG3DBMKBjAoGBAOAxZvgsKN+JuENXsST7F89Tck2iTcQIT8g5rwWC
22 | P9Vt74yboe2kDT531w8+egz7nAmRBKNM751U/95P9t88EDacDI/Z2OwnuFQHCPDF
23 | llYOUI+SpLJ6/vURRbHSnnn8a/XG+nzedGH5JGqEJNQsz+xT2axM0/W/CRknmGaJ
24 | kda/AoGANWrLCz708y7VYgAtW2Uf1DPOIYMdvo6fxIB5i9ZfISgcJ/bbCUkFrhoH
25 | +vq/5CIWxCPp0f85R4qxxQ5ihxJ0YDQT9Jpx4TMss4PSavPaBH3RXow5Ohe+bYoQ
26 | NE5OgEXk2wVfZczCZpigBKbKZHNYcelXtTt/nP3rsCuGcM4h53s=
27 | -----END RSA PRIVATE KEY-----
28 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/files/vagrant.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key
2 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/group_vars/nfs_servers:
--------------------------------------------------------------------------------
1 | ---
2 | nfs: true
3 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/group_vars/smb_servers:
--------------------------------------------------------------------------------
1 | ---
2 | smb: true
3 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/group_vars/storhaug:
--------------------------------------------------------------------------------
1 | ---
2 | ansible_ssh_user: vagrant
3 | ansible_ssh_private_key_file: ~/.vagrant.d/insecure_private_key
4 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/raw-el7.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | name: Raw Installation (RedHat)
4 | become: yes
5 | gather_facts: True
6 |
7 | tasks:
8 | - name: Pause to allow vagrant to finish...
9 | pause: seconds=5
10 |
11 | - name: Check if offline
12 | set_fact:
13 | cache_flag: >-
14 | {% if offline is defined and offline %}-C{% endif %}
15 |
16 | - name: Generate distro cache path
17 | set_fact:
18 | cache_dir: >-
19 | {%- set distro = ansible_distribution + "/" + ansible_distribution_major_version -%}
20 | {{ vagrant_home }}/cache/{{ distro }}
21 |
22 |     - name: Make sure yum cache dir exists
23 | local_action:
24 | module: file
25 | path: "{{ cache_dir }}/yum"
26 | state: directory
27 | become: no
28 |
29 | - name: Push yum cache
30 | synchronize: "src={{ cache_dir }}/yum dest=/var/cache"
31 |
32 | - name: Make sure playbook dependencies are installed
33 | raw: >
34 | for REPO in `find /shared/playbooks -name '*.repo'`; do
35 | cp $REPO /etc/yum.repos.d/;
36 | done;
37 | sed -i "s/keepcache=0/keepcache=1/" /etc/yum.conf;
38 | yum {{ cache_flag }} install -y http://dl.fedoraproject.org/pub/epel/epel-release-latest-{{ el_ver }}.noarch.rpm;
39 | yum {{ cache_flag }} install -y centos-release-gluster;
40 | yum {{ cache_flag }} install -y --enablerepo=centos-gluster38-test {{ install_pkgs }};
41 | # rpm --import http://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{{ el_ver }};
42 | # cd /etc/yum.repos.d/;
43 | # curl -O http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/glusterfs-epel.repo;
44 | # curl -O http://download.gluster.org/pub/gluster/glusterfs/nfs-ganesha/2.3.2/EPEL.repo/nfs-ganesha.repo;
45 | # curl -O http://download.gluster.org/pub/gluster/glusterfs/samba/EPEL.repo/glusterfs-samba-epel.repo;
46 | # rpm --import http://download.gluster.org/pub/gluster/glusterfs/samba/EPEL.repo/pub.key;
47 | # rpm --import http://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{{ el_ver }};
48 | # yum install -y http://dl.fedoraproject.org/pub/epel/epel-release-latest-{{ el_ver }}.noarch.rpm;
49 |
50 | - name: Pull yum cache
51 | synchronize: "mode=pull dest={{ cache_dir }} src=/var/cache/yum"
52 | run_once: True
53 |
54 | - name: Push hosts file
55 | template: src={{item.src}} dest={{item.dest}} owner=root group=root mode=0644
56 | with_items:
57 | - { src: 'files/hosts.j2', dest: '/etc/hosts' }
58 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/common/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | firewall_allowed_tcp_ports:
3 | - "22"
4 | - "25"
5 | firewall_allowed_udp_ports: []
6 | firewall_forwarded_tcp_ports: []
7 | firewall_forwarded_udp_ports: []
8 | firewall_additional_rules: []
9 | firewall_log_dropped_packets: true
10 | firewall_ports: []
11 | firewall_interfaces: []
12 | firewall_services:
13 | - "ssh"
14 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/common/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Save iptables config
3 | command: service iptables save
4 |
5 | - name: Restart firewall
6 | command: service firewall restart
7 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/common/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Include variables and define needed variables.
3 | #- name: Include OS-specific variables.
4 | # include_vars: "{{ ansible_os_family }}.yml"
5 |
6 | # Setup/install tasks.
7 | #- include: setup-RedHat.yml
8 | # when: ansible_os_family == 'RedHat'
9 |
10 | #- include: setup-Debian.yml
11 | # when: ansible_os_family == 'Debian'
12 |
13 | - name: Enable firewall
14 | service: name=firewalld state=started enabled=yes
15 |
16 | - name: Assign firewall interfaces
17 | command: "firewall-cmd --permanent --add-interface={{ item }}"
18 | with_items: "{{ firewall_interfaces }}"
19 | when: firewall_interfaces
20 |
21 | - name: Enable firewall services
22 | firewalld: service={{ item }} permanent=true state=enabled
23 | with_items: "{{ firewall_services }}"
24 | when: firewall_services
25 |
26 | - name: Enable firewall port
27 | firewalld: port={{ item }} permanent=true state=enabled
28 | with_items: "{{ firewall_ports }}"
29 | when: firewall_ports
30 |
31 | - name: Reload firewall config
32 | command: "firewall-cmd --reload"
33 |
34 | #- name: Flush iptables the first time playbook runs.
35 | # command: iptables -F creates=/etc/init.d/firewall
36 |
37 | #- name: Copy firewall script into place.
38 | # template: src=firewall.bash.j2 dest=/etc/firewall.bash owner=root group=root mode=0744
39 | # notify: restart firewall
40 |
41 | #- name: Copy firewall init script into place.
42 | # template: src=firewall.j2 dest=/etc/init.d/firewall owner=root group=root mode=0755
43 |
44 | #- name: Ensure the firewall is enabled and will start on boot.
45 | # service: name=firewall state=started enabled=yes
46 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/common/tasks/setup-Debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure common packages are installed.
3 | apt: pkg=iptables state=installed
4 | when: ansible_os_family == 'Debian'
5 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/common/tasks/setup-RedHat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # EPEL
3 | - name: Import EPEL GPG key.
4 | rpm_key:
5 | key: "http://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{{ ansible_distribution_version[:1] }}"
6 | state: present
7 | when: ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux'
8 |
9 | - name: Install EPEL repo.
10 | yum: pkg="http://dl.fedoraproject.org/pub/epel/epel-release-latest-{{ ansible_distribution_version[:1] }}.noarch.rpm" state=installed
11 | when: ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux'
12 |
13 | # NFS
14 | - name: Fetch NFS-Ganesha repo file
15 | get_url: dest=/etc/yum.repos.d/nfs-ganesha.repo
16 | url=http://download.gluster.org/pub/gluster/glusterfs/nfs-ganesha/2.1.0/EPEL.repo/nfs-ganesha.repo
17 |
18 | - name: Set NFS-Ganesha repo file permissions
19 | file: owner=root group=root mode=0644
20 | path=/etc/yum.repos.d/nfs-ganesha.repo
21 |
22 | - name: Import NFS-Ganesha GPG key.
23 | rpm_key:
24 | key: "http://download.gluster.org/pub/gluster/glusterfs/nfs-ganesha/2.1.0/EPEL.repo/pub.key"
25 | state: present
26 |
27 | # SMB
28 | - name: Fetch Gluster Samba repo file
29 | get_url: dest=/etc/yum.repos.d/gluster-samba.repo
30 | url=http://download.gluster.org/pub/gluster/glusterfs/samba/EPEL.repo/glusterfs-samba-epel.repo
31 |
32 | - name: Set Gluster Samba repo file permissions
33 | file: owner=root group=root mode=0644
34 | path=/etc/yum.repos.d/gluster-samba.repo
35 |
36 | - name: Import Gluster Samba GPG key.
37 | rpm_key:
38 | key: "http://download.gluster.org/pub/gluster/glusterfs/samba/EPEL.repo/pub.key"
39 | state: present
40 |
41 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/common/templates/firewall.bash.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # iptables firewall for common LAMP servers.
3 | #
4 | # This file should be located at /etc/firewall.bash, and is meant to work with
5 | # Jeff Geerling's firewall init script.
6 | #
7 | # Common port reference:
8 | # 22: SSH
9 | # 25: SMTP
10 | # 80: HTTP
11 | # 123: NTP
12 | # 443: HTTPS
13 | # 2222: SSH alternate
14 | # 4949: Munin
15 | # 6082: Varnish admin
16 | # 8080: HTTP alternate (often used with Tomcat)
17 | # 8983: Tomcat HTTP
18 | # 8443: Tomcat HTTPS
19 | # 9000: SonarQube
20 | #
21 | # @author Jeff Geerling
22 |
23 | # No spoofing.
24 | if [ -e /proc/sys/net/ipv4/conf/all/rp_filter ]
25 | then
26 | for filter in /proc/sys/net/ipv4/conf/*/rp_filter
27 | do
28 | echo 1 > $filter
29 | done
30 | fi
31 |
32 | # Remove all rules and chains.
33 | iptables -F
34 | iptables -X
35 |
36 | # Accept traffic from loopback interface (localhost).
37 | iptables -A INPUT -i lo -j ACCEPT
38 |
39 | # Forwarded ports.
40 | {# Add a rule for each forwarded port #}
41 | {% for forwarded_port in firewall_forwarded_tcp_ports %}
42 | iptables -t nat -I PREROUTING -p tcp --dport {{ forwarded_port.src }} -j REDIRECT --to-port {{ forwarded_port.dest }}
43 | iptables -t nat -I OUTPUT -p tcp -o lo --dport {{ forwarded_port.src }} -j REDIRECT --to-port {{ forwarded_port.dest }}
44 | {% endfor %}
45 | {% for forwarded_port in firewall_forwarded_udp_ports %}
46 | iptables -t nat -I PREROUTING -p udp --dport {{ forwarded_port.src }} -j REDIRECT --to-port {{ forwarded_port.dest }}
47 | iptables -t nat -I OUTPUT -p udp -o lo --dport {{ forwarded_port.src }} -j REDIRECT --to-port {{ forwarded_port.dest }}
48 | {% endfor %}
49 |
50 | # Open ports.
51 | {# Add a rule for each open port #}
52 | {% for port in firewall_allowed_tcp_ports %}
53 | iptables -A INPUT -p tcp -m tcp --dport {{ port }} -j ACCEPT
54 | {% endfor %}
55 | {% for port in firewall_allowed_udp_ports %}
56 | iptables -A INPUT -p udp -m udp --dport {{ port }} -j ACCEPT
57 | {% endfor %}
58 |
59 | # Accept icmp ping requests.
60 | iptables -A INPUT -p icmp -j ACCEPT
61 |
62 | # Allow NTP traffic for time synchronization.
63 | iptables -A OUTPUT -p udp --dport 123 -j ACCEPT
64 | iptables -A INPUT -p udp --sport 123 -j ACCEPT
65 |
66 | # Additional custom rules.
67 | {% for rule in firewall_additional_rules %}
68 | {{ rule }}
69 | {% endfor %}
70 |
71 | # Allow established connections:
72 | iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
73 |
74 | # Log EVERYTHING (ONLY for Debug).
75 | # iptables -A INPUT -j LOG
76 |
77 | {% if firewall_log_dropped_packets %}
78 | # Log other incoming requests (all of which are dropped) at 15/minute max.
79 | iptables -A INPUT -m limit --limit 15/minute -j LOG --log-level 7 --log-prefix "Dropped by firewall: "
80 | {% endif %}
81 |
82 | # Drop all other traffic.
83 | iptables -A INPUT -j DROP
84 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/common/templates/firewall.j2:
--------------------------------------------------------------------------------
1 | #! /bin/sh
2 | # /etc/init.d/firewall
3 | #
4 | # Firewall init script, to be used with /etc/firewall.bash by Jeff Geerling.
5 | #
6 | # @author Jeff Geerling
7 |
8 | ### BEGIN INIT INFO
9 | # Provides: firewall
10 | # Required-Start: $remote_fs $syslog
11 | # Required-Stop: $remote_fs $syslog
12 | # Default-Start: 2 3 4 5
13 | # Default-Stop: 0 1 6
14 | # Short-Description: Start firewall at boot time.
15 | # Description: Enable the firewall.
16 | ### END INIT INFO
17 |
18 | # Carry out specific functions when asked to by the system
19 | case "$1" in
20 | start)
21 | echo "Starting firewall."
22 | /etc/firewall.bash
23 | ;;
24 | stop)
25 | echo "Stopping firewall."
26 | iptables -F
27 | ;;
28 | restart)
29 | echo "Restarting firewall."
30 | /etc/firewall.bash
31 | ;;
32 | status)
33 | echo -e "`iptables -L -n`"
34 | ;;
35 | *)
36 | echo "Usage: /etc/init.d/firewall {start|stop|status|restart}"
37 | exit 1
38 | ;;
39 | esac
40 |
41 | exit 0
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/glusterfs/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | glusterfs_default_release: ""
3 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/glusterfs/files/glusterfs-epel.repo:
--------------------------------------------------------------------------------
1 | # Place this file in your /etc/yum.repos.d/ directory
2 |
3 | [glusterfs-epel]
4 | name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
5 | baseurl=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/epel-$releasever/$basearch/
6 | enabled=1
7 | skip_if_unavailable=1
8 | gpgcheck=1
9 | gpgkey=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/pub.key
10 |
11 | [glusterfs-noarch-epel]
12 | name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
13 | baseurl=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/epel-$releasever/noarch
14 | enabled=1
15 | skip_if_unavailable=1
16 | gpgcheck=1
17 | gpgkey=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/pub.key
18 |
19 | [glusterfs-source-epel]
20 | name=GlusterFS is a clustered file-system capable of scaling to several petabytes. - Source
21 | baseurl=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/epel-$releasever/SRPMS
22 | enabled=0
23 | skip_if_unavailable=1
24 | gpgcheck=1
25 | gpgkey=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/pub.key
26 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/glusterfs/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Include variables and define needed variables.
3 | - name: Include OS-specific variables.
4 | include_vars: "{{ ansible_os_family }}.yml"
5 |
6 | # Setup/install tasks.
7 | #- include: setup-RedHat.yml
8 | # when: ansible_os_family == 'RedHat'
9 |
10 | #- include: setup-Debian.yml
11 | # when: ansible_os_family == 'Debian'
12 |
13 | - name: Ensure GlusterFS is started and enabled at boot.
14 | service: "name={{ glusterfs_daemon }} state=started enabled=yes"
15 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/glusterfs/tasks/setup-Debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure GlusterFS is installed.
3 | apt:
4 | name: "{{ item }}"
5 | state: installed
6 | default_release: "{{ glusterfs_default_release }}"
7 | with_items:
8 | - glusterfs-server
9 | - glusterfs-client
10 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/glusterfs/tasks/setup-RedHat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Fetch GlusterFS repo file
3 | get_url: dest=/etc/yum.repos.d/glusterfs-epel.repo
4 | url=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/glusterfs-epel.repo
5 |
6 | - name: Set GlusterFS repo file permissions
7 | file: owner=root group=root mode=0644
8 | path=/etc/yum.repos.d/glusterfs-epel.repo
9 |
10 | - name: Import GlusterFS GPG key.
11 | rpm_key:
12 | key: "http://download.gluster.org/pub/gluster/glusterfs/LATEST/pub.key"
13 | state: present
14 |
15 | - name: Ensure GlusterFS is installed.
16 | yum: "name=glusterfs-server,glusterfs-client state=installed"
17 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/glusterfs/vars/Debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | glusterfs_daemon: glusterfs-server
3 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/glusterfs/vars/RedHat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | glusterfs_daemon: glusterd
3 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/storhaug/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | smb: false
3 | nfs: false
4 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/storhaug/files/storhaug.repo:
--------------------------------------------------------------------------------
1 | [storhaug]
2 | name=Storhaug RPMs
3 | baseurl=file:///shared/repo
4 | #baseurl=https://copr-be.cloud.fedoraproject.org/results/jarrpa/storhaug/epel-$releasever-$basearch/
5 | enabled=1
6 | skip_if_unavailable=0
7 | gpgcheck=0
8 |
9 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/storhaug/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for storhaug
3 |
4 | #- name: Reload samba
5 | # service: name={{ samba_service_name }} state=reloaded
6 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/storhaug/tasks/conf-NFS.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Configure NFS-Ganesha exports
3 | template: src=export.conf.j2 dest=/etc/glusterfs-ganesha/exports/export.{{item.name}}.conf
4 | with_items: "{{ nfs['exports'] }}"
5 |
6 | - name: Enable NFS-Ganesha exports
7 |   shell: |
8 |     if ! grep -qs "export.{{ item.name }}.conf" /etc/glusterfs-ganesha/nfs-ganesha.conf
9 |     then
10 |     cat << END >> /etc/glusterfs-ganesha/nfs-ganesha.conf
11 |     %include "/etc/glusterfs-ganesha/exports/export.{{ item.name }}.conf"
12 |     END
13 |     fi
14 | with_items: "{{ nfs['exports'] }}"
15 |
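16 | # Illustrative only: each nfs['exports'] entry is assumed to provide at least
17 | # the volume name and FSAL referenced by export.conf.j2, e.g.:
18 | #   nfs:
19 | #     exports:
20 | #       - { name: export_vol, FSAL: GLUSTER }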
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/storhaug/tasks/conf-SMB.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Configure SMB services
3 | service: "name={{ item.daemon }} state={{ item.state }} enabled={{ item.enabled }}"
4 | with_items:
5 | - { daemon: 'ctdb', state: 'stopped', enabled: 'no' }
6 | - { daemon: 'smb', state: 'stopped', enabled: 'no' }
7 | - { daemon: 'nmb', state: 'stopped', enabled: 'no' }
8 | - { daemon: 'winbind', state: 'stopped', enabled: 'no' }
9 |
10 | - name: Configure Samba server
11 | template: src=smb.conf.j2
12 | dest=/etc/samba/smb.conf
13 | owner=root group=root mode=0644
14 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/storhaug/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for storhaug
3 |
4 | #- include_vars: NFS.yml
5 | # when: nfs
6 |
7 | - include_vars: SMB.yml
8 | when: smb
9 |
10 | #- include: conf-NFS.yml
11 | # when: nfs
12 |
13 | - include: conf-SMB.yml
14 | when: smb
15 |
16 | - name: Configure HA user
17 | user: name=hacluster password=buqSogFSZLJQM
18 |
19 | - name: Configure HA services
20 | service: "name={{ item.daemon }} state={{ item.state|default(omit) }} enabled={{ item.enabled|default(omit) }}"
21 | with_items:
22 | - { daemon: 'pcsd', state: 'started', enabled: 'yes' }
23 | - { daemon: 'pacemaker', enabled: 'yes' }
24 |
25 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/storhaug/tasks/setup-AD.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Stop all Samba services
3 | service: name={{item.name}} state={{item.state}}
4 | with_items:
5 | - { name: 'smb', state: 'stopped' }
6 | - { name: 'nmb', state: 'stopped' }
7 | - { name: 'winbind', state: 'stopped' }
8 | - { name: 'ctdb', state: 'stopped' }
9 |
10 | - name: Mount CTDB reclock volume
11 |   mount: name=/gluster/lock src=localhost:/ctdb fstype=glusterfs opts=_netdev,defaults,direct-io-mode=enable,transport=tcp,xlator-option=*client*.ping-timeout=10 state=mounted
12 | when: ctdb['setup_ctdb']
13 |
14 | - name: Restart CTDB
15 | service: name={{item.name}} state={{item.state}}
16 | with_items:
17 | - { name: 'ctdb', state: 'restarted' }
18 | when: ctdb['setup_ctdb']
19 |
20 | - name: Verify CTDB is healthy
21 | shell: while true; do sleep 1; status=$(ctdb status 2>/dev/null); rc=$?; if [ $rc -ne 0 ]; then exit $rc; fi; if ! echo $status | grep -qs 'UNHEALTHY (THIS'; then exit; fi; done
22 | when: ctdb['setup_ctdb']
23 |
24 | - name: Configure resolv.conf for Active Directory
25 | template: src={{item.src}} dest={{item.dest}} owner=root group=root mode=0744
26 | with_items:
27 | - { src: 'files/resolv.conf.j2', dest: '/etc/resolv.conf' }
28 |
29 | - name: Is NetworkManager installed?
30 |   shell: which nmcli >/dev/null 2>&1
31 | register: detect_nm
32 | changed_when: False
33 | failed_when: False
34 |
35 | - name: Disable NetworkManager DNS
36 | template: src={{item.src}} dest={{item.dest}} owner=root group=root mode=0744
37 | with_items:
38 | - { src: 'files/99-no-dns.conf', dest: '/etc/NetworkManager/conf.d/99-no-dns.conf' }
39 | when: detect_nm.rc == 0
40 |
41 | - name: Restart NetworkManager
42 | service: name="NetworkManager" state=restarted
43 | when: detect_nm.rc == 0
44 |
45 | - name: Configure nsswitch
46 | shell: sed -ri '/^(passwd|group)/s/$/ winbind/' /etc/nsswitch.conf
47 |
48 | - name: Join Active Directory domain
49 | shell: "net join -U Administrator%{{ ad_passwd }}"
50 | run_once: true
51 | delegate_to: "{{ groups['smb_servers'][0] }}"
52 |
53 | - name: Register Active Directory DNS
54 | shell: "net ads -P dns register {{ ha_name }}.{{ ad_domain }} {{ vips|join(' ') }}"
55 | run_once: true
56 | delegate_to: "{{ groups['smb_servers'][0] }}"
57 |
58 | - name: Verify Active Directory domain membership
59 | shell: net ads testjoin
60 | run_once: true
61 | delegate_to: "{{ groups['smb_servers'][0] }}"
62 |
63 | - name: Stop CTDB
64 | service: name={{item.name}} state={{item.state}}
65 | with_items:
66 | - { name: 'ctdb', state: 'stopped' }
67 | when: ctdb['setup_ctdb']
68 |
69 | - name: Unmount CTDB reclock volume
70 | mount: name=/gluster/lock src=localhost:/ctdb fstype=glusterfs state=unmounted
71 | when: ctdb['setup_ctdb']
72 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/storhaug/tasks/setup-RedHat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install Storhaug
3 | yum: pkg=storhaug,storhaug-smb state=present
4 |
5 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/storhaug/templates/smb.conf.j2:
--------------------------------------------------------------------------------
1 | ## This file is managed by Ansible, all changes will be lost ##
2 | #
3 | {% macro print_hash(hash) %}
4 | {% for key, value in hash.iteritems() %}
5 | {{ "%-30s" | format(key) | replace("_"," ") }} = {{ value }}
6 | {% endfor %}
7 | {% endmacro %}
8 | {% macro print_section(hash, section='global') %}
9 | [{{ section }}]
10 | {{ print_hash(hash) }}
11 |
12 | {% endmacro %}
13 | # Default options
14 | {{ print_section(samba_global) }}
15 | {% if samba_global_custom is defined and samba_global_custom %}
16 | # Custom options
17 | {{ print_hash(hash=samba_global_custom) }}
18 | {% endif %}
19 | # Share definitions
20 | {% if samba_shares is defined and samba_shares %}
21 | {% for share in samba_shares.keys() %}
22 | {{ print_section(samba_shares[share], share) }}
23 | {% endfor %}
24 | {% endif %}
25 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/storhaug/vars/NFS.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/storhaug/vars/SMB.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Allow access to Samba through firewall for specified networks
3 | # If samba_allow is undefined or False, allow access from all
4 | samba_allow: []
5 |
6 | samba_workgroup: 'WORKGROUP'
7 | samba_netbios_name: '{{ ansible_hostname }}'
8 | samba_server_string: '%h file server'
9 |
10 | # Name of the /etc/init.d/ service script
11 | samba_service_name: 'smb'
12 |
13 | # Which hash variable is used to configure [global] section in smb.conf
14 | samba_global: '{{ samba_default_global }}'
15 |
16 | # You can specify additional options in a separate hash
17 | samba_global_custom: False
18 |
19 | # Which hash of hashes is used to configure shares in smb.conf
20 | samba_shares: '{{ samba_default_shares }}'
21 |
22 | # Default [global] configuration
23 | samba_default_global:
24 |
25 | # Browsing / Identification
26 | workgroup: '{{ samba_workgroup | default("WORKGROUP") }}'
27 | netbios_name: '{{ samba_netbios_name | default(ansible_hostname) }}'
28 | server_string: '{{ samba_server_string | default("%h file server") }}'
29 |
30 | # Authentication
31 | security: 'user'
32 | passdb_backend: 'tdbsam'
33 |
34 | # Disable printing by default
35 | printing: 'bsd'
36 | load_printers: 'no'
37 | printcap_name: '/dev/null'
38 | show_add_printer_wizard: 'no'
39 | disable_spoolss: 'yes'
40 |
41 |
42 | # Hash of hashes of default shares
43 | samba_default_shares:
44 |
45 | 'homes':
46 | comment: 'Home Directories'
47 | browseable: 'no'
48 | writable: 'yes'
49 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/roles/storhaug/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # vars file for storhaug
3 |
--------------------------------------------------------------------------------
/vagrant-ansible/playbooks/storhaug.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # file: storhaug.yml
3 | - hosts: ha_servers
4 | name: Initializing
5 | become: yes
6 |
7 | tasks:
8 | - name: Detect guest OS family
9 | group_by: key={{ ansible_os_family }}
10 | changed_when: False
11 |
12 | - name: Set hostname
13 | hostname: "name={{ inventory_hostname }}{% if ad['domain'] is defined and ad['domain'] %}.{{ ad['domain'] }}{% endif %}"
14 |
15 | - name: Disable SELinux
16 | selinux: state=disabled
17 |
18 | - name: Create extra disk partitions
19 | shell: "{ blkid | grep -q /dev/{{item[0].dev}}{{item[1].num}} && echo FOUND; } || { parted /dev/{{item[0].dev}} mklabel msdos && parted /dev/{{item[0].dev}} mkpart primary 512 {{item[1].size}}; }"
20 | register: part_result
21 | changed_when: "'FOUND' not in part_result.stdout"
22 | with_subelements:
23 | - "{{extra_disks}}"
24 | - parts
25 | when: extra_disks is defined
26 |
27 | - name: Create extra disk filesystems
28 | filesystem: fstype={{item[1].fs}} dev=/dev/{{item[0].dev}}{{item[1].num}}
29 | with_subelements:
30 | - "{{extra_disks}}"
31 | - parts
32 | when: extra_disks is defined
33 |
34 | - name: Mount extra disk filesystems
35 | mount: name={{item[1].mount}} src=/dev/{{item[0].dev}}{{item[1].num}} fstype={{item[1].fs}} state=mounted
36 | with_subelements:
37 | - "{{extra_disks}}"
38 | - parts
39 | when: extra_disks is defined
40 |
41 | - hosts: RedHat
42 | name: Server Setup (RedHat)
43 | become: yes
44 | gather_facts: False
45 |
46 | tasks:
47 | # - include: roles/common/tasks/setup-RedHat.yml
48 | # - include: roles/glusterfs/tasks/setup-RedHat.yml
49 | # when: gluster['setup_gluster']
50 |
51 | - hosts: none
52 | name: Server Setup (Common)
53 | become: yes
54 |
55 | roles:
56 | - common
57 |
58 | vars:
59 | - firewall_services:
60 | - ssh
61 | - glusterfs
62 | - samba
63 | - samba-client
64 | - nfs
65 | - high-availability
66 | - firewall_ports:
67 | - '4379/tcp'
68 | - firewall_interfaces:
69 | - 'eth0'
70 | - 'eth1'
71 |
72 | tasks:
73 | - name: Disable SELinux
74 | selinux: state=disabled
75 |
76 | - hosts: gluster_servers:smb_servers
77 | name: GlusterFS Setup
78 | become: yes
79 |
80 | roles:
81 | - { role: glusterfs, when: "gluster['setup_gluster'] is defined and gluster['setup_gluster']" }
82 |
83 | vars:
84 | - gluster_default_cluster: "{%- for host in groups['gluster_servers'] -%}{{hostvars[host]['ansible_hostname']}}{% if not loop.last %},{% endif %}{%- endfor -%}"
85 | - gluster_default_replica: "{{ 3 if groups['gluster_servers']|count >= 3 else (2 if groups['gluster_servers']|count == 2 else omit) }}"
86 | - gluster_default_bricks: 1
87 |
88 | tasks:
89 | - name: Generate bricks list
90 | set_fact:
91 | bricks: >-
92 | {%- for volume in gluster['volumes'] -%}
93 | {%- set cluster = gluster_default_cluster if 'cluster' not in volume.iterkeys() else volume.cluster -%}
94 | {% if ansible_hostname in cluster.split(',') or ( ansible_ib0 is defined and ansible_ib0['ipv4']['address'] in cluster.split(',') ) -%}
95 | {%- set bricks = gluster_default_bricks if 'bricks' not in volume.iterkeys() else volume.bricks -%}
96 | {%- if(bricks|int) and bricks > 0 -%}
97 | {%- for i in range(bricks) -%}
98 | {{ volume.name }}{{ i }}{% if not loop.last %},{% endif %}
99 | {%- endfor -%}
100 | {%- elif bricks|list -%}
101 | {%- for brick in bricks -%}
102 | {{ brick }}{% if not loop.last %},{% endif %}
103 | {%- endfor -%}
104 | {%- endif -%}
105 | {%- if not loop.last %},{% endif -%}
106 | {%- endif %}
107 | {% endfor %}
108 | when: gluster['setup_gluster'] is defined and gluster['setup_gluster']
109 |
110 | # - debug: msg="bricks {{ bricks.split(',') }}"
111 | # failed_when: true
112 |
113 | - name: Ensure Gluster brick directories exist.
114 | file: "path={{ [gluster['bricks_dir'], item]|join('/') if item[0] != '/' else item }} state=directory mode=0775"
115 | with_items: "{% if bricks is defined and bricks %}{{ bricks.split(',') if ',' in bricks else [ bricks ]}}{% else %}{{ [ '' ] }}{% endif %}"
116 | when: "gluster['setup_gluster'] is defined and gluster['setup_gluster'] and item"
117 |
118 | - name: Probe Samba peers
119 | command: gluster peer probe {{ item }}
120 | with_items: "{{ groups['smb_servers'] }}"
121 | run_once: true
122 | when: "'gluster_servers' in group_names"
123 | register: probe_result
124 | changed_when: "'already' not in probe_result.stdout and 'localhost' not in probe_result.stdout"
125 |
126 | - name: Configure Gluster volumes.
127 | gluster_volume:
128 | state: present
129 | name: "{{ item.name }}"
130 | bricks: >-
131 | {% set bricks = gluster_default_bricks if 'bricks' not in item.iterkeys() else item.bricks -%}
132 | {%- if (bricks|int) and bricks > 0 -%}
133 | {%- for i in range(bricks) -%}
134 | {{ [gluster['bricks_dir'], item.name]|join('/') }}{{ i }}{% if not loop.last %},{% endif %}
135 | {%- endfor -%}
136 | {%- elif bricks|list -%}
137 | {%- for brick in bricks -%}
138 | {% if brick[0] != "/" %}{{ gluster['bricks_dir'] }}/{% endif %}{{ brick }}{% if not loop.last %},{% endif %}
139 | {%- endfor -%}
140 | {%- else -%}
141 | {{ omit }}
142 | {%- endif %}
143 | replicas: >-
144 | {% if item.replica is defined and item.replica == 'n' -%}
145 | {{ groups['gluster_servers']|count if groups['gluster_servers']|count > 1 else omit }}
146 | {%- elif item.replica is defined and item.replica == 0 -%}
147 | {{ omit }}
148 | {%- elif groups['gluster_servers']|count >= (item.replica | default(gluster_default_replica)) -%}
149 | {{ item.replica | default(gluster_default_replica) }}
150 | {%- else -%}
151 | {{ omit }}
152 | {%- endif %}
153 | cluster: "{{ item.cluster | default(gluster_default_cluster) }}"
154 | options: "{{ item.opts|default(omit) }}"
155 | transport: "{{ item.transport|default(omit) }}"
156 | force: yes
157 | run_once: true
158 | delegate_to: "{{ groups['gluster_servers'][0] }}"
159 | when: gluster['setup_gluster'] is defined and gluster['setup_gluster']
160 | with_items: "{{ gluster['volumes'] }}"
161 | register: result
162 | until: result|success
163 | retries: 3
164 |
165 | - name: Start Gluster volumes.
166 | gluster_volume:
167 | name: "{{ item.name }}"
168 | state: started
169 | run_once: true
170 | when: gluster['setup_gluster'] is defined and gluster['setup_gluster']
171 | with_items: "{{ gluster['volumes'] }}"
172 |
173 | - name: Set volume permissions
174 | shell: "mount -t glusterfs localhost:/{{ item.name }} /mnt && chmod -c {{ item.root_mode|default('777') }} /mnt; umount /mnt"
175 | with_items: "{{ gluster['volumes'] }}"
176 | run_once: true
177 | when: gluster['setup_gluster'] is defined and gluster['setup_gluster']
178 | register: perms_result
179 | changed_when: "'changed' in perms_result.stdout"
180 |
181 | - name: Rebalance volumes (for lookup optimization)
182 | shell: "gluster volume rebalance {{ item.name }} status | grep -q completed || gluster volume rebalance {{ item.name }} start"
183 | when: "gluster['setup_gluster'] is defined and gluster['setup_gluster'] and 'bricks' in item.iterkeys() and ( ( (item.bricks|int) and item.bricks > 0 ) or ( item.bricks|list and item.bricks|length > 0 ) )"
184 | with_items: "{{ gluster['volumes'] }}"
185 | run_once: true
186 | ignore_errors: true
187 |
188 | - name: Verify rebalance complete
189 | shell: "gluster volume rebalance {{ item.name }} status"
190 | when: "gluster['setup_gluster'] is defined and gluster['setup_gluster'] and 'bricks' in item.iterkeys() and ( ( (item.bricks|int) and item.bricks > 0 ) or ( item.bricks|list and item.bricks|length > 0 ) )"
191 | with_items: "{{ gluster['volumes'] }}"
192 | run_once: true
193 | register: rebal
194 | until: "'completed' in rebal.stdout"
195 | ignore_errors: true
196 |
197 | - name: Mount volumes
198 | mount: name={{item.mount}} src=localhost:/{{item.name}} fstype=glusterfs opts=defaults,_netdev state=mounted
199 | with_items: "{{ gluster['volumes'] }}"
200 | when: "item['mount'] is defined"
201 | register: result
202 | until: result|success
203 |
204 | - hosts: smb_servers
205 | name: Server Setup (SMB)
206 | become: yes
207 |
208 | # roles:
209 | # - samba
210 |
211 | tasks:
212 | - name: Copy CTDB config files
213 | template: src={{item.src}} dest={{item.dest}} owner=root group=root mode=0744
214 | with_items:
215 | - { src: "{% if ctdb['config_file'] is defined and ctdb['config_file'] %}{{ ctdb['config_file'] }}{% else %}files/ctdb{% endif %}", dest: '/etc/sysconfig/ctdb' }
216 | - { src: 'files/nodes.j2', dest: '/etc/ctdb/nodes' }
217 | when: ctdb['setup_ctdb'] is defined and ctdb['setup_ctdb']
218 |
219 | - name: Copy Samba config files
220 | template: src={{item.src}} dest={{item.dest}} owner=root group=root mode=0744
221 | with_items:
222 | - { src: "{% if samba['config_file'] is defined and samba['config_file'] %}{{ samba['config_file'] }}{% else %}files/smb.conf.j2{% endif %}", dest: '/etc/samba/smb.conf' }
223 | when: samba['setup_samba'] is defined and samba['setup_samba']
224 |
225 | - name: Ensure share directories exist
226 | file: "path={{ samba['shares'][item]['path'] }} state=directory mode=0777"
227 | with_items: "{{ samba['shares'] }}"
228 | when: "samba['setup_samba'] is defined and samba['setup_samba'] and item and ('glusterfs:volume' not in samba['shares'][item].keys())"
229 |
230 | - hosts: nfs_servers
231 | name: Server Setup (NFS)
232 | become: yes
233 |
234 | tasks:
235 | - name: Copy Ganesha config files
236 | template: src={{item.src}} dest={{item.dest}} owner=root group=root mode=0744
237 | with_items:
238 | - { src: "files/ganesha.conf", dest: '/etc/ganesha/ganesha.conf' }
239 | - { src: "files/nfs", dest: '/etc/sysconfig/nfs' }
240 | when: ganesha['setup_ganesha'] is defined and ganesha['setup_ganesha']
241 |
242 | - hosts: ad_server
243 | name: Active Directory Setup
244 | become: yes
245 |
246 | tasks:
247 | - include: roles/storhaug/tasks/setup-AD.yml
248 | when: ad['setup_ad'] is defined and ad['setup_ad']
249 |
250 | - hosts: ha_servers
251 | name: Storhaug Configuration and Initialization
252 | become: yes
253 |
254 | # roles:
255 | # - storhaug
256 |
257 | tasks:
258 | - name: Copy Storhaug config files
259 | template: src={{item.src}} dest={{item.dest}} owner=root group=root mode=0744
260 | with_items:
261 | - { src: 'files/storhaug.conf.j2', dest: '/etc/sysconfig/storhaug.conf' }
262 | - { src: 'files/CTDB', dest: '/usr/lib/ocf/resource.d/heartbeat/CTDB' }
263 |
264 | - name: Ensure SSH directory exists
265 | file: "path=/root/.ssh state=directory mode=0700"
266 |
267 | - name: Copy SSH access files
268 | template: src={{item.src}} dest={{item.dest}} owner=root group=root mode=0700
269 | with_items:
270 | - { src: 'files/vagrant.pub', dest: '/root/.ssh/authorized_keys' }
271 | - { src: 'files/vagrant', dest: '/etc/sysconfig/storhaug.d/secret.pem' }
272 |
273 | - name: Teardown any pre-existing cluster.
274 | shell: pcs cluster stop; pcs cluster destroy
275 |
276 | # - name: Start Storhaug
277 | # shell: storhaug setup
278 | # run_once: true
279 |
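280 | # Illustrative only: the data structures consumed by the plays above are
281 | # assumed to look roughly like the following (device names, sizes and paths
282 | # are examples):
283 | #
284 | #   extra_disks:
285 | #     - dev: sdb
286 | #       parts:
287 | #         - { num: 1, size: 1024, fs: xfs, mount: /bricks }
288 | #
289 | #   gluster:
290 | #     setup_gluster: true
291 | #     bricks_dir: /bricks
292 | #     volumes:
293 | #       - { name: export_vol, bricks: 1, replica: 2, mount: /gluster/export_vol }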
--------------------------------------------------------------------------------