├── .gitignore ├── ansible.cfg ├── tasks ├── linbit-excludes.yml ├── linbit-register.yml ├── linbit-firewall.yml ├── linbit-updates.yml ├── linbit-pacemaker-init.yml ├── linbit-drbd-init.yml ├── linbit-iscsi.yml └── linbit-nfs.yml ├── templates ├── r0.res ├── corosync.conf ├── r0-with-proxy.res ├── nfs-cib.txt └── iscsi-cib.txt ├── linbit-hosts.ini ├── scripts └── linbit-insert-excludes-centos7.sh ├── linbit-go.yml └── README /.gitignore: -------------------------------------------------------------------------------- 1 | *.retry 2 | *.swp 3 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | inventory = linbit-hosts.ini 3 | -------------------------------------------------------------------------------- /tasks/linbit-excludes.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Insert exclude lines into CentOS 7 Base repos 3 | - name: add excludes to base and updates repo. 
4 | script: scripts/linbit-insert-excludes-centos7.sh 5 | when: ansible_distribution == 'CentOS' and ansible_distribution_major_version >= '7' 6 | 7 | # Add more distro support here :) 8 | -------------------------------------------------------------------------------- /tasks/linbit-register.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: fetch the latest linbit-manage-node.py 3 | get_url: 4 | url: "https://my.linbit.com/linbit-manage-node.py" 5 | dest: "/root/linbit-manage-node.py" 6 | mode: "0640" 7 | force: "yes" 8 | 9 | - name: register nodes using linbit-manage-node.py 10 | shell: bash -c "LB_USERNAME={{ lb_user }} LB_PASSWORD={{ lb_pass }} LB_CONTRACT_ID={{ lb_con_id }} LB_CLUSTER_ID={{ lb_clu_id }} python /root/linbit-manage-node.py" 11 | -------------------------------------------------------------------------------- /templates/r0.res: -------------------------------------------------------------------------------- 1 | resource r0 { 2 | device /dev/drbd0; 3 | disk {{ drbd_backing_disk }}; 4 | meta-disk internal; 5 | on {{ hostvars['linbit-ans-a']['ansible_hostname'] }} { 6 | address {{ hostvars['linbit-ans-a']['drbd_replication_ip'] }}:7999; 7 | node-id 0; 8 | } 9 | on {{ hostvars['linbit-ans-b']['ansible_hostname'] }} { 10 | address {{ hostvars['linbit-ans-b']['drbd_replication_ip'] }}:7999; 11 | node-id 1; 12 | } 13 | connection-mesh { 14 | hosts {{ hostvars['linbit-ans-a']['ansible_hostname'] }} {{ hostvars['linbit-ans-b']['ansible_hostname'] }}; 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /tasks/linbit-firewall.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check if firewalld is running 3 | shell: systemctl status firewalld 4 | register: firewalld 5 | ignore_errors: true # systemctl exits non-zero when firewalld is stopped; rc is checked by the tasks below 6 | 7 | - name: firewalld ports for DRBD 8 | firewalld: 9 | port: 7788-7999/tcp 10 | permanent: true 11 | immediate: true 12 | state: enabled 13 | when: firewalld.rc == 0 14 | 15 | - name: firewalld ports for Pacemaker/Corosync/DLM 16 | firewalld: 17 | service: high-availability 18 | permanent: true 19 | immediate: true 20 | state: enabled 21 | when: firewalld.rc == 0 22 | 23 | # should add support for iptables/ufw/etc 24 | -------------------------------------------------------------------------------- /templates/corosync.conf: -------------------------------------------------------------------------------- 1 | totem { 2 | version: 2 3 | secauth: off 4 | cluster_name: linbit-cluster 5 | transport: udpu 6 | rrp_mode: passive 7 | } 8 | 9 | nodelist { 10 | node { 11 | ring0_addr: {{ hostvars['linbit-ans-a']['drbd_replication_ip'] }} 12 | ring1_addr: {{ hostvars['linbit-ans-a']['mgmt_interface_ip'] }} 13 | nodeid: 1 14 | } 15 | node { 16 | ring0_addr: {{ hostvars['linbit-ans-b']['drbd_replication_ip'] }} 17 | ring1_addr: {{ hostvars['linbit-ans-b']['mgmt_interface_ip'] }} 18 | nodeid: 2 19 | } 20 | } 21 | 22 | quorum { 23 | provider: corosync_votequorum 24 | two_node: 1 25 | } 26 | 27 | logging { 28 | to_syslog: yes 29 | } 30 | 31 | -------------------------------------------------------------------------------- /tasks/linbit-updates.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: yum update all system packages to latest 3 | yum: name=* state=latest 4 | 5 | - name: check if reboot is needed 6 | shell: LAST_KERNEL=$(rpm -q --last kernel | awk 'NR==1{sub(/kernel-/,""); print $1}'); CURRENT_KERNEL=$(uname -r); if [ $LAST_KERNEL != $CURRENT_KERNEL ]; then echo 'reboot'; else echo 'no'; fi 7 | ignore_errors: true 8 | register: reboot_hint 9 | 10 | - name: rebooting to load new kernel 11 | command: bash -c "sleep 5 && reboot" 12 | ignore_errors: true 13 | async: 1 14 | poll: 0 15 | when: reboot_hint.stdout.find("reboot") != -1 16 | register: reboot_happened 17 | 18 | - name: Waiting for node to reboot 19 | local_action: wait_for host={{ 
ansible_ssh_host }} state=started 20 | when: reboot_happened.changed 21 | 22 | -------------------------------------------------------------------------------- /tasks/linbit-pacemaker-init.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: configure Corosync 3 | template: src=../templates/corosync.conf dest=/etc/corosync/corosync.conf 4 | register: corosync 5 | 6 | # If corosync.conf was changed, restart pacemaker/corosync 7 | - name: restart Pacemaker if Corosync config is new 8 | systemd: name=pacemaker state=stopped 9 | when: corosync.changed 10 | 11 | - name: restart Corosync if config is new 12 | systemd: name=corosync state=restarted 13 | when: corosync.changed 14 | 15 | - name: restart Pacemaker if corosync config is new 16 | systemd: name=pacemaker state=started 17 | when: corosync.changed 18 | 19 | - name: enable Corosync for boot 20 | systemd: name=corosync enabled=yes 21 | 22 | - name: enable Pacemaker for boot 23 | systemd: name=pacemaker enabled=yes 24 | -------------------------------------------------------------------------------- /templates/r0-with-proxy.res: -------------------------------------------------------------------------------- 1 | resource r0 { 2 | protocol A; 3 | device /dev/drbd0; 4 | disk {{ drbd_backing_disk }}; 5 | meta-disk internal; 6 | proxy { 7 | memlimit 100M; 8 | } 9 | on {{ hostvars['linbit-ans-a']['ansible_hostname'] }} { 10 | address 127.0.0.1:7997; 11 | proxy on {{ hostvars['linbit-ans-a']['ansible_hostname'] }} { 12 | inside 127.0.0.1:7998; 13 | outside {{ hostvars['linbit-ans-a']['drbd_replication_ip'] }}:7999; 14 | } 15 | node-id 0; 16 | } 17 | on {{ hostvars['linbit-ans-b']['ansible_hostname'] }} { 18 | address 127.0.0.1:7997; 19 | proxy on {{ hostvars['linbit-ans-b']['ansible_hostname'] }} { 20 | inside 127.0.0.1:7998; 21 | outside {{ hostvars['linbit-ans-b']['drbd_replication_ip'] }}:7999; 22 | } 23 | node-id 1; 24 | } 25 | } 26 | 
-------------------------------------------------------------------------------- /linbit-hosts.ini: -------------------------------------------------------------------------------- 1 | # linbit-ans-a and linbit-ans-b must be resolvable by DNS. Add entries to /etc/hosts 2 | # as mentioned in the README if you cannot stand up test nodes named linbit-ans-a 3 | # and linbit-ans-b in your environment. 4 | # drbd_replication_ip should be the IP address on an interface directly connected to 5 | # the peer node. 6 | # mgmt_interface_ip should be the IP address that the node uses to communicate with 7 | # the rest of the network/clients. 8 | # drbd_backing_disk is the spare block device DRBD will attach to. 9 | # cluster_vip and cluster_vip_cidr are the virtual IP and network mask that will 10 | # be used to access clustered services, should a cluster configuration be deployed. 11 | [linbit] 12 | linbit-ans-a drbd_replication_ip="172.16.7.150" mgmt_interface_ip="192.168.7.150" 13 | linbit-ans-b drbd_replication_ip="172.16.7.151" mgmt_interface_ip="192.168.7.151" 14 | 15 | [linbit:vars] 16 | drbd_backing_disk="/dev/vdb" 17 | cluster_vip="192.168.7.152" 18 | cluster_vip_cidr="24" 19 | -------------------------------------------------------------------------------- /tasks/linbit-drbd-init.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check if drbd0 already present 3 | command: bash -c "drbdadm sh-dev all | grep -q drbd0; echo $?" 
4 | register: drbd0_exists 5 | 6 | - name: configure DRBD device 7 | template: src=../templates/r0.res dest=/etc/drbd.d/r0.res 8 | register: drbd0_config 9 | 10 | - name: drbdadm create-md in remote shell 11 | shell: drbdadm create-md r0 --force >> /root/linbit-ans-drbd.log 12 | when: drbd0_exists.stdout == "1" 13 | 14 | - name: drbdadm up in remote shell 15 | shell: drbdadm up r0 >> /root/linbit-ans-drbd.log 16 | when: drbd0_exists.stdout == "1" 17 | 18 | - name: skip DRBD initial sync 19 | run_once: true 20 | shell: bash -c 'while [ $(drbdadm cstate r0) != "Connected" ]; do sleep 1s; done; drbdadm new-current-uuid r0 --clear-bitmap' >> /root/linbit-ans-drbd.log 21 | when: drbd0_exists.stdout == "1" 22 | 23 | # Adjust if configuration changed as catch all 24 | - name: drbdadm adjust in remote shell 25 | shell: drbdadm adjust r0 >> /root/linbit-ans-drbd.log 26 | when: drbd0_config.changed 27 | -------------------------------------------------------------------------------- /tasks/linbit-iscsi.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install iSCSI specific packages 3 | yum: name={{ item }} update_cache=yes state=latest 4 | with_items: 5 | - targetcli 6 | 7 | - name: place the cib import file on both nodes 8 | template: src=templates/iscsi-cib.txt dest=/root/linbit-cib.txt 9 | register: cib_file 10 | 11 | - name: stop all cluster resources for replacement 12 | run_once: true 13 | shell: crm configure property stop-all-resources=true >> /root/linbit-ans-cib-import.log 14 | when: cib_file.changed 15 | 16 | - name: wait for resources to stop 17 | pause: 18 | seconds: 40 19 | when: cib_file.changed 20 | 21 | - name: import the cib on one node 22 | run_once: true 23 | shell: crm configure load replace /root/linbit-cib.txt >> /root/linbit-ans-cib-import.log 24 | when: cib_file.changed 25 | 26 | - name: start cluster resources after replacement 27 | run_once: true 28 | shell: crm configure property 
stop-all-resources=false >> /root/linbit-ans-cib-import.log 29 | when: cib_file.changed 30 | 31 | - name: check if firewalld is running 32 | shell: systemctl status firewalld 33 | register: firewalld 34 | ignore_errors: true # systemctl exits non-zero when firewalld is stopped; rc is checked by the task below 35 | 36 | - name: allow iscsi through firewalld 37 | firewalld: 38 | service: iscsi-target 39 | permanent: true 40 | immediate: true 41 | state: enabled 42 | when: firewalld.rc == 0 43 | -------------------------------------------------------------------------------- /scripts/linbit-insert-excludes-centos7.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ### Check for Pacemaker/Corosync related excludes in CentOS-Base, 3 | ### add them if missing and skip if they're correct 4 | 5 | ### vars 6 | repofile=/etc/yum.repos.d/CentOS-Base.repo 7 | excludes="pacemaker* corosync* drbd* cluster-glue* resource-agents* libqb* fence-agents* sbd*" 8 | repos=(base updates) 9 | # we need a version of $excludes that has * escaped ([a-zA-Z0-9]; the range a-Z is invalid/locale-dependent) 10 | r_excludes=$(echo $excludes | sed -r 's/([a-zA-Z0-9])\*/\1\\\*/g') 11 | # set to on or off 12 | debug=off 13 | 14 | ### debug 15 | if [ $debug == "on" ]; then 16 | echo "$repofile" 17 | echo "$excludes" 18 | echo "$r_excludes" 19 | for repo in ${repos[*]}; do 20 | printf "%s\n" $repo 21 | done 22 | fi 23 | 24 | ### check for existing repo file 25 | if [ ! 
-f $repofile ]; then 26 | if [ $debug == "on" ]; then echo "$repofile is not a regular file"; fi 27 | exit 1 28 | fi 29 | 30 | ### check for excludes, and add them if needed 31 | ### TODO: it would be better if this iterated through the excludes 32 | ### and checked for each individually (an array of excludes) 33 | for repo in ${repos[*]}; do 34 | if $(sed -n "/\[$repo\]/,/\(\[\|^$\)/p" $repofile | grep -q "^exclude="); then 35 | if [ $debug == "on" ]; then echo "exclude line already exists for [$repo]..."; fi 36 | if $(sed -n "/\[$repo\]/,/\(\[\|^$\)/p" $repofile | grep -q "^exclude=$r_excludes"); then 37 | if [ $debug == "on" ]; then echo "and they're correct!"; fi 38 | else 39 | if [ $debug == "on" ]; then echo "and they're incorrect for us :("; fi 40 | exit 1 41 | fi 42 | else 43 | sed -i.bak "s/\[$repo\]/\[$repo\]\nexclude=$excludes/1" $repofile 44 | if [ $debug == "on" ]; then echo "added appropriate excludes to $repofile"; fi 45 | fi 46 | done 47 | 48 | exit 0 49 | -------------------------------------------------------------------------------- /templates/nfs-cib.txt: -------------------------------------------------------------------------------- 1 | primitive p_drbd_r0 ocf:linbit:drbd \ 2 | params drbd_resource=r0 \ 3 | op start interval=0s timeout=240 \ 4 | op promote interval=0s timeout=90 \ 5 | op demote interval=0s timeout=90 \ 6 | op stop interval=0s timeout=100 \ 7 | op monitor interval=29 role=Master \ 8 | op monitor interval=31 role=Slave 9 | ms ms_drbd_r0 p_drbd_r0 \ 10 | meta master-max=1 master-node-max=1 notify=true clone-max=2 clone-node-max=1 11 | primitive p_fs_drbd Filesystem \ 12 | params device="/dev/drbd0" directory="/drbd" fstype=xfs options="noatime,nodiratime" \ 13 | op start interval=0 timeout=60s \ 14 | op stop interval=0 timeout=60s \ 15 | op monitor interval=20 timeout=40s 16 | primitive p_nfsserver nfsserver \ 17 | params nfs_shared_infodir="/drbd/exports/nfs_shared_infodir" nfs_ip={{ cluster_vip }} \ 18 | op start interval=0s 
timeout=40s \ 19 | op stop interval=0s timeout=20s \ 20 | op monitor interval=10s timeout=20s 21 | primitive p_exportfs_root exportfs \ 22 | params clientspec="*" directory="/drbd/exports" fsid=0 unlock_on_stop=1 options="rw,sync,no_root_squash,insecure" \ 23 | op start interval=0s timeout=40s \ 24 | op stop interval=0s timeout=120s \ 25 | op monitor interval=10s timeout=20s 26 | primitive p_vip_ip IPaddr2 \ 27 | params ip={{ cluster_vip }} cidr_netmask={{ cluster_vip_cidr }} \ 28 | op start interval=0 timeout=20 \ 29 | op stop interval=0 timeout=20 \ 30 | op monitor interval=10s 31 | group g_nfs p_fs_drbd p_nfsserver p_exportfs_root p_vip_ip 32 | colocation cl_g_nfs-with-ms_drbd_r0 inf: g_nfs:Started ms_drbd_r0:Master 33 | order o_ms_drbd_r0-before-g_nfs inf: ms_drbd_r0:promote g_nfs:start 34 | property cib-bootstrap-options: \ 35 | no-quorum-policy=ignore \ 36 | stonith-enabled=false \ 37 | start-failure-is-fatal=false 38 | rsc_defaults rsc-options: \ 39 | migration-threshold=3 \ 40 | failure-timeout=60s 41 | -------------------------------------------------------------------------------- /linbit-go.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: linbit 3 | # Need to be root for this playbook to work 4 | vars: 5 | - ansible_ssh_user: root 6 | # Prompt to configure a specific cluster 7 | vars_prompt: 8 | - name: cluster_type 9 | prompt: "Specify cluster configuration (nfs, iscsi, or none)" 10 | default: "none" 11 | private: no 12 | - name: lb_user 13 | prompt: "http://my.linbit.com username" 14 | private: no 15 | - name: lb_pass 16 | prompt: "http://my.linbit.com password (will not be echoed)" 17 | private: yes 18 | - name: lb_con_id 19 | prompt: "LINBIT Contract ID (provided by LINBIT)" 20 | private: no 21 | - name: lb_clu_id 22 | prompt: "LINBIT Cluster ID (provided by LINBIT)" 23 | private: no 24 | 25 | tasks: 26 | # Put SELinux in permissive mode (log actions, don't block) 27 | - name: 
selinux to permissive 28 | selinux: 29 | policy: targeted 30 | state: permissive 31 | 32 | # Update system packages to the latest version 33 | - import_tasks: tasks/linbit-updates.yml 34 | 35 | # Register nodes with LINBIT and setup repo access 36 | - import_tasks: tasks/linbit-register.yml 37 | 38 | # Insert exclude lines into repos if needed 39 | - import_tasks: tasks/linbit-excludes.yml 40 | 41 | # Install LINBIT Packages 42 | - name: install packages from LINBIT 43 | yum: name={{ item }} update_cache=yes state=latest 44 | with_items: 45 | - kmod-drbd 46 | - drbd 47 | - drbdmanage 48 | - drbdtop 49 | - linbit-cluster-stack-corosync2 50 | 51 | # Add firewall rules for LINBIT cluster stack 52 | - import_tasks: tasks/linbit-firewall.yml 53 | 54 | # Initialize /dev/drbd0 55 | - import_tasks: tasks/linbit-drbd-init.yml 56 | 57 | # Initialize Pacemaker cluster 58 | - import_tasks: tasks/linbit-pacemaker-init.yml 59 | 60 | # Setup the specified cluster 61 | - import_tasks: tasks/linbit-nfs.yml 62 | when: cluster_type == "nfs" 63 | - import_tasks: tasks/linbit-iscsi.yml 64 | when: cluster_type == "iscsi" 65 | -------------------------------------------------------------------------------- /templates/iscsi-cib.txt: -------------------------------------------------------------------------------- 1 | primitive p_drbd_r0 ocf:linbit:drbd \ 2 | params drbd_resource=r0 \ 3 | op start interval=0s timeout=240 \ 4 | op promote interval=0s timeout=90 \ 5 | op demote interval=0s timeout=90 \ 6 | op stop interval=0s timeout=100 \ 7 | op monitor interval=29 role=Master \ 8 | op monitor interval=31 role=Slave 9 | ms ms_drbd_r0 p_drbd_r0 \ 10 | meta master-max=1 master-node-max=1 notify=true clone-max=2 clone-node-max=1 11 | primitive p_iscsi_target_0 iSCSITarget \ 12 | params iqn="iqn.2017-01.com.linbit:drbd0" implementation=lio-t portals="{{ cluster_vip }}:3260" \ 13 | op start interval=0 timeout=20 \ 14 | op stop interval=0 timeout=20 \ 15 | op monitor interval=20 timeout=40 16 | 
primitive p_iscsi_lun_0 iSCSILogicalUnit \ 17 | params target_iqn="iqn.2017-01.com.linbit:drbd0" implementation=lio-t scsi_sn=aaaaaaa0 lio_iblock=0 lun=0 path="/dev/drbd0" \ 18 | op start interval=0 timeout=20 \ 19 | op stop interval=0 timeout=20 \ 20 | op monitor interval=20 timeout=40 21 | primitive p_iscsi_portblock_on0 portblock \ 22 | params ip={{ cluster_vip }} portno=3260 protocol=tcp action=block \ 23 | op start timeout=20 interval=0 \ 24 | op stop timeout=20 interval=0 \ 25 | op monitor timeout=20 interval=20 26 | primitive p_iscsi_portblock_off0 portblock \ 27 | params ip={{ cluster_vip }} portno=3260 protocol=tcp action=unblock \ 28 | op start timeout=20 interval=0 \ 29 | op stop timeout=20 interval=0 \ 30 | op monitor timeout=20 interval=20 31 | primitive p_vip_ip0 IPaddr2 \ 32 | params ip={{ cluster_vip }} cidr_netmask={{ cluster_vip_cidr }} \ 33 | op start interval=0 timeout=20 \ 34 | op stop interval=0 timeout=20 \ 35 | op monitor interval=10s 36 | group g_iscsi0 p_iscsi_portblock_on0 p_vip_ip0 p_iscsi_target_0 p_iscsi_lun_0 p_iscsi_portblock_off0 37 | colocation cl_g_iscsi0-with-ms_drbd_r0 inf: g_iscsi0:Started ms_drbd_r0:Master 38 | order o_ms_drbd_r0-before-g_iscsi0 inf: ms_drbd_r0:promote g_iscsi0:start 39 | property cib-bootstrap-options: \ 40 | no-quorum-policy=ignore \ 41 | stonith-enabled=false \ 42 | start-failure-is-fatal=false 43 | rsc_defaults rsc-options: \ 44 | migration-threshold=3 \ 45 | failure-timeout=60s 46 | -------------------------------------------------------------------------------- /tasks/linbit-nfs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install NFS specific packages 3 | yum: name={{ item }} update_cache=yes state=latest 4 | with_items: 5 | - nfs-utils 6 | - rpcbind 7 | 8 | - name: place the cib import file on both nodes 9 | template: src=../templates/nfs-cib.txt dest=/root/linbit-cib.txt 10 | register: cib_file 11 | 12 | - name: stop all cluster resources 
for replacement 13 | run_once: true 14 | shell: crm configure property stop-all-resources=true >> /root/linbit-ans-cib-import.log 15 | when: cib_file.changed 16 | 17 | - name: wait for resources to stop 18 | pause: 19 | seconds: 40 20 | when: cib_file.changed 21 | 22 | - name: put the cluster into maintenance-mode 23 | run_once: true 24 | shell: crm configure property maintenance-mode=true >> /root/linbit-ans-cib-import.log 25 | when: cib_file.changed 26 | 27 | - name: check for DRBD device up 28 | command: bash -c "drbdadm cstate r0 | grep -q Connected; echo $?" 29 | register: drbd0_up 30 | 31 | - name: bring DRBD up in order to format 32 | shell: bash -c 'drbdadm up r0' >> /root/linbit-ans-drbd.log 33 | when: cib_file.changed and drbd0_up.stdout != "0" # registered stdout is a string, not an int 34 | 35 | - name: wait for DRBD to connect, then format it XFS 36 | shell: bash -c 'while [ $(drbdadm cstate r0) != "Connected" ]; do sleep 1s; done;' >> /root/linbit-ans-drbd.log 37 | when: cib_file.changed 38 | 39 | - name: create the filesystem if this is new 40 | run_once: true 41 | filesystem: 42 | fstype: xfs 43 | dev: /dev/drbd0 44 | force: no # never reformat an existing filesystem on re-runs; that would destroy the NFS data 45 | when: cib_file.changed 46 | 47 | - name: take DRBD down to give control back to the cluster 48 | shell: bash -c 'drbdadm down r0' >> /root/linbit-ans-drbd.log 49 | when: cib_file.changed 50 | 51 | - name: take the cluster out of maintenance-mode 52 | run_once: true 53 | shell: crm configure property maintenance-mode=false >> /root/linbit-ans-cib-import.log 54 | when: cib_file.changed 55 | 56 | - name: import the cib on one node 57 | run_once: true 58 | shell: crm configure load replace /root/linbit-cib.txt >> /root/linbit-ans-cib-import.log 59 | when: cib_file.changed 60 | 61 | - name: start cluster resources after replacement 62 | run_once: true 63 | shell: crm configure property stop-all-resources=false >> /root/linbit-ans-cib-import.log 64 | when: cib_file.changed 65 | 66 | - name: check if firewalld is running 67 | shell: systemctl status firewalld 68 | 
register: firewalld 69 | ignore_errors: true # systemctl exits non-zero when firewalld is stopped; rc is checked by the tasks below 70 | 71 | - name: allow NFS through firewalld 72 | firewalld: 73 | service: nfs 74 | permanent: true 75 | immediate: true 76 | state: enabled 77 | when: firewalld.rc == 0 78 | 79 | - name: allow mountd through firewalld 80 | firewalld: 81 | service: mountd 82 | permanent: true 83 | immediate: true 84 | state: enabled 85 | when: firewalld.rc == 0 86 | 87 | - name: allow rpc-bind through firewalld 88 | firewalld: 89 | service: rpc-bind 90 | permanent: true 91 | immediate: true 92 | state: enabled 93 | when: firewalld.rc == 0 94 | -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | This repository contains an Ansible playbook that will deploy LINBIT's 2 | cluster stack to a pair of nodes, which only need to have IP addresses and 3 | password-less SSH for root setup on a minimal CentOS 7 install, optionally 4 | with a common Pacemaker configuration (iscsi, nfs, postgres, etc.). 5 | 6 | This playbook requires credentials for https://my.linbit.com. Reach out to 7 | sales@linbit.com for more information or to setup an account. If you do 8 | reach out to sales@linbit.com, make sure to include which country or 9 | continent you are located in so that the sales people don't get confused ;) 10 | 11 | Quick start: 12 | ------------ 13 | * Install Ansible on your workstation. 14 | * Ready two nodes with two IP addresses and passwordless SSH for root user. 15 | * Each node must have two separate network interfaces; preferably 16 | the interface for DRBD replication is directly connected to the peer. 17 | * Each node must have an unused block device, separate from the root 18 | volume for DRBD to use as backing storage. 
19 | * Update ./linbit-hosts.ini for your environment: 20 | drbd_replication_ip: lower latency replication network 21 | mgmt_interface_ip: client/management network 22 | drbd_backing_disk: spare disk for backing DRBD 23 | cluster_vip: virtual IP that will move with the Primary 24 | cluster_vip_cidr: cidr netmask for the virtual IP 25 | * Add entries to /etc/hosts pointing linbit-ans-a and linbit-ans-b at your 26 | test nodes. Entries should read similar to the following: 27 | 192.168.7.150 linbit-ans-a 28 | 192.168.7.151 linbit-ans-b 29 | * Run the playbook: $ ansible-playbook linbit-go.yml 30 | TIP: If you don't want to be prompted for some/all of the vars, you 31 | can pass them in as extra arguments when you run the ansible-playbook 32 | using the following syntax (see linbit-go.yml for prompt names): 33 | $ ansible-playbook -e cluster_type="iscsi" -e lb_user="joeshmo" \ 34 | -e lb_pass="SuperSecret!" -e lb_con_id="101" \ 35 | -e lb_clu_id="4395" linbit-go.yml 36 | 37 | Files in this repo: 38 | ------------------- 39 | ansible.cfg: this is the configuration for Ansible itself. Ansible looks in 40 | the current working directory, home directory, and then finally /etc/, 41 | for its configuration. The location of the inventory file is all that 42 | should be defined. 43 | 44 | linbit-hosts.ini: this is the inventory file, where we define hosts and the 45 | groups they belong to. The hostnames are used by Ansible to run commands over 46 | SSH, so these entries must be resolvable by DNS. 
We MUST define the following: 47 | 48 | Node specific variables: 49 | drbd_replication_ip: each node's IP address for DRBD replication 50 | mgmt_interface_ip: each node's IP address for management network 51 | 52 | Cluster variables: 53 | ansible_ssh_user=root: we must run as root, so this must be set to root 54 | drbd_backing_disk: the disk to be used to back DRBD (data will be destroyed) 55 | cluster_vip: the virtual IP for cluster services (won't use if not needed) 56 | cluster_vip_cidr: the cidr subnet mask for the cluster_vip 57 | 58 | LINBIT contract variables (will be prompted for these vars from LINBIT): 59 | lb_user: http://my.linbit.com portal user 60 | lb_pass: http://my.linbit.com portal pass (should prompt for this) 61 | lb_con_id: http://my.linbit.com contract ID to register cluster to 62 | lb_clu_id: http://my.linbit.com cluster ID to register nodes to 63 | 64 | linbit-go.yml: this is the main playbook that we're going to call. It should 65 | execute all the "common" tasks, prompt user for any specific configurations, 66 | and then use logic to perform/skip the more specific tasks accordingly. 67 | 68 | tasks: this directory contains task lists. They're currently broken out, maybe 69 | too granularly, so they could be reused if needed. 70 | 71 | scripts: this is a directory for scripts that could be used to perform tasks. 72 | 73 | templates: this is a directory for templates used by tasks. Ansible has a 74 | 'template' plugin that uses the jinja templating language to populate variables 75 | from the Ansible environment. The following are template files used to setup 76 | the cluster's configurations. 77 | 78 | Notes: 79 | ------ 80 | I've been trying to create cluster configuration specific tasks that overwrite 81 | any previously deployed cluster configurations; for example, currently you could 82 | chose to deploy an NFS cluster, and then later deploy an iSCSI cluster over it. 
83 | This is probably not a good practice, and might be adding a lot of unneeded 84 | complexity to the cluster specific task lists. However, still trying for now ;) 85 | 86 | Currently, this is only designed for CentOS 7 targets. Supporting current 87 | Ubuntu, SLES, Debian, RHEL, and CentOS distros is a goal. 88 | 89 | Resources: 90 | ---------- 91 | https://docs.ansible.com/ansible/latest/ 92 | https://docs.linbit.com/docs/users-guide-9.0/ 93 | https://clusterlabs.org/doc/ 94 | --------------------------------------------------------------------------------